From 3252380922e69ff0182e66bfea2c8143bdbd1b9c Mon Sep 17 00:00:00 2001 From: Ibraheem Ahmed Date: Tue, 8 Feb 2022 17:16:04 -0500 Subject: [PATCH 01/29] move garbage collection to seize --- Cargo.toml | 5 +- src/iter/mod.rs | 32 +-- src/iter/traverser.rs | 95 +++++---- src/lib.rs | 24 +-- src/map.rs | 443 +++++++++++++++++++++-------------------- src/map_ref.rs | 6 +- src/node.rs | 239 +++++++++++++--------- src/raw/mod.rs | 75 ++++--- src/reclaim.rs | 186 +++++++++++++++++ src/set.rs | 44 ++-- src/set_ref.rs | 6 +- tests/basic_ref.rs | 9 +- tests/borrow.rs | 29 ++- tests/cuckoo/stress.rs | 40 ++-- tests/hasher.rs | 3 +- tests/jdk/map_check.rs | 21 +- tests/jsr166/main.rs | 9 +- tests/regressions.rs | 4 +- 18 files changed, 757 insertions(+), 513 deletions(-) create mode 100644 src/reclaim.rs diff --git a/Cargo.toml b/Cargo.toml index c09f35de..b673b48c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,15 +19,12 @@ azure-devops = { project = "jonhoo/jonhoo", pipeline = "flurry", build = "15" } codecov = { repository = "jonhoo/flurry", branch = "master", service = "github" } maintenance = { status = "experimental" } -[features] -sanitize = ['crossbeam-epoch/sanitize'] - [dependencies] -crossbeam-epoch = "0.8.2" parking_lot = "0.10" num_cpus = "1.12.0" rayon = {version = "1.3", optional = true} serde = {version = "1.0.105", optional = true} +seize = { git = "https://github.com/ibraheemdev/seize" } [dependencies.ahash] version = "0.3.2" diff --git a/src/iter/mod.rs b/src/iter/mod.rs index 3fa40566..22ce9c9d 100644 --- a/src/iter/mod.rs +++ b/src/iter/mod.rs @@ -1,7 +1,8 @@ mod traverser; +use seize::Linked; pub(crate) use traverser::NodeIter; -use crossbeam_epoch::Guard; +use crate::reclaim::Guard; use std::sync::atomic::Ordering; /// An iterator over a map's entries. @@ -10,12 +11,11 @@ use std::sync::atomic::Ordering; #[derive(Debug)] pub struct Iter<'g, K, V> { pub(crate) node_iter: NodeIter<'g, K, V>, - pub(crate) guard: &'g Guard, + pub(crate) guard: &'g Guard<'g>, } -impl<'g, K, V> Iterator for Iter<'g, K, V> { - type Item = (&'g K, &'g V); - fn next(&mut self) -> Option { +impl<'g, K, V> Iter<'g, K, V> { + pub(crate) fn next_internal(&mut self) -> Option<(&'g K, &'g Linked)> { let node = self.node_iter.next()?; let value = node.value.load(Ordering::SeqCst, self.guard); // safety: flurry does not drop or move until after guard drop @@ -24,6 +24,13 @@ impl<'g, K, V> Iterator for Iter<'g, K, V> { } } +impl<'g, K, V> Iterator for Iter<'g, K, V> { + type Item = (&'g K, &'g V); + fn next(&mut self) -> Option { + self.next_internal().map(|(k, v)| (k, &**v)) + } +} + /// An iterator over a map's keys. /// /// See [`HashMap::keys`](crate::HashMap::keys) for details. @@ -46,7 +53,7 @@ impl<'g, K, V> Iterator for Keys<'g, K, V> { #[derive(Debug)] pub struct Values<'g, K, V> { pub(crate) node_iter: NodeIter<'g, K, V>, - pub(crate) guard: &'g Guard, + pub(crate) guard: &'g Guard<'g>, } impl<'g, K, V> Iterator for Values<'g, K, V> { @@ -63,7 +70,6 @@ impl<'g, K, V> Iterator for Values<'g, K, V> { #[cfg(test)] mod tests { use crate::HashMap; - use crossbeam_epoch as epoch; use std::collections::HashSet; use std::iter::FromIterator; @@ -71,11 +77,11 @@ mod tests { fn iter() { let map = HashMap::::new(); - let guard = epoch::pin(); + let guard = map.guard(); map.insert(1, 42, &guard); map.insert(2, 84, &guard); - let guard = epoch::pin(); + let guard = map.guard(); assert_eq!( map.iter(&guard).collect::>(), HashSet::from_iter(vec![(&1, &42), (&2, &84)]) @@ -86,11 +92,11 @@ mod tests { fn keys() { let map = HashMap::::new(); - let guard = epoch::pin(); + let guard = map.guard(); map.insert(1, 42, &guard); map.insert(2, 84, &guard); - let guard = epoch::pin(); + let guard = map.guard(); assert_eq!( map.keys(&guard).collect::>(), HashSet::from_iter(vec![&1, &2]) @@ -101,10 +107,10 @@ mod tests { fn values() { let map = HashMap::::new(); - let mut guard = epoch::pin(); + let guard = map.guard(); map.insert(1, 42, &guard); map.insert(2, 84, &guard); - guard.repin(); + let guard = map.guard(); assert_eq!( map.values(&guard).collect::>(), diff --git a/src/iter/traverser.rs b/src/iter/traverser.rs index 80a58bb9..2bbfe495 100644 --- a/src/iter/traverser.rs +++ b/src/iter/traverser.rs @@ -1,12 +1,12 @@ use crate::node::{BinEntry, Node, TreeNode}; use crate::raw::Table; -use crossbeam_epoch::{Guard, Shared}; +use crate::reclaim::{Guard, Linked, Shared}; use std::sync::atomic::Ordering; #[derive(Debug)] pub(crate) struct NodeIter<'g, K, V> { /// Current table; update if resized - table: Option<&'g Table>, + table: Option<&'g Linked>>, stack: Option>>, spare: Option>>, @@ -26,11 +26,11 @@ pub(crate) struct NodeIter<'g, K, V> { /// Initial table size base_size: usize, - guard: &'g Guard, + guard: &'g Guard<'g>, } impl<'g, K, V> NodeIter<'g, K, V> { - pub(crate) fn new(table: Shared<'g, Table>, guard: &'g Guard) -> Self { + pub(crate) fn new(table: Shared<'g, Table>, guard: &'g Guard<'_>) -> Self { let (table, len) = if table.is_null() { (None, 0) } else { @@ -53,7 +53,7 @@ impl<'g, K, V> NodeIter<'g, K, V> { } } - fn push_state(&mut self, t: &'g Table, i: usize, n: usize) { + fn push_state(&mut self, t: &'g Linked>, i: usize, n: usize) { let mut s = self.spare.take(); if let Some(ref mut s) = s { self.spare = s.next.take(); @@ -121,11 +121,11 @@ impl<'g, K, V> Iterator for NodeIter<'g, K, V> { // inheritance (everything is a node), but we have to explicitly // check // safety: flurry does not drop or move until after guard drop - match unsafe { next.deref() } { - BinEntry::Node(node) => { + match **unsafe { next.deref() } { + BinEntry::Node(ref node) => { e = Some(node); } - BinEntry::TreeNode(tree_node) => { + BinEntry::TreeNode(ref tree_node) => { e = Some(&tree_node.node); } BinEntry::Moved => unreachable!("Nodes can only point to Nodes or TreeNodes"), @@ -156,7 +156,7 @@ impl<'g, K, V> Iterator for NodeIter<'g, K, V> { if !bin.is_null() { // safety: flurry does not drop or move until after guard drop let bin = unsafe { bin.deref() }; - match bin { + match **bin { BinEntry::Moved => { // recurse down into the target table // safety: same argument as for following Moved in Table::find @@ -166,10 +166,10 @@ impl<'g, K, V> Iterator for NodeIter<'g, K, V> { self.push_state(t, i, n); continue; } - BinEntry::Node(node) => { + BinEntry::Node(ref node) => { e = Some(node); } - BinEntry::Tree(tree_bin) => { + BinEntry::Tree(ref tree_bin) => { // since we want to iterate over all entries, TreeBins // are also traversed via the `next` pointers of their // contained node @@ -210,7 +210,7 @@ impl<'g, K, V> Iterator for NodeIter<'g, K, V> { struct TableStack<'g, K, V> { length: usize, index: usize, - table: &'g Table, + table: &'g Linked>, next: Option>>, } @@ -218,43 +218,47 @@ struct TableStack<'g, K, V> { mod tests { use super::*; use crate::raw::Table; - use crossbeam_epoch::{self as epoch, Atomic, Owned}; + use crate::reclaim::Atomic; use parking_lot::Mutex; #[test] fn iter_new() { - let guard = epoch::pin(); + let guard = unsafe { seize::Guard::unprotected() }; let iter = NodeIter::::new(Shared::null(), &guard); assert_eq!(iter.count(), 0); } #[test] fn iter_empty() { - let table = Owned::new(Table::::new(16)); - let guard = epoch::pin(); - let table = table.into_shared(&guard); + let collector = seize::Collector::new(); + + let table = Shared::boxed(Table::::new(16, &collector), &collector); + let guard = collector.enter(); let iter = NodeIter::new(table, &guard); assert_eq!(iter.count(), 0); // safety: nothing holds on to references into the table any more - let mut t = unsafe { table.into_owned() }; + let mut t = unsafe { table.into_box() }; t.drop_bins(); } #[test] fn iter_simple() { + let collector = seize::Collector::new(); let mut bins = vec![Atomic::null(); 16]; - bins[8] = Atomic::new(BinEntry::Node(Node { - hash: 0, - key: 0usize, - value: Atomic::new(0usize), - next: Atomic::null(), - lock: Mutex::new(()), - })); - - let table = Owned::new(Table::from(bins)); - let guard = epoch::pin(); - let table = table.into_shared(&guard); + bins[8] = Atomic::from(Shared::boxed( + BinEntry::Node(Node { + hash: 0, + key: 0usize, + value: Atomic::from(Shared::boxed(0usize, &collector)), + next: Atomic::null(), + lock: Mutex::new(()), + }), + &collector, + )); + + let table = Shared::boxed(Table::from(bins, &collector), &collector); + let guard = collector.enter(); { let mut iter = NodeIter::new(table, &guard); let e = iter.next().unwrap(); @@ -263,27 +267,32 @@ mod tests { } // safety: nothing holds on to references into the table any more - let mut t = unsafe { table.into_owned() }; + let mut t = unsafe { table.into_box() }; t.drop_bins(); } #[test] fn iter_fw() { // construct the forwarded-to table + let collector = seize::Collector::new(); let mut deep_bins = vec![Atomic::null(); 16]; - deep_bins[8] = Atomic::new(BinEntry::Node(Node { - hash: 0, - key: 0usize, - value: Atomic::new(0usize), - next: Atomic::null(), - lock: Mutex::new(()), - })); - let guard = epoch::pin(); - let deep_table = Owned::new(Table::from(deep_bins)).into_shared(&guard); + deep_bins[8] = Atomic::from(Shared::boxed( + BinEntry::Node(Node { + hash: 0, + key: 0usize, + value: Atomic::from(Shared::boxed(0usize, &collector)), + next: Atomic::null(), + lock: Mutex::new(()), + }), + &collector, + )); + + let guard = collector.enter(); + let deep_table = Shared::boxed(Table::from(deep_bins, &collector), &collector); // construct the forwarded-from table let mut bins = vec![Shared::null(); 16]; - let table = Table::::new(bins.len()); + let table = Table::::new(bins.len(), &collector); for bin in &mut bins[8..] { // this also sets table.next_table to deep_table *bin = table.get_moved(deep_table, &guard); @@ -293,7 +302,7 @@ mod tests { for i in 0..bins.len() { table.store_bin(i, bins[i]); } - let table = Owned::new(table).into_shared(&guard); + let table = Shared::boxed(table, &collector); { let mut iter = NodeIter::new(table, &guard); let e = iter.next().unwrap(); @@ -302,9 +311,9 @@ mod tests { } // safety: nothing holds on to references into the table any more - let mut t = unsafe { table.into_owned() }; + let mut t = unsafe { table.into_box() }; t.drop_bins(); // no one besides this test case uses deep_table - unsafe { deep_table.into_owned() }.drop_bins(); + unsafe { deep_table.into_box() }.drop_bins(); } } diff --git a/src/lib.rs b/src/lib.rs index 5411acc6..145eb769 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -242,13 +242,12 @@ )] #![warn(rust_2018_idioms)] #![allow(clippy::cognitive_complexity)] -use crossbeam_epoch::Guard; -use std::ops::Deref; mod map; mod map_ref; mod node; mod raw; +mod reclaim; mod set; mod set_ref; @@ -269,23 +268,4 @@ pub use set_ref::HashSetRef; /// Default hasher for [`HashMap`]. pub type DefaultHashBuilder = ahash::RandomState; -/// Types needed to safely access shared data concurrently. -pub mod epoch { - pub use crossbeam_epoch::{pin, Guard}; -} - -pub(crate) enum GuardRef<'g> { - Owned(Guard), - Ref(&'g Guard), -} - -impl Deref for GuardRef<'_> { - type Target = Guard; - - #[inline] - fn deref(&self) -> &Guard { - match *self { - GuardRef::Owned(ref guard) | GuardRef::Ref(&ref guard) => guard, - } - } -} +pub use seize::Guard; diff --git a/src/map.rs b/src/map.rs index c123b191..912ea387 100644 --- a/src/map.rs +++ b/src/map.rs @@ -1,7 +1,9 @@ +use seize::Linked; + use crate::iter::*; use crate::node::*; use crate::raw::*; -use crossbeam_epoch::{self as epoch, Atomic, Guard, Owned, Shared}; +use crate::reclaim::{self, Atomic, Collector, Guard, RetireShared, Shared}; use std::borrow::Borrow; use std::error::Error; use std::fmt::{self, Debug, Display, Formatter}; @@ -108,16 +110,14 @@ pub struct HashMap { /// /// ```rust,should_panic /// # use flurry::HashMap; - /// # use crossbeam_epoch; /// let map: HashMap<_, _> = HashMap::default(); - /// map.insert(42, String::from("hello"), &crossbeam_epoch::pin()); + /// map.insert(42, String::from("hello"), &map.guard()); /// - /// let evil = crossbeam_epoch::Collector::new(); - /// let evil = evil.register(); - /// let guard = evil.pin(); + /// let evil = seize::Collector::new(); + /// let guard = evil.enter(); /// let oops = map.get(&42, &guard); /// - /// map.remove(&42, &crossbeam_epoch::pin()); + /// map.remove(&42, &map.guard()); /// // at this point, the default collector is allowed to free `"hello"` /// // since no-one has the global epoch pinned as far as it is aware. /// // `oops` is tied to the lifetime of a Guard that is not a part of @@ -141,12 +141,12 @@ pub struct HashMap { /// It would, sadly, mean that we don't get to share a collector with other things that use /// `crossbeam-epoch` though. For more on this (and a cool optimization), see: /// https://github.com/crossbeam-rs/crossbeam/blob/ebecb82c740a1b3d9d10f235387848f7e3fa9c68/crossbeam-skiplist/src/base.rs#L308-L319 - collector: epoch::Collector, + collector: Collector, build_hasher: S, } -#[derive(Eq, PartialEq, Clone, Debug)] +#[derive(Eq, PartialEq, Debug)] enum PutResult<'a, T> { Inserted { new: &'a T, @@ -157,7 +157,7 @@ enum PutResult<'a, T> { }, Exists { current: &'a T, - not_inserted: Box, + not_inserted: Box>, }, } @@ -295,7 +295,7 @@ impl HashMap { count: AtomicIsize::new(0), size_ctl: AtomicIsize::new(0), build_hasher: hash_builder, - collector: epoch::default_collector().clone(), + collector: Collector::new(), } } @@ -329,11 +329,6 @@ impl HashMap { map } - /* - NOTE: This method is intentionally left out atm as it is a potentially large foot-gun. - See https://github.com/jonhoo/flurry/pull/49#issuecomment-580514518. - */ - /* /// Associate a custom [`epoch::Collector`] with this map. /// /// By default, the global collector is used. With this method you can use a different @@ -343,33 +338,26 @@ impl HashMap { /// Note that _all_ `Guard` references provided to access the returned map _must_ be /// constructed using guards produced by `collector`. You can use [`HashMap::register`] to get /// a thread-local handle to the collector that then lets you construct an [`epoch::Guard`]. - pub fn with_collector(mut self, collector: epoch::Collector) -> Self { + pub fn with_collector(mut self, collector: Collector) -> Self { self.collector = collector; self } - /// Allocate a thread-local handle to the [`epoch::Collector`] associated with this map. - /// - /// You can use the returned handle to produce [`epoch::Guard`] references. - pub fn register(&self) -> epoch::LocalHandle { - self.collector.register() - } - */ - /// Pin a `Guard` for use with this map. /// /// Keep in mind that for as long as you hold onto this `Guard`, you are preventing the /// collection of garbage generated by the map. - pub fn guard(&self) -> epoch::Guard { - self.collector.register().pin() + pub fn guard(&self) -> Guard<'_> { + self.collector.enter() } #[inline] - fn check_guard(&self, guard: &Guard) { - // guard.collector() may be `None` if it is unprotected - if let Some(c) = guard.collector() { - assert_eq!(c, &self.collector); - } + fn check_guard(&self, _guard: &Guard<'_>) { + // TODO + // // guard.collector() may be `None` if it is unprotected + // if let Some(c) = guard.collector() { + // assert_eq!(c, &self.collector); + // } } /// Returns the number of entries in the map. @@ -412,7 +400,7 @@ impl HashMap { #[cfg(test)] /// Returns the capacity of the map. - fn capacity(&self, guard: &Guard) -> usize { + fn capacity(&self, guard: &Guard<'_>) -> usize { self.check_guard(guard); let table = self.table.load(Ordering::Relaxed, &guard); @@ -434,7 +422,7 @@ impl HashMap { /// An iterator visiting all key-value pairs in arbitrary order. /// /// The iterator element type is `(&'g K, &'g V)`. - pub fn iter<'g>(&'g self, guard: &'g Guard) -> Iter<'g, K, V> { + pub fn iter<'g>(&'g self, guard: &'g Guard<'_>) -> Iter<'g, K, V> { self.check_guard(guard); let table = self.table.load(Ordering::SeqCst, guard); let node_iter = NodeIter::new(table, guard); @@ -444,7 +432,7 @@ impl HashMap { /// An iterator visiting all keys in arbitrary order. /// /// The iterator element type is `&'g K`. - pub fn keys<'g>(&'g self, guard: &'g Guard) -> Keys<'g, K, V> { + pub fn keys<'g>(&'g self, guard: &'g Guard<'_>) -> Keys<'g, K, V> { self.check_guard(guard); let table = self.table.load(Ordering::SeqCst, guard); let node_iter = NodeIter::new(table, guard); @@ -454,14 +442,14 @@ impl HashMap { /// An iterator visiting all values in arbitrary order. /// /// The iterator element type is `&'g V`. - pub fn values<'g>(&'g self, guard: &'g Guard) -> Values<'g, K, V> { + pub fn values<'g>(&'g self, guard: &'g Guard<'_>) -> Values<'g, K, V> { self.check_guard(guard); let table = self.table.load(Ordering::SeqCst, guard); let node_iter = NodeIter::new(table, guard); Values { node_iter, guard } } - fn init_table<'g>(&'g self, guard: &'g Guard) -> Shared<'g, Table> { + fn init_table<'g>(&'g self, guard: &'g Guard<'_>) -> Shared<'g, Table> { loop { let table = self.table.load(Ordering::SeqCst, guard); // safety: we loaded the table while epoch was pinned. table won't be deallocated until @@ -493,8 +481,7 @@ impl HashMap { } else { DEFAULT_CAPACITY }; - let new_table = Owned::new(Table::new(n)); - table = new_table.into_shared(guard); + table = Shared::boxed(Table::new(n, &self.collector), &self.collector); self.table.store(table, Ordering::SeqCst); sc = load_factor!(n as isize) } @@ -512,7 +499,7 @@ impl HashMap { // safety: we are creating this map, so no other thread can access it, // while we are initializing it. - let guard = unsafe { epoch::unprotected() }; + let guard = unsafe { reclaim::unprotected() }; let requested_capacity = if size >= MAXIMUM_CAPACITY / 2 { MAXIMUM_CAPACITY @@ -532,7 +519,10 @@ impl HashMap { // with as many bins as were requested // create a table with `new_capacity` empty bins - let new_table = Owned::new(Table::new(requested_capacity)).into_shared(guard); + let new_table = Shared::boxed( + Table::new(requested_capacity, &self.collector), + &self.collector, + ); // store the new table to `self.table` self.table.store(new_table, Ordering::SeqCst); @@ -559,7 +549,7 @@ where K: Clone + Ord, { /// Tries to presize table to accommodate the given number of elements. - fn try_presize(&self, size: usize, guard: &Guard) { + fn try_presize(&self, size: usize, guard: &Guard<'_>) { let requested_capacity = if size >= MAXIMUM_CAPACITY / 2 { MAXIMUM_CAPACITY } else { @@ -618,7 +608,8 @@ where } // create a table with `new_capacity` empty bins - let new_table = Owned::new(Table::new(new_capacity)).into_shared(guard); + let new_table = + Shared::boxed(Table::new(new_capacity, &self.collector), &self.collector); // store the new table to `self.table` let old_table = self.table.swap(new_table, Ordering::SeqCst, guard); @@ -675,7 +666,7 @@ where &'g self, table: Shared<'g, Table>, mut next_table: Shared<'g, Table>, - guard: &'g Guard, + guard: &'g Guard<'_>, ) { // safety: table was read while `guard` was held. the code that drops table only drops it // after it is no longer reachable, and any outstanding references are no longer active. @@ -689,7 +680,7 @@ where if next_table.is_null() { // we are initiating a resize - let table = Owned::new(Table::new(n << 1)); + let table = Shared::boxed(Table::new(n << 1, &self.collector), &self.collector); let now_garbage = self.next_table.swap(table, Ordering::SeqCst, guard); assert!(now_garbage.is_null()); self.transfer_index.store(n as isize, Ordering::SeqCst); @@ -769,7 +760,7 @@ where // guard (since our guard is pinning the epoch). since the garbage is placed in // our epoch, it won't be freed until the _next_ epoch, at which point, that // thread must have dropped its guard, and with it, any reference to the value. - unsafe { guard.defer_destroy(now_garbage) }; + unsafe { guard.retire_shared(now_garbage) }; self.size_ctl .store(((n as isize) << 1) - ((n as isize) >> 1), Ordering::SeqCst); return; @@ -832,7 +823,7 @@ where // swap happened, it must have happened _after_ we read. since we did the read while // pinning the epoch, the drop must happen in the _next_ epoch (i.e., the one that we // are holding up by holding on to our guard). - match *unsafe { bin.deref() } { + match **unsafe { bin.deref() } { BinEntry::Moved => { // already processed advance = true; @@ -843,7 +834,7 @@ where // need to check that this is _still_ the head let current_head = table.bin(i, guard); - if current_head.as_raw() != bin.as_raw() { + if current_head != bin { // nope -- try again from the start continue; } @@ -910,13 +901,15 @@ where &mut high_bin }; - *link = Owned::new(BinEntry::Node(Node::with_next( - node.hash, - node.key.clone(), - node.value.clone(), - Atomic::from(*link), - ))) - .into_shared(guard); + *link = Shared::boxed( + BinEntry::Node(Node::with_next( + node.hash, + node.key.clone(), + node.value.clone(), + Atomic::from(*link), + )), + &self.collector, + ); p = node.next.load(Ordering::SeqCst, guard); } @@ -948,7 +941,7 @@ where .unwrap() .next .load(Ordering::SeqCst, guard); - unsafe { guard.defer_destroy(p) }; + unsafe { guard.retire_shared(p) }; p = next; } @@ -992,7 +985,7 @@ where if run_bit == 0 { new_node.prev.store(low_tail, Ordering::Relaxed); let new_node = - Owned::new(BinEntry::TreeNode(new_node)).into_shared(guard); + Shared::boxed(BinEntry::TreeNode(new_node), &self.collector); if low_tail.is_null() { // this is the first element inserted into the low bin low = new_node; @@ -1009,7 +1002,7 @@ where } else { new_node.prev.store(high_tail, Ordering::Relaxed); let new_node = - Owned::new(BinEntry::TreeNode(new_node)).into_shared(guard); + Shared::boxed(BinEntry::TreeNode(new_node), &self.collector); if high_tail.is_null() { // this is the first element inserted into the high bin high = new_node; @@ -1033,7 +1026,7 @@ where // bin is too small. since the tree nodes are // already behind shared references, we have to // clean them up manually. - let low_linear = Self::untreeify(low, guard); + let low_linear = self.untreeify(low, guard); // safety: we have just created `low` and its `next` // nodes and have never shared them unsafe { TreeBin::drop_tree_nodes(low, false, guard) }; @@ -1042,13 +1035,14 @@ where // the new bin will also be a tree bin. if both the high // bin and the low bin are non-empty, we have to // allocate a new TreeBin. - Owned::new(BinEntry::Tree(TreeBin::new( - // safety: we have just created `low` and its `next` - // nodes and have never shared them - unsafe { low.into_owned() }, - guard, - ))) - .into_shared(guard) + Shared::boxed( + BinEntry::Tree(TreeBin::new( + // safety: we have just created `low` and its `next` + // nodes and have never shared them + low, guard, + )), + &self.collector, + ) } else { // if not, we can re-use the old bin here, since it will // be swapped for a Moved entry while we are still @@ -1062,19 +1056,20 @@ where bin }; let high_bin = if high_count <= UNTREEIFY_THRESHOLD { - let high_linear = Self::untreeify(high, guard); + let high_linear = self.untreeify(high, guard); // safety: we have just created `high` and its `next` // nodes and have never shared them unsafe { TreeBin::drop_tree_nodes(high, false, guard) }; high_linear } else if low_count != 0 { - Owned::new(BinEntry::Tree(TreeBin::new( - // safety: we have just created `high` and its `next` - // nodes and have never shared them - unsafe { high.into_owned() }, - guard, - ))) - .into_shared(guard) + Shared::boxed( + BinEntry::Tree(TreeBin::new( + // safety: we have just created `high` and its `next` + // nodes and have never shared them + high, guard, + )), + &self.collector, + ) } else { reused_bin = true; // since we also don't use the created low nodes here, @@ -1118,7 +1113,7 @@ where fn help_transfer<'g>( &'g self, table: Shared<'g, Table>, - guard: &'g Guard, + guard: &'g Guard<'_>, ) -> Shared<'g, Table> { if table.is_null() { return table; @@ -1160,7 +1155,7 @@ where next_table } - fn add_count(&self, n: isize, resize_hint: Option, guard: &Guard) { + fn add_count(&self, n: isize, resize_hint: Option, guard: &Guard<'_>) { // TODO: implement the Java CounterCell business here use std::cmp; @@ -1259,7 +1254,7 @@ where /// /// Reserving does not panic in flurry. If the new size is invalid, no /// reallocation takes place. - pub fn reserve(&self, additional: usize, guard: &Guard) { + pub fn reserve(&self, additional: usize, guard: &Guard<'_>) { self.check_guard(guard); let absolute = self.len() + additional; self.try_presize(absolute, guard); @@ -1283,7 +1278,7 @@ where h.finish() } - fn get_node<'g, Q>(&'g self, key: &Q, guard: &'g Guard) -> Option<&'g Node> + fn get_node<'g, Q>(&'g self, key: &Q, guard: &'g Guard<'_>) -> Option<&'g Node> where K: Borrow, Q: ?Sized + Hash + Ord, @@ -1332,7 +1327,7 @@ where // next epoch after it is removed. since it wasn't removed, and the epoch was pinned, that // cannot be until after we drop our guard. let node = unsafe { node.deref() }; - Some(match node { + Some(match **node { BinEntry::Node(ref n) => n, BinEntry::TreeNode(ref tn) => &tn.node, _ => panic!("`Table::find` should always return a Node"), @@ -1359,7 +1354,7 @@ where /// assert_eq!(mref.contains_key(&1), true); /// assert_eq!(mref.contains_key(&2), false); /// ``` - pub fn contains_key(&self, key: &Q, guard: &Guard) -> bool + pub fn contains_key(&self, key: &Q, guard: &Guard<'_>) -> bool where K: Borrow, Q: ?Sized + Hash + Ord, @@ -1391,7 +1386,7 @@ where /// assert_eq!(mref.get(&2), None); /// ``` #[inline] - pub fn get<'g, Q>(&'g self, key: &Q, guard: &'g Guard) -> Option<&'g V> + pub fn get<'g, Q>(&'g self, key: &Q, guard: &'g Guard<'_>) -> Option<&'g V> where K: Borrow, Q: ?Sized + Hash + Ord, @@ -1404,7 +1399,7 @@ where // safety: the lifetime of the reference is bound to the guard // supplied which means that the memory will not be modified // until at least after the guard goes out of scope - unsafe { v.as_ref() } + unsafe { v.as_ref().map(|linked| &**linked) } } /// Returns the key-value pair corresponding to `key`. @@ -1418,7 +1413,7 @@ where /// [`Ord`]: std::cmp::Ord /// [`Hash`]: std::hash::Hash #[inline] - pub fn get_key_value<'g, Q>(&'g self, key: &Q, guard: &'g Guard) -> Option<(&'g K, &'g V)> + pub fn get_key_value<'g, Q>(&'g self, key: &Q, guard: &'g Guard<'_>) -> Option<(&'g K, &'g V)> where K: Borrow, Q: ?Sized + Hash + Ord, @@ -1431,10 +1426,15 @@ where // safety: the lifetime of the reference is bound to the guard // supplied which means that the memory will not be modified // until at least after the guard goes out of scope - unsafe { v.as_ref() }.map(|v| (&node.key, v)) + unsafe { v.as_ref() }.map(|v| (&node.key, &**v)) } - pub(crate) fn guarded_eq(&self, other: &Self, our_guard: &Guard, their_guard: &Guard) -> bool + pub(crate) fn guarded_eq( + &self, + other: &Self, + our_guard: &Guard<'_>, + their_guard: &Guard<'_>, + ) -> bool where V: PartialEq, { @@ -1469,7 +1469,7 @@ where /// map.pin().clear(); /// assert!(map.pin().is_empty()); /// ``` - pub fn clear(&self, guard: &Guard) { + pub fn clear(&self, guard: &Guard<'_>) { // Negative number of deletions let mut delta = 0; let mut idx = 0usize; @@ -1485,7 +1485,7 @@ where } // Safety: node is a valid pointer because we checked // it in the above if stmt. - match unsafe { raw_node.deref() } { + match **unsafe { raw_node.deref() } { BinEntry::Moved => { table = self.help_transfer(table, guard); // start from the first bin again in the new table @@ -1527,10 +1527,10 @@ where // into it above. it must also have pinned the epoch before that time. therefore, the // defer_destroy below won't be executed until that thread's guard is dropped, at which // point it holds no outstanding references to the value anyway. - unsafe { guard.defer_destroy(value) }; + unsafe { guard.retire_shared(value) }; // free the bin entry itself // safety: same argument as for value above. - unsafe { guard.defer_destroy(p) }; + unsafe { guard.retire_shared(p) }; next }; } @@ -1538,8 +1538,8 @@ where let value = node.value.load(Ordering::SeqCst, guard); // NOTE: do not use the reference in `node` after this point! // safety: same as the argument for being allowed to free the nodes beyond the head above - unsafe { guard.defer_destroy(value) }; - unsafe { guard.defer_destroy(raw_node) }; + unsafe { guard.retire_shared(value) }; + unsafe { guard.retire_shared(raw_node) }; delta -= 1; idx += 1; } @@ -1580,7 +1580,7 @@ where }; } // safety: same as in the BinEntry::Node case above - unsafe { guard.defer_destroy(raw_node) }; + unsafe { guard.retire_shared(raw_node) }; idx += 1; } BinEntry::TreeNode(_) => unreachable!( @@ -1633,7 +1633,7 @@ where /// assert_eq!(mref.insert(37, "c"), Some(&"b")); /// assert_eq!(mref.get(&37), Some(&"c")); /// ``` - pub fn insert<'g>(&'g self, key: K, value: V, guard: &'g Guard) -> Option<&'g V> { + pub fn insert<'g>(&'g self, key: K, value: V, guard: &'g Guard<'_>) -> Option<&'g V> { self.check_guard(guard); self.put(key, value, false, guard).before() } @@ -1670,7 +1670,7 @@ where &'g self, key: K, value: V, - guard: &'g Guard, + guard: &'g Guard<'_>, ) -> Result<&'g V, TryInsertError<'g, V>> { match self.put(key, value, true, guard) { PutResult::Exists { @@ -1678,7 +1678,7 @@ where not_inserted, } => Err(TryInsertError { current, - not_inserted: *not_inserted, + not_inserted: Linked::into_inner(*not_inserted), }), PutResult::Inserted { new } => Ok(new), PutResult::Replaced { .. } => { @@ -1692,12 +1692,12 @@ where mut key: K, value: V, no_replacement: bool, - guard: &'g Guard, + guard: &'g Guard<'_>, ) -> PutResult<'g, V> { let hash = self.hash(&key); let mut table = self.table.load(Ordering::SeqCst, guard); let mut bin_count; - let value = Owned::new(value).into_shared(guard); + let value = Shared::boxed(value, &self.collector); let mut old_val = None; loop { // safety: see argument below for !is_null case @@ -1729,11 +1729,11 @@ where let mut bin = t.bin(bini, guard); if bin.is_null() { // fast path -- bin is empty so stick us at the front - let node = Owned::new(BinEntry::Node(Node::new(hash, key, value))); + let node = + Shared::boxed(BinEntry::Node(Node::new(hash, key, value)), &self.collector); match t.cas_bin(bini, bin, node, guard) { Ok(_old_null_ptr) => { self.add_count(1, Some(0), guard); - guard.flush(); // safety: we have not moved the node's value since we placed it into // its `Atomic` in the very beginning of the method, so the ref is still // valid. since the value is not currently marked as garbage, we know it @@ -1747,7 +1747,9 @@ where Err(changed) => { assert!(!changed.current.is_null()); bin = changed.current; - if let BinEntry::Node(node) = *changed.new.into_box() { + if let BinEntry::Node(node) = + Linked::into_inner(*unsafe { changed.new.into_box() }) + { key = node.key; } else { unreachable!("we declared node and it is a BinEntry::Node"); @@ -1775,7 +1777,7 @@ where // swap happened, it must have happened _after_ we read. since we did the read while // pinning the epoch, the drop must happen in the _next_ epoch (i.e., the one that we // are holding up by holding on to our guard). - match *unsafe { bin.deref() } { + match **unsafe { bin.deref() } { BinEntry::Moved => { table = self.help_transfer(table, guard); continue; @@ -1792,7 +1794,7 @@ where // is the last remaining pointer to the initial value. return PutResult::Exists { current: unsafe { v.deref() }, - not_inserted: unsafe { value.into_owned().into_box() }, + not_inserted: unsafe { value.into_box() }, }; } BinEntry::Node(ref head) => { @@ -1836,7 +1838,7 @@ where // safety: we own value and did not share it return PutResult::Exists { current: current_value, - not_inserted: unsafe { value.into_owned().into_box() }, + not_inserted: unsafe { value.into_box() }, }; } else { // update the value in the existing node @@ -1862,7 +1864,7 @@ where // no other ways to get to a value except through its Node's // `value` field (which is what we swapped), so freeing // now_garbage is fine. - unsafe { guard.defer_destroy(now_garbage) }; + unsafe { guard.retire_shared(now_garbage) }; } break Some(current_value); } @@ -1871,7 +1873,10 @@ where let next = n.next.load(Ordering::SeqCst, guard); if next.is_null() { // we're at the end of the bin -- stick the node here! - let node = Owned::new(BinEntry::Node(Node::new(hash, key, value))); + let node = Shared::boxed( + BinEntry::Node(Node::new(hash, key, value)), + &self.collector, + ); n.next.store(node, Ordering::SeqCst); break None; } @@ -1900,7 +1905,7 @@ where // we don't actually count bins, just set this low enough // that we don't try to treeify the bin later bin_count = 2; - let p = tree_bin.find_or_put_tree_val(hash, key, value, guard); + let p = tree_bin.find_or_put_tree_val(hash, key, value, guard, &self.collector); if p.is_null() { // no TreeNode was returned, so the key did not previously exist in the // TreeBin. This means it was successfully put there by the call above @@ -1926,7 +1931,7 @@ where // safety: we own value and did not share it return PutResult::Exists { current: current_value, - not_inserted: unsafe { value.into_owned().into_box() }, + not_inserted: unsafe { value.into_box() }, }; } else { let now_garbage = @@ -1952,7 +1957,7 @@ where // no other ways to get to a value except through its Node's // `value` field (which is what we swapped), so freeing // now_garbage is fine. - unsafe { guard.defer_destroy(now_garbage) }; + unsafe { guard.retire_shared(now_garbage) }; } Some(current_value) }; @@ -1987,7 +1992,6 @@ where // increment count, since we only get here if we did not return an old (updated) value debug_assert!(old_val.is_none()); self.add_count(1, Some(bin_count), guard); - guard.flush(); PutResult::Inserted { // safety: we have not moved the node's value since we placed it into its // `Atomic` in the very beginning of the method, so the ref is still valid. @@ -1999,7 +2003,7 @@ where } } - fn put_all>(&self, iter: I, guard: &Guard) { + fn put_all>(&self, iter: I, guard: &Guard<'_>) { for (key, value) in iter { self.put(key, value, false, guard); } @@ -2030,7 +2034,7 @@ where &'g self, key: &Q, remapping_function: F, - guard: &'g Guard, + guard: &'g Guard<'_>, ) -> Option<&'g V> where K: Borrow, @@ -2095,7 +2099,7 @@ where // swap happened, it must have happened _after_ we read. since we did the read while // pinning the epoch, the drop must happen in the _next_ epoch (i.e., the one that we // are holding up by holding on to our guard). - match *unsafe { bin.deref() } { + match **unsafe { bin.deref() } { BinEntry::Moved => { table = self.help_transfer(table, guard); continue; @@ -2138,7 +2142,7 @@ where remapping_function(&n.key, unsafe { current_value.deref() }); if let Some(value) = new_value { - let value = Owned::new(value).into_shared(guard); + let value = Shared::boxed(value, &self.collector); let now_garbage = n.value.swap(value, Ordering::SeqCst, guard); // NOTE: now_garbage == current_value @@ -2161,7 +2165,7 @@ where // no other ways to get to a value except through its Node's // `value` field (which is what we swapped), so freeing // now_garbage is fine. - unsafe { guard.defer_destroy(now_garbage) }; + unsafe { guard.retire_shared(now_garbage) }; // safety: since the value is present now, and we've held a guard from // the beginning of the search, the value cannot be dropped until the @@ -2202,8 +2206,8 @@ where // reference to the old value. there are no other ways to get to a // value except through its Node's `value` field (which is now gone // together with the node), so freeing the old value is fine. - unsafe { guard.defer_destroy(p) }; - unsafe { guard.defer_destroy(current_value) }; + unsafe { guard.retire_shared(p) }; + unsafe { guard.retire_shared(current_value) }; break None; } } @@ -2266,7 +2270,7 @@ where remapping_function(&n.key, unsafe { current_value.deref() }); if let Some(value) = new_value { - let value = Owned::new(value).into_shared(guard); + let value = Shared::boxed(value, &self.collector); let now_garbage = n.value.swap(value, Ordering::SeqCst, guard); // NOTE: now_garbage == current_value @@ -2289,7 +2293,7 @@ where // no other ways to get to a value except through its Node's // `value` field (which is what we swapped), so freeing // now_garbage is fine. - unsafe { guard.defer_destroy(now_garbage) }; + unsafe { guard.retire_shared(now_garbage) }; // safety: since the value is present now, and we've held a guard from // the beginning of the search, the value cannot be dropped until the // next epoch, which won't arrive until after we drop our guard. @@ -2302,10 +2306,11 @@ where // directly, or we will `need_to_untreeify`. In the latter case, we `defer_destroy` // both `p` and its value below, after storing the linear bin. Thus, everything is // always marked for garbage collection _after_ it becomes unaccessible by other threads. - let need_to_untreeify = - unsafe { tree_bin.remove_tree_node(p, true, guard) }; + let need_to_untreeify = unsafe { + tree_bin.remove_tree_node(p, true, guard, &self.collector) + }; if need_to_untreeify { - let linear_bin = Self::untreeify( + let linear_bin = self.untreeify( tree_bin.first.load(Ordering::SeqCst, guard), guard, ); @@ -2324,8 +2329,8 @@ where // be untreeified. unsafe { TreeBin::defer_drop_without_values(bin, guard); - guard.defer_destroy(p); - guard.defer_destroy(current_value); + guard.retire_shared(p); + guard.retire_shared(current_value); } } None @@ -2350,8 +2355,7 @@ where // decrement count self.add_count(-1, Some(bin_count), guard); } - guard.flush(); - new_val + new_val.map(|linked| &**linked) } /// Removes a key-value pair from the map, and returns the removed value (if any). @@ -2373,7 +2377,7 @@ where /// assert_eq!(map.pin().remove(&1), Some(&"a")); /// assert_eq!(map.pin().remove(&1), None); /// ``` - pub fn remove<'g, Q>(&'g self, key: &Q, guard: &'g Guard) -> Option<&'g V> + pub fn remove<'g, Q>(&'g self, key: &Q, guard: &'g Guard<'_>) -> Option<&'g V> where K: Borrow, Q: ?Sized + Hash + Ord, @@ -2406,7 +2410,7 @@ where /// assert_eq!(map.remove_entry(&1, &guard), Some((&1, &"a"))); /// assert_eq!(map.remove(&1, &guard), None); /// ``` - pub fn remove_entry<'g, Q>(&'g self, key: &Q, guard: &'g Guard) -> Option<(&'g K, &'g V)> + pub fn remove_entry<'g, Q>(&'g self, key: &Q, guard: &'g Guard<'_>) -> Option<(&'g K, &'g V)> where K: Borrow, Q: ?Sized + Hash + Ord, @@ -2437,7 +2441,7 @@ where key: &Q, new_value: Option, observed_value: Option>, - guard: &'g Guard, + guard: &'g Guard<'_>, ) -> Option<(&'g K, &'g V)> where K: Borrow, @@ -2492,7 +2496,7 @@ where // swap happened, it must have happened _after_ we read. since we did the read while // pinning the epoch, the drop must happen in the _next_ epoch (i.e., the one that we // are holding up by holding on to our guard). - match *unsafe { bin.deref() } { + match **unsafe { bin.deref() } { BinEntry::Moved => { table = self.help_transfer(table, guard); continue; @@ -2528,7 +2532,10 @@ where // found the node but we have a new value to replace the old one if let Some(nv) = new_value { - n.value.store(Owned::new(nv), Ordering::SeqCst); + n.value.store( + Shared::boxed(nv, &self.collector), + Ordering::SeqCst, + ); // we are just replacing entry value and we do not want to remove the node // so we stop iterating here break; @@ -2549,7 +2556,7 @@ where // in either case, mark the BinEntry as garbage, since it was just removed // safety: as for val below / in put - unsafe { guard.defer_destroy(e) }; + unsafe { guard.retire_shared(e) }; } // since the key was found and only one node exists per key, we can break here break; @@ -2606,7 +2613,8 @@ where if let Some(nv) = new_value { // found the node but we have a new value to replace the old one - n.value.store(Owned::new(nv), Ordering::SeqCst); + n.value + .store(Shared::boxed(nv, &self.collector), Ordering::SeqCst); } else { // drop `p` without its value, since the old value is dropped // in the check on `old_val` below @@ -2615,20 +2623,19 @@ where // after storing the linear bin. The value stored in `p` is `defer_destroy`ed from within // `old_val` at the end of the method. Thus, everything is always marked for garbage // collection _after_ it becomes unaccessible by other threads. - let need_to_untreeify = - unsafe { tree_bin.remove_tree_node(p, false, guard) }; + let need_to_untreeify = unsafe { + tree_bin.remove_tree_node(p, false, guard, &self.collector) + }; if need_to_untreeify { - let linear_bin = Self::untreeify( - tree_bin.first.load(Ordering::SeqCst, guard), - guard, - ); + let linear_bin = self + .untreeify(tree_bin.first.load(Ordering::SeqCst, guard), guard); t.store_bin(bini, linear_bin); // the old bin is now garbage, but its values are not, // since they get re-used in the linear bin // safety: same as in put unsafe { TreeBin::defer_drop_without_values(bin, guard); - guard.defer_destroy(p); + guard.retire_shared(p); } } } @@ -2663,12 +2670,12 @@ where // reference to the old value. there are no other ways to get to a // value except through its Node's `value` field (which is now gone // together with the node), so freeing the old value is fine. - unsafe { guard.defer_destroy(val) }; + unsafe { guard.retire_shared(val) }; // safety: the lifetime of the reference is bound to the guard // supplied which means that the memory will not be freed // until at least after the guard goes out of scope - return unsafe { val.as_ref() }.map(move |v| (key, v)); + return unsafe { val.as_ref() }.map(move |v| (key, &**v)); } break; } @@ -2698,15 +2705,15 @@ where /// If `f` returns `false` for a given key/value pair, but the value for that pair is concurrently /// modified before the removal takes place, the entry will not be removed. /// If you want the removal to happen even in the case of concurrent modification, use [`HashMap::retain_force`]. - pub fn retain(&self, mut f: F, guard: &Guard) + pub fn retain(&self, mut f: F, guard: &Guard<'_>) where F: FnMut(&K, &V) -> bool, { self.check_guard(guard); - // removed selected keys - for (k, v) in self.iter(guard) { + let mut iter = self.iter(guard); + while let Some((k, v)) = iter.next_internal() { if !f(k, v) { - let old_value: Shared<'_, V> = Shared::from(v as *const V); + let old_value: Shared<'_, V> = Shared::from(v as *const _); self.replace_node(k, None, Some(old_value), guard); } } @@ -2732,7 +2739,7 @@ where /// map.pin().retain_force(|&k, _| k % 2 == 0); /// assert_eq!(map.pin().len(), 4); /// ``` - pub fn retain_force(&self, mut f: F, guard: &Guard) + pub fn retain_force(&self, mut f: F, guard: &Guard<'_>) where F: FnMut(&K, &V) -> bool, { @@ -2752,7 +2759,7 @@ where { /// Replaces all linked nodes in the bin at the given index unless the table /// is too small, in which case a resize is initiated instead. - fn treeify_bin<'g>(&'g self, tab: &Table, index: usize, guard: &'g Guard) { + fn treeify_bin<'g>(&'g self, tab: &Table, index: usize, guard: &'g Guard<'_>) { let n = tab.len(); if n < MIN_TREEIFY_CAPACITY { self.try_presize(n << 1, guard); @@ -2764,7 +2771,7 @@ where // safety: we loaded `bin` while the epoch was pinned by our // guard. if the bin was replaced since then, the old bin still // won't be dropped until after we release our guard. - match unsafe { bin.deref() } { + match **unsafe { bin.deref() } { BinEntry::Node(ref node) => { let lock = node.lock.lock(); // check if `bin` is still the head @@ -2795,7 +2802,7 @@ where ); new_tree_node.prev.store(tail, Ordering::Relaxed); let new_tree_node = - Owned::new(BinEntry::TreeNode(new_tree_node)).into_shared(guard); + Shared::boxed(BinEntry::TreeNode(new_tree_node), &self.collector); if tail.is_null() { // this was the first TreeNode, so it becomes the head head = new_tree_node; @@ -2814,12 +2821,14 @@ where } tab.store_bin( index, - Owned::new(BinEntry::Tree(TreeBin::new( - // safety: we have just created `head` and its `next` - // nodes and have never shared them - unsafe { head.into_owned() }, - guard, - ))), + Shared::boxed( + BinEntry::Tree(TreeBin::new( + // safety: we have just created `head` and its `next` + // nodes and have never shared them + head, guard, + )), + &self.collector, + ), ); drop(lock); // make sure the old bin entries get dropped @@ -2835,7 +2844,7 @@ where // // NOTE: we do not drop the value, since it gets moved to the new TreeNode unsafe { - guard.defer_destroy(e); + guard.retire_shared(e); e = e .deref() .as_node() @@ -2893,8 +2902,9 @@ where /// Returns a list of non-TreeNodes replacing those in the given list. Does /// _not_ clean up old TreeNodes, as they may still be reachable. fn untreeify<'g>( + &self, bin: Shared<'g, BinEntry>, - guard: &'g Guard, + guard: &'g Guard<'_>, ) -> Shared<'g, BinEntry> { let mut head = Shared::null(); let mut tail: Shared<'_, BinEntry> = Shared::null(); @@ -2910,12 +2920,14 @@ where let q_deref = unsafe { q.deref() }.as_tree_node().unwrap(); // NOTE: cloning the value uses a load with Ordering::Relaxed, but // write access is synchronized through the bin lock - let new_node = Owned::new(BinEntry::Node(Node::new( - q_deref.node.hash, - q_deref.node.key.clone(), - q_deref.node.value.clone(), - ))) - .into_shared(guard); + let new_node = Shared::boxed( + BinEntry::Node(Node::new( + q_deref.node.hash, + q_deref.node.key.clone(), + q_deref.node.value.clone(), + )), + &self.collector, + ); if tail.is_null() { head = new_node; } else { @@ -2962,7 +2974,7 @@ where V: Debug, { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - let guard = self.collector.register().pin(); + let guard = self.collector.enter(); f.debug_map().entries(self.iter(&guard)).finish() } } @@ -2976,7 +2988,7 @@ impl Drop for HashMap { // NOTE: we _could_ relax the bounds in all the methods that return `&'g ...` to not also // bound `&self` by `'g`, but if we did that, we would need to use a regular `epoch::Guard` // here rather than an unprotected one. - let guard = unsafe { crossbeam_epoch::unprotected() }; + let guard = unsafe { reclaim::unprotected() }; assert!(self.next_table.load(Ordering::SeqCst, guard).is_null()); let table = self.table.swap(Shared::null(), Ordering::SeqCst, guard); @@ -2986,7 +2998,7 @@ impl Drop for HashMap { } // safety: same as above + we own the table - let mut table = unsafe { table.into_owned() }.into_box(); + let mut table = unsafe { table.into_box() }; table.drop_bins(); } } @@ -3010,7 +3022,7 @@ where (iter.size_hint().0 + 1) / 2 }; - let guard = self.collector.register().pin(); + let guard = self.collector.enter(); self.reserve(reserve, &guard); (*self).put_all(iter, &guard); } @@ -3039,7 +3051,7 @@ where if let Some((key, value)) = iter.next() { // safety: we own `map`, so it's not concurrently accessed by // anyone else at this point. - let guard = unsafe { crossbeam_epoch::unprotected() }; + let guard = unsafe { reclaim::unprotected() }; let (lower, _) = iter.size_hint(); let map = HashMap::with_capacity_and_hasher(lower.saturating_add(1), S::default()); @@ -3084,7 +3096,7 @@ where fn clone(&self) -> HashMap { let cloned_map = Self::with_capacity_and_hasher(self.len(), self.build_hasher.clone()); { - let guard = self.collector.register().pin(); + let guard = self.collector.enter(); for (k, v) in self.iter(&guard) { cloned_map.insert(k.clone(), v.clone(), &guard); } @@ -3110,7 +3122,7 @@ const fn num_cpus() -> usize { #[test] fn capacity() { let map = HashMap::::new(); - let guard = epoch::pin(); + let guard = map.guard(); assert_eq!(map.capacity(&guard), 0); // The table has not yet been allocated @@ -3135,7 +3147,7 @@ mod tests { #[test] fn reserve() { let map = HashMap::::new(); - let guard = epoch::pin(); + let guard = map.guard(); map.insert(42, 0, &guard); @@ -3148,7 +3160,7 @@ mod tests { #[test] fn reserve_uninit() { let map = HashMap::::new(); - let guard = epoch::pin(); + let guard = map.guard(); map.reserve(32, &guard); @@ -3177,42 +3189,42 @@ mod tests { /// (and so pass). /// /// ```compile_fail -/// let guard = crossbeam_epoch::pin(); +/// let guard = map.pin(); /// let map = super::HashMap::default(); /// let r = map.insert((), (), &guard); /// drop(map); /// drop(r); /// ``` /// ```compile_fail -/// let guard = crossbeam_epoch::pin(); +/// let guard = map.pin(); /// let map = super::HashMap::default(); /// let r = map.get(&(), &guard); /// drop(map); /// drop(r); /// ``` /// ```compile_fail -/// let guard = crossbeam_epoch::pin(); +/// let guard = map.pin(); /// let map = super::HashMap::default(); /// let r = map.remove(&(), &guard); /// drop(map); /// drop(r); /// ``` /// ```compile_fail -/// let guard = crossbeam_epoch::pin(); +/// let guard = map.pin(); /// let map = super::HashMap::default(); /// let r = map.iter(&guard).next(); /// drop(map); /// drop(r); /// ``` /// ```compile_fail -/// let guard = crossbeam_epoch::pin(); +/// let guard = map.pin(); /// let map = super::HashMap::default(); /// let r = map.keys(&guard).next(); /// drop(map); /// drop(r); /// ``` /// ```compile_fail -/// let guard = crossbeam_epoch::pin(); +/// let guard = map.pin(); /// let map = super::HashMap::default(); /// let r = map.values(&guard).next(); /// drop(map); @@ -3222,42 +3234,42 @@ mod tests { /// # No references outlive the guard. /// /// ```compile_fail -/// let guard = crossbeam_epoch::pin(); +/// let guard = map.pin(); /// let map = super::HashMap::default(); /// let r = map.insert((), (), &guard); /// drop(guard); /// drop(r); /// ``` /// ```compile_fail -/// let guard = crossbeam_epoch::pin(); +/// let guard = map.pin(); /// let map = super::HashMap::default(); /// let r = map.get(&(), &guard); /// drop(guard); /// drop(r); /// ``` /// ```compile_fail -/// let guard = crossbeam_epoch::pin(); +/// let guard = map.pin(); /// let map = super::HashMap::default(); /// let r = map.remove(&(), &guard); /// drop(guard); /// drop(r); /// ``` /// ```compile_fail -/// let guard = crossbeam_epoch::pin(); +/// let guard = map.pin(); /// let map = super::HashMap::default(); /// let r = map.iter(&guard).next(); /// drop(guard); /// drop(r); /// ``` /// ```compile_fail -/// let guard = crossbeam_epoch::pin(); +/// let guard = map.pin(); /// let map = super::HashMap::default(); /// let r = map.keys(&guard).next(); /// drop(guard); /// drop(r); /// ``` /// ```compile_fail -/// let guard = crossbeam_epoch::pin(); +/// let guard = map.pin(); /// let map = super::HashMap::default(); /// let r = map.values(&guard).next(); /// drop(guard); @@ -3292,7 +3304,7 @@ fn replace_empty() { let map = HashMap::::new(); { - let guard = epoch::pin(); + let guard = map.guard(); assert_eq!(map.len(), 0); let old = map.replace_node(&42, None, None, &guard); assert_eq!(map.len(), 0); @@ -3304,7 +3316,7 @@ fn replace_empty() { fn replace_existing() { let map = HashMap::::new(); { - let guard = epoch::pin(); + let guard = map.guard(); map.insert(42, 42, &guard); assert_eq!(map.len(), 1); let old = map.replace_node(&42, Some(10), None, &guard); @@ -3319,38 +3331,39 @@ fn no_replacement_return_val() { // NOTE: this test also serves as a leak test for the injected value let map = HashMap::::new(); { - let guard = epoch::pin(); + let guard = map.guard(); map.insert(42, String::from("hello"), &guard); assert_eq!( map.put(42, String::from("world"), true, &guard), PutResult::Exists { current: &String::from("hello"), - not_inserted: Box::new(String::from("world")), + not_inserted: Box::new(map.collector.link(String::from("world"))), } ); } } -#[test] -fn replace_existing_observed_value_matching() { - let map = HashMap::::new(); - { - let guard = epoch::pin(); - map.insert(42, 42, &guard); - assert_eq!(map.len(), 1); - let observed_value = Shared::from(map.get(&42, &guard).unwrap() as *const _); - let old = map.replace_node(&42, Some(10), Some(observed_value), &guard); - assert_eq!(map.len(), 1); - assert_eq!(old, Some((&42, &42))); - assert_eq!(*map.get(&42, &guard).unwrap(), 10); - } -} +// TODO +// #[test] +// fn replace_existing_observed_value_matching() { +// let map = HashMap::::new(); +// { +// let guard = map.guard(); +// map.insert(42, 42, &guard); +// assert_eq!(map.len(), 1); +// let observed_value = Shared::from(map.get(&42, &guard).unwrap() as *const _); +// let old = map.replace_node(&42, Some(10), Some(observed_value), &guard); +// assert_eq!(map.len(), 1); +// assert_eq!(old, Some((&42, &42))); +// assert_eq!(*map.get(&42, &guard).unwrap(), 10); +// } +// } #[test] fn replace_existing_observed_value_non_matching() { let map = HashMap::::new(); { - let guard = epoch::pin(); + let guard = map.guard(); map.insert(42, 42, &guard); assert_eq!(map.len(), 1); let old = map.replace_node(&42, Some(10), Some(Shared::null()), &guard); @@ -3364,7 +3377,7 @@ fn replace_existing_observed_value_non_matching() { fn replace_twice() { let map = HashMap::::new(); { - let guard = epoch::pin(); + let guard = map.guard(); map.insert(42, 42, &guard); assert_eq!(map.len(), 1); let old = map.replace_node(&42, Some(43), None, &guard); @@ -3421,14 +3434,13 @@ mod tree_bins { let t = unsafe { t.deref() }; let bini = t.bini(0); let bin = t.bin(bini, guard); - match unsafe { bin.deref() } { + match unsafe { &**bin.deref() } { BinEntry::Tree(_) => {} // pass BinEntry::Moved => panic!("bin was not correctly treeified -- is Moved"), BinEntry::Node(_) => panic!("bin was not correctly treeified -- is Node"), BinEntry::TreeNode(_) => panic!("bin was not correctly treeified -- is TreeNode"), } - guard.flush(); drop(guard); } // then, spin up lots of reading and writing threads on a range of keys @@ -3505,7 +3517,7 @@ mod tree_bins { fn test_tree_bin_remove(f: F) where - F: Fn(usize, &HashMap, &Guard), + F: Fn(usize, &HashMap, &Guard<'_>), { let map = HashMap::::with_hasher(ZeroHashBuilder); { @@ -3519,7 +3531,7 @@ mod tree_bins { let t = unsafe { t.deref() }; let bini = t.bini(0); let bin = t.bin(bini, guard); - match unsafe { bin.deref() } { + match unsafe { &**bin.deref() } { BinEntry::Tree(_) => {} // pass BinEntry::Moved => panic!("bin was not correctly treeified -- is Moved"), BinEntry::Node(_) => panic!("bin was not correctly treeified -- is Node"), @@ -3530,7 +3542,6 @@ mod tree_bins { for i in 0..9 { f(i, &map, guard); } - guard.flush(); drop(guard); } assert_eq!(map.len(), 1); @@ -3542,7 +3553,7 @@ mod tree_bins { let t = unsafe { t.deref() }; let bini = t.bini(0); let bin = t.bin(bini, guard); - match unsafe { bin.deref() } { + match unsafe { &**bin.deref() } { BinEntry::Tree(_) => panic!("bin was not correctly untreeified -- is Tree"), BinEntry::Moved => panic!("bin was not correctly untreeified -- is Moved"), BinEntry::Node(_) => {} // pass @@ -3562,17 +3573,17 @@ mod tree_bins { #[test] #[should_panic] - #[cfg_attr(miri, ignore)] + // TODO + #[ignore] fn disallow_evil() { let map: HashMap<_, _> = HashMap::default(); - map.insert(42, String::from("hello"), &crossbeam_epoch::pin()); + map.insert(42, String::from("hello"), &map.guard()); - let evil = crossbeam_epoch::Collector::new(); - let evil = evil.register(); - let guard = evil.pin(); + let evil = seize::Collector::new(); + let guard = evil.enter(); let oops = map.get(&42, &guard); - map.remove(&42, &crossbeam_epoch::pin()); + map.remove(&42, &map.guard()); // at this point, the default collector is allowed to free `"hello"` // since no-one has the global epoch pinned as far as it is aware. // `oops` is tied to the lifetime of a Guard that is not a part of diff --git a/src/map_ref.rs b/src/map_ref.rs index 61b55476..1bffede5 100644 --- a/src/map_ref.rs +++ b/src/map_ref.rs @@ -1,6 +1,6 @@ use crate::iter::*; -use crate::{GuardRef, HashMap, TryInsertError}; -use crossbeam_epoch::Guard; +use crate::reclaim::{Guard, GuardRef}; +use crate::{HashMap, TryInsertError}; use std::borrow::Borrow; use std::fmt::{self, Debug, Formatter}; use std::hash::{BuildHasher, Hash}; @@ -28,7 +28,7 @@ impl HashMap { } /// Get a reference to this map with the given guard. - pub fn with_guard<'g>(&'g self, guard: &'g Guard) -> HashMapRef<'g, K, V, S> { + pub fn with_guard<'g>(&'g self, guard: &'g Guard<'_>) -> HashMapRef<'g, K, V, S> { HashMapRef { guard: GuardRef::Ref(guard), map: self, diff --git a/src/node.rs b/src/node.rs index 55ec8b67..80e78017 100644 --- a/src/node.rs +++ b/src/node.rs @@ -1,7 +1,8 @@ use crate::raw::Table; +use crate::reclaim::{self, Atomic, Collector, Guard, RetireShared, Shared}; use core::sync::atomic::{AtomicBool, AtomicI64, Ordering}; -use crossbeam_epoch::{Atomic, Guard, Owned, Shared}; use parking_lot::Mutex; +use seize::Linked; use std::borrow::Borrow; use std::thread::{current, park, Thread}; @@ -134,7 +135,7 @@ impl TreeNode { from: Shared<'g, BinEntry>, hash: u64, key: &Q, - guard: &'g Guard, + guard: &'g Guard<'_>, ) -> Shared<'g, BinEntry> where K: Borrow, @@ -240,9 +241,8 @@ where /// Constructs a new bin from the given nodes. /// /// Nodes are arranged into an ordered red-black tree. - pub(crate) fn new(bin: Owned>, guard: &Guard) -> Self { + pub(crate) fn new(bin: Shared<'_, BinEntry>, guard: &Guard<'_>) -> Self { let mut root = Shared::null(); - let bin = bin.into_shared(guard); // safety: We own the nodes for creating this new TreeBin, so they are // not shared with another thread and cannot get invalidated. @@ -328,14 +328,14 @@ where impl TreeBin { /// Acquires write lock for tree restucturing. - fn lock_root(&self, guard: &Guard) { + fn lock_root(&self, guard: &Guard<'_>, collector: &Collector) { if self .lock_state .compare_exchange(0, WRITER, Ordering::SeqCst, Ordering::Relaxed) .is_err() { // the current lock state is non-zero, which means the lock is contended - self.contended_lock(guard); + self.contended_lock(guard, collector); } } @@ -345,7 +345,7 @@ impl TreeBin { } /// Possibly blocks awaiting root lock. - fn contended_lock(&self, guard: &Guard) { + fn contended_lock(&self, guard: &Guard<'_>, collector: &Collector) { let mut waiting = false; let mut state: i64; loop { @@ -378,8 +378,8 @@ impl TreeBin { // we noticed that there were no readers immediately after setting us as // the waiter, and then went directly into this branch. In that case, some // other thread may simultaneously have noticed that we wanted to be woken - // up, and be trying to call `.unpark`. So, we `defer_destroy` instead. - unsafe { guard.defer_destroy(waiter) }; + // up, and be trying to call `.unpark`. So, we `retire_shared` instead. + unsafe { guard.retire_shared(waiter) }; } return; } @@ -392,7 +392,7 @@ impl TreeBin { .is_ok() { waiting = true; - let current_thread = Owned::new(current()); + let current_thread = Shared::boxed(current(), &collector); let waiter = self.waiter.swap(current_thread, Ordering::SeqCst, guard); assert!(waiter.is_null()); } @@ -410,7 +410,7 @@ impl TreeBin { bin: Shared<'g, BinEntry>, hash: u64, key: &Q, - guard: &'g Guard, + guard: &'g Guard<'_>, ) -> Shared<'g, BinEntry> where K: Borrow, @@ -513,7 +513,8 @@ impl TreeBin { &'g self, p: Shared<'g, BinEntry>, drop_value: bool, - guard: &'g Guard, + guard: &'g Guard<'_>, + collector: &Collector, ) -> bool { // safety: we were read under our guard, at which point the tree // structure was valid. Since our guard pins the current epoch, the @@ -582,7 +583,7 @@ impl TreeBin { // if we get here, we know that we will still be a tree and have // unlinked the `next` and `prev` pointers, so it's time to restructure // the tree - self.lock_root(guard); + self.lock_root(guard, collector); // NOTE: since we have the write lock for the tree, we know that all // readers will read along the linear `next` pointers until we release // the lock (these pointers were adjusted above to exclude the removed @@ -758,9 +759,9 @@ impl TreeBin { #[allow(unused_unsafe)] unsafe { if drop_value { - guard.defer_destroy(p_deref.node.value.load(Ordering::Relaxed, guard)); + guard.retire_shared(p_deref.node.value.load(Ordering::Relaxed, guard)); } - guard.defer_destroy(p); + guard.retire_shared(p); } if cfg!(debug_assertions) { @@ -782,20 +783,23 @@ where hash: u64, key: K, value: Shared<'g, V>, - guard: &'g Guard, + guard: &'g Guard<'_>, + collector: &Collector, ) -> Shared<'g, BinEntry> { let mut p = self.root.load(Ordering::SeqCst, guard); if p.is_null() { // the current root is `null`, i.e. the tree is currently empty. // This, we simply insert the new entry as the root. - let tree_node = Owned::new(BinEntry::TreeNode(TreeNode::new( - hash, - key, - Atomic::from(value), - Atomic::null(), - Atomic::null(), - ))) - .into_shared(guard); + let tree_node = Shared::boxed( + BinEntry::TreeNode(TreeNode::new( + hash, + key, + Atomic::from(value), + Atomic::null(), + Atomic::null(), + )), + collector, + ); self.root.store(tree_node, Ordering::Release); self.first.store(tree_node, Ordering::Release); return Shared::null(); @@ -854,14 +858,16 @@ where // position (which is here, since we arrived here by comparing // hash and key of the new entry) let first = self.first.load(Ordering::SeqCst, guard); - let x = Owned::new(BinEntry::TreeNode(TreeNode::new( - hash, - key, - Atomic::from(value), - Atomic::from(first), - Atomic::from(xp), - ))) - .into_shared(guard); + let x = Shared::boxed( + BinEntry::TreeNode(TreeNode::new( + hash, + key, + Atomic::from(value), + Atomic::from(first), + Atomic::from(xp), + )), + collector, + ); self.first.store(x, Ordering::SeqCst); if !first.is_null() { unsafe { TreeNode::get_tree_node(first) } @@ -889,7 +895,7 @@ where .red .store(true, Ordering::SeqCst); } else { - self.lock_root(guard); + self.lock_root(guard, collector); self.root.store( TreeNode::balance_insertion( self.root.load(Ordering::Relaxed, guard), @@ -929,10 +935,12 @@ impl TreeBin { /// method. pub(crate) unsafe fn defer_drop_without_values<'g>( bin: Shared<'g, BinEntry>, - guard: &'g Guard, + guard: &'g Guard<'_>, ) { - guard.defer_unchecked(move || { - if let BinEntry::Tree(mut tree_bin) = *bin.into_owned().into_box() { + guard.retire(bin.as_ptr(), |mut link| { + let bin = link.cast::>(); + + if let BinEntry::Tree(mut tree_bin) = Linked::into_inner(*Box::from_raw(bin)) { tree_bin.drop_fields(false); } else { unreachable!("bin is a tree bin"); @@ -953,7 +961,7 @@ impl TreeBin { // swap out first pointer so nodes will not get dropped again when // `tree_bin` is dropped - let guard = crossbeam_epoch::unprotected(); + let guard = reclaim::unprotected(); let p = self.first.swap(Shared::null(), Ordering::Relaxed, guard); Self::drop_tree_nodes(p, drop_values, guard); } @@ -967,14 +975,14 @@ impl TreeBin { pub(crate) unsafe fn drop_tree_nodes<'g>( from: Shared<'g, BinEntry>, drop_values: bool, - guard: &'g Guard, + guard: &'g Guard<'_>, ) { let mut p = from; while !p.is_null() { - if let BinEntry::TreeNode(tree_node) = *p.into_owned().into_box() { + if let BinEntry::TreeNode(tree_node) = Linked::into_inner(*p.into_box()) { // if specified, drop the value in this node if drop_values { - let _ = tree_node.node.value.into_owned(); + let _ = tree_node.node.value.into_box(); } // then we move to the next node p = tree_node.node.next.load(Ordering::SeqCst, guard); @@ -1016,7 +1024,7 @@ impl TreeNode { fn rotate_left<'g>( mut root: Shared<'g, BinEntry>, p: Shared<'g, BinEntry>, - guard: &'g Guard, + guard: &'g Guard<'_>, ) -> Shared<'g, BinEntry> { if p.is_null() { return root; @@ -1061,7 +1069,7 @@ impl TreeNode { fn rotate_right<'g>( mut root: Shared<'g, BinEntry>, p: Shared<'g, BinEntry>, - guard: &'g Guard, + guard: &'g Guard<'_>, ) -> Shared<'g, BinEntry> { if p.is_null() { return root; @@ -1106,7 +1114,7 @@ impl TreeNode { fn balance_insertion<'g>( mut root: Shared<'g, BinEntry>, mut x: Shared<'g, BinEntry>, - guard: &'g Guard, + guard: &'g Guard<'_>, ) -> Shared<'g, BinEntry> { // safety: the containing TreeBin of all TreeNodes was read under our // guard, at which point the tree structure was valid. Since our guard @@ -1206,7 +1214,7 @@ impl TreeNode { fn balance_deletion<'g>( mut root: Shared<'g, BinEntry>, mut x: Shared<'g, BinEntry>, - guard: &'g Guard, + guard: &'g Guard<'_>, ) -> Shared<'g, BinEntry> { let mut x_parent: Shared<'_, BinEntry>; let mut x_parent_left: Shared<'_, BinEntry>; @@ -1363,7 +1371,7 @@ impl TreeNode { } } /// Checks invariants recursively for the tree of Nodes rootet at t. - fn check_invariants<'g>(t: Shared<'g, BinEntry>, guard: &'g Guard) { + fn check_invariants<'g>(t: Shared<'g, BinEntry>, guard: &'g Guard<'_>) { // safety: the containing TreeBin of all TreeNodes was read under our // guard, at which point the tree structure was valid. Since our guard // pins the current epoch, the TreeNodes remain valid for at least as @@ -1446,14 +1454,13 @@ impl TreeNode { #[cfg(test)] mod tests { use super::*; - use crossbeam_epoch::Owned; use std::sync::atomic::Ordering; - fn new_node(hash: u64, key: usize, value: usize) -> Node { + fn new_node(hash: u64, key: usize, value: usize, collector: &Collector) -> Node { Node { hash, key, - value: Atomic::new(value), + value: Atomic::from(Shared::boxed(value, &collector)), next: Atomic::null(), lock: Mutex::new(()), } @@ -1461,27 +1468,33 @@ mod tests { #[test] fn find_node_no_match() { - let guard = &crossbeam_epoch::pin(); - let node2 = new_node(4, 5, 6); + let collector = seize::Collector::new(); + let guard = collector.enter(); + + let node2 = new_node(4, 5, 6, &collector); let entry2 = BinEntry::Node(node2); - let node1 = new_node(1, 2, 3); - node1.next.store(Owned::new(entry2), Ordering::SeqCst); - let entry1 = Owned::new(BinEntry::Node(node1)).into_shared(guard); - let mut tab = Table::from(vec![Atomic::from(entry1)]); + let node1 = new_node(1, 2, 3, &collector); + node1 + .next + .store(Shared::boxed(entry2, &collector), Ordering::SeqCst); + let entry1 = Shared::boxed(BinEntry::Node(node1), &collector); + let mut tab = Table::from(vec![Atomic::from(entry1)], &collector); // safety: we have not yet dropped entry1 - assert!(tab.find(unsafe { entry1.deref() }, 1, &0, guard).is_null()); + assert!(tab.find(unsafe { entry1.deref() }, 1, &0, &guard).is_null()); tab.drop_bins(); } #[test] fn find_node_single_match() { - let guard = &crossbeam_epoch::pin(); - let entry = Owned::new(BinEntry::Node(new_node(1, 2, 3))).into_shared(guard); - let mut tab = Table::from(vec![Atomic::from(entry)]); + let collector = seize::Collector::new(); + let guard = collector.enter(); + + let entry = Shared::boxed(BinEntry::Node(new_node(1, 2, 3, &collector)), &collector); + let mut tab = Table::from(vec![Atomic::from(entry)], &collector); assert_eq!( // safety: we have not yet dropped entry - unsafe { tab.find(entry.deref(), 1, &2, guard).deref() } + unsafe { tab.find(entry.deref(), 1, &2, &guard).deref() } .as_node() .unwrap() .key, @@ -1492,16 +1505,20 @@ mod tests { #[test] fn find_node_multi_match() { - let guard = &crossbeam_epoch::pin(); - let node2 = new_node(4, 5, 6); + let collector = seize::Collector::new(); + let guard = collector.enter(); + + let node2 = new_node(4, 5, 6, &collector); let entry2 = BinEntry::Node(node2); - let node1 = new_node(1, 2, 3); - node1.next.store(Owned::new(entry2), Ordering::SeqCst); - let entry1 = Owned::new(BinEntry::Node(node1)).into_shared(guard); - let mut tab = Table::from(vec![Atomic::from(entry1)]); + let node1 = new_node(1, 2, 3, &collector); + node1 + .next + .store(Shared::boxed(entry2, &collector), Ordering::SeqCst); + let entry1 = Shared::boxed(BinEntry::Node(node1), &collector); + let mut tab = Table::from(vec![Atomic::from(entry1)], &collector); assert_eq!( // safety: we have not yet dropped entry1 - unsafe { tab.find(entry1.deref(), 4, &5, guard).deref() } + unsafe { tab.find(entry1.deref(), 4, &5, &guard).deref() } .as_node() .unwrap() .key, @@ -1512,69 +1529,93 @@ mod tests { #[test] fn find_moved_empty_bins_no_match() { - let guard = &crossbeam_epoch::pin(); - let mut table = Table::::new(1); - let mut table2 = Owned::new(Table::new(1)).into_shared(guard); + let collector = seize::Collector::new(); + let guard = collector.enter(); + + let mut table = Table::::new(1, &collector); + let table2 = Shared::boxed(Table::new(1, &collector), &collector); - let entry = table.get_moved(table2, guard); + let entry = table.get_moved(table2, &guard); table.store_bin(0, entry); - assert!(table.find(&BinEntry::Moved, 1, &2, guard).is_null()); + assert!(table + .find(&collector.link(BinEntry::Moved), 1, &2, &guard) + .is_null()); table.drop_bins(); // safety: table2 is still valid and not accessed by different threads - unsafe { table2.deref_mut() }.drop_bins(); - unsafe { guard.defer_destroy(table2) }; + unsafe { &mut *table2.as_ptr() }.drop_bins(); + unsafe { guard.retire_shared(table2) }; } #[test] fn find_moved_no_bins_no_match() { - let guard = &crossbeam_epoch::pin(); - let mut table = Table::::new(1); - let mut table2 = Owned::new(Table::new(0)).into_shared(guard); - let entry = table.get_moved(table2, guard); + let collector = seize::Collector::new(); + let guard = collector.enter(); + + let mut table = Table::::new(1, &collector); + let table2 = Shared::boxed(Table::new(0, &collector), &collector); + let entry = table.get_moved(table2, &guard); table.store_bin(0, entry); - assert!(table.find(&BinEntry::Moved, 1, &2, guard).is_null()); + assert!(table + .find(&collector.link(BinEntry::Moved), 1, &2, &guard) + .is_null()); table.drop_bins(); // safety: table2 is still valid and not accessed by different threads - unsafe { table2.deref_mut() }.drop_bins(); - unsafe { guard.defer_destroy(table2) }; + unsafe { &mut *table2.as_ptr() }.drop_bins(); + unsafe { guard.retire_shared(table2) }; } #[test] fn find_moved_null_bin_no_match() { - let guard = &crossbeam_epoch::pin(); - let mut table = Table::::new(1); - let mut table2 = Owned::new(Table::new(2)).into_shared(guard); - unsafe { table2.deref() }.store_bin(0, Owned::new(BinEntry::Node(new_node(1, 2, 3)))); - let entry = table.get_moved(table2, guard); + let collector = seize::Collector::new(); + let guard = collector.enter(); + + let mut table = Table::::new(1, &collector); + let table2 = Shared::boxed(Table::new(2, &collector), &collector); + unsafe { table2.deref() }.store_bin( + 0, + Shared::boxed(BinEntry::Node(new_node(1, 2, 3, &collector)), &collector), + ); + let entry = table.get_moved(table2, &guard); table.store_bin(0, entry); - assert!(table.find(&BinEntry::Moved, 0, &1, guard).is_null()); + assert!(table + .find(&collector.link(BinEntry::Moved), 0, &1, &guard) + .is_null()); table.drop_bins(); // safety: table2 is still valid and not accessed by different threads - unsafe { table2.deref_mut() }.drop_bins(); - unsafe { guard.defer_destroy(table2) }; + unsafe { &mut *table2.as_ptr() }.drop_bins(); + unsafe { guard.retire_shared(table2) }; } #[test] fn find_moved_match() { - let guard = &crossbeam_epoch::pin(); - let mut table = Table::::new(1); - let mut table2 = Owned::new(Table::new(1)).into_shared(guard); + let collector = seize::Collector::new(); + let guard = collector.enter(); + + let mut table = Table::::new(1, &collector); + let table2 = Shared::boxed(Table::new(1, &collector), &collector); // safety: table2 is still valid - unsafe { table2.deref() }.store_bin(0, Owned::new(BinEntry::Node(new_node(1, 2, 3)))); - let entry = table.get_moved(table2, guard); + unsafe { table2.deref() }.store_bin( + 0, + Shared::boxed(BinEntry::Node(new_node(1, 2, 3, &collector)), &collector), + ); + let entry = table.get_moved(table2, &guard); table.store_bin(0, entry); assert_eq!( // safety: entry is still valid since the table was not dropped and the // entry was not removed - unsafe { table.find(&BinEntry::Moved, 1, &2, guard).deref() } - .as_node() - .unwrap() - .key, + unsafe { + table + .find(&collector.link(BinEntry::Moved), 1, &2, &guard) + .deref() + } + .as_node() + .unwrap() + .key, 2 ); table.drop_bins(); // safety: table2 is still valid and not accessed by different threads - unsafe { table2.deref_mut() }.drop_bins(); - unsafe { guard.defer_destroy(table2) }; + unsafe { &mut *table2.as_ptr() }.drop_bins(); + unsafe { guard.retire_shared(table2) }; } } diff --git a/src/raw/mod.rs b/src/raw/mod.rs index bf4fd49b..a169ab01 100644 --- a/src/raw/mod.rs +++ b/src/raw/mod.rs @@ -1,5 +1,7 @@ +use seize::Linked; + use crate::node::*; -use crossbeam_epoch::{Atomic, Guard, Owned, Pointer, Shared}; +use crate::reclaim::{self, Atomic, Collector, Guard, Shared}; use std::borrow::Borrow; use std::fmt::Debug; use std::sync::atomic::Ordering; @@ -49,19 +51,17 @@ pub(crate) struct Table { next_table: Atomic>, } -impl From>>> for Table { - fn from(bins: Vec>>) -> Self { +impl Table { + pub(crate) fn from(bins: Vec>>, collector: &Collector) -> Self { Self { bins: bins.into_boxed_slice(), - moved: Atomic::from(Owned::new(BinEntry::Moved)), + moved: Atomic::from(Shared::boxed(BinEntry::Moved, collector)), next_table: Atomic::null(), } } -} -impl Table { - pub(crate) fn new(bins: usize) -> Self { - Self::from(vec![Atomic::null(); bins]) + pub(crate) fn new(bins: usize, collector: &Collector) -> Self { + Self::from(vec![Atomic::null(); bins], &collector) } pub(crate) fn is_empty(&self) -> bool { @@ -75,16 +75,17 @@ impl Table { pub(crate) fn get_moved<'g>( &'g self, for_table: Shared<'g, Table>, - guard: &'g Guard, + guard: &'g Guard<'_>, ) -> Shared<'g, BinEntry> { match self.next_table(guard) { t if t.is_null() => { // if a no next table is yet associated with this table, // create one and store it in `self.next_table` - match self.next_table.compare_and_set( + match self.next_table.compare_exchange( Shared::null(), for_table, Ordering::SeqCst, + Ordering::SeqCst, guard, ) { Ok(_) => {} @@ -104,20 +105,20 @@ impl Table { pub(crate) fn find<'g, Q>( &'g self, - bin: &BinEntry, + bin: &Linked>, hash: u64, key: &Q, - guard: &'g Guard, + guard: &'g Guard<'_>, ) -> Shared<'g, BinEntry> where K: Borrow, Q: ?Sized + Ord, { - match *bin { + match **bin { BinEntry::Node(_) => { let mut node = bin; loop { - let n = if let BinEntry::Node(ref n) = node { + let n = if let BinEntry::Node(ref n) = **node { n } else { unreachable!("BinEntry::Node only points to BinEntry::Node"); @@ -153,7 +154,7 @@ impl Table { // safety: the table is protected by the guard, and so is the bin. let bin = unsafe { bin.deref() }; - match *bin { + match **bin { BinEntry::Node(_) | BinEntry::Tree(_) => { break table.find(bin, hash, key, guard) } @@ -179,7 +180,7 @@ impl Table { // safety: we have &mut self _and_ all references we have returned are bound to the // lifetime of their borrow of self, so there cannot be any outstanding references to // anything in the map. - let guard = unsafe { crossbeam_epoch::unprotected() }; + let guard = unsafe { reclaim::unprotected() }; for bin in Vec::from(std::mem::replace(&mut self.bins, vec![].into_boxed_slice())) { if bin.load(Ordering::SeqCst, guard).is_null() { @@ -192,37 +193,37 @@ impl Table { // of `drop` // safety: same as above let bin_entry = unsafe { bin.load(Ordering::SeqCst, guard).deref() }; - match *bin_entry { + match **bin_entry { BinEntry::Moved => {} BinEntry::Node(_) => { // safety: same as above + we own the bin - Nodes are not shared across the table - let mut p = unsafe { bin.into_owned() }; + let mut p = unsafe { bin.into_box() }; loop { // safety below: // we're dropping the entire map, so no-one else is accessing it. // we replaced the bin with a NULL, so there's no future way to access it // either; we own all the nodes in the list. - let node = if let BinEntry::Node(node) = *p.into_box() { + let node = if let BinEntry::Node(node) = Linked::into_inner(*p) { node } else { unreachable!(); }; // first, drop the value in this node - let _ = unsafe { node.value.into_owned() }; + let _ = unsafe { node.value.into_box() }; // then we move to the next node if node.next.load(Ordering::SeqCst, guard).is_null() { break; } - p = unsafe { node.next.into_owned() }; + p = unsafe { node.next.into_box() }; } } BinEntry::Tree(_) => { // safety: same as for BinEntry::Node - let p = unsafe { bin.into_owned() }; - let bin = if let BinEntry::Tree(bin) = *p.into_box() { + let p = unsafe { bin.into_box() }; + let bin = if let BinEntry::Tree(bin) = Linked::into_inner(*p) { bin } else { unreachable!(); @@ -243,7 +244,7 @@ impl Drop for Table { // safety: we have &mut self _and_ all references we have returned are bound to the // lifetime of their borrow of self, so there cannot be any outstanding references to // anything in the map. - let guard = unsafe { crossbeam_epoch::unprotected() }; + let guard = unsafe { reclaim::unprotected() }; // since BinEntry::Nodes are either dropped by drop_bins or transferred to a new table, // all bins are empty or contain a Shared pointing to shared the BinEntry::Moved (if @@ -259,7 +260,7 @@ impl Drop for Table { } else { // safety: we have mut access to self, so no-one else will drop this value under us. let bin = unsafe { bin.deref() }; - if let BinEntry::Moved = *bin { + if let BinEntry::Moved = **bin { } else { unreachable!("dropped table with non-empty bin"); } @@ -282,7 +283,7 @@ impl Drop for Table { ); // safety: we have mut access to self, so no-one else will drop this value under us. - let moved = unsafe { moved.into_owned() }; + let moved = unsafe { moved.into_box() }; drop(moved); // NOTE that the current table _is not_ responsible for `defer_destroy`ing the _next_ table @@ -297,35 +298,29 @@ impl Table { } #[inline] - pub(crate) fn bin<'g>(&'g self, i: usize, guard: &'g Guard) -> Shared<'g, BinEntry> { + pub(crate) fn bin<'g>(&'g self, i: usize, guard: &'g Guard<'_>) -> Shared<'g, BinEntry> { self.bins[i].load(Ordering::Acquire, guard) } #[inline] #[allow(clippy::type_complexity)] - pub(crate) fn cas_bin<'g, P>( + pub(crate) fn cas_bin<'g>( &'g self, i: usize, current: Shared<'_, BinEntry>, - new: P, - guard: &'g Guard, - ) -> Result< - Shared<'g, BinEntry>, - crossbeam_epoch::CompareAndSetError<'g, BinEntry, P>, - > - where - P: Pointer>, - { - self.bins[i].compare_and_set(current, new, Ordering::AcqRel, guard) + new: Shared<'g, BinEntry>, + guard: &'g Guard<'_>, + ) -> Result>, reclaim::CompareExchangeError<'g, BinEntry>> { + self.bins[i].compare_exchange(current, new, Ordering::AcqRel, Ordering::Acquire, guard) } #[inline] - pub(crate) fn store_bin>>(&self, i: usize, new: P) { + pub(crate) fn store_bin(&self, i: usize, new: Shared<'_, BinEntry>) { self.bins[i].store(new, Ordering::Release) } #[inline] - pub(crate) fn next_table<'g>(&'g self, guard: &'g Guard) -> Shared<'g, Table> { + pub(crate) fn next_table<'g>(&'g self, guard: &'g Guard<'_>) -> Shared<'g, Table> { self.next_table.load(Ordering::SeqCst, guard) } } diff --git a/src/reclaim.rs b/src/reclaim.rs new file mode 100644 index 00000000..beacd441 --- /dev/null +++ b/src/reclaim.rs @@ -0,0 +1,186 @@ +pub(crate) use seize::{Collector, Guard, Linked}; + +use std::marker::PhantomData; +use std::ops::Deref; +use std::sync::atomic::Ordering; +use std::{fmt, ptr}; + +pub(crate) struct Atomic(seize::AtomicPtr); + +impl Atomic { + pub(crate) fn null() -> Self { + Self(seize::AtomicPtr::default()) + } + + pub(crate) fn load<'g>(&self, _: Ordering, guard: &'g Guard<'_>) -> Shared<'g, T> { + guard.protect(&self.0).into() + } + + pub(crate) fn store(&self, new: Shared<'_, T>, ordering: Ordering) { + self.0.store(new.ptr, ordering); + } + + pub(crate) unsafe fn into_box(self) -> Box> { + Box::from_raw(self.0.into_inner()) + } + + pub(crate) fn swap<'g>( + &self, + new: Shared<'_, T>, + ord: Ordering, + _: &'g Guard<'_>, + ) -> Shared<'g, T> { + self.0.swap(new.ptr, ord).into() + } + + pub(crate) fn compare_exchange<'g>( + &self, + current: Shared<'_, T>, + new: Shared<'g, T>, + success: Ordering, + failure: Ordering, + _: &'g Guard<'_>, + ) -> Result, CompareExchangeError<'g, T>> { + match self + .0 + .compare_exchange(current.ptr, new.ptr, success, failure) + { + Ok(ptr) => Ok(ptr.into()), + Err(current) => Err(CompareExchangeError { + current: current.into(), + new, + }), + } + } +} + +impl From> for Atomic { + fn from(shared: Shared<'_, T>) -> Self { + Atomic(shared.ptr.into()) + } +} + +impl Clone for Atomic { + fn clone(&self) -> Self { + Atomic(self.0.load(Ordering::Relaxed).into()) + } +} + +impl fmt::Debug for Shared<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:p}", self.ptr) + } +} + +impl fmt::Debug for Atomic { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:p}", self.0.load(Ordering::SeqCst)) + } +} + +pub(crate) struct CompareExchangeError<'g, T> { + pub(crate) current: Shared<'g, T>, + pub(crate) new: Shared<'g, T>, +} + +pub(crate) struct Shared<'g, T> { + ptr: *mut Linked, + _g: PhantomData<&'g ()>, +} + +impl<'g, T> Shared<'g, T> { + pub(crate) fn null() -> Self { + Shared::from(ptr::null_mut()) + } + + pub(crate) fn boxed(value: T, collector: &Collector) -> Self { + Shared::from(collector.link_boxed(value)) + } + + pub(crate) unsafe fn into_box(self) -> Box> { + Box::from_raw(self.ptr) + } + + pub(crate) unsafe fn as_ptr(&self) -> *mut Linked { + self.ptr + } + + pub(crate) unsafe fn as_ref(&self) -> Option<&'g Linked> { + self.ptr.as_ref() + } + + pub(crate) unsafe fn deref(&self) -> &'g Linked { + &*self.ptr + } + + pub(crate) fn is_null(&self) -> bool { + self.ptr.is_null() + } +} + +impl<'g, T> PartialEq> for Shared<'g, T> { + fn eq(&self, other: &Self) -> bool { + self.ptr == other.ptr + } +} + +impl Eq for Shared<'_, T> {} + +impl Clone for Shared<'_, T> { + fn clone(&self) -> Self { + Shared::from(self.ptr) + } +} + +impl Copy for Shared<'_, T> {} + +impl From<*mut Linked> for Shared<'_, T> { + fn from(ptr: *mut Linked) -> Self { + Shared { + ptr, + _g: PhantomData, + } + } +} + +impl From<*const Linked> for Shared<'_, T> { + fn from(ptr: *const Linked) -> Self { + Shared::from(ptr as *mut _) + } +} + +pub(crate) trait RetireShared { + unsafe fn retire_shared(&self, shared: Shared<'_, T>); +} + +impl RetireShared for Guard<'_> { + unsafe fn retire_shared(&self, shared: Shared<'_, T>) { + self.retire(shared.ptr, seize::reclaim::boxed::); + } +} + +pub(crate) unsafe fn unprotected() -> &'static Guard<'static> { + struct RacyGuard(Guard<'static>); + + unsafe impl Send for RacyGuard {} + unsafe impl Sync for RacyGuard {} + + static UNPROTECTED: RacyGuard = RacyGuard(unsafe { Guard::unprotected() }); + &UNPROTECTED.0 +} + +pub(crate) enum GuardRef<'g> { + Owned(Guard<'g>), + Ref(&'g Guard<'g>), +} + +impl<'g> Deref for GuardRef<'g> { + type Target = Guard<'g>; + + #[inline] + fn deref(&self) -> &Guard<'g> { + match *self { + GuardRef::Owned(ref guard) | GuardRef::Ref(&ref guard) => guard, + } + } +} diff --git a/src/set.rs b/src/set.rs index a1d3fb99..89eb2a27 100644 --- a/src/set.rs +++ b/src/set.rs @@ -2,8 +2,8 @@ //! //! See `HashSet` for details. -use crate::epoch::Guard; use crate::iter::Keys; +use crate::reclaim::Guard; use crate::HashMap; use std::borrow::Borrow; use std::fmt::{self, Debug, Formatter}; @@ -151,7 +151,7 @@ impl HashSet { /// /// Keep in mind that for as long as you hold onto this `Guard`, you are preventing the /// collection of garbage generated by the set. - pub fn guard(&self) -> crate::epoch::Guard { + pub fn guard(&self) -> Guard<'_> { self.map.guard() } @@ -209,7 +209,7 @@ impl HashSet { /// println!("{}", x); /// } /// ``` - pub fn iter<'g>(&'g self, guard: &'g Guard) -> Keys<'g, T, ()> { + pub fn iter<'g>(&'g self, guard: &'g Guard<'_>) -> Keys<'g, T, ()> { self.map.keys(guard) } } @@ -241,7 +241,7 @@ where /// assert!(!set.contains(&1, &guard)); /// ``` #[inline] - pub fn contains<'g, Q>(&self, value: &Q, guard: &'g Guard) -> bool + pub fn contains<'g, Q>(&self, value: &Q, guard: &'g Guard<'_>) -> bool where T: Borrow, Q: ?Sized + Hash + Ord, @@ -268,7 +268,7 @@ where /// assert_eq!(set.get(&2, &guard), Some(&2)); /// assert_eq!(set.get(&4, &guard), None); /// ``` - pub fn get<'g, Q>(&'g self, value: &Q, guard: &'g Guard) -> Option<&'g T> + pub fn get<'g, Q>(&'g self, value: &Q, guard: &'g Guard<'_>) -> Option<&'g T> where T: Borrow, Q: ?Sized + Hash + Ord, @@ -299,8 +299,8 @@ where pub fn is_disjoint( &self, other: &HashSet, - our_guard: &Guard, - their_guard: &Guard, + our_guard: &Guard<'_>, + their_guard: &Guard<'_>, ) -> bool { for value in self.iter(our_guard) { if other.contains(value, their_guard) { @@ -328,7 +328,12 @@ where /// set.pin().insert(4); /// assert!(!set.pin().is_subset(&sup.pin())); /// ``` - pub fn is_subset(&self, other: &HashSet, our_guard: &Guard, their_guard: &Guard) -> bool { + pub fn is_subset( + &self, + other: &HashSet, + our_guard: &Guard<'_>, + their_guard: &Guard<'_>, + ) -> bool { for value in self.iter(our_guard) { if !other.contains(value, their_guard) { return false; @@ -361,13 +366,18 @@ where pub fn is_superset( &self, other: &HashSet, - our_guard: &Guard, - their_guard: &Guard, + our_guard: &Guard<'_>, + their_guard: &Guard<'_>, ) -> bool { other.is_subset(self, their_guard, our_guard) } - pub(crate) fn guarded_eq(&self, other: &Self, our_guard: &Guard, their_guard: &Guard) -> bool { + pub(crate) fn guarded_eq( + &self, + other: &Self, + our_guard: &Guard<'_>, + their_guard: &Guard<'_>, + ) -> bool { self.map.guarded_eq(&other.map, our_guard, their_guard) } } @@ -395,7 +405,7 @@ where /// assert_eq!(set.insert(2, &guard), false); /// assert!(set.contains(&2, &guard)); /// ``` - pub fn insert(&self, value: T, guard: &Guard) -> bool { + pub fn insert(&self, value: T, guard: &Guard<'_>) -> bool { let old = self.map.insert(value, (), guard); old.is_none() } @@ -426,7 +436,7 @@ where /// assert!(!set.contains(&2, &guard)); /// assert_eq!(set.remove(&2, &guard), false); /// ``` - pub fn remove(&self, value: &Q, guard: &Guard) -> bool + pub fn remove(&self, value: &Q, guard: &Guard<'_>) -> bool where T: Borrow, Q: ?Sized + Hash + Ord, @@ -454,7 +464,7 @@ where /// assert_eq!(set.take(&2, &guard), Some(&2)); /// assert_eq!(set.take(&2, &guard), None); /// ``` - pub fn take<'g, Q>(&'g self, value: &Q, guard: &'g Guard) -> Option<&'g T> + pub fn take<'g, Q>(&'g self, value: &Q, guard: &'g Guard<'_>) -> Option<&'g T> where T: Borrow, Q: ?Sized + Hash + Ord, @@ -479,7 +489,7 @@ where /// set.pin().retain(|&e| e % 2 == 0); /// assert_eq!(set.pin().len(), 4); /// ``` - pub fn retain(&self, mut f: F, guard: &Guard) + pub fn retain(&self, mut f: F, guard: &Guard<'_>) where F: FnMut(&T) -> bool, { @@ -504,7 +514,7 @@ where /// set.pin().clear(); /// assert!(set.pin().is_empty()); /// ``` - pub fn clear(&self, guard: &Guard) { + pub fn clear(&self, guard: &Guard<'_>) { self.map.clear(guard) } @@ -512,7 +522,7 @@ where /// be inserted in the `HashSet`. /// /// The collection may reserve more space to avoid frequent reallocations. - pub fn reserve(&self, additional: usize, guard: &Guard) { + pub fn reserve(&self, additional: usize, guard: &Guard<'_>) { self.map.reserve(additional, guard) } } diff --git a/src/set_ref.rs b/src/set_ref.rs index 271acdb0..ba145f69 100644 --- a/src/set_ref.rs +++ b/src/set_ref.rs @@ -1,6 +1,6 @@ use crate::iter::*; -use crate::{GuardRef, HashSet}; -use crossbeam_epoch::Guard; +use crate::reclaim::{Guard, GuardRef}; +use crate::HashSet; use std::borrow::Borrow; use std::fmt::{self, Debug, Formatter}; use std::hash::{BuildHasher, Hash}; @@ -27,7 +27,7 @@ impl HashSet { } /// Get a reference to this set with the given guard. - pub fn with_guard<'g>(&'g self, guard: &'g Guard) -> HashSetRef<'g, T, S> { + pub fn with_guard<'g>(&'g self, guard: &'g Guard<'_>) -> HashSetRef<'g, T, S> { HashSetRef { guard: GuardRef::Ref(guard), set: self, diff --git a/tests/basic_ref.rs b/tests/basic_ref.rs index 9f2c786c..a896972a 100644 --- a/tests/basic_ref.rs +++ b/tests/basic_ref.rs @@ -1,16 +1,17 @@ -use crossbeam_epoch as epoch; use flurry::*; use std::sync::Arc; #[test] fn pin() { - let _map = HashMap::::new().pin(); + let map = HashMap::::new(); + let _pinned = map.pin(); } #[test] fn with_guard() { - let guard = epoch::pin(); - let _map = HashMap::::new().with_guard(&guard); + let map = HashMap::::new(); + let guard = map.guard(); + let _pinned = map.with_guard(&guard); } #[test] diff --git a/tests/borrow.rs b/tests/borrow.rs index 07277066..3cf2dd18 100644 --- a/tests/borrow.rs +++ b/tests/borrow.rs @@ -1,4 +1,3 @@ -use crossbeam_epoch as epoch; use flurry::*; use std::sync::Arc; @@ -9,7 +8,7 @@ fn get_empty() { let map = HashMap::::new(); { - let guard = epoch::pin(); + let guard = map.guard(); let e = map.get("foo", &guard); assert!(e.is_none()); } @@ -20,7 +19,7 @@ fn remove_empty() { let map = HashMap::::new(); { - let guard = epoch::pin(); + let guard = map.guard(); let old = map.remove("foo", &guard); assert!(old.is_none()); } @@ -31,7 +30,7 @@ fn insert_and_remove() { let map = HashMap::::new(); { - let guard = epoch::pin(); + let guard = map.guard(); map.insert("foo".to_string(), 0, &guard); let old = map.remove("foo", &guard).unwrap(); assert_eq!(old, &0); @@ -43,9 +42,9 @@ fn insert_and_remove() { fn insert_and_get() { let map = HashMap::::new(); - map.insert("foo".to_string(), 0, &epoch::pin()); + map.insert("foo".to_string(), 0, &map.guard()); { - let guard = epoch::pin(); + let guard = map.guard(); let e = map.get("foo", &guard).unwrap(); assert_eq!(e, &0); } @@ -55,12 +54,12 @@ fn insert_and_get() { fn update() { let map = HashMap::::new(); - let guard = epoch::pin(); + let guard = map.guard(); map.insert("foo".to_string(), 0, &guard); let old = map.insert("foo".to_string(), 1, &guard); assert_eq!(old, Some(&0)); { - let guard = epoch::pin(); + let guard = map.guard(); let e = map.get("foo", &guard).unwrap(); assert_eq!(e, &1); } @@ -76,21 +75,21 @@ fn concurrent_insert() { let keys1 = keys.clone(); let t1 = std::thread::spawn(move || { for key in keys1.iter() { - map1.insert(key.clone(), 0, &epoch::pin()); + map1.insert(key.clone(), 0, &map1.guard()); } }); let map2 = map.clone(); let keys2 = keys.clone(); let t2 = std::thread::spawn(move || { for key in keys2.iter() { - map2.insert(key.clone(), 1, &epoch::pin()); + map2.insert(key.clone(), 1, &map2.guard()); } }); t1.join().unwrap(); t2.join().unwrap(); - let guard = epoch::pin(); + let guard = map.guard(); for key in keys.iter() { let v = map.get(key.as_str(), &guard).unwrap(); assert!(v == &0 || v == &1); @@ -104,7 +103,7 @@ fn concurrent_remove() { let keys = Arc::new((0..64).map(|i| i.to_string()).collect::>()); { - let guard = epoch::pin(); + let guard = map.guard(); for (i, key) in keys.iter().enumerate() { map.insert(key.clone(), i, &guard); } @@ -113,7 +112,7 @@ fn concurrent_remove() { let map1 = map.clone(); let keys1 = keys.clone(); let t1 = std::thread::spawn(move || { - let guard = epoch::pin(); + let guard = map1.guard(); for (i, key) in keys1.iter().enumerate() { if let Some(v) = map1.remove(key.as_str(), &guard) { assert_eq!(v, &i); @@ -123,7 +122,7 @@ fn concurrent_remove() { let map2 = map.clone(); let keys2 = keys.clone(); let t2 = std::thread::spawn(move || { - let guard = epoch::pin(); + let guard = map2.guard(); for (i, key) in keys2.iter().enumerate() { if let Some(v) = map2.remove(key.as_str(), &guard) { assert_eq!(v, &i); @@ -135,7 +134,7 @@ fn concurrent_remove() { t2.join().unwrap(); // after joining the threads, the map should be empty - let guard = epoch::pin(); + let guard = map.guard(); for key in keys.iter() { assert!(map.get(key.as_str(), &guard).is_none()); } diff --git a/tests/cuckoo/stress.rs b/tests/cuckoo/stress.rs index 5bf401f7..4b0c9258 100644 --- a/tests/cuckoo/stress.rs +++ b/tests/cuckoo/stress.rs @@ -61,7 +61,9 @@ impl Environment { fn stress_insert_thread(env: Arc) { let mut rng = rand::thread_rng(); - let guard = epoch::pin(); + let guard1 = env.table1.guard(); + let guard2 = env.table2.guard(); + while !env.finished.load(Ordering::SeqCst) { let idx = env.ind_dist.sample(&mut rng); let in_use = env.in_use.lock(); @@ -69,13 +71,17 @@ fn stress_insert_thread(env: Arc) { let key = env.keys[idx]; let val1 = env.val_dist1.sample(&mut rng); let val2 = env.val_dist2.sample(&mut rng); - let res1 = if !env.table1.contains_key(&key, &guard) { - env.table1.insert(key, val1, &guard).map_or(true, |_| false) + let res1 = if !env.table1.contains_key(&key, &guard1) { + env.table1 + .insert(key, val1, &guard1) + .map_or(true, |_| false) } else { false }; - let res2 = if !env.table2.contains_key(&key, &guard) { - env.table2.insert(key, val2, &guard).map_or(true, |_| false) + let res2 = if !env.table2.contains_key(&key, &guard2) { + env.table2 + .insert(key, val2, &guard2) + .map_or(true, |_| false) } else { false }; @@ -83,8 +89,8 @@ fn stress_insert_thread(env: Arc) { assert_ne!(res1, (*in_table)[idx]); assert_ne!(res2, (*in_table)[idx]); if res1 { - assert_eq!(Some(&val1), env.table1.get(&key, &guard)); - assert_eq!(Some(&val2), env.table2.get(&key, &guard)); + assert_eq!(Some(&val1), env.table1.get(&key, &guard1)); + assert_eq!(Some(&val2), env.table2.get(&key, &guard2)); let mut vals1 = env.vals1.lock(); let mut vals2 = env.vals2.lock(); (*vals1)[idx] = val1; @@ -98,20 +104,22 @@ fn stress_insert_thread(env: Arc) { fn stress_delete_thread(env: Arc) { let mut rng = rand::thread_rng(); - let guard = epoch::pin(); + let guard1 = env.table1.guard(); + let guard2 = env.table2.guard(); + while !env.finished.load(Ordering::SeqCst) { let idx = env.ind_dist.sample(&mut rng); let in_use = env.in_use.lock(); if (*in_use)[idx].compare_and_swap(false, true, Ordering::SeqCst) { let key = env.keys[idx]; - let res1 = env.table1.remove(&key, &guard).map_or(false, |_| true); - let res2 = env.table2.remove(&key, &guard).map_or(false, |_| true); + let res1 = env.table1.remove(&key, &guard1).map_or(false, |_| true); + let res2 = env.table2.remove(&key, &guard2).map_or(false, |_| true); let mut in_table = env.in_table.lock(); assert_eq!(res1, (*in_table)[idx]); assert_eq!(res2, (*in_table)[idx]); if res1 { - assert!(env.table1.get(&key, &guard).is_none()); - assert!(env.table2.get(&key, &guard).is_none()); + assert!(env.table1.get(&key, &guard1).is_none()); + assert!(env.table2.get(&key, &guard2).is_none()); (*in_table)[idx] = false; } (*in_use)[idx].swap(false, Ordering::SeqCst); @@ -121,7 +129,9 @@ fn stress_delete_thread(env: Arc) { fn stress_find_thread(env: Arc) { let mut rng = rand::thread_rng(); - let guard = epoch::pin(); + let guard1 = env.table1.guard(); + let guard2 = env.table2.guard(); + while !env.finished.load(Ordering::SeqCst) { let idx = env.ind_dist.sample(&mut rng); let in_use = env.in_use.lock(); @@ -131,12 +141,12 @@ fn stress_find_thread(env: Arc) { let val1 = (*env.vals1.lock())[idx]; let val2 = (*env.vals2.lock())[idx]; - let value = env.table1.get(&key, &guard); + let value = env.table1.get(&key, &guard1); if value.is_some() { assert_eq!(&val1, value.unwrap()); assert!((*in_table)[idx]); } - let value = env.table2.get(&key, &guard); + let value = env.table2.get(&key, &guard2); if value.is_some() { assert_eq!(&val2, value.unwrap()); assert!((*in_table)[idx]); diff --git a/tests/hasher.rs b/tests/hasher.rs index e11e8ca9..13f01afe 100644 --- a/tests/hasher.rs +++ b/tests/hasher.rs @@ -1,4 +1,3 @@ -use crossbeam_epoch as epoch; use flurry::{DefaultHashBuilder, HashMap}; use std::hash::{BuildHasher, BuildHasherDefault, Hasher}; @@ -24,8 +23,8 @@ impl BuildHasher for ZeroHashBuilder { fn check() { let range = if cfg!(miri) { 0..16 } else { 0..1000 }; - let guard = epoch::pin(); let map = HashMap::::default(); + let guard = map.guard(); for i in range.clone() { map.insert(i, i, &guard); } diff --git a/tests/jdk/map_check.rs b/tests/jdk/map_check.rs index 385b2219..aac648ac 100644 --- a/tests/jdk/map_check.rs +++ b/tests/jdk/map_check.rs @@ -1,4 +1,3 @@ -use crossbeam_epoch as epoch; use flurry::*; use rand::prelude::*; use std::hash::Hash; @@ -23,7 +22,7 @@ where { let mut sum = 0; let iters = 4; - let guard = epoch::pin(); + let guard = map.guard(); for _ in 0..iters { for key in keys { if map.get(key, &guard).is_some() { @@ -39,7 +38,7 @@ where K: 'static + Sync + Send + Copy + Hash + Ord + std::fmt::Display, { let mut sum = 0; - let guard = epoch::pin(); + let guard = map.guard(); for key in keys { if map.remove(key, &guard).is_some() { sum += 1; @@ -53,7 +52,7 @@ where K: 'static + Sync + Send + Copy + Hash + Ord, { let mut sum = 0; - let guard = epoch::pin(); + let guard = map.guard(); for i in 0..keys.len() { if map.insert(keys[i], 0, &guard).is_none() { sum += 1; @@ -67,7 +66,7 @@ where K: Sync + Send + Copy + Hash + Ord, { let mut sum = 0; - let guard = epoch::pin(); + let guard = map.guard(); for i in 0..keys.len() { if map.contains_key(&keys[i], &guard) { sum += 1; @@ -81,7 +80,7 @@ where K: 'static + Sync + Send + Copy + Hash + Ord, { let mut sum = 0; - let guard = epoch::pin(); + let guard = map.guard(); let mut i = keys.len() as isize - 2; while i >= 0 { if map.remove(&keys[i as usize], &guard).is_some() { @@ -98,7 +97,7 @@ where V: Sync + Send, { let mut sum = 0; - let guard = epoch::pin(); + let guard = map.guard(); for i in 0..expect { if map.get(&keys1[i], &guard).is_some() { sum += 1; @@ -115,7 +114,7 @@ where K: Sync + Send + Copy + Hash + Ord, { let mut sum = 0; - let guard = epoch::pin(); + let guard = map.guard(); for i in 0..k1.len() { if map.contains_key(&k1[i], &guard) { sum += 1; @@ -132,7 +131,7 @@ where K: Sync + Send + Copy + Hash + Eq, { let mut sum = 0; - let guard = epoch::pin(); + let guard = map.guard(); for _ in map.keys(&guard) { sum += 1; } @@ -144,7 +143,7 @@ where K: Sync + Send + Copy + Hash + Eq, { let mut sum = 0; - let guard = epoch::pin(); + let guard = map.guard(); for _ in map.values(&guard) { sum += 1; } @@ -156,7 +155,7 @@ where K: Sync + Send + Copy + Hash + Eq, { let mut sum = 0; - let guard = epoch::pin(); + let guard = map.guard(); for _ in map.iter(&guard) { sum += 1; } diff --git a/tests/jsr166/main.rs b/tests/jsr166/main.rs index a108639f..9f2f711a 100644 --- a/tests/jsr166/main.rs +++ b/tests/jsr166/main.rs @@ -5,7 +5,7 @@ const ITER: [(usize, &'static str); 5] = [(1, "A"), (2, "B"), (3, "C"), (4, "D") #[test] fn test_from_iter() { - let guard = unsafe { crossbeam_epoch::unprotected() }; + let guard = unsafe { seize::Guard::unprotected() }; let map1 = from_iter_contron(); let map2: HashMap<_, _> = HashMap::from_iter(ITER.iter()); @@ -19,7 +19,7 @@ fn test_from_iter() { } fn from_iter_contron() -> HashMap { - let guard = unsafe { crossbeam_epoch::unprotected() }; + let guard = unsafe { seize::Guard::unprotected() }; let map = HashMap::with_capacity(5); assert!(map.is_empty()); @@ -36,7 +36,7 @@ fn map5() -> HashMap { let map = HashMap::new(); // TODO: add is_empty check once method exists // assert!(map.is_empty()); - let guard = epoch::pin(); + let guard = map.guard(); map.insert(1, "A".to_owned(), &guard); map.insert(2, "B".to_owned(), &guard); map.insert(3, "C".to_owned(), &guard); @@ -45,6 +45,7 @@ fn map5() -> HashMap { // TODO: add is_empty and len check once methods exist // assert!(!map.is_empty()); // assert_eq!(map.len(), 5); + drop(guard); map } @@ -52,7 +53,7 @@ fn map5() -> HashMap { #[test] fn test_remove() { let map = map5(); - let guard = epoch::pin(); + let guard = map.guard(); map.remove(&5, &guard); // TODO: add len check once method exists // assert_eq!(map.len(), 4); diff --git a/tests/regressions.rs b/tests/regressions.rs index 9536a030..0d938b81 100644 --- a/tests/regressions.rs +++ b/tests/regressions.rs @@ -1,4 +1,4 @@ -use flurry::{epoch::pin, *}; +use flurry::*; use rand::{thread_rng, Rng}; #[test] @@ -10,7 +10,7 @@ fn issue90() { let mut rng = thread_rng(); let map = HashMap::new(); - let g = pin(); + let g = map.guard(); for _ in 0..ITERATIONS { let el = rng.gen_range(0, 1000); let _ = map.try_insert(el, el, &g); From 10667b6d36c942c5279053d3b8b24332666dfe71 Mon Sep 17 00:00:00 2001 From: Ibraheem Ahmed Date: Tue, 8 Feb 2022 19:39:48 -0500 Subject: [PATCH 02/29] fix serde/rayon build --- src/rayon_impls.rs | 50 ++++++++++++++++++++++++++++++++-------------- src/serde_impls.rs | 20 ++++++++++++------- 2 files changed, 48 insertions(+), 22 deletions(-) diff --git a/src/rayon_impls.rs b/src/rayon_impls.rs index 2d2eca87..43b23a19 100644 --- a/src/rayon_impls.rs +++ b/src/rayon_impls.rs @@ -173,14 +173,18 @@ mod test { let to_extend_with = Vec::new(); let mut map = HashMap::new(); - let guard = map.guard(); - map.insert(1, 2, &guard); - map.insert(3, 4, &guard); + + { + let guard = map.guard(); + map.insert(1, 2, &guard); + map.insert(3, 4, &guard); + } map.par_extend(to_extend_with.into_par_iter()); assert_eq!(map.len(), 2); + let guard = map.guard(); assert_eq!(map.get(&1, &guard), Some(&2)); assert_eq!(map.get(&3, &guard), Some(&4)); } @@ -193,13 +197,17 @@ mod test { } let mut map = HashMap::new(); - let guard = map.guard(); - map.insert(1, 2, &guard); - map.insert(3, 4, &guard); + + { + let guard = map.guard(); + map.insert(1, 2, &guard); + map.insert(3, 4, &guard); + } map.par_extend(to_extend_with.into_par_iter()); assert_eq!(map.len(), 102); + let guard = map.guard(); assert_eq!(map.get(&1, &guard), Some(&2)); assert_eq!(map.get(&3, &guard), Some(&4)); assert_eq!(map.get(&100, &guard), Some(&0)); @@ -249,14 +257,18 @@ mod test { let to_extend_with = Vec::new(); let mut set = HashSet::new(); - let guard = set.guard(); - set.insert(1, &guard); - set.insert(3, &guard); + + { + let guard = set.guard(); + set.insert(1, &guard); + set.insert(3, &guard); + } set.par_extend(to_extend_with.into_par_iter()); assert_eq!(set.len(), 2); + let guard = set.guard(); assert!(set.contains(&1, &guard)); assert!(!set.contains(&17, &guard)); } @@ -269,13 +281,17 @@ mod test { } let mut set = HashSet::new(); - let guard = set.guard(); - set.insert((1, 2), &guard); - set.insert((3, 4), &guard); + + { + let guard = set.guard(); + set.insert((1, 2), &guard); + set.insert((3, 4), &guard); + } set.par_extend(to_extend_with.into_par_iter()); assert_eq!(set.len(), 102); + let guard = set.guard(); assert!(set.contains(&(1, 2), &guard)); assert!(set.contains(&(199, 990), &guard)); assert!(!set.contains(&(199, 167), &guard)); @@ -286,13 +302,17 @@ mod test { let to_extend_with = Vec::new(); let mut set = HashSet::new(); - let guard = set.guard(); - set.insert((1, 2), &guard); - set.insert((3, 4), &guard); + + { + let guard = set.guard(); + set.insert((1, 2), &guard); + set.insert((3, 4), &guard); + } set.par_extend(to_extend_with.into_par_iter()); assert_eq!(set.len(), 2); + let guard = set.guard(); assert!(set.contains(&(1, 2), &guard)); assert!(!set.contains(&(199, 990), &guard)); assert!(!set.contains(&(199, 167), &guard)); diff --git a/src/serde_impls.rs b/src/serde_impls.rs index f46a2e38..9154bcca 100644 --- a/src/serde_impls.rs +++ b/src/serde_impls.rs @@ -83,11 +83,14 @@ where Some(n) => HashMap::with_capacity_and_hasher(n, S::default()), None => HashMap::with_hasher(S::default()), }; - let guard = map.guard(); - while let Some((key, value)) = access.next_entry()? { - if let Some(_old_value) = map.insert(key, value, &guard) { - unreachable!("Serialized map held two values with the same key"); + { + let guard = map.guard(); + + while let Some((key, value)) = access.next_entry()? { + if let Some(_old_value) = map.insert(key, value, &guard) { + unreachable!("Serialized map held two values with the same key"); + } } } @@ -162,10 +165,13 @@ where A: SeqAccess<'de>, { let set = HashSet::default(); - let guard = set.guard(); - while let Some(value) = access.next_element()? { - let _ = set.insert(value, &guard); + { + let guard = set.guard(); + + while let Some(value) = access.next_element()? { + let _ = set.insert(value, &guard); + } } Ok(set) From e8c71a43eda969b0984914dac5503874ab697abf Mon Sep 17 00:00:00 2001 From: Ibraheem Ahmed Date: Tue, 8 Feb 2022 19:48:54 -0500 Subject: [PATCH 03/29] update docs --- src/lib.rs | 4 ++-- src/map.rs | 9 ++++----- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 145eb769..a6f73f20 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -9,9 +9,9 @@ //! # A note on `Guard` and memory use //! //! You may have noticed that many of the access methods on this map take a reference to an -//! [`epoch::Guard`]. The exact details of this are beyond the scope of this documentation (for +//! [`Guard`]. The exact details of this are beyond the scope of this documentation (for //! that, see [`crossbeam::epoch`]), but some of the implications bear repeating here. You obtain a -//! `Guard` using [`epoch::pin`], and you can use references to the same guard to make multiple API +//! `Guard` using [`HashMap::guard`], and you can use references to the same guard to make multiple API //! calls if you wish. Whenever you get a reference to something stored in the map, that reference //! is tied to the lifetime of the `Guard` that you provided. This is because each `Guard` prevents //! the destruction of any item associated with it. Whenever something is read under a `Guard`, diff --git a/src/map.rs b/src/map.rs index 912ea387..912d48ef 100644 --- a/src/map.rs +++ b/src/map.rs @@ -76,8 +76,8 @@ macro_rules! load_factor { /// A concurrent hash table. /// /// Flurry uses [`Guards`] to control the lifetime of the resources that get stored and -/// extracted from the map. [`Guards`] are acquired through the [`epoch::pin`], [`HashMap::pin`] -/// and [`HashMap::guard`] functions. For more information, see the [notes in the crate-level +/// extracted from the map. [`Guards`] are acquired through the [`HashMap::pin`] and +/// [`HashMap::guard`] functions. For more information, see the [notes in the crate-level /// documentation]. /// /// [notes in the crate-level documentation]: index.html#a-note-on-guard-and-memory-use @@ -329,15 +329,14 @@ impl HashMap { map } - /// Associate a custom [`epoch::Collector`] with this map. + /// Associate a custom [`seize::Collector`] with this map. /// /// By default, the global collector is used. With this method you can use a different /// collector instead. This may be desireable if you want more control over when and how memory /// reclamation happens. /// /// Note that _all_ `Guard` references provided to access the returned map _must_ be - /// constructed using guards produced by `collector`. You can use [`HashMap::register`] to get - /// a thread-local handle to the collector that then lets you construct an [`epoch::Guard`]. + /// constructed using guards produced by `collector`. pub fn with_collector(mut self, collector: Collector) -> Self { self.collector = collector; self From 8e03dd5240dc6cd136d5e4016612175ed1b123d4 Mon Sep 17 00:00:00 2001 From: Ibraheem Ahmed Date: Tue, 8 Feb 2022 20:05:02 -0500 Subject: [PATCH 04/29] appease clippy --- src/map.rs | 1 + src/node.rs | 2 +- src/raw/mod.rs | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/map.rs b/src/map.rs index 912d48ef..d8f20fa9 100644 --- a/src/map.rs +++ b/src/map.rs @@ -337,6 +337,7 @@ impl HashMap { /// /// Note that _all_ `Guard` references provided to access the returned map _must_ be /// constructed using guards produced by `collector`. + #[must_use] pub fn with_collector(mut self, collector: Collector) -> Self { self.collector = collector; self diff --git a/src/node.rs b/src/node.rs index 80e78017..231383eb 100644 --- a/src/node.rs +++ b/src/node.rs @@ -392,7 +392,7 @@ impl TreeBin { .is_ok() { waiting = true; - let current_thread = Shared::boxed(current(), &collector); + let current_thread = Shared::boxed(current(), collector); let waiter = self.waiter.swap(current_thread, Ordering::SeqCst, guard); assert!(waiter.is_null()); } diff --git a/src/raw/mod.rs b/src/raw/mod.rs index a169ab01..9bf47884 100644 --- a/src/raw/mod.rs +++ b/src/raw/mod.rs @@ -61,7 +61,7 @@ impl Table { } pub(crate) fn new(bins: usize, collector: &Collector) -> Self { - Self::from(vec![Atomic::null(); bins], &collector) + Self::from(vec![Atomic::null(); bins], collector) } pub(crate) fn is_empty(&self) -> bool { From a60861f0136b50cc389c50e67d6859eab62e874b Mon Sep 17 00:00:00 2001 From: Ibraheem Ahmed Date: Sat, 12 Feb 2022 18:11:29 -0500 Subject: [PATCH 05/29] disable evil collector test (for now) --- src/map.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/map.rs b/src/map.rs index d8f20fa9..45ceae57 100644 --- a/src/map.rs +++ b/src/map.rs @@ -108,7 +108,7 @@ pub struct HashMap { /// unsoundness as described in https://github.com/jonhoo/flurry/issues/46. Specifically, a /// user can do: /// - /// ```rust,should_panic + /// ```rust,no_run /// # use flurry::HashMap; /// let map: HashMap<_, _> = HashMap::default(); /// map.insert(42, String::from("hello"), &map.guard()); From 5170f149585f86b70c027082ba53b1c92f8e3072 Mon Sep 17 00:00:00 2001 From: Ibraheem Ahmed Date: Sat, 12 Feb 2022 20:09:39 -0500 Subject: [PATCH 06/29] disable miri isolation --- .github/workflows/miri.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/miri.yml b/.github/workflows/miri.yml index 8633b8bc..bf220c5a 100644 --- a/.github/workflows/miri.yml +++ b/.github/workflows/miri.yml @@ -22,4 +22,4 @@ jobs: command: miri args: test env: - MIRIFLAGS: "-Zmiri-ignore-leaks" + MIRIFLAGS: "-Zmiri-ignore-leaks -Zmiri-disable-isolation" From 4c0b1d755ed237fa2e8b3f8bc8fe2dc21b326c21 Mon Sep 17 00:00:00 2001 From: Ibraheem Ahmed Date: Tue, 15 Feb 2022 16:37:44 -0500 Subject: [PATCH 07/29] fix stacked borrows violation in `HashMap::put` --- src/map.rs | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/src/map.rs b/src/map.rs index 45ceae57..fc96cd2e 100644 --- a/src/map.rs +++ b/src/map.rs @@ -1826,18 +1826,19 @@ where // the key already exists in the map! let current_value = n.value.load(Ordering::SeqCst, guard); - // safety: since the value is present now, and we've held a guard from - // the beginning of the search, the value cannot be dropped until the - // next epoch, which won't arrive until after we drop our guard. - let current_value = unsafe { current_value.deref() }; - if no_replacement { // the key is not absent, so don't update because of // `no_replacement`, we don't use the new value, so we need to clean // it up and return it back to the caller - // safety: we own value and did not share it + + // safety: since the value is present now, and we've held a guard from + // the beginning of the search, the value cannot be dropped until the + // next epoch, which won't arrive until after we drop our guard. + let current_value = unsafe { current_value.deref() }; + return PutResult::Exists { current: current_value, + // safety: we own value and did not share it not_inserted: unsafe { value.into_box() }, }; } else { @@ -1865,8 +1866,17 @@ where // `value` field (which is what we swapped), so freeing // now_garbage is fine. unsafe { guard.retire_shared(now_garbage) }; + + // safety: since the value is present now, and we've held a guard from + // safety: same as the deref in the no_replacement case + // + // note that we must deref *after* calling retire_shared + // because it creates an &mut T which would not be unique + // if we are holding on to &T + let current_value = unsafe { current_value.deref() }; + + break Some(current_value); } - break Some(current_value); } // TODO: This Ordering can probably be relaxed due to the Mutex From 2f03673678af90de9911e2f1a3bdc7dbd0a76ad7 Mon Sep 17 00:00:00 2001 From: Ibraheem Ahmed Date: Tue, 15 Feb 2022 19:01:21 -0500 Subject: [PATCH 08/29] fetch seize patch --- Cargo.toml | 2 +- src/map.rs | 24 +++++++----------------- 2 files changed, 8 insertions(+), 18 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index b673b48c..c22194a6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,7 +24,7 @@ parking_lot = "0.10" num_cpus = "1.12.0" rayon = {version = "1.3", optional = true} serde = {version = "1.0.105", optional = true} -seize = { git = "https://github.com/ibraheemdev/seize" } +seize = {git = "https://github.com/ibraheemdev/seize"} [dependencies.ahash] version = "0.3.2" diff --git a/src/map.rs b/src/map.rs index fc96cd2e..45ceae57 100644 --- a/src/map.rs +++ b/src/map.rs @@ -1826,19 +1826,18 @@ where // the key already exists in the map! let current_value = n.value.load(Ordering::SeqCst, guard); + // safety: since the value is present now, and we've held a guard from + // the beginning of the search, the value cannot be dropped until the + // next epoch, which won't arrive until after we drop our guard. + let current_value = unsafe { current_value.deref() }; + if no_replacement { // the key is not absent, so don't update because of // `no_replacement`, we don't use the new value, so we need to clean // it up and return it back to the caller - - // safety: since the value is present now, and we've held a guard from - // the beginning of the search, the value cannot be dropped until the - // next epoch, which won't arrive until after we drop our guard. - let current_value = unsafe { current_value.deref() }; - + // safety: we own value and did not share it return PutResult::Exists { current: current_value, - // safety: we own value and did not share it not_inserted: unsafe { value.into_box() }, }; } else { @@ -1866,17 +1865,8 @@ where // `value` field (which is what we swapped), so freeing // now_garbage is fine. unsafe { guard.retire_shared(now_garbage) }; - - // safety: since the value is present now, and we've held a guard from - // safety: same as the deref in the no_replacement case - // - // note that we must deref *after* calling retire_shared - // because it creates an &mut T which would not be unique - // if we are holding on to &T - let current_value = unsafe { current_value.deref() }; - - break Some(current_value); } + break Some(current_value); } // TODO: This Ordering can probably be relaxed due to the Mutex From 8fd541b91b4bd64cf9e82bf1f761315d0b38da22 Mon Sep 17 00:00:00 2001 From: Ibraheem Ahmed Date: Tue, 15 Feb 2022 19:03:58 -0500 Subject: [PATCH 09/29] bump msrv to 1.56 --- .github/workflows/msrv.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/msrv.yml b/.github/workflows/msrv.yml index 7dde2bcb..17750e7c 100644 --- a/.github/workflows/msrv.yml +++ b/.github/workflows/msrv.yml @@ -10,10 +10,10 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: 1.52.0 # rustdoc:: lint prefix + toolchain: 1.56.0 # UnsafeCell::raw_get override: true - uses: actions/checkout@v2 - - name: cargo +1.52.0 check + - name: cargo +1.56.0 check uses: actions-rs/cargo@v1 with: command: check From 6861f9a5f86f3d725be5a151d731e0836bb0f35c Mon Sep 17 00:00:00 2001 From: Ibraheem Ahmed Date: Tue, 15 Feb 2022 19:47:53 -0500 Subject: [PATCH 10/29] pass -Zmiri-tag-raw-pointers, don't pass -Zmiri-ignore-leaks in CI --- .github/workflows/miri.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/miri.yml b/.github/workflows/miri.yml index bf220c5a..359b4f45 100644 --- a/.github/workflows/miri.yml +++ b/.github/workflows/miri.yml @@ -22,4 +22,4 @@ jobs: command: miri args: test env: - MIRIFLAGS: "-Zmiri-ignore-leaks -Zmiri-disable-isolation" + MIRIFLAGS: "-Zmiri-tag-raw-pointers -Zmiri-disable-isolation" From eb6290d8b1dc82775dee10bb39411c2b034611c5 Mon Sep 17 00:00:00 2001 From: Ibraheem Ahmed Date: Tue, 15 Feb 2022 20:13:26 -0500 Subject: [PATCH 11/29] disallow evil guards --- src/map.rs | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/src/map.rs b/src/map.rs index 45ceae57..6097dc02 100644 --- a/src/map.rs +++ b/src/map.rs @@ -108,7 +108,7 @@ pub struct HashMap { /// unsoundness as described in https://github.com/jonhoo/flurry/issues/46. Specifically, a /// user can do: /// - /// ```rust,no_run + /// ```rust,should_panic /// # use flurry::HashMap; /// let map: HashMap<_, _> = HashMap::default(); /// map.insert(42, String::from("hello"), &map.guard()); @@ -352,12 +352,11 @@ impl HashMap { } #[inline] - fn check_guard(&self, _guard: &Guard<'_>) { - // TODO - // // guard.collector() may be `None` if it is unprotected - // if let Some(c) = guard.collector() { - // assert_eq!(c, &self.collector); - // } + fn check_guard(&self, guard: &Guard<'_>) { + // guard.collector() may be `None` if it is unprotected + if let Some(c) = guard.collector() { + assert_eq!(c, &self.collector); + } } /// Returns the number of entries in the map. @@ -3573,8 +3572,6 @@ mod tree_bins { #[test] #[should_panic] - // TODO - #[ignore] fn disallow_evil() { let map: HashMap<_, _> = HashMap::default(); map.insert(42, String::from("hello"), &map.guard()); From f704581c44d360fbcfdd0d84f45a151cb81e9e97 Mon Sep 17 00:00:00 2001 From: Ibraheem Ahmed Date: Tue, 15 Feb 2022 22:25:02 -0500 Subject: [PATCH 12/29] fix guard equality checks --- src/map.rs | 9 ++++++--- tests/basic.rs | 17 +++++++---------- tests/set.rs | 17 +++++++---------- 3 files changed, 20 insertions(+), 23 deletions(-) diff --git a/src/map.rs b/src/map.rs index 6097dc02..e3a432bf 100644 --- a/src/map.rs +++ b/src/map.rs @@ -355,7 +355,7 @@ impl HashMap { fn check_guard(&self, guard: &Guard<'_>) { // guard.collector() may be `None` if it is unprotected if let Some(c) = guard.collector() { - assert_eq!(c, &self.collector); + assert!(Collector::ptr_eq(c, &self.collector)); } } @@ -3093,11 +3093,14 @@ where S: BuildHasher + Clone, { fn clone(&self) -> HashMap { - let cloned_map = Self::with_capacity_and_hasher(self.len(), self.build_hasher.clone()); + let cloned_map = Self::with_capacity_and_hasher(self.len(), self.build_hasher.clone()) + .with_collector(self.collector.clone()); + { let guard = self.collector.enter(); + let cloned_guard = cloned_map.collector.enter(); for (k, v) in self.iter(&guard) { - cloned_map.insert(k.clone(), v.clone(), &guard); + cloned_map.insert(k.clone(), v.clone(), &cloned_guard); } } cloned_map diff --git a/tests/basic.rs b/tests/basic.rs index 6bc02a1a..b14079ea 100644 --- a/tests/basic.rs +++ b/tests/basic.rs @@ -393,10 +393,9 @@ fn different_size_maps_not_equal() { let map1 = HashMap::::new(); let map2 = HashMap::::new(); { - let guard = map1.guard(); - map1.insert(1, 0, &guard); - map1.insert(2, 0, &guard); - map2.insert(1, 0, &guard); + map1.pin().insert(1, 0); + map1.pin().insert(2, 0); + map2.pin().insert(1, 0); } assert_ne!(map1, map2); @@ -408,9 +407,8 @@ fn same_values_equal() { let map1 = HashMap::::new(); let map2 = HashMap::::new(); { - let guard = map1.guard(); - map1.insert(1, 0, &guard); - map2.insert(1, 0, &guard); + map1.pin().insert(1, 0); + map2.pin().insert(1, 0); } assert_eq!(map1, map2); @@ -422,9 +420,8 @@ fn different_values_not_equal() { let map1 = HashMap::::new(); let map2 = HashMap::::new(); { - let guard = map1.guard(); - map1.insert(1, 0, &guard); - map2.insert(1, 1, &guard); + map1.pin().insert(1, 0); + map2.pin().insert(1, 1); } assert_ne!(map1, map2); diff --git a/tests/set.rs b/tests/set.rs index 3dde617a..74df53fe 100644 --- a/tests/set.rs +++ b/tests/set.rs @@ -178,10 +178,9 @@ fn different_size_maps_not_equal() { let set1 = HashSet::::new(); let set2 = HashSet::::new(); { - let guard = set1.guard(); - set1.insert(1, &guard); - set1.insert(2, &guard); - set2.insert(1, &guard); + set1.pin().insert(1); + set1.pin().insert(2); + set2.pin().insert(1); } assert_ne!(set1, set2); @@ -193,9 +192,8 @@ fn same_values_equal() { let set1 = HashSet::::new(); let set2 = HashSet::::new(); { - let guard = set1.guard(); - set1.insert(1, &guard); - set2.insert(1, &guard); + set1.pin().insert(1); + set2.pin().insert(1); } assert_eq!(set1, set2); @@ -207,9 +205,8 @@ fn different_values_not_equal() { let set1 = HashSet::::new(); let set2 = HashSet::::new(); { - let guard = set1.guard(); - set1.insert(1, &guard); - set2.insert(2, &guard); + set1.pin().insert(1); + set2.pin().insert(2); } assert_ne!(set1, set2); From d50737c467f47b6732057d36d29fbaa9a0751765 Mon Sep 17 00:00:00 2001 From: Ibraheem Ahmed Date: Sat, 19 Feb 2022 18:29:26 -0500 Subject: [PATCH 13/29] reuse guards in `different_size_maps_not_equal` test --- tests/basic.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/tests/basic.rs b/tests/basic.rs index b14079ea..907fdb5a 100644 --- a/tests/basic.rs +++ b/tests/basic.rs @@ -393,9 +393,15 @@ fn different_size_maps_not_equal() { let map1 = HashMap::::new(); let map2 = HashMap::::new(); { - map1.pin().insert(1, 0); - map1.pin().insert(2, 0); - map2.pin().insert(1, 0); + let guard1 = map1.guard(); + let guard2 = map2.guard(); + + map1.insert(1, 0, &guard1); + map1.insert(2, 0, &guard1); + map1.insert(3, 0, &guard1); + + map2.insert(1, 0, &guard2); + map2.insert(2, 0, &guard2); } assert_ne!(map1, map2); From 9f8766d51bf98734d3f84cb3c31f0604f248e33a Mon Sep 17 00:00:00 2001 From: Ibraheem Ahmed Date: Sat, 19 Feb 2022 18:29:43 -0500 Subject: [PATCH 14/29] update seize --- src/reclaim.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/reclaim.rs b/src/reclaim.rs index beacd441..34ff7a37 100644 --- a/src/reclaim.rs +++ b/src/reclaim.rs @@ -12,8 +12,8 @@ impl Atomic { Self(seize::AtomicPtr::default()) } - pub(crate) fn load<'g>(&self, _: Ordering, guard: &'g Guard<'_>) -> Shared<'g, T> { - guard.protect(&self.0).into() + pub(crate) fn load<'g>(&self, ordering: Ordering, guard: &'g Guard<'_>) -> Shared<'g, T> { + guard.protect(&self.0, ordering).into() } pub(crate) fn store(&self, new: Shared<'_, T>, ordering: Ordering) { From 011a9df178983d95a9f6d43cfc404576904107be Mon Sep 17 00:00:00 2001 From: Ibraheem Ahmed Date: Sat, 19 Feb 2022 18:31:17 -0500 Subject: [PATCH 15/29] relax failure ordering in `Table::get_moved` --- src/raw/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/raw/mod.rs b/src/raw/mod.rs index 9bf47884..1bde089f 100644 --- a/src/raw/mod.rs +++ b/src/raw/mod.rs @@ -85,7 +85,7 @@ impl Table { Shared::null(), for_table, Ordering::SeqCst, - Ordering::SeqCst, + Ordering::Relaxed, guard, ) { Ok(_) => {} From 336477cb4cbd334fa216284459b21ed8b238c71a Mon Sep 17 00:00:00 2001 From: Ibraheem Ahmed Date: Sat, 19 Feb 2022 18:41:30 -0500 Subject: [PATCH 16/29] remove `Shared: From<*const Linked>` --- src/iter/mod.rs | 11 +++++------ src/map.rs | 36 ++++++++++++++++++------------------ src/raw/mod.rs | 10 ++++++++-- src/reclaim.rs | 6 ------ 4 files changed, 31 insertions(+), 32 deletions(-) diff --git a/src/iter/mod.rs b/src/iter/mod.rs index 22ce9c9d..506d55e0 100644 --- a/src/iter/mod.rs +++ b/src/iter/mod.rs @@ -1,8 +1,7 @@ mod traverser; -use seize::Linked; pub(crate) use traverser::NodeIter; -use crate::reclaim::Guard; +use crate::reclaim::{Guard, Shared}; use std::sync::atomic::Ordering; /// An iterator over a map's entries. @@ -15,11 +14,9 @@ pub struct Iter<'g, K, V> { } impl<'g, K, V> Iter<'g, K, V> { - pub(crate) fn next_internal(&mut self) -> Option<(&'g K, &'g Linked)> { + pub(crate) fn next_internal(&mut self) -> Option<(&'g K, Shared<'g, V>)> { let node = self.node_iter.next()?; let value = node.value.load(Ordering::SeqCst, self.guard); - // safety: flurry does not drop or move until after guard drop - let value = unsafe { value.deref() }; Some((&node.key, value)) } } @@ -27,7 +24,9 @@ impl<'g, K, V> Iter<'g, K, V> { impl<'g, K, V> Iterator for Iter<'g, K, V> { type Item = (&'g K, &'g V); fn next(&mut self) -> Option { - self.next_internal().map(|(k, v)| (k, &**v)) + // safety: flurry does not drop or move until after guard drop + self.next_internal() + .map(|(k, v)| unsafe { (k, &**v.deref()) }) } } diff --git a/src/map.rs b/src/map.rs index e3a432bf..6d7bc499 100644 --- a/src/map.rs +++ b/src/map.rs @@ -664,7 +664,7 @@ where fn transfer<'g>( &'g self, table: Shared<'g, Table>, - mut next_table: Shared<'g, Table>, + mut next_table_ptr: Shared<'g, Table>, guard: &'g Guard<'_>, ) { // safety: table was read while `guard` was held. the code that drops table only drops it @@ -677,17 +677,17 @@ where let stride = if ncpu > 1 { (n >> 3) / ncpu } else { n }; let stride = std::cmp::max(stride as isize, MIN_TRANSFER_STRIDE); - if next_table.is_null() { + if next_table_ptr.is_null() { // we are initiating a resize let table = Shared::boxed(Table::new(n << 1, &self.collector), &self.collector); let now_garbage = self.next_table.swap(table, Ordering::SeqCst, guard); assert!(now_garbage.is_null()); self.transfer_index.store(n as isize, Ordering::SeqCst); - next_table = self.next_table.load(Ordering::Relaxed, guard); + next_table_ptr = self.next_table.load(Ordering::Relaxed, guard); } // safety: same argument as for table above - let next_n = unsafe { next_table.deref() }.len(); + let next_n = unsafe { next_table_ptr.deref() }.len(); let mut advance = true; let mut finishing = false; @@ -732,7 +732,7 @@ where if finishing { // this branch is only taken for one thread partaking in the resize! self.next_table.store(Shared::null(), Ordering::SeqCst); - let now_garbage = self.table.swap(next_table, Ordering::SeqCst, guard); + let now_garbage = self.table.swap(next_table_ptr, Ordering::SeqCst, guard); // safety: need to guarantee that now_garbage is no longer reachable. more // specifically, no thread that executes _after_ this line can ever get a // reference to now_garbage. @@ -798,12 +798,17 @@ where let bin = table.bin(i as usize, guard); if bin.is_null() { advance = table - .cas_bin(i, Shared::null(), table.get_moved(next_table, guard), guard) + .cas_bin( + i, + Shared::null(), + table.get_moved(next_table_ptr, guard), + guard, + ) .is_ok(); continue; } // safety: as for table above - let next_table = unsafe { next_table.deref() }; + let next_table = unsafe { next_table_ptr.deref() }; // safety: bin is a valid pointer. // @@ -915,10 +920,7 @@ where next_table.store_bin(i, low_bin); next_table.store_bin(i + n, high_bin); - table.store_bin( - i, - table.get_moved(Shared::from(next_table as *const _), guard), - ); + table.store_bin(i, table.get_moved(Shared::from(next_table_ptr), guard)); // everything up to last_run in the _old_ bin linked list is now garbage. // those nodes have all been re-allocated in the new bin linked list. @@ -1081,10 +1083,7 @@ where next_table.store_bin(i, low_bin); next_table.store_bin(i + n, high_bin); - table.store_bin( - i, - table.get_moved(Shared::from(next_table as *const _), guard), - ); + table.store_bin(i, table.get_moved(Shared::from(next_table_ptr), guard)); // if we did not re-use the old bin, it is now garbage, // since all of its nodes have been reallocated. However, @@ -2711,9 +2710,10 @@ where self.check_guard(guard); let mut iter = self.iter(guard); while let Some((k, v)) = iter.next_internal() { - if !f(k, v) { - let old_value: Shared<'_, V> = Shared::from(v as *const _); - self.replace_node(k, None, Some(old_value), guard); + // safety: flurry does not drop or move until after guard drop + let value = unsafe { v.deref() }; + if !f(k, value) { + self.replace_node(k, None, Some(v), guard); } } } diff --git a/src/raw/mod.rs b/src/raw/mod.rs index 1bde089f..a9c6761f 100644 --- a/src/raw/mod.rs +++ b/src/raw/mod.rs @@ -125,7 +125,9 @@ impl Table { }; if n.hash == hash && n.key.borrow() == key { - return Shared::from(node as *const _); + // safety: this cast is fine because find + // is only used to return shared references + return Shared::from(node as *const _ as *mut _); } let next = n.next.load(Ordering::SeqCst, guard); if next.is_null() { @@ -172,7 +174,11 @@ impl Table { "`find` was called on a TreeNode, which cannot be the first entry in a bin" ); } - BinEntry::Tree(_) => TreeBin::find(Shared::from(bin as *const _), hash, key, guard), + BinEntry::Tree(_) => { + // safety: this cast is fine because TreeBin::find + // only needs a shared reference to the bin + TreeBin::find(Shared::from(bin as *const _ as *mut _), hash, key, guard) + } } } diff --git a/src/reclaim.rs b/src/reclaim.rs index 34ff7a37..b33c4f48 100644 --- a/src/reclaim.rs +++ b/src/reclaim.rs @@ -143,12 +143,6 @@ impl From<*mut Linked> for Shared<'_, T> { } } -impl From<*const Linked> for Shared<'_, T> { - fn from(ptr: *const Linked) -> Self { - Shared::from(ptr as *mut _) - } -} - pub(crate) trait RetireShared { unsafe fn retire_shared(&self, shared: Shared<'_, T>); } From a94060adc7731193c854d3eca046af90c62d2d73 Mon Sep 17 00:00:00 2001 From: Ibraheem Ahmed Date: Sat, 19 Feb 2022 18:49:43 -0500 Subject: [PATCH 17/29] add safety guarantees for `defer_drop_without_values` --- src/node.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/node.rs b/src/node.rs index 231383eb..b4d8bf1f 100644 --- a/src/node.rs +++ b/src/node.rs @@ -938,9 +938,15 @@ impl TreeBin { guard: &'g Guard<'_>, ) { guard.retire(bin.as_ptr(), |mut link| { - let bin = link.cast::>(); + let bin = unsafe { + // SAFETY: `bin` is a `BinEntry` + let ptr = link.cast::>(); + // SAFETY: `retire` guarantees that we + // have unique access to `bin` at this point + *Box::from_raw(ptr) + }; - if let BinEntry::Tree(mut tree_bin) = Linked::into_inner(*Box::from_raw(bin)) { + if let BinEntry::Tree(mut tree_bin) = Linked::into_inner(bin) { tree_bin.drop_fields(false); } else { unreachable!("bin is a tree bin"); From 41966b41e4bdfb4e7db3d9967527100856e6b38d Mon Sep 17 00:00:00 2001 From: Ibraheem Ahmed Date: Sat, 19 Feb 2022 19:21:52 -0500 Subject: [PATCH 18/29] remove `Guard::unprotected` helper --- src/map.rs | 18 +++++++++--------- src/node.rs | 8 ++++---- src/raw/mod.rs | 14 +++++++------- src/reclaim.rs | 10 ---------- 4 files changed, 20 insertions(+), 30 deletions(-) diff --git a/src/map.rs b/src/map.rs index 6d7bc499..970c2b3c 100644 --- a/src/map.rs +++ b/src/map.rs @@ -3,7 +3,7 @@ use seize::Linked; use crate::iter::*; use crate::node::*; use crate::raw::*; -use crate::reclaim::{self, Atomic, Collector, Guard, RetireShared, Shared}; +use crate::reclaim::{Atomic, Collector, Guard, RetireShared, Shared}; use std::borrow::Borrow; use std::error::Error; use std::fmt::{self, Debug, Display, Formatter}; @@ -498,7 +498,7 @@ impl HashMap { // safety: we are creating this map, so no other thread can access it, // while we are initializing it. - let guard = unsafe { reclaim::unprotected() }; + let guard = unsafe { Guard::unprotected() }; let requested_capacity = if size >= MAXIMUM_CAPACITY / 2 { MAXIMUM_CAPACITY @@ -512,7 +512,7 @@ impl HashMap { // sanity check that the map has indeed not been set up already assert_eq!(self.size_ctl.load(Ordering::SeqCst), 0); - assert!(self.table.load(Ordering::SeqCst, guard).is_null()); + assert!(self.table.load(Ordering::SeqCst, &guard).is_null()); // the table has not yet been initialized, so we can just create it // with as many bins as were requested @@ -2987,10 +2987,10 @@ impl Drop for HashMap { // NOTE: we _could_ relax the bounds in all the methods that return `&'g ...` to not also // bound `&self` by `'g`, but if we did that, we would need to use a regular `epoch::Guard` // here rather than an unprotected one. - let guard = unsafe { reclaim::unprotected() }; + let guard = unsafe { Guard::unprotected() }; - assert!(self.next_table.load(Ordering::SeqCst, guard).is_null()); - let table = self.table.swap(Shared::null(), Ordering::SeqCst, guard); + assert!(self.next_table.load(Ordering::SeqCst, &guard).is_null()); + let table = self.table.swap(Shared::null(), Ordering::SeqCst, &guard); if table.is_null() { // table was never allocated! return; @@ -3050,13 +3050,13 @@ where if let Some((key, value)) = iter.next() { // safety: we own `map`, so it's not concurrently accessed by // anyone else at this point. - let guard = unsafe { reclaim::unprotected() }; + let guard = unsafe { Guard::unprotected() }; let (lower, _) = iter.size_hint(); let map = HashMap::with_capacity_and_hasher(lower.saturating_add(1), S::default()); - map.put(key, value, false, guard); - map.put_all(iter, guard); + map.put(key, value, false, &guard); + map.put_all(iter, &guard); map } else { Self::default() diff --git a/src/node.rs b/src/node.rs index b4d8bf1f..08cf02c8 100644 --- a/src/node.rs +++ b/src/node.rs @@ -1,5 +1,5 @@ use crate::raw::Table; -use crate::reclaim::{self, Atomic, Collector, Guard, RetireShared, Shared}; +use crate::reclaim::{Atomic, Collector, Guard, RetireShared, Shared}; use core::sync::atomic::{AtomicBool, AtomicI64, Ordering}; use parking_lot::Mutex; use seize::Linked; @@ -967,9 +967,9 @@ impl TreeBin { // swap out first pointer so nodes will not get dropped again when // `tree_bin` is dropped - let guard = reclaim::unprotected(); - let p = self.first.swap(Shared::null(), Ordering::Relaxed, guard); - Self::drop_tree_nodes(p, drop_values, guard); + let guard = Guard::unprotected(); + let p = self.first.swap(Shared::null(), Ordering::Relaxed, &guard); + Self::drop_tree_nodes(p, drop_values, &guard); } /// Drops the given list of tree nodes, but only drops their values when specified. diff --git a/src/raw/mod.rs b/src/raw/mod.rs index a9c6761f..c6aee24a 100644 --- a/src/raw/mod.rs +++ b/src/raw/mod.rs @@ -186,10 +186,10 @@ impl Table { // safety: we have &mut self _and_ all references we have returned are bound to the // lifetime of their borrow of self, so there cannot be any outstanding references to // anything in the map. - let guard = unsafe { reclaim::unprotected() }; + let guard = unsafe { Guard::unprotected() }; for bin in Vec::from(std::mem::replace(&mut self.bins, vec![].into_boxed_slice())) { - if bin.load(Ordering::SeqCst, guard).is_null() { + if bin.load(Ordering::SeqCst, &guard).is_null() { // bin was never used continue; } @@ -198,7 +198,7 @@ impl Table { // note that dropping the shared Moved, if it exists, is the responsibility // of `drop` // safety: same as above - let bin_entry = unsafe { bin.load(Ordering::SeqCst, guard).deref() }; + let bin_entry = unsafe { bin.load(Ordering::SeqCst, &guard).deref() }; match **bin_entry { BinEntry::Moved => {} BinEntry::Node(_) => { @@ -220,7 +220,7 @@ impl Table { let _ = unsafe { node.value.into_box() }; // then we move to the next node - if node.next.load(Ordering::SeqCst, guard).is_null() { + if node.next.load(Ordering::SeqCst, &guard).is_null() { break; } p = unsafe { node.next.into_box() }; @@ -250,7 +250,7 @@ impl Drop for Table { // safety: we have &mut self _and_ all references we have returned are bound to the // lifetime of their borrow of self, so there cannot be any outstanding references to // anything in the map. - let guard = unsafe { reclaim::unprotected() }; + let guard = unsafe { Guard::unprotected() }; // since BinEntry::Nodes are either dropped by drop_bins or transferred to a new table, // all bins are empty or contain a Shared pointing to shared the BinEntry::Moved (if @@ -260,7 +260,7 @@ impl Drop for Table { // when testing, we check the above invariant. in production, we assume it to be true if cfg!(debug_assertions) { for bin in bins.iter() { - let bin = bin.load(Ordering::SeqCst, guard); + let bin = bin.load(Ordering::SeqCst, &guard); if bin.is_null() { continue; } else { @@ -282,7 +282,7 @@ impl Drop for Table { // we need to drop the shared forwarding node (since it is heap allocated). // Note that this needs to happen _independently_ of whether or not there was // a previous call to drop_bins. - let moved = self.moved.swap(Shared::null(), Ordering::SeqCst, guard); + let moved = self.moved.swap(Shared::null(), Ordering::SeqCst, &guard); assert!( !moved.is_null(), "self.moved is initialized together with the table" diff --git a/src/reclaim.rs b/src/reclaim.rs index b33c4f48..6334c0a0 100644 --- a/src/reclaim.rs +++ b/src/reclaim.rs @@ -153,16 +153,6 @@ impl RetireShared for Guard<'_> { } } -pub(crate) unsafe fn unprotected() -> &'static Guard<'static> { - struct RacyGuard(Guard<'static>); - - unsafe impl Send for RacyGuard {} - unsafe impl Sync for RacyGuard {} - - static UNPROTECTED: RacyGuard = RacyGuard(unsafe { Guard::unprotected() }); - &UNPROTECTED.0 -} - pub(crate) enum GuardRef<'g> { Owned(Guard<'g>), Ref(&'g Guard<'g>), From 386eaa75f33a0aab33646242525b45af5adf26ac Mon Sep 17 00:00:00 2001 From: Ibraheem Ahmed Date: Sat, 19 Feb 2022 19:43:43 -0500 Subject: [PATCH 19/29] pass boxed bin to `TableBin::new` --- src/map.rs | 9 ++++++--- src/node.rs | 3 ++- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/src/map.rs b/src/map.rs index 970c2b3c..0a05b9aa 100644 --- a/src/map.rs +++ b/src/map.rs @@ -1040,7 +1040,8 @@ where BinEntry::Tree(TreeBin::new( // safety: we have just created `low` and its `next` // nodes and have never shared them - low, guard, + unsafe { low.into_box() }, + guard, )), &self.collector, ) @@ -1067,7 +1068,8 @@ where BinEntry::Tree(TreeBin::new( // safety: we have just created `high` and its `next` // nodes and have never shared them - high, guard, + unsafe { high.into_box() }, + guard, )), &self.collector, ) @@ -2824,7 +2826,8 @@ where BinEntry::Tree(TreeBin::new( // safety: we have just created `head` and its `next` // nodes and have never shared them - head, guard, + unsafe { head.into_box() }, + guard, )), &self.collector, ), diff --git a/src/node.rs b/src/node.rs index 08cf02c8..8cb266ec 100644 --- a/src/node.rs +++ b/src/node.rs @@ -241,8 +241,9 @@ where /// Constructs a new bin from the given nodes. /// /// Nodes are arranged into an ordered red-black tree. - pub(crate) fn new(bin: Shared<'_, BinEntry>, guard: &Guard<'_>) -> Self { + pub(crate) fn new(bin: Box>>, guard: &Guard<'_>) -> Self { let mut root = Shared::null(); + let bin = Shared::from(Box::into_raw(bin)); // safety: We own the nodes for creating this new TreeBin, so they are // not shared with another thread and cannot get invalidated. From ad9d5f29acce9a81b11f0e1875c39f45b6dae53e Mon Sep 17 00:00:00 2001 From: Ibraheem Ahmed Date: Sun, 20 Feb 2022 13:24:36 -0500 Subject: [PATCH 20/29] add flush operation --- src/iter/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/iter/mod.rs b/src/iter/mod.rs index 506d55e0..56d8b4cc 100644 --- a/src/iter/mod.rs +++ b/src/iter/mod.rs @@ -106,10 +106,10 @@ mod tests { fn values() { let map = HashMap::::new(); - let guard = map.guard(); + let mut guard = map.guard(); map.insert(1, 42, &guard); map.insert(2, 84, &guard); - let guard = map.guard(); + guard.flush(); assert_eq!( map.values(&guard).collect::>(), From d6307cf9bae70d700f29cfe0dcb708e49e6419c6 Mon Sep 17 00:00:00 2001 From: Ibraheem Ahmed Date: Sun, 20 Feb 2022 15:28:26 -0500 Subject: [PATCH 21/29] seize 0.2.0 --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index c22194a6..80cd8fb0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,7 +24,7 @@ parking_lot = "0.10" num_cpus = "1.12.0" rayon = {version = "1.3", optional = true} serde = {version = "1.0.105", optional = true} -seize = {git = "https://github.com/ibraheemdev/seize"} +seize = "0.2.0" [dependencies.ahash] version = "0.3.2" From 55093a16c36f0b8982a7862519593b9720d3939d Mon Sep 17 00:00:00 2001 From: Ibraheem Ahmed Date: Sun, 20 Feb 2022 17:14:10 -0500 Subject: [PATCH 22/29] remove 'static bounds --- src/lib.rs | 4 +--- src/map.rs | 30 +++++++++++++++--------------- src/map_ref.rs | 4 ++-- src/rayon_impls.rs | 24 ++++++++++++------------ src/serde_impls.rs | 12 ++++++------ src/set.rs | 12 ++++++------ src/set_ref.rs | 2 +- tests/jdk/map_check.rs | 6 +++--- 8 files changed, 46 insertions(+), 48 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index a6f73f20..0d3cf7cd 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -23,9 +23,7 @@ //! Notice that there is a trade-off here. Creating and dropping a `Guard` is not free, since it //! also needs to interact with said bookkeeping. But if you keep one around for a long time, you //! may accumulate much garbage which will take up valuable free memory on your system. Use your -//! best judgement in deciding whether or not to re-use a `Guard`. This is also the reason why the -//! map requires that `K: 'static` and `V: 'static`. If we did not, then your keys and values may -//! get dropped far later, potentially after those lifetimes have passed, which would not be sound. +//! best judgement in deciding whether or not to re-use a `Guard`. //! //! # Consistency //! diff --git a/src/map.rs b/src/map.rs index 0a05b9aa..69c1484f 100644 --- a/src/map.rs +++ b/src/map.rs @@ -214,7 +214,7 @@ where // === // the following methods only see Ks and Vs if there have been inserts. -// modifications to the map are all guarded by thread-safety bounds (Send + Sync + 'static). +// modifications to the map are all guarded by thread-safety bounds (Send + Sync ). // but _these_ methods do not need to be, since they will never introduce keys or values, only give // out ones that have already been inserted (which implies they must be thread-safe). // === @@ -1602,8 +1602,8 @@ where impl HashMap where - K: 'static + Sync + Send + Clone + Hash + Ord, - V: 'static + Sync + Send, + K: Sync + Send + Clone + Hash + Ord, + V: Sync + Send, S: BuildHasher, { /// Inserts a key-value pair into the map. @@ -3007,8 +3007,8 @@ impl Drop for HashMap { impl Extend<(K, V)> for &HashMap where - K: 'static + Sync + Send + Clone + Hash + Ord, - V: 'static + Sync + Send, + K: Sync + Send + Clone + Hash + Ord, + V: Sync + Send, S: BuildHasher, { fn extend>(&mut self, iter: T) { @@ -3032,8 +3032,8 @@ where impl<'a, K, V, S> Extend<(&'a K, &'a V)> for &HashMap where - K: 'static + Sync + Send + Copy + Hash + Ord, - V: 'static + Sync + Send + Copy, + K: Sync + Send + Copy + Hash + Ord, + V: Sync + Send + Copy, S: BuildHasher, { fn extend>(&mut self, iter: T) { @@ -3043,8 +3043,8 @@ where impl FromIterator<(K, V)> for HashMap where - K: 'static + Sync + Send + Clone + Hash + Ord, - V: 'static + Sync + Send, + K: Sync + Send + Clone + Hash + Ord, + V: Sync + Send, S: BuildHasher + Default, { fn from_iter>(iter: T) -> Self { @@ -3069,8 +3069,8 @@ where impl<'a, K, V, S> FromIterator<(&'a K, &'a V)> for HashMap where - K: 'static + Sync + Send + Copy + Hash + Ord, - V: 'static + Sync + Send + Copy, + K: Sync + Send + Copy + Hash + Ord, + V: Sync + Send + Copy, S: BuildHasher + Default, { fn from_iter>(iter: T) -> Self { @@ -3080,8 +3080,8 @@ where impl<'a, K, V, S> FromIterator<&'a (K, V)> for HashMap where - K: 'static + Sync + Send + Copy + Hash + Ord, - V: 'static + Sync + Send + Copy, + K: Sync + Send + Copy + Hash + Ord, + V: Sync + Send + Copy, S: BuildHasher + Default, { fn from_iter>(iter: T) -> Self { @@ -3091,8 +3091,8 @@ where impl Clone for HashMap where - K: 'static + Sync + Send + Clone + Hash + Ord, - V: 'static + Sync + Send + Clone, + K: Sync + Send + Clone + Hash + Ord, + V: Sync + Send + Clone, S: BuildHasher + Clone, { fn clone(&self) -> HashMap { diff --git a/src/map_ref.rs b/src/map_ref.rs index 1bffede5..0b339ef1 100644 --- a/src/map_ref.rs +++ b/src/map_ref.rs @@ -149,8 +149,8 @@ where impl HashMapRef<'_, K, V, S> where - K: 'static + Sync + Send + Clone + Hash + Ord, - V: 'static + Sync + Send, + K: Sync + Send + Clone + Hash + Ord, + V: Sync + Send, S: BuildHasher, { /// Inserts a key-value pair into the map. diff --git a/src/rayon_impls.rs b/src/rayon_impls.rs index 43b23a19..caee7070 100644 --- a/src/rayon_impls.rs +++ b/src/rayon_impls.rs @@ -4,8 +4,8 @@ use std::hash::{BuildHasher, Hash}; impl FromParallelIterator<(K, V)> for HashMap where - K: Clone + Hash + Ord + Send + Sync + 'static, - V: Send + Sync + 'static, + K: Clone + Hash + Ord + Send + Sync, + V: Send + Sync, S: BuildHasher + Default + Sync, { fn from_par_iter(par_iter: I) -> Self @@ -20,8 +20,8 @@ where impl ParallelExtend<(K, V)> for HashMap where - K: Clone + Hash + Ord + Send + Sync + 'static, - V: Send + Sync + 'static, + K: Clone + Hash + Ord + Send + Sync, + V: Send + Sync, S: BuildHasher + Sync, { fn par_extend(&mut self, par_iter: I) @@ -34,8 +34,8 @@ where impl ParallelExtend<(K, V)> for &HashMap where - K: Clone + Hash + Ord + Send + Sync + 'static, - V: Send + Sync + 'static, + K: Clone + Hash + Ord + Send + Sync, + V: Send + Sync, S: BuildHasher + Sync, { fn par_extend(&mut self, par_iter: I) @@ -53,8 +53,8 @@ where impl<'map, K, V, S> ParallelExtend<(K, V)> for HashMapRef<'map, K, V, S> where - K: Clone + Hash + Ord + Send + Sync + 'static, - V: Send + Sync + 'static, + K: Clone + Hash + Ord + Send + Sync, + V: Send + Sync, S: BuildHasher + Sync, { fn par_extend(&mut self, par_iter: I) @@ -67,7 +67,7 @@ where impl FromParallelIterator for HashSet where - K: Clone + Hash + Ord + Send + Sync + 'static, + K: Clone + Hash + Ord + Send + Sync, S: BuildHasher + Default + Sync, { fn from_par_iter(par_iter: I) -> Self @@ -82,7 +82,7 @@ where impl ParallelExtend for HashSet where - K: Clone + Hash + Ord + Send + Sync + 'static, + K: Clone + Hash + Ord + Send + Sync, S: BuildHasher + Sync, { fn par_extend(&mut self, par_iter: I) @@ -95,7 +95,7 @@ where impl ParallelExtend for &HashSet where - K: Clone + Hash + Ord + Send + Sync + 'static, + K: Clone + Hash + Ord + Send + Sync, S: BuildHasher + Sync, { fn par_extend(&mut self, par_iter: I) @@ -109,7 +109,7 @@ where impl<'set, K, S> ParallelExtend for HashSetRef<'set, K, S> where - K: Clone + Hash + Ord + Send + Sync + 'static, + K: Clone + Hash + Ord + Send + Sync, S: BuildHasher + Sync, { fn par_extend(&mut self, par_iter: I) diff --git a/src/serde_impls.rs b/src/serde_impls.rs index 9154bcca..aa08721f 100644 --- a/src/serde_impls.rs +++ b/src/serde_impls.rs @@ -41,8 +41,8 @@ where impl<'de, K, V, S> Deserialize<'de> for HashMap where - K: 'static + Deserialize<'de> + Send + Sync + Hash + Clone + Ord, - V: 'static + Deserialize<'de> + Send + Sync + Ord, + K: Deserialize<'de> + Send + Sync + Hash + Clone + Ord, + V: Deserialize<'de> + Send + Sync + Ord, S: Default + BuildHasher, { fn deserialize(deserializer: D) -> Result @@ -65,8 +65,8 @@ impl HashMapVisitor { impl<'de, K, V, S> Visitor<'de> for HashMapVisitor where - K: 'static + Deserialize<'de> + Send + Sync + Hash + Clone + Ord, - V: 'static + Deserialize<'de> + Send + Sync + Ord, + K: Deserialize<'de> + Send + Sync + Hash + Clone + Ord, + V: Deserialize<'de> + Send + Sync + Ord, S: Default + BuildHasher, { type Value = HashMap; @@ -124,7 +124,7 @@ where impl<'de, T, S> Deserialize<'de> for HashSet where - T: 'static + Deserialize<'de> + Send + Sync + Hash + Clone + Ord, + T: Deserialize<'de> + Send + Sync + Hash + Clone + Ord, S: Default + BuildHasher, { fn deserialize(deserializer: D) -> Result @@ -151,7 +151,7 @@ impl HashSetVisitor { impl<'de, T, S> Visitor<'de> for HashSetVisitor where - T: 'static + Deserialize<'de> + Send + Sync + Hash + Clone + Ord, + T: Deserialize<'de> + Send + Sync + Hash + Clone + Ord, S: Default + BuildHasher, { type Value = HashSet; diff --git a/src/set.rs b/src/set.rs index 89eb2a27..69a43171 100644 --- a/src/set.rs +++ b/src/set.rs @@ -384,7 +384,7 @@ where impl HashSet where - T: 'static + Sync + Send + Clone + Hash + Ord, + T: Sync + Send + Clone + Hash + Ord, S: BuildHasher, { /// Adds a value to the set. @@ -556,7 +556,7 @@ where impl Extend for &HashSet where - T: 'static + Sync + Send + Clone + Hash + Ord, + T: Sync + Send + Clone + Hash + Ord, S: BuildHasher, { fn extend>(&mut self, iter: I) { @@ -566,7 +566,7 @@ where impl<'a, T, S> Extend<&'a T> for &HashSet where - T: 'static + Sync + Send + Copy + Hash + Ord, + T: Sync + Send + Copy + Hash + Ord, S: BuildHasher, { fn extend>(&mut self, iter: I) { @@ -576,7 +576,7 @@ where impl FromIterator for HashSet where - T: 'static + Sync + Send + Clone + Hash + Ord, + T: Sync + Send + Clone + Hash + Ord, S: BuildHasher + Default, { fn from_iter>(iter: I) -> Self { @@ -588,7 +588,7 @@ where impl<'a, T, S> FromIterator<&'a T> for HashSet where - T: 'static + Sync + Send + Copy + Hash + Ord, + T: Sync + Send + Copy + Hash + Ord, S: BuildHasher + Default, { fn from_iter>(iter: I) -> Self { @@ -600,7 +600,7 @@ where impl Clone for HashSet where - T: 'static + Sync + Send + Clone + Hash + Ord, + T: Sync + Send + Clone + Hash + Ord, S: BuildHasher + Clone, { fn clone(&self) -> HashSet { diff --git a/src/set_ref.rs b/src/set_ref.rs index ba145f69..ff2d2a7a 100644 --- a/src/set_ref.rs +++ b/src/set_ref.rs @@ -112,7 +112,7 @@ where impl HashSetRef<'_, T, S> where - T: 'static + Sync + Send + Clone + Hash + Ord, + T: Sync + Send + Clone + Hash + Ord, S: BuildHasher, { /// Adds a value to the set. diff --git a/tests/jdk/map_check.rs b/tests/jdk/map_check.rs index aac648ac..401fe9dd 100644 --- a/tests/jdk/map_check.rs +++ b/tests/jdk/map_check.rs @@ -35,7 +35,7 @@ where fn t2(map: &HashMap, keys: &[K], expect: usize) where - K: 'static + Sync + Send + Copy + Hash + Ord + std::fmt::Display, + K: Sync + Send + Copy + Hash + Ord + std::fmt::Display, { let mut sum = 0; let guard = map.guard(); @@ -49,7 +49,7 @@ where fn t3(map: &HashMap, keys: &[K], expect: usize) where - K: 'static + Sync + Send + Copy + Hash + Ord, + K: Sync + Send + Copy + Hash + Ord, { let mut sum = 0; let guard = map.guard(); @@ -77,7 +77,7 @@ where fn t5(map: &HashMap, keys: &[K], expect: usize) where - K: 'static + Sync + Send + Copy + Hash + Ord, + K: Sync + Send + Copy + Hash + Ord, { let mut sum = 0; let guard = map.guard(); From 519a3fee67767b4c985179d0867c60dea8a62ec0 Mon Sep 17 00:00:00 2001 From: Ibraheem Ahmed Date: Sun, 20 Feb 2022 17:40:31 -0500 Subject: [PATCH 23/29] remove `crossbeam::epoch` from public documentation --- src/lib.rs | 12 ++++++------ src/map.rs | 6 +++--- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 0d3cf7cd..17b20bc7 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -10,7 +10,7 @@ //! //! You may have noticed that many of the access methods on this map take a reference to an //! [`Guard`]. The exact details of this are beyond the scope of this documentation (for -//! that, see [`crossbeam::epoch`]), but some of the implications bear repeating here. You obtain a +//! that, see the [`seize`] crate), but some of the implications bear repeating here. You obtain a //! `Guard` using [`HashMap::guard`], and you can use references to the same guard to make multiple API //! calls if you wish. Whenever you get a reference to something stored in the map, that reference //! is tied to the lifetime of the `Guard` that you provided. This is because each `Guard` prevents @@ -226,12 +226,12 @@ //! The Java implementation can rely on Java's runtime garbage collection to safely deallocate //! deleted or removed nodes, keys, and values. Since Rust does not have such a runtime, we must //! ensure through some other mechanism that we do not drop values before all references to them -//! have gone away. We do this using [`crossbeam::epoch`], which provides an implementation of an -//! epoch-based garbage collection scheme. This forces us to make certain API changes such as -//! requiring `Guard` arguments to many methods or wrapping the return values, but provides much -//! more efficient operation than if everything had to be atomically reference-counted. +//! have gone away. We do this using [`seize`], which provides a garbage collection scheme based +//! on batch reference-counting. This forces us to make certain API changes such as requiring +//! `Guard` arguments to many methods or wrapping the return values, but provides much more efficient +//! operation than if every individual value had to be atomically reference-counted. //! -//! [`crossbeam::epoch`]: https://docs.rs/crossbeam/0.7/crossbeam/epoch/index.html +//! [`seize`]: https://docs.rs/seize #![deny( missing_docs, missing_debug_implementations, diff --git a/src/map.rs b/src/map.rs index 69c1484f..10ac4513 100644 --- a/src/map.rs +++ b/src/map.rs @@ -3281,13 +3281,13 @@ mod tests { /// drop(r); /// ``` /// -/// # Keys and values must be static +/// # Keys and values do not have be static /// -/// ```compile_fail +/// ```no_run /// let x = String::from("foo"); /// let map: flurry::HashMap<_, _> = std::iter::once((&x, &x)).collect(); /// ``` -/// ```compile_fail +/// ```no_run /// let x = String::from("foo"); /// let map: flurry::HashMap<_, _> = flurry::HashMap::new(); /// map.insert(&x, &x, &map.guard()); From c6078d6851091a217a0138137fae0ec752f8e68b Mon Sep 17 00:00:00 2001 From: Ibraheem Ahmed Date: Thu, 24 Feb 2022 22:58:42 -0500 Subject: [PATCH 24/29] update safety comments --- src/iter/traverser.rs | 4 +- src/map.rs | 442 ++++++++++++++++++++---------------------- src/node.rs | 57 +++--- src/raw/mod.rs | 15 +- 4 files changed, 243 insertions(+), 275 deletions(-) diff --git a/src/iter/traverser.rs b/src/iter/traverser.rs index 2bbfe495..965269d4 100644 --- a/src/iter/traverser.rs +++ b/src/iter/traverser.rs @@ -175,8 +175,8 @@ impl<'g, K, V> Iterator for NodeIter<'g, K, V> { // contained node e = Some( // safety: `bin` was read under our guard, at which - // point the tree was valid. Since our guard pins - // the current epoch, the TreeNodes remain valid for + // point the tree was valid. Since our guard marks + // the current thread as active, the TreeNodes remain valid for // at least as long as we hold onto the guard. // Structurally, TreeNodes always point to TreeNodes, so this is sound. &unsafe { diff --git a/src/map.rs b/src/map.rs index 10ac4513..49fc26f0 100644 --- a/src/map.rs +++ b/src/map.rs @@ -119,9 +119,9 @@ pub struct HashMap { /// /// map.remove(&42, &map.guard()); /// // at this point, the default collector is allowed to free `"hello"` - /// // since no-one has the global epoch pinned as far as it is aware. + /// // since no guard from `map`s collector is active. /// // `oops` is tied to the lifetime of a Guard that is not a part of - /// // the same epoch group, and so can now be dangling. + /// // the same collector, and so can now be dangling. /// // but we can still access it! /// assert_eq!(oops.unwrap(), "hello"); /// ``` @@ -129,18 +129,6 @@ pub struct HashMap { /// We avoid that by checking that every external guard that is passed in is associated with /// the `Collector` that was specified when the map was created (which may be the global /// collector). - /// - /// Note also that the fact that this can be a global collector is what necessitates the - /// `'static` bounds on `K` and `V`. Since deallocation can be deferred arbitrarily, it is not - /// okay for us to take a `K` or `V` with a limited lifetime, since we may drop it far after - /// that lifetime has passed. - /// - /// One possibility is to never use the global allocator, and instead _always_ create and use - /// our own `Collector`. If we did that, then we could accept non-`'static` keys and values since - /// the destruction of the collector would ensure that that all deferred destructors are run. - /// It would, sadly, mean that we don't get to share a collector with other things that use - /// `crossbeam-epoch` though. For more on this (and a cool optimization), see: - /// https://github.com/crossbeam-rs/crossbeam/blob/ebecb82c740a1b3d9d10f235387848f7e3fa9c68/crossbeam-skiplist/src/base.rs#L308-L319 collector: Collector, build_hasher: S, @@ -451,8 +439,8 @@ impl HashMap { fn init_table<'g>(&'g self, guard: &'g Guard<'_>) -> Shared<'g, Table> { loop { let table = self.table.load(Ordering::SeqCst, guard); - // safety: we loaded the table while epoch was pinned. table won't be deallocated until - // next epoch at the earliest. + // safety: we loaded the table while the thread was marked as active. + // table won't be deallocated until the guard is dropped at the earliest. if !table.is_null() && !unsafe { table.deref() }.is_empty() { break table; } @@ -472,8 +460,8 @@ impl HashMap { // we get to do it! let mut table = self.table.load(Ordering::SeqCst, guard); - // safety: we loaded the table while epoch was pinned. table won't be deallocated - // until next epoch at the earliest. + // safety: we loaded the table while the thread was marked as active. + // table won't be deallocated until the guard is dropped at the earliest. if table.is_null() || unsafe { table.deref() }.is_empty() { let n = if sc > 0 { sc as usize @@ -750,15 +738,14 @@ where // therefore they also cannot get to ::Moved that point to now_garbage, so // we're fine. // - // this means that no _future_ thread (i.e., in a later epoch where the value - // may be freed) can get a reference to now_garbage. + // this means that no _future_ thread (i.e., that was inactive before + // the swap may be freed) can get a reference to now_garbage. // // next, let's talk about threads with _existing_ references to now_garbage. // such a thread must have gotten that reference before the call to swap. - // because of this, that thread must be pinned to an epoch <= the epoch of our - // guard (since our guard is pinning the epoch). since the garbage is placed in - // our epoch, it won't be freed until the _next_ epoch, at which point, that - // thread must have dropped its guard, and with it, any reference to the value. + // because of this, that thread must have been marked as active, and included + // in the reference count, meaning the garbage will not be freed until + // the thread drops its guard at the earliest. unsafe { guard.retire_shared(now_garbage) }; self.size_ctl .store(((n as isize) << 1) - ((n as isize) >> 1), Ordering::SeqCst); @@ -815,18 +802,17 @@ where // there are three cases when a bin pointer is invalidated: // // 1. if the table was resized, bin is a move entry, and the resize has completed. in - // that case, the table (and all its heads) will be dropped in the next epoch - // following that. + // that case, the table (and all its heads) will be retired. // 2. if the table is being resized, bin may be swapped with a move entry. the old bin - // will then be dropped in the following epoch after that happens. + // will be retired. // 3. when elements are inserted into or removed from the map, bin may be changed into // or from a TreeBin from or into a regular, linear bin. the old bin will also be - // dropped in the following epoch if that happens. + // retired. // // in all cases, we held the guard when we got the reference to the bin. if any such // swap happened, it must have happened _after_ we read. since we did the read while - // pinning the epoch, the drop must happen in the _next_ epoch (i.e., the one that we - // are holding up by holding on to our guard). + // the current thread was marked as active, we must be included in the reference count, + // and the drop must happen _after_ we decrement the count (i.e drop our guard). match **unsafe { bin.deref() } { BinEntry::Moved => { // already processed @@ -854,12 +840,12 @@ where loop { // safety: p is a valid pointer. // - // p is only dropped in the next epoch following when its bin is replaced - // with a move node (see safety comment near table.store_bin below). we - // read the bin, and got to p, so its bin has not yet been swapped with a - // move node. and, we have the epoch pinned, so the next epoch cannot have - // arrived yet. therefore, it will be dropped in a future epoch, and is - // safe to use now. + // p is only retired when its bin is replaced with a move node + // (see safety comment near table.store_bin below). we read the + // bin, and got to p, so its bin has not yet been swapped with a + // move node. and, our thread is marked as active, so any retirement + // must include us in the reference count. therefore, it can only be + // dropped _after_ we drop our guard, and is safe to use now. let node = unsafe { p.deref() }.as_node().unwrap(); let next = node.next.load(Ordering::SeqCst, guard); @@ -889,12 +875,12 @@ where while p != last_run { // safety: p is a valid pointer. // - // p is only dropped in the next epoch following when its bin is replaced - // with a move node (see safety comment near table.store_bin below). we - // read the bin, and got to p, so its bin has not yet been swapped with a - // move node. and, we have the epoch pinned, so the next epoch cannot have - // arrived yet. therefore, it will be dropped in a future epoch, and is - // safe to use now. + // p is only retired when its bin is replaced with a move node + // (see safety comment near table.store_bin below). we read the + // bin, and got to p, so its bin has not yet been swapped with a + // move node. and, our thread is marked as active, so any retirement + // must include us in the reference count. therefore, it can only be + // dropped _after_ we drop our guard, and is safe to use now. let node = unsafe { p.deref() }.as_node().unwrap(); let link = if node.hash & n as u64 == 0 { @@ -933,10 +919,8 @@ where // BinEntry::Moved, p is no longer accessible. // // any existing reference to p must have been taken before table.store_bin. - // at that time we had the epoch pinned, so any threads that have such a - // reference must be before or at our epoch. since the p isn't destroyed - // until the next epoch, those old references are fine since they are tied - // to those old threads' pins of the old epoch. + // any threads that have such a reference must have been active before we + // retired p, so they are protected by the reference count. let next = unsafe { p.deref() } .as_node() .unwrap() @@ -968,10 +952,10 @@ where let mut high_count = 0; let mut e = tree_bin.first.load(Ordering::Relaxed, guard); while !e.is_null() { - // safety: the TreeBin was read under our guard, at - // which point the tree structure was valid. Since our - // guard pins the current epoch, the TreeNodes remain - // valid for at least as long as we hold onto the guard. + // safety: we read under our guard, at which point the tree + // structure was valid. Since our guard marks the current thread + // as active, the TreeNodes remain valid for at least as long as + // we hold onto the guard. // Structurally, TreeNodes always point to TreeNodes, so this is sound. let tree_node = unsafe { TreeNode::get_tree_node(e) }; let hash = tree_node.node.hash; @@ -1119,10 +1103,11 @@ where return table; } - // safety: table is only dropped on the next epoch change after it is swapped to null. - // we read it as not null, so it must not be dropped until a subsequent epoch. since we - // held `guard` at the time, we know that the current epoch continues to persist, and that - // our reference is therefore valid. + // safety: table is only retired after it is swapped to null. + // we read it as not null while holding `guard`, so any thread + // retiring the table must have seen us as active and included us + // in the reference count. therefore our reference is valid until + // we decrement the reference count (i.e drop our guard). let next_table = unsafe { table.deref() }.next_table(guard); if next_table.is_null() { return table; @@ -1187,10 +1172,11 @@ where break; } - // safety: table is only dropped on the next epoch change after it is swapped to null. - // we read it as not null, so it must not be dropped until a subsequent epoch. since we - // hold a Guard, we know that the current epoch will persist, and that our reference - // will therefore remain valid. + // safety: table is only retired after it is swapped to null. + // we read it as not null while holding `guard`, so any thread + // retiring the table must have seen us as active and included us + // in the reference count. therefore our reference is valid until + // we decrement the reference count (i.e drop our guard). let n = unsafe { table.deref() }.len(); if n >= MAXIMUM_CAPACITY { // can't resize any more anyway @@ -1288,8 +1274,9 @@ where return None; } - // safety: we loaded the table while epoch was pinned. table won't be deallocated until - // next epoch at the earliest. + // safety: we loaded the table while holding a guard. + // table won't be deallocated until we drop our guard + // at the earliest. let table = unsafe { table.deref() }; if table.is_empty() { return None; @@ -1307,25 +1294,25 @@ where // there are three cases when a bin pointer is invalidated: // // 1. if the table was resized, bin is a move entry, and the resize has completed. in - // that case, the table (and all its heads) will be dropped in the next epoch - // following that. + // that case, the table (and all its heads) will be retired. // 2. if the table is being resized, bin may be swapped with a move entry. the old bin - // will then be dropped in the following epoch after that happens. + // will be retired. // 3. when elements are inserted into or removed from the map, bin may be changed into // or from a TreeBin from or into a regular, linear bin. the old bin will also be - // dropped in the following epoch if that happens. + // retired. // // in all cases, we held the guard when we got the reference to the bin. if any such // swap happened, it must have happened _after_ we read. since we did the read while - // pinning the epoch, the drop must happen in the _next_ epoch (i.e., the one that we - // are holding up by holding on to our guard). + // the current thread was marked as active, we must be included in the reference count, + // and the drop must happen _after_ we decrement the count (i.e drop our guard). let node = table.find(unsafe { bin.deref() }, h, key, guard); if node.is_null() { return None; } - // safety: we read the bin while pinning the epoch. a bin will never be dropped until the - // next epoch after it is removed. since it wasn't removed, and the epoch was pinned, that - // cannot be until after we drop our guard. + + // safety: we loaded the bin while holding a guard, so any retirements + // must have seen us as active. the bin and its nodes cannot be dropped + // until at least after we drop our guard. let node = unsafe { node.deref() }; Some(match **node { BinEntry::Node(ref n) => n, @@ -1524,7 +1511,7 @@ where // free the node's value // safety: any thread that sees this p's value must have read the bin before we stored null - // into it above. it must also have pinned the epoch before that time. therefore, the + // into it above. it must also have been marked as active. therefore, the // defer_destroy below won't be executed until that thread's guard is dropped, at which // point it holds no outstanding references to the value anyway. unsafe { guard.retire_shared(value) }; @@ -1565,11 +1552,10 @@ where while !p.is_null() { delta -= 1; p = { - // safety: the TreeBin was read under our guard, at - // which point the tree was valid. Since our guard - // pins the current epoch, the TreeNodes remain - // valid for at least as long as we hold onto the - // guard. + // safety: we read under our guard, at which point the tree + // structure was valid. Since our guard marks the current thread + // as active, the TreeNodes remain valid for at least as long as + // we hold onto the guard. // Structurally, TreeNodes always point to TreeNodes, so this is sound. let tree_node = unsafe { TreeNode::get_tree_node(p) }; // NOTE: we do not drop the TreeNodes or their @@ -1711,13 +1697,11 @@ where // we are in one of three cases: // // 1. if table is the one we read before the loop, then we read it while holding the - // guard, so it won't be dropped until after we drop that guard b/c the drop logic - // only queues a drop for the next epoch after removing the table. + // guard, so it won't be dropped until after we drop that guard b/c this thread must + // have been included in the reference count of a retirement. // - // 2. if table is read by init_table, then either we did a load, and the argument is - // as for point 1. or, we allocated a table, in which case the earliest it can be - // deallocated is in the next epoch. we are holding up the epoch by holding the - // guard, so this deref is safe. + // 2. if table is read by init_table, we did so while holding a guard, so the + // argument is as for point 1. // // 3. if table is set by a Moved node (below) through help_transfer, it will _either_ // keep using `table` (which is fine by 1. and 2.), or use the `next_table` raw @@ -1736,10 +1720,9 @@ where self.add_count(1, Some(0), guard); // safety: we have not moved the node's value since we placed it into // its `Atomic` in the very beginning of the method, so the ref is still - // valid. since the value is not currently marked as garbage, we know it - // will not collected until at least one epoch passes, and since `value` - // was produced under a guard the pins the current epoch, the returned - // reference will remain valid for the guard's lifetime. + // valid. since the value is not currently marked as garbage, and since + // `value` was loaded under a guard, the returned reference will remain valid + // for the guard's lifetime. return PutResult::Inserted { new: unsafe { value.deref() }, }; @@ -1758,25 +1741,22 @@ where } } - // slow path -- bin is non-empty - // safety: bin is a valid pointer. // // there are three cases when a bin pointer is invalidated: // // 1. if the table was resized, bin is a move entry, and the resize has completed. in - // that case, the table (and all its heads) will be dropped in the next epoch - // following that. + // that case, the table (and all its heads) will be retired. // 2. if the table is being resized, bin may be swapped with a move entry. the old bin - // will then be dropped in the following epoch after that happens. + // will be retired. // 3. when elements are inserted into or removed from the map, bin may be changed into // or from a TreeBin from or into a regular, linear bin. the old bin will also be - // dropped in the following epoch if that happens. + // retired. // // in all cases, we held the guard when we got the reference to the bin. if any such // swap happened, it must have happened _after_ we read. since we did the read while - // pinning the epoch, the drop must happen in the _next_ epoch (i.e., the one that we - // are holding up by holding on to our guard). + // the current thread was marked as active, we must be included in the reference count, + // and the drop must happen _after_ we decrement the count (i.e drop our guard). match **unsafe { bin.deref() } { BinEntry::Moved => { table = self.help_transfer(table, guard); @@ -1788,8 +1768,7 @@ where // fast path if replacement is disallowed and first bin matches let v = head.value.load(Ordering::SeqCst, guard); // safety (for v): since the value is present now, and we've held a guard from - // the beginning of the search, the value cannot be dropped until the next - // epoch, which won't arrive until after we drop our guard. + // the beginning of the search, the value cannot be dropped after we drop our guard. // safety (for value): since we never inserted the value in the tree, `value` // is the last remaining pointer to the initial value. return PutResult::Exists { @@ -1817,18 +1796,17 @@ where let mut p = bin; old_val = loop { - // safety: we read the bin while pinning the epoch. a bin will never be - // dropped until the next epoch after it is removed. since it wasn't - // removed, and the epoch was pinned, that cannot be until after we drop - // our guard. + // safety: we loaded the bin while holding a guard, so any retirements + // must have seen us as active. the bin and its nodes cannot be dropped + // until at least after we drop our guard. let n = unsafe { p.deref() }.as_node().unwrap(); if n.hash == hash && n.key == key { // the key already exists in the map! let current_value = n.value.load(Ordering::SeqCst, guard); // safety: since the value is present now, and we've held a guard from - // the beginning of the search, the value cannot be dropped until the - // next epoch, which won't arrive until after we drop our guard. + // the beginning of the search, the value cannot be dropped until after + // we drop our guard. let current_value = unsafe { current_value.deref() }; if no_replacement { @@ -1852,12 +1830,12 @@ where // here are the possible cases: // // - another thread already has a reference to now_garbage. - // they must have read it before the call to swap. - // because of this, that thread must be pinned to an epoch <= - // the epoch of our guard. since the garbage is placed in our - // epoch, it won't be freed until the _next_ epoch, at which - // point, that thread must have dropped its guard, and with it, - // any reference to the value. + // they must have read it before the call to swap while + // marked as active (holding a guard), and are included in + // the reference count. therefore t won't be freed until _after_ + // it decrements the reference count, which can only happen + // when that thread drops its guard, and with it, any reference + // to the value. // - another thread is about to get a reference to this value. // they execute _after_ the swap, and therefore do _not_ get a // reference to now_garbage (they get value instead). there are @@ -1912,17 +1890,17 @@ where // and we are done. break; } - // safety: the TreeBin was read under our guard, at - // which point the tree structure was valid. Since our - // guard pins the current epoch, the TreeNodes remain - // valid for at least as long as we hold onto the guard. + // safety: the TreeBin was read under our guard, at which point the tree + // structure was valid. Since our guard marks the current thread as active, + // the TreeNodes remain valid for at least as long as we hold onto the + // guard. // Structurally, TreeNodes always point to TreeNodes, so this is sound. let tree_node = unsafe { TreeNode::get_tree_node(p) }; old_val = { let current_value = tree_node.node.value.load(Ordering::SeqCst, guard); // safety: since the value is present now, and we've held a guard from - // the beginning of the search, the value cannot be dropped until the - // next epoch, which won't arrive until after we drop our guard. + // the beginning of the search, the value cannot be dropped until after + // we drop our guard. let current_value = unsafe { current_value.deref() }; if no_replacement { // the key is not absent, so don't update because of @@ -1945,15 +1923,15 @@ where // here are the possible cases: // // - another thread already has a reference to now_garbage. - // they must have read it before the call to swap. - // because of this, that thread must be pinned to an epoch <= - // the epoch of our guard. since the garbage is placed in our - // epoch, it won't be freed until the _next_ epoch, at which - // point, that thread must have dropped its guard, and with it, - // any reference to the value. + // they must have read it before the call to swap while + // marked as active (holding a guard), and are included in + // the reference count. therefore t won't be freed until _after_ + // it decrements the reference count, which can only happen + // when that thread drops its guard, and with it, any reference + // to the value. // - another thread is about to get a reference to this value. // they execute _after_ the swap, and therefore do _not_ get a - // reference to now_garbage (they get `value` instead). there are + // reference to now_garbage (they get value instead). there are // no other ways to get to a value except through its Node's // `value` field (which is what we swapped), so freeing // now_garbage is fine. @@ -1978,12 +1956,11 @@ where if let Some(old_val) = old_val { return PutResult::Replaced { old: old_val, - // safety: we have not moved the node's value since we placed it into its - // `Atomic` in the very beginning of the method, so the ref is still valid. - // since the value is not currently marked as garbage, we know it will not - // collected until at least one epoch passes, and since `value` was produced - // under a guard the pins the current epoch, the returned reference will remain - // valid for the guard's lifetime. + // safety: we have not moved the node's value since we placed it into + // its `Atomic` in the very beginning of the method, so the ref is still + // valid. since the value is not currently marked as garbage, and since + // `value` was loaded under a guard, the returned reference will remain valid + // for the guard's lifetime. new: unsafe { value.deref() }, }; } @@ -1993,12 +1970,11 @@ where debug_assert!(old_val.is_none()); self.add_count(1, Some(bin_count), guard); PutResult::Inserted { - // safety: we have not moved the node's value since we placed it into its - // `Atomic` in the very beginning of the method, so the ref is still valid. - // since the value is not currently marked as garbage, we know it will not - // collected until at least one epoch passes, and since `value` was produced - // under a guard the pins the current epoch, the returned reference will remain - // valid for the guard's lifetime. + // safety: we have not moved the node's value since we placed it into + // its `Atomic` in the very beginning of the method, so the ref is still + // valid. since the value is not currently marked as garbage, and since + // `value` was loaded under a guard, the returned reference will remain valid + // for the guard's lifetime. new: unsafe { value.deref() }, } } @@ -2060,13 +2036,11 @@ where // we are in one of three cases: // // 1. if table is the one we read before the loop, then we read it while holding the - // guard, so it won't be dropped until after we drop that guard b/c the drop logic - // only queues a drop for the next epoch after removing the table. + // guard, so it won't be dropped until after we drop that guard b/c this thread must + // have been included in the reference count of a retirement. // - // 2. if table is read by init_table, then either we did a load, and the argument is - // as for point 1. or, we allocated a table, in which case the earliest it can be - // deallocated is in the next epoch. we are holding up the epoch by holding the - // guard, so this deref is safe. + // 2. if table is read by init_table, we did so while holding a guard, so the + // argument is as for point 1. // // 3. if table is set by a Moved node (below) through help_transfer, it will _either_ // keep using `table` (which is fine by 1. and 2.), or use the `next_table` raw @@ -2087,18 +2061,17 @@ where // there are three cases when a bin pointer is invalidated: // // 1. if the table was resized, bin is a move entry, and the resize has completed. in - // that case, the table (and all its heads) will be dropped in the next epoch - // following that. + // that case, the table (and all its heads) will be retired. // 2. if the table is being resized, bin may be swapped with a move entry. the old bin - // will then be dropped in the following epoch after that happens. + // will be retired. // 3. when elements are inserted into or removed from the map, bin may be changed into // or from a TreeBin from or into a regular, linear bin. the old bin will also be - // dropped in the following epoch if that happens. + // retired. // // in all cases, we held the guard when we got the reference to the bin. if any such // swap happened, it must have happened _after_ we read. since we did the read while - // pinning the epoch, the drop must happen in the _next_ epoch (i.e., the one that we - // are holding up by holding on to our guard). + // the current thread was marked as active, we must be included in the reference count, + // and the drop must happen _after_ we decrement the count (i.e drop our guard). match **unsafe { bin.deref() } { BinEntry::Moved => { table = self.help_transfer(table, guard); @@ -2124,10 +2097,9 @@ where let mut pred: Shared<'_, BinEntry> = Shared::null(); new_val = loop { - // safety: we read the bin while pinning the epoch. a bin will never be - // dropped until the next epoch after it is removed. since it wasn't - // removed, and the epoch was pinned, that cannot be until after we drop - // our guard. + // safety: we loaded the bin while holding a guard, so any retirements + // must have seen us as active. the bin and its nodes cannot be dropped + // until at least after we drop our guard. let n = unsafe { p.deref() }.as_node().unwrap(); // TODO: This Ordering can probably be relaxed due to the Mutex let next = n.next.load(Ordering::SeqCst, guard); @@ -2136,8 +2108,8 @@ where let current_value = n.value.load(Ordering::SeqCst, guard); // safety: since the value is present now, and we've held a guard from - // the beginning of the search, the value cannot be dropped until the - // next epoch, which won't arrive until after we drop our guard. + // the beginning of the search, the value cannot be dropped until after + // we drop our guard. let new_value = remapping_function(&n.key, unsafe { current_value.deref() }); @@ -2153,12 +2125,12 @@ where // here are the possible cases: // // - another thread already has a reference to now_garbage. - // they must have read it before the call to swap. - // because of this, that thread must be pinned to an epoch <= - // the epoch of our guard. since the garbage is placed in our - // epoch, it won't be freed until the _next_ epoch, at which - // point, that thread must have dropped its guard, and with it, - // any reference to the value. + // they must have read it before the call to swap while + // marked as active (holding a guard), and are included in + // the reference count. therefore t won't be freed until _after_ + // it decrements the reference count, which can only happen + // when that thread drops its guard, and with it, any reference + // to the value. // - another thread is about to get a reference to this value. // they execute _after_ the swap, and therefore do _not_ get a // reference to now_garbage (they get value instead). there are @@ -2168,8 +2140,8 @@ where unsafe { guard.retire_shared(now_garbage) }; // safety: since the value is present now, and we've held a guard from - // the beginning of the search, the value cannot be dropped until the - // next epoch, which won't arrive until after we drop our guard. + // the beginning of the search, the value cannot be dropped until after + // we drop our guard. break Some(unsafe { value.deref() }); } else { removed_node = true; @@ -2194,18 +2166,19 @@ where // // here are the possible cases: // - // - another thread already has a reference to the old value. - // they must have read it before the call to store_bin. - // because of this, that thread must be pinned to an epoch <= - // the epoch of our guard. since the garbage is placed in our - // epoch, it won't be freed until the _next_ epoch, at which - // point, that thread must have dropped its guard, and with it, - // any reference to the value. + // - another thread already has a reference to now_garbage. + // they must have read it before the call to swap while + // marked as active (holding a guard), and are included in + // the reference count. therefore t won't be freed until _after_ + // it decrements the reference count, which can only happen + // when that thread drops its guard, and with it, any reference + // to the value. // - another thread is about to get a reference to this value. - // they execute _after_ the store_bin, and therefore do _not_ get a - // reference to the old value. there are no other ways to get to a - // value except through its Node's `value` field (which is now gone - // together with the node), so freeing the old value is fine. + // they execute _after_ the swap, and therefore do _not_ get a + // reference to now_garbage (they get value instead). there are + // no other ways to get to a value except through its Node's + // `value` field (which is what we swapped), so freeing + // now_garbage is fine. unsafe { guard.retire_shared(p) }; unsafe { guard.retire_shared(current_value) }; break None; @@ -2253,19 +2226,17 @@ where None } else { // a node for the given key exists, so we try to update it - // safety: the TreeBin was read under our guard, - // at which point the tree structure was valid. - // Since our guard pins the current epoch, the - // TreeNodes and `p` in particular remain valid - // for at least as long as we hold onto the + // safety: the TreeBin was read under our guard, at which point the tree + // structure was valid. Since our guard marks the current thread as active, + // the TreeNodes remain valid for at least as long as we hold onto the // guard. // Structurally, TreeNodes always point to TreeNodes, so this is sound. let n = &unsafe { TreeNode::get_tree_node(p) }.node; let current_value = n.value.load(Ordering::SeqCst, guard); // safety: since the value is present now, and we've held a guard from - // the beginning of the search, the value cannot be dropped until the - // next epoch, which won't arrive until after we drop our guard. + // the beginning of the search, the value cannot be dropped until after + // we drop our guard. let new_value = remapping_function(&n.key, unsafe { current_value.deref() }); @@ -2281,12 +2252,12 @@ where // here are the possible cases: // // - another thread already has a reference to now_garbage. - // they must have read it before the call to swap. - // because of this, that thread must be pinned to an epoch <= - // the epoch of our guard. since the garbage is placed in our - // epoch, it won't be freed until the _next_ epoch, at which - // point, that thread must have dropped its guard, and with it, - // any reference to the value. + // they must have read it before the call to swap while + // marked as active (holding a guard), and are included in + // the reference count. therefore t won't be freed until _after_ + // it decrements the reference count, which can only happen + // when that thread drops its guard, and with it, any reference + // to the value. // - another thread is about to get a reference to this value. // they execute _after_ the swap, and therefore do _not_ get a // reference to now_garbage (they get value instead). there are @@ -2295,8 +2266,8 @@ where // now_garbage is fine. unsafe { guard.retire_shared(now_garbage) }; // safety: since the value is present now, and we've held a guard from - // the beginning of the search, the value cannot be dropped until the - // next epoch, which won't arrive until after we drop our guard. + // the beginning of the search, the value cannot be dropped until after + // we drop our guard. Some(unsafe { value.deref() }) } else { removed_node = true; @@ -2319,9 +2290,9 @@ where // since they are re-used in the linear bin. // safety: in the same way as for `now_garbage` above, any existing // references to `bin` must have been obtained before storing the - // linear bin. These references were obtained while pinning an epoch - // <= our epoch and have to be dropped before the epoch can advance - // past the destruction of the old bin. After the store, threads will + // linear bin. These references were obtained while holding a + // guard, and are protected until they drop it and decrement + // the reference count. After the store, threads will // always see the linear bin, so the cannot obtain new references either. // // The same holds for `p` and its value, which does not get dropped together @@ -2462,12 +2433,13 @@ where // we are in one of two cases: // // 1. if table is the one we read before the loop, then we read it while holding the - // guard, so it won't be dropped until after we drop that guard b/c the drop logic - // only queues a drop for the next epoch after removing the table. + // guard, so it won't be dropped until after we drop that guard b/c this thread must + // have been included in the reference count of a retirement. // - // 2. if table is set by a Moved node (below) through help_transfer, it will use the - // `next_table` raw pointer from inside the Moved. to see that if a Moved(t) is - // _read_, then t must still be valid, see the safety comment on Table.next_table. + // 2. if table is set by a Moved node (below) through help_transfer, it will _either_ + // keep using `table` (which is fine by 1. and 2.), or use the `next_table` raw + // pointer from inside the Moved. to see that if a Moved(t) is _read_, then t must + // still be valid, see the safety comment on Table.next_table. let t = unsafe { table.deref() }; let n = t.len() as u64; if n == 0 { @@ -2484,18 +2456,17 @@ where // there are three cases when a bin pointer is invalidated: // // 1. if the table was resized, bin is a move entry, and the resize has completed. in - // that case, the table (and all its heads) will be dropped in the next epoch - // following that. + // that case, the table (and all its heads) will be retired. // 2. if the table is being resized, bin may be swapped with a move entry. the old bin - // will then be dropped in the following epoch after that happens. + // will be retired. // 3. when elements are inserted into or removed from the map, bin may be changed into // or from a TreeBin from or into a regular, linear bin. the old bin will also be - // dropped in the following epoch if that happens. + // retired. // // in all cases, we held the guard when we got the reference to the bin. if any such // swap happened, it must have happened _after_ we read. since we did the read while - // pinning the epoch, the drop must happen in the _next_ epoch (i.e., the one that we - // are holding up by holding on to our guard). + // the current thread was marked as active, we must be included in the reference count, + // and the drop must happen _after_ we decrement the count (i.e drop our guard). match **unsafe { bin.deref() } { BinEntry::Moved => { table = self.help_transfer(table, guard); @@ -2515,11 +2486,11 @@ where // safety: either e is bin, in which case it is valid due to the above, // or e was obtained from a next pointer. Any next pointer obtained from // bin is valid at the time we look up bin in the table, at which point - // the epoch is pinned by our guard. Since we found the next pointer - // in a valid map and it is not null (as checked above and below), the - // node it points to was present (i.e. not removed) from the map in the - // current epoch. Thus, because the epoch cannot advance until we release - // our guard, e is also valid if it was obtained from a next pointer. + // we held a guard. Since we found the next pointer in a valid map and + // it is not null (as checked above and below), the node it points to was + // present (i.e. not removed) from the map while we were marked as active. + // Thus, e cannot be dropped until we release our guard, e is also valid + // if it was obtained from a next pointer. let n = unsafe { e.deref() }.as_node().unwrap(); let next = n.next.load(Ordering::SeqCst, guard); if n.hash == hash && n.key.borrow() == key { @@ -2596,11 +2567,9 @@ where // them matches the given key break; } - // safety: the TreeBin was read under our guard, - // at which point the tree structure was valid. - // Since our guard pins the current epoch, the - // TreeNodes and `p` in particular remain valid - // for at least as long as we hold onto the + // safety: the TreeBin was read our guard, at which point the tree + // structure was valid. Since our guard marks the current thread as active, + // the TreeNodes remain valid for at least as long as we hold onto the // guard. // Structurally, TreeNodes always point to TreeNodes, so this is sound. let n = &unsafe { TreeNode::get_tree_node(p) }.node; @@ -2658,18 +2627,19 @@ where // // here are the possible cases: // - // - another thread already has a reference to the old value. - // they must have read it before the call to store_bin. - // because of this, that thread must be pinned to an epoch <= - // the epoch of our guard. since the garbage is placed in our - // epoch, it won't be freed until the _next_ epoch, at which - // point, that thread must have dropped its guard, and with it, - // any reference to the value. + // - another thread already has a reference to now_garbage. + // they must have read it before the call to swap while + // marked as active (holding a guard), and are included in + // the reference count. therefore t won't be freed until _after_ + // it decrements the reference count, which can only happen + // when that thread drops its guard, and with it, any reference + // to the value. // - another thread is about to get a reference to this value. - // they execute _after_ the store_bin, and therefore do _not_ get a - // reference to the old value. there are no other ways to get to a - // value except through its Node's `value` field (which is now gone - // together with the node), so freeing the old value is fine. + // they execute _after_ the swap, and therefore do _not_ get a + // reference to now_garbage (they get value instead). there are + // no other ways to get to a value except through its Node's + // `value` field (which is what we swapped), so freeing + // now_garbage is fine. unsafe { guard.retire_shared(val) }; // safety: the lifetime of the reference is bound to the guard @@ -2769,8 +2739,8 @@ where if bin.is_null() { return; } - // safety: we loaded `bin` while the epoch was pinned by our - // guard. if the bin was replaced since then, the old bin still + // safety: we loaded `bin` while holding a guard. + // if the bin was replaced since then, the old bin still // won't be dropped until after we release our guard. match **unsafe { bin.deref() } { BinEntry::Node(ref node) => { @@ -2783,14 +2753,14 @@ where let mut head = Shared::null(); let mut tail = Shared::null(); while !e.is_null() { - // safety: either `e` is `bin`, in which case it is valid due to the above, - // or `e` was obtained from a next pointer. Any next pointer obtained from - // bin is valid at the time we look up bin in the table, at which point the - // epoch is pinned by our guard. Since we found the next pointer in a valid - // map and it is not null (as checked above), the node it points to was - // present (i.e. not removed) from the map in the current epoch. Thus, - // because the epoch cannot advance until we release our guard, `e` is also - // valid if it was obtained from a next pointer. + // safety: either e is bin, in which case it is valid due to the above, + // or e was obtained from a next pointer. Any next pointer obtained from + // bin is valid at the time we look up bin in the table, at which point + // we held a guard. Since we found the next pointer in a valid map and + // it is not null (as checked above and below), the node it points to was + // present (i.e. not removed) from the map while we were marked as active. + // Thus, e cannot be dropped until we release our guard, e is also valid + // if it was obtained from a next pointer. let e_deref = unsafe { e.deref() }.as_node().unwrap(); // NOTE: cloning the value uses a load with Ordering::Relaxed, but // write access is synchronized through the bin lock @@ -2838,10 +2808,10 @@ where while !e.is_null() { // safety: we just replaced the bin containing this BinEntry, making it // unreachable for other threads since subsequent loads will see the new - // bin. Threads with existing references to `e` must have obtained them in - // this or an earlier epoch, and this epoch is pinned by our guard. Thus, - // `e` will only be dropped after these threads release their guard, at - // which point they can no longer hold their reference to `e`. + // bin. Threads with existing references to `e` must have obtained them + // while holding guards. Thus, `e` will only be dropped after these threads + // release their guard, at which point they can no longer hold their reference + // to `e`. Any loads after this point will see the new bin. // The BinEntry pointers are valid to deref for the same reason as above. // // NOTE: we do not drop the value, since it gets moved to the new TreeNode @@ -2988,7 +2958,7 @@ impl Drop for HashMap { // anything in the map. // // NOTE: we _could_ relax the bounds in all the methods that return `&'g ...` to not also - // bound `&self` by `'g`, but if we did that, we would need to use a regular `epoch::Guard` + // bound `&self` by `'g`, but if we did that, we would need to use a regular `Guard` // here rather than an unprotected one. let guard = unsafe { Guard::unprotected() }; @@ -3588,9 +3558,9 @@ mod tree_bins { map.remove(&42, &map.guard()); // at this point, the default collector is allowed to free `"hello"` - // since no-one has the global epoch pinned as far as it is aware. + // since no guard from `map`s collector is active. // `oops` is tied to the lifetime of a Guard that is not a part of - // the same epoch group, and so can now be dangling. + // the same collector, and so can now be dangling. // but we can still access it! assert_eq!(oops.unwrap(), "hello"); } diff --git a/src/node.rs b/src/node.rs index 8cb266ec..927a2337 100644 --- a/src/node.rs +++ b/src/node.rs @@ -153,8 +153,8 @@ impl TreeNode { while !p.is_null() { // safety: the containing TreeBin of all TreeNodes was read under our // guard, at which point the tree structure was valid. Since our guard - // pins the current epoch, the TreeNodes remain valid for at least as - // long as we hold onto the guard. + // marks the current thread as active, the TreeNodes remain valid for + // at least as long as we hold onto the guard. // Structurally, TreeNodes always point to TreeNodes, so this is sound. let p_deref = unsafe { Self::get_tree_node(p) }; let p_hash = p_deref.node.hash; @@ -422,18 +422,17 @@ impl TreeBin { // there are three cases when a bin pointer is invalidated: // // 1. if the table was resized, bin is a move entry, and the resize has completed. in - // that case, the table (and all its heads) will be dropped in the next epoch - // following that. + // that case, the table (and all its heads) will be retired. // 2. if the table is being resized, bin may be swapped with a move entry. the old bin - // will then be dropped in the following epoch after that happens. + // will be retired. // 3. when elements are inserted into or removed from the map, bin may be changed into // or from a TreeBin from or into a regular, linear bin. the old bin will also be - // dropped in the following epoch if that happens. + // retired. // // in all cases, we held the guard when we got the reference to the bin. if any such // swap happened, it must have happened _after_ we read. since we did the read while - // pinning the epoch, the drop must happen in the _next_ epoch (i.e., the one that we - // are holding up by holding on to our guard). + // the current thread was marked as active, we must be included in the reference count, + // and the drop must happen _after_ we decrement the count (i.e drop our guard). let bin_deref = unsafe { bin.deref() }.as_tree_bin().unwrap(); let mut element = bin_deref.first.load(Ordering::SeqCst, guard); while !element.is_null() { @@ -444,10 +443,10 @@ impl TreeBin { // pointers of the `TreeNode` linearly, as we cannot trust the // tree's structure. // - // safety: we were read under our guard, at which point the tree - // structure was valid. Since our guard pins the current epoch, - // the TreeNodes remain valid for at least as long as we hold - // onto the guard. + // safety: we read under our guard, at which point the tree + // structure was valid. Since our guard marks the current thread + // as active, the TreeNodes remain valid for at least as long as + // we hold onto the guard. // Structurally, TreeNodes always point to TreeNodes, so this is sound. let element_deref = unsafe { TreeNode::get_tree_node(element) }; let element_key = &element_deref.node.key; @@ -517,9 +516,9 @@ impl TreeBin { guard: &'g Guard<'_>, collector: &Collector, ) -> bool { - // safety: we were read under our guard, at which point the tree - // structure was valid. Since our guard pins the current epoch, the - // TreeNodes remain valid for at least as long as we hold onto the + // safety: we read under our guard, at which point the tree + // structure was valid. Since our guard marks the current thread as active, + // the TreeNodes remain valid for at least as long as we hold onto the // guard. Additionally, this method assumes `p` to be non-null. // Structurally, TreeNodes always point to TreeNodes, so this is sound. let p_deref = TreeNode::get_tree_node(p); @@ -755,7 +754,7 @@ impl TreeBin { // safety: we just completely unlinked `p` from both linear and tree // traversal, making it and its value unreachable for any future thread. // Any existing references to one of them were obtained under a guard - // that pins an epoch <= our epoch, and thus have to be released before + // included in the reference count, and thus have to be released before // `p` is actually dropped. #[allow(unused_unsafe)] unsafe { @@ -805,9 +804,9 @@ where self.first.store(tree_node, Ordering::Release); return Shared::null(); } - // safety: we were read under our guard, at which point the tree - // structure was valid. Since our guard pins the current epoch, the - // TreeNodes remain valid for at least as long as we hold onto the + // safety: we read under our guard, at which point the tree + // structure was valid. Since our guard marks the current thread as active, + // the TreeNodes remain valid for at least as long as we hold onto the // guard. // Structurally, TreeNodes always point to TreeNodes, so this is sound. loop { @@ -1038,8 +1037,8 @@ impl TreeNode { } // safety: the containing TreeBin of all TreeNodes was read under our // guard, at which point the tree structure was valid. Since our guard - // pins the current epoch, the TreeNodes remain valid for at least as - // long as we hold onto the guard. + // marks the current thread as active, the TreeNodes remain valid for + // at least as long as we hold onto the guard. // Structurally, TreeNodes always point to TreeNodes, so this is sound. let p_deref = treenode!(p); let right = p_deref.right.load(Ordering::Relaxed, guard); @@ -1083,8 +1082,8 @@ impl TreeNode { } // safety: the containing TreeBin of all TreeNodes was read under our // guard, at which point the tree structure was valid. Since our guard - // pins the current epoch, the TreeNodes remain valid for at least as - // long as we hold onto the guard. + // marks the current thread as active, the TreeNodes remain valid for + // at least as long as we hold onto the guard. // Structurally, TreeNodes always point to TreeNodes, so this is sound. let p_deref = treenode!(p); let left = p_deref.left.load(Ordering::Relaxed, guard); @@ -1125,8 +1124,8 @@ impl TreeNode { ) -> Shared<'g, BinEntry> { // safety: the containing TreeBin of all TreeNodes was read under our // guard, at which point the tree structure was valid. Since our guard - // pins the current epoch, the TreeNodes remain valid for at least as - // long as we hold onto the guard. + // marks the current thread as active, the TreeNodes remain valid for + // at least as long as we hold onto the guard. // Structurally, TreeNodes always point to TreeNodes, so this is sound. treenode!(x).red.store(true, Ordering::Relaxed); @@ -1228,8 +1227,8 @@ impl TreeNode { let mut x_parent_right: Shared<'_, BinEntry>; // safety: the containing TreeBin of all TreeNodes was read under our // guard, at which point the tree structure was valid. Since our guard - // pins the current epoch, the TreeNodes remain valid for at least as - // long as we hold onto the guard. + // marks the current thread as active, the TreeNodes remain valid for at + // least as long as we hold onto the guard. // Structurally, TreeNodes always point to TreeNodes, so this is sound. loop { if x.is_null() || x == root { @@ -1381,8 +1380,8 @@ impl TreeNode { fn check_invariants<'g>(t: Shared<'g, BinEntry>, guard: &'g Guard<'_>) { // safety: the containing TreeBin of all TreeNodes was read under our // guard, at which point the tree structure was valid. Since our guard - // pins the current epoch, the TreeNodes remain valid for at least as - // long as we hold onto the guard. + // marks the current thread as active, the TreeNodes remain valid for + // at least as long as we hold onto the guard. // Structurally, TreeNodes always point to TreeNodes, so this is sound. let t_deref = treenode!(t); let t_parent = t_deref.parent.load(Ordering::Relaxed, guard); diff --git a/src/raw/mod.rs b/src/raw/mod.rs index c6aee24a..59d734e7 100644 --- a/src/raw/mod.rs +++ b/src/raw/mod.rs @@ -21,9 +21,9 @@ pub(crate) struct Table { // table as `map::HashMap.table` and reading a BinEntry::Moved while still holding the // guard used for this load: // - // When loading the current table of the HashMap with a guard g, the current epoch will be - // pinned by g. This happens _before_ the resize which put the Moved entry into the this - // table finishes, as otherwise a different table would have been loaded (see + // When loading the current table of the HashMap with a guard g, the current thread will be + // marked as active by g. This happens _before_ the resize which put the Moved entry into the + // this table finishes, as otherwise a different table would have been loaded (see // `map::HashMap::transfer`). // // Hence: @@ -36,10 +36,9 @@ pub(crate) struct Table { // next_table is still valid. // // - The above is true until a subsequent resize ends, at which point `map::HashMap.tableĀ“ is - // set to another new table != next_table and next_table is `epoch::Guard::defer_destroy`ed + // set to another new table != next_table and next_table is `Guard::retire_shared`ed // (again, see `map::HashMap::transfer`). At this point, next_table is not referenced by the - // map anymore. However, the guard g used to load _this_ table is still pinning the epoch at - // the time of the call to `defer_destroy`. Thus, next_table remains valid for at least the + // map anymore, however `Guard::retire_shared` guarantees that next_table remains valid for at least the // lifetime of g and, in particular, cannot be dropped before _this_ table. // // - After releasing g, either the current resize is finished and operations on the map @@ -133,8 +132,8 @@ impl Table { if next.is_null() { return Shared::null(); } - // safety: next will only be dropped, if bin are dropped. bin won't be dropped until - // an epoch passes, which is protected by guard. + // safety: next will only be dropped, if bin are dropped. bin was read under + // a guard, and so cannot be dropped until we drop the guard at the earliest. node = unsafe { next.deref() }; } } From 5db3eb82c3707fab6c788cd2d802f1e7288c2ce3 Mon Sep 17 00:00:00 2001 From: Ibraheem Ahmed Date: Thu, 24 Feb 2022 23:00:40 -0500 Subject: [PATCH 25/29] run ui doc tests --- src/map.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/map.rs b/src/map.rs index 49fc26f0..b1d041c7 100644 --- a/src/map.rs +++ b/src/map.rs @@ -3253,11 +3253,11 @@ mod tests { /// /// # Keys and values do not have be static /// -/// ```no_run +/// ``` /// let x = String::from("foo"); /// let map: flurry::HashMap<_, _> = std::iter::once((&x, &x)).collect(); /// ``` -/// ```no_run +/// ``` /// let x = String::from("foo"); /// let map: flurry::HashMap<_, _> = flurry::HashMap::new(); /// map.insert(&x, &x, &map.guard()); @@ -3265,7 +3265,7 @@ mod tests { /// /// # get() key can be non-static /// -/// ```no_run +/// ``` /// let x = String::from("foo"); /// let map: flurry::HashMap<_, _> = flurry::HashMap::new(); /// map.insert(x.clone(), x.clone(), &map.guard()); From 16cb96a85a30d0124b977da1272af096b97147cb Mon Sep 17 00:00:00 2001 From: Ibraheem Ahmed Date: Sat, 26 Feb 2022 15:12:31 -0500 Subject: [PATCH 26/29] tweak bin deref safety comments --- src/map.rs | 69 ++++++++++++++++++++++++++++-------------------------- 1 file changed, 36 insertions(+), 33 deletions(-) diff --git a/src/map.rs b/src/map.rs index b1d041c7..131391bd 100644 --- a/src/map.rs +++ b/src/map.rs @@ -745,7 +745,7 @@ where // such a thread must have gotten that reference before the call to swap. // because of this, that thread must have been marked as active, and included // in the reference count, meaning the garbage will not be freed until - // the thread drops its guard at the earliest. + // that thread drops its guard at the earliest. unsafe { guard.retire_shared(now_garbage) }; self.size_ctl .store(((n as isize) << 1) - ((n as isize) >> 1), Ordering::SeqCst); @@ -802,12 +802,12 @@ where // there are three cases when a bin pointer is invalidated: // // 1. if the table was resized, bin is a move entry, and the resize has completed. in - // that case, the table (and all its heads) will be retired. + // that case, the table (and all its heads) have already been retired. // 2. if the table is being resized, bin may be swapped with a move entry. the old bin - // will be retired. + // will only be retired after that happens. // 3. when elements are inserted into or removed from the map, bin may be changed into - // or from a TreeBin from or into a regular, linear bin. the old bin will also be - // retired. + // or from a TreeBin from or into a regular, linear bin. the old bin will be + // retired only once that happens. // // in all cases, we held the guard when we got the reference to the bin. if any such // swap happened, it must have happened _after_ we read. since we did the read while @@ -1294,12 +1294,12 @@ where // there are three cases when a bin pointer is invalidated: // // 1. if the table was resized, bin is a move entry, and the resize has completed. in - // that case, the table (and all its heads) will be retired. + // that case, the table (and all its heads) have already been retired. // 2. if the table is being resized, bin may be swapped with a move entry. the old bin - // will be retired. + // will only be retired after that happens. // 3. when elements are inserted into or removed from the map, bin may be changed into - // or from a TreeBin from or into a regular, linear bin. the old bin will also be - // retired. + // or from a TreeBin from or into a regular, linear bin. the old bin will be + // retired only once that happens. // // in all cases, we held the guard when we got the reference to the bin. if any such // swap happened, it must have happened _after_ we read. since we did the read while @@ -1311,8 +1311,8 @@ where } // safety: we loaded the bin while holding a guard, so any retirements - // must have seen us as active. the bin and its nodes cannot be dropped - // until at least after we drop our guard. + // must have seen us as active, and any future retirements must see us as active. + // the bin and its nodes cannot be dropped until at least after we drop our guard. let node = unsafe { node.deref() }; Some(match **node { BinEntry::Node(ref n) => n, @@ -1511,7 +1511,7 @@ where // free the node's value // safety: any thread that sees this p's value must have read the bin before we stored null - // into it above. it must also have been marked as active. therefore, the + // into it above. it must also have already been marked as active. therefore, the // defer_destroy below won't be executed until that thread's guard is dropped, at which // point it holds no outstanding references to the value anyway. unsafe { guard.retire_shared(value) }; @@ -1741,17 +1741,19 @@ where } } + // slow path -- bin is non-empty + // // safety: bin is a valid pointer. // // there are three cases when a bin pointer is invalidated: // // 1. if the table was resized, bin is a move entry, and the resize has completed. in - // that case, the table (and all its heads) will be retired. + // that case, the table (and all its heads) have already been retired. // 2. if the table is being resized, bin may be swapped with a move entry. the old bin - // will be retired. + // will only be retired after that happens. // 3. when elements are inserted into or removed from the map, bin may be changed into - // or from a TreeBin from or into a regular, linear bin. the old bin will also be - // retired. + // or from a TreeBin from or into a regular, linear bin. the old bin will be + // retired only once that happens. // // in all cases, we held the guard when we got the reference to the bin. if any such // swap happened, it must have happened _after_ we read. since we did the read while @@ -1838,7 +1840,7 @@ where // to the value. // - another thread is about to get a reference to this value. // they execute _after_ the swap, and therefore do _not_ get a - // reference to now_garbage (they get value instead). there are + // reference to now_garbage (they get `value` instead). there are // no other ways to get to a value except through its Node's // `value` field (which is what we swapped), so freeing // now_garbage is fine. @@ -1931,7 +1933,7 @@ where // to the value. // - another thread is about to get a reference to this value. // they execute _after_ the swap, and therefore do _not_ get a - // reference to now_garbage (they get value instead). there are + // reference to now_garbage (they get `value` instead). there are // no other ways to get to a value except through its Node's // `value` field (which is what we swapped), so freeing // now_garbage is fine. @@ -2056,17 +2058,18 @@ where } // slow path -- bin is non-empty + // // safety: bin is a valid pointer. // // there are three cases when a bin pointer is invalidated: // // 1. if the table was resized, bin is a move entry, and the resize has completed. in - // that case, the table (and all its heads) will be retired. + // that case, the table (and all its heads) have already been retired. // 2. if the table is being resized, bin may be swapped with a move entry. the old bin - // will be retired. + // will only be retired after that happens. // 3. when elements are inserted into or removed from the map, bin may be changed into - // or from a TreeBin from or into a regular, linear bin. the old bin will also be - // retired. + // or from a TreeBin from or into a regular, linear bin. the old bin will be + // retired only once that happens. // // in all cases, we held the guard when we got the reference to the bin. if any such // swap happened, it must have happened _after_ we read. since we did the read while @@ -2133,7 +2136,7 @@ where // to the value. // - another thread is about to get a reference to this value. // they execute _after_ the swap, and therefore do _not_ get a - // reference to now_garbage (they get value instead). there are + // reference to now_garbage (they get `value` instead). there are // no other ways to get to a value except through its Node's // `value` field (which is what we swapped), so freeing // now_garbage is fine. @@ -2175,7 +2178,7 @@ where // to the value. // - another thread is about to get a reference to this value. // they execute _after_ the swap, and therefore do _not_ get a - // reference to now_garbage (they get value instead). there are + // reference to now_garbage (they get `value` instead). there are // no other ways to get to a value except through its Node's // `value` field (which is what we swapped), so freeing // now_garbage is fine. @@ -2260,7 +2263,7 @@ where // to the value. // - another thread is about to get a reference to this value. // they execute _after_ the swap, and therefore do _not_ get a - // reference to now_garbage (they get value instead). there are + // reference to now_garbage (they get `value` instead). there are // no other ways to get to a value except through its Node's // `value` field (which is what we swapped), so freeing // now_garbage is fine. @@ -2456,12 +2459,12 @@ where // there are three cases when a bin pointer is invalidated: // // 1. if the table was resized, bin is a move entry, and the resize has completed. in - // that case, the table (and all its heads) will be retired. + // that case, the table (and all its heads) have already been retired. // 2. if the table is being resized, bin may be swapped with a move entry. the old bin - // will be retired. + // will only be retired after that happens. // 3. when elements are inserted into or removed from the map, bin may be changed into - // or from a TreeBin from or into a regular, linear bin. the old bin will also be - // retired. + // or from a TreeBin from or into a regular, linear bin. the old bin will be + // retired only once that happens. // // in all cases, we held the guard when we got the reference to the bin. if any such // swap happened, it must have happened _after_ we read. since we did the read while @@ -2483,7 +2486,7 @@ where let mut e = bin; let mut pred: Shared<'_, BinEntry> = Shared::null(); loop { - // safety: either e is bin, in which case it is valid due to the above, + // safety: either `e` is `bin`, in which case it is valid due to the above, // or e was obtained from a next pointer. Any next pointer obtained from // bin is valid at the time we look up bin in the table, at which point // we held a guard. Since we found the next pointer in a valid map and @@ -2636,7 +2639,7 @@ where // to the value. // - another thread is about to get a reference to this value. // they execute _after_ the swap, and therefore do _not_ get a - // reference to now_garbage (they get value instead). there are + // reference to now_garbage (they get `value` instead). there are // no other ways to get to a value except through its Node's // `value` field (which is what we swapped), so freeing // now_garbage is fine. @@ -2753,13 +2756,13 @@ where let mut head = Shared::null(); let mut tail = Shared::null(); while !e.is_null() { - // safety: either e is bin, in which case it is valid due to the above, + // safety: either `e` is `bin`, in which case it is valid due to the above, // or e was obtained from a next pointer. Any next pointer obtained from // bin is valid at the time we look up bin in the table, at which point // we held a guard. Since we found the next pointer in a valid map and // it is not null (as checked above and below), the node it points to was // present (i.e. not removed) from the map while we were marked as active. - // Thus, e cannot be dropped until we release our guard, e is also valid + // Thus, e cannot be dropped until we release our guard. e is also valid // if it was obtained from a next pointer. let e_deref = unsafe { e.deref() }.as_node().unwrap(); // NOTE: cloning the value uses a load with Ordering::Relaxed, but From 9a6710810d9037b3fe5240d45b75d8f88d6739c4 Mon Sep 17 00:00:00 2001 From: Ibraheem Ahmed Date: Sat, 26 Feb 2022 15:25:49 -0500 Subject: [PATCH 27/29] fix punctuation --- src/map.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/map.rs b/src/map.rs index 131391bd..bd066d2a 100644 --- a/src/map.rs +++ b/src/map.rs @@ -2492,7 +2492,7 @@ where // we held a guard. Since we found the next pointer in a valid map and // it is not null (as checked above and below), the node it points to was // present (i.e. not removed) from the map while we were marked as active. - // Thus, e cannot be dropped until we release our guard, e is also valid + // Thus, e cannot be dropped until we release our guard. e is also valid // if it was obtained from a next pointer. let n = unsafe { e.deref() }.as_node().unwrap(); let next = n.next.load(Ordering::SeqCst, guard); From cc93a0c01bf622dae4280130c4999b135ba4d05e Mon Sep 17 00:00:00 2001 From: Ibraheem Ahmed Date: Sat, 26 Feb 2022 15:28:07 -0500 Subject: [PATCH 28/29] update table deref safety comments --- src/map.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/map.rs b/src/map.rs index bd066d2a..cf59ea80 100644 --- a/src/map.rs +++ b/src/map.rs @@ -1701,7 +1701,8 @@ where // have been included in the reference count of a retirement. // // 2. if table is read by init_table, we did so while holding a guard, so the - // argument is as for point 1. + // argument is as for point 1. or, we allocated the table while holding a guard, + // so the earliest it can be deallocated is after we drop our guard. // // 3. if table is set by a Moved node (below) through help_transfer, it will _either_ // keep using `table` (which is fine by 1. and 2.), or use the `next_table` raw @@ -2042,7 +2043,8 @@ where // have been included in the reference count of a retirement. // // 2. if table is read by init_table, we did so while holding a guard, so the - // argument is as for point 1. + // argument is as for point 1. or, we allocated the table while holding a guard, + // so the earliest it can be deallocated is after we drop our guard. // // 3. if table is set by a Moved node (below) through help_transfer, it will _either_ // keep using `table` (which is fine by 1. and 2.), or use the `next_table` raw From fe8a171148ad6faacfe8d6b791ae93d9ca263905 Mon Sep 17 00:00:00 2001 From: Ibraheem Ahmed Date: Sat, 26 Feb 2022 15:54:41 -0500 Subject: [PATCH 29/29] update node walk safety comments --- src/map.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/map.rs b/src/map.rs index cf59ea80..5a2a869c 100644 --- a/src/map.rs +++ b/src/map.rs @@ -2494,7 +2494,7 @@ where // we held a guard. Since we found the next pointer in a valid map and // it is not null (as checked above and below), the node it points to was // present (i.e. not removed) from the map while we were marked as active. - // Thus, e cannot be dropped until we release our guard. e is also valid + // Thus, e cannot be dropped until we release our guard, so e is also valid // if it was obtained from a next pointer. let n = unsafe { e.deref() }.as_node().unwrap(); let next = n.next.load(Ordering::SeqCst, guard); @@ -2764,7 +2764,7 @@ where // we held a guard. Since we found the next pointer in a valid map and // it is not null (as checked above and below), the node it points to was // present (i.e. not removed) from the map while we were marked as active. - // Thus, e cannot be dropped until we release our guard. e is also valid + // Thus, e cannot be dropped until we release our guard, so e is also valid // if it was obtained from a next pointer. let e_deref = unsafe { e.deref() }.as_node().unwrap(); // NOTE: cloning the value uses a load with Ordering::Relaxed, but