utils.rs

use std::collections::hash_map::DefaultHasher;
use std::fmt::{self, Display};
use std::hash::{Hash, Hasher};
use std::time::Duration;

/// Shorthand for `Box::new` without `feature(box_syntax)`
#[allow(non_snake_case)]
pub fn P<T>(x: T) -> Box<T> {
    Box::new(x)
}
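
// Illustrative check (not in the original file): `P` simply boxes its argument.
#[cfg(test)]
#[test]
fn p_boxes_the_value() {
    assert_eq!(*P(5), 5);
}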

/// Pretty-printing wrapper for `Duration`; outputs e.g. "1.234s"
pub struct Time(pub Duration);

impl Display for Time {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}.{:0>3}s", self.0.as_secs(), self.0.subsec_millis())
    }
}
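
// Illustrative check (not in the original file): 1234 ms should render as "1.234s",
// i.e. whole seconds followed by the zero-padded millisecond remainder.
#[cfg(test)]
#[test]
fn time_formats_with_millisecond_precision() {
    assert_eq!(Time(Duration::from_millis(1234)).to_string(), "1.234s");
}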

/// Hash a value with the default hasher
pub fn default_hash<H: Hash + ?Sized>(h: &H) -> u64 {
    let mut hasher = DefaultHasher::new();
    h.hash(&mut hasher);
    hasher.finish()
}
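
// Illustrative check (not in the original file): hashers created by `DefaultHasher::new()`
// start from the same state, so equal values produce equal hashes.
#[cfg(test)]
#[test]
fn default_hash_is_deterministic_for_equal_values() {
    assert_eq!(default_hash("hello"), default_hash("hello"));
    assert_eq!(default_hash(&42u64), default_hash(&42u64));
}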

/// Extension trait adding `drain_filter` to `Vec`: removes and yields every
/// element for which `filter` returns `true`, keeping the remaining elements
/// in their original order.
pub trait VecExt<T> {
    fn drain_filter<F>(&mut self, filter: F) -> DrainFilter<'_, T, F>
    where
        F: FnMut(&mut T) -> bool;
}

impl<T> VecExt<T> for Vec<T> {
    fn drain_filter<F>(&mut self, filter: F) -> DrainFilter<'_, T, F>
    where
        F: FnMut(&mut T) -> bool,
    {
        let old_len = self.len();

        // Guard against us getting leaked (leak amplification)
        unsafe {
            self.set_len(0);
        }

        DrainFilter {
            vec: self,
            idx: 0,
            del: 0,
            old_len,
            pred: filter,
            panic_flag: false,
        }
    }
}
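
// A minimal usage sketch (not in the original file): drain the even numbers out of a
// vector; the odd ones stay behind in their original order.
#[cfg(test)]
#[test]
fn drain_filter_splits_matching_elements_out() {
    let mut v = vec![1, 2, 3, 4, 5, 6];
    let evens: Vec<i32> = v.drain_filter(|x| *x % 2 == 0).collect();
    assert_eq!(evens, vec![2, 4, 6]);
    assert_eq!(v, vec![1, 3, 5]);
}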

/// Iterator returned by [`VecExt::drain_filter`]; yields the drained elements.
pub struct DrainFilter<'a, T, F>
where
    F: FnMut(&mut T) -> bool,
{
    /// The vector being drained; its length is set to 0 for the duration of the drain.
    vec: &'a mut Vec<T>,
    /// Index of the next element to test against the predicate.
    idx: usize,
    /// Number of elements drained (removed) so far.
    del: usize,
    /// Length of `vec` before the drain started.
    old_len: usize,
    /// The filter predicate.
    pred: F,
    /// Set while `pred` runs so `Drop` can tell whether the predicate panicked.
    panic_flag: bool,
}

impl<T, F> Iterator for DrainFilter<'_, T, F>
where
    F: FnMut(&mut T) -> bool,
{
    type Item = T;

    fn next(&mut self) -> Option<T> {
        unsafe {
            while self.idx < self.old_len {
                let i = self.idx;
                let v = std::slice::from_raw_parts_mut(self.vec.as_mut_ptr(), self.old_len);
                self.panic_flag = true;
                let drained = (self.pred)(&mut v[i]);
                self.panic_flag = false;
                // Update the index *after* the predicate is called. If the index
                // is updated prior and the predicate panics, the element at this
                // index would be leaked.
                self.idx += 1;
                if drained {
                    self.del += 1;
                    return Some(std::ptr::read(&v[i]));
                } else if self.del > 0 {
                    let del = self.del;
                    let src: *const T = &v[i];
                    let dst: *mut T = &mut v[i - del];
                    std::ptr::copy_nonoverlapping(src, dst, 1);
                }
            }
            None
        }
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        (0, Some(self.old_len - self.idx))
    }
}

impl<T, F> Drop for DrainFilter<'_, T, F>
where
    F: FnMut(&mut T) -> bool,
{
    fn drop(&mut self) {
        struct BackshiftOnDrop<'a, 'b, T, F>
        where
            F: FnMut(&mut T) -> bool,
        {
            drain: &'b mut DrainFilter<'a, T, F>,
        }

        impl<'a, 'b, T, F> Drop for BackshiftOnDrop<'a, 'b, T, F>
        where
            F: FnMut(&mut T) -> bool,
        {
            fn drop(&mut self) {
                unsafe {
                    if self.drain.idx < self.drain.old_len && self.drain.del > 0 {
                        // This is a pretty messed up state, and there isn't really an
                        // obviously right thing to do. We don't want to keep trying
                        // to execute `pred`, so we just backshift all the unprocessed
                        // elements and tell the vec that they still exist. The backshift
                        // is required to prevent a double-drop of the last successfully
                        // drained item prior to a panic in the predicate.
                        let ptr = self.drain.vec.as_mut_ptr();
                        let src = ptr.add(self.drain.idx);
                        let dst = src.sub(self.drain.del);
                        let tail_len = self.drain.old_len - self.drain.idx;
                        src.copy_to(dst, tail_len);
                    }
                    self.drain.vec.set_len(self.drain.old_len - self.drain.del);
                }
            }
        }

        let backshift = BackshiftOnDrop { drain: self };

        // Attempt to consume any remaining elements if the filter predicate
        // has not yet panicked. We'll backshift any remaining elements
        // whether we've already panicked or the consumption here panics.
        if !backshift.drain.panic_flag {
            backshift.drain.for_each(drop);
        }
    }
}
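
// Illustrative checks of the drop behaviour described above (not in the original file):
// dropping the iterator early still completes the drain, and a panicking predicate leaves
// the vector with the already-drained elements removed and the unprocessed tail intact.
#[cfg(test)]
#[test]
fn drain_filter_finishes_on_early_drop() {
    let mut v = vec![1, 2, 3, 4, 5, 6];
    {
        let mut iter = v.drain_filter(|x| *x % 2 == 0);
        assert_eq!(iter.next(), Some(2));
        // `iter` is dropped here without being fully consumed; `Drop` drains the rest.
    }
    assert_eq!(v, vec![1, 3, 5]);
}

#[cfg(test)]
#[test]
fn drain_filter_backshifts_after_predicate_panic() {
    use std::panic::{catch_unwind, AssertUnwindSafe};

    let mut v = vec![1, 2, 3, 4, 5, 6];
    let result = catch_unwind(AssertUnwindSafe(|| {
        v.drain_filter(|x| if *x == 4 { panic!("boom") } else { *x % 2 == 0 })
            .for_each(drop);
    }));
    assert!(result.is_err());
    // 2 was drained before the panic; 4 (which triggered the panic) and the tail are kept.
    assert_eq!(v, vec![1, 3, 4, 5, 6]);
}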