use core::iter::FusedIterator;
use core::marker::PhantomData;
use core::mem::{self, SizedTypeProperties};
use core::ptr::NonNull;
use core::{fmt, ptr};

use crate::alloc::{Allocator, Global};

use super::VecDeque;

/// A draining iterator over the elements of a `VecDeque`.
///
/// This `struct` is created by the [`drain`] method on [`VecDeque`]. See its
/// documentation for more.
///
/// [`drain`]: VecDeque::drain
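///
/// # Examples
///
/// A minimal usage sketch: draining a sub-range yields the removed elements
/// and shortens the deque.
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut deque: VecDeque<_> = (0..5).collect();
/// let drained: Vec<_> = deque.drain(1..3).collect();
/// assert_eq!(drained, [1, 2]);
/// assert_eq!(deque, [0, 3, 4]);
/// ```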
#[stable(feature = "drain", since = "1.6.0")]
pub struct Drain<
    'a,
    T: 'a,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    // We can't just use a &mut VecDeque<T, A>, as that would make Drain invariant over T
    // and we want it to be covariant instead
    deque: NonNull<VecDeque<T, A>>,
    // drain_start is stored in deque.len
    drain_len: usize,
    // index into the logical array, not the physical one (always lies in [0..deque.len))
    idx: usize,
    // number of elements remaining after dropping the drain
    new_len: usize,
    remaining: usize,
    // Needed to make Drain covariant over T
    _marker: PhantomData<&'a T>,
}

impl<'a, T, A: Allocator> Drain<'a, T, A> {
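    // The caller (in practice `VecDeque::drain`) is expected to hand us an
    // in-bounds range, i.e. `drain_start + drain_len <= deque.len`.
    // Storing `drain_start` into `deque.len` right away keeps the deque in a
    // valid, merely truncated state even if the returned `Drain` is leaked.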
    pub(super) unsafe fn new(
        deque: &'a mut VecDeque<T, A>,
        drain_start: usize,
        drain_len: usize,
    ) -> Self {
        let orig_len = mem::replace(&mut deque.len, drain_start);
        let new_len = orig_len - drain_len;
        Drain {
            deque: NonNull::from(deque),
            drain_len,
            idx: drain_start,
            new_len,
            remaining: drain_len,
            _marker: PhantomData,
        }
    }

    // Only returns pointers to the slices, as that's all we need
    // to drop them. May only be called if `self.remaining != 0`.
    unsafe fn as_slices(&self) -> (*mut [T], *mut [T]) {
        unsafe {
            let deque = self.deque.as_ref();

            // We know that `self.idx + self.remaining <= deque.len <= usize::MAX`, so this won't overflow.
            let logical_remaining_range = self.idx..self.idx + self.remaining;

            // SAFETY: `logical_remaining_range` represents the
            // range into the logical buffer of elements that
            // haven't been drained yet, so they're all initialized,
            // and `slice::range(start..end, end) == start..end`,
            // so the preconditions for `slice_ranges` are met.
            let (a_range, b_range) =
                deque.slice_ranges(logical_remaining_range.clone(), logical_remaining_range.end);
            (deque.buffer_range(a_range), deque.buffer_range(b_range))
        }
    }
}

#[stable(feature = "collection_debug", since = "1.17.0")]
impl<T: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, T, A> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("Drain")
            .field(&self.drain_len)
            .field(&self.idx)
            .field(&self.new_len)
            .field(&self.remaining)
            .finish()
    }
}

#[stable(feature = "drain", since = "1.6.0")]
unsafe impl<T: Sync, A: Allocator + Sync> Sync for Drain<'_, T, A> {}
#[stable(feature = "drain", since = "1.6.0")]
unsafe impl<T: Send, A: Allocator + Send> Send for Drain<'_, T, A> {}

#[stable(feature = "drain", since = "1.6.0")]
impl<T, A: Allocator> Drop for Drain<'_, T, A> {
    fn drop(&mut self) {
        struct DropGuard<'r, 'a, T, A: Allocator>(&'r mut Drain<'a, T, A>);

        let guard = DropGuard(self);

        if mem::needs_drop::<T>() && guard.0.remaining != 0 {
            unsafe {
                // SAFETY: We just checked that `self.remaining != 0`.
                let (front, back) = guard.0.as_slices();
                // since idx is a logical index, we don't need to worry about wrapping.
                guard.0.idx += front.len();
                guard.0.remaining -= front.len();
                ptr::drop_in_place(front);
                guard.0.remaining = 0;
                ptr::drop_in_place(back);
            }
        }

        // Dropping `guard` handles moving the remaining elements into place.
        impl<'r, 'a, T, A: Allocator> Drop for DropGuard<'r, 'a, T, A> {
            #[inline]
            fn drop(&mut self) {
                if mem::needs_drop::<T>() && self.0.remaining != 0 {
                    unsafe {
                        // SAFETY: We just checked that `self.remaining != 0`.
                        let (front, back) = self.0.as_slices();
                        ptr::drop_in_place(front);
                        ptr::drop_in_place(back);
                    }
                }

                let source_deque = unsafe { self.0.deque.as_mut() };

                let drain_len = self.0.drain_len;
                let new_len = self.0.new_len;

                if T::IS_ZST {
                    // no need to copy around any memory if T is a ZST
                    source_deque.len = new_len;
                    return;
                }

                let head_len = source_deque.len; // #elements in front of the drain
                let tail_len = new_len - head_len; // #elements behind the drain

                // Next, we will fill the hole left by the drain with as few writes as possible.
                // The code below handles the following control flow and reduces the amount of
                // branches under the assumption that `head_len == 0 || tail_len == 0`, i.e.
                // draining at the front or at the back of the deque is especially common.
                //
                // H = "head index" = `deque.head`
                // h = elements in front of the drain
                // d = elements in the drain
                // t = elements behind the drain
                //
                // Note that the buffer may wrap at any point and the wrapping is handled by
                // `wrap_copy` and `to_physical_idx`.
                //
                // Case 1: if `head_len == 0 && tail_len == 0`
                // Everything was drained, reset the head index back to 0.
                //             H
                // [ . . . . . d d d d . . . . . ]
                //   H
                // [ . . . . . . . . . . . . . . ]
                //
                // Case 2: else if `tail_len == 0`
                // Don't move data or the head index.
                //         H
                // [ . . . h h h h d d d d . . . ]
                //         H
                // [ . . . h h h h . . . . . . . ]
                //
                // Case 3: else if `head_len == 0`
                // Don't move data, but move the head index.
                //         H
                // [ . . . d d d d t t t t . . . ]
                //                 H
                // [ . . . . . . . t t t t . . . ]
                //
                // Case 4: else if `tail_len <= head_len`
                // Move data, but not the head index.
                //       H
                // [ . . h h h h d d d d t t . . ]
                //       H
                // [ . . h h h h t t . . . . . . ]
                //
                // Case 5: else
                // Move data and the head index.
                //       H
                // [ . . h h d d d d t t t t . . ]
                //               H
                // [ . . . . . . h h t t t t . . ]
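                //
                // Worked example for Case 5 (illustrative numbers): with
                // `head_len == 2`, `drain_len == 4` and `tail_len == 4`, the two
                // head elements are copied to just before the tail
                // (`src = head`, `dst = to_physical_idx(drain_len)`, `len = 2`)
                // and `head` is then advanced to `to_physical_idx(drain_len)`,
                // which is exactly the transition drawn above.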

                // When draining at the front (`.drain(..n)`) or at the back (`.drain(n..)`),
                // we don't need to copy any data. The number of elements copied would be 0.
                if head_len != 0 && tail_len != 0 {
                    join_head_and_tail_wrapping(source_deque, drain_len, head_len, tail_len);
                    // Marking this function as cold helps LLVM to eliminate it entirely if
                    // this branch is never taken.
                    // We use `#[cold]` instead of `#[inline(never)]`, because inlining this
                    // function into the general case (`.drain(n..m)`) is fine.
                    // See `tests/codegen/vecdeque-drain.rs` for a test.
                    #[cold]
                    fn join_head_and_tail_wrapping<T, A: Allocator>(
                        source_deque: &mut VecDeque<T, A>,
                        drain_len: usize,
                        head_len: usize,
                        tail_len: usize,
                    ) {
                        // Pick whether to move the head or the tail here.
                        let (src, dst, len);
                        if head_len < tail_len {
                            src = source_deque.head;
                            dst = source_deque.to_physical_idx(drain_len);
                            len = head_len;
                        } else {
                            src = source_deque.to_physical_idx(head_len + drain_len);
                            dst = source_deque.to_physical_idx(head_len);
                            len = tail_len;
                        };

                        unsafe {
                            source_deque.wrap_copy(src, dst, len);
                        }
                    }
                }

                if new_len == 0 {
                    // Special case: If the entire deque was drained, reset the head back to 0,
                    // like `.clear()` does.
                    source_deque.head = 0;
                } else if head_len < tail_len {
                    // If we moved the head above, then we need to adjust the head index here.
                    source_deque.head = source_deque.to_physical_idx(drain_len);
                }
                source_deque.len = new_len;
            }
        }
    }
}

#[stable(feature = "drain", since = "1.6.0")]
impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
    type Item = T;

    #[inline]
    fn next(&mut self) -> Option<T> {
        if self.remaining == 0 {
            return None;
        }
        let wrapped_idx = unsafe { self.deque.as_ref().to_physical_idx(self.idx) };
        self.idx += 1;
        self.remaining -= 1;
        Some(unsafe { self.deque.as_mut().buffer_read(wrapped_idx) })
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let len = self.remaining;
        (len, Some(len))
    }
}

#[stable(feature = "drain", since = "1.6.0")]
impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> {
    #[inline]
    fn next_back(&mut self) -> Option<T> {
        if self.remaining == 0 {
            return None;
        }
        self.remaining -= 1;
        let wrapped_idx = unsafe { self.deque.as_ref().to_physical_idx(self.idx + self.remaining) };
        Some(unsafe { self.deque.as_mut().buffer_read(wrapped_idx) })
    }
}

#[stable(feature = "drain", since = "1.6.0")]
impl<T, A: Allocator> ExactSizeIterator for Drain<'_, T, A> {}

#[stable(feature = "fused", since = "1.26.0")]
impl<T, A: Allocator> FusedIterator for Drain<'_, T, A> {}
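
// Illustrative sketch only (not part of the upstream test suite): a minimal
// check of the behaviour implemented above. Elements that are still inside the
// `Drain` when it is dropped are removed from the deque as well, which is what
// the `Drop` impl above arranges. The module and test names are made up here.
#[cfg(test)]
mod drain_drop_sketch {
    use crate::collections::VecDeque;

    #[test]
    fn partial_iteration_then_drop_removes_the_whole_range() {
        let mut deque: VecDeque<_> = (0..6).collect();
        {
            let mut drain = deque.drain(1..5); // logical elements [1, 2, 3, 4]
            assert_eq!(drain.next(), Some(1)); // front of the drained range
            assert_eq!(drain.next_back(), Some(4)); // back of the drained range
            // 2 and 3 are still owned by `drain`; dropping it removes them too.
        }
        assert_eq!(deque, [0, 5]);
    }
}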
276 | |