use crate::convert::TryFrom;
use crate::{
    intrinsics,
    iter::{from_fn, TrustedLen, TrustedRandomAccess},
    ops::{Range, Try},
};

/// An iterator for stepping iterators by a custom amount.
///
/// This `struct` is created by the [`step_by`] method on [`Iterator`]. See
/// its documentation for more.
///
/// [`step_by`]: Iterator::step_by
/// [`Iterator`]: trait.Iterator.html
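///
/// # Examples
///
/// A minimal sketch of the behavior: the first element is always yielded,
/// then every `step`-th element after it.
///
/// ```
/// let mut iter = (0..10).step_by(3);
/// assert_eq!(iter.next(), Some(0));
/// assert_eq!(iter.next(), Some(3));
/// assert_eq!(iter.next(), Some(6));
/// assert_eq!(iter.next(), Some(9));
/// assert_eq!(iter.next(), None);
/// ```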
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[stable(feature = "iterator_step_by", since = "1.28.0")]
#[derive(Clone, Debug)]
pub struct StepBy<I> {
    /// This field is guaranteed to be preprocessed by the specialized `SpecRangeSetup::setup`
    /// in the constructor.
    /// For most iterators that processing is a no-op, but for `Range<{integer}>` types it is
    /// lossy, which means the inner iterator cannot be returned to user code.
    /// Additionally, this type-dependent preprocessing means specialized implementations
    /// cannot be used interchangeably.
    iter: I,
    step: usize,
    first_take: bool,
}

impl<I> StepBy<I> {
    #[inline]
    pub(in crate::iter) fn new(iter: I, step: usize) -> StepBy<I> {
        assert!(step != 0);
        let iter = <I as SpecRangeSetup<I>>::setup(iter, step);
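        // Store `step - 1`: `Iterator::nth(n)` skips `n` elements and yields
        // the next one, so passing `self.step` to `nth` advances by one full
        // step. The `assert!` above guarantees the subtraction cannot underflow.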
        StepBy { iter, step: step - 1, first_take: true }
    }
}

#[stable(feature = "iterator_step_by", since = "1.28.0")]
impl<I> Iterator for StepBy<I>
where
    I: Iterator,
{
    type Item = I::Item;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        self.spec_next()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.spec_size_hint()
    }

    #[inline]
    fn nth(&mut self, n: usize) -> Option<Self::Item> {
        self.spec_nth(n)
    }

    fn try_fold<Acc, F, R>(&mut self, acc: Acc, f: F) -> R
    where
        F: FnMut(Acc, Self::Item) -> R,
        R: Try<Output = Acc>,
    {
        self.spec_try_fold(acc, f)
    }

    #[inline]
    fn fold<Acc, F>(self, acc: Acc, f: F) -> Acc
    where
        F: FnMut(Acc, Self::Item) -> Acc,
    {
        self.spec_fold(acc, f)
    }
}

impl<I> StepBy<I>
where
    I: ExactSizeIterator,
{
    // The zero-based index, counted from the end of the iterator, of the
    // last element that will be yielded. Used in the `DoubleEndedIterator`
    // implementation.
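    //
    // For example, `(0..10).step_by(3)` yields 0, 3, 6, 9: here `len() == 10`
    // and `step + 1 == 3`, so `rem == 1`; with `first_take` still set this
    // returns `rem - 1 == 0`, and indeed the last element 9 is at `nth_back(0)`.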
    fn next_back_index(&self) -> usize {
        let rem = self.iter.len() % (self.step + 1);
        if self.first_take {
            if rem == 0 { self.step } else { rem - 1 }
        } else {
            rem
        }
    }
}

#[stable(feature = "double_ended_step_by_iterator", since = "1.38.0")]
impl<I> DoubleEndedIterator for StepBy<I>
where
    I: DoubleEndedIterator + ExactSizeIterator,
{
    #[inline]
    fn next_back(&mut self) -> Option<Self::Item> {
        self.spec_next_back()
    }

    #[inline]
    fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
        self.spec_nth_back(n)
    }

    fn try_rfold<Acc, F, R>(&mut self, init: Acc, f: F) -> R
    where
        F: FnMut(Acc, Self::Item) -> R,
        R: Try<Output = Acc>,
    {
        self.spec_try_rfold(init, f)
    }

    #[inline]
    fn rfold<Acc, F>(self, init: Acc, f: F) -> Acc
    where
        Self: Sized,
        F: FnMut(Acc, Self::Item) -> Acc,
    {
        self.spec_rfold(init, f)
    }
}

// StepBy can only make the iterator shorter, so the len will still fit.
#[stable(feature = "iterator_step_by", since = "1.28.0")]
impl<I> ExactSizeIterator for StepBy<I> where I: ExactSizeIterator {}

// SAFETY: This adapter is shortening. TrustedLen requires the upper bound to be calculated
// correctly. These requirements can only be satisfied when the upper bound returned by the
// inner iterator's `size_hint` is never `None`. I: TrustedRandomAccess happens to provide
// this guarantee while I: TrustedLen would not.
// This also covers the Range specializations since the ranges also implement TRA.
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<I> TrustedLen for StepBy<I> where I: Iterator + TrustedRandomAccess {}

trait SpecRangeSetup<T> {
    fn setup(inner: T, step: usize) -> T;
}

impl<T> SpecRangeSetup<T> for T {
    #[inline]
    default fn setup(inner: T, _step: usize) -> T {
        inner
    }
}

/// Specialization trait to optimize `StepBy<Range<{integer}>>` iteration.
///
/// # Safety
///
/// Technically this is safe to implement (look ma, no unsafe!), but in reality
/// a lot of unsafe code relies on ranges over integers being correct.
///
/// For correctness *all* public StepBy methods must be specialized
/// because `setup` drastically alters the meaning of the struct fields so that mixing
/// different implementations would lead to incorrect results.
unsafe trait StepByImpl<I> {
    type Item;

    fn spec_next(&mut self) -> Option<Self::Item>;

    fn spec_size_hint(&self) -> (usize, Option<usize>);

    fn spec_nth(&mut self, n: usize) -> Option<Self::Item>;

    fn spec_try_fold<Acc, F, R>(&mut self, acc: Acc, f: F) -> R
    where
        F: FnMut(Acc, Self::Item) -> R,
        R: Try<Output = Acc>;

    fn spec_fold<Acc, F>(self, acc: Acc, f: F) -> Acc
    where
        F: FnMut(Acc, Self::Item) -> Acc;
}

/// Specialization trait for double-ended iteration.
///
/// See also: `StepByImpl`
///
/// # Safety
///
/// The specializations must be implemented together with `StepByImpl`
/// where applicable. I.e. if `StepBy` supports backwards iteration for a
/// given iterator and forward iteration is specialized, then backwards
/// iteration must be specialized as well.
unsafe trait StepByBackImpl<I> {
    type Item;

    fn spec_next_back(&mut self) -> Option<Self::Item>
    where
        I: DoubleEndedIterator + ExactSizeIterator;

    fn spec_nth_back(&mut self, n: usize) -> Option<Self::Item>
    where
        I: DoubleEndedIterator + ExactSizeIterator;

    fn spec_try_rfold<Acc, F, R>(&mut self, init: Acc, f: F) -> R
    where
        I: DoubleEndedIterator + ExactSizeIterator,
        F: FnMut(Acc, Self::Item) -> R,
        R: Try<Output = Acc>;

    fn spec_rfold<Acc, F>(self, init: Acc, f: F) -> Acc
    where
        I: DoubleEndedIterator + ExactSizeIterator,
        F: FnMut(Acc, Self::Item) -> Acc;
}

unsafe impl<I: Iterator> StepByImpl<I> for StepBy<I> {
    type Item = I::Item;

    #[inline]
    default fn spec_next(&mut self) -> Option<I::Item> {
        let step_size = if self.first_take { 0 } else { self.step };
        self.first_take = false;
        self.iter.nth(step_size)
    }

    #[inline]
    default fn spec_size_hint(&self) -> (usize, Option<usize>) {
        #[inline]
        fn first_size(step: usize) -> impl Fn(usize) -> usize {
            move |n| if n == 0 { 0 } else { 1 + (n - 1) / (step + 1) }
        }
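        // E.g. with 10 elements remaining and `step + 1 == 3`, `first_size`
        // gives `1 + 9 / 3 == 4` (the elements at offsets 0, 3, 6 and 9),
        // while `other_size` below gives `10 / 3 == 3`, the first element
        // having already been taken.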

        #[inline]
        fn other_size(step: usize) -> impl Fn(usize) -> usize {
            move |n| n / (step + 1)
        }

        let (low, high) = self.iter.size_hint();

        if self.first_take {
            let f = first_size(self.step);
            (f(low), high.map(f))
        } else {
            let f = other_size(self.step);
            (f(low), high.map(f))
        }
    }

    #[inline]
    default fn spec_nth(&mut self, mut n: usize) -> Option<I::Item> {
        if self.first_take {
            self.first_take = false;
            let first = self.iter.next();
            if n == 0 {
                return first;
            }
            n -= 1;
        }
        // n and self.step are indices, we need to add 1 to get the amount of elements
        // When calling `.nth`, we need to subtract 1 again to convert back to an index
        // step + 1 can't overflow because `.step_by` sets `self.step` to `step - 1`
        let mut step = self.step + 1;
        // n + 1 could overflow
        // thus, if n is usize::MAX, instead of adding one, we call .nth(step)
        if n == usize::MAX {
            self.iter.nth(step - 1);
        } else {
            n += 1;
        }

        // Overflow handling: the total advancement `n * step` may not fit in a
        // usize, so advance in the largest chunks that do fit, shrinking `n` or
        // `step` by the amount already covered until the product fits.
        loop {
            let mul = n.checked_mul(step);
            if intrinsics::likely(mul.is_some()) {
                return self.iter.nth(mul.unwrap() - 1);
            }
            let div_n = usize::MAX / n;
            let div_step = usize::MAX / step;
            let nth_n = div_n * n;
            let nth_step = div_step * step;
            let nth = if nth_n > nth_step {
                step -= div_n;
                nth_n
            } else {
                n -= div_step;
                nth_step
            };
            self.iter.nth(nth - 1);
        }
    }

    default fn spec_try_fold<Acc, F, R>(&mut self, mut acc: Acc, mut f: F) -> R
    where
        F: FnMut(Acc, Self::Item) -> R,
        R: Try<Output = Acc>,
    {
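        // Helper: each call to the returned closure runs `iter.nth(step)`,
        // skipping `step` elements and yielding the next one, i.e. advancing
        // by `step + 1` items per call.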
        #[inline]
        fn nth<I: Iterator>(iter: &mut I, step: usize) -> impl FnMut() -> Option<I::Item> + '_ {
            move || iter.nth(step)
        }

        if self.first_take {
            self.first_take = false;
            match self.iter.next() {
                None => return try { acc },
                Some(x) => acc = f(acc, x)?,
            }
        }
        from_fn(nth(&mut self.iter, self.step)).try_fold(acc, f)
    }

    default fn spec_fold<Acc, F>(mut self, mut acc: Acc, mut f: F) -> Acc
    where
        F: FnMut(Acc, Self::Item) -> Acc,
    {
        #[inline]
        fn nth<I: Iterator>(iter: &mut I, step: usize) -> impl FnMut() -> Option<I::Item> + '_ {
            move || iter.nth(step)
        }

        if self.first_take {
            self.first_take = false;
            match self.iter.next() {
                None => return acc,
                Some(x) => acc = f(acc, x),
            }
        }
        from_fn(nth(&mut self.iter, self.step)).fold(acc, f)
    }
}

unsafe impl<I: DoubleEndedIterator + ExactSizeIterator> StepByBackImpl<I> for StepBy<I> {
    type Item = I::Item;

    #[inline]
    default fn spec_next_back(&mut self) -> Option<Self::Item> {
        self.iter.nth_back(self.next_back_index())
    }

    #[inline]
    default fn spec_nth_back(&mut self, n: usize) -> Option<I::Item> {
        // `self.iter.nth_back(usize::MAX)` does the right thing here when `n`
        // is out of bounds because the length of `self.iter` does not exceed
        // `usize::MAX` (because `I: ExactSizeIterator`) and `nth_back` is
        // zero-indexed.
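        //
        // E.g. `(0..10).step_by(3)` yields 0, 3, 6, 9 and `next_back_index()`
        // is 0, so `nth_back(1)` computes `1 * 3 + 0 == 3`; the inner call
        // `iter.nth_back(3)` returns 6, the second stepped element from the back.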
        let n = n.saturating_mul(self.step + 1).saturating_add(self.next_back_index());
        self.iter.nth_back(n)
    }

    default fn spec_try_rfold<Acc, F, R>(&mut self, init: Acc, mut f: F) -> R
    where
        F: FnMut(Acc, Self::Item) -> R,
        R: Try<Output = Acc>,
    {
        #[inline]
        fn nth_back<I: DoubleEndedIterator>(
            iter: &mut I,
            step: usize,
        ) -> impl FnMut() -> Option<I::Item> + '_ {
            move || iter.nth_back(step)
        }

        match self.next_back() {
            None => try { init },
            Some(x) => {
                let acc = f(init, x)?;
                from_fn(nth_back(&mut self.iter, self.step)).try_fold(acc, f)
            }
        }
    }

    #[inline]
    default fn spec_rfold<Acc, F>(mut self, init: Acc, mut f: F) -> Acc
    where
        Self: Sized,
        F: FnMut(Acc, I::Item) -> Acc,
    {
        #[inline]
        fn nth_back<I: DoubleEndedIterator>(
            iter: &mut I,
            step: usize,
        ) -> impl FnMut() -> Option<I::Item> + '_ {
            move || iter.nth_back(step)
        }

        match self.next_back() {
            None => init,
            Some(x) => {
                let acc = f(init, x);
                from_fn(nth_back(&mut self.iter, self.step)).fold(acc, f)
            }
        }
    }
}

/// For these implementations, `SpecRangeSetup` calculates the number
/// of iterations that will be needed and stores that in `iter.end`.
///
/// The various iterator implementations then rely on that to not need
/// overflow checking, letting loops just be counted instead.
///
/// These only work for unsigned types, and will need to be reworked
/// if you want to use them to specialize on signed types.
///
/// Currently these are only implemented for integers up to usize due to
/// correctness issues around ExactSizeIterator impls on 16-bit platforms.
/// And since ExactSizeIterator is a prerequisite for backwards iteration
/// and we must consistently specialize backwards and forwards iteration
/// that makes the situation complicated enough that it's not covered
/// for now.
macro_rules! spec_int_ranges {
    ($($t:ty)*) => ($(

        const _: () = assert!(usize::BITS >= <$t>::BITS);

        impl SpecRangeSetup<Range<$t>> for Range<$t> {
            #[inline]
            fn setup(mut r: Range<$t>, step: usize) -> Range<$t> {
                let inner_len = r.size_hint().0;
                // If step exceeds $t::MAX, then the count will be at most 1 and
                // thus always fit into $t.
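                // E.g. `(1u8..7).step_by(2)` arrives here with `inner_len == 6`
                // and `step == 2`, so `yield_count == 3`, matching the three
                // elements 1, 3 and 5 that the adapter will produce.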
                let yield_count = inner_len.div_ceil(step);
                // Turn the range end into an iteration counter.
                r.end = yield_count as $t;
                r
            }
        }

        unsafe impl StepByImpl<Range<$t>> for StepBy<Range<$t>> {
            #[inline]
            fn spec_next(&mut self) -> Option<$t> {
                // If a step size larger than the type has been specified, fall back
                // to $t::MAX, in which case `remaining` will be at most 1.
                // The `+ 1` can't overflow since the constructor subtracted 1 from the original value.
                let step = <$t>::try_from(self.step + 1).unwrap_or(<$t>::MAX);
                let remaining = self.iter.end;
                if remaining > 0 {
                    let val = self.iter.start;
                    // this can only overflow during the last step, after which the value
                    // will not be used
                    self.iter.start = val.wrapping_add(step);
                    self.iter.end = remaining - 1;
                    Some(val)
                } else {
                    None
                }
            }

            #[inline]
            fn spec_size_hint(&self) -> (usize, Option<usize>) {
                let remaining = self.iter.end as usize;
                (remaining, Some(remaining))
            }

            // The methods below are all copied from the Iterator trait default impls.
            // We have to repeat them here so that the specialization overrides the StepByImpl defaults.

            #[inline]
            fn spec_nth(&mut self, n: usize) -> Option<Self::Item> {
                self.advance_by(n).ok()?;
                self.next()
            }

            #[inline]
            fn spec_try_fold<Acc, F, R>(&mut self, init: Acc, mut f: F) -> R
            where
                F: FnMut(Acc, Self::Item) -> R,
                R: Try<Output = Acc>
            {
                let mut accum = init;
                while let Some(x) = self.next() {
                    accum = f(accum, x)?;
                }
                try { accum }
            }

            #[inline]
            fn spec_fold<Acc, F>(self, init: Acc, mut f: F) -> Acc
            where
                F: FnMut(Acc, Self::Item) -> Acc
            {
                // If a step size larger than the type has been specified, fall back
                // to $t::MAX, in which case `remaining` will be at most 1.
                let step = <$t>::try_from(self.step + 1).unwrap_or(<$t>::MAX);
                let remaining = self.iter.end;
                let mut acc = init;
                let mut val = self.iter.start;
                for _ in 0..remaining {
                    acc = f(acc, val);
                    // this can only overflow during the last step, after which the value
                    // will no longer be used
                    val = val.wrapping_add(step);
                }
                acc
            }
        }
    )*)
}

macro_rules! spec_int_ranges_r {
    ($($t:ty)*) => ($(
        const _: () = assert!(usize::BITS >= <$t>::BITS);

        unsafe impl StepByBackImpl<Range<$t>> for StepBy<Range<$t>> {

            #[inline]
            fn spec_next_back(&mut self) -> Option<Self::Item>
                where Range<$t>: DoubleEndedIterator + ExactSizeIterator,
            {
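                // After `setup`, `self.iter.end` holds the remaining yield
                // count. E.g. `(0u8..7).step_by(3)` arrives here with
                // `start == 0` and `end == 3`; the last element is then
                // `0 + 3 * (3 - 1) == 6`, matching forward iteration 0, 3, 6.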
                let step = (self.step + 1) as $t;
                let remaining = self.iter.end;
                if remaining > 0 {
                    let start = self.iter.start;
                    self.iter.end = remaining - 1;
                    Some(start + step * (remaining - 1))
                } else {
                    None
                }
            }

            // The methods below are all copied from the Iterator trait default impls.
            // We have to repeat them here so that the specialization overrides the StepByBackImpl defaults.

            #[inline]
            fn spec_nth_back(&mut self, n: usize) -> Option<Self::Item>
                where Self: DoubleEndedIterator,
            {
                if self.advance_back_by(n).is_err() {
                    return None;
                }
                self.next_back()
            }

            #[inline]
            fn spec_try_rfold<Acc, F, R>(&mut self, init: Acc, mut f: F) -> R
            where
                Self: DoubleEndedIterator,
                F: FnMut(Acc, Self::Item) -> R,
                R: Try<Output = Acc>
            {
                let mut accum = init;
                while let Some(x) = self.next_back() {
                    accum = f(accum, x)?;
                }
                try { accum }
            }

            #[inline]
            fn spec_rfold<Acc, F>(mut self, init: Acc, mut f: F) -> Acc
            where
                Self: DoubleEndedIterator,
                F: FnMut(Acc, Self::Item) -> Acc
            {
                let mut accum = init;
                while let Some(x) = self.next_back() {
                    accum = f(accum, x);
                }
                accum
            }
        }
    )*)
}

#[cfg(target_pointer_width = "64")]
spec_int_ranges!(u8 u16 u32 u64 usize);
// DoubleEndedIterator requires ExactSizeIterator, which isn't implemented for Range<u64>
#[cfg(target_pointer_width = "64")]
spec_int_ranges_r!(u8 u16 u32 usize);

#[cfg(target_pointer_width = "32")]
spec_int_ranges!(u8 u16 u32 usize);
#[cfg(target_pointer_width = "32")]
spec_int_ranges_r!(u8 u16 u32 usize);

#[cfg(target_pointer_width = "16")]
spec_int_ranges!(u8 u16 usize);
#[cfg(target_pointer_width = "16")]
spec_int_ranges_r!(u8 u16 usize);