use crate::{
    intrinsics,
    iter::{from_fn, TrustedLen, TrustedRandomAccess},
    num::NonZeroUsize,
    ops::{Range, Try},
};

/// An iterator for stepping iterators by a custom amount.
///
/// This `struct` is created by the [`step_by`] method on [`Iterator`]. See
/// its documentation for more.
///
/// [`step_by`]: Iterator::step_by
/// [`Iterator`]: trait.Iterator.html
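///
/// # Examples
///
/// A small illustration of the stepping behavior: the first element is
/// always yielded, then every `step`-th element after it.
///
/// ```
/// let mut iter = (0..10).step_by(3);
/// assert_eq!(iter.next(), Some(0));
/// assert_eq!(iter.next(), Some(3));
/// assert_eq!(iter.next(), Some(6));
/// assert_eq!(iter.next(), Some(9));
/// assert_eq!(iter.next(), None);
/// ```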
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[stable(feature = "iterator_step_by", since = "1.28.0")]
#[derive(Clone, Debug)]
pub struct StepBy<I> {
    /// This field is guaranteed to be preprocessed by the specialized `SpecRangeSetup::setup`
    /// in the constructor.
    /// For most iterators that processing is a no-op, but for Range<{integer}> types it is
    /// lossy, which means the inner iterator cannot be returned to user code.
    /// Additionally, this type-dependent preprocessing means specialized implementations
    /// cannot be used interchangeably.
    iter: I,
    /// This field is `step - 1`, aka the correct amount to pass to `nth` when iterating.
    /// It MUST NOT be `usize::MAX`, as `unsafe` code depends on being able to add one
    /// without the risk of overflow. (This is important so that length calculations
    /// don't need to check for division-by-zero, for example.)
    step_minus_one: usize,
    first_take: bool,
}

impl<I> StepBy<I> {
    #[inline]
    pub(in crate::iter) fn new(iter: I, step: usize) -> StepBy<I> {
        assert!(step != 0);
        let iter = <I as SpecRangeSetup<I>>::setup(iter, step);
        StepBy { iter, step_minus_one: step - 1, first_take: true }
    }

    /// The `step` that was originally passed to `Iterator::step_by(step)`,
    /// aka `self.step_minus_one + 1`.
    #[inline]
    fn original_step(&self) -> NonZeroUsize {
        // SAFETY: By type invariant, `step_minus_one` cannot be `MAX`, which
        // means the addition cannot overflow and the result cannot be zero.
        unsafe { NonZeroUsize::new_unchecked(intrinsics::unchecked_add(self.step_minus_one, 1)) }
    }
}

#[stable(feature = "iterator_step_by", since = "1.28.0")]
impl<I> Iterator for StepBy<I>
where
    I: Iterator,
{
    type Item = I::Item;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        self.spec_next()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.spec_size_hint()
    }

    #[inline]
    fn nth(&mut self, n: usize) -> Option<Self::Item> {
        self.spec_nth(n)
    }

    fn try_fold<Acc, F, R>(&mut self, acc: Acc, f: F) -> R
    where
        F: FnMut(Acc, Self::Item) -> R,
        R: Try<Output = Acc>,
    {
        self.spec_try_fold(acc, f)
    }

    #[inline]
    fn fold<Acc, F>(self, acc: Acc, f: F) -> Acc
    where
        F: FnMut(Acc, Self::Item) -> Acc,
    {
        self.spec_fold(acc, f)
    }
}

impl<I> StepBy<I>
where
    I: ExactSizeIterator,
{
    // The zero-based index, counted from the end of the iterator, of the
    // last element. Used in the `DoubleEndedIterator` implementation.
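    //
    // Illustrative walk-through: `(0..10).step_by(3)` yields 0, 3, 6, 9.
    // With `first_take` still `true`, `len() == 10` and `rem == 10 % 3 == 1`,
    // so the index is `rem - 1 == 0`: the final element, 9, sits at offset 0
    // from the back.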
    fn next_back_index(&self) -> usize {
        let rem = self.iter.len() % self.original_step();
        if self.first_take { if rem == 0 { self.step_minus_one } else { rem - 1 } } else { rem }
    }
}

#[stable(feature = "double_ended_step_by_iterator", since = "1.38.0")]
impl<I> DoubleEndedIterator for StepBy<I>
where
    I: DoubleEndedIterator + ExactSizeIterator,
{
    #[inline]
    fn next_back(&mut self) -> Option<Self::Item> {
        self.spec_next_back()
    }

    #[inline]
    fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
        self.spec_nth_back(n)
    }

    fn try_rfold<Acc, F, R>(&mut self, init: Acc, f: F) -> R
    where
        F: FnMut(Acc, Self::Item) -> R,
        R: Try<Output = Acc>,
    {
        self.spec_try_rfold(init, f)
    }

    #[inline]
    fn rfold<Acc, F>(self, init: Acc, f: F) -> Acc
    where
        Self: Sized,
        F: FnMut(Acc, Self::Item) -> Acc,
    {
        self.spec_rfold(init, f)
    }
}

// StepBy can only make the iterator shorter, so the len will still fit.
#[stable(feature = "iterator_step_by", since = "1.28.0")]
impl<I> ExactSizeIterator for StepBy<I> where I: ExactSizeIterator {}

// SAFETY: This adapter is shortening. TrustedLen requires the upper bound to be calculated
// correctly. These requirements can only be satisfied when the upper bound of the inner
// iterator is never `None`. `I: TrustedRandomAccess` happens to provide this guarantee,
// while `I: TrustedLen` would not.
// This also covers the Range specializations, since ranges also implement TRA.
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<I> TrustedLen for StepBy<I> where I: Iterator + TrustedRandomAccess {}

trait SpecRangeSetup<T> {
    fn setup(inner: T, step: usize) -> T;
}

impl<T> SpecRangeSetup<T> for T {
    #[inline]
    default fn setup(inner: T, _step: usize) -> T {
        inner
    }
}

/// Specialization trait to optimize `StepBy<Range<{integer}>>` iteration.
///
/// # Safety
///
/// Technically this is safe to implement (look ma, no unsafe!), but in reality
/// a lot of unsafe code relies on ranges over integers being correct.
///
/// For correctness *all* public StepBy methods must be specialized,
/// because `setup` drastically alters the meaning of the struct fields, so that mixing
/// different implementations would lead to incorrect results.
unsafe trait StepByImpl<I> {
    type Item;

    fn spec_next(&mut self) -> Option<Self::Item>;

    fn spec_size_hint(&self) -> (usize, Option<usize>);

    fn spec_nth(&mut self, n: usize) -> Option<Self::Item>;

    fn spec_try_fold<Acc, F, R>(&mut self, acc: Acc, f: F) -> R
    where
        F: FnMut(Acc, Self::Item) -> R,
        R: Try<Output = Acc>;

    fn spec_fold<Acc, F>(self, acc: Acc, f: F) -> Acc
    where
        F: FnMut(Acc, Self::Item) -> Acc;
}

/// Specialization trait for double-ended iteration.
///
/// See also: `StepByImpl`
///
/// # Safety
///
/// The specializations must be implemented together with `StepByImpl`
/// where applicable. That is, if `StepBy` supports backwards iteration
/// for a given iterator and forward iteration is specialized, then
/// backwards iteration must be specialized as well.
unsafe trait StepByBackImpl<I> {
    type Item;

    fn spec_next_back(&mut self) -> Option<Self::Item>
    where
        I: DoubleEndedIterator + ExactSizeIterator;

    fn spec_nth_back(&mut self, n: usize) -> Option<Self::Item>
    where
        I: DoubleEndedIterator + ExactSizeIterator;

    fn spec_try_rfold<Acc, F, R>(&mut self, init: Acc, f: F) -> R
    where
        I: DoubleEndedIterator + ExactSizeIterator,
        F: FnMut(Acc, Self::Item) -> R,
        R: Try<Output = Acc>;

    fn spec_rfold<Acc, F>(self, init: Acc, f: F) -> Acc
    where
        I: DoubleEndedIterator + ExactSizeIterator,
        F: FnMut(Acc, Self::Item) -> Acc;
}

unsafe impl<I: Iterator> StepByImpl<I> for StepBy<I> {
    type Item = I::Item;

    #[inline]
    default fn spec_next(&mut self) -> Option<I::Item> {
        let step_size = if self.first_take { 0 } else { self.step_minus_one };
        self.first_take = false;
        self.iter.nth(step_size)
    }

    #[inline]
    default fn spec_size_hint(&self) -> (usize, Option<usize>) {
        #[inline]
        fn first_size(step: NonZeroUsize) -> impl Fn(usize) -> usize {
            move |n| if n == 0 { 0 } else { 1 + (n - 1) / step }
        }

        #[inline]
        fn other_size(step: NonZeroUsize) -> impl Fn(usize) -> usize {
            move |n| n / step
        }
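
        // Worked example: with `step == 3`, an inner hint of 10 maps to
        // `1 + (10 - 1) / 3 == 4` while the first element is still pending
        // (offsets 0, 3, 6, 9), and to `10 / 3 == 3` once it has been taken.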

        let (low, high) = self.iter.size_hint();

        if self.first_take {
            let f = first_size(self.original_step());
            (f(low), high.map(f))
        } else {
            let f = other_size(self.original_step());
            (f(low), high.map(f))
        }
    }

    #[inline]
    default fn spec_nth(&mut self, mut n: usize) -> Option<I::Item> {
        if self.first_take {
            self.first_take = false;
            let first = self.iter.next();
            if n == 0 {
                return first;
            }
            n -= 1;
        }
        // `n` and `self.step_minus_one` are indices; we need to add 1 to get the number of
        // elements. When calling `.nth`, we need to subtract 1 again to convert back to an index.
        let mut step = self.original_step().get();
        // `n + 1` could overflow. Thus, if `n` is `usize::MAX`, instead of adding one we
        // advance the inner iterator by one full step (`.nth(step - 1)`) up front.
        if n == usize::MAX {
            self.iter.nth(step - 1);
        } else {
            n += 1;
        }

        // Overflow handling: when `n * step` does not fit in a `usize`, advance by the
        // largest non-overflowing multiple of one factor, fold the amount already advanced
        // back into the other factor, and retry.
        loop {
            let mul = n.checked_mul(step);
            if intrinsics::likely(mul.is_some()) {
                return self.iter.nth(mul.unwrap() - 1);
            }
            let div_n = usize::MAX / n;
            let div_step = usize::MAX / step;
            let nth_n = div_n * n;
            let nth_step = div_step * step;
            let nth = if nth_n > nth_step {
                step -= div_n;
                nth_n
            } else {
                n -= div_step;
                nth_step
            };
            self.iter.nth(nth - 1);
        }
    }

    default fn spec_try_fold<Acc, F, R>(&mut self, mut acc: Acc, mut f: F) -> R
    where
        F: FnMut(Acc, Self::Item) -> R,
        R: Try<Output = Acc>,
    {
        #[inline]
        fn nth<I: Iterator>(
            iter: &mut I,
            step_minus_one: usize,
        ) -> impl FnMut() -> Option<I::Item> + '_ {
            move || iter.nth(step_minus_one)
        }

        if self.first_take {
            self.first_take = false;
            match self.iter.next() {
                None => return try { acc },
                Some(x) => acc = f(acc, x)?,
            }
        }
        from_fn(nth(&mut self.iter, self.step_minus_one)).try_fold(acc, f)
    }

    default fn spec_fold<Acc, F>(mut self, mut acc: Acc, mut f: F) -> Acc
    where
        F: FnMut(Acc, Self::Item) -> Acc,
    {
        #[inline]
        fn nth<I: Iterator>(
            iter: &mut I,
            step_minus_one: usize,
        ) -> impl FnMut() -> Option<I::Item> + '_ {
            move || iter.nth(step_minus_one)
        }

        if self.first_take {
            self.first_take = false;
            match self.iter.next() {
                None => return acc,
                Some(x) => acc = f(acc, x),
            }
        }
        from_fn(nth(&mut self.iter, self.step_minus_one)).fold(acc, f)
    }
}

unsafe impl<I: DoubleEndedIterator + ExactSizeIterator> StepByBackImpl<I> for StepBy<I> {
    type Item = I::Item;

    #[inline]
    default fn spec_next_back(&mut self) -> Option<Self::Item> {
        self.iter.nth_back(self.next_back_index())
    }

    #[inline]
    default fn spec_nth_back(&mut self, n: usize) -> Option<I::Item> {
        // `self.iter.nth_back(usize::MAX)` does the right thing here when `n`
        // is out of bounds, because the length of `self.iter` does not exceed
        // `usize::MAX` (since `I: ExactSizeIterator`) and `nth_back` is
        // zero-indexed.
        let n = n.saturating_mul(self.original_step().get()).saturating_add(self.next_back_index());
        self.iter.nth_back(n)
    }

    default fn spec_try_rfold<Acc, F, R>(&mut self, init: Acc, mut f: F) -> R
    where
        F: FnMut(Acc, Self::Item) -> R,
        R: Try<Output = Acc>,
    {
        #[inline]
        fn nth_back<I: DoubleEndedIterator>(
            iter: &mut I,
            step_minus_one: usize,
        ) -> impl FnMut() -> Option<I::Item> + '_ {
            move || iter.nth_back(step_minus_one)
        }

        match self.next_back() {
            None => try { init },
            Some(x) => {
                let acc = f(init, x)?;
                from_fn(nth_back(&mut self.iter, self.step_minus_one)).try_fold(acc, f)
            }
        }
    }

    #[inline]
    default fn spec_rfold<Acc, F>(mut self, init: Acc, mut f: F) -> Acc
    where
        Self: Sized,
        F: FnMut(Acc, I::Item) -> Acc,
    {
        #[inline]
        fn nth_back<I: DoubleEndedIterator>(
            iter: &mut I,
            step_minus_one: usize,
        ) -> impl FnMut() -> Option<I::Item> + '_ {
            move || iter.nth_back(step_minus_one)
        }

        match self.next_back() {
            None => init,
            Some(x) => {
                let acc = f(init, x);
                from_fn(nth_back(&mut self.iter, self.step_minus_one)).fold(acc, f)
            }
        }
    }
}

/// For these implementations, `SpecRangeSetup` calculates the number
/// of iterations that will be needed and stores that in `iter.end`.
///
/// The various iterator implementations then rely on that to not need
/// overflow checking, letting loops just be counted instead.
///
/// These only work for unsigned types, and will need to be reworked
/// if you want to use it to specialize on signed types.
///
/// Currently these are only implemented for integers up to usize due to
/// correctness issues around ExactSizeIterator impls on 16bit platforms.
/// And since ExactSizeIterator is a prerequisite for backwards iteration
/// and we must consistently specialize backwards and forwards iteration
/// that makes the situation complicated enough that it's not covered
/// for now.
macro_rules! spec_int_ranges {
    ($($t:ty)*) => ($(

        const _: () = assert!(usize::BITS >= <$t>::BITS);

        impl SpecRangeSetup<Range<$t>> for Range<$t> {
            #[inline]
            fn setup(mut r: Range<$t>, step: usize) -> Range<$t> {
                let inner_len = r.size_hint().0;
                // If step exceeds $t::MAX, then the count will be at most 1 and
                // thus always fit into $t.
                let yield_count = inner_len.div_ceil(step);
                // Turn the range end into an iteration counter
                r.end = yield_count as $t;
                r
            }
        }
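
        // Illustrative walk-through of `setup`: for `(0u8..250).step_by(100)`,
        // `inner_len == 250` and `yield_count == 250.div_ceil(100) == 3`, so the
        // range is rewritten to `0..3` and the iterator yields 0, 100 and 200.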

        unsafe impl StepByImpl<Range<$t>> for StepBy<Range<$t>> {
            #[inline]
            fn spec_next(&mut self) -> Option<$t> {
                // If a step size larger than the type has been specified, fall back to
                // `$t::MAX`, in which case `remaining` will be at most 1.
                let step = <$t>::try_from(self.original_step().get()).unwrap_or(<$t>::MAX);
                let remaining = self.iter.end;
                if remaining > 0 {
                    let val = self.iter.start;
                    // this can only overflow during the last step, after which the value
                    // will not be used
                    self.iter.start = val.wrapping_add(step);
                    self.iter.end = remaining - 1;
                    Some(val)
                } else {
                    None
                }
            }

            #[inline]
            fn spec_size_hint(&self) -> (usize, Option<usize>) {
                let remaining = self.iter.end as usize;
                (remaining, Some(remaining))
            }

            // The methods below are all copied from the Iterator trait default impls.
            // We have to repeat them here so that the specialization overrides the
            // StepByImpl defaults.

            #[inline]
            fn spec_nth(&mut self, n: usize) -> Option<Self::Item> {
                self.advance_by(n).ok()?;
                self.next()
            }

            #[inline]
            fn spec_try_fold<Acc, F, R>(&mut self, init: Acc, mut f: F) -> R
            where
                F: FnMut(Acc, Self::Item) -> R,
                R: Try<Output = Acc>
            {
                let mut accum = init;
                while let Some(x) = self.next() {
                    accum = f(accum, x)?;
                }
                try { accum }
            }

            #[inline]
            fn spec_fold<Acc, F>(self, init: Acc, mut f: F) -> Acc
            where
                F: FnMut(Acc, Self::Item) -> Acc
            {
                // If a step size larger than the type has been specified, fall back to
                // `$t::MAX`, in which case `remaining` will be at most 1.
                let step = <$t>::try_from(self.original_step().get()).unwrap_or(<$t>::MAX);
                let remaining = self.iter.end;
                let mut acc = init;
                let mut val = self.iter.start;
                for _ in 0..remaining {
                    acc = f(acc, val);
                    // this can only overflow during the last step, after which the value
                    // will no longer be used
                    val = val.wrapping_add(step);
                }
                acc
            }
        }
    )*)
}

macro_rules! spec_int_ranges_r {
    ($($t:ty)*) => ($(
        const _: () = assert!(usize::BITS >= <$t>::BITS);

        unsafe impl StepByBackImpl<Range<$t>> for StepBy<Range<$t>> {

            #[inline]
            fn spec_next_back(&mut self) -> Option<Self::Item>
                where Range<$t>: DoubleEndedIterator + ExactSizeIterator,
            {
                let step = self.original_step().get() as $t;
                let remaining = self.iter.end;
                if remaining > 0 {
                    let start = self.iter.start;
                    self.iter.end = remaining - 1;
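                    // No overflow here: when `step` fits in `$t`, this is the
                    // last in-range element; when it does not, `remaining` was
                    // at most 1, so the (truncated) `step` is multiplied by zero.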
                    Some(start + step * (remaining - 1))
                } else {
                    None
                }
            }

            // The methods below are all copied from the Iterator trait default impls.
            // We have to repeat them here so that the specialization overrides the
            // StepByBackImpl defaults.

            #[inline]
            fn spec_nth_back(&mut self, n: usize) -> Option<Self::Item>
                where Self: DoubleEndedIterator,
            {
                if self.advance_back_by(n).is_err() {
                    return None;
                }
                self.next_back()
            }

            #[inline]
            fn spec_try_rfold<Acc, F, R>(&mut self, init: Acc, mut f: F) -> R
            where
                Self: DoubleEndedIterator,
                F: FnMut(Acc, Self::Item) -> R,
                R: Try<Output = Acc>
            {
                let mut accum = init;
                while let Some(x) = self.next_back() {
                    accum = f(accum, x)?;
                }
                try { accum }
            }

            #[inline]
            fn spec_rfold<Acc, F>(mut self, init: Acc, mut f: F) -> Acc
            where
                Self: DoubleEndedIterator,
                F: FnMut(Acc, Self::Item) -> Acc
            {
                let mut accum = init;
                while let Some(x) = self.next_back() {
                    accum = f(accum, x);
                }
                accum
            }
        }
    )*)
}

#[cfg(target_pointer_width = "64")]
spec_int_ranges!(u8 u16 u32 u64 usize);
// DoubleEndedIterator requires ExactSizeIterator, which isn't implemented for Range<u64>
#[cfg(target_pointer_width = "64")]
spec_int_ranges_r!(u8 u16 u32 usize);

#[cfg(target_pointer_width = "32")]
spec_int_ranges!(u8 u16 u32 usize);
#[cfg(target_pointer_width = "32")]
spec_int_ranges_r!(u8 u16 u32 usize);

#[cfg(target_pointer_width = "16")]
spec_int_ranges!(u8 u16 usize);
#[cfg(target_pointer_width = "16")]
spec_int_ranges_r!(u8 u16 usize);