1 | //! Slice sorting |
2 | //! |
3 | //! This module contains a sorting algorithm based on Orson Peters' pattern-defeating quicksort, |
4 | //! published at: <https://github.com/orlp/pdqsort> |
5 | //! |
6 | //! Unstable sorting is compatible with core because it doesn't allocate memory, unlike our |
7 | //! stable sorting implementation. |
8 | //! |
//! In addition, it contains the core logic of the stable sort used by `slice::sort`, based on
//! TimSort.
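//!
//! The unstable sort implemented here backs `slice::sort_unstable`, while the stable merge sort
//! core backs `slice::sort` (driven from the `alloc` crate). For illustration:
//!
//! ```
//! let mut v = [3, 1, 2];
//! v.sort_unstable();
//! assert_eq!(v, [1, 2, 3]);
//! ```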
11 | |
12 | use crate::cmp; |
13 | use crate::mem::{self, MaybeUninit, SizedTypeProperties}; |
14 | use crate::ptr; |
15 | |
16 | // When dropped, copies from `src` into `dest`. |
17 | struct InsertionHole<T> { |
18 | src: *const T, |
19 | dest: *mut T, |
20 | } |
21 | |
22 | impl<T> Drop for InsertionHole<T> { |
23 | fn drop(&mut self) { |
        // SAFETY: This is a helper class. Please refer to its usage for correctness. Namely, one
        // must be sure that `src` and `dest` do not overlap as required by
        // `ptr::copy_nonoverlapping`, and that `src` is valid for reads and `dest` for writes.
27 | unsafe { |
            ptr::copy_nonoverlapping(self.src, self.dest, 1);
29 | } |
30 | } |
31 | } |
32 | |
/// Inserts `v[v.len() - 1]` into the pre-sorted sequence `v[..v.len() - 1]` so that the whole
/// `v[..]` becomes sorted.
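///
/// A minimal illustrative sketch (assumed values, not a doctest): with the prefix already sorted,
/// the final element is shifted left into position.
///
/// ```ignore
/// let mut v = [1, 3, 5, 7, 4];
/// // SAFETY: `v.len() >= 2`.
/// unsafe { insert_tail(&mut v, &mut |a: &i32, b: &i32| a < b) };
/// assert_eq!(v, [1, 3, 4, 5, 7]);
/// ```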
35 | unsafe fn insert_tail<T, F>(v: &mut [T], is_less: &mut F) |
36 | where |
37 | F: FnMut(&T, &T) -> bool, |
38 | { |
39 | debug_assert!(v.len() >= 2); |
40 | |
41 | let arr_ptr = v.as_mut_ptr(); |
42 | let i = v.len() - 1; |
43 | |
44 | // SAFETY: caller must ensure v is at least len 2. |
45 | unsafe { |
46 | // See insert_head which talks about why this approach is beneficial. |
47 | let i_ptr = arr_ptr.add(i); |
48 | |
        // It's important that we use `i_ptr` here. If this check is true and we continue,
        // we want to make sure that no other copy of the value was seen by `is_less`.
        // Otherwise we would have to copy it back.
52 | if is_less(&*i_ptr, &*i_ptr.sub(1)) { |
            // It's important that we use `tmp` for comparisons from now on, as it is the value
            // that will be copied back. Otherwise we could create a divergence by copying back
            // the wrong value.
56 | let tmp = mem::ManuallyDrop::new(ptr::read(i_ptr)); |
57 | // Intermediate state of the insertion process is always tracked by `hole`, which |
58 | // serves two purposes: |
59 | // 1. Protects integrity of `v` from panics in `is_less`. |
60 | // 2. Fills the remaining hole in `v` in the end. |
61 | // |
62 | // Panic safety: |
63 | // |
64 | // If `is_less` panics at any point during the process, `hole` will get dropped and |
65 | // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it |
66 | // initially held exactly once. |
67 | let mut hole = InsertionHole { src: &*tmp, dest: i_ptr.sub(1) }; |
68 | ptr::copy_nonoverlapping(hole.dest, i_ptr, 1); |
69 | |
70 | // SAFETY: We know i is at least 1. |
71 | for j in (0..(i - 1)).rev() { |
72 | let j_ptr = arr_ptr.add(j); |
73 | if !is_less(&*tmp, &*j_ptr) { |
74 | break; |
75 | } |
76 | |
77 | ptr::copy_nonoverlapping(j_ptr, hole.dest, 1); |
78 | hole.dest = j_ptr; |
79 | } |
80 | // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`. |
81 | } |
82 | } |
83 | } |
84 | |
/// Inserts `v[0]` into the pre-sorted sequence `v[1..]` so that the whole `v[..]` becomes sorted.
86 | /// |
87 | /// This is the integral subroutine of insertion sort. |
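///
/// A minimal illustrative sketch (assumed values, not a doctest):
///
/// ```ignore
/// let mut v = [4, 1, 2, 3, 5];
/// // SAFETY: `v.len() >= 2`.
/// unsafe { insert_head(&mut v, &mut |a: &i32, b: &i32| a < b) };
/// assert_eq!(v, [1, 2, 3, 4, 5]);
/// ```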
88 | unsafe fn insert_head<T, F>(v: &mut [T], is_less: &mut F) |
89 | where |
90 | F: FnMut(&T, &T) -> bool, |
91 | { |
92 | debug_assert!(v.len() >= 2); |
93 | |
94 | // SAFETY: caller must ensure v is at least len 2. |
95 | unsafe { |
96 | if is_less(v.get_unchecked(1), v.get_unchecked(0)) { |
97 | let arr_ptr = v.as_mut_ptr(); |
98 | |
99 | // There are three ways to implement insertion here: |
100 | // |
101 | // 1. Swap adjacent elements until the first one gets to its final destination. |
102 | // However, this way we copy data around more than is necessary. If elements are big |
103 | // structures (costly to copy), this method will be slow. |
104 | // |
105 | // 2. Iterate until the right place for the first element is found. Then shift the |
106 | // elements succeeding it to make room for it and finally place it into the |
107 | // remaining hole. This is a good method. |
108 | // |
109 | // 3. Copy the first element into a temporary variable. Iterate until the right place |
110 | // for it is found. As we go along, copy every traversed element into the slot |
111 | // preceding it. Finally, copy data from the temporary variable into the remaining |
112 | // hole. This method is very good. Benchmarks demonstrated slightly better |
113 | // performance than with the 2nd method. |
114 | // |
115 | // All methods were benchmarked, and the 3rd showed best results. So we chose that one. |
116 | let tmp = mem::ManuallyDrop::new(ptr::read(arr_ptr)); |
117 | |
118 | // Intermediate state of the insertion process is always tracked by `hole`, which |
119 | // serves two purposes: |
120 | // 1. Protects integrity of `v` from panics in `is_less`. |
121 | // 2. Fills the remaining hole in `v` in the end. |
122 | // |
123 | // Panic safety: |
124 | // |
125 | // If `is_less` panics at any point during the process, `hole` will get dropped and |
126 | // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it |
127 | // initially held exactly once. |
128 | let mut hole = InsertionHole { src: &*tmp, dest: arr_ptr.add(1) }; |
129 | ptr::copy_nonoverlapping(arr_ptr.add(1), arr_ptr.add(0), 1); |
130 | |
131 | for i in 2..v.len() { |
132 | if !is_less(&v.get_unchecked(i), &*tmp) { |
133 | break; |
134 | } |
135 | ptr::copy_nonoverlapping(arr_ptr.add(i), arr_ptr.add(i - 1), 1); |
136 | hole.dest = arr_ptr.add(i); |
137 | } |
138 | // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`. |
139 | } |
140 | } |
141 | } |
142 | |
143 | /// Sort `v` assuming `v[..offset]` is already sorted. |
144 | /// |
/// Never inline this function to avoid code bloat. It still optimizes nicely and has practically
/// no performance impact, even improving performance in some cases.
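///
/// For illustration (assumed values, not a doctest): the first `offset` elements are already
/// sorted and each following element is shifted left into place.
///
/// ```ignore
/// let mut v = [2, 3, 9, 5, 4, 1];
/// insertion_sort_shift_left(&mut v, 3, &mut |a: &i32, b: &i32| a < b);
/// assert_eq!(v, [1, 2, 3, 4, 5, 9]);
/// ```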
#[inline(never)]
148 | pub(super) fn insertion_sort_shift_left<T, F>(v: &mut [T], offset: usize, is_less: &mut F) |
149 | where |
150 | F: FnMut(&T, &T) -> bool, |
151 | { |
152 | let len: usize = v.len(); |
153 | |
154 | // Using assert here improves performance. |
155 | assert!(offset != 0 && offset <= len); |
156 | |
157 | // Shift each element of the unsorted region v[i..] as far left as is needed to make v sorted. |
    for i in offset..len {
        // SAFETY: we tested that `offset` must be at least 1, so this loop is only entered if
        // len >= 2. The range is exclusive and we know `i` must be at least 1, so this slice
        // has at least len 2.
162 | unsafe { |
163 | insert_tail(&mut v[..=i], is_less); |
164 | } |
165 | } |
166 | } |
167 | |
168 | /// Sort `v` assuming `v[offset..]` is already sorted. |
169 | /// |
/// Never inline this function to avoid code bloat. It still optimizes nicely and has practically
/// no performance impact, even improving performance in some cases.
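///
/// For illustration (assumed values, not a doctest): the last `len - offset` elements are already
/// sorted and each preceding element is shifted right into place.
///
/// ```ignore
/// let mut v = [9, 5, 1, 2, 3, 4];
/// insertion_sort_shift_right(&mut v, 2, &mut |a: &i32, b: &i32| a < b);
/// assert_eq!(v, [1, 2, 3, 4, 5, 9]);
/// ```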
#[inline(never)]
173 | fn insertion_sort_shift_right<T, F>(v: &mut [T], offset: usize, is_less: &mut F) |
174 | where |
175 | F: FnMut(&T, &T) -> bool, |
176 | { |
177 | let len: usize = v.len(); |
178 | |
179 | // Using assert here improves performance. |
180 | assert!(offset != 0 && offset <= len && len >= 2); |
181 | |
182 | // Shift each element of the unsorted region v[..i] as far left as is needed to make v sorted. |
    for i in (0..offset).rev() {
        // SAFETY: we tested that `offset` must be at least 1, so this loop is only entered if
        // len >= 2. We ensured that the slice length is always at least 2 long. We know that
        // `start_found` will be at least one less than `end`, and the range is exclusive,
        // which gives us `i` always <= (end - 2).
188 | unsafe { |
189 | insert_head(&mut v[i..len], is_less); |
190 | } |
191 | } |
192 | } |
193 | |
194 | /// Partially sorts a slice by shifting several out-of-order elements around. |
195 | /// |
196 | /// Returns `true` if the slice is sorted at the end. This function is *O*(*n*) worst-case. |
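///
/// For illustration (assumed values, not a doctest): a slice that is already sorted is recognized
/// immediately, while a short unsorted slice is left for the full sort.
///
/// ```ignore
/// let mut sorted = [1, 2, 3];
/// assert!(partial_insertion_sort(&mut sorted, &mut |a: &i32, b: &i32| a < b));
///
/// let mut unsorted = [3, 1, 2];
/// // Shorter than `SHORTEST_SHIFTING`, so no elements are shifted around.
/// assert!(!partial_insertion_sort(&mut unsorted, &mut |a: &i32, b: &i32| a < b));
/// ```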
#[cold]
198 | fn partial_insertion_sort<T, F>(v: &mut [T], is_less: &mut F) -> bool |
199 | where |
200 | F: FnMut(&T, &T) -> bool, |
201 | { |
202 | // Maximum number of adjacent out-of-order pairs that will get shifted. |
203 | const MAX_STEPS: usize = 5; |
204 | // If the slice is shorter than this, don't shift any elements. |
205 | const SHORTEST_SHIFTING: usize = 50; |
206 | |
207 | let len = v.len(); |
208 | let mut i = 1; |
209 | |
210 | for _ in 0..MAX_STEPS { |
211 | // SAFETY: We already explicitly did the bound checking with `i < len`. |
212 | // All our subsequent indexing is only in the range `0 <= index < len` |
213 | unsafe { |
214 | // Find the next pair of adjacent out-of-order elements. |
215 | while i < len && !is_less(v.get_unchecked(i), v.get_unchecked(i - 1)) { |
216 | i += 1; |
217 | } |
218 | } |
219 | |
220 | // Are we done? |
221 | if i == len { |
222 | return true; |
223 | } |
224 | |
        // Don't shift elements on short arrays; that has a performance cost.
226 | if len < SHORTEST_SHIFTING { |
227 | return false; |
228 | } |
229 | |
230 | // Swap the found pair of elements. This puts them in correct order. |
231 | v.swap(i - 1, i); |
232 | |
233 | if i >= 2 { |
234 | // Shift the smaller element to the left. |
235 | insertion_sort_shift_left(&mut v[..i], i - 1, is_less); |
236 | |
237 | // Shift the greater element to the right. |
238 | insertion_sort_shift_right(&mut v[..i], 1, is_less); |
239 | } |
240 | } |
241 | |
242 | // Didn't manage to sort the slice in the limited number of steps. |
243 | false |
244 | } |
245 | |
246 | /// Sorts `v` using heapsort, which guarantees *O*(*n* \* log(*n*)) worst-case. |
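///
/// For illustration (assumed values, not a doctest):
///
/// ```ignore
/// let mut v = [5, 1, 4, 2, 3];
/// heapsort(&mut v, |a: &i32, b: &i32| a < b);
/// assert_eq!(v, [1, 2, 3, 4, 5]);
/// ```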
#[cold]
#[unstable(feature = "sort_internals", reason = "internal to sort module", issue = "none")]
249 | pub fn heapsort<T, F>(v: &mut [T], mut is_less: F) |
250 | where |
251 | F: FnMut(&T, &T) -> bool, |
252 | { |
253 | // This binary heap respects the invariant `parent >= child`. |
254 | let mut sift_down = |v: &mut [T], mut node| { |
255 | loop { |
256 | // Children of `node`. |
257 | let mut child = 2 * node + 1; |
258 | if child >= v.len() { |
259 | break; |
260 | } |
261 | |
262 | // Choose the greater child. |
263 | if child + 1 < v.len() { |
264 | // We need a branch to be sure not to out-of-bounds index, |
265 | // but it's highly predictable. The comparison, however, |
266 | // is better done branchless, especially for primitives. |
267 | child += is_less(&v[child], &v[child + 1]) as usize; |
268 | } |
269 | |
270 | // Stop if the invariant holds at `node`. |
271 | if !is_less(&v[node], &v[child]) { |
272 | break; |
273 | } |
274 | |
275 | // Swap `node` with the greater child, move one step down, and continue sifting. |
276 | v.swap(node, child); |
277 | node = child; |
278 | } |
279 | }; |
280 | |
281 | // Build the heap in linear time. |
282 | for i in (0..v.len() / 2).rev() { |
283 | sift_down(v, i); |
284 | } |
285 | |
286 | // Pop maximal elements from the heap. |
287 | for i in (1..v.len()).rev() { |
288 | v.swap(0, i); |
289 | sift_down(&mut v[..i], 0); |
290 | } |
291 | } |
292 | |
293 | /// Partitions `v` into elements smaller than `pivot`, followed by elements greater than or equal |
294 | /// to `pivot`. |
295 | /// |
296 | /// Returns the number of elements smaller than `pivot`. |
297 | /// |
298 | /// Partitioning is performed block-by-block in order to minimize the cost of branching operations. |
299 | /// This idea is presented in the [BlockQuicksort][pdf] paper. |
300 | /// |
301 | /// [pdf]: https://drops.dagstuhl.de/opus/volltexte/2016/6389/pdf/LIPIcs-ESA-2016-38.pdf |
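///
/// For illustration (assumed values, not a doctest), with a pivot value stored outside the slice
/// being partitioned:
///
/// ```ignore
/// let pivot = 3;
/// let mut v = [5, 1, 4, 2, 6];
/// let lt = partition_in_blocks(&mut v, &pivot, &mut |a: &i32, b: &i32| a < b);
/// assert_eq!(lt, 2); // `1` and `2` are smaller than the pivot
/// assert!(v[..lt].iter().all(|&x| x < pivot));
/// assert!(v[lt..].iter().all(|&x| x >= pivot));
/// ```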
302 | fn partition_in_blocks<T, F>(v: &mut [T], pivot: &T, is_less: &mut F) -> usize |
303 | where |
304 | F: FnMut(&T, &T) -> bool, |
305 | { |
306 | // Number of elements in a typical block. |
307 | const BLOCK: usize = 128; |
308 | |
309 | // The partitioning algorithm repeats the following steps until completion: |
310 | // |
311 | // 1. Trace a block from the left side to identify elements greater than or equal to the pivot. |
312 | // 2. Trace a block from the right side to identify elements smaller than the pivot. |
313 | // 3. Exchange the identified elements between the left and right side. |
314 | // |
315 | // We keep the following variables for a block of elements: |
316 | // |
317 | // 1. `block` - Number of elements in the block. |
318 | // 2. `start` - Start pointer into the `offsets` array. |
319 | // 3. `end` - End pointer into the `offsets` array. |
320 | // 4. `offsets` - Indices of out-of-order elements within the block. |
321 | |
322 | // The current block on the left side (from `l` to `l.add(block_l)`). |
323 | let mut l = v.as_mut_ptr(); |
324 | let mut block_l = BLOCK; |
325 | let mut start_l = ptr::null_mut(); |
326 | let mut end_l = ptr::null_mut(); |
327 | let mut offsets_l = [MaybeUninit::<u8>::uninit(); BLOCK]; |
328 | |
329 | // The current block on the right side (from `r.sub(block_r)` to `r`). |
    // SAFETY: The documentation for .add() specifically mentions that `vec.as_ptr().add(vec.len())` is always safe.
331 | let mut r = unsafe { l.add(v.len()) }; |
332 | let mut block_r = BLOCK; |
333 | let mut start_r = ptr::null_mut(); |
334 | let mut end_r = ptr::null_mut(); |
335 | let mut offsets_r = [MaybeUninit::<u8>::uninit(); BLOCK]; |
336 | |
337 | // FIXME: When we get VLAs, try creating one array of length `min(v.len(), 2 * BLOCK)` rather |
338 | // than two fixed-size arrays of length `BLOCK`. VLAs might be more cache-efficient. |
339 | |
340 | // Returns the number of elements between pointers `l` (inclusive) and `r` (exclusive). |
341 | fn width<T>(l: *mut T, r: *mut T) -> usize { |
342 | assert!(mem::size_of::<T>() > 0); |
343 | // FIXME: this should *likely* use `offset_from`, but more |
344 | // investigation is needed (including running tests in miri). |
345 | (r.addr() - l.addr()) / mem::size_of::<T>() |
346 | } |
347 | |
348 | loop { |
349 | // We are done with partitioning block-by-block when `l` and `r` get very close. Then we do |
350 | // some patch-up work in order to partition the remaining elements in between. |
351 | let is_done = width(l, r) <= 2 * BLOCK; |
352 | |
353 | if is_done { |
354 | // Number of remaining elements (still not compared to the pivot). |
355 | let mut rem = width(l, r); |
356 | if start_l < end_l || start_r < end_r { |
357 | rem -= BLOCK; |
358 | } |
359 | |
360 | // Adjust block sizes so that the left and right block don't overlap, but get perfectly |
361 | // aligned to cover the whole remaining gap. |
362 | if start_l < end_l { |
363 | block_r = rem; |
364 | } else if start_r < end_r { |
365 | block_l = rem; |
366 | } else { |
367 | // There were the same number of elements to switch on both blocks during the last |
368 | // iteration, so there are no remaining elements on either block. Cover the remaining |
369 | // items with roughly equally-sized blocks. |
370 | block_l = rem / 2; |
371 | block_r = rem - block_l; |
372 | } |
373 | debug_assert!(block_l <= BLOCK && block_r <= BLOCK); |
374 | debug_assert!(width(l, r) == block_l + block_r); |
375 | } |
376 | |
377 | if start_l == end_l { |
378 | // Trace `block_l` elements from the left side. |
379 | start_l = MaybeUninit::slice_as_mut_ptr(&mut offsets_l); |
380 | end_l = start_l; |
381 | let mut elem = l; |
382 | |
383 | for i in 0..block_l { |
384 | // SAFETY: The unsafety operations below involve the usage of the `offset`. |
385 | // According to the conditions required by the function, we satisfy them because: |
386 | // 1. `offsets_l` is stack-allocated, and thus considered separate allocated object. |
387 | // 2. The function `is_less` returns a `bool`. |
388 | // Casting a `bool` will never overflow `isize`. |
389 | // 3. We have guaranteed that `block_l` will be `<= BLOCK`. |
390 | // Plus, `end_l` was initially set to the begin pointer of `offsets_` which was declared on the stack. |
                // Thus, we know that even in the worst case (all invocations of `is_less` return false) we will only be at most 1 byte past the end.
392 | // Another unsafety operation here is dereferencing `elem`. |
393 | // However, `elem` was initially the begin pointer to the slice which is always valid. |
394 | unsafe { |
395 | // Branchless comparison. |
396 | *end_l = i as u8; |
397 | end_l = end_l.add(!is_less(&*elem, pivot) as usize); |
398 | elem = elem.add(1); |
399 | } |
400 | } |
401 | } |
402 | |
403 | if start_r == end_r { |
404 | // Trace `block_r` elements from the right side. |
405 | start_r = MaybeUninit::slice_as_mut_ptr(&mut offsets_r); |
406 | end_r = start_r; |
407 | let mut elem = r; |
408 | |
409 | for i in 0..block_r { |
410 | // SAFETY: The unsafety operations below involve the usage of the `offset`. |
411 | // According to the conditions required by the function, we satisfy them because: |
412 | // 1. `offsets_r` is stack-allocated, and thus considered separate allocated object. |
413 | // 2. The function `is_less` returns a `bool`. |
414 | // Casting a `bool` will never overflow `isize`. |
415 | // 3. We have guaranteed that `block_r` will be `<= BLOCK`. |
416 | // Plus, `end_r` was initially set to the begin pointer of `offsets_` which was declared on the stack. |
                // Thus, we know that even in the worst case (all invocations of `is_less` return true) we will only be at most 1 byte past the end.
418 | // Another unsafety operation here is dereferencing `elem`. |
419 | // However, `elem` was initially `1 * sizeof(T)` past the end and we decrement it by `1 * sizeof(T)` before accessing it. |
420 | // Plus, `block_r` was asserted to be less than `BLOCK` and `elem` will therefore at most be pointing to the beginning of the slice. |
421 | unsafe { |
422 | // Branchless comparison. |
423 | elem = elem.sub(1); |
424 | *end_r = i as u8; |
425 | end_r = end_r.add(is_less(&*elem, pivot) as usize); |
426 | } |
427 | } |
428 | } |
429 | |
430 | // Number of out-of-order elements to swap between the left and right side. |
431 | let count = cmp::min(width(start_l, end_l), width(start_r, end_r)); |
432 | |
433 | if count > 0 { |
434 | macro_rules! left { |
435 | () => { |
436 | l.add(usize::from(*start_l)) |
437 | }; |
438 | } |
439 | macro_rules! right { |
440 | () => { |
441 | r.sub(usize::from(*start_r) + 1) |
442 | }; |
443 | } |
444 | |
445 | // Instead of swapping one pair at the time, it is more efficient to perform a cyclic |
446 | // permutation. This is not strictly equivalent to swapping, but produces a similar |
447 | // result using fewer memory operations. |
448 | |
449 | // SAFETY: The use of `ptr::read` is valid because there is at least one element in |
450 | // both `offsets_l` and `offsets_r`, so `left!` is a valid pointer to read from. |
451 | // |
452 | // The uses of `left!` involve calls to `offset` on `l`, which points to the |
453 | // beginning of `v`. All the offsets pointed-to by `start_l` are at most `block_l`, so |
454 | // these `offset` calls are safe as all reads are within the block. The same argument |
455 | // applies for the uses of `right!`. |
456 | // |
457 | // The calls to `start_l.offset` are valid because there are at most `count-1` of them, |
458 | // plus the final one at the end of the unsafe block, where `count` is the minimum number |
459 | // of collected offsets in `offsets_l` and `offsets_r`, so there is no risk of there not |
460 | // being enough elements. The same reasoning applies to the calls to `start_r.offset`. |
461 | // |
462 | // The calls to `copy_nonoverlapping` are safe because `left!` and `right!` are guaranteed |
463 | // not to overlap, and are valid because of the reasoning above. |
464 | unsafe { |
465 | let tmp = ptr::read(left!()); |
466 | ptr::copy_nonoverlapping(right!(), left!(), 1); |
467 | |
468 | for _ in 1..count { |
469 | start_l = start_l.add(1); |
470 | ptr::copy_nonoverlapping(left!(), right!(), 1); |
471 | start_r = start_r.add(1); |
472 | ptr::copy_nonoverlapping(right!(), left!(), 1); |
473 | } |
474 | |
475 | ptr::copy_nonoverlapping(&tmp, right!(), 1); |
476 | mem::forget(tmp); |
477 | start_l = start_l.add(1); |
478 | start_r = start_r.add(1); |
479 | } |
480 | } |
481 | |
482 | if start_l == end_l { |
483 | // All out-of-order elements in the left block were moved. Move to the next block. |
484 | |
485 | // block-width-guarantee |
486 | // SAFETY: if `!is_done` then the slice width is guaranteed to be at least `2*BLOCK` wide. There |
487 | // are at most `BLOCK` elements in `offsets_l` because of its size, so the `offset` operation is |
488 | // safe. Otherwise, the debug assertions in the `is_done` case guarantee that |
489 | // `width(l, r) == block_l + block_r`, namely, that the block sizes have been adjusted to account |
490 | // for the smaller number of remaining elements. |
491 | l = unsafe { l.add(block_l) }; |
492 | } |
493 | |
494 | if start_r == end_r { |
495 | // All out-of-order elements in the right block were moved. Move to the previous block. |
496 | |
497 | // SAFETY: Same argument as [block-width-guarantee]. Either this is a full block `2*BLOCK`-wide, |
498 | // or `block_r` has been adjusted for the last handful of elements. |
499 | r = unsafe { r.sub(block_r) }; |
500 | } |
501 | |
502 | if is_done { |
503 | break; |
504 | } |
505 | } |
506 | |
507 | // All that remains now is at most one block (either the left or the right) with out-of-order |
508 | // elements that need to be moved. Such remaining elements can be simply shifted to the end |
509 | // within their block. |
510 | |
511 | if start_l < end_l { |
512 | // The left block remains. |
513 | // Move its remaining out-of-order elements to the far right. |
514 | debug_assert_eq!(width(l, r), block_l); |
515 | while start_l < end_l { |
516 | // remaining-elements-safety |
517 | // SAFETY: while the loop condition holds there are still elements in `offsets_l`, so it |
518 | // is safe to point `end_l` to the previous element. |
519 | // |
520 | // The `ptr::swap` is safe if both its arguments are valid for reads and writes: |
521 | // - Per the debug assert above, the distance between `l` and `r` is `block_l` |
522 | // elements, so there can be at most `block_l` remaining offsets between `start_l` |
523 | // and `end_l`. This means `r` will be moved at most `block_l` steps back, which |
524 | // makes the `r.offset` calls valid (at that point `l == r`). |
525 | // - `offsets_l` contains valid offsets into `v` collected during the partitioning of |
526 | // the last block, so the `l.offset` calls are valid. |
527 | unsafe { |
528 | end_l = end_l.sub(1); |
529 | ptr::swap(l.add(usize::from(*end_l)), r.sub(1)); |
530 | r = r.sub(1); |
531 | } |
532 | } |
533 | width(v.as_mut_ptr(), r) |
534 | } else if start_r < end_r { |
535 | // The right block remains. |
536 | // Move its remaining out-of-order elements to the far left. |
537 | debug_assert_eq!(width(l, r), block_r); |
538 | while start_r < end_r { |
539 | // SAFETY: See the reasoning in [remaining-elements-safety]. |
540 | unsafe { |
541 | end_r = end_r.sub(1); |
542 | ptr::swap(l, r.sub(usize::from(*end_r) + 1)); |
543 | l = l.add(1); |
544 | } |
545 | } |
546 | width(v.as_mut_ptr(), l) |
547 | } else { |
548 | // Nothing else to do, we're done. |
549 | width(v.as_mut_ptr(), l) |
550 | } |
551 | } |
552 | |
553 | /// Partitions `v` into elements smaller than `v[pivot]`, followed by elements greater than or |
554 | /// equal to `v[pivot]`. |
555 | /// |
556 | /// Returns a tuple of: |
557 | /// |
558 | /// 1. Number of elements smaller than `v[pivot]`. |
559 | /// 2. True if `v` was already partitioned. |
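///
/// For illustration (assumed values, not a doctest):
///
/// ```ignore
/// let mut v = [3, 8, 1, 9, 2];
/// let (mid, was_partitioned) = partition(&mut v, 0, &mut |a: &i32, b: &i32| a < b);
/// assert_eq!(mid, 2);        // `1` and `2` are smaller than the pivot `3`
/// assert_eq!(v[mid], 3);     // the pivot ends up between the two partitions
/// assert!(!was_partitioned); // elements had to be moved around
/// ```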
560 | pub(super) fn partition<T, F>(v: &mut [T], pivot: usize, is_less: &mut F) -> (usize, bool) |
561 | where |
562 | F: FnMut(&T, &T) -> bool, |
563 | { |
564 | let (mid, was_partitioned) = { |
565 | // Place the pivot at the beginning of slice. |
566 | v.swap(0, pivot); |
567 | let (pivot, v) = v.split_at_mut(1); |
568 | let pivot = &mut pivot[0]; |
569 | |
570 | // Read the pivot into a stack-allocated variable for efficiency. If a following comparison |
571 | // operation panics, the pivot will be automatically written back into the slice. |
572 | |
573 | // SAFETY: `pivot` is a reference to the first element of `v`, so `ptr::read` is safe. |
574 | let tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) }); |
575 | let _pivot_guard = InsertionHole { src: &*tmp, dest: pivot }; |
576 | let pivot = &*tmp; |
577 | |
578 | // Find the first pair of out-of-order elements. |
579 | let mut l = 0; |
580 | let mut r = v.len(); |
581 | |
582 | // SAFETY: The unsafety below involves indexing an array. |
583 | // For the first one: We already do the bounds checking here with `l < r`. |
        // For the second one: We initially have `l == 0` and `r == v.len()`, and we check
        // `l < r` before every indexing operation, so `r - 1` is always a valid index.
586 | unsafe { |
587 | // Find the first element greater than or equal to the pivot. |
588 | while l < r && is_less(v.get_unchecked(l), pivot) { |
589 | l += 1; |
590 | } |
591 | |
            // Find the last element smaller than the pivot.
593 | while l < r && !is_less(v.get_unchecked(r - 1), pivot) { |
594 | r -= 1; |
595 | } |
596 | } |
597 | |
598 | (l + partition_in_blocks(&mut v[l..r], pivot, is_less), l >= r) |
599 | |
600 | // `_pivot_guard` goes out of scope and writes the pivot (which is a stack-allocated |
601 | // variable) back into the slice where it originally was. This step is critical in ensuring |
602 | // safety! |
603 | }; |
604 | |
605 | // Place the pivot between the two partitions. |
606 | v.swap(0, mid); |
607 | |
608 | (mid, was_partitioned) |
609 | } |
610 | |
611 | /// Partitions `v` into elements equal to `v[pivot]` followed by elements greater than `v[pivot]`. |
612 | /// |
613 | /// Returns the number of elements equal to the pivot. It is assumed that `v` does not contain |
614 | /// elements smaller than the pivot. |
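///
/// For illustration (assumed values, not a doctest); note the precondition that no element is
/// smaller than the pivot:
///
/// ```ignore
/// let mut v = [2, 5, 2, 3, 2];
/// let equal = partition_equal(&mut v, 0, &mut |a: &i32, b: &i32| a < b);
/// assert_eq!(equal, 3);
/// assert!(v[..equal].iter().all(|&x| x == 2));
/// assert!(v[equal..].iter().all(|&x| x > 2));
/// ```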
615 | pub(super) fn partition_equal<T, F>(v: &mut [T], pivot: usize, is_less: &mut F) -> usize |
616 | where |
617 | F: FnMut(&T, &T) -> bool, |
618 | { |
619 | // Place the pivot at the beginning of slice. |
620 | v.swap(0, pivot); |
621 | let (pivot, v) = v.split_at_mut(1); |
622 | let pivot = &mut pivot[0]; |
623 | |
624 | // Read the pivot into a stack-allocated variable for efficiency. If a following comparison |
625 | // operation panics, the pivot will be automatically written back into the slice. |
626 | // SAFETY: The pointer here is valid because it is obtained from a reference to a slice. |
627 | let tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) }); |
628 | let _pivot_guard = InsertionHole { src: &*tmp, dest: pivot }; |
629 | let pivot = &*tmp; |
630 | |
631 | let len = v.len(); |
632 | if len == 0 { |
633 | return 0; |
634 | } |
635 | |
636 | // Now partition the slice. |
637 | let mut l = 0; |
638 | let mut r = len; |
639 | loop { |
640 | // SAFETY: The unsafety below involves indexing an array. |
641 | // For the first one: We already do the bounds checking here with `l < r`. |
        // For the second one: We initially have `l == 0` and `r == v.len()`, and we check
        // `l < r` before every indexing operation, so `r` is always a valid index after `r -= 1`.
644 | unsafe { |
645 | // Find the first element greater than the pivot. |
646 | while l < r && !is_less(pivot, v.get_unchecked(l)) { |
647 | l += 1; |
648 | } |
649 | |
650 | // Find the last element equal to the pivot. |
651 | loop { |
652 | r -= 1; |
653 | if l >= r || !is_less(pivot, v.get_unchecked(r)) { |
654 | break; |
655 | } |
656 | } |
657 | |
658 | // Are we done? |
659 | if l >= r { |
660 | break; |
661 | } |
662 | |
663 | // Swap the found pair of out-of-order elements. |
664 | let ptr = v.as_mut_ptr(); |
665 | ptr::swap(ptr.add(l), ptr.add(r)); |
666 | l += 1; |
667 | } |
668 | } |
669 | |
670 | // We found `l` elements equal to the pivot. Add 1 to account for the pivot itself. |
671 | l + 1 |
672 | |
673 | // `_pivot_guard` goes out of scope and writes the pivot (which is a stack-allocated variable) |
674 | // back into the slice where it originally was. This step is critical in ensuring safety! |
675 | } |
676 | |
677 | /// Scatters some elements around in an attempt to break patterns that might cause imbalanced |
678 | /// partitions in quicksort. |
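///
/// For illustration (assumed values, not a doctest): the slice keeps the same elements, only a
/// few positions around the middle are swapped with pseudo-randomly chosen ones.
///
/// ```ignore
/// let mut v: [usize; 16] = core::array::from_fn(|i| i);
/// break_patterns(&mut v);
/// let mut check = v;
/// check.sort_unstable();
/// assert!(check.iter().copied().eq(0..16usize));
/// ```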
#[cold]
680 | pub(super) fn break_patterns<T>(v: &mut [T]) { |
681 | let len = v.len(); |
682 | if len >= 8 { |
683 | let mut seed = len; |
684 | let mut gen_usize = || { |
685 | // Pseudorandom number generator from the "Xorshift RNGs" paper by George Marsaglia. |
686 | if usize::BITS <= 32 { |
687 | let mut r = seed as u32; |
688 | r ^= r << 13; |
689 | r ^= r >> 17; |
690 | r ^= r << 5; |
691 | seed = r as usize; |
692 | seed |
693 | } else { |
694 | let mut r = seed as u64; |
695 | r ^= r << 13; |
696 | r ^= r >> 7; |
697 | r ^= r << 17; |
698 | seed = r as usize; |
699 | seed |
700 | } |
701 | }; |
702 | |
703 | // Take random numbers modulo this number. |
704 | // The number fits into `usize` because `len` is not greater than `isize::MAX`. |
705 | let modulus = len.next_power_of_two(); |
706 | |
        // Some pivot candidates will be near this index. Let's randomize them.
708 | let pos = len / 4 * 2; |
709 | |
710 | for i in 0..3 { |
711 | // Generate a random number modulo `len`. However, in order to avoid costly operations |
712 | // we first take it modulo a power of two, and then decrease by `len` until it fits |
713 | // into the range `[0, len - 1]`. |
714 | let mut other = gen_usize() & (modulus - 1); |
715 | |
716 | // `other` is guaranteed to be less than `2 * len`. |
717 | if other >= len { |
718 | other -= len; |
719 | } |
720 | |
721 | v.swap(pos - 1 + i, other); |
722 | } |
723 | } |
724 | } |
725 | |
726 | /// Chooses a pivot in `v` and returns the index and `true` if the slice is likely already sorted. |
727 | /// |
728 | /// Elements in `v` might be reordered in the process. |
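///
/// For illustration (assumed values, not a doctest):
///
/// ```ignore
/// let mut v = [3, 7, 1, 9, 5, 8, 2, 6, 4];
/// let (pivot, likely_sorted) = choose_pivot(&mut v, &mut |a: &i32, b: &i32| a < b);
/// assert!(pivot < v.len()); // index of a median-of-three candidate near the middle
/// assert!(!likely_sorted);  // sorting the candidate indices required swaps
/// ```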
729 | pub(super) fn choose_pivot<T, F>(v: &mut [T], is_less: &mut F) -> (usize, bool) |
730 | where |
731 | F: FnMut(&T, &T) -> bool, |
732 | { |
733 | // Minimum length to choose the median-of-medians method. |
734 | // Shorter slices use the simple median-of-three method. |
735 | const SHORTEST_MEDIAN_OF_MEDIANS: usize = 50; |
736 | // Maximum number of swaps that can be performed in this function. |
737 | const MAX_SWAPS: usize = 4 * 3; |
738 | |
739 | let len = v.len(); |
740 | |
741 | // Three indices near which we are going to choose a pivot. |
742 | let mut a = len / 4 * 1; |
743 | let mut b = len / 4 * 2; |
744 | let mut c = len / 4 * 3; |
745 | |
746 | // Counts the total number of swaps we are about to perform while sorting indices. |
747 | let mut swaps = 0; |
748 | |
749 | if len >= 8 { |
750 | // Swaps indices so that `v[a] <= v[b]`. |
751 | // SAFETY: `len >= 8` so there are at least two elements in the neighborhoods of |
752 | // `a`, `b` and `c`. This means the three calls to `sort_adjacent` result in |
753 | // corresponding calls to `sort3` with valid 3-item neighborhoods around each |
754 | // pointer, which in turn means the calls to `sort2` are done with valid |
755 | // references. Thus the `v.get_unchecked` calls are safe, as is the `ptr::swap` |
756 | // call. |
757 | let mut sort2 = |a: &mut usize, b: &mut usize| unsafe { |
758 | if is_less(v.get_unchecked(*b), v.get_unchecked(*a)) { |
759 | ptr::swap(a, b); |
760 | swaps += 1; |
761 | } |
762 | }; |
763 | |
764 | // Swaps indices so that `v[a] <= v[b] <= v[c]`. |
765 | let mut sort3 = |a: &mut usize, b: &mut usize, c: &mut usize| { |
766 | sort2(a, b); |
767 | sort2(b, c); |
768 | sort2(a, b); |
769 | }; |
770 | |
771 | if len >= SHORTEST_MEDIAN_OF_MEDIANS { |
772 | // Finds the median of `v[a - 1], v[a], v[a + 1]` and stores the index into `a`. |
773 | let mut sort_adjacent = |a: &mut usize| { |
774 | let tmp = *a; |
775 | sort3(&mut (tmp - 1), a, &mut (tmp + 1)); |
776 | }; |
777 | |
778 | // Find medians in the neighborhoods of `a`, `b`, and `c`. |
779 | sort_adjacent(&mut a); |
780 | sort_adjacent(&mut b); |
781 | sort_adjacent(&mut c); |
782 | } |
783 | |
784 | // Find the median among `a`, `b`, and `c`. |
785 | sort3(&mut a, &mut b, &mut c); |
786 | } |
787 | |
788 | if swaps < MAX_SWAPS { |
789 | (b, swaps == 0) |
790 | } else { |
791 | // The maximum number of swaps was performed. Chances are the slice is descending or mostly |
792 | // descending, so reversing will probably help sort it faster. |
793 | v.reverse(); |
794 | (len - 1 - b, true) |
795 | } |
796 | } |
797 | |
798 | /// Sorts `v` recursively. |
799 | /// |
800 | /// If the slice had a predecessor in the original array, it is specified as `pred`. |
801 | /// |
802 | /// `limit` is the number of allowed imbalanced partitions before switching to `heapsort`. If zero, |
803 | /// this function will immediately switch to heapsort. |
804 | fn recurse<'a, T, F>(mut v: &'a mut [T], is_less: &mut F, mut pred: Option<&'a T>, mut limit: u32) |
805 | where |
806 | F: FnMut(&T, &T) -> bool, |
807 | { |
808 | // Slices of up to this length get sorted using insertion sort. |
809 | const MAX_INSERTION: usize = 20; |
810 | |
811 | // True if the last partitioning was reasonably balanced. |
812 | let mut was_balanced = true; |
813 | // True if the last partitioning didn't shuffle elements (the slice was already partitioned). |
814 | let mut was_partitioned = true; |
815 | |
816 | loop { |
817 | let len = v.len(); |
818 | |
819 | // Very short slices get sorted using insertion sort. |
820 | if len <= MAX_INSERTION { |
821 | if len >= 2 { |
822 | insertion_sort_shift_left(v, 1, is_less); |
823 | } |
824 | return; |
825 | } |
826 | |
827 | // If too many bad pivot choices were made, simply fall back to heapsort in order to |
828 | // guarantee `O(n * log(n))` worst-case. |
829 | if limit == 0 { |
830 | heapsort(v, is_less); |
831 | return; |
832 | } |
833 | |
834 | // If the last partitioning was imbalanced, try breaking patterns in the slice by shuffling |
835 | // some elements around. Hopefully we'll choose a better pivot this time. |
836 | if !was_balanced { |
837 | break_patterns(v); |
838 | limit -= 1; |
839 | } |
840 | |
841 | // Choose a pivot and try guessing whether the slice is already sorted. |
842 | let (pivot, likely_sorted) = choose_pivot(v, is_less); |
843 | |
844 | // If the last partitioning was decently balanced and didn't shuffle elements, and if pivot |
845 | // selection predicts the slice is likely already sorted... |
846 | if was_balanced && was_partitioned && likely_sorted { |
847 | // Try identifying several out-of-order elements and shifting them to correct |
848 | // positions. If the slice ends up being completely sorted, we're done. |
849 | if partial_insertion_sort(v, is_less) { |
850 | return; |
851 | } |
852 | } |
853 | |
854 | // If the chosen pivot is equal to the predecessor, then it's the smallest element in the |
855 | // slice. Partition the slice into elements equal to and elements greater than the pivot. |
856 | // This case is usually hit when the slice contains many duplicate elements. |
857 | if let Some(p) = pred { |
858 | if !is_less(p, &v[pivot]) { |
859 | let mid = partition_equal(v, pivot, is_less); |
860 | |
861 | // Continue sorting elements greater than the pivot. |
862 | v = &mut v[mid..]; |
863 | continue; |
864 | } |
865 | } |
866 | |
867 | // Partition the slice. |
868 | let (mid, was_p) = partition(v, pivot, is_less); |
869 | was_balanced = cmp::min(mid, len - mid) >= len / 8; |
870 | was_partitioned = was_p; |
871 | |
872 | // Split the slice into `left`, `pivot`, and `right`. |
873 | let (left, right) = v.split_at_mut(mid); |
874 | let (pivot, right) = right.split_at_mut(1); |
875 | let pivot = &pivot[0]; |
876 | |
877 | // Recurse into the shorter side only in order to minimize the total number of recursive |
878 | // calls and consume less stack space. Then just continue with the longer side (this is |
879 | // akin to tail recursion). |
880 | if left.len() < right.len() { |
881 | recurse(left, is_less, pred, limit); |
882 | v = right; |
883 | pred = Some(pivot); |
884 | } else { |
885 | recurse(right, is_less, Some(pivot), limit); |
886 | v = left; |
887 | } |
888 | } |
889 | } |
890 | |
891 | /// Sorts `v` using pattern-defeating quicksort, which is *O*(*n* \* log(*n*)) worst-case. |
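///
/// For illustration (assumed values, not a doctest):
///
/// ```ignore
/// let mut v = [5, 4, 1, 3, 2];
/// quicksort(&mut v, |a: &i32, b: &i32| a < b);
/// assert_eq!(v, [1, 2, 3, 4, 5]);
/// ```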
892 | pub fn quicksort<T, F>(v: &mut [T], mut is_less: F) |
893 | where |
894 | F: FnMut(&T, &T) -> bool, |
895 | { |
896 | // Sorting has no meaningful behavior on zero-sized types. |
897 | if T::IS_ZST { |
898 | return; |
899 | } |
900 | |
901 | // Limit the number of imbalanced partitions to `floor(log2(len)) + 1`. |
902 | let limit: u32 = usize::BITS - v.len().leading_zeros(); |
903 | |
    recurse(v, &mut is_less, None, limit);
905 | } |
906 | |
907 | /// Merges non-decreasing runs `v[..mid]` and `v[mid..]` using `buf` as temporary storage, and |
908 | /// stores the result into `v[..]`. |
909 | /// |
910 | /// # Safety |
911 | /// |
912 | /// The two slices must be non-empty and `mid` must be in bounds. Buffer `buf` must be long enough |
913 | /// to hold a copy of the shorter slice. Also, `T` must not be a zero-sized type. |
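///
/// For illustration (assumed values, not a doctest), merging two runs of equal length with a
/// scratch buffer big enough for the shorter one:
///
/// ```ignore
/// let mut v = [1, 3, 5, 2, 4, 6];
/// let mut buf = [core::mem::MaybeUninit::<i32>::uninit(); 3];
/// // SAFETY: both runs are non-empty, `mid` is in bounds, `buf` holds 3 elements, and `i32`
/// // is not a zero-sized type.
/// unsafe { merge(&mut v, 3, buf.as_mut_ptr().cast::<i32>(), &mut |a: &i32, b: &i32| a < b) };
/// assert_eq!(v, [1, 2, 3, 4, 5, 6]);
/// ```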
914 | unsafe fn merge<T, F>(v: &mut [T], mid: usize, buf: *mut T, is_less: &mut F) |
915 | where |
916 | F: FnMut(&T, &T) -> bool, |
917 | { |
918 | let len = v.len(); |
919 | let v = v.as_mut_ptr(); |
920 | |
921 | // SAFETY: mid and len must be in-bounds of v. |
922 | let (v_mid, v_end) = unsafe { (v.add(mid), v.add(len)) }; |
923 | |
924 | // The merge process first copies the shorter run into `buf`. Then it traces the newly copied |
925 | // run and the longer run forwards (or backwards), comparing their next unconsumed elements and |
926 | // copying the lesser (or greater) one into `v`. |
927 | // |
928 | // As soon as the shorter run is fully consumed, the process is done. If the longer run gets |
929 | // consumed first, then we must copy whatever is left of the shorter run into the remaining |
930 | // hole in `v`. |
931 | // |
932 | // Intermediate state of the process is always tracked by `hole`, which serves two purposes: |
933 | // 1. Protects integrity of `v` from panics in `is_less`. |
934 | // 2. Fills the remaining hole in `v` if the longer run gets consumed first. |
935 | // |
936 | // Panic safety: |
937 | // |
938 | // If `is_less` panics at any point during the process, `hole` will get dropped and fill the |
939 | // hole in `v` with the unconsumed range in `buf`, thus ensuring that `v` still holds every |
940 | // object it initially held exactly once. |
941 | let mut hole; |
942 | |
943 | if mid <= len - mid { |
944 | // The left run is shorter. |
945 | |
946 | // SAFETY: buf must have enough capacity for `v[..mid]`. |
947 | unsafe { |
948 | ptr::copy_nonoverlapping(v, buf, mid); |
949 | hole = MergeHole { start: buf, end: buf.add(mid), dest: v }; |
950 | } |
951 | |
952 | // Initially, these pointers point to the beginnings of their arrays. |
953 | let left = &mut hole.start; |
954 | let mut right = v_mid; |
955 | let out = &mut hole.dest; |
956 | |
957 | while *left < hole.end && right < v_end { |
958 | // Consume the lesser side. |
959 | // If equal, prefer the left run to maintain stability. |
960 | |
            // SAFETY: left and right must be valid and part of v; the same holds for out.
962 | unsafe { |
963 | let is_l = is_less(&*right, &**left); |
964 | let to_copy = if is_l { right } else { *left }; |
965 | ptr::copy_nonoverlapping(to_copy, *out, 1); |
966 | *out = out.add(1); |
967 | right = right.add(is_l as usize); |
968 | *left = left.add(!is_l as usize); |
969 | } |
970 | } |
971 | } else { |
972 | // The right run is shorter. |
973 | |
974 | // SAFETY: buf must have enough capacity for `v[mid..]`. |
975 | unsafe { |
976 | ptr::copy_nonoverlapping(v_mid, buf, len - mid); |
977 | hole = MergeHole { start: buf, end: buf.add(len - mid), dest: v_mid }; |
978 | } |
979 | |
980 | // Initially, these pointers point past the ends of their arrays. |
981 | let left = &mut hole.dest; |
982 | let right = &mut hole.end; |
983 | let mut out = v_end; |
984 | |
985 | while v < *left && buf < *right { |
986 | // Consume the greater side. |
987 | // If equal, prefer the right run to maintain stability. |
988 | |
            // SAFETY: left and right must be valid and part of v; the same holds for out.
990 | unsafe { |
991 | let is_l = is_less(&*right.sub(1), &*left.sub(1)); |
992 | *left = left.sub(is_l as usize); |
993 | *right = right.sub(!is_l as usize); |
994 | let to_copy = if is_l { *left } else { *right }; |
995 | out = out.sub(1); |
996 | ptr::copy_nonoverlapping(to_copy, out, 1); |
997 | } |
998 | } |
999 | } |
1000 | // Finally, `hole` gets dropped. If the shorter run was not fully consumed, whatever remains of |
1001 | // it will now be copied into the hole in `v`. |
1002 | |
1003 | // When dropped, copies the range `start..end` into `dest..`. |
1004 | struct MergeHole<T> { |
1005 | start: *mut T, |
1006 | end: *mut T, |
1007 | dest: *mut T, |
1008 | } |
1009 | |
1010 | impl<T> Drop for MergeHole<T> { |
1011 | fn drop(&mut self) { |
1012 | // SAFETY: `T` is not a zero-sized type, and these are pointers into a slice's elements. |
1013 | unsafe { |
1014 | let len = self.end.sub_ptr(self.start); |
1015 | ptr::copy_nonoverlapping(self.start, self.dest, len); |
1016 | } |
1017 | } |
1018 | } |
1019 | } |
1020 | |
/// This merge sort borrows some (but not all) ideas from TimSort, which used to be described in
/// detail [here](https://github.com/python/cpython/blob/main/Objects/listsort.txt). However,
/// Python has since switched to a Powersort-based implementation.
1024 | /// |
1025 | /// The algorithm identifies strictly descending and non-descending subsequences, which are called |
1026 | /// natural runs. There is a stack of pending runs yet to be merged. Each newly found run is pushed |
1027 | /// onto the stack, and then some pairs of adjacent runs are merged until these two invariants are |
1028 | /// satisfied: |
1029 | /// |
1030 | /// 1. for every `i` in `1..runs.len()`: `runs[i - 1].len > runs[i].len` |
1031 | /// 2. for every `i` in `2..runs.len()`: `runs[i - 2].len > runs[i - 1].len + runs[i].len` |
1032 | /// |
1033 | /// The invariants ensure that the total running time is *O*(*n* \* log(*n*)) worst-case. |
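///
/// A hedged sketch (not a doctest) of how a caller might provide the allocation callbacks; the
/// real callbacks are supplied by the `alloc` crate when `slice::sort` calls into this function.
/// The `Vec`-based helpers below are assumptions for illustration only.
///
/// ```ignore
/// fn alloc_buf<T>(len: usize) -> *mut T {
///     let mut buf = Vec::<T>::with_capacity(len);
///     let ptr = buf.as_mut_ptr();
///     core::mem::forget(buf);
///     ptr
/// }
///
/// fn dealloc_buf<T>(ptr: *mut T, len: usize) {
///     // SAFETY: `ptr` came from `alloc_buf` with capacity `len` and length 0.
///     unsafe { drop(Vec::from_raw_parts(ptr, 0, len)) };
/// }
///
/// let mut v = [5, 1, 4, 2, 3, 9, 8, 7, 6, 0];
/// merge_sort(
///     &mut v,
///     &mut |a: &i32, b: &i32| a < b,
///     alloc_buf::<i32>,
///     dealloc_buf::<i32>,
///     alloc_buf::<TimSortRun>,
///     dealloc_buf::<TimSortRun>,
/// );
/// assert_eq!(v, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
/// ```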
1034 | pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>( |
1035 | v: &mut [T], |
1036 | is_less: &mut CmpF, |
1037 | elem_alloc_fn: ElemAllocF, |
1038 | elem_dealloc_fn: ElemDeallocF, |
1039 | run_alloc_fn: RunAllocF, |
1040 | run_dealloc_fn: RunDeallocF, |
1041 | ) where |
1042 | CmpF: FnMut(&T, &T) -> bool, |
1043 | ElemAllocF: Fn(usize) -> *mut T, |
1044 | ElemDeallocF: Fn(*mut T, usize), |
1045 | RunAllocF: Fn(usize) -> *mut TimSortRun, |
1046 | RunDeallocF: Fn(*mut TimSortRun, usize), |
1047 | { |
1048 | // Slices of up to this length get sorted using insertion sort. |
1049 | const MAX_INSERTION: usize = 20; |
1050 | |
1051 | // The caller should have already checked that. |
1052 | debug_assert!(!T::IS_ZST); |
1053 | |
1054 | let len = v.len(); |
1055 | |
1056 | // Short arrays get sorted in-place via insertion sort to avoid allocations. |
1057 | if len <= MAX_INSERTION { |
1058 | if len >= 2 { |
1059 | insertion_sort_shift_left(v, 1, is_less); |
1060 | } |
1061 | return; |
1062 | } |
1063 | |
1064 | // Allocate a buffer to use as scratch memory. We keep the length 0 so we can keep in it |
1065 | // shallow copies of the contents of `v` without risking the dtors running on copies if |
1066 | // `is_less` panics. When merging two sorted runs, this buffer holds a copy of the shorter run, |
1067 | // which will always have length at most `len / 2`. |
1068 | let buf = BufGuard::new(len / 2, elem_alloc_fn, elem_dealloc_fn); |
1069 | let buf_ptr = buf.buf_ptr.as_ptr(); |
1070 | |
1071 | let mut runs = RunVec::new(run_alloc_fn, run_dealloc_fn); |
1072 | |
1073 | let mut end = 0; |
1074 | let mut start = 0; |
1075 | |
1076 | // Scan forward. Memory pre-fetching prefers forward scanning vs backwards scanning, and the |
1077 | // code-gen is usually better. For the most sensitive types such as integers, these are merged |
1078 | // bidirectionally at once. So there is no benefit in scanning backwards. |
1079 | while end < len { |
1080 | let (streak_end, was_reversed) = find_streak(&v[start..], is_less); |
1081 | end += streak_end; |
1082 | if was_reversed { |
1083 | v[start..end].reverse(); |
1084 | } |
1085 | |
1086 | // Insert some more elements into the run if it's too short. Insertion sort is faster than |
1087 | // merge sort on short sequences, so this significantly improves performance. |
1088 | end = provide_sorted_batch(v, start, end, is_less); |
1089 | |
1090 | // Push this run onto the stack. |
1091 | runs.push(TimSortRun { start, len: end - start }); |
1092 | start = end; |
1093 | |
1094 | // Merge some pairs of adjacent runs to satisfy the invariants. |
1095 | while let Some(r) = collapse(runs.as_slice(), len) { |
1096 | let left = runs[r]; |
1097 | let right = runs[r + 1]; |
1098 | let merge_slice = &mut v[left.start..right.start + right.len]; |
            // SAFETY: `buf_ptr` must hold enough capacity for the shorter of the two sides, and
            // neither side may be of length 0.
1101 | unsafe { |
1102 | merge(merge_slice, left.len, buf_ptr, is_less); |
1103 | } |
1104 | runs[r + 1] = TimSortRun { start: left.start, len: left.len + right.len }; |
1105 | runs.remove(r); |
1106 | } |
1107 | } |
1108 | |
1109 | // Finally, exactly one run must remain in the stack. |
1110 | debug_assert!(runs.len() == 1 && runs[0].start == 0 && runs[0].len == len); |
1111 | |
1112 | // Examines the stack of runs and identifies the next pair of runs to merge. More specifically, |
1113 | // if `Some(r)` is returned, that means `runs[r]` and `runs[r + 1]` must be merged next. If the |
1114 | // algorithm should continue building a new run instead, `None` is returned. |
1115 | // |
1116 | // TimSort is infamous for its buggy implementations, as described here: |
1117 | // http://envisage-project.eu/timsort-specification-and-verification/ |
1118 | // |
1119 | // The gist of the story is: we must enforce the invariants on the top four runs on the stack. |
1120 | // Enforcing them on just top three is not sufficient to ensure that the invariants will still |
1121 | // hold for *all* runs in the stack. |
1122 | // |
1123 | // This function correctly checks invariants for the top four runs. Additionally, if the top |
1124 | // run starts at index 0, it will always demand a merge operation until the stack is fully |
1125 | // collapsed, in order to complete the sort. |
    #[inline]
1127 | fn collapse(runs: &[TimSortRun], stop: usize) -> Option<usize> { |
1128 | let n = runs.len(); |
1129 | if n >= 2 |
1130 | && (runs[n - 1].start + runs[n - 1].len == stop |
1131 | || runs[n - 2].len <= runs[n - 1].len |
1132 | || (n >= 3 && runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len) |
1133 | || (n >= 4 && runs[n - 4].len <= runs[n - 3].len + runs[n - 2].len)) |
1134 | { |
1135 | if n >= 3 && runs[n - 3].len < runs[n - 1].len { Some(n - 3) } else { Some(n - 2) } |
1136 | } else { |
1137 | None |
1138 | } |
1139 | } |
1140 | |
1141 | // Extremely basic versions of Vec. |
1142 | // Their use is super limited and by having the code here, it allows reuse between the sort |
1143 | // implementations. |
1144 | struct BufGuard<T, ElemDeallocF> |
1145 | where |
1146 | ElemDeallocF: Fn(*mut T, usize), |
1147 | { |
1148 | buf_ptr: ptr::NonNull<T>, |
1149 | capacity: usize, |
1150 | elem_dealloc_fn: ElemDeallocF, |
1151 | } |
1152 | |
1153 | impl<T, ElemDeallocF> BufGuard<T, ElemDeallocF> |
1154 | where |
1155 | ElemDeallocF: Fn(*mut T, usize), |
1156 | { |
1157 | fn new<ElemAllocF>( |
1158 | len: usize, |
1159 | elem_alloc_fn: ElemAllocF, |
1160 | elem_dealloc_fn: ElemDeallocF, |
1161 | ) -> Self |
1162 | where |
1163 | ElemAllocF: Fn(usize) -> *mut T, |
1164 | { |
1165 | Self { |
1166 | buf_ptr: ptr::NonNull::new(elem_alloc_fn(len)).unwrap(), |
1167 | capacity: len, |
1168 | elem_dealloc_fn, |
1169 | } |
1170 | } |
1171 | } |
1172 | |
1173 | impl<T, ElemDeallocF> Drop for BufGuard<T, ElemDeallocF> |
1174 | where |
1175 | ElemDeallocF: Fn(*mut T, usize), |
1176 | { |
1177 | fn drop(&mut self) { |
1178 | (self.elem_dealloc_fn)(self.buf_ptr.as_ptr(), self.capacity); |
1179 | } |
1180 | } |
1181 | |
1182 | struct RunVec<RunAllocF, RunDeallocF> |
1183 | where |
1184 | RunAllocF: Fn(usize) -> *mut TimSortRun, |
1185 | RunDeallocF: Fn(*mut TimSortRun, usize), |
1186 | { |
1187 | buf_ptr: ptr::NonNull<TimSortRun>, |
1188 | capacity: usize, |
1189 | len: usize, |
1190 | run_alloc_fn: RunAllocF, |
1191 | run_dealloc_fn: RunDeallocF, |
1192 | } |
1193 | |
1194 | impl<RunAllocF, RunDeallocF> RunVec<RunAllocF, RunDeallocF> |
1195 | where |
1196 | RunAllocF: Fn(usize) -> *mut TimSortRun, |
1197 | RunDeallocF: Fn(*mut TimSortRun, usize), |
1198 | { |
1199 | fn new(run_alloc_fn: RunAllocF, run_dealloc_fn: RunDeallocF) -> Self { |
1200 | // Most slices can be sorted with at most 16 runs in-flight. |
1201 | const START_RUN_CAPACITY: usize = 16; |
1202 | |
1203 | Self { |
1204 | buf_ptr: ptr::NonNull::new(run_alloc_fn(START_RUN_CAPACITY)).unwrap(), |
1205 | capacity: START_RUN_CAPACITY, |
1206 | len: 0, |
1207 | run_alloc_fn, |
1208 | run_dealloc_fn, |
1209 | } |
1210 | } |
1211 | |
1212 | fn push(&mut self, val: TimSortRun) { |
1213 | if self.len == self.capacity { |
1214 | let old_capacity = self.capacity; |
1215 | let old_buf_ptr = self.buf_ptr.as_ptr(); |
1216 | |
1217 | self.capacity = self.capacity * 2; |
1218 | self.buf_ptr = ptr::NonNull::new((self.run_alloc_fn)(self.capacity)).unwrap(); |
1219 | |
1220 | // SAFETY: buf_ptr new and old were correctly allocated and old_buf_ptr has |
1221 | // old_capacity valid elements. |
1222 | unsafe { |
1223 | ptr::copy_nonoverlapping(old_buf_ptr, self.buf_ptr.as_ptr(), old_capacity); |
1224 | } |
1225 | |
1226 | (self.run_dealloc_fn)(old_buf_ptr, old_capacity); |
1227 | } |
1228 | |
1229 | // SAFETY: The invariant was just checked. |
1230 | unsafe { |
1231 | self.buf_ptr.as_ptr().add(self.len).write(val); |
1232 | } |
1233 | self.len += 1; |
1234 | } |
1235 | |
1236 | fn remove(&mut self, index: usize) { |
1237 | if index >= self.len { |
1238 | panic!("Index out of bounds" ); |
1239 | } |
1240 | |
1241 | // SAFETY: buf_ptr needs to be valid and len invariant upheld. |
1242 | unsafe { |
1243 | // the place we are taking from. |
1244 | let ptr = self.buf_ptr.as_ptr().add(index); |
1245 | |
1246 | // Shift everything down to fill in that spot. |
1247 | ptr::copy(ptr.add(1), ptr, self.len - index - 1); |
1248 | } |
1249 | self.len -= 1; |
1250 | } |
1251 | |
1252 | fn as_slice(&self) -> &[TimSortRun] { |
1253 | // SAFETY: Safe as long as buf_ptr is valid and len invariant was upheld. |
1254 | unsafe { &*ptr::slice_from_raw_parts(self.buf_ptr.as_ptr(), self.len) } |
1255 | } |
1256 | |
1257 | fn len(&self) -> usize { |
1258 | self.len |
1259 | } |
1260 | } |
1261 | |
1262 | impl<RunAllocF, RunDeallocF> core::ops::Index<usize> for RunVec<RunAllocF, RunDeallocF> |
1263 | where |
1264 | RunAllocF: Fn(usize) -> *mut TimSortRun, |
1265 | RunDeallocF: Fn(*mut TimSortRun, usize), |
1266 | { |
1267 | type Output = TimSortRun; |
1268 | |
1269 | fn index(&self, index: usize) -> &Self::Output { |
1270 | if index < self.len { |
1271 | // SAFETY: buf_ptr and len invariant must be upheld. |
1272 | unsafe { |
1273 | return &*(self.buf_ptr.as_ptr().add(index)); |
1274 | } |
1275 | } |
1276 | |
1277 | panic!("Index out of bounds" ); |
1278 | } |
1279 | } |
1280 | |
1281 | impl<RunAllocF, RunDeallocF> core::ops::IndexMut<usize> for RunVec<RunAllocF, RunDeallocF> |
1282 | where |
1283 | RunAllocF: Fn(usize) -> *mut TimSortRun, |
1284 | RunDeallocF: Fn(*mut TimSortRun, usize), |
1285 | { |
1286 | fn index_mut(&mut self, index: usize) -> &mut Self::Output { |
1287 | if index < self.len { |
1288 | // SAFETY: buf_ptr and len invariant must be upheld. |
1289 | unsafe { |
1290 | return &mut *(self.buf_ptr.as_ptr().add(index)); |
1291 | } |
1292 | } |
1293 | |
1294 | panic!("Index out of bounds" ); |
1295 | } |
1296 | } |
1297 | |
1298 | impl<RunAllocF, RunDeallocF> Drop for RunVec<RunAllocF, RunDeallocF> |
1299 | where |
1300 | RunAllocF: Fn(usize) -> *mut TimSortRun, |
1301 | RunDeallocF: Fn(*mut TimSortRun, usize), |
1302 | { |
1303 | fn drop(&mut self) { |
1304 | // As long as TimSortRun is Copy we don't need to drop them individually but just the |
1305 | // whole allocation. |
1306 | (self.run_dealloc_fn)(self.buf_ptr.as_ptr(), self.capacity); |
1307 | } |
1308 | } |
1309 | } |
1310 | |
1311 | /// Internal type used by merge_sort. |
#[derive(Clone, Copy, Debug)]
1313 | pub struct TimSortRun { |
1314 | len: usize, |
1315 | start: usize, |
1316 | } |
1317 | |
/// Takes a range denoted by `start` and `end` that is already sorted, and extends it to the right
/// if necessary, using sorts optimized for smaller ranges such as insertion sort.
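///
/// For illustration (assumed values, not a doctest): a presorted run of 2 elements is shorter
/// than `MIN_INSERTION_RUN`, so it is extended to the right and insertion-sorted.
///
/// ```ignore
/// let mut v = [7, 6, 1, 4, 9, 3, 8, 2, 5, 0, 11, 10];
/// // `v[2..4]` (the elements 1 and 4) is already sorted.
/// let new_end = provide_sorted_batch(&mut v, 2, 4, &mut |a: &i32, b: &i32| a < b);
/// assert_eq!(new_end, 12); // `start + MIN_INSERTION_RUN`, capped at `v.len()`
/// assert!(v[2..new_end].windows(2).all(|w| w[0] <= w[1]));
/// ```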
1320 | fn provide_sorted_batch<T, F>(v: &mut [T], start: usize, mut end: usize, is_less: &mut F) -> usize |
1321 | where |
1322 | F: FnMut(&T, &T) -> bool, |
1323 | { |
1324 | let len: usize = v.len(); |
1325 | assert!(end >= start && end <= len); |
1326 | |
1327 | // This value is a balance between least comparisons and best performance, as |
1328 | // influenced by for example cache locality. |
1329 | const MIN_INSERTION_RUN: usize = 10; |
1330 | |
1331 | // Insert some more elements into the run if it's too short. Insertion sort is faster than |
1332 | // merge sort on short sequences, so this significantly improves performance. |
1333 | let start_end_diff: usize = end - start; |
1334 | |
1335 | if start_end_diff < MIN_INSERTION_RUN && end < len { |
        // v[start..end] are elements that are already sorted in the input. We want to extend
        // the sorted region to the right, so we push up to MIN_INSERTION_RUN - 1 elements to the
        // right, which is more efficient than trying to push those already sorted elements to
        // the left.
        end = cmp::min(start + MIN_INSERTION_RUN, len);
        let presorted_start: usize = cmp::max(start_end_diff, 1);

        insertion_sort_shift_left(&mut v[start..end], presorted_start, is_less);
1343 | } |
1344 | |
1345 | end |
1346 | } |
1347 | |
/// Finds a streak of presorted elements starting at the beginning of the slice. Returns the index
/// of the first element that is not part of said streak, and a bool denoting whether the streak
/// was reversed. Streaks can be increasing or decreasing.
1351 | fn find_streak<T, F>(v: &[T], is_less: &mut F) -> (usize, bool) |
1352 | where |
1353 | F: FnMut(&T, &T) -> bool, |
1354 | { |
1355 | let len = v.len(); |
1356 | |
1357 | if len < 2 { |
1358 | return (len, false); |
1359 | } |
1360 | |
1361 | let mut end = 2; |
1362 | |
    // SAFETY: See the specific comments below.
1364 | unsafe { |
1365 | // SAFETY: We checked that len >= 2, so 0 and 1 are valid indices. |
1366 | let assume_reverse = is_less(v.get_unchecked(1), v.get_unchecked(0)); |
1367 | |
1368 | // SAFETY: We know end >= 2 and check end < len. |
1369 | // From that follows that accessing v at end and end - 1 is safe. |
1370 | if assume_reverse { |
1371 | while end < len && is_less(v.get_unchecked(end), v.get_unchecked(end - 1)) { |
1372 | end += 1; |
1373 | } |
1374 | |
1375 | (end, true) |
1376 | } else { |
1377 | while end < len && !is_less(v.get_unchecked(end), v.get_unchecked(end - 1)) { |
1378 | end += 1; |
1379 | } |
1380 | (end, false) |
1381 | } |
1382 | } |
1383 | } |
1384 | |