use alloc::alloc::{alloc, dealloc};
use core::{alloc::Layout, ptr::NonNull, slice};

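/// A byte-oriented ring buffer backed by a single heap allocation.
///
/// One slot is always kept free as a sentinel so that `head == tail`
/// unambiguously means "empty"; a buffer of capacity `cap` therefore holds at
/// most `cap - 1` bytes.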
pub struct RingBuffer {
    // Safety invariants:
    //
    // 1.
    //    a. `buf` must be a valid allocation of capacity `cap`
    //    b. ...unless `cap == 0`, in which case it is dangling
    // 2.
    //    a. If `tail >= head`, `head..tail` must contain initialized memory.
    //    b. Otherwise, `head..cap` and `0..tail` must be initialized.
    // 3. `head` and `tail` are always in bounds (less than `cap`, or both `0`
    //    when `cap == 0`)
    // 4. `tail` never holds the value `cap`; it wraps around to `0` instead.
    //    In other words, `tail` always points to the slot where the next
    //    element will go (if there is space)
    buf: NonNull<u8>,
    cap: usize,
    head: usize,
    tail: usize,
}

impl RingBuffer {
    pub fn new() -> Self {
        RingBuffer {
            // SAFETY: Upholds invariant 1: with `cap == 0` the pointer may dangle (1b)
            buf: NonNull::dangling(),
            cap: 0,
            // SAFETY: Upholds invariants 2-4
            head: 0,
            tail: 0,
        }
    }

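    /// Returns the number of bytes currently stored in the buffer.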
    pub fn len(&self) -> usize {
        let (x, y) = self.data_slice_lengths();
        x + y
    }

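    /// Returns the number of bytes that can be written before the buffer has
    /// to grow. One slot is reserved for the sentinel, hence the
    /// `saturating_sub(1)`.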
    pub fn free(&self) -> usize {
        let (x, y) = self.free_slice_lengths();
        (x + y).saturating_sub(1)
    }

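    /// Resets the buffer to empty without shrinking the allocation.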
    pub fn clear(&mut self) {
        // SAFETY: Upholds invariant 2, trivially
        // SAFETY: Upholds invariant 3; 0 is always valid
        self.head = 0;
        self.tail = 0;
    }

    pub fn is_empty(&self) -> bool {
        self.head == self.tail
    }

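    /// Ensures there is space for at least `amount` more bytes, growing the
    /// allocation if necessary.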
    pub fn reserve(&mut self, amount: usize) {
        let free = self.free();
        if free >= amount {
            return;
        }

        self.reserve_amortized(amount - free);
    }

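    // Grows to the next power of two that fits the request, plus one slot for
    // the sentinel. For example, reserving 15 bytes in an empty buffer yields
    // a capacity of 17 (see the `smoke` test below).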
    #[inline(never)]
    #[cold]
    fn reserve_amortized(&mut self, amount: usize) {
        // SAFETY: if we were successfully able to construct this layout when we
        // allocated then it's also valid to do so now
        let current_layout = unsafe { Layout::array::<u8>(self.cap).unwrap_unchecked() };

        // Always have at least 1 unused element as the sentinel.
        let new_cap = usize::max(
            self.cap.next_power_of_two(),
            (self.cap + amount).next_power_of_two(),
        ) + 1;

        // Check that the capacity isn't bigger than isize::MAX, the maximum allowed
        // by LLVM. On targets with at least 64-bit pointers this check is skipped,
        // since no allocation can ever get that large.
        #[allow(clippy::assertions_on_constants)]
        {
            debug_assert!(usize::BITS >= 64 || new_cap < isize::MAX as usize);
        }

        let new_layout = Layout::array::<u8>(new_cap)
            .unwrap_or_else(|_| panic!("Could not create layout for u8 array of size {}", new_cap));

        // alloc the new memory region and panic if alloc fails
        // TODO maybe rework this to generate an error?
        let new_buf = unsafe {
            let new_buf = alloc(new_layout);

            NonNull::new(new_buf).expect("Allocating new space for the ringbuffer failed")
        };

        // If we had data before, copy it over to the newly allocated memory region
        if self.cap > 0 {
            let ((s1_ptr, s1_len), (s2_ptr, s2_len)) = self.data_slice_parts();

            unsafe {
                // SAFETY: Upholds invariant 2; we end up populating `0..(s1_len + s2_len)`
                new_buf.as_ptr().copy_from_nonoverlapping(s1_ptr, s1_len);
                new_buf
                    .as_ptr()
                    .add(s1_len)
                    .copy_from_nonoverlapping(s2_ptr, s2_len);
                dealloc(self.buf.as_ptr(), current_layout);
            }

            // SAFETY: Upholds invariant 3; head is 0 and in bounds, and
            // tail = s1_len + s2_len is at most cap - 1, which is less than new_cap
            self.tail = s1_len + s2_len;
            self.head = 0;
        }
        // SAFETY: Upholds invariant 1: the buffer was just allocated correctly
        self.buf = new_buf;
        self.cap = new_cap;
    }

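    /// Appends a single byte to the back of the buffer, growing it if necessary.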
    #[allow(dead_code)]
    pub fn push_back(&mut self, byte: u8) {
        self.reserve(1);

        // SAFETY: Upholds invariant 2 by writing initialized memory
        unsafe { self.buf.as_ptr().add(self.tail).write(byte) };
        // SAFETY: Upholds invariant 3 by wrapping `tail` around
        self.tail = (self.tail + 1) % self.cap;
    }

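    /// Returns the byte at offset `idx` from the oldest byte in the buffer,
    /// or `None` if `idx` is out of bounds.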
    #[allow(dead_code)]
    pub fn get(&self, idx: usize) -> Option<u8> {
        if idx < self.len() {
            // SAFETY: Relies on the memory being initialized and the index being
            // in-bounds (Invariants 2 & 3)
            let idx = (self.head + idx) % self.cap;
            Some(unsafe { self.buf.as_ptr().add(idx).read() })
        } else {
            None
        }
    }

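    /// Appends all bytes from `data` to the back of the buffer, growing it if
    /// necessary.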
    pub fn extend(&mut self, data: &[u8]) {
        let len = data.len();
        let ptr = data.as_ptr();
        if len == 0 {
            return;
        }

        self.reserve(len);

        debug_assert!(self.len() + len <= self.cap - 1);
        debug_assert!(self.free() >= len, "free: {} len: {}", self.free(), len);

        let ((f1_ptr, f1_len), (f2_ptr, f2_len)) = self.free_slice_parts();
        debug_assert!(f1_len + f2_len >= len, "{} + {} < {}", f1_len, f2_len, len);

        let in_f1 = usize::min(len, f1_len);

        let in_f2 = len - in_f1;

        debug_assert!(in_f1 + in_f2 == len);

        unsafe {
            // SAFETY: `in_f1 + in_f2 == len`, so this writes exactly `len` bytes,
            // upholding invariant 2
            if in_f1 > 0 {
                f1_ptr.copy_from_nonoverlapping(ptr, in_f1);
            }
            if in_f2 > 0 {
                f2_ptr.copy_from_nonoverlapping(ptr.add(in_f1), in_f2);
            }
        }
        // SAFETY: Upholds invariant 3 by wrapping `tail` around.
        self.tail = (self.tail + len) % self.cap;
    }

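    /// Discards the `amount` oldest bytes from the front of the buffer.
    /// `amount` is clamped to `len()`.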
    pub fn drop_first_n(&mut self, amount: usize) {
        debug_assert!(amount <= self.len());
        let amount = usize::min(amount, self.len());
        // SAFETY: Upholds invariant 2, since the data region only ever shrinks
        // for amount <= len
        self.head = (self.head + amount) % self.cap;
    }

    // SAFETY: other code relies on this pointing to initialized halves of the buffer only
    fn data_slice_lengths(&self) -> (usize, usize) {
        let len_after_head;
        let len_to_tail;

        // TODO can we do this branchless?
        if self.tail >= self.head {
            len_after_head = self.tail - self.head;
            len_to_tail = 0;
        } else {
            len_after_head = self.cap - self.head;
            len_to_tail = self.tail;
        }
        (len_after_head, len_to_tail)
    }

    // SAFETY: other code relies on this pointing to initialized halves of the buffer only
    fn data_slice_parts(&self) -> ((*const u8, usize), (*const u8, usize)) {
        let (len_after_head, len_to_tail) = self.data_slice_lengths();

        (
            (unsafe { self.buf.as_ptr().add(self.head) }, len_after_head),
            (self.buf.as_ptr(), len_to_tail),
        )
    }
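
    /// Returns the contents of the buffer as two slices: first the data from
    /// `head` towards the end of the allocation, then the part that wrapped
    /// around to the front of it.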
    pub fn as_slices(&self) -> (&[u8], &[u8]) {
        let (s1, s2) = self.data_slice_parts();
        unsafe {
            // SAFETY: relies on data_slice_parts producing pointers to initialized memory
            let s1 = slice::from_raw_parts(s1.0, s1.1);
            let s2 = slice::from_raw_parts(s2.0, s2.1);
            (s1, s2)
        }
    }

    // SAFETY: other code relies on this producing the lengths of the free zones
    // at the beginning/end of the buffer. Everything else must be initialized
    fn free_slice_lengths(&self) -> (usize, usize) {
        let len_to_head;
        let len_after_tail;

        // TODO can we do this branchless?
        if self.tail < self.head {
            len_after_tail = self.head - self.tail;
            len_to_head = 0;
        } else {
            len_after_tail = self.cap - self.tail;
            len_to_head = self.head;
        }
        (len_to_head, len_after_tail)
    }

    // SAFETY: other code relies on this pointing to the free zones; the data
    // after the first one and before the second one must be valid (initialized)
    fn free_slice_parts(&self) -> ((*mut u8, usize), (*mut u8, usize)) {
        let (len_to_head, len_after_tail) = self.free_slice_lengths();

        (
            (unsafe { self.buf.as_ptr().add(self.tail) }, len_after_tail),
            (self.buf.as_ptr(), len_to_head),
        )
    }

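    /// Copies `len` bytes starting at offset `start` from the front of the
    /// buffer and appends them to the back, growing the buffer if necessary.
    /// Panics if `start + len` exceeds `self.len()`.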
    #[allow(dead_code)]
    pub fn extend_from_within(&mut self, start: usize, len: usize) {
        if start + len > self.len() {
            panic!(
                "Calls to this function must respect start ({}) + len ({}) <= self.len() ({})!",
                start,
                len,
                self.len()
            );
        }

        self.reserve(len);

        // SAFETY: Requirements checked:
        // 1. explicitly checked above, resulting in a panic if it does not hold
        // 2. explicitly reserved enough memory
        unsafe { self.extend_from_within_unchecked(start, len) }
    }

    /// SAFETY:
    /// For this to be safe two requirements need to hold:
    /// 1. start + len <= self.len(), so we do not copy uninitialized memory
    /// 2. at least len bytes of free space (as reported by `free()`), so we do
    ///    not write out-of-bounds
    #[warn(unsafe_op_in_unsafe_fn)]
    pub unsafe fn extend_from_within_unchecked(&mut self, start: usize, len: usize) {
        if self.head < self.tail {
            // contiguous data slice |____HDDDDDDDT_____|
            let after_tail = usize::min(len, self.cap - self.tail);
            unsafe {
                self.buf
                    .as_ptr()
                    .add(self.tail)
                    .copy_from_nonoverlapping(self.buf.as_ptr().add(self.head + start), after_tail);
                if after_tail < len {
                    // the destination wrapped around; continue at the front of the buffer
                    self.buf.as_ptr().copy_from_nonoverlapping(
                        self.buf.as_ptr().add(self.head + start + after_tail),
                        len - after_tail,
                    );
                }
            }
        } else {
            // contiguous free slice |DDDT_________HDDDD|
            if self.head + start > self.cap {
                // the source range lies entirely in the wrapped-around part
                let start = (self.head + start) % self.cap;
                unsafe {
                    self.buf
                        .as_ptr()
                        .add(self.tail)
                        .copy_from_nonoverlapping(self.buf.as_ptr().add(start), len)
                }
            } else {
                let after_start = usize::min(len, self.cap - self.head - start);
                unsafe {
                    self.buf.as_ptr().add(self.tail).copy_from_nonoverlapping(
                        self.buf.as_ptr().add(self.head + start),
                        after_start,
                    );
                    if after_start < len {
                        // the source range wrapped around; continue at the front
                        self.buf
                            .as_ptr()
                            .add(self.tail + after_start)
                            .copy_from_nonoverlapping(self.buf.as_ptr(), len - after_start);
                    }
                }
            }
        }

        self.tail = (self.tail + len) % self.cap;
    }

    #[allow(dead_code)]
    /// SAFETY:
    /// Needs start + len <= self.len()
    /// and at least len bytes of free space
    pub unsafe fn extend_from_within_unchecked_branchless(&mut self, start: usize, len: usize) {
        // data slices in raw parts
        let ((s1_ptr, s1_len), (s2_ptr, s2_len)) = self.data_slice_parts();

        debug_assert!(len <= s1_len + s2_len, "{} > {} + {}", len, s1_len, s2_len);

        // calculate the source slices in raw parts
        let start_in_s1 = usize::min(s1_len, start);
        let end_in_s1 = usize::min(s1_len, start + len);
        let m1_ptr = s1_ptr.add(start_in_s1);
        let m1_len = end_in_s1 - start_in_s1;

        debug_assert!(end_in_s1 <= s1_len);
        debug_assert!(start_in_s1 <= s1_len);

        let start_in_s2 = start.saturating_sub(s1_len);
        let end_in_s2 = start_in_s2 + (len - m1_len);
        let m2_ptr = s2_ptr.add(start_in_s2);
        let m2_len = end_in_s2 - start_in_s2;

        debug_assert!(start_in_s2 <= s2_len);
        debug_assert!(end_in_s2 <= s2_len);

        debug_assert_eq!(len, m1_len + m2_len);

        // the free slices; it must hold that f1_len + f2_len >= m1_len + m2_len
        let ((f1_ptr, f1_len), (f2_ptr, f2_len)) = self.free_slice_parts();

        debug_assert!(f1_len + f2_len >= m1_len + m2_len);

        // calculate how many bytes go from which source slice into which free slice
        let m1_in_f1 = usize::min(m1_len, f1_len);
        let m1_in_f2 = m1_len - m1_in_f1;
        let m2_in_f1 = usize::min(f1_len - m1_in_f1, m2_len);
        let m2_in_f2 = m2_len - m2_in_f1;

        debug_assert_eq!(m1_len, m1_in_f1 + m1_in_f2);
        debug_assert_eq!(m2_len, m2_in_f1 + m2_in_f2);
        debug_assert!(f1_len >= m1_in_f1 + m2_in_f1);
        debug_assert!(f2_len >= m1_in_f2 + m2_in_f2);
        debug_assert_eq!(len, m1_in_f1 + m2_in_f1 + m1_in_f2 + m2_in_f2);

        // the writes may end exactly at the one-past-the-end pointer, but never beyond it
        debug_assert!(self.buf.as_ptr().add(self.cap) >= f1_ptr.add(m1_in_f1 + m2_in_f1));
        debug_assert!(self.buf.as_ptr().add(self.cap) >= f2_ptr.add(m1_in_f2 + m2_in_f2));

        // m1 fills f1 before anything spills into f2, so m1 can only land in f2
        // once f1 is full, and m2 can only land in f1 if m1 did not spill over.
        debug_assert!((m1_in_f2 > 0) ^ (m2_in_f1 > 0) || (m1_in_f2 == 0 && m2_in_f1 == 0));

        copy_with_checks(
            m1_ptr, m2_ptr, f1_ptr, f2_ptr, m1_in_f1, m2_in_f1, m1_in_f2, m2_in_f2,
        );
        self.tail = (self.tail + len) % self.cap;
    }
}

impl Drop for RingBuffer {
    fn drop(&mut self) {
        if self.cap == 0 {
            return;
        }

        // SAFETY: if we were successfully able to construct this layout when we
        // allocated then it's also valid to do so now.
        // Relies on invariant 1
        let current_layout: Layout = unsafe { Layout::array::<u8>(self.cap).unwrap_unchecked() };

        unsafe {
            dealloc(self.buf.as_ptr(), current_layout);
        }
    }
}

#[allow(dead_code)]
#[inline(always)]
#[allow(clippy::too_many_arguments)]
unsafe fn copy_without_checks(
    m1_ptr: *const u8,
    m2_ptr: *const u8,
    f1_ptr: *mut u8,
    f2_ptr: *mut u8,
    m1_in_f1: usize,
    m2_in_f1: usize,
    m1_in_f2: usize,
    m2_in_f2: usize,
) {
    f1_ptr.copy_from_nonoverlapping(m1_ptr, m1_in_f1);
    f1_ptr
        .add(m1_in_f1)
        .copy_from_nonoverlapping(m2_ptr, m2_in_f1);

    f2_ptr.copy_from_nonoverlapping(m1_ptr.add(m1_in_f1), m1_in_f2);
    f2_ptr
        .add(m1_in_f2)
        .copy_from_nonoverlapping(m2_ptr.add(m2_in_f1), m2_in_f2);
}

#[allow(dead_code)]
#[inline(always)]
#[allow(clippy::too_many_arguments)]
unsafe fn copy_with_checks(
    m1_ptr: *const u8,
    m2_ptr: *const u8,
    f1_ptr: *mut u8,
    f2_ptr: *mut u8,
    m1_in_f1: usize,
    m2_in_f1: usize,
    m1_in_f2: usize,
    m2_in_f2: usize,
) {
    if m1_in_f1 != 0 {
        f1_ptr.copy_from_nonoverlapping(m1_ptr, m1_in_f1);
    }
    if m2_in_f1 != 0 {
        f1_ptr
            .add(m1_in_f1)
            .copy_from_nonoverlapping(m2_ptr, m2_in_f1);
    }

    if m1_in_f2 != 0 {
        f2_ptr.copy_from_nonoverlapping(m1_ptr.add(m1_in_f1), m1_in_f2);
    }
    if m2_in_f2 != 0 {
        f2_ptr
            .add(m1_in_f2)
            .copy_from_nonoverlapping(m2_ptr.add(m2_in_f1), m2_in_f2);
    }
}

#[allow(dead_code)]
#[inline(always)]
#[allow(clippy::too_many_arguments)]
unsafe fn copy_with_nobranch_check(
    m1_ptr: *const u8,
    m2_ptr: *const u8,
    f1_ptr: *mut u8,
    f2_ptr: *mut u8,
    m1_in_f1: usize,
    m2_in_f1: usize,
    m1_in_f2: usize,
    m2_in_f2: usize,
) {
    let case = (m1_in_f1 > 0) as usize
        | (((m2_in_f1 > 0) as usize) << 1)
        | (((m1_in_f2 > 0) as usize) << 2)
        | (((m2_in_f2 > 0) as usize) << 3);

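    // `m1_in_f2 > 0` and `m2_in_f1 > 0` are mutually exclusive (m1 fills f1
    // before anything spills into f2; see the XOR assertion in the caller),
    // so every case with both bits 1 and 2 set (6, 7, 14, 15) is unreachable.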
    match case {
        0 => {}

        // one bit set
        1 => {
            f1_ptr.copy_from_nonoverlapping(m1_ptr, m1_in_f1);
        }
        2 => {
            f1_ptr.copy_from_nonoverlapping(m2_ptr, m2_in_f1);
        }
        4 => {
            f2_ptr.copy_from_nonoverlapping(m1_ptr, m1_in_f2);
        }
        8 => {
            f2_ptr.copy_from_nonoverlapping(m2_ptr, m2_in_f2);
        }

        // two bits set
        3 => {
            f1_ptr.copy_from_nonoverlapping(m1_ptr, m1_in_f1);
            f1_ptr
                .add(m1_in_f1)
                .copy_from_nonoverlapping(m2_ptr, m2_in_f1);
        }
        5 => {
            f1_ptr.copy_from_nonoverlapping(m1_ptr, m1_in_f1);
            f2_ptr.copy_from_nonoverlapping(m1_ptr.add(m1_in_f1), m1_in_f2);
        }
        6 => core::hint::unreachable_unchecked(),
        7 => core::hint::unreachable_unchecked(),
        9 => {
            f1_ptr.copy_from_nonoverlapping(m1_ptr, m1_in_f1);
            f2_ptr.copy_from_nonoverlapping(m2_ptr, m2_in_f2);
        }
        10 => {
            f1_ptr.copy_from_nonoverlapping(m2_ptr, m2_in_f1);
            f2_ptr.copy_from_nonoverlapping(m2_ptr.add(m2_in_f1), m2_in_f2);
        }
        12 => {
            f2_ptr.copy_from_nonoverlapping(m1_ptr, m1_in_f2);
            f2_ptr
                .add(m1_in_f2)
                .copy_from_nonoverlapping(m2_ptr, m2_in_f2);
        }

        // three bits set
        11 => {
            f1_ptr.copy_from_nonoverlapping(m1_ptr, m1_in_f1);
            f1_ptr
                .add(m1_in_f1)
                .copy_from_nonoverlapping(m2_ptr, m2_in_f1);
            f2_ptr.copy_from_nonoverlapping(m2_ptr.add(m2_in_f1), m2_in_f2);
        }
        13 => {
            f1_ptr.copy_from_nonoverlapping(m1_ptr, m1_in_f1);
            f2_ptr.copy_from_nonoverlapping(m1_ptr.add(m1_in_f1), m1_in_f2);
            f2_ptr
                .add(m1_in_f2)
                .copy_from_nonoverlapping(m2_ptr, m2_in_f2);
        }
        14 => core::hint::unreachable_unchecked(),
        15 => core::hint::unreachable_unchecked(),
        _ => core::hint::unreachable_unchecked(),
    }
}

#[cfg(test)]
mod tests {
    use super::RingBuffer;

    #[test]
    fn smoke() {
        let mut rb = RingBuffer::new();

        rb.reserve(15);
        assert_eq!(17, rb.cap);

        rb.extend(b"0123456789");
        assert_eq!(rb.len(), 10);
        assert_eq!(rb.as_slices().0, b"0123456789");
        assert_eq!(rb.as_slices().1, b"");

        rb.drop_first_n(5);
        assert_eq!(rb.len(), 5);
        assert_eq!(rb.as_slices().0, b"56789");
        assert_eq!(rb.as_slices().1, b"");

        rb.extend_from_within(2, 3);
        assert_eq!(rb.len(), 8);
        assert_eq!(rb.as_slices().0, b"56789789");
        assert_eq!(rb.as_slices().1, b"");

        rb.extend_from_within(0, 3);
        assert_eq!(rb.len(), 11);
        assert_eq!(rb.as_slices().0, b"56789789567");
        assert_eq!(rb.as_slices().1, b"");

        rb.extend_from_within(0, 2);
        assert_eq!(rb.len(), 13);
        assert_eq!(rb.as_slices().0, b"567897895675");
        assert_eq!(rb.as_slices().1, b"6");

        rb.drop_first_n(11);
        assert_eq!(rb.len(), 2);
        assert_eq!(rb.as_slices().0, b"5");
        assert_eq!(rb.as_slices().1, b"6");

        rb.extend(b"0123456789");
        assert_eq!(rb.len(), 12);
        assert_eq!(rb.as_slices().0, b"5");
        assert_eq!(rb.as_slices().1, b"60123456789");

        rb.drop_first_n(11);
        assert_eq!(rb.len(), 1);
        assert_eq!(rb.as_slices().0, b"9");
        assert_eq!(rb.as_slices().1, b"");

        rb.extend(b"0123456789");
        assert_eq!(rb.len(), 11);
        assert_eq!(rb.as_slices().0, b"9012345");
        assert_eq!(rb.as_slices().1, b"6789");
    }
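
    #[test]
    fn push_back_and_get() {
        // Basic coverage for `push_back` and `get`, including wraparound of
        // `tail` once elements have been dropped from the front.
        let mut rb = RingBuffer::new();
        for b in 0..8u8 {
            rb.push_back(b);
        }
        assert_eq!(rb.len(), 8);
        for i in 0..8 {
            assert_eq!(rb.get(i), Some(i as u8));
        }
        assert_eq!(rb.get(8), None);

        rb.drop_first_n(4);
        rb.push_back(42);
        assert_eq!(rb.len(), 5);
        assert_eq!(rb.get(4), Some(42));
    }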

    #[test]
    fn edge_cases() {
        // Fill exactly, then empty, then fill again
        let mut rb = RingBuffer::new();
        rb.reserve(16);
        assert_eq!(17, rb.cap);
        rb.extend(b"0123456789012345");
        assert_eq!(17, rb.cap);
        assert_eq!(16, rb.len());
        assert_eq!(0, rb.free());
        rb.drop_first_n(16);
        assert_eq!(0, rb.len());
        assert_eq!(16, rb.free());
        rb.extend(b"0123456789012345");
        assert_eq!(16, rb.len());
        assert_eq!(0, rb.free());
        assert_eq!(17, rb.cap);
        assert_eq!(1, rb.as_slices().0.len());
        assert_eq!(15, rb.as_slices().1.len());

        rb.clear();

        // data in both slices and then reserve
        rb.extend(b"0123456789012345");
        rb.drop_first_n(8);
        rb.extend(b"67890123");
        assert_eq!(16, rb.len());
        assert_eq!(0, rb.free());
        assert_eq!(17, rb.cap);
        assert_eq!(9, rb.as_slices().0.len());
        assert_eq!(7, rb.as_slices().1.len());
        rb.reserve(1);
        assert_eq!(16, rb.len());
        assert_eq!(16, rb.free());
        assert_eq!(33, rb.cap);
        assert_eq!(16, rb.as_slices().0.len());
        assert_eq!(0, rb.as_slices().1.len());

        rb.clear();

        // fill exactly, then extend from within
        rb.extend(b"0123456789012345");
        rb.extend_from_within(0, 16);
        assert_eq!(32, rb.len());
        assert_eq!(0, rb.free());
        assert_eq!(33, rb.cap);
        assert_eq!(32, rb.as_slices().0.len());
        assert_eq!(0, rb.as_slices().1.len());

        // extend from within cases
        let mut rb = RingBuffer::new();
        rb.reserve(8);
        rb.extend(b"01234567");
        rb.drop_first_n(5);
        rb.extend_from_within(0, 3);
        assert_eq!(4, rb.as_slices().0.len());
        assert_eq!(2, rb.as_slices().1.len());

        rb.drop_first_n(2);
        assert_eq!(2, rb.as_slices().0.len());
        assert_eq!(2, rb.as_slices().1.len());
        rb.extend_from_within(0, 4);
        assert_eq!(2, rb.as_slices().0.len());
        assert_eq!(6, rb.as_slices().1.len());

        rb.drop_first_n(2);
        assert_eq!(6, rb.as_slices().0.len());
        assert_eq!(0, rb.as_slices().1.len());
        rb.drop_first_n(2);
        assert_eq!(4, rb.as_slices().0.len());
        assert_eq!(0, rb.as_slices().1.len());
        rb.extend_from_within(0, 4);
        assert_eq!(7, rb.as_slices().0.len());
        assert_eq!(1, rb.as_slices().1.len());

        let mut rb = RingBuffer::new();
        rb.reserve(8);
        rb.extend(b"11111111");
        rb.drop_first_n(7);
        rb.extend(b"111");
        assert_eq!(2, rb.as_slices().0.len());
        assert_eq!(2, rb.as_slices().1.len());
        rb.extend_from_within(0, 4);
        assert_eq!(b"11", rb.as_slices().0);
        assert_eq!(b"111111", rb.as_slices().1);
    }
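
    #[test]
    fn extend_from_within_branchless() {
        // A minimal check of the branchless variant in the simple contiguous
        // case; its safety requirements (start + len <= self.len() and at
        // least `len` bytes of free space) are satisfied below.
        let mut rb = RingBuffer::new();
        rb.extend(b"0123456789");
        rb.reserve(3);
        // SAFETY: start + len = 5 <= 10 = rb.len(), and rb.free() >= 3
        unsafe { rb.extend_from_within_unchecked_branchless(2, 3) };
        assert_eq!(rb.as_slices().0, b"0123456789234");
        assert_eq!(rb.as_slices().1, b"");
    }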
}