// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).

// The mutation operations in this file should panic to prevent undefined behavior
#![allow(clippy::unwrap_used)]
#![allow(clippy::expect_used)]
#![allow(clippy::indexing_slicing)]
#![allow(clippy::panic)]

use super::*;
use crate::ule::*;
use alloc::boxed::Box;
use alloc::vec::Vec;
use core::any;
use core::convert::TryInto;
use core::marker::PhantomData;
use core::ops::Deref;
use core::ops::Range;
use core::{fmt, ptr, slice};

use super::components::LENGTH_WIDTH;
use super::components::MAX_INDEX;
use super::components::MAX_LENGTH;
use super::components::METADATA_WIDTH;

/// A fully-owned [`VarZeroVec`]. This type has no lifetime but has the same
/// internal buffer representation as [`VarZeroVec`], making it cheaply convertible to
/// [`VarZeroVec`] and [`VarZeroSlice`].
///
/// The `F` type parameter is a [`VarZeroVecFormat`] (see its docs for more details), which can be used to select the
/// precise format of the backing buffer with various size and performance tradeoffs. It defaults to [`Index16`].
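///
/// # Example
///
/// A minimal usage sketch (assuming this type is re-exported as
/// `zerovec::vecs::VarZeroVecOwned`, as in the public crate layout):
///
/// ```
/// use zerovec::vecs::VarZeroVecOwned;
///
/// let mut vzv: VarZeroVecOwned<str> = VarZeroVecOwned::new();
/// vzv.push("foo");
/// vzv.push("bar");
///
/// // Reading goes through `Deref<Target = VarZeroSlice<T, F>>`.
/// assert_eq!(vzv.len(), 2);
/// assert_eq!(vzv.get(1), Some("bar"));
/// ```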
pub struct VarZeroVecOwned<T: ?Sized, F = Index16> {
    marker: PhantomData<(Box<T>, F)>,
    // safety invariant: must parse into a valid VarZeroVecComponents
    entire_slice: Vec<u8>,
}

impl<T: ?Sized, F> Clone for VarZeroVecOwned<T, F> {
    fn clone(&self) -> Self {
        VarZeroVecOwned {
            marker: self.marker,
            entire_slice: self.entire_slice.clone(),
        }
    }
}

// The effect of a shift on the indices in the varzerovec.
#[derive(PartialEq)]
enum ShiftType {
    Insert,
    Replace,
    Remove,
}

impl<T: VarULE + ?Sized, F: VarZeroVecFormat> Deref for VarZeroVecOwned<T, F> {
    type Target = VarZeroSlice<T, F>;
    fn deref(&self) -> &VarZeroSlice<T, F> {
        self.as_slice()
    }
}

impl<T: VarULE + ?Sized, F> VarZeroVecOwned<T, F> {
    /// Construct an empty VarZeroVecOwned
    pub fn new() -> Self {
        Self {
            marker: PhantomData,
            entire_slice: Vec::new(),
        }
    }
}

impl<T: VarULE + ?Sized, F: VarZeroVecFormat> VarZeroVecOwned<T, F> {
    /// Construct a VarZeroVecOwned from a [`VarZeroSlice`] by cloning the internal data
    pub fn from_slice(slice: &VarZeroSlice<T, F>) -> Self {
        Self {
            marker: PhantomData,
            entire_slice: slice.as_bytes().into(),
        }
    }

    /// Construct a VarZeroVecOwned from a list of elements
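    ///
    /// A minimal sketch (the `zerovec::vecs::VarZeroVecOwned` path is assumed):
    ///
    /// ```
    /// use zerovec::vecs::VarZeroVecOwned;
    ///
    /// let vzv = VarZeroVecOwned::<str>::try_from_elements(&["one", "two", "three"])
    ///     .expect("elements fit in the index size");
    /// assert_eq!(vzv.len(), 3);
    /// assert_eq!(vzv.get(0), Some("one"));
    /// ```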
    pub fn try_from_elements<A>(elements: &[A]) -> Result<Self, &'static str>
    where
        A: EncodeAsVarULE<T>,
    {
        Ok(if elements.is_empty() {
            Self::from_slice(VarZeroSlice::new_empty())
        } else {
            Self {
                marker: PhantomData,
                // TODO(#1410): Rethink length errors in VZV.
                entire_slice: components::get_serializable_bytes_non_empty::<T, A, F>(elements)
                    .ok_or(
                        "Attempted to build VarZeroVec out of elements that \
                         cumulatively are larger than a u32 in size",
                    )?,
            }
        })
    }

    /// Obtain this `VarZeroVecOwned` as a [`VarZeroSlice`]
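    ///
    /// A sketch of the conversion (the `zerovec::vecs::VarZeroVecOwned` and
    /// `zerovec::VarZeroSlice` paths are assumed):
    ///
    /// ```
    /// use zerovec::vecs::VarZeroVecOwned;
    /// use zerovec::VarZeroSlice;
    ///
    /// let owned = VarZeroVecOwned::<str>::try_from_elements(&["a", "bc"]).unwrap();
    /// let slice: &VarZeroSlice<str> = owned.as_slice();
    /// assert_eq!(slice.len(), 2);
    /// ```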
    pub fn as_slice(&self) -> &VarZeroSlice<T, F> {
        let slice: &[u8] = &self.entire_slice;
        unsafe {
            // safety: the slice is known to come from a valid parsed VZV
            VarZeroSlice::from_byte_slice_unchecked(slice)
        }
    }

    /// Allocate a buffer with enough capacity for `capacity`
    /// elements. Since `T` can be arbitrarily sized, this just
    /// allocates enough space for 4-byte `T`s.
    pub(crate) fn with_capacity(capacity: usize) -> Self {
        Self {
            marker: PhantomData,
            entire_slice: Vec::with_capacity(capacity * (F::INDEX_WIDTH + 4)),
        }
    }

    /// Reserve space for `capacity` additional
    /// elements. Since `T` can be arbitrarily sized, this just
    /// reserves enough space for 4-byte `T`s.
    pub(crate) fn reserve(&mut self, capacity: usize) {
        self.entire_slice.reserve(capacity * (F::INDEX_WIDTH + 4))
    }

    /// Get the position of a specific element in the data segment.
    ///
    /// If `idx == self.len()`, it will return the size of the data segment (where a new element would go).
    ///
    /// ## Safety
    /// `idx <= self.len()` and `self.as_encoded_bytes()` is well-formed.
    unsafe fn element_position_unchecked(&self, idx: usize) -> usize {
        let len = self.len();
        let out = if idx == len {
            self.entire_slice.len() - LENGTH_WIDTH - METADATA_WIDTH - (F::INDEX_WIDTH * len)
        } else {
            F::rawbytes_to_usize(*self.index_data(idx))
        };
        debug_assert!(
            out + LENGTH_WIDTH + METADATA_WIDTH + len * F::INDEX_WIDTH <= self.entire_slice.len()
        );
        out
    }

    /// Get the range of a specific element in the data segment.
    ///
    /// ## Safety
    /// `idx < self.len()` and `self.as_encoded_bytes()` is well-formed.
    unsafe fn element_range_unchecked(&self, idx: usize) -> core::ops::Range<usize> {
        let start = self.element_position_unchecked(idx);
        let end = self.element_position_unchecked(idx + 1);
        debug_assert!(start <= end, "{start} > {end}");
        start..end
    }

    /// Set the number of elements in the list without any checks.
    ///
    /// ## Safety
    /// No safe functions may be called until `self.as_encoded_bytes()` is well-formed.
    unsafe fn set_len(&mut self, len: usize) {
        assert!(len <= MAX_LENGTH);
        let len_bytes = len.to_le_bytes();
        self.entire_slice[0..LENGTH_WIDTH].copy_from_slice(&len_bytes[0..LENGTH_WIDTH]);
        // Double-check that the length fits in the length field
        assert_eq!(len_bytes[LENGTH_WIDTH..].iter().sum::<u8>(), 0);
    }

    fn index_range(index: usize) -> Range<usize> {
        let pos = LENGTH_WIDTH + METADATA_WIDTH + F::INDEX_WIDTH * index;
        pos..pos + F::INDEX_WIDTH
    }

    /// Return the raw bytes representing the given `index`.
    ///
    /// ## Safety
    /// The index must be valid, and self.as_encoded_bytes() must be well-formed
    unsafe fn index_data(&self, index: usize) -> &F::RawBytes {
        &F::RawBytes::from_byte_slice_unchecked(&self.entire_slice[Self::index_range(index)])[0]
    }

    /// Return the mutable slice representing the given `index`.
    ///
    /// ## Safety
    /// The index must be valid. self.as_encoded_bytes() must have allocated space
    /// for this index, but need not have its length appropriately set.
    unsafe fn index_data_mut(&mut self, index: usize) -> &mut F::RawBytes {
        let ptr = self.entire_slice.as_mut_ptr();
        let range = Self::index_range(index);

        // Doing this instead of just `get_unchecked_mut()` because it's unclear
        // if `get_unchecked_mut()` can be called out of bounds on a slice even
        // if we know the buffer is larger.
        let data = slice::from_raw_parts_mut(ptr.add(range.start), F::INDEX_WIDTH);

        &mut F::rawbytes_from_byte_slice_unchecked_mut(data)[0]
    }

    /// Shift the indices starting with and after `starting_index` by the provided `amount`.
    ///
    /// ## Safety
    /// Adding `amount` to each index after `starting_index` must not result in the slice becoming malformed.
    /// The length of the slice must be correctly set.
    unsafe fn shift_indices(&mut self, starting_index: usize, amount: i32) {
        let len = self.len();
        let indices = F::rawbytes_from_byte_slice_unchecked_mut(
            &mut self.entire_slice[LENGTH_WIDTH + METADATA_WIDTH
                ..LENGTH_WIDTH + METADATA_WIDTH + F::INDEX_WIDTH * len],
        );
        for idx in &mut indices[starting_index..] {
            let mut new_idx = F::rawbytes_to_usize(*idx);
            if amount > 0 {
                new_idx = new_idx.checked_add(amount.try_into().unwrap()).unwrap();
            } else {
                new_idx = new_idx.checked_sub((-amount).try_into().unwrap()).unwrap();
            }
            *idx = F::usize_to_rawbytes(new_idx);
        }
    }

    /// Get this [`VarZeroVecOwned`] as a borrowed [`VarZeroVec`]
    ///
    /// If you wish to repeatedly call methods on this [`VarZeroVecOwned`],
    /// it is more efficient to perform this conversion first
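    ///
    /// A sketch of the conversion (the `zerovec::vecs::VarZeroVecOwned` path is assumed):
    ///
    /// ```
    /// use zerovec::vecs::VarZeroVecOwned;
    ///
    /// let owned = VarZeroVecOwned::<str>::try_from_elements(&["a", "bc"]).unwrap();
    /// let borrowed = owned.as_varzerovec();
    /// assert_eq!(borrowed.len(), 2);
    /// ```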
    pub fn as_varzerovec<'a>(&'a self) -> VarZeroVec<'a, T, F> {
        self.as_slice().into()
    }

    /// Empty the vector
    pub fn clear(&mut self) {
        self.entire_slice.clear()
    }

    /// Consume this vector and return the backing buffer
    #[inline]
    pub fn into_bytes(self) -> Vec<u8> {
        self.entire_slice
    }

    /// Invalidate and resize the data at an index, optionally inserting or removing the index.
    /// Also updates affected indices and the length.
    /// Returns a mutable slice to the new element data; it does not contain uninitialized memory,
    /// but its contents are indeterminate.
    ///
    /// ## Safety
    /// - `index` must be a valid index, or, if `shift_type == ShiftType::Insert`, `index == self.len()` is allowed.
    /// - `new_size` mustn't result in the data segment growing larger than `F::MAX_VALUE`.
    unsafe fn shift(&mut self, index: usize, new_size: usize, shift_type: ShiftType) -> &mut [u8] {
        // The format of the encoded data is:
        //  - LENGTH_WIDTH bytes of "len" (plus METADATA_WIDTH bytes of metadata)
        //  - len * F::INDEX_WIDTH bytes for an array of indices
        //  - the actual data to which the indices point
        //
        // When inserting or removing an element, the size of the indices segment must be changed,
        // so the data before the target element must be shifted by F::INDEX_WIDTH bytes in
        // addition to the shifting needed for the new element size.
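        //
        // As a hedged illustration (widths shown symbolically, since they depend on `F` and on
        // the LENGTH_WIDTH/METADATA_WIDTH constants), a vec containing ["ab", "c"] is laid out
        // roughly as:
        //
        //   [ len = 2 ][ metadata ][ idx0 = 0 ][ idx1 = 2 ][ b'a' b'b' b'c' ]
        //
        // where each index is an offset into the trailing data segment. Inserting or removing an
        // element therefore moves the data after the affected element and, for Insert/Remove,
        // grows or shrinks the index array by one F::INDEX_WIDTH-sized slot.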
        let len = self.len();
        let slice_len = self.entire_slice.len();

        let prev_element = match shift_type {
            ShiftType::Insert => {
                let pos = self.element_position_unchecked(index);
                // In the case of an insert, there's no previous element,
                // so it's an empty range at the new position.
                pos..pos
            }
            _ => self.element_range_unchecked(index),
        };

        // How much shifting must be done in bytes due to removal/insertion of an index.
        let index_shift: i64 = match shift_type {
            ShiftType::Insert => F::INDEX_WIDTH as i64,
            ShiftType::Replace => 0,
            ShiftType::Remove => -(F::INDEX_WIDTH as i64),
        };
        // The total shift in byte size of the owned slice.
        let shift: i64 =
            new_size as i64 - (prev_element.end - prev_element.start) as i64 + index_shift;
        let new_slice_len = slice_len.wrapping_add(shift as usize);
        if shift > 0 {
            if new_slice_len > F::MAX_VALUE as usize {
                panic!(
                    "Attempted to grow VarZeroVec to an encoded size that does not fit within the length size used by {}",
                    any::type_name::<F>()
                );
            }
            self.entire_slice.resize(new_slice_len, 0);
        }

        // Now that we've ensured there's enough space, we can shift the data around.
        {
            // Note: There are no references introduced between pointer creation and pointer use, and all
            // raw pointers are derived from a single &mut. This preserves pointer provenance.
            let slice_range = self.entire_slice.as_mut_ptr_range();
            let old_slice_end = slice_range.start.add(slice_len);
            let data_start = slice_range
                .start
                .add(LENGTH_WIDTH + METADATA_WIDTH + len * F::INDEX_WIDTH);
            let prev_element_p =
                data_start.add(prev_element.start)..data_start.add(prev_element.end);

            // The memory range of the affected index.
            // When inserting: where the new index goes.
            // When removing: where the index being removed is.
            // When replacing: unused.
            let index_range = {
                let index_start = slice_range
                    .start
                    .add(LENGTH_WIDTH + METADATA_WIDTH + F::INDEX_WIDTH * index);
                index_start..index_start.add(F::INDEX_WIDTH)
            };

            unsafe fn shift_bytes(block: Range<*const u8>, to: *mut u8) {
                debug_assert!(block.end >= block.start);
                ptr::copy(block.start, to, block.end.offset_from(block.start) as usize);
            }

            if shift_type == ShiftType::Remove {
                // Move the data before the element back by F::INDEX_WIDTH bytes to remove the index.
                shift_bytes(index_range.end..prev_element_p.start, index_range.start);
            }

            // Shift data after the element to its new position.
            shift_bytes(
                prev_element_p.end..old_slice_end,
                prev_element_p
                    .start
                    .offset((new_size as i64 + index_shift) as isize),
            );

            let first_affected_index = match shift_type {
                ShiftType::Insert => {
                    // Move data before the element forward by F::INDEX_WIDTH bytes to make space for a new index.
                    shift_bytes(index_range.start..prev_element_p.start, index_range.end);

                    *self.index_data_mut(index) = F::usize_to_rawbytes(prev_element.start);
                    self.set_len(len + 1);
                    index + 1
                }
                ShiftType::Remove => {
                    self.set_len(len - 1);
                    index
                }
                ShiftType::Replace => index + 1,
            };
            // No raw pointer use should occur after this point (because of self.index_data and self.set_len).

            // Set the new slice length. This must be done after shifting data around to avoid uninitialized data.
            self.entire_slice.set_len(new_slice_len);

            // Shift the affected indices.
            self.shift_indices(first_affected_index, (shift - index_shift) as i32);
        };

        debug_assert!(self.verify_integrity());

        // Return a mut slice to the new element data.
        let element_pos = LENGTH_WIDTH
            + METADATA_WIDTH
            + self.len() * F::INDEX_WIDTH
            + self.element_position_unchecked(index);
        &mut self.entire_slice[element_pos..element_pos + new_size]
    }

    /// Checks the internal invariants of the vec to ensure safe code will not cause UB.
    /// Returns whether integrity was verified.
    ///
    /// Note: an index is valid if it doesn't point past the end of the data segment and is
    /// less than or equal to all subsequent indices. Index values are offsets into the data
    /// segment only; they do not include the length and index segments that precede it.
    fn verify_integrity(&self) -> bool {
        if self.is_empty() && !self.entire_slice.is_empty() {
            return false;
        }
        let slice_len = self.entire_slice.len();
        match slice_len {
            0 => return true,
            1..=3 => return false,
            _ => (),
        }
        let len = unsafe {
            RawBytesULE::<LENGTH_WIDTH>::from_byte_slice_unchecked(
                &self.entire_slice[..LENGTH_WIDTH],
            )[0]
            .as_unsigned_int()
        };
        if len == 0 {
            // An empty vec must have an empty slice: there is only a single valid byte representation.
            return false;
        }
        if slice_len < LENGTH_WIDTH + METADATA_WIDTH + len as usize * F::INDEX_WIDTH {
            // Not enough room for the indices.
            return false;
        }
        let data_len =
            self.entire_slice.len() - LENGTH_WIDTH - METADATA_WIDTH - len as usize * F::INDEX_WIDTH;
        if data_len > MAX_INDEX {
            // The data segment is too long.
            return false;
        }

        // Test index validity.
        let indices = unsafe {
            F::RawBytes::from_byte_slice_unchecked(
                &self.entire_slice[LENGTH_WIDTH + METADATA_WIDTH
                    ..LENGTH_WIDTH + METADATA_WIDTH + len as usize * F::INDEX_WIDTH],
            )
        };
        for idx in indices {
            if F::rawbytes_to_usize(*idx) > data_len {
                // Indices must not point past the data segment.
                return false;
            }
        }
        for window in indices.windows(2) {
            if F::rawbytes_to_usize(window[0]) > F::rawbytes_to_usize(window[1]) {
                // Indices must be in non-decreasing order.
                return false;
            }
        }
        true
    }

    /// Append an element to the end of this vector
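    ///
    /// A minimal sketch (the `zerovec::vecs::VarZeroVecOwned` path is assumed):
    ///
    /// ```
    /// use zerovec::vecs::VarZeroVecOwned;
    ///
    /// let mut vzv = VarZeroVecOwned::<str>::new();
    /// vzv.push("hello");
    /// vzv.push("world");
    /// assert_eq!(vzv.get(1), Some("world"));
    /// ```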
    pub fn push<A: EncodeAsVarULE<T> + ?Sized>(&mut self, element: &A) {
        self.insert(self.len(), element)
    }

    /// Insert an element at position `index`
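    ///
    /// Panics if `index > self.len()`.
    ///
    /// A sketch (the `zerovec::vecs::VarZeroVecOwned` path is assumed):
    ///
    /// ```
    /// use zerovec::vecs::VarZeroVecOwned;
    ///
    /// let mut vzv = VarZeroVecOwned::<str>::try_from_elements(&["a", "c"]).unwrap();
    /// vzv.insert(1, "b");
    /// assert_eq!(vzv.get(1), Some("b"));
    /// assert_eq!(vzv.len(), 3);
    /// ```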
    pub fn insert<A: EncodeAsVarULE<T> + ?Sized>(&mut self, index: usize, element: &A) {
        let len = self.len();
        if index > len {
            panic!("Called out-of-bounds insert() on VarZeroVec, index {index} len {len}");
        }

        let value_len = element.encode_var_ule_len();

        if len == 0 {
            let header_len = LENGTH_WIDTH + METADATA_WIDTH + F::INDEX_WIDTH;
            let cap = header_len + value_len;
            self.entire_slice.resize(cap, 0);
            self.entire_slice[0] = 1; // set length
            element.encode_var_ule_write(&mut self.entire_slice[header_len..]);
            return;
        }

        assert!(value_len < MAX_INDEX);
        unsafe {
            let place = self.shift(index, value_len, ShiftType::Insert);
            element.encode_var_ule_write(place);
        }
    }

    /// Remove the element at position `index`
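    ///
    /// Panics if `index >= self.len()`.
    ///
    /// A sketch (the `zerovec::vecs::VarZeroVecOwned` path is assumed):
    ///
    /// ```
    /// use zerovec::vecs::VarZeroVecOwned;
    ///
    /// let mut vzv = VarZeroVecOwned::<str>::try_from_elements(&["a", "b", "c"]).unwrap();
    /// vzv.remove(1);
    /// assert_eq!(vzv.len(), 2);
    /// assert_eq!(vzv.get(1), Some("c"));
    /// ```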
    pub fn remove(&mut self, index: usize) {
        let len = self.len();
        if index >= len {
            panic!("Called out-of-bounds remove() on VarZeroVec, index {index} len {len}");
        }
        if len == 1 {
            // This is removing the last element. Set the slice to empty to ensure all empty vecs have empty data slices.
            self.entire_slice.clear();
            return;
        }
        unsafe {
            self.shift(index, 0, ShiftType::Remove);
        }
    }

    /// Replace the element at position `index` with another
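    ///
    /// Panics if `index >= self.len()`.
    ///
    /// A sketch (the `zerovec::vecs::VarZeroVecOwned` path is assumed):
    ///
    /// ```
    /// use zerovec::vecs::VarZeroVecOwned;
    ///
    /// let mut vzv = VarZeroVecOwned::<str>::try_from_elements(&["a", "b"]).unwrap();
    /// vzv.replace(1, "beta");
    /// assert_eq!(vzv.get(1), Some("beta"));
    /// ```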
    pub fn replace<A: EncodeAsVarULE<T> + ?Sized>(&mut self, index: usize, element: &A) {
        let len = self.len();
        if index >= len {
            panic!("Called out-of-bounds replace() on VarZeroVec, index {index} len {len}");
        }

        let value_len = element.encode_var_ule_len();

        assert!(value_len < MAX_INDEX);
        unsafe {
            let place = self.shift(index, value_len, ShiftType::Replace);
            element.encode_var_ule_write(place);
        }
    }
}

impl<T: VarULE + ?Sized, F: VarZeroVecFormat> fmt::Debug for VarZeroVecOwned<T, F>
where
    T: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        VarZeroSlice::fmt(self, f)
    }
}

impl<T: VarULE + ?Sized, F> Default for VarZeroVecOwned<T, F> {
    fn default() -> Self {
        Self::new()
    }
}

impl<T, A, F> PartialEq<&'_ [A]> for VarZeroVecOwned<T, F>
where
    T: VarULE + ?Sized,
    T: PartialEq,
    A: AsRef<T>,
    F: VarZeroVecFormat,
{
    #[inline]
    fn eq(&self, other: &&[A]) -> bool {
        self.iter().eq(other.iter().map(|t: &A| t.as_ref()))
    }
}

impl<'a, T: ?Sized + VarULE, F: VarZeroVecFormat> From<&'a VarZeroSlice<T, F>>
    for VarZeroVecOwned<T, F>
{
    fn from(other: &'a VarZeroSlice<T, F>) -> Self {
        Self::from_slice(other)
    }
}

#[cfg(test)]
mod test {
    use super::VarZeroVecOwned;
    #[test]
    fn test_insert_integrity() {
        let mut items: Vec<String> = Vec::new();
        let mut zerovec = VarZeroVecOwned::<str>::new();

        // Insert into an empty vec.
        items.insert(0, "1234567890".into());
        zerovec.insert(0, "1234567890");
        assert_eq!(zerovec, &*items);

        zerovec.insert(1, "foo3");
        items.insert(1, "foo3".into());
        assert_eq!(zerovec, &*items);

        // Insert at the end.
        items.insert(items.len(), "qwertyuiop".into());
        zerovec.insert(zerovec.len(), "qwertyuiop");
        assert_eq!(zerovec, &*items);

        items.insert(0, "asdfghjkl;".into());
        zerovec.insert(0, "asdfghjkl;");
        assert_eq!(zerovec, &*items);

        items.insert(2, "".into());
        zerovec.insert(2, "");
        assert_eq!(zerovec, &*items);
    }

    #[test]
    // ensure that inserting empty items works
    fn test_empty_inserts() {
        let mut items: Vec<String> = Vec::new();
        let mut zerovec = VarZeroVecOwned::<str>::new();

        // Insert into an empty vec.
        items.insert(0, "".into());
        zerovec.insert(0, "");
        assert_eq!(zerovec, &*items);

        items.insert(0, "".into());
        zerovec.insert(0, "");
        assert_eq!(zerovec, &*items);

        items.insert(0, "1234567890".into());
        zerovec.insert(0, "1234567890");
        assert_eq!(zerovec, &*items);

        items.insert(0, "".into());
        zerovec.insert(0, "");
        assert_eq!(zerovec, &*items);
    }

    #[test]
    fn test_small_insert_integrity() {
        // Tests that insert() works even when there
        // is not enough space for the new index in entire_slice.len()
        let mut items: Vec<String> = Vec::new();
        let mut zerovec = VarZeroVecOwned::<str>::new();

        // Insert into an empty vec.
        items.insert(0, "abc".into());
        zerovec.insert(0, "abc");
        assert_eq!(zerovec, &*items);

        zerovec.insert(1, "def");
        items.insert(1, "def".into());
        assert_eq!(zerovec, &*items);
    }

    #[test]
    #[should_panic]
    fn test_insert_past_end() {
        VarZeroVecOwned::<str>::new().insert(1, "");
    }

    #[test]
    fn test_remove_integrity() {
        let mut items: Vec<&str> = vec!["apples", "bananas", "eeples", "", "baneenees", "five", ""];
        let mut zerovec = VarZeroVecOwned::<str>::try_from_elements(&items).unwrap();

        for index in [0, 2, 4, 0, 1, 1, 0] {
            items.remove(index);
            zerovec.remove(index);
            assert_eq!(zerovec, &*items, "index {}, len {}", index, items.len());
        }
    }

    #[test]
    fn test_removing_last_element_clears() {
        let mut zerovec = VarZeroVecOwned::<str>::try_from_elements(&["buy some apples"]).unwrap();
        assert!(!zerovec.as_bytes().is_empty());
        zerovec.remove(0);
        assert!(zerovec.as_bytes().is_empty());
    }

    #[test]
    #[should_panic]
    fn test_remove_past_end() {
        VarZeroVecOwned::<str>::new().remove(0);
    }

    #[test]
    fn test_replace_integrity() {
        let mut items: Vec<&str> = vec!["apples", "bananas", "eeples", "", "baneenees", "five", ""];
        let mut zerovec = VarZeroVecOwned::<str>::try_from_elements(&items).unwrap();

        // Replace with an element of the same size (and the first element)
        items[0] = "blablah";
        zerovec.replace(0, "blablah");
        assert_eq!(zerovec, &*items);

        // Replace with a smaller element
        items[1] = "twily";
        zerovec.replace(1, "twily");
        assert_eq!(zerovec, &*items);

        // Replace an empty element
        items[3] = "aoeuidhtns";
        zerovec.replace(3, "aoeuidhtns");
        assert_eq!(zerovec, &*items);

        // Replace the last element
        items[6] = "0123456789";
        zerovec.replace(6, "0123456789");
        assert_eq!(zerovec, &*items);

        // Replace with an empty element
        items[2] = "";
        zerovec.replace(2, "");
        assert_eq!(zerovec, &*items);
    }

    #[test]
    #[should_panic]
    fn test_replace_past_end() {
        VarZeroVecOwned::<str>::new().replace(0, "");
    }
}