// Copyright © SixtyFPS GmbH <info@slint.dev>
// SPDX-License-Identifier: GPL-3.0-only OR LicenseRef-Slint-Royalty-free-2.0 OR LicenseRef-Slint-Software-3.0

//! module for the SharedVector and related things
#![allow(unsafe_code)]
#![warn(missing_docs)]
use core::fmt::Debug;
use core::mem::MaybeUninit;
use core::ops::Deref;
use core::ptr::NonNull;

use portable_atomic as atomic;

#[repr(C)]
struct SharedVectorHeader {
    refcount: atomic::AtomicIsize,
    size: usize,
    capacity: usize,
}

#[repr(C)]
struct SharedVectorInner<T> {
    header: SharedVectorHeader,
    data: MaybeUninit<T>,
}

fn compute_inner_layout<T>(capacity: usize) -> core::alloc::Layout {
    core::alloc::Layout::new::<SharedVectorHeader>()
        .extend(core::alloc::Layout::array::<T>(capacity).unwrap())
        .unwrap()
        .0
}

unsafe fn drop_inner<T>(mut inner: NonNull<SharedVectorInner<T>>) {
    debug_assert_eq!(inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed), 0);
    let data_ptr = inner.as_mut().data.as_mut_ptr();
    for x in 0..inner.as_ref().header.size {
        core::ptr::drop_in_place(data_ptr.add(x));
    }
    ::alloc::alloc::dealloc(
        inner.as_ptr() as *mut u8,
        compute_inner_layout::<T>(inner.as_ref().header.capacity),
    )
}

/// Allocate the memory for the SharedVector with the given capacity. Return the inner with size 0 and refcount set to 1
fn alloc_with_capacity<T>(capacity: usize) -> NonNull<SharedVectorInner<T>> {
    let ptr = unsafe { ::alloc::alloc::alloc(compute_inner_layout::<T>(capacity)) };
    assert!(!ptr.is_null(), "allocation of {capacity:?} bytes failed");
    unsafe {
        core::ptr::write(
            ptr as *mut SharedVectorHeader,
            SharedVectorHeader { refcount: 1.into(), size: 0, capacity },
        );
    }
    NonNull::new(ptr).unwrap().cast()
}

/// Return a new capacity suitable for this vector
/// Loosely based on alloc::raw_vec::RawVec::grow_amortized.
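///
/// For example (a worked instance of the formula below): growing a vector of
/// one-byte elements from capacity 2 to a required capacity of 3 yields
/// `max(8, max(2 * 2, 3)) = 8`, i.e. amortized doubling clamped to the
/// minimum non-zero capacity of 8 for `elem_size == 1`.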
fn capacity_for_grow(current_cap: usize, required_cap: usize, elem_size: usize) -> usize {
    if current_cap >= required_cap {
        return current_cap;
    }
    let cap = core::cmp::max(current_cap * 2, required_cap);
    let min_non_zero_cap = if elem_size == 1 {
        8
    } else if elem_size <= 1024 {
        4
    } else {
        1
    };
    core::cmp::max(min_non_zero_cap, cap)
}

#[repr(C)]
/// SharedVector holds a reference-counted read-only copy of `[T]`.
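///
/// Clones share the same buffer; mutating methods copy it first when it is
/// shared (copy-on-write). A minimal sketch, using the crate as
/// `i_slint_core` like the `resize` doctest below:
///
/// ```
/// use i_slint_core::SharedVector;
/// let a = SharedVector::from_slice(&[1, 2, 3]);
/// let mut b = a.clone(); // shares the buffer, only bumps the refcount
/// b.push(4); // detaches: `b` now owns a copy, `a` is untouched
/// assert_eq!(a.as_slice(), &[1, 2, 3]);
/// assert_eq!(b.as_slice(), &[1, 2, 3, 4]);
/// ```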
pub struct SharedVector<T> {
    inner: NonNull<SharedVectorInner<T>>,
}

// Safety: We use atomic reference counting, and if T is Send and Sync, we can send the vector to another thread
unsafe impl<T: Send + Sync> Send for SharedVector<T> {}
// Safety: We use atomic reference counting, and if T is Send and Sync, we can access the vector from multiple threads
unsafe impl<T: Send + Sync> Sync for SharedVector<T> {}

impl<T> Drop for SharedVector<T> {
    fn drop(&mut self) {
        unsafe {
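            // A negative refcount marks the static SHARED_NULL used by Default; it must never be freed.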
            if self
                .inner
                .cast::<SharedVectorHeader>()
                .as_ref()
                .refcount
                .load(atomic::Ordering::Relaxed)
                < 0
            {
                return;
            }
            if self.inner.as_ref().header.refcount.fetch_sub(1, atomic::Ordering::SeqCst) == 1 {
                drop_inner(self.inner)
            }
        }
    }
}

impl<T> Clone for SharedVector<T> {
    fn clone(&self) -> Self {
        unsafe {
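            // The static SHARED_NULL has a negative refcount and is never counted; only bump real allocations.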
            if self
                .inner
                .cast::<SharedVectorHeader>()
                .as_ref()
                .refcount
                .load(atomic::Ordering::Relaxed)
                > 0
            {
                self.inner.as_ref().header.refcount.fetch_add(1, atomic::Ordering::SeqCst);
            }
            SharedVector { inner: self.inner }
        }
    }
}

impl<T> SharedVector<T> {
    /// Create a new empty array with a pre-allocated capacity in number of items
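    ///
    /// A small sketch (crate used as `i_slint_core`, as in the `resize` doctest):
    /// ```
    /// use i_slint_core::SharedVector;
    /// let mut vec = SharedVector::<u32>::with_capacity(4);
    /// assert!(vec.is_empty());
    /// vec.push(1); // fits within the pre-allocated capacity
    /// assert_eq!(vec.as_slice(), &[1]);
    /// ```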
    pub fn with_capacity(capacity: usize) -> Self {
        Self { inner: alloc_with_capacity(capacity) }
    }

    fn as_ptr(&self) -> *const T {
        unsafe { self.inner.as_ref().data.as_ptr() }
    }

    /// Number of elements in the array
    pub fn len(&self) -> usize {
        unsafe { self.inner.cast::<SharedVectorHeader>().as_ref().size }
    }

    /// Return true if the SharedVector is empty
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Return a slice to the array
    pub fn as_slice(&self) -> &[T] {
        if self.is_empty() {
            &[]
        } else {
            // Safety: When len > 0, we know that the pointer holds an array of the size of len
            unsafe { core::slice::from_raw_parts(self.as_ptr(), self.len()) }
        }
    }

    /// Returns the number of elements the vector can hold without reallocating, when not shared
    fn capacity(&self) -> usize {
        unsafe { self.inner.cast::<SharedVectorHeader>().as_ref().capacity }
    }
}

impl<T: Clone> SharedVector<T> {
    /// Create a SharedVector from a slice
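    ///
    /// A minimal example:
    /// ```
    /// use i_slint_core::SharedVector;
    /// let vec = SharedVector::from_slice(&[1u32, 2, 3]);
    /// assert_eq!(vec.as_slice(), &[1, 2, 3]);
    /// ```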
    pub fn from_slice(slice: &[T]) -> SharedVector<T> {
        Self::from(slice)
    }

    /// Ensure that the reference count is 1 so the array can be changed.
    /// If that's not the case, the array will be cloned
    fn detach(&mut self, new_capacity: usize) {
        let is_shared =
            unsafe { self.inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed) } != 1;
        if !is_shared && new_capacity <= self.capacity() {
            return;
        }
        let mut new_array = SharedVector::with_capacity(new_capacity);
        core::mem::swap(&mut self.inner, &mut new_array.inner);
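        // The old contents were swapped into `new_array`; move them (or clone
        // them, if the old buffer is still shared) into the fresh allocation below.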
        let mut size = 0;
        for x in new_array.into_iter() {
            assert_ne!(size, new_capacity);
            unsafe {
                core::ptr::write(self.inner.as_mut().data.as_mut_ptr().add(size), x);
                size += 1;
                self.inner.as_mut().header.size = size;
            }
            if size == new_capacity {
                break;
            }
        }
    }

    /// Return a mutable slice to the array. If the array was shared, this will make a copy of the array.
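    ///
    /// Copy-on-write in action (crate used as `i_slint_core`, as in the `resize` doctest):
    /// ```
    /// use i_slint_core::SharedVector;
    /// let mut vec = SharedVector::from_slice(&[1u32, 2, 3]);
    /// let copy = vec.clone();
    /// vec.make_mut_slice()[0] = 42; // detaches from `copy` before mutating
    /// assert_eq!(vec.as_slice(), &[42, 2, 3]);
    /// assert_eq!(copy.as_slice(), &[1, 2, 3]);
    /// ```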
    pub fn make_mut_slice(&mut self) -> &mut [T] {
        self.detach(self.len());
        unsafe { core::slice::from_raw_parts_mut(self.as_ptr() as *mut T, self.len()) }
    }

    /// Add an element to the array. If the array was shared, this will make a copy of the array.
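    ///
    /// ```
    /// use i_slint_core::SharedVector;
    /// let mut vec = SharedVector::from_slice(&[1u32, 2]);
    /// vec.push(3);
    /// assert_eq!(vec.as_slice(), &[1, 2, 3]);
    /// ```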
    pub fn push(&mut self, value: T) {
        self.detach(capacity_for_grow(self.capacity(), self.len() + 1, core::mem::size_of::<T>()));
        unsafe {
            core::ptr::write(
                self.inner.as_mut().data.as_mut_ptr().add(self.inner.as_mut().header.size),
                value,
            );
            self.inner.as_mut().header.size += 1;
        }
    }

    /// Removes last element from the array and returns it.
    /// If the array was shared, this will make a copy of the array.
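    ///
    /// ```
    /// use i_slint_core::SharedVector;
    /// let mut vec = SharedVector::from_slice(&[1u32, 2]);
    /// assert_eq!(vec.pop(), Some(2));
    /// assert_eq!(vec.pop(), Some(1));
    /// assert_eq!(vec.pop(), None);
    /// ```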
    pub fn pop(&mut self) -> Option<T> {
        if self.is_empty() {
            None
        } else {
            self.detach(self.len());
            unsafe {
                self.inner.as_mut().header.size -= 1;
                Some(core::ptr::read(self.inner.as_mut().data.as_mut_ptr().add(self.len())))
            }
        }
    }

    /// Resize the array to the given size.
    /// If the array was smaller new elements will be initialized with the value.
    /// If the array was bigger, extra elements will be discarded
    ///
    /// ```
    /// use i_slint_core::SharedVector;
    /// let mut shared_vector = SharedVector::<u32>::from_slice(&[1, 2, 3]);
    /// shared_vector.resize(5, 8);
    /// assert_eq!(shared_vector.as_slice(), &[1, 2, 3, 8, 8]);
    /// shared_vector.resize(2, 0);
    /// assert_eq!(shared_vector.as_slice(), &[1, 2]);
    /// ```
    pub fn resize(&mut self, new_len: usize, value: T) {
        if self.len() == new_len {
            return;
        }
        self.detach(new_len);
        // Safety: detach ensured that the array is not shared.
        let inner = unsafe { self.inner.as_mut() };

        if inner.header.size >= new_len {
            self.shrink(new_len);
        } else {
            while inner.header.size < new_len {
                // Safety: The array must have a capacity of at least new_len because of the detach call earlier
                unsafe {
                    core::ptr::write(inner.data.as_mut_ptr().add(inner.header.size), value.clone());
                }
                inner.header.size += 1;
            }
        }
    }

    fn shrink(&mut self, new_len: usize) {
        if self.len() == new_len {
            return;
        }

        assert!(
            unsafe { self.inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed) } == 1
        );
        // Safety: caller (and the assert above) must ensure that the array is not shared.
        let inner = unsafe { self.inner.as_mut() };

        while inner.header.size > new_len {
            inner.header.size -= 1;
            // Safety: The array was of size inner.header.size, so there should be an element there
            unsafe {
                core::ptr::drop_in_place(inner.data.as_mut_ptr().add(inner.header.size));
            }
        }
    }

    /// Clears the vector and removes all elements.
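    ///
    /// ```
    /// use i_slint_core::SharedVector;
    /// let mut vec = SharedVector::from_slice(&[1u32, 2, 3]);
    /// vec.clear();
    /// assert!(vec.is_empty());
    /// ```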
    pub fn clear(&mut self) {
        let is_shared =
            unsafe { self.inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed) } != 1;
        if is_shared {
            *self = SharedVector::default();
        } else {
            self.shrink(0)
        }
    }
}

impl<T> Deref for SharedVector<T> {
    type Target = [T];
    fn deref(&self) -> &Self::Target {
        self.as_slice()
    }
}

/* FIXME: is this a good idea to implement DerefMut knowing what it might detach?
impl<T> DerefMut for SharedVector<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.as_mut_slice()
    }
}*/

impl<T: Clone> From<&[T]> for SharedVector<T> {
    fn from(slice: &[T]) -> Self {
        let capacity = slice.len();
        let mut result = Self::with_capacity(capacity);
        for x in slice {
            unsafe {
                core::ptr::write(
                    result.inner.as_mut().data.as_mut_ptr().add(result.inner.as_mut().header.size),
                    x.clone(),
                );
                result.inner.as_mut().header.size += 1;
            }
        }
        result
    }
}

impl<T, const N: usize> From<[T; N]> for SharedVector<T> {
    fn from(array: [T; N]) -> Self {
        array.into_iter().collect()
    }
}

impl<T> FromIterator<T> for SharedVector<T> {
    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
        let mut iter = iter.into_iter();
        let mut capacity = iter.size_hint().0;
        let mut result = Self::with_capacity(capacity);
        let mut size = 0;
        while let Some(x) = iter.next() {
            if size >= capacity {
                capacity = capacity_for_grow(
                    capacity,
                    size + 1 + iter.size_hint().0,
                    core::mem::size_of::<T>(),
                );
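                // Mark the old allocation as unshared so the temporary IntoIter
                // below can move the elements out of it and free it on drop.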
                unsafe {
                    result.inner.as_ref().header.refcount.store(0, atomic::Ordering::Relaxed)
                };
                let mut iter = IntoIter(IntoIterInner::UnShared(result.inner, 0));
                result.inner = alloc_with_capacity::<T>(capacity);
                match &mut iter.0 {
                    IntoIterInner::UnShared(old_inner, begin) => {
                        while *begin < size {
                            unsafe {
                                core::ptr::write(
                                    result.inner.as_mut().data.as_mut_ptr().add(*begin),
                                    core::ptr::read(old_inner.as_ref().data.as_ptr().add(*begin)),
                                );
                                *begin += 1;
                                result.inner.as_mut().header.size = *begin;
                            }
                        }
                    }
                    _ => unreachable!(),
                }
            }
            debug_assert_eq!(result.len(), size);
            debug_assert!(result.capacity() > size);
            unsafe {
                core::ptr::write(result.inner.as_mut().data.as_mut_ptr().add(size), x);
                size += 1;
                result.inner.as_mut().header.size = size;
            }
        }
        result
    }
}

impl<T: Clone> Extend<T> for SharedVector<T> {
    fn extend<X: IntoIterator<Item = T>>(&mut self, iter: X) {
        let iter = iter.into_iter();
        let hint = iter.size_hint().0;
        if hint > 0 {
            self.detach(capacity_for_grow(
                self.capacity(),
                self.len() + hint,
                core::mem::size_of::<T>(),
            ));
        }
        for item in iter {
            self.push(item);
        }
    }
}

static SHARED_NULL: SharedVectorHeader =
    SharedVectorHeader { refcount: atomic::AtomicIsize::new(-1), size: 0, capacity: 0 };

impl<T> Default for SharedVector<T> {
    fn default() -> Self {
        SharedVector { inner: NonNull::from(&SHARED_NULL).cast() }
    }
}

impl<T: Debug> Debug for SharedVector<T> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        self.as_slice().fmt(f)
    }
}

impl<T> AsRef<[T]> for SharedVector<T> {
    #[inline]
    fn as_ref(&self) -> &[T] {
        self.as_slice()
    }
}

impl<T, U> PartialEq<U> for SharedVector<T>
where
    U: ?Sized + AsRef<[T]>,
    T: PartialEq,
{
    fn eq(&self, other: &U) -> bool {
        self.as_slice() == other.as_ref()
    }
}

impl<T: Eq> Eq for SharedVector<T> {}

impl<T: Clone> IntoIterator for SharedVector<T> {
    type Item = T;
    type IntoIter = IntoIter<T>;
    fn into_iter(self) -> Self::IntoIter {
        IntoIter(unsafe {
            if self.inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed) == 1 {
                let inner = self.inner;
                core::mem::forget(self);
                inner.as_ref().header.refcount.store(0, atomic::Ordering::Relaxed);
                IntoIterInner::UnShared(inner, 0)
            } else {
                IntoIterInner::Shared(self, 0)
            }
        })
    }
}

#[cfg(feature = "serde")]
use serde::ser::SerializeSeq;
#[cfg(feature = "serde")]
impl<T> serde::Serialize for SharedVector<T>
where
    T: serde::Serialize,
{
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        let mut seq = serializer.serialize_seq(Some(self.len()))?;
        for item in self.iter() {
            seq.serialize_element(item)?;
        }
        seq.end()
    }
}

#[cfg(feature = "serde")]
impl<'de, T> serde::Deserialize<'de> for SharedVector<T>
where
    T: Clone + serde::Deserialize<'de>,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let mut elements: alloc::vec::Vec<T> = serde::Deserialize::deserialize(deserializer)?;
        let mut shared_vec = SharedVector::with_capacity(elements.len());
        for elem in elements.drain(..) {
            shared_vec.push(elem);
        }
        Ok(shared_vec)
    }
}

enum IntoIterInner<T> {
    Shared(SharedVector<T>, usize),
    // Elements up to the usize member are already moved out
    UnShared(NonNull<SharedVectorInner<T>>, usize),
}

impl<T> Drop for IntoIterInner<T> {
    fn drop(&mut self) {
        match self {
            IntoIterInner::Shared(..) => { /* drop of SharedVector takes care of it */ }
            IntoIterInner::UnShared(mut inner, begin) => unsafe {
                debug_assert_eq!(inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed), 0);
                let data_ptr = inner.as_mut().data.as_mut_ptr();
                for x in (*begin)..inner.as_ref().header.size {
                    core::ptr::drop_in_place(data_ptr.add(x));
                }
                ::alloc::alloc::dealloc(
                    inner.as_ptr() as *mut u8,
                    compute_inner_layout::<T>(inner.as_ref().header.capacity),
                )
            },
        }
    }
}

/// An iterator that moves out of a SharedVector.
///
/// This `struct` is created by the `into_iter` method on [`SharedVector`] (provided
/// by the [`IntoIterator`] trait).
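///
/// A small usage sketch (crate used as `i_slint_core`, as in the `resize` doctest):
///
/// ```
/// use i_slint_core::SharedVector;
/// let vec = SharedVector::from_slice(&[1, 2, 3]);
/// // Elements are moved out when the vector is not shared, cloned otherwise.
/// let sum: i32 = vec.into_iter().sum();
/// assert_eq!(sum, 6);
/// ```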
pub struct IntoIter<T>(IntoIterInner<T>);

impl<T: Clone> Iterator for IntoIter<T> {
    type Item = T;

    fn next(&mut self) -> Option<Self::Item> {
        match &mut self.0 {
            IntoIterInner::Shared(array, moved) => {
                let result = array.as_slice().get(*moved).cloned();
                *moved += 1;
                result
            }
            IntoIterInner::UnShared(inner, begin) => unsafe {
                if *begin < inner.as_ref().header.size {
                    let r = core::ptr::read(inner.as_ref().data.as_ptr().add(*begin));
                    *begin += 1;
                    Some(r)
                } else {
                    None
                }
            },
        }
    }
}

#[test]
fn simple_test() {
    let x: SharedVector<i32> = SharedVector::from([1, 2, 3]);
    let y: SharedVector<i32> = SharedVector::from([3, 2, 1]);
    assert_eq!(x, x.clone());
    assert_ne!(x, y);
    let z: [i32; 3] = [1, 2, 3];
    assert_eq!(z, x.as_slice());
    let vec: std::vec::Vec<i32> = std::vec![1, 2, 3];
    assert_eq!(x, vec);
    let def: SharedVector<i32> = Default::default();
    assert_eq!(def, SharedVector::<i32>::default());
    assert_ne!(def, x);
}

#[test]
fn push_test() {
    let mut x: SharedVector<i32> = SharedVector::from([1, 2, 3]);
    let y = x.clone();
    x.push(4);
    x.push(5);
    x.push(6);
    assert_eq!(x.as_slice(), &[1, 2, 3, 4, 5, 6]);
    assert_eq!(y.as_slice(), &[1, 2, 3]);
}

#[test]
#[should_panic]
fn invalid_capacity_test() {
    let _: SharedVector<u8> = SharedVector::with_capacity(usize::MAX / 2 - 1000);
}

#[test]
fn collect_from_iter_with_no_size_hint() {
    use std::string::{String, ToString};
    struct NoSizeHintIter<'a> {
        data: &'a [&'a str],
        i: usize,
    }

    impl Iterator for NoSizeHintIter<'_> {
        type Item = String;

        fn next(&mut self) -> Option<Self::Item> {
            if self.i >= self.data.len() {
                return None;
            }
            let item = self.data[self.i];
            self.i += 1;
            Some(item.to_string())
        }

        fn size_hint(&self) -> (usize, Option<usize>) {
            (0, None)
        }
    }

    // 5 elements to be above the initial "grow"-capacity of 4 and thus require one realloc.
    let input = NoSizeHintIter { data: &["Hello", "sweet", "world", "of", "iterators"], i: 0 };

    let shared_vec: SharedVector<String> = input.collect();
    assert_eq!(shared_vec.as_slice(), &["Hello", "sweet", "world", "of", "iterators"]);
}

#[test]
fn test_capacity_grows_only_when_needed() {
    let mut vec: SharedVector<u8> = SharedVector::with_capacity(2);
    vec.push(0);
    assert_eq!(vec.capacity(), 2);
    vec.push(0);
    assert_eq!(vec.capacity(), 2);
    vec.push(0);
    assert_eq!(vec.len(), 3);
    assert!(vec.capacity() > 2);
}

#[test]
fn test_vector_clear() {
    let mut vec: SharedVector<std::string::String> = Default::default();
    vec.clear();
    vec.push("Hello".into());
    vec.push("World".into());
    vec.push("of".into());
    vec.push("Vectors".into());

    let mut copy = vec.clone();

    assert_eq!(vec.len(), 4);
    let orig_cap = vec.capacity();
    assert!(orig_cap >= vec.len());
    vec.clear();
    assert_eq!(vec.len(), 0);
    assert_eq!(vec.capacity(), 0); // vec was shared, so start with new empty vector.
    vec.push("Welcome back".into());
    assert_eq!(vec.len(), 1);
    assert!(vec.capacity() >= vec.len());

    assert_eq!(copy.len(), 4);
    assert_eq!(copy.capacity(), orig_cap);
    copy.clear(); // copy is not shared (anymore), retain capacity.
    assert_eq!(copy.capacity(), orig_cap);
}

#[test]
fn pop_test() {
    let mut x: SharedVector<i32> = SharedVector::from([1, 2, 3]);
    let y = x.clone();
    assert_eq!(x.pop(), Some(3));
    assert_eq!(x.pop(), Some(2));
    assert_eq!(x.pop(), Some(1));
    assert_eq!(x.pop(), None);
    assert!(x.is_empty());
    assert_eq!(y.as_slice(), &[1, 2, 3]);
}

#[cfg(feature = "ffi")]
pub(crate) mod ffi {
    use super::*;

    #[no_mangle]
    /// This function is used for the low-level C++ interface to allocate the backing vector of a SharedVector.
    pub unsafe extern "C" fn slint_shared_vector_allocate(size: usize, align: usize) -> *mut u8 {
        alloc::alloc::alloc(alloc::alloc::Layout::from_size_align(size, align).unwrap())
    }

    #[no_mangle]
    /// This function is used for the low-level C++ interface to deallocate the backing vector of a SharedVector
    pub unsafe extern "C" fn slint_shared_vector_free(ptr: *mut u8, size: usize, align: usize) {
        alloc::alloc::dealloc(ptr, alloc::alloc::Layout::from_size_align(size, align).unwrap())
    }

    #[no_mangle]
    /// This function is used for the low-level C++ interface to initialize the empty SharedVector.
    pub unsafe extern "C" fn slint_shared_vector_empty() -> *const u8 {
        &SHARED_NULL as *const _ as *const u8
    }
}

#[cfg(feature = "serde")]
#[test]
fn test_serialize_deserialize_sharedvector() {
    let v = SharedVector::from([1, 2, 3]);
    let serialized = serde_json::to_string(&v).unwrap();
    let deserialized: SharedVector<i32> = serde_json::from_str(&serialized).unwrap();
    assert_eq!(v, deserialized);
}