//! **This module is experimental.**
//!
//! This module provides threadsafe versions of FrozenMap and FrozenVec,
//! ideal for use as a cache.
//!
//! These types lock internally; each lock is held only for the duration of
//! a single method call.
//!
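//! # Example
//!
//! A minimal sketch of cache-style use across threads: values live behind
//! stable-deref pointers (`Box`, `String`, ...), so references handed out by
//! one thread remain valid while other threads insert.
//!
//! ```
//! use elsa::sync::FrozenMap;
//!
//! let cache: FrozenMap<u32, String> = FrozenMap::new();
//! std::thread::scope(|s| {
//!     s.spawn(|| { cache.insert(1, String::from("one")); });
//!     s.spawn(|| { cache.insert(2, String::from("two")); });
//! });
//! assert_eq!(cache.get(&1), Some("one"));
//! assert_eq!(cache.get(&2), Some("two"));
//! ```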

use stable_deref_trait::StableDeref;
use std::alloc::Layout;
use std::borrow::Borrow;
use std::cmp::Eq;
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::fmt;
use std::hash::Hash;
use std::iter::{FromIterator, IntoIterator};
use std::ops::Index;

use std::ptr::NonNull;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::AtomicPtr;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::sync::RwLock;
use std::sync::TryLockError;

/// Append-only threadsafe version of `std::collections::HashMap` where
/// insertion does not require mutable access
pub struct FrozenMap<K, V> {
    map: RwLock<HashMap<K, V>>,
}

impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for FrozenMap<K, V> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self.map.try_read() {
            Ok(guard) => guard.fmt(f),
            Err(TryLockError::Poisoned(err)) => {
                f.debug_tuple("FrozenMap").field(&&**err.get_ref()).finish()
            }
            Err(TryLockError::WouldBlock) => {
                struct LockedPlaceholder;
                impl fmt::Debug for LockedPlaceholder {
                    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                        f.write_str("<locked>")
                    }
                }
                f.debug_tuple("FrozenMap")
                    .field(&LockedPlaceholder)
                    .finish()
            }
        }
    }
}

impl<K, V> Default for FrozenMap<K, V> {
    fn default() -> Self {
        Self {
            map: Default::default(),
        }
    }
}

impl<K, V> FrozenMap<K, V> {
    pub fn new() -> Self {
        Self::default()
    }
}

impl<T> From<Vec<T>> for FrozenVec<T> {
    fn from(vec: Vec<T>) -> Self {
        Self {
            vec: RwLock::new(vec),
        }
    }
}

impl<K: Eq + Hash, V: StableDeref> FrozenMap<K, V> {
    // these should never return &K or &V
    // these should never delete any entries

    /// If the key exists in the map, returns a reference
    /// to the corresponding value, otherwise inserts a
    /// new entry in the map for that key and returns a
    /// reference to the given value.
    ///
    /// Existing values are never overwritten.
    ///
    /// # Examples
    ///
    /// ```
    /// use elsa::sync::FrozenMap;
    ///
    /// let map = FrozenMap::new();
    /// assert_eq!(map.insert(1, Box::new("a")), &"a");
    /// assert_eq!(map.insert(1, Box::new("b")), &"a");
    /// ```
    pub fn insert(&self, k: K, v: V) -> &V::Target {
        let mut map = self.map.write().unwrap();
        let ret = unsafe {
            let inserted = &**map.entry(k).or_insert(v);
            &*(inserted as *const _)
        };
        ret
    }

    /// If the key exists in the map, returns a reference to the corresponding
    /// value, otherwise inserts a new entry in the map for that key and the
    /// value returned by the creation function, and returns a reference to the
    /// generated value.
    ///
    /// Existing values are never overwritten.
    ///
    /// **Note** that the write lock is held for the duration of this function’s
    /// execution, even while the value creation function is executing (if
    /// needed). This will block any concurrent `get` or `insert` calls.
    ///
    /// # Examples
    ///
    /// ```
    /// use elsa::sync::FrozenMap;
    ///
    /// let map = FrozenMap::new();
    /// assert_eq!(map.insert_with(1, || Box::new("a")), &"a");
    /// assert_eq!(map.insert_with(1, || unreachable!()), &"a");
    /// ```
    pub fn insert_with(&self, k: K, f: impl FnOnce() -> V) -> &V::Target {
        let mut map = self.map.write().unwrap();
        let ret = unsafe {
            let inserted = &**map.entry(k).or_insert_with(f);
            &*(inserted as *const _)
        };
        ret
    }

    /// If the key exists in the map, returns a reference to the corresponding
    /// value, otherwise inserts a new entry in the map for that key and the
    /// value returned by the creation function, and returns a reference to the
    /// generated value.
    ///
    /// Existing values are never overwritten.
    ///
    /// **Note** that the write lock is held for the duration of this function’s
    /// execution, even while the value creation function is executing (if
    /// needed). This will block any concurrent `get` or `insert` calls.
    ///
    /// # Examples
    ///
    /// ```
    /// use elsa::sync::FrozenMap;
    ///
    /// let map = FrozenMap::new();
    /// assert_eq!(map.insert_with_key(1, |_| Box::new("a")), &"a");
    /// assert_eq!(map.insert_with_key(1, |_| unreachable!()), &"a");
    /// ```
    pub fn insert_with_key(&self, k: K, f: impl FnOnce(&K) -> V) -> &V::Target {
        let mut map = self.map.write().unwrap();
        let ret = unsafe {
            let inserted = &**map.entry(k).or_insert_with_key(f);
            &*(inserted as *const _)
        };
        ret
    }

    /// Returns a reference to the value corresponding to the key.
    ///
    /// The key may be any borrowed form of the map's key type, but
    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
    /// the key type.
    ///
    /// # Examples
    ///
    /// ```
    /// use elsa::sync::FrozenMap;
    ///
    /// let map = FrozenMap::new();
    /// map.insert(1, Box::new("a"));
    /// assert_eq!(map.get(&1), Some(&"a"));
    /// assert_eq!(map.get(&2), None);
    /// ```
    pub fn get<Q>(&self, k: &Q) -> Option<&V::Target>
    where
        K: Borrow<Q>,
        Q: Hash + Eq + ?Sized,
    {
        let map = self.map.read().unwrap();
        let ret = unsafe { map.get(k).map(|x| &*(&**x as *const V::Target)) };
        ret
    }

    /// Applies a function to the owner of the value corresponding to the key (if any).
    ///
    /// The key may be any borrowed form of the map's key type, but
    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
    /// the key type.
    ///
    /// # Examples
    ///
    /// ```
    /// use elsa::sync::FrozenMap;
    ///
    /// let map = FrozenMap::new();
    /// map.insert(1, Box::new("a"));
    /// assert_eq!(map.map_get(&1, Clone::clone), Some(Box::new("a")));
    /// assert_eq!(map.map_get(&2, Clone::clone), None);
    /// ```
    pub fn map_get<Q, T, F>(&self, k: &Q, f: F) -> Option<T>
    where
        K: Borrow<Q>,
        Q: Hash + Eq + ?Sized,
        F: FnOnce(&V) -> T,
    {
        let map = self.map.read().unwrap();
        let ret = map.get(k).map(f);
        ret
    }
}

impl<K, V> FrozenMap<K, V> {
    /// Collects the contents of this map into a vector of tuples.
    ///
    /// The order of the entries is as if iterating a [`HashMap`] (stochastic).
    ///
    /// # Examples
    ///
    /// ```
    /// use elsa::sync::FrozenMap;
    ///
    /// let map = FrozenMap::new();
    /// map.insert(1, Box::new("a"));
    /// map.insert(2, Box::new("b"));
    /// let mut tuple_vec = map.into_tuple_vec();
    /// tuple_vec.sort();
    ///
    /// assert_eq!(tuple_vec, vec![(1, Box::new("a")), (2, Box::new("b"))]);
    /// ```
    pub fn into_tuple_vec(self) -> Vec<(K, V)> {
        self.map
            .into_inner()
            .unwrap()
            .into_iter()
            .collect::<Vec<_>>()
    }

    /// # Examples
    ///
    /// ```
    /// use elsa::sync::FrozenMap;
    ///
    /// let map = FrozenMap::new();
    /// assert_eq!(map.len(), 0);
    /// map.insert(1, Box::new("a"));
    /// assert_eq!(map.len(), 1);
    /// ```
    pub fn len(&self) -> usize {
        let map = self.map.read().unwrap();
        map.len()
    }

    /// # Examples
    ///
    /// ```
    /// use elsa::sync::FrozenMap;
    ///
    /// let map = FrozenMap::new();
    /// assert_eq!(map.is_empty(), true);
    /// map.insert(1, Box::new("a"));
    /// assert_eq!(map.is_empty(), false);
    /// ```
    pub fn is_empty(&self) -> bool {
        let map = self.map.read().unwrap();
        map.is_empty()
    }

    // TODO add more
}

impl<K: Clone, V> FrozenMap<K, V> {
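    /// Returns a vector containing a clone of every key in the map.
    ///
    /// # Examples
    ///
    /// A minimal example:
    ///
    /// ```
    /// use elsa::sync::FrozenMap;
    ///
    /// let map = FrozenMap::new();
    /// map.insert(1, Box::new("a"));
    /// map.insert(2, Box::new("b"));
    /// let mut keys = map.keys_cloned();
    /// keys.sort();
    /// assert_eq!(keys, vec![1, 2]);
    /// ```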
    pub fn keys_cloned(&self) -> Vec<K> {
        self.map.read().unwrap().keys().cloned().collect()
    }
}

impl<K: Eq + Hash, V: Copy> FrozenMap<K, V> {
    /// Returns a copy of the value corresponding to the key.
    ///
    /// The key may be any borrowed form of the map's key type, but
    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
    /// the key type.
    ///
    /// # Examples
    ///
    /// ```
    /// use elsa::sync::FrozenMap;
    ///
    /// let map = FrozenMap::new();
    /// map.get_copy_or_insert(1, 6);
    /// assert_eq!(map.get_copy(&1), Some(6));
    /// assert_eq!(map.get_copy(&2), None);
    /// ```
    pub fn get_copy<Q>(&self, k: &Q) -> Option<V>
    where
        K: Borrow<Q>,
        Q: Hash + Eq + ?Sized,
    {
        let map = self.map.read().unwrap();
        map.get(k).cloned()
    }

    /// If the key exists in the map, returns a copy
    /// of the corresponding value, otherwise inserts a
    /// new entry in the map for that key and returns a
    /// copy of the given value.
    ///
    /// Existing values are never overwritten.
    ///
    /// # Examples
    ///
    /// ```
    /// use elsa::sync::FrozenMap;
    ///
    /// let map = FrozenMap::new();
    /// assert_eq!(map.get_copy_or_insert(1, 6), 6);
    /// assert_eq!(map.get_copy_or_insert(1, 12), 6);
    /// ```
    pub fn get_copy_or_insert(&self, k: K, v: V) -> V {
        let mut map = self.map.write().unwrap();
        // This is safe because `or_insert` does not overwrite existing values
        let inserted = map.entry(k).or_insert(v);
        *inserted
    }

    /// If the key exists in the map, returns a copy of the corresponding
    /// value, otherwise inserts a new entry in the map for that key and the
    /// value returned by the creation function, and returns a copy of the
    /// generated value.
    ///
    /// Existing values are never overwritten.
    ///
    /// **Note** that the write lock is held for the duration of this function’s
    /// execution, even while the value creation function is executing (if
    /// needed). This will block any concurrent `get` or `insert` calls.
    ///
    /// # Examples
    ///
    /// ```
    /// use elsa::sync::FrozenMap;
    ///
    /// let map = FrozenMap::new();
    /// assert_eq!(map.get_copy_or_insert_with(1, || 6), 6);
    /// assert_eq!(map.get_copy_or_insert_with(1, || unreachable!()), 6);
    /// ```
    pub fn get_copy_or_insert_with(&self, k: K, f: impl FnOnce() -> V) -> V {
        let mut map = self.map.write().unwrap();
        // This is safe because `or_insert_with` does not overwrite existing values
        let inserted = map.entry(k).or_insert_with(f);
        *inserted
    }

    /// If the key exists in the map, returns a copy of the corresponding
    /// value, otherwise inserts a new entry in the map for that key and the
    /// value returned by the creation function, and returns a copy of the
    /// generated value.
    ///
    /// Existing values are never overwritten.
    ///
    /// **Note** that the write lock is held for the duration of this function’s
    /// execution, even while the value creation function is executing (if
    /// needed). This will block any concurrent `get` or `insert` calls.
    ///
    /// # Examples
    ///
    /// ```
    /// use elsa::sync::FrozenMap;
    ///
    /// let map = FrozenMap::new();
    /// assert_eq!(map.get_copy_or_insert_with_key(1, |_| 6), 6);
    /// assert_eq!(map.get_copy_or_insert_with_key(1, |_| unreachable!()), 6);
    /// ```
    pub fn get_copy_or_insert_with_key(&self, k: K, f: impl FnOnce(&K) -> V) -> V {
        let mut map = self.map.write().unwrap();
        // This is safe because `or_insert_with_key` does not overwrite existing values
        let inserted = map.entry(k).or_insert_with_key(f);
        *inserted
    }
}

impl<K, V> std::convert::AsMut<HashMap<K, V>> for FrozenMap<K, V> {
    /// Get mutable access to the underlying [`HashMap`].
    ///
    /// This is safe, as it requires a `&mut self`, ensuring nothing is using
    /// the 'frozen' contents.
    fn as_mut(&mut self) -> &mut HashMap<K, V> {
        self.map.get_mut().unwrap()
    }
}

impl<K: Clone, V: Clone> Clone for FrozenMap<K, V> {
    fn clone(&self) -> Self {
        Self {
            map: self.map.read().unwrap().clone().into(),
        }
    }
}

impl<K: Eq + Hash, V: PartialEq> PartialEq for FrozenMap<K, V> {
    fn eq(&self, other: &Self) -> bool {
        let self_ref: &HashMap<K, V> = &self.map.read().unwrap();
        let other_ref: &HashMap<K, V> = &other.map.read().unwrap();
        self_ref == other_ref
    }
}

/// Append-only threadsafe version of `std::vec::Vec` where
/// insertion does not require mutable access
pub struct FrozenVec<T> {
    vec: RwLock<Vec<T>>,
}

impl<T: fmt::Debug> fmt::Debug for FrozenVec<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self.vec.try_read() {
            Ok(guard) => guard.fmt(f),
            Err(TryLockError::Poisoned(err)) => {
                f.debug_tuple("FrozenVec").field(&&**err.get_ref()).finish()
            }
            Err(TryLockError::WouldBlock) => {
                struct LockedPlaceholder;
                impl fmt::Debug for LockedPlaceholder {
                    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                        f.write_str("<locked>")
                    }
                }
                f.debug_tuple("FrozenVec")
                    .field(&LockedPlaceholder)
                    .finish()
            }
        }
    }
}

impl<T> FrozenVec<T> {
    /// Returns the number of elements in the vector.
    pub fn len(&self) -> usize {
        let vec = self.vec.read().unwrap();
        vec.len()
    }

    /// Returns `true` if the vector contains no elements.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
}

impl<T: StableDeref> FrozenVec<T> {
    pub const fn new() -> Self {
        Self {
            vec: RwLock::new(Vec::new()),
        }
    }

    // these should never return &T
    // these should never delete any entries

    pub fn push(&self, val: T) {
        let mut vec = self.vec.write().unwrap();
        vec.push(val);
    }

    /// Push, immediately getting a reference to the element
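    ///
    /// # Examples
    ///
    /// A minimal example:
    ///
    /// ```
    /// use elsa::sync::FrozenVec;
    ///
    /// let vec = FrozenVec::new();
    /// assert_eq!(vec.push_get(Box::new("a")), &"a");
    /// ```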
    pub fn push_get(&self, val: T) -> &T::Target {
        let mut vec = self.vec.write().unwrap();
        vec.push(val);
        unsafe { &*(&**vec.get_unchecked(vec.len() - 1) as *const T::Target) }
    }

    /// Push, immediately getting an index of the element
    ///
    /// The index can then be used with the `get` method
    ///
    /// # Examples
    ///
    /// ```
    /// use elsa::sync::FrozenVec;
    ///
    /// let map = FrozenVec::new();
    /// let idx = map.push_get_index(String::from("a"));
    /// assert_eq!(map.get(idx), Some("a"));
    /// assert_eq!(idx, 0);
    /// assert_eq!(map.push_get_index(String::from("b")), 1);
    /// ```
    pub fn push_get_index(&self, val: T) -> usize {
        let mut vec = self.vec.write().unwrap();
        let index = vec.len();
        vec.push(val);
        index
    }

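    /// Returns a reference to the element at the given index (if it exists).
    ///
    /// # Examples
    ///
    /// A minimal example:
    ///
    /// ```
    /// use elsa::sync::FrozenVec;
    ///
    /// let vec = FrozenVec::new();
    /// vec.push(String::from("a"));
    /// assert_eq!(vec.get(0), Some("a"));
    /// assert_eq!(vec.get(1), None);
    /// ```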
    pub fn get(&self, index: usize) -> Option<&T::Target> {
        let vec = self.vec.read().unwrap();
        unsafe { vec.get(index).map(|x| &*(&**x as *const T::Target)) }
    }

    /// Returns an iterator over the vector.
    pub fn iter(&self) -> Iter<'_, T> {
        self.into_iter()
    }
}

/// Iterator over FrozenVec, obtained via `.iter()`
///
/// It is safe to push to the vector during iteration
#[derive(Debug)]
pub struct Iter<'a, T> {
    vec: &'a FrozenVec<T>,
    idx: usize,
}

impl<'a, T: StableDeref> Iterator for Iter<'a, T> {
    type Item = &'a T::Target;
    fn next(&mut self) -> Option<&'a T::Target> {
        if let Some(ret) = self.vec.get(self.idx) {
            self.idx += 1;
            Some(ret)
        } else {
            None
        }
    }
}

impl<'a, T: StableDeref> IntoIterator for &'a FrozenVec<T> {
    type Item = &'a T::Target;
    type IntoIter = Iter<'a, T>;
    fn into_iter(self) -> Iter<'a, T> {
        Iter { vec: self, idx: 0 }
    }
}
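
// An added sanity check (a sketch, not from the original test suite): it
// exercises the documented guarantee that pushing to a `FrozenVec` during
// iteration is safe, and that elements pushed mid-iteration are visible to
// the same iterator.
#[test]
fn test_push_during_iteration() {
    let vec: FrozenVec<String> = FrozenVec::new();
    vec.push("a".to_string());
    let mut seen = Vec::new();
    for s in vec.iter() {
        seen.push(s.to_string());
        if seen.len() < 3 {
            // Push while the iterator is still alive.
            vec.push(format!("next-{}", seen.len()));
        }
    }
    assert_eq!(seen, ["a", "next-1", "next-2"]);
}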

#[test]
fn test_iteration() {
    let vec = vec!["a", "b", "c", "d"];
    let frozen: FrozenVec<_> = vec.clone().into();

    assert_eq!(vec, frozen.iter().collect::<Vec<_>>());
    for (e1, e2) in vec.iter().zip(frozen.iter()) {
        assert_eq!(*e1, e2);
    }

    assert_eq!(vec.len(), frozen.iter().count())
}

impl<T> FrozenVec<T> {
    /// Returns the internal vector backing this structure
    ///
    /// # Examples
    ///
    /// ```
    /// use elsa::sync::FrozenVec;
    ///
    /// let map = FrozenVec::new();
    /// map.push("a");
    /// map.push("b");
    /// let tuple_vec = map.into_vec();
    ///
    /// assert_eq!(tuple_vec, vec!["a", "b"]);
    /// ```
    pub fn into_vec(self) -> Vec<T> {
        self.vec.into_inner().unwrap()
    }

    // TODO add more
}

impl<T> std::convert::AsMut<Vec<T>> for FrozenVec<T> {
    /// Get mutable access to the underlying vector.
    ///
    /// This is safe, as it requires a `&mut self`, ensuring nothing is using
    /// the 'frozen' contents.
    fn as_mut(&mut self) -> &mut Vec<T> {
        self.vec.get_mut().unwrap()
    }
}

impl<T> Default for FrozenVec<T> {
    fn default() -> Self {
        Self {
            vec: Default::default(),
        }
    }
}

impl<T: Clone> Clone for FrozenVec<T> {
    fn clone(&self) -> Self {
        Self {
            vec: self.vec.read().unwrap().clone().into(),
        }
    }
}

impl<T: PartialEq> PartialEq for FrozenVec<T> {
    fn eq(&self, other: &Self) -> bool {
        let self_ref: &Vec<T> = &self.vec.read().unwrap();
        let other_ref: &Vec<T> = &other.vec.read().unwrap();
        self_ref == other_ref
    }
}

// The context for these functions is that we want to have a
// series of exponentially increasing buffer sizes. We want
// to maximize the total size of the buffers (since this
// determines the maximum size of the container) whilst
// minimizing the number of buffers (since we pay an up-front
// cost in space proportional to the number of buffers)
// without increasing the buffer size too much each time, as
// this determines how much space will be wasted on average
// in allocated buffers. Finally, we also want a sequence
// which will generate nice round numbers and is easy to
// work with.

/// We multiply the buffer size by 4 each time whilst sizing
/// the first buffer to 3, so the buffer sizes generated by
/// the function will be 3, 12, 48, 192, etc.
const fn buffer_size(idx: usize) -> usize {
    3 << (idx * 2)
}

/// This computes the sum of the sizes of buffers prior to a
/// particular buffer index, aka `4^idx - 1`. The choice of
/// sequence means that the total buffer size will always be
/// a sequence of `1`s in binary, since it's a power of 2 minus one.
const fn prior_total_buffer_size(idx: usize) -> usize {
    (1 << (idx * 2)) - 1
}

/// This determines which buffer contains the nth item
/// (assuming the items are arranged sequentially in the buffers).
/// Since the total buffer sizes are always sequences of 1s in binary,
/// we can just count the number of binary digits in `(i+1)` and
/// divide by `2` (rounding up).
/// (That's what the `(65 - (i + 1).leading_zeros()) >> 1` part does.)
/// We use `65` rather than `64` so that the division by `2` rounds
/// up instead of down. We divide by `2` (`>> 1`) because we skip
/// every other power of `2` since we increase the buffer size by `4`
/// each time, and finally we subtract one because buffer indices are
/// zero-indexed.
const fn buffer_index(i: usize) -> usize {
    (((usize::BITS + 1 - (i + 1).leading_zeros()) >> 1) - 1) as usize
}
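
// An added sanity check (a sketch, not from the original test suite): it
// verifies the relationships between `buffer_size`, `prior_total_buffer_size`
// and `buffer_index` described above.
#[test]
fn test_buffer_math() {
    // Buffer sizes grow by a factor of 4: 3, 12, 48, ...
    assert_eq!(buffer_size(0), 3);
    assert_eq!(buffer_size(1), 12);
    assert_eq!(buffer_size(2), 48);
    // The buffers before buffer `idx` hold `4^idx - 1` elements in total.
    assert_eq!(prior_total_buffer_size(0), 0);
    assert_eq!(prior_total_buffer_size(1), 3);
    assert_eq!(prior_total_buffer_size(2), 15);
    // Every index falls inside the buffer that `buffer_index` selects for it.
    for i in 0..1000 {
        let b = buffer_index(i);
        assert!(prior_total_buffer_size(b) <= i);
        assert!(i < prior_total_buffer_size(b) + buffer_size(b));
    }
}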

/// Each buffer covers 2 bits of address space, so we need half as many
/// buffers as bits in a usize.
const NUM_BUFFERS: usize = (usize::BITS / 2) as usize;

/// Append-only threadsafe version of `std::vec::Vec` where
/// insertion does not require mutable access.
/// Does not lock for reading, only allows `Copy` types and
/// will spinlock on pushes without affecting reads.
/// Note that this data structure is `34` pointers large on
/// 64-bit systems, in contrast to `Vec`, which is `3` pointers large.
pub struct LockFreeFrozenVec<T: Copy> {
    data: [AtomicPtr<T>; NUM_BUFFERS],
    len: AtomicUsize,
    locked: AtomicBool,
}
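
// An added sanity check (a sketch, not from the original test suite) of the
// size claim above: 32 buffer pointers plus `len` plus `locked` (padded to
// word size) come to 34 words on a 64-bit target.
#[test]
#[cfg(target_pointer_width = "64")]
fn test_size_claim() {
    assert_eq!(
        std::mem::size_of::<LockFreeFrozenVec<u8>>(),
        34 * std::mem::size_of::<usize>()
    );
}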

impl<T: Copy> Drop for LockFreeFrozenVec<T> {
    fn drop(&mut self) {
        // We need to deallocate all allocated buffers (the elements are `Copy`,
        // so they do not need to be dropped individually).
        for i in 0..NUM_BUFFERS {
            let layout = Self::layout(buffer_size(i));
            unsafe {
                let ptr = *self.data[i].get_mut();
                if ptr.is_null() {
                    // After the first null pointer there will only be more
                    // null pointers.
                    break;
                } else {
                    std::alloc::dealloc(ptr.cast(), layout);
                }
            }
        }
    }
}

impl<T: Copy> Default for LockFreeFrozenVec<T> {
    /// Creates an empty `LockFreeFrozenVec` that does not make
    /// any heap allocations until the first time data is pushed to it.
    fn default() -> Self {
        Self::new()
    }
}

impl<T: Copy> LockFreeFrozenVec<T> {
    const fn null() -> [AtomicPtr<T>; NUM_BUFFERS] {
        [const { AtomicPtr::new(std::ptr::null_mut()) }; NUM_BUFFERS]
    }

    pub const fn new() -> Self {
        Self {
            data: Self::null(),
            len: AtomicUsize::new(0),
            locked: AtomicBool::new(false),
        }
    }

    /// Obtains a write lock that ensures other writing threads
    /// wait for us to finish. Reading threads are unaffected and
    /// can concurrently read while we push a new element.
    fn lock<U>(&self, f: impl FnOnce() -> U) -> U {
        while self.locked.swap(true, Ordering::Acquire) {
            // Wheeeee spinlock
            std::hint::spin_loop();
        }

        let ret = f();
        self.locked.store(false, Ordering::Release);
        ret
    }

    fn layout(cap: usize) -> Layout {
        Layout::array::<T>(cap).unwrap()
    }

    // these should never return &T
    // these should never delete any entries

    const NOT_ZST: () = if std::mem::size_of::<T>() == 0 {
        panic!("`LockFreeFrozenVec` cannot be used with ZSTs");
    };

    /// Pushes an element to the vector, potentially allocating new memory.
    /// Returns the index at which the element was inserted.
    pub fn push(&self, val: T) -> usize {
        // This statement actually does something: it evaluates a constant.
        #[allow(path_statements)]
        {
            Self::NOT_ZST
        }
        self.lock(|| {
            // These values must be consistent with the pointer we got.
            let len = self.len();
            let buffer_idx = buffer_index(len);
            let mut ptr = self.data[buffer_idx].load(Ordering::Acquire);
            if ptr.is_null() {
                // Out of memory, allocate more
                let layout = Self::layout(buffer_size(buffer_idx));
                // SAFETY: `LockFreeFrozenVec` statically rejects ZSTs, so the
                // layout always has a non-zero size.
                unsafe {
                    ptr = std::alloc::alloc(layout).cast::<T>();
                }

                assert!(!ptr.is_null());

                self.data[buffer_idx].store(ptr, Ordering::Release);
            }
            let local_index = len - prior_total_buffer_size(buffer_idx);
            unsafe {
                ptr.add(local_index).write(val);
            }
            // `len` is updated last, after the element itself has been written.
            // Other `push` calls cannot observe the intermediate state, because
            // they are blocked on acquiring the lock before they ever read `len`.
            // `get` may read the length without synchronization, but that is fine,
            // as there will actually be the right number of elements stored, or
            // fewer, in which case you get a spurious `None`.
            self.len.store(len + 1, Ordering::Release);
            len
        })
    }

    /// Load an element (if it exists). This operation is lock-free and
    /// performs minimal synchronization.
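    ///
    /// # Examples
    ///
    /// A minimal example:
    ///
    /// ```
    /// use elsa::sync::LockFreeFrozenVec;
    ///
    /// let vec = LockFreeFrozenVec::new();
    /// let idx = vec.push(42u32);
    /// assert_eq!(vec.get(idx), Some(42));
    /// assert_eq!(vec.get(idx + 1), None);
    /// ```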
    pub fn get(&self, index: usize) -> Option<T> {
        // The length can only grow, so just doing the length check
        // independently of the read is fine. Worst case we
        // read an old length value and end up returning `None` even if
        // another thread already inserted the value.
        if index >= self.len() {
            return None;
        }
        let buffer_idx = buffer_index(index);
        let buffer_ptr = self.data[buffer_idx].load(Ordering::Acquire);
        let local_index = index - prior_total_buffer_size(buffer_idx);
        Some(unsafe { *buffer_ptr.add(local_index) })
    }

    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    #[inline(always)]
    pub fn len(&self) -> usize {
        self.len.load(Ordering::Acquire)
    }

    /// Load an element (if it exists). This operation is lock-free and
    /// performs no synchronized atomic operations. This is a useful primitive to
    /// implement your own data structure with newtypes around the index.
    ///
    /// ## Safety
    ///
    /// `index` must be in bounds, i.e. it must be less than `self.len()`
    #[inline]
    pub unsafe fn get_unchecked(&self, index: usize) -> T {
        let buffer_idx = buffer_index(index);
        let buffer_ptr = self.data[buffer_idx].load(Ordering::Relaxed);
        let local_index = index - prior_total_buffer_size(buffer_idx);
        unsafe { *buffer_ptr.add(local_index) }
    }

    /// Run a function on each buffer in the vector.
    ///
    /// ## Arguments
    /// - `func`: a function that takes a slice of the buffer and the buffer index
    fn for_each_buffer(&self, mut func: impl FnMut(&[T], usize)) {
        // for each allocated buffer, run the function
        for buffer_index in 0..NUM_BUFFERS {
            // get the buffer pointer
            if let Some(buffer_ptr) = NonNull::new(self.data[buffer_index].load(Ordering::Acquire))
            {
                // get the buffer size
                let buffer_size = buffer_size(buffer_index);

                // create a slice from the buffer pointer and size
                let buffer_slice =
                    unsafe { std::slice::from_raw_parts(buffer_ptr.as_ptr(), buffer_size) };

                // run the function
                func(buffer_slice, buffer_index);
            } else {
                // no data in this buffer, so we're done
                break;
            }
        }
    }
}

impl<T: Copy + PartialEq> PartialEq for LockFreeFrozenVec<T> {
    fn eq(&self, other: &Self) -> bool {
        // first check the length
        let self_len = self.len();
        let other_len = other.len();
        if self_len != other_len {
            return false;
        }

        // Since the lengths are the same, just check the elements in order
        for index in 0..self_len {
            // This is safe because the indices are in bounds (for `LockFreeFrozenVec` the bounds can only grow).
            if self.get(index) != other.get(index) {
                return false;
            }
        }

        true
    }
}

#[test]
fn test_non_lockfree_unchecked() {
    #[derive(Copy, Clone, Debug, PartialEq, Eq)]
    struct Moo(i32);

    let vec = LockFreeFrozenVec::new();

    let idx_set = std::sync::Mutex::new(std::collections::HashSet::new());

    std::thread::scope(|s| {
        s.spawn(|| {
            for i in 0..1000 {
                idx_set.lock().unwrap().insert(vec.push(Moo(i)));
            }
        });
        s.spawn(|| {
            for i in 0..1000 {
                idx_set.lock().unwrap().insert(vec.push(Moo(i)));
            }
        });
        for _ in 0..2000 {
            let idxes = std::mem::take(&mut *idx_set.lock().unwrap());
            for idx in idxes {
                unsafe {
                    vec.get_unchecked(idx);
                }
            }
        }
    });

    // Test dropping empty vecs
    LockFreeFrozenVec::<()>::new();
}

impl<T: Copy + Clone> Clone for LockFreeFrozenVec<T> {
    fn clone(&self) -> Self {
        let mut copied_data = Self::null();
        // for each buffer, copy the data
        self.for_each_buffer(|buffer_slice, buffer_index| {
            // allocate a new buffer
            let layout = Self::layout(buffer_slice.len());
            let new_buffer_ptr = unsafe { std::alloc::alloc(layout).cast::<T>() };
            assert!(!new_buffer_ptr.is_null());
            // copy the data to the new buffer
            unsafe {
                std::ptr::copy_nonoverlapping(
                    buffer_slice.as_ptr(),
                    new_buffer_ptr,
                    buffer_slice.len(),
                );
            };
            // store the new buffer pointer
            *copied_data[buffer_index].get_mut() = new_buffer_ptr;
        });

        Self {
            data: copied_data,
            // `self.len()` loads with `Ordering::Acquire`, pairing with the `Release`
            // stores in `push`, so the new length is consistent with the buffers
            // copied above.
            len: AtomicUsize::new(self.len()),
            locked: AtomicBool::new(false),
        }
    }
}

#[test]
fn test_non_lockfree() {
    #[derive(Copy, Clone, Debug, PartialEq, Eq)]
    struct Moo(i32);

    let vec = LockFreeFrozenVec::new();

    assert_eq!(vec.get(1), None);

    vec.push(Moo(1));
    let i = vec.push(Moo(2));
    vec.push(Moo(3));

    assert_eq!(vec.get(i), Some(Moo(2)));

    std::thread::scope(|s| {
        s.spawn(|| {
            for i in 0..1000 {
                vec.push(Moo(i));
            }
        });
        s.spawn(|| {
            for i in 0..1000 {
                vec.push(Moo(i));
            }
        });
        for i in 0..2000 {
            while vec.get(i).is_none() {}
        }
    });

    // Test cloning
    {
        let vec2 = vec.clone();
        assert_eq!(vec2.get(0), Some(Moo(1)));
        assert_eq!(vec2.get(1), Some(Moo(2)));
        assert_eq!(vec2.get(2), Some(Moo(3)));
    }
    // Test cloning a large vector
    {
        let large_vec = LockFreeFrozenVec::new();
        for i in 0..1000 {
            large_vec.push(Moo(i));
        }
        let large_vec_2 = large_vec.clone();
        for i in 0..1000 {
            assert_eq!(large_vec_2.get(i), Some(Moo(i as i32)));
        }
    }
    // Test cloning an empty vector
    {
        let empty_vec = LockFreeFrozenVec::<()>::new();
        let empty_vec_2 = empty_vec.clone();
        assert_eq!(empty_vec_2.get(0), None);
    }

    // Test dropping empty vecs
    LockFreeFrozenVec::<()>::new();
}

// TODO: Implement IntoIterator for LockFreeFrozenVec

/// Append-only threadsafe version of `std::collections::BTreeMap` where
/// insertion does not require mutable access
#[derive(Debug)]
pub struct FrozenBTreeMap<K, V>(RwLock<BTreeMap<K, V>>);

impl<K: Clone + Ord, V: StableDeref> FrozenBTreeMap<K, V> {
    pub const fn new() -> Self {
        Self(RwLock::new(BTreeMap::new()))
    }

    // these should never return &K or &V
    // these should never delete any entries

    /// Returns a reference to the value corresponding to the key.
    ///
    /// The key may be any borrowed form of the map's key type, but
    /// [`Ord`] on the borrowed form *must* match that of
    /// the key type.
    ///
    /// # Examples
    ///
    /// ```
    /// use elsa::sync::FrozenBTreeMap;
    ///
    /// let map = FrozenBTreeMap::new();
    /// map.insert(1, Box::new("a"));
    /// assert_eq!(map.get(&1), Some(&"a"));
    /// assert_eq!(map.get(&2), None);
    /// ```
    pub fn get<Q>(&self, k: &Q) -> Option<&V::Target>
    where
        K: Borrow<Q>,
        Q: Ord + ?Sized,
    {
        let map = self.0.read().unwrap();
        let ret = unsafe { map.get(k).map(|x| &*(&**x as *const V::Target)) };
        ret
    }

    /// Insert a new value into the map. Does nothing if the key is already occupied.
    ///
    /// # Examples
    ///
    /// ```
    /// use elsa::sync::FrozenBTreeMap;
    ///
    /// let map = FrozenBTreeMap::new();
    /// map.insert(1, Box::new("a"));
    /// assert_eq!(map.get(&1), Some(&"a"));
    /// ```
    pub fn insert(&self, k: K, v: V) -> &V::Target {
        let mut map = self.0.write().unwrap();
        let ret = unsafe {
            let inserted = &**map.entry(k).or_insert(v);
            &*(inserted as *const _)
        };
        ret
    }

    /// Applies a function to the owner of the value corresponding to the key (if any).
    ///
    /// The key may be any borrowed form of the map's key type, but
    /// [`Ord`] on the borrowed form *must* match that of
    /// the key type.
    ///
    /// # Examples
    ///
    /// ```
    /// use elsa::sync::FrozenBTreeMap;
    ///
    /// let map = FrozenBTreeMap::new();
    /// map.insert(1, Box::new("a"));
    /// assert_eq!(map.map_get(&1, Clone::clone), Some(Box::new("a")));
    /// assert_eq!(map.map_get(&2, Clone::clone), None);
    /// ```
    pub fn map_get<Q, T, F>(&self, k: &Q, f: F) -> Option<T>
    where
        K: Borrow<Q>,
        Q: Ord + ?Sized,
        F: FnOnce(&V) -> T,
    {
        let map = self.0.read().unwrap();
        let ret = map.get(k).map(f);
        ret
    }
}

impl<K, V> FrozenBTreeMap<K, V> {
    /// Collects the contents of this map into a vector of tuples.
    ///
    /// The order of the entries is as if iterating a [`BTreeMap`] (ordered by key).
    ///
    /// # Examples
    ///
    /// ```
    /// use elsa::sync::FrozenBTreeMap;
    ///
    /// let map = FrozenBTreeMap::new();
    /// map.insert(1, Box::new("a"));
    /// map.insert(2, Box::new("b"));
    /// let tuple_vec = map.into_tuple_vec();
    ///
    /// assert_eq!(tuple_vec, vec![(1, Box::new("a")), (2, Box::new("b"))]);
    /// ```
    pub fn into_tuple_vec(self) -> Vec<(K, V)> {
        self.0.into_inner().unwrap().into_iter().collect::<Vec<_>>()
    }

    /// # Examples
    ///
    /// ```
    /// use elsa::sync::FrozenBTreeMap;
    ///
    /// let map = FrozenBTreeMap::new();
    /// assert_eq!(map.len(), 0);
    /// map.insert(1, Box::new("a"));
    /// assert_eq!(map.len(), 1);
    /// ```
    pub fn len(&self) -> usize {
        let map = self.0.read().unwrap();
        map.len()
    }

    /// # Examples
    ///
    /// ```
    /// use elsa::sync::FrozenBTreeMap;
    ///
    /// let map = FrozenBTreeMap::new();
    /// assert_eq!(map.is_empty(), true);
    /// map.insert(1, Box::new("a"));
    /// assert_eq!(map.is_empty(), false);
    /// ```
    pub fn is_empty(&self) -> bool {
        let map = self.0.read().unwrap();
        map.is_empty()
    }
}

impl<K: Clone + Ord, V: StableDeref> From<BTreeMap<K, V>> for FrozenBTreeMap<K, V> {
    fn from(map: BTreeMap<K, V>) -> Self {
        Self(RwLock::new(map))
    }
}

impl<Q: ?Sized, K, V> Index<&Q> for FrozenBTreeMap<K, V>
where
    Q: Ord,
    K: Clone + Ord + Borrow<Q>,
    V: StableDeref,
{
    type Output = V::Target;

    /// # Examples
    ///
    /// ```
    /// use elsa::sync::FrozenBTreeMap;
    ///
    /// let map = FrozenBTreeMap::new();
    /// map.insert(1, Box::new("a"));
    /// assert_eq!(map[&1], "a");
    /// ```
    fn index(&self, idx: &Q) -> &V::Target {
        self.get(idx)
            .expect("attempted to index FrozenBTreeMap with unknown key")
    }
}

impl<K: Clone + Ord, V: StableDeref> FromIterator<(K, V)> for FrozenBTreeMap<K, V> {
    fn from_iter<T>(iter: T) -> Self
    where
        T: IntoIterator<Item = (K, V)>,
    {
        let map: BTreeMap<_, _> = iter.into_iter().collect();
        map.into()
    }
}

impl<K: Clone + Ord, V: StableDeref> Default for FrozenBTreeMap<K, V> {
    fn default() -> Self {
        Self::new()
    }
}

impl<K: Clone, V: Clone> Clone for FrozenBTreeMap<K, V> {
    fn clone(&self) -> Self {
        Self(self.0.read().unwrap().clone().into())
    }
}

impl<K: PartialEq, V: PartialEq> PartialEq for FrozenBTreeMap<K, V> {
    fn eq(&self, other: &Self) -> bool {
        let self_ref: &BTreeMap<K, V> = &self.0.read().unwrap();
        let other_ref: &BTreeMap<K, V> = &other.0.read().unwrap();
        self_ref == other_ref
    }
}