1 | // Copyright 2014 The Servo Project Developers. See the COPYRIGHT |
2 | // file at the top-level directory of this distribution. |
3 | // |
4 | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or |
5 | // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license |
6 | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your |
7 | // option. This file may not be copied, modified, or distributed |
8 | // except according to those terms. |
9 | |
10 | use once_cell::sync::Lazy; |
11 | use parking_lot::Mutex; |
12 | use std::borrow::Cow; |
13 | use std::mem; |
14 | use std::ptr::NonNull; |
15 | use std::sync::atomic::AtomicIsize; |
16 | use std::sync::atomic::Ordering::SeqCst; |
17 | |
// Number of hash buckets. Must remain a power of two so that
// `hash & BUCKET_MASK` is equivalent to `hash % NB_BUCKETS`.
const NB_BUCKETS: usize = 1 << 12; // 4096
// Low-bit mask selecting a bucket index from a hash. Derived from
// NB_BUCKETS (rather than repeating the literal) so the two constants
// cannot drift apart.
const BUCKET_MASK: u32 = (NB_BUCKETS - 1) as u32;
20 | |
/// A set of interned strings, sharded into `NB_BUCKETS` independently
/// locked buckets. Each bucket holds a singly-linked list of `Entry`s;
/// the bucket for a string is chosen by `hash & BUCKET_MASK`.
pub(crate) struct Set {
    // One lock per bucket; `None` means the bucket's list is empty.
    buckets: Box<[Mutex<Option<Box<Entry>>>]>,
}
24 | |
/// One interned string, stored as a node in its bucket's linked list.
pub(crate) struct Entry {
    // The interned string data.
    pub(crate) string: Box<str>,
    // Cached hash of `string`; also selects the bucket via BUCKET_MASK.
    pub(crate) hash: u32,
    // Live reference count; `Set::remove` expects it to be zero
    // (debug-asserted there) before the entry is unlinked.
    pub(crate) ref_count: AtomicIsize,
    // Next node in the same bucket's singly-linked list.
    next_in_bucket: Option<Box<Entry>>,
}
31 | |
// Addresses are multiples of this,
// and therefore have TAG_MASK bits unset, available for tagging.
pub(crate) const ENTRY_ALIGNMENT: usize = 4;
35 | |
#[test]
fn entry_alignment_is_sufficient() {
    // The pointer-tagging scheme needs the low bits of every `Entry`
    // address to be free, which requires at least ENTRY_ALIGNMENT.
    let actual_alignment = mem::align_of::<Entry>();
    assert!(actual_alignment >= ENTRY_ALIGNMENT);
}
40 | |
41 | pub(crate) static DYNAMIC_SET: Lazy<Set> = Lazy::new(|| { |
42 | // NOTE: Using const initialization for buckets breaks the small-stack test. |
43 | // ``` |
44 | // // buckets: [Mutex<Option<Box<Entry>>>; NB_BUCKETS], |
45 | // const MUTEX: Mutex<Option<Box<Entry>>> = Mutex::new(None); |
46 | // let buckets = Box::new([MUTEX; NB_BUCKETS]); |
47 | // ``` |
48 | let buckets: Box<[Mutex>]> = (0..NB_BUCKETS).map(|_| Mutex::new(val:None)).collect(); |
49 | Set { buckets } |
50 | }); |
51 | |
impl Set {
    /// Interns `string` (with its precomputed `hash`), returning a pointer
    /// to the bucket entry. If the string is already present and safely
    /// referenced, its reference count is incremented; otherwise a fresh
    /// entry with `ref_count == 1` is prepended to the bucket's list.
    pub(crate) fn insert(&self, string: Cow<str>, hash: u32) -> NonNull<Entry> {
        let bucket_index = (hash & BUCKET_MASK) as usize;
        // Lock the bucket for the whole operation: lookup and (possible)
        // insertion must be atomic with respect to `remove`.
        let mut linked_list = self.buckets[bucket_index].lock();

        {
            // Walk the singly-linked list looking for an existing entry
            // with the same hash and identical contents.
            let mut ptr: Option<&mut Box<Entry>> = linked_list.as_mut();

            while let Some(entry) = ptr.take() {
                if entry.hash == hash && *entry.string == *string {
                    // fetch_add returns the PREVIOUS count; > 0 means the
                    // entry was still live, so reusing it is safe.
                    if entry.ref_count.fetch_add(1, SeqCst) > 0 {
                        return NonNull::from(&mut **entry);
                    }
                    // Uh-oh. The pointer's reference count was zero, which means someone may try
                    // to free it. (Naive attempts to defend against this, for example having the
                    // destructor check to see whether the reference count is indeed zero, don't
                    // work due to ABA.) Thus we need to temporarily add a duplicate string to the
                    // list.
                    entry.ref_count.fetch_sub(1, SeqCst);
                    break;
                }
                ptr = entry.next_in_bucket.as_mut();
            }
        }
        // Not found (or found but racing with deallocation): build a new
        // entry and prepend it at the head of the bucket.
        debug_assert!(mem::align_of::<Entry>() >= ENTRY_ALIGNMENT);
        let string = string.into_owned();
        let mut entry = Box::new(Entry {
            next_in_bucket: linked_list.take(),
            hash,
            ref_count: AtomicIsize::new(1),
            string: string.into_boxed_str(),
        });
        let ptr = NonNull::from(&mut *entry);
        *linked_list = Some(entry);
        ptr
    }

    /// Unlinks the entry at `ptr` from its bucket, dropping (and thereby
    /// freeing) its `Box`.
    ///
    /// Internal contract: `ptr` must point into this set (i.e. was
    /// returned by `insert`) and its reference count must already be zero
    /// (debug-asserted below). The bucket lock serializes this against
    /// concurrent `insert`s on the same bucket.
    pub(crate) fn remove(&self, ptr: *mut Entry) {
        let value: &Entry = unsafe { &*ptr };
        let bucket_index = (value.hash & BUCKET_MASK) as usize;

        let mut linked_list = self.buckets[bucket_index].lock();
        debug_assert!(value.ref_count.load(SeqCst) == 0);
        // `current` is the Option slot that owns the node we're inspecting,
        // starting at the bucket head.
        let mut current: &mut Option<Box<Entry>> = &mut linked_list;

        while let Some(entry_ptr) = current.as_mut() {
            let entry_ptr: *mut Entry = &mut **entry_ptr;
            // Compare by pointer identity, not contents: duplicate strings
            // may temporarily coexist in a bucket (see `insert`).
            if entry_ptr == ptr {
                // Splice the node out: replace the owning slot with the
                // node's tail, then drop the detached Box to free it.
                mem::drop(mem::replace(current, unsafe {
                    (*entry_ptr).next_in_bucket.take()
                }));
                break;
            }
            current = unsafe { &mut (*entry_ptr).next_in_bucket };
        }
    }
}
109 | |