//===-- sanitizer_ring_buffer.h ---------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Simple ring buffer.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_RING_BUFFER_H
#define SANITIZER_RING_BUFFER_H

#include "sanitizer_common.h"

namespace __sanitizer {
// RingBuffer<T>: fixed-size ring buffer optimized for speed of push().
// T should be a POD type and sizeof(T) should be divisible by sizeof(void*).
// At creation, all elements are zero.
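//
// A usage sketch (illustrative element type and size):
//   RingBuffer<uptr> *RB = RingBuffer<uptr>::New(1024);  // 1024 elements
//   RB->push(0x42);          // the newest value is then (*RB)[0]
//   uptr Latest = (*RB)[0];
//   RB->Delete();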
template<class T>
class RingBuffer {
 public:
  COMPILER_CHECK(sizeof(T) % sizeof(void *) == 0);
  static RingBuffer *New(uptr Size) {
    void *Ptr = MmapOrDie(SizeInBytes(Size), "RingBuffer");
    RingBuffer *RB = reinterpret_cast<RingBuffer*>(Ptr);
    uptr End = reinterpret_cast<uptr>(Ptr) + SizeInBytes(Size);
    RB->last_ = RB->next_ = reinterpret_cast<T*>(End - sizeof(T));
    return RB;
  }
  void Delete() {
    UnmapOrDie(this, SizeInBytes(size()));
  }
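  // Number of data elements. data_ begins 2 * sizeof(T *) bytes past `this`
  // (right after last_ and next_), and last_ points at the final element, so
  // the count is last_ - data_ + 1.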
  uptr size() const {
    return last_ + 1 -
           reinterpret_cast<T *>(reinterpret_cast<uptr>(this) +
                                 2 * sizeof(T *));
  }

  static uptr SizeInBytes(uptr Size) {
    return Size * sizeof(T) + 2 * sizeof(T*);
  }

  uptr SizeInBytes() { return SizeInBytes(size()); }

  void push(T t) {
    *next_ = t;
    next_--;
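    // next_ walks downwards through data_; once it steps below data_[0] it
    // lands at or below the address of the next_ field itself, which is the
    // wrap condition checked below.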
    static_assert((sizeof(T) % sizeof(T *)) == 0,
                  "The condition below works only if sizeof(T) is divisible "
                  "by sizeof(T*).");
    if (next_ <= reinterpret_cast<T*>(&next_))
      next_ = last_;
  }
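
  // Returns the Idx-th most recently pushed element; (*this)[0] is the value
  // most recently written by push().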
  T operator[](uptr Idx) const {
    CHECK_LT(Idx, size());
    sptr IdxNext = Idx + 1;
    if (IdxNext > last_ - next_)
      IdxNext -= size();
    return next_[IdxNext];
  }

 private:
  RingBuffer() {}
  ~RingBuffer() {}
  RingBuffer(const RingBuffer&) = delete;

  // Data layout:
  // LNDDDDDDDD
  // D: data elements.
  // L: last_, always points to the last data element.
  // N: next_, initially equal to last_, is decremented on every push and
  //    wraps around once it is less than or equal to its own address.
  T *last_;
  T *next_;
  T data_[1];  // flexible array.
};

// A ring buffer with externally provided storage that encodes its state in 8
// bytes. Has significant constraints on size and alignment of storage.
// See a comment in hwasan/hwasan_thread_list.h for the motivation behind this.
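//
// A usage sketch (illustrative only; the real callers live in hwasan):
//   // `Storage` must be `Size` bytes long, with `Size` a power of two in
//   // [4 KiB, 512 KiB], and aligned to 2 * Size, as enforced by Init().
//   CompactRingBuffer<uptr> CRB(Storage, Size);
//   CRB.push(0x42);          // the newest value is then CRB[0]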
#if SANITIZER_WORDSIZE == 64
template <class T>
class CompactRingBuffer {
  // Top byte of long_ stores the buffer size in pages.
  // Lower bytes store the address of the next buffer element.
  static constexpr int kPageSizeBits = 12;
  static constexpr int kSizeShift = 56;
  static constexpr int kSizeBits = 64 - kSizeShift;
  static constexpr uptr kNextMask = (1ULL << kSizeShift) - 1;
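  // Encoding example (illustrative numbers): a 64 KiB buffer is 16 pages, so
  // the top byte of long_ holds 16 and the low 56 bits hold the address of
  // the next element to be written.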

  uptr GetStorageSize() const { return (long_ >> kSizeShift) << kPageSizeBits; }

  static uptr SignExtend(uptr x) { return ((sptr)x) << kSizeBits >> kSizeBits; }

  void Init(void *storage, uptr size) {
    CHECK_EQ(sizeof(CompactRingBuffer<T>), sizeof(void *));
    CHECK(IsPowerOfTwo(size));
    CHECK_GE(size, 1 << kPageSizeBits);
    CHECK_LE(size, 128 << kPageSizeBits);
    CHECK_EQ(size % 4096, 0);
    CHECK_EQ(size % sizeof(T), 0);
    uptr st = (uptr)storage;
    CHECK_EQ(st % (size * 2), 0);
    CHECK_EQ(st, SignExtend(st & kNextMask));
    long_ = (st & kNextMask) | ((size >> kPageSizeBits) << kSizeShift);
  }

  void SetNext(const T *next) {
    long_ = (long_ & ~kNextMask) | ((uptr)next & kNextMask);
  }

 public:
  CompactRingBuffer(void *storage, uptr size) {
    Init(storage, size);
  }

  // A copy constructor of sorts.
  CompactRingBuffer(const CompactRingBuffer &other, void *storage) {
    uptr size = other.GetStorageSize();
    internal_memcpy(storage, other.StartOfStorage(), size);
    Init(storage, size);
    uptr Idx = other.Next() - (const T *)other.StartOfStorage();
    SetNext((const T *)storage + Idx);
  }

  T *Next() const { return (T *)(SignExtend(long_ & kNextMask)); }
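
  // The storage is a power-of-two size and at least size-aligned (Init()
  // checks alignment to 2 * size), and Next() always stays inside it, so
  // clearing the low bits of Next() recovers the start of the storage.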
  void *StartOfStorage() const {
    return (void *)((uptr)Next() & ~(GetStorageSize() - 1));
  }

  void *EndOfStorage() const {
    return (void *)((uptr)StartOfStorage() + GetStorageSize());
  }

  uptr size() const { return GetStorageSize() / sizeof(T); }

  void push(T t) {
    T *next = Next();
    *next = t;
    next++;
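    // The storage is aligned to 2 * size (a power of two), so within
    // [start, start + size] only the one-past-the-end address has the `size`
    // bit set; clearing that bit wraps next back to the start.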
    next = (T *)((uptr)next & ~GetStorageSize());
    SetNext(next);
  }
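
  // Returns the Idx-th most recently pushed element; (*this)[0] is the value
  // most recently written by push().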
  const T &operator[](uptr Idx) const {
    CHECK_LT(Idx, size());
    const T *Begin = (const T *)StartOfStorage();
    sptr StorageIdx = Next() - Begin;
    StorageIdx -= (sptr)(Idx + 1);
    if (StorageIdx < 0)
      StorageIdx += size();
    return Begin[StorageIdx];
  }

 public:
  ~CompactRingBuffer() {}
  CompactRingBuffer(const CompactRingBuffer &) = delete;

  uptr long_;
};
#endif
}  // namespace __sanitizer

#endif  // SANITIZER_RING_BUFFER_H