//===-- sanitizer_allocator_local_cache.h -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// Cache used by SizeClassAllocator64.
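// Keeps per-size-class arrays of compact chunk references (CompactPtrT).
// Allocate() pops from the local array and Deallocate() pushes onto it; the
// shared allocator is only touched when a class runs empty (Refill) or fills
// up (DrainHalfMax), amortizing its locking over many requests.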
template <class SizeClassAllocator>
struct SizeClassAllocator64LocalCache {
  typedef SizeClassAllocator Allocator;
  typedef MemoryMapper<Allocator> MemoryMapperT;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0)) {
      if (UNLIKELY(!Refill(c, allocator, class_id)))
        return nullptr;
      DCHECK_GT(c->count, 0);
    }
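    // Pop the most recently cached chunk (LIFO) and expand its compact
    // representation back into a full pointer within the class's region.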
    CompactPtrT chunk = c->chunks[--c->count];
    stats_.Add(AllocatorStatAllocated, c->class_size);
    return reinterpret_cast<void *>(allocator->CompactPtrToPointer(
        allocator->GetRegionBeginBySizeClass(class_id), chunk));
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    PerClass *c = &per_class_[class_id];
    InitCache(c);
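    // A full cache returns half of max_count chunks to the shared allocator
    // before storing this one, so the next few frees stay on the fast path.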
    if (UNLIKELY(c->count == c->max_count))
      DrainHalfMax(c, allocator, class_id);
    CompactPtrT chunk = allocator->PointerToCompactPtr(
        allocator->GetRegionBeginBySizeClass(class_id),
        reinterpret_cast<uptr>(p));
    c->chunks[c->count++] = chunk;
    stats_.Sub(AllocatorStatAllocated, c->class_size);
  }

  void Drain(SizeClassAllocator *allocator) {
    MemoryMapperT memory_mapper(*allocator);
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      while (c->count > 0) Drain(&memory_mapper, c, allocator, i, c->count);
    }
  }

 private:
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  typedef typename Allocator::CompactPtrT CompactPtrT;

  struct PerClass {
    u32 count;
    u32 max_count;
    uptr class_size;
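    // Sized for the largest possible max_count (twice the per-class cached
    // hint). Refill() only fills half of max_count, leaving headroom for frees
    // before the next drain becomes necessary.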
    CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

  void InitCache(PerClass *c) {
    if (LIKELY(c->max_count))
      return;
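    // First use of this cache: initialize the limits for every class in one
    // pass, so later calls take the early return above.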
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      const uptr size = Allocator::ClassIdToSize(i);
      c->max_count = 2 * SizeClassMap::MaxCachedHint(size);
      c->class_size = size;
    }
    DCHECK_NE(c->max_count, 0UL);
  }

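  // Pulls max_count/2 chunks for class_id from the shared allocator into the
  // local array, so the cache is half full after a refill.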
  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
                       uptr class_id) {
    InitCache(c);
    const uptr num_requested_chunks = c->max_count / 2;
    if (UNLIKELY(!allocator->GetFromAllocator(&stats_, class_id, c->chunks,
                                              num_requested_chunks)))
      return false;
    c->count = num_requested_chunks;
    return true;
  }

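  // Slow path of Deallocate(): the cache is full, so return half of max_count
  // chunks to the shared allocator to make room.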
  NOINLINE void DrainHalfMax(PerClass *c, SizeClassAllocator *allocator,
                             uptr class_id) {
    MemoryMapperT memory_mapper(*allocator);
    Drain(&memory_mapper, c, allocator, class_id, c->max_count / 2);
  }

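  // Returns the last `count` cached chunks of class_id to the shared
  // allocator.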
  void Drain(MemoryMapperT *memory_mapper, PerClass *c,
             SizeClassAllocator *allocator, uptr class_id, uptr count) {
    CHECK_GE(c->count, count);
    const uptr first_idx_to_drain = c->count - count;
    c->count -= count;
    allocator->ReturnToAllocator(memory_mapper, &stats_, class_id,
                                 &c->chunks[first_idx_to_drain], count);
  }
};
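
// Typical per-thread usage (a hedged sketch, not part of this header; the
// names `PrimaryAllocator64`, `allocator`, `global_stats` and `size` are
// illustrative placeholders, assuming a fully configured SizeClassAllocator64
// instantiation):
//
//   SizeClassAllocator64LocalCache<PrimaryAllocator64> cache;
//   cache.Init(&global_stats);  // Register this cache's stats.
//   uptr class_id = PrimaryAllocator64::SizeClassMapT::ClassID(size);
//   void *p = cache.Allocate(&allocator, class_id);  // Fast path; refills
//                                                    // from `allocator` only
//                                                    // when the class is
//                                                    // empty.
//   cache.Deallocate(&allocator, class_id, p);       // Fast path; drains to
//                                                    // `allocator` when full.
//   cache.Destroy(&allocator, &global_stats);        // At thread shutdown.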

// Cache used by SizeClassAllocator32.
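// Same idea as the 64-bit cache above, but chunks are cached as raw pointers
// and are moved to and from the shared allocator packed into TransferBatch
// objects rather than as compact pointers.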
template <class SizeClassAllocator>
struct SizeClassAllocator32LocalCache {
  typedef SizeClassAllocator Allocator;
  typedef typename Allocator::TransferBatch TransferBatch;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  // Returns a TransferBatch suitable for class_id.
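  // If batches for class_id live in a separate size class, a fresh batch is
  // allocated from that class; otherwise the chunk `b` itself is reused as
  // in-place storage for the batch.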
  TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
                             TransferBatch *b) {
    if (uptr batch_class_id = per_class_[class_id].batch_class_id)
      return (TransferBatch*)Allocate(allocator, batch_class_id);
    return b;
  }

  // Destroys TransferBatch b.
  void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
                    TransferBatch *b) {
    if (uptr batch_class_id = per_class_[class_id].batch_class_id)
      Deallocate(allocator, batch_class_id, b);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0)) {
      if (UNLIKELY(!Refill(c, allocator, class_id)))
        return nullptr;
      DCHECK_GT(c->count, 0);
    }
    void *res = c->batch[--c->count];
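    // Prefetch the chunk that the next Allocate() of this class is likely to
    // return.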
    PREFETCH(c->batch[c->count - 1]);
    stats_.Add(AllocatorStatAllocated, c->class_size);
    return res;
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    PerClass *c = &per_class_[class_id];
    InitCache(c);
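    // A full cache packs half of its chunks into a TransferBatch and hands it
    // back to the shared allocator before storing this one.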
    if (UNLIKELY(c->count == c->max_count))
      Drain(c, allocator, class_id);
    c->batch[c->count++] = p;
    stats_.Sub(AllocatorStatAllocated, c->class_size);
  }

  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      while (c->count > 0)
        Drain(c, allocator, i);
    }
  }

 private:
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  static const uptr kBatchClassID = SizeClassMap::kBatchClassID;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  // If kUseSeparateSizeClassForBatch is true, all TransferBatch objects are
  // allocated from kBatchClassID size class (except for those that are needed
  // for kBatchClassID itself). The goal is to have TransferBatches in a totally
  // different region of RAM to improve security.
  static const bool kUseSeparateSizeClassForBatch =
      Allocator::kUseSeparateSizeClassForBatch;

  struct PerClass {
    uptr count;
    uptr max_count;
    uptr class_size;
    uptr batch_class_id;
    void *batch[2 * TransferBatch::kMaxNumCached];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

  void InitCache(PerClass *c) {
    if (LIKELY(c->max_count))
      return;
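    // First use of this cache: compute the limit and the batch storage
    // strategy for every class in one pass, so later calls take the early
    // return above.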
    const uptr batch_class_id = SizeClassMap::ClassID(sizeof(TransferBatch));
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      const uptr size = Allocator::ClassIdToSize(i);
      const uptr max_cached = TransferBatch::MaxCached(size);
      c->max_count = 2 * max_cached;
      c->class_size = size;
      // Precompute the class id to use to store batches for the current class
      // id. 0 means the class size is large enough to store a batch within one
      // of the chunks. If using a separate size class, it will always be
      // kBatchClassID, except for kBatchClassID itself.
      if (kUseSeparateSizeClassForBatch) {
        c->batch_class_id = (i == kBatchClassID) ? 0 : kBatchClassID;
      } else {
        c->batch_class_id = (size <
            TransferBatch::AllocationSizeRequiredForNElements(max_cached)) ?
            batch_class_id : 0;
      }
    }
    DCHECK_NE(c->max_count, 0UL);
  }

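  // Pulls one TransferBatch for class_id from the shared allocator, copies its
  // chunks into the local array, and releases the now-empty batch.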
  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
                       uptr class_id) {
    InitCache(c);
    TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
    if (UNLIKELY(!b))
      return false;
    CHECK_GT(b->Count(), 0);
    b->CopyToArray(c->batch);
    c->count = b->Count();
    DestroyBatch(class_id, allocator, b);
    return true;
  }

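  // Packs up to max_count/2 of the cached chunks into a TransferBatch (reusing
  // one of the drained chunks as the batch's own storage when batch_class_id
  // is 0) and returns the batch to the shared allocator.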
  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator,
                      uptr class_id) {
    const uptr count = Min(c->max_count / 2, c->count);
    const uptr first_idx_to_drain = c->count - count;
    TransferBatch *b = CreateBatch(
        class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
    // Failure to allocate a batch while releasing memory is non-recoverable.
    // TODO(alekseys): Figure out how to do it without allocating a new batch.
    if (UNLIKELY(!b)) {
      Report("FATAL: Internal error: %s's allocator failed to allocate a "
             "transfer batch.\n", SanitizerToolName);
      Die();
    }
    b->SetFromArray(&c->batch[first_idx_to_drain], count);
    c->count -= count;
    allocator->DeallocateBatch(&stats_, class_id, b);
  }
};
