//===-- primary32.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_PRIMARY32_H_
#define SCUDO_PRIMARY32_H_

#include "allocator_common.h"
#include "bytemap.h"
#include "common.h"
#include "list.h"
#include "local_cache.h"
#include "options.h"
#include "release.h"
#include "report.h"
#include "stats.h"
#include "string_utils.h"
#include "thread_annotations.h"

namespace scudo {

// SizeClassAllocator32 is an allocator for 32 or 64-bit address space.
//
// It maps Regions of 2^RegionSizeLog bytes aligned on a 2^RegionSizeLog bytes
// boundary, and keeps a bytemap of the mappable address space to track the
// size class they are associated with.
//
// Mapped regions are split into equally sized Blocks according to the size
// class they belong to, and the associated pointers are shuffled to prevent
// any predictable address pattern (the predictability increases with the
// block size).
//
// Regions for size class 0 are special and used to hold TransferBatches, which
// are used to transfer arrays of pointers from the global size class freelist
// to the thread-specific freelist for said class, and back.
//
// Memory used by this allocator is never unmapped but can be partially
// reclaimed if the platform allows for it.

template <typename Config> class SizeClassAllocator32 {
public:
  typedef typename Config::CompactPtrT CompactPtrT;
  typedef typename Config::SizeClassMap SizeClassMap;
  static const uptr GroupSizeLog = Config::getGroupSizeLog();
  // The bytemap can only track UINT8_MAX - 1 classes.
  static_assert(SizeClassMap::LargestClassId <= (UINT8_MAX - 1), "");
  // Regions should be large enough to hold the largest Block.
  static_assert((1UL << Config::getRegionSizeLog()) >= SizeClassMap::MaxSize,
                "");
  typedef SizeClassAllocator32<Config> ThisT;
  typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
  typedef TransferBatch<ThisT> TransferBatchT;
  typedef BatchGroup<ThisT> BatchGroupT;

  static_assert(sizeof(BatchGroupT) <= sizeof(TransferBatchT),
                "BatchGroupT uses the same class size as TransferBatchT");

  static uptr getSizeByClassId(uptr ClassId) {
    return (ClassId == SizeClassMap::BatchClassId)
               ? sizeof(TransferBatchT)
               : SizeClassMap::getSizeByClassId(ClassId);
  }

  static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }

  void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
    if (SCUDO_FUCHSIA)
      reportError("SizeClassAllocator32 is not supported on Fuchsia");

    if (SCUDO_TRUSTY)
      reportError("SizeClassAllocator32 is not supported on Trusty");

    DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
    PossibleRegions.init();
    u32 Seed;
    const u64 Time = getMonotonicTimeFast();
    if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
      Seed = static_cast<u32>(
          Time ^ (reinterpret_cast<uptr>(SizeClassInfoArray) >> 6));
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      Sci->RandState = getRandomU32(&Seed);
      // Sci->MaxRegionIndex is already initialized to 0.
      Sci->MinRegionIndex = NumRegions;
      Sci->ReleaseInfo.LastReleaseAtNs = Time;
    }
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
  }

  void unmapTestOnly() {
    {
      ScopedLock L(RegionsStashMutex);
      while (NumberOfStashedRegions > 0) {
        unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
              RegionSize);
      }
    }

    uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      ScopedLock L(Sci->Mutex);
      if (Sci->MinRegionIndex < MinRegionIndex)
        MinRegionIndex = Sci->MinRegionIndex;
      if (Sci->MaxRegionIndex > MaxRegionIndex)
        MaxRegionIndex = Sci->MaxRegionIndex;
      *Sci = {};
    }

    ScopedLock L(ByteMapMutex);
    for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
      if (PossibleRegions[I])
        unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
    PossibleRegions.unmapTestOnly();
  }

  // When all blocks are freed, it has to be the same size as `AllocatedUser`.
  void verifyAllBlocksAreReleasedTestOnly() {
    // `BatchGroup` and `TransferBatch` also use the blocks from BatchClass.
    uptr BatchClassUsedInFreeLists = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      // We have to count BatchClassUsedInFreeLists in other regions first.
      if (I == SizeClassMap::BatchClassId)
        continue;
      SizeClassInfo *Sci = getSizeClassInfo(I);
      ScopedLock L1(Sci->Mutex);
      uptr TotalBlocks = 0;
      for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
        // `BG::Batches` are `TransferBatches`. +1 for `BatchGroup`.
        BatchClassUsedInFreeLists += BG.Batches.size() + 1;
        for (const auto &It : BG.Batches)
          TotalBlocks += It.getCount();
      }

      const uptr BlockSize = getSizeByClassId(I);
      DCHECK_EQ(TotalBlocks, Sci->AllocatedUser / BlockSize);
      DCHECK_EQ(Sci->FreeListInfo.PushedBlocks, Sci->FreeListInfo.PoppedBlocks);
    }

    SizeClassInfo *Sci = getSizeClassInfo(SizeClassMap::BatchClassId);
    ScopedLock L1(Sci->Mutex);
    uptr TotalBlocks = 0;
    for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
      if (LIKELY(!BG.Batches.empty())) {
        for (const auto &It : BG.Batches)
          TotalBlocks += It.getCount();
      } else {
        // A `BatchGroup` with an empty freelist doesn't have a `TransferBatch`
        // recording itself.
        ++TotalBlocks;
      }
    }

    const uptr BlockSize = getSizeByClassId(SizeClassMap::BatchClassId);
    DCHECK_EQ(TotalBlocks + BatchClassUsedInFreeLists,
              Sci->AllocatedUser / BlockSize);
    const uptr BlocksInUse =
        Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
    DCHECK_EQ(BlocksInUse, BatchClassUsedInFreeLists);
  }

  CompactPtrT compactPtr(UNUSED uptr ClassId, uptr Ptr) const {
    return static_cast<CompactPtrT>(Ptr);
  }

  void *decompactPtr(UNUSED uptr ClassId, CompactPtrT CompactPtr) const {
    return reinterpret_cast<void *>(static_cast<uptr>(CompactPtr));
  }

  uptr compactPtrGroupBase(CompactPtrT CompactPtr) {
    const uptr Mask = (static_cast<uptr>(1) << GroupSizeLog) - 1;
    return CompactPtr & ~Mask;
  }
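
  // Illustrative sketch (assumed values, not part of the allocator): with
  // GroupSizeLog == 20 a group spans 1 MiB and compactPtrGroupBase() simply
  // clears the low 20 bits. For SizeClassAllocator32 the compact pointer is
  // the address itself, so:
  //
  //   compactPtrGroupBase(0x12345678) == 0x12300000
  //   compactPtrGroupBase(0x123FFFFF) == 0x12300000   // same group
  //   compactPtrGroupBase(0x12400000) == 0x12400000   // next group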

  uptr decompactGroupBase(uptr CompactPtrGroupBase) {
    return CompactPtrGroupBase;
  }

  ALWAYS_INLINE static bool isSmallBlock(uptr BlockSize) {
    const uptr PageSize = getPageSizeCached();
    return BlockSize < PageSize / 16U;
  }

  ALWAYS_INLINE static bool isLargeBlock(uptr BlockSize) {
    const uptr PageSize = getPageSizeCached();
    return BlockSize > PageSize;
  }
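
  // Illustrative sketch (assumed 4096-byte pages): with the predicates above,
  // a "small" block is one smaller than 4096 / 16 = 256 bytes and a "large"
  // block is one strictly larger than a page:
  //
  //   isSmallBlock(64);    // true:  64 < 256
  //   isSmallBlock(512);   // false: 512 >= 256
  //   isLargeBlock(8192);  // true:  8192 > 4096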

  u16 popBlocks(CacheT *C, uptr ClassId, CompactPtrT *ToArray,
                const u16 MaxBlockCount) {
    DCHECK_LT(ClassId, NumClasses);
    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
    ScopedLock L(Sci->Mutex);

    u16 PopCount = popBlocksImpl(C, ClassId, Sci, ToArray, MaxBlockCount);
    if (UNLIKELY(PopCount == 0)) {
      if (UNLIKELY(!populateFreeList(C, ClassId, Sci)))
        return 0U;
      PopCount = popBlocksImpl(C, ClassId, Sci, ToArray, MaxBlockCount);
      DCHECK_NE(PopCount, 0U);
    }

    return PopCount;
  }

  // Push the array of free blocks to the designated batch group.
  void pushBlocks(CacheT *C, uptr ClassId, CompactPtrT *Array, u32 Size) {
    DCHECK_LT(ClassId, NumClasses);
    DCHECK_GT(Size, 0);

    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
    if (ClassId == SizeClassMap::BatchClassId) {
      ScopedLock L(Sci->Mutex);
      pushBatchClassBlocks(Sci, Array, Size);
      return;
    }

    // TODO(chiahungduan): Consider not doing grouping if the group size is not
    // greater than the block size with a certain scale.

    // Sort the blocks so that blocks belonging to the same group can be pushed
    // together.
    bool SameGroup = true;
    for (u32 I = 1; I < Size; ++I) {
      if (compactPtrGroupBase(Array[I - 1]) != compactPtrGroupBase(Array[I]))
        SameGroup = false;
      CompactPtrT Cur = Array[I];
      u32 J = I;
      while (J > 0 &&
             compactPtrGroupBase(Cur) < compactPtrGroupBase(Array[J - 1])) {
        Array[J] = Array[J - 1];
        --J;
      }
      Array[J] = Cur;
    }

    ScopedLock L(Sci->Mutex);
    pushBlocksImpl(C, ClassId, Sci, Array, Size, SameGroup);
  }
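
  // Illustrative sketch (assumed GroupSizeLog == 20, hypothetical addresses):
  // the insertion sort above orders blocks by group base so that blocks from
  // the same 1 MiB group end up adjacent. Pushing
  //
  //   Array = { 0x00212000, 0x00105000, 0x00213000, 0x00101000 }
  //
  // yields SameGroup == false and the sorted
  //
  //   Array = { 0x00105000, 0x00101000, 0x00212000, 0x00213000 }
  //
  // so pushBlocksImpl() can insert the two 0x00100000-group blocks and the two
  // 0x00200000-group blocks with a single freelist walk each.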

  void disable() NO_THREAD_SAFETY_ANALYSIS {
    // The BatchClassId must be locked last since other classes can use it.
    for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
      if (static_cast<uptr>(I) == SizeClassMap::BatchClassId)
        continue;
      getSizeClassInfo(static_cast<uptr>(I))->Mutex.lock();
    }
    getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.lock();
    RegionsStashMutex.lock();
    ByteMapMutex.lock();
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    ByteMapMutex.unlock();
    RegionsStashMutex.unlock();
    getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.unlock();
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      getSizeClassInfo(I)->Mutex.unlock();
    }
  }

  template <typename F> void iterateOverBlocks(F Callback) {
    uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      // TODO: The call of `iterateOverBlocks` requires disabling
      // SizeClassAllocator32. We may consider locking each region on demand
      // only.
      Sci->Mutex.assertHeld();
      if (Sci->MinRegionIndex < MinRegionIndex)
        MinRegionIndex = Sci->MinRegionIndex;
      if (Sci->MaxRegionIndex > MaxRegionIndex)
        MaxRegionIndex = Sci->MaxRegionIndex;
    }

    // SizeClassAllocator32 is disabled, i.e., ByteMapMutex is held.
    ByteMapMutex.assertHeld();

    for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) {
      if (PossibleRegions[I] &&
          (PossibleRegions[I] - 1U) != SizeClassMap::BatchClassId) {
        const uptr BlockSize = getSizeByClassId(PossibleRegions[I] - 1U);
        const uptr From = I * RegionSize;
        const uptr To = From + (RegionSize / BlockSize) * BlockSize;
        for (uptr Block = From; Block < To; Block += BlockSize)
          Callback(Block);
      }
    }
  }

  void getStats(ScopedString *Str) {
    // TODO(kostyak): get the RSS per region.
    uptr TotalMapped = 0;
    uptr PoppedBlocks = 0;
    uptr PushedBlocks = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      ScopedLock L(Sci->Mutex);
      TotalMapped += Sci->AllocatedUser;
      PoppedBlocks += Sci->FreeListInfo.PoppedBlocks;
      PushedBlocks += Sci->FreeListInfo.PushedBlocks;
    }
    Str->append("Stats: SizeClassAllocator32: %zuM mapped in %zu allocations; "
                "remains %zu\n",
                TotalMapped >> 20, PoppedBlocks, PoppedBlocks - PushedBlocks);
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      ScopedLock L(Sci->Mutex);
      getStats(Str, I, Sci);
    }
  }

  void getFragmentationInfo(ScopedString *Str) {
    Str->append(
        "Fragmentation Stats: SizeClassAllocator32: page size = %zu bytes\n",
        getPageSizeCached());

    for (uptr I = 1; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      ScopedLock L(Sci->Mutex);
      getSizeClassFragmentationInfo(Sci, I, Str);
    }
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::ReleaseInterval) {
      const s32 Interval = Max(
          Min(static_cast<s32>(Value), Config::getMaxReleaseToOsIntervalMs()),
          Config::getMinReleaseToOsIntervalMs());
      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
      return true;
    }
    // Not supported by the Primary, but not an error either.
    return true;
  }
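
  // Illustrative sketch (hypothetical config bounds): the requested release
  // interval is clamped to [getMinReleaseToOsIntervalMs(),
  // getMaxReleaseToOsIntervalMs()]. Assuming a config with a minimum of 0 ms
  // and a maximum of 1000 ms:
  //
  //   setOption(Option::ReleaseInterval, 5000);  // stores 1000
  //   setOption(Option::ReleaseInterval, -1);    // stores 0
  //   setOption(Option::ReleaseInterval, 250);   // stores 250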

  uptr tryReleaseToOS(uptr ClassId, ReleaseToOS ReleaseType) {
    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
    // TODO: Once we have separate locks like primary64, we may consider using
    // tryLock() as well.
    ScopedLock L(Sci->Mutex);
    return releaseToOSMaybe(Sci, ClassId, ReleaseType);
  }

  uptr releaseToOS(ReleaseToOS ReleaseType) {
    uptr TotalReleasedBytes = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      SizeClassInfo *Sci = getSizeClassInfo(I);
      ScopedLock L(Sci->Mutex);
      TotalReleasedBytes += releaseToOSMaybe(Sci, I, ReleaseType);
    }
    return TotalReleasedBytes;
  }

  const char *getRegionInfoArrayAddress() const { return nullptr; }
  static uptr getRegionInfoArraySize() { return 0; }

  static BlockInfo findNearestBlock(UNUSED const char *RegionInfoData,
                                    UNUSED uptr Ptr) {
    return {};
  }

  AtomicOptions Options;

private:
  static const uptr NumClasses = SizeClassMap::NumClasses;
  static const uptr RegionSize = 1UL << Config::getRegionSizeLog();
  static const uptr NumRegions =
      SCUDO_MMAP_RANGE_SIZE >> Config::getRegionSizeLog();
  static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;
  typedef FlatByteMap<NumRegions> ByteMap;

  struct ReleaseToOsInfo {
    uptr BytesInFreeListAtLastCheckpoint;
    uptr RangesReleased;
    uptr LastReleasedBytes;
    u64 LastReleaseAtNs;
  };

  struct BlocksInfo {
    SinglyLinkedList<BatchGroupT> BlockList = {};
    uptr PoppedBlocks = 0;
    uptr PushedBlocks = 0;
  };

  struct alignas(SCUDO_CACHE_LINE_SIZE) SizeClassInfo {
    HybridMutex Mutex;
    BlocksInfo FreeListInfo GUARDED_BY(Mutex);
    uptr CurrentRegion GUARDED_BY(Mutex);
    uptr CurrentRegionAllocated GUARDED_BY(Mutex);
    u32 RandState;
    uptr AllocatedUser GUARDED_BY(Mutex);
    // Lowest & highest region index allocated for this size class, to avoid
    // looping through the whole NumRegions.
    uptr MinRegionIndex GUARDED_BY(Mutex);
    uptr MaxRegionIndex GUARDED_BY(Mutex);
    ReleaseToOsInfo ReleaseInfo GUARDED_BY(Mutex);
  };
  static_assert(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");

  uptr computeRegionId(uptr Mem) {
    const uptr Id = Mem >> Config::getRegionSizeLog();
    CHECK_LT(Id, NumRegions);
    return Id;
  }

  uptr allocateRegionSlow() {
    uptr MapSize = 2 * RegionSize;
    const uptr MapBase = reinterpret_cast<uptr>(
        map(nullptr, MapSize, "scudo:primary", MAP_ALLOWNOMEM));
    if (!MapBase)
      return 0;
    const uptr MapEnd = MapBase + MapSize;
    uptr Region = MapBase;
    if (isAligned(Region, RegionSize)) {
      ScopedLock L(RegionsStashMutex);
      if (NumberOfStashedRegions < MaxStashedRegions)
        RegionsStash[NumberOfStashedRegions++] = MapBase + RegionSize;
      else
        MapSize = RegionSize;
    } else {
      Region = roundUp(MapBase, RegionSize);
      unmap(reinterpret_cast<void *>(MapBase), Region - MapBase);
      MapSize = RegionSize;
    }
    const uptr End = Region + MapSize;
    if (End != MapEnd)
      unmap(reinterpret_cast<void *>(End), MapEnd - End);

    DCHECK_EQ(Region % RegionSize, 0U);
    static_assert(Config::getRegionSizeLog() == GroupSizeLog,
                  "Memory group should be the same size as Region");

    return Region;
  }
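
  // Illustrative sketch (hypothetical addresses, assumed RegionSize == 1 MiB):
  // the slow path over-maps 2 * RegionSize so that an aligned region always
  // fits, then trims or stashes the surplus. If the kernel returns an
  // unaligned MapBase of 0x20080000:
  //
  //   Region = roundUp(0x20080000, 0x100000) == 0x20100000
  //   unmap(0x20080000, 0x00080000)   // leading slack before Region
  //   unmap(0x20200000, 0x00080000)   // trailing slack, since End != MapEnd
  //
  // If MapBase happens to be aligned, the second half is stashed in
  // RegionsStash for a future allocateRegion() call instead of being unmapped.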

  uptr allocateRegion(SizeClassInfo *Sci, uptr ClassId) REQUIRES(Sci->Mutex) {
    DCHECK_LT(ClassId, NumClasses);
    uptr Region = 0;
    {
      ScopedLock L(RegionsStashMutex);
      if (NumberOfStashedRegions > 0)
        Region = RegionsStash[--NumberOfStashedRegions];
    }
    if (!Region)
      Region = allocateRegionSlow();
    if (LIKELY(Region)) {
      // Sci->Mutex is held by the caller, updating the Min/Max is safe.
      const uptr RegionIndex = computeRegionId(Region);
      if (RegionIndex < Sci->MinRegionIndex)
        Sci->MinRegionIndex = RegionIndex;
      if (RegionIndex > Sci->MaxRegionIndex)
        Sci->MaxRegionIndex = RegionIndex;
      ScopedLock L(ByteMapMutex);
      PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId + 1U));
    }
    return Region;
  }

  SizeClassInfo *getSizeClassInfo(uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    return &SizeClassInfoArray[ClassId];
  }

  void pushBatchClassBlocks(SizeClassInfo *Sci, CompactPtrT *Array, u32 Size)
      REQUIRES(Sci->Mutex) {
    DCHECK_EQ(Sci, getSizeClassInfo(SizeClassMap::BatchClassId));

    // Free blocks are recorded by TransferBatch in the freelist for all
    // size classes. In addition, TransferBatch is allocated from BatchClassId.
    // In order not to use an additional block to record the free blocks in
    // BatchClassId, they are self-contained, i.e., a TransferBatch records the
    // block address of itself. See the figure below:
    //
    // TransferBatch at 0xABCD
    // +----------------------------+
    // | Free blocks' addr          |
    // | +------+------+------+     |
    // | |0xABCD|...   |...   |     |
    // | +------+------+------+     |
    // +----------------------------+
    //
    // When we allocate all the free blocks in the TransferBatch, the block
    // used by the TransferBatch is also free for use. We don't need to recycle
    // the TransferBatch. Note that the correctness is maintained by the
    // invariant:
    //
    //   Each popBlocks() request returns the entire TransferBatch. Returning
    //   part of the blocks in a TransferBatch is invalid.
    //
    // This ensures that a TransferBatch won't leak the address of itself while
    // it's still holding other valid data.
    //
    // Besides, BatchGroup is also allocated from BatchClassId and has its
    // address recorded in the TransferBatch too. To maintain the correctness:
    //
    //   The address of the BatchGroup is always recorded in the last
    //   TransferBatch in the freelist (which also implies that the freelist
    //   should only be updated with push_front). Once the last TransferBatch
    //   is popped, the block used by the BatchGroup is also free for use.
    //
    // With this approach, the blocks used by BatchGroup and TransferBatch are
    // reusable and don't need additional space for them.

    Sci->FreeListInfo.PushedBlocks += Size;
    BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();

    if (BG == nullptr) {
      // Construct `BatchGroup` on the last element.
      BG = reinterpret_cast<BatchGroupT *>(
          decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
      --Size;
      BG->Batches.clear();
      // BatchClass hasn't enabled memory group. Use `0` to indicate there's no
      // memory group here.
      BG->CompactPtrGroupBase = 0;
      // `BG` is also the block of BatchClassId. Note that this is different
      // from `CreateGroup` in `pushBlocksImpl`.
      BG->PushedBlocks = 1;
      BG->BytesInBGAtLastCheckpoint = 0;
      BG->MaxCachedPerBatch =
          CacheT::getMaxCached(getSizeByClassId(SizeClassMap::BatchClassId));

      Sci->FreeListInfo.BlockList.push_front(BG);
    }

    if (UNLIKELY(Size == 0))
      return;

    // This happens under 2 cases.
    // 1. Just allocated a new `BatchGroup`.
    // 2. Only 1 block is pushed when the freelist is empty.
    if (BG->Batches.empty()) {
      // Construct the `TransferBatch` on the last element.
      TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(
          decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
      TB->clear();
      // As mentioned above, addresses of `TransferBatch` and `BatchGroup` are
      // recorded in the TransferBatch.
      TB->add(Array[Size - 1]);
      TB->add(
          compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(BG)));
      --Size;
      DCHECK_EQ(BG->PushedBlocks, 1U);
      // `TB` is also the block of BatchClassId.
      BG->PushedBlocks += 1;
      BG->Batches.push_front(TB);
    }

    TransferBatchT *CurBatch = BG->Batches.front();
    DCHECK_NE(CurBatch, nullptr);

    for (u32 I = 0; I < Size;) {
      u16 UnusedSlots =
          static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
      if (UnusedSlots == 0) {
        CurBatch = reinterpret_cast<TransferBatchT *>(
            decompactPtr(SizeClassMap::BatchClassId, Array[I]));
        CurBatch->clear();
        // Self-contained
        CurBatch->add(Array[I]);
        ++I;
        // TODO(chiahungduan): Avoid the use of push_back() in `Batches` of
        // BatchClassId.
        BG->Batches.push_front(CurBatch);
        UnusedSlots = static_cast<u16>(BG->MaxCachedPerBatch - 1);
      }
      // `UnusedSlots` is u16 so the result will also fit in u16.
      const u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
      CurBatch->appendFromArray(&Array[I], AppendSize);
      I += AppendSize;
    }

    BG->PushedBlocks += Size;
  }
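
  // Illustrative walk-through (hypothetical compact pointers A, B and C) of
  // pushBatchClassBlocks() on an empty freelist with Array = {A, B, C}:
  //
  //   1. The BatchGroup is constructed in place on C (the last element).
  //   2. The first TransferBatch is constructed in place on B and records
  //      B (itself) and C (the BatchGroup block) as free blocks.
  //   3. A is appended to that TransferBatch.
  //
  // Resulting state: BlockList = [BG@C], BG.Batches = [TB@B holding {B, C, A}],
  // i.e. every pushed block, including the two backing BG and TB, remains
  // available as a free BatchClass block.
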
  // Push the blocks to their batch group. The layout will be like:
  //
  //   FreeListInfo.BlockList - > BG -> BG -> BG
  //                              |     |     |
  //                              v     v     v
  //                              TB    TB    TB
  //                              |
  //                              v
  //                              TB
  //
  // Each BatchGroup (BG) is associated with a unique group id and its free
  // blocks are managed by a list of TransferBatches (TB). To reduce the time
  // spent on inserting blocks, the BGs are kept sorted and the input `Array`
  // is expected to be sorted as well, so that maintaining the sorted property
  // is cheaper. `SameGroup=true` indicates that all blocks in the array are
  // from the same group, in which case we skip checking the group id of each
  // block.
  //
  // The region mutex needs to be held while calling this method.
  void pushBlocksImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci,
                      CompactPtrT *Array, u32 Size, bool SameGroup = false)
      REQUIRES(Sci->Mutex) {
    DCHECK_NE(ClassId, SizeClassMap::BatchClassId);
    DCHECK_GT(Size, 0U);

    auto CreateGroup = [&](uptr CompactPtrGroupBase) {
      BatchGroupT *BG =
          reinterpret_cast<BatchGroupT *>(C->getBatchClassBlock());
      BG->Batches.clear();
      TransferBatchT *TB =
          reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
      TB->clear();

      BG->CompactPtrGroupBase = CompactPtrGroupBase;
      BG->Batches.push_front(TB);
      BG->PushedBlocks = 0;
      BG->BytesInBGAtLastCheckpoint = 0;
      BG->MaxCachedPerBatch = TransferBatchT::MaxNumCached;

      return BG;
    };

    auto InsertBlocks = [&](BatchGroupT *BG, CompactPtrT *Array, u32 Size) {
      SinglyLinkedList<TransferBatchT> &Batches = BG->Batches;
      TransferBatchT *CurBatch = Batches.front();
      DCHECK_NE(CurBatch, nullptr);

      for (u32 I = 0; I < Size;) {
        DCHECK_GE(BG->MaxCachedPerBatch, CurBatch->getCount());
        u16 UnusedSlots =
            static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
        if (UnusedSlots == 0) {
          CurBatch =
              reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
          CurBatch->clear();
          Batches.push_front(CurBatch);
          UnusedSlots = BG->MaxCachedPerBatch;
        }
        // `UnusedSlots` is u16 so the result will also fit in u16.
        u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
        CurBatch->appendFromArray(&Array[I], AppendSize);
        I += AppendSize;
      }

      BG->PushedBlocks += Size;
    };

    Sci->FreeListInfo.PushedBlocks += Size;
    BatchGroupT *Cur = Sci->FreeListInfo.BlockList.front();

    // In the following, `Cur` always points to the BatchGroup for blocks that
    // will be pushed next. `Prev` is the element right before `Cur`.
    BatchGroupT *Prev = nullptr;

    while (Cur != nullptr &&
           compactPtrGroupBase(Array[0]) > Cur->CompactPtrGroupBase) {
      Prev = Cur;
      Cur = Cur->Next;
    }

    if (Cur == nullptr ||
        compactPtrGroupBase(Array[0]) != Cur->CompactPtrGroupBase) {
      Cur = CreateGroup(compactPtrGroupBase(Array[0]));
      if (Prev == nullptr)
        Sci->FreeListInfo.BlockList.push_front(Cur);
      else
        Sci->FreeListInfo.BlockList.insert(Prev, Cur);
    }

    // All the blocks are from the same group, just push without checking group
    // id.
    if (SameGroup) {
      for (u32 I = 0; I < Size; ++I)
        DCHECK_EQ(compactPtrGroupBase(Array[I]), Cur->CompactPtrGroupBase);

      InsertBlocks(Cur, Array, Size);
      return;
    }

    // The blocks are sorted by group id. Determine the segment of each group
    // and push the segment to its group together.
    u32 Count = 1;
    for (u32 I = 1; I < Size; ++I) {
      if (compactPtrGroupBase(Array[I - 1]) != compactPtrGroupBase(Array[I])) {
        DCHECK_EQ(compactPtrGroupBase(Array[I - 1]), Cur->CompactPtrGroupBase);
        InsertBlocks(Cur, Array + I - Count, Count);

        while (Cur != nullptr &&
               compactPtrGroupBase(Array[I]) > Cur->CompactPtrGroupBase) {
          Prev = Cur;
          Cur = Cur->Next;
        }

        if (Cur == nullptr ||
            compactPtrGroupBase(Array[I]) != Cur->CompactPtrGroupBase) {
          Cur = CreateGroup(compactPtrGroupBase(Array[I]));
          DCHECK_NE(Prev, nullptr);
          Sci->FreeListInfo.BlockList.insert(Prev, Cur);
        }

        Count = 1;
      } else {
        ++Count;
      }
    }

    InsertBlocks(Cur, Array + Size - Count, Count);
  }

  u16 popBlocksImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci,
                    CompactPtrT *ToArray, const u16 MaxBlockCount)
      REQUIRES(Sci->Mutex) {
    if (Sci->FreeListInfo.BlockList.empty())
      return 0U;

    SinglyLinkedList<TransferBatchT> &Batches =
        Sci->FreeListInfo.BlockList.front()->Batches;

    if (Batches.empty()) {
      DCHECK_EQ(ClassId, SizeClassMap::BatchClassId);
      BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
      Sci->FreeListInfo.BlockList.pop_front();

      // The block used by `BatchGroup` is from BatchClassId. Turn the block
      // into a `TransferBatch` with a single block.
      TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(BG);
      ToArray[0] =
          compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(TB));
      Sci->FreeListInfo.PoppedBlocks += 1;
      return 1U;
    }

    // So far, instead of always filling blocks up to `MaxBlockCount`, we only
    // examine a single `TransferBatch` to minimize the time spent in the
    // primary allocator. Besides, the sizes of `TransferBatch` and
    // `CacheT::getMaxCached()` may also impact the time spent on accessing the
    // primary allocator.
    // TODO(chiahungduan): Evaluate if we want to always prepare `MaxBlockCount`
    // blocks and/or adjust the size of `TransferBatch` according to
    // `CacheT::getMaxCached()`.
    TransferBatchT *B = Batches.front();
    DCHECK_NE(B, nullptr);
    DCHECK_GT(B->getCount(), 0U);

    // BatchClassId should always take all blocks in the TransferBatch. Read
    // the comment in `pushBatchClassBlocks()` for more details.
    const u16 PopCount = ClassId == SizeClassMap::BatchClassId
                             ? B->getCount()
                             : Min(MaxBlockCount, B->getCount());
    B->moveNToArray(ToArray, PopCount);

    // TODO(chiahungduan): The deallocation of unused BatchClassId blocks can be
    // done without holding `Mutex`.
    if (B->empty()) {
      Batches.pop_front();
      // A `TransferBatch` of BatchClassId is self-contained, no need to
      // deallocate. Read the comment in `pushBatchClassBlocks()` for more
      // details.
      if (ClassId != SizeClassMap::BatchClassId)
        C->deallocate(SizeClassMap::BatchClassId, B);

      if (Batches.empty()) {
        BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
        Sci->FreeListInfo.BlockList.pop_front();

        // We don't keep a BatchGroup with zero blocks to avoid empty-checking
        // while allocating. Note that the block used for constructing the
        // BatchGroup is recorded as a free block in the last element of
        // BatchGroup::Batches. Which means, once we pop the last TransferBatch,
        // the block is implicitly deallocated.
        if (ClassId != SizeClassMap::BatchClassId)
          C->deallocate(SizeClassMap::BatchClassId, BG);
      }
    }

    Sci->FreeListInfo.PoppedBlocks += PopCount;
    return PopCount;
  }

  NOINLINE bool populateFreeList(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
      REQUIRES(Sci->Mutex) {
    uptr Region;
    uptr Offset;
    // If the size class currently has a region associated to it, use it. The
    // newly created blocks will be located after the currently allocated
    // memory for that region (up to RegionSize). Otherwise, create a new
    // region, where the new blocks will be carved from the beginning.
    if (Sci->CurrentRegion) {
      Region = Sci->CurrentRegion;
      DCHECK_GT(Sci->CurrentRegionAllocated, 0U);
      Offset = Sci->CurrentRegionAllocated;
    } else {
      DCHECK_EQ(Sci->CurrentRegionAllocated, 0U);
      Region = allocateRegion(Sci, ClassId);
      if (UNLIKELY(!Region))
        return false;
      C->getStats().add(StatMapped, RegionSize);
      Sci->CurrentRegion = Region;
      Offset = 0;
    }

    const uptr Size = getSizeByClassId(ClassId);
    const u16 MaxCount = CacheT::getMaxCached(Size);
    DCHECK_GT(MaxCount, 0U);
    // The maximum number of blocks we should carve in the region is dictated
    // by the maximum number of batches we want to fill, and the amount of
    // memory left in the current region (we use the lowest of the two). This
    // will not be 0 as we ensure that a region can at least hold one block
    // (via static_assert and at the end of this function).
    const u32 NumberOfBlocks =
        Min(MaxNumBatches * MaxCount,
            static_cast<u32>((RegionSize - Offset) / Size));
    DCHECK_GT(NumberOfBlocks, 0U);

    constexpr u32 ShuffleArraySize =
        MaxNumBatches * TransferBatchT::MaxNumCached;
    // Fill the transfer batches and put them in the size class freelist. We
    // need to randomize the blocks for security purposes, so we first fill a
    // local array that we then shuffle before populating the batches.
    CompactPtrT ShuffleArray[ShuffleArraySize];
    DCHECK_LE(NumberOfBlocks, ShuffleArraySize);

    uptr P = Region + Offset;
    for (u32 I = 0; I < NumberOfBlocks; I++, P += Size)
      ShuffleArray[I] = reinterpret_cast<CompactPtrT>(P);

    if (ClassId != SizeClassMap::BatchClassId) {
      u32 N = 1;
      uptr CurGroup = compactPtrGroupBase(ShuffleArray[0]);
      for (u32 I = 1; I < NumberOfBlocks; I++) {
        if (UNLIKELY(compactPtrGroupBase(ShuffleArray[I]) != CurGroup)) {
          shuffle(ShuffleArray + I - N, N, &Sci->RandState);
          pushBlocksImpl(C, ClassId, Sci, ShuffleArray + I - N, N,
                         /*SameGroup=*/true);
          N = 1;
          CurGroup = compactPtrGroupBase(ShuffleArray[I]);
        } else {
          ++N;
        }
      }

      shuffle(ShuffleArray + NumberOfBlocks - N, N, &Sci->RandState);
      pushBlocksImpl(C, ClassId, Sci, &ShuffleArray[NumberOfBlocks - N], N,
                     /*SameGroup=*/true);
    } else {
      pushBatchClassBlocks(Sci, ShuffleArray, NumberOfBlocks);
    }

    // Note that `PushedBlocks` and `PoppedBlocks` are supposed to only record
    // the requests from `pushBlocks()` and `popBlocks()`, which are the
    // external interfaces. `populateFreeList()` is an internal interface, so
    // we deduct the blocks we just pushed to avoid skewing the stats.
    Sci->FreeListInfo.PushedBlocks -= NumberOfBlocks;

    const uptr AllocatedUser = Size * NumberOfBlocks;
    C->getStats().add(StatFree, AllocatedUser);
    DCHECK_LE(Sci->CurrentRegionAllocated + AllocatedUser, RegionSize);
    // If there is not enough room in the region currently associated to fit
    // more blocks, we deassociate the region by resetting CurrentRegion and
    // CurrentRegionAllocated. Otherwise, update the allocated amount.
    if (RegionSize - (Sci->CurrentRegionAllocated + AllocatedUser) < Size) {
      Sci->CurrentRegion = 0;
      Sci->CurrentRegionAllocated = 0;
    } else {
      Sci->CurrentRegionAllocated += AllocatedUser;
    }
    Sci->AllocatedUser += AllocatedUser;

    return true;
  }
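
  // Illustrative sizing example (hypothetical values, not from a particular
  // config): with RegionSize == 1 MiB, a 32-byte size class, MaxNumBatches ==
  // 8, and a hypothetical CacheT::getMaxCached(32) == 256, a call starting at
  // Offset == 0 carves
  //
  //   NumberOfBlocks = Min(8 * 256, (1 MiB - 0) / 32) = Min(2048, 32768)
  //                  = 2048 blocks (64 KiB of user memory),
  //
  // leaving the rest of the region for later populateFreeList() calls.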

  void getStats(ScopedString *Str, uptr ClassId, SizeClassInfo *Sci)
      REQUIRES(Sci->Mutex) {
    if (Sci->AllocatedUser == 0)
      return;
    const uptr BlockSize = getSizeByClassId(ClassId);
    const uptr InUse =
        Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
    const uptr BytesInFreeList = Sci->AllocatedUser - InUse * BlockSize;
    uptr PushedBytesDelta = 0;
    if (BytesInFreeList >= Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint) {
      PushedBytesDelta =
          BytesInFreeList - Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
    }
    const uptr AvailableChunks = Sci->AllocatedUser / BlockSize;
    Str->append("  %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
                "inuse: %6zu avail: %6zu releases: %6zu last released: %6zuK "
                "latest pushed bytes: %6zuK\n",
                ClassId, getSizeByClassId(ClassId), Sci->AllocatedUser >> 10,
                Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks,
                InUse, AvailableChunks, Sci->ReleaseInfo.RangesReleased,
                Sci->ReleaseInfo.LastReleasedBytes >> 10,
                PushedBytesDelta >> 10);
  }

  void getSizeClassFragmentationInfo(SizeClassInfo *Sci, uptr ClassId,
                                     ScopedString *Str) REQUIRES(Sci->Mutex) {
    const uptr BlockSize = getSizeByClassId(ClassId);
    const uptr First = Sci->MinRegionIndex;
    const uptr Last = Sci->MaxRegionIndex;
    const uptr Base = First * RegionSize;
    const uptr NumberOfRegions = Last - First + 1U;
    auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
      ScopedLock L(ByteMapMutex);
      return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
    };

    FragmentationRecorder Recorder;
    if (!Sci->FreeListInfo.BlockList.empty()) {
      PageReleaseContext Context =
          markFreeBlocks(Sci, ClassId, BlockSize, Base, NumberOfRegions,
                         ReleaseToOS::ForceAll);
      releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
    }

    const uptr PageSize = getPageSizeCached();
    const uptr TotalBlocks = Sci->AllocatedUser / BlockSize;
    const uptr InUseBlocks =
        Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
    uptr AllocatedPagesCount = 0;
    if (TotalBlocks != 0U) {
      for (uptr I = 0; I < NumberOfRegions; ++I) {
        if (SkipRegion(I))
          continue;
        AllocatedPagesCount += RegionSize / PageSize;
      }

      DCHECK_NE(AllocatedPagesCount, 0U);
    }

    DCHECK_GE(AllocatedPagesCount, Recorder.getReleasedPagesCount());
    const uptr InUsePages =
        AllocatedPagesCount - Recorder.getReleasedPagesCount();
    const uptr InUseBytes = InUsePages * PageSize;

    uptr Integral;
    uptr Fractional;
    computePercentage(BlockSize * InUseBlocks, InUsePages * PageSize, &Integral,
                      &Fractional);
    Str->append("  %02zu (%6zu): inuse/total blocks: %6zu/%6zu inuse/total "
                "pages: %6zu/%6zu inuse bytes: %6zuK util: %3zu.%02zu%%\n",
                ClassId, BlockSize, InUseBlocks, TotalBlocks, InUsePages,
                AllocatedPagesCount, InUseBytes >> 10, Integral, Fractional);
  }

  NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
                                 ReleaseToOS ReleaseType = ReleaseToOS::Normal)
      REQUIRES(Sci->Mutex) {
    const uptr BlockSize = getSizeByClassId(ClassId);

    DCHECK_GE(Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks);
    const uptr BytesInFreeList =
        Sci->AllocatedUser -
        (Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks) *
            BlockSize;

    if (UNLIKELY(BytesInFreeList == 0))
      return 0;

    // ==================================================================== //
    // 1. Check if we have enough free blocks and if it's worth doing a page
    //    release.
    // ==================================================================== //
    if (ReleaseType != ReleaseToOS::ForceAll &&
        !hasChanceToReleasePages(Sci, BlockSize, BytesInFreeList,
                                 ReleaseType)) {
      return 0;
    }

    const uptr First = Sci->MinRegionIndex;
    const uptr Last = Sci->MaxRegionIndex;
    DCHECK_NE(Last, 0U);
    DCHECK_LE(First, Last);
    uptr TotalReleasedBytes = 0;
    const uptr Base = First * RegionSize;
    const uptr NumberOfRegions = Last - First + 1U;

    // ==================================================================== //
    // 2. Mark the free blocks so that we can tell which pages are in-use by
    //    querying `PageReleaseContext`.
    // ==================================================================== //
    PageReleaseContext Context = markFreeBlocks(Sci, ClassId, BlockSize, Base,
                                                NumberOfRegions, ReleaseType);
    if (!Context.hasBlockMarked())
      return 0;

    // ==================================================================== //
    // 3. Release the unused physical pages back to the OS.
    // ==================================================================== //
    ReleaseRecorder Recorder(Base);
    auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
      ScopedLock L(ByteMapMutex);
      return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
    };
    releaseFreeMemoryToOS(Context, Recorder, SkipRegion);

    if (Recorder.getReleasedRangesCount() > 0) {
      Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
      Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
      Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
      TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
    }
    Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTimeFast();

    return TotalReleasedBytes;
  }

  bool hasChanceToReleasePages(SizeClassInfo *Sci, uptr BlockSize,
                               uptr BytesInFreeList, ReleaseToOS ReleaseType)
      REQUIRES(Sci->Mutex) {
    DCHECK_GE(Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks);
    const uptr PageSize = getPageSizeCached();

    if (BytesInFreeList <= Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint)
      Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;

    // Always update `BytesInFreeListAtLastCheckpoint` with the smallest value
    // so that we won't underestimate the releasable pages. For example, here
    // is the region usage:
    //
    //  BytesInFreeListAtLastCheckpoint      AllocatedUser
    //                v                            v
    //  |--------------------------------------------------->
    //         ^                         ^
    //  BytesInFreeList           ReleaseThreshold
    //
    // In general, if we have collected enough bytes and the amount of free
    // bytes meets the ReleaseThreshold, we will try to do the page release.
    // If we don't update `BytesInFreeListAtLastCheckpoint` when the current
    // `BytesInFreeList` is smaller, we may take a longer time to wait for
    // enough freed blocks because we miss the bytes between
    // (BytesInFreeListAtLastCheckpoint - BytesInFreeList).
    const uptr PushedBytesDelta =
        BytesInFreeList - Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
    if (PushedBytesDelta < PageSize)
      return false;

    // Releasing smaller blocks is expensive, so we want to make sure that a
    // significant amount of bytes are free, and that there has been a good
    // amount of batches pushed to the freelist before attempting to release.
    if (isSmallBlock(BlockSize) && ReleaseType == ReleaseToOS::Normal)
      if (PushedBytesDelta < Sci->AllocatedUser / 16U)
        return false;

    if (ReleaseType == ReleaseToOS::Normal) {
      const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
      if (IntervalMs < 0)
        return false;

      // The constant 8 here is selected from profiling some apps, where the
      // number of unreleased pages in the large size classes is around 16
      // pages or more. Choosing half of that as a heuristic also avoids
      // triggering a page release for every pushBlocks() attempt with large
      // blocks.
      const bool ByPassReleaseInterval =
          isLargeBlock(BlockSize) && PushedBytesDelta > 8 * PageSize;
      if (!ByPassReleaseInterval) {
        if (Sci->ReleaseInfo.LastReleaseAtNs +
                static_cast<u64>(IntervalMs) * 1000000 >
            getMonotonicTimeFast()) {
          // Memory was returned recently.
          return false;
        }
      }
    } // if (ReleaseType == ReleaseToOS::Normal)

    return true;
  }
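
  // Illustrative sketch (assumed 4096-byte pages, hypothetical counters): for
  // a small size class (say BlockSize == 64) with AllocatedUser == 1 MiB and
  // ReleaseType == Normal, a page release is only attempted once
  //
  //   PushedBytesDelta >= PageSize            (4096 bytes) and
  //   PushedBytesDelta >= AllocatedUser / 16  (64 KiB)
  //
  // have both accumulated since the last checkpoint, and the configured
  // release interval has elapsed (large size classes can bypass the interval
  // by pushing more than 8 * PageSize == 32 KiB at once).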

  PageReleaseContext markFreeBlocks(SizeClassInfo *Sci, const uptr ClassId,
                                    const uptr BlockSize, const uptr Base,
                                    const uptr NumberOfRegions,
                                    ReleaseToOS ReleaseType)
      REQUIRES(Sci->Mutex) {
    const uptr PageSize = getPageSizeCached();
    const uptr GroupSize = (1UL << GroupSizeLog);
    const uptr CurGroupBase =
        compactPtrGroupBase(compactPtr(ClassId, Sci->CurrentRegion));

    PageReleaseContext Context(BlockSize, NumberOfRegions,
                               /*ReleaseSize=*/RegionSize);

    auto DecompactPtr = [](CompactPtrT CompactPtr) {
      return reinterpret_cast<uptr>(CompactPtr);
    };
    for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
      const uptr GroupBase = decompactGroupBase(BG.CompactPtrGroupBase);
      // The `GroupSize` may not be divisible by `BlockSize`, which means there
      // is an unused space at the end of the Region. Exclude that space to
      // avoid unused page map entries.
      uptr AllocatedGroupSize = GroupBase == CurGroupBase
                                    ? Sci->CurrentRegionAllocated
                                    : roundDownSlow(GroupSize, BlockSize);
      if (AllocatedGroupSize == 0)
        continue;

      // TransferBatches are pushed in front of BG.Batches. The first one may
      // not be full.
      const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
                             BG.Batches.front()->getCount();
      const uptr BytesInBG = NumBlocks * BlockSize;

      if (ReleaseType != ReleaseToOS::ForceAll) {
        if (BytesInBG <= BG.BytesInBGAtLastCheckpoint) {
          BG.BytesInBGAtLastCheckpoint = BytesInBG;
          continue;
        }

        const uptr PushedBytesDelta = BytesInBG - BG.BytesInBGAtLastCheckpoint;
        if (PushedBytesDelta < PageSize)
          continue;

        // Given the randomness property, we try to release the pages only if
        // the bytes used by free blocks exceed a certain proportion of the
        // allocated space.
        if (isSmallBlock(BlockSize) && (BytesInBG * 100U) / AllocatedGroupSize <
                                           (100U - 1U - BlockSize / 16U)) {
          continue;
        }
      }

      // TODO: Consider updating this after page release if `ReleaseRecorder`
      // can tell the released bytes in each group.
      BG.BytesInBGAtLastCheckpoint = BytesInBG;

      const uptr MaxContainedBlocks = AllocatedGroupSize / BlockSize;
      const uptr RegionIndex = (GroupBase - Base) / RegionSize;

      if (NumBlocks == MaxContainedBlocks) {
        for (const auto &It : BG.Batches)
          for (u16 I = 0; I < It.getCount(); ++I)
            DCHECK_EQ(compactPtrGroupBase(It.get(I)), BG.CompactPtrGroupBase);

        const uptr To = GroupBase + AllocatedGroupSize;
        Context.markRangeAsAllCounted(GroupBase, To, GroupBase, RegionIndex,
                                      AllocatedGroupSize);
      } else {
        DCHECK_LT(NumBlocks, MaxContainedBlocks);

        // Note that we don't always visit blocks in each BatchGroup, so we
        // may miss the chance of releasing certain pages that cross
        // BatchGroups.
        Context.markFreeBlocksInRegion(BG.Batches, DecompactPtr, GroupBase,
                                       RegionIndex, AllocatedGroupSize,
                                       /*MayContainLastBlockInRegion=*/true);
      }

      // We may not be able to do the page release in the rare case that the
      // PageMap allocation fails.
      if (UNLIKELY(!Context.hasBlockMarked()))
        break;
    }

    return Context;
  }
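
  // Illustrative sketch (hypothetical numbers): for a small size class, the
  // proportion check above requires the free bytes in a group to reach roughly
  // (100 - 1 - BlockSize / 16)% of the allocated group size before the group
  // is considered for release. With BlockSize == 64 that threshold is
  // 100 - 1 - 4 = 95%, so a 1 MiB group is only scanned once at least about
  // 0.95 MiB of it sits in the freelist.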

  SizeClassInfo SizeClassInfoArray[NumClasses] = {};

  HybridMutex ByteMapMutex;
  // Track the regions in use, 0 is unused, otherwise store ClassId + 1.
  ByteMap PossibleRegions GUARDED_BY(ByteMapMutex) = {};
  atomic_s32 ReleaseToOsIntervalMs = {};
  // Unless several threads request regions simultaneously from different size
  // classes, the stash rarely contains more than 1 entry.
  static constexpr uptr MaxStashedRegions = 4;
  HybridMutex RegionsStashMutex;
  uptr NumberOfStashedRegions GUARDED_BY(RegionsStashMutex) = 0;
  uptr RegionsStash[MaxStashedRegions] GUARDED_BY(RegionsStashMutex) = {};
};

} // namespace scudo

#endif // SCUDO_PRIMARY32_H_
