1 | //===-- primary32.h ---------------------------------------------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | |
9 | #ifndef SCUDO_PRIMARY32_H_ |
10 | #define SCUDO_PRIMARY32_H_ |
11 | |
12 | #include "allocator_common.h" |
13 | #include "bytemap.h" |
14 | #include "common.h" |
15 | #include "list.h" |
16 | #include "options.h" |
17 | #include "release.h" |
18 | #include "report.h" |
19 | #include "size_class_allocator.h" |
20 | #include "stats.h" |
21 | #include "string_utils.h" |
22 | #include "thread_annotations.h" |
23 | |
24 | namespace scudo { |
25 | |
26 | // SizeClassAllocator32 is an allocator for 32- or 64-bit address spaces. |
27 | // |
28 | // It maps Regions of 2^RegionSizeLog bytes, aligned on a 2^RegionSizeLog-byte |
29 | // boundary, and keeps a bytemap of the mappable address space to track the size |
30 | // class each region is associated with. |
31 | // |
32 | // Mapped regions are split into equally sized Blocks according to the size |
33 | // class they belong to, and the associated pointers are shuffled to prevent any |
34 | // predictable address pattern (the predictability increases with the block |
35 | // size). |
36 | // |
37 | // Regions for size class 0 are special and used to hold TransferBatches, which |
38 | // allow transferring arrays of pointers from the global size class freelist to |
39 | // the thread-specific freelist for said class, and back. |
40 | // |
41 | // Memory used by this allocator is never unmapped but can be partially |
42 | // reclaimed if the platform allows for it. |
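//
// For illustration only (these numbers assume a hypothetical configuration,
// e.g. RegionSizeLog == GroupSizeLog == 18, and are not part of the
// interface): each Region/group then spans 2^18 = 256 KiB, a 32-bit address
// space of 2^32 bytes yields 2^32 >> 18 = 16384 possible regions for the
// bytemap to track, and a region holds 256 KiB / BlockSize blocks of its
// assigned size class (e.g. 8192 blocks of 32 bytes).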
43 | |
44 | template <typename Config> class SizeClassAllocator32 { |
45 | public: |
46 | typedef typename Config::CompactPtrT CompactPtrT; |
47 | typedef typename Config::SizeClassMap SizeClassMap; |
48 | static const uptr GroupSizeLog = Config::getGroupSizeLog(); |
49 | // The bytemap can only track UINT8_MAX - 1 classes. |
50 | static_assert(SizeClassMap::LargestClassId <= (UINT8_MAX - 1), ""); |
51 | // Regions should be large enough to hold the largest Block. |
52 | static_assert((1UL << Config::getRegionSizeLog()) >= SizeClassMap::MaxSize, |
53 | ""); |
54 | typedef SizeClassAllocator32<Config> ThisT; |
55 | using SizeClassAllocatorT = |
56 | typename Conditional<Config::getEnableBlockCache(), |
57 | SizeClassAllocatorLocalCache<ThisT>, |
58 | SizeClassAllocatorNoCache<ThisT>>::type; |
59 | typedef TransferBatch<ThisT> TransferBatchT; |
60 | typedef BatchGroup<ThisT> BatchGroupT; |
61 | |
62 | static uptr getSizeByClassId(uptr ClassId) { |
63 | return (ClassId == SizeClassMap::BatchClassId) |
64 | ? Max(sizeof(BatchGroupT), sizeof(TransferBatchT)) |
65 | : SizeClassMap::getSizeByClassId(ClassId); |
66 | } |
67 | |
68 | static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; } |
69 | |
70 | void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS { |
71 | if (SCUDO_FUCHSIA) |
72 | reportError("SizeClassAllocator32 is not supported on Fuchsia"); |
73 | |
74 | if (SCUDO_TRUSTY) |
75 | reportError("SizeClassAllocator32 is not supported on Trusty"); |
76 | |
77 | DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT))); |
78 | PossibleRegions.init(); |
79 | u32 Seed; |
80 | const u64 Time = getMonotonicTimeFast(); |
81 | if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed))) |
82 | Seed = static_cast<u32>( |
83 | Time ^ (reinterpret_cast<uptr>(SizeClassInfoArray) >> 6)); |
84 | for (uptr I = 0; I < NumClasses; I++) { |
85 | SizeClassInfo *Sci = getSizeClassInfo(I); |
86 | Sci->RandState = getRandomU32(&Seed); |
87 | // Sci->MaxRegionIndex is already initialized to 0. |
88 | Sci->MinRegionIndex = NumRegions; |
89 | Sci->ReleaseInfo.LastReleaseAtNs = Time; |
90 | } |
91 | |
92 | // The default value in the primary config takes priority over the argument. |
93 | if (Config::getDefaultReleaseToOsIntervalMs() != INT32_MIN) |
94 | ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs(); |
95 | setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval)); |
96 | } |
97 | |
98 | void unmapTestOnly() { |
99 | { |
100 | ScopedLock L(RegionsStashMutex); |
101 | while (NumberOfStashedRegions > 0) { |
102 | unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]), |
103 | RegionSize); |
104 | } |
105 | } |
106 | |
107 | uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0; |
108 | for (uptr I = 0; I < NumClasses; I++) { |
109 | SizeClassInfo *Sci = getSizeClassInfo(I); |
110 | ScopedLock L(Sci->Mutex); |
111 | if (Sci->MinRegionIndex < MinRegionIndex) |
112 | MinRegionIndex = Sci->MinRegionIndex; |
113 | if (Sci->MaxRegionIndex > MaxRegionIndex) |
114 | MaxRegionIndex = Sci->MaxRegionIndex; |
115 | *Sci = {}; |
116 | } |
117 | |
118 | ScopedLock L(ByteMapMutex); |
119 | for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) |
120 | if (PossibleRegions[I]) |
121 | unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize); |
122 | PossibleRegions.unmapTestOnly(); |
123 | } |
124 | |
125 | // When all blocks are freed, their total size has to equal `AllocatedUser`. |
126 | void verifyAllBlocksAreReleasedTestOnly() { |
127 | // `BatchGroup` and `TransferBatch` also use the blocks from BatchClass. |
128 | uptr BatchClassUsedInFreeLists = 0; |
129 | for (uptr I = 0; I < NumClasses; I++) { |
130 | // We have to count BatchClassUsedInFreeLists in other regions first. |
131 | if (I == SizeClassMap::BatchClassId) |
132 | continue; |
133 | SizeClassInfo *Sci = getSizeClassInfo(I); |
134 | ScopedLock L1(Sci->Mutex); |
135 | uptr TotalBlocks = 0; |
136 | for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) { |
137 | // `BG::Batches` are `TransferBatches`. +1 for `BatchGroup`. |
138 | BatchClassUsedInFreeLists += BG.Batches.size() + 1; |
139 | for (const auto &It : BG.Batches) |
140 | TotalBlocks += It.getCount(); |
141 | } |
142 | |
143 | const uptr BlockSize = getSizeByClassId(I); |
144 | DCHECK_EQ(TotalBlocks, Sci->AllocatedUser / BlockSize); |
145 | DCHECK_EQ(Sci->FreeListInfo.PushedBlocks, Sci->FreeListInfo.PoppedBlocks); |
146 | } |
147 | |
148 | SizeClassInfo *Sci = getSizeClassInfo(SizeClassMap::BatchClassId); |
149 | ScopedLock L1(Sci->Mutex); |
150 | uptr TotalBlocks = 0; |
151 | for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) { |
152 | if (LIKELY(!BG.Batches.empty())) { |
153 | for (const auto &It : BG.Batches) |
154 | TotalBlocks += It.getCount(); |
155 | } else { |
156 | // A `BatchGroup` with an empty freelist doesn't have a `TransferBatch` |
157 | // recording its own block, so count that block here. |
158 | ++TotalBlocks; |
159 | } |
160 | } |
161 | |
162 | const uptr BlockSize = getSizeByClassId(SizeClassMap::BatchClassId); |
163 | DCHECK_EQ(TotalBlocks + BatchClassUsedInFreeLists, |
164 | Sci->AllocatedUser / BlockSize); |
165 | const uptr BlocksInUse = |
166 | Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks; |
167 | DCHECK_EQ(BlocksInUse, BatchClassUsedInFreeLists); |
168 | } |
169 | |
170 | CompactPtrT compactPtr(UNUSED uptr ClassId, uptr Ptr) const { |
171 | return static_cast<CompactPtrT>(Ptr); |
172 | } |
173 | |
174 | void *decompactPtr(UNUSED uptr ClassId, CompactPtrT CompactPtr) const { |
175 | return reinterpret_cast<void *>(static_cast<uptr>(CompactPtr)); |
176 | } |
177 | |
178 | uptr compactPtrGroupBase(CompactPtrT CompactPtr) { |
179 | const uptr Mask = (static_cast<uptr>(1) << GroupSizeLog) - 1; |
180 | return CompactPtr & ~Mask; |
181 | } |
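// A minimal worked example of the masking in compactPtrGroupBase(), assuming
// GroupSizeLog == 18 (a hypothetical value): Mask == 0x3FFFF, so a compact
// pointer 0x4A1234 yields 0x4A1234 & ~0x3FFFF == 0x480000, the base of its
// 256 KiB group.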
182 | |
183 | uptr decompactGroupBase(uptr CompactPtrGroupBase) { |
184 | return CompactPtrGroupBase; |
185 | } |
186 | |
187 | ALWAYS_INLINE static bool isSmallBlock(uptr BlockSize) { |
188 | const uptr PageSize = getPageSizeCached(); |
189 | return BlockSize < PageSize / 16U; |
190 | } |
191 | |
192 | ALWAYS_INLINE static bool isLargeBlock(uptr BlockSize) { |
193 | const uptr PageSize = getPageSizeCached(); |
194 | return BlockSize > PageSize; |
195 | } |
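// For example (assuming the common 4096-byte page size): blocks smaller than
// 4096 / 16 = 256 bytes are "small" and blocks larger than 4096 bytes are
// "large"; everything in between falls in neither category and gets the
// default release heuristics.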
196 | |
197 | u16 popBlocks(SizeClassAllocatorT *SizeClassAllocator, uptr ClassId, |
198 | CompactPtrT *ToArray, const u16 MaxBlockCount) { |
199 | DCHECK_LT(ClassId, NumClasses); |
200 | SizeClassInfo *Sci = getSizeClassInfo(ClassId); |
201 | ScopedLock L(Sci->Mutex); |
202 | |
203 | u16 PopCount = |
204 | popBlocksImpl(SizeClassAllocator, ClassId, Sci, ToArray, MaxBlockCount); |
205 | if (UNLIKELY(PopCount == 0)) { |
206 | if (UNLIKELY(!populateFreeList(SizeClassAllocator, ClassId, Sci))) |
207 | return 0U; |
208 | PopCount = popBlocksImpl(SizeClassAllocator, ClassId, Sci, ToArray, |
209 | MaxBlockCount); |
210 | DCHECK_NE(PopCount, 0U); |
211 | } |
212 | |
213 | return PopCount; |
214 | } |
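// A hedged usage sketch of popBlocks(); it is typically driven by the
// per-thread cache, and the names below are illustrative only:
//
//   CompactPtrT Blocks[MaxBlockCount];
//   const u16 N = Primary->popBlocks(Allocator, ClassId, Blocks, MaxBlockCount);
//   // N == 0 only if the region could not be grown (OOM); otherwise
//   // 1 <= N <= MaxBlockCount compact pointers were returned.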
215 | |
216 | // Push the array of free blocks to the designated batch group. |
217 | void pushBlocks(SizeClassAllocatorT *SizeClassAllocator, uptr ClassId, |
218 | CompactPtrT *Array, u32 Size) { |
219 | DCHECK_LT(ClassId, NumClasses); |
220 | DCHECK_GT(Size, 0); |
221 | |
222 | SizeClassInfo *Sci = getSizeClassInfo(ClassId); |
223 | if (ClassId == SizeClassMap::BatchClassId) { |
224 | ScopedLock L(Sci->Mutex); |
225 | pushBatchClassBlocks(Sci, Array, Size); |
226 | return; |
227 | } |
228 | |
229 | // TODO(chiahungduan): Consider not doing grouping if the group size is not |
230 | // greater than the block size with a certain scale. |
231 | |
232 | // Sort the blocks so that blocks belonging to the same group can be pushed |
233 | // together. |
234 | bool SameGroup = true; |
235 | for (u32 I = 1; I < Size; ++I) { |
236 | if (compactPtrGroupBase(Array[I - 1]) != compactPtrGroupBase(Array[I])) |
237 | SameGroup = false; |
238 | CompactPtrT Cur = Array[I]; |
239 | u32 J = I; |
240 | while (J > 0 && |
241 | compactPtrGroupBase(Cur) < compactPtrGroupBase(Array[J - 1])) { |
242 | Array[J] = Array[J - 1]; |
243 | --J; |
244 | } |
245 | Array[J] = Cur; |
246 | } |
247 | |
248 | ScopedLock L(Sci->Mutex); |
249 | pushBlocksImpl(SizeClassAllocator, ClassId, Sci, Array, Size, SameGroup); |
250 | } |
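// For illustration: the insertion sort in pushBlocks() orders blocks by group
// base, so an input with group bases (B, A, B, A) becomes (A, A, B, B) and
// SameGroup stays false; pushBlocksImpl() can then insert each run of equal
// group bases into its BatchGroup in a single pass over the freelist.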
251 | |
252 | void disable() NO_THREAD_SAFETY_ANALYSIS { |
253 | // The BatchClassId must be locked last since other classes can use it. |
254 | for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) { |
255 | if (static_cast<uptr>(I) == SizeClassMap::BatchClassId) |
256 | continue; |
257 | getSizeClassInfo(static_cast<uptr>(I))->Mutex.lock(); |
258 | } |
259 | getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.lock(); |
260 | RegionsStashMutex.lock(); |
261 | ByteMapMutex.lock(); |
262 | } |
263 | |
264 | void enable() NO_THREAD_SAFETY_ANALYSIS { |
265 | ByteMapMutex.unlock(); |
266 | RegionsStashMutex.unlock(); |
267 | getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.unlock(); |
268 | for (uptr I = 0; I < NumClasses; I++) { |
269 | if (I == SizeClassMap::BatchClassId) |
270 | continue; |
271 | getSizeClassInfo(I)->Mutex.unlock(); |
272 | } |
273 | } |
274 | |
275 | template <typename F> void iterateOverBlocks(F Callback) { |
276 | uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0; |
277 | for (uptr I = 0; I < NumClasses; I++) { |
278 | SizeClassInfo *Sci = getSizeClassInfo(I); |
279 | // TODO: The call of `iterateOverBlocks` requires disabling |
280 | // SizeClassAllocator32. We may consider locking each region on demand |
281 | // only. |
282 | Sci->Mutex.assertHeld(); |
283 | if (Sci->MinRegionIndex < MinRegionIndex) |
284 | MinRegionIndex = Sci->MinRegionIndex; |
285 | if (Sci->MaxRegionIndex > MaxRegionIndex) |
286 | MaxRegionIndex = Sci->MaxRegionIndex; |
287 | } |
288 | |
289 | // SizeClassAllocator32 is disabled, i.e., ByteMapMutex is held. |
290 | ByteMapMutex.assertHeld(); |
291 | |
292 | for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) { |
293 | if (PossibleRegions[I] && |
294 | (PossibleRegions[I] - 1U) != SizeClassMap::BatchClassId) { |
295 | const uptr BlockSize = getSizeByClassId(PossibleRegions[I] - 1U); |
296 | const uptr From = I * RegionSize; |
297 | const uptr To = From + (RegionSize / BlockSize) * BlockSize; |
298 | for (uptr Block = From; Block < To; Block += BlockSize) |
299 | Callback(Block); |
300 | } |
301 | } |
302 | } |
303 | |
304 | void getStats(ScopedString *Str) { |
305 | // TODO(kostyak): get the RSS per region. |
306 | uptr TotalMapped = 0; |
307 | uptr PoppedBlocks = 0; |
308 | uptr PushedBlocks = 0; |
309 | for (uptr I = 0; I < NumClasses; I++) { |
310 | SizeClassInfo *Sci = getSizeClassInfo(I); |
311 | ScopedLock L(Sci->Mutex); |
312 | TotalMapped += Sci->AllocatedUser; |
313 | PoppedBlocks += Sci->FreeListInfo.PoppedBlocks; |
314 | PushedBlocks += Sci->FreeListInfo.PushedBlocks; |
315 | } |
316 | Str->append("Stats: SizeClassAllocator32: %zuM mapped in %zu allocations; " |
317 | "remains %zu\n", |
318 | TotalMapped >> 20, PoppedBlocks, PoppedBlocks - PushedBlocks); |
319 | for (uptr I = 0; I < NumClasses; I++) { |
320 | SizeClassInfo *Sci = getSizeClassInfo(I); |
321 | ScopedLock L(Sci->Mutex); |
322 | getStats(Str, I, Sci); |
323 | } |
324 | } |
325 | |
326 | void getFragmentationInfo(ScopedString *Str) { |
327 | Str->append( |
328 | "Fragmentation Stats: SizeClassAllocator32: page size = %zu bytes\n", |
329 | getPageSizeCached()); |
330 | |
331 | for (uptr I = 1; I < NumClasses; I++) { |
332 | SizeClassInfo *Sci = getSizeClassInfo(I); |
333 | ScopedLock L(Sci->Mutex); |
334 | getSizeClassFragmentationInfo(Sci, I, Str); |
335 | } |
336 | } |
337 | |
338 | void getMemoryGroupFragmentationInfo(ScopedString *Str) { |
339 | // Each region is also a memory group because region size is the same as |
340 | // group size. |
341 | getFragmentationInfo(Str); |
342 | } |
343 | |
344 | bool setOption(Option O, sptr Value) { |
345 | if (O == Option::ReleaseInterval) { |
346 | const s32 Interval = Max( |
347 | Min(static_cast<s32>(Value), Config::getMaxReleaseToOsIntervalMs()), |
348 | Config::getMinReleaseToOsIntervalMs()); |
349 | atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval); |
350 | return true; |
351 | } |
352 | // Not supported by the Primary, but not an error either. |
353 | return true; |
354 | } |
355 | |
356 | uptr tryReleaseToOS(uptr ClassId, ReleaseToOS ReleaseType) { |
357 | SizeClassInfo *Sci = getSizeClassInfo(ClassId); |
358 | // TODO: Once we have separate locks like primary64, we may consider using |
359 | // tryLock() as well. |
360 | ScopedLock L(Sci->Mutex); |
361 | return releaseToOSMaybe(Sci, ClassId, ReleaseType); |
362 | } |
363 | |
364 | uptr releaseToOS(ReleaseToOS ReleaseType) { |
365 | uptr TotalReleasedBytes = 0; |
366 | for (uptr I = 0; I < NumClasses; I++) { |
367 | if (I == SizeClassMap::BatchClassId) |
368 | continue; |
369 | SizeClassInfo *Sci = getSizeClassInfo(I); |
370 | ScopedLock L(Sci->Mutex); |
371 | TotalReleasedBytes += releaseToOSMaybe(Sci, I, ReleaseType); |
372 | } |
373 | return TotalReleasedBytes; |
374 | } |
375 | |
376 | const char *getRegionInfoArrayAddress() const { return nullptr; } |
377 | static uptr getRegionInfoArraySize() { return 0; } |
378 | |
379 | static BlockInfo findNearestBlock(UNUSED const char *RegionInfoData, |
380 | UNUSED uptr Ptr) { |
381 | return {}; |
382 | } |
383 | |
384 | AtomicOptions Options; |
385 | |
386 | private: |
387 | static const uptr NumClasses = SizeClassMap::NumClasses; |
388 | static const uptr RegionSize = 1UL << Config::getRegionSizeLog(); |
389 | static const uptr NumRegions = SCUDO_MMAP_RANGE_SIZE >> |
390 | Config::getRegionSizeLog(); |
391 | static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U; |
392 | typedef FlatByteMap<NumRegions> ByteMap; |
393 | |
394 | struct ReleaseToOsInfo { |
395 | uptr BytesInFreeListAtLastCheckpoint; |
396 | uptr NumReleasesAttempted; |
397 | uptr LastReleasedBytes; |
398 | u64 LastReleaseAtNs; |
399 | }; |
400 | |
401 | struct BlocksInfo { |
402 | SinglyLinkedList<BatchGroupT> BlockList = {}; |
403 | uptr PoppedBlocks = 0; |
404 | uptr PushedBlocks = 0; |
405 | }; |
406 | |
407 | struct alignas(SCUDO_CACHE_LINE_SIZE) SizeClassInfo { |
408 | HybridMutex Mutex; |
409 | BlocksInfo FreeListInfo GUARDED_BY(Mutex); |
410 | uptr CurrentRegion GUARDED_BY(Mutex); |
411 | uptr CurrentRegionAllocated GUARDED_BY(Mutex); |
412 | u32 RandState; |
413 | uptr AllocatedUser GUARDED_BY(Mutex); |
414 | // Lowest & highest region index allocated for this size class, to avoid |
415 | // looping through the whole NumRegions. |
416 | uptr MinRegionIndex GUARDED_BY(Mutex); |
417 | uptr MaxRegionIndex GUARDED_BY(Mutex); |
418 | ReleaseToOsInfo ReleaseInfo GUARDED_BY(Mutex); |
419 | }; |
420 | static_assert(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0, ""); |
421 | |
422 | uptr computeRegionId(uptr Mem) { |
423 | const uptr Id = Mem >> Config::getRegionSizeLog(); |
424 | CHECK_LT(Id, NumRegions); |
425 | return Id; |
426 | } |
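// Example for computeRegionId(), with a hypothetical RegionSizeLog == 18: an
// address 0x4A1234 maps to region id 0x4A1234 >> 18 == 18, i.e. the region
// covering [18 * 256 KiB, 19 * 256 KiB).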
427 | |
428 | uptr allocateRegionSlow() { |
429 | uptr MapSize = 2 * RegionSize; |
430 | const uptr MapBase = reinterpret_cast<uptr>( |
431 | map(nullptr, MapSize, "scudo:primary", MAP_ALLOWNOMEM)); |
432 | if (!MapBase) |
433 | return 0; |
434 | const uptr MapEnd = MapBase + MapSize; |
435 | uptr Region = MapBase; |
436 | if (isAligned(Region, RegionSize)) { |
437 | ScopedLock L(RegionsStashMutex); |
438 | if (NumberOfStashedRegions < MaxStashedRegions) |
439 | RegionsStash[NumberOfStashedRegions++] = MapBase + RegionSize; |
440 | else |
441 | MapSize = RegionSize; |
442 | } else { |
443 | Region = roundUp(MapBase, RegionSize); |
444 | unmap(reinterpret_cast<void *>(MapBase), Region - MapBase); |
445 | MapSize = RegionSize; |
446 | } |
447 | const uptr End = Region + MapSize; |
448 | if (End != MapEnd) |
449 | unmap(reinterpret_cast<void *>(End), MapEnd - End); |
450 | |
451 | DCHECK_EQ(Region % RegionSize, 0U); |
452 | static_assert(Config::getRegionSizeLog() == GroupSizeLog, |
453 | "Memory group should be the same size as Region"); |
454 | |
455 | return Region; |
456 | } |
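// Sketch of the trimming logic in allocateRegionSlow() with hypothetical
// numbers (RegionSize == 256 KiB): mapping 512 KiB may return
// MapBase == 0x7F0A0000, which is not 256 KiB-aligned, so Region is rounded
// up to 0x7F0C0000, the 0x20000 bytes below it are unmapped, and the tail
// above Region + 256 KiB is unmapped as well. If MapBase happens to be
// aligned, the second 256 KiB is kept in the stash for a later
// allocateRegion() call instead.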
457 | |
458 | uptr allocateRegion(SizeClassInfo *Sci, uptr ClassId) REQUIRES(Sci->Mutex) { |
459 | DCHECK_LT(ClassId, NumClasses); |
460 | uptr Region = 0; |
461 | { |
462 | ScopedLock L(RegionsStashMutex); |
463 | if (NumberOfStashedRegions > 0) |
464 | Region = RegionsStash[--NumberOfStashedRegions]; |
465 | } |
466 | if (!Region) |
467 | Region = allocateRegionSlow(); |
468 | if (LIKELY(Region)) { |
469 | // Sci->Mutex is held by the caller, updating the Min/Max is safe. |
470 | const uptr RegionIndex = computeRegionId(Region); |
471 | if (RegionIndex < Sci->MinRegionIndex) |
472 | Sci->MinRegionIndex = RegionIndex; |
473 | if (RegionIndex > Sci->MaxRegionIndex) |
474 | Sci->MaxRegionIndex = RegionIndex; |
475 | ScopedLock L(ByteMapMutex); |
476 | PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId + 1U)); |
477 | } |
478 | return Region; |
479 | } |
480 | |
481 | SizeClassInfo *getSizeClassInfo(uptr ClassId) { |
482 | DCHECK_LT(ClassId, NumClasses); |
483 | return &SizeClassInfoArray[ClassId]; |
484 | } |
485 | |
486 | void pushBatchClassBlocks(SizeClassInfo *Sci, CompactPtrT *Array, u32 Size) |
487 | REQUIRES(Sci->Mutex) { |
488 | DCHECK_EQ(Sci, getSizeClassInfo(SizeClassMap::BatchClassId)); |
489 | |
490 | // Free blocks are recorded by TransferBatches in the freelists of all |
491 | // size classes. In addition, TransferBatches are allocated from BatchClassId. |
492 | // To avoid using an additional block to record the free blocks of |
493 | // BatchClassId, they are self-contained, i.e., a TransferBatch records its |
494 | // own block address. See the figure below: |
495 | // |
496 | // TransferBatch at 0xABCD |
497 | // +----------------------------+ |
498 | // | Free blocks' addr | |
499 | // | +------+------+------+ | |
500 | // | |0xABCD|... |... | | |
501 | // | +------+------+------+ | |
502 | // +----------------------------+ |
503 | // |
504 | // When we allocate all the free blocks in the TransferBatch, the block used |
505 | // by TransferBatch is also free for use. We don't need to recycle the |
506 | // TransferBatch. Note that the correctness is maintained by the invariant, |
507 | // |
508 | // Each popBlocks() request returns the entire TransferBatch. Returning |
509 | // part of the blocks in a TransferBatch is invalid. |
510 | // |
511 | // This ensures that TransferBatch won't leak the address itself while it's |
512 | // still holding other valid data. |
513 | // |
514 | // Besides, BatchGroup is also allocated from BatchClassId and has its |
515 | // address recorded in the TransferBatch too. To maintain the correctness, |
516 | // |
517 | // The address of BatchGroup is always recorded in the last TransferBatch |
518 | // in the freelist (also imply that the freelist should only be |
519 | // updated with push_front). Once the last TransferBatch is popped, |
520 | // the block used by BatchGroup is also free for use. |
521 | // |
522 | // With this approach, the blocks used by BatchGroup and TransferBatch are |
523 | // reusable and don't need additional space for them. |
524 | |
525 | Sci->FreeListInfo.PushedBlocks += Size; |
526 | BatchGroupT *BG = Sci->FreeListInfo.BlockList.front(); |
527 | |
528 | if (BG == nullptr) { |
529 | // Construct `BatchGroup` on the last element. |
530 | BG = reinterpret_cast<BatchGroupT *>( |
531 | decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1])); |
532 | --Size; |
533 | BG->Batches.clear(); |
534 | // BatchClass hasn't enabled memory group. Use `0` to indicate there's no |
535 | // memory group here. |
536 | BG->CompactPtrGroupBase = 0; |
537 | BG->BytesInBGAtLastCheckpoint = 0; |
538 | BG->MaxCachedPerBatch = SizeClassAllocatorT::getMaxCached( |
539 | getSizeByClassId(SizeClassMap::BatchClassId)); |
540 | |
541 | Sci->FreeListInfo.BlockList.push_front(BG); |
542 | } |
543 | |
544 | if (UNLIKELY(Size == 0)) |
545 | return; |
546 | |
547 | // This happens in two cases: |
548 | // 1. A new `BatchGroup` was just allocated. |
549 | // 2. Only one block was pushed while the freelist was empty. |
550 | if (BG->Batches.empty()) { |
551 | // Construct the `TransferBatch` on the last element. |
552 | TransferBatchT *TB = reinterpret_cast<TransferBatchT *>( |
553 | decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1])); |
554 | TB->clear(); |
555 | // As mentioned above, addresses of `TransferBatch` and `BatchGroup` are |
556 | // recorded in the TransferBatch. |
557 | TB->add(Array[Size - 1]); |
558 | TB->add( |
559 | compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(BG))); |
560 | --Size; |
561 | BG->Batches.push_front(TB); |
562 | } |
563 | |
564 | TransferBatchT *CurBatch = BG->Batches.front(); |
565 | DCHECK_NE(CurBatch, nullptr); |
566 | |
567 | for (u32 I = 0; I < Size;) { |
568 | u16 UnusedSlots = |
569 | static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount()); |
570 | if (UnusedSlots == 0) { |
571 | CurBatch = reinterpret_cast<TransferBatchT *>( |
572 | decompactPtr(SizeClassMap::BatchClassId, Array[I])); |
573 | CurBatch->clear(); |
574 | // Self-contained |
575 | CurBatch->add(Array[I]); |
576 | ++I; |
577 | // TODO(chiahungduan): Avoid the use of push_back() in `Batches` of |
578 | // BatchClassId. |
579 | BG->Batches.push_front(CurBatch); |
580 | UnusedSlots = static_cast<u16>(BG->MaxCachedPerBatch - 1); |
581 | } |
582 | // `UnusedSlots` is u16 so the result will also fit in u16. |
583 | const u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I)); |
584 | CurBatch->appendFromArray(&Array[I], AppendSize); |
585 | I += AppendSize; |
586 | } |
587 | } |
588 | // Push the blocks to their batch group. The layout will be like, |
589 | // |
590 | // FreeListInfo.BlockList -> BG -> BG -> BG |
591 | // | | | |
592 | // v v v |
593 | // TB TB TB |
594 | // | |
595 | // v |
596 | // TB |
597 | // |
598 | // Each BatchGroup (BG) is associated with a unique group id and its free |
599 | // blocks are managed by a list of TransferBatches (TB). To reduce insertion |
600 | // time, the BGs are kept sorted and the input `Array` is expected to be |
601 | // sorted as well, which makes maintaining the sorted property cheap. |
602 | // Use `SameGroup=true` to indicate that all blocks in the array are from the |
603 | // same group; we then skip checking the group id of each block. |
604 | // |
605 | // The region mutex needs to be held while calling this method. |
606 | void pushBlocksImpl(SizeClassAllocatorT *SizeClassAllocator, uptr ClassId, |
607 | SizeClassInfo *Sci, CompactPtrT *Array, u32 Size, |
608 | bool SameGroup = false) REQUIRES(Sci->Mutex) { |
609 | DCHECK_NE(ClassId, SizeClassMap::BatchClassId); |
610 | DCHECK_GT(Size, 0U); |
611 | |
612 | auto CreateGroup = [&](uptr CompactPtrGroupBase) { |
613 | BatchGroupT *BG = reinterpret_cast<BatchGroupT *>( |
614 | SizeClassAllocator->getBatchClassBlock()); |
615 | BG->Batches.clear(); |
616 | TransferBatchT *TB = reinterpret_cast<TransferBatchT *>( |
617 | SizeClassAllocator->getBatchClassBlock()); |
618 | TB->clear(); |
619 | |
620 | BG->CompactPtrGroupBase = CompactPtrGroupBase; |
621 | BG->Batches.push_front(TB); |
622 | BG->BytesInBGAtLastCheckpoint = 0; |
623 | BG->MaxCachedPerBatch = TransferBatchT::MaxNumCached; |
624 | |
625 | return BG; |
626 | }; |
627 | |
628 | auto InsertBlocks = [&](BatchGroupT *BG, CompactPtrT *Array, u32 Size) { |
629 | SinglyLinkedList<TransferBatchT> &Batches = BG->Batches; |
630 | TransferBatchT *CurBatch = Batches.front(); |
631 | DCHECK_NE(CurBatch, nullptr); |
632 | |
633 | for (u32 I = 0; I < Size;) { |
634 | DCHECK_GE(BG->MaxCachedPerBatch, CurBatch->getCount()); |
635 | u16 UnusedSlots = |
636 | static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount()); |
637 | if (UnusedSlots == 0) { |
638 | CurBatch = reinterpret_cast<TransferBatchT *>( |
639 | SizeClassAllocator->getBatchClassBlock()); |
640 | CurBatch->clear(); |
641 | Batches.push_front(CurBatch); |
642 | UnusedSlots = BG->MaxCachedPerBatch; |
643 | } |
644 | // `UnusedSlots` is u16 so the result will also fit in u16. |
645 | u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I)); |
646 | CurBatch->appendFromArray(&Array[I], AppendSize); |
647 | I += AppendSize; |
648 | } |
649 | }; |
650 | |
651 | Sci->FreeListInfo.PushedBlocks += Size; |
652 | BatchGroupT *Cur = Sci->FreeListInfo.BlockList.front(); |
653 | |
654 | // In the following, `Cur` always points to the BatchGroup for blocks that |
655 | // will be pushed next. `Prev` is the element right before `Cur`. |
656 | BatchGroupT *Prev = nullptr; |
657 | |
658 | while (Cur != nullptr && |
659 | compactPtrGroupBase(Array[0]) > Cur->CompactPtrGroupBase) { |
660 | Prev = Cur; |
661 | Cur = Cur->Next; |
662 | } |
663 | |
664 | if (Cur == nullptr || |
665 | compactPtrGroupBase(Array[0]) != Cur->CompactPtrGroupBase) { |
666 | Cur = CreateGroup(compactPtrGroupBase(Array[0])); |
667 | if (Prev == nullptr) |
668 | Sci->FreeListInfo.BlockList.push_front(Cur); |
669 | else |
670 | Sci->FreeListInfo.BlockList.insert(Prev, Cur); |
671 | } |
672 | |
673 | // All the blocks are from the same group, just push without checking group |
674 | // id. |
675 | if (SameGroup) { |
676 | for (u32 I = 0; I < Size; ++I) |
677 | DCHECK_EQ(compactPtrGroupBase(Array[I]), Cur->CompactPtrGroupBase); |
678 | |
679 | InsertBlocks(Cur, Array, Size); |
680 | return; |
681 | } |
682 | |
683 | // The blocks are sorted by group id. Determine each group's segment and |
684 | // push its blocks to the group together. |
685 | u32 Count = 1; |
686 | for (u32 I = 1; I < Size; ++I) { |
687 | if (compactPtrGroupBase(Array[I - 1]) != compactPtrGroupBase(Array[I])) { |
688 | DCHECK_EQ(compactPtrGroupBase(Array[I - 1]), Cur->CompactPtrGroupBase); |
689 | InsertBlocks(Cur, Array + I - Count, Count); |
690 | |
691 | while (Cur != nullptr && |
692 | compactPtrGroupBase(Array[I]) > Cur->CompactPtrGroupBase) { |
693 | Prev = Cur; |
694 | Cur = Cur->Next; |
695 | } |
696 | |
697 | if (Cur == nullptr || |
698 | compactPtrGroupBase(Array[I]) != Cur->CompactPtrGroupBase) { |
699 | Cur = CreateGroup(compactPtrGroupBase(Array[I])); |
700 | DCHECK_NE(Prev, nullptr); |
701 | Sci->FreeListInfo.BlockList.insert(Prev, Cur); |
702 | } |
703 | |
704 | Count = 1; |
705 | } else { |
706 | ++Count; |
707 | } |
708 | } |
709 | |
710 | InsertBlocks(Cur, Array + Size - Count, Count); |
711 | } |
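// Worked example of the segmenting loop in pushBlocksImpl() (group bases are
// illustrative): for a sorted Array with group bases (A, A, B, B, B, C) and
// SameGroup == false, the loop pushes the two A blocks into BG(A), the three
// B blocks into BG(B), and the final InsertBlocks() call after the loop
// pushes the single C block into BG(C), creating any of those BatchGroups
// that don't exist yet.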
712 | |
713 | u16 popBlocksImpl(SizeClassAllocatorT *SizeClassAllocator, uptr ClassId, |
714 | SizeClassInfo *Sci, CompactPtrT *ToArray, |
715 | const u16 MaxBlockCount) REQUIRES(Sci->Mutex) { |
716 | if (Sci->FreeListInfo.BlockList.empty()) |
717 | return 0U; |
718 | |
719 | SinglyLinkedList<TransferBatchT> &Batches = |
720 | Sci->FreeListInfo.BlockList.front()->Batches; |
721 | |
722 | if (Batches.empty()) { |
723 | DCHECK_EQ(ClassId, SizeClassMap::BatchClassId); |
724 | BatchGroupT *BG = Sci->FreeListInfo.BlockList.front(); |
725 | Sci->FreeListInfo.BlockList.pop_front(); |
726 | |
727 | // Block used by `BatchGroup` is from BatchClassId. Turn the block into |
728 | // `TransferBatch` with single block. |
729 | TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(BG); |
730 | ToArray[0] = |
731 | compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(TB)); |
732 | Sci->FreeListInfo.PoppedBlocks += 1; |
733 | return 1U; |
734 | } |
735 | |
736 | // So far, instead of always filling the blocks to `MaxBlockCount`, we only |
737 | // examine a single `TransferBatch` to minimize the time spent in the primary |
738 | // allocator. Besides, the sizes of `TransferBatch` and |
739 | // `SizeClassAllocatorT::getMaxCached()` may also impact the time spent on |
740 | // accessing the primary allocator. |
741 | // TODO(chiahungduan): Evaluate if we want to always prepare `MaxBlockCount` |
742 | // blocks and/or adjust the size of `TransferBatch` according to |
743 | // `SizeClassAllocatorT::getMaxCached()`. |
744 | TransferBatchT *B = Batches.front(); |
745 | DCHECK_NE(B, nullptr); |
746 | DCHECK_GT(B->getCount(), 0U); |
747 | |
748 | // BatchClassId should always take all blocks in the TransferBatch. Read the |
749 | // comment in `pushBatchClassBlocks()` for more details. |
750 | const u16 PopCount = ClassId == SizeClassMap::BatchClassId |
751 | ? B->getCount() |
752 | : Min(MaxBlockCount, B->getCount()); |
753 | B->moveNToArray(ToArray, PopCount); |
754 | |
755 | // TODO(chiahungduan): The deallocation of unused BatchClassId blocks can be |
756 | // done without holding `Mutex`. |
757 | if (B->empty()) { |
758 | Batches.pop_front(); |
759 | // `TransferBatch` of BatchClassId is self-contained, no need to |
760 | // deallocate. Read the comment in `pushBatchClassBlocks()` for more |
761 | // details. |
762 | if (ClassId != SizeClassMap::BatchClassId) |
763 | SizeClassAllocator->deallocate(SizeClassMap::BatchClassId, B); |
764 | |
765 | if (Batches.empty()) { |
766 | BatchGroupT *BG = Sci->FreeListInfo.BlockList.front(); |
767 | Sci->FreeListInfo.BlockList.pop_front(); |
768 | |
769 | // We don't keep a BatchGroup with zero blocks, to avoid empty-checking |
770 | // while allocating. Note that the block used for constructing the BatchGroup |
771 | // is recorded as a free block in the last element of BatchGroup::Batches, |
772 | // which means that once we pop the last TransferBatch, that block is |
773 | // implicitly deallocated. |
774 | if (ClassId != SizeClassMap::BatchClassId) |
775 | SizeClassAllocator->deallocate(SizeClassMap::BatchClassId, BG); |
776 | } |
777 | } |
778 | |
779 | Sci->FreeListInfo.PoppedBlocks += PopCount; |
780 | return PopCount; |
781 | } |
782 | |
783 | NOINLINE bool populateFreeList(SizeClassAllocatorT *SizeClassAllocator, |
784 | uptr ClassId, SizeClassInfo *Sci) |
785 | REQUIRES(Sci->Mutex) { |
786 | uptr Region; |
787 | uptr Offset; |
788 | // If the size-class currently has a region associated to it, use it. The |
789 | // newly created blocks will be located after the currently allocated memory |
790 | // for that region (up to RegionSize). Otherwise, create a new region, where |
791 | // the new blocks will be carved from the beginning. |
792 | if (Sci->CurrentRegion) { |
793 | Region = Sci->CurrentRegion; |
794 | DCHECK_GT(Sci->CurrentRegionAllocated, 0U); |
795 | Offset = Sci->CurrentRegionAllocated; |
796 | } else { |
797 | DCHECK_EQ(Sci->CurrentRegionAllocated, 0U); |
798 | Region = allocateRegion(Sci, ClassId); |
799 | if (UNLIKELY(!Region)) |
800 | return false; |
801 | SizeClassAllocator->getStats().add(StatMapped, RegionSize); |
802 | Sci->CurrentRegion = Region; |
803 | Offset = 0; |
804 | } |
805 | |
806 | const uptr Size = getSizeByClassId(ClassId); |
807 | const u16 MaxCount = SizeClassAllocatorT::getMaxCached(Size); |
808 | DCHECK_GT(MaxCount, 0U); |
809 | // The maximum number of blocks we should carve in the region is dictated |
810 | // by the maximum number of batches we want to fill, and the amount of |
811 | // memory left in the current region (we use the lowest of the two). This |
812 | // will not be 0 as we ensure that a region can at least hold one block (via |
813 | // static_assert and at the end of this function). |
814 | const u32 NumberOfBlocks = |
815 | Min(MaxNumBatches * MaxCount, |
816 | static_cast<u32>((RegionSize - Offset) / Size)); |
817 | DCHECK_GT(NumberOfBlocks, 0U); |
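// Illustrative numbers only (MaxCount depends on the configuration): with
// Size == 64, MaxCount == 13, MaxNumBatches == 8 and 200 KiB left in the
// region, NumberOfBlocks == Min(8 * 13, 204800 / 64) == Min(104, 3200) == 104.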
818 | |
819 | constexpr u32 ShuffleArraySize = |
820 | MaxNumBatches * TransferBatchT::MaxNumCached; |
821 | // Fill the transfer batches and put them in the size-class freelist. We |
822 | // need to randomize the blocks for security purposes, so we first fill a |
823 | // local array that we then shuffle before populating the batches. |
824 | CompactPtrT ShuffleArray[ShuffleArraySize]; |
825 | DCHECK_LE(NumberOfBlocks, ShuffleArraySize); |
826 | |
827 | uptr P = Region + Offset; |
828 | for (u32 I = 0; I < NumberOfBlocks; I++, P += Size) |
829 | ShuffleArray[I] = reinterpret_cast<CompactPtrT>(P); |
830 | |
831 | if (ClassId != SizeClassMap::BatchClassId) { |
832 | u32 N = 1; |
833 | uptr CurGroup = compactPtrGroupBase(ShuffleArray[0]); |
834 | for (u32 I = 1; I < NumberOfBlocks; I++) { |
835 | if (UNLIKELY(compactPtrGroupBase(ShuffleArray[I]) != CurGroup)) { |
836 | shuffle(ShuffleArray + I - N, N, &Sci->RandState); |
837 | pushBlocksImpl(SizeClassAllocator, ClassId, Sci, ShuffleArray + I - N, |
838 | N, |
839 | /*SameGroup=*/true); |
840 | N = 1; |
841 | CurGroup = compactPtrGroupBase(ShuffleArray[I]); |
842 | } else { |
843 | ++N; |
844 | } |
845 | } |
846 | |
847 | shuffle(ShuffleArray + NumberOfBlocks - N, N, &Sci->RandState); |
848 | pushBlocksImpl(SizeClassAllocator, ClassId, Sci, |
849 | &ShuffleArray[NumberOfBlocks - N], N, |
850 | /*SameGroup=*/true); |
851 | } else { |
852 | pushBatchClassBlocks(Sci, ShuffleArray, NumberOfBlocks); |
853 | } |
854 | |
855 | // Note that `PushedBlocks` and `PoppedBlocks` are only supposed to record |
856 | // the requests from `pushBlocks` and `popBlocks`, which are the external |
857 | // interfaces. `populateFreeList` is an internal interface, so we adjust the |
858 | // value back here to avoid skewing the stats. |
859 | Sci->FreeListInfo.PushedBlocks -= NumberOfBlocks; |
860 | |
861 | const uptr AllocatedUser = Size * NumberOfBlocks; |
862 | SizeClassAllocator->getStats().add(StatFree, AllocatedUser); |
863 | DCHECK_LE(Sci->CurrentRegionAllocated + AllocatedUser, RegionSize); |
864 | // If there is not enough room in the region currently associated to fit |
865 | // more blocks, we deassociate the region by resetting CurrentRegion and |
866 | // CurrentRegionAllocated. Otherwise, update the allocated amount. |
867 | if (RegionSize - (Sci->CurrentRegionAllocated + AllocatedUser) < Size) { |
868 | Sci->CurrentRegion = 0; |
869 | Sci->CurrentRegionAllocated = 0; |
870 | } else { |
871 | Sci->CurrentRegionAllocated += AllocatedUser; |
872 | } |
873 | Sci->AllocatedUser += AllocatedUser; |
874 | |
875 | return true; |
876 | } |
877 | |
878 | void getStats(ScopedString *Str, uptr ClassId, SizeClassInfo *Sci) |
879 | REQUIRES(Sci->Mutex) { |
880 | if (Sci->AllocatedUser == 0) |
881 | return; |
882 | const uptr BlockSize = getSizeByClassId(ClassId); |
883 | const uptr InUse = |
884 | Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks; |
885 | const uptr BytesInFreeList = Sci->AllocatedUser - InUse * BlockSize; |
886 | uptr PushedBytesDelta = 0; |
887 | if (BytesInFreeList >= Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint) { |
888 | PushedBytesDelta = |
889 | BytesInFreeList - Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint; |
890 | } |
891 | const uptr AvailableChunks = Sci->AllocatedUser / BlockSize; |
892 | Str->append( |
893 | " %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu " |
894 | "inuse: %6zu avail: %6zu releases attempted: %6zu last released: %6zuK " |
895 | "latest pushed bytes: %6zuK\n", |
896 | ClassId, getSizeByClassId(ClassId), Sci->AllocatedUser >> 10, |
897 | Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks, InUse, |
898 | AvailableChunks, Sci->ReleaseInfo.NumReleasesAttempted, |
899 | Sci->ReleaseInfo.LastReleasedBytes >> 10, PushedBytesDelta >> 10); |
900 | } |
901 | |
902 | void getSizeClassFragmentationInfo(SizeClassInfo *Sci, uptr ClassId, |
903 | ScopedString *Str) REQUIRES(Sci->Mutex) { |
904 | const uptr BlockSize = getSizeByClassId(ClassId); |
905 | const uptr First = Sci->MinRegionIndex; |
906 | const uptr Last = Sci->MaxRegionIndex; |
907 | const uptr Base = First * RegionSize; |
908 | const uptr NumberOfRegions = Last - First + 1U; |
909 | auto SkipRegion = [this, First, ClassId](uptr RegionIndex) { |
910 | ScopedLock L(ByteMapMutex); |
911 | return (PossibleRegions[First + RegionIndex] - 1U) != ClassId; |
912 | }; |
913 | |
914 | FragmentationRecorder Recorder; |
915 | if (!Sci->FreeListInfo.BlockList.empty()) { |
916 | PageReleaseContext Context = |
917 | markFreeBlocks(Sci, ClassId, BlockSize, Base, NumberOfRegions, |
918 | ReleaseToOS::ForceAll); |
919 | releaseFreeMemoryToOS(Context, Recorder, SkipRegion); |
920 | } |
921 | |
922 | const uptr PageSize = getPageSizeCached(); |
923 | const uptr TotalBlocks = Sci->AllocatedUser / BlockSize; |
924 | const uptr InUseBlocks = |
925 | Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks; |
926 | uptr AllocatedPagesCount = 0; |
927 | if (TotalBlocks != 0U) { |
928 | for (uptr I = 0; I < NumberOfRegions; ++I) { |
929 | if (SkipRegion(I)) |
930 | continue; |
931 | AllocatedPagesCount += RegionSize / PageSize; |
932 | } |
933 | |
934 | DCHECK_NE(AllocatedPagesCount, 0U); |
935 | } |
936 | |
937 | DCHECK_GE(AllocatedPagesCount, Recorder.getReleasedPagesCount()); |
938 | const uptr InUsePages = |
939 | AllocatedPagesCount - Recorder.getReleasedPagesCount(); |
940 | const uptr InUseBytes = InUsePages * PageSize; |
941 | |
942 | uptr Integral; |
943 | uptr Fractional; |
944 | computePercentage(BlockSize * InUseBlocks, InUseBytes, &Integral, |
945 | &Fractional); |
946 | Str->append(" %02zu (%6zu): inuse/total blocks: %6zu/%6zu inuse/total " |
947 | "pages: %6zu/%6zu inuse bytes: %6zuK util: %3zu.%02zu%%\n", |
948 | ClassId, BlockSize, InUseBlocks, TotalBlocks, InUsePages, |
949 | AllocatedPagesCount, InUseBytes >> 10, Integral, Fractional); |
950 | } |
951 | |
952 | NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId, |
953 | ReleaseToOS ReleaseType = ReleaseToOS::Normal) |
954 | REQUIRES(Sci->Mutex) { |
955 | const uptr BlockSize = getSizeByClassId(ClassId); |
956 | |
957 | DCHECK_GE(Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks); |
958 | const uptr BytesInFreeList = |
959 | Sci->AllocatedUser - |
960 | (Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks) * |
961 | BlockSize; |
962 | |
963 | if (UNLIKELY(BytesInFreeList == 0)) |
964 | return 0; |
965 | |
966 | // ====================================================================== // |
967 | // 1. Check if we have enough free blocks and if it's worth doing a page |
968 | // release. |
969 | // ====================================================================== // |
970 | if (ReleaseType != ReleaseToOS::ForceAll && |
971 | !hasChanceToReleasePages(Sci, BlockSize, BytesInFreeList, |
972 | ReleaseType)) { |
973 | return 0; |
974 | } |
975 | |
976 | const uptr First = Sci->MinRegionIndex; |
977 | const uptr Last = Sci->MaxRegionIndex; |
978 | DCHECK_NE(Last, 0U); |
979 | DCHECK_LE(First, Last); |
980 | uptr TotalReleasedBytes = 0; |
981 | const uptr Base = First * RegionSize; |
982 | const uptr NumberOfRegions = Last - First + 1U; |
983 | |
984 | // The following steps contribute to the majority time spent in page |
985 | // releasing thus we increment the counter here. |
986 | ++Sci->ReleaseInfo.NumReleasesAttempted; |
987 | |
988 | // ==================================================================== // |
989 | // 2. Mark the free blocks and we can tell which pages are in-use by |
990 | // querying `PageReleaseContext`. |
991 | // ==================================================================== // |
992 | PageReleaseContext Context = markFreeBlocks(Sci, ClassId, BlockSize, Base, |
993 | NumberOfRegions, ReleaseType); |
994 | if (!Context.hasBlockMarked()) |
995 | return 0; |
996 | |
997 | // ==================================================================== // |
998 | // 3. Release the unused physical pages back to the OS. |
999 | // ==================================================================== // |
1000 | ReleaseRecorder Recorder(Base); |
1001 | auto SkipRegion = [this, First, ClassId](uptr RegionIndex) { |
1002 | ScopedLock L(ByteMapMutex); |
1003 | return (PossibleRegions[First + RegionIndex] - 1U) != ClassId; |
1004 | }; |
1005 | releaseFreeMemoryToOS(Context, Recorder, SkipRegion); |
1006 | |
1007 | if (Recorder.getReleasedBytes() > 0) { |
1008 | Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList; |
1009 | Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes(); |
1010 | TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes; |
1011 | } |
1012 | Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTimeFast(); |
1013 | |
1014 | return TotalReleasedBytes; |
1015 | } |
1016 | |
1017 | bool hasChanceToReleasePages(SizeClassInfo *Sci, uptr BlockSize, |
1018 | uptr BytesInFreeList, ReleaseToOS ReleaseType) |
1019 | REQUIRES(Sci->Mutex) { |
1020 | DCHECK_GE(Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks); |
1021 | const uptr PageSize = getPageSizeCached(); |
1022 | |
1023 | if (BytesInFreeList <= Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint) |
1024 | Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList; |
1025 | |
1026 | // Always update `BytesInFreeListAtLastCheckpoint` with the smallest value |
1027 | // so that we won't underestimate the releasable pages. For example, the |
1028 | // following is the region usage, |
1029 | // |
1030 | // BytesInFreeListAtLastCheckpoint AllocatedUser |
1031 | // v v |
1032 | // |---------------------------------------> |
1033 | // ^ ^ |
1034 | // BytesInFreeList ReleaseThreshold |
1035 | // |
1036 | // In general, if we have collected enough bytes and the amount of free |
1037 | // bytes meets the ReleaseThreshold, we will try to do page release. If we |
1038 | // don't update `BytesInFreeListAtLastCheckpoint` when the current |
1039 | // `BytesInFreeList` is smaller, we may take longer time to wait for enough |
1040 | // freed blocks because we miss the bytes between |
1041 | // (BytesInFreeListAtLastCheckpoint - BytesInFreeList). |
1042 | const uptr PushedBytesDelta = |
1043 | BytesInFreeList - Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint; |
1044 | if (PushedBytesDelta < PageSize) |
1045 | return false; |
1046 | |
1047 | // Releasing smaller blocks is expensive, so we want to make sure that a |
1048 | // significant amount of bytes are free, and that there has been a good |
1049 | // amount of batches pushed to the freelist before attempting to release. |
1050 | if (isSmallBlock(BlockSize) && ReleaseType == ReleaseToOS::Normal) |
1051 | if (PushedBytesDelta < Sci->AllocatedUser / 16U) |
1052 | return false; |
1053 | |
1054 | if (ReleaseType == ReleaseToOS::Normal) { |
1055 | const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs); |
1056 | if (IntervalMs < 0) |
1057 | return false; |
1058 | |
1059 | // The constant 8 here was chosen by profiling some apps: the number of |
1060 | // unreleased pages in the large size classes is around 16 pages or more. |
1061 | // Choose half of that as a heuristic, which also avoids triggering a page |
1062 | // release on every pushBlocks() attempt for large blocks. |
1063 | const bool ByPassReleaseInterval = |
1064 | isLargeBlock(BlockSize) && PushedBytesDelta > 8 * PageSize; |
1065 | if (!ByPassReleaseInterval) { |
1066 | if (Sci->ReleaseInfo.LastReleaseAtNs + |
1067 | static_cast<u64>(IntervalMs) * 1000000 > |
1068 | getMonotonicTimeFast()) { |
1069 | // Memory was returned recently. |
1070 | return false; |
1071 | } |
1072 | } |
1073 | } // if (ReleaseType == ReleaseToOS::Normal) |
1074 | |
1075 | return true; |
1076 | } |
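// Example of the heuristics in hasChanceToReleasePages() with hypothetical
// numbers: for a 64-byte (small) class with AllocatedUser == 1 MiB, a Normal
// release is only attempted once at least one page (4096 bytes) and at least
// 1 MiB / 16 == 64 KiB of new free bytes have accumulated since the last
// checkpoint, and only if the release interval has elapsed.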
1077 | |
1078 | PageReleaseContext markFreeBlocks(SizeClassInfo *Sci, const uptr ClassId, |
1079 | const uptr BlockSize, const uptr Base, |
1080 | const uptr NumberOfRegions, |
1081 | ReleaseToOS ReleaseType) |
1082 | REQUIRES(Sci->Mutex) { |
1083 | const uptr PageSize = getPageSizeCached(); |
1084 | const uptr GroupSize = (1UL << GroupSizeLog); |
1085 | const uptr CurGroupBase = |
1086 | compactPtrGroupBase(compactPtr(ClassId, Sci->CurrentRegion)); |
1087 | |
1088 | PageReleaseContext Context(BlockSize, NumberOfRegions, |
1089 | /*ReleaseSize=*/RegionSize); |
1090 | |
1091 | auto DecompactPtr = [](CompactPtrT CompactPtr) { |
1092 | return reinterpret_cast<uptr>(CompactPtr); |
1093 | }; |
1094 | for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) { |
1095 | const uptr GroupBase = decompactGroupBase(BG.CompactPtrGroupBase); |
1096 | // The `GroupSize` may not be divisible by `BlockSize`, which means there is |
1097 | // unused space at the end of the Region. Exclude that space to avoid |
1098 | // unused page map entries. |
1099 | uptr AllocatedGroupSize = GroupBase == CurGroupBase |
1100 | ? Sci->CurrentRegionAllocated |
1101 | : roundDownSlow(GroupSize, BlockSize); |
1102 | if (AllocatedGroupSize == 0) |
1103 | continue; |
1104 | |
1105 | // TransferBatches are pushed in front of BG.Batches. The first one may |
1106 | // not be full. |
1107 | const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch + |
1108 | BG.Batches.front()->getCount(); |
1109 | const uptr BytesInBG = NumBlocks * BlockSize; |
1110 | |
1111 | if (ReleaseType != ReleaseToOS::ForceAll) { |
1112 | if (BytesInBG <= BG.BytesInBGAtLastCheckpoint) { |
1113 | BG.BytesInBGAtLastCheckpoint = BytesInBG; |
1114 | continue; |
1115 | } |
1116 | |
1117 | const uptr PushedBytesDelta = BytesInBG - BG.BytesInBGAtLastCheckpoint; |
1118 | if (PushedBytesDelta < PageSize) |
1119 | continue; |
1120 | |
1121 | // Given the randomness property, we try to release the pages only if |
1122 | // the bytes used by free blocks exceed a certain proportion of the |
1123 | // allocated space. |
1124 | if (isSmallBlock(BlockSize) && (BytesInBG * 100U) / AllocatedGroupSize < |
1125 | (100U - 1U - BlockSize / 16U)) { |
1126 | continue; |
1127 | } |
1128 | } |
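// Illustration of the proportion check above: for a 64-byte block, a group
// is only considered once (BytesInBG * 100) / AllocatedGroupSize reaches
// 100 - 1 - 64 / 16 == 95, i.e. at least 95% of the group's allocated bytes
// sit in the freelist.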
1129 | |
1130 | // TODO: Consider updating this after page release if `ReleaseRecorder` |
1131 | // can tell the released bytes in each group. |
1132 | BG.BytesInBGAtLastCheckpoint = BytesInBG; |
1133 | |
1134 | const uptr MaxContainedBlocks = AllocatedGroupSize / BlockSize; |
1135 | const uptr RegionIndex = (GroupBase - Base) / RegionSize; |
1136 | |
1137 | if (NumBlocks == MaxContainedBlocks) { |
1138 | for (const auto &It : BG.Batches) |
1139 | for (u16 I = 0; I < It.getCount(); ++I) |
1140 | DCHECK_EQ(compactPtrGroupBase(It.get(I)), BG.CompactPtrGroupBase); |
1141 | |
1142 | const uptr To = GroupBase + AllocatedGroupSize; |
1143 | Context.markRangeAsAllCounted(GroupBase, To, GroupBase, RegionIndex, |
1144 | AllocatedGroupSize); |
1145 | } else { |
1146 | DCHECK_LT(NumBlocks, MaxContainedBlocks); |
1147 | |
1148 | // Note that we don't always visit blocks in each BatchGroup, so we |
1149 | // may miss the chance of releasing certain pages that cross |
1150 | // BatchGroups. |
1151 | Context.markFreeBlocksInRegion(BG.Batches, DecompactPtr, GroupBase, |
1152 | RegionIndex, AllocatedGroupSize, |
1153 | /*MayContainLastBlockInRegion=*/true); |
1154 | } |
1155 | |
1156 | // In the rare case that the PageMap allocation fails, we may not be able |
1157 | // to do the page release. |
1158 | if (UNLIKELY(!Context.hasBlockMarked())) |
1159 | break; |
1160 | } |
1161 | |
1162 | return Context; |
1163 | } |
1164 | |
1165 | SizeClassInfo SizeClassInfoArray[NumClasses] = {}; |
1166 | |
1167 | HybridMutex ByteMapMutex; |
1168 | // Track the regions in use, 0 is unused, otherwise store ClassId + 1. |
1169 | ByteMap PossibleRegions GUARDED_BY(ByteMapMutex) = {}; |
1170 | atomic_s32 ReleaseToOsIntervalMs = {}; |
1171 | // Unless several threads request regions simultaneously from different size |
1172 | // classes, the stash rarely contains more than 1 entry. |
1173 | static constexpr uptr MaxStashedRegions = 4; |
1174 | HybridMutex RegionsStashMutex; |
1175 | uptr NumberOfStashedRegions GUARDED_BY(RegionsStashMutex) = 0; |
1176 | uptr RegionsStash[MaxStashedRegions] GUARDED_BY(RegionsStashMutex) = {}; |
1177 | }; |
1178 | |
1179 | } // namespace scudo |
1180 | |
1181 | #endif // SCUDO_PRIMARY32_H_ |
1182 |