//===-- secondary.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_SECONDARY_H_
#define SCUDO_SECONDARY_H_

#include "chunk.h"
#include "common.h"
#include "list.h"
#include "mem_map.h"
#include "memtag.h"
#include "mutex.h"
#include "options.h"
#include "stats.h"
#include "string_utils.h"
#include "thread_annotations.h"

namespace scudo {

// This allocator wraps the platform allocation primitives, and as such is on
// the slower side and should preferably be used for larger sized allocations.
// Blocks allocated will be preceded and followed by a guard page, and hold
// their own header that is not checksummed: the guard pages and the Combined
// header should be enough for our purpose.
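//
// An illustrative sketch (added note, not from the original source) of the
// mapping for a typical secondary allocation, assuming Alignment < PageSize:
//
//   [ guard page | committed region: headers ... user block | guard page ]
//   ^ MapBase     ^ CommitBase            CommitBase + CommitSize ^   MapEnd ^
//
// Only the committed range is remapped accessible; the guard pages remain
// reserved and inaccessible, so a linear overflow past the end of the block
// faults instead of silently corrupting neighboring memory.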

namespace LargeBlock {

struct alignas(Max<uptr>(archSupportsMemoryTagging()
                             ? archMemoryTagGranuleSize()
                             : 1,
                         1U << SCUDO_MIN_ALIGNMENT_LOG)) Header {
  LargeBlock::Header *Prev;
  LargeBlock::Header *Next;
  uptr CommitBase;
  uptr CommitSize;
  MemMapT MemMap;
};

static_assert(sizeof(Header) % (1U << SCUDO_MIN_ALIGNMENT_LOG) == 0, "");
static_assert(!archSupportsMemoryTagging() ||
                  sizeof(Header) % archMemoryTagGranuleSize() == 0,
              "");

constexpr uptr getHeaderSize() { return sizeof(Header); }

template <typename Config> static uptr addHeaderTag(uptr Ptr) {
  if (allocatorSupportsMemoryTagging<Config>())
    return addFixedTag(Ptr, 1);
  return Ptr;
}

template <typename Config> static Header *getHeader(uptr Ptr) {
  return reinterpret_cast<Header *>(addHeaderTag<Config>(Ptr)) - 1;
}

template <typename Config> static Header *getHeader(const void *Ptr) {
  return getHeader<Config>(reinterpret_cast<uptr>(Ptr));
}

} // namespace LargeBlock

static inline void unmap(LargeBlock::Header *H) {
  // Note that the `H->MemMap` is stored on the pages managed by itself. Take
  // over the ownership before unmap() so that any operation along with unmap()
  // won't touch inaccessible pages.
  MemMapT MemMap = H->MemMap;
  MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
}

namespace {
struct CachedBlock {
  uptr CommitBase = 0;
  uptr CommitSize = 0;
  uptr BlockBegin = 0;
  MemMapT MemMap = {};
  u64 Time = 0;

  bool isValid() { return CommitBase != 0; }

  void invalidate() { CommitBase = 0; }
};
} // namespace

template <typename Config> class MapAllocatorNoCache {
public:
  void init(UNUSED s32 ReleaseToOsInterval) {}
  bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
                UNUSED uptr HeadersSize, UNUSED LargeBlock::Header **H,
                UNUSED bool *Zeroed) {
    return false;
  }
  void store(UNUSED Options Options, LargeBlock::Header *H) { unmap(H); }
  bool canCache(UNUSED uptr Size) { return false; }
  void disable() {}
  void enable() {}
  void releaseToOS() {}
  void disableMemoryTagging() {}
  void unmapTestOnly() {}
  bool setOption(Option O, UNUSED sptr Value) {
    if (O == Option::ReleaseInterval || O == Option::MaxCacheEntriesCount ||
        O == Option::MaxCacheEntrySize)
      return false;
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }

  void getStats(UNUSED ScopedString *Str) {
    Str->append("Secondary Cache Disabled\n");
  }
};

static const uptr MaxUnusedCachePages = 4U;

template <typename Config>
bool mapSecondary(const Options &Options, uptr CommitBase, uptr CommitSize,
                  uptr AllocPos, uptr Flags, MemMapT &MemMap) {
  Flags |= MAP_RESIZABLE;
  Flags |= MAP_ALLOWNOMEM;

  const uptr PageSize = getPageSizeCached();
  if (SCUDO_TRUSTY) {
    /*
     * On Trusty we need AllocPos to be usable for shared memory, which cannot
     * cross multiple mappings. This means we need to split around AllocPos
     * and not over it. We can only do this if the address is page-aligned.
     */
    const uptr TaggedSize = AllocPos - CommitBase;
    if (useMemoryTagging<Config>(Options) && isAligned(TaggedSize, PageSize)) {
      DCHECK_GT(TaggedSize, 0);
      return MemMap.remap(CommitBase, TaggedSize, "scudo:secondary",
                          MAP_MEMTAG | Flags) &&
             MemMap.remap(AllocPos, CommitSize - TaggedSize, "scudo:secondary",
                          Flags);
    } else {
      const uptr RemapFlags =
          (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) | Flags;
      return MemMap.remap(CommitBase, CommitSize, "scudo:secondary",
                          RemapFlags);
    }
  }

  const uptr MaxUnusedCacheBytes = MaxUnusedCachePages * PageSize;
  if (useMemoryTagging<Config>(Options) && CommitSize > MaxUnusedCacheBytes) {
    const uptr UntaggedPos = Max(AllocPos, CommitBase + MaxUnusedCacheBytes);
    return MemMap.remap(CommitBase, UntaggedPos - CommitBase, "scudo:secondary",
                        MAP_MEMTAG | Flags) &&
           MemMap.remap(UntaggedPos, CommitBase + CommitSize - UntaggedPos,
                        "scudo:secondary", Flags);
  } else {
    const uptr RemapFlags =
        (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) | Flags;
    return MemMap.remap(CommitBase, CommitSize, "scudo:secondary", RemapFlags);
  }
}

// Template specialization to avoid producing a zero-length array.
template <typename T, size_t Size> class NonZeroLengthArray {
public:
  T &operator[](uptr Idx) { return values[Idx]; }

private:
  T values[Size];
};
template <typename T> class NonZeroLengthArray<T, 0> {
public:
  T &operator[](uptr UNUSED Idx) { UNREACHABLE("Unsupported!"); }
};
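
// Illustrative note (added, not from the original source): this lets configs
// with an empty quarantine compile cleanly. With a config whose
// getQuarantineSize() returns 0, a plain `CachedBlock Quarantine[0]` member
// would be ill-formed in standard C++, whereas
//
//   NonZeroLengthArray<CachedBlock, 0> Quarantine = {};
//
// is a valid declaration whose operator[] is never meant to be reached (it
// traps via UNREACHABLE if it ever is).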

template <typename Config> class MapAllocatorCache {
public:
  void getStats(ScopedString *Str) {
    ScopedLock L(Mutex);
    uptr Integral;
    uptr Fractional;
    computePercentage(SuccessfulRetrieves, CallsToRetrieve, &Integral,
                      &Fractional);
    const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
    Str->append(
        "Stats: MapAllocatorCache: EntriesCount: %d, "
        "MaxEntriesCount: %u, MaxEntrySize: %zu, ReleaseToOsIntervalMs = %d\n",
        EntriesCount, atomic_load_relaxed(&MaxEntriesCount),
        atomic_load_relaxed(&MaxEntrySize), Interval >= 0 ? Interval : -1);
    Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
                "(%zu.%02zu%%)\n",
                SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
    for (CachedBlock Entry : Entries) {
      if (!Entry.isValid())
        continue;
      Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
                  "BlockSize: %zu %s\n",
                  Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
                  Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
    }
  }

  // Ensure the default maximum specified fits the array.
  static_assert(Config::getDefaultMaxEntriesCount() <=
                    Config::getEntriesArraySize(),
                "");

  void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(EntriesCount, 0U);
    setOption(Option::MaxCacheEntriesCount,
              static_cast<sptr>(Config::getDefaultMaxEntriesCount()));
    setOption(Option::MaxCacheEntrySize,
              static_cast<sptr>(Config::getDefaultMaxEntrySize()));
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
  }

  void store(const Options &Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
    if (!canCache(H->CommitSize))
      return unmap(H);

    bool EntryCached = false;
    bool EmptyCache = false;
    const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
    const u64 Time = getMonotonicTimeFast();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    CachedBlock Entry;
    Entry.CommitBase = H->CommitBase;
    Entry.CommitSize = H->CommitSize;
    Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
    Entry.MemMap = H->MemMap;
    Entry.Time = Time;
    if (useMemoryTagging<Config>(Options)) {
      if (Interval == 0 && !SCUDO_FUCHSIA) {
        // Release the memory and make it inaccessible at the same time by
        // creating a new MAP_NOACCESS mapping on top of the existing mapping.
        // Fuchsia does not support replacing mappings by creating a new mapping
        // on top so we just do the two syscalls there.
        Entry.Time = 0;
        mapSecondary<Config>(Options, Entry.CommitBase, Entry.CommitSize,
                             Entry.CommitBase, MAP_NOACCESS, Entry.MemMap);
      } else {
        Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize,
                                         MAP_NOACCESS);
      }
    } else if (Interval == 0) {
      Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
      Entry.Time = 0;
    }
    do {
      ScopedLock L(Mutex);
      if (useMemoryTagging<Config>(Options) && QuarantinePos == -1U) {
        // If we get here then memory tagging was disabled in between when we
        // read Options and when we locked Mutex. We can't insert our entry into
        // the quarantine or the cache because the permissions would be wrong so
        // just unmap it.
        break;
      }
      if (Config::getQuarantineSize() && useMemoryTagging<Config>(Options)) {
        QuarantinePos =
            (QuarantinePos + 1) % Max(Config::getQuarantineSize(), 1u);
        if (!Quarantine[QuarantinePos].isValid()) {
          Quarantine[QuarantinePos] = Entry;
          return;
        }
        CachedBlock PrevEntry = Quarantine[QuarantinePos];
        Quarantine[QuarantinePos] = Entry;
        if (OldestTime == 0)
          OldestTime = Entry.Time;
        Entry = PrevEntry;
      }
      if (EntriesCount >= MaxCount) {
        if (IsFullEvents++ == 4U)
          EmptyCache = true;
      } else {
        for (u32 I = 0; I < MaxCount; I++) {
          if (Entries[I].isValid())
            continue;
          if (I != 0)
            Entries[I] = Entries[0];
          Entries[0] = Entry;
          EntriesCount++;
          if (OldestTime == 0)
            OldestTime = Entry.Time;
          EntryCached = true;
          break;
        }
      }
    } while (0);
    if (EmptyCache)
      empty();
    else if (Interval >= 0)
      releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
    if (!EntryCached)
      Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
  }

  bool retrieve(Options Options, uptr Size, uptr Alignment, uptr HeadersSize,
                LargeBlock::Header **H, bool *Zeroed) EXCLUDES(Mutex) {
    const uptr PageSize = getPageSizeCached();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    // 10% of the requested size proved to be the optimal choice for
    // retrieving cached blocks after testing several options.
    constexpr u32 FragmentedBytesDivisor = 10;
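    // Illustrative arithmetic (added note, not from the original source):
    // with FragmentedBytesDivisor == 10, a cached block that offers 1 MiB
    // from HeaderPos to its end tolerates up to ~100 KiB of unused space
    // between CommitBase and HeaderPos and is taken immediately; entries with
    // a larger gap only compete through the best-fit (smallest Diff) scan
    // below.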
    bool Found = false;
    CachedBlock Entry;
    uptr EntryHeaderPos = 0;
    {
      ScopedLock L(Mutex);
      CallsToRetrieve++;
      if (EntriesCount == 0)
        return false;
      u32 OptimalFitIndex = 0;
      uptr MinDiff = UINTPTR_MAX;
      for (u32 I = 0; I < MaxCount; I++) {
        if (!Entries[I].isValid())
          continue;
        const uptr CommitBase = Entries[I].CommitBase;
        const uptr CommitSize = Entries[I].CommitSize;
        const uptr AllocPos =
            roundDown(CommitBase + CommitSize - Size, Alignment);
        const uptr HeaderPos = AllocPos - HeadersSize;
        if (HeaderPos > CommitBase + CommitSize)
          continue;
        if (HeaderPos < CommitBase ||
            AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
          continue;
        }
        Found = true;
        const uptr Diff = HeaderPos - CommitBase;
        // Immediately use a cached block if its size is close enough to the
        // requested size.
        const uptr MaxAllowedFragmentedBytes =
            (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
        if (Diff <= MaxAllowedFragmentedBytes) {
          OptimalFitIndex = I;
          EntryHeaderPos = HeaderPos;
          break;
        }
        // Keep track of the smallest cached block that is greater than
        // (AllocSize + HeaderSize).
        if (Diff > MinDiff)
          continue;
        OptimalFitIndex = I;
        MinDiff = Diff;
        EntryHeaderPos = HeaderPos;
      }
      if (Found) {
        Entry = Entries[OptimalFitIndex];
        Entries[OptimalFitIndex].invalidate();
        EntriesCount--;
        SuccessfulRetrieves++;
      }
    }
    if (!Found)
      return false;

    *H = reinterpret_cast<LargeBlock::Header *>(
        LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
    *Zeroed = Entry.Time == 0;
    if (useMemoryTagging<Config>(Options))
      Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
    uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
    if (useMemoryTagging<Config>(Options)) {
      if (*Zeroed) {
        storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
                  NewBlockBegin);
      } else if (Entry.BlockBegin < NewBlockBegin) {
        storeTags(Entry.BlockBegin, NewBlockBegin);
      } else {
        storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
      }
    }
    (*H)->CommitBase = Entry.CommitBase;
    (*H)->CommitSize = Entry.CommitSize;
    (*H)->MemMap = Entry.MemMap;
    return true;
  }

  bool canCache(uptr Size) {
    return atomic_load_relaxed(&MaxEntriesCount) != 0U &&
           Size <= atomic_load_relaxed(&MaxEntrySize);
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::ReleaseInterval) {
      const s32 Interval = Max(
          Min(static_cast<s32>(Value), Config::getMaxReleaseToOsIntervalMs()),
          Config::getMinReleaseToOsIntervalMs());
      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
      return true;
    }
    if (O == Option::MaxCacheEntriesCount) {
      const u32 MaxCount = static_cast<u32>(Value);
      if (MaxCount > Config::getEntriesArraySize())
        return false;
      atomic_store_relaxed(&MaxEntriesCount, MaxCount);
      return true;
    }
    if (O == Option::MaxCacheEntrySize) {
      atomic_store_relaxed(&MaxEntrySize, static_cast<uptr>(Value));
      return true;
    }
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }

  void releaseToOS() { releaseOlderThan(UINT64_MAX); }

  void disableMemoryTagging() EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    for (u32 I = 0; I != Config::getQuarantineSize(); ++I) {
      if (Quarantine[I].isValid()) {
        MemMapT &MemMap = Quarantine[I].MemMap;
        MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
        Quarantine[I].invalidate();
      }
    }
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    for (u32 I = 0; I < MaxCount; I++) {
      if (Entries[I].isValid()) {
        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
                                              Entries[I].CommitSize, 0);
      }
    }
    QuarantinePos = -1U;
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS { Mutex.lock(); }

  void enable() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }

  void unmapTestOnly() { empty(); }

private:
  void empty() {
    MemMapT MapInfo[Config::getEntriesArraySize()];
    uptr N = 0;
    {
      ScopedLock L(Mutex);
      for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
        if (!Entries[I].isValid())
          continue;
        MapInfo[N] = Entries[I].MemMap;
        Entries[I].invalidate();
        N++;
      }
      EntriesCount = 0;
      IsFullEvents = 0;
    }
    for (uptr I = 0; I < N; I++) {
      MemMapT &MemMap = MapInfo[I];
      MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
    }
  }

  void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
    if (!Entry.isValid() || !Entry.Time)
      return;
    if (Entry.Time > Time) {
      if (OldestTime == 0 || Entry.Time < OldestTime)
        OldestTime = Entry.Time;
      return;
    }
    Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
    Entry.Time = 0;
  }

  void releaseOlderThan(u64 Time) EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
      return;
    OldestTime = 0;
    for (uptr I = 0; I < Config::getQuarantineSize(); I++)
      releaseIfOlderThan(Quarantine[I], Time);
    for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
      releaseIfOlderThan(Entries[I], Time);
  }

  HybridMutex Mutex;
  u32 EntriesCount GUARDED_BY(Mutex) = 0;
  u32 QuarantinePos GUARDED_BY(Mutex) = 0;
  atomic_u32 MaxEntriesCount = {};
  atomic_uptr MaxEntrySize = {};
  u64 OldestTime GUARDED_BY(Mutex) = 0;
  u32 IsFullEvents GUARDED_BY(Mutex) = 0;
  atomic_s32 ReleaseToOsIntervalMs = {};
  u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
  u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;

  CachedBlock Entries[Config::getEntriesArraySize()] GUARDED_BY(Mutex) = {};
  NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
      Quarantine GUARDED_BY(Mutex) = {};
};

template <typename Config> class MapAllocator {
public:
  void init(GlobalStats *S,
            s32 ReleaseToOsInterval = -1) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(AllocatedBytes, 0U);
    DCHECK_EQ(FreedBytes, 0U);
    Cache.init(ReleaseToOsInterval);
    Stats.init();
    if (LIKELY(S))
      S->link(&Stats);
  }

  void *allocate(const Options &Options, uptr Size, uptr AlignmentHint = 0,
                 uptr *BlockEnd = nullptr,
                 FillContentsMode FillContents = NoFill);

  void deallocate(const Options &Options, void *Ptr);

  static uptr getBlockEnd(void *Ptr) {
    auto *B = LargeBlock::getHeader<Config>(Ptr);
    return B->CommitBase + B->CommitSize;
  }

  static uptr getBlockSize(void *Ptr) {
    return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
  }

  static constexpr uptr getHeadersSize() {
    return Chunk::getHeaderSize() + LargeBlock::getHeaderSize();
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS {
    Mutex.lock();
    Cache.disable();
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    Cache.enable();
    Mutex.unlock();
  }

  template <typename F> void iterateOverBlocks(F Callback) const {
    Mutex.assertHeld();

    for (const auto &H : InUseBlocks) {
      uptr Ptr = reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize();
      if (allocatorSupportsMemoryTagging<Config>())
        Ptr = untagPointer(Ptr);
      Callback(Ptr);
    }
  }

  bool canCache(uptr Size) { return Cache.canCache(Size); }

  bool setOption(Option O, sptr Value) { return Cache.setOption(O, Value); }

  void releaseToOS() { Cache.releaseToOS(); }

  void disableMemoryTagging() { Cache.disableMemoryTagging(); }

  void unmapTestOnly() { Cache.unmapTestOnly(); }

  void getStats(ScopedString *Str);

private:
  typename Config::template CacheT<typename Config::CacheConfig> Cache;

  mutable HybridMutex Mutex;
  DoublyLinkedList<LargeBlock::Header> InUseBlocks GUARDED_BY(Mutex);
  uptr AllocatedBytes GUARDED_BY(Mutex) = 0;
  uptr FreedBytes GUARDED_BY(Mutex) = 0;
  uptr FragmentedBytes GUARDED_BY(Mutex) = 0;
  uptr LargestSize GUARDED_BY(Mutex) = 0;
  u32 NumberOfAllocs GUARDED_BY(Mutex) = 0;
  u32 NumberOfFrees GUARDED_BY(Mutex) = 0;
  LocalStats Stats GUARDED_BY(Mutex);
};

// As with the Primary, the size passed to this function includes any desired
// alignment, so that the frontend can align the user allocation. The hint
// parameter allows us to unmap spurious memory when dealing with larger
// (greater than a page) alignments on 32-bit platforms.
// Due to the sparsity of address space available on those platforms,
// requesting an allocation from the Secondary with a large alignment would end
// up wasting VA space (even though we are not committing the whole thing),
// hence the need to trim off some of the reserved space.
// For allocations requested with an alignment greater than or equal to a page,
// the committed memory will amount to something close to Size - AlignmentHint
// (pending rounding and headers).
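//
// A worked example (added illustration, not from the original source),
// assuming 4 KiB pages on a 32-bit target: a frontend request for 16 KiB with
// a 64 KiB alignment reaches this function with Size already inflated by the
// alignment. The Secondary reserves enough VA to find a 64 KiB-aligned user
// pointer, commits only about 16 KiB plus headers (rounded to page size)
// around it, and unmaps the untouched head and tail of the reservation.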
template <typename Config>
void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
                                     uptr Alignment, uptr *BlockEndPtr,
                                     FillContentsMode FillContents) {
  if (Options.get(OptionBit::AddLargeAllocationSlack))
    Size += 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Alignment = Max(Alignment, uptr(1U) << SCUDO_MIN_ALIGNMENT_LOG);
  const uptr PageSize = getPageSizeCached();

  // Note that cached blocks may already have an aligned address. Thus we
  // simply pass the required size (`Size` + `getHeadersSize()`) to do the
  // cache lookup.
  const uptr MinNeededSizeForCache = roundUp(Size + getHeadersSize(), PageSize);

  if (Alignment < PageSize && Cache.canCache(MinNeededSizeForCache)) {
    LargeBlock::Header *H;
    bool Zeroed;
    if (Cache.retrieve(Options, Size, Alignment, getHeadersSize(), &H,
                       &Zeroed)) {
      const uptr BlockEnd = H->CommitBase + H->CommitSize;
      if (BlockEndPtr)
        *BlockEndPtr = BlockEnd;
      uptr HInt = reinterpret_cast<uptr>(H);
      if (allocatorSupportsMemoryTagging<Config>())
        HInt = untagPointer(HInt);
      const uptr PtrInt = HInt + LargeBlock::getHeaderSize();
      void *Ptr = reinterpret_cast<void *>(PtrInt);
      if (FillContents && !Zeroed)
        memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
               BlockEnd - PtrInt);
      {
        ScopedLock L(Mutex);
        InUseBlocks.push_back(H);
        AllocatedBytes += H->CommitSize;
        FragmentedBytes += H->MemMap.getCapacity() - H->CommitSize;
        NumberOfAllocs++;
        Stats.add(StatAllocated, H->CommitSize);
        Stats.add(StatMapped, H->MemMap.getCapacity());
      }
      return Ptr;
    }
  }

  uptr RoundedSize =
      roundUp(roundUp(Size, Alignment) + getHeadersSize(), PageSize);
  if (Alignment > PageSize)
    RoundedSize += Alignment - PageSize;

  ReservedMemoryT ReservedMemory;
  const uptr MapSize = RoundedSize + 2 * PageSize;
  if (UNLIKELY(!ReservedMemory.create(/*Addr=*/0U, MapSize, nullptr,
                                      MAP_ALLOWNOMEM))) {
    return nullptr;
  }

  // Take the entire ownership of the reserved region.
  MemMapT MemMap = ReservedMemory.dispatch(ReservedMemory.getBase(),
                                           ReservedMemory.getCapacity());
  uptr MapBase = MemMap.getBase();
  uptr CommitBase = MapBase + PageSize;
  uptr MapEnd = MapBase + MapSize;

  // In the unlikely event of alignments larger than a page, adjust the amount
  // of memory we want to commit, and trim the extra memory.
  if (UNLIKELY(Alignment >= PageSize)) {
    // For alignments greater than or equal to a page, the user pointer (e.g.,
    // the pointer that is returned by the C or C++ allocation APIs) ends up on
    // a page boundary, and our headers will live in the preceding page.
    CommitBase = roundUp(MapBase + PageSize + 1, Alignment) - PageSize;
    const uptr NewMapBase = CommitBase - PageSize;
    DCHECK_GE(NewMapBase, MapBase);
    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
    // are less constrained memory-wise, and that saves us two syscalls.
    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
      MemMap.unmap(MapBase, NewMapBase - MapBase);
      MapBase = NewMapBase;
    }
    const uptr NewMapEnd =
        CommitBase + PageSize + roundUp(Size, PageSize) + PageSize;
    DCHECK_LE(NewMapEnd, MapEnd);
    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
      MemMap.unmap(NewMapEnd, MapEnd - NewMapEnd);
      MapEnd = NewMapEnd;
    }
  }

  const uptr CommitSize = MapEnd - PageSize - CommitBase;
  const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
  if (!mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0,
                            MemMap)) {
    MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
    return nullptr;
  }
  const uptr HeaderPos = AllocPos - getHeadersSize();
  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
      LargeBlock::addHeaderTag<Config>(HeaderPos));
  if (useMemoryTagging<Config>(Options))
    storeTags(LargeBlock::addHeaderTag<Config>(CommitBase),
              reinterpret_cast<uptr>(H + 1));
  H->CommitBase = CommitBase;
  H->CommitSize = CommitSize;
  H->MemMap = MemMap;
  if (BlockEndPtr)
    *BlockEndPtr = CommitBase + CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.push_back(H);
    AllocatedBytes += CommitSize;
    FragmentedBytes += H->MemMap.getCapacity() - CommitSize;
    if (LargestSize < CommitSize)
      LargestSize = CommitSize;
    NumberOfAllocs++;
    Stats.add(StatAllocated, CommitSize);
    Stats.add(StatMapped, H->MemMap.getCapacity());
  }
  return reinterpret_cast<void *>(HeaderPos + LargeBlock::getHeaderSize());
}

template <typename Config>
void MapAllocator<Config>::deallocate(const Options &Options, void *Ptr)
    EXCLUDES(Mutex) {
  LargeBlock::Header *H = LargeBlock::getHeader<Config>(Ptr);
  const uptr CommitSize = H->CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.remove(H);
    FreedBytes += CommitSize;
    FragmentedBytes -= H->MemMap.getCapacity() - CommitSize;
    NumberOfFrees++;
    Stats.sub(StatAllocated, CommitSize);
    Stats.sub(StatMapped, H->MemMap.getCapacity());
  }
  Cache.store(Options, H);
}

template <typename Config>
void MapAllocator<Config>::getStats(ScopedString *Str) EXCLUDES(Mutex) {
  ScopedLock L(Mutex);
  Str->append("Stats: MapAllocator: allocated %u times (%zuK), freed %u times "
              "(%zuK), remains %u (%zuK) max %zuM, Fragmented %zuK\n",
              NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
              FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
              (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20,
              FragmentedBytes >> 10);
  Cache.getStats(Str);
}

} // namespace scudo

#endif // SCUDO_SECONDARY_H_
