//===-- sanitizer_allocator_combined.h --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// This class implements a complete memory allocator by using two
// internal allocators:
// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
// When allocating 2^x bytes it should return a 2^x-aligned chunk.
// PrimaryAllocator is used via a local AllocatorCache.
// SecondaryAllocator can allocate anything, but is not efficient.
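//
// A minimal usage sketch (hypothetical names; real tools wire in their own
// PrimaryAllocator and manage one cache per thread):
//   using Allocator = CombinedAllocator<MyPrimaryAllocator>;
//   static Allocator allocator;
//   static Allocator::AllocatorCache cache;
//   allocator.Init(/*release_to_os_interval_ms=*/-1);
//   allocator.InitCache(&cache);
//   void *p = allocator.Allocate(&cache, /*size=*/128, /*alignment=*/16);
//   allocator.Deallocate(&cache, p);
//   allocator.DestroyCache(&cache);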
template <class PrimaryAllocator,
          class LargeMmapAllocatorPtrArray = DefaultLargeMmapAllocatorPtrArray>
class CombinedAllocator {
 public:
  using AllocatorCache = typename PrimaryAllocator::AllocatorCache;
  using SecondaryAllocator =
      LargeMmapAllocator<typename PrimaryAllocator::MapUnmapCallback,
                         LargeMmapAllocatorPtrArray,
                         typename PrimaryAllocator::AddressSpaceView>;

  void InitLinkerInitialized(s32 release_to_os_interval_ms,
                             uptr heap_start = 0) {
    primary_.Init(release_to_os_interval_ms, heap_start);
    secondary_.InitLinkerInitialized();
  }

  void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
    stats_.Init();
    primary_.Init(release_to_os_interval_ms, heap_start);
    secondary_.Init();
  }

  void *Allocate(AllocatorCache *cache, uptr size, uptr alignment) {
    // Returning 0 on malloc(0) may break a lot of code.
    if (size == 0)
      size = 1;
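    // Guard against unsigned overflow when combining 'size' and 'alignment'
    // (e.g. in the RoundUpTo() below).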
    if (size + alignment < size) {
      Report("WARNING: %s: CombinedAllocator allocation overflow: "
             "0x%zx bytes with 0x%zx alignment requested\n",
             SanitizerToolName, size, alignment);
      return nullptr;
    }
    uptr original_size = size;
    // If alignment requirements are to be fulfilled by the frontend allocator
    // rather than by the primary or secondary, passing an alignment lower than
    // or equal to 8 will prevent any further rounding up, as well as the later
    // alignment check.
    if (alignment > 8)
      size = RoundUpTo(size, alignment);
    // The primary allocator should return a 2^x aligned allocation when
    // requested 2^x bytes, hence using the rounded up 'size' when being
    // serviced by the primary (this is no longer true when the primary is
    // using a non-fixed base address). The secondary takes care of the
    // alignment without such requirement, and allocating 'size' would use
    // extraneous memory, so we employ 'original_size'.
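    // For example, a request of 24 bytes with 16-byte alignment is rounded up
    // to 32 bytes: the primary then serves it from its 32-byte size class
    // (32-aligned, hence 16-aligned, modulo the base-address caveat above),
    // whereas the secondary is handed the original 24 bytes and satisfies the
    // 16-byte alignment itself.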
    void *res;
    if (primary_.CanAllocate(size, alignment))
      res = cache->Allocate(&primary_, primary_.ClassID(size));
    else
      res = secondary_.Allocate(&stats_, original_size, alignment);
    if (alignment > 8)
      CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
    return res;
  }

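  // The release-to-OS interval is a property of the primary only; the
  // secondary returns its mappings to the OS as chunks are deallocated.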
  s32 ReleaseToOSIntervalMs() const {
    return primary_.ReleaseToOSIntervalMs();
  }

  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
    primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
  }

  void ForceReleaseToOS() {
    primary_.ForceReleaseToOS();
  }

  void Deallocate(AllocatorCache *cache, void *p) {
    if (!p) return;
    if (primary_.PointerIsMine(p))
      cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
    else
      secondary_.Deallocate(&stats_, p);
  }

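  // Reallocate never resizes in place: it allocates a new chunk, copies
  // Min(new_size, old_size) bytes and deallocates the old chunk. The old
  // chunk is freed even if the new allocation fails.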
  void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
                   uptr alignment) {
    if (!p)
      return Allocate(cache, new_size, alignment);
    if (!new_size) {
      Deallocate(cache, p);
      return nullptr;
    }
    CHECK(PointerIsMine(p));
    uptr old_size = GetActuallyAllocatedSize(p);
    uptr memcpy_size = Min(new_size, old_size);
    void *new_p = Allocate(cache, new_size, alignment);
    if (new_p)
      internal_memcpy(new_p, p, memcpy_size);
    Deallocate(cache, p);
    return new_p;
  }

  bool PointerIsMine(const void *p) const {
    if (primary_.PointerIsMine(p))
      return true;
    return secondary_.PointerIsMine(p);
  }

  bool FromPrimary(const void *p) const { return primary_.PointerIsMine(p); }

  void *GetMetaData(const void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetMetaData(p);
    return secondary_.GetMetaData(p);
  }

  void *GetBlockBegin(const void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBegin(p);
  }

  // This function does the same as GetBlockBegin, but is much faster.
  // Must be called with the allocator locked.
  void *GetBlockBeginFastLocked(const void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBeginFastLocked(p);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetActuallyAllocatedSize(p);
    return secondary_.GetActuallyAllocatedSize(p);
  }

  uptr TotalMemoryUsed() {
    return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
  }

  void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }

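  // Per-thread cache lifecycle: InitCache() before first use, SwallowCache()
  // to return a live cache's chunks to the primary, DestroyCache() on thread
  // teardown.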
  void InitCache(AllocatorCache *cache) {
    cache->Init(&stats_);
  }

  void DestroyCache(AllocatorCache *cache) {
    cache->Destroy(&primary_, &stats_);
  }

  void SwallowCache(AllocatorCache *cache) {
    cache->Drain(&primary_);
  }

  void GetStats(AllocatorStatCounters s) const {
    stats_.Get(s);
  }

  void PrintStats() {
    primary_.PrintStats();
    secondary_.PrintStats();
  }

  // ForceLock() and ForceUnlock() are needed to implement the Darwin malloc
  // zone introspection API.
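  // Note the locking order: ForceLock() takes the primary lock before the
  // secondary one, and ForceUnlock() releases them in the reverse order.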
  void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    primary_.ForceLock();
    secondary_.ForceLock();
  }

  void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    secondary_.ForceUnlock();
    primary_.ForceUnlock();
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
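  // The callback receives the begin address of each chunk (as an uptr)
  // together with the opaque 'arg' pointer.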
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    primary_.ForEachChunk(callback, arg);
    secondary_.ForEachChunk(callback, arg);
  }

 private:
  PrimaryAllocator primary_;
  SecondaryAllocator secondary_;
  AllocatorGlobalStats stats_;
};