// Copyright (C) 2016 The Qt Company Ltd.
// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only

#ifndef QV4GC_H
#define QV4GC_H

//
// W A R N I N G
// -------------
//
// This file is not part of the Qt API. It exists purely as an
// implementation detail. This header file may change from version to
// version without notice, or even be removed.
//
// We mean it.
//

#include <private/qv4global_p.h>
#include <private/qv4value_p.h>
#include <private/qv4scopedvalue_p.h>
#include <private/qv4object_p.h>
#include <private/qv4mmdefs_p.h>
#include <QVector>

#define MM_DEBUG 0

QT_BEGIN_NAMESPACE

namespace QV4 {

struct ChunkAllocator;
struct MemorySegment;

struct BlockAllocator {
    BlockAllocator(ChunkAllocator *chunkAllocator, ExecutionEngine *engine)
        : chunkAllocator(chunkAllocator), engine(engine)
    {
        memset(freeBins, 0, sizeof(freeBins));
    }

    enum { NumBins = 8 };

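    // Illustrative note on the size-class scheme: bins 0..NumBins-2 hold free
    // items of exactly that many slots; the last bin collects everything of
    // NumBins - 1 slots or more. With NumBins == 8 as declared above:
    //   binForSlots(2)  == 2
    //   binForSlots(7)  == 7   (last bin)
    //   binForSlots(64) == 7   (last bin)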
    static inline size_t binForSlots(size_t nSlots) {
        return nSlots >= NumBins ? NumBins - 1 : nSlots;
    }

    HeapItem *allocate(size_t size, bool forceAllocation = false);

    size_t totalSlots() const {
        return Chunk::AvailableSlots*chunks.size();
    }

    size_t allocatedMem() const {
        return chunks.size()*Chunk::DataSize;
    }
    size_t usedMem() const {
        uint used = 0;
        for (auto c : chunks)
            used += c->nUsedSlots()*Chunk::SlotSize;
        return used;
    }

    void sweep();
    void freeAll();
    void resetBlackBits();

    // bump allocations
    HeapItem *nextFree = nullptr;
    size_t nFree = 0;
    size_t usedSlotsAfterLastSweep = 0;
    HeapItem *freeBins[NumBins];
    ChunkAllocator *chunkAllocator;
    ExecutionEngine *engine;
    std::vector<Chunk *> chunks;
    uint *allocationStats = nullptr;
};

struct HugeItemAllocator {
    HugeItemAllocator(ChunkAllocator *chunkAllocator, ExecutionEngine *engine)
        : chunkAllocator(chunkAllocator), engine(engine)
    {}

    HeapItem *allocate(size_t size);
    void sweep(ClassDestroyStatsCallback classCountPtr);
    void freeAll();
    void resetBlackBits();

    size_t usedMem() const {
        size_t used = 0;
        for (const auto &c : chunks)
            used += c.size;
        return used;
    }

    ChunkAllocator *chunkAllocator;
    ExecutionEngine *engine;
    struct HugeChunk {
        MemorySegment *segment;
        Chunk *chunk;
        size_t size;
    };

    std::vector<HugeChunk> chunks;
};


class Q_QML_EXPORT MemoryManager
{
    Q_DISABLE_COPY(MemoryManager);

public:
    MemoryManager(ExecutionEngine *engine);
    ~MemoryManager();

    // TODO: this is only for 64bit (and x86 with SSE/AVX), so extend it for other architectures to be slightly more efficient (meaning, align on 8-byte boundaries).
    // Note: all occurrences of "16" in alloc/dealloc are also due to the alignment.
    constexpr static inline std::size_t align(std::size_t size)
    { return (size + Chunk::SlotSize - 1) & ~(Chunk::SlotSize - 1); }
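    // Worked example of align(), assuming Chunk::SlotSize == 32 (a power of
    // two, as the mask arithmetic above requires):
    //   align(1)  == 32
    //   align(32) == 32
    //   align(33) == 64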

    template<typename ManagedType>
    inline typename ManagedType::Data *allocManaged(std::size_t size, Heap::InternalClass *ic)
    {
        Q_STATIC_ASSERT(std::is_trivial_v<typename ManagedType::Data>);
        size = align(size);
        typename ManagedType::Data *d = static_cast<typename ManagedType::Data *>(allocData(size));
        d->internalClass.set(engine, ic);
        Q_ASSERT(d->internalClass && d->internalClass->vtable);
        Q_ASSERT(ic->vtable == ManagedType::staticVTable());
        return d;
    }

    template<typename ManagedType>
    inline typename ManagedType::Data *allocManaged(std::size_t size, InternalClass *ic)
    {
        return allocManaged<ManagedType>(size, ic->d());
    }

    template<typename ManagedType>
    inline typename ManagedType::Data *allocManaged(std::size_t size)
    {
        Scope scope(engine);
        Scoped<InternalClass> ic(scope, ManagedType::defaultInternalClass(engine));
        return allocManaged<ManagedType>(size, ic);
    }

    template <typename ObjectType>
    typename ObjectType::Data *allocateObject(Heap::InternalClass *ic)
    {
        Heap::Object *o = allocObjectWithMemberData(ObjectType::staticVTable(), ic->size);
        o->internalClass.set(engine, ic);
        Q_ASSERT(o->internalClass.get() && o->vtable());
        Q_ASSERT(o->vtable() == ObjectType::staticVTable());
        return static_cast<typename ObjectType::Data *>(o);
    }

    template <typename ObjectType>
    typename ObjectType::Data *allocateObject(InternalClass *ic)
    {
        return allocateObject<ObjectType>(ic->d());
    }

    template <typename ObjectType>
    typename ObjectType::Data *allocateObject()
    {
        Scope scope(engine);
        Scoped<InternalClass> ic(scope, ObjectType::defaultInternalClass(engine));
        ic = ic->changeVTable(ObjectType::staticVTable());
        ic = ic->changePrototype(ObjectType::defaultPrototype(engine)->d());
        return allocateObject<ObjectType>(ic);
    }

    template <typename ManagedType, typename Arg1>
    typename ManagedType::Data *allocWithStringData(std::size_t unmanagedSize, Arg1 &&arg1)
    {
        typename ManagedType::Data *o = reinterpret_cast<typename ManagedType::Data *>(allocString(unmanagedSize));
        o->internalClass.set(engine, ManagedType::defaultInternalClass(engine));
        Q_ASSERT(o->internalClass && o->internalClass->vtable);
        o->init(std::forward<Arg1>(arg1));
        return o;
    }

    template <typename ObjectType, typename... Args>
    typename ObjectType::Data *allocObject(Heap::InternalClass *ic, Args&&... args)
    {
        typename ObjectType::Data *d = allocateObject<ObjectType>(ic);
        d->init(std::forward<Args>(args)...);
        return d;
    }

    template <typename ObjectType, typename... Args>
    typename ObjectType::Data *allocObject(InternalClass *ic, Args&&... args)
    {
        typename ObjectType::Data *d = allocateObject<ObjectType>(ic);
        d->init(std::forward<Args>(args)...);
        return d;
    }

    template <typename ObjectType, typename... Args>
    typename ObjectType::Data *allocate(Args&&... args)
    {
        Scope scope(engine);
        Scoped<ObjectType> t(scope, allocateObject<ObjectType>());
        t->d_unchecked()->init(std::forward<Args>(args)...);
        return t->d();
    }
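    // Hedged usage sketch (hypothetical call site; ArrayObject stands in for
    // any ObjectType whose init() matches the argument list):
    //   QV4::Heap::ArrayObject *a =
    //       engine->memoryManager->allocate<QV4::ArrayObject>();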

    template <typename ManagedType, typename... Args>
    typename ManagedType::Data *alloc(Args&&... args)
    {
        Scope scope(engine);
        Scoped<ManagedType> t(scope, allocManaged<ManagedType>(sizeof(typename ManagedType::Data)));
        t->d_unchecked()->init(std::forward<Args>(args)...);
        return t->d();
    }

    void runGC();

    void dumpStats() const;

    size_t getUsedMem() const;
    size_t getAllocatedMem() const;
    size_t getLargeItemsMem() const;

    // called when a JS object grows itself. Specifically: Heap::String::append
    // and InternalClassDataPrivate<PropertyAttributes>.
    void changeUnmanagedHeapSizeUsage(qptrdiff delta) { unmanagedHeapSize += delta; }
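    // Hedged sketch of a caller (hypothetical; mirrors the growers named
    // above): the grower reports only the delta, which may be negative when
    // it shrinks:
    //   mm->changeUnmanagedHeapSizeUsage(qptrdiff(newSize) - qptrdiff(oldSize));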

    template<typename ManagedType>
    typename ManagedType::Data *allocIC()
    {
        Heap::Base *b = *allocate(&icAllocator, align(sizeof(typename ManagedType::Data)));
        return static_cast<typename ManagedType::Data *>(b);
    }

    void registerWeakMap(Heap::MapObject *map);
    void registerWeakSet(Heap::SetObject *set);

protected:
    /// expects size to be aligned
    Heap::Base *allocString(std::size_t unmanagedSize);
    Heap::Base *allocData(std::size_t size);
    Heap::Object *allocObjectWithMemberData(const QV4::VTable *vtable, uint nMembers);

private:
    enum {
        MinUnmanagedHeapSizeGCLimit = 128 * 1024
    };

    void collectFromJSStack(MarkStack *markStack) const;
    void mark();
    void sweep(bool lastSweep = false, ClassDestroyStatsCallback classCountPtr = nullptr);
    bool shouldRunGC() const;
    void collectRoots(MarkStack *markStack);

    HeapItem *allocate(BlockAllocator *allocator, std::size_t size)
    {
        bool didGCRun = false;
        if (aggressiveGC) {
            runGC();
            didGCRun = true;
        }

        if (unmanagedHeapSize > unmanagedHeapSizeGCLimit) {
            if (!didGCRun)
                runGC();

            if (3*unmanagedHeapSizeGCLimit <= 4 * unmanagedHeapSize) {
                // more than 75% full, raise limit
                unmanagedHeapSizeGCLimit = std::max(unmanagedHeapSizeGCLimit,
                                                    unmanagedHeapSize) * 2;
            } else if (unmanagedHeapSize * 4 <= unmanagedHeapSizeGCLimit) {
                // less than 25% full, lower limit
                unmanagedHeapSizeGCLimit = qMax(std::size_t(MinUnmanagedHeapSizeGCLimit),
                                                unmanagedHeapSizeGCLimit/2);
            }
            didGCRun = true;
        }
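        // Worked example of the hysteresis above (illustrative numbers): with
        // a 128 KiB limit and 100 KiB still unmanaged after the GC run,
        // 3*128 <= 4*100 holds (more than 75% full), so the limit doubles to
        // max(128, 100) * 2 = 256 KiB. Had usage instead dropped below 32 KiB
        // (25%), the limit would halve, floored at MinUnmanagedHeapSizeGCLimit.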

        if (size > Chunk::DataSize)
            return hugeItemAllocator.allocate(size);

        if (HeapItem *m = allocator->allocate(size))
            return m;

        if (!didGCRun && shouldRunGC())
            runGC();

        return allocator->allocate(size, true);
    }

public:
    QV4::ExecutionEngine *engine;
    ChunkAllocator *chunkAllocator;
    BlockAllocator blockAllocator;
    BlockAllocator icAllocator;
    HugeItemAllocator hugeItemAllocator;
    PersistentValueStorage *m_persistentValues;
    PersistentValueStorage *m_weakValues;
    QVector<Value *> m_pendingFreedObjectWrapperValue;
    Heap::MapObject *weakMaps = nullptr;
    Heap::SetObject *weakSets = nullptr;

    std::size_t unmanagedHeapSize = 0; // the number of bytes of heap memory not managed by the memory manager but held onto by managed items.
    std::size_t unmanagedHeapSizeGCLimit;
    std::size_t usedSlotsAfterLastFullSweep = 0;

    bool gcBlocked = false;
    bool aggressiveGC = false;
    bool gcStats = false;
    bool gcCollectorStats = false;

    int allocationCount = 0;
    size_t lastAllocRequestedSlots = 0;

    struct {
        size_t maxReservedMem = 0;
        size_t maxAllocatedMem = 0;
        size_t maxUsedMem = 0;
        uint allocations[BlockAllocator::NumBins];
    } statistics;
};

} // namespace QV4

QT_END_NAMESPACE

#endif // QV4GC_H

