// Copyright (C) 2016 The Qt Company Ltd.
// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only

#ifndef QV4GC_H
#define QV4GC_H

//
// W A R N I N G
// -------------
//
// This file is not part of the Qt API. It exists purely as an
// implementation detail. This header file may change from version to
// version without notice, or even be removed.
//
// We mean it.
//

#include <private/qv4global_p.h>
#include <private/qv4value_p.h>
#include <private/qv4scopedvalue_p.h>
#include <private/qv4object_p.h>
#include <private/qv4mmdefs_p.h>
#include <QVector>

#define MM_DEBUG 0

QT_BEGIN_NAMESPACE

namespace QV4 {

struct GCData { virtual ~GCData(){};};

struct GCIteratorStorage {
    PersistentValueStorage::Iterator it{nullptr, 0};
};

struct GCStateMachine {
    Q_GADGET_EXPORT(Q_QML_EXPORT)

public:
    enum GCState {
        MarkStart = 0,
        MarkGlobalObject,
        MarkJSStack,
        InitMarkPersistentValues,
        MarkPersistentValues,
        InitMarkWeakValues,
        MarkWeakValues,
        MarkDrain,
        MarkReady,
        InitCallDestroyObjects,
        CallDestroyObjects,
        FreeWeakMaps,
        FreeWeakSets,
        HandleQObjectWrappers,
        DoSweep,
        Invalid,
        Count,
    };
    Q_ENUM(GCState)

    struct StepTiming {
        qint64 rolling_sum = 0;
        qint64 count = 0;
    };

    struct GCStateInfo {
        using ExtraData = std::variant<std::monostate, GCIteratorStorage>;
        GCState (*execute)(GCStateMachine *, ExtraData &) = nullptr; // function executed for this state; returns the next state to transition to
        bool breakAfter{false};
    };

    using ExtraData = GCStateInfo::ExtraData;
    GCState state{GCState::Invalid};
    std::chrono::microseconds timeLimit{};
    QDeadlineTimer deadline;
    std::array<GCStateInfo, GCState::Count> stateInfoMap;
    std::array<StepTiming, GCState::Count> executionTiming{};
    MemoryManager *mm = nullptr;
    ExtraData stateData; // extra data for specific states
    bool collectTimings = false;

    GCStateMachine();

    inline void step() {
        if (!inProgress()) {
            reset();
        }
        transition();
    }

    inline bool inProgress() {
        return state != GCState::Invalid;
    }

    inline void reset() {
        state = GCState::MarkStart;
    }

    Q_QML_EXPORT void transition();

    inline void handleTimeout(GCState state) {
        Q_UNUSED(state);
    }
};
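/* Illustrative driver sketch (not part of this header): one way a caller might
   advance the incremental collector. step() resets the machine to MarkStart when
   no cycle is in progress and then calls transition(), which is expected to honour
   `deadline`/`timeLimit` when deciding how far to run in one increment.

       GCStateMachine *sm = ...;                       // owned by the MemoryManager
       sm->timeLimit = std::chrono::milliseconds(2);   // time budget per increment
       sm->step();                                     // run one time-boxed increment
       if (!sm->inProgress())
           ; // the collection cycle completed within this increment
*/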

using GCState = GCStateMachine::GCState;
using GCStateInfo = GCStateMachine::GCStateInfo;

struct ChunkAllocator;
struct MemorySegment;

struct BlockAllocator {
    BlockAllocator(ChunkAllocator *chunkAllocator, ExecutionEngine *engine)
        : chunkAllocator(chunkAllocator), engine(engine)
    {
        memset(freeBins, 0, sizeof(freeBins));
    }

    enum { NumBins = 8 };

    static inline size_t binForSlots(size_t nSlots) {
        return nSlots >= NumBins ? NumBins - 1 : nSlots;
    }
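    // e.g. with NumBins == 8: binForSlots(3) == 3, while any request of NumBins
    // or more slots falls into the last bin, so binForSlots(12) == 7.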

    HeapItem *allocate(size_t size, bool forceAllocation = false);

    size_t totalSlots() const {
        return Chunk::AvailableSlots*chunks.size();
    }

    size_t allocatedMem() const {
        return chunks.size()*Chunk::DataSize;
    }
    size_t usedMem() const {
        uint used = 0;
        for (auto c : chunks)
            used += c->nUsedSlots()*Chunk::SlotSize;
        return used;
    }

    void sweep();
    void freeAll();
    void resetBlackBits();

    // bump allocations
    HeapItem *nextFree = nullptr;
    size_t nFree = 0;
    size_t usedSlotsAfterLastSweep = 0;
    HeapItem *freeBins[NumBins];
    ChunkAllocator *chunkAllocator;
    ExecutionEngine *engine;
    std::vector<Chunk *> chunks;
    uint *allocationStats = nullptr;
};

struct HugeItemAllocator {
    HugeItemAllocator(ChunkAllocator *chunkAllocator, ExecutionEngine *engine)
        : chunkAllocator(chunkAllocator), engine(engine)
    {}

    HeapItem *allocate(size_t size);
    void sweep(ClassDestroyStatsCallback classCountPtr);
    void freeAll();
    void resetBlackBits();

    size_t usedMem() const {
        size_t used = 0;
        for (const auto &c : chunks)
            used += c.size;
        return used;
    }

    ChunkAllocator *chunkAllocator;
    ExecutionEngine *engine;
    struct HugeChunk {
        MemorySegment *segment;
        Chunk *chunk;
        size_t size;
    };

    std::vector<HugeChunk> chunks;
};


class Q_QML_EXPORT MemoryManager
{
    Q_DISABLE_COPY(MemoryManager);

public:
    MemoryManager(ExecutionEngine *engine);
    ~MemoryManager();

    template <typename ToBeMarked>
    friend struct GCCriticalSection;

    // TODO: this is only for 64bit (and x86 with SSE/AVX), so extend it for other architectures to be slightly more efficient (meaning, align on 8-byte boundaries).
    // Note: all occurrences of "16" in alloc/dealloc are also due to the alignment.
    constexpr static inline std::size_t align(std::size_t size)
    { return (size + Chunk::SlotSize - 1) & ~(Chunk::SlotSize - 1); }
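    // rounds up to the next multiple of Chunk::SlotSize; e.g. if SlotSize is 32:
    // align(1) == 32, align(32) == 32, align(33) == 64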

    /* NOTE: allocManaged comes in various overloads. If size is not passed explicitly
       sizeof(ManagedType::Data) is used for size. However, there are quite a few cases
       where we allocate more than sizeof(ManagedType::Data); that's generally the case
       when the Object has a ValueArray member.
       If no internal class pointer is provided, ManagedType::defaultInternalClass(engine)
       will be used as the internal class.
    */
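    /* Illustrative call sites (a sketch only; SomeManagedType and OtherManagedType
       are placeholder names, not types that exist in QV4):

           // explicit size, e.g. for a Data type followed by a ValueArray of n values:
           auto *d = mm->allocManaged<SomeManagedType>(
               sizeof(SomeManagedType::Data) + n * sizeof(Value), ic);

           // default size (sizeof(Data)) and default internal class:
           auto *m = mm->allocManaged<OtherManagedType>();
    */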

    template<typename ManagedType>
    inline typename ManagedType::Data *allocManaged(std::size_t size, Heap::InternalClass *ic)
    {
        Q_STATIC_ASSERT(std::is_trivial_v<typename ManagedType::Data>);
        size = align(size);
        typename ManagedType::Data *d = static_cast<typename ManagedType::Data *>(allocData(size));
        d->internalClass.set(engine, ic);
        Q_ASSERT(d->internalClass && d->internalClass->vtable);
        Q_ASSERT(ic->vtable == ManagedType::staticVTable());
        return d;
    }

    template<typename ManagedType>
    inline typename ManagedType::Data *allocManaged(Heap::InternalClass *ic)
    {
        return allocManaged<ManagedType>(sizeof(typename ManagedType::Data), ic);
    }

    template<typename ManagedType>
    inline typename ManagedType::Data *allocManaged(std::size_t size, InternalClass *ic)
    {
        return allocManaged<ManagedType>(size, ic->d());
    }

    template<typename ManagedType>
    inline typename ManagedType::Data *allocManaged(InternalClass *ic)
    {
        return allocManaged<ManagedType>(sizeof(typename ManagedType::Data), ic);
    }

    template<typename ManagedType>
    inline typename ManagedType::Data *allocManaged(std::size_t size)
    {
        Scope scope(engine);
        Scoped<InternalClass> ic(scope, ManagedType::defaultInternalClass(engine));
        return allocManaged<ManagedType>(size, ic);
    }

    template<typename ManagedType>
    inline typename ManagedType::Data *allocManaged()
    {
        auto constexpr size = sizeof(typename ManagedType::Data);
        Scope scope(engine);
        Scoped<InternalClass> ic(scope, ManagedType::defaultInternalClass(engine));
        return allocManaged<ManagedType>(size, ic);
    }

    template <typename ObjectType>
    typename ObjectType::Data *allocateObject(Heap::InternalClass *ic)
    {
        Heap::Object *o = allocObjectWithMemberData(ObjectType::staticVTable(), ic->size);
        o->internalClass.set(engine, ic);
        Q_ASSERT(o->internalClass.get() && o->vtable());
        Q_ASSERT(o->vtable() == ObjectType::staticVTable());
        return static_cast<typename ObjectType::Data *>(o);
    }

    template <typename ObjectType>
    typename ObjectType::Data *allocateObject(InternalClass *ic)
    {
        return allocateObject<ObjectType>(ic->d());
    }

    template <typename ObjectType>
    typename ObjectType::Data *allocateObject()
    {
        Scope scope(engine);
        Scoped<InternalClass> ic(scope, ObjectType::defaultInternalClass(engine));
        ic = ic->changeVTable(ObjectType::staticVTable());
        ic = ic->changePrototype(ObjectType::defaultPrototype(engine)->d());
        return allocateObject<ObjectType>(ic);
    }

    template <typename ManagedType, typename Arg1>
    typename ManagedType::Data *allocWithStringData(std::size_t unmanagedSize, Arg1 &&arg1)
    {
        typename ManagedType::Data *o = reinterpret_cast<typename ManagedType::Data *>(allocString(unmanagedSize));
        o->internalClass.set(engine, ManagedType::defaultInternalClass(engine));
        Q_ASSERT(o->internalClass && o->internalClass->vtable);
        o->init(std::forward<Arg1>(arg1));
        return o;
    }

    template <typename ObjectType, typename... Args>
    typename ObjectType::Data *allocObject(Heap::InternalClass *ic, Args&&... args)
    {
        typename ObjectType::Data *d = allocateObject<ObjectType>(ic);
        d->init(std::forward<Args>(args)...);
        return d;
    }

    template <typename ObjectType, typename... Args>
    typename ObjectType::Data *allocObject(InternalClass *ic, Args&&... args)
    {
        typename ObjectType::Data *d = allocateObject<ObjectType>(ic);
        d->init(std::forward<Args>(args)...);
        return d;
    }

    template <typename ObjectType, typename... Args>
    typename ObjectType::Data *allocate(Args&&... args)
    {
        Scope scope(engine);
        Scoped<ObjectType> t(scope, allocateObject<ObjectType>());
        t->d_unchecked()->init(std::forward<Args>(args)...);
        return t->d();
    }

    template <typename ManagedType, typename... Args>
    typename ManagedType::Data *alloc(Args&&... args)
    {
        Scope scope(engine);
        Scoped<ManagedType> t(scope, allocManaged<ManagedType>());
        t->d_unchecked()->init(std::forward<Args>(args)...);
        return t->d();
    }
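    /* Illustrative use of the convenience entry points above (a sketch; the concrete
       types and constructor arguments are placeholders):

           auto *obj = engine->memoryManager->allocate<SomeObjectType>(arg1, arg2);
           auto *mgd = engine->memoryManager->alloc<SomeManagedType>();
    */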

    void runGC();
    bool tryForceGCCompletion();
    void runFullGC();

    void dumpStats() const;

    size_t getUsedMem() const;
    size_t getAllocatedMem() const;
    size_t getLargeItemsMem() const;

    // called when a JS object grows itself. Specifically: Heap::String::append
    // and InternalClassDataPrivate<PropertyAttributes>.
    void changeUnmanagedHeapSizeUsage(qptrdiff delta) { unmanagedHeapSize += delta; }

    // called at the end of a gc cycle
    void updateUnmanagedHeapSizeGCLimit();

    template<typename ManagedType>
    typename ManagedType::Data *allocIC()
    {
        Heap::Base *b = *allocate(&icAllocator, align(sizeof(typename ManagedType::Data)));
        return static_cast<typename ManagedType::Data *>(b);
    }

    void registerWeakMap(Heap::MapObject *map);
    void registerWeakSet(Heap::SetObject *set);

    void onEventLoop();

    // GC related methods
    void setGCTimeLimit(int timeMs);
    MarkStack* markStack() { return m_markStack.get(); }

protected:
    /// expects size to be aligned
    Heap::Base *allocString(std::size_t unmanagedSize);
    Heap::Base *allocData(std::size_t size);
    Heap::Object *allocObjectWithMemberData(const QV4::VTable *vtable, uint nMembers);

private:
    enum {
        MinUnmanagedHeapSizeGCLimit = 128 * 1024
    };

public:
    void collectFromJSStack(MarkStack *markStack) const;
    void sweep(bool lastSweep = false, ClassDestroyStatsCallback classCountPtr = nullptr);
    void cleanupDeletedQObjectWrappersInSweep();
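    // While an incremental gc is already in progress (m_markStack != nullptr) the
    // threshold is relaxed to 1.5 * unmanagedHeapSizeGCLimit; otherwise the plain
    // limit applies.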
    bool isAboveUnmanagedHeapLimit()
    {
        const bool incrementalGCIsAlreadyRunning = m_markStack != nullptr;
        const bool aboveUnmanagedHeapLimit = incrementalGCIsAlreadyRunning
            ? unmanagedHeapSize > 3 * unmanagedHeapSizeGCLimit / 2
            : unmanagedHeapSize > unmanagedHeapSizeGCLimit;
        return aboveUnmanagedHeapLimit;
    }
private:
    bool shouldRunGC() const;

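    // Central allocation helper: may run a (full or incremental) gc first, routes
    // requests larger than Chunk::DataSize to the huge item allocator, and only
    // forces an allocation from the block allocator when a regular allocation
    // still fails after a gc run was considered.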
    HeapItem *allocate(BlockAllocator *allocator, std::size_t size)
    {
        const bool incrementalGCIsAlreadyRunning = m_markStack != nullptr;

        bool didGCRun = false;
        if (aggressiveGC) {
            runFullGC();
            didGCRun = true;
        }

        if (isAboveUnmanagedHeapLimit()) {
            if (!didGCRun)
                incrementalGCIsAlreadyRunning ? (void) tryForceGCCompletion() : runGC();
            didGCRun = true;
        }

        if (size > Chunk::DataSize)
            return hugeItemAllocator.allocate(size);

        if (HeapItem *m = allocator->allocate(size))
            return m;

        if (!didGCRun && shouldRunGC())
            runGC();

        return allocator->allocate(size, true);
    }

public:
    QV4::ExecutionEngine *engine;
    ChunkAllocator *chunkAllocator;
    BlockAllocator blockAllocator;
    BlockAllocator icAllocator;
    HugeItemAllocator hugeItemAllocator;
    PersistentValueStorage *m_persistentValues;
    PersistentValueStorage *m_weakValues;
    QVector<Value *> m_pendingFreedObjectWrapperValue;
    Heap::MapObject *weakMaps = nullptr;
    Heap::SetObject *weakSets = nullptr;

    std::unique_ptr<GCStateMachine> gcStateMachine{nullptr};
    std::unique_ptr<MarkStack> m_markStack{nullptr};

    std::size_t unmanagedHeapSize = 0; // the number of bytes of heap memory not managed by the memory manager, but held onto by managed items
    std::size_t unmanagedHeapSizeGCLimit;
    std::size_t usedSlotsAfterLastFullSweep = 0;

    enum Blockness : quint8 {Unblocked, NormalBlocked, InCriticalSection };
    Blockness gcBlocked = Unblocked;
    bool aggressiveGC = false;
    bool gcStats = false;
    bool gcCollectorStats = false;

    int allocationCount = 0;
    size_t lastAllocRequestedSlots = 0;

    struct {
        size_t maxReservedMem = 0;
        size_t maxAllocatedMem = 0;
        size_t maxUsedMem = 0;
        uint allocations[BlockAllocator::NumBins];
    } statistics;
};

/*!
    \internal
    GCCriticalSection prevents the gc from running until it is destructed.
    In its dtor, it checks whether we've reached the unmanaged heap limit
    and triggers a gc run if necessary.
    Lastly, it can optionally mark an object passed to it before running the gc.
    An illustrative usage sketch follows the class definition below.
 */
template <typename ToBeMarked = void>
struct GCCriticalSection {
    Q_DISABLE_COPY_MOVE(GCCriticalSection)

    Q_NODISCARD_CTOR GCCriticalSection(QV4::ExecutionEngine *engine, ToBeMarked *toBeMarked = nullptr)
        : m_engine(engine)
        , m_oldState(std::exchange(engine->memoryManager->gcBlocked, MemoryManager::InCriticalSection))
        , m_toBeMarked(toBeMarked)
    {
        // disallow nested critical sections
        Q_ASSERT(m_oldState != MemoryManager::InCriticalSection);
    }
    ~GCCriticalSection()
    {
        m_engine->memoryManager->gcBlocked = m_oldState;
        if (m_oldState != MemoryManager::Unblocked)
            if constexpr (!std::is_same_v<ToBeMarked, void>)
                if (m_toBeMarked)
                    m_toBeMarked->markObjects(m_engine->memoryManager->markStack());
        /* because we blocked the gc, we might be using too much memory on the unmanaged
           heap and did not run the normal fixup logic. So recheck again, and trigger a
           gc run if necessary */
        if (!m_engine->memoryManager->isAboveUnmanagedHeapLimit())
            return;
        if (!m_engine->isGCOngoing) {
            m_engine->memoryManager->runGC();
        } else {
            [[maybe_unused]] bool gcFinished = m_engine->memoryManager->tryForceGCCompletion();
            Q_ASSERT(gcFinished);
        }
    }

private:
    QV4::ExecutionEngine *m_engine;
    MemoryManager::Blockness m_oldState;
    ToBeMarked *m_toBeMarked;
};
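/* Illustrative usage (a sketch; Markable and its markObjects(MarkStack *) member are
   placeholders for whatever type the caller wants marked, not an existing QV4 API):

       void doHeapSensitiveWork(QV4::ExecutionEngine *engine, Markable *markable)
       {
           QV4::GCCriticalSection<Markable> section(engine, markable);
           // ... work that must not be interleaved with a gc run ...
       }   // dtor: restores the previous block state, marks `markable` if the gc
           // was blocked before, and may trigger or finish a gc run if the
           // unmanaged heap limit was exceeded in the meantime
*/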

}

QT_END_NAMESPACE

#endif // QV4GC_H