// Copyright (C) 2021 The Qt Company Ltd.
// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only

#include "PageAllocation.h"
#include "PageReservation.h"

#include <private/qnumeric_p.h>
#include <private/qv4alloca_p.h>
#include <private/qv4engine_p.h>
#include <private/qv4identifiertable_p.h>
#include <private/qv4mapobject_p.h>
#include <private/qv4mm_p.h>
#include <private/qv4object_p.h>
#include <private/qv4profiling_p.h>
#include <private/qv4qobjectwrapper_p.h>
#include <private/qv4setobject_p.h>
#include <private/qv4stackframe_p.h>

#include <QtQml/qqmlengine.h>

#include <QtCore/qalgorithms.h>
#include <QtCore/qelapsedtimer.h>
#include <QtCore/qloggingcategory.h>
#include <QtCore/qmap.h>
#include <QtCore/qscopedvaluerollback.h>

#include <algorithm>
#include <chrono>
#include <cstdlib>

//#define MM_STATS

#if !defined(MM_STATS) && !defined(QT_NO_DEBUG)
#define MM_STATS
#endif

#if MM_DEBUG
#define DEBUG qDebug() << "MM:"
#else
#define DEBUG if (1) ; else qDebug() << "MM:"
#endif

#ifdef V4_USE_VALGRIND
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>
#endif

#ifdef V4_USE_HEAPTRACK
#include <heaptrack_api.h>
#endif

#if OS(QNX)
#include <sys/storage.h> // __tls()
#endif

#if USE(PTHREADS) && HAVE(PTHREAD_NP_H)
#include <pthread_np.h>
#endif

Q_STATIC_LOGGING_CATEGORY(lcGcStats, "qt.qml.gc.statistics")
Q_STATIC_LOGGING_CATEGORY(lcGcAllocatorStats, "qt.qml.gc.allocatorStats")
Q_STATIC_LOGGING_CATEGORY(lcGcStateTransitions, "qt.qml.gc.stateTransitions")
Q_STATIC_LOGGING_CATEGORY(lcGcForcedRuns, "qt.qml.gc.forcedRuns")
Q_STATIC_LOGGING_CATEGORY(lcGcStepExecution, "qt.qml.gc.stepExecution")

using namespace WTF;

QT_BEGIN_NAMESPACE

namespace QV4 {

enum {
    MinSlotsGCLimit = QV4::Chunk::AvailableSlots*16,
    GCOverallocation = 200 /* Max overallocation by the GC in % */
};

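// A note on layout (inferred from the code below): a MemorySegment reserves
// address space for up to NumChunks chunks (64, or 8 on RTEMS) in one go and
// commits/decommits individual chunks on demand; the allocatedMap bitmap
// records, one bit per chunk, which chunks are currently handed out.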
struct MemorySegment {
    enum {
#ifdef Q_OS_RTEMS
        NumChunks = sizeof(quint64),
#else
        NumChunks = 8*sizeof(quint64),
#endif
        SegmentSize = NumChunks*Chunk::ChunkSize,
    };

    MemorySegment(size_t size)
    {
        size += Chunk::ChunkSize; // make sure we can get enough 64k alignment memory
        if (size < SegmentSize)
            size = SegmentSize;

        pageReservation = PageReservation::reserve(size, OSAllocator::JSGCHeapPages);
        base = reinterpret_cast<Chunk *>((reinterpret_cast<quintptr>(pageReservation.base()) + Chunk::ChunkSize - 1) & ~(Chunk::ChunkSize - 1));
        nChunks = NumChunks;
        availableBytes = size - (reinterpret_cast<quintptr>(base) - reinterpret_cast<quintptr>(pageReservation.base()));
        if (availableBytes < SegmentSize)
            --nChunks;
    }
    MemorySegment(MemorySegment &&other) {
        qSwap(pageReservation, other.pageReservation);
        qSwap(base, other.base);
        qSwap(allocatedMap, other.allocatedMap);
        qSwap(availableBytes, other.availableBytes);
        qSwap(nChunks, other.nChunks);
    }

    ~MemorySegment() {
        if (base)
            pageReservation.deallocate();
    }

    void setBit(size_t index) {
        Q_ASSERT(index < nChunks);
        quint64 bit = static_cast<quint64>(1) << index;
//        qDebug() << "    setBit" << hex << index << (index & (Bits - 1)) << bit;
        allocatedMap |= bit;
    }
    void clearBit(size_t index) {
        Q_ASSERT(index < nChunks);
        quint64 bit = static_cast<quint64>(1) << index;
//        qDebug() << "    clearBit" << hex << index << (index & (Bits - 1)) << bit;
        allocatedMap &= ~bit;
    }
    bool testBit(size_t index) const {
        Q_ASSERT(index < nChunks);
        quint64 bit = static_cast<quint64>(1) << index;
        return (allocatedMap & bit);
    }

    Chunk *allocate(size_t size);
    void free(Chunk *chunk, size_t size) {
        DEBUG << "freeing chunk" << chunk;
        size_t index = static_cast<size_t>(chunk - base);
        size_t end = qMin(static_cast<size_t>(NumChunks), index + (size - 1)/Chunk::ChunkSize + 1);
        while (index < end) {
            Q_ASSERT(testBit(index));
            clearBit(index);
            ++index;
        }

        size_t pageSize = WTF::pageSize();
        size = (size + pageSize - 1) & ~(pageSize - 1);
#if !defined(Q_OS_LINUX) && !defined(Q_OS_WIN)
        // Linux and Windows zero out pages that have been decommitted and get committed again.
        // Unfortunately that's not true on other OSes (e.g. BSD based ones), so zero out the
        // memory before decommit, so that we can be sure that all chunks we allocate will be
        // zero initialized.
        memset(chunk, 0, size);
#endif
        pageReservation.decommit(chunk, size);
    }

    bool contains(Chunk *c) const {
        return c >= base && c < base + nChunks;
    }

    PageReservation pageReservation;
    Chunk *base = nullptr;
    quint64 allocatedMap = 0;
    size_t availableBytes = 0;
    uint nChunks = 0;
};

Chunk *MemorySegment::allocate(size_t size)
{
    if (!allocatedMap && size >= SegmentSize) {
        // chunk allocated for one huge allocation
        Q_ASSERT(availableBytes >= size);
        pageReservation.commit(base, size);
        allocatedMap = ~static_cast<quint64>(0);
        return base;
    }
    size_t requiredChunks = (size + sizeof(Chunk) - 1)/sizeof(Chunk);
    uint sequence = 0;
    Chunk *candidate = nullptr;
    for (uint i = 0; i < nChunks; ++i) {
        if (!testBit(i)) {
            if (!candidate)
                candidate = base + i;
            ++sequence;
        } else {
            candidate = nullptr;
            sequence = 0;
        }
        if (sequence == requiredChunks) {
            pageReservation.commit(candidate, size);
            for (uint i = 0; i < requiredChunks; ++i)
                setBit(candidate - base + i);
            DEBUG << "allocated chunk " << candidate << Qt::hex << size;

            return candidate;
        }
    }
    return nullptr;
}

struct ChunkAllocator {
    ChunkAllocator() {}

    size_t requiredChunkSize(size_t size) {
        size += Chunk::HeaderSize; // space required for the Chunk header
        size_t pageSize = WTF::pageSize();
        size = (size + pageSize - 1) & ~(pageSize - 1); // align to page sizes
        if (size < Chunk::ChunkSize)
            size = Chunk::ChunkSize;
        return size;
    }

    Chunk *allocate(size_t size = 0);
    void free(Chunk *chunk, size_t size = 0);

    std::vector<MemorySegment> memorySegments;
};

Chunk *ChunkAllocator::allocate(size_t size)
{
    size = requiredChunkSize(size);
    for (auto &m : memorySegments) {
        if (~m.allocatedMap) {
            Chunk *c = m.allocate(size);
            if (c)
                return c;
        }
    }

    // allocate a new segment
    memorySegments.push_back(MemorySegment(size));
    Chunk *c = memorySegments.back().allocate(size);
    Q_ASSERT(c);
    return c;
}

void ChunkAllocator::free(Chunk *chunk, size_t size)
{
    size = requiredChunkSize(size);
    for (auto &m : memorySegments) {
        if (m.contains(chunk)) {
            m.free(chunk, size);
            return;
        }
    }
    Q_ASSERT(false);
}

#ifdef DUMP_SWEEP
QString binary(quintptr n) {
    QString s = QString::number(n, 2);
    while (s.length() < 64)
        s.prepend(QChar::fromLatin1('0'));
    return s;
}
#define SDUMP qDebug
#else
QString binary(quintptr) { return QString(); }
#define SDUMP if (1) ; else qDebug
#endif

// Stores a classname -> freed count mapping.
typedef QHash<const char*, int> MMStatsHash;
Q_GLOBAL_STATIC(MMStatsHash, freedObjectStatsGlobal)

// This indirection avoids sticking QHash code in each of the call sites, which
// shaves off some instructions in the case that it's unused.
static void increaseFreedCountForClass(const char *className)
{
    (*freedObjectStatsGlobal())[className]++;
}

//bool Chunk::sweep(ClassDestroyStatsCallback classCountPtr)
bool Chunk::sweep(ExecutionEngine *engine)
{
    bool hasUsedSlots = false;
    SDUMP() << "sweeping chunk" << this;
    HeapItem *o = realBase();
    bool lastSlotFree = false;
    for (uint i = 0; i < Chunk::EntriesInBitmap; ++i) {
        quintptr toFree = objectBitmap[i] ^ blackBitmap[i];
        Q_ASSERT((toFree & objectBitmap[i]) == toFree); // check all black objects are marked as being used
        quintptr e = extendsBitmap[i];
        SDUMP() << "   index=" << i;
        SDUMP() << "      toFree  =" << binary(toFree);
        SDUMP() << "      black   =" << binary(blackBitmap[i]);
        SDUMP() << "      object  =" << binary(objectBitmap[i]);
        SDUMP() << "      extends =" << binary(e);
        if (lastSlotFree)
            e &= (e + 1); // clear all lowest extent bits
        while (toFree) {
            uint index = qCountTrailingZeroBits(toFree);
            quintptr bit = (static_cast<quintptr>(1) << index);

            toFree ^= bit; // mask out freed slot
            // DEBUG << "       index" << hex << index << toFree;

            // remove all extends slots that have been freed
            // this is a bit of bit trickery.
            quintptr mask = (bit << 1) - 1; // create a mask of 1's to the right of and up to the current bit
            quintptr objmask = e | mask; // or'ing mask with e gives all ones until the end of the current object
            quintptr result = objmask + 1;
            Q_ASSERT(qCountTrailingZeroBits(result) - index != 0); // ensure we freed something
            result |= mask; // ensure we don't clear stuff to the right of the current object
            e &= result;
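            // Worked example (showing only the 5 lowest bits): freeing the
            // object at index 2 whose extents occupy bits 3 and 4 of e:
            //   bit = 0b00100, mask = 0b00111, objmask = e | mask = 0b11111;
            //   objmask + 1 carries into bit 5 and clears bits 0-4; or'ing
            //   mask back in restores bits 0-2, so "e &= result" drops exactly
            //   the extent bits 3 and 4 that belonged to the freed object.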

            HeapItem *itemToFree = o + index;
            Heap::Base *b = *itemToFree;
            const VTable *v = b->internalClass->vtable;
//            if (Q_UNLIKELY(classCountPtr))
//                classCountPtr(v->className);
            if (v->destroy) {
                v->destroy(b);
                b->_checkIsDestroyed();
            }
#ifdef V4_USE_HEAPTRACK
            heaptrack_report_free(itemToFree);
#endif
        }
        Q_V4_PROFILE_DEALLOC(engine, qPopulationCount((objectBitmap[i] | extendsBitmap[i])
                                                      - (blackBitmap[i] | e)) * Chunk::SlotSize,
                             Profiling::SmallItem);
        objectBitmap[i] = blackBitmap[i];
        hasUsedSlots |= (blackBitmap[i] != 0);
        extendsBitmap[i] = e;
        lastSlotFree = !((objectBitmap[i]|extendsBitmap[i]) >> (sizeof(quintptr)*8 - 1));
        SDUMP() << "   new extends =" << binary(e);
        SDUMP() << "   lastSlotFree" << lastSlotFree;
        Q_ASSERT((objectBitmap[i] & extendsBitmap[i]) == 0);
        o += Chunk::Bits;
    }
    // DEBUG << "swept chunk" << this << "freed" << slotsFreed << "slots.";
    return hasUsedSlots;
}

void Chunk::freeAll(ExecutionEngine *engine)
{
    // DEBUG << "sweeping chunk" << this << (*freeList);
    HeapItem *o = realBase();
    for (uint i = 0; i < Chunk::EntriesInBitmap; ++i) {
        quintptr toFree = objectBitmap[i];
        quintptr e = extendsBitmap[i];
        // DEBUG << hex << "   index=" << i << toFree;
        while (toFree) {
            uint index = qCountTrailingZeroBits(toFree);
            quintptr bit = (static_cast<quintptr>(1) << index);

            toFree ^= bit; // mask out freed slot
            // DEBUG << "       index" << hex << index << toFree;

            // remove all extends slots that have been freed
            // this is a bit of bit trickery.
            quintptr mask = (bit << 1) - 1; // create a mask of 1's to the right of and up to the current bit
            quintptr objmask = e | mask; // or'ing mask with e gives all ones until the end of the current object
            quintptr result = objmask + 1;
            Q_ASSERT(qCountTrailingZeroBits(result) - index != 0); // ensure we freed something
            result |= mask; // ensure we don't clear stuff to the right of the current object
            e &= result;

            HeapItem *itemToFree = o + index;
            Heap::Base *b = *itemToFree;
            if (b->internalClass->vtable->destroy) {
                b->internalClass->vtable->destroy(b);
                b->_checkIsDestroyed();
            }
#ifdef V4_USE_HEAPTRACK
            heaptrack_report_free(itemToFree);
#endif
        }
        Q_V4_PROFILE_DEALLOC(engine, (qPopulationCount(objectBitmap[i]|extendsBitmap[i])
                                      - qPopulationCount(e)) * Chunk::SlotSize, Profiling::SmallItem);
        objectBitmap[i] = 0;
        extendsBitmap[i] = e;
        o += Chunk::Bits;
    }
    // DEBUG << "swept chunk" << this << "freed" << slotsFreed << "slots.";
}

void Chunk::resetBlackBits()
{
    memset(blackBitmap, 0, sizeof(blackBitmap));
}

void Chunk::sortIntoBins(HeapItem **bins, uint nBins)
{
//    qDebug() << "sortIntoBins:";
    HeapItem *base = realBase();
#if QT_POINTER_SIZE == 8
    const int start = 0;
#else
    const int start = 1;
#endif
#ifndef QT_NO_DEBUG
    uint freeSlots = 0;
    uint allocatedSlots = 0;
#endif
    for (int i = start; i < EntriesInBitmap; ++i) {
        quintptr usedSlots = (objectBitmap[i]|extendsBitmap[i]);
#if QT_POINTER_SIZE == 8
        if (!i)
            usedSlots |= (static_cast<quintptr>(1) << (HeaderSize/SlotSize)) - 1;
#endif
#ifndef QT_NO_DEBUG
        allocatedSlots += qPopulationCount(usedSlots);
//        qDebug() << hex << "   i=" << i << "used=" << usedSlots;
#endif
        while (1) {
            uint index = qCountTrailingZeroBits(usedSlots + 1);
            if (index == Bits)
                break;
            uint freeStart = i*Bits + index;
            usedSlots &= ~((static_cast<quintptr>(1) << index) - 1);
            while (!usedSlots) {
                if (++i < EntriesInBitmap) {
                    usedSlots = (objectBitmap[i]|extendsBitmap[i]);
                } else {
                    Q_ASSERT(i == EntriesInBitmap);
                    // Overflows to 0 when counting trailing zeroes above in next iteration.
                    // Then, all the bits are zeroes and we break.
                    usedSlots = std::numeric_limits<quintptr>::max();
                    break;
                }
#ifndef QT_NO_DEBUG
                allocatedSlots += qPopulationCount(usedSlots);
//                qDebug() << hex << "   i=" << i << "used=" << usedSlots;
#endif
            }
            HeapItem *freeItem = base + freeStart;

            index = qCountTrailingZeroBits(usedSlots);
            usedSlots |= (quintptr(1) << index) - 1;
            uint freeEnd = i*Bits + index;
            uint nSlots = freeEnd - freeStart;
#ifndef QT_NO_DEBUG
//            qDebug() << hex << "   got free slots from" << freeStart << "to" << freeEnd << "n=" << nSlots << "usedSlots=" << usedSlots;
            freeSlots += nSlots;
#endif
            Q_ASSERT(freeEnd > freeStart && freeEnd <= NumSlots);
            freeItem->freeData.availableSlots = nSlots;
            uint bin = qMin(nBins - 1, nSlots);
            freeItem->freeData.next = bins[bin];
            bins[bin] = freeItem;
        }
    }
#ifndef QT_NO_DEBUG
    Q_ASSERT(freeSlots + allocatedSlots == (EntriesInBitmap - start) * 8 * sizeof(quintptr));
#endif
}

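// Allocation strategy, in order of preference (as implemented below):
//   1. pop an exact-size item from the matching free bin
//   2. bump-allocate from the free tail of the current chunk
//   3. first fit from the overflow bin (NumBins - 1), splitting the remainder
//   4. split an item from a larger exact-size bin
//   5. allocate a fresh chunk, but only if forceAllocation is set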
HeapItem *BlockAllocator::allocate(size_t size, bool forceAllocation) {
    Q_ASSERT((size % Chunk::SlotSize) == 0);
    size_t slotsRequired = size >> Chunk::SlotSizeShift;

    if (allocationStats)
        ++allocationStats[binForSlots(slotsRequired)];

    HeapItem **last;

    HeapItem *m;

    if (slotsRequired < NumBins - 1) {
        m = freeBins[slotsRequired];
        if (m) {
            freeBins[slotsRequired] = m->freeData.next;
            goto done;
        }
    }

    if (nFree >= slotsRequired) {
        // use bump allocation
        Q_ASSERT(nextFree);
        m = nextFree;
        nextFree += slotsRequired;
        nFree -= slotsRequired;
        goto done;
    }

    // DEBUG << "No matching bin found for item" << size << bin;
    // search last bin for a large enough item
    last = &freeBins[NumBins - 1];
    while ((m = *last)) {
        if (m->freeData.availableSlots >= slotsRequired) {
            *last = m->freeData.next; // take it out of the list

            size_t remainingSlots = m->freeData.availableSlots - slotsRequired;
            // DEBUG << "found large free slots of size" << m->freeData.availableSlots << m << "remaining" << remainingSlots;
            if (remainingSlots == 0)
                goto done;

            HeapItem *remainder = m + slotsRequired;
            if (remainingSlots > nFree) {
                if (nFree) {
                    size_t bin = binForSlots(nFree);
                    nextFree->freeData.next = freeBins[bin];
                    nextFree->freeData.availableSlots = nFree;
                    freeBins[bin] = nextFree;
                }
                nextFree = remainder;
                nFree = remainingSlots;
            } else {
                remainder->freeData.availableSlots = remainingSlots;
                size_t binForRemainder = binForSlots(remainingSlots);
                remainder->freeData.next = freeBins[binForRemainder];
                freeBins[binForRemainder] = remainder;
            }
            goto done;
        }
        last = &m->freeData.next;
    }

    if (slotsRequired < NumBins - 1) {
        // check if we can split up another slot
        for (size_t i = slotsRequired + 1; i < NumBins - 1; ++i) {
            m = freeBins[i];
            if (m) {
                freeBins[i] = m->freeData.next; // take it out of the list
//                qDebug() << "got item" << slotsRequired << "from slot" << i;
                size_t remainingSlots = i - slotsRequired;
                Q_ASSERT(remainingSlots < NumBins - 1);
                HeapItem *remainder = m + slotsRequired;
                remainder->freeData.availableSlots = remainingSlots;
                remainder->freeData.next = freeBins[remainingSlots];
                freeBins[remainingSlots] = remainder;
                goto done;
            }
        }
    }

    if (!m) {
        if (!forceAllocation)
            return nullptr;
        if (nFree) {
            // Save any remaining slots of the current chunk
            // for later, smaller allocations.
            size_t bin = binForSlots(nFree);
            nextFree->freeData.next = freeBins[bin];
            nextFree->freeData.availableSlots = nFree;
            freeBins[bin] = nextFree;
        }
        Chunk *newChunk = chunkAllocator->allocate();
        Q_V4_PROFILE_ALLOC(engine, Chunk::DataSize, Profiling::HeapPage);
        chunks.push_back(newChunk);
        nextFree = newChunk->first();
        nFree = Chunk::AvailableSlots;
        m = nextFree;
        nextFree += slotsRequired;
        nFree -= slotsRequired;
    }

done:
    m->setAllocatedSlots(slotsRequired);
    Q_V4_PROFILE_ALLOC(engine, slotsRequired * Chunk::SlotSize, Profiling::SmallItem);
#ifdef V4_USE_HEAPTRACK
    heaptrack_report_alloc(m, slotsRequired * Chunk::SlotSize);
#endif
    // DEBUG << "   " << hex << m->chunk() << m->chunk()->objectBitmap[0] << m->chunk()->extendsBitmap[0] << (m - m->chunk()->realBase());
    return m;
}

void BlockAllocator::sweep()
{
    nextFree = nullptr;
    nFree = 0;
    memset(freeBins, 0, sizeof(freeBins));

//    qDebug() << "BlockAlloc: sweep";
    usedSlotsAfterLastSweep = 0;

    auto firstEmptyChunk = std::partition(chunks.begin(), chunks.end(), [this](Chunk *c) {
        return c->sweep(engine);
    });

    std::for_each(chunks.begin(), firstEmptyChunk, [this](Chunk *c) {
        c->sortIntoBins(freeBins, NumBins);
        usedSlotsAfterLastSweep += c->nUsedSlots();
    });

    // Free the empty chunks only at the end, so that the sweep() calls above
    // cannot indirectly access memory that has already been freed.
    std::for_each(firstEmptyChunk, chunks.end(), [this](Chunk *c) {
        Q_V4_PROFILE_DEALLOC(engine, Chunk::DataSize, Profiling::HeapPage);
        chunkAllocator->free(c);
    });

    chunks.erase(firstEmptyChunk, chunks.end());
}

void BlockAllocator::freeAll()
{
    for (auto c : chunks)
        c->freeAll(engine);
    for (auto c : chunks) {
        Q_V4_PROFILE_DEALLOC(engine, Chunk::DataSize, Profiling::HeapPage);
        chunkAllocator->free(c);
    }
}

void BlockAllocator::resetBlackBits()
{
    for (auto c : chunks)
        c->resetBlackBits();
}

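// Items of at least half a segment get a MemorySegment of their own; anything
// smaller still goes through the shared ChunkAllocator. Either way a huge item
// occupies its chunk(s) alone, with a single object bit marking the first slot.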
HeapItem *HugeItemAllocator::allocate(size_t size) {
    MemorySegment *m = nullptr;
    Chunk *c = nullptr;
    if (size >= MemorySegment::SegmentSize/2) {
        // too large to handle through the ChunkAllocator, let's get our own memory segment
        size += Chunk::HeaderSize; // space required for the Chunk header
        size_t pageSize = WTF::pageSize();
        size = (size + pageSize - 1) & ~(pageSize - 1); // align to page sizes
        m = new MemorySegment(size);
        c = m->allocate(size);
    } else {
        c = chunkAllocator->allocate(size);
    }
    Q_ASSERT(c);
    chunks.push_back(HugeChunk{m, c, size});
    Chunk::setBit(c->objectBitmap, c->first() - c->realBase());
    Q_V4_PROFILE_ALLOC(engine, size, Profiling::LargeItem);
#ifdef V4_USE_HEAPTRACK
    heaptrack_report_alloc(c, size);
#endif
    return c->first();
}

static void freeHugeChunk(ChunkAllocator *chunkAllocator, const HugeItemAllocator::HugeChunk &c, ClassDestroyStatsCallback classCountPtr)
{
    HeapItem *itemToFree = c.chunk->first();
    Heap::Base *b = *itemToFree;
    const VTable *v = b->internalClass->vtable;
    if (Q_UNLIKELY(classCountPtr))
        classCountPtr(v->className);

    if (v->destroy) {
        v->destroy(b);
        b->_checkIsDestroyed();
    }
    if (c.segment) {
        // own memory segment
        c.segment->free(c.chunk, c.size);
        delete c.segment;
    } else {
        chunkAllocator->free(c.chunk, c.size);
    }
#ifdef V4_USE_HEAPTRACK
    heaptrack_report_free(c.chunk);
#endif
}

void HugeItemAllocator::sweep(ClassDestroyStatsCallback classCountPtr)
{
    auto isBlack = [this, classCountPtr] (const HugeChunk &c) {
        bool b = c.chunk->first()->isBlack();
        Chunk::clearBit(c.chunk->blackBitmap, c.chunk->first() - c.chunk->realBase());
        if (!b) {
            Q_V4_PROFILE_DEALLOC(engine, c.size, Profiling::LargeItem);
            freeHugeChunk(chunkAllocator, c, classCountPtr);
        }
        return !b;
    };

    auto newEnd = std::remove_if(chunks.begin(), chunks.end(), isBlack);
    chunks.erase(newEnd, chunks.end());
}

void HugeItemAllocator::resetBlackBits()
{
    for (auto c : chunks)
        Chunk::clearBit(c.chunk->blackBitmap, c.chunk->first() - c.chunk->realBase());
}

void HugeItemAllocator::freeAll()
{
    for (auto &c : chunks) {
        Q_V4_PROFILE_DEALLOC(engine, c.size, Profiling::LargeItem);
        freeHugeChunk(chunkAllocator, c, nullptr);
    }
}

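// The incremental GC is driven by the state functions below; on the happy path
// they run in this order:
//   MarkStart -> MarkGlobalObject -> MarkJSStack -> InitMarkPersistentValues
//   -> MarkPersistentValues -> InitMarkWeakValues -> MarkWeakValues
//   -> MarkDrain -> MarkReady -> InitCallDestroyObjects -> CallDestroyObjects
//   -> FreeWeakMaps -> FreeWeakSets -> HandleQObjectWrappers -> DoSweep
//   -> Invalid (done)
// Each function returns the next state; GCStateMachine::transition() executes
// states until the time budget expires and resumes from the event loop.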
namespace {
using ExtraData = GCStateInfo::ExtraData;
GCState markStart(GCStateMachine *that, ExtraData &)
{
    // Initialize the mark stack
    that->mm->m_markStack = std::make_unique<MarkStack>(that->mm->engine);
    that->mm->engine->isGCOngoing = true;
    return GCState::MarkGlobalObject;
}

GCState markGlobalObject(GCStateMachine *that, ExtraData &)
{
    that->mm->engine->markObjects(that->mm->m_markStack.get());
    return GCState::MarkJSStack;
}

GCState markJSStack(GCStateMachine *that, ExtraData &)
{
    that->mm->collectFromJSStack(that->mm->markStack());
    return GCState::InitMarkPersistentValues;
}

GCState initMarkPersistentValues(GCStateMachine *that, ExtraData &stateData)
{
    if (!that->mm->m_persistentValues)
        return GCState::InitMarkWeakValues; // no persistent values to mark
    stateData = GCIteratorStorage { that->mm->m_persistentValues->begin() };
    return GCState::MarkPersistentValues;
}

static constexpr int markLoopIterationCount = 1024;

bool wasDrainNecessary(MarkStack *ms, QDeadlineTimer deadline)
{
    if (ms->remainingBeforeSoftLimit() > markLoopIterationCount)
        return false;
    // drain
    ms->drain(deadline);
    return true;
}

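// Each mark loop below pushes at most one new entry per iteration, so draining
// whenever fewer than markLoopIterationCount slots remain before the soft
// limit keeps a full batch from overrunning the mark stack reservation.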
GCState markPersistentValues(GCStateMachine *that, ExtraData &stateData) {
    auto markStack = that->mm->markStack();
    if (wasDrainNecessary(markStack, that->deadline) && that->deadline.hasExpired())
        return GCState::MarkPersistentValues;
    PersistentValueStorage::Iterator& it = get<GCIteratorStorage>(stateData).it;
    // batch iterations to avoid checking the deadline timer on every value
    for (int i = 0; i < markLoopIterationCount; ++i) {
        if (!it.p)
            return GCState::InitMarkWeakValues;
        if (Managed *m = (*it).as<Managed>())
            m->mark(markStack);
        ++it;
    }
    return GCState::MarkPersistentValues;
}

GCState initMarkWeakValues(GCStateMachine *that, ExtraData &stateData)
{
    stateData = GCIteratorStorage { that->mm->m_weakValues->begin() };
    return GCState::MarkWeakValues;
}

GCState markWeakValues(GCStateMachine *that, ExtraData &stateData)
{
    auto markStack = that->mm->markStack();
    if (wasDrainNecessary(markStack, that->deadline) && that->deadline.hasExpired())
        return GCState::MarkWeakValues;
    PersistentValueStorage::Iterator& it = get<GCIteratorStorage>(stateData).it;
    // batch iterations to avoid checking the deadline timer on every value
    for (int i = 0; i < markLoopIterationCount; ++i) {
        if (!it.p)
            return GCState::MarkDrain;
        QObjectWrapper *qobjectWrapper = (*it).as<QObjectWrapper>();
        ++it;
        if (!qobjectWrapper)
            continue;
        QObject *qobject = qobjectWrapper->object();
        if (!qobject)
            continue;
        bool keepAlive = QQmlData::keepAliveDuringGarbageCollection(qobject);

        if (!keepAlive) {
            if (QObject *parent = qobject->parent()) {
                while (parent->parent())
                    parent = parent->parent();
                keepAlive = QQmlData::keepAliveDuringGarbageCollection(parent);
            }
        }

        if (keepAlive)
            qobjectWrapper->mark(that->mm->markStack());
    }
    return GCState::MarkWeakValues;
}

GCState markDrain(GCStateMachine *that, ExtraData &)
{
    if (that->deadline.isForever()) {
        that->mm->markStack()->drain();
        return GCState::MarkReady;
    }
    auto drainState = that->mm->m_markStack->drain(that->deadline);
    return drainState == MarkStack::DrainState::Complete
            ? GCState::MarkReady
            : GCState::MarkDrain;
}

GCState markReady(GCStateMachine *, ExtraData &)
{
    // Possibility to do some clean up, stat printing, etc...
    return GCState::InitCallDestroyObjects;
}

/*!
    \internal
    Collects new references from the stack, then drains the mark stack again.
*/
void redrain(GCStateMachine *that)
{
    that->mm->collectFromJSStack(that->mm->markStack());
    that->mm->m_markStack->drain();
}

GCState initCallDestroyObjects(GCStateMachine *that, ExtraData &stateData)
{
    // as we don't have a deletion barrier, we need to rescan the stack
    redrain(that);
    if (!that->mm->m_weakValues)
        return GCState::FreeWeakMaps; // no need to call destroy objects
    stateData = GCIteratorStorage { that->mm->m_weakValues->begin() };
    return GCState::CallDestroyObjects;
}
GCState callDestroyObject(GCStateMachine *that, ExtraData &stateData)
{
    PersistentValueStorage::Iterator& it = get<GCIteratorStorage>(stateData).it;
    // destroyObject might call user code, which really shouldn't call back into the gc
    auto oldState = std::exchange(that->mm->gcBlocked, QV4::MemoryManager::Blockness::InCriticalSection);
    auto cleanup = qScopeGuard([&]() {
        that->mm->gcBlocked = oldState;
    });
    // batch iterations to avoid checking the deadline timer on every value
    for (int i = 0; i < markLoopIterationCount; ++i) {
        if (!it.p)
            return GCState::FreeWeakMaps;
        Managed *m = (*it).managed();
        ++it;
        if (!m || m->markBit())
            continue;
        // we need to call destroyObject on qobjectwrappers now, so that they can emit the destroyed
        // signal before we start sweeping the heap
        if (QObjectWrapper *qobjectWrapper = m->as<QObjectWrapper>())
            qobjectWrapper->destroyObject(/*lastSweep =*/false);
    }
    return GCState::CallDestroyObjects;
}

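// Walk the intrusive list of weak maps: marked maps have their unmarked keys
// pruned and stay linked; unmarked maps are skipped, which unlinks them from
// the list (their storage is reclaimed by the subsequent sweep).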
void freeWeakMaps(MemoryManager *mm)
{
    for (auto [map, lastMap] = std::tuple {mm->weakMaps, &mm->weakMaps}; map; map = map->nextWeakMap) {
        if (!map->isMarked())
            continue;
        map->removeUnmarkedKeys();
        *lastMap = map;
        lastMap = &map->nextWeakMap;
    }
}

GCState freeWeakMaps(GCStateMachine *that, ExtraData &)
{
    freeWeakMaps(that->mm);
    return GCState::FreeWeakSets;
}

void freeWeakSets(MemoryManager *mm)
{
    for (auto [set, lastSet] = std::tuple {mm->weakSets, &mm->weakSets}; set; set = set->nextWeakSet) {
        if (!set->isMarked())
            continue;
        set->removeUnmarkedKeys();
        *lastSet = set;
        lastSet = &set->nextWeakSet;
    }
}

GCState freeWeakSets(GCStateMachine *that, ExtraData &)
{
    freeWeakSets(that->mm);
    return GCState::HandleQObjectWrappers;
}

GCState handleQObjectWrappers(GCStateMachine *that, ExtraData &)
{
    that->mm->cleanupDeletedQObjectWrappersInSweep();
    return GCState::DoSweep;
}

GCState doSweep(GCStateMachine *that, ExtraData &)
{
    auto mm = that->mm;

    mm->engine->identifierTable->sweep();
    mm->blockAllocator.sweep();
    mm->hugeItemAllocator.sweep(that->mm->gcCollectorStats ? increaseFreedCountForClass : nullptr);
    mm->icAllocator.sweep();

    // reset all black bits
    mm->blockAllocator.resetBlackBits();
    mm->hugeItemAllocator.resetBlackBits();
    mm->icAllocator.resetBlackBits();

    mm->usedSlotsAfterLastFullSweep = mm->blockAllocator.usedSlotsAfterLastSweep + mm->icAllocator.usedSlotsAfterLastSweep;
    mm->gcBlocked = MemoryManager::Unblocked;
    mm->m_markStack.reset();
    mm->engine->isGCOngoing = false;

    mm->updateUnmanagedHeapSizeGCLimit();

    return GCState::Invalid;
}

} // namespace


MemoryManager::MemoryManager(ExecutionEngine *engine)
    : engine(engine)
    , chunkAllocator(new ChunkAllocator)
    , blockAllocator(chunkAllocator, engine)
    , icAllocator(chunkAllocator, engine)
    , hugeItemAllocator(chunkAllocator, engine)
    , m_persistentValues(new PersistentValueStorage(engine))
    , m_weakValues(new PersistentValueStorage(engine))
    , unmanagedHeapSizeGCLimit(MinUnmanagedHeapSizeGCLimit)
    , aggressiveGC(!qEnvironmentVariableIsEmpty("QV4_MM_AGGRESSIVE_GC"))
    , gcStats(lcGcStats().isDebugEnabled())
    , gcCollectorStats(lcGcAllocatorStats().isDebugEnabled())
{
#ifdef V4_USE_VALGRIND
    VALGRIND_CREATE_MEMPOOL(this, 0, true);
#endif
    memset(statistics.allocations, 0, sizeof(statistics.allocations));
    if (gcStats)
        blockAllocator.allocationStats = statistics.allocations;

    gcStateMachine = std::make_unique<GCStateMachine>();
    gcStateMachine->mm = this;

    gcStateMachine->stateInfoMap[GCState::MarkStart] = {
        markStart,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::MarkGlobalObject] = {
        markGlobalObject,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::MarkJSStack] = {
        markJSStack,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::InitMarkPersistentValues] = {
        initMarkPersistentValues,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::MarkPersistentValues] = {
        markPersistentValues,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::InitMarkWeakValues] = {
        initMarkWeakValues,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::MarkWeakValues] = {
        markWeakValues,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::MarkDrain] = {
        markDrain,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::MarkReady] = {
        markReady,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::InitCallDestroyObjects] = {
        initCallDestroyObjects,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::CallDestroyObjects] = {
        callDestroyObject,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::FreeWeakMaps] = {
        freeWeakMaps,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::FreeWeakSets] = {
        freeWeakSets,
        true, // ensure that handleQObjectWrappers runs in isolation
    };
    gcStateMachine->stateInfoMap[GCState::HandleQObjectWrappers] = {
        handleQObjectWrappers,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::DoSweep] = {
        doSweep,
        false,
    };
}

Heap::Base *MemoryManager::allocString(std::size_t unmanagedSize)
{
    const size_t stringSize = align(sizeof(Heap::String));
#ifdef MM_STATS
    lastAllocRequestedSlots = stringSize >> Chunk::SlotSizeShift;
    ++allocationCount;
#endif
    unmanagedHeapSize += unmanagedSize;

    HeapItem *m = allocate(&blockAllocator, stringSize);
    memset(m, 0, stringSize);
    return *m;
}

Heap::Base *MemoryManager::allocData(std::size_t size)
{
#ifdef MM_STATS
    lastAllocRequestedSlots = size >> Chunk::SlotSizeShift;
    ++allocationCount;
#endif

    Q_ASSERT(size >= Chunk::SlotSize);
    Q_ASSERT(size % Chunk::SlotSize == 0);

    HeapItem *m = allocate(&blockAllocator, size);
    memset(m, 0, size);
    return *m;
}

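// If the members don't fit into the object's inline properties, the overflow
// MemberData is co-allocated right behind the object in the same chunk when
// possible. The extra slots are flagged as a separate object in the chunk
// bitmaps, so object and member data can be swept independently.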
Heap::Object *MemoryManager::allocObjectWithMemberData(const QV4::VTable *vtable, uint nMembers)
{
    uint size = (vtable->nInlineProperties + vtable->inlinePropertyOffset)*sizeof(Value);
    Q_ASSERT(!(size % sizeof(HeapItem)));

    Heap::Object *o;
    if (nMembers <= vtable->nInlineProperties) {
        o = static_cast<Heap::Object *>(allocData(size));
    } else {
        // Allocate both in one go through the block allocator
        nMembers -= vtable->nInlineProperties;
        std::size_t memberSize = align(sizeof(Heap::MemberData) + (nMembers - 1)*sizeof(Value));
        size_t totalSize = size + memberSize;
        Heap::MemberData *m;
        if (totalSize > Chunk::DataSize) {
            o = static_cast<Heap::Object *>(allocData(size));
            m = hugeItemAllocator.allocate(memberSize)->as<Heap::MemberData>();
        } else {
            HeapItem *mh = reinterpret_cast<HeapItem *>(allocData(totalSize));
            Heap::Base *b = *mh;
            o = static_cast<Heap::Object *>(b);
            mh += (size >> Chunk::SlotSizeShift);
            m = mh->as<Heap::MemberData>();
            Chunk *c = mh->chunk();
            size_t index = mh - c->realBase();
            Chunk::setBit(c->objectBitmap, index);
            Chunk::clearBit(c->extendsBitmap, index);
        }
        m->internalClass.set(engine, engine->internalClasses(EngineBase::Class_MemberData));
        o->memberData.set(engine, m);
        Q_ASSERT(o->memberData->internalClass);
        m->values.alloc = static_cast<uint>((memberSize - sizeof(Heap::MemberData) + sizeof(Value))/sizeof(Value));
        m->values.size = o->memberData->values.alloc;
        m->init();
//        qDebug() << "    got" << o->memberData << o->memberData->size;
    }
//    qDebug() << "allocating object with memberData" << o << o->memberData.operator->();
    return o;
}

static uint markStackSize = 0;

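// The mark stack lives in a fixed-size reservation (engine->gcStack). The soft
// limit at 3/4 of the capacity is where wasDrainNecessary() starts forcing
// drains; the hard limit is the end of the reservation itself.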
MarkStack::MarkStack(ExecutionEngine *engine)
    : m_engine(engine)
{
    m_base = (Heap::Base **)engine->gcStack->base();
    m_top = m_base;
    const size_t size = engine->maxGCStackSize() / sizeof(Heap::Base);
    m_hardLimit = m_base + size;
    m_softLimit = m_base + size * 3 / 4;
}

void MarkStack::drain()
{
    // we're not calling drain(QDeadlineTimer::Forever) as that has higher overhead
    while (m_top > m_base) {
        Heap::Base *h = pop();
        ++markStackSize;
        Q_ASSERT(h); // at this point we should only have Heap::Base objects in this area on the stack. If not, weird things might happen.
        Q_ASSERT(h->internalClass);
        h->internalClass->vtable->markObjects(h, this);
    }
}

MarkStack::DrainState MarkStack::drain(QDeadlineTimer deadline)
{
    do {
        for (int i = 0; i <= markLoopIterationCount * 10; ++i) {
            if (m_top == m_base)
                return DrainState::Complete;
            Heap::Base *h = pop();
            ++markStackSize;
            Q_ASSERT(h); // at this point we should only have Heap::Base objects in this area on the stack. If not, weird things might happen.
            Q_ASSERT(h->internalClass);
            h->internalClass->vtable->markObjects(h, this);
        }
    } while (!deadline.hasExpired());
    return DrainState::Ongoing;
}

void MarkStack::setSoftLimit(size_t size)
{
    m_softLimit = m_base + size;
    Q_ASSERT(m_softLimit < m_hardLimit);
}

void MemoryManager::onEventLoop()
{
    if (engine->inShutdown)
        return;
    if (gcBlocked == InCriticalSection) {
        QMetaObject::invokeMethod(engine->publicEngine, [this]{
            onEventLoop();
        }, Qt::QueuedConnection);
        return;
    }
    if (gcStateMachine->inProgress()) {
        gcStateMachine->step();
    }
}


void MemoryManager::setGCTimeLimit(int timeMs)
{
    gcStateMachine->timeLimit = std::chrono::milliseconds(timeMs);
}

void MemoryManager::sweep(bool lastSweep, ClassDestroyStatsCallback classCountPtr)
{
    for (PersistentValueStorage::Iterator it = m_weakValues->begin(); it != m_weakValues->end(); ++it) {
        Managed *m = (*it).managed();
        if (!m || m->markBit())
            continue;
        // we need to call destroyObject on qobjectwrappers now, so that they can emit the destroyed
        // signal before we start sweeping the heap
        if (QObjectWrapper *qobjectWrapper = (*it).as<QObjectWrapper>()) {
            qobjectWrapper->destroyObject(lastSweep);
        }
    }

    freeWeakMaps(this);
    freeWeakSets(this);

    cleanupDeletedQObjectWrappersInSweep();

    if (!lastSweep) {
        engine->identifierTable->sweep();
        blockAllocator.sweep(/*classCountPtr*/);
        hugeItemAllocator.sweep(classCountPtr);
        icAllocator.sweep(/*classCountPtr*/);
    }

    // reset all black bits
    blockAllocator.resetBlackBits();
    hugeItemAllocator.resetBlackBits();
    icAllocator.resetBlackBits();

    usedSlotsAfterLastFullSweep = blockAllocator.usedSlotsAfterLastSweep + icAllocator.usedSlotsAfterLastSweep;
    updateUnmanagedHeapSizeGCLimit();
    gcBlocked = MemoryManager::Unblocked;
}

/*!
    \internal
    Helper used during sweeping to clean up the (to-be-freed) QObjectWrappers.
    Used both in MemoryManager::sweep and in the corresponding GC state machine phase.
*/
void MemoryManager::cleanupDeletedQObjectWrappersInSweep()
{
    // onDestruction handlers may have accessed other QObject wrappers and reset their value, so ensure
    // that they are all set to undefined.
    for (PersistentValueStorage::Iterator it = m_weakValues->begin(); it != m_weakValues->end(); ++it) {
        Managed *m = (*it).managed();
        if (!m || m->markBit())
            continue;
        (*it) = Value::undefinedValue();
    }

    // Now it is time to free the QV4::QObjectWrapper values; check each Value's tag
    // to make sure its object has actually been destroyed.
    const int pendingCount = m_pendingFreedObjectWrapperValue.size();
    if (pendingCount) {
        QVector<Value *> remainingWeakQObjectWrappers;
        remainingWeakQObjectWrappers.reserve(pendingCount);
        for (int i = 0; i < pendingCount; ++i) {
            Value *v = m_pendingFreedObjectWrapperValue.at(i);
            if (v->isUndefined() || v->isEmpty())
                PersistentValueStorage::free(v);
            else
                remainingWeakQObjectWrappers.append(v);
        }
        m_pendingFreedObjectWrapperValue = remainingWeakQObjectWrappers;
    }

    if (MultiplyWrappedQObjectMap *multiplyWrappedQObjects = engine->m_multiplyWrappedQObjects) {
        for (MultiplyWrappedQObjectMap::Iterator it = multiplyWrappedQObjects->begin(); it != multiplyWrappedQObjects->end();) {
            if (it.value().isNullOrUndefined())
                it = multiplyWrappedQObjects->erase(it);
            else
                ++it;
        }
    }
}

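// With GCOverallocation at 200%, this requests a GC run once the heap holds
// more than twice the slots that were in use after the last full sweep, and
// only once it exceeds MinSlotsGCLimit slots in total.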
bool MemoryManager::shouldRunGC() const
{
    size_t total = blockAllocator.totalSlots() + icAllocator.totalSlots();
    if (total > MinSlotsGCLimit && usedSlotsAfterLastFullSweep * GCOverallocation < total * 100)
        return true;
    return false;
}

static size_t dumpBins(BlockAllocator *b, const char *title)
{
    const QLoggingCategory &stats = lcGcAllocatorStats();
    size_t totalSlotMem = 0;
    if (title)
        qDebug(stats) << "Slot map for" << title << "allocator:";
    for (uint i = 0; i < BlockAllocator::NumBins; ++i) {
        uint nEntries = 0;
        HeapItem *h = b->freeBins[i];
        while (h) {
            ++nEntries;
            totalSlotMem += h->freeData.availableSlots;
            h = h->freeData.next;
        }
        if (title)
            qDebug(stats) << "    number of entries in slot" << i << ":" << nEntries;
    }
    SDUMP() << "    large slot map";
    HeapItem *h = b->freeBins[BlockAllocator::NumBins - 1];
    while (h) {
        SDUMP() << "        " << Qt::hex << (quintptr(h)/32) << h->freeData.availableSlots;
        h = h->freeData.next;
    }

    if (title)
        qDebug(stats) << "    total mem in bins" << totalSlotMem*Chunk::SlotSize;
    return totalSlotMem*Chunk::SlotSize;
}

/*!
    \internal
    Precondition: incremental garbage collection must currently be active.
    Finishes incremental garbage collection, unless we are in a critical section.
    Code entering a critical section is expected to check whether it needs to
    force a GC completion, and to trigger the GC again if necessary when
    exiting the critical section.
    Returns \c true if the GC cycle completed, \c false otherwise.
 */
bool MemoryManager::tryForceGCCompletion()
{
    if (gcBlocked == InCriticalSection) {
        qCDebug(lcGcForcedRuns)
                << "Tried to force the GC to complete a run but failed due to being in a critical section.";
        return false;
    }

    const bool incrementalGCIsAlreadyRunning = m_markStack != nullptr;
    Q_ASSERT(incrementalGCIsAlreadyRunning);

    qCDebug(lcGcForcedRuns) << "Forcing the GC to complete a run.";

    auto oldTimeLimit = std::exchange(gcStateMachine->timeLimit, std::chrono::microseconds::max());
    while (gcStateMachine->inProgress()) {
        gcStateMachine->step();
    }
    gcStateMachine->timeLimit = oldTimeLimit;
    return true;
}

void MemoryManager::runFullGC()
{
    runGC();
    const bool incrementalGCStillRunning = m_markStack != nullptr;
    if (incrementalGCStillRunning)
        tryForceGCCompletion();
}

void MemoryManager::runGC()
{
    if (gcBlocked != Unblocked) {
        return;
    }

    gcBlocked = MemoryManager::NormalBlocked;

    if (gcStats) {
        statistics.maxReservedMem = qMax(statistics.maxReservedMem, getAllocatedMem());
        statistics.maxAllocatedMem = qMax(statistics.maxAllocatedMem, getUsedMem() + getLargeItemsMem());
    }

    if (!gcCollectorStats) {
        gcStateMachine->step();
    } else {
        bool triggeredByUnmanagedHeap = (unmanagedHeapSize > unmanagedHeapSizeGCLimit);
        size_t oldUnmanagedSize = unmanagedHeapSize;

        const size_t totalMem = getAllocatedMem();
        const size_t usedBefore = getUsedMem();
        const size_t largeItemsBefore = getLargeItemsMem();

        const QLoggingCategory &stats = lcGcAllocatorStats();
        qDebug(stats) << "========== GC ==========";
#ifdef MM_STATS
        qDebug(stats) << "    Triggered by alloc request of" << lastAllocRequestedSlots << "slots.";
        qDebug(stats) << "    Allocations since last GC" << allocationCount;
        allocationCount = 0;
#endif
        size_t oldChunks = blockAllocator.chunks.size();
        qDebug(stats) << "Allocated" << totalMem << "bytes in" << oldChunks << "chunks";
        qDebug(stats) << "Fragmented memory before GC" << (totalMem - usedBefore);
        dumpBins(&blockAllocator, "Block");
        dumpBins(&icAllocator, "InternalClass");

        QElapsedTimer t;
        t.start();
        gcStateMachine->step();
        qint64 markTime = t.nsecsElapsed()/1000;
        t.start();
        const size_t usedAfter = getUsedMem();
        const size_t largeItemsAfter = getLargeItemsMem();

        if (triggeredByUnmanagedHeap) {
            qDebug(stats) << "triggered by unmanaged heap:";
            qDebug(stats) << "   old unmanaged heap size:" << oldUnmanagedSize;
            qDebug(stats) << "   new unmanaged heap:" << unmanagedHeapSize;
            qDebug(stats) << "   unmanaged heap limit:" << unmanagedHeapSizeGCLimit;
        }
        size_t memInBins = dumpBins(&blockAllocator, "Block")
                + dumpBins(&icAllocator, "InternalClass");
        qDebug(stats) << "Marked objects in" << markTime << "us.";
        qDebug(stats) << "   " << markStackSize << "objects marked";

        // sort our object types by number of freed instances
        MMStatsHash freedObjectStats;
        std::swap(freedObjectStats, *freedObjectStatsGlobal());
        typedef std::pair<const char*, int> ObjectStatInfo;
        std::vector<ObjectStatInfo> freedObjectsSorted;
        freedObjectsSorted.reserve(freedObjectStats.size());
        for (auto it = freedObjectStats.constBegin(); it != freedObjectStats.constEnd(); ++it) {
            freedObjectsSorted.push_back(std::make_pair(it.key(), it.value()));
        }
        std::sort(freedObjectsSorted.begin(), freedObjectsSorted.end(), [](const ObjectStatInfo &a, const ObjectStatInfo &b) {
            // descending by freed count, with ties broken by class name
            return a.second != b.second ? a.second > b.second : strcmp(a.first, b.first) < 0;
        });

        qDebug(stats) << "Used memory before GC:" << usedBefore;
        qDebug(stats) << "Used memory after GC:" << usedAfter;
        qDebug(stats) << "Freed up bytes :" << (usedBefore - usedAfter);
        qDebug(stats) << "Freed up chunks :" << (oldChunks - blockAllocator.chunks.size());
        size_t lost = blockAllocator.allocatedMem() + icAllocator.allocatedMem()
                - memInBins - usedAfter;
        if (lost)
            qDebug(stats) << "!!!!!!!!!!!!!!!!!!!!! LOST MEM:" << lost << "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!";
        if (largeItemsBefore || largeItemsAfter) {
            qDebug(stats) << "Large item memory before GC:" << largeItemsBefore;
            qDebug(stats) << "Large item memory after GC:" << largeItemsAfter;
            qDebug(stats) << "Large item memory freed up:" << (largeItemsBefore - largeItemsAfter);
        }

        for (auto it = freedObjectsSorted.cbegin(); it != freedObjectsSorted.cend(); ++it) {
            qDebug(stats).noquote() << QString::fromLatin1("Freed JS type: %1 (%2 instances)").arg(QString::fromLatin1(it->first), QString::number(it->second));
        }

        qDebug(stats) << "======== End GC ========";
    }

    if (gcStats)
        statistics.maxUsedMem = qMax(statistics.maxUsedMem, getUsedMem() + getLargeItemsMem());
}

size_t MemoryManager::getUsedMem() const
{
    return blockAllocator.usedMem() + icAllocator.usedMem();
}

size_t MemoryManager::getAllocatedMem() const
{
    return blockAllocator.allocatedMem() + icAllocator.allocatedMem() + hugeItemAllocator.usedMem();
}

size_t MemoryManager::getLargeItemsMem() const
{
    return hugeItemAllocator.usedMem();
}

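// Adjust the unmanaged-heap GC trigger with hysteresis: double the limit once
// the unmanaged heap fills more than 75% of it, and halve it (but never below
// MinUnmanagedHeapSizeGCLimit) once usage falls under 25%, so that the limit
// does not oscillate around a stable working-set size.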
void MemoryManager::updateUnmanagedHeapSizeGCLimit()
{
    if (3*unmanagedHeapSizeGCLimit <= 4 * unmanagedHeapSize) {
        // more than 75% full, raise limit
        unmanagedHeapSizeGCLimit = std::max(unmanagedHeapSizeGCLimit,
                                            unmanagedHeapSize) * 2;
    } else if (unmanagedHeapSize * 4 <= unmanagedHeapSizeGCLimit) {
        // less than 25% full, lower limit
        unmanagedHeapSizeGCLimit = qMax(std::size_t(MinUnmanagedHeapSizeGCLimit),
                                        unmanagedHeapSizeGCLimit/2);
    }

    if (aggressiveGC && !engine->inShutdown) {
        // ensure we don't 'lose' any memory
        // but not during shutdown, because then we skip parts of sweep
        // and use freeAll instead
        Q_ASSERT(blockAllocator.allocatedMem()
                 == blockAllocator.usedMem() + dumpBins(&blockAllocator, nullptr));
        Q_ASSERT(icAllocator.allocatedMem()
                 == icAllocator.usedMem() + dumpBins(&icAllocator, nullptr));
    }
}

void MemoryManager::registerWeakMap(Heap::MapObject *map)
{
    map->nextWeakMap = weakMaps;
    weakMaps = map;
}

void MemoryManager::registerWeakSet(Heap::SetObject *set)
{
    set->nextWeakSet = weakSets;
    weakSets = set;
}

MemoryManager::~MemoryManager()
{
    delete m_persistentValues;
    dumpStats();

    // do one last non-incremental sweep to clean up C++ objects
    // first, abort any on-going incremental gc operation
    setGCTimeLimit(-1);
    if (engine->isGCOngoing) {
        engine->isGCOngoing = false;
        m_markStack.reset();
        gcStateMachine->state = GCState::Invalid;
        blockAllocator.resetBlackBits();
        hugeItemAllocator.resetBlackBits();
        icAllocator.resetBlackBits();
    }
    // then sweep
    sweep(/*lastSweep*/true);

    blockAllocator.freeAll();
    hugeItemAllocator.freeAll();
    icAllocator.freeAll();

    delete m_weakValues;
#ifdef V4_USE_VALGRIND
    VALGRIND_DESTROY_MEMPOOL(this);
#endif
    delete chunkAllocator;
}


void MemoryManager::dumpStats() const
{
    if (!gcStats)
        return;

    const QLoggingCategory &stats = lcGcStats();
    qDebug(stats) << "Qml GC memory allocation statistics:";
    qDebug(stats) << "Total memory allocated:" << statistics.maxReservedMem;
    qDebug(stats) << "Max memory used before a GC run:" << statistics.maxAllocatedMem;
    qDebug(stats) << "Max memory used after a GC run:" << statistics.maxUsedMem;
    qDebug(stats) << "Requests for different item sizes:";
    for (int i = 1; i < BlockAllocator::NumBins - 1; ++i)
        qDebug(stats) << "     <" << (i << Chunk::SlotSizeShift) << " bytes: " << statistics.allocations[i];
    qDebug(stats) << "     >=" << ((BlockAllocator::NumBins - 1) << Chunk::SlotSizeShift) << " bytes: " << statistics.allocations[BlockAllocator::NumBins - 1];
}

void MemoryManager::collectFromJSStack(MarkStack *markStack) const
{
    Value *v = engine->jsStackBase;
    Value *top = engine->jsStackTop;
    while (v < top) {
        Managed *m = v->managed();
        if (m) {
            Q_ASSERT(m->inUse());
            // Skip pointers to already freed objects, they are bogus as well
            m->mark(markStack);
        }
        ++v;
    }

    for (auto *frame = engine->currentStackFrame; frame; frame = frame->parentFrame()) {
        if (!frame->isMetaTypesFrame())
            continue;

        if (const QQmlPrivate::AOTTrackedLocalsStorage *locals
                = static_cast<const MetaTypesStackFrame *>(frame)->locals()) {
            // Actual AOT-compiled functions initialize the locals first thing when they
            // are called. However, the ScopedStackFrame has no locals, but still uses a
            // MetaTypesStackFrame.
            locals->markObjects(markStack);
        }
    }
}

GCStateMachine::GCStateMachine()
    : collectTimings(lcGcStepExecution().isDebugEnabled())
{
    // base assumption: target 60fps, use at most 1/3 of time for gc
    // unless overridden by env variable
    bool ok = false;
    auto envTimeLimit = qEnvironmentVariableIntValue("QV4_GC_TIMELIMIT", &ok);
    if (!ok)
        envTimeLimit = (1000 / 60) / 3;
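    // (1000 ms / 60 frames) / 3 ≈ 5 ms of GC work per frame by default.
    // E.g. QV4_GC_TIMELIMIT=10 raises the budget to 10 ms per step, while a
    // value <= 0 disables the deadline so each GC run executes to completion.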
    if (envTimeLimit > 0)
        timeLimit = std::chrono::milliseconds { envTimeLimit };
    else
        timeLimit = std::chrono::milliseconds { 0 };
}

static void logStepTiming(GCStateMachine* that, quint64 timing) {
    auto registerTimingWithResetOnOverflow = [](
            GCStateMachine::StepTiming& storage, quint64 timing, GCState state
    ) {
        auto wouldOverflow = [](quint64 lhs, quint64 rhs) {
            return rhs > 0 && lhs > std::numeric_limits<quint64>::max() - rhs;
        };

        if (wouldOverflow(storage.rolling_sum, timing) || wouldOverflow(storage.count, 1)) {
            qDebug(lcGcStepExecution) << "Resetting timings storage for"
                                      << QMetaEnum::fromType<GCState>().key(state) << "due to overflow.";
            storage.rolling_sum = timing;
            storage.count = 1;
        } else {
            storage.rolling_sum += timing;
            storage.count += 1;
        }
    };

    GCStateMachine::StepTiming& storage = that->executionTiming[that->state];
    registerTimingWithResetOnOverflow(storage, timing, that->state);

    qDebug(lcGcStepExecution) << "Performed" << QMetaEnum::fromType<GCState>().key(that->state)
                              << "in" << timing << "microseconds";
    qDebug(lcGcStepExecution) << "This step was performed" << storage.count << " time(s), executing in"
                              << (storage.rolling_sum / storage.count) << "microseconds on average.";
}

static GCState executeWithLoggingIfEnabled(GCStateMachine* that, GCStateInfo& stateInfo) {
    if (!that->collectTimings)
        return stateInfo.execute(that, that->stateData);

    QElapsedTimer timer;
    timer.start();
    GCState next = stateInfo.execute(that, that->stateData);
    logStepTiming(that, timer.nsecsElapsed()/1000);
    return next;
}

void GCStateMachine::transition() {
    if (timeLimit.count() > 0) {
        deadline = QDeadlineTimer(timeLimit);
        bool deadlineExpired = false;
        while (!(deadlineExpired = deadline.hasExpired()) && state != GCState::Invalid) {
            if (state > GCState::InitCallDestroyObjects) {
                /* InitCallDestroyObjects is the last action that drains the mark
                   stack by default. But our write-barrier might end up putting
                   objects on the mark stack which still reference other objects,
                   especially when we call user code triggered by Component.onDestruction,
                   but also when we run into a timeout.
                   We don't redrain before InitCallDestroyObjects, as that would
                   potentially lead to useless busy-work (e.g., if the last references
                   to objects are removed while the mark phase is running).
                 */
                redrain(this);
            }
            qCDebug(lcGcStateTransitions) << "Preparing to execute the"
                                          << QMetaEnum::fromType<GCState>().key(state) << "state";
            GCStateInfo& stateInfo = stateInfoMap[int(state)];
            state = executeWithLoggingIfEnabled(this, stateInfo);
            qCDebug(lcGcStateTransitions) << "Transitioning to the"
                                          << QMetaEnum::fromType<GCState>().key(state) << "state";
            if (stateInfo.breakAfter)
                break;
        }
        if (deadlineExpired)
            handleTimeout(state);
        if (state != GCState::Invalid)
            QMetaObject::invokeMethod(mm->engine->publicEngine, [this]{
                mm->onEventLoop();
            }, Qt::QueuedConnection);
    } else {
        deadline = QDeadlineTimer::Forever;
        while (state != GCState::Invalid) {
            qCDebug(lcGcStateTransitions) << "Preparing to execute the"
                                          << QMetaEnum::fromType<GCState>().key(state) << "state";
            GCStateInfo& stateInfo = stateInfoMap[int(state)];
            state = executeWithLoggingIfEnabled(this, stateInfo);
            qCDebug(lcGcStateTransitions) << "Transitioning to the"
                                          << QMetaEnum::fromType<GCState>().key(state) << "state";
        }
    }
}

} // namespace QV4

QT_END_NAMESPACE

#include "moc_qv4mm_p.cpp"

