// Copyright (C) 2021 The Qt Company Ltd.
// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only

#include "qv4engine_p.h"
#include "qv4object_p.h"
#include "qv4mm_p.h"
#include "qv4qobjectwrapper_p.h"
#include "qv4identifiertable_p.h"
#include <QtCore/qalgorithms.h>
#include <QtCore/private/qnumeric_p.h>
#include <QtCore/qloggingcategory.h>
#include <private/qv4alloca_p.h>
#include <qqmlengine.h>
#include "PageReservation.h"
#include "PageAllocation.h"

#include <QElapsedTimer>
#include <QMap>
#include <QScopedValueRollback>

#include <iostream>
#include <cstdlib>
#include <algorithm>
#include "qv4profiling_p.h"
#include "qv4mapobject_p.h"
#include "qv4setobject_p.h"

#include <chrono>

//#define MM_STATS

#if !defined(MM_STATS) && !defined(QT_NO_DEBUG)
#define MM_STATS
#endif

#if MM_DEBUG
#define DEBUG qDebug() << "MM:"
#else
#define DEBUG if (1) ; else qDebug() << "MM:"
#endif

#ifdef V4_USE_VALGRIND
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>
#endif

#ifdef V4_USE_HEAPTRACK
#include <heaptrack_api.h>
#endif

#if OS(QNX)
#include <sys/storage.h>   // __tls()
#endif

#if USE(PTHREADS) && HAVE(PTHREAD_NP_H)
#include <pthread_np.h>
#endif

Q_LOGGING_CATEGORY(lcGcStats, "qt.qml.gc.statistics")
Q_DECLARE_LOGGING_CATEGORY(lcGcStats)
Q_LOGGING_CATEGORY(lcGcAllocatorStats, "qt.qml.gc.allocatorStats")
Q_DECLARE_LOGGING_CATEGORY(lcGcAllocatorStats)
Q_LOGGING_CATEGORY(lcGcStateTransitions, "qt.qml.gc.stateTransitions")
Q_DECLARE_LOGGING_CATEGORY(lcGcStateTransitions)
Q_LOGGING_CATEGORY(lcGcForcedRuns, "qt.qml.gc.forcedRuns")
Q_DECLARE_LOGGING_CATEGORY(lcGcForcedRuns)
Q_LOGGING_CATEGORY(lcGcStepExecution, "qt.qml.gc.stepExecution")
Q_DECLARE_LOGGING_CATEGORY(lcGcStepExecution)

using namespace WTF;

QT_BEGIN_NAMESPACE

namespace QV4 {

enum {
    MinSlotsGCLimit = QV4::Chunk::AvailableSlots*16,
    GCOverallocation = 200 /* Max overallocation by the GC in % */
};

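// A MemorySegment is one contiguous page reservation from the OS, carved into
// up to NumChunks chunks. The allocatedMap bit field tracks which chunks are
// currently committed and handed out.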
struct MemorySegment {
    enum {
#ifdef Q_OS_RTEMS
        NumChunks = sizeof(quint64),
#else
        NumChunks = 8*sizeof(quint64),
#endif
        SegmentSize = NumChunks*Chunk::ChunkSize,
    };

    MemorySegment(size_t size)
    {
        size += Chunk::ChunkSize; // make sure we can get enough 64k alignment memory
        if (size < SegmentSize)
            size = SegmentSize;

        pageReservation = PageReservation::reserve(size, OSAllocator::JSGCHeapPages);
        base = reinterpret_cast<Chunk *>((reinterpret_cast<quintptr>(pageReservation.base()) + Chunk::ChunkSize - 1) & ~(Chunk::ChunkSize - 1));
        nChunks = NumChunks;
        availableBytes = size - (reinterpret_cast<quintptr>(base) - reinterpret_cast<quintptr>(pageReservation.base()));
        if (availableBytes < SegmentSize)
            --nChunks;
    }
    MemorySegment(MemorySegment &&other) {
        qSwap(pageReservation, other.pageReservation);
        qSwap(base, other.base);
        qSwap(allocatedMap, other.allocatedMap);
        qSwap(availableBytes, other.availableBytes);
        qSwap(nChunks, other.nChunks);
    }

    ~MemorySegment() {
        if (base)
            pageReservation.deallocate();
    }

    void setBit(size_t index) {
        Q_ASSERT(index < nChunks);
        quint64 bit = static_cast<quint64>(1) << index;
//        qDebug() << "    setBit" << hex << index << (index & (Bits - 1)) << bit;
        allocatedMap |= bit;
    }
    void clearBit(size_t index) {
        Q_ASSERT(index < nChunks);
        quint64 bit = static_cast<quint64>(1) << index;
//        qDebug() << "    clearBit" << hex << index << (index & (Bits - 1)) << bit;
        allocatedMap &= ~bit;
    }
    bool testBit(size_t index) const {
        Q_ASSERT(index < nChunks);
        quint64 bit = static_cast<quint64>(1) << index;
        return (allocatedMap & bit);
    }

    Chunk *allocate(size_t size);
    void free(Chunk *chunk, size_t size) {
        DEBUG << "freeing chunk" << chunk;
        size_t index = static_cast<size_t>(chunk - base);
        size_t end = qMin(static_cast<size_t>(NumChunks), index + (size - 1)/Chunk::ChunkSize + 1);
        while (index < end) {
            Q_ASSERT(testBit(index));
            clearBit(index);
            ++index;
        }

        size_t pageSize = WTF::pageSize();
        size = (size + pageSize - 1) & ~(pageSize - 1);
#if !defined(Q_OS_LINUX) && !defined(Q_OS_WIN)
        // Linux and Windows zero out pages that have been decommitted and get committed again.
        // Unfortunately that's not true on other OSes (e.g. BSD based ones), so zero out the
        // memory before decommit, so that we can be sure that all chunks we allocate will be
        // zero initialized.
        memset(chunk, 0, size);
#endif
        pageReservation.decommit(chunk, size);
    }

    bool contains(Chunk *c) const {
        return c >= base && c < base + nChunks;
    }

    PageReservation pageReservation;
    Chunk *base = nullptr;
    quint64 allocatedMap = 0;
    size_t availableBytes = 0;
    uint nChunks = 0;
};

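// First-fit search over the chunk bitmap: find a run of 'requiredChunks' free
// chunks, commit their pages and mark them as allocated. A request that covers
// the whole (still empty) segment takes the fast path and commits everything at once.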
Chunk *MemorySegment::allocate(size_t size)
{
    if (!allocatedMap && size >= SegmentSize) {
        // chunk allocated for one huge allocation
        Q_ASSERT(availableBytes >= size);
        pageReservation.commit(base, size);
        allocatedMap = ~static_cast<quint64>(0);
        return base;
    }
    size_t requiredChunks = (size + sizeof(Chunk) - 1)/sizeof(Chunk);
    uint sequence = 0;
    Chunk *candidate = nullptr;
    for (uint i = 0; i < nChunks; ++i) {
        if (!testBit(i)) {
            if (!candidate)
                candidate = base + i;
            ++sequence;
        } else {
            candidate = nullptr;
            sequence = 0;
        }
        if (sequence == requiredChunks) {
            pageReservation.commit(candidate, size);
            for (uint i = 0; i < requiredChunks; ++i)
                setBit(candidate - base + i);
            DEBUG << "allocated chunk " << candidate << Qt::hex << size;

            return candidate;
        }
    }
    return nullptr;
}

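// The ChunkAllocator hands out chunk-sized blocks of memory, growing the list
// of MemorySegments on demand. Requested sizes are rounded up to page and
// chunk granularity in requiredChunkSize().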
struct ChunkAllocator {
    ChunkAllocator() {}

    size_t requiredChunkSize(size_t size) {
        size += Chunk::HeaderSize; // space required for the Chunk header
        size_t pageSize = WTF::pageSize();
        size = (size + pageSize - 1) & ~(pageSize - 1); // align to page sizes
        if (size < Chunk::ChunkSize)
            size = Chunk::ChunkSize;
        return size;
    }

    Chunk *allocate(size_t size = 0);
    void free(Chunk *chunk, size_t size = 0);

    std::vector<MemorySegment> memorySegments;
};

Chunk *ChunkAllocator::allocate(size_t size)
{
    size = requiredChunkSize(size);
    for (auto &m : memorySegments) {
        if (~m.allocatedMap) {
            Chunk *c = m.allocate(size);
            if (c)
                return c;
        }
    }

    // allocate a new segment
    memorySegments.push_back(MemorySegment(size));
    Chunk *c = memorySegments.back().allocate(size);
    Q_ASSERT(c);
    return c;
}

void ChunkAllocator::free(Chunk *chunk, size_t size)
{
    size = requiredChunkSize(size);
    for (auto &m : memorySegments) {
        if (m.contains(chunk)) {
            m.free(chunk, size);
            return;
        }
    }
    Q_ASSERT(false);
}

#ifdef DUMP_SWEEP
QString binary(quintptr n) {
    QString s = QString::number(n, 2);
    while (s.length() < 64)
        s.prepend(QChar::fromLatin1('0'));
    return s;
}
#define SDUMP qDebug
#else
QString binary(quintptr) { return QString(); }
#define SDUMP if (1) ; else qDebug
#endif

// Stores a classname -> freed count mapping.
typedef QHash<const char*, int> MMStatsHash;
Q_GLOBAL_STATIC(MMStatsHash, freedObjectStatsGlobal)

// This indirection avoids sticking QHash code in each of the call sites, which
// shaves off some instructions in the case that it's unused.
static void increaseFreedCountForClass(const char *className)
{
    (*freedObjectStatsGlobal())[className]++;
}

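// Sweep one chunk: every slot marked as an object but not marked black is
// garbage. Run its destroy hook (if any), clear its extent bits, and account
// the freed memory. Returns true if the chunk still contains live objects.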
//bool Chunk::sweep(ClassDestroyStatsCallback classCountPtr)
bool Chunk::sweep(ExecutionEngine *engine)
{
    bool hasUsedSlots = false;
    SDUMP() << "sweeping chunk" << this;
    HeapItem *o = realBase();
    bool lastSlotFree = false;
    for (uint i = 0; i < Chunk::EntriesInBitmap; ++i) {
        quintptr toFree = objectBitmap[i] ^ blackBitmap[i];
        Q_ASSERT((toFree & objectBitmap[i]) == toFree); // check all black objects are marked as being used
        quintptr e = extendsBitmap[i];
        SDUMP() << "   index=" << i;
        SDUMP() << "   toFree  =" << binary(toFree);
        SDUMP() << "   black   =" << binary(blackBitmap[i]);
        SDUMP() << "   object  =" << binary(objectBitmap[i]);
        SDUMP() << "   extends =" << binary(e);
        if (lastSlotFree)
            e &= (e + 1); // clear all lowest extent bits
        while (toFree) {
            uint index = qCountTrailingZeroBits(toFree);
            quintptr bit = (static_cast<quintptr>(1) << index);

            toFree ^= bit; // mask out freed slot
//            DEBUG << "       index" << hex << index << toFree;

            // remove all extends slots that have been freed
            // this is a bit of bit trickery.
            quintptr mask = (bit << 1) - 1; // create a mask of 1's to the right of and up to the current bit
            quintptr objmask = e | mask; // or'ing mask with e gives all ones until the end of the current object
            quintptr result = objmask + 1;
            Q_ASSERT(qCountTrailingZeroBits(result) - index != 0); // ensure we freed something
            result |= mask; // ensure we don't clear stuff to the right of the current object
            e &= result;

            HeapItem *itemToFree = o + index;
            Heap::Base *b = *itemToFree;
            const VTable *v = b->internalClass->vtable;
//            if (Q_UNLIKELY(classCountPtr))
//                classCountPtr(v->className);
            if (v->destroy) {
                v->destroy(b);
                b->_checkIsDestroyed();
            }
#ifdef V4_USE_HEAPTRACK
            heaptrack_report_free(itemToFree);
#endif
        }
        Q_V4_PROFILE_DEALLOC(engine, qPopulationCount((objectBitmap[i] | extendsBitmap[i])
                                                      - (blackBitmap[i] | e)) * Chunk::SlotSize,
                             Profiling::SmallItem);
        objectBitmap[i] = blackBitmap[i];
        hasUsedSlots |= (blackBitmap[i] != 0);
        extendsBitmap[i] = e;
        lastSlotFree = !((objectBitmap[i]|extendsBitmap[i]) >> (sizeof(quintptr)*8 - 1));
        SDUMP() << "   new extends =" << binary(e);
        SDUMP() << "   lastSlotFree" << lastSlotFree;
        Q_ASSERT((objectBitmap[i] & extendsBitmap[i]) == 0);
        o += Chunk::Bits;
    }
//    DEBUG << "swept chunk" << this << "freed" << slotsFreed << "slots.";
    return hasUsedSlots;
}

void Chunk::freeAll(ExecutionEngine *engine)
{
//    DEBUG << "sweeping chunk" << this << (*freeList);
    HeapItem *o = realBase();
    for (uint i = 0; i < Chunk::EntriesInBitmap; ++i) {
        quintptr toFree = objectBitmap[i];
        quintptr e = extendsBitmap[i];
//        DEBUG << hex << "   index=" << i << toFree;
        while (toFree) {
            uint index = qCountTrailingZeroBits(toFree);
            quintptr bit = (static_cast<quintptr>(1) << index);

            toFree ^= bit; // mask out freed slot
//            DEBUG << "       index" << hex << index << toFree;

            // remove all extends slots that have been freed
            // this is a bit of bit trickery.
            quintptr mask = (bit << 1) - 1; // create a mask of 1's to the right of and up to the current bit
            quintptr objmask = e | mask; // or'ing mask with e gives all ones until the end of the current object
            quintptr result = objmask + 1;
            Q_ASSERT(qCountTrailingZeroBits(result) - index != 0); // ensure we freed something
            result |= mask; // ensure we don't clear stuff to the right of the current object
            e &= result;

            HeapItem *itemToFree = o + index;
            Heap::Base *b = *itemToFree;
            if (b->internalClass->vtable->destroy) {
                b->internalClass->vtable->destroy(b);
                b->_checkIsDestroyed();
            }
#ifdef V4_USE_HEAPTRACK
            heaptrack_report_free(itemToFree);
#endif
        }
        Q_V4_PROFILE_DEALLOC(engine, (qPopulationCount(objectBitmap[i]|extendsBitmap[i])
                                      - qPopulationCount(e)) * Chunk::SlotSize, Profiling::SmallItem);
        objectBitmap[i] = 0;
        extendsBitmap[i] = e;
        o += Chunk::Bits;
    }
//    DEBUG << "swept chunk" << this << "freed" << slotsFreed << "slots.";
}

void Chunk::resetBlackBits()
{
    memset(blackBitmap, 0, sizeof(blackBitmap));
}

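// After a sweep, walk the free ranges of this chunk and thread them onto the
// BlockAllocator's size-class free lists (bins). The last bin collects all
// ranges that are too large for an exact size class.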
void Chunk::sortIntoBins(HeapItem **bins, uint nBins)
{
//    qDebug() << "sortIntoBins:";
    HeapItem *base = realBase();
#if QT_POINTER_SIZE == 8
    const int start = 0;
#else
    const int start = 1;
#endif
#ifndef QT_NO_DEBUG
    uint freeSlots = 0;
    uint allocatedSlots = 0;
#endif
    for (int i = start; i < EntriesInBitmap; ++i) {
        quintptr usedSlots = (objectBitmap[i]|extendsBitmap[i]);
#if QT_POINTER_SIZE == 8
        if (!i)
            usedSlots |= (static_cast<quintptr>(1) << (HeaderSize/SlotSize)) - 1;
#endif
#ifndef QT_NO_DEBUG
        allocatedSlots += qPopulationCount(usedSlots);
//        qDebug() << hex << "   i=" << i << "used=" << usedSlots;
#endif
        while (1) {
            uint index = qCountTrailingZeroBits(usedSlots + 1);
            if (index == Bits)
                break;
            uint freeStart = i*Bits + index;
            usedSlots &= ~((static_cast<quintptr>(1) << index) - 1);
            while (!usedSlots) {
                if (++i < EntriesInBitmap) {
                    usedSlots = (objectBitmap[i]|extendsBitmap[i]);
                } else {
                    Q_ASSERT(i == EntriesInBitmap);
                    // Overflows to 0 when counting trailing zeroes above in next iteration.
                    // Then, all the bits are zeroes and we break.
                    usedSlots = std::numeric_limits<quintptr>::max();
                    break;
                }
#ifndef QT_NO_DEBUG
                allocatedSlots += qPopulationCount(usedSlots);
//                qDebug() << hex << "   i=" << i << "used=" << usedSlots;
#endif
            }
            HeapItem *freeItem = base + freeStart;

            index = qCountTrailingZeroBits(usedSlots);
            usedSlots |= (quintptr(1) << index) - 1;
            uint freeEnd = i*Bits + index;
            uint nSlots = freeEnd - freeStart;
#ifndef QT_NO_DEBUG
//            qDebug() << hex << "   got free slots from" << freeStart << "to" << freeEnd << "n=" << nSlots << "usedSlots=" << usedSlots;
            freeSlots += nSlots;
#endif
            Q_ASSERT(freeEnd > freeStart && freeEnd <= NumSlots);
            freeItem->freeData.availableSlots = nSlots;
            uint bin = qMin(nBins - 1, nSlots);
            freeItem->freeData.next = bins[bin];
            bins[bin] = freeItem;
        }
    }
#ifndef QT_NO_DEBUG
    Q_ASSERT(freeSlots + allocatedSlots == (EntriesInBitmap - start) * 8 * sizeof(quintptr));
#endif
}

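// Allocation strategy, in order of preference: exact-size free bin, bump
// allocation from the current chunk, a large enough entry in the last bin
// (splitting off the remainder), splitting a larger bin, and finally a brand
// new chunk (only if forceAllocation is set).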
HeapItem *BlockAllocator::allocate(size_t size, bool forceAllocation) {
    Q_ASSERT((size % Chunk::SlotSize) == 0);
    size_t slotsRequired = size >> Chunk::SlotSizeShift;

    if (allocationStats)
        ++allocationStats[binForSlots(slotsRequired)];

    HeapItem **last;

    HeapItem *m;

    if (slotsRequired < NumBins - 1) {
        m = freeBins[slotsRequired];
        if (m) {
            freeBins[slotsRequired] = m->freeData.next;
            goto done;
        }
    }

    if (nFree >= slotsRequired) {
        // use bump allocation
        Q_ASSERT(nextFree);
        m = nextFree;
        nextFree += slotsRequired;
        nFree -= slotsRequired;
        goto done;
    }

//    DEBUG << "No matching bin found for item" << size << bin;
    // search last bin for a large enough item
    last = &freeBins[NumBins - 1];
    while ((m = *last)) {
        if (m->freeData.availableSlots >= slotsRequired) {
            *last = m->freeData.next; // take it out of the list

            size_t remainingSlots = m->freeData.availableSlots - slotsRequired;
//            DEBUG << "found large free slots of size" << m->freeData.availableSlots << m << "remaining" << remainingSlots;
            if (remainingSlots == 0)
                goto done;

            HeapItem *remainder = m + slotsRequired;
            if (remainingSlots > nFree) {
                if (nFree) {
                    size_t bin = binForSlots(nFree);
                    nextFree->freeData.next = freeBins[bin];
                    nextFree->freeData.availableSlots = nFree;
                    freeBins[bin] = nextFree;
                }
                nextFree = remainder;
                nFree = remainingSlots;
            } else {
                remainder->freeData.availableSlots = remainingSlots;
                size_t binForRemainder = binForSlots(remainingSlots);
                remainder->freeData.next = freeBins[binForRemainder];
                freeBins[binForRemainder] = remainder;
            }
            goto done;
        }
        last = &m->freeData.next;
    }

    if (slotsRequired < NumBins - 1) {
        // check if we can split up another slot
        for (size_t i = slotsRequired + 1; i < NumBins - 1; ++i) {
            m = freeBins[i];
            if (m) {
                freeBins[i] = m->freeData.next; // take it out of the list
//                qDebug() << "got item" << slotsRequired << "from slot" << i;
                size_t remainingSlots = i - slotsRequired;
                Q_ASSERT(remainingSlots < NumBins - 1);
                HeapItem *remainder = m + slotsRequired;
                remainder->freeData.availableSlots = remainingSlots;
                remainder->freeData.next = freeBins[remainingSlots];
                freeBins[remainingSlots] = remainder;
                goto done;
            }
        }
    }

    if (!m) {
        if (!forceAllocation)
            return nullptr;
        if (nFree) {
            // Save any remaining slots of the current chunk
            // for later, smaller allocations.
            size_t bin = binForSlots(nFree);
            nextFree->freeData.next = freeBins[bin];
            nextFree->freeData.availableSlots = nFree;
            freeBins[bin] = nextFree;
        }
        Chunk *newChunk = chunkAllocator->allocate();
        Q_V4_PROFILE_ALLOC(engine, Chunk::DataSize, Profiling::HeapPage);
        chunks.push_back(newChunk);
        nextFree = newChunk->first();
        nFree = Chunk::AvailableSlots;
        m = nextFree;
        nextFree += slotsRequired;
        nFree -= slotsRequired;
    }

done:
    m->setAllocatedSlots(slotsRequired);
    Q_V4_PROFILE_ALLOC(engine, slotsRequired * Chunk::SlotSize, Profiling::SmallItem);
#ifdef V4_USE_HEAPTRACK
    heaptrack_report_alloc(m, slotsRequired * Chunk::SlotSize);
#endif
//    DEBUG << "   " << hex << m->chunk() << m->chunk()->objectBitmap[0] << m->chunk()->extendsBitmap[0] << (m - m->chunk()->realBase());
    return m;
}

void BlockAllocator::sweep()
{
    nextFree = nullptr;
    nFree = 0;
    memset(freeBins, 0, sizeof(freeBins));

//    qDebug() << "BlockAlloc: sweep";
    usedSlotsAfterLastSweep = 0;

    auto firstEmptyChunk = std::partition(chunks.begin(), chunks.end(), [this](Chunk *c) {
        return c->sweep(engine);
    });

    std::for_each(chunks.begin(), firstEmptyChunk, [this](Chunk *c) {
        c->sortIntoBins(freeBins, NumBins);
        usedSlotsAfterLastSweep += c->nUsedSlots();
    });

    // only free the chunks at the end to avoid that the sweep() calls indirectly
    // access freed memory
    std::for_each(firstEmptyChunk, chunks.end(), [this](Chunk *c) {
        Q_V4_PROFILE_DEALLOC(engine, Chunk::DataSize, Profiling::HeapPage);
        chunkAllocator->free(c);
    });

    chunks.erase(firstEmptyChunk, chunks.end());
}

void BlockAllocator::freeAll()
{
    for (auto c : chunks)
        c->freeAll(engine);
    for (auto c : chunks) {
        Q_V4_PROFILE_DEALLOC(engine, Chunk::DataSize, Profiling::HeapPage);
        chunkAllocator->free(c);
    }
}

void BlockAllocator::resetBlackBits()
{
    for (auto c : chunks)
        c->resetBlackBits();
}

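// Allocations that don't fit the slot-based BlockAllocator: each huge item gets
// its own chunk, and very large requests get a dedicated MemorySegment.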
HeapItem *HugeItemAllocator::allocate(size_t size) {
    MemorySegment *m = nullptr;
    Chunk *c = nullptr;
    if (size >= MemorySegment::SegmentSize/2) {
        // too large to handle through the ChunkAllocator, let's get our own memory segment
        size += Chunk::HeaderSize; // space required for the Chunk header
        size_t pageSize = WTF::pageSize();
        size = (size + pageSize - 1) & ~(pageSize - 1); // align to page sizes
        m = new MemorySegment(size);
        c = m->allocate(size);
    } else {
        c = chunkAllocator->allocate(size);
    }
    Q_ASSERT(c);
    chunks.push_back(HugeChunk{m, c, size});
    Chunk::setBit(c->objectBitmap, c->first() - c->realBase());
    Q_V4_PROFILE_ALLOC(engine, size, Profiling::LargeItem);
#ifdef V4_USE_HEAPTRACK
    heaptrack_report_alloc(c, size);
#endif
    return c->first();
}

static void freeHugeChunk(ChunkAllocator *chunkAllocator, const HugeItemAllocator::HugeChunk &c, ClassDestroyStatsCallback classCountPtr)
{
    HeapItem *itemToFree = c.chunk->first();
    Heap::Base *b = *itemToFree;
    const VTable *v = b->internalClass->vtable;
    if (Q_UNLIKELY(classCountPtr))
        classCountPtr(v->className);

    if (v->destroy) {
        v->destroy(b);
        b->_checkIsDestroyed();
    }
    if (c.segment) {
        // own memory segment
        c.segment->free(c.chunk, c.size);
        delete c.segment;
    } else {
        chunkAllocator->free(c.chunk, c.size);
    }
#ifdef V4_USE_HEAPTRACK
    heaptrack_report_free(c.chunk);
#endif
}

void HugeItemAllocator::sweep(ClassDestroyStatsCallback classCountPtr)
{
    auto isBlack = [this, classCountPtr] (const HugeChunk &c) {
        bool b = c.chunk->first()->isBlack();
        Chunk::clearBit(c.chunk->blackBitmap, c.chunk->first() - c.chunk->realBase());
        if (!b) {
            Q_V4_PROFILE_DEALLOC(engine, c.size, Profiling::LargeItem);
            freeHugeChunk(chunkAllocator, c, classCountPtr);
        }
        return !b;
    };

    auto newEnd = std::remove_if(chunks.begin(), chunks.end(), isBlack);
    chunks.erase(newEnd, chunks.end());
}

void HugeItemAllocator::resetBlackBits()
{
    for (auto c : chunks)
        Chunk::clearBit(c.chunk->blackBitmap, c.chunk->first() - c.chunk->realBase());
}

void HugeItemAllocator::freeAll()
{
    for (auto &c : chunks) {
        Q_V4_PROFILE_DEALLOC(engine, c.size, Profiling::LargeItem);
        freeHugeChunk(chunkAllocator, c, nullptr);
    }
}

namespace {
using ExtraData = GCStateInfo::ExtraData;
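
// The functions below implement the individual states of the incremental GC
// state machine. Each one performs a bounded amount of work and returns the
// next state; GCStateMachine::transition() drives them until the deadline of
// the current step expires or the cycle finishes.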
GCState markStart(GCStateMachine *that, ExtraData &)
{
    //Initialize the mark stack
    that->mm->m_markStack = std::make_unique<MarkStack>(that->mm->engine);
    that->mm->engine->isGCOngoing = true;
    return GCState::MarkGlobalObject;
}

GCState markGlobalObject(GCStateMachine *that, ExtraData &)
{
    that->mm->engine->markObjects(that->mm->m_markStack.get());
    return GCState::MarkJSStack;
}

GCState markJSStack(GCStateMachine *that, ExtraData &)
{
    that->mm->collectFromJSStack(that->mm->markStack());
    return GCState::InitMarkPersistentValues;
}

GCState initMarkPersistentValues(GCStateMachine *that, ExtraData &stateData)
{
    if (!that->mm->m_persistentValues)
        return GCState::InitMarkWeakValues; // no persistent values to mark
    stateData = GCIteratorStorage { that->mm->m_persistentValues->begin() };
    return GCState::MarkPersistentValues;
}

static constexpr int markLoopIterationCount = 1024;

bool wasDrainNecessary(MarkStack *ms, QDeadlineTimer deadline)
{
    if (ms->remainingBeforeSoftLimit() > markLoopIterationCount)
        return false;
    // drain
    ms->drain(deadline);
    return true;
}

GCState markPersistentValues(GCStateMachine *that, ExtraData &stateData) {
    auto markStack = that->mm->markStack();
    if (wasDrainNecessary(markStack, that->deadline) && that->deadline.hasExpired())
        return GCState::MarkPersistentValues;
    PersistentValueStorage::Iterator& it = get<GCIteratorStorage>(stateData).it;
    // avoid checking the deadline on every iteration by batching them
    for (int i = 0; i < markLoopIterationCount; ++i) {
        if (!it.p)
            return GCState::InitMarkWeakValues;
        if (Managed *m = (*it).as<Managed>())
            m->mark(markStack);
        ++it;
    }
    return GCState::MarkPersistentValues;
}

GCState initMarkWeakValues(GCStateMachine *that, ExtraData &stateData)
{
    stateData = GCIteratorStorage { that->mm->m_weakValues->begin() };
    return GCState::MarkWeakValues;
}

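// Weak values are not marked in general; the exception are QObjectWrappers
// whose QObject (or the root of its parent chain) must be kept alive during
// garbage collection, as reported by QQmlData::keepAliveDuringGarbageCollection().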
GCState markWeakValues(GCStateMachine *that, ExtraData &stateData)
{
    auto markStack = that->mm->markStack();
    if (wasDrainNecessary(markStack, that->deadline) && that->deadline.hasExpired())
        return GCState::MarkWeakValues;
    PersistentValueStorage::Iterator& it = get<GCIteratorStorage>(stateData).it;
    // avoid checking the deadline on every iteration by batching them
    for (int i = 0; i < markLoopIterationCount; ++i) {
        if (!it.p)
            return GCState::MarkDrain;
        QObjectWrapper *qobjectWrapper = (*it).as<QObjectWrapper>();
        ++it;
        if (!qobjectWrapper)
            continue;
        QObject *qobject = qobjectWrapper->object();
        if (!qobject)
            continue;
        bool keepAlive = QQmlData::keepAliveDuringGarbageCollection(qobject);

        if (!keepAlive) {
            if (QObject *parent = qobject->parent()) {
                while (parent->parent())
                    parent = parent->parent();
                keepAlive = QQmlData::keepAliveDuringGarbageCollection(parent);
            }
        }

        if (keepAlive)
            qobjectWrapper->mark(that->mm->markStack());
    }
    return GCState::MarkWeakValues;
}

GCState markDrain(GCStateMachine *that, ExtraData &)
{
    if (that->deadline.isForever()) {
        that->mm->markStack()->drain();
        return GCState::MarkReady;
    }
    auto drainState = that->mm->m_markStack->drain(that->deadline);
    return drainState == MarkStack::DrainState::Complete
            ? GCState::MarkReady
            : GCState::MarkDrain;
}

GCState markReady(GCStateMachine *, ExtraData &)
{
    //Possibility to do some clean up, stat printing, etc...
    return GCState::InitCallDestroyObjects;
}

/*! \internal
    collects new references from the stack, then drains the mark stack again
*/
void redrain(GCStateMachine *that)
{
    that->mm->collectFromJSStack(that->mm->markStack());
    that->mm->m_markStack->drain();
}

GCState initCallDestroyObjects(GCStateMachine *that, ExtraData &stateData)
{
    // as we don't have a deletion barrier, we need to rescan the stack
    redrain(that);
    if (!that->mm->m_weakValues)
        return GCState::FreeWeakMaps; // no need to call destroy objects
    stateData = GCIteratorStorage { that->mm->m_weakValues->begin() };
    return GCState::CallDestroyObjects;
}
GCState callDestroyObject(GCStateMachine *that, ExtraData &stateData)
{
    PersistentValueStorage::Iterator& it = get<GCIteratorStorage>(stateData).it;
    // destroyObject might call user code, which really shouldn't call back into the gc
    auto oldState = std::exchange(that->mm->gcBlocked, QV4::MemoryManager::Blockness::InCriticalSection);
    auto cleanup = qScopeGuard([&]() {
        that->mm->gcBlocked = oldState;
    });
    // avoid checking the deadline on every iteration by batching them
    for (int i = 0; i < markLoopIterationCount; ++i) {
        if (!it.p)
            return GCState::FreeWeakMaps;
        Managed *m = (*it).managed();
        ++it;
        if (!m || m->markBit())
            continue;
        // we need to call destroyObject on qobjectwrappers now, so that they can emit the destroyed
        // signal before we start sweeping the heap
        if (QObjectWrapper *qobjectWrapper = m->as<QObjectWrapper>())
            qobjectWrapper->destroyObject(/*lastSweep =*/false);
    }
    return GCState::CallDestroyObjects;
}

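// Prune the intrusive list of weak maps: unmarked maps are simply unlinked
// (their storage is reclaimed by the sweep), surviving maps drop every entry
// whose key is no longer marked.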
void freeWeakMaps(MemoryManager *mm)
{
    for (auto [map, lastMap] = std::tuple {mm->weakMaps, &mm->weakMaps }; map; map = map->nextWeakMap) {
        if (!map->isMarked())
            continue;
        map->removeUnmarkedKeys();
        *lastMap = map;
        lastMap = &map->nextWeakMap;
    }
}

GCState freeWeakMaps(GCStateMachine *that, ExtraData &)
{
    freeWeakMaps(that->mm);
    return GCState::FreeWeakSets;
}

void freeWeakSets(MemoryManager *mm)
{
    for (auto [set, lastSet] = std::tuple {mm->weakSets, &mm->weakSets}; set; set = set->nextWeakSet) {

        if (!set->isMarked())
            continue;
        set->removeUnmarkedKeys();
        *lastSet = set;
        lastSet = &set->nextWeakSet;
    }
}

GCState freeWeakSets(GCStateMachine *that, ExtraData &)
{
    freeWeakSets(that->mm);
    return GCState::HandleQObjectWrappers;
}

GCState handleQObjectWrappers(GCStateMachine *that, ExtraData &)
{
    that->mm->cleanupDeletedQObjectWrappersInSweep();
    return GCState::DoSweep;
}

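// Final phase: sweep all allocators, clear the mark (black) bits for the next
// cycle, and unblock the GC.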
GCState doSweep(GCStateMachine *that, ExtraData &)
{
    auto mm = that->mm;

    mm->engine->identifierTable->sweep();
    mm->blockAllocator.sweep();
    mm->hugeItemAllocator.sweep(that->mm->gcCollectorStats ? increaseFreedCountForClass : nullptr);
    mm->icAllocator.sweep();

    // reset all black bits
    mm->blockAllocator.resetBlackBits();
    mm->hugeItemAllocator.resetBlackBits();
    mm->icAllocator.resetBlackBits();

    mm->usedSlotsAfterLastFullSweep = mm->blockAllocator.usedSlotsAfterLastSweep + mm->icAllocator.usedSlotsAfterLastSweep;
    mm->gcBlocked = MemoryManager::Unblocked;
    mm->m_markStack.reset();
    mm->engine->isGCOngoing = false;

    mm->updateUnmanagedHeapSizeGCLimit();

    return GCState::Invalid;
}

}


MemoryManager::MemoryManager(ExecutionEngine *engine)
    : engine(engine)
    , chunkAllocator(new ChunkAllocator)
    , blockAllocator(chunkAllocator, engine)
    , icAllocator(chunkAllocator, engine)
    , hugeItemAllocator(chunkAllocator, engine)
    , m_persistentValues(new PersistentValueStorage(engine))
    , m_weakValues(new PersistentValueStorage(engine))
    , unmanagedHeapSizeGCLimit(MinUnmanagedHeapSizeGCLimit)
    , aggressiveGC(!qEnvironmentVariableIsEmpty("QV4_MM_AGGRESSIVE_GC"))
    , gcStats(lcGcStats().isDebugEnabled())
    , gcCollectorStats(lcGcAllocatorStats().isDebugEnabled())
{
#ifdef V4_USE_VALGRIND
    VALGRIND_CREATE_MEMPOOL(this, 0, true);
#endif
    memset(statistics.allocations, 0, sizeof(statistics.allocations));
    if (gcStats)
        blockAllocator.allocationStats = statistics.allocations;

    gcStateMachine = std::make_unique<GCStateMachine>();
    gcStateMachine->mm = this;

    gcStateMachine->stateInfoMap[GCState::MarkStart] = {
        markStart,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::MarkGlobalObject] = {
        markGlobalObject,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::MarkJSStack] = {
        markJSStack,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::InitMarkPersistentValues] = {
        initMarkPersistentValues,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::MarkPersistentValues] = {
        markPersistentValues,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::InitMarkWeakValues] = {
        initMarkWeakValues,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::MarkWeakValues] = {
        markWeakValues,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::MarkDrain] = {
        markDrain,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::MarkReady] = {
        markReady,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::InitCallDestroyObjects] = {
        initCallDestroyObjects,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::CallDestroyObjects] = {
        callDestroyObject,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::FreeWeakMaps] = {
        freeWeakMaps,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::FreeWeakSets] = {
        freeWeakSets,
        true, // ensure that handleQObjectWrappers runs in isolation
    };
    gcStateMachine->stateInfoMap[GCState::HandleQObjectWrappers] = {
        handleQObjectWrappers,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::DoSweep] = {
        doSweep,
        false,
    };
}

Heap::Base *MemoryManager::allocString(std::size_t unmanagedSize)
{
    const size_t stringSize = align(sizeof(Heap::String));
#ifdef MM_STATS
    lastAllocRequestedSlots = stringSize >> Chunk::SlotSizeShift;
    ++allocationCount;
#endif
    unmanagedHeapSize += unmanagedSize;

    HeapItem *m = allocate(&blockAllocator, stringSize);
    memset(m, 0, stringSize);
    return *m;
}

Heap::Base *MemoryManager::allocData(std::size_t size)
{
#ifdef MM_STATS
    lastAllocRequestedSlots = size >> Chunk::SlotSizeShift;
    ++allocationCount;
#endif

    Q_ASSERT(size >= Chunk::SlotSize);
    Q_ASSERT(size % Chunk::SlotSize == 0);

    HeapItem *m = allocate(&blockAllocator, size);
    memset(m, 0, size);
    return *m;
}

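// Allocate an object together with its member data. If the members don't fit
// into the object's inline property area, the object and a trailing
// Heap::MemberData are carved out of one block allocation where possible,
// falling back to the huge item allocator for oversized member data.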
Heap::Object *MemoryManager::allocObjectWithMemberData(const QV4::VTable *vtable, uint nMembers)
{
    uint size = (vtable->nInlineProperties + vtable->inlinePropertyOffset)*sizeof(Value);
    Q_ASSERT(!(size % sizeof(HeapItem)));

    Heap::Object *o;
    if (nMembers <= vtable->nInlineProperties) {
        o = static_cast<Heap::Object *>(allocData(size));
    } else {
        // Allocate both in one go through the block allocator
        nMembers -= vtable->nInlineProperties;
        std::size_t memberSize = align(sizeof(Heap::MemberData) + (nMembers - 1)*sizeof(Value));
        size_t totalSize = size + memberSize;
        Heap::MemberData *m;
        if (totalSize > Chunk::DataSize) {
            o = static_cast<Heap::Object *>(allocData(size));
            m = hugeItemAllocator.allocate(memberSize)->as<Heap::MemberData>();
        } else {
            HeapItem *mh = reinterpret_cast<HeapItem *>(allocData(totalSize));
            Heap::Base *b = *mh;
            o = static_cast<Heap::Object *>(b);
            mh += (size >> Chunk::SlotSizeShift);
            m = mh->as<Heap::MemberData>();
            Chunk *c = mh->chunk();
            size_t index = mh - c->realBase();
            Chunk::setBit(c->objectBitmap, index);
            Chunk::clearBit(c->extendsBitmap, index);
        }
        m->internalClass.set(engine, engine->internalClasses(EngineBase::Class_MemberData));
        o->memberData.set(engine, m);
        Q_ASSERT(o->memberData->internalClass);
        m->values.alloc = static_cast<uint>((memberSize - sizeof(Heap::MemberData) + sizeof(Value))/sizeof(Value));
        m->values.size = o->memberData->values.alloc;
        m->init();
//        qDebug() << "    got" << o->memberData << o->memberData->size;
    }
//    qDebug() << "allocating object with memberData" << o << o->memberData.operator->();
    return o;
}

static uint markStackSize = 0;

MarkStack::MarkStack(ExecutionEngine *engine)
    : m_engine(engine)
{
    m_base = (Heap::Base **)engine->gcStack->base();
    m_top = m_base;
    const size_t size = engine->maxGCStackSize() / sizeof(Heap::Base);
    m_hardLimit = m_base + size;
    m_softLimit = m_base + size * 3 / 4;
}

void MarkStack::drain()
{
    // we're not calling drain(QDeadlineTimer::Forever) as that has higher overhead
    while (m_top > m_base) {
        Heap::Base *h = pop();
        ++markStackSize;
        Q_ASSERT(h); // at this point we should only have Heap::Base objects in this area on the stack. If not, weird things might happen.
        Q_ASSERT(h->internalClass);
        h->internalClass->vtable->markObjects(h, this);
    }
}

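// Incremental variant of drain(): processes the mark stack in batches and
// checks the deadline only between batches, returning Ongoing if work remains.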
MarkStack::DrainState MarkStack::drain(QDeadlineTimer deadline)
{
    do {
        for (int i = 0; i <= markLoopIterationCount * 10; ++i) {
            if (m_top == m_base)
                return DrainState::Complete;
            Heap::Base *h = pop();
            ++markStackSize;
            Q_ASSERT(h); // at this point we should only have Heap::Base objects in this area on the stack. If not, weird things might happen.
            Q_ASSERT(h->internalClass);
            h->internalClass->vtable->markObjects(h, this);
        }
    } while (!deadline.hasExpired());
    return DrainState::Ongoing;
}

void MarkStack::setSoftLimit(size_t size)
{
    m_softLimit = m_base + size;
    Q_ASSERT(m_softLimit < m_hardLimit);
}

void MemoryManager::onEventLoop()
{
    if (engine->inShutdown)
        return;
    if (gcBlocked == InCriticalSection) {
        QMetaObject::invokeMethod(engine->publicEngine, [this]{
            onEventLoop();
        }, Qt::QueuedConnection);
        return;
    }
    if (gcStateMachine->inProgress()) {
        gcStateMachine->step();
    }
}


void MemoryManager::setGCTimeLimit(int timeMs)
{
    gcStateMachine->timeLimit = std::chrono::milliseconds(timeMs);
}

void MemoryManager::sweep(bool lastSweep, ClassDestroyStatsCallback classCountPtr)
{

    for (PersistentValueStorage::Iterator it = m_weakValues->begin(); it != m_weakValues->end(); ++it) {
        Managed *m = (*it).managed();
        if (!m || m->markBit())
            continue;
        // we need to call destroyObject on qobjectwrappers now, so that they can emit the destroyed
        // signal before we start sweeping the heap
        if (QObjectWrapper *qobjectWrapper = (*it).as<QObjectWrapper>()) {
            qobjectWrapper->destroyObject(lastSweep);
        }
    }

    freeWeakMaps(this);
    freeWeakSets(this);

    cleanupDeletedQObjectWrappersInSweep();

    if (!lastSweep) {
        engine->identifierTable->sweep();
        blockAllocator.sweep(/*classCountPtr*/);
        hugeItemAllocator.sweep(classCountPtr);
        icAllocator.sweep(/*classCountPtr*/);
    }

    // reset all black bits
    blockAllocator.resetBlackBits();
    hugeItemAllocator.resetBlackBits();
    icAllocator.resetBlackBits();

    usedSlotsAfterLastFullSweep = blockAllocator.usedSlotsAfterLastSweep + icAllocator.usedSlotsAfterLastSweep;
    updateUnmanagedHeapSizeGCLimit();
    gcBlocked = MemoryManager::Unblocked;
}

/*
   \internal
   Helper function used in sweep to clean up the (to-be-freed) QObjectWrappers.
   Used both in MemoryManager::sweep, and the corresponding gc statemachine phase.
*/
void MemoryManager::cleanupDeletedQObjectWrappersInSweep()
{
    // onDestruction handlers may have accessed other QObject wrappers and reset their value, so ensure
    // that they are all set to undefined.
    for (PersistentValueStorage::Iterator it = m_weakValues->begin(); it != m_weakValues->end(); ++it) {
        Managed *m = (*it).managed();
        if (!m || m->markBit())
            continue;
        (*it) = Value::undefinedValue();
    }

    // Now it is time to free the QV4::QObjectWrapper Values; we must check each Value's tag to make sure its object has been destroyed
    const int pendingCount = m_pendingFreedObjectWrapperValue.size();
    if (pendingCount) {
        QVector<Value *> remainingWeakQObjectWrappers;
        remainingWeakQObjectWrappers.reserve(pendingCount);
        for (int i = 0; i < pendingCount; ++i) {
            Value *v = m_pendingFreedObjectWrapperValue.at(i);
            if (v->isUndefined() || v->isEmpty())
                PersistentValueStorage::free(v);
            else
                remainingWeakQObjectWrappers.append(v);
        }
        m_pendingFreedObjectWrapperValue = remainingWeakQObjectWrappers;
    }

    if (MultiplyWrappedQObjectMap *multiplyWrappedQObjects = engine->m_multiplyWrappedQObjects) {
        for (MultiplyWrappedQObjectMap::Iterator it = multiplyWrappedQObjects->begin(); it != multiplyWrappedQObjects->end();) {
            if (it.value().isNullOrUndefined())
                it = multiplyWrappedQObjects->erase(it);
            else
                ++it;
        }
    }
}

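// Heuristic for triggering a GC run: the managed heap must have grown past
// MinSlotsGCLimit, and the slots still in use after the last full sweep must
// make up less than 100/GCOverallocation (i.e. half) of the current total.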
bool MemoryManager::shouldRunGC() const
{
    size_t total = blockAllocator.totalSlots() + icAllocator.totalSlots();
    if (total > MinSlotsGCLimit && usedSlotsAfterLastFullSweep * GCOverallocation < total * 100)
        return true;
    return false;
}

static size_t dumpBins(BlockAllocator *b, const char *title)
{
    const QLoggingCategory &stats = lcGcAllocatorStats();
    size_t totalSlotMem = 0;
    if (title)
        qDebug(stats) << "Slot map for" << title << "allocator:";
    for (uint i = 0; i < BlockAllocator::NumBins; ++i) {
        uint nEntries = 0;
        HeapItem *h = b->freeBins[i];
        while (h) {
            ++nEntries;
            totalSlotMem += h->freeData.availableSlots;
            h = h->freeData.next;
        }
        if (title)
            qDebug(stats) << "    number of entries in slot" << i << ":" << nEntries;
    }
    SDUMP() << "    large slot map";
    HeapItem *h = b->freeBins[BlockAllocator::NumBins - 1];
    while (h) {
        SDUMP() << "        " << Qt::hex << (quintptr(h)/32) << h->freeData.availableSlots;
        h = h->freeData.next;
    }

    if (title)
        qDebug(stats) << "  total mem in bins" << totalSlotMem*Chunk::SlotSize;
    return totalSlotMem*Chunk::SlotSize;
}

/*!
    \internal
    Precondition: Incremental garbage collection must be currently active
    Finishes incremental garbage collection, unless in a critical section
    Code entering a critical section is expected to check if we need to
    force a gc completion, and to trigger the gc again if necessary
    when exiting the critical section.
    Returns \c true if the gc cycle completed, false otherwise.
*/
bool MemoryManager::tryForceGCCompletion()
{
    if (gcBlocked == InCriticalSection) {
        qCDebug(lcGcForcedRuns)
                << "Tried to force the GC to complete a run but failed due to being in a critical section.";
        return false;
    }

    const bool incrementalGCIsAlreadyRunning = m_markStack != nullptr;
    Q_ASSERT(incrementalGCIsAlreadyRunning);

    qCDebug(lcGcForcedRuns) << "Forcing the GC to complete a run.";

    auto oldTimeLimit = std::exchange(gcStateMachine->timeLimit, std::chrono::microseconds::max());
    while (gcStateMachine->inProgress()) {
        gcStateMachine->step();
    }
    gcStateMachine->timeLimit = oldTimeLimit;
    return true;
}

void MemoryManager::runFullGC()
{
    runGC();
    const bool incrementalGCStillRunning = m_markStack != nullptr;
    if (incrementalGCStillRunning)
        tryForceGCCompletion();
}

void MemoryManager::runGC()
{
    if (gcBlocked != Unblocked) {
        return;
    }

    gcBlocked = MemoryManager::NormalBlocked;

    if (gcStats) {
        statistics.maxReservedMem = qMax(statistics.maxReservedMem, getAllocatedMem());
        statistics.maxAllocatedMem = qMax(statistics.maxAllocatedMem, getUsedMem() + getLargeItemsMem());
    }

    if (!gcCollectorStats) {
        gcStateMachine->step();
    } else {
        bool triggeredByUnmanagedHeap = (unmanagedHeapSize > unmanagedHeapSizeGCLimit);
        size_t oldUnmanagedSize = unmanagedHeapSize;

        const size_t totalMem = getAllocatedMem();
        const size_t usedBefore = getUsedMem();
        const size_t largeItemsBefore = getLargeItemsMem();

        const QLoggingCategory &stats = lcGcAllocatorStats();
        qDebug(stats) << "========== GC ==========";
#ifdef MM_STATS
        qDebug(stats) << "    Triggered by alloc request of" << lastAllocRequestedSlots << "slots.";
        qDebug(stats) << "    Allocations since last GC" << allocationCount;
        allocationCount = 0;
#endif
        size_t oldChunks = blockAllocator.chunks.size();
        qDebug(stats) << "Allocated" << totalMem << "bytes in" << oldChunks << "chunks";
        qDebug(stats) << "Fragmented memory before GC" << (totalMem - usedBefore);
        dumpBins(&blockAllocator, "Block");
        dumpBins(&icAllocator, "InternalClass");

        QElapsedTimer t;
        t.start();
        gcStateMachine->step();
        qint64 markTime = t.nsecsElapsed()/1000;
        t.restart();
        const size_t usedAfter = getUsedMem();
        const size_t largeItemsAfter = getLargeItemsMem();

        if (triggeredByUnmanagedHeap) {
            qDebug(stats) << "triggered by unmanaged heap:";
            qDebug(stats) << "   old unmanaged heap size:" << oldUnmanagedSize;
            qDebug(stats) << "   new unmanaged heap:" << unmanagedHeapSize;
            qDebug(stats) << "   unmanaged heap limit:" << unmanagedHeapSizeGCLimit;
        }
        size_t memInBins = dumpBins(&blockAllocator, "Block")
                + dumpBins(&icAllocator, "InternalClass");
        qDebug(stats) << "Marked objects in" << markTime << "us.";
        qDebug(stats) << "   " << markStackSize << "objects marked";

        // sort our object types by number of freed instances
        MMStatsHash freedObjectStats;
        std::swap(freedObjectStats, *freedObjectStatsGlobal());
        typedef std::pair<const char*, int> ObjectStatInfo;
        std::vector<ObjectStatInfo> freedObjectsSorted;
        freedObjectsSorted.reserve(freedObjectStats.size());
        for (auto it = freedObjectStats.constBegin(); it != freedObjectStats.constEnd(); ++it) {
            freedObjectsSorted.push_back(std::make_pair(it.key(), it.value()));
        }
        std::sort(freedObjectsSorted.begin(), freedObjectsSorted.end(), [](const ObjectStatInfo &a, const ObjectStatInfo &b) {
            return a.second > b.second && strcmp(a.first, b.first) < 0;
        });

        qDebug(stats) << "Used memory before GC:" << usedBefore;
        qDebug(stats) << "Used memory after GC :" << usedAfter;
        qDebug(stats) << "Freed up bytes       :" << (usedBefore - usedAfter);
        qDebug(stats) << "Freed up chunks      :" << (oldChunks - blockAllocator.chunks.size());
        size_t lost = blockAllocator.allocatedMem() + icAllocator.allocatedMem()
                - memInBins - usedAfter;
        if (lost)
            qDebug(stats) << "!!!!!!!!!!!!!!!!!!!!! LOST MEM:" << lost << "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!";
        if (largeItemsBefore || largeItemsAfter) {
            qDebug(stats) << "Large item memory before GC:" << largeItemsBefore;
            qDebug(stats) << "Large item memory after GC :" << largeItemsAfter;
            qDebug(stats) << "Large item memory freed up :" << (largeItemsBefore - largeItemsAfter);
        }

        for (auto it = freedObjectsSorted.cbegin(); it != freedObjectsSorted.cend(); ++it) {
            qDebug(stats).noquote() << QString::fromLatin1("Freed JS type: %1 (%2 instances)").arg(QString::fromLatin1(it->first), QString::number(it->second));
        }

        qDebug(stats) << "======== End GC ========";
    }

    if (gcStats)
        statistics.maxUsedMem = qMax(statistics.maxUsedMem, getUsedMem() + getLargeItemsMem());
}

size_t MemoryManager::getUsedMem() const
{
    return blockAllocator.usedMem() + icAllocator.usedMem();
}

size_t MemoryManager::getAllocatedMem() const
{
    return blockAllocator.allocatedMem() + icAllocator.allocatedMem() + hugeItemAllocator.usedMem();
}

size_t MemoryManager::getLargeItemsMem() const
{
    return hugeItemAllocator.usedMem();
}

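// Adjust the unmanaged-heap trigger: when the unmanaged heap (e.g. string data)
// exceeds 75% of the limit, raise the limit to twice the larger of the current
// limit and current usage; when usage has dropped below 25%, halve the limit,
// but never below MinUnmanagedHeapSizeGCLimit.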
void MemoryManager::updateUnmanagedHeapSizeGCLimit()
{
    if (3*unmanagedHeapSizeGCLimit <= 4 * unmanagedHeapSize) {
        // more than 75% full, raise limit
        unmanagedHeapSizeGCLimit = std::max(unmanagedHeapSizeGCLimit,
                                            unmanagedHeapSize) * 2;
    } else if (unmanagedHeapSize * 4 <= unmanagedHeapSizeGCLimit) {
        // less than 25% full, lower limit
        unmanagedHeapSizeGCLimit = qMax(std::size_t(MinUnmanagedHeapSizeGCLimit),
                                        unmanagedHeapSizeGCLimit/2);
    }

    if (aggressiveGC && !engine->inShutdown) {
        // ensure we don't 'lose' any memory,
        // but not during shutdown, because then we skip parts of sweep
        // and use freeAll instead
        Q_ASSERT(blockAllocator.allocatedMem()
                 == blockAllocator.usedMem() + dumpBins(&blockAllocator, nullptr));
        Q_ASSERT(icAllocator.allocatedMem()
                 == icAllocator.usedMem() + dumpBins(&icAllocator, nullptr));
    }
}

void MemoryManager::registerWeakMap(Heap::MapObject *map)
{
    map->nextWeakMap = weakMaps;
    weakMaps = map;
}

void MemoryManager::registerWeakSet(Heap::SetObject *set)
{
    set->nextWeakSet = weakSets;
    weakSets = set;
}

MemoryManager::~MemoryManager()
{
    delete m_persistentValues;
    dumpStats();

    // do one last non-incremental sweep to clean up C++ objects
    // first, abort any on-going incremental gc operation
    setGCTimeLimit(-1);
    if (engine->isGCOngoing) {
        engine->isGCOngoing = false;
        m_markStack.reset();
        gcStateMachine->state = GCState::Invalid;
        blockAllocator.resetBlackBits();
        hugeItemAllocator.resetBlackBits();
        icAllocator.resetBlackBits();
    }
    // then sweep
    sweep(/*lastSweep*/true);

    blockAllocator.freeAll();
    hugeItemAllocator.freeAll();
    icAllocator.freeAll();

    delete m_weakValues;
#ifdef V4_USE_VALGRIND
    VALGRIND_DESTROY_MEMPOOL(this);
#endif
    delete chunkAllocator;
}


void MemoryManager::dumpStats() const
{
    if (!gcStats)
        return;

    const QLoggingCategory &stats = lcGcStats();
    qDebug(stats) << "Qml GC memory allocation statistics:";
    qDebug(stats) << "Total memory allocated:" << statistics.maxReservedMem;
    qDebug(stats) << "Max memory used before a GC run:" << statistics.maxAllocatedMem;
    qDebug(stats) << "Max memory used after a GC run:" << statistics.maxUsedMem;
    qDebug(stats) << "Requests for different item sizes:";
    for (int i = 1; i < BlockAllocator::NumBins - 1; ++i)
        qDebug(stats) << "     <" << (i << Chunk::SlotSizeShift) << " bytes: " << statistics.allocations[i];
    qDebug(stats) << "     >=" << ((BlockAllocator::NumBins - 1) << Chunk::SlotSizeShift) << " bytes: " << statistics.allocations[BlockAllocator::NumBins - 1];
}

void MemoryManager::collectFromJSStack(MarkStack *markStack) const
{
    Value *v = engine->jsStackBase;
    Value *top = engine->jsStackTop;
    while (v < top) {
        Managed *m = v->managed();
        if (m) {
            Q_ASSERT(m->inUse());
            // Skip pointers to already freed objects, they are bogus as well
            m->mark(markStack);
        }
        ++v;
    }
}

GCStateMachine::GCStateMachine()
    : collectTimings(lcGcStepExecution().isDebugEnabled())
{
    // base assumption: target 60fps, use at most 1/3 of time for gc
    // unless overridden by env variable
    bool ok = false;
    auto envTimeLimit = qEnvironmentVariableIntValue("QV4_GC_TIMELIMIT", &ok);
    if (!ok)
        envTimeLimit = (1000 / 60) / 3;
    if (envTimeLimit > 0)
        timeLimit = std::chrono::milliseconds { envTimeLimit };
    else
        timeLimit = std::chrono::milliseconds { 0 };
}

static void logStepTiming(GCStateMachine* that, quint64 timing) {
    auto registerTimingWithResetOnOverflow = [](
            GCStateMachine::StepTiming& storage, quint64 timing, GCState state
    ) {
        auto wouldOverflow = [](quint64 lhs, quint64 rhs) {
            return rhs > 0 && lhs > std::numeric_limits<quint64>::max() - rhs;
        };

        if (wouldOverflow(storage.rolling_sum, timing) || wouldOverflow(storage.count, 1)) {
            qDebug(lcGcStepExecution) << "Resetting timings storage for"
                                      << QMetaEnum::fromType<GCState>().key(state) << "due to overflow.";
            storage.rolling_sum = timing;
            storage.count = 1;
        } else {
            storage.rolling_sum += timing;
            storage.count += 1;
        }
    };

    GCStateMachine::StepTiming& storage = that->executionTiming[that->state];
    registerTimingWithResetOnOverflow(storage, timing, that->state);

    qDebug(lcGcStepExecution) << "Performed" << QMetaEnum::fromType<GCState>().key(that->state)
                              << "in" << timing << "microseconds";
    qDebug(lcGcStepExecution) << "This step was performed" << storage.count << "time(s), executing in"
                              << (storage.rolling_sum / storage.count) << "microseconds on average.";
}

static GCState executeWithLoggingIfEnabled(GCStateMachine* that, GCStateInfo& stateInfo) {
    if (!that->collectTimings)
        return stateInfo.execute(that, that->stateData);

    QElapsedTimer timer;
    timer.start();
    GCState next = stateInfo.execute(that, that->stateData);
    logStepTiming(that, timer.nsecsElapsed()/1000);
    return next;
}

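// Drive the state machine. With a positive time limit the states are executed
// incrementally against a deadline and the remainder is rescheduled via the
// event loop; with no time limit the whole collection runs to completion in
// one go.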
void GCStateMachine::transition() {
    if (timeLimit.count() > 0) {
        deadline = QDeadlineTimer(timeLimit);
        bool deadlineExpired = false;
        while (!(deadlineExpired = deadline.hasExpired()) && state != GCState::Invalid) {
            if (state > GCState::InitCallDestroyObjects) {
                /* initCallDestroyObjects is the last action which drains the mark
                   stack by default. But our write-barrier might end up putting
                   objects on the markStack which still reference other objects;
                   especially when we call user code triggered by Component.onDestruction,
                   but also when we run into a timeout.
                   We don't redrain before InitCallDestroyObjects, as that would
                   potentially lead to useless busy-work (e.g., if the last references
                   to objects are removed while the mark phase is running)
                */
                redrain(this);
            }
            qCDebug(lcGcStateTransitions) << "Preparing to execute the"
                                          << QMetaEnum::fromType<GCState>().key(state) << "state";
            GCStateInfo& stateInfo = stateInfoMap[int(state)];
            state = executeWithLoggingIfEnabled(this, stateInfo);
            qCDebug(lcGcStateTransitions) << "Transitioning to the"
                                          << QMetaEnum::fromType<GCState>().key(state) << "state";
            if (stateInfo.breakAfter)
                break;
        }
        if (deadlineExpired)
            handleTimeout(state);
        if (state != GCState::Invalid)
            QMetaObject::invokeMethod(mm->engine->publicEngine, [this]{
                mm->onEventLoop();
            }, Qt::QueuedConnection);
    } else {
        deadline = QDeadlineTimer::Forever;
        while (state != GCState::Invalid) {
            qCDebug(lcGcStateTransitions) << "Preparing to execute the"
                                          << QMetaEnum::fromType<GCState>().key(state) << "state";
            GCStateInfo& stateInfo = stateInfoMap[int(state)];
            state = executeWithLoggingIfEnabled(this, stateInfo);
            qCDebug(lcGcStateTransitions) << "Transitioning to the"
                                          << QMetaEnum::fromType<GCState>().key(state) << "state";
        }
    }
}

} // namespace QV4

QT_END_NAMESPACE

#include "moc_qv4mm_p.cpp"