1 | // Copyright (C) 2021 The Qt Company Ltd. |
2 | // SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only |
3 | |
4 | #include "qv4engine_p.h" |
5 | #include "qv4object_p.h" |
6 | #include "qv4mm_p.h" |
7 | #include "qv4qobjectwrapper_p.h" |
8 | #include "qv4identifiertable_p.h" |
9 | #include <QtCore/qalgorithms.h> |
10 | #include <QtCore/private/qnumeric_p.h> |
11 | #include <QtCore/qloggingcategory.h> |
12 | #include <private/qv4alloca_p.h> |
13 | #include <qqmlengine.h> |
14 | #include "PageReservation.h" |
15 | #include "PageAllocation.h" |
16 | |
17 | #include <QElapsedTimer> |
18 | #include <QMap> |
19 | #include <QScopedValueRollback> |
20 | |
21 | #include <iostream> |
22 | #include <cstdlib> |
23 | #include <algorithm> |
24 | #include "qv4profiling_p.h" |
25 | #include "qv4mapobject_p.h" |
26 | #include "qv4setobject_p.h" |
27 | |
28 | //#define MM_STATS |
29 | |
30 | #if !defined(MM_STATS) && !defined(QT_NO_DEBUG) |
31 | #define MM_STATS |
32 | #endif |
33 | |
34 | #if MM_DEBUG |
35 | #define DEBUG qDebug() << "MM:" |
36 | #else |
37 | #define DEBUG if (1) ; else qDebug() << "MM:" |
38 | #endif |
39 | |
40 | #ifdef V4_USE_VALGRIND |
41 | #include <valgrind/valgrind.h> |
42 | #include <valgrind/memcheck.h> |
43 | #endif |
44 | |
45 | #ifdef V4_USE_HEAPTRACK |
46 | #include <heaptrack_api.h> |
47 | #endif |
48 | |
49 | #if OS(QNX) |
50 | #include <sys/storage.h> // __tls() |
51 | #endif |
52 | |
53 | #if USE(PTHREADS) && HAVE(PTHREAD_NP_H) |
54 | #include <pthread_np.h> |
55 | #endif |
56 | |
Q_LOGGING_CATEGORY(lcGcStats, "qt.qml.gc.statistics")
Q_DECLARE_LOGGING_CATEGORY(lcGcStats)
Q_LOGGING_CATEGORY(lcGcAllocatorStats, "qt.qml.gc.allocatorStats")
Q_DECLARE_LOGGING_CATEGORY(lcGcAllocatorStats)
61 | |
62 | using namespace WTF; |
63 | |
64 | QT_BEGIN_NAMESPACE |
65 | |
66 | namespace QV4 { |
67 | |
68 | enum { |
69 | MinSlotsGCLimit = QV4::Chunk::AvailableSlots*16, |
70 | GCOverallocation = 200 /* Max overallocation by the GC in % */ |
71 | }; |
72 | |
73 | struct MemorySegment { |
74 | enum { |
75 | #ifdef Q_OS_RTEMS |
76 | NumChunks = sizeof(quint64), |
77 | #else |
78 | NumChunks = 8*sizeof(quint64), |
79 | #endif |
80 | SegmentSize = NumChunks*Chunk::ChunkSize, |
81 | }; |
82 | |
83 | MemorySegment(size_t size) |
84 | { |
85 | size += Chunk::ChunkSize; // make sure we can get enough 64k alignment memory |
86 | if (size < SegmentSize) |
87 | size = SegmentSize; |
88 | |
        pageReservation = PageReservation::reserve(size, OSAllocator::JSGCHeapPages);
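        // Round the reservation's base address up to the next ChunkSize boundary so that
        // every Chunk handed out from this segment is ChunkSize-aligned.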
90 | base = reinterpret_cast<Chunk *>((reinterpret_cast<quintptr>(pageReservation.base()) + Chunk::ChunkSize - 1) & ~(Chunk::ChunkSize - 1)); |
91 | nChunks = NumChunks; |
92 | availableBytes = size - (reinterpret_cast<quintptr>(base) - reinterpret_cast<quintptr>(pageReservation.base())); |
93 | if (availableBytes < SegmentSize) |
94 | --nChunks; |
95 | } |
96 | MemorySegment(MemorySegment &&other) { |
        qSwap(pageReservation, other.pageReservation);
        qSwap(base, other.base);
        qSwap(allocatedMap, other.allocatedMap);
        qSwap(availableBytes, other.availableBytes);
        qSwap(nChunks, other.nChunks);
102 | } |
103 | |
104 | ~MemorySegment() { |
105 | if (base) |
106 | pageReservation.deallocate(); |
107 | } |
108 | |
109 | void setBit(size_t index) { |
110 | Q_ASSERT(index < nChunks); |
111 | quint64 bit = static_cast<quint64>(1) << index; |
112 | // qDebug() << " setBit" << hex << index << (index & (Bits - 1)) << bit; |
113 | allocatedMap |= bit; |
114 | } |
115 | void clearBit(size_t index) { |
116 | Q_ASSERT(index < nChunks); |
117 | quint64 bit = static_cast<quint64>(1) << index; |
118 | // qDebug() << " setBit" << hex << index << (index & (Bits - 1)) << bit; |
119 | allocatedMap &= ~bit; |
120 | } |
121 | bool testBit(size_t index) const { |
122 | Q_ASSERT(index < nChunks); |
123 | quint64 bit = static_cast<quint64>(1) << index; |
124 | return (allocatedMap & bit); |
125 | } |
126 | |
127 | Chunk *allocate(size_t size); |
128 | void free(Chunk *chunk, size_t size) { |
129 | DEBUG << "freeing chunk" << chunk; |
130 | size_t index = static_cast<size_t>(chunk - base); |
        size_t end = qMin(static_cast<size_t>(NumChunks), index + (size - 1)/Chunk::ChunkSize + 1);
132 | while (index < end) { |
133 | Q_ASSERT(testBit(index)); |
134 | clearBit(index); |
135 | ++index; |
136 | } |
137 | |
138 | size_t pageSize = WTF::pageSize(); |
139 | size = (size + pageSize - 1) & ~(pageSize - 1); |
140 | #if !defined(Q_OS_LINUX) && !defined(Q_OS_WIN) |
141 | // Linux and Windows zero out pages that have been decommitted and get committed again. |
142 | // unfortunately that's not true on other OSes (e.g. BSD based ones), so zero out the |
143 | // memory before decommit, so that we can be sure that all chunks we allocate will be |
144 | // zero initialized. |
145 | memset(chunk, 0, size); |
146 | #endif |
        pageReservation.decommit(chunk, size);
148 | } |
149 | |
150 | bool contains(Chunk *c) const { |
151 | return c >= base && c < base + nChunks; |
152 | } |
153 | |
    PageReservation pageReservation;
155 | Chunk *base = nullptr; |
156 | quint64 allocatedMap = 0; |
157 | size_t availableBytes = 0; |
158 | uint nChunks = 0; |
159 | }; |
160 | |
161 | Chunk *MemorySegment::allocate(size_t size) |
162 | { |
163 | if (!allocatedMap && size >= SegmentSize) { |
164 | // chunk allocated for one huge allocation |
165 | Q_ASSERT(availableBytes >= size); |
        pageReservation.commit(base, size);
167 | allocatedMap = ~static_cast<quint64>(0); |
168 | return base; |
169 | } |
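    // Otherwise scan the allocation bitmap for a run of enough consecutive free chunks
    // (first fit) and commit only that range.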
170 | size_t requiredChunks = (size + sizeof(Chunk) - 1)/sizeof(Chunk); |
171 | uint sequence = 0; |
172 | Chunk *candidate = nullptr; |
173 | for (uint i = 0; i < nChunks; ++i) { |
        if (!testBit(i)) {
175 | if (!candidate) |
176 | candidate = base + i; |
177 | ++sequence; |
178 | } else { |
179 | candidate = nullptr; |
180 | sequence = 0; |
181 | } |
182 | if (sequence == requiredChunks) { |
            pageReservation.commit(candidate, size);
184 | for (uint i = 0; i < requiredChunks; ++i) |
185 | setBit(candidate - base + i); |
186 | DEBUG << "allocated chunk " << candidate << Qt::hex << size; |
187 | |
188 | return candidate; |
189 | } |
190 | } |
191 | return nullptr; |
192 | } |
193 | |
194 | struct ChunkAllocator { |
195 | ChunkAllocator() {} |
196 | |
197 | size_t requiredChunkSize(size_t size) { |
198 | size += Chunk::HeaderSize; // space required for the Chunk header |
199 | size_t pageSize = WTF::pageSize(); |
200 | size = (size + pageSize - 1) & ~(pageSize - 1); // align to page sizes |
201 | if (size < Chunk::ChunkSize) |
202 | size = Chunk::ChunkSize; |
203 | return size; |
204 | } |
205 | |
206 | Chunk *allocate(size_t size = 0); |
207 | void free(Chunk *chunk, size_t size = 0); |
208 | |
209 | std::vector<MemorySegment> memorySegments; |
210 | }; |
211 | |
212 | Chunk *ChunkAllocator::allocate(size_t size) |
213 | { |
214 | size = requiredChunkSize(size); |
215 | for (auto &m : memorySegments) { |
216 | if (~m.allocatedMap) { |
217 | Chunk *c = m.allocate(size); |
218 | if (c) |
219 | return c; |
220 | } |
221 | } |
222 | |
223 | // allocate a new segment |
    memorySegments.push_back(MemorySegment(size));
225 | Chunk *c = memorySegments.back().allocate(size); |
226 | Q_ASSERT(c); |
227 | return c; |
228 | } |
229 | |
230 | void ChunkAllocator::free(Chunk *chunk, size_t size) |
231 | { |
232 | size = requiredChunkSize(size); |
233 | for (auto &m : memorySegments) { |
        if (m.contains(chunk)) {
235 | m.free(chunk, size); |
236 | return; |
237 | } |
238 | } |
239 | Q_ASSERT(false); |
240 | } |
241 | |
242 | #ifdef DUMP_SWEEP |
243 | QString binary(quintptr n) { |
244 | QString s = QString::number(n, 2); |
245 | while (s.length() < 64) |
246 | s.prepend(QChar::fromLatin1('0')); |
247 | return s; |
248 | } |
249 | #define SDUMP qDebug |
250 | #else |
251 | QString binary(quintptr) { return QString(); } |
252 | #define SDUMP if (1) ; else qDebug |
253 | #endif |
254 | |
255 | // Stores a classname -> freed count mapping. |
256 | typedef QHash<const char*, int> MMStatsHash; |
257 | Q_GLOBAL_STATIC(MMStatsHash, freedObjectStatsGlobal) |
258 | |
259 | // This indirection avoids sticking QHash code in each of the call sites, which |
260 | // shaves off some instructions in the case that it's unused. |
261 | static void increaseFreedCountForClass(const char *className) |
262 | { |
263 | (*freedObjectStatsGlobal())[className]++; |
264 | } |
265 | |
266 | //bool Chunk::sweep(ClassDestroyStatsCallback classCountPtr) |
267 | bool Chunk::sweep(ExecutionEngine *engine) |
268 | { |
269 | bool hasUsedSlots = false; |
270 | SDUMP() << "sweeping chunk" << this; |
271 | HeapItem *o = realBase(); |
272 | bool lastSlotFree = false; |
273 | for (uint i = 0; i < Chunk::EntriesInBitmap; ++i) { |
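        // Slots that hold an object (objectBitmap) but were not marked (blackBitmap)
        // during this GC run are garbage and get freed below.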
274 | quintptr toFree = objectBitmap[i] ^ blackBitmap[i]; |
275 | Q_ASSERT((toFree & objectBitmap[i]) == toFree); // check all black objects are marked as being used |
276 | quintptr e = extendsBitmap[i]; |
277 | SDUMP() << " index=" << i; |
278 | SDUMP() << " toFree =" << binary(toFree); |
279 | SDUMP() << " black =" << binary(blackBitmap[i]); |
280 | SDUMP() << " object =" << binary(objectBitmap[i]); |
281 | SDUMP() << " extends =" << binary(e); |
282 | if (lastSlotFree) |
283 | e &= (e + 1); // clear all lowest extent bits |
284 | while (toFree) { |
            uint index = qCountTrailingZeroBits(toFree);
286 | quintptr bit = (static_cast<quintptr>(1) << index); |
287 | |
288 | toFree ^= bit; // mask out freed slot |
289 | // DEBUG << " index" << hex << index << toFree; |
290 | |
291 | // remove all extends slots that have been freed |
292 | // this is a bit of bit trickery. |
293 | quintptr mask = (bit << 1) - 1; // create a mask of 1's to the right of and up to the current bit |
294 | quintptr objmask = e | mask; // or'ing mask with e gives all ones until the end of the current object |
295 | quintptr result = objmask + 1; |
296 | Q_ASSERT(qCountTrailingZeroBits(result) - index != 0); // ensure we freed something |
297 | result |= mask; // ensure we don't clear stuff to the right of the current object |
298 | e &= result; |
299 | |
300 | HeapItem *itemToFree = o + index; |
301 | Heap::Base *b = *itemToFree; |
302 | const VTable *v = b->internalClass->vtable; |
303 | // if (Q_UNLIKELY(classCountPtr)) |
304 | // classCountPtr(v->className); |
305 | if (v->destroy) { |
306 | v->destroy(b); |
307 | b->_checkIsDestroyed(); |
308 | } |
309 | #ifdef V4_USE_HEAPTRACK |
310 | heaptrack_report_free(itemToFree); |
311 | #endif |
312 | } |
313 | Q_V4_PROFILE_DEALLOC(engine, qPopulationCount((objectBitmap[i] | extendsBitmap[i]) |
314 | - (blackBitmap[i] | e)) * Chunk::SlotSize, |
315 | Profiling::SmallItem); |
316 | objectBitmap[i] = blackBitmap[i]; |
317 | hasUsedSlots |= (blackBitmap[i] != 0); |
318 | extendsBitmap[i] = e; |
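        // Remember whether the last slot of this bitmap word is free, so that extent bits
        // spilling over into the next word can be cleared in the next iteration.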
319 | lastSlotFree = !((objectBitmap[i]|extendsBitmap[i]) >> (sizeof(quintptr)*8 - 1)); |
320 | SDUMP() << " new extends =" << binary(e); |
321 | SDUMP() << " lastSlotFree" << lastSlotFree; |
322 | Q_ASSERT((objectBitmap[i] & extendsBitmap[i]) == 0); |
323 | o += Chunk::Bits; |
324 | } |
325 | // DEBUG << "swept chunk" << this << "freed" << slotsFreed << "slots."; |
326 | return hasUsedSlots; |
327 | } |
328 | |
329 | void Chunk::freeAll(ExecutionEngine *engine) |
330 | { |
331 | // DEBUG << "sweeping chunk" << this << (*freeList); |
332 | HeapItem *o = realBase(); |
333 | for (uint i = 0; i < Chunk::EntriesInBitmap; ++i) { |
334 | quintptr toFree = objectBitmap[i]; |
335 | quintptr e = extendsBitmap[i]; |
336 | // DEBUG << hex << " index=" << i << toFree; |
337 | while (toFree) { |
            uint index = qCountTrailingZeroBits(toFree);
339 | quintptr bit = (static_cast<quintptr>(1) << index); |
340 | |
341 | toFree ^= bit; // mask out freed slot |
342 | // DEBUG << " index" << hex << index << toFree; |
343 | |
344 | // remove all extends slots that have been freed |
345 | // this is a bit of bit trickery. |
346 | quintptr mask = (bit << 1) - 1; // create a mask of 1's to the right of and up to the current bit |
347 | quintptr objmask = e | mask; // or'ing mask with e gives all ones until the end of the current object |
348 | quintptr result = objmask + 1; |
349 | Q_ASSERT(qCountTrailingZeroBits(result) - index != 0); // ensure we freed something |
350 | result |= mask; // ensure we don't clear stuff to the right of the current object |
351 | e &= result; |
352 | |
353 | HeapItem *itemToFree = o + index; |
354 | Heap::Base *b = *itemToFree; |
355 | if (b->internalClass->vtable->destroy) { |
356 | b->internalClass->vtable->destroy(b); |
357 | b->_checkIsDestroyed(); |
358 | } |
359 | #ifdef V4_USE_HEAPTRACK |
360 | heaptrack_report_free(itemToFree); |
361 | #endif |
362 | } |
363 | Q_V4_PROFILE_DEALLOC(engine, (qPopulationCount(objectBitmap[i]|extendsBitmap[i]) |
364 | - qPopulationCount(e)) * Chunk::SlotSize, Profiling::SmallItem); |
365 | objectBitmap[i] = 0; |
366 | extendsBitmap[i] = e; |
367 | o += Chunk::Bits; |
368 | } |
369 | // DEBUG << "swept chunk" << this << "freed" << slotsFreed << "slots."; |
370 | } |
371 | |
372 | void Chunk::resetBlackBits() |
373 | { |
    memset(blackBitmap, 0, sizeof(blackBitmap));
375 | } |
376 | |
377 | void Chunk::sortIntoBins(HeapItem **bins, uint nBins) |
378 | { |
379 | // qDebug() << "sortIntoBins:"; |
380 | HeapItem *base = realBase(); |
381 | #if QT_POINTER_SIZE == 8 |
382 | const int start = 0; |
383 | #else |
384 | const int start = 1; |
385 | #endif |
386 | #ifndef QT_NO_DEBUG |
387 | uint freeSlots = 0; |
388 | uint allocatedSlots = 0; |
389 | #endif |
390 | for (int i = start; i < EntriesInBitmap; ++i) { |
391 | quintptr usedSlots = (objectBitmap[i]|extendsBitmap[i]); |
392 | #if QT_POINTER_SIZE == 8 |
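        // On 64 bit the Chunk header shares the first bitmap word with real slots;
        // treat the header slots as used so they never land in a free bin.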
393 | if (!i) |
394 | usedSlots |= (static_cast<quintptr>(1) << (HeaderSize/SlotSize)) - 1; |
395 | #endif |
396 | #ifndef QT_NO_DEBUG |
        allocatedSlots += qPopulationCount(usedSlots);
398 | // qDebug() << hex << " i=" << i << "used=" << usedSlots; |
399 | #endif |
400 | while (1) { |
            uint index = qCountTrailingZeroBits(usedSlots + 1);
402 | if (index == Bits) |
403 | break; |
404 | uint freeStart = i*Bits + index; |
405 | usedSlots &= ~((static_cast<quintptr>(1) << index) - 1); |
406 | while (!usedSlots) { |
407 | if (++i < EntriesInBitmap) { |
408 | usedSlots = (objectBitmap[i]|extendsBitmap[i]); |
409 | } else { |
410 | Q_ASSERT(i == EntriesInBitmap); |
411 | // Overflows to 0 when counting trailing zeroes above in next iteration. |
412 | // Then, all the bits are zeroes and we break. |
413 | usedSlots = std::numeric_limits<quintptr>::max(); |
414 | break; |
415 | } |
416 | #ifndef QT_NO_DEBUG |
                allocatedSlots += qPopulationCount(usedSlots);
418 | // qDebug() << hex << " i=" << i << "used=" << usedSlots; |
419 | #endif |
420 | } |
421 | HeapItem *freeItem = base + freeStart; |
422 | |
            index = qCountTrailingZeroBits(usedSlots);
424 | usedSlots |= (quintptr(1) << index) - 1; |
425 | uint freeEnd = i*Bits + index; |
426 | uint nSlots = freeEnd - freeStart; |
427 | #ifndef QT_NO_DEBUG |
428 | // qDebug() << hex << " got free slots from" << freeStart << "to" << freeEnd << "n=" << nSlots << "usedSlots=" << usedSlots; |
429 | freeSlots += nSlots; |
430 | #endif |
431 | Q_ASSERT(freeEnd > freeStart && freeEnd <= NumSlots); |
432 | freeItem->freeData.availableSlots = nSlots; |
            uint bin = qMin(nBins - 1, nSlots);
434 | freeItem->freeData.next = bins[bin]; |
435 | bins[bin] = freeItem; |
436 | } |
437 | } |
438 | #ifndef QT_NO_DEBUG |
439 | Q_ASSERT(freeSlots + allocatedSlots == (EntriesInBitmap - start) * 8 * sizeof(quintptr)); |
440 | #endif |
441 | } |
442 | |
443 | HeapItem *BlockAllocator::allocate(size_t size, bool forceAllocation) { |
444 | Q_ASSERT((size % Chunk::SlotSize) == 0); |
445 | size_t slotsRequired = size >> Chunk::SlotSizeShift; |
446 | |
447 | if (allocationStats) |
        ++allocationStats[binForSlots(slotsRequired)];
449 | |
450 | HeapItem **last; |
451 | |
452 | HeapItem *m; |
453 | |
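    // Fast path: the bin for exactly this size may already hold a suitable free run.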
454 | if (slotsRequired < NumBins - 1) { |
455 | m = freeBins[slotsRequired]; |
456 | if (m) { |
457 | freeBins[slotsRequired] = m->freeData.next; |
458 | goto done; |
459 | } |
460 | } |
461 | |
462 | if (nFree >= slotsRequired) { |
463 | // use bump allocation |
464 | Q_ASSERT(nextFree); |
465 | m = nextFree; |
466 | nextFree += slotsRequired; |
467 | nFree -= slotsRequired; |
468 | goto done; |
469 | } |
470 | |
471 | // DEBUG << "No matching bin found for item" << size << bin; |
472 | // search last bin for a large enough item |
473 | last = &freeBins[NumBins - 1]; |
474 | while ((m = *last)) { |
475 | if (m->freeData.availableSlots >= slotsRequired) { |
476 | *last = m->freeData.next; // take it out of the list |
477 | |
478 | size_t remainingSlots = m->freeData.availableSlots - slotsRequired; |
479 | // DEBUG << "found large free slots of size" << m->freeData.availableSlots << m << "remaining" << remainingSlots; |
480 | if (remainingSlots == 0) |
481 | goto done; |
482 | |
483 | HeapItem *remainder = m + slotsRequired; |
484 | if (remainingSlots > nFree) { |
485 | if (nFree) { |
                    size_t bin = binForSlots(nFree);
487 | nextFree->freeData.next = freeBins[bin]; |
488 | nextFree->freeData.availableSlots = nFree; |
489 | freeBins[bin] = nextFree; |
490 | } |
491 | nextFree = remainder; |
492 | nFree = remainingSlots; |
493 | } else { |
494 | remainder->freeData.availableSlots = remainingSlots; |
                size_t binForRemainder = binForSlots(remainingSlots);
496 | remainder->freeData.next = freeBins[binForRemainder]; |
497 | freeBins[binForRemainder] = remainder; |
498 | } |
499 | goto done; |
500 | } |
501 | last = &m->freeData.next; |
502 | } |
503 | |
504 | if (slotsRequired < NumBins - 1) { |
505 | // check if we can split up another slot |
506 | for (size_t i = slotsRequired + 1; i < NumBins - 1; ++i) { |
507 | m = freeBins[i]; |
508 | if (m) { |
509 | freeBins[i] = m->freeData.next; // take it out of the list |
510 | // qDebug() << "got item" << slotsRequired << "from slot" << i; |
511 | size_t remainingSlots = i - slotsRequired; |
512 | Q_ASSERT(remainingSlots < NumBins - 1); |
513 | HeapItem *remainder = m + slotsRequired; |
514 | remainder->freeData.availableSlots = remainingSlots; |
515 | remainder->freeData.next = freeBins[remainingSlots]; |
516 | freeBins[remainingSlots] = remainder; |
517 | goto done; |
518 | } |
519 | } |
520 | } |
521 | |
522 | if (!m) { |
523 | if (!forceAllocation) |
524 | return nullptr; |
525 | if (nFree) { |
526 | // Save any remaining slots of the current chunk |
527 | // for later, smaller allocations. |
            size_t bin = binForSlots(nFree);
529 | nextFree->freeData.next = freeBins[bin]; |
530 | nextFree->freeData.availableSlots = nFree; |
531 | freeBins[bin] = nextFree; |
532 | } |
533 | Chunk *newChunk = chunkAllocator->allocate(); |
534 | Q_V4_PROFILE_ALLOC(engine, Chunk::DataSize, Profiling::HeapPage); |
        chunks.push_back(newChunk);
536 | nextFree = newChunk->first(); |
537 | nFree = Chunk::AvailableSlots; |
538 | m = nextFree; |
539 | nextFree += slotsRequired; |
540 | nFree -= slotsRequired; |
541 | } |
542 | |
543 | done: |
544 | m->setAllocatedSlots(slotsRequired); |
545 | Q_V4_PROFILE_ALLOC(engine, slotsRequired * Chunk::SlotSize, Profiling::SmallItem); |
546 | #ifdef V4_USE_HEAPTRACK |
547 | heaptrack_report_alloc(m, slotsRequired * Chunk::SlotSize); |
548 | #endif |
549 | // DEBUG << " " << hex << m->chunk() << m->chunk()->objectBitmap[0] << m->chunk()->extendsBitmap[0] << (m - m->chunk()->realBase()); |
550 | return m; |
551 | } |
552 | |
553 | void BlockAllocator::sweep() |
554 | { |
555 | nextFree = nullptr; |
556 | nFree = 0; |
    memset(freeBins, 0, sizeof(freeBins));
558 | |
559 | // qDebug() << "BlockAlloc: sweep"; |
560 | usedSlotsAfterLastSweep = 0; |
561 | |
    auto firstEmptyChunk = std::partition(chunks.begin(), chunks.end(), [this](Chunk *c) {
563 | return c->sweep(engine); |
564 | }); |
565 | |
    std::for_each(chunks.begin(), firstEmptyChunk, [this](Chunk *c) {
        c->sortIntoBins(freeBins, NumBins);
568 | usedSlotsAfterLastSweep += c->nUsedSlots(); |
569 | }); |
570 | |
571 | // only free the chunks at the end to avoid that the sweep() calls indirectly |
572 | // access freed memory |
    std::for_each(firstEmptyChunk, chunks.end(), [this](Chunk *c) {
        Q_V4_PROFILE_DEALLOC(engine, Chunk::DataSize, Profiling::HeapPage);
        chunkAllocator->free(c);
576 | }); |
577 | |
    chunks.erase(firstEmptyChunk, chunks.end());
579 | } |
580 | |
581 | void BlockAllocator::freeAll() |
582 | { |
583 | for (auto c : chunks) |
584 | c->freeAll(engine); |
585 | for (auto c : chunks) { |
586 | Q_V4_PROFILE_DEALLOC(engine, Chunk::DataSize, Profiling::HeapPage); |
        chunkAllocator->free(c);
588 | } |
589 | } |
590 | |
591 | void BlockAllocator::resetBlackBits() |
592 | { |
593 | for (auto c : chunks) |
594 | c->resetBlackBits(); |
595 | } |
596 | |
597 | HeapItem *HugeItemAllocator::allocate(size_t size) { |
598 | MemorySegment *m = nullptr; |
599 | Chunk *c = nullptr; |
600 | if (size >= MemorySegment::SegmentSize/2) { |
        // too large to handle through the ChunkAllocator, let's get our own memory segment
602 | size += Chunk::HeaderSize; // space required for the Chunk header |
603 | size_t pageSize = WTF::pageSize(); |
604 | size = (size + pageSize - 1) & ~(pageSize - 1); // align to page sizes |
605 | m = new MemorySegment(size); |
606 | c = m->allocate(size); |
607 | } else { |
608 | c = chunkAllocator->allocate(size); |
609 | } |
610 | Q_ASSERT(c); |
    chunks.push_back(HugeChunk{m, c, size});
    Chunk::setBit(c->objectBitmap, c->first() - c->realBase());
613 | Q_V4_PROFILE_ALLOC(engine, size, Profiling::LargeItem); |
614 | #ifdef V4_USE_HEAPTRACK |
615 | heaptrack_report_alloc(c, size); |
616 | #endif |
617 | return c->first(); |
618 | } |
619 | |
620 | static void freeHugeChunk(ChunkAllocator *chunkAllocator, const HugeItemAllocator::HugeChunk &c, ClassDestroyStatsCallback classCountPtr) |
621 | { |
622 | HeapItem *itemToFree = c.chunk->first(); |
623 | Heap::Base *b = *itemToFree; |
624 | const VTable *v = b->internalClass->vtable; |
625 | if (Q_UNLIKELY(classCountPtr)) |
626 | classCountPtr(v->className); |
627 | |
628 | if (v->destroy) { |
629 | v->destroy(b); |
630 | b->_checkIsDestroyed(); |
631 | } |
632 | if (c.segment) { |
633 | // own memory segment |
        c.segment->free(c.chunk, c.size);
635 | delete c.segment; |
636 | } else { |
        chunkAllocator->free(c.chunk, c.size);
638 | } |
639 | #ifdef V4_USE_HEAPTRACK |
640 | heaptrack_report_free(c.chunk); |
641 | #endif |
642 | } |
643 | |
644 | void HugeItemAllocator::sweep(ClassDestroyStatsCallback classCountPtr) |
645 | { |
646 | auto isBlack = [this, classCountPtr] (const HugeChunk &c) { |
647 | bool b = c.chunk->first()->isBlack(); |
        Chunk::clearBit(c.chunk->blackBitmap, c.chunk->first() - c.chunk->realBase());
649 | if (!b) { |
650 | Q_V4_PROFILE_DEALLOC(engine, c.size, Profiling::LargeItem); |
651 | freeHugeChunk(chunkAllocator, c, classCountPtr); |
652 | } |
653 | return !b; |
654 | }; |
655 | |
    auto newEnd = std::remove_if(chunks.begin(), chunks.end(), isBlack);
    chunks.erase(newEnd, chunks.end());
658 | } |
659 | |
660 | void HugeItemAllocator::resetBlackBits() |
661 | { |
662 | for (auto c : chunks) |
        Chunk::clearBit(c.chunk->blackBitmap, c.chunk->first() - c.chunk->realBase());
664 | } |
665 | |
666 | void HugeItemAllocator::freeAll() |
667 | { |
668 | for (auto &c : chunks) { |
669 | Q_V4_PROFILE_DEALLOC(engine, c.size, Profiling::LargeItem); |
        freeHugeChunk(chunkAllocator, c, nullptr);
671 | } |
672 | } |
673 | |
674 | |
675 | MemoryManager::MemoryManager(ExecutionEngine *engine) |
676 | : engine(engine) |
677 | , chunkAllocator(new ChunkAllocator) |
678 | , blockAllocator(chunkAllocator, engine) |
679 | , icAllocator(chunkAllocator, engine) |
680 | , hugeItemAllocator(chunkAllocator, engine) |
681 | , m_persistentValues(new PersistentValueStorage(engine)) |
682 | , m_weakValues(new PersistentValueStorage(engine)) |
683 | , unmanagedHeapSizeGCLimit(MinUnmanagedHeapSizeGCLimit) |
    , aggressiveGC(!qEnvironmentVariableIsEmpty("QV4_MM_AGGRESSIVE_GC"))
685 | , gcStats(lcGcStats().isDebugEnabled()) |
686 | , gcCollectorStats(lcGcAllocatorStats().isDebugEnabled()) |
687 | { |
688 | #ifdef V4_USE_VALGRIND |
689 | VALGRIND_CREATE_MEMPOOL(this, 0, true); |
690 | #endif |
    memset(statistics.allocations, 0, sizeof(statistics.allocations));
692 | if (gcStats) |
693 | blockAllocator.allocationStats = statistics.allocations; |
694 | } |
695 | |
696 | Heap::Base *MemoryManager::allocString(std::size_t unmanagedSize) |
697 | { |
    const size_t stringSize = align(sizeof(Heap::String));
699 | #ifdef MM_STATS |
700 | lastAllocRequestedSlots = stringSize >> Chunk::SlotSizeShift; |
701 | ++allocationCount; |
702 | #endif |
703 | unmanagedHeapSize += unmanagedSize; |
704 | |
    HeapItem *m = allocate(&blockAllocator, stringSize);
    memset(m, 0, stringSize);
707 | if (gcBlocked) { |
708 | // If the gc is running right now, it will not have a chance to mark the newly created item |
709 | // and may therefore sweep it right away. |
710 | // Protect the new object from the current GC run to avoid this. |
711 | m->as<Heap::Base>()->setMarkBit(); |
712 | } |
713 | |
714 | return *m; |
715 | } |
716 | |
717 | Heap::Base *MemoryManager::allocData(std::size_t size) |
718 | { |
719 | #ifdef MM_STATS |
720 | lastAllocRequestedSlots = size >> Chunk::SlotSizeShift; |
721 | ++allocationCount; |
722 | #endif |
723 | |
724 | Q_ASSERT(size >= Chunk::SlotSize); |
725 | Q_ASSERT(size % Chunk::SlotSize == 0); |
726 | |
    HeapItem *m = allocate(&blockAllocator, size);
    memset(m, 0, size);
729 | if (gcBlocked) { |
730 | // If the gc is running right now, it will not have a chance to mark the newly created item |
731 | // and may therefore sweep it right away. |
732 | // Protect the new object from the current GC run to avoid this. |
733 | m->as<Heap::Base>()->setMarkBit(); |
734 | } |
735 | |
736 | return *m; |
737 | } |
738 | |
739 | Heap::Object *MemoryManager::allocObjectWithMemberData(const QV4::VTable *vtable, uint nMembers) |
740 | { |
741 | uint size = (vtable->nInlineProperties + vtable->inlinePropertyOffset)*sizeof(Value); |
742 | Q_ASSERT(!(size % sizeof(HeapItem))); |
743 | |
744 | Heap::Object *o; |
745 | if (nMembers <= vtable->nInlineProperties) { |
746 | o = static_cast<Heap::Object *>(allocData(size)); |
747 | } else { |
748 | // Allocate both in one go through the block allocator |
749 | nMembers -= vtable->nInlineProperties; |
        std::size_t memberSize = align(sizeof(Heap::MemberData) + (nMembers - 1)*sizeof(Value));
751 | size_t totalSize = size + memberSize; |
752 | Heap::MemberData *m; |
753 | if (totalSize > Chunk::DataSize) { |
754 | o = static_cast<Heap::Object *>(allocData(size)); |
            m = hugeItemAllocator.allocate(memberSize)->as<Heap::MemberData>();
756 | } else { |
            HeapItem *mh = reinterpret_cast<HeapItem *>(allocData(totalSize));
758 | Heap::Base *b = *mh; |
759 | o = static_cast<Heap::Object *>(b); |
760 | mh += (size >> Chunk::SlotSizeShift); |
761 | m = mh->as<Heap::MemberData>(); |
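            // The member data occupies the tail of the combined allocation; mark its first
            // slot as an object start (not an extent) so the GC treats it as a separate item.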
762 | Chunk *c = mh->chunk(); |
763 | size_t index = mh - c->realBase(); |
            Chunk::setBit(c->objectBitmap, index);
            Chunk::clearBit(c->extendsBitmap, index);
766 | } |
        o->memberData.set(engine, m);
        m->internalClass.set(engine, engine->internalClasses(EngineBase::Class_MemberData));
769 | Q_ASSERT(o->memberData->internalClass); |
770 | m->values.alloc = static_cast<uint>((memberSize - sizeof(Heap::MemberData) + sizeof(Value))/sizeof(Value)); |
771 | m->values.size = o->memberData->values.alloc; |
772 | m->init(); |
773 | // qDebug() << " got" << o->memberData << o->memberData->size; |
774 | } |
775 | // qDebug() << "allocating object with memberData" << o << o->memberData.operator->(); |
776 | return o; |
777 | } |
778 | |
779 | static uint markStackSize = 0; |
780 | |
781 | MarkStack::MarkStack(ExecutionEngine *engine) |
782 | : m_engine(engine) |
783 | { |
784 | m_base = (Heap::Base **)engine->gcStack->base(); |
785 | m_top = m_base; |
786 | const size_t size = engine->maxGCStackSize() / sizeof(Heap::Base); |
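    // The hard limit is the end of the reserved GC stack; the soft limit at 3/4 of it
    // leaves headroom before the hard limit is actually hit.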
787 | m_hardLimit = m_base + size; |
788 | m_softLimit = m_base + size * 3 / 4; |
789 | } |
790 | |
791 | void MarkStack::drain() |
792 | { |
793 | while (m_top > m_base) { |
794 | Heap::Base *h = pop(); |
795 | ++markStackSize; |
796 | Q_ASSERT(h); // at this point we should only have Heap::Base objects in this area on the stack. If not, weird things might happen. |
797 | h->internalClass->vtable->markObjects(h, this); |
798 | } |
799 | } |
800 | |
801 | void MemoryManager::collectRoots(MarkStack *markStack) |
802 | { |
803 | engine->markObjects(markStack); |
804 | |
805 | // qDebug() << " mark stack after engine->mark" << (engine->jsStackTop - markBase); |
806 | |
807 | collectFromJSStack(markStack); |
808 | |
809 | // qDebug() << " mark stack after js stack collect" << (engine->jsStackTop - markBase); |
810 | m_persistentValues->mark(markStack); |
811 | |
812 | // qDebug() << " mark stack after persistants" << (engine->jsStackTop - markBase); |
813 | |
814 | // Preserve QObject ownership rules within JavaScript: A parent with c++ ownership |
815 | // keeps all of its children alive in JavaScript. |
816 | |
817 | // Do this _after_ collectFromStack to ensure that processing the weak |
    // managed objects in the loop down there doesn't make them end up as leftovers
819 | // on the stack and thus always get collected. |
820 | for (PersistentValueStorage::Iterator it = m_weakValues->begin(); it != m_weakValues->end(); ++it) { |
821 | QObjectWrapper *qobjectWrapper = (*it).as<QObjectWrapper>(); |
822 | if (!qobjectWrapper) |
823 | continue; |
824 | QObject *qobject = qobjectWrapper->object(); |
825 | if (!qobject) |
826 | continue; |
        bool keepAlive = QQmlData::keepAliveDuringGarbageCollection(qobject);
828 | |
829 | if (!keepAlive) { |
830 | if (QObject *parent = qobject->parent()) { |
831 | while (parent->parent()) |
832 | parent = parent->parent(); |
833 | |
                keepAlive = QQmlData::keepAliveDuringGarbageCollection(parent);
835 | } |
836 | } |
837 | |
838 | if (keepAlive) |
839 | qobjectWrapper->mark(markStack); |
840 | } |
841 | } |
842 | |
843 | void MemoryManager::mark() |
844 | { |
845 | markStackSize = 0; |
846 | MarkStack markStack(engine); |
    collectRoots(&markStack);
848 | // dtor of MarkStack drains |
849 | } |
850 | |
851 | void MemoryManager::sweep(bool lastSweep, ClassDestroyStatsCallback classCountPtr) |
852 | { |
853 | for (PersistentValueStorage::Iterator it = m_weakValues->begin(); it != m_weakValues->end(); ++it) { |
854 | Managed *m = (*it).managed(); |
855 | if (!m || m->markBit()) |
856 | continue; |
857 | // we need to call destroyObject on qobjectwrappers now, so that they can emit the destroyed |
858 | // signal before we start sweeping the heap |
859 | if (QObjectWrapper *qobjectWrapper = (*it).as<QObjectWrapper>()) |
            qobjectWrapper->destroyObject(lastSweep);
861 | } |
862 | |
863 | // remove objects from weak maps and sets |
864 | Heap::MapObject *map = weakMaps; |
865 | Heap::MapObject **lastMap = &weakMaps; |
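    // Maps that are still marked survive with their unmarked keys pruned;
    // unmarked maps are unlinked from the weak-map list.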
866 | while (map) { |
867 | if (map->isMarked()) { |
868 | map->removeUnmarkedKeys(); |
869 | *lastMap = map; |
870 | lastMap = &map->nextWeakMap; |
871 | } |
872 | map = map->nextWeakMap; |
873 | } |
874 | |
875 | Heap::SetObject *set = weakSets; |
876 | Heap::SetObject **lastSet = &weakSets; |
877 | while (set) { |
878 | if (set->isMarked()) { |
879 | set->removeUnmarkedKeys(); |
880 | *lastSet = set; |
881 | lastSet = &set->nextWeakSet; |
882 | } |
883 | set = set->nextWeakSet; |
884 | } |
885 | |
886 | // onDestruction handlers may have accessed other QObject wrappers and reset their value, so ensure |
887 | // that they are all set to undefined. |
888 | for (PersistentValueStorage::Iterator it = m_weakValues->begin(); it != m_weakValues->end(); ++it) { |
889 | Managed *m = (*it).managed(); |
890 | if (!m || m->markBit()) |
891 | continue; |
892 | (*it) = Value::undefinedValue(); |
893 | } |
894 | |
895 | // Now it is time to free QV4::QObjectWrapper Value, we must check the Value's tag to make sure its object has been destroyed |
896 | const int pendingCount = m_pendingFreedObjectWrapperValue.size(); |
897 | if (pendingCount) { |
898 | QVector<Value *> remainingWeakQObjectWrappers; |
        remainingWeakQObjectWrappers.reserve(pendingCount);
900 | for (int i = 0; i < pendingCount; ++i) { |
901 | Value *v = m_pendingFreedObjectWrapperValue.at(i); |
902 | if (v->isUndefined() || v->isEmpty()) |
903 | PersistentValueStorage::free(v); |
904 | else |
                remainingWeakQObjectWrappers.append(v);
906 | } |
907 | m_pendingFreedObjectWrapperValue = remainingWeakQObjectWrappers; |
908 | } |
909 | |
910 | if (MultiplyWrappedQObjectMap *multiplyWrappedQObjects = engine->m_multiplyWrappedQObjects) { |
911 | for (MultiplyWrappedQObjectMap::Iterator it = multiplyWrappedQObjects->begin(); it != multiplyWrappedQObjects->end();) { |
912 | if (it.value().isNullOrUndefined()) |
913 | it = multiplyWrappedQObjects->erase(it); |
914 | else |
915 | ++it; |
916 | } |
917 | } |
918 | |
919 | |
920 | if (!lastSweep) { |
921 | engine->identifierTable->sweep(); |
922 | blockAllocator.sweep(/*classCountPtr*/); |
923 | hugeItemAllocator.sweep(classCountPtr); |
924 | icAllocator.sweep(/*classCountPtr*/); |
925 | } |
926 | } |
927 | |
928 | bool MemoryManager::shouldRunGC() const |
929 | { |
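    // Trigger a run once the managed heap exceeds the minimum size and the total slot
    // count has grown past GCOverallocation percent of the slots still in use after
    // the last full sweep.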
930 | size_t total = blockAllocator.totalSlots() + icAllocator.totalSlots(); |
931 | if (total > MinSlotsGCLimit && usedSlotsAfterLastFullSweep * GCOverallocation < total * 100) |
932 | return true; |
933 | return false; |
934 | } |
935 | |
936 | static size_t dumpBins(BlockAllocator *b, const char *title) |
937 | { |
938 | const QLoggingCategory &stats = lcGcAllocatorStats(); |
939 | size_t totalSlotMem = 0; |
940 | if (title) |
        qDebug(stats) << "Slot map for" << title << "allocator:";
942 | for (uint i = 0; i < BlockAllocator::NumBins; ++i) { |
943 | uint nEntries = 0; |
944 | HeapItem *h = b->freeBins[i]; |
945 | while (h) { |
946 | ++nEntries; |
947 | totalSlotMem += h->freeData.availableSlots; |
948 | h = h->freeData.next; |
949 | } |
950 | if (title) |
            qDebug(stats) << " number of entries in slot" << i << ":" << nEntries;
952 | } |
    SDUMP() << " large slot map";
954 | HeapItem *h = b->freeBins[BlockAllocator::NumBins - 1]; |
955 | while (h) { |
956 | SDUMP() << " " << Qt::hex << (quintptr(h)/32) << h->freeData.availableSlots; |
957 | h = h->freeData.next; |
958 | } |
959 | |
960 | if (title) |
        qDebug(stats) << " total mem in bins" << totalSlotMem*Chunk::SlotSize;
962 | return totalSlotMem*Chunk::SlotSize; |
963 | } |
964 | |
965 | void MemoryManager::runGC() |
966 | { |
967 | if (gcBlocked) { |
968 | // qDebug() << "Not running GC."; |
969 | return; |
970 | } |
971 | |
972 | QScopedValueRollback<bool> gcBlocker(gcBlocked, true); |
973 | // qDebug() << "runGC"; |
974 | |
975 | if (gcStats) { |
        statistics.maxReservedMem = qMax(statistics.maxReservedMem, getAllocatedMem());
        statistics.maxAllocatedMem = qMax(statistics.maxAllocatedMem, getUsedMem() + getLargeItemsMem());
978 | } |
979 | |
980 | if (!gcCollectorStats) { |
981 | mark(); |
982 | sweep(); |
983 | } else { |
984 | bool triggeredByUnmanagedHeap = (unmanagedHeapSize > unmanagedHeapSizeGCLimit); |
985 | size_t oldUnmanagedSize = unmanagedHeapSize; |
986 | |
987 | const size_t totalMem = getAllocatedMem(); |
988 | const size_t usedBefore = getUsedMem(); |
989 | const size_t largeItemsBefore = getLargeItemsMem(); |
990 | |
991 | const QLoggingCategory &stats = lcGcAllocatorStats(); |
        qDebug(stats) << "========== GC ==========";
#ifdef MM_STATS
        qDebug(stats) << " Triggered by alloc request of" << lastAllocRequestedSlots << "slots.";
        qDebug(stats) << " Allocations since last GC" << allocationCount;
        allocationCount = 0;
#endif
        size_t oldChunks = blockAllocator.chunks.size();
        qDebug(stats) << "Allocated" << totalMem << "bytes in" << oldChunks << "chunks";
        qDebug(stats) << "Fragmented memory before GC" << (totalMem - usedBefore);
        dumpBins(&blockAllocator, "Block");
        dumpBins(&icAllocator, "InternalClass");
1003 | |
1004 | QElapsedTimer t; |
1005 | t.start(); |
1006 | mark(); |
1007 | qint64 markTime = t.nsecsElapsed()/1000; |
1008 | t.restart(); |
        sweep(false, increaseFreedCountForClass);
1010 | const size_t usedAfter = getUsedMem(); |
1011 | const size_t largeItemsAfter = getLargeItemsMem(); |
1012 | qint64 sweepTime = t.nsecsElapsed()/1000; |
1013 | |
1014 | if (triggeredByUnmanagedHeap) { |
            qDebug(stats) << "triggered by unmanaged heap:";
            qDebug(stats) << " old unmanaged heap size:" << oldUnmanagedSize;
            qDebug(stats) << " new unmanaged heap:" << unmanagedHeapSize;
            qDebug(stats) << " unmanaged heap limit:" << unmanagedHeapSizeGCLimit;
1019 | } |
        size_t memInBins = dumpBins(&blockAllocator, "Block")
                + dumpBins(&icAllocator, "InternalClass");
        qDebug(stats) << "Marked objects in" << markTime << "us.";
        qDebug(stats) << " " << markStackSize << "objects marked";
        qDebug(stats) << "Swept objects in" << sweepTime << "us.";
1025 | |
1026 | // sort our object types by number of freed instances |
1027 | MMStatsHash freedObjectStats; |
        std::swap(freedObjectStats, *freedObjectStatsGlobal());
1029 | typedef std::pair<const char*, int> ObjectStatInfo; |
1030 | std::vector<ObjectStatInfo> freedObjectsSorted; |
        freedObjectsSorted.reserve(freedObjectStats.size());
1032 | for (auto it = freedObjectStats.constBegin(); it != freedObjectStats.constEnd(); ++it) { |
            freedObjectsSorted.push_back(std::make_pair(it.key(), it.value()));
1034 | } |
        std::sort(freedObjectsSorted.begin(), freedObjectsSorted.end(), [](const ObjectStatInfo &a, const ObjectStatInfo &b) {
            return a.second > b.second && strcmp(a.first, b.first) < 0;
1037 | }); |
1038 | |
        qDebug(stats) << "Used memory before GC:" << usedBefore;
        qDebug(stats) << "Used memory after GC:" << usedAfter;
        qDebug(stats) << "Freed up bytes :" << (usedBefore - usedAfter);
        qDebug(stats) << "Freed up chunks :" << (oldChunks - blockAllocator.chunks.size());
1043 | size_t lost = blockAllocator.allocatedMem() + icAllocator.allocatedMem() |
1044 | - memInBins - usedAfter; |
1045 | if (lost) |
            qDebug(stats) << "!!!!!!!!!!!!!!!!!!!!! LOST MEM:" << lost << "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!";
1047 | if (largeItemsBefore || largeItemsAfter) { |
            qDebug(stats) << "Large item memory before GC:" << largeItemsBefore;
            qDebug(stats) << "Large item memory after GC:" << largeItemsAfter;
            qDebug(stats) << "Large item memory freed up:" << (largeItemsBefore - largeItemsAfter);
1051 | } |
1052 | |
1053 | for (auto it = freedObjectsSorted.cbegin(); it != freedObjectsSorted.cend(); ++it) { |
            qDebug(stats).noquote() << QString::fromLatin1("Freed JS type: %1 (%2 instances)").arg(QString::fromLatin1(it->first), QString::number(it->second));
1055 | } |
1056 | |
        qDebug(stats) << "======== End GC ========";
1058 | } |
1059 | |
1060 | if (gcStats) |
        statistics.maxUsedMem = qMax(statistics.maxUsedMem, getUsedMem() + getLargeItemsMem());
1062 | |
1063 | if (aggressiveGC) { |
        // ensure we don't 'lose' any memory
1065 | Q_ASSERT(blockAllocator.allocatedMem() |
1066 | == blockAllocator.usedMem() + dumpBins(&blockAllocator, nullptr)); |
1067 | Q_ASSERT(icAllocator.allocatedMem() |
1068 | == icAllocator.usedMem() + dumpBins(&icAllocator, nullptr)); |
1069 | } |
1070 | |
1071 | usedSlotsAfterLastFullSweep = blockAllocator.usedSlotsAfterLastSweep + icAllocator.usedSlotsAfterLastSweep; |
1072 | |
1073 | // reset all black bits |
1074 | blockAllocator.resetBlackBits(); |
1075 | hugeItemAllocator.resetBlackBits(); |
1076 | icAllocator.resetBlackBits(); |
1077 | } |
1078 | |
1079 | size_t MemoryManager::getUsedMem() const |
1080 | { |
1081 | return blockAllocator.usedMem() + icAllocator.usedMem(); |
1082 | } |
1083 | |
1084 | size_t MemoryManager::getAllocatedMem() const |
1085 | { |
1086 | return blockAllocator.allocatedMem() + icAllocator.allocatedMem() + hugeItemAllocator.usedMem(); |
1087 | } |
1088 | |
1089 | size_t MemoryManager::getLargeItemsMem() const |
1090 | { |
1091 | return hugeItemAllocator.usedMem(); |
1092 | } |
1093 | |
1094 | void MemoryManager::registerWeakMap(Heap::MapObject *map) |
1095 | { |
1096 | map->nextWeakMap = weakMaps; |
1097 | weakMaps = map; |
1098 | } |
1099 | |
1100 | void MemoryManager::registerWeakSet(Heap::SetObject *set) |
1101 | { |
1102 | set->nextWeakSet = weakSets; |
1103 | weakSets = set; |
1104 | } |
1105 | |
1106 | MemoryManager::~MemoryManager() |
1107 | { |
1108 | delete m_persistentValues; |
1109 | |
1110 | dumpStats(); |
1111 | |
1112 | sweep(/*lastSweep*/true); |
1113 | blockAllocator.freeAll(); |
1114 | hugeItemAllocator.freeAll(); |
1115 | icAllocator.freeAll(); |
1116 | |
1117 | delete m_weakValues; |
1118 | #ifdef V4_USE_VALGRIND |
1119 | VALGRIND_DESTROY_MEMPOOL(this); |
1120 | #endif |
1121 | delete chunkAllocator; |
1122 | } |
1123 | |
1124 | |
1125 | void MemoryManager::dumpStats() const |
1126 | { |
1127 | if (!gcStats) |
1128 | return; |
1129 | |
1130 | const QLoggingCategory &stats = lcGcStats(); |
    qDebug(stats) << "Qml GC memory allocation statistics:";
    qDebug(stats) << "Total memory allocated:" << statistics.maxReservedMem;
    qDebug(stats) << "Max memory used before a GC run:" << statistics.maxAllocatedMem;
    qDebug(stats) << "Max memory used after a GC run:" << statistics.maxUsedMem;
    qDebug(stats) << "Requests for different item sizes:";
    for (int i = 1; i < BlockAllocator::NumBins - 1; ++i)
        qDebug(stats) << " <" << (i << Chunk::SlotSizeShift) << " bytes: " << statistics.allocations[i];
    qDebug(stats) << " >=" << ((BlockAllocator::NumBins - 1) << Chunk::SlotSizeShift) << " bytes: " << statistics.allocations[BlockAllocator::NumBins - 1];
1139 | } |
1140 | |
1141 | void MemoryManager::collectFromJSStack(MarkStack *markStack) const |
1142 | { |
1143 | Value *v = engine->jsStackBase; |
1144 | Value *top = engine->jsStackTop; |
1145 | while (v < top) { |
1146 | Managed *m = v->managed(); |
1147 | if (m) { |
1148 | Q_ASSERT(m->inUse()); |
1149 | // Skip pointers to already freed objects, they are bogus as well |
1150 | m->mark(markStack); |
1151 | } |
1152 | ++v; |
1153 | } |
1154 | } |
1155 | |
1156 | } // namespace QV4 |
1157 | |
1158 | QT_END_NAMESPACE |
1159 | |