1 | /**************************************************************************** |
2 | ** |
3 | ** Copyright (C) 2016 The Qt Company Ltd. |
4 | ** Contact: https://www.qt.io/licensing/ |
5 | ** |
6 | ** This file is part of the QtQml module of the Qt Toolkit. |
7 | ** |
8 | ** $QT_BEGIN_LICENSE:LGPL$ |
9 | ** Commercial License Usage |
10 | ** Licensees holding valid commercial Qt licenses may use this file in |
11 | ** accordance with the commercial license agreement provided with the |
12 | ** Software or, alternatively, in accordance with the terms contained in |
13 | ** a written agreement between you and The Qt Company. For licensing terms |
14 | ** and conditions see https://www.qt.io/terms-conditions. For further |
15 | ** information use the contact form at https://www.qt.io/contact-us. |
16 | ** |
17 | ** GNU Lesser General Public License Usage |
18 | ** Alternatively, this file may be used under the terms of the GNU Lesser |
19 | ** General Public License version 3 as published by the Free Software |
20 | ** Foundation and appearing in the file LICENSE.LGPL3 included in the |
21 | ** packaging of this file. Please review the following information to |
22 | ** ensure the GNU Lesser General Public License version 3 requirements |
23 | ** will be met: https://www.gnu.org/licenses/lgpl-3.0.html. |
24 | ** |
25 | ** GNU General Public License Usage |
26 | ** Alternatively, this file may be used under the terms of the GNU |
27 | ** General Public License version 2.0 or (at your option) the GNU General |
28 | ** Public license version 3 or any later version approved by the KDE Free |
29 | ** Qt Foundation. The licenses are as published by the Free Software |
30 | ** Foundation and appearing in the file LICENSE.GPL2 and LICENSE.GPL3 |
31 | ** included in the packaging of this file. Please review the following |
32 | ** information to ensure the GNU General Public License requirements will |
33 | ** be met: https://www.gnu.org/licenses/gpl-2.0.html and |
34 | ** https://www.gnu.org/licenses/gpl-3.0.html. |
35 | ** |
36 | ** $QT_END_LICENSE$ |
37 | ** |
38 | ****************************************************************************/ |
39 | |
40 | #include "qv4engine_p.h" |
41 | #include "qv4object_p.h" |
42 | #include "qv4objectproto_p.h" |
43 | #include "qv4mm_p.h" |
44 | #include "qv4qobjectwrapper_p.h" |
45 | #include "qv4identifiertable_p.h" |
46 | #include <QtCore/qalgorithms.h> |
47 | #include <QtCore/private/qnumeric_p.h> |
48 | #include <QtCore/qloggingcategory.h> |
49 | #include <private/qv4alloca_p.h> |
50 | #include <qqmlengine.h> |
51 | #include "PageReservation.h" |
52 | #include "PageAllocation.h" |
53 | #include "PageAllocationAligned.h" |
54 | #include "StdLibExtras.h" |
55 | |
56 | #include <QElapsedTimer> |
57 | #include <QMap> |
58 | #include <QScopedValueRollback> |
59 | |
60 | #include <iostream> |
61 | #include <cstdlib> |
62 | #include <algorithm> |
63 | #include "qv4profiling_p.h" |
64 | #include "qv4mapobject_p.h" |
65 | #include "qv4setobject_p.h" |
66 | #include "qv4writebarrier_p.h" |
67 | |
68 | //#define MM_STATS |
69 | |
70 | #if !defined(MM_STATS) && !defined(QT_NO_DEBUG) |
71 | #define MM_STATS |
72 | #endif |
73 | |
#define MM_DEBUG 0

#if MM_DEBUG
75 | #define DEBUG qDebug() << "MM:" |
76 | #else |
77 | #define DEBUG if (1) ; else qDebug() << "MM:" |
78 | #endif |
79 | |
80 | #ifdef V4_USE_VALGRIND |
81 | #include <valgrind/valgrind.h> |
82 | #include <valgrind/memcheck.h> |
83 | #endif |
84 | |
85 | #ifdef V4_USE_HEAPTRACK |
86 | #include <heaptrack_api.h> |
87 | #endif |
88 | |
89 | #if OS(QNX) |
90 | #include <sys/storage.h> // __tls() |
91 | #endif |
92 | |
93 | #if USE(PTHREADS) && HAVE(PTHREAD_NP_H) |
94 | #include <pthread_np.h> |
95 | #endif |
96 | |
Q_LOGGING_CATEGORY(lcGcStats, "qt.qml.gc.statistics")
Q_DECLARE_LOGGING_CATEGORY(lcGcStats)
Q_LOGGING_CATEGORY(lcGcAllocatorStats, "qt.qml.gc.allocatorStats")
100 | Q_DECLARE_LOGGING_CATEGORY(lcGcAllocatorStats) |
101 | |
102 | using namespace WTF; |
103 | |
104 | QT_BEGIN_NAMESPACE |
105 | |
106 | namespace QV4 { |
107 | |
108 | enum { |
109 | MinSlotsGCLimit = QV4::Chunk::AvailableSlots*16, |
110 | GCOverallocation = 200 /* Max overallocation by the GC in % */ |
111 | }; |
112 | |
113 | struct MemorySegment { |
114 | enum { |
115 | #ifdef Q_OS_RTEMS |
116 | NumChunks = sizeof(quint64), |
117 | #else |
118 | NumChunks = 8*sizeof(quint64), |
119 | #endif |
120 | SegmentSize = NumChunks*Chunk::ChunkSize, |
121 | }; |
122 | |
123 | MemorySegment(size_t size) |
124 | { |
        size += Chunk::ChunkSize; // make sure we can get enough 64k-aligned memory
126 | if (size < SegmentSize) |
127 | size = SegmentSize; |
128 | |
        pageReservation = PageReservation::reserve(size, OSAllocator::JSGCHeapPages);
130 | base = reinterpret_cast<Chunk *>((reinterpret_cast<quintptr>(pageReservation.base()) + Chunk::ChunkSize - 1) & ~(Chunk::ChunkSize - 1)); |
131 | nChunks = NumChunks; |
132 | availableBytes = size - (reinterpret_cast<quintptr>(base) - reinterpret_cast<quintptr>(pageReservation.base())); |
133 | if (availableBytes < SegmentSize) |
134 | --nChunks; |
135 | } |
136 | MemorySegment(MemorySegment &&other) { |
        qSwap(pageReservation, other.pageReservation);
        qSwap(base, other.base);
        qSwap(allocatedMap, other.allocatedMap);
        qSwap(availableBytes, other.availableBytes);
        qSwap(nChunks, other.nChunks);
142 | } |
143 | |
144 | ~MemorySegment() { |
145 | if (base) |
146 | pageReservation.deallocate(); |
147 | } |
148 | |
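    // allocatedMap is a simple bitmap over the chunks of this segment:
    // bit i is set iff chunk base[i] is currently committed. The helpers
    // below maintain that invariant for allocate() and free().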
149 | void setBit(size_t index) { |
150 | Q_ASSERT(index < nChunks); |
151 | quint64 bit = static_cast<quint64>(1) << index; |
152 | // qDebug() << " setBit" << hex << index << (index & (Bits - 1)) << bit; |
153 | allocatedMap |= bit; |
154 | } |
155 | void clearBit(size_t index) { |
156 | Q_ASSERT(index < nChunks); |
157 | quint64 bit = static_cast<quint64>(1) << index; |
        // qDebug() << "   clearBit" << hex << index << (index & (Bits - 1)) << bit;
159 | allocatedMap &= ~bit; |
160 | } |
161 | bool testBit(size_t index) const { |
162 | Q_ASSERT(index < nChunks); |
163 | quint64 bit = static_cast<quint64>(1) << index; |
164 | return (allocatedMap & bit); |
165 | } |
166 | |
167 | Chunk *allocate(size_t size); |
168 | void free(Chunk *chunk, size_t size) { |
169 | DEBUG << "freeing chunk" << chunk; |
170 | size_t index = static_cast<size_t>(chunk - base); |
        size_t end = qMin(static_cast<size_t>(NumChunks), index + (size - 1)/Chunk::ChunkSize + 1);
172 | while (index < end) { |
173 | Q_ASSERT(testBit(index)); |
174 | clearBit(index); |
175 | ++index; |
176 | } |
177 | |
178 | size_t pageSize = WTF::pageSize(); |
179 | size = (size + pageSize - 1) & ~(pageSize - 1); |
180 | #if !defined(Q_OS_LINUX) && !defined(Q_OS_WIN) |
        // Linux and Windows zero out pages that have been decommitted and then
        // committed again. Unfortunately that's not true on other OSes (e.g. BSD
        // based ones), so zero out the memory before decommitting it, to ensure
        // that all chunks we allocate will be zero initialized.
185 | memset(chunk, 0, size); |
186 | #endif |
        pageReservation.decommit(chunk, size);
188 | } |
189 | |
190 | bool contains(Chunk *c) const { |
191 | return c >= base && c < base + nChunks; |
192 | } |
193 | |
    PageReservation pageReservation;
195 | Chunk *base = nullptr; |
196 | quint64 allocatedMap = 0; |
197 | size_t availableBytes = 0; |
198 | uint nChunks = 0; |
199 | }; |
200 | |
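// Finds space for `size` bytes inside this segment: either the whole segment
// is committed for one huge allocation, or a first-fit scan of the chunk
// bitmap looks for a long enough run of uncommitted chunks.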
201 | Chunk *MemorySegment::allocate(size_t size) |
202 | { |
203 | if (!allocatedMap && size >= SegmentSize) { |
204 | // chunk allocated for one huge allocation |
205 | Q_ASSERT(availableBytes >= size); |
        pageReservation.commit(base, size);
207 | allocatedMap = ~static_cast<quint64>(0); |
208 | return base; |
209 | } |
210 | size_t requiredChunks = (size + sizeof(Chunk) - 1)/sizeof(Chunk); |
211 | uint sequence = 0; |
212 | Chunk *candidate = nullptr; |
213 | for (uint i = 0; i < nChunks; ++i) { |
        if (!testBit(i)) {
215 | if (!candidate) |
216 | candidate = base + i; |
217 | ++sequence; |
218 | } else { |
219 | candidate = nullptr; |
220 | sequence = 0; |
221 | } |
222 | if (sequence == requiredChunks) { |
            pageReservation.commit(candidate, size);
224 | for (uint i = 0; i < requiredChunks; ++i) |
225 | setBit(candidate - base + i); |
226 | DEBUG << "allocated chunk " << candidate << Qt::hex << size; |
227 | |
228 | return candidate; |
229 | } |
230 | } |
231 | return nullptr; |
232 | } |
233 | |
234 | struct ChunkAllocator { |
235 | ChunkAllocator() {} |
236 | |
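    // Rounds an allocation request up to a usable chunk size: add the Chunk
    // header, align to the OS page size, and never go below one Chunk. E.g.,
    // assuming 64k chunks, any request of up to
    // Chunk::ChunkSize - Chunk::HeaderSize bytes yields Chunk::ChunkSize.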
237 | size_t requiredChunkSize(size_t size) { |
238 | size += Chunk::HeaderSize; // space required for the Chunk header |
239 | size_t pageSize = WTF::pageSize(); |
240 | size = (size + pageSize - 1) & ~(pageSize - 1); // align to page sizes |
241 | if (size < Chunk::ChunkSize) |
242 | size = Chunk::ChunkSize; |
243 | return size; |
244 | } |
245 | |
246 | Chunk *allocate(size_t size = 0); |
247 | void free(Chunk *chunk, size_t size = 0); |
248 | |
249 | std::vector<MemorySegment> memorySegments; |
250 | }; |
251 | |
252 | Chunk *ChunkAllocator::allocate(size_t size) |
253 | { |
254 | size = requiredChunkSize(size); |
255 | for (auto &m : memorySegments) { |
256 | if (~m.allocatedMap) { |
257 | Chunk *c = m.allocate(size); |
258 | if (c) |
259 | return c; |
260 | } |
261 | } |
262 | |
263 | // allocate a new segment |
    memorySegments.push_back(MemorySegment(size));
265 | Chunk *c = memorySegments.back().allocate(size); |
266 | Q_ASSERT(c); |
267 | return c; |
268 | } |
269 | |
270 | void ChunkAllocator::free(Chunk *chunk, size_t size) |
271 | { |
272 | size = requiredChunkSize(size); |
273 | for (auto &m : memorySegments) { |
        if (m.contains(chunk)) {
275 | m.free(chunk, size); |
276 | return; |
277 | } |
278 | } |
279 | Q_ASSERT(false); |
280 | } |
281 | |
282 | #ifdef DUMP_SWEEP |
283 | QString binary(quintptr n) { |
284 | QString s = QString::number(n, 2); |
285 | while (s.length() < 64) |
286 | s.prepend(QChar::fromLatin1('0')); |
287 | return s; |
288 | } |
289 | #define SDUMP qDebug |
290 | #else |
291 | QString binary(quintptr) { return QString(); } |
292 | #define SDUMP if (1) ; else qDebug |
293 | #endif |
294 | |
295 | // Stores a classname -> freed count mapping. |
296 | typedef QHash<const char*, int> MMStatsHash; |
297 | Q_GLOBAL_STATIC(MMStatsHash, freedObjectStatsGlobal) |
298 | |
299 | // This indirection avoids sticking QHash code in each of the call sites, which |
300 | // shaves off some instructions in the case that it's unused. |
301 | static void increaseFreedCountForClass(const char *className) |
302 | { |
303 | (*freedObjectStatsGlobal())[className]++; |
304 | } |
305 | |
306 | //bool Chunk::sweep(ClassDestroyStatsCallback classCountPtr) |
307 | bool Chunk::sweep(ExecutionEngine *engine) |
308 | { |
309 | bool hasUsedSlots = false; |
310 | SDUMP() << "sweeping chunk" << this; |
311 | HeapItem *o = realBase(); |
312 | bool lastSlotFree = false; |
313 | for (uint i = 0; i < Chunk::EntriesInBitmap; ++i) { |
314 | #if WRITEBARRIER(none) |
315 | Q_ASSERT((grayBitmap[i] | blackBitmap[i]) == blackBitmap[i]); // check that we don't have gray only objects |
316 | #endif |
317 | quintptr toFree = objectBitmap[i] ^ blackBitmap[i]; |
318 | Q_ASSERT((toFree & objectBitmap[i]) == toFree); // check all black objects are marked as being used |
319 | quintptr e = extendsBitmap[i]; |
320 | SDUMP() << " index=" << i; |
321 | SDUMP() << " toFree =" << binary(toFree); |
322 | SDUMP() << " black =" << binary(blackBitmap[i]); |
323 | SDUMP() << " object =" << binary(objectBitmap[i]); |
324 | SDUMP() << " extends =" << binary(e); |
325 | if (lastSlotFree) |
326 | e &= (e + 1); // clear all lowest extent bits |
327 | while (toFree) { |
            uint index = qCountTrailingZeroBits(toFree);
329 | quintptr bit = (static_cast<quintptr>(1) << index); |
330 | |
331 | toFree ^= bit; // mask out freed slot |
332 | // DEBUG << " index" << hex << index << toFree; |
333 | |
334 | // remove all extends slots that have been freed |
335 | // this is a bit of bit trickery. |
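            // Worked example (8-bit view): object at bit 2 with extents at
            // bits 3-4, plus an unrelated extent at bit 6, so e = 0b01011000:
            //   bit     = 0b00000100
            //   mask    = 0b00000111 (ones up to and including the current bit)
            //   objmask = 0b01011111, result = objmask + 1 = 0b01100000
            //   result |= mask -> 0b01100111; e &= result -> 0b01000000
            // i.e. exactly the freed object's extent bits are cleared.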
336 | quintptr mask = (bit << 1) - 1; // create a mask of 1's to the right of and up to the current bit |
337 | quintptr objmask = e | mask; // or'ing mask with e gives all ones until the end of the current object |
338 | quintptr result = objmask + 1; |
339 | Q_ASSERT(qCountTrailingZeroBits(result) - index != 0); // ensure we freed something |
340 | result |= mask; // ensure we don't clear stuff to the right of the current object |
341 | e &= result; |
342 | |
343 | HeapItem *itemToFree = o + index; |
344 | Heap::Base *b = *itemToFree; |
345 | const VTable *v = b->internalClass->vtable; |
346 | // if (Q_UNLIKELY(classCountPtr)) |
347 | // classCountPtr(v->className); |
348 | if (v->destroy) { |
349 | v->destroy(b); |
350 | b->_checkIsDestroyed(); |
351 | } |
352 | #ifdef V4_USE_HEAPTRACK |
353 | heaptrack_report_free(itemToFree); |
354 | #endif |
355 | } |
356 | Q_V4_PROFILE_DEALLOC(engine, qPopulationCount((objectBitmap[i] | extendsBitmap[i]) |
357 | - (blackBitmap[i] | e)) * Chunk::SlotSize, |
358 | Profiling::SmallItem); |
359 | objectBitmap[i] = blackBitmap[i]; |
360 | grayBitmap[i] = 0; |
361 | hasUsedSlots |= (blackBitmap[i] != 0); |
362 | extendsBitmap[i] = e; |
363 | lastSlotFree = !((objectBitmap[i]|extendsBitmap[i]) >> (sizeof(quintptr)*8 - 1)); |
364 | SDUMP() << " new extends =" << binary(e); |
365 | SDUMP() << " lastSlotFree" << lastSlotFree; |
366 | Q_ASSERT((objectBitmap[i] & extendsBitmap[i]) == 0); |
367 | o += Chunk::Bits; |
368 | } |
369 | // DEBUG << "swept chunk" << this << "freed" << slotsFreed << "slots."; |
370 | return hasUsedSlots; |
371 | } |
372 | |
373 | void Chunk::freeAll(ExecutionEngine *engine) |
374 | { |
375 | // DEBUG << "sweeping chunk" << this << (*freeList); |
376 | HeapItem *o = realBase(); |
377 | for (uint i = 0; i < Chunk::EntriesInBitmap; ++i) { |
378 | quintptr toFree = objectBitmap[i]; |
379 | quintptr e = extendsBitmap[i]; |
380 | // DEBUG << hex << " index=" << i << toFree; |
381 | while (toFree) { |
            uint index = qCountTrailingZeroBits(toFree);
383 | quintptr bit = (static_cast<quintptr>(1) << index); |
384 | |
385 | toFree ^= bit; // mask out freed slot |
386 | // DEBUG << " index" << hex << index << toFree; |
387 | |
388 | // remove all extends slots that have been freed |
389 | // this is a bit of bit trickery. |
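            // (same extent-clearing bit trick as in Chunk::sweep above;
            // see the worked example there)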
390 | quintptr mask = (bit << 1) - 1; // create a mask of 1's to the right of and up to the current bit |
391 | quintptr objmask = e | mask; // or'ing mask with e gives all ones until the end of the current object |
392 | quintptr result = objmask + 1; |
393 | Q_ASSERT(qCountTrailingZeroBits(result) - index != 0); // ensure we freed something |
394 | result |= mask; // ensure we don't clear stuff to the right of the current object |
395 | e &= result; |
396 | |
397 | HeapItem *itemToFree = o + index; |
398 | Heap::Base *b = *itemToFree; |
399 | if (b->internalClass->vtable->destroy) { |
400 | b->internalClass->vtable->destroy(b); |
401 | b->_checkIsDestroyed(); |
402 | } |
403 | #ifdef V4_USE_HEAPTRACK |
404 | heaptrack_report_free(itemToFree); |
405 | #endif |
406 | } |
407 | Q_V4_PROFILE_DEALLOC(engine, (qPopulationCount(objectBitmap[i]|extendsBitmap[i]) |
408 | - qPopulationCount(e)) * Chunk::SlotSize, Profiling::SmallItem); |
409 | objectBitmap[i] = 0; |
410 | grayBitmap[i] = 0; |
411 | extendsBitmap[i] = e; |
412 | o += Chunk::Bits; |
413 | } |
414 | // DEBUG << "swept chunk" << this << "freed" << slotsFreed << "slots."; |
415 | } |
416 | |
417 | void Chunk::resetBlackBits() |
418 | { |
    memset(blackBitmap, 0, sizeof(blackBitmap));
420 | } |
421 | |
422 | void Chunk::collectGrayItems(MarkStack *markStack) |
423 | { |
424 | // DEBUG << "sweeping chunk" << this << (*freeList); |
425 | HeapItem *o = realBase(); |
426 | for (uint i = 0; i < Chunk::EntriesInBitmap; ++i) { |
427 | #if WRITEBARRIER(none) |
428 | Q_ASSERT((grayBitmap[i] | blackBitmap[i]) == blackBitmap[i]); // check that we don't have gray only objects |
429 | #endif |
430 | quintptr toMark = blackBitmap[i] & grayBitmap[i]; // correct for a Steele type barrier |
431 | Q_ASSERT((toMark & objectBitmap[i]) == toMark); // check all black objects are marked as being used |
432 | // DEBUG << hex << " index=" << i << toFree; |
433 | while (toMark) { |
            uint index = qCountTrailingZeroBits(toMark);
435 | quintptr bit = (static_cast<quintptr>(1) << index); |
436 | |
437 | toMark ^= bit; // mask out marked slot |
438 | // DEBUG << " index" << hex << index << toFree; |
439 | |
440 | HeapItem *itemToFree = o + index; |
441 | Heap::Base *b = *itemToFree; |
442 | Q_ASSERT(b->inUse()); |
            markStack->push(b);
444 | } |
445 | grayBitmap[i] = 0; |
446 | o += Chunk::Bits; |
447 | } |
448 | // DEBUG << "swept chunk" << this << "freed" << slotsFreed << "slots."; |
449 | |
450 | } |
451 | |
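// Walks the chunk's bitmaps and threads every run of free slots into the
// matching free-list bin: bins[n] collects runs of exactly n slots for small
// n, while the last bin (bins[nBins - 1]) collects all larger runs.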
452 | void Chunk::sortIntoBins(HeapItem **bins, uint nBins) |
453 | { |
454 | // qDebug() << "sortIntoBins:"; |
455 | HeapItem *base = realBase(); |
456 | #if QT_POINTER_SIZE == 8 |
457 | const int start = 0; |
458 | #else |
459 | const int start = 1; |
460 | #endif |
461 | #ifndef QT_NO_DEBUG |
462 | uint freeSlots = 0; |
463 | uint allocatedSlots = 0; |
464 | #endif |
465 | for (int i = start; i < EntriesInBitmap; ++i) { |
466 | quintptr usedSlots = (objectBitmap[i]|extendsBitmap[i]); |
467 | #if QT_POINTER_SIZE == 8 |
468 | if (!i) |
469 | usedSlots |= (static_cast<quintptr>(1) << (HeaderSize/SlotSize)) - 1; |
470 | #endif |
471 | #ifndef QT_NO_DEBUG |
        allocatedSlots += qPopulationCount(usedSlots);
473 | // qDebug() << hex << " i=" << i << "used=" << usedSlots; |
474 | #endif |
475 | while (1) { |
            uint index = qCountTrailingZeroBits(usedSlots + 1);
477 | if (index == Bits) |
478 | break; |
479 | uint freeStart = i*Bits + index; |
480 | usedSlots &= ~((static_cast<quintptr>(1) << index) - 1); |
481 | while (!usedSlots) { |
482 | if (++i < EntriesInBitmap) { |
483 | usedSlots = (objectBitmap[i]|extendsBitmap[i]); |
484 | } else { |
485 | Q_ASSERT(i == EntriesInBitmap); |
486 | // Overflows to 0 when counting trailing zeroes above in next iteration. |
487 | // Then, all the bits are zeroes and we break. |
488 | usedSlots = std::numeric_limits<quintptr>::max(); |
489 | break; |
490 | } |
491 | #ifndef QT_NO_DEBUG |
                allocatedSlots += qPopulationCount(usedSlots);
493 | // qDebug() << hex << " i=" << i << "used=" << usedSlots; |
494 | #endif |
495 | } |
496 | HeapItem *freeItem = base + freeStart; |
497 | |
            index = qCountTrailingZeroBits(usedSlots);
499 | usedSlots |= (quintptr(1) << index) - 1; |
500 | uint freeEnd = i*Bits + index; |
501 | uint nSlots = freeEnd - freeStart; |
502 | #ifndef QT_NO_DEBUG |
503 | // qDebug() << hex << " got free slots from" << freeStart << "to" << freeEnd << "n=" << nSlots << "usedSlots=" << usedSlots; |
504 | freeSlots += nSlots; |
505 | #endif |
506 | Q_ASSERT(freeEnd > freeStart && freeEnd <= NumSlots); |
507 | freeItem->freeData.availableSlots = nSlots; |
            uint bin = qMin(nBins - 1, nSlots);
509 | freeItem->freeData.next = bins[bin]; |
510 | bins[bin] = freeItem; |
511 | } |
512 | } |
513 | #ifndef QT_NO_DEBUG |
514 | Q_ASSERT(freeSlots + allocatedSlots == (EntriesInBitmap - start) * 8 * sizeof(quintptr)); |
515 | #endif |
516 | } |
517 | |
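// Allocation strategy, in order of preference: (1) pop an exact-size run from
// the matching bin, (2) bump-allocate from the current free area, (3) carve
// the request out of a large run from the last bin, (4) split a bigger
// fixed-size bin, and finally (5) grab a fresh chunk from the ChunkAllocator
// (only if forceAllocation is set).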
518 | HeapItem *BlockAllocator::allocate(size_t size, bool forceAllocation) { |
519 | Q_ASSERT((size % Chunk::SlotSize) == 0); |
520 | size_t slotsRequired = size >> Chunk::SlotSizeShift; |
521 | |
522 | if (allocationStats) |
        ++allocationStats[binForSlots(slotsRequired)];
524 | |
525 | HeapItem **last; |
526 | |
527 | HeapItem *m; |
528 | |
529 | if (slotsRequired < NumBins - 1) { |
530 | m = freeBins[slotsRequired]; |
531 | if (m) { |
532 | freeBins[slotsRequired] = m->freeData.next; |
533 | goto done; |
534 | } |
535 | } |
536 | |
537 | if (nFree >= slotsRequired) { |
538 | // use bump allocation |
539 | Q_ASSERT(nextFree); |
540 | m = nextFree; |
541 | nextFree += slotsRequired; |
542 | nFree -= slotsRequired; |
543 | goto done; |
544 | } |
545 | |
546 | // DEBUG << "No matching bin found for item" << size << bin; |
547 | // search last bin for a large enough item |
548 | last = &freeBins[NumBins - 1]; |
549 | while ((m = *last)) { |
550 | if (m->freeData.availableSlots >= slotsRequired) { |
551 | *last = m->freeData.next; // take it out of the list |
552 | |
553 | size_t remainingSlots = m->freeData.availableSlots - slotsRequired; |
554 | // DEBUG << "found large free slots of size" << m->freeData.availableSlots << m << "remaining" << remainingSlots; |
555 | if (remainingSlots == 0) |
556 | goto done; |
557 | |
558 | HeapItem *remainder = m + slotsRequired; |
559 | if (remainingSlots > nFree) { |
560 | if (nFree) { |
                    size_t bin = binForSlots(nFree);
562 | nextFree->freeData.next = freeBins[bin]; |
563 | nextFree->freeData.availableSlots = nFree; |
564 | freeBins[bin] = nextFree; |
565 | } |
566 | nextFree = remainder; |
567 | nFree = remainingSlots; |
568 | } else { |
569 | remainder->freeData.availableSlots = remainingSlots; |
                size_t binForRemainder = binForSlots(remainingSlots);
571 | remainder->freeData.next = freeBins[binForRemainder]; |
572 | freeBins[binForRemainder] = remainder; |
573 | } |
574 | goto done; |
575 | } |
576 | last = &m->freeData.next; |
577 | } |
578 | |
579 | if (slotsRequired < NumBins - 1) { |
580 | // check if we can split up another slot |
581 | for (size_t i = slotsRequired + 1; i < NumBins - 1; ++i) { |
582 | m = freeBins[i]; |
583 | if (m) { |
584 | freeBins[i] = m->freeData.next; // take it out of the list |
585 | // qDebug() << "got item" << slotsRequired << "from slot" << i; |
586 | size_t remainingSlots = i - slotsRequired; |
587 | Q_ASSERT(remainingSlots < NumBins - 1); |
588 | HeapItem *remainder = m + slotsRequired; |
589 | remainder->freeData.availableSlots = remainingSlots; |
590 | remainder->freeData.next = freeBins[remainingSlots]; |
591 | freeBins[remainingSlots] = remainder; |
592 | goto done; |
593 | } |
594 | } |
595 | } |
596 | |
597 | if (!m) { |
598 | if (!forceAllocation) |
599 | return nullptr; |
600 | Chunk *newChunk = chunkAllocator->allocate(); |
601 | Q_V4_PROFILE_ALLOC(engine, Chunk::DataSize, Profiling::HeapPage); |
        chunks.push_back(newChunk);
603 | nextFree = newChunk->first(); |
604 | nFree = Chunk::AvailableSlots; |
605 | m = nextFree; |
606 | nextFree += slotsRequired; |
607 | nFree -= slotsRequired; |
608 | } |
609 | |
610 | done: |
611 | m->setAllocatedSlots(slotsRequired); |
612 | Q_V4_PROFILE_ALLOC(engine, slotsRequired * Chunk::SlotSize, Profiling::SmallItem); |
613 | #ifdef V4_USE_HEAPTRACK |
614 | heaptrack_report_alloc(m, slotsRequired * Chunk::SlotSize); |
615 | #endif |
616 | // DEBUG << " " << hex << m->chunk() << m->chunk()->objectBitmap[0] << m->chunk()->extendsBitmap[0] << (m - m->chunk()->realBase()); |
617 | return m; |
618 | } |
619 | |
620 | void BlockAllocator::sweep() |
621 | { |
622 | nextFree = nullptr; |
623 | nFree = 0; |
    memset(freeBins, 0, sizeof(freeBins));
625 | |
626 | // qDebug() << "BlockAlloc: sweep"; |
627 | usedSlotsAfterLastSweep = 0; |
628 | |
    auto firstEmptyChunk = std::partition(chunks.begin(), chunks.end(), [this](Chunk *c) {
630 | return c->sweep(engine); |
631 | }); |
632 | |
    std::for_each(chunks.begin(), firstEmptyChunk, [this](Chunk *c) {
        c->sortIntoBins(freeBins, NumBins);
635 | usedSlotsAfterLastSweep += c->nUsedSlots(); |
636 | }); |
637 | |
    // Free the empty chunks only at the end, so that the sweep() calls above
    // can't indirectly access memory that has already been freed.
    std::for_each(firstEmptyChunk, chunks.end(), [this](Chunk *c) {
641 | Q_V4_PROFILE_DEALLOC(engine, Chunk::DataSize, Profiling::HeapPage); |
        chunkAllocator->free(c);
643 | }); |
644 | |
    chunks.erase(firstEmptyChunk, chunks.end());
646 | } |
647 | |
648 | void BlockAllocator::freeAll() |
649 | { |
650 | for (auto c : chunks) |
651 | c->freeAll(engine); |
652 | for (auto c : chunks) { |
653 | Q_V4_PROFILE_DEALLOC(engine, Chunk::DataSize, Profiling::HeapPage); |
        chunkAllocator->free(c);
655 | } |
656 | } |
657 | |
658 | void BlockAllocator::resetBlackBits() |
659 | { |
660 | for (auto c : chunks) |
661 | c->resetBlackBits(); |
662 | } |
663 | |
664 | void BlockAllocator::collectGrayItems(MarkStack *markStack) |
665 | { |
666 | for (auto c : chunks) |
667 | c->collectGrayItems(markStack); |
668 | |
669 | } |
670 | |
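// Items of at least half a memory segment bypass the ChunkAllocator and get
// a dedicated MemorySegment; everything else is placed in a (possibly
// multi-chunk) allocation from the shared ChunkAllocator.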
671 | HeapItem *HugeItemAllocator::allocate(size_t size) { |
672 | MemorySegment *m = nullptr; |
673 | Chunk *c = nullptr; |
674 | if (size >= MemorySegment::SegmentSize/2) { |
        // too large to handle through the ChunkAllocator, so get our own memory segment
676 | size += Chunk::HeaderSize; // space required for the Chunk header |
677 | size_t pageSize = WTF::pageSize(); |
678 | size = (size + pageSize - 1) & ~(pageSize - 1); // align to page sizes |
679 | m = new MemorySegment(size); |
680 | c = m->allocate(size); |
681 | } else { |
682 | c = chunkAllocator->allocate(size); |
683 | } |
684 | Q_ASSERT(c); |
    chunks.push_back(HugeChunk{m, c, size});
    Chunk::setBit(c->objectBitmap, c->first() - c->realBase());
687 | Q_V4_PROFILE_ALLOC(engine, size, Profiling::LargeItem); |
688 | #ifdef V4_USE_HEAPTRACK |
689 | heaptrack_report_alloc(c, size); |
690 | #endif |
691 | return c->first(); |
692 | } |
693 | |
694 | static void freeHugeChunk(ChunkAllocator *chunkAllocator, const HugeItemAllocator::HugeChunk &c, ClassDestroyStatsCallback classCountPtr) |
695 | { |
696 | HeapItem *itemToFree = c.chunk->first(); |
697 | Heap::Base *b = *itemToFree; |
698 | const VTable *v = b->internalClass->vtable; |
699 | if (Q_UNLIKELY(classCountPtr)) |
700 | classCountPtr(v->className); |
701 | |
702 | if (v->destroy) { |
703 | v->destroy(b); |
704 | b->_checkIsDestroyed(); |
705 | } |
706 | if (c.segment) { |
707 | // own memory segment |
        c.segment->free(c.chunk, c.size);
709 | delete c.segment; |
710 | } else { |
        chunkAllocator->free(c.chunk, c.size);
712 | } |
713 | #ifdef V4_USE_HEAPTRACK |
714 | heaptrack_report_free(c.chunk); |
715 | #endif |
716 | } |
717 | |
718 | void HugeItemAllocator::sweep(ClassDestroyStatsCallback classCountPtr) |
719 | { |
720 | auto isBlack = [this, classCountPtr] (const HugeChunk &c) { |
721 | bool b = c.chunk->first()->isBlack(); |
        Chunk::clearBit(c.chunk->blackBitmap, c.chunk->first() - c.chunk->realBase());
723 | if (!b) { |
724 | Q_V4_PROFILE_DEALLOC(engine, c.size, Profiling::LargeItem); |
725 | freeHugeChunk(chunkAllocator, c, classCountPtr); |
726 | } |
727 | return !b; |
728 | }; |
729 | |
    auto newEnd = std::remove_if(chunks.begin(), chunks.end(), isBlack);
    chunks.erase(newEnd, chunks.end());
732 | } |
733 | |
734 | void HugeItemAllocator::resetBlackBits() |
735 | { |
736 | for (auto c : chunks) |
        Chunk::clearBit(c.chunk->blackBitmap, c.chunk->first() - c.chunk->realBase());
738 | } |
739 | |
740 | void HugeItemAllocator::collectGrayItems(MarkStack *markStack) |
741 | { |
742 | for (auto c : chunks) |
743 | // Correct for a Steele type barrier |
        if (Chunk::testBit(c.chunk->blackBitmap, c.chunk->first() - c.chunk->realBase()) &&
            Chunk::testBit(c.chunk->grayBitmap, c.chunk->first() - c.chunk->realBase())) {
746 | HeapItem *i = c.chunk->first(); |
747 | Heap::Base *b = *i; |
748 | b->mark(markStack); |
749 | } |
750 | } |
751 | |
752 | void HugeItemAllocator::freeAll() |
753 | { |
754 | for (auto &c : chunks) { |
755 | Q_V4_PROFILE_DEALLOC(engine, c.size, Profiling::LargeItem); |
        freeHugeChunk(chunkAllocator, c, nullptr);
757 | } |
758 | } |
759 | |
760 | |
761 | MemoryManager::MemoryManager(ExecutionEngine *engine) |
762 | : engine(engine) |
763 | , chunkAllocator(new ChunkAllocator) |
764 | , blockAllocator(chunkAllocator, engine) |
765 | , icAllocator(chunkAllocator, engine) |
766 | , hugeItemAllocator(chunkAllocator, engine) |
767 | , m_persistentValues(new PersistentValueStorage(engine)) |
768 | , m_weakValues(new PersistentValueStorage(engine)) |
769 | , unmanagedHeapSizeGCLimit(MinUnmanagedHeapSizeGCLimit) |
    , aggressiveGC(!qEnvironmentVariableIsEmpty("QV4_MM_AGGRESSIVE_GC"))
771 | , gcStats(lcGcStats().isDebugEnabled()) |
772 | , gcCollectorStats(lcGcAllocatorStats().isDebugEnabled()) |
773 | { |
774 | #ifdef V4_USE_VALGRIND |
775 | VALGRIND_CREATE_MEMPOOL(this, 0, true); |
776 | #endif |
    memset(statistics.allocations, 0, sizeof(statistics.allocations));
778 | if (gcStats) |
779 | blockAllocator.allocationStats = statistics.allocations; |
780 | } |
781 | |
782 | Heap::Base *MemoryManager::allocString(std::size_t unmanagedSize) |
783 | { |
    const size_t stringSize = align(sizeof(Heap::String));
785 | #ifdef MM_STATS |
786 | lastAllocRequestedSlots = stringSize >> Chunk::SlotSizeShift; |
787 | ++allocationCount; |
788 | #endif |
789 | unmanagedHeapSize += unmanagedSize; |
790 | |
    HeapItem *m = allocate(&blockAllocator, stringSize);
    memset(m, 0, stringSize);
793 | return *m; |
794 | } |
795 | |
796 | Heap::Base *MemoryManager::allocData(std::size_t size) |
797 | { |
798 | #ifdef MM_STATS |
799 | lastAllocRequestedSlots = size >> Chunk::SlotSizeShift; |
800 | ++allocationCount; |
801 | #endif |
802 | |
803 | Q_ASSERT(size >= Chunk::SlotSize); |
804 | Q_ASSERT(size % Chunk::SlotSize == 0); |
805 | |
    HeapItem *m = allocate(&blockAllocator, size);
    memset(m, 0, size);
808 | return *m; |
809 | } |
810 | |
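// Allocates an object together with its member data. If the members fit into
// the vtable's inline property area, the object alone is allocated; otherwise
// the object and the overflow MemberData are carved out of a single block
// allocation (or, for very large member counts, the MemberData goes to the
// huge item allocator).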
811 | Heap::Object *MemoryManager::allocObjectWithMemberData(const QV4::VTable *vtable, uint nMembers) |
812 | { |
813 | uint size = (vtable->nInlineProperties + vtable->inlinePropertyOffset)*sizeof(Value); |
814 | Q_ASSERT(!(size % sizeof(HeapItem))); |
815 | |
816 | Heap::Object *o; |
817 | if (nMembers <= vtable->nInlineProperties) { |
818 | o = static_cast<Heap::Object *>(allocData(size)); |
819 | } else { |
820 | // Allocate both in one go through the block allocator |
821 | nMembers -= vtable->nInlineProperties; |
        std::size_t memberSize = align(sizeof(Heap::MemberData) + (nMembers - 1)*sizeof(Value));
823 | size_t totalSize = size + memberSize; |
824 | Heap::MemberData *m; |
825 | if (totalSize > Chunk::DataSize) { |
826 | o = static_cast<Heap::Object *>(allocData(size)); |
            m = hugeItemAllocator.allocate(memberSize)->as<Heap::MemberData>();
828 | } else { |
            HeapItem *mh = reinterpret_cast<HeapItem *>(allocData(totalSize));
830 | Heap::Base *b = *mh; |
831 | o = static_cast<Heap::Object *>(b); |
832 | mh += (size >> Chunk::SlotSizeShift); |
833 | m = mh->as<Heap::MemberData>(); |
834 | Chunk *c = mh->chunk(); |
835 | size_t index = mh - c->realBase(); |
            Chunk::setBit(c->objectBitmap, index);
            Chunk::clearBit(c->extendsBitmap, index);
838 | } |
        o->memberData.set(engine, m);
        m->internalClass.set(engine, engine->internalClasses(EngineBase::Class_MemberData));
841 | Q_ASSERT(o->memberData->internalClass); |
842 | m->values.alloc = static_cast<uint>((memberSize - sizeof(Heap::MemberData) + sizeof(Value))/sizeof(Value)); |
843 | m->values.size = o->memberData->values.alloc; |
844 | m->init(); |
845 | // qDebug() << " got" << o->memberData << o->memberData->size; |
846 | } |
847 | // qDebug() << "allocating object with memberData" << o << o->memberData.operator->(); |
848 | return o; |
849 | } |
850 | |
851 | static uint markStackSize = 0; |
852 | |
853 | MarkStack::MarkStack(ExecutionEngine *engine) |
854 | : m_engine(engine) |
855 | { |
856 | m_base = (Heap::Base **)engine->gcStack->base(); |
857 | m_top = m_base; |
858 | const size_t size = engine->maxGCStackSize() / sizeof(Heap::Base); |
859 | m_hardLimit = m_base + size; |
860 | m_softLimit = m_base + size * 3 / 4; |
861 | } |
862 | |
863 | void MarkStack::drain() |
864 | { |
865 | while (m_top > m_base) { |
866 | Heap::Base *h = pop(); |
867 | ++markStackSize; |
868 | Q_ASSERT(h); // at this point we should only have Heap::Base objects in this area on the stack. If not, weird things might happen. |
869 | h->internalClass->vtable->markObjects(h, this); |
870 | } |
871 | } |
872 | |
873 | void MemoryManager::collectRoots(MarkStack *markStack) |
874 | { |
875 | engine->markObjects(markStack); |
876 | |
877 | // qDebug() << " mark stack after engine->mark" << (engine->jsStackTop - markBase); |
878 | |
879 | collectFromJSStack(markStack); |
880 | |
881 | // qDebug() << " mark stack after js stack collect" << (engine->jsStackTop - markBase); |
882 | m_persistentValues->mark(markStack); |
883 | |
884 | // qDebug() << " mark stack after persistants" << (engine->jsStackTop - markBase); |
885 | |
    // Preserve QObject ownership rules within JavaScript: a parent with C++
    // ownership keeps all of its children alive in JavaScript.

    // Do this _after_ collectFromJSStack to ensure that processing the weak
    // managed objects in the loop below doesn't make them end up as leftovers
    // on the stack and thus always get collected.
892 | for (PersistentValueStorage::Iterator it = m_weakValues->begin(); it != m_weakValues->end(); ++it) { |
893 | QObjectWrapper *qobjectWrapper = (*it).as<QObjectWrapper>(); |
894 | if (!qobjectWrapper) |
895 | continue; |
896 | QObject *qobject = qobjectWrapper->object(); |
897 | if (!qobject) |
898 | continue; |
        bool keepAlive = QQmlData::keepAliveDuringGarbageCollection(qobject);
900 | |
901 | if (!keepAlive) { |
902 | if (QObject *parent = qobject->parent()) { |
903 | while (parent->parent()) |
904 | parent = parent->parent(); |
905 | |
                keepAlive = QQmlData::keepAliveDuringGarbageCollection(parent);
907 | } |
908 | } |
909 | |
910 | if (keepAlive) |
911 | qobjectWrapper->mark(markStack); |
912 | } |
913 | } |
914 | |
915 | void MemoryManager::mark() |
916 | { |
917 | markStackSize = 0; |
918 | MarkStack markStack(engine); |
    collectRoots(&markStack);
920 | // dtor of MarkStack drains |
921 | } |
922 | |
923 | void MemoryManager::sweep(bool lastSweep, ClassDestroyStatsCallback classCountPtr) |
924 | { |
925 | for (PersistentValueStorage::Iterator it = m_weakValues->begin(); it != m_weakValues->end(); ++it) { |
926 | Managed *m = (*it).managed(); |
927 | if (!m || m->markBit()) |
928 | continue; |
929 | // we need to call destroyObject on qobjectwrappers now, so that they can emit the destroyed |
930 | // signal before we start sweeping the heap |
931 | if (QObjectWrapper *qobjectWrapper = (*it).as<QObjectWrapper>()) |
            qobjectWrapper->destroyObject(lastSweep);
933 | } |
934 | |
935 | // remove objects from weak maps and sets |
936 | Heap::MapObject *map = weakMaps; |
937 | Heap::MapObject **lastMap = &weakMaps; |
938 | while (map) { |
939 | if (map->isMarked()) { |
940 | map->removeUnmarkedKeys(); |
941 | *lastMap = map; |
942 | lastMap = &map->nextWeakMap; |
943 | } |
944 | map = map->nextWeakMap; |
945 | } |
946 | |
947 | Heap::SetObject *set = weakSets; |
948 | Heap::SetObject **lastSet = &weakSets; |
949 | while (set) { |
950 | if (set->isMarked()) { |
951 | set->removeUnmarkedKeys(); |
952 | *lastSet = set; |
953 | lastSet = &set->nextWeakSet; |
954 | } |
955 | set = set->nextWeakSet; |
956 | } |
957 | |
958 | // onDestruction handlers may have accessed other QObject wrappers and reset their value, so ensure |
959 | // that they are all set to undefined. |
960 | for (PersistentValueStorage::Iterator it = m_weakValues->begin(); it != m_weakValues->end(); ++it) { |
961 | Managed *m = (*it).managed(); |
962 | if (!m || m->markBit()) |
963 | continue; |
964 | (*it) = Value::undefinedValue(); |
965 | } |
966 | |
967 | // Now it is time to free QV4::QObjectWrapper Value, we must check the Value's tag to make sure its object has been destroyed |
968 | const int pendingCount = m_pendingFreedObjectWrapperValue.count(); |
969 | if (pendingCount) { |
970 | QVector<Value *> remainingWeakQObjectWrappers; |
        remainingWeakQObjectWrappers.reserve(pendingCount);
972 | for (int i = 0; i < pendingCount; ++i) { |
973 | Value *v = m_pendingFreedObjectWrapperValue.at(i); |
974 | if (v->isUndefined() || v->isEmpty()) |
                PersistentValueStorage::free(v);
976 | else |
                remainingWeakQObjectWrappers.append(v);
978 | } |
979 | m_pendingFreedObjectWrapperValue = remainingWeakQObjectWrappers; |
980 | } |
981 | |
982 | if (MultiplyWrappedQObjectMap *multiplyWrappedQObjects = engine->m_multiplyWrappedQObjects) { |
983 | for (MultiplyWrappedQObjectMap::Iterator it = multiplyWrappedQObjects->begin(); it != multiplyWrappedQObjects->end();) { |
984 | if (it.value().isNullOrUndefined()) |
985 | it = multiplyWrappedQObjects->erase(it); |
986 | else |
987 | ++it; |
988 | } |
989 | } |
990 | |
991 | |
992 | if (!lastSweep) { |
993 | engine->identifierTable->sweep(); |
994 | blockAllocator.sweep(/*classCountPtr*/); |
995 | hugeItemAllocator.sweep(classCountPtr); |
996 | icAllocator.sweep(/*classCountPtr*/); |
997 | } |
998 | } |
999 | |
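// Heuristic: run the GC once the managed heap exceeds the minimum size and
// less than 100/GCOverallocation of it (50% with the current value of 200)
// was still in use after the last sweep, i.e. the heap has grown to more
// than twice the live data.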
1000 | bool MemoryManager::shouldRunGC() const |
1001 | { |
1002 | size_t total = blockAllocator.totalSlots() + icAllocator.totalSlots(); |
1003 | if (total > MinSlotsGCLimit && usedSlotsAfterLastFullSweep * GCOverallocation < total * 100) |
1004 | return true; |
1005 | return false; |
1006 | } |
1007 | |
1008 | static size_t dumpBins(BlockAllocator *b, const char *title) |
1009 | { |
1010 | const QLoggingCategory &stats = lcGcAllocatorStats(); |
1011 | size_t totalSlotMem = 0; |
1012 | if (title) |
1013 | qDebug(cat: stats) << "Slot map for" << title << "allocator:" ; |
1014 | for (uint i = 0; i < BlockAllocator::NumBins; ++i) { |
1015 | uint nEntries = 0; |
1016 | HeapItem *h = b->freeBins[i]; |
1017 | while (h) { |
1018 | ++nEntries; |
1019 | totalSlotMem += h->freeData.availableSlots; |
1020 | h = h->freeData.next; |
1021 | } |
1022 | if (title) |
1023 | qDebug(cat: stats) << " number of entries in slot" << i << ":" << nEntries; |
1024 | } |
1025 | SDUMP() << " large slot map" ; |
1026 | HeapItem *h = b->freeBins[BlockAllocator::NumBins - 1]; |
1027 | while (h) { |
1028 | SDUMP() << " " << Qt::hex << (quintptr(h)/32) << h->freeData.availableSlots; |
1029 | h = h->freeData.next; |
1030 | } |
1031 | |
1032 | if (title) |
1033 | qDebug(cat: stats) << " total mem in bins" << totalSlotMem*Chunk::SlotSize; |
1034 | return totalSlotMem*Chunk::SlotSize; |
1035 | } |
1036 | |
1037 | void MemoryManager::runGC() |
1038 | { |
1039 | if (gcBlocked) { |
1040 | // qDebug() << "Not running GC."; |
1041 | return; |
1042 | } |
1043 | |
1044 | QScopedValueRollback<bool> gcBlocker(gcBlocked, true); |
1045 | // qDebug() << "runGC"; |
1046 | |
1047 | if (gcStats) { |
        statistics.maxReservedMem = qMax(statistics.maxReservedMem, getAllocatedMem());
        statistics.maxAllocatedMem = qMax(statistics.maxAllocatedMem, getUsedMem() + getLargeItemsMem());
1050 | } |
1051 | |
1052 | if (!gcCollectorStats) { |
1053 | mark(); |
1054 | sweep(); |
1055 | } else { |
1056 | bool triggeredByUnmanagedHeap = (unmanagedHeapSize > unmanagedHeapSizeGCLimit); |
1057 | size_t oldUnmanagedSize = unmanagedHeapSize; |
1058 | |
1059 | const size_t totalMem = getAllocatedMem(); |
1060 | const size_t usedBefore = getUsedMem(); |
1061 | const size_t largeItemsBefore = getLargeItemsMem(); |
1062 | |
1063 | const QLoggingCategory &stats = lcGcAllocatorStats(); |
1064 | qDebug(cat: stats) << "========== GC ==========" ; |
1065 | #ifdef MM_STATS |
1066 | qDebug(cat: stats) << " Triggered by alloc request of" << lastAllocRequestedSlots << "slots." ; |
1067 | qDebug(cat: stats) << " Allocations since last GC" << allocationCount; |
1068 | allocationCount = 0; |
1069 | #endif |
1070 | size_t oldChunks = blockAllocator.chunks.size(); |
1071 | qDebug(cat: stats) << "Allocated" << totalMem << "bytes in" << oldChunks << "chunks" ; |
1072 | qDebug(cat: stats) << "Fragmented memory before GC" << (totalMem - usedBefore); |
1073 | dumpBins(b: &blockAllocator, title: "Block" ); |
1074 | dumpBins(b: &icAllocator, title: "InternalClass" ); |
1075 | |
1076 | QElapsedTimer t; |
1077 | t.start(); |
1078 | mark(); |
1079 | qint64 markTime = t.nsecsElapsed()/1000; |
1080 | t.restart(); |
        sweep(false, increaseFreedCountForClass);
1082 | const size_t usedAfter = getUsedMem(); |
1083 | const size_t largeItemsAfter = getLargeItemsMem(); |
1084 | qint64 sweepTime = t.nsecsElapsed()/1000; |
1085 | |
1086 | if (triggeredByUnmanagedHeap) { |
1087 | qDebug(cat: stats) << "triggered by unmanaged heap:" ; |
1088 | qDebug(cat: stats) << " old unmanaged heap size:" << oldUnmanagedSize; |
1089 | qDebug(cat: stats) << " new unmanaged heap:" << unmanagedHeapSize; |
1090 | qDebug(cat: stats) << " unmanaged heap limit:" << unmanagedHeapSizeGCLimit; |
1091 | } |
        size_t memInBins = dumpBins(&blockAllocator, "Block")
                + dumpBins(&icAllocator, "InternalClass");
        qDebug(stats) << "Marked objects in" << markTime << "us.";
        qDebug(stats) << "   " << markStackSize << "objects marked";
        qDebug(stats) << "Swept objects in" << sweepTime << "us.";
1097 | |
1098 | // sort our object types by number of freed instances |
1099 | MMStatsHash freedObjectStats; |
        std::swap(freedObjectStats, *freedObjectStatsGlobal());
1101 | typedef std::pair<const char*, int> ObjectStatInfo; |
1102 | std::vector<ObjectStatInfo> freedObjectsSorted; |
        freedObjectsSorted.reserve(freedObjectStats.count());
1104 | for (auto it = freedObjectStats.constBegin(); it != freedObjectStats.constEnd(); ++it) { |
            freedObjectsSorted.push_back(std::make_pair(it.key(), it.value()));
1106 | } |
        std::sort(freedObjectsSorted.begin(), freedObjectsSorted.end(), [](const ObjectStatInfo &a, const ObjectStatInfo &b) {
            // sort by freed count (descending), then by class name
            return a.second != b.second ? a.second > b.second : strcmp(a.first, b.first) < 0;
        });
1110 | |
1111 | qDebug(cat: stats) << "Used memory before GC:" << usedBefore; |
1112 | qDebug(cat: stats) << "Used memory after GC:" << usedAfter; |
1113 | qDebug(cat: stats) << "Freed up bytes :" << (usedBefore - usedAfter); |
1114 | qDebug(cat: stats) << "Freed up chunks :" << (oldChunks - blockAllocator.chunks.size()); |
1115 | size_t lost = blockAllocator.allocatedMem() + icAllocator.allocatedMem() |
1116 | - memInBins - usedAfter; |
1117 | if (lost) |
1118 | qDebug(cat: stats) << "!!!!!!!!!!!!!!!!!!!!! LOST MEM:" << lost << "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" ; |
1119 | if (largeItemsBefore || largeItemsAfter) { |
1120 | qDebug(cat: stats) << "Large item memory before GC:" << largeItemsBefore; |
1121 | qDebug(cat: stats) << "Large item memory after GC:" << largeItemsAfter; |
1122 | qDebug(cat: stats) << "Large item memory freed up:" << (largeItemsBefore - largeItemsAfter); |
1123 | } |
1124 | |
1125 | for (auto it = freedObjectsSorted.cbegin(); it != freedObjectsSorted.cend(); ++it) { |
            qDebug(stats).noquote() << QString::fromLatin1("Freed JS type: %1 (%2 instances)").arg(QString::fromLatin1(it->first), QString::number(it->second));
1127 | } |
1128 | |
1129 | qDebug(cat: stats) << "======== End GC ========" ; |
1130 | } |
1131 | |
1132 | if (gcStats) |
        statistics.maxUsedMem = qMax(statistics.maxUsedMem, getUsedMem() + getLargeItemsMem());
1134 | |
1135 | if (aggressiveGC) { |
        // ensure we don't 'lose' any memory
1137 | Q_ASSERT(blockAllocator.allocatedMem() |
1138 | == blockAllocator.usedMem() + dumpBins(&blockAllocator, nullptr)); |
1139 | Q_ASSERT(icAllocator.allocatedMem() |
1140 | == icAllocator.usedMem() + dumpBins(&icAllocator, nullptr)); |
1141 | } |
1142 | |
1143 | usedSlotsAfterLastFullSweep = blockAllocator.usedSlotsAfterLastSweep + icAllocator.usedSlotsAfterLastSweep; |
1144 | |
1145 | // reset all black bits |
1146 | blockAllocator.resetBlackBits(); |
1147 | hugeItemAllocator.resetBlackBits(); |
1148 | icAllocator.resetBlackBits(); |
1149 | } |
1150 | |
1151 | size_t MemoryManager::getUsedMem() const |
1152 | { |
1153 | return blockAllocator.usedMem() + icAllocator.usedMem(); |
1154 | } |
1155 | |
1156 | size_t MemoryManager::getAllocatedMem() const |
1157 | { |
1158 | return blockAllocator.allocatedMem() + icAllocator.allocatedMem() + hugeItemAllocator.usedMem(); |
1159 | } |
1160 | |
1161 | size_t MemoryManager::getLargeItemsMem() const |
1162 | { |
1163 | return hugeItemAllocator.usedMem(); |
1164 | } |
1165 | |
1166 | void MemoryManager::registerWeakMap(Heap::MapObject *map) |
1167 | { |
1168 | map->nextWeakMap = weakMaps; |
1169 | weakMaps = map; |
1170 | } |
1171 | |
1172 | void MemoryManager::registerWeakSet(Heap::SetObject *set) |
1173 | { |
1174 | set->nextWeakSet = weakSets; |
1175 | weakSets = set; |
1176 | } |
1177 | |
1178 | MemoryManager::~MemoryManager() |
1179 | { |
1180 | delete m_persistentValues; |
1181 | |
1182 | dumpStats(); |
1183 | |
1184 | sweep(/*lastSweep*/true); |
1185 | blockAllocator.freeAll(); |
1186 | hugeItemAllocator.freeAll(); |
1187 | icAllocator.freeAll(); |
1188 | |
1189 | delete m_weakValues; |
1190 | #ifdef V4_USE_VALGRIND |
1191 | VALGRIND_DESTROY_MEMPOOL(this); |
1192 | #endif |
1193 | delete chunkAllocator; |
1194 | } |
1195 | |
1196 | |
1197 | void MemoryManager::dumpStats() const |
1198 | { |
1199 | if (!gcStats) |
1200 | return; |
1201 | |
1202 | const QLoggingCategory &stats = lcGcStats(); |
1203 | qDebug(cat: stats) << "Qml GC memory allocation statistics:" ; |
1204 | qDebug(cat: stats) << "Total memory allocated:" << statistics.maxReservedMem; |
1205 | qDebug(cat: stats) << "Max memory used before a GC run:" << statistics.maxAllocatedMem; |
1206 | qDebug(cat: stats) << "Max memory used after a GC run:" << statistics.maxUsedMem; |
1207 | qDebug(cat: stats) << "Requests for different item sizes:" ; |
1208 | for (int i = 1; i < BlockAllocator::NumBins - 1; ++i) |
1209 | qDebug(cat: stats) << " <" << (i << Chunk::SlotSizeShift) << " bytes: " << statistics.allocations[i]; |
1210 | qDebug(cat: stats) << " >=" << ((BlockAllocator::NumBins - 1) << Chunk::SlotSizeShift) << " bytes: " << statistics.allocations[BlockAllocator::NumBins - 1]; |
1211 | } |
1212 | |
1213 | void MemoryManager::collectFromJSStack(MarkStack *markStack) const |
1214 | { |
1215 | Value *v = engine->jsStackBase; |
1216 | Value *top = engine->jsStackTop; |
1217 | while (v < top) { |
1218 | Managed *m = v->managed(); |
1219 | if (m) { |
1220 | Q_ASSERT(m->inUse()); |
1221 | // Skip pointers to already freed objects, they are bogus as well |
1222 | m->mark(markStack); |
1223 | } |
1224 | ++v; |
1225 | } |
1226 | } |
1227 | |
1228 | } // namespace QV4 |
1229 | |
1230 | QT_END_NAMESPACE |
1231 | |