/****************************************************************************
**
** Copyright (C) 2016 The Qt Company Ltd.
** Contact: https://www.qt.io/licensing/
**
** This file is part of the QtQml module of the Qt Toolkit.
**
** $QT_BEGIN_LICENSE:LGPL$
** Commercial License Usage
** Licensees holding valid commercial Qt licenses may use this file in
** accordance with the commercial license agreement provided with the
** Software or, alternatively, in accordance with the terms contained in
** a written agreement between you and The Qt Company. For licensing terms
** and conditions see https://www.qt.io/terms-conditions. For further
** information use the contact form at https://www.qt.io/contact-us.
**
** GNU Lesser General Public License Usage
** Alternatively, this file may be used under the terms of the GNU Lesser
** General Public License version 3 as published by the Free Software
** Foundation and appearing in the file LICENSE.LGPL3 included in the
** packaging of this file. Please review the following information to
** ensure the GNU Lesser General Public License version 3 requirements
** will be met: https://www.gnu.org/licenses/lgpl-3.0.html.
**
** GNU General Public License Usage
** Alternatively, this file may be used under the terms of the GNU
** General Public License version 2.0 or (at your option) the GNU General
** Public license version 3 or any later version approved by the KDE Free
** Qt Foundation. The licenses are as published by the Free Software
** Foundation and appearing in the file LICENSE.GPL2 and LICENSE.GPL3
** included in the packaging of this file. Please review the following
** information to ensure the GNU General Public License requirements will
** be met: https://www.gnu.org/licenses/gpl-2.0.html and
** https://www.gnu.org/licenses/gpl-3.0.html.
**
** $QT_END_LICENSE$
**
****************************************************************************/

#ifndef QV4GC_H
#define QV4GC_H

//
// W A R N I N G
// -------------
//
// This file is not part of the Qt API. It exists purely as an
// implementation detail. This header file may change from version to
// version without notice, or even be removed.
//
// We mean it.
//

#include <private/qv4global_p.h>
#include <private/qv4value_p.h>
#include <private/qv4scopedvalue_p.h>
#include <private/qv4object_p.h>
#include <private/qv4mmdefs_p.h>
#include <QVector>

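// The string constants below name environment variables that the memory manager
// implementation consults (for example to cap chunk sizes or to enable allocation
// statistics); the exact handling lives in the corresponding .cpp file.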
#define QV4_MM_MAXBLOCK_SHIFT "QV4_MM_MAXBLOCK_SHIFT"
#define QV4_MM_MAX_CHUNK_SIZE "QV4_MM_MAX_CHUNK_SIZE"
#define QV4_MM_STATS "QV4_MM_STATS"

#define MM_DEBUG 0

QT_BEGIN_NAMESPACE

namespace QV4 {

struct ChunkAllocator;
struct MemorySegment;

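// BlockAllocator hands out small, slot-sized items from fixed-size Chunks: a bump
// pointer (nextFree/nFree) serves fresh space, while freed runs are recycled through
// the size-segregated free lists in freeBins.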
struct BlockAllocator {
    BlockAllocator(ChunkAllocator *chunkAllocator, ExecutionEngine *engine)
        : chunkAllocator(chunkAllocator), engine(engine)
    {
        memset(freeBins, 0, sizeof(freeBins));
    }

    enum { NumBins = 8 };

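    // Free runs of fewer than NumBins slots are binned by their exact slot count;
    // anything larger ends up in the last bin (NumBins - 1).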
    static inline size_t binForSlots(size_t nSlots) {
        return nSlots >= NumBins ? NumBins - 1 : nSlots;
    }

    HeapItem *allocate(size_t size, bool forceAllocation = false);

    size_t totalSlots() const {
        return Chunk::AvailableSlots*chunks.size();
    }

    size_t allocatedMem() const {
        return chunks.size()*Chunk::DataSize;
    }
    size_t usedMem() const {
        uint used = 0;
        for (auto c : chunks)
            used += c->nUsedSlots()*Chunk::SlotSize;
        return used;
    }

    void sweep();
    void freeAll();
    void resetBlackBits();
    void collectGrayItems(MarkStack *markStack);

    // bump allocations
    HeapItem *nextFree = nullptr;
    size_t nFree = 0;
    size_t usedSlotsAfterLastSweep = 0;
    HeapItem *freeBins[NumBins];
    ChunkAllocator *chunkAllocator;
    ExecutionEngine *engine;
    std::vector<Chunk *> chunks;
    uint *allocationStats = nullptr;
};

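// HugeItemAllocator handles requests that do not fit into a regular Chunk
// (larger than Chunk::DataSize, see MemoryManager::allocate below); each such
// item gets its own chunk, tracked individually in the chunks vector.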
struct HugeItemAllocator {
    HugeItemAllocator(ChunkAllocator *chunkAllocator, ExecutionEngine *engine)
        : chunkAllocator(chunkAllocator), engine(engine)
    {}

    HeapItem *allocate(size_t size);
    void sweep(ClassDestroyStatsCallback classCountPtr);
    void freeAll();
    void resetBlackBits();
    void collectGrayItems(MarkStack *markStack);

    size_t usedMem() const {
        size_t used = 0;
        for (const auto &c : chunks)
            used += c.size;
        return used;
    }

    ChunkAllocator *chunkAllocator;
    ExecutionEngine *engine;
    struct HugeChunk {
        MemorySegment *segment;
        Chunk *chunk;
        size_t size;
    };

    std::vector<HugeChunk> chunks;
};


class Q_QML_EXPORT MemoryManager
{
    Q_DISABLE_COPY(MemoryManager);

public:
    MemoryManager(ExecutionEngine *engine);
    ~MemoryManager();

    // TODO: this is only for 64bit (and x86 with SSE/AVX), so extend it for other architectures to be slightly more efficient (meaning, align on 8-byte boundaries).
    // Note: all occurrences of "16" in alloc/dealloc are also due to the alignment.
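    // align() rounds a request up to the next multiple of Chunk::SlotSize (a power
    // of two) using the usual add-and-mask idiom; e.g. with a 32-byte slot size,
    // align(40) yields 64.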
    Q_DECL_CONSTEXPR static inline std::size_t align(std::size_t size)
    { return (size + Chunk::SlotSize - 1) & ~(Chunk::SlotSize - 1); }

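    // The allocManaged<T>() overloads return raw, internal-class-initialized memory
    // for a Managed type; the caller is expected to run the type's init() itself
    // (the alloc<>() helper further down does both steps).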
    template<typename ManagedType>
    inline typename ManagedType::Data *allocManaged(std::size_t size, Heap::InternalClass *ic)
    {
        Q_STATIC_ASSERT(std::is_trivial< typename ManagedType::Data >::value);
        size = align(size);
        typename ManagedType::Data *d = static_cast<typename ManagedType::Data *>(allocData(size));
        d->internalClass.set(engine, ic);
        Q_ASSERT(d->internalClass && d->internalClass->vtable);
        Q_ASSERT(ic->vtable == ManagedType::staticVTable());
        return d;
    }

    template<typename ManagedType>
    inline typename ManagedType::Data *allocManaged(std::size_t size, InternalClass *ic)
    {
        return allocManaged<ManagedType>(size, ic->d());
    }

    template<typename ManagedType>
    inline typename ManagedType::Data *allocManaged(std::size_t size)
    {
        Scope scope(engine);
        Scoped<InternalClass> ic(scope, ManagedType::defaultInternalClass(engine));
        return allocManaged<ManagedType>(size, ic);
    }

    template <typename ObjectType>
    typename ObjectType::Data *allocateObject(Heap::InternalClass *ic)
    {
        Heap::Object *o = allocObjectWithMemberData(ObjectType::staticVTable(), ic->size);
        o->internalClass.set(engine, ic);
        Q_ASSERT(o->internalClass.get() && o->vtable());
        Q_ASSERT(o->vtable() == ObjectType::staticVTable());
        return static_cast<typename ObjectType::Data *>(o);
    }

    template <typename ObjectType>
    typename ObjectType::Data *allocateObject(InternalClass *ic)
    {
        return allocateObject<ObjectType>(ic->d());
    }

    template <typename ObjectType>
    typename ObjectType::Data *allocateObject()
    {
        Scope scope(engine);
        Scoped<InternalClass> ic(scope, ObjectType::defaultInternalClass(engine));
        ic = ic->changeVTable(ObjectType::staticVTable());
        ic = ic->changePrototype(ObjectType::defaultPrototype(engine)->d());
        return allocateObject<ObjectType>(ic);
    }

    template <typename ManagedType, typename Arg1>
    typename ManagedType::Data *allocWithStringData(std::size_t unmanagedSize, Arg1 arg1)
    {
        typename ManagedType::Data *o = reinterpret_cast<typename ManagedType::Data *>(allocString(unmanagedSize));
        o->internalClass.set(engine, ManagedType::defaultInternalClass(engine));
        Q_ASSERT(o->internalClass && o->internalClass->vtable);
        o->init(arg1);
        return o;
    }

    template <typename ObjectType, typename... Args>
    typename ObjectType::Data *allocObject(Heap::InternalClass *ic, Args... args)
    {
        typename ObjectType::Data *d = allocateObject<ObjectType>(ic);
        d->init(args...);
        return d;
    }

    template <typename ObjectType, typename... Args>
    typename ObjectType::Data *allocObject(InternalClass *ic, Args... args)
    {
        typename ObjectType::Data *d = allocateObject<ObjectType>(ic);
        d->init(args...);
        return d;
    }

    template <typename ObjectType, typename... Args>
    typename ObjectType::Data *allocate(Args... args)
    {
        Scope scope(engine);
        Scoped<ObjectType> t(scope, allocateObject<ObjectType>());
        t->d_unchecked()->init(args...);
        return t->d();
    }

    template <typename ManagedType, typename... Args>
    typename ManagedType::Data *alloc(Args... args)
    {
        Scope scope(engine);
        Scoped<ManagedType> t(scope, allocManaged<ManagedType>(sizeof(typename ManagedType::Data)));
        t->d_unchecked()->init(args...);
        return t->d();
    }
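    // Illustrative usage sketch (ArrayObject stands in for any Object subclass; in
    // practice callers usually go through a helper on ExecutionEngine):
    //     Heap::ArrayObject *a = engine->memoryManager->allocate<ArrayObject>();
    // allocate<>() and alloc<>() keep the freshly created heap object in a Scope
    // while running init(), so a garbage collection triggered during initialization
    // cannot reclaim it.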

    void runGC();

    void dumpStats() const;

    size_t getUsedMem() const;
    size_t getAllocatedMem() const;
    size_t getLargeItemsMem() const;

    // called when a JS object grows itself. Specifically: Heap::String::append
    // and InternalClassDataPrivate<PropertyAttributes>.
    void changeUnmanagedHeapSizeUsage(qptrdiff delta) { unmanagedHeapSize += delta; }

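    // allocIC<T>() carves internal-class data out of the dedicated icAllocator rather
    // than the regular block allocator; the returned memory is not init()-ed here.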
    template<typename ManagedType>
    typename ManagedType::Data *allocIC()
    {
        Heap::Base *b = *allocate(&icAllocator, align(sizeof(typename ManagedType::Data)));
        return static_cast<typename ManagedType::Data *>(b);
    }

    void registerWeakMap(Heap::MapObject *map);
    void registerWeakSet(Heap::SetObject *set);

protected:
    /// expects size to be aligned
    Heap::Base *allocString(std::size_t unmanagedSize);
    Heap::Base *allocData(std::size_t size);
    Heap::Object *allocObjectWithMemberData(const QV4::VTable *vtable, uint nMembers);

private:
    enum {
        MinUnmanagedHeapSizeGCLimit = 128 * 1024
    };

    void collectFromJSStack(MarkStack *markStack) const;
    void mark();
    void sweep(bool lastSweep = false, ClassDestroyStatsCallback classCountPtr = nullptr);
    bool shouldRunGC() const;
    void collectRoots(MarkStack *markStack);

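    // Central allocation path: optionally runs a collection first (in aggressive mode,
    // or when the unmanaged heap has outgrown its limit, in which case the limit is
    // raised when the unmanaged heap exceeds 75% of it and halved, but never below
    // MinUnmanagedHeapSizeGCLimit, when it falls under 25%), routes requests larger
    // than Chunk::DataSize to the huge item allocator, and otherwise retries the block
    // allocator with forceAllocation after a GC if the first attempt fails.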
    HeapItem *allocate(BlockAllocator *allocator, std::size_t size)
    {
        bool didGCRun = false;
        if (aggressiveGC) {
            runGC();
            didGCRun = true;
        }

        if (unmanagedHeapSize > unmanagedHeapSizeGCLimit) {
            if (!didGCRun)
                runGC();

            if (3*unmanagedHeapSizeGCLimit <= 4 * unmanagedHeapSize) {
                // more than 75% full, raise limit
                unmanagedHeapSizeGCLimit = std::max(unmanagedHeapSizeGCLimit,
                                                    unmanagedHeapSize) * 2;
            } else if (unmanagedHeapSize * 4 <= unmanagedHeapSizeGCLimit) {
                // less than 25% full, lower limit
                unmanagedHeapSizeGCLimit = qMax(std::size_t(MinUnmanagedHeapSizeGCLimit),
                                                unmanagedHeapSizeGCLimit/2);
            }
            didGCRun = true;
        }

        if (size > Chunk::DataSize)
            return hugeItemAllocator.allocate(size);

        if (HeapItem *m = allocator->allocate(size))
            return m;

        if (!didGCRun && shouldRunGC())
            runGC();

        return allocator->allocate(size, true);
    }

public:
    QV4::ExecutionEngine *engine;
    ChunkAllocator *chunkAllocator;
    BlockAllocator blockAllocator;
    BlockAllocator icAllocator;
    HugeItemAllocator hugeItemAllocator;
    PersistentValueStorage *m_persistentValues;
    PersistentValueStorage *m_weakValues;
    QVector<Value *> m_pendingFreedObjectWrapperValue;
    Heap::MapObject *weakMaps = nullptr;
    Heap::SetObject *weakSets = nullptr;

    std::size_t unmanagedHeapSize = 0; // number of heap bytes not managed by the memory manager, but held onto by managed items
    std::size_t unmanagedHeapSizeGCLimit;
    std::size_t usedSlotsAfterLastFullSweep = 0;

    bool gcBlocked = false;
    bool aggressiveGC = false;
    bool gcStats = false;
    bool gcCollectorStats = false;

    int allocationCount = 0;
    size_t lastAllocRequestedSlots = 0;

    struct {
        size_t maxReservedMem = 0;
        size_t maxAllocatedMem = 0;
        size_t maxUsedMem = 0;
        uint allocations[BlockAllocator::NumBins];
    } statistics;
};

}

QT_END_NAMESPACE

#endif // QV4GC_H
