/*
 * Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
 * Copyright (C) 2001 Peter Kelly (pmk@post.com)
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#ifndef Collector_h
#define Collector_h

#include <stddef.h>
#include <string.h>
#include <wtf/HashCountedSet.h>
#include <wtf/HashSet.h>
#include <wtf/Noncopyable.h>
#include <wtf/OwnPtr.h>
#include <wtf/StdLibExtras.h>
#include <wtf/Threading.h>

#if ENABLE(JSC_MULTIPLE_THREADS)
#include <pthread.h>
#endif

#if OS(SYMBIAN)
#include <wtf/symbian/BlockAllocatorSymbian.h>
#endif

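// ASSERT_CLASS_FITS_IN_CELL gives a compile-time guarantee that a JSCell subclass
// fits in a single fixed-size collector cell, e.g. ASSERT_CLASS_FITS_IN_CELL(MyCellSubclass);
// (MyCellSubclass is a placeholder for any concrete cell class).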
#define ASSERT_CLASS_FITS_IN_CELL(class) COMPILE_ASSERT(sizeof(class) <= CELL_SIZE, class_fits_in_cell)

namespace JSC {

    class CollectorBlock;
    class JSCell;
    class JSGlobalData;
    class JSValue;
    class MarkedArgumentBuffer;
    class MarkStack;

    enum OperationInProgress { NoOperation, Allocation, Collection };

    class LiveObjectIterator;

    struct CollectorHeap {
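        // Allocation cursor: the block and cell where the next allocation attempt resumes.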
        size_t nextBlock;
        size_t nextCell;
        CollectorBlock** blocks;

        void* nextNumber;

        size_t numBlocks;
        size_t usedBlocks;

        size_t extraCost;
        bool didShrink;

        OperationInProgress operationInProgress;
    };

    class Heap : public Noncopyable {
    public:
        class Thread;

        void destroy();

        void* allocateNumber(size_t);
        void* allocate(size_t);

        bool isBusy(); // true if an allocation or collection is in progress
        void collectAllGarbage();

        static const size_t minExtraCost = 256;
        static const size_t maxExtraCost = 1024 * 1024;

        void reportExtraMemoryCost(size_t cost);

        size_t objectCount() const;
        struct Statistics {
            size_t size;
            size_t free;
        };
        Statistics statistics() const;

        void protect(JSValue);
        void unprotect(JSValue);

        static Heap* heap(JSValue); // 0 for immediate values
        static Heap* heap(JSCell*);

        size_t globalObjectCount();
        size_t protectedObjectCount();
        size_t protectedGlobalObjectCount();
        HashCountedSet<const char*>* protectedObjectTypeCounts();

        void registerThread(); // Only needs to be called by clients that can use the same heap from multiple threads.

        static bool isCellMarked(const JSCell*);
        static void markCell(JSCell*);

        void markConservatively(MarkStack&, void* start, void* end);

        HashSet<MarkedArgumentBuffer*>& markListSet() { if (!m_markListSet) m_markListSet = new HashSet<MarkedArgumentBuffer*>; return *m_markListSet; }

        JSGlobalData* globalData() const { return m_globalData; }
        static bool isNumber(JSCell*);

        LiveObjectIterator primaryHeapBegin();
        LiveObjectIterator primaryHeapEnd();

    private:
        void reset();
        void sweep();
        static CollectorBlock* cellBlock(const JSCell*);
        static size_t cellOffset(const JSCell*);

        friend class JSGlobalData;
        Heap(JSGlobalData*);
        ~Heap();

        NEVER_INLINE CollectorBlock* allocateBlock();
        NEVER_INLINE void freeBlock(size_t);
        NEVER_INLINE void freeBlockPtr(CollectorBlock*);
        void freeBlocks();
        void resizeBlocks();
        void growBlocks(size_t neededBlocks);
        void shrinkBlocks(size_t neededBlocks);
        void clearMarkBits();
        void clearMarkBits(CollectorBlock*);
        size_t markedCells(size_t startBlock = 0, size_t startCell = 0) const;

        void recordExtraCost(size_t);

        void addToStatistics(Statistics&) const;

        void markRoots();
        void markProtectedObjects(MarkStack&);
        void markCurrentThreadConservatively(MarkStack&);
        void markCurrentThreadConservativelyInternal(MarkStack&);
        void markOtherThreadConservatively(MarkStack&, Thread*);
        void markStackObjectsConservatively(MarkStack&);

        typedef HashCountedSet<JSCell*> ProtectCountSet;

        CollectorHeap m_heap;

        ProtectCountSet m_protectedValues;

        HashSet<MarkedArgumentBuffer*>* m_markListSet;

#if ENABLE(JSC_MULTIPLE_THREADS)
        void makeUsableFromMultipleThreads();

        static void unregisterThread(void*);
        void unregisterThread();

        Mutex m_registeredThreadsMutex;
        Thread* m_registeredThreads;
        pthread_key_t m_currentThreadRegistrar;
#endif

#if OS(SYMBIAN)
        // Allocates collector blocks with correct alignment
        WTF::AlignedBlockAllocator m_blockallocator;
#endif

        JSGlobalData* m_globalData;
    };

    // tunable parameters
    template<size_t bytesPerWord> struct CellSize;

    // cell size needs to be a power of two for certain optimizations in collector.cpp
#if USE(JSVALUE32)
    template<> struct CellSize<sizeof(uint32_t)> { static const size_t m_value = 32; };
#else
    template<> struct CellSize<sizeof(uint32_t)> { static const size_t m_value = 64; };
#endif
    template<> struct CellSize<sizeof(uint64_t)> { static const size_t m_value = 64; };

#if OS(WINCE) || OS(SYMBIAN)
    const size_t BLOCK_SIZE = 64 * 1024; // 64k
#else
    const size_t BLOCK_SIZE = 64 * 4096; // 256k
#endif

    // derived constants
    const size_t BLOCK_OFFSET_MASK = BLOCK_SIZE - 1;
    const size_t BLOCK_MASK = ~BLOCK_OFFSET_MASK;
    const size_t MINIMUM_CELL_SIZE = CellSize<sizeof(void*)>::m_value;
    const size_t CELL_ARRAY_LENGTH = (MINIMUM_CELL_SIZE / sizeof(double)) + (MINIMUM_CELL_SIZE % sizeof(double) != 0 ? sizeof(double) : 0);
    const size_t CELL_SIZE = CELL_ARRAY_LENGTH * sizeof(double);
    const size_t SMALL_CELL_SIZE = CELL_SIZE / 2;
    const size_t CELL_MASK = CELL_SIZE - 1;
    const size_t CELL_ALIGN_MASK = ~CELL_MASK;
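    // A block holds its cells, a mark bitmap with one bit per cell, and a trailing
    // Heap* back-pointer (see CollectorBlock below); CELLS_PER_BLOCK is the largest
    // cell count whose cells plus bitmap fit in the space left after the Heap*.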
    const size_t CELLS_PER_BLOCK = (BLOCK_SIZE - sizeof(Heap*)) * 8 * CELL_SIZE / (8 * CELL_SIZE + 1) / CELL_SIZE; // one bitmap byte can represent 8 cells.

    const size_t BITMAP_SIZE = (CELLS_PER_BLOCK + 7) / 8;
    const size_t BITMAP_WORDS = (BITMAP_SIZE + 3) / sizeof(uint32_t);

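    // One mark bit per cell in a block, packed into 32-bit words: bit (n & 0x1F) of
    // word (n >> 5) records whether cell n is marked.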
    struct CollectorBitmap {
        uint32_t bits[BITMAP_WORDS];
        bool get(size_t n) const { return !!(bits[n >> 5] & (1 << (n & 0x1F))); }
        void set(size_t n) { bits[n >> 5] |= (1 << (n & 0x1F)); }
        void clear(size_t n) { bits[n >> 5] &= ~(1 << (n & 0x1F)); }
        void clearAll() { memset(bits, 0, sizeof(bits)); }
        size_t count(size_t startCell = 0)
        {
            size_t result = 0;
            for ( ; (startCell & 0x1F) != 0; ++startCell) {
                if (get(startCell))
                    ++result;
            }
            for (size_t i = startCell >> 5; i < BITMAP_WORDS; ++i)
                result += WTF::bitCount(bits[i]);
            return result;
        }
        bool isEmpty() // Much more efficient than testing count() == 0.
        {
            for (size_t i = 0; i < BITMAP_WORDS; ++i)
                if (bits[i] != 0)
                    return false;
            return true;
        }
    };

    struct CollectorCell {
        double memory[CELL_ARRAY_LENGTH];
    };

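    // Block layout: the cells come first, followed by the mark bitmap and a
    // back-pointer to the owning Heap. Blocks are allocated on BLOCK_SIZE
    // boundaries, which is what lets cellBlock() below recover a cell's block
    // by masking its address.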
    class CollectorBlock {
    public:
        CollectorCell cells[CELLS_PER_BLOCK];
        CollectorBitmap marked;
        Heap* heap;
    };

    struct HeapConstants {
        static const size_t cellSize = CELL_SIZE;
        static const size_t cellsPerBlock = CELLS_PER_BLOCK;
        typedef CollectorCell Cell;
        typedef CollectorBlock Block;
    };

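    // Because blocks are BLOCK_SIZE-aligned, masking a cell pointer with BLOCK_MASK
    // recovers its CollectorBlock, and the remaining offset divided by CELL_SIZE is
    // the cell index used to address the mark bitmap.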
    inline CollectorBlock* Heap::cellBlock(const JSCell* cell)
    {
        return reinterpret_cast<CollectorBlock*>(reinterpret_cast<uintptr_t>(cell) & BLOCK_MASK);
    }

    inline size_t Heap::cellOffset(const JSCell* cell)
    {
        return (reinterpret_cast<uintptr_t>(cell) & BLOCK_OFFSET_MASK) / CELL_SIZE;
    }

    inline bool Heap::isCellMarked(const JSCell* cell)
    {
        return cellBlock(cell)->marked.get(cellOffset(cell));
    }

    inline void Heap::markCell(JSCell* cell)
    {
        cellBlock(cell)->marked.set(cellOffset(cell));
    }

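    // Inline fast path: costs of minExtraCost or less are ignored; larger costs are
    // handed to the out-of-line recordExtraCost() so the collector can account for
    // memory owned by GC objects but allocated outside the heap.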
    inline void Heap::reportExtraMemoryCost(size_t cost)
    {
        if (cost > minExtraCost)
            recordExtraCost(cost);
    }

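    // A number needs only half a cell (SMALL_CELL_SIZE), so allocateNumber() splits a
    // full cell in two: it returns the first half and caches the second half in
    // m_heap.nextNumber, to be handed out by the next call.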
    inline void* Heap::allocateNumber(size_t s)
    {
        if (void* result = m_heap.nextNumber) {
            m_heap.nextNumber = 0;
            return result;
        }

        void* result = allocate(s);
        m_heap.nextNumber = static_cast<char*>(result) + (CELL_SIZE / 2);
        return result;
    }

} // namespace JSC

#endif /* Collector_h */