| 1 | /* | 
| 2 |  *  Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved. | 
| 3 |  *  Copyright (C) 2007 Eric Seidel <eric@webkit.org> | 
| 4 |  * | 
| 5 |  *  This library is free software; you can redistribute it and/or | 
| 6 |  *  modify it under the terms of the GNU Lesser General Public | 
| 7 |  *  License as published by the Free Software Foundation; either | 
| 8 |  *  version 2 of the License, or (at your option) any later version. | 
| 9 |  * | 
| 10 |  *  This library is distributed in the hope that it will be useful, | 
| 11 |  *  but WITHOUT ANY WARRANTY; without even the implied warranty of | 
| 12 |  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU | 
| 13 |  *  Lesser General Public License for more details. | 
| 14 |  * | 
| 15 |  *  You should have received a copy of the GNU Lesser General Public | 
| 16 |  *  License along with this library; if not, write to the Free Software | 
| 17 |  *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA | 
| 18 |  * | 
| 19 |  */ | 
| 20 |  | 
| 21 | #include "config.h" | 
| 22 | #include "Collector.h" | 
| 23 |  | 
| 24 | #include "ArgList.h" | 
| 25 | #include "CallFrame.h" | 
| 26 | #include "CodeBlock.h" | 
| 27 | #include "CollectorHeapIterator.h" | 
| 28 | #include "Interpreter.h" | 
| 29 | #include "JSArray.h" | 
| 30 | #include "JSGlobalObject.h" | 
| 31 | #include "JSLock.h" | 
| 32 | #include "JSONObject.h" | 
| 33 | #include "JSString.h" | 
| 34 | #include "JSValue.h" | 
| 35 | #include "JSZombie.h" | 
| 36 | #include "MarkStack.h" | 
| 37 | #include "Nodes.h" | 
| 38 | #include "Tracing.h" | 
| 39 | #include <algorithm> | 
| 40 | #include <limits.h> | 
| 41 | #include <setjmp.h> | 
| 42 | #include <stdlib.h> | 
| 43 | #include <wtf/FastMalloc.h> | 
| 44 | #include <wtf/HashCountedSet.h> | 
| 45 | #include <wtf/UnusedParam.h> | 
| 46 | #include <wtf/VMTags.h> | 
| 47 |  | 
| 48 | #if OS(DARWIN) | 
| 49 |  | 
| 50 | #include <mach/mach_init.h> | 
| 51 | #include <mach/mach_port.h> | 
| 52 | #include <mach/task.h> | 
| 53 | #include <mach/thread_act.h> | 
| 54 | #include <mach/vm_map.h> | 
// clang's libc++ headers do not pull in pthread.h (but libstdc++ does)
| 56 | #include <pthread.h> | 
| 57 |  | 
| 58 | #elif OS(WINDOWS) | 
| 59 |  | 
| 60 | #include <windows.h> | 
| 61 | #include <malloc.h> | 
| 62 |  | 
| 63 | #elif OS(HAIKU) | 
| 64 |  | 
| 65 | #include <OS.h> | 
| 66 |  | 
| 67 | #elif OS(UNIX) | 
| 68 |  | 
| 69 | #include <stdlib.h> | 
| 70 | #if !OS(HAIKU) | 
| 71 | #include <sys/mman.h> | 
| 72 | #endif | 
| 73 | #include <unistd.h> | 
| 74 |  | 
| 75 | #if OS(SOLARIS) | 
| 76 | #include <thread.h> | 
| 77 | #else | 
| 78 | #include <pthread.h> | 
| 79 | #endif | 
| 80 |  | 
| 81 | #if HAVE(PTHREAD_NP_H) | 
| 82 | #include <pthread_np.h> | 
| 83 | #endif | 
| 84 |  | 
| 85 | #if OS(QNX) | 
| 86 | #include <fcntl.h> | 
| 87 | #include <sys/procfs.h> | 
| 88 | #include <stdio.h> | 
| 89 | #include <errno.h> | 
| 90 | #endif | 
| 91 |  | 
| 92 | #endif | 
| 93 |  | 
| 94 | #define COLLECT_ON_EVERY_ALLOCATION 0 | 
| 95 |  | 
| 96 | using std::max; | 
| 97 |  | 
| 98 | namespace JSC { | 
| 99 |  | 
| 100 | // tunable parameters | 
| 101 |  | 
| 102 | const size_t GROWTH_FACTOR = 2; | 
| 103 | const size_t LOW_WATER_FACTOR = 4; | 
| 104 | const size_t ALLOCATIONS_PER_COLLECTION = 3600; | 
| 105 | // This value has to be a macro to be used in max() without introducing | 
| 106 | // a PIC branch in Mach-O binaries, see <rdar://problem/5971391>. | 
| 107 | #define MIN_ARRAY_SIZE (static_cast<size_t>(14)) | 
| 108 |  | 
| 109 | #if ENABLE(JSC_MULTIPLE_THREADS) | 
| 110 |  | 
| 111 | #if OS(DARWIN) | 
| 112 | typedef mach_port_t PlatformThread; | 
| 113 | #elif OS(WINDOWS) | 
| 114 | typedef HANDLE PlatformThread; | 
| 115 | #endif | 
| 116 |  | 
| 117 | class Heap::Thread { | 
| 118 | public: | 
| 119 |     Thread(pthread_t pthread, const PlatformThread& platThread, void* base)  | 
| 120 |         : posixThread(pthread) | 
| 121 |         , platformThread(platThread) | 
| 122 |         , stackBase(base) | 
| 123 |     { | 
| 124 |     } | 
| 125 |  | 
| 126 |     Thread* next; | 
| 127 |     pthread_t posixThread; | 
| 128 |     PlatformThread platformThread; | 
| 129 |     void* stackBase; | 
| 130 | }; | 
| 131 |  | 
| 132 | #endif | 
| 133 |  | 
| 134 | Heap::Heap(JSGlobalData* globalData) | 
| 135 |     : m_markListSet(0) | 
| 136 | #if ENABLE(JSC_MULTIPLE_THREADS) | 
| 137 |     , m_registeredThreads(0) | 
| 138 |     , m_currentThreadRegistrar(0) | 
| 139 | #endif | 
| 140 |     , m_globalData(globalData) | 
| 141 | #if OS(SYMBIAN) | 
| 142 |     , m_blockallocator(JSCCOLLECTOR_VIRTUALMEM_RESERVATION, BLOCK_SIZE) | 
| 143 | #endif | 
| 144 | { | 
| 145 |     ASSERT(globalData); | 
    memset(&m_heap, 0, sizeof(CollectorHeap));
| 147 |     allocateBlock(); | 
| 148 | } | 
| 149 |  | 
| 150 | Heap::~Heap() | 
| 151 | { | 
| 152 |     // The destroy function must already have been called, so assert this. | 
| 153 |     ASSERT(!m_globalData); | 
| 154 | } | 
| 155 |  | 
| 156 | void Heap::destroy() | 
| 157 | { | 
| 158 |     JSLock lock(SilenceAssertionsOnly); | 
| 159 |  | 
| 160 |     if (!m_globalData) | 
| 161 |         return; | 
| 162 |  | 
| 163 |     ASSERT(!m_globalData->dynamicGlobalObject); | 
| 164 |     ASSERT(!isBusy()); | 
| 165 |      | 
| 166 |     // The global object is not GC protected at this point, so sweeping may delete it | 
| 167 |     // (and thus the global data) before other objects that may use the global data. | 
| 168 |     RefPtr<JSGlobalData> protect(m_globalData); | 
| 169 |  | 
| 170 |     delete m_markListSet; | 
| 171 |     m_markListSet = 0; | 
| 172 |  | 
| 173 |     freeBlocks(); | 
| 174 |  | 
| 175 | #if ENABLE(JSC_MULTIPLE_THREADS) | 
| 176 |     if (m_currentThreadRegistrar) { | 
| 177 |         int error = pthread_key_delete(m_currentThreadRegistrar); | 
| 178 |         ASSERT_UNUSED(error, !error); | 
| 179 |     } | 
| 180 |  | 
| 181 |     MutexLocker registeredThreadsLock(m_registeredThreadsMutex); | 
| 182 |     for (Heap::Thread* t = m_registeredThreads; t;) { | 
| 183 |         Heap::Thread* next = t->next; | 
| 184 |         delete t; | 
| 185 |         t = next; | 
| 186 |     } | 
| 187 | #endif | 
| 188 | #if OS(SYMBIAN) | 
| 189 |     m_blockallocator.destroy(); | 
| 190 | #endif | 
| 191 |     m_globalData = 0; | 
| 192 | } | 
| 193 |  | 
| 194 | NEVER_INLINE CollectorBlock* Heap::allocateBlock() | 
| 195 | { | 
| 196 | #if OS(DARWIN) | 
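    // Passing BLOCK_OFFSET_MASK as vm_map's alignment mask guarantees a BLOCK_SIZE-aligned block;
    // VM_TAG_FOR_COLLECTOR_MEMORY makes the allocation attributable in memory tools.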
| 197 |     vm_address_t address = 0; | 
| 198 |     vm_map(current_task(), &address, BLOCK_SIZE, BLOCK_OFFSET_MASK, VM_FLAGS_ANYWHERE | VM_TAG_FOR_COLLECTOR_MEMORY, MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT); | 
| 199 | #elif OS(SYMBIAN) | 
| 200 |     void* address = m_blockallocator.alloc();   | 
| 201 |     if (!address) | 
| 202 |         CRASH(); | 
| 203 | #elif OS(WINCE) | 
| 204 |     void* address = VirtualAlloc(NULL, BLOCK_SIZE, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); | 
| 205 | #elif OS(WINDOWS) | 
| 206 | #if COMPILER(MINGW) && !COMPILER(MINGW64) | 
| 207 |     void* address = __mingw_aligned_malloc(BLOCK_SIZE, BLOCK_SIZE); | 
| 208 | #else | 
| 209 |     void* address = _aligned_malloc(BLOCK_SIZE, BLOCK_SIZE); | 
| 210 | #endif | 
| 211 |     memset(address, 0, BLOCK_SIZE); | 
| 212 | #elif HAVE(POSIX_MEMALIGN) | 
| 213 |     void* address; | 
| 214 |     posix_memalign(&address, BLOCK_SIZE, BLOCK_SIZE); | 
| 215 | #else | 
| 216 |  | 
| 217 | #if ENABLE(JSC_MULTIPLE_THREADS) | 
| 218 | #error Need to initialize pagesize safely. | 
| 219 | #endif | 
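    // mmap cannot be asked for BLOCK_SIZE alignment directly, so over-allocate by up to
    // (BLOCK_SIZE - pagesize) and unmap the misaligned head and unused tail below, leaving a
    // BLOCK_SIZE-aligned block.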
| 220 |     static size_t pagesize = getpagesize(); | 
| 221 |  | 
    size_t extra = 0;
| 223 |     if (BLOCK_SIZE > pagesize) | 
| 224 |         extra = BLOCK_SIZE - pagesize; | 
| 225 |  | 
    void* mmapResult = mmap(NULL, BLOCK_SIZE + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
| 227 |     uintptr_t address = reinterpret_cast<uintptr_t>(mmapResult); | 
| 228 |  | 
| 229 |     size_t adjust = 0; | 
| 230 |     if ((address & BLOCK_OFFSET_MASK) != 0) | 
| 231 |         adjust = BLOCK_SIZE - (address & BLOCK_OFFSET_MASK); | 
| 232 |  | 
| 233 |     if (adjust > 0) | 
        munmap(reinterpret_cast<char*>(address), adjust);

    if (adjust < extra)
        munmap(reinterpret_cast<char*>(address + adjust + BLOCK_SIZE), extra - adjust);
| 238 |  | 
| 239 |     address += adjust; | 
| 240 | #endif | 
| 241 |  | 
| 242 |     // Initialize block. | 
| 243 |  | 
| 244 |     CollectorBlock* block = reinterpret_cast<CollectorBlock*>(address); | 
| 245 |     block->heap = this; | 
| 246 |     clearMarkBits(block); | 
| 247 |  | 
| 248 |     Structure* dummyMarkableCellStructure = m_globalData->dummyMarkableCellStructure.get(); | 
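    // Every cell starts out as a dummy markable cell so that conservative marking, which can land on
    // cells that were never allocated, always finds a valid JSCell.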
| 249 |     for (size_t i = 0; i < HeapConstants::cellsPerBlock; ++i) | 
| 250 |         new (block->cells + i) JSCell(dummyMarkableCellStructure); | 
| 251 |      | 
| 252 |     // Add block to blocks vector. | 
| 253 |  | 
| 254 |     size_t numBlocks = m_heap.numBlocks; | 
| 255 |     if (m_heap.usedBlocks == numBlocks) { | 
| 256 |         static const size_t maxNumBlocks = ULONG_MAX / sizeof(CollectorBlock*) / GROWTH_FACTOR; | 
| 257 |         if (numBlocks > maxNumBlocks) | 
| 258 |             CRASH(); | 
        numBlocks = max(MIN_ARRAY_SIZE, numBlocks * GROWTH_FACTOR);
| 260 |         m_heap.numBlocks = numBlocks; | 
| 261 |         m_heap.blocks = static_cast<CollectorBlock**>(fastRealloc(m_heap.blocks, numBlocks * sizeof(CollectorBlock*))); | 
| 262 |     } | 
| 263 |     m_heap.blocks[m_heap.usedBlocks++] = block; | 
| 264 |  | 
| 265 |     return block; | 
| 266 | } | 
| 267 |  | 
| 268 | NEVER_INLINE void Heap::freeBlock(size_t block) | 
| 269 | { | 
| 270 |     m_heap.didShrink = true; | 
| 271 |  | 
| 272 |     ObjectIterator it(m_heap, block); | 
| 273 |     ObjectIterator end(m_heap, block + 1); | 
| 274 |     for ( ; it != end; ++it) | 
| 275 |         (*it)->~JSCell(); | 
| 276 |     freeBlockPtr(m_heap.blocks[block]); | 
| 277 |  | 
| 278 |     // swap with the last block so we compact as we go | 
| 279 |     m_heap.blocks[block] = m_heap.blocks[m_heap.usedBlocks - 1]; | 
| 280 |     m_heap.usedBlocks--; | 
| 281 |  | 
| 282 |     if (m_heap.numBlocks > MIN_ARRAY_SIZE && m_heap.usedBlocks < m_heap.numBlocks / LOW_WATER_FACTOR) { | 
| 283 |         m_heap.numBlocks = m_heap.numBlocks / GROWTH_FACTOR;  | 
| 284 |         m_heap.blocks = static_cast<CollectorBlock**>(fastRealloc(m_heap.blocks, m_heap.numBlocks * sizeof(CollectorBlock*))); | 
| 285 |     } | 
| 286 | } | 
| 287 |  | 
| 288 | NEVER_INLINE void Heap::freeBlockPtr(CollectorBlock* block) | 
| 289 | { | 
| 290 | #if OS(DARWIN)     | 
| 291 |     vm_deallocate(current_task(), reinterpret_cast<vm_address_t>(block), BLOCK_SIZE); | 
| 292 | #elif OS(SYMBIAN) | 
| 293 |     m_blockallocator.free(reinterpret_cast<void*>(block)); | 
| 294 | #elif OS(WINCE) | 
| 295 |     VirtualFree(block, 0, MEM_RELEASE); | 
| 296 | #elif OS(WINDOWS) | 
| 297 | #if COMPILER(MINGW) && !COMPILER(MINGW64) | 
| 298 |     __mingw_aligned_free(block); | 
| 299 | #else | 
| 300 |     _aligned_free(block); | 
| 301 | #endif | 
| 302 | #elif HAVE(POSIX_MEMALIGN) | 
| 303 |     free(block); | 
| 304 | #else | 
    munmap(reinterpret_cast<char*>(block), BLOCK_SIZE);
| 306 | #endif | 
| 307 | } | 
| 308 |  | 
| 309 | void Heap::freeBlocks() | 
| 310 | { | 
| 311 |     ProtectCountSet protectedValuesCopy = m_protectedValues; | 
| 312 |  | 
| 313 |     clearMarkBits(); | 
| 314 |     ProtectCountSet::iterator protectedValuesEnd = protectedValuesCopy.end(); | 
| 315 |     for (ProtectCountSet::iterator it = protectedValuesCopy.begin(); it != protectedValuesEnd; ++it) | 
        markCell(it->first);
| 317 |  | 
| 318 |     m_heap.nextCell = 0; | 
| 319 |     m_heap.nextBlock = 0; | 
| 320 |     DeadObjectIterator it(m_heap, m_heap.nextBlock, m_heap.nextCell); | 
| 321 |     DeadObjectIterator end(m_heap, m_heap.usedBlocks); | 
| 322 |     for ( ; it != end; ++it) | 
| 323 |         (*it)->~JSCell(); | 
| 324 |  | 
| 325 |     ASSERT(!protectedObjectCount()); | 
| 326 |  | 
| 327 |     protectedValuesEnd = protectedValuesCopy.end(); | 
| 328 |     for (ProtectCountSet::iterator it = protectedValuesCopy.begin(); it != protectedValuesEnd; ++it) | 
| 329 |         it->first->~JSCell(); | 
| 330 |  | 
| 331 |     for (size_t block = 0; block < m_heap.usedBlocks; ++block) | 
        freeBlockPtr(m_heap.blocks[block]);
| 333 |  | 
| 334 |     fastFree(m_heap.blocks); | 
| 335 |  | 
    memset(&m_heap, 0, sizeof(CollectorHeap));
| 337 | } | 
| 338 |  | 
void Heap::recordExtraCost(size_t cost)
| 340 | { | 
| 341 |     // Our frequency of garbage collection tries to balance memory use against speed | 
| 342 |     // by collecting based on the number of newly created values. However, for values | 
| 343 |     // that hold on to a great deal of memory that's not in the form of other JS values, | 
| 344 |     // that is not good enough - in some cases a lot of those objects can pile up and | 
| 345 |     // use crazy amounts of memory without a GC happening. So we track these extra | 
| 346 |     // memory costs. Only unusually large objects are noted, and we only keep track | 
| 347 |     // of this extra cost until the next GC. In garbage collected languages, most values | 
| 348 |     // are either very short lived temporaries, or have extremely long lifetimes. So | 
| 349 |     // if a large value survives one garbage collection, there is not much point to | 
| 350 |     // collecting more frequently as long as it stays alive. | 
| 351 |  | 
| 352 |     if (m_heap.extraCost > maxExtraCost && m_heap.extraCost > m_heap.usedBlocks * BLOCK_SIZE / 2) { | 
| 353 |         // If the last iteration through the heap deallocated blocks, we need | 
| 354 |         // to clean up remaining garbage before marking. Otherwise, the conservative | 
| 355 |         // marking mechanism might follow a pointer to unmapped memory. | 
| 356 |         if (m_heap.didShrink) | 
| 357 |             sweep(); | 
| 358 |         reset(); | 
| 359 |     } | 
| 360 |     m_heap.extraCost += cost; | 
| 361 | } | 
| 362 |  | 
| 363 | void* Heap::allocate(size_t s) | 
| 364 | { | 
| 365 |     typedef HeapConstants::Block Block; | 
| 366 |     typedef HeapConstants::Cell Cell; | 
| 367 |      | 
| 368 |     ASSERT(JSLock::lockCount() > 0); | 
| 369 |     ASSERT(JSLock::currentThreadIsHoldingLock()); | 
| 370 |     ASSERT_UNUSED(s, s <= HeapConstants::cellSize); | 
| 371 |  | 
| 372 |     ASSERT(m_heap.operationInProgress == NoOperation); | 
| 373 |  | 
| 374 | #if COLLECT_ON_EVERY_ALLOCATION | 
| 375 |     collectAllGarbage(); | 
| 376 |     ASSERT(m_heap.operationInProgress == NoOperation); | 
| 377 | #endif | 
| 378 |  | 
| 379 | allocate: | 
| 380 |  | 
| 381 |     // Fast case: find the next garbage cell and recycle it. | 
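    // A cell left unmarked by the last collection is garbage; its destructor runs here, lazily,
    // at allocation time, instead of in an eager sweep pass.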
| 382 |  | 
| 383 |     do { | 
| 384 |         ASSERT(m_heap.nextBlock < m_heap.usedBlocks); | 
| 385 |         Block* block = reinterpret_cast<Block*>(m_heap.blocks[m_heap.nextBlock]); | 
| 386 |         do { | 
| 387 |             ASSERT(m_heap.nextCell < HeapConstants::cellsPerBlock); | 
            if (!block->marked.get(m_heap.nextCell)) { // Always false for the last cell in the block
| 389 |                 Cell* cell = block->cells + m_heap.nextCell; | 
| 390 |  | 
| 391 |                 m_heap.operationInProgress = Allocation; | 
| 392 |                 JSCell* imp = reinterpret_cast<JSCell*>(cell); | 
| 393 |                 imp->~JSCell(); | 
| 394 |                 m_heap.operationInProgress = NoOperation; | 
| 395 |  | 
| 396 |                 ++m_heap.nextCell; | 
| 397 |                 return cell; | 
| 398 |             } | 
| 399 |         } while (++m_heap.nextCell != HeapConstants::cellsPerBlock); | 
| 400 |         m_heap.nextCell = 0; | 
| 401 |     } while (++m_heap.nextBlock != m_heap.usedBlocks); | 
| 402 |  | 
| 403 |     // Slow case: reached the end of the heap. Mark live objects and start over. | 
| 404 |  | 
| 405 |     reset(); | 
| 406 |     goto allocate; | 
| 407 | } | 
| 408 |  | 
| 409 | void Heap::resizeBlocks() | 
| 410 | { | 
| 411 |     m_heap.didShrink = false; | 
| 412 |  | 
| 413 |     size_t usedCellCount = markedCells(); | 
    size_t minCellCount = usedCellCount + max(ALLOCATIONS_PER_COLLECTION, usedCellCount);
| 415 |     size_t minBlockCount = (minCellCount + HeapConstants::cellsPerBlock - 1) / HeapConstants::cellsPerBlock; | 
| 416 |  | 
| 417 |     size_t maxCellCount = 1.25f * minCellCount; | 
| 418 |     size_t maxBlockCount = (maxCellCount + HeapConstants::cellsPerBlock - 1) / HeapConstants::cellsPerBlock; | 
| 419 |  | 
| 420 |     if (m_heap.usedBlocks < minBlockCount) | 
        growBlocks(minBlockCount);
    else if (m_heap.usedBlocks > maxBlockCount)
        shrinkBlocks(maxBlockCount);
| 424 | } | 
| 425 |  | 
| 426 | void Heap::growBlocks(size_t neededBlocks) | 
| 427 | { | 
| 428 |     ASSERT(m_heap.usedBlocks < neededBlocks); | 
| 429 |     while (m_heap.usedBlocks < neededBlocks) | 
| 430 |         allocateBlock(); | 
| 431 | } | 
| 432 |  | 
| 433 | void Heap::shrinkBlocks(size_t neededBlocks) | 
| 434 | { | 
| 435 |     ASSERT(m_heap.usedBlocks > neededBlocks); | 
| 436 |      | 
| 437 |     // Clear the always-on last bit, so isEmpty() isn't fooled by it. | 
| 438 |     for (size_t i = 0; i < m_heap.usedBlocks; ++i) | 
        m_heap.blocks[i]->marked.clear(HeapConstants::cellsPerBlock - 1);
| 440 |  | 
| 441 |     for (size_t i = 0; i != m_heap.usedBlocks && m_heap.usedBlocks != neededBlocks; ) { | 
| 442 |         if (m_heap.blocks[i]->marked.isEmpty()) { | 
            freeBlock(i);
| 444 |         } else | 
| 445 |             ++i; | 
| 446 |     } | 
| 447 |  | 
| 448 |     // Reset the always-on last bit. | 
| 449 |     for (size_t i = 0; i < m_heap.usedBlocks; ++i) | 
| 450 |         m_heap.blocks[i]->marked.set(HeapConstants::cellsPerBlock - 1); | 
| 451 | } | 
| 452 |  | 
| 453 | #if OS(WINCE) | 
| 454 | void* g_stackBase = 0; | 
| 455 |  | 
| 456 | inline bool isPageWritable(void* page) | 
| 457 | { | 
| 458 |     MEMORY_BASIC_INFORMATION memoryInformation; | 
| 459 |     DWORD result = VirtualQuery(page, &memoryInformation, sizeof(memoryInformation)); | 
| 460 |  | 
| 461 |     // return false on error, including ptr outside memory | 
| 462 |     if (result != sizeof(memoryInformation)) | 
| 463 |         return false; | 
| 464 |  | 
| 465 |     DWORD protect = memoryInformation.Protect & ~(PAGE_GUARD | PAGE_NOCACHE); | 
| 466 |     return protect == PAGE_READWRITE | 
| 467 |         || protect == PAGE_WRITECOPY | 
| 468 |         || protect == PAGE_EXECUTE_READWRITE | 
| 469 |         || protect == PAGE_EXECUTE_WRITECOPY; | 
| 470 | } | 
| 471 |  | 
| 472 | static void* getStackBase(void* previousFrame) | 
| 473 | { | 
| 474 |     // find the address of this stack frame by taking the address of a local variable | 
| 475 |     bool isGrowingDownward; | 
| 476 |     void* thisFrame = (void*)(&isGrowingDownward); | 
| 477 |  | 
| 478 |     isGrowingDownward = previousFrame < &thisFrame; | 
| 479 |     static DWORD pageSize = 0; | 
| 480 |     if (!pageSize) { | 
| 481 |         SYSTEM_INFO systemInfo; | 
| 482 |         GetSystemInfo(&systemInfo); | 
| 483 |         pageSize = systemInfo.dwPageSize; | 
| 484 |     } | 
| 485 |  | 
| 486 |     // scan all of memory starting from this frame, and return the last writeable page found | 
| 487 |     register char* currentPage = (char*)((DWORD)thisFrame & ~(pageSize - 1)); | 
| 488 |     if (isGrowingDownward) { | 
| 489 |         while (currentPage > 0) { | 
| 490 |             // check for underflow | 
| 491 |             if (currentPage >= (char*)pageSize) | 
| 492 |                 currentPage -= pageSize; | 
| 493 |             else | 
| 494 |                 currentPage = 0; | 
| 495 |             if (!isPageWritable(currentPage)) | 
| 496 |                 return currentPage + pageSize; | 
| 497 |         } | 
| 498 |         return 0; | 
| 499 |     } else { | 
| 500 |         while (true) { | 
| 501 |             // guaranteed to complete because isPageWritable returns false at end of memory | 
| 502 |             currentPage += pageSize; | 
| 503 |             if (!isPageWritable(currentPage)) | 
| 504 |                 return currentPage; | 
| 505 |         } | 
| 506 |     } | 
| 507 | } | 
| 508 | #endif | 
| 509 |  | 
| 510 | #if OS(HPUX) | 
| 511 | struct hpux_get_stack_base_data | 
| 512 | { | 
| 513 |     pthread_t thread; | 
| 514 |     _pthread_stack_info info; | 
| 515 | }; | 
| 516 |  | 
| 517 | static void *hpux_get_stack_base_internal(void *d) | 
| 518 | { | 
| 519 |     hpux_get_stack_base_data *data = static_cast<hpux_get_stack_base_data *>(d); | 
| 520 |  | 
| 521 |     // _pthread_stack_info_np requires the target thread to be suspended | 
| 522 |     // in order to get information about it | 
| 523 |     pthread_suspend(data->thread); | 
| 524 |  | 
| 525 |     // _pthread_stack_info_np returns an errno code in case of failure | 
| 526 |     // or zero on success | 
| 527 |     if (_pthread_stack_info_np(data->thread, &data->info)) { | 
| 528 |         // failed | 
| 529 |         return 0; | 
| 530 |     } | 
| 531 |  | 
| 532 |     pthread_continue(data->thread); | 
| 533 |     return data; | 
| 534 | } | 
| 535 |  | 
| 536 | static void *hpux_get_stack_base() | 
| 537 | { | 
| 538 |     hpux_get_stack_base_data data; | 
| 539 |     data.thread = pthread_self(); | 
| 540 |  | 
| 541 |     // We cannot get the stack information for the current thread | 
| 542 |     // So we start a new thread to get that information and return it to us | 
| 543 |     pthread_t other; | 
| 544 |     pthread_create(&other, 0, hpux_get_stack_base_internal, &data); | 
| 545 |  | 
| 546 |     void *result; | 
| 547 |     pthread_join(other, &result); | 
| 548 |     if (result) | 
| 549 |        return data.info.stk_stack_base; | 
| 550 |     return 0; | 
| 551 | } | 
| 552 | #endif | 
| 553 |  | 
| 554 | #if OS(QNX) | 
| 555 | static inline void *currentThreadStackBaseQNX() | 
| 556 | { | 
| 557 |     static void* stackBase = 0; | 
| 558 |     static size_t stackSize = 0; | 
| 559 |     static pthread_t stackThread; | 
| 560 |     pthread_t thread = pthread_self(); | 
| 561 |     if (stackBase == 0 || thread != stackThread) { | 
| 562 |         debug_thread_t threadInfo; | 
| 563 |         memset(&threadInfo, 0, sizeof(threadInfo)); | 
| 564 |         threadInfo.tid = pthread_self(); | 
        int fd = open("/proc/self", O_RDONLY);
        if (fd == -1) {
            LOG_ERROR("Unable to open /proc/self (errno: %d)", errno);
| 568 |             return 0; | 
| 569 |         } | 
| 570 |         devctl(fd, DCMD_PROC_TIDSTATUS, &threadInfo, sizeof(threadInfo), 0); | 
| 571 |         close(fd); | 
| 572 |         stackBase = reinterpret_cast<void*>(threadInfo.stkbase); | 
| 573 |         stackSize = threadInfo.stksize; | 
| 574 |         ASSERT(stackBase); | 
| 575 |         stackThread = thread; | 
| 576 |     } | 
| 577 |     return static_cast<char*>(stackBase) + stackSize; | 
| 578 | } | 
| 579 | #endif | 
| 580 |  | 
| 581 | static inline void* currentThreadStackBase() | 
| 582 | { | 
| 583 | #if OS(DARWIN) | 
| 584 |     pthread_t thread = pthread_self(); | 
| 585 |     return pthread_get_stackaddr_np(thread); | 
| 586 | #elif OS(WINCE) | 
| 587 |     AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); | 
| 588 |     MutexLocker locker(mutex); | 
| 589 |     if (g_stackBase) | 
| 590 |         return g_stackBase; | 
| 591 |     else { | 
| 592 |         int dummy; | 
| 593 |         return getStackBase(&dummy); | 
| 594 |     } | 
| 595 | #elif OS(WINDOWS) && CPU(X86) && COMPILER(MSVC) | 
| 596 |     // offset 0x18 from the FS segment register gives a pointer to | 
| 597 |     // the thread information block for the current thread | 
| 598 |     NT_TIB* pTib; | 
| 599 |     __asm { | 
| 600 |         MOV EAX, FS:[18h] | 
| 601 |         MOV pTib, EAX | 
| 602 |     } | 
| 603 |     return static_cast<void*>(pTib->StackBase); | 
| 604 | #elif OS(WINDOWS) && (CPU(X86_64) || CPU(AARCH64)) && (COMPILER(MSVC) || COMPILER(GCC)) | 
| 605 |     // FIXME: why only for MSVC? | 
| 606 |     PNT_TIB64 pTib = reinterpret_cast<PNT_TIB64>(NtCurrentTeb()); | 
| 607 |     return reinterpret_cast<void*>(pTib->StackBase); | 
| 608 | #elif OS(WINDOWS) && CPU(X86) && COMPILER(GCC) | 
| 609 |     // offset 0x18 from the FS segment register gives a pointer to | 
| 610 |     // the thread information block for the current thread | 
| 611 |     NT_TIB* pTib; | 
| 612 |     asm ( "movl %%fs:0x18, %0\n"  | 
| 613 |           : "=r"  (pTib) | 
| 614 |         ); | 
| 615 |     return static_cast<void*>(pTib->StackBase); | 
| 616 | #elif OS(HPUX) | 
| 617 |     return hpux_get_stack_base(); | 
| 618 | #elif OS(QNX) | 
| 619 |     AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); | 
| 620 |     MutexLocker locker(mutex); | 
| 621 |     return currentThreadStackBaseQNX(); | 
| 622 | #elif OS(SOLARIS) | 
| 623 |     stack_t s; | 
| 624 |     thr_stksegment(&s); | 
| 625 |     return s.ss_sp; | 
| 626 | #elif OS(AIX) | 
| 627 |     pthread_t thread = pthread_self(); | 
| 628 |     struct __pthrdsinfo threadinfo; | 
| 629 |     char regbuf[256]; | 
| 630 |     int regbufsize = sizeof regbuf; | 
| 631 |  | 
| 632 |     if (pthread_getthrds_np(&thread, PTHRDSINFO_QUERY_ALL, | 
| 633 |                             &threadinfo, sizeof threadinfo, | 
                            &regbuf, &regbufsize) == 0)
| 635 |         return threadinfo.__pi_stackaddr; | 
| 636 |  | 
| 637 |     return 0; | 
| 638 | #elif OS(OPENBSD) | 
| 639 |     pthread_t thread = pthread_self(); | 
| 640 |     stack_t stack; | 
| 641 |     pthread_stackseg_np(thread, &stack); | 
| 642 |     return stack.ss_sp; | 
| 643 | #elif OS(SYMBIAN) | 
| 644 |     TThreadStackInfo info; | 
| 645 |     RThread thread; | 
| 646 |     thread.StackInfo(info); | 
| 647 |     return (void*)info.iBase; | 
| 648 | #elif OS(HAIKU) | 
| 649 |     thread_info threadInfo; | 
| 650 |     get_thread_info(find_thread(NULL), &threadInfo); | 
| 651 |     return threadInfo.stack_end; | 
| 652 | #elif OS(UNIX) | 
| 653 |     AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); | 
| 654 |     MutexLocker locker(mutex); | 
| 655 |     static void* stackBase = 0; | 
| 656 |     static size_t stackSize = 0; | 
| 657 |     static pthread_t stackThread; | 
| 658 |     pthread_t thread = pthread_self(); | 
| 659 |     if (stackBase == 0 || thread != stackThread) { | 
| 660 |         pthread_attr_t sattr; | 
        pthread_attr_init(&sattr);
#if HAVE(PTHREAD_NP_H) || OS(NETBSD)
        // e.g. on FreeBSD 5.4, neundorf@kde.org
        pthread_attr_get_np(thread, &sattr);
#else
        // FIXME: this function is non-portable; other POSIX systems may have different np alternatives
        pthread_getattr_np(thread, &sattr);
#endif
        int rc = pthread_attr_getstack(&sattr, &stackBase, &stackSize);
        (void)rc; // FIXME: Deal with error code somehow? Seems fatal.
        ASSERT(stackBase);
        pthread_attr_destroy(&sattr);
| 673 |         stackThread = thread; | 
| 674 |     } | 
| 675 |     return static_cast<char*>(stackBase) + stackSize; | 
| 676 | #else | 
| 677 | #error Need a way to get the stack base on this platform | 
| 678 | #endif | 
| 679 | } | 
| 680 |  | 
| 681 | #if ENABLE(JSC_MULTIPLE_THREADS) | 
| 682 |  | 
| 683 | static inline PlatformThread getCurrentPlatformThread() | 
| 684 | { | 
| 685 | #if OS(DARWIN) | 
| 686 |     return pthread_mach_thread_np(pthread_self()); | 
| 687 | #elif OS(WINDOWS) | 
| 688 |     return pthread_getw32threadhandle_np(pthread_self()); | 
| 689 | #endif | 
| 690 | } | 
| 691 |  | 
| 692 | void Heap::makeUsableFromMultipleThreads() | 
| 693 | { | 
| 694 |     if (m_currentThreadRegistrar) | 
| 695 |         return; | 
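    // The key's destructor (unregisterThread) is called with the registered Heap* when a thread
    // that called registerThread() exits, so threads are unregistered automatically.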
| 696 |  | 
| 697 |     int error = pthread_key_create(&m_currentThreadRegistrar, unregisterThread); | 
| 698 |     if (error) | 
| 699 |         CRASH(); | 
| 700 | } | 
| 701 |  | 
| 702 | void Heap::registerThread() | 
| 703 | { | 
| 704 |     ASSERT(!m_globalData->mainThreadOnly || isMainThread()); | 
| 705 |  | 
| 706 |     if (!m_currentThreadRegistrar || pthread_getspecific(m_currentThreadRegistrar)) | 
| 707 |         return; | 
| 708 |  | 
| 709 |     pthread_setspecific(m_currentThreadRegistrar, this); | 
| 710 |     Heap::Thread* thread = new Heap::Thread(pthread_self(), getCurrentPlatformThread(), currentThreadStackBase()); | 
| 711 |  | 
| 712 |     MutexLocker lock(m_registeredThreadsMutex); | 
| 713 |  | 
| 714 |     thread->next = m_registeredThreads; | 
| 715 |     m_registeredThreads = thread; | 
| 716 | } | 
| 717 |  | 
| 718 | void Heap::unregisterThread(void* p) | 
| 719 | { | 
| 720 |     if (p) | 
| 721 |         static_cast<Heap*>(p)->unregisterThread(); | 
| 722 | } | 
| 723 |  | 
| 724 | void Heap::unregisterThread() | 
| 725 | { | 
| 726 |     pthread_t currentPosixThread = pthread_self(); | 
| 727 |  | 
| 728 |     MutexLocker lock(m_registeredThreadsMutex); | 
| 729 |  | 
| 730 |     if (pthread_equal(currentPosixThread, m_registeredThreads->posixThread)) { | 
| 731 |         Thread* t = m_registeredThreads; | 
| 732 |         m_registeredThreads = m_registeredThreads->next; | 
| 733 |         delete t; | 
| 734 |     } else { | 
| 735 |         Heap::Thread* last = m_registeredThreads; | 
| 736 |         Heap::Thread* t; | 
| 737 |         for (t = m_registeredThreads->next; t; t = t->next) { | 
| 738 |             if (pthread_equal(t->posixThread, currentPosixThread)) { | 
| 739 |                 last->next = t->next; | 
| 740 |                 break; | 
| 741 |             } | 
| 742 |             last = t; | 
| 743 |         } | 
| 744 |         ASSERT(t); // If t is NULL, we never found ourselves in the list. | 
| 745 |         delete t; | 
| 746 |     } | 
| 747 | } | 
| 748 |  | 
| 749 | #else // ENABLE(JSC_MULTIPLE_THREADS) | 
| 750 |  | 
| 751 | void Heap::registerThread() | 
| 752 | { | 
| 753 | } | 
| 754 |  | 
| 755 | #endif | 
| 756 |  | 
| 757 | inline bool isPointerAligned(void* p) | 
| 758 | { | 
| 759 |     return (((intptr_t)(p) & (sizeof(char*) - 1)) == 0); | 
| 760 | } | 
| 761 |  | 
| 762 | // Cell size needs to be a power of two for isPossibleCell to be valid. | 
| 763 | COMPILE_ASSERT(sizeof(CollectorCell) % 2 == 0, Collector_cell_size_is_power_of_two); | 
| 764 |  | 
| 765 | #if USE(JSVALUE32) | 
| 766 | static bool isHalfCellAligned(void *p) | 
| 767 | { | 
| 768 |     return (((intptr_t)(p) & (CELL_MASK >> 1)) == 0); | 
| 769 | } | 
| 770 |  | 
| 771 | static inline bool isPossibleCell(void* p) | 
| 772 | { | 
| 773 |     return isHalfCellAligned(p) && p; | 
| 774 | } | 
| 775 |  | 
| 776 | #else | 
| 777 |  | 
| 778 | static inline bool isCellAligned(void *p) | 
| 779 | { | 
| 780 |     return (((intptr_t)(p) & CELL_MASK) == 0); | 
| 781 | } | 
| 782 |  | 
| 783 | static inline bool isPossibleCell(void* p) | 
| 784 | { | 
| 785 |     return isCellAligned(p) && p; | 
| 786 | } | 
| 787 | #endif // USE(JSVALUE32) | 
| 788 |  | 
| 789 | void Heap::markConservatively(MarkStack& markStack, void* start, void* end) | 
| 790 | { | 
| 791 |     if (start > end) { | 
| 792 |         void* tmp = start; | 
| 793 |         start = end; | 
| 794 |         end = tmp; | 
| 795 |     } | 
| 796 |  | 
| 797 |     ASSERT((static_cast<char*>(end) - static_cast<char*>(start)) < 0x1000000); | 
| 798 |     ASSERT(isPointerAligned(start)); | 
| 799 |     ASSERT(isPointerAligned(end)); | 
| 800 |  | 
| 801 |     char** p = static_cast<char**>(start); | 
| 802 |     char** e = static_cast<char**>(end); | 
| 803 |  | 
| 804 |     CollectorBlock** blocks = m_heap.blocks; | 
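    // Each pointer-aligned word in [start, end) is treated as a candidate cell pointer: it must be
    // cell-aligned, must not fall in a block's unused tail, and the block it would live in must be
    // one of this heap's blocks before the cell is appended to the mark stack.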
| 805 |     while (p != e) { | 
| 806 |         char* x = *p++; | 
        if (isPossibleCell(x)) {
| 808 |             size_t usedBlocks; | 
| 809 |             uintptr_t xAsBits = reinterpret_cast<uintptr_t>(x); | 
| 810 |             xAsBits &= CELL_ALIGN_MASK; | 
| 811 |  | 
| 812 |             uintptr_t offset = xAsBits & BLOCK_OFFSET_MASK; | 
| 813 |             const size_t lastCellOffset = sizeof(CollectorCell) * (CELLS_PER_BLOCK - 1); | 
| 814 |             if (offset > lastCellOffset) | 
| 815 |                 continue; | 
| 816 |  | 
| 817 |             CollectorBlock* blockAddr = reinterpret_cast<CollectorBlock*>(xAsBits - offset); | 
| 818 |             usedBlocks = m_heap.usedBlocks; | 
| 819 |             for (size_t block = 0; block < usedBlocks; block++) { | 
| 820 |                 if (blocks[block] != blockAddr) | 
| 821 |                     continue; | 
                markStack.append(reinterpret_cast<JSCell*>(xAsBits));
| 823 |                 markStack.drain(); | 
| 824 |             } | 
| 825 |         } | 
| 826 |     } | 
| 827 | } | 
| 828 |  | 
| 829 | void NEVER_INLINE Heap::markCurrentThreadConservativelyInternal(MarkStack& markStack) | 
| 830 | { | 
| 831 |     void* dummy; | 
| 832 |     void* stackPointer = &dummy; | 
| 833 |     void* stackBase = currentThreadStackBase(); | 
    markConservatively(markStack, stackPointer, stackBase);
| 835 | } | 
| 836 |  | 
| 837 | #if COMPILER(GCC) | 
| 838 | #define REGISTER_BUFFER_ALIGNMENT __attribute__ ((aligned (sizeof(void*)))) | 
| 839 | #else | 
| 840 | #define REGISTER_BUFFER_ALIGNMENT | 
| 841 | #endif | 
| 842 |  | 
| 843 | void Heap::markCurrentThreadConservatively(MarkStack& markStack) | 
| 844 | { | 
| 845 |     // setjmp forces volatile registers onto the stack | 
| 846 |     jmp_buf registers REGISTER_BUFFER_ALIGNMENT; | 
| 847 | #if COMPILER(MSVC) | 
| 848 | #pragma warning(push) | 
| 849 | #pragma warning(disable: 4611) | 
| 850 | #endif | 
| 851 |     setjmp(registers); | 
| 852 | #if COMPILER(MSVC) | 
| 853 | #pragma warning(pop) | 
| 854 | #endif | 
| 855 |  | 
| 856 |     markCurrentThreadConservativelyInternal(markStack); | 
| 857 | } | 
| 858 |  | 
| 859 | #if ENABLE(JSC_MULTIPLE_THREADS) | 
| 860 |  | 
| 861 | static inline void suspendThread(const PlatformThread& platformThread) | 
| 862 | { | 
| 863 | #if OS(DARWIN) | 
| 864 |     thread_suspend(platformThread); | 
| 865 | #elif OS(WINDOWS) | 
| 866 |     SuspendThread(platformThread); | 
| 867 | #else | 
| 868 | #error Need a way to suspend threads on this platform | 
| 869 | #endif | 
| 870 | } | 
| 871 |  | 
| 872 | static inline void resumeThread(const PlatformThread& platformThread) | 
| 873 | { | 
| 874 | #if OS(DARWIN) | 
| 875 |     thread_resume(platformThread); | 
| 876 | #elif OS(WINDOWS) | 
| 877 |     ResumeThread(platformThread); | 
| 878 | #else | 
| 879 | #error Need a way to resume threads on this platform | 
| 880 | #endif | 
| 881 | } | 
| 882 |  | 
| 883 | typedef unsigned long usword_t; // word size, assumed to be either 32 or 64 bit | 
| 884 |  | 
| 885 | #if OS(DARWIN) | 
| 886 |  | 
| 887 | #if CPU(X86) | 
| 888 | typedef i386_thread_state_t PlatformThreadRegisters; | 
| 889 | #elif CPU(X86_64) | 
| 890 | typedef x86_thread_state64_t PlatformThreadRegisters; | 
| 891 | #elif CPU(PPC) | 
| 892 | typedef ppc_thread_state_t PlatformThreadRegisters; | 
| 893 | #elif CPU(PPC64) | 
| 894 | typedef ppc_thread_state64_t PlatformThreadRegisters; | 
| 895 | #elif CPU(ARM) | 
| 896 | typedef arm_thread_state_t PlatformThreadRegisters; | 
| 897 | #else | 
| 898 | #error Unknown Architecture | 
| 899 | #endif | 
| 900 |  | 
| 901 | #elif OS(WINDOWS) && CPU(X86) | 
| 902 | typedef CONTEXT PlatformThreadRegisters; | 
| 903 | #else | 
| 904 | #error Need a thread register struct for this platform | 
| 905 | #endif | 
| 906 |  | 
| 907 | static size_t getPlatformThreadRegisters(const PlatformThread& platformThread, PlatformThreadRegisters& regs) | 
| 908 | { | 
| 909 | #if OS(DARWIN) | 
| 910 |  | 
| 911 | #if CPU(X86) | 
| 912 |     unsigned user_count = sizeof(regs)/sizeof(int); | 
| 913 |     thread_state_flavor_t flavor = i386_THREAD_STATE; | 
| 914 | #elif CPU(X86_64) | 
| 915 |     unsigned user_count = x86_THREAD_STATE64_COUNT; | 
| 916 |     thread_state_flavor_t flavor = x86_THREAD_STATE64; | 
| 917 | #elif CPU(PPC)  | 
| 918 |     unsigned user_count = PPC_THREAD_STATE_COUNT; | 
| 919 |     thread_state_flavor_t flavor = PPC_THREAD_STATE; | 
| 920 | #elif CPU(PPC64) | 
| 921 |     unsigned user_count = PPC_THREAD_STATE64_COUNT; | 
| 922 |     thread_state_flavor_t flavor = PPC_THREAD_STATE64; | 
| 923 | #elif CPU(ARM) | 
| 924 |     unsigned user_count = ARM_THREAD_STATE_COUNT; | 
| 925 |     thread_state_flavor_t flavor = ARM_THREAD_STATE; | 
| 926 | #else | 
| 927 | #error Unknown Architecture | 
| 928 | #endif | 
| 929 |  | 
    kern_return_t result = thread_get_state(platformThread, flavor, (thread_state_t)&regs, &user_count);
    if (result != KERN_SUCCESS) {
        WTFReportFatalError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION,
                            "JavaScript garbage collection failed because thread_get_state returned an error (%d). This is probably the result of running inside Rosetta, which is not supported.", result);
| 934 |         CRASH(); | 
| 935 |     } | 
| 936 |     return user_count * sizeof(usword_t); | 
| 937 | // end OS(DARWIN) | 
| 938 |  | 
| 939 | #elif OS(WINDOWS) && CPU(X86) | 
| 940 |     regs.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL | CONTEXT_SEGMENTS; | 
    GetThreadContext(platformThread, &regs);
| 942 |     return sizeof(CONTEXT); | 
| 943 | #else | 
| 944 | #error Need a way to get thread registers on this platform | 
| 945 | #endif | 
| 946 | } | 
| 947 |  | 
| 948 | static inline void* otherThreadStackPointer(const PlatformThreadRegisters& regs) | 
| 949 | { | 
| 950 | #if OS(DARWIN) | 
| 951 |  | 
| 952 | #if __DARWIN_UNIX03 | 
| 953 |  | 
| 954 | #if CPU(X86) | 
| 955 |     return reinterpret_cast<void*>(regs.__esp); | 
| 956 | #elif CPU(X86_64) | 
| 957 |     return reinterpret_cast<void*>(regs.__rsp); | 
| 958 | #elif CPU(PPC) || CPU(PPC64) | 
| 959 |     return reinterpret_cast<void*>(regs.__r1); | 
| 960 | #elif CPU(ARM) | 
| 961 |     return reinterpret_cast<void*>(regs.__sp); | 
| 962 | #else | 
| 963 | #error Unknown Architecture | 
| 964 | #endif | 
| 965 |  | 
| 966 | #else // !__DARWIN_UNIX03 | 
| 967 |  | 
| 968 | #if CPU(X86) | 
| 969 |     return reinterpret_cast<void*>(regs.esp); | 
| 970 | #elif CPU(X86_64) | 
| 971 |     return reinterpret_cast<void*>(regs.rsp); | 
| 972 | #elif CPU(PPC) || CPU(PPC64) | 
| 973 |     return reinterpret_cast<void*>(regs.r1); | 
| 974 | #else | 
| 975 | #error Unknown Architecture | 
| 976 | #endif | 
| 977 |  | 
| 978 | #endif // __DARWIN_UNIX03 | 
| 979 |  | 
| 980 | // end OS(DARWIN) | 
| 981 | #elif CPU(X86) && OS(WINDOWS) | 
| 982 |     return reinterpret_cast<void*>((uintptr_t) regs.Esp); | 
| 983 | #else | 
| 984 | #error Need a way to get the stack pointer for another thread on this platform | 
| 985 | #endif | 
| 986 | } | 
| 987 |  | 
| 988 | void Heap::markOtherThreadConservatively(MarkStack& markStack, Thread* thread) | 
| 989 | { | 
| 990 |     suspendThread(thread->platformThread); | 
| 991 |  | 
| 992 |     PlatformThreadRegisters regs; | 
| 993 |     size_t regSize = getPlatformThreadRegisters(thread->platformThread, regs); | 
| 994 |  | 
| 995 |     // mark the thread's registers | 
    markConservatively(markStack, static_cast<void*>(&regs), static_cast<void*>(reinterpret_cast<char*>(&regs) + regSize));
| 997 |  | 
| 998 |     void* stackPointer = otherThreadStackPointer(regs); | 
| 999 |     markConservatively(markStack, stackPointer, thread->stackBase); | 
| 1000 |  | 
| 1001 |     resumeThread(thread->platformThread); | 
| 1002 | } | 
| 1003 |  | 
| 1004 | #endif | 
| 1005 |  | 
| 1006 | void Heap::markStackObjectsConservatively(MarkStack& markStack) | 
| 1007 | { | 
| 1008 |     markCurrentThreadConservatively(markStack); | 
| 1009 |  | 
| 1010 | #if ENABLE(JSC_MULTIPLE_THREADS) | 
| 1011 |  | 
| 1012 |     if (m_currentThreadRegistrar) { | 
| 1013 |  | 
| 1014 |         MutexLocker lock(m_registeredThreadsMutex); | 
| 1015 |  | 
| 1016 | #ifndef NDEBUG | 
| 1017 |         // Forbid malloc during the mark phase. Marking a thread suspends it, so  | 
| 1018 |         // a malloc inside markChildren() would risk a deadlock with a thread that had been  | 
| 1019 |         // suspended while holding the malloc lock. | 
| 1020 |         fastMallocForbid(); | 
| 1021 | #endif | 
| 1022 |         // It is safe to access the registeredThreads list, because we earlier asserted that locks are being held, | 
| 1023 |         // and since this is a shared heap, they are real locks. | 
| 1024 |         for (Thread* thread = m_registeredThreads; thread; thread = thread->next) { | 
| 1025 |             if (!pthread_equal(thread->posixThread, pthread_self())) | 
| 1026 |                 markOtherThreadConservatively(markStack, thread); | 
| 1027 |         } | 
| 1028 | #ifndef NDEBUG | 
| 1029 |         fastMallocAllow(); | 
| 1030 | #endif | 
| 1031 |     } | 
| 1032 | #endif | 
| 1033 | } | 
| 1034 |  | 
| 1035 | void Heap::protect(JSValue k) | 
| 1036 | { | 
| 1037 |     ASSERT(k); | 
| 1038 |     ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance); | 
| 1039 |  | 
| 1040 |     if (!k.isCell()) | 
| 1041 |         return; | 
| 1042 |  | 
    m_protectedValues.add(k.asCell());
| 1044 | } | 
| 1045 |  | 
| 1046 | void Heap::unprotect(JSValue k) | 
| 1047 | { | 
| 1048 |     ASSERT(k); | 
| 1049 |     ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance); | 
| 1050 |  | 
| 1051 |     if (!k.isCell()) | 
| 1052 |         return; | 
| 1053 |  | 
    m_protectedValues.remove(k.asCell());
| 1055 | } | 
| 1056 |  | 
| 1057 | void Heap::markProtectedObjects(MarkStack& markStack) | 
| 1058 | { | 
| 1059 |     ProtectCountSet::iterator end = m_protectedValues.end(); | 
| 1060 |     for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it) { | 
        markStack.append(it->first);
| 1062 |         markStack.drain(); | 
| 1063 |     } | 
| 1064 | } | 
| 1065 |  | 
| 1066 | void Heap::clearMarkBits() | 
| 1067 | { | 
| 1068 |     for (size_t i = 0; i < m_heap.usedBlocks; ++i) | 
| 1069 |         clearMarkBits(m_heap.blocks[i]); | 
| 1070 | } | 
| 1071 |  | 
| 1072 | void Heap::clearMarkBits(CollectorBlock* block) | 
| 1073 | { | 
| 1074 |     // allocate assumes that the last cell in every block is marked. | 
| 1075 |     block->marked.clearAll(); | 
| 1076 |     block->marked.set(HeapConstants::cellsPerBlock - 1); | 
| 1077 | } | 
| 1078 |  | 
| 1079 | size_t Heap::markedCells(size_t startBlock, size_t startCell) const | 
| 1080 | { | 
| 1081 |     ASSERT(startBlock <= m_heap.usedBlocks); | 
| 1082 |     ASSERT(startCell < HeapConstants::cellsPerBlock); | 
| 1083 |  | 
| 1084 |     if (startBlock >= m_heap.usedBlocks) | 
| 1085 |         return 0; | 
| 1086 |  | 
| 1087 |     size_t result = 0; | 
| 1088 |     result += m_heap.blocks[startBlock]->marked.count(startCell); | 
| 1089 |     for (size_t i = startBlock + 1; i < m_heap.usedBlocks; ++i) | 
| 1090 |         result += m_heap.blocks[i]->marked.count(); | 
| 1091 |  | 
| 1092 |     return result; | 
| 1093 | } | 
| 1094 |  | 
| 1095 | void Heap::sweep() | 
| 1096 | { | 
| 1097 |     ASSERT(m_heap.operationInProgress == NoOperation); | 
| 1098 |     if (m_heap.operationInProgress != NoOperation) | 
| 1099 |         CRASH(); | 
| 1100 |     m_heap.operationInProgress = Collection; | 
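    // Destroy the dead cells that the lazy allocator has not already recycled. In zombie builds each
    // dead cell is replaced by a marked JSZombie so later use of a collected cell can be detected;
    // otherwise it is reinitialized as a dummy markable cell (see the comment below).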
| 1101 |      | 
| 1102 | #if !ENABLE(JSC_ZOMBIES) | 
| 1103 |     Structure* dummyMarkableCellStructure = m_globalData->dummyMarkableCellStructure.get(); | 
| 1104 | #endif | 
| 1105 |  | 
| 1106 |     DeadObjectIterator it(m_heap, m_heap.nextBlock, m_heap.nextCell); | 
| 1107 |     DeadObjectIterator end(m_heap, m_heap.usedBlocks); | 
| 1108 |     for ( ; it != end; ++it) { | 
| 1109 |         JSCell* cell = *it; | 
| 1110 | #if ENABLE(JSC_ZOMBIES) | 
| 1111 |         if (!cell->isZombie()) { | 
| 1112 |             const ClassInfo* info = cell->classInfo(); | 
| 1113 |             cell->~JSCell(); | 
| 1114 |             new (cell) JSZombie(info, JSZombie::leakedZombieStructure()); | 
| 1115 |             Heap::markCell(cell); | 
| 1116 |         } | 
| 1117 | #else | 
| 1118 |         cell->~JSCell(); | 
| 1119 |         // Callers of sweep assume it's safe to mark any cell in the heap. | 
| 1120 |         new (cell) JSCell(dummyMarkableCellStructure); | 
| 1121 | #endif | 
| 1122 |     } | 
| 1123 |  | 
| 1124 |     m_heap.operationInProgress = NoOperation; | 
| 1125 | } | 
| 1126 |  | 
| 1127 | void Heap::markRoots() | 
| 1128 | { | 
| 1129 | #ifndef NDEBUG | 
| 1130 |     if (m_globalData->isSharedInstance) { | 
| 1131 |         ASSERT(JSLock::lockCount() > 0); | 
| 1132 |         ASSERT(JSLock::currentThreadIsHoldingLock()); | 
| 1133 |     } | 
| 1134 | #endif | 
| 1135 |  | 
| 1136 |     ASSERT(m_heap.operationInProgress == NoOperation); | 
| 1137 |     if (m_heap.operationInProgress != NoOperation) | 
| 1138 |         CRASH(); | 
| 1139 |  | 
| 1140 |     m_heap.operationInProgress = Collection; | 
| 1141 |  | 
| 1142 |     MarkStack& markStack = m_globalData->markStack; | 
| 1143 |  | 
| 1144 |     // Reset mark bits. | 
| 1145 |     clearMarkBits(); | 
| 1146 |  | 
| 1147 |     // Mark stack roots. | 
| 1148 |     markStackObjectsConservatively(markStack); | 
    m_globalData->interpreter->registerFile().markCallFrames(markStack, this);
| 1150 |  | 
| 1151 |     // Mark explicitly registered roots. | 
| 1152 |     markProtectedObjects(markStack); | 
| 1153 |  | 
| 1154 |     // Mark misc. other roots. | 
| 1155 |     if (m_markListSet && m_markListSet->size()) | 
| 1156 |         MarkedArgumentBuffer::markLists(markStack, *m_markListSet); | 
| 1157 |     if (m_globalData->exception) | 
        markStack.append(m_globalData->exception);
| 1159 |     m_globalData->smallStrings.markChildren(markStack); | 
| 1160 |     if (m_globalData->functionCodeBlockBeingReparsed) | 
| 1161 |         m_globalData->functionCodeBlockBeingReparsed->markAggregate(markStack); | 
| 1162 |     if (m_globalData->firstStringifierToMark) | 
| 1163 |         JSONObject::markStringifiers(markStack, m_globalData->firstStringifierToMark); | 
| 1164 |  | 
| 1165 | #if QT_BUILD_SCRIPT_LIB | 
| 1166 |     if (m_globalData->clientData) | 
| 1167 |         m_globalData->clientData->mark(markStack); | 
| 1168 | #endif | 
| 1169 |  | 
| 1170 |     markStack.drain(); | 
| 1171 |     markStack.compact(); | 
| 1172 |  | 
| 1173 |     m_heap.operationInProgress = NoOperation; | 
| 1174 | } | 
| 1175 |  | 
| 1176 | size_t Heap::objectCount() const | 
| 1177 | { | 
| 1178 |     return m_heap.nextBlock * HeapConstants::cellsPerBlock // allocated full blocks | 
| 1179 |            + m_heap.nextCell // allocated cells in current block | 
           + markedCells(m_heap.nextBlock, m_heap.nextCell) // marked cells in remainder of m_heap
| 1181 |            - m_heap.usedBlocks; // 1 cell per block is a dummy sentinel | 
| 1182 | } | 
| 1183 |  | 
| 1184 | void Heap::addToStatistics(Heap::Statistics& statistics) const | 
| 1185 | { | 
| 1186 |     statistics.size += m_heap.usedBlocks * BLOCK_SIZE; | 
| 1187 |     statistics.free += m_heap.usedBlocks * BLOCK_SIZE - (objectCount() * HeapConstants::cellSize); | 
| 1188 | } | 
| 1189 |  | 
| 1190 | Heap::Statistics Heap::statistics() const | 
| 1191 | { | 
    Statistics statistics = { 0, 0 };
| 1193 |     addToStatistics(statistics); | 
| 1194 |     return statistics; | 
| 1195 | } | 
| 1196 |  | 
| 1197 | size_t Heap::globalObjectCount() | 
| 1198 | { | 
| 1199 |     size_t count = 0; | 
| 1200 |     if (JSGlobalObject* head = m_globalData->head) { | 
| 1201 |         JSGlobalObject* o = head; | 
| 1202 |         do { | 
| 1203 |             ++count; | 
| 1204 |             o = o->next(); | 
| 1205 |         } while (o != head); | 
| 1206 |     } | 
| 1207 |     return count; | 
| 1208 | } | 
| 1209 |  | 
| 1210 | size_t Heap::protectedGlobalObjectCount() | 
| 1211 | { | 
| 1212 |     size_t count = 0; | 
| 1213 |     if (JSGlobalObject* head = m_globalData->head) { | 
| 1214 |         JSGlobalObject* o = head; | 
| 1215 |         do { | 
            if (m_protectedValues.contains(o))
| 1217 |                 ++count; | 
| 1218 |             o = o->next(); | 
| 1219 |         } while (o != head); | 
| 1220 |     } | 
| 1221 |  | 
| 1222 |     return count; | 
| 1223 | } | 
| 1224 |  | 
| 1225 | size_t Heap::protectedObjectCount() | 
| 1226 | { | 
| 1227 |     return m_protectedValues.size(); | 
| 1228 | } | 
| 1229 |  | 
| 1230 | static const char* typeName(JSCell* cell) | 
| 1231 | { | 
    if (cell->isString())
        return "string";
#if USE(JSVALUE32)
    if (cell->isNumber())
        return "number";
#endif
    if (cell->isGetterSetter())
        return "gettersetter";
    if (cell->isAPIValueWrapper())
        return "value wrapper";
    if (cell->isPropertyNameIterator())
        return "for-in iterator";
    ASSERT(cell->isObject());
    const ClassInfo* info = cell->classInfo();
    return info ? info->className : "Object";
| 1247 | } | 
| 1248 |  | 
| 1249 | HashCountedSet<const char*>* Heap::protectedObjectTypeCounts() | 
| 1250 | { | 
| 1251 |     HashCountedSet<const char*>* counts = new HashCountedSet<const char*>; | 
| 1252 |  | 
| 1253 |     ProtectCountSet::iterator end = m_protectedValues.end(); | 
| 1254 |     for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it) | 
        counts->add(typeName(it->first));
| 1256 |  | 
| 1257 |     return counts; | 
| 1258 | } | 
| 1259 |  | 
| 1260 | bool Heap::isBusy() | 
| 1261 | { | 
| 1262 |     return m_heap.operationInProgress != NoOperation; | 
| 1263 | } | 
| 1264 |  | 
| 1265 | void Heap::reset() | 
| 1266 | { | 
| 1267 |     JAVASCRIPTCORE_GC_BEGIN(); | 
| 1268 |  | 
| 1269 |     markRoots(); | 
| 1270 |  | 
| 1271 |     JAVASCRIPTCORE_GC_MARKED(); | 
| 1272 |  | 
| 1273 |     m_heap.nextCell = 0; | 
| 1274 |     m_heap.nextBlock = 0; | 
| 1275 |     m_heap.nextNumber = 0; | 
| 1276 |     m_heap.extraCost = 0; | 
| 1277 | #if ENABLE(JSC_ZOMBIES) | 
| 1278 |     sweep(); | 
| 1279 | #endif | 
| 1280 |     resizeBlocks(); | 
| 1281 |  | 
| 1282 |     JAVASCRIPTCORE_GC_END(); | 
| 1283 | } | 
| 1284 |  | 
| 1285 | void Heap::collectAllGarbage() | 
| 1286 | { | 
| 1287 |     JAVASCRIPTCORE_GC_BEGIN(); | 
| 1288 |  | 
| 1289 |     // If the last iteration through the heap deallocated blocks, we need | 
| 1290 |     // to clean up remaining garbage before marking. Otherwise, the conservative | 
| 1291 |     // marking mechanism might follow a pointer to unmapped memory. | 
| 1292 |     if (m_heap.didShrink) | 
| 1293 |         sweep(); | 
| 1294 |  | 
| 1295 |     markRoots(); | 
| 1296 |  | 
| 1297 |     JAVASCRIPTCORE_GC_MARKED(); | 
| 1298 |  | 
| 1299 |     m_heap.nextCell = 0; | 
| 1300 |     m_heap.nextBlock = 0; | 
| 1301 |     m_heap.nextNumber = 0; | 
| 1302 |     m_heap.extraCost = 0; | 
| 1303 |     sweep(); | 
| 1304 |     resizeBlocks(); | 
| 1305 |  | 
| 1306 |     JAVASCRIPTCORE_GC_END(); | 
| 1307 | } | 
| 1308 |  | 
| 1309 | LiveObjectIterator Heap::primaryHeapBegin() | 
| 1310 | { | 
| 1311 |     return LiveObjectIterator(m_heap, 0); | 
| 1312 | } | 
| 1313 |  | 
| 1314 | LiveObjectIterator Heap::primaryHeapEnd() | 
| 1315 | { | 
| 1316 |     return LiveObjectIterator(m_heap, m_heap.usedBlocks); | 
| 1317 | } | 
| 1318 |  | 
| 1319 | } // namespace JSC | 
| 1320 |  |