// Copyright (c) 2022, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#ifndef RUNTIME_VM_HEAP_PAGE_H_
#define RUNTIME_VM_HEAP_PAGE_H_

#include "platform/atomic.h"
#include "vm/globals.h"
#include "vm/heap/spaces.h"
#include "vm/pointer_tagging.h"
#include "vm/raw_object.h"
#include "vm/virtual_memory.h"

namespace dart {

class ForwardingPage;
class ObjectVisitor;
class ObjectPointerVisitor;
class Thread;
class UnwindingRecords;

// Pages are allocated with kPageSize alignment so that the Page of any object
// can be computed by masking the object with kPageMask. This does not apply to
// image pages, whose address is chosen by the system loader rather than the
// Dart VM.
static constexpr intptr_t kPageSize = 512 * KB;
static constexpr intptr_t kPageSizeInWords = kPageSize / kWordSize;
static constexpr intptr_t kPageMask = ~(kPageSize - 1);
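// For example (a sketch, given the 512 KB page size above): kPageSize is
// 2^19 bytes, so kPageMask clears the low 19 bits of an address, and
//   reinterpret_cast<Page*>(addr & kPageMask)
// recovers the header of the page containing addr without any table lookup.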

// See ForwardingBlock and CountingBlock.
static constexpr intptr_t kBitVectorWordsPerBlock = 1;
static constexpr intptr_t kBlockSize =
    kObjectAlignment * kBitsPerWord * kBitVectorWordsPerBlock;
static constexpr intptr_t kBlockMask = ~(kBlockSize - 1);
static constexpr intptr_t kBlocksPerPage = kPageSize / kBlockSize;
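// As a rough illustration (assuming a 64-bit target where kObjectAlignment is
// 16 bytes and kBitsPerWord is 64): one block covers 16 * 64 = 1024 bytes of
// page, one bit per possible object start, so a 512 KB page divides into 512
// blocks.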

// Simplify initialization in allocation stubs by ensuring it is safe
// to overshoot the object end by up to kAllocationRedZoneSize. (Just as the
// stack red zone allows one to overshoot the stack pointer.)
static constexpr intptr_t kAllocationRedZoneSize = kObjectAlignment;
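// (Note, inferred from the layout diagram below: the "red zone or forwarding
// table" region at the end of each page is what makes this overshoot safe;
// even an object ending exactly at end_ has kAllocationRedZoneSize of slack
// before memory_->end().)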

// A Page is the granularity at which the Dart heap allocates memory from the
// OS. Pages are usually of size kPageSize, except that large objects are
// allocated on their own Page sized to the object.
//
// +----------------------+ <- start
// | struct Page (header) |
// +----------------------+
// | alignment gap        |
// +----------------------+ <- object_start
// | objects              |
// | ...                  |
// | ...                  |
// +----------------------+ <- object_end / top_
// | available            |
// +----------------------+ <- end_
// | red zone or          |
// | forwarding table     |
// +----------------------+ <- memory_->end()
class Page {
 public:
  static void Init();
  static void ClearCache();
  static intptr_t CachedSize();
  static void Cleanup();

  enum PageFlags : uword {
    kExecutable = 1 << 0,
    kLarge = 1 << 1,
    kImage = 1 << 2,
    kVMIsolate = 1 << 3,
    kNew = 1 << 4,
    kEvacuationCandidate = 1 << 5,
  };
  bool is_executable() const { return (flags_ & kExecutable) != 0; }
  bool is_large() const { return (flags_ & kLarge) != 0; }
  bool is_image() const { return (flags_ & kImage) != 0; }
  bool is_vm_isolate() const { return (flags_ & kVMIsolate) != 0; }
  bool is_new() const { return (flags_ & kNew) != 0; }
  bool is_old() const { return !is_new(); }
  bool is_evacuation_candidate() const {
    return (flags_ & kEvacuationCandidate) != 0;
  }

  Page* next() const { return next_; }
  void set_next(Page* next) { next_ = next; }

  uword start() const { return memory_->start(); }
  uword end() const { return memory_->end(); }
  bool Contains(uword addr) const { return memory_->Contains(addr); }
  intptr_t AliasOffset() const { return memory_->AliasOffset(); }

  uword object_start() const {
    return is_new() ? new_object_start() : old_object_start();
  }
  uword old_object_start() const {
    return memory_->start() + OldObjectStartOffset();
  }
  uword new_object_start() const {
    return memory_->start() + NewObjectStartOffset();
  }
  uword object_end() const {
    if (owner_ != nullptr) return owner_->top();
    return top_;
  }
  intptr_t used() const { return object_end() - object_start(); }

  ForwardingPage* forwarding_page() const { return forwarding_page_; }
  void RegisterUnwindingRecords();
  void UnregisterUnwindingRecords();
  void AllocateForwardingPage();

  void VisitObjects(ObjectVisitor* visitor) const;
  void VisitObjectPointers(ObjectPointerVisitor* visitor) const;

  void WriteProtect(bool read_only);

  constexpr static intptr_t OldObjectStartOffset() {
    return Utils::RoundUp(sizeof(Page), kObjectStartAlignment,
                          kOldObjectAlignmentOffset);
  }
  constexpr static intptr_t NewObjectStartOffset() {
    // Note weaker alignment because the bool/null offset tricks don't apply to
    // new-space.
    return Utils::RoundUp(sizeof(Page), kObjectAlignment,
                          kNewObjectAlignmentOffset);
  }

  // Warning: This does not work for objects on image pages because image pages
  // are not aligned. However, it works for objects on large pages, because
  // only one object is allocated per large page.
  static Page* Of(ObjectPtr obj) {
    ASSERT(obj->IsHeapObject());
    return reinterpret_cast<Page*>(static_cast<uword>(obj) & kPageMask);
  }

  // Warning: This does not work for addresses on image pages or on large
  // pages.
  static Page* Of(uword addr) {
    return reinterpret_cast<Page*>(addr & kPageMask);
  }

  // Warning: This does not work for objects on image pages.
  static ObjectPtr ToExecutable(ObjectPtr obj) {
    Page* page = Of(obj);
    VirtualMemory* memory = page->memory_;
    const intptr_t alias_offset = memory->AliasOffset();
    if (alias_offset == 0) {
      return obj;  // Not aliased.
    }
    uword addr = UntaggedObject::ToAddr(obj);
    if (memory->Contains(addr)) {
      return UntaggedObject::FromAddr(addr + alias_offset);
    }
    // obj is executable.
    ASSERT(memory->ContainsAlias(addr));
    return obj;
  }

  // Warning: This does not work for objects on image pages.
  static ObjectPtr ToWritable(ObjectPtr obj) {
    Page* page = Of(obj);
    VirtualMemory* memory = page->memory_;
    const intptr_t alias_offset = memory->AliasOffset();
    if (alias_offset == 0) {
      return obj;  // Not aliased.
    }
    uword addr = UntaggedObject::ToAddr(obj);
    if (memory->ContainsAlias(addr)) {
      return UntaggedObject::FromAddr(addr - alias_offset);
    }
    // obj is writable.
    ASSERT(memory->Contains(addr));
    return obj;
  }
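  // The two conversions above handle pages that are mapped twice: a writable
  // view and an executable view of the same memory, separated by
  // AliasOffset(). When AliasOffset() is zero there is only one mapping and
  // the pointer is returned unchanged.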

  // 1 card = 32 slots.
  static constexpr intptr_t kSlotsPerCardLog2 = 5;
  static constexpr intptr_t kBytesPerCardLog2 =
      kCompressedWordSizeLog2 + kSlotsPerCardLog2;
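  // For a sense of scale (a sketch; the exact numbers depend on the build):
  // with compressed pointers a slot is 4 bytes, so one card spans 32 * 4 = 128
  // bytes and the card table below needs one bit per 128 bytes of page;
  // without compression a slot is 8 bytes and one card spans 256 bytes.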

  intptr_t card_table_size() const {
    return memory_->size() >> kBytesPerCardLog2;
  }

  static intptr_t card_table_offset() { return OFFSET_OF(Page, card_table_); }

  void RememberCard(ObjectPtr const* slot) {
    RememberCard(reinterpret_cast<uword>(slot));
  }
  bool IsCardRemembered(ObjectPtr const* slot) {
    return IsCardRemembered(reinterpret_cast<uword>(slot));
  }
#if defined(DART_COMPRESSED_POINTERS)
  void RememberCard(CompressedObjectPtr const* slot) {
    RememberCard(reinterpret_cast<uword>(slot));
  }
  bool IsCardRemembered(CompressedObjectPtr const* slot) {
    return IsCardRemembered(reinterpret_cast<uword>(slot));
  }
#endif
  void VisitRememberedCards(ObjectPointerVisitor* visitor);
  void ResetProgressBar();

  Thread* owner() const { return owner_; }

  // Remember the limit to which objects have been copied.
  void RecordSurvivors() { survivor_end_ = object_end(); }

  // Move survivor end to the end of the to_ space, making all surviving
  // objects candidates for promotion next time.
  void EarlyTenure() { survivor_end_ = end_; }

  uword promo_candidate_words() const {
    return (survivor_end_ - object_start()) / kWordSize;
  }

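  // A mutator thread can take ownership of a page and use it as its TLAB:
  // Acquire copies top_/end_ into the Thread so allocation becomes a simple
  // bump of the thread-local top, and Release writes the advanced top back
  // into the page. While a page is owned, its top_ is stale and object_end()
  // reads the owner's top instead.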
  void Acquire(Thread* thread) {
    ASSERT(owner_ == nullptr);
    owner_ = thread;
    thread->set_top(top_);
    thread->set_end(end_);
    thread->set_true_end(end_);
  }
  void Release(Thread* thread) {
    ASSERT(owner_ == thread);
    owner_ = nullptr;
    top_ = thread->top();
    thread->set_top(0);
    thread->set_end(0);
    thread->set_true_end(0);
#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
    thread->heap_sampler().HandleReleasedTLAB(Thread::Current());
#endif
  }
  void Release() {
    if (owner_ != nullptr) {
      Release(owner_);
    }
  }

  uword TryAllocateGC(intptr_t size) {
    ASSERT(owner_ == nullptr);
    uword result = top_;
    uword new_top = result + size;
    if (LIKELY(new_top <= end_)) {
      top_ = new_top;
      return result;
    }
    return 0;
  }
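  // Rough usage sketch (hypothetical caller, not code from this VM): during a
  // GC the collector bump-allocates space for a copied object and falls back
  // to another page when this one is full:
  //
  //   uword addr = page->TryAllocateGC(size);
  //   if (addr == 0) {
  //     page = AcquireFreshPage();  // hypothetical fallback
  //     addr = page->TryAllocateGC(size);
  //   }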

  void Unallocate(uword addr, intptr_t size) {
    ASSERT((addr + size) == top_);
    top_ -= size;
  }

  bool IsSurvivor(uword raw_addr) const { return raw_addr < survivor_end_; }
  bool IsResolved() const { return top_ == resolved_top_; }
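  // IsResolved() means the scavenger's scan pointer (resolved_top_, "SCAN" in
  // Cheney's algorithm) has caught up with the allocation pointer (top_,
  // "NEXT"): every object copied to this page has had its fields processed.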

 private:
  void RememberCard(uword slot) {
    ASSERT(Contains(slot));
    if (card_table_ == nullptr) {
      size_t size_in_bits = card_table_size();
      size_t size_in_bytes =
          Utils::RoundUp(size_in_bits, kBitsPerWord) >> kBitsPerByteLog2;
      card_table_ =
          reinterpret_cast<uword*>(calloc(size_in_bytes, sizeof(uint8_t)));
    }
    intptr_t offset = slot - reinterpret_cast<uword>(this);
    intptr_t index = offset >> kBytesPerCardLog2;
    ASSERT((index >= 0) && (index < card_table_size()));
    intptr_t word_offset = index >> kBitsPerWordLog2;
    intptr_t bit_offset = index & (kBitsPerWord - 1);
    uword bit_mask = static_cast<uword>(1) << bit_offset;
    card_table_[word_offset] |= bit_mask;
  }
  bool IsCardRemembered(uword slot) {
    ASSERT(Contains(slot));
    if (card_table_ == nullptr) {
      return false;
    }
    intptr_t offset = slot - reinterpret_cast<uword>(this);
    intptr_t index = offset >> kBytesPerCardLog2;
    ASSERT((index >= 0) && (index < card_table_size()));
    intptr_t word_offset = index >> kBitsPerWordLog2;
    intptr_t bit_offset = index & (kBitsPerWord - 1);
    uword bit_mask = static_cast<uword>(1) << bit_offset;
    return (card_table_[word_offset] & bit_mask) != 0;
  }
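  // Card indexing above: a slot's byte offset from the page header is divided
  // by the card size (offset >> kBytesPerCardLog2), and that card index
  // selects one bit in the lazily calloc'ed card_table_ (word_offset picks the
  // uword, bit_offset the bit within it). Presumably this lets stores into a
  // large object dirty one card per group of slots rather than record each
  // slot individually.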

  void set_object_end(uword value) {
    ASSERT((value & kObjectAlignmentMask) == kOldObjectAlignmentOffset);
    top_ = value;
  }

  // Returns nullptr on OOM.
  static Page* Allocate(intptr_t size, uword flags);

  // Deallocate the virtual memory backing this page. The page, including this
  // header, becomes immediately inaccessible.
  void Deallocate();

  uword flags_;
  VirtualMemory* memory_;
  Page* next_;
  ForwardingPage* forwarding_page_;
  uword* card_table_;  // Remembered set, not marking.
  RelaxedAtomic<intptr_t> progress_bar_;

  // The thread using this page for allocation, otherwise nullptr.
  Thread* owner_;

  // The address of the next allocation. If owner is non-NULL, this value is
  // stale and the current value is at owner->top_. Called "NEXT" in the
  // original Cheney paper.
  uword top_;

  // The address after the last allocatable byte in this page.
  uword end_;

  // Objects below this address have survived a scavenge.
  uword survivor_end_;

  // A pointer to the first unprocessed object. Resolution completes when this
  // value meets the allocation top. Called "SCAN" in the original Cheney
  // paper.
  uword resolved_top_;

  friend class CheckStoreBufferVisitor;
  friend class GCCompactor;
  friend class PageSpace;
  template <bool>
  friend class ScavengerVisitorBase;
  friend class SemiSpace;
  friend class UnwindingRecords;

  DISALLOW_ALLOCATION();
  DISALLOW_IMPLICIT_CONSTRUCTORS(Page);
};

}  // namespace dart

#endif  // RUNTIME_VM_HEAP_PAGE_H_
| 355 | |