// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#ifndef RUNTIME_VM_CLASS_TABLE_H_
#define RUNTIME_VM_CLASS_TABLE_H_

#include <memory>
#include <tuple>
#include <utility>

#include "platform/allocation.h"
#include "platform/assert.h"
#include "platform/atomic.h"
#include "platform/utils.h"

#include "vm/bitfield.h"
#include "vm/class_id.h"
#include "vm/flags.h"
#include "vm/globals.h"
#include "vm/tagged_pointer.h"

namespace dart {

class Class;
class ClassTable;
class Isolate;
class IsolateGroup;
class JSONArray;
class JSONObject;
class JSONStream;
template <typename T>
class MallocGrowableArray;
class ObjectPointerVisitor;
class PersistentHandle;

// A 64-bit bitmap describing unboxed fields in a class.
//
// There is a bit for each of the first 64 words of an instance of the class.
//
// Words corresponding to set bits must be ignored by the GC because they
// don't contain pointers. All words beyond the first 64 words of an object
// are expected to contain pointers.
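//
// Illustrative GC-side usage (a sketch only; |instance_size_in_words| and
// |VisitPointerAt| are hypothetical names used just for this example, and
// GetUnboxedFieldsMapAt is the ClassTable accessor declared below):
//
//   UnboxedFieldBitmap bitmap = class_table->GetUnboxedFieldsMapAt(cid);
//   for (intptr_t i = 0; i < instance_size_in_words; i++) {
//     if (bitmap.Get(i)) continue;   // Unboxed word: nothing for GC to visit.
//     VisitPointerAt(instance, i);   // Otherwise treat the word as a pointer.
//   }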
class UnboxedFieldBitmap {
 public:
  UnboxedFieldBitmap() : bitmap_(0) {}
  explicit UnboxedFieldBitmap(uint64_t bitmap) : bitmap_(bitmap) {}
  UnboxedFieldBitmap(const UnboxedFieldBitmap&) = default;
  UnboxedFieldBitmap& operator=(const UnboxedFieldBitmap&) = default;

  DART_FORCE_INLINE bool Get(intptr_t position) const {
    if (position >= Length()) return false;
    return Utils::TestBit(bitmap_, position);
  }
  DART_FORCE_INLINE void Set(intptr_t position) {
    ASSERT(position < Length());
    bitmap_ |= Utils::Bit<decltype(bitmap_)>(position);
  }
  DART_FORCE_INLINE void Clear(intptr_t position) {
    ASSERT(position < Length());
    bitmap_ &= ~Utils::Bit<decltype(bitmap_)>(position);
  }
  DART_FORCE_INLINE uint64_t Value() const { return bitmap_; }
  DART_FORCE_INLINE bool IsEmpty() const { return bitmap_ == 0; }
  DART_FORCE_INLINE void Reset() { bitmap_ = 0; }

  DART_FORCE_INLINE static constexpr intptr_t Length() {
    return sizeof(decltype(bitmap_)) * kBitsPerByte;
  }

 private:
  uint64_t bitmap_;
};

// Allocator used to manage memory for ClassTable arrays and ClassTable
// objects themselves.
//
// This allocator provides delayed free functionality: normally class tables
// can't be freed unless all mutator and helper threads are stopped, because
// some of these threads might be holding a pointer to a table which we
// want to free. Instead of stopping the world whenever we need to free
// a table (e.g. freeing the old table after growing it), we delay freeing
// until an occasional GC, which needs to stop the world anyway.
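//
// Illustrative usage (a sketch only; the element type, sizes and the |table|
// local are arbitrary and not part of any VM API):
//
//   ClassTableAllocator allocator;
//   intptr_t* table = allocator.AllocZeroInitialized<intptr_t>(16);
//   // Growing copies the old 16 elements into a zero-initialized array of 32
//   // elements and schedules the old array for delayed freeing via |Free|.
//   table = allocator.Realloc<intptr_t>(table, 16, 32);
//   ...
//   allocator.FreePending();  // Only at shutdown or with the world stopped.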
class ClassTableAllocator : public ValueObject {
 public:
  ClassTableAllocator();
  ~ClassTableAllocator();

  // Allocate an array of T with |len| elements.
  //
  // Does *not* initialize the memory.
  template <class T>
  inline T* Alloc(intptr_t len) {
    return reinterpret_cast<T*>(dart::malloc(len * sizeof(T)));
  }

  // Allocate a zero initialized array of T with |len| elements.
  template <class T>
  inline T* AllocZeroInitialized(intptr_t len) {
    return reinterpret_cast<T*>(dart::calloc(len, sizeof(T)));
  }

  // Clone the given |array| with |size| elements.
  template <class T>
  inline T* Clone(T* array, intptr_t size) {
    if (array == nullptr) {
      ASSERT(size == 0);
      return nullptr;
    }
    auto result = Alloc<T>(size);
    memmove(result, array, size * sizeof(T));
    return result;
  }

  // Copy |size| elements from the given |array| into a new
  // array with space for |new_size| elements. Then |Free|
  // the original |array|.
  //
  // |new_size| is expected to be larger than |size|.
  template <class T>
  inline T* Realloc(T* array, intptr_t size, intptr_t new_size) {
    ASSERT(size < new_size);
    auto result = AllocZeroInitialized<T>(new_size);
    if (size != 0) {
      ASSERT(result != nullptr);
      memmove(result, array, size * sizeof(T));
    }
    Free(array);
    return result;
  }

  // Schedule deletion of the given ClassTable.
  void Free(ClassTable* table);

  // Schedule freeing of the given pointer.
  void Free(void* ptr);

  // Free all objects which were scheduled by |Free|. Expected to only be
  // called on |IsolateGroup| shutdown or when the world is stopped and no
  // thread can be using a stale class table pointer.
  void FreePending();

 private:
  typedef void (*Deleter)(void*);
  MallocGrowableArray<std::pair<void*, Deleter>>* pending_freed_;
};

// A table with the given |Columns| indexed by class id.
//
// Each column is a contiguous array of the given type. All columns have
// the same number of used elements (|num_cids()|) and the same capacity.
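//
// Illustrative usage (a sketch only; the column types shown are arbitrary and
// |instance_size_in_words| is a hypothetical value, not a VM name):
//
//   ClassTableAllocator allocator;
//   CidIndexedTable<classid_t, ClassPtr, uint32_t> table(&allocator);
//   bool did_grow = false;
//   intptr_t cid = table.AddRow(&did_grow);
//   table.At<1>(cid) = instance_size_in_words;  // Column 1 holds uint32_t.
//   uint32_t size = table.At<1>(cid);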
template <typename CidType, typename... Columns>
class CidIndexedTable {
 public:
  explicit CidIndexedTable(ClassTableAllocator* allocator)
      : allocator_(allocator) {}

  ~CidIndexedTable() {
    std::apply([&](auto&... column) { (allocator_->Free(column.load()), ...); },
               columns_);
  }

  CidIndexedTable(const CidIndexedTable& other) = delete;

  void SetNumCidsAndCapacity(intptr_t new_num_cids, intptr_t new_capacity) {
    columns_ = std::apply(
        [&](auto&... column) {
          return std::make_tuple(
              allocator_->Realloc(column.load(), num_cids_, new_capacity)...);
        },
        columns_);
    capacity_ = new_capacity;
    SetNumCids(new_num_cids);
  }

  void AllocateIndex(intptr_t index, bool* did_grow) {
    *did_grow = EnsureCapacity(index);
    SetNumCids(Utils::Maximum(num_cids_, index + 1));
  }

  intptr_t AddRow(bool* did_grow) {
    *did_grow = EnsureCapacity(num_cids_);
    intptr_t id = num_cids_;
    SetNumCids(num_cids_ + 1);
    return id;
  }

  void ShrinkTo(intptr_t new_num_cids) {
    ASSERT(new_num_cids <= num_cids_);
    num_cids_ = new_num_cids;
  }

  bool IsValidIndex(intptr_t index) const {
    return 0 <= index && index < num_cids_;
  }

  void CopyFrom(const CidIndexedTable& other) {
    ASSERT(allocator_ == other.allocator_);

    std::apply([&](auto&... column) { (allocator_->Free(column.load()), ...); },
               columns_);

    columns_ = std::apply(
        [&](auto&... column) {
          return std::make_tuple(
              allocator_->Clone(column.load(), other.num_cids_)...);
        },
        other.columns_);
    capacity_ = num_cids_ = other.num_cids_;
  }

  void Remap(intptr_t* old_to_new_cid) {
    CidIndexedTable clone(allocator_);
    clone.CopyFrom(*this);
    RemapAllColumns(clone, old_to_new_cid,
                    std::index_sequence_for<Columns...>{});
  }

  template <
      intptr_t kColumnIndex,
      typename T = std::tuple_element_t<kColumnIndex, std::tuple<Columns...>>>
  T* GetColumn() {
    return std::get<kColumnIndex>(columns_).load();
  }

  template <
      intptr_t kColumnIndex,
      typename T = std::tuple_element_t<kColumnIndex, std::tuple<Columns...>>>
  const T* GetColumn() const {
    return std::get<kColumnIndex>(columns_).load();
  }

  template <
      intptr_t kColumnIndex,
      typename T = std::tuple_element_t<kColumnIndex, std::tuple<Columns...>>>
  T& At(intptr_t index) {
    ASSERT(IsValidIndex(index));
    return GetColumn<kColumnIndex>()[index];
  }

  template <
      intptr_t kColumnIndex,
      typename T = std::tuple_element_t<kColumnIndex, std::tuple<Columns...>>>
  const T& At(intptr_t index) const {
    ASSERT(IsValidIndex(index));
    return GetColumn<kColumnIndex>()[index];
  }

  intptr_t num_cids() const { return num_cids_; }
  intptr_t capacity() const { return capacity_; }

 private:
  friend class ClassTable;

  // Wrapper around AcqRelAtomic<T*> which makes it assignable and copyable
  // so that we can put it inside a std::tuple.
  template <typename T>
  struct Ptr {
    Ptr() : ptr(nullptr) {}
    Ptr(T* ptr) : ptr(ptr) {}  // NOLINT

    Ptr(const Ptr& other) { ptr.store(other.ptr.load()); }

    Ptr& operator=(const Ptr& other) {
      ptr.store(other.load());
      return *this;
    }

    T* load() const { return ptr.load(); }

    AcqRelAtomic<T*> ptr = {nullptr};
  };

  void SetNumCids(intptr_t new_num_cids) {
    if (new_num_cids > kClassIdTagMax) {
      FATAL("Too many classes");
    }
    num_cids_ = new_num_cids;
  }

  bool EnsureCapacity(intptr_t index) {
    if (index >= capacity_) {
      SetNumCidsAndCapacity(num_cids_, index + kCapacityIncrement);
      return true;
    }
    return false;
  }

  template <intptr_t kColumnIndex>
  void RemapColumn(const CidIndexedTable& old, intptr_t* old_to_new_cid) {
    auto new_column = GetColumn<kColumnIndex>();
    auto old_column = old.GetColumn<kColumnIndex>();
    for (intptr_t i = 0; i < num_cids_; i++) {
      new_column[old_to_new_cid[i]] = old_column[i];
    }
  }

  template <std::size_t... Is>
  void RemapAllColumns(const CidIndexedTable& old,
                       intptr_t* old_to_new_cid,
                       std::index_sequence<Is...>) {
    (RemapColumn<Is>(old, old_to_new_cid), ...);
  }

  static constexpr intptr_t kCapacityIncrement = 256;

  ClassTableAllocator* allocator_;
  intptr_t num_cids_ = 0;
  intptr_t capacity_ = 0;
  std::tuple<Ptr<Columns>...> columns_;
};

// Registry of all known classes.
//
// The GC will only use information about instance size and unboxed field maps
// to scan instances and will not access class objects themselves. This
// information is stored in separate columns of the |classes_| table.
//
// # Concurrency & atomicity
//
// This table is read concurrently without locking (e.g. by GC threads) so
// there are some invariants that need to be observed when working with it.
//
// * When the table is updated (e.g. when the table is grown or a new class is
//   registered in it) there must be a release barrier after the update.
//   Such a barrier ensures that stores which populate the table are not
//   reordered past the store which exposes the grown table or the new
//   class id;
// * Old versions of the table can only be freed when the world is stopped,
//   i.e. no mutator and no helper threads are running; this avoids freeing
//   a table which some other thread is still reading from.
//
// Note that torn reads are not a concern (e.g. it is fine to use
// memmove to copy class table contents) as long as an appropriate
// barrier is issued before the copy of the table can be observed.
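//
// Illustrative publish pattern (a sketch of the release/acquire pairing
// described above; |table_| stands in for one of the AcqRelAtomic column
// pointers used below and |new_table|/|cls| are hypothetical locals):
//
//   // Writer (registering thread):
//   new_table[cid] = cls;             // Populate first ...
//   table_.store(new_table);          // ... then publish with release order.
//
//   // Concurrent reader (e.g. a GC thread):
//   ClassPtr* table = table_.load();  // Acquire load pairs with the store.
//   ClassPtr seen = table[cid];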
//
// # Hot reload
//
// Each IsolateGroup contains two ClassTable fields: |class_table| and
// |heap_walk_class_table|. GC visitors use the second field to get the
// ClassTable instance which they use for visiting pointers inside instances
// in the heap. Usually these two fields point to the same table, except
// when the IsolateGroup is in the middle of a reload.
//
// During a reload |class_table| points to a copy of the original table.
// Kernel loading modifies this copy, while GC workers can continue using
// the original table, which is still available through
// |heap_walk_class_table|. If the hot reload succeeds, |heap_walk_class_table|
// is dropped and |class_table| becomes the source of truth. Otherwise, the
// original table is restored from |heap_walk_class_table|.
//
// See IsolateGroup methods CloneClassTableForReload, RestoreOriginalClassTable,
// DropOriginalClassTable.
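//
// Illustrative reload sequence (a simplified sketch; |group| and
// |reload_succeeded| are hypothetical locals, the real machinery lives in
// IsolateGroup):
//
//   group->CloneClassTableForReload();   // class_table != heap_walk_class_table
//   ... load new kernel, mutating the table behind class_table ...
//   if (reload_succeeded) {
//     group->DropOriginalClassTable();     // keep the modified copy
//   } else {
//     group->RestoreOriginalClassTable();  // roll back to the old table
//   }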
class ClassTable : public MallocAllocated {
 public:
  explicit ClassTable(ClassTableAllocator* allocator);

  ~ClassTable();

  ClassTable* Clone() const { return new ClassTable(*this); }

  ClassPtr At(intptr_t cid) const {
    if (IsTopLevelCid(cid)) {
      return top_level_classes_.At<kClassIndex>(IndexFromTopLevelCid(cid));
    }
    return classes_.At<kClassIndex>(cid);
  }

  int32_t SizeAt(intptr_t index) const {
    if (IsTopLevelCid(index)) {
      return 0;
    }
    return classes_.At<kSizeIndex>(index);
  }

  void SetAt(intptr_t index, ClassPtr raw_cls);
  void UpdateClassSize(intptr_t cid, ClassPtr raw_cls);

  bool IsValidIndex(intptr_t cid) const {
    if (IsTopLevelCid(cid)) {
      return top_level_classes_.IsValidIndex(IndexFromTopLevelCid(cid));
    }
    return classes_.IsValidIndex(cid);
  }

  bool HasValidClassAt(intptr_t cid) const { return At(cid) != nullptr; }

  UnboxedFieldBitmap GetUnboxedFieldsMapAt(intptr_t cid) const {
    ASSERT(IsValidIndex(cid));
    return classes_.At<kUnboxedFieldBitmapIndex>(cid);
  }

  void SetUnboxedFieldsMapAt(intptr_t cid, UnboxedFieldBitmap map) {
    ASSERT(IsValidIndex(cid));
    classes_.At<kUnboxedFieldBitmapIndex>(cid) = map;
  }

#if !defined(PRODUCT)
  bool ShouldTraceAllocationFor(intptr_t cid) {
    return !IsTopLevelCid(cid) &&
           (classes_.At<kAllocationTracingStateIndex>(cid) != kTracingDisabled);
  }

  void SetTraceAllocationFor(intptr_t cid, bool trace) {
    classes_.At<kAllocationTracingStateIndex>(cid) =
        trace ? kTraceAllocationBit : kTracingDisabled;
  }

  void SetCollectInstancesFor(intptr_t cid, bool trace) {
    auto& slot = classes_.At<kAllocationTracingStateIndex>(cid);
    if (trace) {
      slot |= kCollectInstancesBit;
    } else {
      slot &= ~kCollectInstancesBit;
    }
  }

  bool CollectInstancesFor(intptr_t cid) {
    auto& slot = classes_.At<kAllocationTracingStateIndex>(cid);
    return (slot & kCollectInstancesBit) != 0;
  }

  void UpdateCachedAllocationTracingStateTablePointer() {
    cached_allocation_tracing_state_table_.store(
        classes_.GetColumn<kAllocationTracingStateIndex>());
  }
#else
  void UpdateCachedAllocationTracingStateTablePointer() {}
#endif  // !defined(PRODUCT)

#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
  void PopulateUserVisibleNames();

  const char* UserVisibleNameFor(intptr_t cid) {
    if (!classes_.IsValidIndex(cid)) {
      return nullptr;
    }
    return classes_.At<kClassNameIndex>(cid);
  }

  void SetUserVisibleNameFor(intptr_t cid, const char* name) {
    ASSERT(classes_.At<kClassNameIndex>(cid) == nullptr);
    classes_.At<kClassNameIndex>(cid) = name;
  }
#endif  // !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)

  intptr_t NumCids() const {
    return classes_.num_cids();
  }
  intptr_t Capacity() const {
    return classes_.capacity();
  }

  intptr_t NumTopLevelCids() const {
    return top_level_classes_.num_cids();
  }

  void Register(const Class& cls);
  void AllocateIndex(intptr_t index);

  void RegisterTopLevel(const Class& cls);
  void UnregisterTopLevel(intptr_t index);

  void Remap(intptr_t* old_to_new_cids);

  void VisitObjectPointers(ObjectPointerVisitor* visitor);

  // If a snapshot reader has populated the class table then the
  // sizes in the class table are not correct. Iterates through the
  // table, updating the sizes.
  void CopySizesFromClassObjects();

  void Validate();

  void Print();

#if defined(DART_PRECOMPILER)
  void PrintObjectLayout(const char* filename);
#endif

#ifndef PRODUCT
  // Describes layout of heap stats for code generation. See offset_extractor.cc
  struct ArrayTraits {
    static intptr_t elements_start_offset() { return 0; }

    static constexpr intptr_t kElementSize = sizeof(uint8_t);
  };

  static intptr_t allocation_tracing_state_table_offset() {
    static_assert(sizeof(cached_allocation_tracing_state_table_) == kWordSize);
    return OFFSET_OF(ClassTable, cached_allocation_tracing_state_table_);
  }

  void AllocationProfilePrintJSON(JSONStream* stream, bool internal);

  void PrintToJSONObject(JSONObject* object);
#endif  // !PRODUCT

  // Deallocates table copies. Do not call during concurrent access to table.
  void FreeOldTables();

  static bool IsTopLevelCid(intptr_t cid) { return cid >= kTopLevelCidOffset; }

  static intptr_t IndexFromTopLevelCid(intptr_t cid) {
    ASSERT(IsTopLevelCid(cid));
    return cid - kTopLevelCidOffset;
  }

  static intptr_t CidFromTopLevelIndex(intptr_t index) {
    return kTopLevelCidOffset + index;
  }

 private:
  friend class ClassTableAllocator;
  friend class Dart;
  friend Isolate* CreateWithinExistingIsolateGroup(IsolateGroup* group,
                                                   const char* name,
                                                   char** error);
  friend class IsolateGroup;  // for table()
  static constexpr int kInitialCapacity = 512;

  static constexpr intptr_t kTopLevelCidOffset = kClassIdTagMax + 1;

  ClassTable(const ClassTable& original)
      : allocator_(original.allocator_),
        classes_(original.allocator_),
        top_level_classes_(original.allocator_) {
    classes_.CopyFrom(original.classes_);

#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
    // Copying |classes_| doesn't perform a deep copy, so we duplicate the
    // class names here to avoid double-free crashes at shutdown.
    for (intptr_t cid = 1; cid < classes_.num_cids(); ++cid) {
      if (classes_.IsValidIndex(cid)) {
        const char* cls_name = classes_.At<kClassNameIndex>(cid);
        if (cls_name != nullptr) {
          classes_.At<kClassNameIndex>(cid) = Utils::StrDup(cls_name);
        }
      }
    }
#endif  // !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)

    top_level_classes_.CopyFrom(original.top_level_classes_);
    UpdateCachedAllocationTracingStateTablePointer();
  }

  void AllocateTopLevelIndex(intptr_t index);

  ClassPtr* table() {
    return classes_.GetColumn<kClassIndex>();
  }

  // Used to drop recently added classes.
  void SetNumCids(intptr_t num_cids, intptr_t num_tlc_cids) {
    classes_.ShrinkTo(num_cids);
    top_level_classes_.ShrinkTo(num_tlc_cids);
  }

  ClassTableAllocator* allocator_;

  // Unfortunately the std::tuple used by CidIndexedTable does not have a
  // stable layout, so we can't refer to its elements from generated code.
  NOT_IN_PRODUCT(AcqRelAtomic<uint8_t*> cached_allocation_tracing_state_table_ =
                     {nullptr});

  enum {
    kClassIndex = 0,
    kSizeIndex,
    kUnboxedFieldBitmapIndex,
#if !defined(PRODUCT)
    kAllocationTracingStateIndex,
#endif
#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
    kClassNameIndex,
#endif
  };

#if !defined(PRODUCT)
  CidIndexedTable<ClassIdTagType,
                  ClassPtr,
                  uint32_t,
                  UnboxedFieldBitmap,
                  uint8_t,
                  const char*>
      classes_;
#elif defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
  CidIndexedTable<ClassIdTagType,
                  ClassPtr,
                  uint32_t,
                  UnboxedFieldBitmap,
                  const char*>
      classes_;
#else
  CidIndexedTable<ClassIdTagType, ClassPtr, uint32_t, UnboxedFieldBitmap>
      classes_;
#endif

#ifndef PRODUCT
  enum {
    kTracingDisabled = 0,
    kTraceAllocationBit = (1 << 0),
    kCollectInstancesBit = (1 << 1),
  };
#endif  // !PRODUCT

  CidIndexedTable<classid_t, ClassPtr> top_level_classes_;
};

}  // namespace dart

#endif  // RUNTIME_VM_CLASS_TABLE_H_