1 | // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file |
2 | // for details. All rights reserved. Use of this source code is governed by a |
3 | // BSD-style license that can be found in the LICENSE file. |
4 | |
5 | #include <memory> |
6 | #include <utility> |
7 | |
8 | #include "vm/heap/heap.h" |
9 | |
10 | #include "platform/assert.h" |
11 | #include "platform/utils.h" |
12 | #include "vm/compiler/jit/compiler.h" |
13 | #include "vm/dart.h" |
14 | #include "vm/flags.h" |
15 | #include "vm/heap/pages.h" |
16 | #include "vm/heap/safepoint.h" |
17 | #include "vm/heap/scavenger.h" |
18 | #include "vm/heap/verifier.h" |
19 | #include "vm/heap/weak_table.h" |
20 | #include "vm/isolate.h" |
21 | #include "vm/lockers.h" |
22 | #include "vm/object.h" |
23 | #include "vm/object_set.h" |
24 | #include "vm/os.h" |
25 | #include "vm/raw_object.h" |
26 | #include "vm/service.h" |
27 | #include "vm/service_event.h" |
28 | #include "vm/service_isolate.h" |
29 | #include "vm/stack_frame.h" |
30 | #include "vm/tags.h" |
31 | #include "vm/thread_pool.h" |
32 | #include "vm/timeline.h" |
33 | #include "vm/virtual_memory.h" |
34 | |
35 | namespace dart { |
36 | |
37 | DEFINE_FLAG(bool, write_protect_vm_isolate, true, "Write protect vm_isolate."); |
38 | DEFINE_FLAG(bool, |
39 | disable_heap_verification, |
40 | false, |
41 | "Explicitly disable heap verification."); |
42 | |
43 | Heap::Heap(IsolateGroup* isolate_group, |
44 | bool is_vm_isolate, |
45 | intptr_t max_new_gen_semi_words, |
46 | intptr_t max_old_gen_words) |
47 | : isolate_group_(isolate_group), |
48 | is_vm_isolate_(is_vm_isolate), |
49 | new_space_(this, max_new_gen_semi_words), |
50 | old_space_(this, max_old_gen_words), |
51 | read_only_(false), |
52 | last_gc_was_old_space_(false), |
53 | assume_scavenge_will_fail_(false), |
54 | gc_on_nth_allocation_(kNoForcedGarbageCollection) { |
55 | UpdateGlobalMaxUsed(); |
56 | for (int sel = 0; sel < kNumWeakSelectors; sel++) { |
57 | new_weak_tables_[sel] = new WeakTable(); |
58 | old_weak_tables_[sel] = new WeakTable(); |
59 | } |
60 | stats_.num_ = 0; |
61 | stats_.state_ = kInitial; |
62 | stats_.reachability_barrier_ = 0; |
63 | } |
64 | |
65 | Heap::~Heap() { |
66 | #if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER) |
67 | Dart_HeapSamplingDeleteCallback cleanup = |
68 | HeapProfileSampler::delete_callback(); |
69 | if (cleanup != nullptr) { |
70 | new_weak_tables_[kHeapSamplingData]->CleanupValues(cleanup); |
71 | old_weak_tables_[kHeapSamplingData]->CleanupValues(cleanup); |
72 | } |
73 | #endif |
74 | |
75 | for (int sel = 0; sel < kNumWeakSelectors; sel++) { |
76 | delete new_weak_tables_[sel]; |
77 | delete old_weak_tables_[sel]; |
78 | } |
79 | } |
80 | |
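// Allocates 'size' bytes in new space. If the initial attempt fails, retries
// under a safepoint (another thread may have already collected), scavenges,
// and finally falls back to old-space allocation.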
81 | uword Heap::AllocateNew(Thread* thread, intptr_t size) { |
82 | ASSERT(thread->no_safepoint_scope_depth() == 0); |
83 | CollectForDebugging(thread); |
84 | uword addr = new_space_.TryAllocate(thread, size); |
85 | if (LIKELY(addr != 0)) { |
86 | return addr; |
87 | } |
88 | if (!assume_scavenge_will_fail_ && !thread->force_growth()) { |
89 | GcSafepointOperationScope safepoint_operation(thread); |
90 | |
91 | // Another thread may have won the race to the safepoint and performed a GC |
92 | // before this thread acquired the safepoint. Retry the allocation under the |
93 | // safepoint to avoid back-to-back GC. |
94 | addr = new_space_.TryAllocate(thread, size); |
95 | if (addr != 0) { |
96 | return addr; |
97 | } |
98 | |
    CollectGarbage(thread, GCType::kScavenge, GCReason::kNewSpace);
100 | |
101 | addr = new_space_.TryAllocate(thread, size); |
102 | if (LIKELY(addr != 0)) { |
103 | return addr; |
104 | } |
105 | } |
106 | |
107 | // It is possible a GC doesn't clear enough space. |
108 | // In that case, we must fall through and allocate into old space. |
  return AllocateOld(thread, size, /*exec*/ false);
110 | } |
111 | |
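// Allocates 'size' bytes in old space, escalating through progressively more
// expensive steps: waiting for concurrent sweepers, collecting both
// generations, forcing growth, and a final compacting collection. Returns 0
// if the allocation still cannot be satisfied.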
112 | uword Heap::AllocateOld(Thread* thread, intptr_t size, bool is_exec) { |
113 | ASSERT(thread->no_safepoint_scope_depth() == 0); |
114 | |
115 | #if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER) |
116 | if (HeapProfileSampler::enabled()) { |
    thread->heap_sampler().SampleOldSpaceAllocation(size);
118 | } |
119 | #endif |
120 | |
121 | if (!thread->force_growth()) { |
122 | CollectForDebugging(thread); |
123 | uword addr = old_space_.TryAllocate(size, is_exec); |
124 | if (addr != 0) { |
125 | return addr; |
126 | } |
127 | // Wait for any GC tasks that are in progress. |
128 | WaitForSweeperTasks(thread); |
129 | addr = old_space_.TryAllocate(size, is_exec); |
130 | if (addr != 0) { |
131 | return addr; |
132 | } |
133 | GcSafepointOperationScope safepoint_operation(thread); |
134 | // Another thread may have won the race to the safepoint and performed a GC |
135 | // before this thread acquired the safepoint. Retry the allocation under the |
136 | // safepoint to avoid back-to-back GC. |
137 | addr = old_space_.TryAllocate(size, is_exec); |
138 | if (addr != 0) { |
139 | return addr; |
140 | } |
141 | // All GC tasks finished without allocating successfully. Collect both |
142 | // generations. |
    CollectMostGarbage(GCReason::kOldSpace, /*compact=*/false);
144 | addr = old_space_.TryAllocate(size, is_exec); |
145 | if (addr != 0) { |
146 | return addr; |
147 | } |
148 | // Wait for all of the concurrent tasks to finish before giving up. |
149 | WaitForSweeperTasksAtSafepoint(thread); |
150 | addr = old_space_.TryAllocate(size, is_exec); |
151 | if (addr != 0) { |
152 | return addr; |
153 | } |
154 | // Force growth before attempting another synchronous GC. |
155 | addr = old_space_.TryAllocate(size, is_exec, PageSpace::kForceGrowth); |
156 | if (addr != 0) { |
157 | return addr; |
158 | } |
159 | // Before throwing an out-of-memory error try a synchronous GC. |
    CollectAllGarbage(GCReason::kOldSpace, /*compact=*/true);
161 | WaitForSweeperTasksAtSafepoint(thread); |
162 | } |
163 | uword addr = old_space_.TryAllocate(size, is_exec, PageSpace::kForceGrowth); |
164 | if (addr != 0) { |
165 | return addr; |
166 | } |
167 | |
168 | if (!thread->force_growth()) { |
169 | WaitForSweeperTasks(thread); |
170 | old_space_.TryReleaseReservation(); |
171 | } else { |
    // We may or may not be at a safepoint, so we don't know how to wait for
    // the sweeper.
174 | } |
175 | |
176 | // Give up allocating this object. |
177 | OS::PrintErr("Exhausted heap space, trying to allocate %"Pd " bytes.\n", |
178 | size); |
179 | return 0; |
180 | } |
181 | |
182 | bool Heap::AllocatedExternal(intptr_t size, Space space) { |
183 | if (space == kNew) { |
184 | if (!new_space_.AllocatedExternal(size)) { |
185 | return false; |
186 | } |
187 | } else { |
188 | ASSERT(space == kOld); |
189 | if (!old_space_.AllocatedExternal(size)) { |
190 | return false; |
191 | } |
192 | } |
193 | |
194 | Thread* thread = Thread::Current(); |
195 | if ((thread->no_callback_scope_depth() == 0) && !thread->force_growth()) { |
196 | CheckExternalGC(thread); |
197 | } else { |
198 | // Check delayed until Dart_TypedDataRelease/~ForceGrowthScope. |
199 | } |
200 | return true; |
201 | } |
202 | |
203 | void Heap::FreedExternal(intptr_t size, Space space) { |
204 | if (space == kNew) { |
205 | new_space_.FreedExternal(size); |
206 | } else { |
207 | ASSERT(space == kOld); |
208 | old_space_.FreedExternal(size); |
209 | } |
210 | } |
211 | |
212 | void Heap::PromotedExternal(intptr_t size) { |
213 | new_space_.FreedExternal(size); |
214 | old_space_.AllocatedExternal(size); |
215 | } |
216 | |
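// Considers a GC in response to external allocations: a scavenge when the
// external size attributed to new space grows large relative to its capacity,
// then either an old-space collection or a concurrent-marking check depending
// on the old-space thresholds.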
217 | void Heap::CheckExternalGC(Thread* thread) { |
218 | ASSERT(thread->no_safepoint_scope_depth() == 0); |
219 | ASSERT(thread->no_callback_scope_depth() == 0); |
220 | ASSERT(!thread->force_growth()); |
221 | |
222 | if (mode_ == Dart_PerformanceMode_Latency) { |
223 | return; |
224 | } |
225 | |
226 | if (new_space_.ExternalInWords() >= (4 * new_space_.CapacityInWords())) { |
227 | // Attempt to free some external allocation by a scavenge. (If the total |
228 | // remains above the limit, next external alloc will trigger another.) |
    CollectGarbage(thread, GCType::kScavenge, GCReason::kExternal);
230 | // Promotion may have pushed old space over its limit. Fall through for old |
231 | // space GC check. |
232 | } |
233 | |
234 | if (old_space_.ReachedHardThreshold()) { |
235 | if (last_gc_was_old_space_) { |
      CollectNewSpaceGarbage(thread, GCType::kScavenge, GCReason::kFull);
    }
    CollectGarbage(thread, GCType::kMarkSweep, GCReason::kExternal);
  } else {
    CheckConcurrentMarking(thread, GCReason::kExternal, 0);
241 | } |
242 | } |
243 | |
244 | bool Heap::Contains(uword addr) const { |
245 | return new_space_.Contains(addr) || old_space_.Contains(addr); |
246 | } |
247 | |
248 | bool Heap::NewContains(uword addr) const { |
249 | return new_space_.Contains(addr); |
250 | } |
251 | |
252 | bool Heap::OldContains(uword addr) const { |
253 | return old_space_.Contains(addr); |
254 | } |
255 | |
256 | bool Heap::CodeContains(uword addr) const { |
257 | return old_space_.CodeContains(addr); |
258 | } |
259 | |
260 | bool Heap::DataContains(uword addr) const { |
261 | return old_space_.DataContains(addr); |
262 | } |
263 | |
264 | void Heap::VisitObjects(ObjectVisitor* visitor) { |
265 | new_space_.VisitObjects(visitor); |
266 | old_space_.VisitObjects(visitor); |
267 | } |
268 | |
269 | void Heap::VisitObjectsNoImagePages(ObjectVisitor* visitor) { |
270 | new_space_.VisitObjects(visitor); |
271 | old_space_.VisitObjectsNoImagePages(visitor); |
272 | } |
273 | |
274 | void Heap::VisitObjectsImagePages(ObjectVisitor* visitor) const { |
275 | old_space_.VisitObjectsImagePages(visitor); |
276 | } |
277 | |
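// Safepoints all mutators and waits for concurrent marking/sweeping tasks to
// finish so the heap can be iterated safely. Optionally makes code pages
// writable for the duration of the scope.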
278 | HeapIterationScope::HeapIterationScope(Thread* thread, bool writable) |
279 | : ThreadStackResource(thread), |
280 | heap_(isolate_group()->heap()), |
281 | old_space_(heap_->old_space()), |
282 | writable_(writable) { |
  isolate_group()->safepoint_handler()->SafepointThreads(thread,
                                                         SafepointLevel::kGC);
285 | |
286 | { |
287 | // It's not safe to iterate over old space when concurrent marking or |
288 | // sweeping is in progress, or another thread is iterating the heap, so wait |
289 | // for any such task to complete first. |
290 | MonitorLocker ml(old_space_->tasks_lock()); |
291 | #if defined(DEBUG) |
292 | // We currently don't support nesting of HeapIterationScopes. |
293 | ASSERT(old_space_->iterating_thread_ != thread); |
294 | #endif |
295 | while ((old_space_->tasks() > 0) || |
296 | (old_space_->phase() != PageSpace::kDone)) { |
      old_space_->AssistTasks(&ml);
      if (old_space_->phase() == PageSpace::kAwaitingFinalization) {
        ml.Exit();
        heap_->CollectOldSpaceGarbage(thread, GCType::kMarkSweep,
                                      GCReason::kFinalize);
302 | ml.Enter(); |
303 | } |
304 | while (old_space_->tasks() > 0) { |
305 | ml.Wait(); |
306 | } |
307 | } |
308 | #if defined(DEBUG) |
309 | ASSERT(old_space_->iterating_thread_ == nullptr); |
310 | old_space_->iterating_thread_ = thread; |
311 | #endif |
312 | old_space_->set_tasks(1); |
313 | } |
314 | |
315 | if (writable_) { |
    heap_->WriteProtectCode(false);
317 | } |
318 | } |
319 | |
320 | HeapIterationScope::~HeapIterationScope() { |
321 | if (writable_) { |
    heap_->WriteProtectCode(true);
323 | } |
324 | |
325 | { |
326 | MonitorLocker ml(old_space_->tasks_lock()); |
327 | #if defined(DEBUG) |
328 | ASSERT(old_space_->iterating_thread_ == thread()); |
329 | old_space_->iterating_thread_ = nullptr; |
330 | #endif |
331 | ASSERT(old_space_->tasks() == 1); |
332 | old_space_->set_tasks(0); |
333 | ml.NotifyAll(); |
334 | } |
335 | |
  isolate_group()->safepoint_handler()->ResumeThreads(thread(),
                                                      SafepointLevel::kGC);
338 | } |
339 | |
340 | void HeapIterationScope::IterateObjects(ObjectVisitor* visitor) const { |
341 | heap_->VisitObjects(visitor); |
342 | } |
343 | |
344 | void HeapIterationScope::IterateObjectsNoImagePages( |
345 | ObjectVisitor* visitor) const { |
346 | heap_->new_space()->VisitObjects(visitor); |
347 | heap_->old_space()->VisitObjectsNoImagePages(visitor); |
348 | } |
349 | |
350 | void HeapIterationScope::IterateOldObjects(ObjectVisitor* visitor) const { |
351 | old_space_->VisitObjects(visitor); |
352 | } |
353 | |
354 | void HeapIterationScope::IterateOldObjectsNoImagePages( |
355 | ObjectVisitor* visitor) const { |
356 | old_space_->VisitObjectsNoImagePages(visitor); |
357 | } |
358 | |
359 | void HeapIterationScope::IterateVMIsolateObjects(ObjectVisitor* visitor) const { |
360 | Dart::vm_isolate_group()->heap()->VisitObjects(visitor); |
361 | } |
362 | |
363 | void HeapIterationScope::IterateObjectPointers( |
364 | ObjectPointerVisitor* visitor, |
365 | ValidationPolicy validate_frames) { |
366 | isolate_group()->VisitObjectPointers(visitor, validate_frames); |
367 | } |
368 | |
369 | void HeapIterationScope::IterateStackPointers( |
370 | ObjectPointerVisitor* visitor, |
371 | ValidationPolicy validate_frames) { |
372 | isolate_group()->VisitStackPointers(visitor, validate_frames); |
373 | } |
374 | |
375 | void Heap::VisitObjectPointers(ObjectPointerVisitor* visitor) { |
376 | new_space_.VisitObjectPointers(visitor); |
377 | old_space_.VisitObjectPointers(visitor); |
378 | } |
379 | |
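// Uses an idle period to perform GC work that is expected to fit within
// 'deadline': an idle scavenge first, then either a mark-compact, a
// mark-sweep, or starting/finishing concurrent marking for old space.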
380 | void Heap::NotifyIdle(int64_t deadline) { |
381 | Thread* thread = Thread::Current(); |
382 | TIMELINE_FUNCTION_GC_DURATION(thread, "NotifyIdle"); |
383 | { |
384 | GcSafepointOperationScope safepoint_operation(thread); |
385 | |
386 | // Check if we want to collect new-space first, because if we want to |
387 | // collect both new-space and old-space, the new-space collection should run |
388 | // first to shrink the root set (make old-space GC faster) and avoid |
389 | // intergenerational garbage (make old-space GC free more memory). |
390 | if (new_space_.ShouldPerformIdleScavenge(deadline)) { |
      CollectNewSpaceGarbage(thread, GCType::kScavenge, GCReason::kIdle);
392 | } |
393 | |
394 | // Check if we want to collect old-space, in decreasing order of cost. |
395 | // Because we use a deadline instead of a timeout, we automatically take any |
396 | // time used up by a scavenge into account when deciding if we can complete |
397 | // a mark-sweep on time. |
398 | if (old_space_.ShouldPerformIdleMarkCompact(deadline)) { |
399 | // We prefer mark-compact over other old space GCs if we have enough time, |
400 | // since it removes old space fragmentation and frees up most memory. |
401 | // Blocks for O(heap), roughly twice as costly as mark-sweep. |
      CollectOldSpaceGarbage(thread, GCType::kMarkCompact, GCReason::kIdle);
    } else if (old_space_.ReachedHardThreshold()) {
      // Even though the following GC may exceed our idle deadline, we need to
      // ensure that promotions during idle scavenges do not lead to unbounded
      // growth of old space. If a program is allocating only in new space and
      // all scavenges happen during idle time, then NotifyIdle will be the
      // only place that checks the old space allocation limit.
      // Compare the tail end of Heap::CollectNewSpaceGarbage.
      // Blocks for O(heap).
      CollectOldSpaceGarbage(thread, GCType::kMarkSweep, GCReason::kIdle);
412 | } else if (old_space_.ShouldStartIdleMarkSweep(deadline) || |
413 | old_space_.ReachedSoftThreshold()) { |
414 | // If we have both work to do and enough time, start or finish GC. |
415 | // If we have crossed the soft threshold, ignore time; the next old-space |
416 | // allocation will trigger this work anyway, so we try to pay at least |
417 | // some of that cost with idle time. |
418 | // Blocks for O(roots). |
419 | PageSpace::Phase phase; |
420 | { |
421 | MonitorLocker ml(old_space_.tasks_lock()); |
422 | phase = old_space_.phase(); |
423 | } |
424 | if (phase == PageSpace::kAwaitingFinalization) { |
        CollectOldSpaceGarbage(thread, GCType::kMarkSweep,
                               GCReason::kFinalize);
      } else if (phase == PageSpace::kDone) {
        StartConcurrentMarking(thread, GCReason::kIdle);
428 | } |
429 | } |
430 | } |
431 | |
432 | if (FLAG_mark_when_idle) { |
433 | old_space_.IncrementalMarkWithTimeBudget(deadline); |
434 | } |
435 | |
436 | if (OS::GetCurrentMonotonicMicros() < deadline) { |
437 | Page::ClearCache(); |
438 | } |
439 | } |
440 | |
441 | void Heap::NotifyDestroyed() { |
442 | TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "NotifyDestroyed"); |
  CollectAllGarbage(GCReason::kDestroyed, /*compact=*/true);
444 | Page::ClearCache(); |
445 | } |
446 | |
447 | Dart_PerformanceMode Heap::SetMode(Dart_PerformanceMode new_mode) { |
448 | Dart_PerformanceMode old_mode = mode_.exchange(new_mode); |
449 | if ((old_mode == Dart_PerformanceMode_Latency) && |
450 | (new_mode == Dart_PerformanceMode_Default)) { |
    CheckCatchUp(Thread::Current());
452 | } |
453 | return old_mode; |
454 | } |
455 | |
456 | void Heap::CollectNewSpaceGarbage(Thread* thread, |
457 | GCType type, |
458 | GCReason reason) { |
459 | NoActiveIsolateScope no_active_isolate_scope(thread); |
460 | ASSERT(reason != GCReason::kPromotion); |
461 | ASSERT(reason != GCReason::kFinalize); |
462 | if (thread->isolate_group() == Dart::vm_isolate_group()) { |
463 | // The vm isolate cannot safely collect garbage due to unvisited read-only |
464 | // handles and slots bootstrapped with RAW_NULL. Ignore GC requests to |
465 | // trigger a nice out-of-memory message instead of a crash in the middle of |
466 | // visiting pointers. |
467 | return; |
468 | } |
469 | { |
470 | GcSafepointOperationScope safepoint_operation(thread); |
471 | RecordBeforeGC(type, reason); |
472 | { |
473 | VMTagScope tagScope(thread, reason == GCReason::kIdle |
474 | ? VMTag::kGCIdleTagId |
475 | : VMTag::kGCNewSpaceTagId); |
476 | TIMELINE_FUNCTION_GC_DURATION(thread, "CollectNewGeneration"); |
477 | new_space_.Scavenge(thread, type, reason); |
478 | RecordAfterGC(type); |
479 | PrintStats(); |
480 | #if defined(SUPPORT_TIMELINE) |
      PrintStatsToTimeline(&tbes, reason);
482 | #endif |
483 | last_gc_was_old_space_ = false; |
484 | } |
485 | if (type == GCType::kScavenge && reason == GCReason::kNewSpace) { |
486 | if (old_space_.ReachedHardThreshold()) { |
        CollectOldSpaceGarbage(thread, GCType::kMarkSweep,
                               GCReason::kPromotion);
      } else {
        CheckConcurrentMarking(thread, GCReason::kPromotion, 0);
491 | } |
492 | } |
493 | } |
494 | } |
495 | |
496 | void Heap::CollectOldSpaceGarbage(Thread* thread, |
497 | GCType type, |
498 | GCReason reason) { |
499 | NoActiveIsolateScope no_active_isolate_scope(thread); |
500 | |
501 | ASSERT(type != GCType::kScavenge); |
502 | ASSERT(reason != GCReason::kNewSpace); |
503 | ASSERT(reason != GCReason::kStoreBuffer); |
504 | if (FLAG_use_compactor) { |
505 | type = GCType::kMarkCompact; |
506 | } |
507 | if (thread->isolate_group() == Dart::vm_isolate_group()) { |
508 | // The vm isolate cannot safely collect garbage due to unvisited read-only |
509 | // handles and slots bootstrapped with RAW_NULL. Ignore GC requests to |
510 | // trigger a nice out-of-memory message instead of a crash in the middle of |
511 | // visiting pointers. |
512 | return; |
513 | } |
514 | { |
515 | GcSafepointOperationScope safepoint_operation(thread); |
516 | if (reason == GCReason::kFinalize) { |
517 | MonitorLocker ml(old_space_.tasks_lock()); |
518 | if (old_space_.phase() != PageSpace::kAwaitingFinalization) { |
519 | return; // Lost race. |
520 | } |
521 | } |
522 | |
523 | thread->isolate_group()->ForEachIsolate( |
524 | [&](Isolate* isolate) { |
525 | // Discard regexp backtracking stacks to further reduce memory usage. |
526 | isolate->CacheRegexpBacktrackStack(nullptr); |
527 | }, |
528 | /*at_safepoint=*/true); |
529 | |
530 | RecordBeforeGC(type, reason); |
531 | VMTagScope tagScope(thread, reason == GCReason::kIdle |
532 | ? VMTag::kGCIdleTagId |
533 | : VMTag::kGCOldSpaceTagId); |
534 | TIMELINE_FUNCTION_GC_DURATION(thread, "CollectOldGeneration"); |
535 | old_space_.CollectGarbage(thread, /*compact=*/type == GCType::kMarkCompact, |
536 | /*finalize=*/true); |
537 | RecordAfterGC(type); |
538 | PrintStats(); |
539 | #if defined(SUPPORT_TIMELINE) |
    PrintStatsToTimeline(&tbes, reason);
541 | #endif |
542 | |
543 | // Some Code objects may have been collected so invalidate handler cache. |
544 | thread->isolate_group()->ForEachIsolate( |
545 | [&](Isolate* isolate) { |
546 | isolate->handler_info_cache()->Clear(); |
547 | isolate->catch_entry_moves_cache()->Clear(); |
548 | }, |
549 | /*at_safepoint=*/true); |
550 | last_gc_was_old_space_ = true; |
551 | assume_scavenge_will_fail_ = false; |
552 | } |
553 | } |
554 | |
555 | void Heap::CollectGarbage(Thread* thread, GCType type, GCReason reason) { |
556 | switch (type) { |
557 | case GCType::kScavenge: |
558 | case GCType::kEvacuate: |
559 | CollectNewSpaceGarbage(thread, type, reason); |
560 | break; |
561 | case GCType::kMarkSweep: |
562 | case GCType::kMarkCompact: |
563 | CollectOldSpaceGarbage(thread, type, reason); |
564 | break; |
565 | default: |
566 | UNREACHABLE(); |
567 | } |
568 | } |
569 | |
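// Collects both generations: a scavenge of new space followed by an old-space
// mark-sweep (or mark-compact when 'compact' is true).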
570 | void Heap::CollectMostGarbage(GCReason reason, bool compact) { |
571 | Thread* thread = Thread::Current(); |
  CollectNewSpaceGarbage(thread, GCType::kScavenge, reason);
  CollectOldSpaceGarbage(
      thread, compact ? GCType::kMarkCompact : GCType::kMarkSweep, reason);
575 | } |
576 | |
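// Collects both generations, evacuating new space so that objects kept alive
// only by cross-generational pointers are collected as well.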
577 | void Heap::CollectAllGarbage(GCReason reason, bool compact) { |
578 | Thread* thread = Thread::Current(); |
579 | |
580 | // New space is evacuated so this GC will collect all dead objects |
581 | // kept alive by a cross-generational pointer. |
  CollectNewSpaceGarbage(thread, GCType::kEvacuate, reason);
583 | if (thread->is_marking()) { |
584 | // If incremental marking is happening, we need to finish the GC cycle |
585 | // and perform a follow-up GC to purge any "floating garbage" that may be |
586 | // retained by the incremental barrier. |
    CollectOldSpaceGarbage(thread, GCType::kMarkSweep, reason);
  }
  CollectOldSpaceGarbage(
      thread, compact ? GCType::kMarkCompact : GCType::kMarkSweep, reason);
591 | } |
592 | |
593 | void Heap::CheckCatchUp(Thread* thread) { |
594 | ASSERT(!thread->force_growth()); |
595 | if (old_space()->ReachedHardThreshold()) { |
    CollectGarbage(thread, GCType::kMarkSweep, GCReason::kCatchUp);
  } else {
    CheckConcurrentMarking(thread, GCReason::kCatchUp, 0);
599 | } |
600 | } |
601 | |
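// Advances or starts concurrent marking based on the current old-space phase:
// contributes an incremental step while marking is in progress, finalizes a
// completed mark, or starts a new cycle once the soft threshold is reached.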
602 | void Heap::CheckConcurrentMarking(Thread* thread, |
603 | GCReason reason, |
604 | intptr_t size) { |
605 | ASSERT(!thread->force_growth()); |
606 | |
607 | PageSpace::Phase phase; |
608 | { |
609 | MonitorLocker ml(old_space_.tasks_lock()); |
610 | phase = old_space_.phase(); |
611 | } |
612 | |
613 | switch (phase) { |
614 | case PageSpace::kMarking: |
615 | if ((size != 0) && (mode_ != Dart_PerformanceMode_Latency)) { |
616 | old_space_.IncrementalMarkWithSizeBudget(size); |
617 | } |
618 | return; |
619 | case PageSpace::kSweepingLarge: |
620 | case PageSpace::kSweepingRegular: |
621 | return; // Busy. |
622 | case PageSpace::kAwaitingFinalization: |
      CollectOldSpaceGarbage(thread, GCType::kMarkSweep, GCReason::kFinalize);
624 | return; |
625 | case PageSpace::kDone: |
626 | if (old_space_.ReachedSoftThreshold()) { |
627 | // New-space objects are roots during old-space GC. This means that even |
628 | // unreachable new-space objects prevent old-space objects they |
629 | // reference from being collected during an old-space GC. Normally this |
630 | // is not an issue because new-space GCs run much more frequently than |
631 | // old-space GCs. If new-space allocation is low and direct old-space |
632 | // allocation is high, which can happen in a program that allocates |
633 | // large objects and little else, old-space can fill up with unreachable |
634 | // objects until the next new-space GC. This check is the |
635 | // concurrent-marking equivalent to the new-space GC before |
636 | // synchronous-marking in CollectMostGarbage. |
637 | if (last_gc_was_old_space_) { |
          CollectNewSpaceGarbage(thread, GCType::kScavenge, GCReason::kFull);
639 | } |
640 | StartConcurrentMarking(thread, reason); |
641 | } |
642 | return; |
643 | default: |
644 | UNREACHABLE(); |
645 | } |
646 | } |
647 | |
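// Begins a concurrent marking cycle for old space under a safepoint; the
// collection is finalized later via GCReason::kFinalize.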
648 | void Heap::StartConcurrentMarking(Thread* thread, GCReason reason) { |
649 | GcSafepointOperationScope safepoint_operation(thread); |
  RecordBeforeGC(GCType::kStartConcurrentMark, reason);
651 | VMTagScope tagScope(thread, reason == GCReason::kIdle |
652 | ? VMTag::kGCIdleTagId |
653 | : VMTag::kGCOldSpaceTagId); |
654 | TIMELINE_FUNCTION_GC_DURATION(thread, "StartConcurrentMarking"); |
655 | old_space_.CollectGarbage(thread, /*compact=*/false, /*finalize=*/false); |
  RecordAfterGC(GCType::kStartConcurrentMark);
657 | PrintStats(); |
658 | #if defined(SUPPORT_TIMELINE) |
  PrintStatsToTimeline(&tbes, reason);
660 | #endif |
661 | } |
662 | |
663 | void Heap::WaitForMarkerTasks(Thread* thread) { |
664 | MonitorLocker ml(old_space_.tasks_lock()); |
665 | while ((old_space_.phase() == PageSpace::kMarking) || |
666 | (old_space_.phase() == PageSpace::kAwaitingFinalization)) { |
667 | while (old_space_.phase() == PageSpace::kMarking) { |
668 | ml.WaitWithSafepointCheck(thread); |
669 | } |
670 | if (old_space_.phase() == PageSpace::kAwaitingFinalization) { |
671 | ml.Exit(); |
      CollectOldSpaceGarbage(thread, GCType::kMarkSweep, GCReason::kFinalize);
673 | ml.Enter(); |
674 | } |
675 | } |
676 | } |
677 | |
678 | void Heap::WaitForSweeperTasks(Thread* thread) { |
679 | ASSERT(!thread->OwnsGCSafepoint()); |
680 | MonitorLocker ml(old_space_.tasks_lock()); |
681 | while ((old_space_.phase() == PageSpace::kSweepingLarge) || |
682 | (old_space_.phase() == PageSpace::kSweepingRegular)) { |
683 | ml.WaitWithSafepointCheck(thread); |
684 | } |
685 | } |
686 | |
687 | void Heap::WaitForSweeperTasksAtSafepoint(Thread* thread) { |
688 | ASSERT(thread->OwnsGCSafepoint()); |
689 | MonitorLocker ml(old_space_.tasks_lock()); |
690 | while ((old_space_.phase() == PageSpace::kSweepingLarge) || |
691 | (old_space_.phase() == PageSpace::kSweepingRegular)) { |
692 | ml.Wait(); |
693 | } |
694 | } |
695 | |
696 | void Heap::UpdateGlobalMaxUsed() { |
697 | ASSERT(isolate_group_ != nullptr); |
698 | // We are accessing the used in words count for both new and old space |
699 | // without synchronizing. The value of this metric is approximate. |
  isolate_group_->GetHeapGlobalUsedMaxMetric()->SetValue(
      (UsedInWords(Heap::kNew) * kWordSize) +
      (UsedInWords(Heap::kOld) * kWordSize));
703 | } |
704 | |
705 | void Heap::WriteProtect(bool read_only) { |
706 | read_only_ = read_only; |
707 | new_space_.WriteProtect(read_only); |
708 | old_space_.WriteProtect(read_only); |
709 | } |
710 | |
711 | void Heap::Init(IsolateGroup* isolate_group, |
712 | bool is_vm_isolate, |
713 | intptr_t max_new_gen_words, |
714 | intptr_t max_old_gen_words) { |
715 | ASSERT(isolate_group->heap() == nullptr); |
716 | std::unique_ptr<Heap> heap(new Heap(isolate_group, is_vm_isolate, |
717 | max_new_gen_words, max_old_gen_words)); |
718 | isolate_group->set_heap(std::move(heap)); |
719 | } |
720 | |
721 | void Heap::AddRegionsToObjectSet(ObjectSet* set) const { |
722 | new_space_.AddRegionsToObjectSet(set); |
723 | old_space_.AddRegionsToObjectSet(set); |
724 | set->SortRegions(); |
725 | } |
726 | |
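// Requests a full GC once 'num_allocations' further allocations have been
// made; the countdown and collection happen in CollectForDebugging.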
727 | void Heap::CollectOnNthAllocation(intptr_t num_allocations) { |
728 | // Prevent generated code from using the TLAB fast path on next allocation. |
729 | new_space_.AbandonRemainingTLABForDebugging(Thread::Current()); |
730 | gc_on_nth_allocation_ = num_allocations; |
731 | } |
732 | |
733 | void Heap::CollectForDebugging(Thread* thread) { |
734 | if (gc_on_nth_allocation_ == kNoForcedGarbageCollection) return; |
735 | if (thread->OwnsGCSafepoint()) { |
736 | // CollectAllGarbage is not supported when we are at a safepoint. |
737 | // Allocating when at a safepoint is not a common case. |
738 | return; |
739 | } |
740 | gc_on_nth_allocation_--; |
741 | if (gc_on_nth_allocation_ == 0) { |
    CollectAllGarbage(GCReason::kDebugging);
743 | gc_on_nth_allocation_ = kNoForcedGarbageCollection; |
744 | } else { |
745 | // Prevent generated code from using the TLAB fast path on next allocation. |
746 | new_space_.AbandonRemainingTLABForDebugging(thread); |
747 | } |
748 | } |
749 | |
750 | ObjectSet* Heap::CreateAllocatedObjectSet(Zone* zone, |
751 | MarkExpectation mark_expectation) { |
752 | ObjectSet* allocated_set = new (zone) ObjectSet(zone); |
753 | |
  this->AddRegionsToObjectSet(allocated_set);
  Isolate* vm_isolate = Dart::vm_isolate();
  vm_isolate->group()->heap()->AddRegionsToObjectSet(allocated_set);
757 | |
758 | { |
759 | VerifyObjectVisitor object_visitor(isolate_group(), allocated_set, |
760 | mark_expectation); |
    this->VisitObjectsNoImagePages(&object_visitor);
762 | } |
763 | { |
764 | VerifyObjectVisitor object_visitor(isolate_group(), allocated_set, |
765 | kRequireMarked); |
    this->VisitObjectsImagePages(&object_visitor);
767 | } |
768 | { |
769 | // VM isolate heap is premarked. |
770 | VerifyObjectVisitor vm_object_visitor(isolate_group(), allocated_set, |
771 | kRequireMarked); |
    vm_isolate->group()->heap()->VisitObjects(&vm_object_visitor);
773 | } |
774 | |
775 | return allocated_set; |
776 | } |
777 | |
778 | bool Heap::Verify(const char* msg, MarkExpectation mark_expectation) { |
779 | if (FLAG_disable_heap_verification) { |
780 | return true; |
781 | } |
782 | HeapIterationScope heap_iteration_scope(Thread::Current()); |
783 | return VerifyGC(msg, mark_expectation); |
784 | } |
785 | |
786 | bool Heap::VerifyGC(const char* msg, MarkExpectation mark_expectation) { |
787 | ASSERT(msg != nullptr); |
788 | auto thread = Thread::Current(); |
789 | StackZone stack_zone(thread); |
790 | |
791 | ObjectSet* allocated_set = |
      CreateAllocatedObjectSet(stack_zone.GetZone(), mark_expectation);
  VerifyPointersVisitor visitor(isolate_group(), allocated_set, msg);
  VisitObjectPointers(&visitor);
795 | |
796 | // Only returning a value so that Heap::Validate can be called from an ASSERT. |
797 | return true; |
798 | } |
799 | |
800 | void Heap::PrintSizes() const { |
801 | OS::PrintErr( |
802 | "New space (%"Pd "k of %"Pd "k) " |
803 | "Old space (%"Pd "k of %"Pd "k)\n", |
804 | (UsedInWords(kNew) / KBInWords), (CapacityInWords(kNew) / KBInWords), |
805 | (UsedInWords(kOld) / KBInWords), (CapacityInWords(kOld) / KBInWords)); |
806 | } |
807 | |
808 | intptr_t Heap::UsedInWords(Space space) const { |
809 | return space == kNew ? new_space_.UsedInWords() : old_space_.UsedInWords(); |
810 | } |
811 | |
812 | intptr_t Heap::CapacityInWords(Space space) const { |
813 | return space == kNew ? new_space_.CapacityInWords() |
814 | : old_space_.CapacityInWords(); |
815 | } |
816 | |
817 | intptr_t Heap::ExternalInWords(Space space) const { |
818 | return space == kNew ? new_space_.ExternalInWords() |
819 | : old_space_.ExternalInWords(); |
820 | } |
821 | |
822 | intptr_t Heap::TotalUsedInWords() const { |
  return UsedInWords(kNew) + UsedInWords(kOld);
824 | } |
825 | |
826 | intptr_t Heap::TotalCapacityInWords() const { |
  return CapacityInWords(kNew) + CapacityInWords(kOld);
828 | } |
829 | |
830 | intptr_t Heap::TotalExternalInWords() const { |
  return ExternalInWords(kNew) + ExternalInWords(kOld);
832 | } |
833 | |
834 | int64_t Heap::GCTimeInMicros(Space space) const { |
835 | if (space == kNew) { |
836 | return new_space_.gc_time_micros(); |
837 | } |
838 | return old_space_.gc_time_micros(); |
839 | } |
840 | |
841 | intptr_t Heap::Collections(Space space) const { |
842 | if (space == kNew) { |
843 | return new_space_.collections(); |
844 | } |
845 | return old_space_.collections(); |
846 | } |
847 | |
848 | const char* Heap::GCTypeToString(GCType type) { |
849 | switch (type) { |
850 | case GCType::kScavenge: |
851 | return "Scavenge"; |
852 | case GCType::kEvacuate: |
853 | return "Evacuate"; |
854 | case GCType::kStartConcurrentMark: |
855 | return "StartCMark"; |
856 | case GCType::kMarkSweep: |
857 | return "MarkSweep"; |
858 | case GCType::kMarkCompact: |
859 | return "MarkCompact"; |
860 | default: |
861 | UNREACHABLE(); |
862 | return ""; |
863 | } |
864 | } |
865 | |
866 | const char* Heap::GCReasonToString(GCReason gc_reason) { |
867 | switch (gc_reason) { |
868 | case GCReason::kNewSpace: |
869 | return "new space"; |
870 | case GCReason::kStoreBuffer: |
871 | return "store buffer"; |
872 | case GCReason::kPromotion: |
873 | return "promotion"; |
874 | case GCReason::kOldSpace: |
875 | return "old space"; |
876 | case GCReason::kFinalize: |
877 | return "finalize"; |
878 | case GCReason::kFull: |
879 | return "full"; |
880 | case GCReason::kExternal: |
881 | return "external"; |
882 | case GCReason::kIdle: |
883 | return "idle"; |
884 | case GCReason::kDestroyed: |
885 | return "destroyed"; |
886 | case GCReason::kDebugging: |
887 | return "debugging"; |
888 | case GCReason::kCatchUp: |
889 | return "catch-up"; |
890 | default: |
891 | UNREACHABLE(); |
892 | return ""; |
893 | } |
894 | } |
895 | |
896 | int64_t Heap::PeerCount() const { |
897 | return new_weak_tables_[kPeers]->count() + old_weak_tables_[kPeers]->count(); |
898 | } |
899 | |
900 | void Heap::ResetCanonicalHashTable() { |
901 | new_weak_tables_[kCanonicalHashes]->Reset(); |
902 | old_weak_tables_[kCanonicalHashes]->Reset(); |
903 | } |
904 | |
905 | void Heap::ResetObjectIdTable() { |
906 | new_weak_tables_[kObjectIds]->Reset(); |
907 | old_weak_tables_[kObjectIds]->Reset(); |
908 | } |
909 | |
910 | intptr_t Heap::GetWeakEntry(ObjectPtr raw_obj, WeakSelector sel) const { |
911 | if (raw_obj->IsImmediateOrOldObject()) { |
    return old_weak_tables_[sel]->GetValue(raw_obj);
  } else {
    return new_weak_tables_[sel]->GetValue(raw_obj);
915 | } |
916 | } |
917 | |
918 | void Heap::SetWeakEntry(ObjectPtr raw_obj, WeakSelector sel, intptr_t val) { |
919 | if (raw_obj->IsImmediateOrOldObject()) { |
    old_weak_tables_[sel]->SetValue(raw_obj, val);
  } else {
    new_weak_tables_[sel]->SetValue(raw_obj, val);
923 | } |
924 | } |
925 | |
926 | intptr_t Heap::SetWeakEntryIfNonExistent(ObjectPtr raw_obj, |
927 | WeakSelector sel, |
928 | intptr_t val) { |
929 | if (raw_obj->IsImmediateOrOldObject()) { |
    return old_weak_tables_[sel]->SetValueIfNonExistent(raw_obj, val);
  } else {
    return new_weak_tables_[sel]->SetValueIfNonExistent(raw_obj, val);
933 | } |
934 | } |
935 | |
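// Moves any weak table entries (peers, object ids, etc.) keyed by
// 'before_object' so that they are keyed by 'after_object' instead, taking
// into account that the two objects may live in different generations.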
936 | void Heap::ForwardWeakEntries(ObjectPtr before_object, ObjectPtr after_object) { |
937 | const auto before_space = |
938 | before_object->IsImmediateOrOldObject() ? Heap::kOld : Heap::kNew; |
939 | const auto after_space = |
940 | after_object->IsImmediateOrOldObject() ? Heap::kOld : Heap::kNew; |
941 | |
942 | for (int sel = 0; sel < Heap::kNumWeakSelectors; sel++) { |
943 | const auto selector = static_cast<Heap::WeakSelector>(sel); |
    auto before_table = GetWeakTable(before_space, selector);
    intptr_t entry = before_table->RemoveValueExclusive(before_object);
    if (entry != 0) {
      auto after_table = GetWeakTable(after_space, selector);
      after_table->SetValueExclusive(after_object, entry);
949 | } |
950 | } |
951 | |
952 | isolate_group()->ForEachIsolate( |
953 | [&](Isolate* isolate) { |
954 | auto before_table = before_object->IsImmediateOrOldObject() |
955 | ? isolate->forward_table_old() |
956 | : isolate->forward_table_new(); |
957 | if (before_table != nullptr) { |
          intptr_t entry = before_table->RemoveValueExclusive(before_object);
959 | if (entry != 0) { |
960 | auto after_table = after_object->IsImmediateOrOldObject() |
961 | ? isolate->forward_table_old() |
962 | : isolate->forward_table_new(); |
963 | ASSERT(after_table != nullptr); |
            after_table->SetValueExclusive(after_object, entry);
965 | } |
966 | } |
967 | }, |
968 | /*at_safepoint=*/true); |
969 | } |
970 | |
971 | void Heap::ForwardWeakTables(ObjectPointerVisitor* visitor) { |
972 | // NOTE: This method is only used by the compactor, so there is no need to |
973 | // process the `Heap::kNew` tables. |
974 | for (int sel = 0; sel < Heap::kNumWeakSelectors; sel++) { |
975 | WeakSelector selector = static_cast<Heap::WeakSelector>(sel); |
    GetWeakTable(Heap::kOld, selector)->Forward(visitor);
977 | } |
978 | |
  // Isolates might have forwarding tables (used during snapshotting for
  // isolate communication).
981 | isolate_group()->ForEachIsolate( |
982 | [&](Isolate* isolate) { |
983 | auto table_old = isolate->forward_table_old(); |
984 | if (table_old != nullptr) table_old->Forward(visitor); |
985 | }, |
986 | /*at_safepoint=*/true); |
987 | } |
988 | |
989 | #ifndef PRODUCT |
990 | void Heap::PrintToJSONObject(Space space, JSONObject* object) const { |
991 | if (space == kNew) { |
992 | new_space_.PrintToJSONObject(object); |
993 | } else { |
994 | old_space_.PrintToJSONObject(object); |
995 | } |
996 | } |
997 | |
998 | void Heap::PrintMemoryUsageJSON(JSONStream* stream) const { |
999 | JSONObject obj(stream); |
  PrintMemoryUsageJSON(&obj);
1001 | } |
1002 | |
1003 | void Heap::PrintMemoryUsageJSON(JSONObject* jsobj) const { |
  jsobj->AddProperty("type", "MemoryUsage");
  jsobj->AddProperty64("heapUsage", TotalUsedInWords() * kWordSize);
  jsobj->AddProperty64("heapCapacity", TotalCapacityInWords() * kWordSize);
  jsobj->AddProperty64("externalUsage", TotalExternalInWords() * kWordSize);
1008 | } |
1009 | #endif // PRODUCT |
1010 | |
1011 | void Heap::RecordBeforeGC(GCType type, GCReason reason) { |
1012 | stats_.num_++; |
1013 | stats_.type_ = type; |
1014 | stats_.reason_ = reason; |
1015 | stats_.before_.micros_ = OS::GetCurrentMonotonicMicros(); |
1016 | stats_.before_.new_ = new_space_.GetCurrentUsage(); |
1017 | stats_.before_.old_ = old_space_.GetCurrentUsage(); |
1018 | stats_.before_.store_buffer_ = isolate_group_->store_buffer()->Size(); |
1019 | } |
1020 | |
1021 | void Heap::RecordAfterGC(GCType type) { |
1022 | stats_.after_.micros_ = OS::GetCurrentMonotonicMicros(); |
1023 | int64_t delta = stats_.after_.micros_ - stats_.before_.micros_; |
1024 | if (stats_.type_ == GCType::kScavenge) { |
1025 | new_space_.AddGCTime(delta); |
1026 | new_space_.IncrementCollections(); |
1027 | } else { |
1028 | old_space_.AddGCTime(delta); |
1029 | old_space_.IncrementCollections(); |
1030 | } |
1031 | stats_.after_.new_ = new_space_.GetCurrentUsage(); |
1032 | stats_.after_.old_ = old_space_.GetCurrentUsage(); |
1033 | stats_.after_.store_buffer_ = isolate_group_->store_buffer()->Size(); |
1034 | #ifndef PRODUCT |
1035 | // For now we'll emit the same GC events on all isolates. |
1036 | if (Service::gc_stream.enabled()) { |
1037 | isolate_group_->ForEachIsolate( |
1038 | [&](Isolate* isolate) { |
1039 | if (!Isolate::IsSystemIsolate(isolate)) { |
1040 | ServiceEvent event(isolate, ServiceEvent::kGC); |
1041 | event.set_gc_stats(&stats_); |
            Service::HandleEvent(&event, /*enter_safepoint*/ false);
1043 | } |
1044 | }, |
1045 | /*at_safepoint=*/true); |
1046 | } |
1047 | #endif // !PRODUCT |
1048 | } |
1049 | |
1050 | void Heap::PrintStats() { |
1051 | if (!FLAG_verbose_gc) return; |
1052 | |
1053 | if ((FLAG_verbose_gc_hdr != 0) && |
1054 | (((stats_.num_ - 1) % FLAG_verbose_gc_hdr) == 0)) { |
1055 | OS::PrintErr( |
        "[ | | | | | new "
1057 | "gen | new gen | new gen | old gen | old gen | old " |
1058 | "gen | store | delta used ]\n" |
1059 | "[ GC isolate | space (reason) | GC# | start | time | used " |
1060 | "(MB) | capacity MB | external| used (MB) | capacity (MB) | " |
1061 | "external MB | buffer | new | old ]\n" |
1062 | "[ | | | (s) | (ms) " |
1063 | "|before| after|before| after| b4 |aftr| before| after | before| after " |
1064 | "|before| after| b4 |aftr| (MB) | (MB) ]\n"); |
1065 | } |
1066 | |
1067 | // clang-format off |
1068 | OS::PrintErr( |
1069 | "[ %-13.13s, %11s(%12s), "// GC(isolate-group), type(reason) |
1070 | "%4"Pd ", "// count |
1071 | "%6.2f, "// start time |
1072 | "%5.1f, "// total time |
1073 | "%5.1f, %5.1f, "// new gen: in use before/after |
1074 | "%5.1f, %5.1f, "// new gen: capacity before/after |
1075 | "%3.1f, %3.1f, "// new gen: external before/after |
1076 | "%6.1f, %6.1f, "// old gen: in use before/after |
1077 | "%6.1f, %6.1f, "// old gen: capacity before/after |
1078 | "%5.1f, %5.1f, "// old gen: external before/after |
1079 | "%3"Pd ", %3"Pd ", "// store buffer: before/after |
1080 | "%5.1f, %6.1f, "// delta used: new gen/old gen |
1081 | "]\n", // End with a comma to make it easier to import in spreadsheets. |
1082 | isolate_group()->source()->name, |
1083 | GCTypeToString(stats_.type_), |
1084 | GCReasonToString(stats_.reason_), |
1085 | stats_.num_, |
1086 | MicrosecondsToSeconds(isolate_group_->UptimeMicros()), |
1087 | MicrosecondsToMilliseconds(stats_.after_.micros_ - |
1088 | stats_.before_.micros_), |
1089 | WordsToMB(stats_.before_.new_.used_in_words), |
1090 | WordsToMB(stats_.after_.new_.used_in_words), |
1091 | WordsToMB(stats_.before_.new_.capacity_in_words), |
1092 | WordsToMB(stats_.after_.new_.capacity_in_words), |
1093 | WordsToMB(stats_.before_.new_.external_in_words), |
1094 | WordsToMB(stats_.after_.new_.external_in_words), |
1095 | WordsToMB(stats_.before_.old_.used_in_words), |
1096 | WordsToMB(stats_.after_.old_.used_in_words), |
1097 | WordsToMB(stats_.before_.old_.capacity_in_words), |
1098 | WordsToMB(stats_.after_.old_.capacity_in_words), |
1099 | WordsToMB(stats_.before_.old_.external_in_words), |
1100 | WordsToMB(stats_.after_.old_.external_in_words), |
1101 | stats_.before_.store_buffer_, |
1102 | stats_.after_.store_buffer_, |
1103 | WordsToMB(stats_.after_.new_.used_in_words - |
1104 | stats_.before_.new_.used_in_words), |
1105 | WordsToMB(stats_.after_.old_.used_in_words - |
1106 | stats_.before_.old_.used_in_words)); |
1107 | // clang-format on |
1108 | } |
1109 | |
1110 | void Heap::PrintStatsToTimeline(TimelineEventScope* event, GCReason reason) { |
1111 | #if defined(SUPPORT_TIMELINE) |
1112 | if ((event == nullptr) || !event->enabled()) { |
1113 | return; |
1114 | } |
1115 | intptr_t arguments = event->GetNumArguments(); |
1116 | event->SetNumArguments(arguments + 13); |
  event->CopyArgument(arguments + 0, "Reason", GCReasonToString(reason));
  event->FormatArgument(arguments + 1, "Before.New.Used (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.before_.new_.used_in_words));
  event->FormatArgument(arguments + 2, "After.New.Used (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.after_.new_.used_in_words));
  event->FormatArgument(arguments + 3, "Before.Old.Used (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.before_.old_.used_in_words));
  event->FormatArgument(arguments + 4, "After.Old.Used (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.after_.old_.used_in_words));

  event->FormatArgument(arguments + 5, "Before.New.Capacity (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.before_.new_.capacity_in_words));
  event->FormatArgument(arguments + 6, "After.New.Capacity (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.after_.new_.capacity_in_words));
  event->FormatArgument(arguments + 7, "Before.Old.Capacity (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.before_.old_.capacity_in_words));
  event->FormatArgument(arguments + 8, "After.Old.Capacity (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.after_.old_.capacity_in_words));

  event->FormatArgument(arguments + 9, "Before.New.External (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.before_.new_.external_in_words));
  event->FormatArgument(arguments + 10, "After.New.External (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.after_.new_.external_in_words));
  event->FormatArgument(arguments + 11, "Before.Old.External (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.before_.old_.external_in_words));
  event->FormatArgument(arguments + 12, "After.Old.External (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.after_.old_.external_in_words));
1144 | #endif // defined(SUPPORT_TIMELINE) |
1145 | } |
1146 | |
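// Chooses which space an external allocation of 'size' bytes should be
// accounted against: old space when 'size' is a significant fraction of
// new-space capacity, otherwise new space.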
1147 | Heap::Space Heap::SpaceForExternal(intptr_t size) const { |
1148 | // If 'size' would be a significant fraction of new space, then use old. |
1149 | const int kExtNewRatio = 16; |
  if (size > (CapacityInWords(Heap::kNew) * kWordSize) / kExtNewRatio) {
1151 | return Heap::kOld; |
1152 | } else { |
1153 | return Heap::kNew; |
1154 | } |
1155 | } |
1156 | |
1157 | ForceGrowthScope::ForceGrowthScope(Thread* thread) |
1158 | : ThreadStackResource(thread) { |
1159 | thread->IncrementForceGrowthScopeDepth(); |
1160 | } |
1161 | |
1162 | ForceGrowthScope::~ForceGrowthScope() { |
1163 | thread()->DecrementForceGrowthScopeDepth(); |
1164 | } |
1165 | |
1166 | WritableVMIsolateScope::WritableVMIsolateScope(Thread* thread) |
1167 | : ThreadStackResource(thread) { |
1168 | if (FLAG_write_protect_code && FLAG_write_protect_vm_isolate) { |
    Dart::vm_isolate_group()->heap()->WriteProtect(false);
1170 | } |
1171 | } |
1172 | |
1173 | WritableVMIsolateScope::~WritableVMIsolateScope() { |
1174 | ASSERT(Dart::vm_isolate_group()->heap()->UsedInWords(Heap::kNew) == 0); |
1175 | if (FLAG_write_protect_code && FLAG_write_protect_vm_isolate) { |
    Dart::vm_isolate_group()->heap()->WriteProtect(true);
1177 | } |
1178 | } |
1179 | |
1180 | WritableCodePages::WritableCodePages(Thread* thread, |
1181 | IsolateGroup* isolate_group) |
1182 | : StackResource(thread), isolate_group_(isolate_group) { |
  isolate_group_->heap()->WriteProtectCode(false);
1184 | } |
1185 | |
1186 | WritableCodePages::~WritableCodePages() { |
  isolate_group_->heap()->WriteProtectCode(true);
1188 | } |
1189 | |
1190 | } // namespace dart |
1191 |