[vm, gc] Evaluate old-gen GC on each new-gen page.
This gives concurrent marking a chance to start or finalize at points other than immediately after a scavenge.
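
In sketch form, the consolidated check dispatches on the old-space phase. The standalone model below is illustrative only, not VM code: the Phase values mirror PageSpace::Phase, but this Heap and its methods are simplified stand-ins for the real collector entry points.

  // Minimal sketch of the new CheckConcurrentMarking dispatch.
  enum class Phase {
    kDone,
    kMarking,
    kSweepingLarge,
    kSweepingRegular,
    kAwaitingFinalization,
  };

  struct Heap {
    Phase phase = Phase::kDone;
    bool reached_soft_threshold = false;
    bool last_gc_was_old_space = false;

    void CollectNewSpaceGarbage() {}  // Scavenge: drop dead new-space roots.
    void CollectOldSpaceGarbage() { phase = Phase::kDone; }  // Finalize.
    void StartConcurrentMarking() { phase = Phase::kMarking; }

    void CheckConcurrentMarking() {
      switch (phase) {
        case Phase::kMarking:         // Marker still running, or...
        case Phase::kSweepingLarge:   // ...sweeper still running:
        case Phase::kSweepingRegular:
          return;                     // busy; try again on a later page.
        case Phase::kAwaitingFinalization:
          CollectOldSpaceGarbage();   // Finish the cycle marking began.
          return;
        case Phase::kDone:
          if (reached_soft_threshold) {
            // Scavenge first if the last GC was old-space, so dead
            // new-space objects don't pin old-space garbage as roots.
            if (last_gc_was_old_space) CollectNewSpaceGarbage();
            StartConcurrentMarking();
          }
          return;
      }
    }
  };

The allocation slow paths (new-space TLAB pages, fresh old-space pages, large allocations) all funnel through this single entry point, so finalizing a marking cycle no longer has to wait for the next old-space allocation.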
TEST=ci
Change-Id: Iec1ba7b7440045cc18e01637af1f865d588b24c2
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/246163
Reviewed-by: Siva Annamalai <asiva@google.com>
Commit-Queue: Ryan Macnak <rmacnak@google.com>
diff --git a/runtime/vm/heap/heap.cc b/runtime/vm/heap/heap.cc
index 68ba680..ce07e8e 100644
--- a/runtime/vm/heap/heap.cc
+++ b/runtime/vm/heap/heap.cc
@@ -193,7 +193,7 @@
}
CollectGarbage(thread, GCType::kMarkSweep, GCReason::kExternal);
} else {
- CheckStartConcurrentMarking(thread, GCReason::kExternal);
+ CheckConcurrentMarking(thread, GCReason::kExternal);
}
}
@@ -457,7 +457,7 @@
CollectOldSpaceGarbage(thread, GCType::kMarkSweep,
GCReason::kPromotion);
} else {
- CheckStartConcurrentMarking(thread, GCReason::kPromotion);
+ CheckConcurrentMarking(thread, GCReason::kPromotion);
}
}
}
@@ -554,29 +554,42 @@
WaitForSweeperTasks(thread);
}
-void Heap::CheckStartConcurrentMarking(Thread* thread, GCReason reason) {
+void Heap::CheckConcurrentMarking(Thread* thread, GCReason reason) {
+ PageSpace::Phase phase;
{
MonitorLocker ml(old_space_.tasks_lock());
- if (old_space_.phase() != PageSpace::kDone) {
- return; // Busy.
- }
+ phase = old_space_.phase();
}
- if (old_space_.ReachedSoftThreshold()) {
- // New-space objects are roots during old-space GC. This means that even
- // unreachable new-space objects prevent old-space objects they reference
- // from being collected during an old-space GC. Normally this is not an
- // issue because new-space GCs run much more frequently than old-space GCs.
- // If new-space allocation is low and direct old-space allocation is high,
- // which can happen in a program that allocates large objects and little
- // else, old-space can fill up with unreachable objects until the next
- // new-space GC. This check is the concurrent-marking equivalent to the
- // new-space GC before synchronous-marking in CollectMostGarbage.
- if (last_gc_was_old_space_) {
- CollectNewSpaceGarbage(thread, GCType::kScavenge, GCReason::kFull);
- }
-
- StartConcurrentMarking(thread, reason);
+ switch (phase) {
+ case PageSpace::kMarking:
+ // TODO(rmacnak): Have this thread help with marking.
+ case PageSpace::kSweepingLarge:
+ case PageSpace::kSweepingRegular:
+ return; // Busy.
+ case PageSpace::kAwaitingFinalization:
+ CollectOldSpaceGarbage(thread, GCType::kMarkSweep, GCReason::kFinalize);
+ return;
+ case PageSpace::kDone:
+ if (old_space_.ReachedSoftThreshold()) {
+ // New-space objects are roots during old-space GC. This means that even
+ // unreachable new-space objects prevent old-space objects they
+ // reference from being collected during an old-space GC. Normally this
+ // is not an issue because new-space GCs run much more frequently than
+ // old-space GCs. If new-space allocation is low and direct old-space
+ // allocation is high, which can happen in a program that allocates
+ // large objects and little else, old-space can fill up with unreachable
+ // objects until the next new-space GC. This check is the
+ // concurrent-marking equivalent to the new-space GC before
+ // synchronous-marking in CollectMostGarbage.
+ if (last_gc_was_old_space_) {
+ CollectNewSpaceGarbage(thread, GCType::kScavenge, GCReason::kFull);
+ }
+ StartConcurrentMarking(thread, reason);
+ }
+ return;
+ default:
+ UNREACHABLE();
}
}
@@ -595,17 +608,6 @@
#endif
}
-void Heap::CheckFinishConcurrentMarking(Thread* thread) {
- bool ready;
- {
- MonitorLocker ml(old_space_.tasks_lock());
- ready = old_space_.phase() == PageSpace::kAwaitingFinalization;
- }
- if (ready) {
- CollectOldSpaceGarbage(thread, GCType::kMarkSweep, GCReason::kFinalize);
- }
-}
-
void Heap::WaitForMarkerTasks(Thread* thread) {
MonitorLocker ml(old_space_.tasks_lock());
while ((old_space_.phase() == PageSpace::kMarking) ||
diff --git a/runtime/vm/heap/heap.h b/runtime/vm/heap/heap.h
index 8248ee1..6a0dc2a 100644
--- a/runtime/vm/heap/heap.h
+++ b/runtime/vm/heap/heap.h
@@ -125,9 +125,8 @@
void CollectAllGarbage(GCReason reason = GCReason::kFull,
bool compact = false);
- void CheckStartConcurrentMarking(Thread* thread, GCReason reason);
+ void CheckConcurrentMarking(Thread* thread, GCReason reason);
void StartConcurrentMarking(Thread* thread, GCReason reason);
- void CheckFinishConcurrentMarking(Thread* thread);
void WaitForMarkerTasks(Thread* thread);
void WaitForSweeperTasks(Thread* thread);
void WaitForSweeperTasksAtSafepoint(Thread* thread);
diff --git a/runtime/vm/heap/pages.cc b/runtime/vm/heap/pages.cc
index 53381e2..de7a382 100644
--- a/runtime/vm/heap/pages.cc
+++ b/runtime/vm/heap/pages.cc
@@ -465,19 +465,6 @@
}
}
-void PageSpace::EvaluateConcurrentMarking(GrowthPolicy growth_policy) {
- if (growth_policy != kForceGrowth) {
- ASSERT(GrowthControlState());
- if (heap_ != NULL) { // Some unit tests.
- Thread* thread = Thread::Current();
- if (thread->CanCollectGarbage()) {
- heap_->CheckFinishConcurrentMarking(thread);
- heap_->CheckStartConcurrentMarking(thread, GCReason::kOldSpace);
- }
- }
- }
-}
-
uword PageSpace::TryAllocateInFreshPage(intptr_t size,
FreeList* freelist,
OldPage::PageType type,
@@ -485,7 +472,12 @@
bool is_locked) {
ASSERT(Heap::IsAllocatableViaFreeLists(size));
- EvaluateConcurrentMarking(growth_policy);
+ if (growth_policy != kForceGrowth) {
+ ASSERT(GrowthControlState());
+ if (heap_ != nullptr) { // Some unit tests.
+ heap_->CheckConcurrentMarking(Thread::Current(), GCReason::kOldSpace);
+ }
+ }
uword result = 0;
SpaceUsage after_allocation = GetCurrentUsage();
@@ -521,7 +513,12 @@
GrowthPolicy growth_policy) {
ASSERT(!Heap::IsAllocatableViaFreeLists(size));
- EvaluateConcurrentMarking(growth_policy);
+ if (growth_policy != kForceGrowth) {
+ ASSERT(GrowthControlState());
+ if (heap_ != nullptr) { // Some unit tests.
+ heap_->CheckConcurrentMarking(Thread::Current(), GCReason::kOldSpace);
+ }
+ }
intptr_t page_size_in_words = LargePageSizeInWordsFor(size);
if ((page_size_in_words << kWordSizeLog2) < size) {
diff --git a/runtime/vm/heap/pages.h b/runtime/vm/heap/pages.h
index fca17a2..28ee25e 100644
--- a/runtime/vm/heap/pages.h
+++ b/runtime/vm/heap/pages.h
@@ -553,8 +553,6 @@
OldPage::PageType type,
GrowthPolicy growth_policy);
- void EvaluateConcurrentMarking(GrowthPolicy growth_policy);
-
// Makes bump block walkable; do not call concurrently with mutator.
void MakeIterable() const;
diff --git a/runtime/vm/heap/safepoint.cc b/runtime/vm/heap/safepoint.cc
index 09e0c04..9933bed 100644
--- a/runtime/vm/heap/safepoint.cc
+++ b/runtime/vm/heap/safepoint.cc
@@ -65,7 +65,7 @@
if (heap->old_space()->ReachedHardThreshold()) {
heap->CollectGarbage(T, GCType::kMarkSweep, GCReason::kOldSpace);
} else {
- heap->CheckStartConcurrentMarking(T, GCReason::kOldSpace);
+ heap->CheckConcurrentMarking(T, GCReason::kOldSpace);
}
}
}
diff --git a/runtime/vm/heap/scavenger.cc b/runtime/vm/heap/scavenger.cc
index a6b53d4..9425afd 100644
--- a/runtime/vm/heap/scavenger.cc
+++ b/runtime/vm/heap/scavenger.cc
@@ -1672,12 +1672,19 @@
return Object::null();
}
-void Scavenger::TryAllocateNewTLAB(Thread* thread, intptr_t min_size) {
+void Scavenger::TryAllocateNewTLAB(Thread* thread,
+ intptr_t min_size,
+ bool can_safepoint) {
ASSERT(heap_ != Dart::vm_isolate_group()->heap());
ASSERT(!scavenging_);
AbandonRemainingTLAB(thread);
+ if (can_safepoint) {
+ ASSERT(thread->no_safepoint_scope_depth() == 0);
+ heap_->CheckConcurrentMarking(thread, GCReason::kNewSpace);
+ }
+
MutexLocker ml(&space_lock_);
for (NewPage* page = to_->head(); page != nullptr; page = page->next()) {
if (page->owner() != nullptr) continue;
diff --git a/runtime/vm/heap/scavenger.h b/runtime/vm/heap/scavenger.h
index 9930fa8..3c1051c 100644
--- a/runtime/vm/heap/scavenger.h
+++ b/runtime/vm/heap/scavenger.h
@@ -283,7 +283,15 @@
if (LIKELY(addr != 0)) {
return addr;
}
- TryAllocateNewTLAB(thread, size);
+ TryAllocateNewTLAB(thread, size, true);
+ return TryAllocateFromTLAB(thread, size);
+ }
+ uword TryAllocateNoSafepoint(Thread* thread, intptr_t size) {
+ uword addr = TryAllocateFromTLAB(thread, size);
+ if (LIKELY(addr != 0)) {
+ return addr;
+ }
+ TryAllocateNewTLAB(thread, size, false);
return TryAllocateFromTLAB(thread, size);
}
void AbandonRemainingTLAB(Thread* thread);
@@ -393,7 +401,7 @@
thread->set_top(result + size);
return result;
}
- void TryAllocateNewTLAB(Thread* thread, intptr_t size);
+ void TryAllocateNewTLAB(Thread* thread, intptr_t size, bool can_safepoint);
SemiSpace* Prologue(GCReason reason);
intptr_t ParallelScavenge(SemiSpace* from);
diff --git a/runtime/vm/object_graph_copy.cc b/runtime/vm/object_graph_copy.cc
index 560a7a7..979a2b4 100644
--- a/runtime/vm/object_graph_copy.cc
+++ b/runtime/vm/object_graph_copy.cc
@@ -788,7 +788,7 @@
const uword size =
header_size != 0 ? header_size : from.untag()->HeapSize();
if (Heap::IsAllocatableInNewSpace(size)) {
- const uword alloc = new_space_->TryAllocate(thread_, size);
+ const uword alloc = new_space_->TryAllocateNoSafepoint(thread_, size);
if (alloc != 0) {
ObjectPtr to(reinterpret_cast<UntaggedObject*>(alloc));
fast_forward_map_.Insert(from, to, size);
@@ -1353,7 +1353,7 @@
auto raw_from = from.ptr().untag();
auto raw_to = to.ptr().untag();
const intptr_t cid = Types::GetTypedDataPtr(from)->GetClassId();
- raw_to->length_ = raw_from->length_;
+ ASSERT(raw_to->length_ == raw_from->length_);
raw_to->RecomputeDataField();
const intptr_t length =
TypedData::ElementSizeInBytes(cid) * Smi::Value(raw_from->length_);
@@ -1605,7 +1605,7 @@
if (length == 0) return Object::null();
const intptr_t size = Array::InstanceSize(length);
- const uword array_addr = new_space_->TryAllocate(thread_, size);
+ const uword array_addr = new_space_->TryAllocateNoSafepoint(thread_, size);
if (array_addr == 0) {
exception_msg_ = kFastAllocationFailed;
return Marker();