[vm] Revert "Restore TLABs."
This reverts commit 7e1aa678551e33cb9d5de485b1f2fcd79f74e59e as this
regresses flutter stock_layout_iterations by ~9% on Moto G4.
Fixes https://github.com/dart-lang/sdk/issues/35694
Change-Id: I3c76ee868c64e4b1ed947bd2d295aa8a07d9d1d0
Reviewed-on: https://dart-review.googlesource.com/c/90140
Commit-Queue: Alexander Aprelev <aam@google.com>
Commit-Queue: Ryan Macnak <rmacnak@google.com>
Reviewed-by: Ryan Macnak <rmacnak@google.com>
Reviewed-by: Alexander Aprelev <aam@google.com>
Auto-Submit: Alexander Aprelev <aam@google.com>
diff --git a/runtime/vm/heap/heap.cc b/runtime/vm/heap/heap.cc
index 4ce0f61..8cc2add 100644
--- a/runtime/vm/heap/heap.cc
+++ b/runtime/vm/heap/heap.cc
@@ -8,7 +8,6 @@
#include "platform/utils.h"
#include "vm/compiler/jit/compiler.h"
#include "vm/flags.h"
-#include "vm/heap/become.h"
#include "vm/heap/pages.h"
#include "vm/heap/safepoint.h"
#include "vm/heap/scavenger.h"
@@ -62,72 +61,23 @@
}
}
-void Heap::MakeTLABIterable(Thread* thread) {
- uword start = thread->top();
- uword end = thread->end();
- ASSERT(end >= start);
- intptr_t size = end - start;
- ASSERT(Utils::IsAligned(size, kObjectAlignment));
- if (size >= kObjectAlignment) {
- // ForwardingCorpse(forwarding to default null) will work as filler.
- ForwardingCorpse::AsForwarder(start, size);
- ASSERT(RawObject::FromAddr(start)->Size() == size);
- }
-}
-
-void Heap::AbandonRemainingTLAB(Thread* thread) {
- MakeTLABIterable(thread);
- thread->set_top(0);
- thread->set_end(0);
-}
-
-intptr_t Heap::CalculateTLABSize() {
- intptr_t size = new_space_.end() - new_space_.top();
- return Utils::RoundDown(size, kObjectAlignment);
-}
-
uword Heap::AllocateNew(intptr_t size) {
ASSERT(Thread::Current()->no_safepoint_scope_depth() == 0);
// Currently, only the Dart thread may allocate in new space.
isolate()->AssertCurrentThreadIsMutator();
Thread* thread = Thread::Current();
uword addr = new_space_.TryAllocateInTLAB(thread, size);
- if (addr != 0) {
- return addr;
- }
-
- intptr_t tlab_size = CalculateTLABSize();
- if ((tlab_size > 0) && (size > tlab_size)) {
- return AllocateOld(size, HeapPage::kData);
- }
-
- AbandonRemainingTLAB(thread);
- if (tlab_size > 0) {
- uword tlab_top = new_space_.TryAllocateNewTLAB(thread, tlab_size);
- if (tlab_top != 0) {
- addr = new_space_.TryAllocateInTLAB(thread, size);
- ASSERT(addr != 0);
- return addr;
- }
- }
-
- ASSERT(!thread->HasActiveTLAB());
-
- // This call to CollectGarbage might end up "reusing" a collection spawned
- // from a different thread and will be racing to allocate the requested
- // memory with other threads being released after the collection.
- CollectGarbage(kNew);
- tlab_size = CalculateTLABSize();
- uword tlab_top = new_space_.TryAllocateNewTLAB(thread, tlab_size);
- if (tlab_top != 0) {
+ if (addr == 0) {
+ // This call to CollectGarbage might end up "reusing" a collection spawned
+ // from a different thread and will be racing to allocate the requested
+ // memory with other threads being released after the collection.
+ CollectGarbage(kNew);
addr = new_space_.TryAllocateInTLAB(thread, size);
- // It is possible a GC doesn't clear enough space.
- // In that case, we must fall through and allocate into old space.
- if (addr != 0) {
- return addr;
+ if (addr == 0) {
+ return AllocateOld(size, HeapPage::kData);
}
}
- return AllocateOld(size, HeapPage::kData);
+ return addr;
}
uword Heap::AllocateOld(intptr_t size, HeapPage::PageType type) {
@@ -706,7 +656,7 @@
StackZone stack_zone(Thread::Current());
// Change the new space's top_ with the more up-to-date thread's view of top_
- new_space_.MakeNewSpaceIterable();
+ new_space_.FlushTLS();
ObjectSet* allocated_set =
CreateAllocatedObjectSet(stack_zone.GetZone(), mark_expectation);
diff --git a/runtime/vm/heap/heap.h b/runtime/vm/heap/heap.h
index 6935bd1..23a02d6 100644
--- a/runtime/vm/heap/heap.h
+++ b/runtime/vm/heap/heap.h
@@ -281,10 +281,6 @@
static const intptr_t kNewAllocatableSize = 256 * KB;
- intptr_t CalculateTLABSize();
- void MakeTLABIterable(Thread* thread);
- void AbandonRemainingTLAB(Thread* thread);
-
private:
class GCStats : public ValueObject {
public:
diff --git a/runtime/vm/heap/heap_test.cc b/runtime/vm/heap/heap_test.cc
index af5236c..c47ff50 100644
--- a/runtime/vm/heap/heap_test.cc
+++ b/runtime/vm/heap/heap_test.cc
@@ -267,14 +267,12 @@
for (size_t space = 0; space < ARRAY_SIZE(spaces); ++space) {
const String& obj = String::Handle(String::New("x", spaces[space]));
{
- HeapIterationScope iteration(thread);
NoSafepointScope no_safepoint;
FindOnly find_only(obj.raw());
EXPECT(obj.raw() == heap->FindObject(&find_only));
}
}
{
- HeapIterationScope iteration(thread);
NoSafepointScope no_safepoint;
FindNothing find_nothing;
EXPECT(Object::null() == heap->FindObject(&find_nothing));
diff --git a/runtime/vm/heap/scavenger.cc b/runtime/vm/heap/scavenger.cc
index a3dca1f..01e2df4 100644
--- a/runtime/vm/heap/scavenger.cc
+++ b/runtime/vm/heap/scavenger.cc
@@ -447,6 +447,13 @@
resolved_top_ = top_;
end_ = to_->end();
+ // Throw out the old information about the from space
+ if (isolate->IsMutatorThreadScheduled()) {
+ Thread* mutator_thread = isolate->mutator_thread();
+ mutator_thread->set_top(top_);
+ mutator_thread->set_end(end_);
+ }
+
return from;
}
@@ -454,10 +461,13 @@
// All objects in the to space have been copied from the from space at this
// moment.
- // Ensure the mutator thread will fail the next allocation. This will force
- // mutator to allocate a new TLAB
- Thread* mutator_thread = isolate->mutator_thread();
- ASSERT((mutator_thread == NULL) || (!mutator_thread->HasActiveTLAB()));
+ // Ensure the mutator thread now has the up-to-date top_ and end_ of the
+ // semispace
+ if (isolate->IsMutatorThreadScheduled()) {
+ Thread* thread = isolate->mutator_thread();
+ thread->set_top(top_);
+ thread->set_end(end_);
+ }
double avg_frac = stats_history_.Get(0).PromoCandidatesSuccessFraction();
if (stats_history_.Size() >= 2) {
@@ -836,45 +846,11 @@
}
}
-void Scavenger::MakeAllTLABsIterable(Isolate* isolate) const {
- MonitorLocker ml(isolate->threads_lock(), false);
- ASSERT(Thread::Current()->IsAtSafepoint() ||
- (Thread::Current()->task_kind() == Thread::kMarkerTask) ||
- (Thread::Current()->task_kind() == Thread::kCompactorTask));
- Thread* current = heap_->isolate()->thread_registry()->active_list();
- while (current != NULL) {
- if (current->HasActiveTLAB()) {
- heap_->MakeTLABIterable(current);
- }
- current = current->next();
- }
- Thread* mutator_thread = isolate->mutator_thread();
- if ((mutator_thread != NULL) && (!isolate->IsMutatorThreadScheduled())) {
- heap_->MakeTLABIterable(mutator_thread);
- }
-}
-
-void Scavenger::MakeNewSpaceIterable() const {
+void Scavenger::FlushTLS() const {
ASSERT(heap_ != NULL);
- ASSERT(Thread::Current()->IsAtSafepoint() ||
- (Thread::Current()->task_kind() == Thread::kMarkerTask) ||
- (Thread::Current()->task_kind() == Thread::kCompactorTask));
- if (!scavenging_) {
- MakeAllTLABsIterable(heap_->isolate());
- }
-}
-
-void Scavenger::AbandonAllTLABs(Isolate* isolate) {
- ASSERT(Thread::Current()->IsAtSafepoint());
- MonitorLocker ml(isolate->threads_lock(), false);
- Thread* current = isolate->thread_registry()->active_list();
- while (current != NULL) {
- heap_->AbandonRemainingTLAB(current);
- current = current->next();
- }
- Thread* mutator_thread = isolate->mutator_thread();
- if ((mutator_thread != NULL) && (!isolate->IsMutatorThreadScheduled())) {
- heap_->AbandonRemainingTLAB(mutator_thread);
+ if (heap_->isolate()->IsMutatorThreadScheduled()) {
+ Thread* mutator_thread = heap_->isolate()->mutator_thread();
+ mutator_thread->heap()->new_space()->set_top(mutator_thread->top());
}
}
@@ -882,7 +858,7 @@
ASSERT(Thread::Current()->IsAtSafepoint() ||
(Thread::Current()->task_kind() == Thread::kMarkerTask) ||
(Thread::Current()->task_kind() == Thread::kCompactorTask));
- MakeNewSpaceIterable();
+ FlushTLS();
uword cur = FirstObjectStart();
while (cur < top_) {
RawObject* raw_obj = RawObject::FromAddr(cur);
@@ -893,7 +869,7 @@
void Scavenger::VisitObjects(ObjectVisitor* visitor) const {
ASSERT(Thread::Current()->IsAtSafepoint() ||
(Thread::Current()->task_kind() == Thread::kMarkerTask));
- MakeNewSpaceIterable();
+ FlushTLS();
uword cur = FirstObjectStart();
while (cur < top_) {
RawObject* raw_obj = RawObject::FromAddr(cur);
@@ -908,7 +884,7 @@
RawObject* Scavenger::FindObject(FindObjectVisitor* visitor) const {
ASSERT(!scavenging_);
- MakeNewSpaceIterable();
+ FlushTLS();
uword cur = FirstObjectStart();
if (visitor->VisitRange(cur, top_)) {
while (cur < top_) {
@@ -924,26 +900,6 @@
return Object::null();
}
-uword Scavenger::TryAllocateNewTLAB(Thread* thread, intptr_t size) {
- ASSERT(Utils::IsAligned(size, kObjectAlignment));
- ASSERT(heap_ != Dart::vm_isolate()->heap());
- ASSERT(!scavenging_);
- MutexLocker ml(&space_lock_);
- uword result = top_;
- intptr_t remaining = end_ - top_;
- if (remaining < size) {
- return 0;
- }
- ASSERT(to_->Contains(result));
- ASSERT((result & kObjectAlignmentMask) == object_alignment_);
- top_ += size;
- ASSERT(to_->Contains(top_) || (top_ == to_->end()));
- ASSERT(result < top_);
- thread->set_top(result);
- thread->set_end(top_);
- return result;
-}
-
void Scavenger::Scavenge() {
Isolate* isolate = heap_->isolate();
// Ensure that all threads for this isolate are at a safepoint (either stopped
@@ -969,13 +925,6 @@
int64_t safe_point = OS::GetCurrentMonotonicMicros();
heap_->RecordTime(kSafePoint, safe_point - start);
- AbandonAllTLABs(isolate);
-
- Thread* mutator_thread = isolate->mutator_thread();
- if ((mutator_thread != NULL) && (mutator_thread->HasActiveTLAB())) {
- heap_->AbandonRemainingTLAB(mutator_thread);
- }
-
// TODO(koda): Make verification more compatible with concurrent sweep.
if (FLAG_verify_before_gc && !FLAG_concurrent_sweep) {
OS::PrintErr("Verifying before Scavenge...");
@@ -984,7 +933,7 @@
}
// Prepare for a scavenge.
- MakeNewSpaceIterable();
+ FlushTLS();
SpaceUsage usage_before = GetCurrentUsage();
intptr_t promo_candidate_words =
(survivor_end_ - FirstObjectStart()) / kWordSize;
@@ -1093,6 +1042,11 @@
// Forces the next scavenge to promote all the objects in the new space.
survivor_end_ = top_;
+ if (heap_->isolate()->IsMutatorThreadScheduled()) {
+ Thread* mutator_thread = heap_->isolate()->mutator_thread();
+ survivor_end_ = mutator_thread->top();
+ }
+
Scavenge();
// It is possible for objects to stay in the new space
diff --git a/runtime/vm/heap/scavenger.h b/runtime/vm/heap/scavenger.h
index bd8ad4f..412395d 100644
--- a/runtime/vm/heap/scavenger.h
+++ b/runtime/vm/heap/scavenger.h
@@ -11,7 +11,6 @@
#include "vm/flags.h"
#include "vm/globals.h"
#include "vm/heap/spaces.h"
-#include "vm/lockers.h"
#include "vm/raw_object.h"
#include "vm/ring_buffer.h"
#include "vm/virtual_memory.h"
@@ -127,8 +126,6 @@
RawObject* FindObject(FindObjectVisitor* visitor) const;
- uword TryAllocateNewTLAB(Thread* thread, intptr_t size);
-
uword AllocateGC(intptr_t size) {
ASSERT(Utils::IsAligned(size, kObjectAlignment));
ASSERT(heap_ != Dart::vm_isolate()->heap());
@@ -142,7 +139,7 @@
ASSERT(to_->Contains(result));
ASSERT((result & kObjectAlignmentMask) == object_alignment_);
top_ += size;
- ASSERT((to_->Contains(top_)) || (top_ == to_->end()));
+ ASSERT(to_->Contains(top_) || (top_ == to_->end()));
return result;
}
@@ -151,8 +148,6 @@
ASSERT(heap_ != Dart::vm_isolate()->heap());
ASSERT(thread->IsMutatorThread());
ASSERT(thread->isolate()->IsMutatorThreadScheduled());
- ASSERT(thread->top() <= top_);
- ASSERT((thread->end() == 0) || (thread->end() == top_));
#if defined(DEBUG)
if (FLAG_gc_at_alloc) {
ASSERT(!scavenging_);
@@ -169,7 +164,7 @@
ASSERT(to_->Contains(result));
ASSERT((result & kObjectAlignmentMask) == object_alignment_);
top += size;
- ASSERT((to_->Contains(top)) || (top == to_->end()));
+ ASSERT(to_->Contains(top) || (top == to_->end()));
thread->set_top(top);
return result;
}
@@ -226,10 +221,7 @@
void AllocateExternal(intptr_t cid, intptr_t size);
void FreeExternal(intptr_t size);
- void MakeNewSpaceIterable() const;
- int64_t FreeSpaceInWords(Isolate* isolate) const;
- void MakeAllTLABsIterable(Isolate* isolate) const;
- void AbandonAllTLABs(Isolate* isolate);
+ void FlushTLS() const;
private:
// Ids for time and data records in Heap::GCStats.
@@ -331,9 +323,6 @@
bool failed_to_promote_;
- // Protects new space during the allocation of new TLABs
- Mutex space_lock_;
-
friend class ScavengerVisitor;
friend class ScavengerWeakVisitor;
diff --git a/runtime/vm/heap/verifier.cc b/runtime/vm/heap/verifier.cc
index 8ea2fdb..29bbe385 100644
--- a/runtime/vm/heap/verifier.cc
+++ b/runtime/vm/heap/verifier.cc
@@ -20,7 +20,7 @@
if (raw_obj->IsHeapObject()) {
uword raw_addr = RawObject::ToAddr(raw_obj);
if (raw_obj->IsFreeListElement() || raw_obj->IsForwardingCorpse()) {
- if (raw_obj->IsOldObject() && raw_obj->IsMarked()) {
+ if (raw_obj->IsMarked()) {
FATAL1("Marked free list element encountered %#" Px "\n", raw_addr);
}
} else {
diff --git a/runtime/vm/isolate.cc b/runtime/vm/isolate.cc
index d793eb2..861328c 100644
--- a/runtime/vm/isolate.cc
+++ b/runtime/vm/isolate.cc
@@ -2832,6 +2832,10 @@
thread->sticky_error_ = sticky_error_;
sticky_error_ = Error::null();
}
+ if (this != Dart::vm_isolate()) {
+ scheduled_mutator_thread_->set_top(heap()->new_space()->top());
+ scheduled_mutator_thread_->set_end(heap()->new_space()->end());
+ }
}
Thread::SetCurrent(thread);
os_thread->EnableThreadInterrupts();
@@ -2870,6 +2874,12 @@
os_thread->set_thread(NULL);
OSThread::SetCurrent(os_thread);
if (is_mutator) {
+ if (this != Dart::vm_isolate()) {
+ heap()->new_space()->set_top(scheduled_mutator_thread_->top_);
+ heap()->new_space()->set_end(scheduled_mutator_thread_->end_);
+ }
+ scheduled_mutator_thread_->top_ = 0;
+ scheduled_mutator_thread_->end_ = 0;
scheduled_mutator_thread_ = NULL;
}
// Even if we unschedule the mutator thread, e.g. via calling
diff --git a/runtime/vm/service.cc b/runtime/vm/service.cc
index 3c2eca2..a280dd1 100644
--- a/runtime/vm/service.cc
+++ b/runtime/vm/service.cc
@@ -4115,7 +4115,6 @@
};
static RawObject* GetObjectHelper(Thread* thread, uword addr) {
- HeapIterationScope iteration(thread);
Object& object = Object::Handle(thread->zone());
{
diff --git a/runtime/vm/thread.h b/runtime/vm/thread.h
index 0df7b36..f344ffc 100644
--- a/runtime/vm/thread.h
+++ b/runtime/vm/thread.h
@@ -432,17 +432,17 @@
static intptr_t heap_offset() { return OFFSET_OF(Thread, heap_); }
void set_top(uword value) {
+ ASSERT(heap_ != NULL);
top_ = value;
}
void set_end(uword value) {
+ ASSERT(heap_ != NULL);
end_ = value;
}
uword top() { return top_; }
uword end() { return end_; }
- bool HasActiveTLAB() { return end_ > 0; }
-
static intptr_t top_offset() { return OFFSET_OF(Thread, top_); }
static intptr_t end_offset() { return OFFSET_OF(Thread, end_); }
diff --git a/runtime/vm/thread_registry.h b/runtime/vm/thread_registry.h
index fda9474..2d6ad35 100644
--- a/runtime/vm/thread_registry.h
+++ b/runtime/vm/thread_registry.h
@@ -83,7 +83,6 @@
friend class Isolate;
friend class SafepointHandler;
- friend class Scavenger;
DISALLOW_COPY_AND_ASSIGN(ThreadRegistry);
};