Restore TLABs.
This reverts https://github.com/dart-lang/sdk/commit/e9d358921dddbee84c39c951220d5a844b097bb5 and, on top of the original change, it:
- switches the filler from FreeList to ForwardingCorpse, since ForwardingCorpse is not as strictly expected to appear only in old space;
- gets the Verifier to accept ForwardingCorpse (as well as FreeList) in new space;
- asserts that the thread is at a safepoint (or is busy with one of the GC tasks);
- moves the slow-path TryAllocateNewTLAB into scavenger.cc rather than the header (see the sketch below);
- uses an "inline" space_lock Mutex instead of allocating one on the heap;
- reverts to the less precise used-space calculation to avoid requiring threads to be at a safepoint for that calculation;
- adds HeapIterationScope in two places (heap_test.cc and service.cc) to accommodate the newly introduced ASSERTs.
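For orientation, here is a minimal, self-contained sketch of the allocation scheme described above. This is not Dart VM code; the names (NewSpaceModel, ThreadTLAB) are hypothetical and the details are simplified. The fast path bump-allocates inside the thread's [top, end) window; the slow path refills that window from the shared new-space cursor under a mutex, analogous to TryAllocateInTLAB / TryAllocateNewTLAB guarded by space_lock_.

```cpp
// Simplified model of per-thread TLAB allocation (hypothetical names,
// not Dart VM code): threads bump-allocate in a private window and
// refill it from the shared new-space cursor under a lock.
#include <cstddef>
#include <cstdint>
#include <mutex>

struct NewSpaceModel {
  uintptr_t top = 0;      // shared allocation cursor for the semispace
  uintptr_t end = 0;      // end of the semispace
  std::mutex space_lock;  // protects top while handing out new TLABs
};

struct ThreadTLAB {
  uintptr_t top = 0;  // thread-local bump pointer
  uintptr_t end = 0;  // TLAB limit; end == 0 means "no active TLAB"

  bool HasActiveTLAB() const { return end > 0; }

  // Fast path: bump-allocate inside the thread's current TLAB.
  uintptr_t TryAllocateInTLAB(size_t size) {
    if (end - top < size) return 0;  // not enough room (or no TLAB)
    uintptr_t result = top;
    top += size;
    return result;
  }

  // Slow path: carve a fresh TLAB out of the shared space under the lock.
  bool TryAllocateNewTLAB(NewSpaceModel* space, size_t tlab_size) {
    std::lock_guard<std::mutex> ml(space->space_lock);
    if (space->end - space->top < tlab_size) return false;
    top = space->top;
    space->top += tlab_size;
    end = space->top;
    return true;
  }
};
```

In this model, a thread that fails both paths would fall back to a collection and then to old-space allocation, mirroring the order of operations in Heap::AllocateNew in the diff below.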
Change-Id: I42df716cff6da9651ce7737fa66d7ec2a55905b0
Reviewed-on: https://dart-review.googlesource.com/c/89041
Reviewed-by: Ryan Macnak <rmacnak@google.com>
Commit-Queue: Alexander Aprelev <aam@google.com>
diff --git a/runtime/vm/heap/heap.cc b/runtime/vm/heap/heap.cc
index e2848d3..da5e831 100644
--- a/runtime/vm/heap/heap.cc
+++ b/runtime/vm/heap/heap.cc
@@ -8,6 +8,7 @@
#include "platform/utils.h"
#include "vm/compiler/jit/compiler.h"
#include "vm/flags.h"
+#include "vm/heap/become.h"
#include "vm/heap/pages.h"
#include "vm/heap/safepoint.h"
#include "vm/heap/scavenger.h"
@@ -61,23 +62,72 @@
}
}
+void Heap::MakeTLABIterable(Thread* thread) {
+ uword start = thread->top();
+ uword end = thread->end();
+ ASSERT(end >= start);
+ intptr_t size = end - start;
+ ASSERT(Utils::IsAligned(size, kObjectAlignment));
+ if (size >= kObjectAlignment) {
+    // A ForwardingCorpse (forwarding to the default null) works as a filler.
+ ForwardingCorpse::AsForwarder(start, size);
+ ASSERT(RawObject::FromAddr(start)->Size() == size);
+ }
+}
+
+void Heap::AbandonRemainingTLAB(Thread* thread) {
+ MakeTLABIterable(thread);
+ thread->set_top(0);
+ thread->set_end(0);
+}
+
+intptr_t Heap::CalculateTLABSize() {
+ intptr_t size = new_space_.end() - new_space_.top();
+ return Utils::RoundDown(size, kObjectAlignment);
+}
+
uword Heap::AllocateNew(intptr_t size) {
ASSERT(Thread::Current()->no_safepoint_scope_depth() == 0);
// Currently, only the Dart thread may allocate in new space.
isolate()->AssertCurrentThreadIsMutator();
Thread* thread = Thread::Current();
uword addr = new_space_.TryAllocateInTLAB(thread, size);
- if (addr == 0) {
- // This call to CollectGarbage might end up "reusing" a collection spawned
- // from a different thread and will be racing to allocate the requested
- // memory with other threads being released after the collection.
- CollectGarbage(kNew);
- addr = new_space_.TryAllocateInTLAB(thread, size);
- if (addr == 0) {
- return AllocateOld(size, HeapPage::kData);
+ if (addr != 0) {
+ return addr;
+ }
+
+ intptr_t tlab_size = CalculateTLABSize();
+ if ((tlab_size > 0) && (size > tlab_size)) {
+ return AllocateOld(size, HeapPage::kData);
+ }
+
+ AbandonRemainingTLAB(thread);
+ if (tlab_size > 0) {
+ uword tlab_top = new_space_.TryAllocateNewTLAB(thread, tlab_size);
+ if (tlab_top != 0) {
+ addr = new_space_.TryAllocateInTLAB(thread, size);
+ ASSERT(addr != 0);
+ return addr;
}
}
- return addr;
+
+ ASSERT(!thread->HasActiveTLAB());
+
+ // This call to CollectGarbage might end up "reusing" a collection spawned
+ // from a different thread and will be racing to allocate the requested
+ // memory with other threads being released after the collection.
+ CollectGarbage(kNew);
+ tlab_size = CalculateTLABSize();
+ uword tlab_top = new_space_.TryAllocateNewTLAB(thread, tlab_size);
+ if (tlab_top != 0) {
+ addr = new_space_.TryAllocateInTLAB(thread, size);
+      // It is possible that a GC doesn't free enough space.
+ // In that case, we must fall through and allocate into old space.
+ if (addr != 0) {
+ return addr;
+ }
+ }
+ return AllocateOld(size, HeapPage::kData);
}
uword Heap::AllocateOld(intptr_t size, HeapPage::PageType type) {
@@ -647,7 +697,7 @@
StackZone stack_zone(Thread::Current());
// Change the new space's top_ with the more up-to-date thread's view of top_
- new_space_.FlushTLS();
+ new_space_.MakeNewSpaceIterable();
ObjectSet* allocated_set =
CreateAllocatedObjectSet(stack_zone.GetZone(), mark_expectation);
diff --git a/runtime/vm/heap/heap.h b/runtime/vm/heap/heap.h
index 23a02d6..6935bd1 100644
--- a/runtime/vm/heap/heap.h
+++ b/runtime/vm/heap/heap.h
@@ -281,6 +281,10 @@
static const intptr_t kNewAllocatableSize = 256 * KB;
+ intptr_t CalculateTLABSize();
+ void MakeTLABIterable(Thread* thread);
+ void AbandonRemainingTLAB(Thread* thread);
+
private:
class GCStats : public ValueObject {
public:
diff --git a/runtime/vm/heap/heap_test.cc b/runtime/vm/heap/heap_test.cc
index c47ff50..af5236c 100644
--- a/runtime/vm/heap/heap_test.cc
+++ b/runtime/vm/heap/heap_test.cc
@@ -267,12 +267,14 @@
for (size_t space = 0; space < ARRAY_SIZE(spaces); ++space) {
const String& obj = String::Handle(String::New("x", spaces[space]));
{
+ HeapIterationScope iteration(thread);
NoSafepointScope no_safepoint;
FindOnly find_only(obj.raw());
EXPECT(obj.raw() == heap->FindObject(&find_only));
}
}
{
+ HeapIterationScope iteration(thread);
NoSafepointScope no_safepoint;
FindNothing find_nothing;
EXPECT(Object::null() == heap->FindObject(&find_nothing));
diff --git a/runtime/vm/heap/scavenger.cc b/runtime/vm/heap/scavenger.cc
index 9e3a457..b07afe9 100644
--- a/runtime/vm/heap/scavenger.cc
+++ b/runtime/vm/heap/scavenger.cc
@@ -447,13 +447,6 @@
resolved_top_ = top_;
end_ = to_->end();
- // Throw out the old information about the from space
- if (isolate->IsMutatorThreadScheduled()) {
- Thread* mutator_thread = isolate->mutator_thread();
- mutator_thread->set_top(top_);
- mutator_thread->set_end(end_);
- }
-
return from;
}
@@ -461,13 +454,10 @@
// All objects in the to space have been copied from the from space at this
// moment.
- // Ensure the mutator thread now has the up-to-date top_ and end_ of the
- // semispace
- if (isolate->IsMutatorThreadScheduled()) {
- Thread* thread = isolate->mutator_thread();
- thread->set_top(top_);
- thread->set_end(end_);
- }
+  // Ensure the mutator thread will fail the next allocation. This forces
+  // the mutator to allocate a new TLAB.
+ Thread* mutator_thread = isolate->mutator_thread();
+ ASSERT((mutator_thread == NULL) || (!mutator_thread->HasActiveTLAB()));
double avg_frac = stats_history_.Get(0).PromoCandidatesSuccessFraction();
if (stats_history_.Size() >= 2) {
@@ -844,11 +834,45 @@
}
}
-void Scavenger::FlushTLS() const {
+void Scavenger::MakeAllTLABsIterable(Isolate* isolate) const {
+ MonitorLocker ml(isolate->threads_lock(), false);
+ ASSERT(Thread::Current()->IsAtSafepoint() ||
+ (Thread::Current()->task_kind() == Thread::kMarkerTask) ||
+ (Thread::Current()->task_kind() == Thread::kCompactorTask));
+ Thread* current = heap_->isolate()->thread_registry()->active_list();
+ while (current != NULL) {
+ if (current->HasActiveTLAB()) {
+ heap_->MakeTLABIterable(current);
+ }
+ current = current->next();
+ }
+ Thread* mutator_thread = isolate->mutator_thread();
+ if ((mutator_thread != NULL) && (!isolate->IsMutatorThreadScheduled())) {
+ heap_->MakeTLABIterable(mutator_thread);
+ }
+}
+
+void Scavenger::MakeNewSpaceIterable() const {
ASSERT(heap_ != NULL);
- if (heap_->isolate()->IsMutatorThreadScheduled()) {
- Thread* mutator_thread = heap_->isolate()->mutator_thread();
- mutator_thread->heap()->new_space()->set_top(mutator_thread->top());
+ ASSERT(Thread::Current()->IsAtSafepoint() ||
+ (Thread::Current()->task_kind() == Thread::kMarkerTask) ||
+ (Thread::Current()->task_kind() == Thread::kCompactorTask));
+ if (!scavenging_) {
+ MakeAllTLABsIterable(heap_->isolate());
+ }
+}
+
+void Scavenger::AbandonAllTLABs(Isolate* isolate) {
+ ASSERT(Thread::Current()->IsAtSafepoint());
+ MonitorLocker ml(isolate->threads_lock(), false);
+ Thread* current = isolate->thread_registry()->active_list();
+ while (current != NULL) {
+ heap_->AbandonRemainingTLAB(current);
+ current = current->next();
+ }
+ Thread* mutator_thread = isolate->mutator_thread();
+ if ((mutator_thread != NULL) && (!isolate->IsMutatorThreadScheduled())) {
+ heap_->AbandonRemainingTLAB(mutator_thread);
}
}
@@ -856,7 +880,7 @@
ASSERT(Thread::Current()->IsAtSafepoint() ||
(Thread::Current()->task_kind() == Thread::kMarkerTask) ||
(Thread::Current()->task_kind() == Thread::kCompactorTask));
- FlushTLS();
+ MakeNewSpaceIterable();
uword cur = FirstObjectStart();
while (cur < top_) {
RawObject* raw_obj = RawObject::FromAddr(cur);
@@ -867,7 +891,7 @@
void Scavenger::VisitObjects(ObjectVisitor* visitor) const {
ASSERT(Thread::Current()->IsAtSafepoint() ||
(Thread::Current()->task_kind() == Thread::kMarkerTask));
- FlushTLS();
+ MakeNewSpaceIterable();
uword cur = FirstObjectStart();
while (cur < top_) {
RawObject* raw_obj = RawObject::FromAddr(cur);
@@ -882,7 +906,7 @@
RawObject* Scavenger::FindObject(FindObjectVisitor* visitor) const {
ASSERT(!scavenging_);
- FlushTLS();
+ MakeNewSpaceIterable();
uword cur = FirstObjectStart();
if (visitor->VisitRange(cur, top_)) {
while (cur < top_) {
@@ -898,6 +922,26 @@
return Object::null();
}
+uword Scavenger::TryAllocateNewTLAB(Thread* thread, intptr_t size) {
+ ASSERT(Utils::IsAligned(size, kObjectAlignment));
+ ASSERT(heap_ != Dart::vm_isolate()->heap());
+ ASSERT(!scavenging_);
+ MutexLocker ml(&space_lock_);
+ uword result = top_;
+ intptr_t remaining = end_ - top_;
+ if (remaining < size) {
+ return 0;
+ }
+ ASSERT(to_->Contains(result));
+ ASSERT((result & kObjectAlignmentMask) == object_alignment_);
+ top_ += size;
+ ASSERT(to_->Contains(top_) || (top_ == to_->end()));
+ ASSERT(result < top_);
+ thread->set_top(result);
+ thread->set_end(top_);
+ return result;
+}
+
void Scavenger::Scavenge() {
Isolate* isolate = heap_->isolate();
// Ensure that all threads for this isolate are at a safepoint (either stopped
@@ -923,6 +967,13 @@
int64_t safe_point = OS::GetCurrentMonotonicMicros();
heap_->RecordTime(kSafePoint, safe_point - start);
+ AbandonAllTLABs(isolate);
+
+ Thread* mutator_thread = isolate->mutator_thread();
+ if ((mutator_thread != NULL) && (mutator_thread->HasActiveTLAB())) {
+ heap_->AbandonRemainingTLAB(mutator_thread);
+ }
+
// TODO(koda): Make verification more compatible with concurrent sweep.
if (FLAG_verify_before_gc && !FLAG_concurrent_sweep) {
OS::PrintErr("Verifying before Scavenge...");
@@ -931,7 +982,7 @@
}
// Prepare for a scavenge.
- FlushTLS();
+ MakeNewSpaceIterable();
SpaceUsage usage_before = GetCurrentUsage();
intptr_t promo_candidate_words =
(survivor_end_ - FirstObjectStart()) / kWordSize;
@@ -1040,11 +1091,6 @@
// Forces the next scavenge to promote all the objects in the new space.
survivor_end_ = top_;
- if (heap_->isolate()->IsMutatorThreadScheduled()) {
- Thread* mutator_thread = heap_->isolate()->mutator_thread();
- survivor_end_ = mutator_thread->top();
- }
-
Scavenge();
// It is possible for objects to stay in the new space
diff --git a/runtime/vm/heap/scavenger.h b/runtime/vm/heap/scavenger.h
index 412395d..bd8ad4f 100644
--- a/runtime/vm/heap/scavenger.h
+++ b/runtime/vm/heap/scavenger.h
@@ -11,6 +11,7 @@
#include "vm/flags.h"
#include "vm/globals.h"
#include "vm/heap/spaces.h"
+#include "vm/lockers.h"
#include "vm/raw_object.h"
#include "vm/ring_buffer.h"
#include "vm/virtual_memory.h"
@@ -126,6 +127,8 @@
RawObject* FindObject(FindObjectVisitor* visitor) const;
+ uword TryAllocateNewTLAB(Thread* thread, intptr_t size);
+
uword AllocateGC(intptr_t size) {
ASSERT(Utils::IsAligned(size, kObjectAlignment));
ASSERT(heap_ != Dart::vm_isolate()->heap());
@@ -139,7 +142,7 @@
ASSERT(to_->Contains(result));
ASSERT((result & kObjectAlignmentMask) == object_alignment_);
top_ += size;
- ASSERT(to_->Contains(top_) || (top_ == to_->end()));
+ ASSERT((to_->Contains(top_)) || (top_ == to_->end()));
return result;
}
@@ -148,6 +151,8 @@
ASSERT(heap_ != Dart::vm_isolate()->heap());
ASSERT(thread->IsMutatorThread());
ASSERT(thread->isolate()->IsMutatorThreadScheduled());
+ ASSERT(thread->top() <= top_);
+ ASSERT((thread->end() == 0) || (thread->end() == top_));
#if defined(DEBUG)
if (FLAG_gc_at_alloc) {
ASSERT(!scavenging_);
@@ -164,7 +169,7 @@
ASSERT(to_->Contains(result));
ASSERT((result & kObjectAlignmentMask) == object_alignment_);
top += size;
- ASSERT(to_->Contains(top) || (top == to_->end()));
+ ASSERT((to_->Contains(top)) || (top == to_->end()));
thread->set_top(top);
return result;
}
@@ -221,7 +226,10 @@
void AllocateExternal(intptr_t cid, intptr_t size);
void FreeExternal(intptr_t size);
- void FlushTLS() const;
+ void MakeNewSpaceIterable() const;
+ int64_t FreeSpaceInWords(Isolate* isolate) const;
+ void MakeAllTLABsIterable(Isolate* isolate) const;
+ void AbandonAllTLABs(Isolate* isolate);
private:
// Ids for time and data records in Heap::GCStats.
@@ -323,6 +331,9 @@
bool failed_to_promote_;
+ // Protects new space during the allocation of new TLABs
+ Mutex space_lock_;
+
friend class ScavengerVisitor;
friend class ScavengerWeakVisitor;
diff --git a/runtime/vm/heap/verifier.cc b/runtime/vm/heap/verifier.cc
index 29bbe385..8ea2fdb 100644
--- a/runtime/vm/heap/verifier.cc
+++ b/runtime/vm/heap/verifier.cc
@@ -20,7 +20,7 @@
if (raw_obj->IsHeapObject()) {
uword raw_addr = RawObject::ToAddr(raw_obj);
if (raw_obj->IsFreeListElement() || raw_obj->IsForwardingCorpse()) {
- if (raw_obj->IsMarked()) {
+ if (raw_obj->IsOldObject() && raw_obj->IsMarked()) {
FATAL1("Marked free list element encountered %#" Px "\n", raw_addr);
}
} else {
diff --git a/runtime/vm/isolate.cc b/runtime/vm/isolate.cc
index 1d18e97..7f1e8a2 100644
--- a/runtime/vm/isolate.cc
+++ b/runtime/vm/isolate.cc
@@ -2837,10 +2837,6 @@
os_thread->set_thread(thread);
if (is_mutator) {
scheduled_mutator_thread_ = thread;
- if (this != Dart::vm_isolate()) {
- scheduled_mutator_thread_->set_top(heap()->new_space()->top());
- scheduled_mutator_thread_->set_end(heap()->new_space()->end());
- }
}
Thread::SetCurrent(thread);
os_thread->EnableThreadInterrupts();
@@ -2879,12 +2875,6 @@
os_thread->set_thread(NULL);
OSThread::SetCurrent(os_thread);
if (is_mutator) {
- if (this != Dart::vm_isolate()) {
- heap()->new_space()->set_top(scheduled_mutator_thread_->top_);
- heap()->new_space()->set_end(scheduled_mutator_thread_->end_);
- }
- scheduled_mutator_thread_->top_ = 0;
- scheduled_mutator_thread_->end_ = 0;
scheduled_mutator_thread_ = NULL;
}
// Even if we unschedule the mutator thread, e.g. via calling
diff --git a/runtime/vm/service.cc b/runtime/vm/service.cc
index 25fd47c..282f198 100644
--- a/runtime/vm/service.cc
+++ b/runtime/vm/service.cc
@@ -4110,6 +4110,7 @@
};
static RawObject* GetObjectHelper(Thread* thread, uword addr) {
+ HeapIterationScope iteration(thread);
Object& object = Object::Handle(thread->zone());
{
diff --git a/runtime/vm/thread.h b/runtime/vm/thread.h
index f344ffc..0df7b36 100644
--- a/runtime/vm/thread.h
+++ b/runtime/vm/thread.h
@@ -432,17 +432,17 @@
static intptr_t heap_offset() { return OFFSET_OF(Thread, heap_); }
void set_top(uword value) {
- ASSERT(heap_ != NULL);
top_ = value;
}
void set_end(uword value) {
- ASSERT(heap_ != NULL);
end_ = value;
}
uword top() { return top_; }
uword end() { return end_; }
+ bool HasActiveTLAB() { return end_ > 0; }
+
static intptr_t top_offset() { return OFFSET_OF(Thread, top_); }
static intptr_t end_offset() { return OFFSET_OF(Thread, end_); }
diff --git a/runtime/vm/thread_registry.h b/runtime/vm/thread_registry.h
index 2d6ad35..fda9474 100644
--- a/runtime/vm/thread_registry.h
+++ b/runtime/vm/thread_registry.h
@@ -83,6 +83,7 @@
friend class Isolate;
friend class SafepointHandler;
+ friend class Scavenger;
DISALLOW_COPY_AND_ASSIGN(ThreadRegistry);
};