[vm, gc] Make force-growth a thread-local property.

Don't evaluate whether to start concurrent marking on new-space page allocation or external allocation while under a force-growth scope.
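
As a rough illustration of the pattern (a minimal sketch with hypothetical
stand-in classes, not the VM's actual Thread/Heap code): force-growth becomes a
per-thread scope depth managed by a RAII scope, and allocation slow paths
consult the current thread instead of a flag shared on the heap.

  #include <cassert>
  #include <climits>

  // Hypothetical stand-in for the VM's Thread; only the force-growth
  // bookkeeping is shown.
  class Thread {
   public:
    bool force_growth() const { return force_growth_scope_depth_ != 0; }
    void IncrementForceGrowthScopeDepth() {
      assert(force_growth_scope_depth_ < INT_MAX);
      force_growth_scope_depth_ += 1;
    }
    void DecrementForceGrowthScopeDepth() {
      assert(force_growth_scope_depth_ > 0);
      force_growth_scope_depth_ -= 1;
    }

   private:
    int force_growth_scope_depth_ = 0;  // Per-thread, so no cross-thread races.
  };

  // RAII scope: while it is alive, GC trigger checks on this thread are
  // skipped and allocations simply grow the heap.
  class ForceGrowthScope {
   public:
    explicit ForceGrowthScope(Thread* thread) : thread_(thread) {
      thread_->IncrementForceGrowthScopeDepth();
    }
    ~ForceGrowthScope() { thread_->DecrementForceGrowthScopeDepth(); }

   private:
    Thread* const thread_;
  };

  // Stand-in for an allocation slow path.
  bool ShouldEvaluateGCTriggers(Thread* thread) {
    return !thread->force_growth();
  }

  int main() {
    Thread thread;
    assert(ShouldEvaluateGCTriggers(&thread));
    {
      ForceGrowthScope force_growth(&thread);
      assert(!ShouldEvaluateGCTriggers(&thread));  // Skipped inside the scope.
    }
    assert(ShouldEvaluateGCTriggers(&thread));  // Checked again after exit.
    return 0;
  }

Because the depth counter lives on the thread, scopes nest and cannot race,
unlike the previous per-heap growth-control flag that was shared across
threads.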

TEST=ci, tsan
Bug: https://github.com/dart-lang/sdk/issues/49344
Bug: https://github.com/dart-lang/sdk/issues/48377
Bug: https://github.com/dart-lang/sdk/issues/48607
Change-Id: Ieff3880bd29228804419ef292a41ba4d502c2c80
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/250223
Commit-Queue: Ryan Macnak <rmacnak@google.com>
Reviewed-by: Alexander Aprelev <aam@google.com>
diff --git a/runtime/vm/dart.cc b/runtime/vm/dart.cc
index 65aea3a..9d66e4d 100644
--- a/runtime/vm/dart.cc
+++ b/runtime/vm/dart.cc
@@ -1059,9 +1059,6 @@
     return error.ptr();
   }
 
-  if (!was_child_cloned_into_existing_isolate) {
-    IG->heap()->InitGrowthControl();
-  }
   I->set_init_callback_data(isolate_data);
   if (FLAG_print_class_table) {
     IG->class_table()->Print();
diff --git a/runtime/vm/dart_api_impl.cc b/runtime/vm/dart_api_impl.cc
index eb33322..adac74b 100644
--- a/runtime/vm/dart_api_impl.cc
+++ b/runtime/vm/dart_api_impl.cc
@@ -1313,9 +1313,6 @@
   }
 
   if (success) {
-    if (is_new_group) {
-      group->heap()->InitGrowthControl();
-    }
     // A Thread structure has been associated to the thread, we do the
     // safepoint transition explicitly here instead of using the
     // TransitionXXX scope objects as the reverse transition happens
diff --git a/runtime/vm/dart_api_impl_test.cc b/runtime/vm/dart_api_impl_test.cc
index 4d7f959..0e7442a 100644
--- a/runtime/vm/dart_api_impl_test.cc
+++ b/runtime/vm/dart_api_impl_test.cc
@@ -3280,7 +3280,7 @@
     {
       // GCs due to allocations or weak handle creation can cause early
       // promotion and interfere with the scenario this test is verifying.
-      NoHeapGrowthControlScope force_growth;
+      ForceGrowthScope force_growth(thread);
 
       // Create an object in new space.
       new_ref = AllocateNewString("new string");
@@ -3425,7 +3425,7 @@
     {
       // GCs due to allocations or weak handle creation can cause early
       // promotion and interfere with the scenario this test is verifying.
-      NoHeapGrowthControlScope force_growth;
+      ForceGrowthScope force_growth(thread);
 
       // Create an object in new space.
       new_ref = AllocateNewString("new string");
diff --git a/runtime/vm/heap/heap.cc b/runtime/vm/heap/heap.cc
index d8b0c3f..c218c50 100644
--- a/runtime/vm/heap/heap.cc
+++ b/runtime/vm/heap/heap.cc
@@ -74,7 +74,7 @@
   if (LIKELY(addr != 0)) {
     return addr;
   }
-  if (!assume_scavenge_will_fail_ && new_space_.GrowthControlState()) {
+  if (!assume_scavenge_will_fail_ && !thread->force_growth()) {
     // This call to CollectGarbage might end up "reusing" a collection spawned
     // from a different thread and will be racing to allocate the requested
     // memory with other threads being released after the collection.
@@ -93,7 +93,7 @@
 
 uword Heap::AllocateOld(Thread* thread, intptr_t size, OldPage::PageType type) {
   ASSERT(thread->no_safepoint_scope_depth() == 0);
-  if (old_space_.GrowthControlState()) {
+  if (!thread->force_growth()) {
     CollectForDebugging(thread);
     uword addr = old_space_.TryAllocate(size, type);
     if (addr != 0) {
@@ -132,7 +132,7 @@
     return addr;
   }
 
-  if (old_space_.GrowthControlState()) {
+  if (!thread->force_growth()) {
     WaitForSweeperTasks(thread);
     old_space_.TryReleaseReservation();
   } else {
@@ -155,10 +155,10 @@
   }
 
   Thread* thread = Thread::Current();
-  if (thread->no_callback_scope_depth() == 0) {
+  if ((thread->no_callback_scope_depth() == 0) && !thread->force_growth()) {
     CheckExternalGC(thread);
   } else {
-    // Check delayed until Dart_TypedDataRelease.
+    // Check delayed until Dart_TypedDataRelease/~ForceGrowthScope.
   }
 }
 
@@ -179,6 +179,7 @@
 void Heap::CheckExternalGC(Thread* thread) {
   ASSERT(thread->no_safepoint_scope_depth() == 0);
   ASSERT(thread->no_callback_scope_depth() == 0);
+  ASSERT(!thread->force_growth());
   if (new_space_.ExternalInWords() >= (4 * new_space_.CapacityInWords())) {
     // Attempt to free some external allocation by a scavenge. (If the total
     // remains above the limit, next external alloc will trigger another.)
@@ -566,6 +567,8 @@
 void Heap::CheckConcurrentMarking(Thread* thread,
                                   GCReason reason,
                                   intptr_t size) {
+  ASSERT(!thread->force_growth());
+
   PageSpace::Phase phase;
   {
     MonitorLocker ml(old_space_.tasks_lock());
@@ -662,21 +665,6 @@
       (UsedInWords(Heap::kOld) * kWordSize));
 }
 
-void Heap::InitGrowthControl() {
-  new_space_.InitGrowthControl();
-  old_space_.InitGrowthControl();
-}
-
-void Heap::SetGrowthControlState(bool state) {
-  new_space_.SetGrowthControlState(state);
-  old_space_.SetGrowthControlState(state);
-}
-
-bool Heap::GrowthControlState() {
-  ASSERT(new_space_.GrowthControlState() == old_space_.GrowthControlState());
-  return old_space_.GrowthControlState();
-}
-
 void Heap::WriteProtect(bool read_only) {
   read_only_ = read_only;
   new_space_.WriteProtect(read_only);
@@ -1187,16 +1175,13 @@
   }
 }
 
-NoHeapGrowthControlScope::NoHeapGrowthControlScope()
-    : ThreadStackResource(Thread::Current()) {
-  Heap* heap = isolate_group()->heap();
-  current_growth_controller_state_ = heap->GrowthControlState();
-  heap->DisableGrowthControl();
+ForceGrowthScope::ForceGrowthScope(Thread* thread)
+    : ThreadStackResource(thread) {
+  thread->IncrementForceGrowthScopeDepth();
 }
 
-NoHeapGrowthControlScope::~NoHeapGrowthControlScope() {
-  Heap* heap = isolate_group()->heap();
-  heap->SetGrowthControlState(current_growth_controller_state_);
+ForceGrowthScope::~ForceGrowthScope() {
+  thread()->DecrementForceGrowthScopeDepth();
 }
 
 WritableVMIsolateScope::WritableVMIsolateScope(Thread* thread)
diff --git a/runtime/vm/heap/heap.h b/runtime/vm/heap/heap.h
index 3ceab79..3408cb2 100644
--- a/runtime/vm/heap/heap.h
+++ b/runtime/vm/heap/heap.h
@@ -131,13 +131,6 @@
   void WaitForSweeperTasks(Thread* thread);
   void WaitForSweeperTasksAtSafepoint(Thread* thread);
 
-  // Enables growth control on the page space heaps.  This should be
-  // called before any user code is executed.
-  void InitGrowthControl();
-  void DisableGrowthControl() { SetGrowthControlState(false); }
-  void SetGrowthControlState(bool state);
-  bool GrowthControlState();
-
   // Protect access to the heap. Note: Code pages are made
   // executable/non-executable when 'read_only' is true/false, respectively.
   void WriteProtect(bool read_only);
@@ -424,14 +417,13 @@
   DISALLOW_COPY_AND_ASSIGN(HeapIterationScope);
 };
 
-class NoHeapGrowthControlScope : public ThreadStackResource {
+class ForceGrowthScope : public ThreadStackResource {
  public:
-  NoHeapGrowthControlScope();
-  ~NoHeapGrowthControlScope();
+  explicit ForceGrowthScope(Thread* thread);
+  ~ForceGrowthScope();
 
  private:
-  bool current_growth_controller_state_;
-  DISALLOW_COPY_AND_ASSIGN(NoHeapGrowthControlScope);
+  DISALLOW_COPY_AND_ASSIGN(ForceGrowthScope);
 };
 
 // Note: During this scope all pages are writable and the code pages are
diff --git a/runtime/vm/heap/heap_test.cc b/runtime/vm/heap/heap_test.cc
index d2e2c41..51a139b 100644
--- a/runtime/vm/heap/heap_test.cc
+++ b/runtime/vm/heap/heap_test.cc
@@ -721,4 +721,65 @@
   }
 }
 
+class ConcurrentForceGrowthScopeTask : public ThreadPool::Task {
+ public:
+  ConcurrentForceGrowthScopeTask(Isolate* isolate,
+                                 Monitor* monitor,
+                                 intptr_t* done_count)
+      : isolate_(isolate), monitor_(monitor), done_count_(done_count) {}
+
+  virtual void Run() {
+    Thread::EnterIsolateAsHelper(isolate_, Thread::kUnknownTask);
+    {
+      Thread* thread = Thread::Current();
+      StackZone stack_zone(thread);
+
+      GrowableObjectArray& accumulate =
+          GrowableObjectArray::Handle(GrowableObjectArray::New());
+      Object& element = Object::Handle();
+      for (intptr_t i = 0; i < 1000; i++) {
+        // Lots of entering and leaving ForceGrowth scopes. Previously, this
+        // would have caused data races on the per-Heap force-growth flag.
+        {
+          ForceGrowthScope force_growth(thread);
+          GrowableObjectArrayPtr unsafe_accumulate = accumulate.ptr();
+          element = Array::New(0);
+          accumulate = unsafe_accumulate;
+        }
+        accumulate.Add(element);
+      }
+    }
+    Thread::ExitIsolateAsHelper();
+    // Notify the main thread that this thread has exited.
+    {
+      MonitorLocker ml(monitor_);
+      *done_count_ += 1;
+      ml.Notify();
+    }
+  }
+
+ private:
+  Isolate* isolate_;
+  Monitor* monitor_;
+  intptr_t* done_count_;
+};
+
+ISOLATE_UNIT_TEST_CASE(ConcurrentForceGrowthScope) {
+  intptr_t task_count = 8;
+  Monitor monitor;
+  intptr_t done_count = 0;
+
+  for (intptr_t i = 0; i < task_count; i++) {
+    Dart::thread_pool()->Run<ConcurrentForceGrowthScopeTask>(
+        thread->isolate(), &monitor, &done_count);
+  }
+
+  {
+    MonitorLocker ml(&monitor);
+    while (done_count < task_count) {
+      ml.WaitWithSafepointCheck(thread);
+    }
+  }
+}
+
 }  // namespace dart
diff --git a/runtime/vm/heap/pages.cc b/runtime/vm/heap/pages.cc
index 1d66e97..e207185 100644
--- a/runtime/vm/heap/pages.cc
+++ b/runtime/vm/heap/pages.cc
@@ -471,7 +471,7 @@
   ASSERT(Heap::IsAllocatableViaFreeLists(size));
 
   if (growth_policy != kForceGrowth) {
-    ASSERT(GrowthControlState());
+    ASSERT(!Thread::Current()->force_growth());
     if (heap_ != nullptr) {  // Some unit tests.
       heap_->CheckConcurrentMarking(Thread::Current(), GCReason::kOldSpace,
                                     kOldPageSize);
@@ -513,7 +513,7 @@
   ASSERT(!Heap::IsAllocatableViaFreeLists(size));
 
   if (growth_policy != kForceGrowth) {
-    ASSERT(GrowthControlState());
+    ASSERT(!Thread::Current()->force_growth());
     if (heap_ != nullptr) {  // Some unit tests.
       heap_->CheckConcurrentMarking(Thread::Current(), GCReason::kOldSpace,
                                     size);
@@ -1110,7 +1110,7 @@
 }
 
 void PageSpace::CollectGarbage(Thread* thread, bool compact, bool finalize) {
-  ASSERT(GrowthControlState());
+  ASSERT(!Thread::Current()->force_growth());
 
   if (!finalize) {
 #if defined(TARGET_ARCH_IA32)
@@ -1509,7 +1509,6 @@
                                          int heap_growth_max,
                                          int garbage_collection_time_ratio)
     : heap_(heap),
-      is_enabled_(false),
       heap_growth_ratio_(heap_growth_ratio),
       desired_utilization_((100.0 - heap_growth_ratio) / 100.0),
       heap_growth_max_(heap_growth_max),
@@ -1522,9 +1521,6 @@
 PageSpaceController::~PageSpaceController() {}
 
 bool PageSpaceController::ReachedHardThreshold(SpaceUsage after) const {
-  if (!is_enabled_) {
-    return false;
-  }
   if (heap_growth_ratio_ == 100) {
     return false;
   }
@@ -1532,9 +1528,6 @@
 }
 
 bool PageSpaceController::ReachedSoftThreshold(SpaceUsage after) const {
-  if (!is_enabled_) {
-    return false;
-  }
   if (heap_growth_ratio_ == 100) {
     return false;
   }
@@ -1542,9 +1535,6 @@
 }
 
 bool PageSpaceController::ReachedIdleThreshold(SpaceUsage current) const {
-  if (!is_enabled_) {
-    return false;
-  }
   if (heap_growth_ratio_ == 100) {
     return false;
   }
diff --git a/runtime/vm/heap/pages.h b/runtime/vm/heap/pages.h
index aff20f3..36e8ab2 100644
--- a/runtime/vm/heap/pages.h
+++ b/runtime/vm/heap/pages.h
@@ -270,10 +270,6 @@
 
   void set_last_usage(SpaceUsage current) { last_usage_ = current; }
 
-  void Enable() { is_enabled_ = true; }
-  void Disable() { is_enabled_ = false; }
-  bool is_enabled() { return is_enabled_; }
-
  private:
   friend class PageSpace;  // For MergeOtherPageSpaceController
 
@@ -285,8 +281,6 @@
 
   Heap* heap_;
 
-  bool is_enabled_;
-
   // Usage after last evaluated GC or last enabled.
   SpaceUsage last_usage_;
 
@@ -415,21 +409,6 @@
 
   void AddRegionsToObjectSet(ObjectSet* set) const;
 
-  void InitGrowthControl() {
-    page_space_controller_.set_last_usage(usage_);
-    page_space_controller_.Enable();
-  }
-
-  void SetGrowthControlState(bool state) {
-    if (state) {
-      page_space_controller_.Enable();
-    } else {
-      page_space_controller_.Disable();
-    }
-  }
-
-  bool GrowthControlState() { return page_space_controller_.is_enabled(); }
-
   // Note: Code pages are made executable/non-executable when 'read_only' is
   // true/false, respectively.
   void WriteProtect(bool read_only);
diff --git a/runtime/vm/heap/pages_test.cc b/runtime/vm/heap/pages_test.cc
index fed0a88..1db6113 100644
--- a/runtime/vm/heap/pages_test.cc
+++ b/runtime/vm/heap/pages_test.cc
@@ -10,7 +10,6 @@
 
 TEST_CASE(Pages) {
   PageSpace* space = new PageSpace(NULL, 4 * MBInWords);
-  space->InitGrowthControl();
   EXPECT(!space->Contains(reinterpret_cast<uword>(&space)));
   uword block = space->TryAllocate(8 * kWordSize);
   EXPECT(block != 0);
diff --git a/runtime/vm/heap/safepoint.cc b/runtime/vm/heap/safepoint.cc
index d75d06a..3853e9c 100644
--- a/runtime/vm/heap/safepoint.cc
+++ b/runtime/vm/heap/safepoint.cc
@@ -37,13 +37,10 @@
   IsolateGroup* IG = T->isolate_group();
   ASSERT(IG != NULL);
 
+  T->IncrementForceGrowthScopeDepth();
+
   auto handler = IG->safepoint_handler();
   handler->SafepointThreads(T, level_);
-
-  // N.B.: Change growth policy inside the safepoint to prevent racy access.
-  Heap* heap = IG->heap();
-  current_growth_controller_state_ = heap->GrowthControlState();
-  heap->DisableGrowthControl();
 }
 
 ForceGrowthSafepointOperationScope::~ForceGrowthSafepointOperationScope() {
@@ -52,16 +49,14 @@
   IsolateGroup* IG = T->isolate_group();
   ASSERT(IG != NULL);
 
-  // N.B.: Change growth policy inside the safepoint to prevent racy access.
-  Heap* heap = IG->heap();
-  heap->SetGrowthControlState(current_growth_controller_state_);
-
   auto handler = IG->safepoint_handler();
   handler->ResumeThreads(T, level_);
 
-  if (current_growth_controller_state_) {
+  T->DecrementForceGrowthScopeDepth();
+  if (!T->force_growth()) {
     ASSERT(T->CanCollectGarbage());
     // Check if we passed the growth limit during the scope.
+    Heap* heap = T->heap();
     if (heap->old_space()->ReachedHardThreshold()) {
       heap->CollectGarbage(T, GCType::kMarkSweep, GCReason::kOldSpace);
     } else {
diff --git a/runtime/vm/heap/scavenger.cc b/runtime/vm/heap/scavenger.cc
index 5bce434..8dc0cfd 100644
--- a/runtime/vm/heap/scavenger.cc
+++ b/runtime/vm/heap/scavenger.cc
@@ -1678,7 +1678,7 @@
 
   AbandonRemainingTLAB(thread);
 
-  if (can_safepoint) {
+  if (can_safepoint && !thread->force_growth()) {
     ASSERT(thread->no_safepoint_scope_depth() == 0);
     heap_->CheckConcurrentMarking(thread, GCReason::kNewSpace, kNewPageSize);
   }
diff --git a/runtime/vm/heap/scavenger.h b/runtime/vm/heap/scavenger.h
index 3c1051c..43eac30 100644
--- a/runtime/vm/heap/scavenger.h
+++ b/runtime/vm/heap/scavenger.h
@@ -349,16 +349,6 @@
   void MakeNewSpaceIterable();
   int64_t FreeSpaceInWords(Isolate* isolate) const;
 
-  void InitGrowthControl() {
-    growth_control_ = true;
-  }
-
-  void SetGrowthControlState(bool state) {
-    growth_control_ = state;
-  }
-
-  bool GrowthControlState() { return growth_control_; }
-
   bool scavenging() const { return scavenging_; }
 
   // The maximum number of Dart mutator threads we allow to execute at the same
@@ -457,10 +447,6 @@
   RelaxedAtomic<bool> failed_to_promote_;
   RelaxedAtomic<bool> abort_;
 
-  // When the isolate group is ready it will enable growth control via
-  // InitGrowthControl.
-  bool growth_control_ = false;
-
   // Protects new space during the allocation of new TLABs
   mutable Mutex space_lock_;
 
diff --git a/runtime/vm/isolate_reload.cc b/runtime/vm/isolate_reload.cc
index f182dce..f40c12b 100644
--- a/runtime/vm/isolate_reload.cc
+++ b/runtime/vm/isolate_reload.cc
@@ -753,7 +753,7 @@
           // "become" operation below replaces all the instances of the old
           // size with forwarding corpses. Force heap growth to prevent size
           // confusion during this period.
-          NoHeapGrowthControlScope scope;
+          ForceGrowthScope force_growth(thread);
           // The HeapIterationScope above ensures no other GC tasks can be
           // active.
           ASSERT(HasNoTasks(heap));
diff --git a/runtime/vm/thread.h b/runtime/vm/thread.h
index 906efbf..5f9660d 100644
--- a/runtime/vm/thread.h
+++ b/runtime/vm/thread.h
@@ -586,17 +586,25 @@
   }
 
   int32_t no_callback_scope_depth() const { return no_callback_scope_depth_; }
-
   void IncrementNoCallbackScopeDepth() {
     ASSERT(no_callback_scope_depth_ < INT_MAX);
     no_callback_scope_depth_ += 1;
   }
-
   void DecrementNoCallbackScopeDepth() {
     ASSERT(no_callback_scope_depth_ > 0);
     no_callback_scope_depth_ -= 1;
   }
 
+  bool force_growth() const { return force_growth_scope_depth_ != 0; }
+  void IncrementForceGrowthScopeDepth() {
+    ASSERT(force_growth_scope_depth_ < INT_MAX);
+    force_growth_scope_depth_ += 1;
+  }
+  void DecrementForceGrowthScopeDepth() {
+    ASSERT(force_growth_scope_depth_ > 0);
+    force_growth_scope_depth_ -= 1;
+  }
+
   bool is_unwind_in_progress() const { return is_unwind_in_progress_; }
 
   void StartUnwindError() {
@@ -1219,6 +1227,7 @@
   mutable Monitor thread_lock_;
   ApiLocalScope* api_reusable_scope_;
   int32_t no_callback_scope_depth_;
+  int32_t force_growth_scope_depth_ = 0;
   intptr_t no_reload_scope_depth_ = 0;
   intptr_t stopped_mutators_scope_depth_ = 0;
 #if defined(DEBUG)
diff --git a/runtime/vm/thread_test.cc b/runtime/vm/thread_test.cc
index 5faa11b..796faa2 100644
--- a/runtime/vm/thread_test.cc
+++ b/runtime/vm/thread_test.cc
@@ -303,9 +303,9 @@
   intptr_t done_count = 0;
   bool wait = true;
 
-  EXPECT(isolate->group()->heap()->GrowthControlState());
+  EXPECT(!thread->force_growth());
 
-  NoHeapGrowthControlScope no_heap_growth_scope;
+  ForceGrowthScope no_heap_growth_scope(thread);
 
   for (intptr_t i = 0; i < kTaskCount; i++) {
     Dart::thread_pool()->Run<SimpleTaskWithZoneAllocation>(