[vm/concurrency] Add patchable_call_mutex and take it in miss handlers

We serialize all changes to patchable call state to ensure we always
make progress (e.g. transition from Monomorphic -> Polymorphic).

Only if the miss handler actually needs to patch the patchable call do
we stop mutators - if it only updates ICData we don't have to do that.
=> In the future we might introduce a more clever mechanism to avoid
   stopping mutators.

The CL also moves some locks from Isolate to IsolateGroup to ensure
all mutators use the same lock, since they operate on shared data
structures.

Issue https://github.com/dart-lang/sdk/issues/36097

TEST=Existing isolate tests that run with --enable-isolate-groups.

Change-Id: I4a11a8b8bb65581c6aba11cdf73acdea0f24a502
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/174469
Reviewed-by: Alexander Aprelev <aam@google.com>
Commit-Queue: Martin Kustermann <kustermann@google.com>
diff --git a/runtime/vm/compiler/backend/il.cc b/runtime/vm/compiler/backend/il.cc
index 9fdefe6..04c0b16 100644
--- a/runtime/vm/compiler/backend/il.cc
+++ b/runtime/vm/compiler/backend/il.cc
@@ -824,7 +824,7 @@
     const auto& cache = MegamorphicCache::Handle(
         zone, MegamorphicCacheTable::Lookup(thread, name, descriptor));
     {
-      SafepointMutexLocker ml(thread->isolate()->type_feedback_mutex());
+      SafepointMutexLocker ml(thread->isolate_group()->type_feedback_mutex());
       MegamorphicCacheEntries entries(Array::Handle(zone, cache.buckets()));
       for (intptr_t i = 0, n = entries.Length(); i < n; i++) {
         const intptr_t id =
diff --git a/runtime/vm/isolate.cc b/runtime/vm/isolate.cc
index 3849287..3f9b43f 100644
--- a/runtime/vm/isolate.cc
+++ b/runtime/vm/isolate.cc
@@ -361,6 +361,12 @@
           "IsolateGroup::type_arguments_canonicalization_mutex_")),
       subtype_test_cache_mutex_(
           NOT_IN_PRODUCT("IsolateGroup::subtype_test_cache_mutex_")),
+      megamorphic_table_mutex_(
+          NOT_IN_PRODUCT("IsolateGroup::megamorphic_table_mutex_")),
+      type_feedback_mutex_(
+          NOT_IN_PRODUCT("IsolateGroup::type_feedback_mutex_")),
+      patchable_call_mutex_(
+          NOT_IN_PRODUCT("IsolateGroup::patchable_call_mutex_")),
       program_lock_(new SafepointRwLock()),
       active_mutators_monitor_(new Monitor()),
       max_active_mutators_(Scavenger::MaxMutatorThreadCount()) {
@@ -1648,9 +1654,6 @@
       mutex_(NOT_IN_PRODUCT("Isolate::mutex_")),
       constant_canonicalization_mutex_(
           NOT_IN_PRODUCT("Isolate::constant_canonicalization_mutex_")),
-      megamorphic_table_mutex_(
-          NOT_IN_PRODUCT("Isolate::megamorphic_table_mutex_")),
-      type_feedback_mutex_(NOT_IN_PRODUCT("Isolate::type_feedback_mutex_")),
       kernel_data_lib_cache_mutex_(
           NOT_IN_PRODUCT("Isolate::kernel_data_lib_cache_mutex_")),
       kernel_data_class_cache_mutex_(
diff --git a/runtime/vm/isolate.h b/runtime/vm/isolate.h
index 77991a9..a98d48b 100644
--- a/runtime/vm/isolate.h
+++ b/runtime/vm/isolate.h
@@ -424,6 +424,9 @@
     return &type_arguments_canonicalization_mutex_;
   }
   Mutex* subtype_test_cache_mutex() { return &subtype_test_cache_mutex_; }
+  Mutex* megamorphic_table_mutex() { return &megamorphic_table_mutex_; }
+  Mutex* type_feedback_mutex() { return &type_feedback_mutex_; }
+  Mutex* patchable_call_mutex() { return &patchable_call_mutex_; }
 
 #if defined(DART_PRECOMPILED_RUNTIME)
   Mutex* unlinked_call_map_mutex() { return &unlinked_call_map_mutex_; }
@@ -722,6 +725,9 @@
   Mutex type_canonicalization_mutex_;
   Mutex type_arguments_canonicalization_mutex_;
   Mutex subtype_test_cache_mutex_;
+  Mutex megamorphic_table_mutex_;
+  Mutex type_feedback_mutex_;
+  Mutex patchable_call_mutex_;
 
 #if defined(DART_PRECOMPILED_RUNTIME)
   Mutex unlinked_call_map_mutex_;
@@ -964,9 +970,6 @@
   Mutex* constant_canonicalization_mutex() {
     return &constant_canonicalization_mutex_;
   }
-  Mutex* megamorphic_table_mutex() { return &megamorphic_table_mutex_; }
-  Mutex* type_feedback_mutex() { return &type_feedback_mutex_; }
-
   Mutex* kernel_data_lib_cache_mutex() { return &kernel_data_lib_cache_mutex_; }
   Mutex* kernel_data_class_cache_mutex() {
     return &kernel_data_class_cache_mutex_;
@@ -1601,8 +1604,6 @@
   Simulator* simulator_ = nullptr;
   Mutex mutex_;                            // Protects compiler stats.
   Mutex constant_canonicalization_mutex_;  // Protects const canonicalization.
-  Mutex megamorphic_table_mutex_;
-  Mutex type_feedback_mutex_;
   Mutex kernel_data_lib_cache_mutex_;
   Mutex kernel_data_class_cache_mutex_;
   Mutex kernel_constants_mutex_;
diff --git a/runtime/vm/megamorphic_cache_table.cc b/runtime/vm/megamorphic_cache_table.cc
index 7a1b7f6..2e7d1b3 100644
--- a/runtime/vm/megamorphic_cache_table.cc
+++ b/runtime/vm/megamorphic_cache_table.cc
@@ -17,7 +17,7 @@
                                                   const String& name,
                                                   const Array& descriptor) {
   Isolate* isolate = thread->isolate();
-  SafepointMutexLocker ml(isolate->megamorphic_table_mutex());
+  SafepointMutexLocker ml(thread->isolate_group()->megamorphic_table_mutex());
 
   ASSERT(name.IsSymbol());
   // TODO(rmacnak): ASSERT(descriptor.IsCanonical());
@@ -46,7 +46,7 @@
 
 void MegamorphicCacheTable::PrintSizes(Isolate* isolate) {
   auto thread = Thread::Current();
-  SafepointMutexLocker ml(thread->isolate()->megamorphic_table_mutex());
+  SafepointMutexLocker ml(thread->isolate_group()->megamorphic_table_mutex());
 
   StackZone zone(thread);
   intptr_t size = 0;
diff --git a/runtime/vm/object.cc b/runtime/vm/object.cc
index 3f6ff74..56e839d 100644
--- a/runtime/vm/object.cc
+++ b/runtime/vm/object.cc
@@ -15239,7 +15239,7 @@
 void ICData::EnsureHasCheck(const GrowableArray<intptr_t>& class_ids,
                             const Function& target,
                             intptr_t count) const {
-  SafepointMutexLocker ml(Isolate::Current()->type_feedback_mutex());
+  SafepointMutexLocker ml(IsolateGroup::Current()->type_feedback_mutex());
 
   if (FindCheck(class_ids) != -1) return;
   AddCheckInternal(class_ids, target, count);
@@ -15248,14 +15248,15 @@
 void ICData::AddCheck(const GrowableArray<intptr_t>& class_ids,
                       const Function& target,
                       intptr_t count) const {
-  SafepointMutexLocker ml(Isolate::Current()->type_feedback_mutex());
+  SafepointMutexLocker ml(IsolateGroup::Current()->type_feedback_mutex());
   AddCheckInternal(class_ids, target, count);
 }
 
 void ICData::AddCheckInternal(const GrowableArray<intptr_t>& class_ids,
                               const Function& target,
                               intptr_t count) const {
-  ASSERT(Isolate::Current()->type_feedback_mutex()->IsOwnedByCurrentThread());
+  ASSERT(
+      IsolateGroup::Current()->type_feedback_mutex()->IsOwnedByCurrentThread());
 
   ASSERT(!is_tracking_exactness());
   ASSERT(!target.IsNull());
@@ -15343,7 +15344,7 @@
                                     const Function& target,
                                     intptr_t count,
                                     StaticTypeExactnessState exactness) const {
-  SafepointMutexLocker ml(Isolate::Current()->type_feedback_mutex());
+  SafepointMutexLocker ml(IsolateGroup::Current()->type_feedback_mutex());
 
   GrowableArray<intptr_t> class_ids(1);
   class_ids.Add(receiver_class_id);
@@ -15356,7 +15357,7 @@
                               const Function& target,
                               intptr_t count,
                               StaticTypeExactnessState exactness) const {
-  SafepointMutexLocker ml(Isolate::Current()->type_feedback_mutex());
+  SafepointMutexLocker ml(IsolateGroup::Current()->type_feedback_mutex());
   AddReceiverCheckInternal(receiver_class_id, target, count, exactness);
 }
 
@@ -17174,7 +17175,7 @@
 
 void MegamorphicCache::EnsureContains(const Smi& class_id,
                                       const Object& target) const {
-  SafepointMutexLocker ml(Isolate::Current()->type_feedback_mutex());
+  SafepointMutexLocker ml(IsolateGroup::Current()->type_feedback_mutex());
 
   if (LookupLocked(class_id) == Object::null()) {
     InsertLocked(class_id, target);
@@ -17195,15 +17196,16 @@
 }
 
 ObjectPtr MegamorphicCache::Lookup(const Smi& class_id) const {
-  SafepointMutexLocker ml(Isolate::Current()->type_feedback_mutex());
+  SafepointMutexLocker ml(IsolateGroup::Current()->type_feedback_mutex());
   return LookupLocked(class_id);
 }
 
 ObjectPtr MegamorphicCache::LookupLocked(const Smi& class_id) const {
   auto thread = Thread::Current();
+  auto isolate_group = thread->isolate_group();
   auto zone = thread->zone();
   ASSERT(thread->IsMutatorThread());
-  ASSERT(thread->isolate()->type_feedback_mutex()->IsOwnedByCurrentThread());
+  ASSERT(isolate_group->type_feedback_mutex()->IsOwnedByCurrentThread());
 
   const auto& backing_array = Array::Handle(zone, buckets());
   intptr_t id_mask = mask();
@@ -17225,7 +17227,7 @@
 void MegamorphicCache::InsertLocked(const Smi& class_id,
                                     const Object& target) const {
   auto isolate_group = IsolateGroup::Current();
-  ASSERT(Isolate::Current()->type_feedback_mutex()->IsOwnedByCurrentThread());
+  ASSERT(isolate_group->type_feedback_mutex()->IsOwnedByCurrentThread());
 
   // As opposed to ICData we are stopping mutator threads from other isolates
   // while modifying the megamorphic cache, since updates are not atomic.
@@ -17242,17 +17244,20 @@
 }
 
 void MegamorphicCache::EnsureCapacityLocked() const {
-  ASSERT(Isolate::Current()->type_feedback_mutex()->IsOwnedByCurrentThread());
+  auto thread = Thread::Current();
+  auto zone = thread->zone();
+  auto isolate_group = thread->isolate_group();
+  ASSERT(isolate_group->type_feedback_mutex()->IsOwnedByCurrentThread());
 
   intptr_t old_capacity = mask() + 1;
   double load_limit = kLoadFactor * static_cast<double>(old_capacity);
   if (static_cast<double>(filled_entry_count() + 1) > load_limit) {
-    const Array& old_buckets = Array::Handle(buckets());
+    const Array& old_buckets = Array::Handle(zone, buckets());
     intptr_t new_capacity = old_capacity * 2;
     const Array& new_buckets =
-        Array::Handle(Array::New(kEntryLength * new_capacity));
+        Array::Handle(zone, Array::New(kEntryLength * new_capacity));
 
-    auto& target = Object::Handle();
+    auto& target = Object::Handle(zone);
     for (intptr_t i = 0; i < new_capacity; ++i) {
       SetEntry(new_buckets, i, smi_illegal_cid(), target);
     }
@@ -17261,7 +17266,7 @@
     set_filled_entry_count(0);
 
     // Rehash the valid entries.
-    Smi& class_id = Smi::Handle();
+    Smi& class_id = Smi::Handle(zone);
     for (intptr_t i = 0; i < old_capacity; ++i) {
       class_id ^= GetClassId(old_buckets, i);
       if (class_id.Value() != kIllegalCid) {
@@ -17274,7 +17279,9 @@
 
 void MegamorphicCache::InsertEntryLocked(const Smi& class_id,
                                          const Object& target) const {
-  ASSERT(Isolate::Current()->type_feedback_mutex()->IsOwnedByCurrentThread());
+  auto thread = Thread::Current();
+  auto isolate_group = thread->isolate_group();
+  ASSERT(isolate_group->type_feedback_mutex()->IsOwnedByCurrentThread());
 
   ASSERT(Thread::Current()->IsMutatorThread());
   ASSERT(static_cast<double>(filled_entry_count() + 1) <=
diff --git a/runtime/vm/runtime_entry.cc b/runtime/vm/runtime_entry.cc
index b5f1d00..b05b00d 100644
--- a/runtime/vm/runtime_entry.cc
+++ b/runtime/vm/runtime_entry.cc
@@ -1361,6 +1361,8 @@
     return target_function.raw();
   }
 
+  SafepointMutexLocker ml(thread->isolate_group()->patchable_call_mutex());
+
   return InlineCacheMissHandlerGivenTargetFunction(args, ic_data, 1,
                                                    target_function);
 }
@@ -1646,8 +1648,8 @@
       code = StubCode::MonomorphicSmiableCheck().raw();
     }
   }
-  CodePatcher::PatchSwitchableCallAtWithMutatorsStopped(
-      thread_, caller_frame_->pc(), caller_code_, object, code);
+  CodePatcher::PatchSwitchableCallAt(caller_frame_->pc(), caller_code_, object,
+                                     code);
 
   // Return the ICData. The miss stub will jump to continue in the IC lookup
   // stub.
@@ -1765,8 +1767,8 @@
     cache.set_lower_limit(lower);
     cache.set_upper_limit(upper);
     const Code& stub = StubCode::SingleTargetCall();
-    CodePatcher::PatchSwitchableCallAtWithMutatorsStopped(
-        thread_, caller_frame_->pc(), caller_code_, cache, stub);
+    CodePatcher::PatchSwitchableCallAt(caller_frame_->pc(), caller_code_, cache,
+                                       stub);
     // Return the ICData. The miss stub will jump to continue in the IC call
     // stub.
     arguments_.SetArgAt(0, StubCode::ICCallThroughCode());
@@ -1776,8 +1778,8 @@
 
   // Patch to call through stub.
   const Code& stub = StubCode::ICCallThroughCode();
-  CodePatcher::PatchSwitchableCallAtWithMutatorsStopped(
-      thread_, caller_frame_->pc(), caller_code_, ic_data, stub);
+  CodePatcher::PatchSwitchableCallAt(caller_frame_->pc(), caller_code_, ic_data,
+                                     stub);
 
   // Return the ICData. The miss stub will jump to continue in the IC lookup
   // stub.
@@ -1794,25 +1796,21 @@
   const Code& stub = ic_data.is_tracking_exactness()
                          ? StubCode::OneArgCheckInlineCacheWithExactnessCheck()
                          : StubCode::OneArgCheckInlineCache();
-  CodePatcher::PatchInstanceCallAtWithMutatorsStopped(
-      thread_, caller_frame_->pc(), caller_code_, ic_data, stub);
+  CodePatcher::PatchInstanceCallAt(caller_frame_->pc(), caller_code_, ic_data,
+                                   stub);
   if (FLAG_trace_ic) {
     OS::PrintErr("Instance call at %" Px
                  " switching to polymorphic dispatch, %s\n",
                  caller_frame_->pc(), ic_data.ToCString());
   }
 
-  // ICData can be shared between unoptimized and optimized code, so beware that
-  // the new receiver class may have already been added through the optimized
-  // code.
-  if (!ic_data.HasReceiverClassId(receiver_.GetClassId())) {
-    GrowableArray<const Instance*> args(1);
-    args.Add(&receiver_);
-    // Don't count during insertion because the IC stub we continue through will
-    // do an increment.
-    InlineCacheMissHandlerGivenTargetFunction(args, ic_data, /*count=*/0,
-                                              target_function);
-  }
+  // Don't count during insertion because the IC stub we continue through will
+  // do an increment.
+  GrowableArray<const Instance*> args(1);
+  args.Add(&receiver_);
+  InlineCacheMissHandlerGivenTargetFunction(args, ic_data, /*count=*/0,
+                                            target_function);
+
   arguments_.SetArgAt(0, stub);
   arguments_.SetReturn(ic_data);
 #endif  // defined(DART_PRECOMPILED_RUNTIME)
@@ -1848,8 +1846,8 @@
 
   // Call site is not single target, switch to call using ICData.
   const Code& stub = StubCode::ICCallThroughCode();
-  CodePatcher::PatchSwitchableCallAtWithMutatorsStopped(
-      thread_, caller_frame_->pc(), caller_code_, ic_data, stub);
+  CodePatcher::PatchSwitchableCallAt(caller_frame_->pc(), caller_code_, ic_data,
+                                     stub);
 
   // Return the ICData. The single target stub will jump to continue in the
   // IC call stub.
@@ -1893,25 +1891,21 @@
     const Smi& expected_cid =
         Smi::Handle(zone_, Smi::New(receiver_.GetClassId()));
     ASSERT(target_code.HasMonomorphicEntry());
-    CodePatcher::PatchSwitchableCallAtWithMutatorsStopped(
-        thread_, caller_frame_->pc(), caller_code_, expected_cid, target_code);
+    CodePatcher::PatchSwitchableCallAt(caller_frame_->pc(), caller_code_,
+                                       expected_cid, target_code);
     arguments_.SetArgAt(0, target_code);
     arguments_.SetReturn(expected_cid);
   } else {
     // IC entry might have been added while we waited to get into runtime.
-    GrowableArray<intptr_t> class_ids(1);
-    class_ids.Add(receiver_.GetClassId());
-    if (ic_data.FindCheck(class_ids) == -1) {
-      ic_data.AddReceiverCheck(receiver_.GetClassId(), target_function);
-    }
+    ic_data.EnsureHasReceiverCheck(receiver_.GetClassId(), target_function);
     if (number_of_checks > FLAG_max_polymorphic_checks) {
       // Switch to megamorphic call.
       const MegamorphicCache& cache = MegamorphicCache::Handle(
           zone_, MegamorphicCacheTable::Lookup(thread_, name, descriptor));
       const Code& stub = StubCode::MegamorphicCall();
 
-      CodePatcher::PatchSwitchableCallAtWithMutatorsStopped(
-          thread_, caller_frame_->pc(), caller_code_, cache, stub);
+      CodePatcher::PatchSwitchableCallAt(caller_frame_->pc(), caller_code_,
+                                         cache, stub);
       arguments_.SetArgAt(0, stub);
       arguments_.SetReturn(cache);
     } else {
@@ -2087,40 +2081,42 @@
   const Function& caller_function =
       Function::Handle(zone, caller_frame->LookupDartFunction());
 
-  Object& old_data = Object::Handle(zone);
-  Code& old_code = Code::Handle(zone);
+  SwitchableCallHandler handler(thread, receiver, arguments, caller_frame,
+                                caller_code, caller_function);
 
+  // Find out the actual target (which can be time consuming) without holding
+  // any locks.
+  Object& old_data = Object::Handle(zone);
 #if defined(DART_PRECOMPILED_RUNTIME)
-  // Grab old_data and do potentially long-running step of resolving the
-  // target function before we stop mutators.
-  // This will reduce amount of time spent with all mutators are stopped
-  // hopefully leaving only code patching to be done then.
   old_data =
       CodePatcher::GetSwitchableCallDataAt(caller_frame->pc(), caller_code);
 #else
-  old_code ^= CodePatcher::GetInstanceCallAt(caller_frame->pc(), caller_code,
-                                             &old_data);
+  CodePatcher::GetInstanceCallAt(caller_frame->pc(), caller_code, &old_data);
 #endif
-  SwitchableCallHandler handler(thread, receiver, arguments, caller_frame,
-                                caller_code, caller_function);
   const Function& target_function =
       Function::Handle(zone, handler.ResolveTargetFunction(old_data));
-  thread->isolate_group()->RunWithStoppedMutators(
-      [&]() {
+
+  {
+    // We ensure any transitions of patchable calls are done in an atomic
+    // manner, and we ensure we always transition forward (e.g. Monomorphic ->
+    // Polymorphic).
+    //
+    // Mutators are only stopped if we actually need to patch a patchable call.
+    // We may not do that if we e.g. just add one more check to an ICData.
+    SafepointMutexLocker ml(thread->isolate_group()->patchable_call_mutex());
+
+    auto& old_code = Code::Handle(zone);
 #if defined(DART_PRECOMPILED_RUNTIME)
-        old_data = CodePatcher::GetSwitchableCallDataAt(caller_frame->pc(),
-                                                        caller_code);
-#if defined(DEBUG)
-        old_code ^= CodePatcher::GetSwitchableCallTargetAt(caller_frame->pc(),
-                                                           caller_code);
-#endif
+    old_data =
+        CodePatcher::GetSwitchableCallDataAt(caller_frame->pc(), caller_code);
+    DEBUG_ONLY(old_code = CodePatcher::GetSwitchableCallTargetAt(
+                   caller_frame->pc(), caller_code));
 #else
-        old_code ^= CodePatcher::GetInstanceCallAt(caller_frame->pc(),
-                                                   caller_code, &old_data);
+    old_code ^= CodePatcher::GetInstanceCallAt(caller_frame->pc(), caller_code,
+                                               &old_data);
 #endif
-        handler.HandleMiss(old_data, old_code, target_function);
-      },
-      /*use_force_growth=*/true);
+    handler.HandleMiss(old_data, old_code, target_function);
+  }
 }
 
 // Used to find the correct receiver and function to invoke or to fall back to