[vm/concurrency] Share program structure and JITed code with --enable-isolate-groups

This removes our temporary scaffolding support for JIT isolate groups,
which worked by creating a new isolate group, loading the application
kernel into it and then merging its heap into the original isolate
group while keeping a separate object store.

Instead, all isolates within a group now share the same object store,
the same libraries and the same JITed code. The sharing is conservative
to start with: for example, only unoptimized code is allowed to run.
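
As a rough illustration (a simplified, standalone model, not the actual
runtime code), the registration protocol added below can be sketched with
std::shared_mutex standing in for the group's program_lock(); the Toy*
names and the int-valued fields exist only for this sketch:

  // Toy model of the "field table ready-to-use" protocol from this CL.
  #include <cassert>
  #include <memory>
  #include <mutex>
  #include <shared_mutex>
  #include <vector>

  struct ToyFieldTable {
    std::vector<int> values;    // one slot per static field id
    bool ready_to_use = false;  // cf. FieldTable::is_ready_to_use_
  };

  class ToyIsolateGroup {
   public:
    // A newly spawned isolate clones the group's initial table under the
    // reader lock and marks itself ready, so a concurrent registration is
    // either picked up by the clone or mirrored in afterwards, never lost
    // and never applied twice.
    ToyFieldTable* SpawnIsolate() {
      std::shared_lock<std::shared_mutex> reader(program_lock_);
      auto isolate = std::make_unique<ToyFieldTable>();
      ToyFieldTable* result = isolate.get();
      isolate->values = initial_table_.values;  // cf. FieldTable::Clone()
      isolate->ready_to_use = true;             // cf. MarkReadyToUse()
      std::lock_guard<std::mutex> list_guard(isolates_lock_);
      isolates_.push_back(std::move(isolate));
      return result;
    }

    // Registering a static field takes the writer lock, appends the initial
    // value to the group-wide table and mirrors it into every isolate table
    // that is already marked ready (cf. IsolateGroup::RegisterStaticField).
    int RegisterStaticField(int initial_value) {
      std::unique_lock<std::shared_mutex> writer(program_lock_);
      std::lock_guard<std::mutex> list_guard(isolates_lock_);
      const int field_id = static_cast<int>(initial_table_.values.size());
      initial_table_.values.push_back(initial_value);
      for (auto& isolate : isolates_) {
        if (isolate->ready_to_use) isolate->values.push_back(initial_value);
      }
      return field_id;
    }

   private:
    std::shared_mutex program_lock_;  // cf. IsolateGroup::program_lock()
    std::mutex isolates_lock_;        // guards the isolates_ list itself
    ToyFieldTable initial_table_;
    std::vector<std::unique_ptr<ToyFieldTable>> isolates_;
  };

  int main() {
    ToyIsolateGroup group;
    ToyFieldTable* a = group.SpawnIsolate();
    const int id = group.RegisterStaticField(42);  // mirrored into `a`
    ToyFieldTable* b = group.SpawnIsolate();       // picked up via the clone
    assert(a->values[id] == 42 && b->values[id] == 42);
    return 0;
  }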

We will gradually remove the restrictions imposed by this CL:
  https://dart-review.googlesource.com/c/sdk/+/173970

Issue https://github.com/dart-lang/sdk/issues/36097

TEST=Tests using --enable-isolate-groups with JIT sharing.

Change-Id: I2bf69a6fe3c905067c4cec2e81613f731c52e5ee
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/175302
Commit-Queue: Martin Kustermann <kustermann@google.com>
Reviewed-by: Ryan Macnak <rmacnak@google.com>
Reviewed-by: Alexander Aprelev <aam@google.com>
diff --git a/runtime/lib/isolate.cc b/runtime/lib/isolate.cc
index 3a41006..85286cb 100644
--- a/runtime/lib/isolate.cc
+++ b/runtime/lib/isolate.cc
@@ -634,11 +634,7 @@
     char* error = nullptr;
 
     auto group = state_->isolate_group();
-#if defined(DART_PRECOMPILED_RUNTIME)
-    Isolate* isolate = CreateWithinExistingIsolateGroupAOT(group, name, &error);
-#else
     Isolate* isolate = CreateWithinExistingIsolateGroup(group, name, &error);
-#endif
     parent_isolate_->DecrementSpawnCount();
     parent_isolate_ = nullptr;
 
diff --git a/runtime/vm/dart.cc b/runtime/vm/dart.cc
index 9f63a72..7100f1a 100644
--- a/runtime/vm/dart.cc
+++ b/runtime/vm/dart.cc
@@ -687,28 +687,17 @@
   return isolate;
 }
 
-#if defined(DART_PRECOMPILED_RUNTIME)
-static bool CloneIntoChildIsolateAOT(Thread* T,
-                                     Isolate* I,
-                                     IsolateGroup* source_isolate_group) {
-  // In AOT we speed up isolate spawning by copying donor's isolate structure.
-  if (source_isolate_group == nullptr) {
-    return false;
-  }
-  I->isolate_object_store()->Init();
-  I->isolate_object_store()->PreallocateObjects();
-  I->set_field_table(T, source_isolate_group->initial_field_table()->Clone(I));
-
-  return true;
-}
-#endif
-
 ErrorPtr Dart::InitIsolateFromSnapshot(Thread* T,
                                        Isolate* I,
                                        const uint8_t* snapshot_data,
                                        const uint8_t* snapshot_instructions,
                                        const uint8_t* kernel_buffer,
                                        intptr_t kernel_buffer_size) {
+  if (kernel_buffer != nullptr) {
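+    // Mark this isolate's field table as ready-to-use before loading starts,
+    // so that static fields registered during kernel loading (via
+    // IsolateGroup::RegisterStaticField) are mirrored into it as well.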
+    SafepointReadRwLocker reader(T, I->group()->program_lock());
+    I->field_table()->MarkReadyToUse();
+  }
+
   Error& error = Error::Handle(T->zone());
   error = Object::Init(I, kernel_buffer, kernel_buffer_size);
   if (!error.IsNull()) {
@@ -742,7 +731,11 @@
       return error.raw();
     }
 
-    I->set_field_table(T, I->group()->initial_field_table()->Clone(I));
+    {
+      SafepointReadRwLocker reader(T, I->group()->program_lock());
+      I->set_field_table(T, I->group()->initial_field_table()->Clone(I));
+      I->field_table()->MarkReadyToUse();
+    }
 
 #if defined(SUPPORT_TIMELINE)
     if (tbes.enabled()) {
@@ -883,37 +876,58 @@
   StackZone zone(T);
   HandleScope handle_scope(T);
   bool was_child_cloned_into_existing_isolate = false;
-#if defined(DART_PRECOMPILED_RUNTIME)
-  if (CloneIntoChildIsolateAOT(T, I, source_isolate_group)) {
+  if (source_isolate_group != nullptr) {
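+    // The child isolate is spawned into an existing, already loaded isolate
+    // group (this path is now taken in JIT as well as AOT mode), so it only
+    // sets up its isolate-local state and clones the group's field table.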
+    I->isolate_object_store()->Init();
+    I->isolate_object_store()->PreallocateObjects();
+
+    // If a static field gets registered in [IsolateGroup::RegisterStaticField]:
+    //
+    //   * before this block, it will ignore this isolate; the [Clone] of the
+    //     initial field table below will pick up the new value.
+    //   * after this block, it will add the new static field to this isolate.
+    {
+      SafepointReadRwLocker reader(T, source_isolate_group->program_lock());
+      I->set_field_table(T,
+                         source_isolate_group->initial_field_table()->Clone(I));
+      I->field_table()->MarkReadyToUse();
+    }
+
     was_child_cloned_into_existing_isolate = true;
   } else {
-#endif
     const Error& error = Error::Handle(
         InitIsolateFromSnapshot(T, I, snapshot_data, snapshot_instructions,
                                 kernel_buffer, kernel_buffer_size));
     if (!error.IsNull()) {
       return error.raw();
     }
-#if defined(DART_PRECOMPILED_RUNTIME)
   }
-#endif
 
   Object::VerifyBuiltinVtables();
   DEBUG_ONLY(I->heap()->Verify(kForbidMarked));
 
 #if defined(DART_PRECOMPILED_RUNTIME)
-  ASSERT(I->object_store()->build_method_extractor_code() != Code::null());
-  if (FLAG_print_llvm_constant_pool) {
-    PrintLLVMConstantPool(T, I);
-  }
+  const bool kIsAotRuntime = true;
 #else
-#if !defined(TARGET_ARCH_IA32)
-  if (I != Dart::vm_isolate()) {
-    I->object_store()->set_build_method_extractor_code(
-        Code::Handle(StubCode::GetBuildMethodExtractorStub(nullptr)));
-  }
+  const bool kIsAotRuntime = false;
 #endif
+
+  if (kIsAotRuntime || was_child_cloned_into_existing_isolate) {
+#if !defined(TARGET_ARCH_IA32)
+    ASSERT(I->object_store()->build_method_extractor_code() != Code::null());
+#endif
+#if defined(DART_PRECOMPILED_RUNTIME)
+    if (FLAG_print_llvm_constant_pool) {
+      PrintLLVMConstantPool(T, I);
+    }
 #endif  // defined(DART_PRECOMPILED_RUNTIME)
+  } else {
+#if !defined(TARGET_ARCH_IA32)
+    if (I != Dart::vm_isolate()) {
+      I->object_store()->set_build_method_extractor_code(
+          Code::Handle(StubCode::GetBuildMethodExtractorStub(nullptr)));
+    }
+#endif  // !defined(TARGET_ARCH_IA32)
+  }
 
   I->set_ic_miss_code(StubCode::SwitchableCallMiss());
 
diff --git a/runtime/vm/dart_api_impl.cc b/runtime/vm/dart_api_impl.cc
index 92fa101..60f008d 100644
--- a/runtime/vm/dart_api_impl.cc
+++ b/runtime/vm/dart_api_impl.cc
@@ -1308,10 +1308,9 @@
   return false;
 }
 
-Isolate* CreateWithinExistingIsolateGroupAOT(IsolateGroup* group,
-                                             const char* name,
-                                             char** error) {
-#if defined(DART_PRECOMPILED_RUNTIME)
+Isolate* CreateWithinExistingIsolateGroup(IsolateGroup* group,
+                                          const char* name,
+                                          char** error) {
   API_TIMELINE_DURATION(Thread::Current());
   CHECK_NO_ISOLATE(Isolate::Current());
 
@@ -1326,171 +1325,6 @@
   ASSERT(isolate->source() == source);
 
   return isolate;
-#else
-  UNREACHABLE();
-#endif
-}
-
-Isolate* CreateWithinExistingIsolateGroup(IsolateGroup* group,
-                                          const char* name,
-                                          char** error) {
-#if !defined(DART_PRECOMPILED_RUNTIME)
-  API_TIMELINE_DURATION(Thread::Current());
-  CHECK_NO_ISOLATE(Isolate::Current());
-
-  // During isolate start we'll make a temporary anonymous group from the same
-  // [source]. Once the isolate has been fully loaded we will merge it's heap
-  // into the shared heap.
-  auto spawning_group = new IsolateGroup(group->shareable_source(),
-                                         /*isolate_group_data=*/nullptr);
-  IsolateGroup::RegisterIsolateGroup(spawning_group);
-  spawning_group->CreateHeap(
-      /*is_vm_isolate=*/false,
-      IsServiceOrKernelIsolateName(group->source()->name));
-
-  Isolate* isolate = reinterpret_cast<Isolate*>(
-      CreateIsolate(spawning_group, /*is_new_group=*/false, name,
-                    /*isolate_data=*/nullptr, error));
-  if (isolate == nullptr) return nullptr;
-
-  auto source = spawning_group->source();
-  ASSERT(isolate->source() == source);
-
-  if (source->script_kernel_buffer != nullptr) {
-    Dart_EnterScope();
-    {
-      Thread* T = Thread::Current();
-      TransitionNativeToVM transition(T);
-      HANDLESCOPE(T);
-      StackZone zone(T);
-
-      // NOTE: We do not attach a finalizer for this object, because the
-      // embedder will free it once the isolate group has shutdown.
-      const auto& td = ExternalTypedData::Handle(ExternalTypedData::New(
-          kExternalTypedDataUint8ArrayCid,
-          const_cast<uint8_t*>(source->script_kernel_buffer),
-          source->script_kernel_size, Heap::kOld));
-
-      std::unique_ptr<kernel::Program> program =
-          kernel::Program::ReadFromTypedData(td,
-                                             const_cast<const char**>(error));
-      if (program == nullptr) {
-        UNIMPLEMENTED();
-      }
-      const Object& tmp =
-          kernel::KernelLoader::LoadEntireProgram(program.get());
-
-      // If the existing isolate could spawn with a root library we should be
-      // able to do the same
-      RELEASE_ASSERT(!tmp.IsNull() && tmp.IsLibrary());
-      isolate->object_store()->set_root_library(Library::Cast(tmp));
-    }
-    Dart_ExitScope();
-  }
-
-  // If we are running in AppJIT training mode we'll have to remap class ids.
-  if (auto permutation_map = group->source()->cid_permutation_map.get()) {
-    Dart_EnterScope();
-    {
-      auto T = Thread::Current();
-      TransitionNativeToVM transition(T);
-      HANDLESCOPE(T);
-
-      // Remap all class ids loaded atm (e.g. from snapshot) and do appropriate
-      // re-hashing of constants and types.
-      ClassFinalizer::RemapClassIds(permutation_map);
-      // Types use cid's as part of their hashes.
-      ClassFinalizer::RehashTypes();
-      // Const objects use cid's as part of their hashes.
-      isolate->RehashConstants();
-    }
-    Dart_ExitScope();
-  }
-
-  auto thread = Thread::Current();
-  {
-    TransitionNativeToVM native_to_vm(thread);
-
-    // Ensure there are no helper threads running.
-    BackgroundCompiler::Stop(isolate);
-    isolate->heap()->WaitForMarkerTasks(thread);
-    isolate->heap()->WaitForSweeperTasks(thread);
-    SafepointOperationScope safepoint_operation(thread);
-    isolate->group()->ReleaseStoreBuffers();
-    RELEASE_ASSERT(isolate->heap()->old_space()->tasks() == 0);
-  }
-
-  Dart_ExitIsolate();
-  {
-    const bool kBypassSafepoint = false;
-    Thread::EnterIsolateGroupAsHelper(group, Thread::kUnknownTask,
-                                      kBypassSafepoint);
-    ASSERT(group == IsolateGroup::Current());
-
-    {
-      auto thread = Thread::Current();
-
-      // Prevent additions of new isolates to [group] until we're done.
-      group->RunWithLockedGroup([&]() {
-        // Ensure no other old space GC tasks are running and "occupy" the old
-        // space.
-        SafepointOperationScope safepoint_scope(thread);
-        {
-          auto old_space = group->heap()->old_space();
-          MonitorLocker ml(old_space->tasks_lock());
-          while (old_space->tasks() > 0) {
-            ml.Wait();
-          }
-          old_space->set_tasks(1);
-        }
-
-        // Merge the heap from [spawning_group] to [group].
-        group->heap()->MergeFrom(isolate->group()->heap());
-
-        spawning_group->UnregisterIsolate(isolate);
-        const bool shutdown_group =
-            spawning_group->UnregisterIsolateDecrementCount(isolate);
-        ASSERT(shutdown_group);
-
-        isolate->isolate_group_ = group;
-        group->RegisterIsolateLocked(isolate);
-        isolate->class_table()->shared_class_table_ =
-            group->shared_class_table();
-        isolate->set_shared_class_table(group->shared_class_table());
-
-        // Even though the mutator thread was descheduled, it will still
-        // retain its [Thread] structure with valid isolate/isolate_group
-        // pointers.
-        // If GC happens before the mutator gets scheduled again, we have to
-        // ensure the isolate group change is reflected in the threads
-        // structure.
-        ASSERT(isolate->mutator_thread() != nullptr);
-        ASSERT(isolate->mutator_thread()->isolate_group() == spawning_group);
-        isolate->mutator_thread()->isolate_group_ = group;
-
-        // Allow other old space GC tasks to run again.
-        {
-          auto old_space = group->heap()->old_space();
-          MonitorLocker ml(old_space->tasks_lock());
-          ASSERT(old_space->tasks() == 1);
-          old_space->set_tasks(0);
-          ml.NotifyAll();
-        }
-      });
-    }
-
-    Thread::ExitIsolateGroupAsHelper(kBypassSafepoint);
-  }
-
-  spawning_group->Shutdown();
-
-  Dart_EnterIsolate(Api::CastIsolate(isolate));
-  ASSERT(Thread::Current()->isolate_group() == isolate->group());
-
-  return isolate;
-#else
-  UNREACHABLE();
-#endif
 }
 
 DART_EXPORT void Dart_IsolateFlagsInitialize(Dart_IsolateFlags* flags) {
@@ -1589,7 +1423,7 @@
 
   Isolate* isolate;
 #if defined(DART_PRECOMPILED_RUNTIME)
-  isolate = CreateWithinExistingIsolateGroupAOT(member->group(), name, error);
+  isolate = CreateWithinExistingIsolateGroup(member->group(), name, error);
   if (isolate != nullptr) {
     isolate->set_origin_id(member->origin_id());
     isolate->set_init_callback_data(child_isolate_data);
diff --git a/runtime/vm/dart_api_impl.h b/runtime/vm/dart_api_impl.h
index 80f0462..7c58510 100644
--- a/runtime/vm/dart_api_impl.h
+++ b/runtime/vm/dart_api_impl.h
@@ -353,9 +353,6 @@
 Isolate* CreateWithinExistingIsolateGroup(IsolateGroup* group,
                                           const char* name,
                                           char** error);
-Isolate* CreateWithinExistingIsolateGroupAOT(IsolateGroup* group,
-                                             const char* name,
-                                             char** error);
 
 }  // namespace dart.
 
diff --git a/runtime/vm/field_table.cc b/runtime/vm/field_table.cc
index a6358a1..fe40d10 100644
--- a/runtime/vm/field_table.cc
+++ b/runtime/vm/field_table.cc
@@ -22,6 +22,25 @@
   free(table_);        // Allocated in FieldTable::Grow()
 }
 
+bool FieldTable::IsReadyToUse() const {
+  DEBUG_ASSERT(
+      IsolateGroup::Current()->program_lock()->IsCurrentThreadReader());
+  return is_ready_to_use_;
+}
+
+void FieldTable::MarkReadyToUse() {
+  // The isolate will mark its field table as ready-to-use upon isolate
+  // initialization. Only after it has been marked as ready-to-use will it
+  // participate in new static field registrations.
+  //
+  // By requiring a read lock here we ensure no other thread is registering a
+  // new static field right now (that would require the exclusive writer lock).
+  DEBUG_ASSERT(
+      IsolateGroup::Current()->program_lock()->IsCurrentThreadReader());
+  ASSERT(!is_ready_to_use_);
+  is_ready_to_use_ = true;
+}
+
 void FieldTable::FreeOldTables() {
   while (old_tables_->length() > 0) {
     free(old_tables_->RemoveLast());
@@ -32,21 +51,27 @@
   return field_id * sizeof(InstancePtr);  // NOLINT
 }
 
-void FieldTable::Register(const Field& field) {
+bool FieldTable::Register(const Field& field, intptr_t expected_field_id) {
+  DEBUG_ASSERT(
+      IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter());
   ASSERT(Thread::Current()->IsMutatorThread());
+  ASSERT(is_ready_to_use_);
+
   if (free_head_ < 0) {
+    bool grown_backing_store = false;
     if (top_ == capacity_) {
       const intptr_t new_capacity = capacity_ + kCapacityIncrement;
       Grow(new_capacity);
+      grown_backing_store = true;
     }
 
     ASSERT(top_ < capacity_);
-
+    ASSERT(expected_field_id == -1 || expected_field_id == top_);
     field.set_field_id(top_);
     table_[top_] = Object::sentinel().raw();
 
     ++top_;
-    return;
+    return grown_backing_store;
   }
 
   // Reuse existing free element. This is "slow path" that should only be
@@ -55,6 +80,7 @@
   free_head_ = Smi::Value(Smi::RawCast(table_[free_head_]));
   field.set_field_id(reused_free);
   table_[reused_free] = Object::sentinel().raw();
+  return false;
 }
 
 void FieldTable::Free(intptr_t field_id) {
@@ -104,6 +130,9 @@
 }
 
 FieldTable* FieldTable::Clone(Isolate* for_isolate) {
+  DEBUG_ASSERT(
+      IsolateGroup::Current()->program_lock()->IsCurrentThreadReader());
+
   FieldTable* clone = new FieldTable(for_isolate);
   auto new_table = static_cast<InstancePtr*>(
       malloc(capacity_ * sizeof(InstancePtr)));  // NOLINT
diff --git a/runtime/vm/field_table.h b/runtime/vm/field_table.h
index 4fc1ca8..4350745 100644
--- a/runtime/vm/field_table.h
+++ b/runtime/vm/field_table.h
@@ -27,10 +27,14 @@
         free_head_(-1),
         table_(nullptr),
         old_tables_(new MallocGrowableArray<InstancePtr*>()),
-        isolate_(isolate) {}
+        isolate_(isolate),
+        is_ready_to_use_(isolate == nullptr) {}
 
   ~FieldTable();
 
+  bool IsReadyToUse() const;
+  void MarkReadyToUse();
+
   intptr_t NumFieldIds() const { return top_; }
   intptr_t Capacity() const { return capacity_; }
 
@@ -43,7 +47,9 @@
 
   bool IsValidIndex(intptr_t index) const { return index >= 0 && index < top_; }
 
-  void Register(const Field& field);
+  // Returns whether registering this field caused a growth in the backing
+  // store.
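+  // If [expected_field_id] is not -1, the newly registered field must be
+  // assigned exactly that id; this is used when mirroring a registration
+  // from the group's initial field table into the isolates' field tables.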
+  bool Register(const Field& field, intptr_t expected_field_id = -1);
   void AllocateIndex(intptr_t index);
 
   // Static field elements are being freed only during isolate reload
@@ -89,6 +95,10 @@
   // mutator thread up-to-date.
   Isolate* isolate_;
 
+  // Whether this field table is ready for use, e.g. for registering new
+  // static fields.
+  bool is_ready_to_use_ = false;
+
   DISALLOW_COPY_AND_ASSIGN(FieldTable);
 };
 
diff --git a/runtime/vm/heap/freelist.cc b/runtime/vm/heap/freelist.cc
index 2923aee..e2fd5f8 100644
--- a/runtime/vm/heap/freelist.cc
+++ b/runtime/vm/heap/freelist.cc
@@ -373,41 +373,4 @@
   return NULL;
 }
 
-void FreeList::MergeFrom(FreeList* donor, bool is_protected) {
-  // The [other] free list is from a dying isolate. There are no other threads
-  // accessing it, so there is no need to lock here.
-  MutexLocker ml(&mutex_);
-  for (intptr_t i = 0; i < (kNumLists + 1); ++i) {
-    FreeListElement* donor_head = donor->free_lists_[i];
-    if (donor_head != nullptr) {
-      // If we didn't have a freelist element before we have to set the bit now,
-      // since we will get 1+ elements from [other].
-      FreeListElement* old_head = free_lists_[i];
-      if (old_head == nullptr && i != kNumLists) {
-        free_map_.Set(i, true);
-      }
-
-      // Chain other's list in.
-      FreeListElement* last = donor_head;
-      while (last->next() != nullptr) {
-        last = last->next();
-      }
-
-      if (is_protected) {
-        VirtualMemory::Protect(reinterpret_cast<void*>(last), sizeof(*last),
-                               VirtualMemory::kReadWrite);
-      }
-      last->set_next(old_head);
-      if (is_protected) {
-        VirtualMemory::Protect(reinterpret_cast<void*>(last), sizeof(*last),
-                               VirtualMemory::kReadExecute);
-      }
-      free_lists_[i] = donor_head;
-    }
-  }
-
-  last_free_small_size_ =
-      Utils::Maximum(last_free_small_size_, donor->last_free_small_size_);
-}
-
 }  // namespace dart
diff --git a/runtime/vm/heap/freelist.h b/runtime/vm/heap/freelist.h
index 677a0f2..b868c19 100644
--- a/runtime/vm/heap/freelist.h
+++ b/runtime/vm/heap/freelist.h
@@ -154,8 +154,6 @@
   void set_end(uword value) { end_ = value; }
   void AddUnaccountedSize(intptr_t size) { unaccounted_size_ += size; }
 
-  void MergeFrom(FreeList* donor, bool is_protected);
-
  private:
   static const int kNumLists = 128;
   static const intptr_t kInitialFreeListSearchBudget = 1000;
diff --git a/runtime/vm/heap/heap.cc b/runtime/vm/heap/heap.cc
index a1d5866..e5c2426 100644
--- a/runtime/vm/heap/heap.cc
+++ b/runtime/vm/heap/heap.cc
@@ -717,30 +717,6 @@
   gc_on_nth_allocation_ = num_allocations;
 }
 
-void Heap::MergeFrom(Heap* donor) {
-  ASSERT(!donor->read_only_);
-  ASSERT(donor->old_space()->tasks() == 0);
-
-  new_space_.MergeFrom(donor->new_space());
-  old_space_.MergeFrom(donor->old_space());
-
-  for (intptr_t i = 0; i < kNumWeakSelectors; ++i) {
-    // The new space rehashing should not be necessary.
-    new_weak_tables_[i]->MergeFrom(donor->new_weak_tables_[i]);
-    old_weak_tables_[i]->MergeFrom(donor->old_weak_tables_[i]);
-  }
-
-  StoreBufferBlock* block =
-      donor->isolate_group()->store_buffer()->TakeBlocks();
-  while (block != nullptr) {
-    StoreBufferBlock* next = block->next();
-    block->set_next(nullptr);
-    isolate_group()->store_buffer()->PushBlock(block,
-                                               StoreBuffer::kIgnoreThreshold);
-    block = next;
-  }
-}
-
 void Heap::CollectForDebugging() {
   if (gc_on_nth_allocation_ == kNoForcedGarbageCollection) return;
   if (Thread::Current()->IsAtSafepoint()) {
diff --git a/runtime/vm/heap/heap.h b/runtime/vm/heap/heap.h
index 026d793..f303a15 100644
--- a/runtime/vm/heap/heap.h
+++ b/runtime/vm/heap/heap.h
@@ -322,8 +322,6 @@
 
   void CollectOnNthAllocation(intptr_t num_allocations);
 
-  void MergeFrom(Heap* donor);
-
  private:
   class GCStats : public ValueObject {
    public:
diff --git a/runtime/vm/heap/pages.cc b/runtime/vm/heap/pages.cc
index d3b62fa..39180e8 100644
--- a/runtime/vm/heap/pages.cc
+++ b/runtime/vm/heap/pages.cc
@@ -1447,95 +1447,6 @@
   return false;
 }
 
-static void AppendList(OldPage** pages,
-                       OldPage** pages_tail,
-                       OldPage** other_pages,
-                       OldPage** other_pages_tail) {
-  ASSERT((*pages == nullptr) == (*pages_tail == nullptr));
-  ASSERT((*other_pages == nullptr) == (*other_pages_tail == nullptr));
-
-  if (*other_pages != nullptr) {
-    if (*pages_tail == nullptr) {
-      *pages = *other_pages;
-      *pages_tail = *other_pages_tail;
-    } else {
-      const bool is_execute = FLAG_write_protect_code &&
-                              (*pages_tail)->type() == OldPage::kExecutable;
-      if (is_execute) {
-        (*pages_tail)->WriteProtect(false);
-      }
-      (*pages_tail)->set_next(*other_pages);
-      if (is_execute) {
-        (*pages_tail)->WriteProtect(true);
-      }
-      *pages_tail = *other_pages_tail;
-    }
-    *other_pages = nullptr;
-    *other_pages_tail = nullptr;
-  }
-}
-
-static void EnsureEqualImagePages(OldPage* pages, OldPage* other_pages) {
-#if defined(DEBUG)
-  while (pages != nullptr) {
-    ASSERT((pages == nullptr) == (other_pages == nullptr));
-    ASSERT(pages->object_start() == other_pages->object_start());
-    ASSERT(pages->object_end() == other_pages->object_end());
-    pages = pages->next();
-    other_pages = other_pages->next();
-  }
-#endif
-}
-
-void PageSpace::MergeFrom(PageSpace* donor) {
-  donor->AbandonBumpAllocation();
-
-  ASSERT(donor->tasks_ == 0);
-  ASSERT(donor->concurrent_marker_tasks_ == 0);
-  ASSERT(donor->phase_ == kDone);
-  DEBUG_ASSERT(donor->iterating_thread_ == nullptr);
-  ASSERT(donor->marker_ == nullptr);
-
-  for (intptr_t i = 0; i < num_freelists_; ++i) {
-    ASSERT(donor->freelists_[i].top() == 0);
-    ASSERT(donor->freelists_[i].end() == 0);
-    const bool is_protected =
-        FLAG_write_protect_code && i == OldPage::kExecutable;
-    freelists_[i].MergeFrom(&donor->freelists_[i], is_protected);
-    donor->freelists_[i].Reset();
-  }
-
-  // The freelist locks will be taken in MergeOtherFreelist above, and the
-  // locking order is the freelist locks are taken before the page list locks,
-  // so don't take the pages lock until after MergeOtherFreelist.
-  MutexLocker ml(&pages_lock_);
-  MutexLocker ml2(&donor->pages_lock_);
-
-  AppendList(&pages_, &pages_tail_, &donor->pages_, &donor->pages_tail_);
-  AppendList(&exec_pages_, &exec_pages_tail_, &donor->exec_pages_,
-             &donor->exec_pages_tail_);
-  AppendList(&large_pages_, &large_pages_tail_, &donor->large_pages_,
-             &donor->large_pages_tail_);
-  // We intentionall do not merge [image_pages_] beause [this] and [other] have
-  // the same mmap()ed image page areas.
-  EnsureEqualImagePages(image_pages_, donor->image_pages_);
-
-  // We intentionaly do not increase [max_capacity_in_words_] because this can
-  // lead [max_capacity_in_words_] to become larger and larger and eventually
-  // wrap-around and become negative.
-  allocated_black_in_words_ += donor->allocated_black_in_words_;
-  gc_time_micros_ += donor->gc_time_micros_;
-  collections_ += donor->collections_;
-
-  usage_.capacity_in_words += donor->usage_.capacity_in_words;
-  usage_.used_in_words += donor->usage_.used_in_words;
-  usage_.external_in_words += donor->usage_.external_in_words;
-
-  page_space_controller_.MergeFrom(&donor->page_space_controller_);
-
-  ASSERT(FLAG_concurrent_mark || donor->enable_concurrent_mark_ == false);
-}
-
 PageSpaceController::PageSpaceController(Heap* heap,
                                          int heap_growth_ratio,
                                          int heap_growth_max,
@@ -1771,12 +1682,6 @@
   // TODO(rmacnak): Hasten the soft threshold at some discount?
 }
 
-void PageSpaceController::MergeFrom(PageSpaceController* donor) {
-  last_usage_.capacity_in_words += donor->last_usage_.capacity_in_words;
-  last_usage_.used_in_words += donor->last_usage_.used_in_words;
-  last_usage_.external_in_words += donor->last_usage_.external_in_words;
-}
-
 void PageSpaceGarbageCollectionHistory::AddGarbageCollectionTime(int64_t start,
                                                                  int64_t end) {
   Entry entry;
diff --git a/runtime/vm/heap/pages.h b/runtime/vm/heap/pages.h
index a786f78..434ad7f 100644
--- a/runtime/vm/heap/pages.h
+++ b/runtime/vm/heap/pages.h
@@ -240,8 +240,6 @@
   friend class PageSpace;  // For MergeOtherPageSpaceController
 
   void RecordUpdate(SpaceUsage before, SpaceUsage after, const char* reason);
-  void MergeFrom(PageSpaceController* donor);
-
   void RecordUpdate(SpaceUsage before,
                     SpaceUsage after,
                     intptr_t growth_in_pages,
@@ -489,8 +487,6 @@
 
   bool IsObjectFromImagePages(ObjectPtr object);
 
-  void MergeFrom(PageSpace* donor);
-
  private:
   // Ids for time and data records in Heap::GCStats.
   enum {
diff --git a/runtime/vm/heap/scavenger.cc b/runtime/vm/heap/scavenger.cc
index 3f6f09d..f22ab54 100644
--- a/runtime/vm/heap/scavenger.cc
+++ b/runtime/vm/heap/scavenger.cc
@@ -715,19 +715,6 @@
   tail_ = tail;
 }
 
-void SemiSpace::MergeFrom(SemiSpace* donor) {
-  for (NewPage* page = donor->head_; page != nullptr; page = page->next()) {
-    page->Release();
-  }
-
-  AddList(donor->head_, donor->tail_);
-  capacity_in_words_ += donor->capacity_in_words_;
-
-  donor->head_ = nullptr;
-  donor->tail_ = nullptr;
-  donor->capacity_in_words_ = 0;
-}
-
 // The initial estimate of how many words we can scavenge per microsecond (usage
 // before / scavenge time). This is a conservative value observed running
 // Flutter on a Nexus 4. After the first scavenge, we instead use a value based
@@ -1721,13 +1708,4 @@
   ASSERT((UsedInWords() == 0) || failed_to_promote_);
 }
 
-void Scavenger::MergeFrom(Scavenger* donor) {
-  MutexLocker ml(&space_lock_);
-  MutexLocker ml2(&donor->space_lock_);
-  to_->MergeFrom(donor->to_);
-
-  external_size_ += donor->external_size_;
-  donor->external_size_ = 0;
-}
-
 }  // namespace dart
diff --git a/runtime/vm/heap/scavenger.h b/runtime/vm/heap/scavenger.h
index 5e7486b..785825e 100644
--- a/runtime/vm/heap/scavenger.h
+++ b/runtime/vm/heap/scavenger.h
@@ -184,7 +184,6 @@
   NewPage* head() const { return head_; }
 
   void AddList(NewPage* head, NewPage* tail);
-  void MergeFrom(SemiSpace* donor);
 
  private:
   // Size of NewPages in this semi-space.
@@ -281,8 +280,6 @@
   // Promote all live objects.
   void Evacuate();
 
-  void MergeFrom(Scavenger* donor);
-
   int64_t UsedInWords() const {
     MutexLocker ml(&space_lock_);
     return to_->capacity_in_words();
diff --git a/runtime/vm/heap/weak_table.cc b/runtime/vm/heap/weak_table.cc
index fdb0a7d..4070590 100644
--- a/runtime/vm/heap/weak_table.cc
+++ b/runtime/vm/heap/weak_table.cc
@@ -140,12 +140,4 @@
   free(old_data);
 }
 
-void WeakTable::MergeFrom(WeakTable* donor) {
-  for (intptr_t i = 0; i < donor->size(); i++) {
-    if (donor->IsValidEntryAtExclusive(i)) {
-      SetValueExclusive(donor->ObjectAtExclusive(i), ValueIndex(i));
-    }
-  }
-}
-
 }  // namespace dart
diff --git a/runtime/vm/heap/weak_table.h b/runtime/vm/heap/weak_table.h
index c7728b0..c72d773 100644
--- a/runtime/vm/heap/weak_table.h
+++ b/runtime/vm/heap/weak_table.h
@@ -133,8 +133,6 @@
 
   void Reset();
 
-  void MergeFrom(WeakTable* donor);
-
  private:
   enum {
     kObjectOffset = 0,
diff --git a/runtime/vm/isolate.cc b/runtime/vm/isolate.cc
index 4500d86..7f229b7 100644
--- a/runtime/vm/isolate.cc
+++ b/runtime/vm/isolate.cc
@@ -345,11 +345,7 @@
       safepoint_handler_(new SafepointHandler(this)),
       shared_class_table_(new SharedClassTable()),
       object_store_(object_store),
-#if defined(DART_PRECOMPILED_RUNTIME)
       class_table_(new ClassTable(shared_class_table_.get())),
-#else
-      class_table_(nullptr),
-#endif
       store_buffer_(new StoreBuffer()),
       heap_(nullptr),
       saved_unlinked_calls_(Array::null()),
@@ -393,14 +389,7 @@
 
 IsolateGroup::IsolateGroup(std::shared_ptr<IsolateGroupSource> source,
                            void* embedder_data)
-    : IsolateGroup(source,
-                   embedder_data,
-#if !defined(DART_PRECOMPILED_RUNTIME)
-                   // in JIT, with --enable_isolate_groups keep object store
-                   // on isolate, rather than on isolate group
-                   FLAG_enable_isolate_groups ? nullptr :
-#endif
-                                              new ObjectStore()) {
+    : IsolateGroup(source, embedder_data, new ObjectStore()) {
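+  // The object store (and with it the shared libraries and code) is now
+  // always owned by the isolate group and shared by all of its isolates,
+  // in JIT as well as in AOT mode.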
   if (object_store() != nullptr) {
     object_store()->InitStubs();
   }
@@ -908,15 +897,31 @@
   ASSERT(program_lock()->IsCurrentThreadWriter());
 
   ASSERT(field.is_static());
-  initial_field_table()->Register(field);
-  initial_field_table()->SetAt(field.field_id(), initial_value.raw());
+  const bool need_to_grow_backing_store =
+      initial_field_table()->Register(field);
+  const intptr_t field_id = field.field_id();
+  initial_field_table()->SetAt(field_id, initial_value.raw());
 
-  // TODO(dartbug.com/36097): When we start sharing the object stores (and
-  // therefore libraries, classes, fields) we'll have to register the initial
-  // static field value in all isolates.
-  auto current = Isolate::Current();
-  current->field_table()->AllocateIndex(field.field_id());
-  current->field_table()->SetAt(field.field_id(), initial_value.raw());
+  if (need_to_grow_backing_store) {
+    // We have to stop other isolates from accessing their field state, since
+    // we'll have to grow the backing store.
+    SafepointOperationScope ops(Thread::Current());
+    for (auto isolate : isolates_) {
+      auto field_table = isolate->field_table();
+      if (field_table->IsReadyToUse()) {
+        field_table->Register(field, field_id);
+        field_table->SetAt(field_id, initial_value.raw());
+      }
+    }
+  } else {
+    for (auto isolate : isolates_) {
+      auto field_table = isolate->field_table();
+      if (field_table->IsReadyToUse()) {
+        field_table->Register(field, field_id);
+        field_table->SetAt(field_id, initial_value.raw());
+      }
+    }
+  }
 }
 
 void Isolate::RehashConstants() {
@@ -1636,11 +1641,7 @@
       isolate_object_store_(
           new IsolateObjectStore(isolate_group->object_store())),
       object_store_shared_ptr_(isolate_group->object_store_shared_ptr()),
-#if defined(DART_PRECOMPILED_RUNTIME)
       class_table_(isolate_group->class_table_shared_ptr()),
-#else
-      class_table_(new ClassTable(shared_class_table_)),
-#endif
 #if !defined(DART_PRECOMPILED_RUNTIME)
       native_callback_trampolines_(),
 #endif
@@ -1773,12 +1774,7 @@
     // Non-vm isolates need to have isolate object store initialized is that
     // exit_listeners have to be null-initialized as they will be used if
     // we fail to create isolate below, have to do low level shutdown.
-    if (result->object_store() == nullptr) {
-      // in JIT with --enable-isolate-groups each isolate still
-      // has to have its own object store
-      result->set_object_store(new ObjectStore());
-      result->object_store()->InitStubs();
-    }
+    ASSERT(result->object_store() != nullptr);
     result->isolate_object_store()->Init();
   }
 
diff --git a/runtime/vm/isolate.h b/runtime/vm/isolate.h
index e88229a..a3b1b8a 100644
--- a/runtime/vm/isolate.h
+++ b/runtime/vm/isolate.h
@@ -718,8 +718,8 @@
   uint64_t id_ = 0;
 
   std::unique_ptr<SharedClassTable> shared_class_table_;
-  std::shared_ptr<ObjectStore> object_store_;  // nullptr in JIT mode
-  std::shared_ptr<ClassTable> class_table_;    // nullptr in JIT mode
+  std::shared_ptr<ObjectStore> object_store_;
+  std::shared_ptr<ClassTable> class_table_;
   std::unique_ptr<StoreBuffer> store_buffer_;
   std::unique_ptr<Heap> heap_;
   std::unique_ptr<DispatchTable> dispatch_table_;
diff --git a/runtime/vm/object.cc b/runtime/vm/object.cc
index e673c0a..acb3646 100644
--- a/runtime/vm/object.cc
+++ b/runtime/vm/object.cc
@@ -10731,16 +10731,23 @@
 
 void Field::SetStaticValue(const Instance& value,
                            bool save_initial_value) const {
-  ASSERT(Thread::Current()->IsMutatorThread());
+  auto thread = Thread::Current();
+  ASSERT(thread->IsMutatorThread());
 
   ASSERT(is_static());  // Valid only for static dart fields.
-  Isolate* isolate = Isolate::Current();
   const intptr_t id = field_id();
   ASSERT(id >= 0);
-  isolate->field_table()->SetAt(id, value.raw());
+
+  SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock());
+  thread->isolate()->field_table()->SetAt(id, value.raw());
   if (save_initial_value) {
+    // TODO(https://dartbug.com/36097): We should revisit call sites where
+    // `save_initial_value == true` and try to have a different path. This
+    // method should only modify the isolate-local field state, not the
+    // initial field table.
 #if !defined(DART_PRECOMPILED_RUNTIME)
-    isolate->group()->initial_field_table()->SetAt(field_id(), value.raw());
+    thread->isolate_group()->initial_field_table()->SetAt(field_id(),
+                                                          value.raw());
 #endif
   }
 }
diff --git a/runtime/vm/object_reload.cc b/runtime/vm/object_reload.cc
index 907dda1..4aab7f8 100644
--- a/runtime/vm/object_reload.cc
+++ b/runtime/vm/object_reload.cc
@@ -219,6 +219,12 @@
           if (update_values && !field.is_const()) {
             // Make new field point to the old field value so that both
             // old and new code see and update same value.
+            //
+            // TODO(https://dartbug.com/36097): Once we look into enabling
+            // hot-reload with --enable-isolate-groups, we will have to do
+            // this for all isolates.
+            reload_context->isolate()->group()->initial_field_table()->Free(
+                field.field_id());
             reload_context->isolate()->field_table()->Free(field.field_id());
             field.set_field_id(old_field.field_id());
           }
diff --git a/runtime/vm/unit_test.cc b/runtime/vm/unit_test.cc
index 52f46f8..3364072 100644
--- a/runtime/vm/unit_test.cc
+++ b/runtime/vm/unit_test.cc
@@ -164,13 +164,8 @@
                                                 void* group_data,
                                                 void* isolate_data) {
   char* error;
-#if defined(DART_PRECOMPILED_RUNTIME)
-  Isolate* result = CreateWithinExistingIsolateGroupAOT(
-      reinterpret_cast<Isolate*>(parent)->group(), name, &error);
-#else
   Isolate* result = CreateWithinExistingIsolateGroup(
       reinterpret_cast<Isolate*>(parent)->group(), name, &error);
-#endif
   if (error != nullptr) {
     OS::PrintErr("CreateTestIsolateInGroup failed: %s\n", error);
     free(error);