[VM] Bare instructions - Part 4: Add --use-bare-instructions flag to AOT compiler & runtime

This final CL adds a new --use-bare-instructions flag to the VM.

If this flag is set during AOT compilation, we will:

  * Build one global object pool (abbr: GOP) which all code objects
    share. This GOP is stored in the object store. The PP register is
    populated in the enter-Dart stub and restored when returning from
    native calls.

  * Get rid of the CODE_REG/PP slots in Dart frames. Compiled code
    instead uses the global object pool, which is always available in PP.

  * Start emitting pc-relative calls for calls between two Dart
    functions or when invoking a stub.
    Limitation: We only emit pc-relative calls between two code objects
    in the same isolate (because the image writer writes instruction
    objects for the vm-isolate and main-isolate separately).

  * Relocate those static calls at compile time, after the precompiler
    has finished its work but before the snapshot is written. This
    patches all instruction objects containing pc-relative calls to use
    the correct .text distance.

  * Emit a sorted list of code objects in ObjectStore::reverse_code_table,
    which the AOT runtime uses to map a PC back to its Code object
    (where all metadata, e.g. stack maps, catch entry moves and pc
    descriptors, is available). A sketch of such a lookup follows below.
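
The reverse lookup is conceptually a binary search over a table of code
objects sorted by the start address of their instructions. Below is a
minimal C++ sketch of that idea only; CodeEntry and LookupCodeByPc are
illustrative names, not the VM's actual API, and the real runtime
operates on RawCode/Instructions objects from the snapshot:

    #include <stdint.h>
    #include <vector>

    // Illustrative stand-in for a Code object's instruction range.
    struct CodeEntry {
      uintptr_t payload_start;  // First byte of the instructions payload.
      uintptr_t payload_end;    // One past the last payload byte.
      // ... metadata: stack maps, pc descriptors, catch entry moves ...
    };

    // Maps a PC back to the entry covering it via binary search over a
    // table sorted by payload_start; returns nullptr if nothing covers pc.
    const CodeEntry* LookupCodeByPc(const std::vector<CodeEntry>& table,
                                    uintptr_t pc) {
      intptr_t lo = 0;
      intptr_t hi = static_cast<intptr_t>(table.size()) - 1;
      while (lo <= hi) {
        const intptr_t mid = lo + (hi - lo) / 2;
        if (pc < table[mid].payload_start) {
          hi = mid - 1;
        } else if (pc >= table[mid].payload_end) {
          lo = mid + 1;
        } else {
          return &table[mid];  // payload_start <= pc < payload_end.
        }
      }
      return nullptr;
    }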

Issue https://github.com/dart-lang/sdk/issues/33274

Change-Id: I6c5dd2b1571e3a889b27e804a24c2986c71e03b6
Reviewed-on: https://dart-review.googlesource.com/c/85769
Commit-Queue: Martin Kustermann <kustermann@google.com>
Reviewed-by: Ryan Macnak <rmacnak@google.com>
Reviewed-by: Vyacheslav Egorov <vegorov@google.com>
diff --git a/runtime/include/dart_api.h b/runtime/include/dart_api.h
index d5fa7f7..6f259c1 100644
--- a/runtime/include/dart_api.h
+++ b/runtime/include/dart_api.h
@@ -562,6 +562,7 @@
   bool use_osr;
   bool obfuscate;
   Dart_QualifiedFunctionName* entry_points;
+  bool use_bare_instructions;
   bool load_vmservice_library;
   bool unsafe_trust_strong_mode_types;
 } Dart_IsolateFlags;
diff --git a/runtime/vm/class_finalizer.cc b/runtime/vm/class_finalizer.cc
index fd69a3d..e0fc7de 100644
--- a/runtime/vm/class_finalizer.cc
+++ b/runtime/vm/class_finalizer.cc
@@ -2046,7 +2046,7 @@
   object_store->set_canonical_type_arguments(typeargs_table.Release());
 }
 
-void ClassFinalizer::ClearAllCode() {
+void ClassFinalizer::ClearAllCode(bool including_nonchanging_cids) {
   class ClearCodeFunctionVisitor : public FunctionVisitor {
     void Visit(const Function& function) {
       function.ClearCode();
@@ -2057,14 +2057,34 @@
   ProgramVisitor::VisitFunctions(&function_visitor);
 
   class ClearCodeClassVisitor : public ClassVisitor {
+   public:
+    explicit ClearCodeClassVisitor(bool force) : force_(force) {}
+
     void Visit(const Class& cls) {
-      if (cls.id() >= kNumPredefinedCids) {
+      if (force_ || cls.id() >= kNumPredefinedCids) {
         cls.DisableAllocationStub();
       }
     }
+
+   private:
+    bool force_;
   };
-  ClearCodeClassVisitor class_visitor;
+  ClearCodeClassVisitor class_visitor(including_nonchanging_cids);
   ProgramVisitor::VisitClasses(&class_visitor);
+
+  // Apart from normal function code and allocation stubs we have two global
+  // code objects to clear.
+  if (including_nonchanging_cids) {
+    auto thread = Thread::Current();
+    auto object_store = thread->isolate()->object_store();
+    auto& null_code = Code::Handle(thread->zone());
+    object_store->set_build_method_extractor_code(null_code);
+
+    auto& miss_function = Function::Handle(
+        thread->zone(), object_store->megamorphic_miss_function());
+    miss_function.ClearCode();
+    object_store->SetMegamorphicMissHandler(null_code, miss_function);
+  }
 }
 
 }  // namespace dart
diff --git a/runtime/vm/class_finalizer.h b/runtime/vm/class_finalizer.h
index 565e452..b3b9c42 100644
--- a/runtime/vm/class_finalizer.h
+++ b/runtime/vm/class_finalizer.h
@@ -43,7 +43,7 @@
   static void SortClasses();
   static void RemapClassIds(intptr_t* old_to_new_cid);
   static void RehashTypes();
-  static void ClearAllCode();
+  static void ClearAllCode(bool including_nonchanging_cids = false);
 
   // Return whether processing pending classes (ObjectStore::pending_classes_)
   // failed. The function returns true if the processing was successful.
diff --git a/runtime/vm/clustered_snapshot.cc b/runtime/vm/clustered_snapshot.cc
index 4e4d864..2e86c2b 100644
--- a/runtime/vm/clustered_snapshot.cc
+++ b/runtime/vm/clustered_snapshot.cc
@@ -1446,13 +1446,41 @@
   ~CodeDeserializationCluster() {}
 
   void ReadAlloc(Deserializer* d) {
+    const bool is_vm_object = d->isolate() == Dart::vm_isolate();
+
     start_index_ = d->next_index();
     PageSpace* old_space = d->heap()->old_space();
     const intptr_t count = d->ReadUnsigned();
 
-    for (intptr_t i = 0; i < count; i++) {
-      d->AssignRef(AllocateUninitialized(old_space, Code::InstanceSize(0)));
+    // Build an array of code objects representing the order in which the
+    // [Code]'s instructions will be located in memory.
+    const bool build_code_order =
+        FLAG_precompiled_mode && FLAG_use_bare_instructions;
+    RawArray* code_order = nullptr;
+    const intptr_t code_order_length = d->code_order_length();
+    if (build_code_order) {
+      code_order = static_cast<RawArray*>(
+          AllocateUninitialized(old_space, Array::InstanceSize(count)));
+      Deserializer::InitializeHeader(code_order, kArrayCid,
+                                     Array::InstanceSize(count), is_vm_object,
+                                     /*is_canonical=*/false);
+      code_order->ptr()->type_arguments_ = TypeArguments::null();
+      code_order->ptr()->length_ = Smi::New(code_order_length);
     }
+
+    for (intptr_t i = 0; i < count; i++) {
+      auto code = AllocateUninitialized(old_space, Code::InstanceSize(0));
+      d->AssignRef(code);
+      if (code_order != nullptr && i < code_order_length) {
+        code_order->ptr()->data()[i] = code;
+      }
+    }
+
+    if (code_order != nullptr) {
+      const auto& code_order_table = Array::Handle(code_order);
+      d->isolate()->object_store()->set_code_order_table(code_order_table);
+    }
+
     stop_index_ = d->next_index();
   }
 
diff --git a/runtime/vm/compiler/aot/precompiler.cc b/runtime/vm/compiler/aot/precompiler.cc
index 08c3d82..d250b51 100644
--- a/runtime/vm/compiler/aot/precompiler.cc
+++ b/runtime/vm/compiler/aot/precompiler.cc
@@ -24,6 +24,7 @@
 #include "vm/compiler/compiler_pass.h"
 #include "vm/compiler/compiler_state.h"
 #include "vm/compiler/frontend/flow_graph_builder.h"
+#include "vm/compiler/frontend/kernel_to_il.h"
 #include "vm/compiler/jit/compiler.h"
 #include "vm/dart_entry.h"
 #include "vm/exceptions.h"
@@ -58,6 +59,7 @@
 #define Z (zone())
 
 DEFINE_FLAG(bool, print_unique_targets, false, "Print unique dynamic targets");
+DEFINE_FLAG(bool, print_gop, false, "Print global object pool");
 DEFINE_FLAG(bool, trace_precompiler, false, "Trace precompiler.");
 DEFINE_FLAG(
     int,
@@ -84,6 +86,8 @@
 DECLARE_FLAG(int, inlining_constant_arguments_min_size_threshold);
 DECLARE_FLAG(bool, print_instruction_stats);
 
+Precompiler* Precompiler::singleton_ = nullptr;
+
 #if defined(DART_PRECOMPILER) && !defined(TARGET_ARCH_DBC) &&                  \
     !defined(TARGET_ARCH_IA32)
 
@@ -173,7 +177,15 @@
       types_to_retain_(),
       consts_to_retain_(),
       error_(Error::Handle()),
-      get_runtime_type_is_unique_(false) {}
+      get_runtime_type_is_unique_(false) {
+  ASSERT(Precompiler::singleton_ == NULL);
+  Precompiler::singleton_ = this;
+}
+
+Precompiler::~Precompiler() {
+  ASSERT(Precompiler::singleton_ == this);
+  Precompiler::singleton_ = NULL;
+}
 
 void Precompiler::DoCompileAll() {
   ASSERT(I->compilation_allowed());
@@ -182,8 +194,16 @@
     StackZone stack_zone(T);
     zone_ = stack_zone.GetZone();
 
+    if (FLAG_use_bare_instructions) {
+      // Since we keep the object pool until the end of AOT compilation, it
+      // will hang on to its entries until the very end. Therefore we have
+      // to use handles which survive that long, so we use [zone_] here.
+      global_object_pool_wrapper_.InitializeWithZone(zone_);
+    }
+
     {
       HANDLESCOPE(T);
+
       // Make sure class hierarchy is stable before compilation so that CHA
       // can be used. Also ensures lookup of entry points won't miss functions
       // because their class hasn't been finalized yet.
@@ -202,10 +222,35 @@
 
       // Precompile constructors to compute information such as
       // optimized instruction count (used in inlining heuristics).
-      ClassFinalizer::ClearAllCode();
+      ClassFinalizer::ClearAllCode(
+          /*including_nonchanging_cids=*/FLAG_use_bare_instructions);
       PrecompileConstructors();
 
-      ClassFinalizer::ClearAllCode();
+      ClassFinalizer::ClearAllCode(
+          /*including_nonchanging_cids=*/FLAG_use_bare_instructions);
+
+      // All stubs have already been generated; all of them share the same pool.
+      // We use that pool to initialize our global object pool, to guarantee
+      // stubs as well as code compiled from here on will have the same pool.
+      if (FLAG_use_bare_instructions) {
+        // We use any stub here to get its object pool (all stubs share the
+        // same object pool in bare instructions mode).
+        const Code& code = StubCode::InterpretCall();
+        const ObjectPool& stub_pool = ObjectPool::Handle(code.object_pool());
+
+        global_object_pool_wrapper()->Reset();
+        global_object_pool_wrapper()->InitializeFrom(stub_pool);
+
+        // We have two global code objects we need to re-generate with the new
+        // global object pool, namely the
+        //   - megamorphic miss handler code and the
+        //   - build method extractor code
+        MegamorphicCacheTable::ReInitMissHandlerCode(
+            isolate_, global_object_pool_wrapper());
+        I->object_store()->set_build_method_extractor_code(
+            Code::Handle(StubCode::GetBuildMethodExtractorStub(
+                global_object_pool_wrapper())));
+      }
 
       CollectDynamicFunctionNames();
 
@@ -221,6 +266,21 @@
       // [Type]-specialized stubs.
       AttachOptimizedTypeTestingStub();
 
+      if (FLAG_use_bare_instructions) {
+        // Now we generate the actual object pool instance and attach it to the
+        // object store. The AOT runtime will use it from there in the enter
+        // dart code stub.
+        const auto& pool =
+            ObjectPool::Handle(global_object_pool_wrapper()->MakeObjectPool());
+        I->object_store()->set_global_object_pool(pool);
+        global_object_pool_wrapper()->Reset();
+
+        if (FLAG_print_gop) {
+          THR_Print("Global object pool:\n");
+          pool.DebugPrint();
+        }
+      }
+
       I->set_compilation_allowed(false);
 
       TraceForRetainedFunctions();
@@ -433,6 +493,10 @@
 }
 
 void Precompiler::ProcessFunction(const Function& function) {
+  const intptr_t gop_offset =
+      FLAG_use_bare_instructions ? global_object_pool_wrapper()->CurrentLength()
+                                 : 0;
+
   if (!function.HasCode()) {
     function_count_++;
 
@@ -464,10 +528,10 @@
   }
 
   ASSERT(function.HasCode());
-  AddCalleesOf(function);
+  AddCalleesOf(function, gop_offset);
 }
 
-void Precompiler::AddCalleesOf(const Function& function) {
+void Precompiler::AddCalleesOf(const Function& function, intptr_t gop_offset) {
   ASSERT(function.HasCode());
 
   const Code& code = Code::Handle(Z, function.CurrentCode());
@@ -494,12 +558,24 @@
   FATAL("Callee scanning unimplemented for IA32");
 #endif
 
-  const ObjectPool& pool = ObjectPool::Handle(Z, code.GetObjectPool());
   String& selector = String::Handle(Z);
-  for (intptr_t i = 0; i < pool.Length(); i++) {
-    if (pool.TypeAt(i) == ObjectPool::kTaggedObject) {
-      entry = pool.ObjectAt(i);
-      AddCalleesOfHelper(entry, &selector, &cls);
+  if (FLAG_use_bare_instructions) {
+    for (intptr_t i = gop_offset;
+         i < global_object_pool_wrapper()->CurrentLength(); i++) {
+      const auto& wrapper_entry = global_object_pool_wrapper()->EntryAt(i);
+      if (wrapper_entry.type() == ObjectPool::kTaggedObject) {
+        const auto& entry = *wrapper_entry.obj_;
+        AddCalleesOfHelper(entry, &selector, &cls);
+      }
+    }
+  } else {
+    const auto& pool = ObjectPool::Handle(Z, code.object_pool());
+    auto& entry = Object::Handle(Z);
+    for (intptr_t i = 0; i < pool.Length(); i++) {
+      if (pool.TypeAt(i) == ObjectPool::kTaggedObject) {
+        entry = pool.ObjectAt(i);
+        AddCalleesOfHelper(entry, &selector, &cls);
+      }
     }
   }
 
@@ -672,7 +748,7 @@
   }
 }
 
-void Precompiler::AddConstObject(const Instance& instance) {
+void Precompiler::AddConstObject(const class Instance& instance) {
   // Types and type arguments require special handling.
   if (instance.IsAbstractType()) {
     AddType(AbstractType::Cast(instance));
@@ -770,12 +846,16 @@
         if (FLAG_trace_precompiler) {
           THR_Print("Precompiling initializer for %s\n", field.ToCString());
         }
+        const intptr_t gop_offset =
+            FLAG_use_bare_instructions
+                ? global_object_pool_wrapper()->CurrentLength()
+                : 0;
         ASSERT(Dart::vm_snapshot_kind() != Snapshot::kFullAOT);
         const Function& initializer =
             Function::Handle(Z, CompileStaticInitializer(field));
         ASSERT(!initializer.IsNull());
         field.SetPrecompiledInitializer(initializer);
-        AddCalleesOf(initializer);
+        AddCalleesOf(initializer, gop_offset);
       }
     }
   }
@@ -792,7 +872,7 @@
       kernel::ParseStaticFieldInitializer(zone, field);
 
   DartCompilationPipeline pipeline;
-  PrecompileParsedFunctionHelper helper(/* precompiler = */ NULL,
+  PrecompileParsedFunctionHelper helper(Precompiler::Instance(),
                                         parsed_function,
                                         /* optimized = */ true);
   if (!helper.Compile(&pipeline)) {
@@ -1527,12 +1607,12 @@
 }
 
 void Precompiler::TraceTypesFromRetainedClasses() {
-  Library& lib = Library::Handle(Z);
-  Class& cls = Class::Handle(Z);
-  Array& members = Array::Handle(Z);
-  Array& constants = Array::Handle(Z);
-  GrowableObjectArray& retained_constants = GrowableObjectArray::Handle(Z);
-  Instance& constant = Instance::Handle(Z);
+  auto& lib = Library::Handle(Z);
+  auto& cls = Class::Handle(Z);
+  auto& members = Array::Handle(Z);
+  auto& constants = Array::Handle(Z);
+  auto& retained_constants = GrowableObjectArray::Handle(Z);
+  auto& constant = Instance::Handle(Z);
 
   for (intptr_t i = 0; i < libraries_.Length(); i++) {
     lib ^= libraries_.At(i);
@@ -1837,33 +1917,41 @@
       code_ = function.CurrentCode();
       table_ = code_.static_calls_target_table();
       StaticCallsTable static_calls(table_);
+      bool only_call_via_code = true;
       for (auto& view : static_calls) {
         kind_and_offset_ = view.Get<Code::kSCallTableKindAndOffset>();
         auto kind = Code::KindField::decode(kind_and_offset_.Value());
-        ASSERT(kind == Code::kCallViaCode);
         auto pc_offset = Code::OffsetField::decode(kind_and_offset_.Value());
-        target_ = view.Get<Code::kSCallTableFunctionTarget>();
-        if (target_.IsNull()) {
-          target_ = view.Get<Code::kSCallTableCodeTarget>();
-          ASSERT(!Code::Cast(target_).IsFunctionCode());
-          // Allocation stub or AllocateContext or AllocateArray or ...
+        if (kind == Code::kCallViaCode) {
+          target_ = view.Get<Code::kSCallTableFunctionTarget>();
+          if (target_.IsNull()) {
+            target_ = view.Get<Code::kSCallTableCodeTarget>();
+            ASSERT(!Code::Cast(target_).IsFunctionCode());
+            // Allocation stub or AllocateContext or AllocateArray or ...
+          } else {
+            // Static calls initially call the CallStaticFunction stub because
+            // their target might not be compiled yet. After tree shaking, all
+            // static call targets are compiled.
+            // Cf. runtime entry PatchStaticCall called from CallStaticFunction
+            // stub.
+            auto& fun = Function::Cast(target_);
+            ASSERT(fun.HasCode());
+            target_code_ ^= fun.CurrentCode();
+            uword pc = pc_offset + code_.PayloadStart();
+            CodePatcher::PatchStaticCallAt(pc, code_, target_code_);
+          }
         } else {
-          // Static calls initially call the CallStaticFunction stub because
-          // their target might not be compiled yet. After tree shaking, all
-          // static call targets are compiled.
-          // Cf. runtime entry PatchStaticCall called from CallStaticFunction
-          // stub.
-          const auto& fun = Function::Cast(target_);
-          ASSERT(fun.HasCode());
-          target_code_ ^= fun.CurrentCode();
-          uword pc = pc_offset + code_.PayloadStart();
-          CodePatcher::PatchStaticCallAt(pc, code_, target_code_);
+          ASSERT(kind == Code::kPcRelativeCall ||
+                 kind == Code::kPcRelativeTailCall);
+          only_call_via_code = false;
         }
       }
 
       // We won't patch static calls anymore, so drop the static call table to
       // save space.
-      code_.set_static_calls_target_table(Object::empty_array());
+      if (only_call_via_code) {
+        code_.set_static_calls_target_table(Object::empty_array());
+      }
     }
 
    private:
@@ -1889,6 +1977,7 @@
 }
 
 void Precompiler::SwitchICCalls() {
+  ASSERT(!I->compilation_allowed());
 #if !defined(TARGET_ARCH_DBC)
   // Now that all functions have been compiled, we can switch to an instance
   // call sequence that loads the Code object and entry point directly from
@@ -1978,17 +2067,23 @@
   };
 
   ICCallSwitcher switcher(Z);
-  SwitchICCallsVisitor visitor(&switcher, Z);
+  auto& gop = ObjectPool::Handle(I->object_store()->global_object_pool());
+  ASSERT(gop.IsNull() != FLAG_use_bare_instructions);
+  if (FLAG_use_bare_instructions) {
+    switcher.SwitchPool(gop);
+  } else {
+    SwitchICCallsVisitor visitor(&switcher, Z);
 
-  // We need both iterations to ensure we visit all the functions that might end
-  // up in the snapshot. The ProgramVisitor will miss closures from duplicated
-  // finally clauses, and not all functions are compiled through the
-  // tree-shaker's queue
-  ProgramVisitor::VisitFunctions(&visitor);
-  FunctionSet::Iterator it(enqueued_functions_.GetIterator());
-  for (const Function** current = it.Next(); current != NULL;
-       current = it.Next()) {
-    visitor.Visit(**current);
+    // We need both iterations to ensure we visit all the functions that might
+    // end up in the snapshot. The ProgramVisitor will miss closures from
+    // duplicated finally clauses, and not all functions are compiled through
+    // the tree-shaker's queue
+    ProgramVisitor::VisitFunctions(&visitor);
+    FunctionSet::Iterator it(enqueued_functions_.GetIterator());
+    for (const Function** current = it.Next(); current != NULL;
+         current = it.Next()) {
+      visitor.Visit(**current);
+    }
   }
 #endif
 }
@@ -2115,9 +2210,12 @@
       Array::Handle(zone, graph_compiler->CreateDeoptInfo(assembler));
   // Allocates instruction object. Since this occurs only at safepoint,
   // there can be no concurrent access to the instruction page.
-  const Code& code = Code::Handle(Code::FinalizeCode(
-      function, graph_compiler, assembler, Code::PoolAttachment::kAttachPool,
-      optimized(), stats));
+  const auto pool_attachment = FLAG_use_bare_instructions
+                                   ? Code::PoolAttachment::kNotAttachPool
+                                   : Code::PoolAttachment::kAttachPool;
+  const Code& code =
+      Code::Handle(Code::FinalizeCode(function, graph_compiler, assembler,
+                                      pool_attachment, optimized(), stats));
   code.set_is_optimized(optimized());
   code.set_owner(function);
   if (!function.IsOptimizable()) {
@@ -2244,8 +2342,14 @@
       ASSERT(pass_state.inline_id_to_function.length() ==
              pass_state.caller_inline_id.length());
 
+      ASSERT(!FLAG_use_bare_instructions || precompiler_ != nullptr);
+
       ObjectPoolWrapper object_pool;
-      Assembler assembler(&object_pool, use_far_branches);
+      ObjectPoolWrapper* active_object_pool_wrapper =
+          FLAG_use_bare_instructions
+              ? precompiler_->global_object_pool_wrapper()
+              : &object_pool;
+      Assembler assembler(active_object_pool_wrapper, use_far_branches);
 
       CodeStatistics* function_stats = NULL;
       if (FLAG_print_instruction_stats) {
diff --git a/runtime/vm/compiler/aot/precompiler.h b/runtime/vm/compiler/aot/precompiler.h
index 7c4dacac..9454afc 100644
--- a/runtime/vm/compiler/aot/precompiler.h
+++ b/runtime/vm/compiler/aot/precompiler.h
@@ -6,6 +6,7 @@
 #define RUNTIME_VM_COMPILER_AOT_PRECOMPILER_H_
 
 #include "vm/allocation.h"
+#include "vm/compiler/assembler/assembler.h"
 #include "vm/hash_map.h"
 #include "vm/hash_table.h"
 #include "vm/object.h"
@@ -246,8 +247,18 @@
     return get_runtime_type_is_unique_;
   }
 
+  ObjectPoolWrapper* global_object_pool_wrapper() {
+    ASSERT(FLAG_use_bare_instructions);
+    return &global_object_pool_wrapper_;
+  }
+
+  static Precompiler* Instance() { return singleton_; }
+
  private:
+  static Precompiler* singleton_;
+
   explicit Precompiler(Thread* thread);
+  ~Precompiler();
 
   void DoCompileAll();
   void AddRoots();
@@ -258,11 +269,11 @@
   void AddTypesOf(const Class& cls);
   void AddTypesOf(const Function& function);
   void AddTypeArguments(const TypeArguments& args);
-  void AddCalleesOf(const Function& function);
+  void AddCalleesOf(const Function& function, intptr_t gop_offset);
   void AddCalleesOfHelper(const Object& entry,
                           String* temp_selector,
                           Class* temp_cls);
-  void AddConstObject(const Instance& instance);
+  void AddConstObject(const class Instance& instance);
   void AddClosureCall(const Array& arguments_descriptor);
   void AddField(const Field& field);
   void AddFunction(const Function& function);
@@ -320,6 +331,7 @@
   intptr_t dropped_type_count_;
   intptr_t dropped_library_count_;
 
+  ObjectPoolWrapper global_object_pool_wrapper_;
   GrowableObjectArray& libraries_;
   const GrowableObjectArray& pending_functions_;
   SymbolSet sent_selectors_;
diff --git a/runtime/vm/compiler/assembler/assembler.cc b/runtime/vm/compiler/assembler/assembler.cc
index df4e886..f945e3f 100644
--- a/runtime/vm/compiler/assembler/assembler.cc
+++ b/runtime/vm/compiler/assembler/assembler.cc
@@ -250,6 +250,46 @@
   // Unlikely.
   return key.obj_->GetClassId();
 }
+void ObjectPoolWrapper::Reset() {
+  // Null out the handles we've accumulated.
+  for (intptr_t i = 0; i < object_pool_.length(); ++i) {
+    if (object_pool_[i].type() == ObjectPool::kTaggedObject) {
+      *const_cast<Object*>(object_pool_[i].obj_) = Object::null();
+      *const_cast<Object*>(object_pool_[i].equivalence_) = Object::null();
+    }
+  }
+
+  object_pool_.Clear();
+  object_pool_index_table_.Clear();
+}
+
+void ObjectPoolWrapper::InitializeFrom(const ObjectPool& other) {
+  ASSERT(object_pool_.length() == 0);
+
+  for (intptr_t i = 0; i < other.Length(); i++) {
+    auto type = other.TypeAt(i);
+    auto patchable = other.PatchableAt(i);
+    switch (type) {
+      case ObjectPool::kTaggedObject: {
+        ObjectPoolWrapperEntry entry(&Object::ZoneHandle(other.ObjectAt(i)),
+                                     patchable);
+        AddObject(entry);
+        break;
+      }
+      case ObjectPool::kImmediate:
+      case ObjectPool::kNativeFunction:
+      case ObjectPool::kNativeFunctionWrapper: {
+        ObjectPoolWrapperEntry entry(other.RawValueAt(i), type, patchable);
+        AddObject(entry);
+        break;
+      }
+      default:
+        UNREACHABLE();
+    }
+  }
+
+  ASSERT(CurrentLength() == other.Length());
+}
 
 intptr_t ObjectPoolWrapper::AddObject(const Object& obj,
                                       ObjectPool::Patchability patchable) {
@@ -267,6 +307,19 @@
          (entry.obj_->IsNotTemporaryScopedHandle() &&
           (entry.equivalence_ == NULL ||
            entry.equivalence_->IsNotTemporaryScopedHandle())));
+
+  if (entry.type() == ObjectPool::kTaggedObject) {
+    // If the owner of the object pool wrapper specified a specific zone we
+    // should use, we'll do so.
+    if (zone_ != NULL) {
+      entry.obj_ = &Object::ZoneHandle(zone_, entry.obj_->raw());
+      if (entry.equivalence_ != NULL) {
+        entry.equivalence_ =
+            &Object::ZoneHandle(zone_, entry.equivalence_->raw());
+      }
+    }
+  }
+
   object_pool_.Add(entry);
   if (entry.patchable() == ObjectPool::kNotPatchable) {
     // The object isn't patchable. Record the index for fast lookup.
diff --git a/runtime/vm/compiler/assembler/assembler.h b/runtime/vm/compiler/assembler/assembler.h
index 7b25cf1..8d600d5 100644
--- a/runtime/vm/compiler/assembler/assembler.h
+++ b/runtime/vm/compiler/assembler/assembler.h
@@ -370,10 +370,40 @@
 
 class ObjectPoolWrapper : public ValueObject {
  public:
+  ObjectPoolWrapper() : zone_(nullptr) {}
+  ~ObjectPoolWrapper() {
+    if (zone_ != nullptr) {
+      Reset();
+      zone_ = nullptr;
+    }
+  }
+
+  // Clears all existing entries in this object pool builder.
+  //
+  // Note: Any code which has been compiled via this builder might use offsets
+  // into the pool which are not correct anymore.
+  void Reset();
+
+  // Initializes this object pool builder from [other].
+  //
+  // All entries from [other] will be populated, including their
+  // kind/patchability bits.
+  void InitializeFrom(const ObjectPool& other);
+
+  // Initialize this object pool builder with a [zone].
+  //
+  // Any objects added later on will be referenced using handles from [zone].
+  void InitializeWithZone(Zone* zone) {
+    ASSERT(object_pool_.length() == 0);
+    ASSERT(zone_ == nullptr && zone != nullptr);
+    zone_ = zone;
+  }
+
   intptr_t AddObject(
       const Object& obj,
       ObjectPool::Patchability patchable = ObjectPool::kNotPatchable);
   intptr_t AddImmediate(uword imm);
+
   intptr_t FindObject(
       const Object& obj,
       ObjectPool::Patchability patchable = ObjectPool::kNotPatchable);
@@ -386,6 +416,9 @@
 
   RawObjectPool* MakeObjectPool();
 
+  intptr_t CurrentLength() { return object_pool_.length(); }
+  ObjectPoolWrapperEntry& EntryAt(intptr_t i) { return object_pool_[i]; }
+
  private:
   intptr_t AddObject(ObjectPoolWrapperEntry entry);
   intptr_t FindObject(ObjectPoolWrapperEntry entry);
@@ -395,6 +428,11 @@
 
   // Hashmap for fast lookup in object pool.
   DirectChainedHashMap<ObjIndexPair> object_pool_index_table_;
+
+  // The zone used for allocating the handles we keep in the map and array (or
+  // NULL, in which case allocations happen using the zone active at the point
+  // of insertion).
+  Zone* zone_;
 };
 
 enum RestorePP { kRestoreCallerPP, kKeepCalleePP };
diff --git a/runtime/vm/compiler/assembler/assembler_arm.cc b/runtime/vm/compiler/assembler/assembler_arm.cc
index 1889554..53de88b 100644
--- a/runtime/vm/compiler/assembler/assembler_arm.cc
+++ b/runtime/vm/compiler/assembler/assembler_arm.cc
@@ -24,6 +24,7 @@
 
 DECLARE_FLAG(bool, check_code_pointer);
 DECLARE_FLAG(bool, inline_alloc);
+DECLARE_FLAG(bool, precompiled_mode);
 
 uint32_t Address::encoding3() const {
   if (kind_ == Immediate) {
@@ -3162,10 +3163,16 @@
   COMPILE_ASSERT(PP < CODE_REG);
   COMPILE_ASSERT(CODE_REG < FP);
   COMPILE_ASSERT(FP < LR);
-  EnterFrame((1 << PP) | (1 << CODE_REG) | (1 << FP) | (1 << LR), 0);
 
-  // Setup pool pointer for this dart function.
-  LoadPoolPointer();
+  if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
+    EnterFrame((1 << PP) | (1 << CODE_REG) | (1 << FP) | (1 << LR), 0);
+
+    // Setup pool pointer for this dart function.
+    LoadPoolPointer();
+  } else {
+    EnterFrame((1 << FP) | (1 << LR), 0);
+  }
+  set_constant_pool_allowed(true);
 
   // Reserve space for locals.
   AddImmediate(SP, -frame_size);
@@ -3186,8 +3193,10 @@
 }
 
 void Assembler::LeaveDartFrame() {
-  ldr(PP,
-      Address(FP, compiler_frame_layout.saved_caller_pp_from_fp * kWordSize));
+  if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
+    ldr(PP,
+        Address(FP, compiler_frame_layout.saved_caller_pp_from_fp * kWordSize));
+  }
   set_constant_pool_allowed(false);
 
   // This will implicitly drop saved PP, PC marker due to restoring SP from FP
@@ -3196,8 +3205,10 @@
 }
 
 void Assembler::LeaveDartFrameAndReturn() {
-  ldr(PP,
-      Address(FP, compiler_frame_layout.saved_caller_pp_from_fp * kWordSize));
+  if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
+    ldr(PP,
+        Address(FP, compiler_frame_layout.saved_caller_pp_from_fp * kWordSize));
+  }
   set_constant_pool_allowed(false);
 
   // This will implicitly drop saved PP, PC marker due to restoring SP from FP
diff --git a/runtime/vm/compiler/assembler/assembler_arm64.cc b/runtime/vm/compiler/assembler/assembler_arm64.cc
index 1f44fdd..7ca178c 100644
--- a/runtime/vm/compiler/assembler/assembler_arm64.cc
+++ b/runtime/vm/compiler/assembler/assembler_arm64.cc
@@ -18,6 +18,7 @@
 
 DECLARE_FLAG(bool, check_code_pointer);
 DECLARE_FLAG(bool, inline_alloc);
+DECLARE_FLAG(bool, precompiled_mode);
 
 DEFINE_FLAG(bool, use_far_branches, false, "Always use far branches");
 
@@ -1250,15 +1251,18 @@
   ASSERT(!constant_pool_allowed());
   // Setup the frame.
   EnterFrame(0);
-  TagAndPushPPAndPcMarker();  // Save PP and PC marker.
 
-  // Load the pool pointer.
-  if (new_pp == kNoRegister) {
-    LoadPoolPointer();
-  } else {
-    mov(PP, new_pp);
-    set_constant_pool_allowed(true);
+  if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
+    TagAndPushPPAndPcMarker();  // Save PP and PC marker.
+
+    // Load the pool pointer.
+    if (new_pp == kNoRegister) {
+      LoadPoolPointer();
+    } else {
+      mov(PP, new_pp);
+    }
   }
+  set_constant_pool_allowed(true);
 
   // Reserve space.
   if (frame_size > 0) {
@@ -1283,13 +1287,15 @@
 }
 
 void Assembler::LeaveDartFrame(RestorePP restore_pp) {
-  if (restore_pp == kRestoreCallerPP) {
-    set_constant_pool_allowed(false);
-    // Restore and untag PP.
-    LoadFromOffset(PP, FP,
-                   compiler_frame_layout.saved_caller_pp_from_fp * kWordSize);
-    sub(PP, PP, Operand(kHeapObjectTag));
+  if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
+    if (restore_pp == kRestoreCallerPP) {
+      // Restore and untag PP.
+      LoadFromOffset(PP, FP,
+                     compiler_frame_layout.saved_caller_pp_from_fp * kWordSize);
+      sub(PP, PP, Operand(kHeapObjectTag));
+    }
   }
+  set_constant_pool_allowed(false);
   LeaveFrame();
 }
 
@@ -1325,7 +1331,8 @@
   const intptr_t kPushedRegistersSize =
       kDartVolatileCpuRegCount * kWordSize +
       kDartVolatileFpuRegCount * kWordSize +
-      2 * kWordSize;  // PP and pc marker from EnterStubFrame.
+      (compiler_frame_layout.dart_fixed_frame_size - 2) *
+          kWordSize;  // From EnterStubFrame (excluding PC / FP)
   AddImmediate(SP, FP, -kPushedRegistersSize);
   for (int i = kDartLastVolatileCpuReg; i >= kDartFirstVolatileCpuReg; i--) {
     const Register reg = static_cast<Register>(i);
diff --git a/runtime/vm/compiler/assembler/assembler_x64.cc b/runtime/vm/compiler/assembler/assembler_x64.cc
index f070f44..1d6be01 100644
--- a/runtime/vm/compiler/assembler/assembler_x64.cc
+++ b/runtime/vm/compiler/assembler/assembler_x64.cc
@@ -21,6 +21,7 @@
 
 DECLARE_FLAG(bool, check_code_pointer);
 DECLARE_FLAG(bool, inline_alloc);
+DECLARE_FLAG(bool, precompiled_mode);
 
 Assembler::Assembler(ObjectPoolWrapper* object_pool_wrapper,
                      bool use_far_branches)
@@ -1531,7 +1532,9 @@
   const intptr_t kPushedRegistersSize =
       kPushedCpuRegistersCount * kWordSize +
       kPushedXmmRegistersCount * kFpuRegisterSize +
-      2 * kWordSize;  // PP, pc marker from EnterStubFrame
+      (compiler_frame_layout.dart_fixed_frame_size - 2) *
+          kWordSize;  // From EnterStubFrame (excluding PC / FP)
+
   leaq(RSP, Address(RBP, -kPushedRegistersSize));
 
   // TODO(vegorov): avoid saving FpuTMP, it is used only as scratch.
@@ -1568,12 +1571,14 @@
 void Assembler::EnterDartFrame(intptr_t frame_size, Register new_pp) {
   ASSERT(!constant_pool_allowed());
   EnterFrame(0);
-  pushq(CODE_REG);
-  pushq(PP);
-  if (new_pp == kNoRegister) {
-    LoadPoolPointer(PP);
-  } else {
-    movq(PP, new_pp);
+  if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
+    pushq(CODE_REG);
+    pushq(PP);
+    if (new_pp == kNoRegister) {
+      LoadPoolPointer(PP);
+    } else {
+      movq(PP, new_pp);
+    }
   }
   set_constant_pool_allowed(true);
   if (frame_size != 0) {
@@ -1583,11 +1588,13 @@
 
 void Assembler::LeaveDartFrame(RestorePP restore_pp) {
   // Restore caller's PP register that was pushed in EnterDartFrame.
-  if (restore_pp == kRestoreCallerPP) {
-    movq(PP, Address(RBP, (compiler_frame_layout.saved_caller_pp_from_fp *
-                           kWordSize)));
-    set_constant_pool_allowed(false);
+  if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
+    if (restore_pp == kRestoreCallerPP) {
+      movq(PP, Address(RBP, (compiler_frame_layout.saved_caller_pp_from_fp *
+                             kWordSize)));
+    }
   }
+  set_constant_pool_allowed(false);
   LeaveFrame();
 }
 
diff --git a/runtime/vm/compiler/assembler/disassembler.cc b/runtime/vm/compiler/assembler/disassembler.cc
index 11138e6..8cf7adc 100644
--- a/runtime/vm/compiler/assembler/disassembler.cc
+++ b/runtime/vm/compiler/assembler/disassembler.cc
@@ -329,19 +329,23 @@
     THR_Print("Static call target functions {\n");
     const auto& table = Array::Handle(zone, code.static_calls_target_table());
     auto& cls = Class::Handle(zone);
-    auto& kind_and_offset = Smi::Handle(zone);
+    auto& kind_type_and_offset = Smi::Handle(zone);
     auto& function = Function::Handle(zone);
     auto& code = Code::Handle(zone);
     if (!table.IsNull()) {
       StaticCallsTable static_calls(table);
       for (auto& call : static_calls) {
-        kind_and_offset = call.Get<Code::kSCallTableKindAndOffset>();
+        kind_type_and_offset = call.Get<Code::kSCallTableKindAndOffset>();
         function = call.Get<Code::kSCallTableFunctionTarget>();
         code = call.Get<Code::kSCallTableCodeTarget>();
 
-        auto kind = Code::KindField::decode(kind_and_offset.Value());
-        auto offset = Code::OffsetField::decode(kind_and_offset.Value());
+        auto kind = Code::KindField::decode(kind_type_and_offset.Value());
+        auto offset = Code::OffsetField::decode(kind_type_and_offset.Value());
+        auto entry_point =
+            Code::EntryPointField::decode(kind_type_and_offset.Value());
 
+        const char* s_entry_point =
+            entry_point == Code::kUncheckedEntry ? " <unchecked-entry>" : "";
         const char* skind = nullptr;
         switch (kind) {
           case Code::kPcRelativeCall:
@@ -359,15 +363,17 @@
         if (function.IsNull()) {
           cls ^= code.owner();
           if (cls.IsNull()) {
-            THR_Print("  0x%" Px ": %s, %p (%s)\n", start + offset,
-                      code.QualifiedName(), code.raw(), skind);
+            THR_Print("  0x%" Px ": %s, %p (%s)%s\n", start + offset,
+                      code.QualifiedName(), code.raw(), skind, s_entry_point);
           } else {
-            THR_Print("  0x%" Px ": allocation stub for %s, %p (%s)\n",
-                      start + offset, cls.ToCString(), code.raw(), skind);
+            THR_Print("  0x%" Px ": allocation stub for %s, %p (%s)%s\n",
+                      start + offset, cls.ToCString(), code.raw(), skind,
+                      s_entry_point);
           }
         } else {
-          THR_Print("  0x%" Px ": %s, %p (%s)\n", start + offset,
-                    function.ToFullyQualifiedCString(), code.raw(), skind);
+          THR_Print("  0x%" Px ": %s, %p (%s)%s\n", start + offset,
+                    function.ToFullyQualifiedCString(), code.raw(), skind,
+                    s_entry_point);
         }
       }
     }
@@ -389,6 +395,11 @@
   DisassembleCodeHelper(function_fullname, code, optimized);
 }
 
+#else   // !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER)
+
+void Disassembler::DisassembleCode(const Function& function,
+                                   const Code& code,
+                                   bool optimized) {}
 #endif  // !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER)
 
 }  // namespace dart
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler.cc b/runtime/vm/compiler/backend/flow_graph_compiler.cc
index c4c8732..cf9146c 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler.cc
@@ -693,29 +693,40 @@
                                           null_check_name_idx);
 }
 
-void FlowGraphCompiler::AddPcRelativeCallTarget(const Function& function) {
+void FlowGraphCompiler::AddPcRelativeCallTarget(const Function& function,
+                                                Code::EntryKind entry_kind) {
   ASSERT(function.IsZoneHandle());
-  static_calls_target_table_.Add(new (zone()) StaticCallsStruct(
-      Code::kPcRelativeCall, assembler()->CodeSize(), &function, NULL));
+  const auto entry_point = entry_kind == Code::EntryKind::kUnchecked
+                               ? Code::kUncheckedEntry
+                               : Code::kDefaultEntry;
+  static_calls_target_table_.Add(
+      new (zone()) StaticCallsStruct(Code::kPcRelativeCall, entry_point,
+                                     assembler()->CodeSize(), &function, NULL));
 }
 
 void FlowGraphCompiler::AddPcRelativeCallStubTarget(const Code& stub_code) {
   ASSERT(stub_code.IsZoneHandle() || stub_code.IsReadOnlyHandle());
   ASSERT(!stub_code.IsNull());
   static_calls_target_table_.Add(new (zone()) StaticCallsStruct(
-      Code::kPcRelativeCall, assembler()->CodeSize(), NULL, &stub_code));
+      Code::kPcRelativeCall, Code::kDefaultEntry, assembler()->CodeSize(), NULL,
+      &stub_code));
 }
 
-void FlowGraphCompiler::AddStaticCallTarget(const Function& func) {
+void FlowGraphCompiler::AddStaticCallTarget(const Function& func,
+                                            Code::EntryKind entry_kind) {
   ASSERT(func.IsZoneHandle());
+  const auto entry_point = entry_kind == Code::EntryKind::kUnchecked
+                               ? Code::kUncheckedEntry
+                               : Code::kDefaultEntry;
   static_calls_target_table_.Add(new (zone()) StaticCallsStruct(
-      Code::kCallViaCode, assembler()->CodeSize(), &func, NULL));
+      Code::kCallViaCode, entry_point, assembler()->CodeSize(), &func, NULL));
 }
 
 void FlowGraphCompiler::AddStubCallTarget(const Code& code) {
   ASSERT(code.IsZoneHandle() || code.IsReadOnlyHandle());
-  static_calls_target_table_.Add(new (zone()) StaticCallsStruct(
-      Code::kCallViaCode, assembler()->CodeSize(), NULL, &code));
+  static_calls_target_table_.Add(
+      new (zone()) StaticCallsStruct(Code::kCallViaCode, Code::kDefaultEntry,
+                                     assembler()->CodeSize(), NULL, &code));
 }
 
 CompilerDeoptInfo* FlowGraphCompiler::AddDeoptIndexAtCall(intptr_t deopt_id) {
@@ -1075,13 +1086,15 @@
       Array::Handle(zone(), Array::New(array_length, Heap::kOld));
 
   StaticCallsTable entries(targets);
-  auto& kind_and_offset = Smi::Handle(zone());
+  auto& kind_type_and_offset = Smi::Handle(zone());
   for (intptr_t i = 0; i < calls.length(); i++) {
     auto entry = calls[i];
-    kind_and_offset = Smi::New(Code::KindField::encode(entry->call_kind) |
-                               Code::OffsetField::encode(entry->offset));
+    kind_type_and_offset =
+        Smi::New(Code::KindField::encode(entry->call_kind) |
+                 Code::EntryPointField::encode(entry->entry_point) |
+                 Code::OffsetField::encode(entry->offset));
     auto view = entries[i];
-    view.Set<Code::kSCallTableKindAndOffset>(kind_and_offset);
+    view.Set<Code::kSCallTableKindAndOffset>(kind_type_and_offset);
     const Object* target = nullptr;
     if (entry->function != nullptr) {
       view.Set<Code::kSCallTableFunctionTarget>(*calls[i]->function);
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler.h b/runtime/vm/compiler/backend/flow_graph_compiler.h
index 0c0175a..f0c0af9 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler.h
+++ b/runtime/vm/compiler/backend/flow_graph_compiler.h
@@ -773,9 +773,11 @@
 
   void EmitFrameEntry();
 
-  void AddPcRelativeCallTarget(const Function& function);
+  void AddPcRelativeCallTarget(const Function& function,
+                               Code::EntryKind entry_kind);
   void AddPcRelativeCallStubTarget(const Code& stub_code);
-  void AddStaticCallTarget(const Function& function);
+  void AddStaticCallTarget(const Function& function,
+                           Code::EntryKind entry_kind);
 
   void GenerateDeferredCode();
 
@@ -927,14 +929,17 @@
   class StaticCallsStruct : public ZoneAllocated {
    public:
     Code::CallKind call_kind;
+    Code::CallEntryPoint entry_point;
     const intptr_t offset;
     const Function* function;  // Can be NULL.
     const Code* code;          // Can be NULL.
     StaticCallsStruct(Code::CallKind call_kind,
+                      Code::CallEntryPoint entry_point,
                       intptr_t offset_arg,
                       const Function* function_arg,
                       const Code* code_arg)
         : call_kind(call_kind),
+          entry_point(entry_point),
           offset(offset_arg),
           function(function_arg),
           code(code_arg) {
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_arm.cc b/runtime/vm/compiler/backend/flow_graph_compiler_arm.cc
index 58b6008..ac3b643 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_arm.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_arm.cc
@@ -787,19 +787,23 @@
       extracted_method, ObjectPool::Patchability::kNotPatchable);
 
   // We use a custom pool register to preserve caller PP.
-  const Register kPoolReg = R0;
+  Register kPoolReg = R0;
 
   // R1 = extracted function
   // R4 = offset of type argument vector (or 0 if class is not generic)
-  __ LoadFieldFromOffset(kWord, kPoolReg, CODE_REG, Code::object_pool_offset());
+  if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
+    kPoolReg = PP;
+  } else {
+    __ LoadFieldFromOffset(kWord, kPoolReg, CODE_REG,
+                           Code::object_pool_offset());
+  }
   __ LoadImmediate(R4, type_arguments_field_offset);
   __ LoadFieldFromOffset(kWord, R1, kPoolReg,
                          ObjectPool::element_offset(function_index));
   __ LoadFieldFromOffset(kWord, CODE_REG, kPoolReg,
                          ObjectPool::element_offset(stub_index));
-  __ LoadFieldFromOffset(kWord, R3, CODE_REG,
-                         Code::entry_point_offset(Code::EntryKind::kUnchecked));
-  __ bx(R3);
+  __ Branch(FieldAddress(
+      CODE_REG, Code::entry_point_offset(Code::EntryKind::kUnchecked)));
 }
 
 void FlowGraphCompiler::GenerateGetterIntrinsic(intptr_t offset) {
@@ -833,9 +837,11 @@
       (!is_optimizing() || may_reoptimize())) {
     __ Comment("Invocation Count Check");
     const Register function_reg = R8;
-    // The pool pointer is not setup before entering the Dart frame.
-    // Temporarily setup pool pointer for this dart function.
-    __ LoadPoolPointer(new_pp);
+    if (!FLAG_precompiled_mode || !FLAG_use_bare_instructions) {
+      // The pool pointer is not setup before entering the Dart frame.
+      // Temporarily setup pool pointer for this dart function.
+      __ LoadPoolPointer(new_pp);
+    }
     // Load function object from object pool.
     __ LoadFunctionFromCalleePool(function_reg, function, new_pp);
 
@@ -918,9 +924,16 @@
                                      const Code& stub,
                                      RawPcDescriptors::Kind kind,
                                      LocationSummary* locs) {
-  __ BranchLink(stub);
-  EmitCallsiteMetadata(token_pos, DeoptId::kNone, kind, locs);
-  AddStubCallTarget(stub);
+  if (FLAG_precompiled_mode && FLAG_use_bare_instructions && !stub.InVMHeap()) {
+    AddPcRelativeCallStubTarget(stub);
+    __ GenerateUnRelocatedPcRelativeCall();
+    EmitCallsiteMetadata(token_pos, DeoptId::kNone, kind, locs);
+  } else {
+    ASSERT(!stub.IsNull());
+    __ BranchLink(stub);
+    EmitCallsiteMetadata(token_pos, DeoptId::kNone, kind, locs);
+    AddStubCallTarget(stub);
+  }
 }
 
 void FlowGraphCompiler::GeneratePatchableCall(TokenPosition token_pos,
@@ -947,14 +960,21 @@
                                                LocationSummary* locs,
                                                const Function& target,
                                                Code::EntryKind entry_kind) {
-  // Call sites to the same target can share object pool entries. These
-  // call sites are never patched for breakpoints: the function is deoptimized
-  // and the unoptimized code with IC calls for static calls is patched instead.
-  ASSERT(is_optimizing());
-  const auto& stub = StubCode::CallStaticFunction();
-  __ BranchLinkWithEquivalence(stub, target, entry_kind);
-  EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
-  AddStaticCallTarget(target);
+  if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
+    AddPcRelativeCallTarget(target, entry_kind);
+    __ GenerateUnRelocatedPcRelativeCall();
+    EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
+  } else {
+    ASSERT(is_optimizing());
+    // Call sites to the same target can share object pool entries. These
+    // call sites are never patched for breakpoints: the function is deoptimized
+    // and the unoptimized code with IC calls for static calls is patched
+    // instead.
+    const auto& stub = StubCode::CallStaticFunction();
+    __ BranchLinkWithEquivalence(stub, target, entry_kind);
+    EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
+    AddStaticCallTarget(target, entry_kind);
+  }
 }
 
 void FlowGraphCompiler::GenerateRuntimeCall(TokenPosition token_pos,
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc b/runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc
index e3d2594..60cec6f 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc
@@ -766,17 +766,23 @@
       extracted_method, ObjectPool::Patchability::kNotPatchable);
 
   // We use a custom pool register to preserve caller PP.
-  const Register kPoolReg = R0;
+  Register kPoolReg = R0;
 
   // R1 = extracted function
   // R4 = offset of type argument vector (or 0 if class is not generic)
-  __ ldr(kPoolReg, FieldAddress(CODE_REG, Code::object_pool_offset()));
-  __ LoadFieldFromOffset(kPoolReg, CODE_REG, Code::object_pool_offset());
+  intptr_t pp_offset = 0;
+  if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
+    // PP is not tagged on arm64.
+    kPoolReg = PP;
+    pp_offset = kHeapObjectTag;
+  } else {
+    __ LoadFieldFromOffset(kPoolReg, CODE_REG, Code::object_pool_offset());
+  }
   __ LoadImmediate(R4, type_arguments_field_offset);
-  __ LoadFieldFromOffset(R1, kPoolReg,
-                         ObjectPool::element_offset(function_index));
+  __ LoadFieldFromOffset(
+      R1, kPoolReg, ObjectPool::element_offset(function_index) + pp_offset);
   __ LoadFieldFromOffset(CODE_REG, kPoolReg,
-                         ObjectPool::element_offset(stub_index));
+                         ObjectPool::element_offset(stub_index) + pp_offset);
   __ LoadFieldFromOffset(R0, CODE_REG,
                          Code::entry_point_offset(Code::EntryKind::kUnchecked));
   __ br(R0);
@@ -813,9 +819,11 @@
     __ Comment("Invocation Count Check");
     const Register function_reg = R6;
     new_pp = R13;
-    // The pool pointer is not setup before entering the Dart frame.
-    // Temporarily setup pool pointer for this dart function.
-    __ LoadPoolPointer(new_pp);
+    if (!FLAG_precompiled_mode || !FLAG_use_bare_instructions) {
+      // The pool pointer is not setup before entering the Dart frame.
+      // Temporarily setup pool pointer for this dart function.
+      __ LoadPoolPointer(new_pp);
+    }
 
     // Load function object using the callee's pool pointer.
     __ LoadFunctionFromCalleePool(function_reg, function, new_pp);
@@ -912,9 +920,16 @@
                                      const Code& stub,
                                      RawPcDescriptors::Kind kind,
                                      LocationSummary* locs) {
-  __ BranchLink(stub);
-  EmitCallsiteMetadata(token_pos, DeoptId::kNone, kind, locs);
-  AddStubCallTarget(stub);
+  if (FLAG_precompiled_mode && FLAG_use_bare_instructions && !stub.InVMHeap()) {
+    AddPcRelativeCallStubTarget(stub);
+    __ GenerateUnRelocatedPcRelativeCall();
+    EmitCallsiteMetadata(token_pos, DeoptId::kNone, kind, locs);
+  } else {
+    ASSERT(!stub.IsNull());
+    __ BranchLink(stub);
+    EmitCallsiteMetadata(token_pos, DeoptId::kNone, kind, locs);
+    AddStubCallTarget(stub);
+  }
 }
 
 void FlowGraphCompiler::GeneratePatchableCall(TokenPosition token_pos,
@@ -943,14 +958,21 @@
                                                const Function& target,
                                                Code::EntryKind entry_kind) {
   // TODO(sjindel/entrypoints): Support multiple entrypoints on ARM64.
-  // Call sites to the same target can share object pool entries. These
-  // call sites are never patched for breakpoints: the function is deoptimized
-  // and the unoptimized code with IC calls for static calls is patched instead.
-  ASSERT(is_optimizing());
-  const Code& stub = StubCode::CallStaticFunction();
-  __ BranchLinkWithEquivalence(stub, target);
-  EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
-  AddStaticCallTarget(target);
+  if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
+    AddPcRelativeCallTarget(target, entry_kind);
+    __ GenerateUnRelocatedPcRelativeCall();
+    EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
+  } else {
+    // Call sites to the same target can share object pool entries. These
+    // call sites are never patched for breakpoints: the function is deoptimized
+    // and the unoptimized code with IC calls for static calls is patched
+    // instead.
+    ASSERT(is_optimizing());
+    const auto& stub = StubCode::CallStaticFunction();
+    __ BranchLinkWithEquivalence(stub, target);
+    EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
+    AddStaticCallTarget(target, entry_kind);
+  }
 }
 
 void FlowGraphCompiler::GenerateRuntimeCall(TokenPosition token_pos,
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_ia32.cc b/runtime/vm/compiler/backend/flow_graph_compiler_ia32.cc
index 1fa3e5d..d7d0a04 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_ia32.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_ia32.cc
@@ -860,7 +860,7 @@
   const auto& stub = StubCode::CallStaticFunction();
   __ Call(stub, true /* movable_target */);
   EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
-  AddStaticCallTarget(target);
+  AddStaticCallTarget(target, entry_kind);
 }
 
 void FlowGraphCompiler::GenerateRuntimeCall(TokenPosition token_pos,
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_x64.cc b/runtime/vm/compiler/backend/flow_graph_compiler_x64.cc
index 4a3574e..6c32aab 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_x64.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_x64.cc
@@ -777,6 +777,7 @@
 
   const Code& build_method_extractor = Code::ZoneHandle(
       isolate()->object_store()->build_method_extractor_code());
+  ASSERT(!build_method_extractor.IsNull());
 
   const intptr_t stub_index = __ object_pool_wrapper().AddObject(
       build_method_extractor, ObjectPool::Patchability::kNotPatchable);
@@ -784,11 +785,15 @@
       extracted_method, ObjectPool::Patchability::kNotPatchable);
 
   // We use a custom pool register to preserve caller PP.
-  const Register kPoolReg = RAX;
+  Register kPoolReg = RAX;
 
   // RBX = extracted function
   // RDX = offset of type argument vector (or 0 if class is not generic)
-  __ movq(kPoolReg, FieldAddress(CODE_REG, Code::object_pool_offset()));
+  if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
+    kPoolReg = PP;
+  } else {
+    __ movq(kPoolReg, FieldAddress(CODE_REG, Code::object_pool_offset()));
+  }
   __ movq(RDX, Immediate(type_arguments_field_offset));
   __ movq(RBX,
           FieldAddress(kPoolReg, ObjectPool::element_offset(function_index)));
@@ -830,7 +835,9 @@
     __ EnterOsrFrame(extra_slots * kWordSize);
   } else {
     const Register new_pp = R13;
-    __ LoadPoolPointer(new_pp);
+    if (!FLAG_precompiled_mode || !FLAG_use_bare_instructions) {
+      __ LoadPoolPointer(new_pp);
+    }
 
     const Function& function = parsed_function().function();
     if (CanOptimizeFunction() && function.IsOptimizable() &&
@@ -909,9 +916,16 @@
                                      const Code& stub,
                                      RawPcDescriptors::Kind kind,
                                      LocationSummary* locs) {
-  __ Call(stub);
-  EmitCallsiteMetadata(token_pos, DeoptId::kNone, kind, locs);
-  AddStubCallTarget(stub);
+  if (FLAG_precompiled_mode && FLAG_use_bare_instructions && !stub.InVMHeap()) {
+    AddPcRelativeCallStubTarget(stub);
+    __ GenerateUnRelocatedPcRelativeCall();
+    EmitCallsiteMetadata(token_pos, DeoptId::kNone, kind, locs);
+  } else {
+    ASSERT(!stub.IsNull());
+    __ Call(stub);
+    EmitCallsiteMetadata(token_pos, DeoptId::kNone, kind, locs);
+    AddStubCallTarget(stub);
+  }
 }
 
 void FlowGraphCompiler::GeneratePatchableCall(TokenPosition token_pos,
@@ -938,14 +952,21 @@
                                                LocationSummary* locs,
                                                const Function& target,
                                                Code::EntryKind entry_kind) {
-  // Call sites to the same target can share object pool entries. These
-  // call sites are never patched for breakpoints: the function is deoptimized
-  // and the unoptimized code with IC calls for static calls is patched instead.
   ASSERT(is_optimizing());
-  const auto& stub_entry = StubCode::CallStaticFunction();
-  __ CallWithEquivalence(stub_entry, target, entry_kind);
-  EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
-  AddStaticCallTarget(target);
+  if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
+    AddPcRelativeCallTarget(target, entry_kind);
+    __ GenerateUnRelocatedPcRelativeCall();
+    EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
+  } else {
+    // Call sites to the same target can share object pool entries. These
+    // call sites are never patched for breakpoints: the function is deoptimized
+    // and the unoptimized code with IC calls for static calls is patched
+    // instead.
+    const auto& stub_entry = StubCode::CallStaticFunction();
+    __ CallWithEquivalence(stub_entry, target, entry_kind);
+    EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
+    AddStaticCallTarget(target, entry_kind);
+  }
 }
 
 void FlowGraphCompiler::GenerateRuntimeCall(TokenPosition token_pos,
diff --git a/runtime/vm/compiler/frontend/kernel_translation_helper.cc b/runtime/vm/compiler/frontend/kernel_translation_helper.cc
index 319ef4a..da70fd8 100644
--- a/runtime/vm/compiler/frontend/kernel_translation_helper.cc
+++ b/runtime/vm/compiler/frontend/kernel_translation_helper.cc
@@ -111,7 +111,8 @@
 }
 
 void TranslationHelper::SetConstants(const Array& constants) {
-  ASSERT(constants_.IsNull());
+  ASSERT(constants_.IsNull() ||
+         (constants.IsNull() || constants.Length() == 0));
   constants_ = constants.raw();
 }
 
diff --git a/runtime/vm/compiler/relocation.cc b/runtime/vm/compiler/relocation.cc
index 4c7384c..fc568c3 100644
--- a/runtime/vm/compiler/relocation.cc
+++ b/runtime/vm/compiler/relocation.cc
@@ -52,13 +52,15 @@
   GrowableArray<RawCode*> callers;
   // The offset from the instruction at which the call happens.
   GrowableArray<intptr_t> call_offsets;
+  // Type entry-point type we call in the destination.
+  GrowableArray<Code::CallEntryPoint> call_entry_points;
   // The offset in the .text segment where the call happens.
   GrowableArray<intptr_t> text_offsets;
   // The target of the forward call.
   GrowableArray<RawCode*> callees;
 
   auto& targets = Array::Handle(zone);
-  auto& kind_and_offset = Smi::Handle(zone);
+  auto& kind_type_and_offset = Smi::Handle(zone);
   auto& target = Object::Handle(zone);
   auto& destination = Code::Handle(zone);
   auto& instructions = Instructions::Handle(zone);
@@ -86,9 +88,11 @@
     if (!targets.IsNull()) {
       StaticCallsTable calls(targets);
       for (auto call : calls) {
-        kind_and_offset = call.Get<Code::kSCallTableKindAndOffset>();
-        auto kind = Code::KindField::decode(kind_and_offset.Value());
-        auto offset = Code::OffsetField::decode(kind_and_offset.Value());
+        kind_type_and_offset = call.Get<Code::kSCallTableKindAndOffset>();
+        auto kind = Code::KindField::decode(kind_type_and_offset.Value());
+        auto offset = Code::OffsetField::decode(kind_type_and_offset.Value());
+        auto entry_point =
+            Code::EntryPointField::decode(kind_type_and_offset.Value());
 
         if (kind == Code::kCallViaCode) {
           continue;
@@ -113,6 +117,7 @@
         callees.Add(destination.raw());
         text_offsets.Add(start_of_call);
         call_offsets.Add(offset);
+        call_entry_points.Add(entry_point);
       }
     }
   }
@@ -125,12 +130,16 @@
     callee = callees[i];
     const intptr_t text_offset = text_offsets[i];
     const intptr_t call_offset = call_offsets[i];
+    const bool use_unchecked_entry =
+        call_entry_points[i] == Code::kUncheckedEntry;
     caller_instruction = caller.instructions();
     destination_instruction = callee.instructions();
 
-    const intptr_t unchecked_offset = destination_instruction.HeaderSize() +
-                                      (destination_instruction.EntryPoint() -
-                                       destination_instruction.PayloadStart());
+    const uword entry_point = use_unchecked_entry ? callee.UncheckedEntryPoint()
+                                                  : callee.EntryPoint();
+    const intptr_t unchecked_offset =
+        destination_instruction.HeaderSize() +
+        (entry_point - destination_instruction.PayloadStart());
 
     auto map_entry = instructions_map.Lookup(destination_instruction.raw());
     auto& dst = (*commands_)[map_entry->inst_nr];
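
The entry-point bit recorded per call site above lets the relocator aim the
branch at either the default or the unchecked entry of the callee. The
distance computation itself is simple once final .text offsets are known; a
minimal sketch, assuming caller and callee payloads end up in one contiguous
.text segment (names are illustrative):

  #include <cstdint>

  // Offsets are relative to the start of the .text segment.
  struct TargetInfo {
    int64_t payload_start;           // where the callee's instructions begin
    int64_t entry_offset;            // default entry, relative to payload_start
    int64_t unchecked_entry_offset;  // unchecked entry, relative to payload_start
  };

  // Distance the patched branch has to cover, from the call instruction in
  // the caller to the chosen entry point in the callee.
  int64_t CallDisplacement(int64_t caller_payload_start, int64_t call_offset,
                           const TargetInfo& callee, bool use_unchecked_entry) {
    const int64_t call_pc = caller_payload_start + call_offset;
    const int64_t entry = callee.payload_start +
                          (use_unchecked_entry ? callee.unchecked_entry_offset
                                               : callee.entry_offset);
    return entry - call_pc;
  }
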
diff --git a/runtime/vm/dart.cc b/runtime/vm/dart.cc
index 6f64118..15b6f71 100644
--- a/runtime/vm/dart.cc
+++ b/runtime/vm/dart.cc
@@ -27,6 +27,7 @@
 #include "vm/object_store.h"
 #include "vm/port.h"
 #include "vm/profiler.h"
+#include "vm/reverse_pc_lookup_cache.h"
 #include "vm/service_isolate.h"
 #include "vm/simulator.h"
 #include "vm/snapshot.h"
@@ -248,6 +249,9 @@
         // Must copy before leaving the zone.
         return strdup(error.ToErrorCString());
       }
+
+      ReversePcLookupCache::BuildAndAttachToIsolate(vm_isolate_);
+
       Object::FinishInit(vm_isolate_);
 #if !defined(PRODUCT)
       if (tds.enabled()) {
@@ -604,6 +608,9 @@
     if (!error.IsNull()) {
       return error.raw();
     }
+
+    ReversePcLookupCache::BuildAndAttachToIsolate(I);
+
 #if !defined(PRODUCT)
     if (tds.enabled()) {
       tds.SetNumArguments(2);
@@ -630,17 +637,17 @@
 #if defined(DART_PRECOMPILED_RUNTIME)
   // AOT: The megamorphic miss function and code come from the snapshot.
   ASSERT(I->object_store()->megamorphic_miss_code() != Code::null());
+  ASSERT(I->object_store()->build_method_extractor_code() != Code::null());
 #else
   // JIT: The megamorphic miss function and code come from the snapshot in JIT
   // app snapshot, otherwise create them.
   if (I->object_store()->megamorphic_miss_code() == Code::null()) {
     MegamorphicCacheTable::InitMissHandler(I);
   }
-
 #if !defined(TARGET_ARCH_DBC) && !defined(TARGET_ARCH_IA32)
   if (I != Dart::vm_isolate()) {
     I->object_store()->set_build_method_extractor_code(
-        Code::Handle(StubCode::GetBuildMethodExtractorStub()));
+        Code::Handle(StubCode::GetBuildMethodExtractorStub(nullptr)));
   }
 #endif
 #endif  // defined(DART_PRECOMPILED_RUNTIME)
@@ -716,6 +723,10 @@
     ADD_FLAG(asserts, enable_asserts, FLAG_enable_asserts);
     // sync-async affects deopt_ids.
     buffer.AddString(FLAG_sync_async ? " sync_async" : " no-sync_async");
+    if (kind == Snapshot::kFullAOT) {
+      ADD_FLAG(use_bare_instructions, use_bare_instructions,
+               FLAG_use_bare_instructions);
+    }
     if (kind == Snapshot::kFullJIT) {
       ADD_FLAG(use_field_guards, use_field_guards, FLAG_use_field_guards);
       ADD_FLAG(use_osr, use_osr, FLAG_use_osr);
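
ReversePcLookupCache::BuildAndAttachToIsolate is run for both the vm isolate
and regular isolates so the runtime can map a return address back to its Code
object, since bare frames no longer spill a code pointer. Conceptually this is
a binary search over code ranges sorted by start PC; a small self-contained
sketch of such a lookup (not the VM's actual table layout):

  #include <algorithm>
  #include <cassert>
  #include <cstdint>
  #include <vector>

  struct CodeRange {
    uintptr_t start;  // first PC covered by this code object
    uintptr_t end;    // one past the last covered PC
    int code_id;      // stands in for the Code object
  };

  // Binary search over ranges sorted by |start|; returns -1 if PC is unmapped.
  int LookupCodeId(const std::vector<CodeRange>& sorted, uintptr_t pc) {
    auto it = std::upper_bound(
        sorted.begin(), sorted.end(), pc,
        [](uintptr_t value, const CodeRange& range) { return value < range.start; });
    if (it == sorted.begin()) return -1;
    --it;
    return (pc >= it->start && pc < it->end) ? it->code_id : -1;
  }

  int main() {
    std::vector<CodeRange> table = {{0x1000, 0x1040, 1}, {0x1040, 0x10a0, 2}};
    assert(LookupCodeId(table, 0x1048) == 2);
    assert(LookupCodeId(table, 0x0fff) == -1);
    return 0;
  }
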
diff --git a/runtime/vm/dart_entry.cc b/runtime/vm/dart_entry.cc
index ca6df752..4e0acbc 100644
--- a/runtime/vm/dart_entry.cc
+++ b/runtime/vm/dart_entry.cc
@@ -21,6 +21,7 @@
 namespace dart {
 
 DECLARE_FLAG(bool, enable_interpreter);
+DECLARE_FLAG(bool, precompiled_mode);
 
 // A cache of VM heap allocated arguments descriptors.
 RawArray* ArgumentsDescriptor::cached_args_descriptors_[kCachedDescriptorCount];
@@ -116,11 +117,18 @@
   // We use a kernel2kernel constant evaluator in Dart 2.0 AOT compilation
   // and never start the VM service isolate. So we should never end up invoking
   // any dart code in the Dart 2.0 AOT compiler.
-#if !defined(DART_PRECOMPILED_RUNTIME)
   if (FLAG_precompiled_mode) {
+#if !defined(DART_PRECOMPILED_RUNTIME)
     UNREACHABLE();
-  }
+#else
+    if (FLAG_use_bare_instructions) {
+      Thread* thread = Thread::Current();
+      thread->set_global_object_pool(
+          thread->isolate()->object_store()->global_object_pool());
+      ASSERT(thread->global_object_pool() != Object::null());
+    }
 #endif  // !defined(DART_PRECOMPILED_RUNTIME)
+  }
 
   ASSERT(!function.IsNull());
 
diff --git a/runtime/vm/exceptions.cc b/runtime/vm/exceptions.cc
index 5628b0c..2783bd7 100644
--- a/runtime/vm/exceptions.cc
+++ b/runtime/vm/exceptions.cc
@@ -248,7 +248,7 @@
       switch (move.source_kind()) {
         case CatchEntryMove::SourceKind::kConstant:
           if (pool == nullptr) {
-            pool = &ObjectPool::Handle(code_->object_pool());
+            pool = &ObjectPool::Handle(code_->GetObjectPool());
           }
           value = pool->ObjectAt(move.src_slot());
           break;
diff --git a/runtime/vm/flag_list.h b/runtime/vm/flag_list.h
index 4fb3c06..759b07c 100644
--- a/runtime/vm/flag_list.h
+++ b/runtime/vm/flag_list.h
@@ -157,6 +157,7 @@
   C(stress_async_stacks, false, false, bool, false,                            \
     "Stress test async stack traces")                                          \
   P(sync_async, bool, true, "Start `async` functions synchronously.")          \
+  P(use_bare_instructions, bool, false, "Enable bare instructions mode.")      \
   R(support_disassembler, false, bool, true, "Support the disassembler.")      \
   R(support_il_printer, false, bool, true, "Support the IL printer.")          \
   C(support_reload, false, false, bool, true, "Support isolate reload.")       \
diff --git a/runtime/vm/hash_map.h b/runtime/vm/hash_map.h
index 95194aa..0a2b76c 100644
--- a/runtime/vm/hash_map.h
+++ b/runtime/vm/hash_map.h
@@ -27,6 +27,8 @@
 
   BaseDirectChainedHashMap(const BaseDirectChainedHashMap& other);
 
+  intptr_t Length() const { return count_; }
+
   virtual ~BaseDirectChainedHashMap() {
     allocator_->template Free<HashMapListElement>(array_, array_size_);
     allocator_->template Free<HashMapListElement>(lists_, lists_size_);
diff --git a/runtime/vm/isolate.h b/runtime/vm/isolate.h
index 55b4bad..0cb4ecb 100644
--- a/runtime/vm/isolate.h
+++ b/runtime/vm/isolate.h
@@ -138,6 +138,8 @@
 //
 #define ISOLATE_FLAG_LIST(V)                                                   \
   V(NONPRODUCT, asserts, EnableAsserts, enable_asserts, FLAG_enable_asserts)   \
+  V(PRODUCT, use_bare_instructions, Bare, use_bare_instructions,               \
+    FLAG_use_bare_instructions)                                                \
   V(NONPRODUCT, use_field_guards, UseFieldGuards, use_field_guards,            \
     FLAG_use_field_guards)                                                     \
   V(NONPRODUCT, use_osr, UseOsr, use_osr, FLAG_use_osr)                        \
@@ -882,6 +884,7 @@
   V(EnableAsserts)                                                             \
   V(ErrorOnBadType)                                                            \
   V(ErrorOnBadOverride)                                                        \
+  V(Bare)                                                                      \
   V(UseFieldGuards)                                                            \
   V(UseOsr)                                                                    \
   V(Obfuscate)                                                                 \
diff --git a/runtime/vm/megamorphic_cache_table.cc b/runtime/vm/megamorphic_cache_table.cc
index 75d42a5a..eba6c5f 100644
--- a/runtime/vm/megamorphic_cache_table.cc
+++ b/runtime/vm/megamorphic_cache_table.cc
@@ -86,6 +86,21 @@
          Function::null());
   isolate->object_store()->SetMegamorphicMissHandler(code, function);
 }
+
+void MegamorphicCacheTable::ReInitMissHandlerCode(Isolate* isolate,
+                                                  ObjectPoolWrapper* wrapper) {
+  ASSERT(FLAG_precompiled_mode && FLAG_use_bare_instructions);
+
+  const Code& code = Code::Handle(StubCode::Generate(
+      "_stub_MegamorphicMiss", wrapper, StubCode::GenerateMegamorphicMissStub));
+  code.set_exception_handlers(Object::empty_exception_handlers());
+
+  auto object_store = isolate->object_store();
+  auto& function = Function::Handle(object_store->megamorphic_miss_function());
+  function.AttachCode(code);
+  object_store->SetMegamorphicMissHandler(code, function);
+}
+
 #endif  // !defined(DART_PRECOMPILED_RUNTIME)
 
 void MegamorphicCacheTable::PrintSizes(Isolate* isolate) {
diff --git a/runtime/vm/megamorphic_cache_table.h b/runtime/vm/megamorphic_cache_table.h
index cc813e8..8d7788a 100644
--- a/runtime/vm/megamorphic_cache_table.h
+++ b/runtime/vm/megamorphic_cache_table.h
@@ -13,6 +13,7 @@
 class Function;
 class Isolate;
 class ObjectPointerVisitor;
+class ObjectPoolWrapper;
 class RawArray;
 class RawFunction;
 class RawCode;
@@ -25,6 +26,15 @@
   static RawFunction* miss_handler(Isolate* isolate);
   NOT_IN_PRECOMPILED(static void InitMissHandler(Isolate* isolate));
 
+  // Re-initializes the megamorphic miss handler function in the object store.
+  //
+  // Normally we initialize the megamorphic miss handler during isolate startup.
+  // However, if we AOT compile with bare instructions support, we need to
+  // re-generate the handler so that it uses the global object pool.
+  NOT_IN_PRECOMPILED(
+      static void ReInitMissHandlerCode(Isolate* isolate,
+                                        ObjectPoolWrapper* wrapper));
+
   static RawMegamorphicCache* Lookup(Isolate* isolate,
                                      const String& name,
                                      const Array& descriptor);
diff --git a/runtime/vm/native_api_impl.cc b/runtime/vm/native_api_impl.cc
index 5c6e00a..6f12a68 100644
--- a/runtime/vm/native_api_impl.cc
+++ b/runtime/vm/native_api_impl.cc
@@ -17,16 +17,16 @@
 
 // --- Message sending/receiving from native code ---
 
-class IsolateSaver {
+class IsolateLeaveScope {
  public:
-  explicit IsolateSaver(Isolate* current_isolate)
+  explicit IsolateLeaveScope(Isolate* current_isolate)
       : saved_isolate_(current_isolate) {
     if (current_isolate != NULL) {
       ASSERT(current_isolate == Isolate::Current());
       Dart_ExitIsolate();
     }
   }
-  ~IsolateSaver() {
+  ~IsolateLeaveScope() {
     if (saved_isolate_ != NULL) {
       Dart_Isolate I = reinterpret_cast<Dart_Isolate>(saved_isolate_);
       Dart_EnterIsolate(I);
@@ -36,7 +36,7 @@
  private:
   Isolate* saved_isolate_;
 
-  DISALLOW_COPY_AND_ASSIGN(IsolateSaver);
+  DISALLOW_COPY_AND_ASSIGN(IsolateLeaveScope);
 };
 
 static bool PostCObjectHelper(Dart_Port port_id, Dart_CObject* message) {
@@ -79,7 +79,7 @@
     return ILLEGAL_PORT;
   }
   // Start the native port without a current isolate.
-  IsolateSaver saver(Isolate::Current());
+  IsolateLeaveScope saver(Isolate::Current());
 
   NativeMessageHandler* nmh = new NativeMessageHandler(name, handler);
   Dart_Port port_id = PortMap::CreatePort(nmh);
@@ -90,7 +90,7 @@
 
 DART_EXPORT bool Dart_CloseNativePort(Dart_Port native_port_id) {
   // Close the native port without a current isolate.
-  IsolateSaver saver(Isolate::Current());
+  IsolateLeaveScope saver(Isolate::Current());
 
   // TODO(turnidge): Check that the port is native before trying to close.
   return PortMap::ClosePort(native_port_id);
@@ -105,7 +105,7 @@
                                             char** error) {
   Isolate* isolate = Isolate::Current();
   ASSERT(isolate == nullptr || !isolate->is_service_isolate());
-  IsolateSaver saver(isolate);
+  IsolateLeaveScope saver(isolate);
 
   // We only allow one isolate reload at a time.  If this turns out to be on the
   // critical path, we can change it to have a global datastructure which is
diff --git a/runtime/vm/object.cc b/runtime/vm/object.cc
index 676ca4e..e2f3264 100644
--- a/runtime/vm/object.cc
+++ b/runtime/vm/object.cc
@@ -83,6 +83,7 @@
 DECLARE_FLAG(bool, trace_deoptimization_verbose);
 DECLARE_FLAG(bool, trace_reload);
 DECLARE_FLAG(bool, write_protect_code);
+DECLARE_FLAG(bool, precompiled_mode);
 
 static const char* const kGetterPrefix = "get:";
 static const intptr_t kGetterPrefixLength = strlen(kGetterPrefix);
@@ -14194,6 +14195,15 @@
 #endif  // DEBUG
 }
 
+RawObjectPool* Code::GetObjectPool() const {
+#if defined(DART_PRECOMPILED_RUNTIME)
+  if (FLAG_use_bare_instructions) {
+    return Isolate::Current()->object_store()->global_object_pool();
+  }
+#endif
+  return object_pool();
+}
+
 bool Code::HasBreakpoint() const {
 #if defined(PRODUCT)
   return false;
diff --git a/runtime/vm/object.h b/runtime/vm/object.h
index 13d9057..4acfb6f 100644
--- a/runtime/vm/object.h
+++ b/runtime/vm/object.h
@@ -4279,7 +4279,11 @@
 
   uword PayloadStart() const { return PayloadStart(raw()); }
   uword MonomorphicEntryPoint() const { return MonomorphicEntryPoint(raw()); }
+  uword MonomorphicUncheckedEntryPoint() const {
+    return MonomorphicUncheckedEntryPoint(raw());
+  }
   uword EntryPoint() const { return EntryPoint(raw()); }
+  uword UncheckedEntryPoint() const { return UncheckedEntryPoint(raw()); }
   static uword PayloadStart(const RawInstructions* instr) {
     return reinterpret_cast<uword>(instr->ptr()) + HeaderSize();
   }
@@ -4855,8 +4859,7 @@
     return Instructions::MonomorphicUncheckedEntryPoint(instructions());
   }
   intptr_t Size() const { return Instructions::Size(instructions()); }
-  RawObjectPool* GetObjectPool() const { return object_pool(); }
-
+  RawObjectPool* GetObjectPool() const;
   bool ContainsInstructionAt(uword addr) const {
     return ContainsInstructionAt(raw(), addr);
   }
@@ -4918,13 +4921,17 @@
   RawStackMap* GetStackMap(uint32_t pc_offset,
                            Array* stackmaps,
                            StackMap* map) const;
-
   enum CallKind {
     kPcRelativeCall = 1,
     kPcRelativeTailCall = 2,
     kCallViaCode = 3,
   };
 
+  enum CallEntryPoint {
+    kDefaultEntry,
+    kUncheckedEntry,
+  };
+
   enum SCallTableEntry {
     kSCallTableKindAndOffset = 0,
     kSCallTableCodeTarget = 1,
@@ -4938,7 +4945,10 @@
   };
 
   class KindField : public BitField<intptr_t, CallKind, 0, 2> {};
-  class OffsetField : public BitField<intptr_t, intptr_t, 2, 28> {};
+  class EntryPointField
+      : public BitField<intptr_t, CallEntryPoint, KindField::kNextBit, 1> {};
+  class OffsetField
+      : public BitField<intptr_t, intptr_t, EntryPointField::kNextBit, 27> {};
 
   void set_static_calls_target_table(const Array& value) const;
   RawArray* static_calls_target_table() const {
@@ -5248,6 +5258,8 @@
   FINAL_HEAP_OBJECT_IMPLEMENTATION(Code, Object);
   friend class Class;
   friend class SnapshotWriter;
+  friend class StubCode;     // for set_object_pool
+  friend class Precompiler;  // for set_object_pool
   friend class FunctionSerializationCluster;
   friend class CodeSerializationCluster;
   friend class StubCode;               // for set_instructions
@@ -7807,6 +7819,7 @@
   static intptr_t LengthOf(const RawArray* array) {
     return Smi::Value(array->ptr()->length_);
   }
+
   static intptr_t length_offset() { return OFFSET_OF(RawArray, length_); }
   static intptr_t data_offset() {
     return OFFSET_OF_RETURNED_VALUE(RawArray, data);
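
The static-call-table entry now packs a 2-bit call kind, a 1-bit entry-point
selector and a 27-bit offset into one Smi-sized value via the chained
BitFields above. A standalone sketch of that encoding using plain shifts and
masks (illustrative, not the VM's BitField template):

  #include <cassert>
  #include <cstdint>

  enum CallKind { kPcRelativeCall = 1, kPcRelativeTailCall = 2, kCallViaCode = 3 };
  enum CallEntryPoint { kDefaultEntry = 0, kUncheckedEntry = 1 };

  // Layout: bits [0,1] kind, bit [2] entry point, bits [3,29] offset.
  constexpr int kKindBits = 2, kEntryBits = 1, kOffsetBits = 27;

  int32_t Encode(CallKind kind, CallEntryPoint entry, int32_t offset) {
    assert(offset >= 0 && offset < (1 << kOffsetBits));
    return static_cast<int32_t>(kind) |
           (static_cast<int32_t>(entry) << kKindBits) |
           (offset << (kKindBits + kEntryBits));
  }

  CallKind DecodeKind(int32_t packed) {
    return static_cast<CallKind>(packed & 0x3);
  }
  CallEntryPoint DecodeEntry(int32_t packed) {
    return static_cast<CallEntryPoint>((packed >> kKindBits) & 0x1);
  }
  int32_t DecodeOffset(int32_t packed) {
    return packed >> (kKindBits + kEntryBits);
  }

  int main() {
    const int32_t packed = Encode(kPcRelativeCall, kUncheckedEntry, 0x1234);
    assert(DecodeKind(packed) == kPcRelativeCall);
    assert(DecodeEntry(packed) == kUncheckedEntry);
    assert(DecodeOffset(packed) == 0x1234);
    return 0;
  }
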
diff --git a/runtime/vm/object_store.h b/runtime/vm/object_store.h
index 6b9695d..77c99ee 100644
--- a/runtime/vm/object_store.h
+++ b/runtime/vm/object_store.h
@@ -120,13 +120,14 @@
   RW(Function, async_star_move_next_helper)                                    \
   RW(Function, complete_on_async_return)                                       \
   RW(Class, async_star_stream_controller)                                      \
+  RW(ObjectPool, global_object_pool)                                           \
   RW(Array, library_load_error_table)                                          \
   RW(Array, unique_dynamic_targets)                                            \
   RW(GrowableObjectArray, megamorphic_cache_table)                             \
   RW(Code, build_method_extractor_code)                                        \
-  RW(Array, code_order_table)                                                  \
   R_(Code, megamorphic_miss_code)                                              \
   R_(Function, megamorphic_miss_function)                                      \
+  RW(Array, code_order_table)                                                  \
   RW(Array, obfuscation_map)                                                   \
   RW(GrowableObjectArray, type_testing_stubs)                                  \
   RW(GrowableObjectArray, changed_in_last_reload)                              \
diff --git a/runtime/vm/program_visitor.cc b/runtime/vm/program_visitor.cc
index 2fe8841..2eecb7b 100644
--- a/runtime/vm/program_visitor.cc
+++ b/runtime/vm/program_visitor.cc
@@ -129,8 +129,7 @@
       zone, Array::New(MegamorphicCache::kEntryLength * capacity, Heap::kOld));
   const Function& handler =
       Function::Handle(zone, MegamorphicCacheTable::miss_handler(isolate));
-  MegamorphicCache::SetEntry(buckets, 0, MegamorphicCache::smi_illegal_cid(),
-                             handler);
+  MegamorphicCache::SetEntry(buckets, 0, Object::smi_illegal_cid(), handler);
 
   for (intptr_t i = 0; i < table.Length(); i++) {
     cache ^= table.At(i);
@@ -636,6 +635,12 @@
   ProgramVisitor::VisitFunctions(&visitor);
 }
 
+// Traits for comparing two [Instructions] objects for equality, which is
+// implemented as bit-wise equality.
+//
+// This considers two instruction objects to be equal even if they have
+// different static call targets.  Since the static call targets are called via
+// the object pool, this is ok.
 class InstructionsKeyValueTrait {
  public:
   // Typedefs needed for the DirectChainedHashMap template.
@@ -656,6 +661,52 @@
 
 typedef DirectChainedHashMap<InstructionsKeyValueTrait> InstructionsSet;
 
+// Traits for comparing two [Code] objects for equality.
+//
+// It considers two [Code] objects to be equal if
+//
+//   * their [RawInstruction]s are bit-wise equal
+//   * their [RawPcDescriptor]s are the same
+//   * their [RawStackMaps]s are the same
+//   * their static call targets are the same
+#if defined(DART_PRECOMPILER)
+class CodeKeyValueTrait {
+ public:
+  // Typedefs needed for the DirectChainedHashMap template.
+  typedef const Code* Key;
+  typedef const Code* Value;
+  typedef const Code* Pair;
+
+  static Key KeyOf(Pair kv) { return kv; }
+
+  static Value ValueOf(Pair kv) { return kv; }
+
+  static inline intptr_t Hashcode(Key key) { return key->Size(); }
+
+  static inline bool IsKeyEqual(Pair pair, Key key) {
+    if (pair->raw() == key->raw()) return true;
+
+    // Notice we assume that these entries have already been de-duped, so we
+    // can use pointer equality.
+    if (pair->static_calls_target_table() != key->static_calls_target_table()) {
+      return false;
+    }
+    if (pair->pc_descriptors() != key->pc_descriptors()) {
+      return false;
+    }
+    if (pair->stackmaps() != key->stackmaps()) {
+      return false;
+    }
+    if (pair->catch_entry_moves_maps() != key->catch_entry_moves_maps()) {
+      return false;
+    }
+    return Instructions::Equals(pair->instructions(), key->instructions());
+  }
+};
+
+typedef DirectChainedHashMap<CodeKeyValueTrait> CodeSet;
+#endif  // defined(DART_PRECOMPILER)
+
 void ProgramVisitor::DedupInstructions() {
   class DedupInstructionsVisitor : public FunctionVisitor,
                                    public ObjectVisitor {
@@ -712,6 +763,59 @@
   ProgramVisitor::VisitFunctions(&visitor);
 }
 
+void ProgramVisitor::DedupInstructionsWithSameMetadata() {
+#if defined(DART_PRECOMPILER)
+  class DedupInstructionsWithSameMetadataVisitor : public FunctionVisitor,
+                                                   public ObjectVisitor {
+   public:
+    explicit DedupInstructionsWithSameMetadataVisitor(Zone* zone)
+        : zone_(zone),
+          canonical_set_(),
+          code_(Code::Handle(zone)),
+          owner_(Object::Handle(zone)),
+          instructions_(Instructions::Handle(zone)) {}
+
+    void VisitObject(RawObject* obj) {
+      if (obj->IsCode()) {
+        canonical_set_.Insert(&Code::ZoneHandle(zone_, Code::RawCast(obj)));
+      }
+    }
+
+    void Visit(const Function& function) {
+      if (!function.HasCode()) {
+        return;
+      }
+      code_ = function.CurrentCode();
+      instructions_ = DedupOneInstructions(code_);
+      code_.SetActiveInstructions(instructions_);
+      code_.set_instructions(instructions_);
+      function.SetInstructions(code_);  // Update cached entry point.
+    }
+
+    RawInstructions* DedupOneInstructions(const Code& code) {
+      const Code* canonical = canonical_set_.LookupValue(&code);
+      if (canonical == NULL) {
+        canonical_set_.Insert(&Code::ZoneHandle(zone_, code.raw()));
+        return code.instructions();
+      } else {
+        owner_ = code.owner();
+        return canonical->instructions();
+      }
+    }
+
+   private:
+    Zone* zone_;
+    CodeSet canonical_set_;
+    Code& code_;
+    Object& owner_;
+    Instructions& instructions_;
+  };
+
+  DedupInstructionsWithSameMetadataVisitor visitor(Thread::Current()->zone());
+  ProgramVisitor::VisitFunctions(&visitor);
+#endif  // defined(DART_PRECOMPILER)
+}
+
 void ProgramVisitor::Dedup() {
   Thread* thread = Thread::Current();
   StackZone stack_zone(thread);
@@ -731,7 +835,11 @@
 
 #if defined(PRODUCT)
   // Reduces binary size but obfuscates profiler results.
-  DedupInstructions();
+  if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
+    DedupInstructionsWithSameMetadata();
+  } else {
+    DedupInstructions();
+  }
 #endif
 }
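
DedupInstructionsWithSameMetadata only merges Code objects whose instructions
and metadata all agree, since in bare mode a PC maps back to just one Code
object, whose metadata then has to serve every frame sharing those
instructions. The underlying pattern is a hash set keyed on content; a minimal
sketch with standard containers (the record type is a stand-in, not the VM's
Code):

  #include <cstddef>
  #include <functional>
  #include <string>
  #include <unordered_set>
  #include <vector>

  // Stand-in for a Code object: instruction bytes plus the metadata that has
  // to match before two entries may share instructions.
  struct CodeRecord {
    std::string instructions;
    std::string pc_descriptors;
    std::string stackmaps;
  };

  struct CodeRecordHash {
    size_t operator()(const CodeRecord* c) const {
      return std::hash<std::string>()(c->instructions);
    }
  };

  struct CodeRecordEq {
    bool operator()(const CodeRecord* a, const CodeRecord* b) const {
      return a->instructions == b->instructions &&
             a->pc_descriptors == b->pc_descriptors &&
             a->stackmaps == b->stackmaps;
    }
  };

  // For every record, returns the canonical record whose instructions it
  // should share (the record itself if it is the first of its class).
  std::vector<const CodeRecord*> Dedup(const std::vector<CodeRecord>& codes) {
    std::unordered_set<const CodeRecord*, CodeRecordHash, CodeRecordEq> canonical;
    std::vector<const CodeRecord*> result;
    for (const CodeRecord& code : codes) {
      result.push_back(*canonical.insert(&code).first);
    }
    return result;
  }
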
 
diff --git a/runtime/vm/program_visitor.h b/runtime/vm/program_visitor.h
index f2b255a..c24abe5 100644
--- a/runtime/vm/program_visitor.h
+++ b/runtime/vm/program_visitor.h
@@ -40,6 +40,7 @@
   static void DedupCodeSourceMaps();
   static void DedupLists();
   static void DedupInstructions();
+  static void DedupInstructionsWithSameMetadata();
 };
 
 }  // namespace dart
diff --git a/runtime/vm/raw_object.h b/runtime/vm/raw_object.h
index 891d7d9..f9e0a5c 100644
--- a/runtime/vm/raw_object.h
+++ b/runtime/vm/raw_object.h
@@ -2254,6 +2254,7 @@
 
   friend class LinkedHashMapSerializationCluster;
   friend class LinkedHashMapDeserializationCluster;
+  friend class CodeDeserializationCluster;
   friend class Deserializer;
   friend class RawCode;
   friend class RawImmutableArray;
diff --git a/runtime/vm/runtime_entry.cc b/runtime/vm/runtime_entry.cc
index 8c5e663..2829c76 100644
--- a/runtime/vm/runtime_entry.cc
+++ b/runtime/vm/runtime_entry.cc
@@ -183,7 +183,7 @@
   const intptr_t name_index = reader.GetNullCheckNameIndexAt(pc_offset);
   RELEASE_ASSERT(name_index >= 0);
 
-  const ObjectPool& pool = ObjectPool::Handle(zone, code.object_pool());
+  const ObjectPool& pool = ObjectPool::Handle(zone, code.GetObjectPool());
   const String& member_name =
       String::CheckedHandle(zone, pool.ObjectAt(name_index));
 
@@ -787,7 +787,7 @@
       const Code& caller_code =
           Code::Handle(zone, caller_frame->LookupDartCode());
       const ObjectPool& pool =
-          ObjectPool::Handle(zone, caller_code.object_pool());
+          ObjectPool::Handle(zone, caller_code.GetObjectPool());
       TypeTestingStubCallPattern tts_pattern(caller_frame->pc());
       const intptr_t stc_pool_idx = tts_pattern.GetSubtypeTestCachePoolIndex();
       const intptr_t dst_name_idx = stc_pool_idx + 1;
@@ -824,7 +824,7 @@
       const Code& caller_code =
           Code::Handle(zone, caller_frame->LookupDartCode());
       const ObjectPool& pool =
-          ObjectPool::Handle(zone, caller_code.object_pool());
+          ObjectPool::Handle(zone, caller_code.GetObjectPool());
       TypeTestingStubCallPattern tts_pattern(caller_frame->pc());
       const intptr_t stc_pool_idx = tts_pattern.GetSubtypeTestCachePoolIndex();
 
diff --git a/runtime/vm/simulator_arm.cc b/runtime/vm/simulator_arm.cc
index 962b99d..992dd24 100644
--- a/runtime/vm/simulator_arm.cc
+++ b/runtime/vm/simulator_arm.cc
@@ -3745,8 +3745,11 @@
   // Restore pool pointer.
   int32_t code =
       *reinterpret_cast<int32_t*>(fp + kPcMarkerSlotFromFp * kWordSize);
-  int32_t pp = *reinterpret_cast<int32_t*>(code + Code::object_pool_offset() -
-                                           kHeapObjectTag);
+  int32_t pp = (FLAG_precompiled_mode && FLAG_use_bare_instructions)
+                   ? reinterpret_cast<int32_t>(thread->global_object_pool())
+                   : *reinterpret_cast<int32_t*>(
+                         (code + Code::object_pool_offset() - kHeapObjectTag));
+
   set_register(CODE_REG, code);
   set_register(PP, pp);
   buf->Longjmp();
diff --git a/runtime/vm/simulator_arm64.cc b/runtime/vm/simulator_arm64.cc
index d72b37c..82f12b1 100644
--- a/runtime/vm/simulator_arm64.cc
+++ b/runtime/vm/simulator_arm64.cc
@@ -3565,8 +3565,10 @@
   // Restore pool pointer.
   int64_t code =
       *reinterpret_cast<int64_t*>(fp + kPcMarkerSlotFromFp * kWordSize);
-  int64_t pp = *reinterpret_cast<int64_t*>(code + Code::object_pool_offset() -
-                                           kHeapObjectTag);
+  int64_t pp = (FLAG_precompiled_mode && FLAG_use_bare_instructions)
+                   ? reinterpret_cast<int64_t>(thread->global_object_pool())
+                   : *reinterpret_cast<int64_t*>(
+                         code + Code::object_pool_offset() - kHeapObjectTag);
   pp -= kHeapObjectTag;  // In the PP register, the pool pointer is untagged.
   set_register(NULL, CODE_REG, code);
   set_register(NULL, PP, pp);
diff --git a/runtime/vm/stack_frame.cc b/runtime/vm/stack_frame.cc
index e0302ab..5f174f8 100644
--- a/runtime/vm/stack_frame.cc
+++ b/runtime/vm/stack_frame.cc
@@ -43,6 +43,18 @@
     /*.saved_caller_pp_from_fp = */ kSavedCallerPpSlotFromFp,
     /*.code_from_fp = */ kPcMarkerSlotFromFp,
 };
+const FrameLayout bare_instructions_frame_layout = {
+    /*.first_object_from_fp = */ kFirstObjectSlotFromFp,  // No saved PP slot.
+    /*.last_fixed_object_from_fp = */ kLastFixedObjectSlotFromFp +
+        2,  // No saved CODE, PP slots.
+    /*.param_end_from_fp = */ kParamEndSlotFromFp,
+    /*.first_local_from_fp = */ kFirstLocalSlotFromFp +
+        2,  // No saved CODE, PP slots.
+    /*.dart_fixed_frame_size = */ kDartFrameFixedSize -
+        2,                              // No saved CODE, PP slots.
+    /*.saved_caller_pp_from_fp = */ 0,  // No saved PP slot.
+    /*.code_from_fp = */ 0,             // No saved CODE slot.
+};
 
 FrameLayout compiler_frame_layout = invalid_frame_layout;
 FrameLayout runtime_frame_layout = invalid_frame_layout;
@@ -62,8 +74,19 @@
 }
 
 void FrameLayout::Init() {
+  // By default we use frame layouts with saved CODE_REG/PP slots.
   compiler_frame_layout = default_frame_layout;
   runtime_frame_layout = default_frame_layout;
+
+  if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
+    compiler_frame_layout = bare_instructions_frame_layout;
+  }
+#if defined(DART_PRECOMPILED_RUNTIME)
+  if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
+    compiler_frame_layout = invalid_frame_layout;
+    runtime_frame_layout = bare_instructions_frame_layout;
+  }
+#endif
 }
 
 Isolate* StackFrame::IsolateOfBareInstructionsFrame() const {
@@ -118,8 +141,8 @@
     return false;
   }
 
-  if (IsBareInstructionsStubFrame()) {
-    return true;
+  if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
+    return IsBareInstructionsStubFrame();
   }
 
   ASSERT(!(IsEntryFrame() || IsExitFrame()));
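
Dropping the saved CODE and PP slots is what shifts the numbers by two in
bare_instructions_frame_layout above: the fixed frame shrinks by two words and
the first local moves two slots closer to FP. A small sketch of the slot
arithmetic (the concrete slot indices follow the usual four-word Dart frame
and are illustrative only):

  #include <cstdio>

  // FP-relative slot indices; negative indices are below FP, where locals live.
  struct FrameLayout {
    int first_local_from_fp;    // slot index of the first local variable
    int dart_fixed_frame_size;  // words for saved PC, FP (+ CODE, PP if present)
  };

  // Default frames save caller PC, caller FP, the CODE object and the pool
  // pointer; bare frames drop the last two.
  constexpr FrameLayout kDefaultLayout = {/*first_local_from_fp=*/-3,
                                          /*dart_fixed_frame_size=*/4};
  constexpr FrameLayout kBareLayout = {/*first_local_from_fp=*/-1,
                                       /*dart_fixed_frame_size=*/2};

  // Byte offset of local |i| (0-based) from the frame pointer.
  int LocalOffsetFromFp(const FrameLayout& layout, int i, int word_size) {
    return (layout.first_local_from_fp - i) * word_size;
  }

  int main() {
    // With 8-byte words the first local sits 24 bytes below FP in a default
    // frame, but only 8 bytes below FP once CODE/PP are no longer saved.
    std::printf("default: %d, bare: %d\n",
                LocalOffsetFromFp(kDefaultLayout, 0, 8),
                LocalOffsetFromFp(kBareLayout, 0, 8));
    return 0;
  }
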
diff --git a/runtime/vm/stack_frame_arm64.h b/runtime/vm/stack_frame_arm64.h
index c0a9435..70000da 100644
--- a/runtime/vm/stack_frame_arm64.h
+++ b/runtime/vm/stack_frame_arm64.h
@@ -42,7 +42,6 @@
 
 static const int kParamEndSlotFromFp = 1;  // One slot past last parameter.
 static const int kCallerSpSlotFromFp = 2;
-static const int kSavedAboveReturnAddress = 3;  // Saved above return address.
 
 // Entry and exit frame layout.
 static const int kExitLinkSlotFromEntryFp = -22;
diff --git a/runtime/vm/stack_frame_x64.h b/runtime/vm/stack_frame_x64.h
index 1435777..84d9652 100644
--- a/runtime/vm/stack_frame_x64.h
+++ b/runtime/vm/stack_frame_x64.h
@@ -43,7 +43,6 @@
 
 static const int kParamEndSlotFromFp = 1;  // One slot past last parameter.
 static const int kCallerSpSlotFromFp = 2;
-static const int kSavedAboveReturnAddress = 3;  // Saved above return address.
 
 // Entry and exit frame layout.
 #if defined(_WIN64)
diff --git a/runtime/vm/stub_code.cc b/runtime/vm/stub_code.cc
index 95e53c1..9f48a07 100644
--- a/runtime/vm/stub_code.cc
+++ b/runtime/vm/stub_code.cc
@@ -7,6 +7,7 @@
 #include "platform/assert.h"
 #include "platform/globals.h"
 #include "vm/clustered_snapshot.h"
+#include "vm/compiler/aot/precompiler.h"
 #include "vm/compiler/assembler/assembler.h"
 #include "vm/compiler/assembler/disassembler.h"
 #include "vm/flags.h"
@@ -20,6 +21,7 @@
 namespace dart {
 
 DEFINE_FLAG(bool, disassemble_stubs, false, "Disassemble generated stubs.");
+DECLARE_FLAG(bool, precompiled_mode);
 
 DECLARE_FLAG(bool, enable_interpreter);
 
@@ -162,13 +164,24 @@
 #if !defined(DART_PRECOMPILED_RUNTIME)
   if (stub.IsNull()) {
     ObjectPoolWrapper object_pool_wrapper;
-    Assembler assembler(&object_pool_wrapper);
+    Precompiler* precompiler = Precompiler::Instance();
+
+    ObjectPoolWrapper* wrapper =
+        FLAG_use_bare_instructions && precompiler != NULL
+            ? precompiler->global_object_pool_wrapper()
+            : &object_pool_wrapper;
+
+    const auto pool_attachment =
+        FLAG_precompiled_mode && FLAG_use_bare_instructions
+            ? Code::PoolAttachment::kNotAttachPool
+            : Code::PoolAttachment::kAttachPool;
+
+    Assembler assembler(wrapper);
     const char* name = cls.ToCString();
     StubCode::GenerateAllocationStubForClass(&assembler, cls);
 
     if (thread->IsMutatorThread()) {
-      stub ^= Code::FinalizeCode(name, nullptr, &assembler,
-                                 Code::PoolAttachment::kAttachPool,
+      stub ^= Code::FinalizeCode(name, nullptr, &assembler, pool_attachment,
                                  /*optimized=*/false);
       // Check if background compilation thread has not already added the stub.
       if (cls.allocation_stub() == Code::null()) {
@@ -193,8 +206,7 @@
         // Do not Garbage collect during this stage and instead allow the
         // heap to grow.
         NoHeapGrowthControlScope no_growth_control;
-        stub ^= Code::FinalizeCode(name, nullptr, &assembler,
-                                   Code::PoolAttachment::kAttachPool,
+        stub ^= Code::FinalizeCode(name, nullptr, &assembler, pool_attachment,
                                    false /* optimized */);
         stub.set_owner(cls);
         cls.set_allocation_stub(stub);
@@ -226,16 +238,23 @@
 }
 
 #if !defined(TARGET_ARCH_DBC) && !defined(TARGET_ARCH_IA32)
-RawCode* StubCode::GetBuildMethodExtractorStub() {
+RawCode* StubCode::GetBuildMethodExtractorStub(ObjectPoolWrapper* pool) {
 #if !defined(DART_PRECOMPILED_RUNTIME)
   ObjectPoolWrapper object_pool_wrapper;
-  Assembler assembler(&object_pool_wrapper);
+  Assembler assembler(pool != nullptr ? pool : &object_pool_wrapper);
   StubCode::GenerateBuildMethodExtractorStub(&assembler);
 
   const char* name = "BuildMethodExtractor";
   const Code& stub = Code::Handle(Code::FinalizeCode(
-      name, nullptr, &assembler, Code::PoolAttachment::kAttachPool,
+      name, nullptr, &assembler, Code::PoolAttachment::kNotAttachPool,
       /*optimized=*/false));
+
+  if (pool == nullptr) {
+    const ObjectPool& object_pool =
+        ObjectPool::Handle(object_pool_wrapper.MakeObjectPool());
+    stub.set_object_pool(object_pool.raw());
+  }
+
 #ifndef PRODUCT
   if (FLAG_support_disassembler && FLAG_disassemble_stubs) {
     LogBlock lb;
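
In bare mode the allocation stubs are assembled against the precompiler's
single global ObjectPoolWrapper and finalized with kNotAttachPool, so every
pool reference they emit is an index into the one pool that later sits in PP.
Such a wrapper is essentially an interning table shared by all generated code;
a minimal sketch of the idea (illustrative, not the VM's ObjectPoolWrapper
API):

  #include <cstdint>
  #include <map>
  #include <string>
  #include <vector>

  // A tiny stand-in for an object-pool builder shared by every piece of
  // generated code: constants are interned once and referenced by index, so a
  // single pool can serve all code at runtime.
  class PoolBuilder {
   public:
    // Returns the index of |constant|, adding it on first use.
    intptr_t FindOrAdd(const std::string& constant) {
      auto it = index_.find(constant);
      if (it != index_.end()) return it->second;
      const intptr_t index = static_cast<intptr_t>(entries_.size());
      entries_.push_back(constant);
      index_[constant] = index;
      return index;
    }

    // Materializes the final pool once all code has been generated.
    std::vector<std::string> MakePool() const { return entries_; }

   private:
    std::vector<std::string> entries_;
    std::map<std::string, intptr_t> index_;
  };
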
diff --git a/runtime/vm/stub_code.h b/runtime/vm/stub_code.h
index 637371b..fe2fcbe 100644
--- a/runtime/vm/stub_code.h
+++ b/runtime/vm/stub_code.h
@@ -14,6 +14,7 @@
 class Code;
 class Isolate;
 class ObjectPointerVisitor;
+class ObjectPoolWrapper;
 class RawCode;
 class SnapshotReader;
 class SnapshotWriter;
@@ -151,7 +152,7 @@
   static RawCode* GetAllocationStubForClass(const Class& cls);
 
 #if !defined(TARGET_ARCH_DBC) && !defined(TARGET_ARCH_IA32)
-  static RawCode* GetBuildMethodExtractorStub();
+  static RawCode* GetBuildMethodExtractorStub(ObjectPoolWrapper* pool);
   static void GenerateBuildMethodExtractorStub(Assembler* assembler);
 #endif
 
diff --git a/runtime/vm/stub_code_arm.cc b/runtime/vm/stub_code_arm.cc
index f35c664..7c66ac2 100644
--- a/runtime/vm/stub_code_arm.cc
+++ b/runtime/vm/stub_code_arm.cc
@@ -29,6 +29,7 @@
             use_slow_path,
             false,
             "Set to true for debugging & verifying the slow paths.");
+DECLARE_FLAG(bool, precompiled_mode);
 
 // Input parameters:
 //   LR : return address.
@@ -1018,7 +1019,11 @@
   __ Bind(&done_push_arguments);
 
   // Call the Dart code entrypoint.
-  __ LoadImmediate(PP, 0);  // GC safe value into PP.
+  if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
+    __ ldr(PP, Address(THR, Thread::global_object_pool_offset()));
+  } else {
+    __ LoadImmediate(PP, 0);  // GC safe value into PP.
+  }
   __ ldr(CODE_REG, Address(R0, VMHandles::kOffsetOfRawPtrInHandle));
   __ ldr(R0, FieldAddress(CODE_REG, Code::entry_point_offset()));
   __ blx(R0);  // R4 is the arguments descriptor array.
@@ -2466,7 +2471,12 @@
   __ StoreToOffset(kWord, R2, THR, Thread::top_exit_frame_info_offset());
   // Restore the pool pointer.
   __ RestoreCodePointer();
-  __ LoadPoolPointer();
+  if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
+    __ ldr(PP, Address(THR, Thread::global_object_pool_offset()));
+    __ set_constant_pool_allowed(true);
+  } else {
+    __ LoadPoolPointer();
+  }
   __ bx(LR);  // Jump to continuation point.
 }
 
diff --git a/runtime/vm/stub_code_arm64.cc b/runtime/vm/stub_code_arm64.cc
index 89e838e..7a2b85a 100644
--- a/runtime/vm/stub_code_arm64.cc
+++ b/runtime/vm/stub_code_arm64.cc
@@ -28,6 +28,7 @@
             false,
             "Set to true for debugging & verifying the slow paths.");
 DECLARE_FLAG(bool, enable_interpreter);
+DECLARE_FLAG(bool, precompiled_mode);
 
 // Input parameters:
 //   LR : return address.
@@ -1094,10 +1095,15 @@
   __ b(&push_arguments, LT);
   __ Bind(&done_push_arguments);
 
-  // We now load the pool pointer(PP) with a GC safe value as we are about to
-  // invoke dart code. We don't need a real object pool here.
-  // Smi zero does not work because ARM64 assumes PP to be untagged.
-  __ LoadObject(PP, Object::null_object());
+  if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
+    __ ldr(PP, Address(THR, Thread::global_object_pool_offset()));
+    __ sub(PP, PP, Operand(kHeapObjectTag));  // Pool in PP is untagged!
+  } else {
+    // We now load the pool pointer(PP) with a GC safe value as we are about to
+    // invoke dart code. We don't need a real object pool here.
+    // Smi zero does not work because ARM64 assumes PP to be untagged.
+    __ LoadObject(PP, Object::null_object());
+  }
 
   // Call the Dart code entrypoint.
   __ ldr(CODE_REG, Address(R0, VMHandles::kOffsetOfRawPtrInHandle));
@@ -2734,7 +2740,12 @@
   __ StoreToOffset(ZR, THR, Thread::top_exit_frame_info_offset());
   // Restore the pool pointer.
   __ RestoreCodePointer();
-  __ LoadPoolPointer();
+  if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
+    __ ldr(PP, Address(THR, Thread::global_object_pool_offset()));
+    __ sub(PP, PP, Operand(kHeapObjectTag));  // Pool in PP is untagged!
+  } else {
+    __ LoadPoolPointer();
+  }
   __ ret();  // Jump to continuation point.
 }
 
diff --git a/runtime/vm/stub_code_x64.cc b/runtime/vm/stub_code_x64.cc
index 7152c6f..42b6b21 100644
--- a/runtime/vm/stub_code_x64.cc
+++ b/runtime/vm/stub_code_x64.cc
@@ -33,6 +33,7 @@
             false,
             "Set to true for debugging & verifying the slow paths.");
 DECLARE_FLAG(bool, enable_interpreter);
+DECLARE_FLAG(bool, precompiled_mode);
 
 // Input parameters:
 //   RSP : points to return address.
@@ -1025,7 +1026,11 @@
   __ Bind(&done_push_arguments);
 
   // Call the Dart code entrypoint.
-  __ xorq(PP, PP);  // GC-safe value into PP.
+  if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
+    __ movq(PP, Address(THR, Thread::global_object_pool_offset()));
+  } else {
+    __ xorq(PP, PP);  // GC-safe value into PP.
+  }
   __ movq(CODE_REG,
           Address(kTargetCodeReg, VMHandles::kOffsetOfRawPtrInHandle));
   __ movq(kTargetCodeReg, FieldAddress(CODE_REG, Code::entry_point_offset()));
@@ -2737,7 +2742,11 @@
   __ movq(Address(THR, Thread::top_exit_frame_info_offset()), Immediate(0));
   // Restore the pool pointer.
   __ RestoreCodePointer();
-  __ LoadPoolPointer(PP);
+  if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
+    __ movq(PP, Address(THR, Thread::global_object_pool_offset()));
+  } else {
+    __ LoadPoolPointer(PP);
+  }
   __ jmp(CallingConventions::kArg1Reg);  // Jump to program counter.
 }
 
diff --git a/runtime/vm/thread.h b/runtime/vm/thread.h
index 514d4d1..9cd5fb5 100644
--- a/runtime/vm/thread.h
+++ b/runtime/vm/thread.h
@@ -48,6 +48,7 @@
 class RawCode;
 class RawError;
 class RawGrowableObjectArray;
+class RawObjectPool;
 class RawStackTrace;
 class RawString;
 class RuntimeEntry;
@@ -121,7 +122,8 @@
 #define CACHED_NON_VM_STUB_LIST(V)                                             \
   V(RawObject*, object_null_, Object::null(), NULL)                            \
   V(RawBool*, bool_true_, Object::bool_true().raw(), NULL)                     \
-  V(RawBool*, bool_false_, Object::bool_false().raw(), NULL)
+  V(RawBool*, bool_false_, Object::bool_false().raw(), NULL)                   \
+  V(RawObjectPool*, global_object_pool_, ObjectPool::null(), NULL)
 
 // List of VM-global objects/addresses cached in each Thread object.
 // Important: constant false must immediately follow constant true.
@@ -552,6 +554,11 @@
   LEAF_RUNTIME_ENTRY_LIST(DEFINE_OFFSET_METHOD)
 #undef DEFINE_OFFSET_METHOD
 
+  RawObjectPool* global_object_pool() const { return global_object_pool_; }
+  void set_global_object_pool(RawObjectPool* raw_value) {
+    global_object_pool_ = raw_value;
+  }
+
   static bool CanLoadFromThread(const Object& object);
   static intptr_t OffsetFromThread(const Object& object);
   static bool ObjectAtOffset(intptr_t offset, Object* object);
diff --git a/runtime/vm/type_testing_stubs.cc b/runtime/vm/type_testing_stubs.cc
index 864d893..1288825 100644
--- a/runtime/vm/type_testing_stubs.cc
+++ b/runtime/vm/type_testing_stubs.cc
@@ -345,9 +345,11 @@
   BuildOptimizedTypeTestStub(&assembler, hi, type, type_class);
 
   const char* name = namer_.StubNameForType(type);
+  const auto pool_attachment = FLAG_use_bare_instructions
+                                   ? Code::PoolAttachment::kNotAttachPool
+                                   : Code::PoolAttachment::kAttachPool;
   const Code& code = Code::Handle(Code::FinalizeCode(
-      name, nullptr, &assembler, Code::PoolAttachment::kAttachPool,
-      false /* optimized */));
+      name, nullptr, &assembler, pool_attachment, false /* optimized */));
 #ifndef PRODUCT
   if (FLAG_support_disassembler && FLAG_disassemble_stubs) {
     LogBlock lb;
diff --git a/runtime/vm/type_testing_stubs.h b/runtime/vm/type_testing_stubs.h
index 2ddff96..423b4d7 100644
--- a/runtime/vm/type_testing_stubs.h
+++ b/runtime/vm/type_testing_stubs.h
@@ -10,6 +10,8 @@
 
 namespace dart {
 
+class ObjectPoolWrapper;
+
 class TypeTestingStubNamer {
  public:
   TypeTestingStubNamer();
diff --git a/tests/standalone_2/standalone_2_kernel.status b/tests/standalone_2/standalone_2_kernel.status
index dbdce5e..88c74c7 100644
--- a/tests/standalone_2/standalone_2_kernel.status
+++ b/tests/standalone_2/standalone_2_kernel.status
@@ -79,7 +79,6 @@
 no_lazy_dispatchers_test: SkipByDesign # KBC interpreter doesn't support --no_lazy_dispatchers
 
 [ $compiler == dartkp && $mode == debug && $runtime == dart_precompiled && $strong ]
-io/compile_all_test: Crash # Issue 32373
 io/raw_socket_test: Crash
 io/skipping_dart2js_compilations_test: Crash
 io/socket_exception_test: Pass, Crash
@@ -97,7 +96,7 @@
 
 [ $compiler == dartkp && $runtime == dart_precompiled && $strong ]
 dwarf_stack_trace_test: RuntimeError
-io/compile_all_test: RuntimeError # Issue 32338
+io/compile_all_test: Skip # We do not support --compile-all for precompilation
 io/file_fuzz_test: RuntimeError, Pass
 io/http_client_connect_test: Skip # Flaky.
 io/http_close_test: Crash