[VM] Make method extractors call a stub to create the tear-off closure

Issue https://github.com/dart-lang/sdk/issues/33274
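
Instead of compiling the full closure-allocation sequence into every
method extractor, the extractor now tail-calls a shared, per-isolate
BuildMethodExtractor stub which allocates the context and the closure
object (on all architectures except DBC and IA32).

For illustration (a hypothetical example, not part of this CL): a
method extractor is the compiler-generated getter, e.g. 'get:foo',
that runs when a method is torn off:

    class A {
      int foo() => 42;
    }

    void main() {
      final f = A().foo;  // Invokes the method extractor 'get:foo'.
      print(f());         // Prints '42'.
    }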

Change-Id: I7ec37a57c968baa9a3099379cbecb306cd06b464
Reviewed-on: https://dart-review.googlesource.com/c/72780
Commit-Queue: Martin Kustermann <kustermann@google.com>
Reviewed-by: Vyacheslav Egorov <vegorov@google.com>
diff --git a/runtime/vm/class_finalizer.cc b/runtime/vm/class_finalizer.cc
index 5d40a65..04fb131 100644
--- a/runtime/vm/class_finalizer.cc
+++ b/runtime/vm/class_finalizer.cc
@@ -3754,7 +3754,11 @@
   ProgramVisitor::VisitFunctions(&function_visitor);
 
   class ClearCodeClassVisitor : public ClassVisitor {
-    void Visit(const Class& cls) { cls.DisableAllocationStub(); }
+    void Visit(const Class& cls) {
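+      // Keep the allocation stubs of predefined classes: the new
+      // BuildMethodExtractor stub embeds e.g. the Closure class's
+      // allocation stub, which must therefore remain usable.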
+      if (cls.id() >= kNumPredefinedCids) {
+        cls.DisableAllocationStub();
+      }
+    }
   };
   ClearCodeClassVisitor class_visitor;
   ProgramVisitor::VisitClasses(&class_visitor);
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler.cc b/runtime/vm/compiler/backend/flow_graph_compiler.cc
index 6d70cda..6d25b3e 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler.cc
@@ -1115,35 +1115,57 @@
     // Though for implicit getters, which have only the receiver as parameter,
     // there are no checks necessary in any case and we can therefore intrinsify
     // them even in checked mode and strong mode.
-    if (parsed_function().function().kind() == RawFunction::kImplicitGetter) {
-      const Field& field = Field::Handle(function().accessor_field());
-      ASSERT(!field.IsNull());
-
-      // Only intrinsify getter if the field cannot contain a mutable double.
-      // Reading from a mutable double box requires allocating a fresh double.
-      if (field.is_instance() &&
-          (FLAG_precompiled_mode || !IsPotentialUnboxedField(field))) {
-        SpecialStatsBegin(CombinedCodeStatistics::kTagIntrinsics);
-        GenerateInlinedGetter(field.Offset());
-        SpecialStatsEnd(CombinedCodeStatistics::kTagIntrinsics);
-        return !isolate()->use_field_guards();
-      }
-      return false;
-    } else if (parsed_function().function().kind() ==
-               RawFunction::kImplicitSetter) {
-      if (!isolate()->argument_type_checks()) {
+    switch (parsed_function().function().kind()) {
+      case RawFunction::kImplicitGetter: {
         const Field& field = Field::Handle(function().accessor_field());
         ASSERT(!field.IsNull());
 
+        // Only intrinsify getter if the field cannot contain a mutable double.
+        // Reading from a mutable double box requires allocating a fresh double.
         if (field.is_instance() &&
-            (FLAG_precompiled_mode || field.guarded_cid() == kDynamicCid)) {
+            (FLAG_precompiled_mode || !IsPotentialUnboxedField(field))) {
           SpecialStatsBegin(CombinedCodeStatistics::kTagIntrinsics);
-          GenerateInlinedSetter(field.Offset());
+          GenerateGetterIntrinsic(field.Offset());
           SpecialStatsEnd(CombinedCodeStatistics::kTagIntrinsics);
           return !isolate()->use_field_guards();
         }
         return false;
       }
+      case RawFunction::kImplicitSetter: {
+        if (!isolate()->argument_type_checks()) {
+          const Field& field = Field::Handle(function().accessor_field());
+          ASSERT(!field.IsNull());
+
+          if (field.is_instance() &&
+              (FLAG_precompiled_mode || field.guarded_cid() == kDynamicCid)) {
+            SpecialStatsBegin(CombinedCodeStatistics::kTagIntrinsics);
+            GenerateSetterIntrinsic(field.Offset());
+            SpecialStatsEnd(CombinedCodeStatistics::kTagIntrinsics);
+            return !isolate()->use_field_guards();
+          }
+          return false;
+        }
+        break;
+      }
+#if !defined(TARGET_ARCH_DBC) && !defined(TARGET_ARCH_IA32)
+      case RawFunction::kMethodExtractor: {
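+        // Tail-call the per-isolate BuildMethodExtractor stub with the
+        // extracted method and the offset of the receiver's type
+        // arguments field (0 if the owner class is not generic).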
+        auto& extracted_method = Function::ZoneHandle(
+            parsed_function().function().extracted_method_closure());
+        auto& klass = Class::Handle(extracted_method.Owner());
+        const intptr_t type_arguments_field_offset =
+            klass.NumTypeArguments() > 0
+                ? (klass.type_arguments_field_offset() - kHeapObjectTag)
+                : 0;
+
+        SpecialStatsBegin(CombinedCodeStatistics::kTagIntrinsics);
+        GenerateMethodExtractorIntrinsic(extracted_method,
+                                         type_arguments_field_offset);
+        SpecialStatsEnd(CombinedCodeStatistics::kTagIntrinsics);
+        return true;
+      }
+#endif  // !defined(TARGET_ARCH_DBC) && !defined(TARGET_ARCH_IA32)
+      default:
+        break;
     }
   }
 
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler.h b/runtime/vm/compiler/backend/flow_graph_compiler.h
index c257649..4c84b7b 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler.h
+++ b/runtime/vm/compiler/backend/flow_graph_compiler.h
@@ -872,10 +872,14 @@
       Label* is_not_instance_lbl);
 
   void GenerateBoolToJump(Register bool_reg, Label* is_true, Label* is_false);
+
+  void GenerateMethodExtractorIntrinsic(const Function& extracted_method,
+                                        intptr_t type_arguments_field_offset);
+
 #endif  // !defined(TARGET_ARCH_DBC)
 
-  void GenerateInlinedGetter(intptr_t offset);
-  void GenerateInlinedSetter(intptr_t offset);
+  void GenerateGetterIntrinsic(intptr_t offset);
+  void GenerateSetterIntrinsic(intptr_t offset);
 
   // Perform a greedy local register allocation.  Consider all registers free.
   void AllocateRegistersLocally(Instruction* instr);
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_arm.cc b/runtime/vm/compiler/backend/flow_graph_compiler_arm.cc
index b767370..b6a070f 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_arm.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_arm.cc
@@ -788,7 +788,38 @@
   }
 }
 
-void FlowGraphCompiler::GenerateInlinedGetter(intptr_t offset) {
+void FlowGraphCompiler::GenerateMethodExtractorIntrinsic(
+    const Function& extracted_method,
+    intptr_t type_arguments_field_offset) {
+  // No frame has been set up here.
+  ASSERT(!__ constant_pool_allowed());
+  ASSERT(extracted_method.IsZoneHandle());
+
+  const Code& build_method_extractor = Code::ZoneHandle(
+      isolate()->object_store()->build_method_extractor_code());
+
+  const intptr_t stub_index = __ object_pool_wrapper().AddObject(
+      build_method_extractor, ObjectPool::Patchability::kNotPatchable);
+  const intptr_t function_index = __ object_pool_wrapper().AddObject(
+      extracted_method, ObjectPool::Patchability::kNotPatchable);
+
+  // We use a custom pool register to preserve caller PP.
+  const Register kPoolReg = R0;
+
+  // R1 = extracted function
+  // R4 = offset of type argument vector (or 0 if class is not generic)
+  __ LoadFieldFromOffset(kWord, kPoolReg, CODE_REG, Code::object_pool_offset());
+  __ LoadImmediate(R4, type_arguments_field_offset);
+  __ LoadFieldFromOffset(kWord, R1, kPoolReg,
+                         ObjectPool::element_offset(function_index));
+  __ LoadFieldFromOffset(kWord, CODE_REG, kPoolReg,
+                         ObjectPool::element_offset(stub_index));
+  __ LoadFieldFromOffset(kWord, R3, CODE_REG,
+                         Code::entry_point_offset(Code::EntryKind::kUnchecked));
+  __ bx(R3);
+}
+
+void FlowGraphCompiler::GenerateGetterIntrinsic(intptr_t offset) {
   // LR: return address.
   // SP: receiver.
   // Sequence node has one return node, its input is load field node.
@@ -798,7 +829,7 @@
   __ Ret();
 }
 
-void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) {
+void FlowGraphCompiler::GenerateSetterIntrinsic(intptr_t offset) {
   // LR: return address.
   // SP+1: receiver.
   // SP+0: value.
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc b/runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc
index 41d670c..eb23398 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc
@@ -768,7 +768,39 @@
   }
 }
 
-void FlowGraphCompiler::GenerateInlinedGetter(intptr_t offset) {
+void FlowGraphCompiler::GenerateMethodExtractorIntrinsic(
+    const Function& extracted_method,
+    intptr_t type_arguments_field_offset) {
+  // No frame has been set up here.
+  ASSERT(!__ constant_pool_allowed());
+  ASSERT(extracted_method.IsZoneHandle());
+
+  const Code& build_method_extractor = Code::ZoneHandle(
+      isolate()->object_store()->build_method_extractor_code());
+
+  const intptr_t stub_index = __ object_pool_wrapper().AddObject(
+      build_method_extractor, ObjectPool::Patchability::kNotPatchable);
+  const intptr_t function_index = __ object_pool_wrapper().AddObject(
+      extracted_method, ObjectPool::Patchability::kNotPatchable);
+
+  // We use a custom pool register to preserve caller PP.
+  const Register kPoolReg = R0;
+
+  // R1 = extracted function
+  // R4 = offset of type argument vector (or 0 if class is not generic)
+  __ LoadFieldFromOffset(kPoolReg, CODE_REG, Code::object_pool_offset());
+  __ LoadImmediate(R4, type_arguments_field_offset);
+  __ LoadFieldFromOffset(R1, kPoolReg,
+                         ObjectPool::element_offset(function_index));
+  __ LoadFieldFromOffset(CODE_REG, kPoolReg,
+                         ObjectPool::element_offset(stub_index));
+  __ LoadFieldFromOffset(R0, CODE_REG,
+                         Code::entry_point_offset(Code::EntryKind::kUnchecked));
+  __ br(R0);
+}
+
+void FlowGraphCompiler::GenerateGetterIntrinsic(intptr_t offset) {
   // LR: return address.
   // SP: receiver.
   // Sequence node has one return node, its input is load field node.
@@ -778,7 +810,7 @@
   __ ret();
 }
 
-void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) {
+void FlowGraphCompiler::GenerateSetterIntrinsic(intptr_t offset) {
   // LR: return address.
   // SP+1: receiver.
   // SP+0: value.
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_dbc.cc b/runtime/vm/compiler/backend/flow_graph_compiler_dbc.cc
index 6a2c7f4..009ecb9 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_dbc.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_dbc.cc
@@ -283,7 +283,7 @@
   }
 }
 
-void FlowGraphCompiler::GenerateInlinedGetter(intptr_t offset) {
+void FlowGraphCompiler::GenerateGetterIntrinsic(intptr_t offset) {
   __ Move(0, -(1 + compiler_frame_layout.param_end_from_fp));
   ASSERT(offset % kWordSize == 0);
   if (Utils::IsInt(8, offset / kWordSize)) {
@@ -295,7 +295,7 @@
   __ Return(0);
 }
 
-void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) {
+void FlowGraphCompiler::GenerateSetterIntrinsic(intptr_t offset) {
   __ Move(0, -(2 + compiler_frame_layout.param_end_from_fp));
   __ Move(1, -(1 + compiler_frame_layout.param_end_from_fp));
   ASSERT(offset % kWordSize == 0);
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_ia32.cc b/runtime/vm/compiler/backend/flow_graph_compiler_ia32.cc
index 86071ae..d8de557 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_ia32.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_ia32.cc
@@ -747,7 +747,7 @@
   }
 }
 
-void FlowGraphCompiler::GenerateInlinedGetter(intptr_t offset) {
+void FlowGraphCompiler::GenerateGetterIntrinsic(intptr_t offset) {
   // TOS: return address.
   // +1 : receiver.
   // Sequence node has one return node, its input is load field node.
@@ -757,7 +757,7 @@
   __ ret();
 }
 
-void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) {
+void FlowGraphCompiler::GenerateSetterIntrinsic(intptr_t offset) {
   // TOS: return address.
   // +1 : value
   // +2 : receiver.
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_x64.cc b/runtime/vm/compiler/backend/flow_graph_compiler_x64.cc
index eb12d66..d3e58d8 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_x64.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_x64.cc
@@ -785,7 +785,37 @@
   }
 }
 
-void FlowGraphCompiler::GenerateInlinedGetter(intptr_t offset) {
+void FlowGraphCompiler::GenerateMethodExtractorIntrinsic(
+    const Function& extracted_method,
+    intptr_t type_arguments_field_offset) {
+  // No frame has been set up here.
+  ASSERT(!__ constant_pool_allowed());
+  ASSERT(extracted_method.IsZoneHandle());
+
+  const Code& build_method_extractor = Code::ZoneHandle(
+      isolate()->object_store()->build_method_extractor_code());
+
+  const intptr_t stub_index = __ object_pool_wrapper().AddObject(
+      build_method_extractor, ObjectPool::Patchability::kNotPatchable);
+  const intptr_t function_index = __ object_pool_wrapper().AddObject(
+      extracted_method, ObjectPool::Patchability::kNotPatchable);
+
+  // We use a custom pool register to preserve caller PP.
+  const Register kPoolReg = RAX;
+
+  // RBX = extracted function
+  // RDX = offset of type argument vector (or 0 if class is not generic)
+  __ movq(kPoolReg, FieldAddress(CODE_REG, Code::object_pool_offset()));
+  __ movq(RDX, Immediate(type_arguments_field_offset));
+  __ movq(RBX,
+          FieldAddress(kPoolReg, ObjectPool::element_offset(function_index)));
+  __ movq(CODE_REG,
+          FieldAddress(kPoolReg, ObjectPool::element_offset(stub_index)));
+  __ jmp(FieldAddress(CODE_REG,
+                      Code::entry_point_offset(Code::EntryKind::kUnchecked)));
+}
+
+void FlowGraphCompiler::GenerateGetterIntrinsic(intptr_t offset) {
   // TOS: return address.
   // +1 : receiver.
   // Sequence node has one return node, its input is load field node.
@@ -795,7 +825,7 @@
   __ ret();
 }
 
-void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) {
+void FlowGraphCompiler::GenerateSetterIntrinsic(intptr_t offset) {
   // TOS: return address.
   // +1 : value
   // +2 : receiver.
diff --git a/runtime/vm/dart.cc b/runtime/vm/dart.cc
index b51ffe3..ea392ed 100644
--- a/runtime/vm/dart.cc
+++ b/runtime/vm/dart.cc
@@ -649,6 +649,13 @@
   if (I->object_store()->megamorphic_miss_code() == Code::null()) {
     MegamorphicCacheTable::InitMissHandler(I);
   }
+
+#if !defined(TARGET_ARCH_DBC) && !defined(TARGET_ARCH_IA32)
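+  // Eagerly create the per-isolate BuildMethodExtractor stub so that
+  // method extractors compiled later can tail-call it.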
+  if (I != Dart::vm_isolate()) {
+    I->object_store()->set_build_method_extractor_code(
+        Code::Handle(StubCode::GetBuildMethodExtractorStub()));
+  }
+#endif
 #endif  // defined(DART_PRECOMPILED_RUNTIME)
 
   const Code& miss_code =
diff --git a/runtime/vm/image_snapshot.cc b/runtime/vm/image_snapshot.cc
index 4da29a8..14e5a58 100644
--- a/runtime/vm/image_snapshot.cc
+++ b/runtime/vm/image_snapshot.cc
@@ -12,6 +12,7 @@
 #include "vm/heap/heap.h"
 #include "vm/json_writer.h"
 #include "vm/object.h"
+#include "vm/object_store.h"
 #include "vm/program_visitor.h"
 #include "vm/stub_code.h"
 #include "vm/timeline.h"
@@ -354,6 +355,8 @@
   Object& owner = Object::Handle(zone);
   String& str = String::Handle(zone);
 
+  ObjectStore* object_store = Isolate::Current()->object_store();
+
   TypeTestingStubFinder tts;
   for (intptr_t i = 0; i < instructions_.length(); i++) {
     const Instructions& insns = *instructions_[i].insns_;
@@ -397,6 +400,10 @@
       owner = code.owner();
       if (owner.IsNull()) {
         const char* name = StubCode::NameOfStub(insns.EntryPoint());
+        if (name == nullptr &&
+            code.raw() == object_store->build_method_extractor_code()) {
+          name = "BuildMethodExtractor";
+        }
         if (name != NULL) {
           assembly_stream_.Print("Precompiled_Stub_%s:\n", name);
         } else {
diff --git a/runtime/vm/object.h b/runtime/vm/object.h
index 50dfa5e..635a623 100644
--- a/runtime/vm/object.h
+++ b/runtime/vm/object.h
@@ -4840,6 +4840,10 @@
     return code->ptr()->instructions_;
   }
 
+  static uword EntryPoint(const RawCode* code) {
+    return Instructions::EntryPoint(InstructionsOf(code));
+  }
+
   static intptr_t saved_instructions_offset() {
     return OFFSET_OF(RawCode, instructions_);
   }
diff --git a/runtime/vm/object_store.h b/runtime/vm/object_store.h
index 6946e00..cd760de 100644
--- a/runtime/vm/object_store.h
+++ b/runtime/vm/object_store.h
@@ -123,6 +123,7 @@
   RW(Array, library_load_error_table)                                          \
   RW(Array, unique_dynamic_targets)                                            \
   RW(GrowableObjectArray, megamorphic_cache_table)                             \
+  RW(Code, build_method_extractor_code)                                        \
   R_(Code, megamorphic_miss_code)                                              \
   R_(Function, megamorphic_miss_function)                                      \
   RW(Array, obfuscation_map)                                                   \
diff --git a/runtime/vm/stub_code.cc b/runtime/vm/stub_code.cc
index 6e59100..823dea5 100644
--- a/runtime/vm/stub_code.cc
+++ b/runtime/vm/stub_code.cc
@@ -223,6 +223,35 @@
   return Code::null();
 }
 
+#if !defined(TARGET_ARCH_DBC) && !defined(TARGET_ARCH_IA32)
+RawCode* StubCode::GetBuildMethodExtractorStub() {
+#if !defined(DART_PRECOMPILED_RUNTIME)
+  ObjectPoolWrapper object_pool_wrapper;
+  Assembler assembler(&object_pool_wrapper);
+  StubCode::GenerateBuildMethodExtractorStub(&assembler);
+
+  const char* name = "BuildMethodExtractor";
+  const Code& stub = Code::Handle(
+      Code::FinalizeCode(name, nullptr, &assembler, false /* optimized */));
+#ifndef PRODUCT
+  if (FLAG_support_disassembler && FLAG_disassemble_stubs) {
+    LogBlock lb;
+    THR_Print("Code for isolate stub '%s': {\n", name);
+    DisassembleToStdout formatter;
+    stub.Disassemble(&formatter);
+    THR_Print("}\n");
+    const ObjectPool& object_pool = ObjectPool::Handle(stub.object_pool());
+    object_pool.DebugPrint();
+  }
+#endif  // !PRODUCT
+  return stub.raw();
+#else   // !defined(DART_PRECOMPILED_RUNTIME)
+  UNIMPLEMENTED();
+  return nullptr;
+#endif  // !defined(DART_PRECOMPILED_RUNTIME)
+}
+#endif  // !defined(TARGET_ARCH_DBC) && !defined(TARGET_ARCH_IA32)
+
 const StubEntry* StubCode::UnoptimizedStaticCallEntry(
     intptr_t num_args_tested) {
 // These stubs are not used by DBC.
diff --git a/runtime/vm/stub_code.h b/runtime/vm/stub_code.h
index 95ec250..cd336be 100644
--- a/runtime/vm/stub_code.h
+++ b/runtime/vm/stub_code.h
@@ -175,6 +175,11 @@
 
   static RawCode* GetAllocationStubForClass(const Class& cls);
 
+#if !defined(TARGET_ARCH_DBC) && !defined(TARGET_ARCH_IA32)
+  static RawCode* GetBuildMethodExtractorStub();
+  static void GenerateBuildMethodExtractorStub(Assembler* assembler);
+#endif
+
   static const StubEntry* UnoptimizedStaticCallEntry(intptr_t num_args_tested);
 
   static const intptr_t kNoInstantiator = 0;
diff --git a/runtime/vm/stub_code_arm.cc b/runtime/vm/stub_code_arm.cc
index e6c0219..8791013 100644
--- a/runtime/vm/stub_code_arm.cc
+++ b/runtime/vm/stub_code_arm.cc
@@ -166,6 +166,87 @@
   __ bx(LR);
 }
 
+// R1: The extracted method.
+// R4: The type_arguments_field_offset (or 0)
+// SP+0: The object from which we are tearing a method off.
+void StubCode::GenerateBuildMethodExtractorStub(Assembler* assembler) {
+  Thread* thread = Thread::Current();
+  Zone* Z = thread->zone();
+  ObjectStore* object_store = thread->isolate()->object_store();
+
+  const auto& closure_class =
+      Class::ZoneHandle(Z, object_store->closure_class());
+  const auto& closure_allocation_stub =
+      Code::ZoneHandle(Z, StubCode::GetAllocationStubForClass(closure_class));
+
+  const intptr_t kReceiverOffset = compiler_frame_layout.param_end_from_fp + 1;
+
+  const auto& context_allocation_stub =
+      Code::ZoneHandle(StubCode::AllocateContext_entry()->code());
+
+  __ EnterStubFrame();
+
+  // Build type_arguments vector (or null)
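+  // The loads below are conditional on the compare: if the class is
+  // generic (R4 != 0), read the receiver's type arguments field into R3;
+  // otherwise R3 keeps the null object.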
+  __ cmp(R4, Operand(0));
+  __ ldr(R3, Address(THR, Thread::object_null_offset()), EQ);
+  __ ldr(R0, Address(FP, kReceiverOffset * kWordSize), NE);
+  __ ldr(R3, Address(R0, R4), NE);
+
+  // Push type arguments & extracted method.
+  __ PushList(1 << R3 | 1 << R1);
+
+  // Allocate context.
+  {
+    Label done, slow_path;
+    __ TryAllocateArray(kContextCid, Context::InstanceSize(1), &slow_path,
+                        R0,  // instance
+                        R1,  // end address
+                        R2, R3);
+    __ ldr(R1, Address(THR, Thread::object_null_offset()));
+    __ str(R1, FieldAddress(R0, Context::parent_offset()));
+    __ LoadImmediate(R1, 1);
+    __ str(R1, FieldAddress(R0, Context::num_variables_offset()));
+    __ b(&done);
+
+    __ Bind(&slow_path);
+
+    __ LoadImmediate(/*num_vars=*/R1, 1);
+    __ LoadObject(CODE_REG, context_allocation_stub);
+    __ ldr(R0, FieldAddress(CODE_REG, Code::entry_point_offset()));
+    __ blx(R0);
+
+    __ Bind(&done);
+  }
+
+  // Store receiver in context
+  __ ldr(R1, Address(FP, kWordSize * kReceiverOffset));
+  __ StoreIntoObject(R0, FieldAddress(R0, Context::variable_offset(0)), R1);
+
+  // Push context.
+  __ Push(R0);
+
+  // Allocate closure.
+  __ LoadObject(CODE_REG, closure_allocation_stub);
+  __ ldr(R1, FieldAddress(CODE_REG, Code::entry_point_offset(
+                                        Code::EntryKind::kUnchecked)));
+  __ blx(R1);
+
+  // Populate closure object.
+  __ Pop(R1);  // Pop context.
+  __ StoreIntoObject(R0, FieldAddress(R0, Closure::context_offset()), R1);
+  __ PopList(1 << R3 | 1 << R1);  // Pop type arguments & extracted method.
+  __ StoreIntoObjectNoBarrier(R0, FieldAddress(R0, Closure::function_offset()),
+                              R1);
+  __ StoreIntoObjectNoBarrier(
+      R0, FieldAddress(R0, Closure::instantiator_type_arguments_offset()), R3);
+  __ LoadObject(R1, Object::empty_type_arguments());
+  __ StoreIntoObjectNoBarrier(
+      R0, FieldAddress(R0, Closure::delayed_type_arguments_offset()), R1);
+
+  __ LeaveStubFrame();
+  __ Ret();
+}
+
 void StubCode::GenerateNullErrorSharedWithoutFPURegsStub(Assembler* assembler) {
   GenerateSharedStub(assembler, /*save_fpu_registers=*/false,
                      &kNullErrorRuntimeEntry,
diff --git a/runtime/vm/stub_code_arm64.cc b/runtime/vm/stub_code_arm64.cc
index 15e905d..8aa8213 100644
--- a/runtime/vm/stub_code_arm64.cc
+++ b/runtime/vm/stub_code_arm64.cc
@@ -189,6 +189,89 @@
   __ ret(LR);
 }
 
+// R1: The extracted method.
+// R4: The type_arguments_field_offset (or 0)
+void StubCode::GenerateBuildMethodExtractorStub(Assembler* assembler) {
+  Thread* thread = Thread::Current();
+  Zone* Z = thread->zone();
+  ObjectStore* object_store = thread->isolate()->object_store();
+
+  const auto& closure_class =
+      Class::ZoneHandle(Z, object_store->closure_class());
+  const auto& closure_allocation_stub =
+      Code::ZoneHandle(Z, StubCode::GetAllocationStubForClass(closure_class));
+
+  const intptr_t kReceiverOffset = compiler_frame_layout.param_end_from_fp + 1;
+
+  const auto& context_allocation_stub =
+      Code::ZoneHandle(StubCode::AllocateContext_entry()->code());
+
+  __ EnterStubFrame();
+
+  // Build type_arguments vector (or null)
+  Label no_type_args;
+  __ ldr(R3, Address(THR, Thread::object_null_offset()), kDoubleWord);
+  __ cmp(R4, Operand(0));
+  __ b(&no_type_args, EQ);
+  __ ldr(R0, Address(FP, kReceiverOffset * kWordSize));
+  __ ldr(R3, Address(R0, R4));
+  __ Bind(&no_type_args);
+
+  // Push type arguments & extracted method.
+  __ PushPair(R3, R1);
+
+  // Allocate context.
+  {
+    Label done, slow_path;
+    __ TryAllocateArray(kContextCid, Context::InstanceSize(1), &slow_path,
+                        R0,  // instance
+                        R1,  // end address
+                        R2, R3);
+    __ ldr(R1, Address(THR, Thread::object_null_offset()));
+    __ str(R1, FieldAddress(R0, Context::parent_offset()));
+    __ LoadImmediate(R1, 1);
+    __ str(R1, FieldAddress(R0, Context::num_variables_offset()));
+    __ b(&done);
+
+    __ Bind(&slow_path);
+
+    __ LoadImmediate(/*num_vars=*/R1, 1);
+    __ LoadObject(CODE_REG, context_allocation_stub);
+    __ ldr(R0, FieldAddress(CODE_REG, Code::entry_point_offset()));
+    __ blr(R0);
+
+    __ Bind(&done);
+  }
+
+  // Store receiver in context
+  __ ldr(R1, Address(FP, kWordSize * kReceiverOffset));
+  __ StoreIntoObject(R0, FieldAddress(R0, Context::variable_offset(0)), R1);
+
+  // Push context.
+  __ Push(R0);
+
+  // Allocate closure.
+  __ LoadObject(CODE_REG, closure_allocation_stub);
+  __ ldr(R1, FieldAddress(CODE_REG, Code::entry_point_offset(
+                                        Code::EntryKind::kUnchecked)));
+  __ blr(R1);
+
+  // Populate closure object.
+  __ Pop(R1);  // Pop context.
+  __ StoreIntoObject(R0, FieldAddress(R0, Closure::context_offset()), R1);
+  __ PopPair(R3, R1);  // Pop type arguments & extracted method.
+  __ StoreIntoObjectNoBarrier(R0, FieldAddress(R0, Closure::function_offset()),
+                              R1);
+  __ StoreIntoObjectNoBarrier(
+      R0, FieldAddress(R0, Closure::instantiator_type_arguments_offset()), R3);
+  __ LoadObject(R1, Object::empty_type_arguments());
+  __ StoreIntoObjectNoBarrier(
+      R0, FieldAddress(R0, Closure::delayed_type_arguments_offset()), R1);
+
+  __ LeaveStubFrame();
+  __ ret();
+}
+
 void StubCode::GenerateNullErrorSharedWithoutFPURegsStub(Assembler* assembler) {
   GenerateSharedStub(assembler, /*save_fpu_registers=*/false,
                      &kNullErrorRuntimeEntry,
diff --git a/runtime/vm/stub_code_x64.cc b/runtime/vm/stub_code_x64.cc
index 182667e..c8f4218 100644
--- a/runtime/vm/stub_code_x64.cc
+++ b/runtime/vm/stub_code_x64.cc
@@ -158,6 +158,90 @@
   __ ret();
 }
 
+// RBX: The extracted method.
+// RDX: The type_arguments_field_offset (or 0)
+void StubCode::GenerateBuildMethodExtractorStub(Assembler* assembler) {
+  Thread* thread = Thread::Current();
+  Zone* Z = thread->zone();
+  ObjectStore* object_store = thread->isolate()->object_store();
+
+  const auto& closure_class =
+      Class::ZoneHandle(Z, object_store->closure_class());
+  const auto& closure_allocation_stub =
+      Code::ZoneHandle(Z, StubCode::GetAllocationStubForClass(closure_class));
+
+  const intptr_t kReceiverOffset = compiler_frame_layout.param_end_from_fp + 1;
+
+  const auto& context_allocation_stub =
+      Code::ZoneHandle(StubCode::AllocateContext_entry()->code());
+
+  __ EnterStubFrame();
+
+  // Push type_arguments vector (or null)
+  Label no_type_args;
+  __ movq(RCX, Address(THR, Thread::object_null_offset()));
+  __ cmpq(RDX, Immediate(0));
+  __ j(EQUAL, &no_type_args, Assembler::kNearJump);
+  __ movq(RAX, Address(RBP, kWordSize * kReceiverOffset));
+  __ movq(RCX, Address(RAX, RDX, TIMES_1, 0));
+  __ Bind(&no_type_args);
+  __ pushq(RCX);
+
+  // Push extracted method.
+  __ pushq(RBX);
+
+  // Allocate context.
+  {
+    Label done, slow_path;
+    __ TryAllocateArray(kContextCid, Context::InstanceSize(1), &slow_path,
+                        Assembler::kFarJump,
+                        RAX,  // instance
+                        RSI,  // end address
+                        RDI);
+    __ movq(RSI, Address(THR, Thread::object_null_offset()));
+    __ movq(FieldAddress(RAX, Context::parent_offset()), RSI);
+    __ movq(FieldAddress(RAX, Context::num_variables_offset()), Immediate(1));
+    __ jmp(&done);
+
+    __ Bind(&slow_path);
+
+    __ LoadImmediate(/*num_vars=*/R10, Immediate(1));
+    __ LoadObject(CODE_REG, context_allocation_stub);
+    __ call(FieldAddress(CODE_REG, Code::entry_point_offset()));
+
+    __ Bind(&done);
+  }
+
+  // Store receiver in context
+  __ movq(RSI, Address(RBP, kWordSize * kReceiverOffset));
+  __ StoreIntoObject(RAX, FieldAddress(RAX, Context::variable_offset(0)), RSI);
+
+  // Push context.
+  __ pushq(RAX);
+
+  // Allocate closure.
+  __ LoadObject(CODE_REG, closure_allocation_stub);
+  __ call(FieldAddress(CODE_REG,
+                       Code::entry_point_offset(Code::EntryKind::kUnchecked)));
+
+  // Populate closure object.
+  __ popq(RCX);  // Pop context.
+  __ StoreIntoObject(RAX, FieldAddress(RAX, Closure::context_offset()), RCX);
+  __ popq(RCX);  // Pop extracted method.
+  __ StoreIntoObjectNoBarrier(
+      RAX, FieldAddress(RAX, Closure::function_offset()), RCX);
+  __ popq(RCX);  // Pop type argument vector.
+  __ StoreIntoObjectNoBarrier(
+      RAX, FieldAddress(RAX, Closure::instantiator_type_arguments_offset()),
+      RCX);
+  __ LoadObject(RCX, Object::empty_type_arguments());
+  __ StoreIntoObjectNoBarrier(
+      RAX, FieldAddress(RAX, Closure::delayed_type_arguments_offset()), RCX);
+
+  __ LeaveStubFrame();
+  __ ret();
+}
+
 void StubCode::GenerateNullErrorSharedWithoutFPURegsStub(Assembler* assembler) {
   GenerateSharedStub(assembler, /*save_fpu_registers=*/false,
                      &kNullErrorRuntimeEntry,