[VM runtime] Introduce a new VM class Bytecode.

Allow the pc_marker slot to hold either a Code object or a new Bytecode object.
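
For context, a minimal sketch of the frame-walking pattern this change introduces, mirroring the runtime/lib/stacktrace.cc and runtime/vm/exceptions.cc hunks below; the frames iterator and the code_list/pc_offset_list growable arrays are assumed to come from the surrounding function:

  // Walk Dart frames and record either the Code or the new Bytecode object
  // owning the current pc, together with the pc offset into its payload.
  Code& code = Code::Handle();
  Bytecode& bytecode = Bytecode::Handle();
  Smi& offset = Smi::Handle();
  StackFrame* frame = frames.NextFrame();
  while (frame != NULL) {
    if (frame->IsDartFrame()) {
      if (frame->is_interpreted()) {
        // Interpreted frame: the pc_marker slot holds a Bytecode object.
        bytecode = frame->LookupDartBytecode();
        offset = Smi::New(frame->pc() - bytecode.PayloadStart());
        code_list.Add(bytecode);
      } else {
        // Compiled frame: the pc_marker slot holds a Code object.
        code = frame->LookupDartCode();
        offset = Smi::New(frame->pc() - code.PayloadStart());
        code_list.Add(code);
      }
      pc_offset_list.Add(offset);
    }
    frame = frames.NextFrame();
  }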

Change-Id: If11c1df6dafc5b1cfcce6f0322c36d1d68e86df9
Reviewed-on: https://dart-review.googlesource.com/c/82526
Commit-Queue: Régis Crelier <regis@google.com>
Reviewed-by: Ryan Macnak <rmacnak@google.com>
Reviewed-by: Alexander Markov <alexmarkov@google.com>
diff --git a/runtime/lib/errors.cc b/runtime/lib/errors.cc
index 8b6b5a8..22c1130 100644
--- a/runtime/lib/errors.cc
+++ b/runtime/lib/errors.cc
@@ -30,29 +30,33 @@
       Class::Handle(Library::LookupCoreClass(Symbols::AssertionError()));
   ASSERT(!assert_error_class.IsNull());
   bool hit_assertion_error = false;
-  while (stack_frame != NULL) {
-    code ^= stack_frame->LookupDartCode();
-    if (code.is_optimized()) {
-      InlinedFunctionsIterator inlined_iterator(code, stack_frame->pc());
-      while (!inlined_iterator.Done()) {
-        func ^= inlined_iterator.function();
-        if (hit_assertion_error) {
-          return func.script();
-        }
-        ASSERT(!hit_assertion_error);
-        hit_assertion_error = (func.Owner() == assert_error_class.raw());
-        inlined_iterator.Advance();
-      }
+  for (; stack_frame != NULL; stack_frame = iterator->NextFrame()) {
+    if (stack_frame->is_interpreted()) {
+      func = stack_frame->LookupDartFunction();
     } else {
-      func ^= code.function();
-      ASSERT(!func.IsNull());
-      if (hit_assertion_error) {
-        return func.script();
+      code ^= stack_frame->LookupDartCode();
+      if (code.is_optimized()) {
+        InlinedFunctionsIterator inlined_iterator(code, stack_frame->pc());
+        while (!inlined_iterator.Done()) {
+          func ^= inlined_iterator.function();
+          if (hit_assertion_error) {
+            return func.script();
+          }
+          ASSERT(!hit_assertion_error);
+          hit_assertion_error = (func.Owner() == assert_error_class.raw());
+          inlined_iterator.Advance();
+        }
+        continue;
+      } else {
+        func = code.function();
       }
-      ASSERT(!hit_assertion_error);
-      hit_assertion_error = (func.Owner() == assert_error_class.raw());
     }
-    stack_frame = iterator->NextFrame();
+    ASSERT(!func.IsNull());
+    if (hit_assertion_error) {
+      return func.script();
+    }
+    ASSERT(!hit_assertion_error);
+    hit_assertion_error = (func.Owner() == assert_error_class.raw());
   }
   UNREACHABLE();
   return Script::null();
diff --git a/runtime/lib/stacktrace.cc b/runtime/lib/stacktrace.cc
index 28edcf1..2575da5 100644
--- a/runtime/lib/stacktrace.cc
+++ b/runtime/lib/stacktrace.cc
@@ -145,15 +145,22 @@
   StackFrame* frame = frames.NextFrame();
   ASSERT(frame != NULL);  // We expect to find a dart invocation frame.
   Code& code = Code::Handle();
+  Bytecode& bytecode = Bytecode::Handle();
   Smi& offset = Smi::Handle();
   while (frame != NULL) {
     if (frame->IsDartFrame()) {
       if (skip_frames > 0) {
         skip_frames--;
       } else {
-        code = frame->LookupDartCode();
-        offset = Smi::New(frame->pc() - code.PayloadStart());
-        code_list.Add(code);
+        if (frame->is_interpreted()) {
+          bytecode = frame->LookupDartBytecode();
+          offset = Smi::New(frame->pc() - bytecode.PayloadStart());
+          code_list.Add(bytecode);
+        } else {
+          code = frame->LookupDartCode();
+          offset = Smi::New(frame->pc() - code.PayloadStart());
+          code_list.Add(code);
+        }
         pc_offset_list.Add(offset);
       }
     }
diff --git a/runtime/vm/benchmark_test.cc b/runtime/vm/benchmark_test.cc
index 1caffca..60078c6 100644
--- a/runtime/vm/benchmark_test.cc
+++ b/runtime/vm/benchmark_test.cc
@@ -432,6 +432,7 @@
     TransitionNativeToVM transition(thread);
     const int kNumIterations = 100;
     Code& code = Code::Handle(thread->zone());
+    Bytecode& bytecode = Bytecode::Handle(thread->zone());
     for (int i = 0; i < kNumIterations; i++) {
       StackFrameIterator frames(ValidationPolicy::kDontValidateFrames, thread,
                                 StackFrameIterator::kNoCrossThreadIteration);
@@ -441,8 +442,13 @@
           code = frame->LookupDartCode();
           EXPECT(code.function() == Function::null());
         } else if (frame->IsDartFrame()) {
-          code = frame->LookupDartCode();
-          EXPECT(code.function() != Function::null());
+          if (frame->is_interpreted()) {
+            bytecode = frame->LookupDartBytecode();
+            EXPECT(bytecode.function() != Function::null());
+          } else {
+            code = frame->LookupDartCode();
+            EXPECT(code.function() != Function::null());
+          }
         }
         frame = frames.NextFrame();
       }
diff --git a/runtime/vm/clustered_snapshot.cc b/runtime/vm/clustered_snapshot.cc
index 77e083f..68fc310 100644
--- a/runtime/vm/clustered_snapshot.cc
+++ b/runtime/vm/clustered_snapshot.cc
@@ -546,6 +546,7 @@
   ~FunctionSerializationCluster() {}
 
   void Trace(Serializer* s, RawObject* object) {
+    Snapshot::Kind kind = s->kind();
     RawFunction* func = Function::RawCast(object);
     objects_.Add(func);
 
@@ -554,9 +555,11 @@
     for (RawObject** p = from; p <= to; p++) {
       s->Push(*p);
     }
-    if (s->kind() == Snapshot::kFullAOT) {
+    if (kind == Snapshot::kFull) {
+      NOT_IN_PRECOMPILED(s->Push(func->ptr()->bytecode_));
+    } else if (kind == Snapshot::kFullAOT) {
       s->Push(func->ptr()->code_);
-    } else if (s->kind() == Snapshot::kFullJIT) {
+    } else if (kind == Snapshot::kFullJIT) {
       NOT_IN_PRECOMPILED(s->Push(func->ptr()->unoptimized_code_));
       NOT_IN_PRECOMPILED(s->Push(func->ptr()->bytecode_));
       s->Push(func->ptr()->code_);
@@ -584,7 +587,9 @@
       for (RawObject** p = from; p <= to; p++) {
         s->WriteRef(*p);
       }
-      if (kind == Snapshot::kFullAOT) {
+      if (kind == Snapshot::kFull) {
+        NOT_IN_PRECOMPILED(s->WriteRef(func->ptr()->bytecode_));
+      } else if (kind == Snapshot::kFullAOT) {
         s->WriteRef(func->ptr()->code_);
       } else if (s->kind() == Snapshot::kFullJIT) {
         NOT_IN_PRECOMPILED(s->WriteRef(func->ptr()->unoptimized_code_));
@@ -643,13 +648,16 @@
         *p = Object::null();
       }
 
-      if (kind == Snapshot::kFullAOT) {
+      if (kind == Snapshot::kFull) {
+        NOT_IN_PRECOMPILED(func->ptr()->bytecode_ =
+                               reinterpret_cast<RawBytecode*>(d->ReadRef()));
+      } else if (kind == Snapshot::kFullAOT) {
         func->ptr()->code_ = reinterpret_cast<RawCode*>(d->ReadRef());
       } else if (kind == Snapshot::kFullJIT) {
         NOT_IN_PRECOMPILED(func->ptr()->unoptimized_code_ =
                                reinterpret_cast<RawCode*>(d->ReadRef()));
         NOT_IN_PRECOMPILED(func->ptr()->bytecode_ =
-                               reinterpret_cast<RawCode*>(d->ReadRef()));
+                               reinterpret_cast<RawBytecode*>(d->ReadRef()));
         func->ptr()->code_ = reinterpret_cast<RawCode*>(d->ReadRef());
         func->ptr()->ic_data_array_ = reinterpret_cast<RawArray*>(d->ReadRef());
       }
@@ -1583,7 +1591,7 @@
       }
 
       s->WriteInstructions(code->ptr()->instructions_, code);
-      if (s->kind() == Snapshot::kFullJIT) {
+      if (kind == Snapshot::kFullJIT) {
         // TODO(rmacnak): Fix references to disabled code before serializing.
         // For now, we may write the FixCallersTarget or equivalent stub. This
         // will cause a fixup if this code is called.
@@ -1607,7 +1615,7 @@
         s->WriteRef(code->ptr()->inlined_id_to_function_);
         s->WriteRef(code->ptr()->code_source_map_);
       }
-      if (s->kind() == Snapshot::kFullJIT) {
+      if (kind == Snapshot::kFullJIT) {
         s->WriteRef(code->ptr()->deopt_info_array_);
         s->WriteRef(code->ptr()->static_calls_target_table_);
       }
@@ -1712,6 +1720,81 @@
 };
 
 #if !defined(DART_PRECOMPILED_RUNTIME)
+class BytecodeSerializationCluster : public SerializationCluster {
+ public:
+  BytecodeSerializationCluster() : SerializationCluster("Bytecode") {}
+  virtual ~BytecodeSerializationCluster() {}
+
+  void Trace(Serializer* s, RawObject* object) {
+    RawBytecode* bytecode = Bytecode::RawCast(object);
+    objects_.Add(bytecode);
+
+    RawObject** from = bytecode->from();
+    RawObject** to = bytecode->to();
+    for (RawObject** p = from; p <= to; p++) {
+      s->Push(*p);
+    }
+  }
+
+  void WriteAlloc(Serializer* s) {
+    s->WriteCid(kBytecodeCid);
+    intptr_t count = objects_.length();
+    s->WriteUnsigned(count);
+    for (intptr_t i = 0; i < count; i++) {
+      RawBytecode* bytecode = objects_[i];
+      s->AssignRef(bytecode);
+    }
+  }
+
+  void WriteFill(Serializer* s) {
+    ASSERT(s->kind() == Snapshot::kFullJIT);
+    intptr_t count = objects_.length();
+    for (intptr_t i = 0; i < count; i++) {
+      RawBytecode* bytecode = objects_[i];
+      RawObject** from = bytecode->from();
+      RawObject** to = bytecode->to();
+      for (RawObject** p = from; p <= to; p++) {
+        s->WriteRef(*p);
+      }
+    }
+  }
+
+ private:
+  GrowableArray<RawBytecode*> objects_;
+};
+
+class BytecodeDeserializationCluster : public DeserializationCluster {
+ public:
+  BytecodeDeserializationCluster() {}
+  virtual ~BytecodeDeserializationCluster() {}
+
+  void ReadAlloc(Deserializer* d) {
+    start_index_ = d->next_index();
+    PageSpace* old_space = d->heap()->old_space();
+    intptr_t count = d->ReadUnsigned();
+    for (intptr_t i = 0; i < count; i++) {
+      d->AssignRef(AllocateUninitialized(old_space, Bytecode::InstanceSize()));
+    }
+    stop_index_ = d->next_index();
+  }
+
+  void ReadFill(Deserializer* d) {
+    ASSERT(d->kind() == Snapshot::kFullJIT);
+    bool is_vm_object = d->isolate() == Dart::vm_isolate();
+
+    for (intptr_t id = start_index_; id < stop_index_; id++) {
+      RawBytecode* bytecode = reinterpret_cast<RawBytecode*>(d->Ref(id));
+      Deserializer::InitializeHeader(bytecode, kBytecodeCid,
+                                     Bytecode::InstanceSize(), is_vm_object);
+      RawObject** from = bytecode->from();
+      RawObject** to = bytecode->to();
+      for (RawObject** p = from; p <= to; p++) {
+        *p = d->ReadRef();
+      }
+    }
+  }
+};
+
 class ObjectPoolSerializationCluster : public SerializationCluster {
  public:
   ObjectPoolSerializationCluster() : SerializationCluster("ObjectPool") {}
@@ -4585,6 +4668,10 @@
       return new (Z) KernelProgramInfoSerializationCluster();
     case kCodeCid:
       return new (Z) CodeSerializationCluster();
+#if !defined(DART_PRECOMPILED_RUNTIME)
+    case kBytecodeCid:
+      return new (Z) BytecodeSerializationCluster();
+#endif  // !DART_PRECOMPILED_RUNTIME
     case kObjectPoolCid:
       return new (Z) ObjectPoolSerializationCluster();
     case kPcDescriptorsCid:
@@ -4715,6 +4802,11 @@
   if (object->IsCode() && !Snapshot::IncludesCode(kind_)) {
     return;  // Do not trace, will write null.
   }
+#if !defined(DART_PRECOMPILED_RUNTIME)
+  if (object->IsBytecode() && !Snapshot::IncludesBytecode(kind_)) {
+    return;  // Do not trace, will write null.
+  }
+#endif  // !DART_PRECOMPILED_RUNTIME
 
   if (object->IsSendPort()) {
     // TODO(rmacnak): Do a better job of resetting fields in precompilation
@@ -5110,6 +5202,10 @@
 #endif  // !DART_PRECOMPILED_RUNTIME
     case kCodeCid:
       return new (Z) CodeDeserializationCluster();
+#if !defined(DART_PRECOMPILED_RUNTIME)
+    case kBytecodeCid:
+      return new (Z) BytecodeDeserializationCluster();
+#endif  // !DART_PRECOMPILED_RUNTIME
     case kObjectPoolCid:
       return new (Z) ObjectPoolDeserializationCluster();
     case kPcDescriptorsCid:
diff --git a/runtime/vm/clustered_snapshot.h b/runtime/vm/clustered_snapshot.h
index 77c285c..1f64d11 100644
--- a/runtime/vm/clustered_snapshot.h
+++ b/runtime/vm/clustered_snapshot.h
@@ -234,6 +234,12 @@
         WriteRef(Object::null());
         return;
       }
+#if !defined(DART_PRECOMPILED_RUNTIME)
+      if (object->IsBytecode() && !Snapshot::IncludesBytecode(kind_)) {
+        WriteRef(Object::null());
+        return;
+      }
+#endif  // !DART_PRECOMPILED_RUNTIME
       if (object->IsSendPort()) {
         // TODO(rmacnak): Do a better job of resetting fields in precompilation
         // and assert this is unreachable.
diff --git a/runtime/vm/code_patcher.h b/runtime/vm/code_patcher.h
index 82668c5..8b69986 100644
--- a/runtime/vm/code_patcher.h
+++ b/runtime/vm/code_patcher.h
@@ -113,11 +113,11 @@
 class KBCPatcher : public AllStatic {
  public:
   static NativeFunctionWrapper GetNativeCallAt(uword return_address,
-                                               const Code& bytecode,
+                                               const Bytecode& bytecode,
                                                NativeFunction* function);
 
   static void PatchNativeCallAt(uword return_address,
-                                const Code& bytecode,
+                                const Bytecode& bytecode,
                                 NativeFunction function,
                                 NativeFunctionWrapper trampoline);
 };
diff --git a/runtime/vm/code_patcher_kbc.cc b/runtime/vm/code_patcher_kbc.cc
index de7cac4..2dd74bd 100644
--- a/runtime/vm/code_patcher_kbc.cc
+++ b/runtime/vm/code_patcher_kbc.cc
@@ -13,7 +13,7 @@
 namespace dart {
 
 void KBCPatcher::PatchNativeCallAt(uword return_address,
-                                   const Code& bytecode,
+                                   const Bytecode& bytecode,
                                    NativeFunction function,
                                    NativeFunctionWrapper trampoline) {
   ASSERT(bytecode.ContainsInstructionAt(return_address));
@@ -24,7 +24,7 @@
 }
 
 NativeFunctionWrapper KBCPatcher::GetNativeCallAt(uword return_address,
-                                                  const Code& bytecode,
+                                                  const Bytecode& bytecode,
                                                   NativeFunction* function) {
   ASSERT(bytecode.ContainsInstructionAt(return_address));
   NativeEntryData native_entry_data(TypedData::Handle(
diff --git a/runtime/vm/compiler/assembler/disassembler.cc b/runtime/vm/compiler/assembler/disassembler.cc
index f1c9363..11138e6 100644
--- a/runtime/vm/compiler/assembler/disassembler.cc
+++ b/runtime/vm/compiler/assembler/disassembler.cc
@@ -21,8 +21,7 @@
 DECLARE_FLAG(bool, trace_inlining_intervals);
 DEFINE_FLAG(bool, trace_source_positions, false, "Source position diagnostics");
 
-void DisassembleToStdout::ConsumeInstruction(const Code& code,
-                                             char* hex_buffer,
+void DisassembleToStdout::ConsumeInstruction(char* hex_buffer,
                                              intptr_t hex_size,
                                              char* human_buffer,
                                              intptr_t human_size,
@@ -51,8 +50,7 @@
   va_end(args);
 }
 
-void DisassembleToJSONStream::ConsumeInstruction(const Code& code,
-                                                 char* hex_buffer,
+void DisassembleToJSONStream::ConsumeInstruction(char* hex_buffer,
                                                  intptr_t hex_size,
                                                  char* human_buffer,
                                                  intptr_t human_size,
@@ -97,8 +95,7 @@
   free(p);
 }
 
-void DisassembleToMemory::ConsumeInstruction(const Code& code,
-                                             char* hex_buffer,
+void DisassembleToMemory::ConsumeInstruction(char* hex_buffer,
                                              intptr_t hex_size,
                                              char* human_buffer,
                                              intptr_t human_size,
@@ -203,9 +200,8 @@
     DecodeInstruction(hex_buffer, sizeof(hex_buffer), human_buffer,
                       sizeof(human_buffer), &instruction_length, code, &object,
                       pc);
-    formatter->ConsumeInstruction(code, hex_buffer, sizeof(hex_buffer),
-                                  human_buffer, sizeof(human_buffer), object,
-                                  pc);
+    formatter->ConsumeInstruction(hex_buffer, sizeof(hex_buffer), human_buffer,
+                                  sizeof(human_buffer), object, pc);
     pc += instruction_length;
   }
 }
diff --git a/runtime/vm/compiler/assembler/disassembler.h b/runtime/vm/compiler/assembler/disassembler.h
index 071cd14..f23f08c 100644
--- a/runtime/vm/compiler/assembler/disassembler.h
+++ b/runtime/vm/compiler/assembler/disassembler.h
@@ -24,8 +24,7 @@
   virtual ~DisassemblyFormatter() {}
 
   // Consume the decoded instruction at the given pc.
-  virtual void ConsumeInstruction(const Code& code,
-                                  char* hex_buffer,
+  virtual void ConsumeInstruction(char* hex_buffer,
                                   intptr_t hex_size,
                                   char* human_buffer,
                                   intptr_t human_size,
@@ -43,8 +42,7 @@
   DisassembleToStdout() : DisassemblyFormatter() {}
   ~DisassembleToStdout() {}
 
-  virtual void ConsumeInstruction(const Code& code,
-                                  char* hex_buffer,
+  virtual void ConsumeInstruction(char* hex_buffer,
                                   intptr_t hex_size,
                                   char* human_buffer,
                                   intptr_t human_size,
@@ -65,8 +63,7 @@
       : DisassemblyFormatter(), jsarr_(jsarr) {}
   ~DisassembleToJSONStream() {}
 
-  virtual void ConsumeInstruction(const Code& code,
-                                  char* hex_buffer,
+  virtual void ConsumeInstruction(char* hex_buffer,
                                   intptr_t hex_size,
                                   char* human_buffer,
                                   intptr_t human_size,
@@ -93,8 +90,7 @@
         overflowed_(false) {}
   ~DisassembleToMemory() {}
 
-  virtual void ConsumeInstruction(const Code& code,
-                                  char* hex_buffer,
+  virtual void ConsumeInstruction(char* hex_buffer,
                                   intptr_t hex_size,
                                   char* human_buffer,
                                   intptr_t human_size,
diff --git a/runtime/vm/compiler/assembler/disassembler_kbc.cc b/runtime/vm/compiler/assembler/disassembler_kbc.cc
index a536dcd..9102f84 100644
--- a/runtime/vm/compiler/assembler/disassembler_kbc.cc
+++ b/runtime/vm/compiler/assembler/disassembler_kbc.cc
@@ -246,7 +246,7 @@
                                                    char* human_buffer,
                                                    intptr_t human_size,
                                                    int* out_instr_size,
-                                                   const Code& bytecode,
+                                                   const Bytecode& bytecode,
                                                    Object** object,
                                                    uword pc) {
   const uint32_t instr = *reinterpret_cast<uint32_t*>(pc);
@@ -277,57 +277,22 @@
 void KernelBytecodeDisassembler::Disassemble(uword start,
                                              uword end,
                                              DisassemblyFormatter* formatter,
-                                             const Code& bytecode) {
+                                             const Bytecode& bytecode) {
 #if !defined(PRODUCT)
-  const Code::Comments& comments =
-      bytecode.IsNull() ? Code::Comments::New(0) : bytecode.comments();
   ASSERT(formatter != NULL);
   char hex_buffer[kHexadecimalBufferSize];  // Instruction in hexadecimal form.
   char human_buffer[kUserReadableBufferSize];  // Human-readable instruction.
   uword pc = start;
-  intptr_t comment_finger = 0;
   GrowableArray<const Function*> inlined_functions;
   GrowableArray<TokenPosition> token_positions;
   while (pc < end) {
-    const intptr_t offset = pc - start;
-    const intptr_t old_comment_finger = comment_finger;
-    while (comment_finger < comments.Length() &&
-           comments.PCOffsetAt(comment_finger) <= offset) {
-      formatter->Print(
-          "        ;; %s\n",
-          String::Handle(comments.CommentAt(comment_finger)).ToCString());
-      comment_finger++;
-    }
-    if (old_comment_finger != comment_finger) {
-      char str[4000];
-      BufferFormatter f(str, sizeof(str));
-      // Comment emitted, emit inlining information.
-      bytecode.GetInlinedFunctionsAtInstruction(offset, &inlined_functions,
-                                                &token_positions);
-      // Skip top scope function printing (last entry in 'inlined_functions').
-      bool first = true;
-      for (intptr_t i = 1; i < inlined_functions.length(); i++) {
-        const char* name = inlined_functions[i]->ToQualifiedCString();
-        if (first) {
-          f.Print("        ;; Inlined [%s", name);
-          first = false;
-        } else {
-          f.Print(" -> %s", name);
-        }
-      }
-      if (!first) {
-        f.Print("]\n");
-        formatter->Print("%s", str);
-      }
-    }
     int instruction_length;
     Object* object;
     DecodeInstruction(hex_buffer, sizeof(hex_buffer), human_buffer,
                       sizeof(human_buffer), &instruction_length, bytecode,
                       &object, pc);
-    formatter->ConsumeInstruction(bytecode, hex_buffer, sizeof(hex_buffer),
-                                  human_buffer, sizeof(human_buffer), object,
-                                  pc);
+    formatter->ConsumeInstruction(hex_buffer, sizeof(hex_buffer), human_buffer,
+                                  sizeof(human_buffer), object, pc);
     pc += instruction_length;
   }
 #else
@@ -340,17 +305,16 @@
   ASSERT(function.HasBytecode());
   const char* function_fullname = function.ToFullyQualifiedCString();
   Zone* zone = Thread::Current()->zone();
-  const Code& bytecode = Code::Handle(zone, function.Bytecode());
+  const Bytecode& bytecode = Bytecode::Handle(zone, function.bytecode());
   THR_Print("Bytecode for function '%s' {\n", function_fullname);
-  const Instructions& instr = Instructions::Handle(bytecode.instructions());
-  uword start = instr.PayloadStart();
+  uword start = bytecode.PayloadStart();
   DisassembleToStdout stdout_formatter;
   LogBlock lb;
-  Disassemble(start, start + instr.Size(), &stdout_formatter, bytecode);
+  Disassemble(start, start + bytecode.Size(), &stdout_formatter, bytecode);
   THR_Print("}\n");
 
   const ObjectPool& object_pool =
-      ObjectPool::Handle(zone, bytecode.GetObjectPool());
+      ObjectPool::Handle(zone, bytecode.object_pool());
   object_pool.DebugPrint();
 
   THR_Print("PC Descriptors for function '%s' {\n", function_fullname);
diff --git a/runtime/vm/compiler/assembler/disassembler_kbc.h b/runtime/vm/compiler/assembler/disassembler_kbc.h
index 3512779..89bbc18 100644
--- a/runtime/vm/compiler/assembler/disassembler_kbc.h
+++ b/runtime/vm/compiler/assembler/disassembler_kbc.h
@@ -21,15 +21,15 @@
   static void Disassemble(uword start,
                           uword end,
                           DisassemblyFormatter* formatter,
-                          const Code& bytecode);
+                          const Bytecode& bytecode);
 
   static void Disassemble(uword start,
                           uword end,
                           DisassemblyFormatter* formatter) {
-    Disassemble(start, end, formatter, Code::Handle());
+    Disassemble(start, end, formatter, Bytecode::Handle());
   }
 
-  static void Disassemble(uword start, uword end, const Code& bytecode) {
+  static void Disassemble(uword start, uword end, const Bytecode& bytecode) {
 #if !defined(PRODUCT)
     DisassembleToStdout stdout_formatter;
     LogBlock lb;
@@ -71,7 +71,7 @@
                                 char* human_buffer,
                                 intptr_t human_size,
                                 int* out_instr_len,
-                                const Code& bytecode,
+                                const Bytecode& bytecode,
                                 Object** object,
                                 uword pc);
 
diff --git a/runtime/vm/compiler/frontend/bytecode_flow_graph_builder.cc b/runtime/vm/compiler/frontend/bytecode_flow_graph_builder.cc
index c4202de..5692014 100644
--- a/runtime/vm/compiler/frontend/bytecode_flow_graph_builder.cc
+++ b/runtime/vm/compiler/frontend/bytecode_flow_graph_builder.cc
@@ -1452,10 +1452,10 @@
   // Use default flow graph builder for native methods.
   ASSERT(!function().is_native());
 
-  const Code& bytecode = Code::Handle(Z, function().Bytecode());
+  const Bytecode& bytecode = Bytecode::Handle(Z, function().bytecode());
 
   object_pool_ = bytecode.object_pool();
-  raw_bytecode_ = reinterpret_cast<KBCInstr*>(bytecode.EntryPoint());
+  raw_bytecode_ = reinterpret_cast<KBCInstr*>(bytecode.PayloadStart());
   bytecode_length_ = bytecode.Size() / sizeof(KBCInstr);
 
   ProcessICDataInObjectPool(object_pool_);
diff --git a/runtime/vm/compiler/frontend/bytecode_reader.cc b/runtime/vm/compiler/frontend/bytecode_reader.cc
index 0f80fbc..d2fdfee 100644
--- a/runtime/vm/compiler/frontend/bytecode_reader.cc
+++ b/runtime/vm/compiler/frontend/bytecode_reader.cc
@@ -100,7 +100,8 @@
   }
 
   // Read bytecode and attach to function.
-  const Code& bytecode = Code::Handle(helper_->zone_, ReadBytecode(pool));
+  const Bytecode& bytecode =
+      Bytecode::Handle(helper_->zone_, ReadBytecode(pool));
   function.AttachBytecode(bytecode);
 
   ReadExceptionsTable(bytecode, has_exceptions_table);
@@ -133,7 +134,7 @@
   // Read closures.
   if (has_closures) {
     Function& closure = Function::Handle(helper_->zone_);
-    Code& closure_bytecode = Code::Handle(helper_->zone_);
+    Bytecode& closure_bytecode = Bytecode::Handle(helper_->zone_);
     const intptr_t num_closures = helper_->ReadListLength();
     for (intptr_t i = 0; i < num_closures; i++) {
       intptr_t closure_index = helper_->ReadUInt();
@@ -637,7 +638,7 @@
   return obj_count - 1;
 }
 
-RawCode* BytecodeMetadataHelper::ReadBytecode(const ObjectPool& pool) {
+RawBytecode* BytecodeMetadataHelper::ReadBytecode(const ObjectPool& pool) {
 #if !defined(PRODUCT)
   TimelineDurationScope tds(Thread::Current(), Timeline::GetCompilerStream(),
                             "BytecodeMetadataHelper::ReadBytecode");
@@ -650,12 +651,16 @@
   ASSERT(Utils::IsAligned(data, sizeof(KBCInstr)));
   helper_->reader_.set_offset(offset + size);
 
-  // Create and return code object.
-  return Code::FinalizeBytecode(reinterpret_cast<const void*>(data), size,
-                                pool);
+  const ExternalTypedData& instructions = ExternalTypedData::Handle(
+      helper_->zone_,
+      ExternalTypedData::New(kExternalTypedDataInt8ArrayCid,
+                             const_cast<uint8_t*>(data), size, Heap::kOld));
+
+  // Create and return bytecode object.
+  return Bytecode::New(instructions, pool);
 }
 
-void BytecodeMetadataHelper::ReadExceptionsTable(const Code& bytecode,
+void BytecodeMetadataHelper::ReadExceptionsTable(const Bytecode& bytecode,
                                                  bool has_exceptions_table) {
 #if !defined(PRODUCT)
   TimelineDurationScope tds(Thread::Current(), Timeline::GetCompilerStream(),
@@ -725,7 +730,7 @@
   }
 }
 
-void BytecodeMetadataHelper::ReadSourcePositions(const Code& bytecode,
+void BytecodeMetadataHelper::ReadSourcePositions(const Bytecode& bytecode,
                                                  bool has_source_positions) {
   if (!has_source_positions) {
     return;
diff --git a/runtime/vm/compiler/frontend/bytecode_reader.h b/runtime/vm/compiler/frontend/bytecode_reader.h
index 0e9d730..8dbc003 100644
--- a/runtime/vm/compiler/frontend/bytecode_reader.h
+++ b/runtime/vm/compiler/frontend/bytecode_reader.h
@@ -32,9 +32,9 @@
                            const Function& inner_function,
                            const ObjectPool& pool,
                            intptr_t from_index);
-  RawCode* ReadBytecode(const ObjectPool& pool);
-  void ReadExceptionsTable(const Code& bytecode, bool has_exceptions_table);
-  void ReadSourcePositions(const Code& bytecode, bool has_source_positions);
+  RawBytecode* ReadBytecode(const ObjectPool& pool);
+  void ReadExceptionsTable(const Bytecode& bytecode, bool has_exceptions_table);
+  void ReadSourcePositions(const Bytecode& bytecode, bool has_source_positions);
   RawTypedData* NativeEntry(const Function& function,
                             const String& external_name);
 
diff --git a/runtime/vm/compiler/jit/compiler.cc b/runtime/vm/compiler/jit/compiler.cc
index e2cb503..972285d 100644
--- a/runtime/vm/compiler/jit/compiler.cc
+++ b/runtime/vm/compiler/jit/compiler.cc
@@ -1263,12 +1263,6 @@
 void Compiler::ComputeLocalVarDescriptors(const Code& code) {
   ASSERT(!code.is_optimized());
   const Function& function = Function::Handle(code.function());
-  if (FLAG_enable_interpreter && function.Bytecode() == code.raw()) {
-    // TODO(regis): Kernel bytecode does not yet provide var descriptors.
-    ASSERT(code.var_descriptors() == Object::null());
-    code.set_var_descriptors(Object::empty_var_descriptors());
-    return;
-  }
   ParsedFunction* parsed_function = new ParsedFunction(
       Thread::Current(), Function::ZoneHandle(function.raw()));
   ASSERT(code.var_descriptors() == Object::null());
diff --git a/runtime/vm/debugger.cc b/runtime/vm/debugger.cc
index bac41d9..6823611 100644
--- a/runtime/vm/debugger.cc
+++ b/runtime/vm/debugger.cc
@@ -259,17 +259,11 @@
       deopt_frame_(Array::ZoneHandle(deopt_frame.raw())),
       deopt_frame_offset_(deopt_frame_offset),
       kind_(kind),
-#if !defined(DART_PRECOMPILED_RUNTIME)
-      is_interpreted_(FLAG_enable_interpreter &&
-                      function_.Bytecode() == code_.raw()),
-#else
-      is_interpreted_(false),
-#endif  // !defined(DART_PRECOMPILED_RUNTIME)
+      is_interpreted_(false),  // TODO(regis): No bytecode debugging support.
       vars_initialized_(false),
       var_descriptors_(LocalVarDescriptors::ZoneHandle()),
       desc_indices_(8),
-      pc_desc_(PcDescriptors::ZoneHandle()) {
-}
+      pc_desc_(PcDescriptors::ZoneHandle()) {}
 
 ActivationFrame::ActivationFrame(Kind kind)
     : pc_(0),
@@ -318,22 +312,8 @@
       pc_desc_(PcDescriptors::ZoneHandle()) {
   // Extract the function and the code from the asynchronous activation.
   function_ = async_activation.function();
-#if !defined(DART_PRECOMPILED_RUNTIME)
-  // TODO(regis): Revise debugger functionality when running a mix of
-  // interpreted and compiled code.
-  if (!FLAG_enable_interpreter || !function_.HasBytecode()) {
-    function_.EnsureHasCompiledUnoptimizedCode();
-  }
-  if (FLAG_enable_interpreter && function_.HasBytecode()) {
-    is_interpreted_ = true;
-    code_ = function_.Bytecode();
-  } else {
-    code_ = function_.unoptimized_code();
-  }
-#else
   function_.EnsureHasCompiledUnoptimizedCode();
   code_ = function_.unoptimized_code();
-#endif  // !defined(DART_PRECOMPILED_RUNTIME)
   ctx_ = async_activation.context();
   ASSERT(fp_ == 0);
   ASSERT(!ctx_.IsNull());
@@ -1949,6 +1929,12 @@
                    frame->ToCString());
     }
     if (frame->IsDartFrame()) {
+      if (frame->is_interpreted()) {
+        // TODO(regis): Support debugging of interpreted frames.
+        // For now, do not abort, but skip the frame, as this code is run
+        // while handling a stack overflow. See HandleStackOverflowTestCases.
+        continue;
+      }
       code = frame->LookupDartCode();
       AppendCodeFrames(thread, isolate, zone, stack_trace, frame, &code,
                        &inlined_code, &deopt_frame);
@@ -2030,6 +2016,10 @@
   while (synchronous_stack_trace_length > 0) {
     ASSERT(frame != NULL);
     if (frame->IsDartFrame()) {
+      if (frame->is_interpreted()) {
+        // TODO(regis): Support debugging of interpreted frames.
+        UNIMPLEMENTED();
+      }
       code = frame->LookupDartCode();
       AppendCodeFrames(thread, isolate, zone, stack_trace, frame, &code,
                        &inlined_code, &deopt_frame);
@@ -2125,6 +2115,10 @@
                    frame->ToCString());
     }
     if (frame->IsDartFrame()) {
+      if (frame->is_interpreted()) {
+        // TODO(regis): Support debugging of interpreted frames.
+        UNIMPLEMENTED();
+      }
       code = frame->LookupDartCode();
       if (code.is_optimized()) {
         deopt_frame = DeoptimizeToArray(thread, frame, code);
@@ -2294,6 +2288,12 @@
   while ((frame != NULL) && !frame->IsDartFrame()) {
     frame = iterator.NextFrame();
   }
+  ASSERT(frame != NULL);
+  if (frame->is_interpreted()) {
+    // TODO(regis): Support debugging of interpreted frames.
+    UNIMPLEMENTED();
+    return NULL;
+  }
   Code& code = Code::Handle(frame->LookupDartCode());
   ActivationFrame* activation = new ActivationFrame(
       frame->pc(), frame->fp(), frame->sp(), code, Object::null_array(), 0);
@@ -2329,6 +2329,7 @@
 DebuggerStackTrace* Debugger::StackTraceFrom(const class StackTrace& ex_trace) {
   DebuggerStackTrace* stack_trace = new DebuggerStackTrace(8);
   Function& function = Function::Handle();
+  Object& code_object = Object::Handle();
   Code& code = Code::Handle();
 
   const uword fp = 0;
@@ -2337,17 +2338,21 @@
   const intptr_t deopt_frame_offset = -1;
 
   for (intptr_t i = 0; i < ex_trace.Length(); i++) {
-    code = ex_trace.CodeAtFrame(i);
+    code_object = ex_trace.CodeAtFrame(i);
     // Pre-allocated StackTraces may include empty slots, either (a) to indicate
     // where frames were omitted in the case a stack has more frames than the
     // pre-allocated trace (such as a stack overflow) or (b) because a stack has
    // fewer frames than the pre-allocated trace (such as memory exhaustion with
     // a shallow stack).
-    if (!code.IsNull()) {
+    if (!code_object.IsNull()) {
+      if (code_object.IsBytecode()) {
+        // TODO(regis): Support debugging of interpreted frames.
+        UNIMPLEMENTED();
+      }
+      code ^= code_object.raw();
       ASSERT(code.IsFunctionCode());
       function = code.function();
       if (function.is_visible()) {
-        code = ex_trace.CodeAtFrame(i);
         ASSERT(function.raw() == code.function());
         uword pc =
             code.PayloadStart() + Smi::Value(ex_trace.PcOffsetAtFrame(i));
diff --git a/runtime/vm/deopt_instructions.cc b/runtime/vm/deopt_instructions.cc
index ec35ebc8..7d082c7 100644
--- a/runtime/vm/deopt_instructions.cc
+++ b/runtime/vm/deopt_instructions.cc
@@ -406,6 +406,7 @@
                                StackFrameIterator::kNoCrossThreadIteration);
     StackFrame* top_frame = iterator.NextFrame();
     ASSERT(top_frame != NULL);
+    ASSERT(!top_frame->is_interpreted());
     const Code& code = Code::Handle(top_frame->LookupDartCode());
     const Function& top_function = Function::Handle(code.function());
     const Script& script = Script::Handle(top_function.script());
diff --git a/runtime/vm/exceptions.cc b/runtime/vm/exceptions.cc
index 06f5a47..123870a 100644
--- a/runtime/vm/exceptions.cc
+++ b/runtime/vm/exceptions.cc
@@ -37,7 +37,7 @@
   StackTraceBuilder() {}
   virtual ~StackTraceBuilder() {}
 
-  virtual void AddFrame(const Code& code, const Smi& offset) = 0;
+  virtual void AddFrame(const Object& code, const Smi& offset) = 0;
 };
 
 class RegularStackTraceBuilder : public StackTraceBuilder {
@@ -52,7 +52,7 @@
   const GrowableObjectArray& code_list() const { return code_list_; }
   const GrowableObjectArray& pc_offset_list() const { return pc_offset_list_; }
 
-  virtual void AddFrame(const Code& code, const Smi& offset) {
+  virtual void AddFrame(const Object& code, const Smi& offset) {
     code_list_.Add(code);
     pc_offset_list_.Add(offset);
   }
@@ -75,7 +75,7 @@
   }
   ~PreallocatedStackTraceBuilder() {}
 
-  virtual void AddFrame(const Code& code, const Smi& offset);
+  virtual void AddFrame(const Object& code, const Smi& offset);
 
  private:
   static const int kNumTopframes = StackTrace::kPreallocatedStackdepth / 2;
@@ -87,11 +87,11 @@
   DISALLOW_COPY_AND_ASSIGN(PreallocatedStackTraceBuilder);
 };
 
-void PreallocatedStackTraceBuilder::AddFrame(const Code& code,
+void PreallocatedStackTraceBuilder::AddFrame(const Object& code,
                                              const Smi& offset) {
   if (cur_index_ >= StackTrace::kPreallocatedStackdepth) {
     // The number of frames is overflowing the preallocated stack trace object.
-    Code& frame_code = Code::Handle();
+    Object& frame_code = Object::Handle();
     Smi& frame_offset = Smi::Handle();
     intptr_t start = StackTrace::kPreallocatedStackdepth - (kNumTopframes - 1);
     intptr_t null_slot = start - 2;
@@ -129,13 +129,21 @@
   StackFrame* frame = frames.NextFrame();
   ASSERT(frame != NULL);  // We expect to find a dart invocation frame.
   Code& code = Code::Handle();
+  Bytecode& bytecode = Bytecode::Handle();
   Smi& offset = Smi::Handle();
   while (frame != NULL) {
     if (frame->IsDartFrame()) {
-      code = frame->LookupDartCode();
-      ASSERT(code.ContainsInstructionAt(frame->pc()));
-      offset = Smi::New(frame->pc() - code.PayloadStart());
-      builder->AddFrame(code, offset);
+      if (frame->is_interpreted()) {
+        bytecode = frame->LookupDartBytecode();
+        ASSERT(bytecode.ContainsInstructionAt(frame->pc()));
+        offset = Smi::New(frame->pc() - bytecode.PayloadStart());
+        builder->AddFrame(bytecode, offset);
+      } else {
+        code = frame->LookupDartCode();
+        ASSERT(code.ContainsInstructionAt(frame->pc()));
+        offset = Smi::New(frame->pc() - code.PayloadStart());
+        builder->AddFrame(code, offset);
+      }
     }
     frame = frames.NextFrame();
   }
diff --git a/runtime/vm/heap/weak_code.cc b/runtime/vm/heap/weak_code.cc
index c1a3d53..795eda1a 100644
--- a/runtime/vm/heap/weak_code.cc
+++ b/runtime/vm/heap/weak_code.cc
@@ -76,10 +76,12 @@
                                StackFrameIterator::kNoCrossThreadIteration);
     StackFrame* frame = iterator.NextFrame();
     while (frame != NULL) {
-      code = frame->LookupDartCode();
-      if (IsOptimizedCode(code_objects, code)) {
-        ReportDeoptimization(code);
-        DeoptimizeAt(code, frame);
+      if (!frame->is_interpreted()) {
+        code = frame->LookupDartCode();
+        if (IsOptimizedCode(code_objects, code)) {
+          ReportDeoptimization(code);
+          DeoptimizeAt(code, frame);
+        }
       }
       frame = iterator.NextFrame();
     }
diff --git a/runtime/vm/instructions_kbc.cc b/runtime/vm/instructions_kbc.cc
index e33ff4d..f6d9f61 100644
--- a/runtime/vm/instructions_kbc.cc
+++ b/runtime/vm/instructions_kbc.cc
@@ -13,17 +13,18 @@
 
 namespace dart {
 
-RawTypedData* KBCNativeCallPattern::GetNativeEntryDataAt(uword pc,
-                                                         const Code& bytecode) {
+RawTypedData* KBCNativeCallPattern::GetNativeEntryDataAt(
+    uword pc,
+    const Bytecode& bytecode) {
   ASSERT(bytecode.ContainsInstructionAt(pc));
   const uword call_pc = pc - sizeof(KBCInstr);
   KBCInstr call_instr = KernelBytecode::At(call_pc);
   ASSERT(KernelBytecode::DecodeOpcode(call_instr) ==
          KernelBytecode::kNativeCall);
   intptr_t native_entry_data_pool_index = KernelBytecode::DecodeD(call_instr);
-  const ObjectPool& object_pool = ObjectPool::Handle(bytecode.GetObjectPool());
+  const ObjectPool& obj_pool = ObjectPool::Handle(bytecode.object_pool());
   TypedData& native_entry_data = TypedData::Handle();
-  native_entry_data ^= object_pool.ObjectAt(native_entry_data_pool_index);
+  native_entry_data ^= obj_pool.ObjectAt(native_entry_data_pool_index);
   // Native calls to recognized functions should never be patched.
   ASSERT(NativeEntryData(native_entry_data).kind() ==
          MethodRecognizer::kUnknown);
diff --git a/runtime/vm/instructions_kbc.h b/runtime/vm/instructions_kbc.h
index 11f56fb..d210a28 100644
--- a/runtime/vm/instructions_kbc.h
+++ b/runtime/vm/instructions_kbc.h
@@ -15,7 +15,7 @@
 
 class KBCNativeCallPattern : public AllStatic {
  public:
-  static RawTypedData* GetNativeEntryDataAt(uword pc, const Code& bytecode);
+  static RawTypedData* GetNativeEntryDataAt(uword pc, const Bytecode& bytecode);
 };
 
 #endif  // !defined(DART_PRECOMPILED_RUNTIME)
diff --git a/runtime/vm/interpreter.cc b/runtime/vm/interpreter.cc
index f79d1c5..50b3846 100644
--- a/runtime/vm/interpreter.cc
+++ b/runtime/vm/interpreter.cc
@@ -467,14 +467,9 @@
     return true;
   }
 
-  DART_FORCE_INLINE static RawCode* FrameCode(RawObject** FP) {
-    ASSERT(GetClassId(FP[kKBCPcMarkerSlotFromFp]) == kCodeCid);
-    return static_cast<RawCode*>(FP[kKBCPcMarkerSlotFromFp]);
-  }
-
-  DART_FORCE_INLINE static void SetFrameCode(RawObject** FP, RawCode* code) {
-    ASSERT(GetClassId(code) == kCodeCid);
-    FP[kKBCPcMarkerSlotFromFp] = code;
+  DART_FORCE_INLINE static RawBytecode* FrameBytecode(RawObject** FP) {
+    ASSERT(GetClassId(FP[kKBCPcMarkerSlotFromFp]) == kBytecodeCid);
+    return static_cast<RawBytecode*>(FP[kKBCPcMarkerSlotFromFp]);
   }
 
   DART_FORCE_INLINE static uint8_t* GetTypedData(RawObject* obj,
@@ -616,7 +611,7 @@
                        RawObject** frame,
                        uint32_t* pc) {
   frame[0] = Function::null();
-  frame[1] = Code::null();
+  frame[1] = Bytecode::null();
   frame[2] = reinterpret_cast<RawObject*>(pc);
   frame[3] = reinterpret_cast<RawObject*>(base);
   fp_ = frame + kKBCDartFrameFixedSize;
@@ -643,24 +638,6 @@
   reinterpret_cast<RuntimeFunction>(target)(native_args);
 }
 
-DART_FORCE_INLINE static void EnterSyntheticFrame(RawObject*** FP,
-                                                  RawObject*** SP,
-                                                  uint32_t* pc) {
-  RawObject** fp = *SP + kKBCDartFrameFixedSize;
-  fp[kKBCPcMarkerSlotFromFp] = 0;
-  fp[kKBCSavedCallerPcSlotFromFp] = reinterpret_cast<RawObject*>(pc);
-  fp[kKBCSavedCallerFpSlotFromFp] = reinterpret_cast<RawObject*>(*FP);
-  *FP = fp;
-  *SP = fp - 1;
-}
-
-DART_FORCE_INLINE static void LeaveSyntheticFrame(RawObject*** FP,
-                                                  RawObject*** SP) {
-  RawObject** fp = *FP;
-  *FP = reinterpret_cast<RawObject**>(fp[kKBCSavedCallerFpSlotFromFp]);
-  *SP = fp - kKBCDartFrameFixedSize;
-}
-
 // Calling into runtime may trigger garbage collection and relocate objects,
 // so all RawObject* pointers become outdated and should not be used across
 // runtime calls.
@@ -743,7 +720,7 @@
   // Pop args and push result.
   *SP = call_base;
   **SP = result;
-  pp_ = InterpreterHelpers::FrameCode(*FP)->ptr()->object_pool_;
+  pp_ = InterpreterHelpers::FrameBytecode(*FP)->ptr()->object_pool_;
 
   // If the result is an error (not a Dart instance), it must either be rethrown
   // (in the case of an unhandled exception) or it must be returned to the
@@ -922,7 +899,7 @@
         // Reload objects after the call which may trigger GC.
         function = reinterpret_cast<RawFunction*>(call_top[0]);
         field = reinterpret_cast<RawField*>(function->ptr()->data_);
-        pp_ = InterpreterHelpers::FrameCode(*FP)->ptr()->object_pool_;
+        pp_ = InterpreterHelpers::FrameBytecode(*FP)->ptr()->object_pool_;
         // The field is initialized by the runtime call, but not returned.
         value = field->ptr()->value_.static_value_;
       }
@@ -1078,12 +1055,13 @@
               Function::Handle(function).ToFullyQualifiedCString());
   }
 #endif
-  RawCode* bytecode = function->ptr()->bytecode_;
+  RawBytecode* bytecode = function->ptr()->bytecode_;
   callee_fp[kKBCPcMarkerSlotFromFp] = bytecode;
   callee_fp[kKBCSavedCallerPcSlotFromFp] = reinterpret_cast<RawObject*>(*pc);
   callee_fp[kKBCSavedCallerFpSlotFromFp] = reinterpret_cast<RawObject*>(*FP);
   pp_ = bytecode->ptr()->object_pool_;
-  *pc = reinterpret_cast<uint32_t*>(bytecode->ptr()->entry_point_);
+  *pc =
+      reinterpret_cast<uint32_t*>(bytecode->ptr()->instructions_->ptr()->data_);
   pc_ = reinterpret_cast<uword>(*pc);  // For the profiler.
   *FP = callee_fp;
   fp_ = callee_fp;  // For the profiler.
@@ -1219,24 +1197,6 @@
   return Invoke(thread, call_base, top, pc, FP, SP);
 }
 
-DART_FORCE_INLINE void Interpreter::PrepareForTailCall(
-    RawCode* code,
-    RawImmutableArray* args_desc,
-    RawObject** FP,
-    RawObject*** SP,
-    uint32_t** pc) {
-  // Drop all stack locals.
-  *SP = FP - 1;
-
-  // Replace the callee with the new [code].
-  FP[kKBCFunctionSlotFromFp] = Object::null();
-  FP[kKBCPcMarkerSlotFromFp] = code;
-  *pc = reinterpret_cast<uint32_t*>(code->ptr()->entry_point_);
-  pc_ = reinterpret_cast<uword>(pc);  // For the profiler.
-  pp_ = code->ptr()->object_pool_;
-  argdesc_ = args_desc;
-}
-
 // Note:
 // All macro helpers are intended to be used only inside Interpreter::Call.
 
@@ -1371,7 +1331,7 @@
 
 #define HANDLE_RETURN                                                          \
   do {                                                                         \
-    pp_ = InterpreterHelpers::FrameCode(FP)->ptr()->object_pool_;              \
+    pp_ = InterpreterHelpers::FrameBytecode(FP)->ptr()->object_pool_;          \
     fp_ = FP; /* For the profiler. */                                          \
   } while (0)
 
@@ -1415,62 +1375,6 @@
   }                                                                            \
   ASSERT(Integer::GetInt64Value(RAW_CAST(Integer, SP[0])) == result);
 
-// Returns true if deoptimization succeeds.
-DART_FORCE_INLINE bool Interpreter::Deoptimize(Thread* thread,
-                                               uint32_t** pc,
-                                               RawObject*** FP,
-                                               RawObject*** SP,
-                                               bool is_lazy) {
-  // Note: frame translation will take care of preserving result at the
-  // top of the stack. See CompilerDeoptInfo::CreateDeoptInfo.
-
-  // Make sure we preserve SP[0] when entering synthetic frame below.
-  (*SP)++;
-
-  // Leaf runtime function DeoptimizeCopyFrame expects a Dart frame.
-  // The code in this frame may not cause GC.
-  // DeoptimizeCopyFrame and DeoptimizeFillFrame are leaf runtime calls.
-  EnterSyntheticFrame(FP, SP, *pc - (is_lazy ? 1 : 0));
-  const intptr_t frame_size_in_bytes =
-      DLRT_DeoptimizeCopyFrame(reinterpret_cast<uword>(*FP), is_lazy ? 1 : 0);
-  LeaveSyntheticFrame(FP, SP);
-
-  *SP = *FP + (frame_size_in_bytes / kWordSize);
-  EnterSyntheticFrame(FP, SP, *pc - (is_lazy ? 1 : 0));
-  DLRT_DeoptimizeFillFrame(reinterpret_cast<uword>(*FP));
-
-  // We are now inside a valid frame.
-  {
-    *++(*SP) = 0;  // Space for the result: number of materialization args.
-    Exit(thread, *FP, *SP + 1, /*pc=*/0);
-    NativeArguments native_args(thread, 0, *SP, *SP);
-    if (!InvokeRuntime(thread, this, DRT_DeoptimizeMaterialize, native_args)) {
-      return false;
-    }
-  }
-  const intptr_t materialization_arg_count =
-      Smi::Value(RAW_CAST(Smi, *(*SP)--)) / kWordSize;
-
-  // Restore caller PC.
-  *pc = SavedCallerPC(*FP);
-  pc_ = reinterpret_cast<uword>(*pc);  // For the profiler.
-
-  // Check if it is a fake PC marking the entry frame.
-  ASSERT(!IsEntryFrameMarker(reinterpret_cast<uword>(*pc)));
-
-  // Restore SP, FP and PP.
-  // Unoptimized frame SP is one below FrameArguments(...) because
-  // FrameArguments(...) returns a pointer to the first argument.
-  *SP = FrameArguments(*FP, materialization_arg_count) - 1;
-  *FP = SavedCallerFP(*FP);
-  fp_ = *FP;  // For the profiler.
-
-  // Restore pp.
-  pp_ = InterpreterHelpers::FrameCode(*FP)->ptr()->object_pool_;
-
-  return true;
-}
-
 bool Interpreter::AssertAssignable(Thread* thread,
                                    uint32_t* pc,
                                    RawObject** FP,
@@ -1674,7 +1578,7 @@
     fp_[kKBCEntrySavedSlots + i] = argv[argc < 0 ? -i : i];
   }
 
-  RawCode* bytecode = function->ptr()->bytecode_;
+  RawBytecode* bytecode = function->ptr()->bytecode_;
   FP[kKBCFunctionSlotFromFp] = function;
   FP[kKBCPcMarkerSlotFromFp] = bytecode;
   FP[kKBCSavedCallerPcSlotFromFp] =
@@ -1686,7 +1590,8 @@
 
   // Ready to start executing bytecode. Load entry point and corresponding
   // object pool.
-  pc = reinterpret_cast<uint32_t*>(bytecode->ptr()->entry_point_);
+  pc =
+      reinterpret_cast<uint32_t*>(bytecode->ptr()->instructions_->ptr()->data_);
   pc_ = reinterpret_cast<uword>(pc);  // For the profiler.
   fp_ = FP;                           // For the profiler.
   pp_ = bytecode->ptr()->object_pool_;
@@ -2372,7 +2277,7 @@
     SP = FrameArguments(FP, argc);
     FP = SavedCallerFP(FP);
     fp_ = FP;  // For the profiler.
-    pp_ = InterpreterHelpers::FrameCode(FP)->ptr()->object_pool_;
+    pp_ = InterpreterHelpers::FrameBytecode(FP)->ptr()->object_pool_;
     *SP = result;
     DISPATCH();
   }
@@ -2927,7 +2832,7 @@
     FP = SavedCallerFP(FP);
     fp_ = FP;  // For the profiler.
     if (has_dart_caller) {
-      pp_ = InterpreterHelpers::FrameCode(FP)->ptr()->object_pool_;
+      pp_ = InterpreterHelpers::FrameBytecode(FP)->ptr()->object_pool_;
     }
 
     *++SP = null_value;
@@ -2999,7 +2904,7 @@
   // Single dispatch point used by exception handling macros.
   {
   DispatchAfterException:
-    pp_ = InterpreterHelpers::FrameCode(FP)->ptr()->object_pool_;
+    pp_ = InterpreterHelpers::FrameBytecode(FP)->ptr()->object_pool_;
     DISPATCH();
   }
 
diff --git a/runtime/vm/interpreter.h b/runtime/vm/interpreter.h
index 24d66a8..5dc3dc4 100644
--- a/runtime/vm/interpreter.h
+++ b/runtime/vm/interpreter.h
@@ -152,12 +152,6 @@
                       RawObject*** FP,
                       RawObject*** SP);
 
-  bool Deoptimize(Thread* thread,
-                  uint32_t** pc,
-                  RawObject*** FP,
-                  RawObject*** SP,
-                  bool is_lazy);
-
   void InlineCacheMiss(int checked_args,
                        Thread* thread,
                        RawICData* icdata,
@@ -185,12 +179,6 @@
                      RawObject*** SP,
                      bool optimized);
 
-  void PrepareForTailCall(RawCode* code,
-                          RawImmutableArray* args_desc,
-                          RawObject** FP,
-                          RawObject*** SP,
-                          uint32_t** pc);
-
   bool AssertAssignable(Thread* thread,
                         uint32_t* pc,
                         RawObject** FP,
diff --git a/runtime/vm/interpreter_unsupported.cc b/runtime/vm/interpreter_unsupported.cc
index b12ba4e..757e9b4 100644
--- a/runtime/vm/interpreter_unsupported.cc
+++ b/runtime/vm/interpreter_unsupported.cc
@@ -128,23 +128,6 @@
   return false;
 }
 
-void Interpreter::PrepareForTailCall(RawCode* code,
-                                     RawImmutableArray* args_desc,
-                                     RawObject** FP,
-                                     RawObject*** SP,
-                                     uint32_t** pc) {
-  UNIMPLEMENTED();
-}
-
-bool Interpreter::Deoptimize(Thread* thread,
-                             uint32_t** pc,
-                             RawObject*** FP,
-                             RawObject*** SP,
-                             bool is_lazy) {
-  UNIMPLEMENTED();
-  return false;
-}
-
 bool Interpreter::AssertAssignable(Thread* thread,
                                    uint32_t* pc,
                                    RawObject** FP,
diff --git a/runtime/vm/isolate_reload.cc b/runtime/vm/isolate_reload.cc
index 9c8b082..1beb60b 100644
--- a/runtime/vm/isolate_reload.cc
+++ b/runtime/vm/isolate_reload.cc
@@ -895,12 +895,10 @@
   Function& func = Function::Handle();
   while (it.HasNextFrame()) {
     StackFrame* frame = it.NextFrame();
-    if (frame->IsDartFrame()) {
+    if (frame->IsDartFrame() && !frame->is_interpreted()) {
       func = frame->LookupDartFunction();
       ASSERT(!func.IsNull());
-      if (!frame->is_interpreted()) {
-        func.EnsureHasCompiledUnoptimizedCode();
-      }
+      func.EnsureHasCompiledUnoptimizedCode();
     }
   }
 }
@@ -1834,22 +1832,28 @@
   Zone* zone = stack_zone.GetZone();
 
   Code& code = Code::Handle(zone);
+  Bytecode& bytecode = Bytecode::Handle(zone);
   Function& function = Function::Handle(zone);
   DartFrameIterator iterator(thread,
                              StackFrameIterator::kNoCrossThreadIteration);
   StackFrame* frame = iterator.NextFrame();
   while (frame != NULL) {
-    code = frame->LookupDartCode();
-    if (code.is_optimized()) {
-      // If this code is optimized, we need to reset the ICs in the
-      // corresponding unoptimized code, which will be executed when the stack
-      // unwinds to the optimized code.
-      function = code.function();
-      code = function.unoptimized_code();
-      ASSERT(!code.IsNull());
-      code.ResetICDatas(zone);
+    if (frame->is_interpreted()) {
+      bytecode = frame->LookupDartBytecode();
+      bytecode.ResetICDatas(zone);
     } else {
-      code.ResetICDatas(zone);
+      code = frame->LookupDartCode();
+      if (code.is_optimized()) {
+        // If this code is optimized, we need to reset the ICs in the
+        // corresponding unoptimized code, which will be executed when the stack
+        // unwinds to the optimized code.
+        function = code.function();
+        code = function.unoptimized_code();
+        ASSERT(!code.IsNull());
+        code.ResetICDatas(zone);
+      } else {
+        code.ResetICDatas(zone);
+      }
     }
     frame = iterator.NextFrame();
   }
@@ -1873,6 +1877,7 @@
         owning_class_(Class::Handle(zone)),
         owning_lib_(Library::Handle(zone)),
         code_(Code::Handle(zone)),
+        bytecode_(Bytecode::Handle(zone)),
         reload_context_(reload_context),
         zone_(zone) {}
 
@@ -1894,19 +1899,25 @@
       // Grab the current code.
       code_ = func.CurrentCode();
       ASSERT(!code_.IsNull());
+      bytecode_ = func.bytecode();
       const bool clear_code = IsFromDirtyLibrary(func);
       const bool stub_code = code_.IsStubCode();
 
       // Zero edge counters.
       func.ZeroEdgeCounters();
 
-      if (!stub_code) {
+      if (!stub_code || !bytecode_.IsNull()) {
         if (clear_code) {
-          VTIR_Print("Marking %s for recompilation, clearning code\n",
+          VTIR_Print("Marking %s for recompilation, clearing code\n",
                      func.ToCString());
           ClearAllCode(func);
         } else {
-          PreserveUnoptimizedCode();
+          if (!stub_code) {
+            PreserveUnoptimizedCode();
+          }
+          if (!bytecode_.IsNull()) {
+            PreserveBytecode();
+          }
         }
       }
 
@@ -1933,6 +1944,13 @@
     code_.ResetICDatas(zone_);
   }
 
+  void PreserveBytecode() {
+    ASSERT(!bytecode_.IsNull());
+    // We are preserving the bytecode, fill all ICData arrays with
+    // the sentinel values so that we have no stale type feedback.
+    bytecode_.ResetICDatas(zone_);
+  }
+
   bool IsFromDirtyLibrary(const Function& func) {
     owning_class_ = func.Owner();
     owning_lib_ = owning_class_.library();
@@ -1943,6 +1961,7 @@
   Class& owning_class_;
   Library& owning_lib_;
   Code& code_;
+  Bytecode& bytecode_;
   IsolateReloadContext* reload_context_;
   Zone* zone_;
 };
diff --git a/runtime/vm/native_entry.cc b/runtime/vm/native_entry.cc
index 7bc012c..f6ac51b 100644
--- a/runtime/vm/native_entry.cc
+++ b/runtime/vm/native_entry.cc
@@ -247,8 +247,16 @@
                                StackFrameIterator::kNoCrossThreadIteration);
     StackFrame* caller_frame = iterator.NextFrame();
 
-    const Code& code = Code::Handle(zone, caller_frame->LookupDartCode());
-    const Function& func = Function::Handle(zone, code.function());
+    Code& code = Code::Handle(zone);
+    Bytecode& bytecode = Bytecode::Handle(zone);
+    Function& func = Function::Handle(zone);
+    if (caller_frame->is_interpreted()) {
+      bytecode = caller_frame->LookupDartBytecode();
+      func = bytecode.function();
+    } else {
+      code = caller_frame->LookupDartCode();
+      func = code.function();
+    }
 
     if (FLAG_trace_natives) {
       THR_Print("Resolving native target for %s\n", func.ToCString());
@@ -265,7 +273,7 @@
 #if !defined(DART_PRECOMPILED_RUNTIME)
       ASSERT(FLAG_enable_interpreter);
       NativeFunctionWrapper current_trampoline = KBCPatcher::GetNativeCallAt(
-          caller_frame->pc(), code, &current_function);
+          caller_frame->pc(), bytecode, &current_function);
       ASSERT(current_function ==
              reinterpret_cast<NativeFunction>(LinkNativeCall));
       ASSERT(current_trampoline == &BootstrapNativeCallWrapper ||
@@ -317,7 +325,7 @@
       } else {
         trampoline = &NoScopeNativeCallWrapper;
       }
-      KBCPatcher::PatchNativeCallAt(caller_frame->pc(), code,
+      KBCPatcher::PatchNativeCallAt(caller_frame->pc(), bytecode,
                                     patch_target_function, trampoline);
 #else
       UNREACHABLE();
diff --git a/runtime/vm/object.cc b/runtime/vm/object.cc
index a158cd3..62b381f 100644
--- a/runtime/vm/object.cc
+++ b/runtime/vm/object.cc
@@ -13,6 +13,7 @@
 #include "vm/compiler/aot/precompiler.h"
 #include "vm/compiler/assembler/assembler.h"
 #include "vm/compiler/assembler/disassembler.h"
+#include "vm/compiler/assembler/disassembler_kbc.h"
 #include "vm/compiler/frontend/bytecode_reader.h"
 #include "vm/compiler/frontend/kernel_fingerprints.h"
 #include "vm/compiler/frontend/kernel_translation_helper.h"
@@ -127,6 +128,7 @@
 RawClass* Object::kernel_program_info_class_ =
     reinterpret_cast<RawClass*>(RAW_NULL);
 RawClass* Object::code_class_ = reinterpret_cast<RawClass*>(RAW_NULL);
+RawClass* Object::bytecode_class_ = reinterpret_cast<RawClass*>(RAW_NULL);
 RawClass* Object::instructions_class_ = reinterpret_cast<RawClass*>(RAW_NULL);
 RawClass* Object::object_pool_class_ = reinterpret_cast<RawClass*>(RAW_NULL);
 RawClass* Object::pc_descriptors_class_ = reinterpret_cast<RawClass*>(RAW_NULL);
@@ -594,6 +596,9 @@
   cls = Class::New<Code>();
   code_class_ = cls.raw();
 
+  cls = Class::New<Bytecode>();
+  bytecode_class_ = cls.raw();
+
   cls = Class::New<Instructions>();
   instructions_class_ = cls.raw();
 
@@ -955,6 +960,7 @@
   namespace_class_ = reinterpret_cast<RawClass*>(RAW_NULL);
   kernel_program_info_class_ = reinterpret_cast<RawClass*>(RAW_NULL);
   code_class_ = reinterpret_cast<RawClass*>(RAW_NULL);
+  bytecode_class_ = reinterpret_cast<RawClass*>(RAW_NULL);
   instructions_class_ = reinterpret_cast<RawClass*>(RAW_NULL);
   object_pool_class_ = reinterpret_cast<RawClass*>(RAW_NULL);
   pc_descriptors_class_ = reinterpret_cast<RawClass*>(RAW_NULL);
@@ -1054,6 +1060,7 @@
   SET_CLASS_NAME(namespace, Namespace);
   SET_CLASS_NAME(kernel_program_info, KernelProgramInfo);
   SET_CLASS_NAME(code, Code);
+  SET_CLASS_NAME(bytecode, Bytecode);
   SET_CLASS_NAME(instructions, Instructions);
   SET_CLASS_NAME(object_pool, ObjectPool);
   SET_CLASS_NAME(code_source_map, CodeSourceMap);
@@ -3932,6 +3939,8 @@
       return Symbols::KernelProgramInfo().raw();
     case kCodeCid:
       return Symbols::Code().raw();
+    case kBytecodeCid:
+      return Symbols::Bytecode().raw();
     case kInstructionsCid:
       return Symbols::Instructions().raw();
     case kObjectPoolCid:
@@ -6005,11 +6014,11 @@
   }
 }
 
-void Function::AttachBytecode(const Code& value) const {
+void Function::AttachBytecode(const Bytecode& value) const {
   DEBUG_ASSERT(IsMutatorOrAtSafepoint());
   ASSERT(FLAG_enable_interpreter || FLAG_use_bytecode_compiler);
   // Finish setting up code before activating it.
-  value.set_owner(*this);
+  value.set_function(*this);
   StorePointer(&raw_ptr()->bytecode_, value.raw());
 
   // We should not have loaded the bytecode if the function had code.
@@ -6022,11 +6031,11 @@
 }
 
 bool Function::HasBytecode() const {
-  return raw_ptr()->bytecode_ != Code::null();
+  return raw_ptr()->bytecode_ != Bytecode::null();
 }
 
 bool Function::HasBytecode(RawFunction* function) {
-  return function->ptr()->bytecode_ != Code::null();
+  return function->ptr()->bytecode_ != Bytecode::null();
 }
 #endif  // !defined(DART_PRECOMPILED_RUNTIME)
 
@@ -6048,7 +6057,7 @@
   ASSERT(Thread::Current()->IsMutatorThread());
 
   StorePointer(&raw_ptr()->unoptimized_code_, Code::null());
-  StorePointer(&raw_ptr()->bytecode_, Code::null());
+  StorePointer(&raw_ptr()->bytecode_, Bytecode::null());
 
   SetInstructions(Code::Handle(StubCode::LazyCompile_entry()->code()));
 #endif  // defined(DART_PRECOMPILED_RUNTIME)
@@ -15010,60 +15019,6 @@
                       stats);
 }
 
-RawCode* Code::FinalizeBytecode(const void* bytecode_data,
-                                intptr_t bytecode_size,
-                                const ObjectPool& object_pool,
-                                CodeStatistics* stats /* = nullptr */) {
-  // Allocate the Code and Instructions objects.  Code is allocated first
-  // because a GC during allocation of the code will leave the instruction
-  // pages read-only.
-  const intptr_t pointer_offset_count = 0;  // No fixups in bytecode.
-  Code& code = Code::ZoneHandle(Code::New(pointer_offset_count));
-  Instructions& instrs = Instructions::ZoneHandle(
-      Instructions::New(bytecode_size, true /* has_single_entry_point */, 0));
-
-  // Copy the bytecode data into the instruction area. No fixups to apply.
-  MemoryRegion instrs_region(reinterpret_cast<void*>(instrs.PayloadStart()),
-                             instrs.Size());
-  MemoryRegion bytecode_region(const_cast<void*>(bytecode_data), bytecode_size);
-  // TODO(regis): Avoid copying bytecode.
-  instrs_region.CopyFrom(0, bytecode_region);
-
-  // TODO(regis): Keep following lines or not?
-  // TODO(regis): Do we need to notify CodeObservers for bytecode too?
-  // If so, provide a better name using ToLibNamePrefixedQualifiedCString().
-#ifndef PRODUCT
-  code.set_compile_timestamp(OS::GetCurrentMonotonicMicros());
-  CodeObservers::NotifyAll("bytecode", instrs.PayloadStart(),
-                           0 /* prologue_offset */, instrs.Size(),
-                           false /* optimized */, nullptr);
-#endif
-  {
-    NoSafepointScope no_safepoint;
-
-    // Hook up Code and Instructions objects.
-    code.SetActiveInstructions(instrs);
-    code.set_instructions(instrs);
-    code.set_is_alive(true);
-
-    // Set object pool in Instructions object.
-    code.set_object_pool(object_pool.raw());
-
-    if (FLAG_write_protect_code) {
-      uword address = RawObject::ToAddr(instrs.raw());
-      VirtualMemory::Protect(reinterpret_cast<void*>(address),
-                             instrs.raw()->Size(), VirtualMemory::kReadExecute);
-    }
-  }
-#ifndef PRODUCT
-  // No Code::Comments to set. Default is 0 length Comments.
-  // No prologue was ever entered, optimistically assume nothing was ever
-  // pushed onto the stack.
-  code.SetPrologueOffset(bytecode_size);  // TODO(regis): Correct?
-#endif
-  return code.raw();
-}
-
 #endif  // !defined(DART_PRECOMPILED_RUNTIME)
 
 bool Code::SlowFindRawCodeVisitor::FindObject(RawObject* raw_obj) const {
@@ -15205,16 +15160,6 @@
   return obj.IsFunction();
 }
 
-bool Code::IsBytecode() const {
-#if defined(DART_PRECOMPILED_RUNTIME)
-  return false;
-#else
-  const Object& obj = Object::Handle(owner());
-  if (!obj.IsFunction()) return false;
-  return Function::Cast(obj).Bytecode() == raw();
-#endif
-}
-
 void Code::DisableDartCode() const {
   DEBUG_ASSERT(IsMutatorOrAtSafepoint());
   ASSERT(IsFunctionCode());
@@ -15293,7 +15238,7 @@
     GrowableArray<TokenPosition>* token_positions) const {
   const CodeSourceMap& map = CodeSourceMap::Handle(code_source_map());
   if (map.IsNull()) {
-    ASSERT(!IsFunctionCode() || IsBytecode() ||
+    ASSERT(!IsFunctionCode() ||
            (Isolate::Current()->object_store()->megamorphic_miss_code() ==
             this->raw()));
     return;  // VM stub, allocation stub, or megamorphic miss function.
@@ -15349,6 +15294,92 @@
 #endif
 }
 
+void Bytecode::set_instructions(const ExternalTypedData& instructions) const {
+#if !defined(DART_PRECOMPILED_RUNTIME)
+  ASSERT(Thread::Current()->IsMutatorThread());
+  // The interpreter requires the instructions to be aligned.
+  ASSERT(Utils::IsAligned(instructions.DataAddr(0), sizeof(KBCInstr)));
+  StorePointer(&raw_ptr()->instructions_, instructions.raw());
+#else
+  UNREACHABLE();
+#endif
+}
+
+uword Bytecode::PayloadStart() const {
+  const ExternalTypedData& instr = ExternalTypedData::Handle(instructions());
+  return reinterpret_cast<uword>(instr.DataAddr(0));
+}
+
+intptr_t Bytecode::Size() const {
+  const ExternalTypedData& instr = ExternalTypedData::Handle(instructions());
+  return instr.LengthInBytes();
+}
+
+bool Bytecode::ContainsInstructionAt(uword addr) const {
+  const ExternalTypedData& instr = ExternalTypedData::Handle(instructions());
+  const uword offset = addr - reinterpret_cast<uword>(instr.DataAddr(0));
+  return offset < static_cast<uword>(instr.LengthInBytes());
+}
+
+void Bytecode::Disassemble(DisassemblyFormatter* formatter) const {
+#if !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER)
+#if !defined(DART_PRECOMPILED_RUNTIME)
+  if (!FLAG_support_disassembler) {
+    return;
+  }
+  uword start = PayloadStart();
+  intptr_t size = Size();
+  if (formatter == NULL) {
+    KernelBytecodeDisassembler::Disassemble(start, start + size, *this);
+  } else {
+    KernelBytecodeDisassembler::Disassemble(start, start + size, formatter,
+                                            *this);
+  }
+#endif  // !defined(DART_PRECOMPILED_RUNTIME)
+#endif  // !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER)
+}
+
+#if !defined(DART_PRECOMPILED_RUNTIME)
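+// Bytecode objects are always allocated in old space and reference their
+// instructions through an ExternalTypedData.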
+RawBytecode* Bytecode::New(const ExternalTypedData& instructions,
+                           const ObjectPool& object_pool) {
+  ASSERT(Object::bytecode_class() != Class::null());
+  Bytecode& result = Bytecode::Handle();
+  {
+    uword size = Bytecode::InstanceSize();
+    RawObject* raw = Object::Allocate(Bytecode::kClassId, size, Heap::kOld);
+    NoSafepointScope no_safepoint;
+    result ^= raw;
+    result.set_pc_descriptors(Object::empty_descriptors());
+    result.set_instructions(instructions);
+    result.set_object_pool(object_pool);
+  }
+  return result.raw();
+}
+#endif
+
+const char* Bytecode::ToCString() const {
+  return Thread::Current()->zone()->PrintToString("Bytecode(%s)",
+                                                  QualifiedName());
+}
+
+const char* Bytecode::Name() const {
+  Zone* zone = Thread::Current()->zone();
+  const Function& fun = Function::Handle(zone, function());
+  ASSERT(!fun.IsNull());
+  const char* function_name =
+      String::Handle(zone, fun.UserVisibleName()).ToCString();
+  return zone->PrintToString("%s", function_name);
+}
+
+const char* Bytecode::QualifiedName() const {
+  Zone* zone = Thread::Current()->zone();
+  const Function& fun = Function::Handle(zone, function());
+  ASSERT(!fun.IsNull());
+  const char* function_name =
+      String::Handle(zone, fun.QualifiedScrubbedName()).ToCString();
+  return zone->PrintToString("%s", function_name);
+}
+
 RawContext* Context::New(intptr_t num_variables, Heap::Space space) {
   ASSERT(num_variables >= 0);
   ASSERT(Object::context_class() != Class::null());
@@ -16737,7 +16768,9 @@
     if (IsClosure()) {
       return Closure::Cast(*this).ToCString();
     }
-    const AbstractType& type = AbstractType::Handle(GetType(Heap::kNew));
+    // The background compiler's disassembly of instructions that refer to
+    // pool objects calls this function and requires the Type to be allocated
+    // in old space.
+    const AbstractType& type = AbstractType::Handle(GetType(Heap::kOld));
     const String& type_name = String::Handle(type.UserVisibleName());
     return OS::SCreate(Thread::Current()->zone(), "Instance of '%s'",
                        type_name.ToCString());
@@ -22314,12 +22347,13 @@
   return code_array.Length();
 }
 
-RawCode* StackTrace::CodeAtFrame(intptr_t frame_index) const {
+RawObject* StackTrace::CodeAtFrame(intptr_t frame_index) const {
   const Array& code_array = Array::Handle(raw_ptr()->code_array_);
-  return reinterpret_cast<RawCode*>(code_array.At(frame_index));
+  return code_array.At(frame_index);
 }
 
-void StackTrace::SetCodeAtFrame(intptr_t frame_index, const Code& code) const {
+void StackTrace::SetCodeAtFrame(intptr_t frame_index,
+                                const Object& code) const {
   const Array& code_array = Array::Handle(raw_ptr()->code_array_);
   code_array.SetAt(frame_index, code);
 }
@@ -22430,7 +22464,9 @@
   Zone* zone = Thread::Current()->zone();
   StackTrace& stack_trace = StackTrace::Handle(zone, stack_trace_in.raw());
   Function& function = Function::Handle(zone);
+  Object& code_object = Object::Handle(zone);
   Code& code = Code::Handle(zone);
+  Bytecode& bytecode = Bytecode::Handle(zone);
 
   GrowableArray<const Function*> inlined_functions;
   GrowableArray<TokenPosition> inlined_token_positions;
@@ -22441,8 +22477,8 @@
   intptr_t frame_index = 0;
   do {
     for (intptr_t i = 0; i < stack_trace.Length(); i++) {
-      code = stack_trace.CodeAtFrame(i);
-      if (code.IsNull()) {
+      code_object = stack_trace.CodeAtFrame(i);
+      if (code_object.IsNull()) {
         // Check for a null function, which indicates a gap in a StackOverflow
         // or OutOfMemory trace.
         if ((i < (stack_trace.Length() - 1)) &&
@@ -22452,7 +22488,7 @@
           // To account for gap frames.
           frame_index += Smi::Value(stack_trace.PcOffsetAtFrame(i));
         }
-      } else if (code.raw() ==
+      } else if (code_object.raw() ==
                  StubCode::AsynchronousGapMarker_entry()->code()) {
         buffer.AddString("<asynchronous suspension>\n");
         // The frame immediately after the asynchronous gap marker is the
@@ -22460,25 +22496,39 @@
         // the readability of the trace.
         i++;
       } else {
-        ASSERT(code.IsFunctionCode());
         intptr_t pc_offset = Smi::Value(stack_trace.PcOffsetAtFrame(i));
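+        // A stack trace frame may record either a Code or a Bytecode object;
+        // only Code objects can expand into inlined functions.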
-        if (code.is_optimized() && stack_trace.expand_inlined()) {
-          code.GetInlinedFunctionsAtReturnAddress(pc_offset, &inlined_functions,
-                                                  &inlined_token_positions);
-          ASSERT(inlined_functions.length() >= 1);
-          for (intptr_t j = inlined_functions.length() - 1; j >= 0; j--) {
-            if (inlined_functions[j]->is_visible() ||
-                FLAG_show_invisible_frames) {
-              PrintStackTraceFrame(zone, &buffer, *inlined_functions[j],
-                                   inlined_token_positions[j], frame_index);
+        if (code_object.IsCode()) {
+          code ^= code_object.raw();
+          ASSERT(code.IsFunctionCode());
+          if (code.is_optimized() && stack_trace.expand_inlined()) {
+            code.GetInlinedFunctionsAtReturnAddress(
+                pc_offset, &inlined_functions, &inlined_token_positions);
+            ASSERT(inlined_functions.length() >= 1);
+            for (intptr_t j = inlined_functions.length() - 1; j >= 0; j--) {
+              if (inlined_functions[j]->is_visible() ||
+                  FLAG_show_invisible_frames) {
+                PrintStackTraceFrame(zone, &buffer, *inlined_functions[j],
+                                     inlined_token_positions[j], frame_index);
+                frame_index++;
+              }
+            }
+          } else {
+            function = code.function();
+            if (function.is_visible() || FLAG_show_invisible_frames) {
+              uword pc = code.PayloadStart() + pc_offset;
+              const TokenPosition token_pos = code.GetTokenIndexOfPC(pc);
+              PrintStackTraceFrame(zone, &buffer, function, token_pos,
+                                   frame_index);
               frame_index++;
             }
           }
         } else {
-          function = code.function();
+          ASSERT(code_object.IsBytecode());
+          bytecode ^= code_object.raw();
+          function = bytecode.function();
           if (function.is_visible() || FLAG_show_invisible_frames) {
-            uword pc = code.PayloadStart() + pc_offset;
-            const TokenPosition token_pos = code.GetTokenIndexOfPC(pc);
+            uword pc = bytecode.PayloadStart() + pc_offset;
+            const TokenPosition token_pos = bytecode.GetTokenIndexOfPC(pc);
             PrintStackTraceFrame(zone, &buffer, function, token_pos,
                                  frame_index);
             frame_index++;
@@ -22497,7 +22547,7 @@
 #if defined(DART_PRECOMPILER) || defined(DART_PRECOMPILED_RUNTIME)
   Zone* zone = Thread::Current()->zone();
   StackTrace& stack_trace = StackTrace::Handle(zone, stack_trace_in.raw());
-  Code& code = Code::Handle(zone);
+  Object& code = Object::Handle(zone);
   ZoneTextBuffer buffer(zone, 1024);
 
   // The Dart standard requires the output of StackTrace.toString to include
@@ -22538,7 +22588,9 @@
         intptr_t pc_offset = Smi::Value(stack_trace.PcOffsetAtFrame(i));
         // This output is formatted like Android's debuggerd. Note debuggerd
         // prints call addresses instead of return addresses.
-        uword return_addr = code.PayloadStart() + pc_offset;
+        uword start = code.IsBytecode() ? Bytecode::Cast(code).PayloadStart()
+                                        : Code::Cast(code).PayloadStart();
+        uword return_addr = start + pc_offset;
         uword call_addr = return_addr - 1;
         uword dso_base;
         char* dso_name;
diff --git a/runtime/vm/object.h b/runtime/vm/object.h
index dd36b85..6f5a157 100644
--- a/runtime/vm/object.h
+++ b/runtime/vm/object.h
@@ -430,6 +430,7 @@
     return kernel_program_info_class_;
   }
   static RawClass* code_class() { return code_class_; }
+  static RawClass* bytecode_class() { return bytecode_class_; }
   static RawClass* instructions_class() { return instructions_class_; }
   static RawClass* object_pool_class() { return object_pool_class_; }
   static RawClass* pc_descriptors_class() { return pc_descriptors_class_; }
@@ -679,6 +680,7 @@
   static RawClass* kernel_program_info_class_;  // Class of KernelProgramInfo vm
                                                 // object.
   static RawClass* code_class_;                 // Class of the Code vm object.
+  static RawClass* bytecode_class_;      // Class of the Bytecode vm object.
   static RawClass* instructions_class_;  // Class of the Instructions vm object.
   static RawClass* object_pool_class_;   // Class of the ObjectPool vm object.
   static RawClass* pc_descriptors_class_;   // Class of PcDescriptors vm object.
@@ -2360,8 +2362,8 @@
 
 #if !defined(DART_PRECOMPILED_RUNTIME)
   bool IsBytecodeAllowed(Zone* zone) const;
-  void AttachBytecode(const Code& bytecode) const;
-  RawCode* Bytecode() const { return raw_ptr()->bytecode_; }
+  void AttachBytecode(const Bytecode& bytecode) const;
+  RawBytecode* bytecode() const { return raw_ptr()->bytecode_; }
   bool HasBytecode() const;
 #endif
 
@@ -4247,6 +4249,9 @@
     StoreNonPointer(&EntryAddr(index)->raw_value_, raw_value);
   }
 
+  // Used during reloading (see object_reload.cc). Calls Reset on all ICDatas.
+  void ResetICDatas(Zone* zone) const;
+
   static intptr_t InstanceSize() {
     ASSERT(sizeof(RawObjectPool) ==
            OFFSET_OF_RETURNED_VALUE(RawObjectPool, data));
@@ -4918,7 +4923,7 @@
   void set_await_token_positions(const Array& await_token_positions) const;
 
   // Used during reloading (see object_reload.cc). Calls Reset on all ICDatas
-  // that are embedded inside the Code object.
+  // that are embedded inside the Code or ObjectPool objects.
   void ResetICDatas(Zone* zone) const;
 
   // Array of DeoptInfo objects.
@@ -5150,10 +5155,6 @@
                                PoolAttachment pool_attachment,
                                bool optimized,
                                CodeStatistics* stats = nullptr);
-  static RawCode* FinalizeBytecode(const void* bytecode_data,
-                                   intptr_t bytecode_size,
-                                   const ObjectPool& object_pool,
-                                   CodeStatistics* stats = nullptr);
 #endif
   static RawCode* LookupCode(uword pc);
   static RawCode* LookupCodeInVmIsolate(uword pc);
@@ -5183,7 +5184,6 @@
   bool IsAllocationStubCode() const;
   bool IsStubCode() const;
   bool IsFunctionCode() const;
-  bool IsBytecode() const;
 
   void DisableDartCode() const;
 
@@ -5293,6 +5293,77 @@
   friend class RawFunction;
 };
 
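+// A Bytecode object holds the interpreter's bytecode instructions for a
+// function, together with its object pool, exception handlers and pc
+// descriptors.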
+class Bytecode : public Object {
+ public:
+  RawExternalTypedData* instructions() const {
+    return raw_ptr()->instructions_;
+  }
+  uword PayloadStart() const;
+  intptr_t Size() const;
+
+  RawObjectPool* object_pool() const { return raw_ptr()->object_pool_; }
+  bool ContainsInstructionAt(uword addr) const;
+
+  RawPcDescriptors* pc_descriptors() const {
+    return raw_ptr()->pc_descriptors_;
+  }
+  void set_pc_descriptors(const PcDescriptors& descriptors) const {
+    ASSERT(descriptors.IsOld());
+    StorePointer(&raw_ptr()->pc_descriptors_, descriptors.raw());
+  }
+
+  void Disassemble(DisassemblyFormatter* formatter = NULL) const;
+
+  RawExceptionHandlers* exception_handlers() const {
+    return raw_ptr()->exception_handlers_;
+  }
+  void set_exception_handlers(const ExceptionHandlers& handlers) const {
+    ASSERT(handlers.IsOld());
+    StorePointer(&raw_ptr()->exception_handlers_, handlers.raw());
+  }
+
+  RawFunction* function() const { return raw_ptr()->function_; }
+
+  void set_function(const Function& function) const {
+    ASSERT(function.IsOld());
+    StorePointer(&raw_ptr()->function_, function.raw());
+  }
+
+  // Used during reloading (see object_reload.cc). Calls Reset on all ICDatas
+  // that are embedded in this Bytecode's ObjectPool.
+  void ResetICDatas(Zone* zone) const;
+
+  static intptr_t InstanceSize() {
+    return RoundedAllocationSize(sizeof(RawBytecode));
+  }
+#if !defined(DART_PRECOMPILED_RUNTIME)
+  static RawBytecode* New(const ExternalTypedData& instructions,
+                          const ObjectPool& object_pool);
+#endif
+
+  TokenPosition GetTokenIndexOfPC(uword pc) const {
+    // TODO(alexmarkov): implement.
+    return TokenPosition::kNoSource;
+  }
+
+  const char* Name() const;
+  const char* QualifiedName() const;
+
+ private:
+  void set_object_pool(const ObjectPool& object_pool) const {
+    StorePointer(&raw_ptr()->object_pool_, object_pool.raw());
+  }
+
+  friend class RawObject;  // For RawObject::SizeFromClass().
+  friend class RawBytecode;
+
+  void set_instructions(const ExternalTypedData& instructions) const;
+
+  FINAL_HEAP_OBJECT_IMPLEMENTATION(Bytecode, Object);
+  friend class Class;
+  friend class SnapshotWriter;
+};
+
 class Context : public Object {
  public:
   RawContext* parent() const { return raw_ptr()->parent_; }
@@ -9052,8 +9123,8 @@
   void set_expand_inlined(bool value) const;
 
   RawArray* code_array() const { return raw_ptr()->code_array_; }
-  RawCode* CodeAtFrame(intptr_t frame_index) const;
-  void SetCodeAtFrame(intptr_t frame_index, const Code& code) const;
+  RawObject* CodeAtFrame(intptr_t frame_index) const;
+  void SetCodeAtFrame(intptr_t frame_index, const Object& code) const;
 
   RawArray* pc_offset_array() const { return raw_ptr()->pc_offset_array_; }
   RawSmi* PcOffsetAtFrame(intptr_t frame_index) const;
diff --git a/runtime/vm/object_reload.cc b/runtime/vm/object_reload.cc
index bc78047..e63ed7a 100644
--- a/runtime/vm/object_reload.cc
+++ b/runtime/vm/object_reload.cc
@@ -82,14 +82,29 @@
   }
 #else
   const ObjectPool& pool = ObjectPool::Handle(zone, object_pool());
-  Object& object = Object::Handle(zone);
   ASSERT(!pool.IsNull());
-  for (intptr_t i = 0; i < pool.Length(); i++) {
-    ObjectPool::EntryType entry_type = pool.TypeAt(i);
+  pool.ResetICDatas(zone);
+#endif
+}
+
+void Bytecode::ResetICDatas(Zone* zone) const {
+  // Iterate over the Bytecode's object pool and reset all ICDatas.
+  const ObjectPool& pool = ObjectPool::Handle(zone, object_pool());
+  ASSERT(!pool.IsNull());
+  pool.ResetICDatas(zone);
+}
+
+void ObjectPool::ResetICDatas(Zone* zone) const {
+#ifdef TARGET_ARCH_IA32
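+  // IA32 code embeds objects directly in its instructions and does not use an
+  // object pool, so this path is never reached on that architecture.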
+  UNREACHABLE();
+#else
+  Object& object = Object::Handle(zone);
+  for (intptr_t i = 0; i < Length(); i++) {
+    ObjectPool::EntryType entry_type = TypeAt(i);
     if (entry_type != ObjectPool::kTaggedObject) {
       continue;
     }
-    object = pool.ObjectAt(i);
+    object = ObjectAt(i);
     if (object.IsICData()) {
       ICData::Cast(object).Reset(zone);
     }
diff --git a/runtime/vm/object_service.cc b/runtime/vm/object_service.cc
index d82135f..964dc6d 100644
--- a/runtime/vm/object_service.cc
+++ b/runtime/vm/object_service.cc
@@ -312,6 +312,12 @@
   if (!code.IsNull()) {
     jsobj.AddProperty("code", code);
   }
+#if !defined(DART_PRECOMPILED_RUNTIME)
+  Bytecode& bytecode = Bytecode::Handle(this->bytecode());
+  if (!bytecode.IsNull()) {
+    jsobj.AddProperty("bytecode", bytecode);
+  }
+#endif  // !DART_PRECOMPILED_RUNTIME
   Array& ics = Array::Handle(ic_data_array());
   if (!ics.IsNull()) {
     jsobj.AddProperty("_icDataArray", ics);
@@ -853,6 +859,33 @@
 #endif
 }
 
+void Bytecode::PrintJSONImpl(JSONStream* stream, bool ref) const {
+  JSONObject jsobj(stream);
+  AddCommonObjectProperties(&jsobj, "Bytecode", ref);
+  const char* qualified_name = QualifiedName();
+  const char* vm_name = Name();
+  AddNameProperties(&jsobj, qualified_name, vm_name);
+  if (ref) {
+    return;
+  }
+  const Function& fun = Function::Handle(function());
+  jsobj.AddProperty("function", fun);
+  jsobj.AddPropertyF("_startAddress", "%" Px "", PayloadStart());
+  jsobj.AddPropertyF("_endAddress", "%" Px "", PayloadStart() + Size());
+  const ObjectPool& obj_pool = ObjectPool::Handle(object_pool());
+  jsobj.AddProperty("_objectPool", obj_pool);
+  {
+    JSONArray jsarr(&jsobj, "_disassembly");
+    DisassembleToJSONStream formatter(jsarr);
+    Disassemble(&formatter);
+  }
+  const PcDescriptors& descriptors = PcDescriptors::Handle(pc_descriptors());
+  if (!descriptors.IsNull()) {
+    JSONObject desc(&jsobj, "_descriptors");
+    descriptors.PrintToJSONObject(&desc, false);
+  }
+}
+
 void Context::PrintJSONImpl(JSONStream* stream, bool ref) const {
   JSONObject jsobj(stream);
   // TODO(turnidge): Should the user level type for Context be Context
diff --git a/runtime/vm/raw_object.cc b/runtime/vm/raw_object.cc
index 801c925..3ce3ced 100644
--- a/runtime/vm/raw_object.cc
+++ b/runtime/vm/raw_object.cc
@@ -383,6 +383,7 @@
 
 REGULAR_VISITOR(Class)
 REGULAR_VISITOR(UnresolvedClass)
+REGULAR_VISITOR(Bytecode)
 REGULAR_VISITOR(Type)
 REGULAR_VISITOR(TypeRef)
 REGULAR_VISITOR(TypeParameter)
diff --git a/runtime/vm/raw_object.h b/runtime/vm/raw_object.h
index 16109d1..ce9a94a 100644
--- a/runtime/vm/raw_object.h
+++ b/runtime/vm/raw_object.h
@@ -34,6 +34,7 @@
   V(Namespace)                                                                 \
   V(KernelProgramInfo)                                                         \
   V(Code)                                                                      \
+  V(Bytecode)                                                                  \
   V(Instructions)                                                              \
   V(ObjectPool)                                                                \
   V(PcDescriptors)                                                             \
@@ -1078,7 +1079,7 @@
     return reinterpret_cast<RawObject**>(&ptr()->ic_data_array_);
   }
   RawCode* code_;  // Currently active code. Accessed from generated code.
-  NOT_IN_PRECOMPILED(RawCode* bytecode_);
+  NOT_IN_PRECOMPILED(RawBytecode* bytecode_);
   NOT_IN_PRECOMPILED(RawCode* unoptimized_code_);  // Unoptimized code, keep it
                                                    // after optimization.
 #if defined(DART_PRECOMPILED_RUNTIME)
@@ -1476,6 +1477,21 @@
   friend class FunctionDeserializationCluster;
 };
 
+class RawBytecode : public RawObject {
+  RAW_HEAP_OBJECT_IMPLEMENTATION(Bytecode);
+
+  VISIT_FROM(RawObject*, object_pool_);
+  RawObjectPool* object_pool_;
+  RawExternalTypedData* instructions_;
+  RawFunction* function_;
+  RawExceptionHandlers* exception_handlers_;
+  RawPcDescriptors* pc_descriptors_;
+  VISIT_TO(RawObject*, pc_descriptors_);
+
+  friend class Function;
+  friend class StackFrame;
+};
+
 class RawObjectPool : public RawObject {
   RAW_HEAP_OBJECT_IMPLEMENTATION(ObjectPool);
 
diff --git a/runtime/vm/raw_object_snapshot.cc b/runtime/vm/raw_object_snapshot.cc
index 94366af..70bed6a 100644
--- a/runtime/vm/raw_object_snapshot.cc
+++ b/runtime/vm/raw_object_snapshot.cc
@@ -695,6 +695,22 @@
   UNREACHABLE();
 }
 
+RawBytecode* Bytecode::ReadFrom(SnapshotReader* reader,
+                                intptr_t object_id,
+                                intptr_t tags,
+                                Snapshot::Kind kind,
+                                bool as_reference) {
+  UNREACHABLE();
+  return Bytecode::null();
+}
+
+void RawBytecode::WriteTo(SnapshotWriter* writer,
+                          intptr_t object_id,
+                          Snapshot::Kind kind,
+                          bool as_reference) {
+  UNREACHABLE();
+}
+
 RawInstructions* Instructions::ReadFrom(SnapshotReader* reader,
                                         intptr_t object_id,
                                         intptr_t tags,
diff --git a/runtime/vm/reusable_handles.h b/runtime/vm/reusable_handles.h
index 7c452bc..2c06da9 100644
--- a/runtime/vm/reusable_handles.h
+++ b/runtime/vm/reusable_handles.h
@@ -86,6 +86,8 @@
   ReusableClassHandleScope reused_class_handle(thread);
 #define REUSABLE_CODE_HANDLESCOPE(thread)                                      \
   ReusableCodeHandleScope reused_code_handle(thread);
+#define REUSABLE_BYTECODE_HANDLESCOPE(thread)                                  \
+  ReusableBytecodeHandleScope reused_bytecode_handle(thread);
 #define REUSABLE_ERROR_HANDLESCOPE(thread)                                     \
   ReusableErrorHandleScope reused_error_handle(thread);
 #define REUSABLE_EXCEPTION_HANDLERS_HANDLESCOPE(thread)                        \
diff --git a/runtime/vm/runtime_entry.cc b/runtime/vm/runtime_entry.cc
index 33584a0..372493e 100644
--- a/runtime/vm/runtime_entry.cc
+++ b/runtime/vm/runtime_entry.cc
@@ -165,6 +165,7 @@
                              StackFrameIterator::kNoCrossThreadIteration);
   const StackFrame* caller_frame = iterator.NextFrame();
   ASSERT(caller_frame->IsDartFrame());
+  ASSERT(!caller_frame->is_interpreted());
   const Code& code = Code::Handle(zone, caller_frame->LookupDartCode());
   const uword pc_offset = caller_frame->pc() - code.PayloadStart();
 
@@ -845,6 +846,7 @@
       DartFrameIterator iterator(thread,
                                  StackFrameIterator::kNoCrossThreadIteration);
       StackFrame* caller_frame = iterator.NextFrame();
+      ASSERT(!caller_frame->is_interpreted());
       const Code& caller_code =
           Code::Handle(zone, caller_frame->LookupDartCode());
       const ObjectPool& pool =
@@ -882,6 +884,7 @@
       DartFrameIterator iterator(thread,
                                  StackFrameIterator::kNoCrossThreadIteration);
       StackFrame* caller_frame = iterator.NextFrame();
+      ASSERT(!caller_frame->is_interpreted());
       const Code& caller_code =
           Code::Handle(zone, caller_frame->LookupDartCode());
       const ObjectPool& pool =
@@ -983,6 +986,7 @@
                              StackFrameIterator::kNoCrossThreadIteration);
   StackFrame* caller_frame = iterator.NextFrame();
   ASSERT(caller_frame != NULL);
+  ASSERT(!caller_frame->is_interpreted());
   const Code& caller_code = Code::Handle(zone, caller_frame->LookupDartCode());
   ASSERT(!caller_code.IsNull());
   ASSERT(caller_code.is_optimized());
@@ -1868,21 +1872,29 @@
                                StackFrameIterator::kNoCrossThreadIteration);
     StackFrame* frame = iterator.NextFrame();
     ASSERT(frame != NULL);
-    const Code& code = Code::Handle(frame->LookupDartCode());
-    ASSERT(!code.IsNull());
-    const Function& function = Function::Handle(code.function());
+    Code& code = Code::Handle();
+    Function& function = Function::Handle();
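+    // Interpreted frames have no Code object; the deoptimization and reload
+    // checks below therefore only apply to compiled code.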
+    if (frame->is_interpreted()) {
+      function = frame->LookupDartFunction();
+    } else {
+      code = frame->LookupDartCode();
+      ASSERT(!code.IsNull());
+      function = code.function();
+    }
     ASSERT(!function.IsNull());
     const char* function_name = function.ToFullyQualifiedCString();
     ASSERT(function_name != NULL);
-    if (!code.is_optimized() && FLAG_reload_every_optimized) {
-      // Don't do the reload if we aren't inside optimized code.
-      do_reload = false;
-    }
-    if (code.is_optimized() && FLAG_deoptimize_filter != NULL &&
-        strstr(function_name, FLAG_deoptimize_filter) != NULL) {
-      OS::PrintErr("*** Forcing deoptimization (%s)\n",
-                   function.ToFullyQualifiedCString());
-      do_deopt = true;
+    if (!code.IsNull()) {
+      if (!code.is_optimized() && FLAG_reload_every_optimized) {
+        // Don't do the reload if we aren't inside optimized code.
+        do_reload = false;
+      }
+      if (code.is_optimized() && FLAG_deoptimize_filter != NULL &&
+          strstr(function_name, FLAG_deoptimize_filter) != NULL) {
+        OS::PrintErr("*** Forcing deoptimization (%s)\n",
+                     function.ToFullyQualifiedCString());
+        do_deopt = true;
+      }
     }
     if (FLAG_stacktrace_filter != NULL &&
         strstr(function_name, FLAG_stacktrace_filter) != NULL) {
@@ -2360,9 +2372,11 @@
   StackFrame* frame = iterator.NextFrame();
   Code& optimized_code = Code::Handle();
   while (frame != NULL) {
-    optimized_code = frame->LookupDartCode();
-    if (optimized_code.is_optimized()) {
-      DeoptimizeAt(optimized_code, frame);
+    if (!frame->is_interpreted()) {
+      optimized_code = frame->LookupDartCode();
+      if (optimized_code.is_optimized()) {
+        DeoptimizeAt(optimized_code, frame);
+      }
     }
     frame = iterator.NextFrame();
   }
diff --git a/runtime/vm/simulator_arm.cc b/runtime/vm/simulator_arm.cc
index 25c6ac2..779a863 100644
--- a/runtime/vm/simulator_arm.cc
+++ b/runtime/vm/simulator_arm.cc
@@ -276,6 +276,7 @@
   Code& unoptimized_code = Code::Handle();
   while (frame != NULL) {
     if (frame->IsDartFrame()) {
+      ASSERT(!frame->is_interpreted());  // Not yet supported.
       code = frame->LookupDartCode();
       function = code.function();
       if (code.is_optimized()) {
diff --git a/runtime/vm/simulator_arm64.cc b/runtime/vm/simulator_arm64.cc
index caebf8d..12a2b71 100644
--- a/runtime/vm/simulator_arm64.cc
+++ b/runtime/vm/simulator_arm64.cc
@@ -299,6 +299,7 @@
   Code& unoptimized_code = Code::Handle();
   while (frame != NULL) {
     if (frame->IsDartFrame()) {
+      ASSERT(!frame->is_interpreted());  // Not yet supported.
       code = frame->LookupDartCode();
       function = code.function();
       if (code.is_optimized()) {
diff --git a/runtime/vm/snapshot.cc b/runtime/vm/snapshot.cc
index eaae077..61521ac 100644
--- a/runtime/vm/snapshot.cc
+++ b/runtime/vm/snapshot.cc
@@ -1079,7 +1079,7 @@
 
   // Check if it is a code object in that case just write a Null object
   // as we do not want code objects in the snapshot.
-  if (cid == kCodeCid) {
+  if ((cid == kCodeCid) || (cid == kBytecodeCid)) {
     WriteVMIsolateObject(kNullObject);
     return true;
   }
diff --git a/runtime/vm/snapshot.h b/runtime/vm/snapshot.h
index 64f5d59..f7d4f4d 100644
--- a/runtime/vm/snapshot.h
+++ b/runtime/vm/snapshot.h
@@ -184,6 +184,9 @@
   static bool IncludesCode(Kind kind) {
     return (kind == kFullJIT) || (kind == kFullAOT);
   }
+  static bool IncludesBytecode(Kind kind) {
+    return (kind == kFull) || (kind == kFullJIT);
+  }
 
   const uint8_t* Addr() const { return reinterpret_cast<const uint8_t*>(this); }
 
diff --git a/runtime/vm/snapshot_test.cc b/runtime/vm/snapshot_test.cc
index 6cdbd5b..f1f05d7 100644
--- a/runtime/vm/snapshot_test.cc
+++ b/runtime/vm/snapshot_test.cc
@@ -403,6 +403,7 @@
   TEST_ROUND_TRIP_IDENTICAL(Object::script_class());
   TEST_ROUND_TRIP_IDENTICAL(Object::library_class());
   TEST_ROUND_TRIP_IDENTICAL(Object::code_class());
+  TEST_ROUND_TRIP_IDENTICAL(Object::bytecode_class());
   TEST_ROUND_TRIP_IDENTICAL(Object::instructions_class());
   TEST_ROUND_TRIP_IDENTICAL(Object::pc_descriptors_class());
   TEST_ROUND_TRIP_IDENTICAL(Object::exception_handlers_class());
diff --git a/runtime/vm/source_report.cc b/runtime/vm/source_report.cc
index 0bcef13..edc5923 100644
--- a/runtime/vm/source_report.cc
+++ b/runtime/vm/source_report.cc
@@ -389,12 +389,13 @@
   const TokenPosition end_pos = func.end_token_pos();
 
   Code& code = Code::Handle(zone(), func.unoptimized_code());
+  Bytecode& bytecode = Bytecode::Handle(zone());
 #if !defined(DART_PRECOMPILED_RUNTIME)
   if (FLAG_enable_interpreter && code.IsNull() && func.HasBytecode()) {
-    code = func.Bytecode();
+    bytecode = func.bytecode();
   }
 #endif  // !defined(DART_PRECOMPILED_RUNTIME)
-  if (code.IsNull()) {
+  if (code.IsNull() && bytecode.IsNull()) {
     if (func.HasCode() || (compile_mode_ == kForceCompile)) {
       const Error& err =
           Error::Handle(Compiler::EnsureUnoptimizedCode(thread(), func));
@@ -411,7 +412,7 @@
       code = func.unoptimized_code();
 #if !defined(DART_PRECOMPILED_RUNTIME)
       if (FLAG_enable_interpreter && code.IsNull() && func.HasBytecode()) {
-        code = func.Bytecode();
+        bytecode = func.bytecode();
       }
 #endif  // !defined(DART_PRECOMPILED_RUNTIME)
     } else {
@@ -424,7 +425,7 @@
       return;
     }
   }
-  ASSERT(!code.IsNull());
+  ASSERT(!code.IsNull() || !bytecode.IsNull());
 
   // We skip compiled async functions.  Once an async function has
   // been compiled, there is another function with the same range which
@@ -438,8 +439,14 @@
   range.AddProperty("scriptIndex", GetScriptIndex(script));
   range.AddProperty("startPos", begin_pos);
   range.AddProperty("endPos", end_pos);
-  range.AddProperty("compiled", true);
+  // TODO(regis): What is the meaning of 'compiled' in the presence of bytecode?
+  // If it means 'called', it should say 'true' if bytecode is present.
+  range.AddProperty("compiled", !code.IsNull());
 
+  // TODO(regis): Do we want a report covering interpreted functions too?
+  if (code.IsNull()) {
+    return;
+  }
   if (IsReportRequested(kCallSites)) {
     PrintCallSitesData(&range, func, code);
   }
diff --git a/runtime/vm/stack_frame.cc b/runtime/vm/stack_frame.cc
index a8936df..59ed934 100644
--- a/runtime/vm/stack_frame.cc
+++ b/runtime/vm/stack_frame.cc
@@ -86,6 +86,15 @@
   ASSERT(thread_ == Thread::Current());
   Zone* zone = Thread::Current()->zone();
   if (IsDartFrame()) {
+    if (is_interpreted()) {
+      const Bytecode& bytecode = Bytecode::Handle(zone, LookupDartBytecode());
+      ASSERT(!bytecode.IsNull());
+      const Function& function = Function::Handle(zone, bytecode.function());
+      ASSERT(!function.IsNull());
+      return zone->PrintToString(
+          "[%-8s : sp(%#" Px ") fp(%#" Px ") pc(%#" Px ") bytecode %s ]",
+          GetName(), sp(), fp(), pc(), function.ToFullyQualifiedCString());
+    }
     const Code& code = Code::Handle(zone, LookupDartCode());
     ASSERT(!code.IsNull());
     const Object& owner = Object::Handle(zone, code.owner());
@@ -163,12 +172,21 @@
   // be able to reuse the handle based code and avoid having to add
   // helper functions to the raw object interface.
   NoSafepointScope no_safepoint;
-  Code code;
-  RawCode* raw_code = UncheckedGetCodeObject();
-  // May forward raw_code. Note we don't just visit the pc marker slot first
+  RawObject* pc_marker = *(reinterpret_cast<RawObject**>(
+      fp() + ((is_interpreted() ? kKBCPcMarkerSlotFromFp
+                                : runtime_frame_layout.code_from_fp) *
+              kWordSize)));
+  // May forward the marker. Note we don't just visit the pc marker slot first
   // because the visitor's forwarding might not be idempotent.
-  visitor->VisitPointer(reinterpret_cast<RawObject**>(&raw_code));
-  code ^= raw_code;
+  visitor->VisitPointer(&pc_marker);
+  Code code;
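+  // Only a Code pc marker requires stack-map-based visiting below; an
+  // interpreted frame holds a Bytecode object (or no heap object) here.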
+  if (pc_marker->IsHeapObject() && (pc_marker->GetClassId() == kCodeCid)) {
+    code ^= pc_marker;
+  } else {
+    ASSERT(pc_marker == Object::null() ||
+           (is_interpreted() && (!pc_marker->IsHeapObject() ||
+                                 (pc_marker->GetClassId() == kBytecodeCid))));
+  }
   if (!code.IsNull()) {
     // Optimized frames have a stack map. We need to visit the frame based
     // on the stack map.
@@ -289,6 +307,11 @@
 }
 
 RawFunction* StackFrame::LookupDartFunction() const {
+  if (is_interpreted()) {
+    const Bytecode& bytecode = Bytecode::Handle(LookupDartBytecode());
+    ASSERT(!bytecode.IsNull());
+    return bytecode.function();
+  }
   const Code& code = Code::Handle(LookupDartCode());
   if (!code.IsNull()) {
     return code.function();
@@ -314,17 +337,33 @@
 }
 
 RawCode* StackFrame::GetCodeObject() const {
-  RawCode* pc_marker = UncheckedGetCodeObject();
+  ASSERT(!is_interpreted());
+  RawObject* pc_marker = *(reinterpret_cast<RawObject**>(
+      fp() + runtime_frame_layout.code_from_fp * kWordSize));
   ASSERT((pc_marker == Object::null()) ||
          (pc_marker->GetClassId() == kCodeCid));
-  return pc_marker;
+  return reinterpret_cast<RawCode*>(pc_marker);
 }
 
-RawCode* StackFrame::UncheckedGetCodeObject() const {
-  return *(reinterpret_cast<RawCode**>(
-      fp() + ((is_interpreted() ? kKBCPcMarkerSlotFromFp
-                                : runtime_frame_layout.code_from_fp) *
-              kWordSize)));
+RawBytecode* StackFrame::LookupDartBytecode() const {
+// We add a no-GC scope to ensure that the code below does not trigger
+// a GC as we are handling raw object references here. It is possible
+// that this code is called while a GC is in progress; that is ok.
+#if !defined(HOST_OS_WINDOWS) && !defined(HOST_OS_FUCHSIA)
+  // On Windows and Fuchsia, the profiler calls this from a separate thread
+  // where Thread::Current() is NULL, so we cannot create a NoSafepointScope.
+  NoSafepointScope no_safepoint;
+#endif
+  return GetBytecodeObject();
+}
+
+RawBytecode* StackFrame::GetBytecodeObject() const {
+  ASSERT(is_interpreted());
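+  // Interpreted frames keep the Bytecode object (or null) in the pc marker
+  // slot, mirroring GetCodeObject() for compiled frames.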
+  RawObject* pc_marker = *(
+      reinterpret_cast<RawObject**>(fp() + kKBCPcMarkerSlotFromFp * kWordSize));
+  ASSERT((pc_marker == Object::null()) ||
+         (pc_marker->GetClassId() == kBytecodeCid));
+  return reinterpret_cast<RawBytecode*>(pc_marker);
 }
 
 bool StackFrame::FindExceptionHandler(Thread* thread,
@@ -334,32 +373,46 @@
                                       bool* is_optimized) const {
   REUSABLE_CODE_HANDLESCOPE(thread);
   Code& code = reused_code_handle.Handle();
-  code = LookupDartCode();
-  if (code.IsNull()) {
-    return false;  // Stub frames do not have exception handlers.
+  REUSABLE_BYTECODE_HANDLESCOPE(thread);
+  Bytecode& bytecode = reused_bytecode_handle.Handle();
+  REUSABLE_EXCEPTION_HANDLERS_HANDLESCOPE(thread);
+  ExceptionHandlers& handlers = reused_exception_handlers_handle.Handle();
+  REUSABLE_PC_DESCRIPTORS_HANDLESCOPE(thread);
+  PcDescriptors& descriptors = reused_pc_descriptors_handle.Handle();
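+  // Collect the payload bounds, exception handlers and pc descriptors from
+  // either the frame's Bytecode or its Code object.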
+  uword start;
+  intptr_t size;
+  if (is_interpreted()) {
+    bytecode = LookupDartBytecode();
+    ASSERT(!bytecode.IsNull());
+    start = bytecode.PayloadStart();
+    size = bytecode.Size();
+    handlers = bytecode.exception_handlers();
+    descriptors = bytecode.pc_descriptors();
+  } else {
+    code = LookupDartCode();
+    if (code.IsNull()) {
+      return false;  // Stub frames do not have exception handlers.
+    }
+    start = code.PayloadStart();
+    size = code.Size();
+    handlers = code.exception_handlers();
+    descriptors = code.pc_descriptors();
+    *is_optimized = code.is_optimized();
   }
-  *is_optimized = code.is_optimized();
   HandlerInfoCache* cache = thread->isolate()->handler_info_cache();
   ExceptionHandlerInfo* info = cache->Lookup(pc());
   if (info != NULL) {
-    *handler_pc = code.PayloadStart() + info->handler_pc_offset;
+    *handler_pc = start + info->handler_pc_offset;
     *needs_stacktrace = info->needs_stacktrace;
     *has_catch_all = info->has_catch_all;
     return true;
   }
-  uword pc_offset = pc() - code.PayloadStart();
+  uword pc_offset = pc() - start;
 
-  REUSABLE_EXCEPTION_HANDLERS_HANDLESCOPE(thread);
-  ExceptionHandlers& handlers = reused_exception_handlers_handle.Handle();
-  handlers = code.exception_handlers();
   if (handlers.num_entries() == 0) {
     return false;
   }
 
-  // Find pc descriptor for the current pc.
-  REUSABLE_PC_DESCRIPTORS_HANDLESCOPE(thread);
-  PcDescriptors& descriptors = reused_pc_descriptors_handle.Handle();
-  descriptors = code.pc_descriptors();
   PcDescriptors::Iterator iter(descriptors, RawPcDescriptors::kAnyKind);
   intptr_t try_index = -1;
   if (is_interpreted()) {
@@ -394,7 +447,7 @@
   }
   ExceptionHandlerInfo handler_info;
   handlers.GetHandlerInfo(try_index, &handler_info);
-  *handler_pc = code.PayloadStart() + handler_info.handler_pc_offset;
+  *handler_pc = start + handler_info.handler_pc_offset;
   *needs_stacktrace = handler_info.needs_stacktrace;
   *has_catch_all = handler_info.has_catch_all;
   cache->Insert(pc(), handler_info);
@@ -402,6 +455,10 @@
 }
 
 TokenPosition StackFrame::GetTokenPos() const {
+  if (is_interpreted()) {
+    // TODO(alexmarkov): Support source information in bytecode.
+    return TokenPosition::kNoSource;
+  }
   const Code& code = Code::Handle(LookupDartCode());
   if (code.IsNull()) {
     return TokenPosition::kNoSource;  // Stub frames do not have token_pos.
@@ -423,6 +480,9 @@
   if (IsEntryFrame() || IsExitFrame() || IsStubFrame()) {
     return true;
   }
+  if (is_interpreted()) {
+    return (LookupDartBytecode() != Bytecode::null());
+  }
   return (LookupDartCode() != Code::null());
 }
 
diff --git a/runtime/vm/stack_frame.h b/runtime/vm/stack_frame.h
index c43b60f..0ffe5da 100644
--- a/runtime/vm/stack_frame.h
+++ b/runtime/vm/stack_frame.h
@@ -165,6 +165,7 @@
 
   RawFunction* LookupDartFunction() const;
   RawCode* LookupDartCode() const;
+  RawBytecode* LookupDartBytecode() const;
   bool FindExceptionHandler(Thread* thread,
                             uword* handler_pc,
                             bool* needs_stacktrace,
@@ -189,7 +190,7 @@
 
  private:
   RawCode* GetCodeObject() const;
-  RawCode* UncheckedGetCodeObject() const;
+  RawBytecode* GetBytecodeObject() const;
 
   uword GetCallerSp() const {
     return fp() +
diff --git a/runtime/vm/stack_trace.cc b/runtime/vm/stack_trace.cc
index 8f5829d..a16b98d 100644
--- a/runtime/vm/stack_trace.cc
+++ b/runtime/vm/stack_trace.cc
@@ -17,7 +17,6 @@
                             StackFrameIterator::kNoCrossThreadIteration);
   StackFrame* frame = frames.NextFrame();
   ASSERT(frame != NULL);  // We expect to find a dart invocation frame.
-  Code& code = Code::Handle(zone);
   Function& function = Function::Handle(zone);
   const bool async_function_is_null = async_function.IsNull();
   while (frame != NULL) {
@@ -25,8 +24,7 @@
       if (skip_frames > 0) {
         skip_frames--;
       } else {
-        code = frame->LookupDartCode();
-        function = code.function();
+        function = frame->LookupDartFunction();
         frame_count++;
         if (!async_function_is_null &&
             (async_function.raw() == function.parent_function())) {
@@ -54,6 +52,7 @@
   ASSERT(frame != NULL);  // We expect to find a dart invocation frame.
   Function& function = Function::Handle(zone);
   Code& code = Code::Handle(zone);
+  Bytecode& bytecode = Bytecode::Handle(zone);
   Smi& offset = Smi::Handle(zone);
   intptr_t collected_frames_count = 0;
   while ((frame != NULL) && (collected_frames_count < count)) {
@@ -61,10 +60,17 @@
       if (skip_frames > 0) {
         skip_frames--;
       } else {
-        code = frame->LookupDartCode();
-        function = code.function();
-        offset = Smi::New(frame->pc() - code.PayloadStart());
-        code_array.SetAt(array_offset, code);
+        if (frame->is_interpreted()) {
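+          // For interpreted frames, record the Bytecode object and a pc offset
+          // relative to its payload start.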
+          bytecode = frame->LookupDartBytecode();
+          function = bytecode.function();
+          offset = Smi::New(frame->pc() - bytecode.PayloadStart());
+          code_array.SetAt(array_offset, bytecode);
+        } else {
+          code = frame->LookupDartCode();
+          function = code.function();
+          offset = Smi::New(frame->pc() - code.PayloadStart());
+          code_array.SetAt(array_offset, code);
+        }
         pc_offset_array.SetAt(array_offset, offset);
         array_offset++;
         collected_frames_count++;
@@ -100,8 +106,13 @@
   ASSERT(async_code_array->At(0) != Code::null());
   ASSERT(async_code_array->At(0) ==
          StubCode::AsynchronousGapMarker_entry()->code());
-  const Code& code = Code::Handle(Code::RawCast(async_code_array->At(1)));
-  *async_function = code.function();
+  const Object& code_object = Object::Handle(async_code_array->At(1));
+  if (code_object.IsCode()) {
+    *async_function = Code::Cast(code_object).function();
+  } else {
+    ASSERT(code_object.IsBytecode());
+    *async_function = Bytecode::Cast(code_object).function();
+  }
   ASSERT(!async_function->IsNull());
   ASSERT(async_function->IsAsyncFunction() ||
          async_function->IsAsyncGenerator());
diff --git a/runtime/vm/symbols.h b/runtime/vm/symbols.h
index 99c9b97..dd25ceb 100644
--- a/runtime/vm/symbols.h
+++ b/runtime/vm/symbols.h
@@ -190,6 +190,7 @@
   V(Namespace, "Namespace")                                                    \
   V(KernelProgramInfo, "KernelProgramInfo")                                    \
   V(Code, "Code")                                                              \
+  V(Bytecode, "Bytecode")                                                      \
   V(Instructions, "Instructions")                                              \
   V(ObjectPool, "ObjectPool")                                                  \
   V(PcDescriptors, "PcDescriptors")                                            \
diff --git a/runtime/vm/thread.h b/runtime/vm/thread.h
index e7e0ab0..2041022 100644
--- a/runtime/vm/thread.h
+++ b/runtime/vm/thread.h
@@ -25,6 +25,7 @@
 class CompilerState;
 class Class;
 class Code;
+class Bytecode;
 class Error;
 class ExceptionHandlers;
 class Field;
@@ -65,6 +66,7 @@
   V(Array)                                                                     \
   V(Class)                                                                     \
   V(Code)                                                                      \
+  V(Bytecode)                                                                  \
   V(Error)                                                                     \
   V(ExceptionHandlers)                                                         \
   V(Field)                                                                     \