[VM runtime] Initial version of Kernel Bytecode interpreter in VM runtime.
Not fully working yet: x64 only, no GC, no frame walking, etc.

Change-Id: I4d8357f6d46371bf21c3d54266cfe26163e3c8dc
Reviewed-on: https://dart-review.googlesource.com/50021
Commit-Queue: Régis Crelier <regis@google.com>
Reviewed-by: Zach Anderson <zra@google.com>
Reviewed-by: Alexander Markov <alexmarkov@google.com>
diff --git a/pkg/vm/tool/test_bytecode b/pkg/vm/tool/test_bytecode
new file mode 100755
index 0000000..f1c4893
--- /dev/null
+++ b/pkg/vm/tool/test_bytecode
@@ -0,0 +1,66 @@
+#!/usr/bin/env bash
+# Copyright (c) 2018, the Dart project authors.  Please see the AUTHORS file
+# for details. All rights reserved. Use of this source code is governed by a
+# BSD-style license that can be found in the LICENSE file.
+
+# Script for generating bytecode in a kernel file using the Dart 2 pipeline,
+# and for interpreting the resulting bytecode.
+
+# Usage
+# pkg/vm/tool/test_bytecode ~/foo.dart
+
+set -e
+
+# Pick the architecture and mode to build and test.
+BUILD_FLAGS="-m debug -a x64"
+BUILD_SUBDIR="DebugX64"
+
+function follow_links() {
+  file="$1"
+  while [ -h "$file" ]; do
+    # On Mac OS, readlink -f doesn't work.
+    file="$(readlink "$file")"
+  done
+  echo "$file"
+}
+
+# Unlike $0, $BASH_SOURCE points to the absolute path of this file.
+PROG_NAME="$(follow_links "$BASH_SOURCE")"
+
+# Handle the case where this script has been invoked via a symlink.
+CUR_DIR="$(cd "${PROG_NAME%/*}" ; pwd -P)"
+
+SDK_DIR="$CUR_DIR/../../.."
+BUILD_DIR="$SDK_DIR/out/$BUILD_SUBDIR"
+
+# Verify that the VM supports the interpreter; if not, rebuild it.
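+# (The probe below runs the VM with --trace-interpreter-after, a flag that is
+# only present in a VM built with dart_use_interpreter=true.)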
+REBUILD=0
+if [ -f "$BUILD_DIR/dart" ]
+then
+  "$BUILD_DIR/dart" --trace-interpreter-after=-1 \
+    "$SDK_DIR/runtime/tests/vm/dart/hello_world_test.dart" > /dev/null 2>&1 \
+    || REBUILD=1
+else
+  REBUILD=1
+fi
+if [ $REBUILD -ne 0 ]
+then
+  echo "Rebuilding VM to support interpreter"
+  rm -rf "$BUILD_DIR"
+  "$SDK_DIR/tools/gn.py" $BUILD_FLAGS --gn-args=dart_use_interpreter=true
+  "$SDK_DIR/tools/build.py" $BUILD_FLAGS runtime
+fi
+
+# Generate dill file containing bytecode for input dart source.
+"$CUR_DIR/gen_kernel" --platform "$BUILD_DIR/vm_platform_strong.dill" \
+  --gen-bytecode "$@" -o "$BUILD_DIR/test_bytecode.dill"
+
+# Required flags.
+DART_VM_FLAGS="--preview-dart-2 --optimization-counter-threshold=-1 $DART_VM_FLAGS"
+
+# Optional flags.
+# DART_VM_FLAGS="--force-log-flush --dump-kernel-bytecode --trace-interpreter-after=0 $DART_VM_FLAGS"
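+#
+# For example, to dump the generated bytecode for a single run (assuming the
+# VM was already built with interpreter support):
+#   DART_VM_FLAGS="--dump-kernel-bytecode" pkg/vm/tool/test_bytecode ~/foo.dart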
+
+# Execute dill file.
+exec "$BUILD_DIR/dart" $DART_VM_FLAGS "$BUILD_DIR/test_bytecode.dill"
+
diff --git a/runtime/BUILD.gn b/runtime/BUILD.gn
index f0e3eac..1a7fc12 100644
--- a/runtime/BUILD.gn
+++ b/runtime/BUILD.gn
@@ -143,6 +143,10 @@
     ]
   }
 
+  if (dart_use_interpreter) {
+    defines += [ "DART_USE_INTERPRETER" ]
+  }
+
   if (!is_win) {
     cflags = [
       "-Werror",
diff --git a/runtime/platform/globals.h b/runtime/platform/globals.h
index ef225bc..517d182 100644
--- a/runtime/platform/globals.h
+++ b/runtime/platform/globals.h
@@ -140,6 +140,11 @@
 #error DART_PRECOMPILED_RUNTIME and DART_NOSNAPSHOT are mutually exclusive
 #endif  // defined(DART_PRECOMPILED_RUNTIME) && defined(DART_NOSNAPSHOT)
 
+#if defined(DART_PRECOMPILED_RUNTIME) || defined(DART_PRECOMPILER)
+// TODO(zra): Fix GN build file not to define DART_USE_INTERPRETER in this case.
+#undef DART_USE_INTERPRETER
+#endif  // defined(DART_PRECOMPILED_RUNTIME) || defined(DART_PRECOMPILER)
+
 #if defined(DART_PRECOMPILED_RUNTIME)
 #define NOT_IN_PRECOMPILED(code)
 #else
diff --git a/runtime/runtime_args.gni b/runtime/runtime_args.gni
index e88807e..f6586e2 100644
--- a/runtime/runtime_args.gni
+++ b/runtime/runtime_args.gni
@@ -77,4 +77,8 @@
   } else {
     dart_component_kind = "static_library"
   }
+
+  # Whether the runtime should interpret called functions for which bytecode
+  # is provided by kernel, rather than compile them before execution.
+  dart_use_interpreter = false
 }
diff --git a/runtime/vm/compiler/assembler/disassembler_kbc.cc b/runtime/vm/compiler/assembler/disassembler_kbc.cc
new file mode 100644
index 0000000..0fefb14
--- /dev/null
+++ b/runtime/vm/compiler/assembler/disassembler_kbc.cc
@@ -0,0 +1,367 @@
+// Copyright (c) 2018, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/globals.h"
+#if defined(DART_USE_INTERPRETER)
+
+#include "vm/compiler/assembler/disassembler_kbc.h"
+
+#include "platform/assert.h"
+#include "vm/constants_kbc.h"
+#include "vm/cpu.h"
+#include "vm/instructions.h"
+
+namespace dart {
+
+static const char* kOpcodeNames[] = {
+#define BYTECODE_NAME(name, encoding, op1, op2, op3) #name,
+    KERNEL_BYTECODES_LIST(BYTECODE_NAME)
+#undef BYTECODE_NAME
+};
+
+static const size_t kOpcodeCount =
+    sizeof(kOpcodeNames) / sizeof(kOpcodeNames[0]);
+
+typedef void (*BytecodeFormatter)(char* buffer,
+                                  intptr_t size,
+                                  uword pc,
+                                  uint32_t bc);
+typedef void (*Fmt)(char** buf, intptr_t* size, uword pc, int32_t value);
+
+template <typename ValueType>
+void FormatOperand(char** buf,
+                   intptr_t* size,
+                   const char* fmt,
+                   ValueType value) {
+  intptr_t written = Utils::SNPrint(*buf, *size, fmt, value);
+  if (written < *size) {
+    *buf += written;
+    *size -= written;  // Shrink the remaining buffer space.
+  } else {
+    *size = -1;
+  }
+}
+
+static void Fmt___(char** buf, intptr_t* size, uword pc, int32_t value) {}
+
+static void Fmttgt(char** buf, intptr_t* size, uword pc, int32_t value) {
+  FormatOperand(buf, size, "-> %" Px, pc + (value << 2));
+}
+
+static void Fmtlit(char** buf, intptr_t* size, uword pc, int32_t value) {
+  FormatOperand(buf, size, "k%d", value);
+}
+
+static void Fmtreg(char** buf, intptr_t* size, uword pc, int32_t value) {
+  FormatOperand(buf, size, "r%d", value);
+}
+
+static void Fmtxeg(char** buf, intptr_t* size, uword pc, int32_t value) {
+  if (value < 0) {
+    FormatOperand(buf, size, "FP[%d]", value);
+  } else {
+    Fmtreg(buf, size, pc, value);
+  }
+}
+
+static void Fmtnum(char** buf, intptr_t* size, uword pc, int32_t value) {
+  FormatOperand(buf, size, "#%d", value);
+}
+
+static void Apply(char** buf,
+                  intptr_t* size,
+                  uword pc,
+                  Fmt fmt,
+                  int32_t value,
+                  const char* suffix) {
+  if (*size <= 0) {
+    return;
+  }
+
+  fmt(buf, size, pc, value);
+  if (*size > 0) {
+    FormatOperand(buf, size, "%s", suffix);
+  }
+}
+
+static void Format0(char* buf,
+                    intptr_t size,
+                    uword pc,
+                    uint32_t op,
+                    Fmt op1,
+                    Fmt op2,
+                    Fmt op3) {}
+
+static void FormatT(char* buf,
+                    intptr_t size,
+                    uword pc,
+                    uint32_t op,
+                    Fmt op1,
+                    Fmt op2,
+                    Fmt op3) {
+  const int32_t x = static_cast<int32_t>(op) >> 8;
+  Apply(&buf, &size, pc, op1, x, "");
+}
+
+static void FormatA(char* buf,
+                    intptr_t size,
+                    uword pc,
+                    uint32_t op,
+                    Fmt op1,
+                    Fmt op2,
+                    Fmt op3) {
+  const int32_t a = (op & 0xFF00) >> 8;
+  Apply(&buf, &size, pc, op1, a, "");
+}
+
+static void FormatA_D(char* buf,
+                      intptr_t size,
+                      uword pc,
+                      uint32_t op,
+                      Fmt op1,
+                      Fmt op2,
+                      Fmt op3) {
+  const int32_t a = (op & 0xFF00) >> 8;
+  const int32_t bc = op >> 16;
+  Apply(&buf, &size, pc, op1, a, ", ");
+  Apply(&buf, &size, pc, op2, bc, "");
+}
+
+static void FormatA_X(char* buf,
+                      intptr_t size,
+                      uword pc,
+                      uint32_t op,
+                      Fmt op1,
+                      Fmt op2,
+                      Fmt op3) {
+  const int32_t a = (op & 0xFF00) >> 8;
+  const int32_t bc = static_cast<int32_t>(op) >> 16;
+  Apply(&buf, &size, pc, op1, a, ", ");
+  Apply(&buf, &size, pc, op2, bc, "");
+}
+
+static void FormatX(char* buf,
+                    intptr_t size,
+                    uword pc,
+                    uint32_t op,
+                    Fmt op1,
+                    Fmt op2,
+                    Fmt op3) {
+  const int32_t bc = static_cast<int32_t>(op) >> 16;
+  Apply(&buf, &size, pc, op1, bc, "");
+}
+
+static void FormatD(char* buf,
+                    intptr_t size,
+                    uword pc,
+                    uint32_t op,
+                    Fmt op1,
+                    Fmt op2,
+                    Fmt op3) {
+  const int32_t bc = op >> 16;
+  Apply(&buf, &size, pc, op1, bc, "");
+}
+
+static void FormatA_B_C(char* buf,
+                        intptr_t size,
+                        uword pc,
+                        uint32_t op,
+                        Fmt op1,
+                        Fmt op2,
+                        Fmt op3) {
+  const int32_t a = (op >> 8) & 0xFF;
+  const int32_t b = (op >> 16) & 0xFF;
+  const int32_t c = (op >> 24) & 0xFF;
+  Apply(&buf, &size, pc, op1, a, ", ");
+  Apply(&buf, &size, pc, op2, b, ", ");
+  Apply(&buf, &size, pc, op3, c, "");
+}
+
+static void FormatA_B_Y(char* buf,
+                        intptr_t size,
+                        uword pc,
+                        uint32_t op,
+                        Fmt op1,
+                        Fmt op2,
+                        Fmt op3) {
+  const int32_t a = (op >> 8) & 0xFF;
+  const int32_t b = (op >> 16) & 0xFF;
+  const int32_t y = static_cast<int8_t>((op >> 24) & 0xFF);
+  Apply(&buf, &size, pc, op1, a, ", ");
+  Apply(&buf, &size, pc, op2, b, ", ");
+  Apply(&buf, &size, pc, op3, y, "");
+}
+
+#define BYTECODE_FORMATTER(name, encoding, op1, op2, op3)                      \
+  static void Format##name(char* buf, intptr_t size, uword pc, uint32_t op) {  \
+    Format##encoding(buf, size, pc, op, Fmt##op1, Fmt##op2, Fmt##op3);         \
+  }
+KERNEL_BYTECODES_LIST(BYTECODE_FORMATTER)
+#undef BYTECODE_FORMATTER
+
+static const BytecodeFormatter kFormatters[] = {
+#define BYTECODE_FORMATTER(name, encoding, op1, op2, op3) &Format##name,
+    KERNEL_BYTECODES_LIST(BYTECODE_FORMATTER)
+#undef BYTECODE_FORMATTER
+};
+
+static bool HasLoadFromPool(KBCInstr instr) {
+  switch (KernelBytecode::DecodeOpcode(instr)) {
+    case KernelBytecode::kLoadConstant:
+    case KernelBytecode::kPushConstant:
+    case KernelBytecode::kStaticCall:
+    case KernelBytecode::kIndirectStaticCall:
+    case KernelBytecode::kInstanceCall1:
+    case KernelBytecode::kInstanceCall2:
+    case KernelBytecode::kInstanceCall1Opt:
+    case KernelBytecode::kInstanceCall2Opt:
+    case KernelBytecode::kStoreStaticTOS:
+    case KernelBytecode::kPushStatic:
+    case KernelBytecode::kAllocate:
+    case KernelBytecode::kInstantiateType:
+    case KernelBytecode::kInstantiateTypeArgumentsTOS:
+    case KernelBytecode::kAssertAssignable:
+      return true;
+    default:
+      return false;
+  }
+}
+
+static bool GetLoadedObjectAt(uword pc,
+                              const ObjectPool& object_pool,
+                              Object* obj) {
+  KBCInstr instr = KernelBytecode::At(pc);
+  if (HasLoadFromPool(instr)) {
+    uint16_t index = KernelBytecode::DecodeD(instr);
+    if (object_pool.TypeAt(index) == ObjectPool::kTaggedObject) {
+      *obj = object_pool.ObjectAt(index);
+      return true;
+    }
+  }
+  return false;
+}
+
+void KernelBytecodeDisassembler::DecodeInstruction(char* hex_buffer,
+                                                   intptr_t hex_size,
+                                                   char* human_buffer,
+                                                   intptr_t human_size,
+                                                   int* out_instr_size,
+                                                   const Code& bytecode,
+                                                   Object** object,
+                                                   uword pc) {
+#if !defined(PRODUCT)
+  const uint32_t instr = *reinterpret_cast<uint32_t*>(pc);
+  const uint8_t opcode = instr & 0xFF;
+  ASSERT(opcode < kOpcodeCount);
+  size_t name_size =
+      Utils::SNPrint(human_buffer, human_size, "%-10s\t", kOpcodeNames[opcode]);
+
+  human_buffer += name_size;
+  human_size -= name_size;
+  kFormatters[opcode](human_buffer, human_size, pc, instr);
+
+  Utils::SNPrint(hex_buffer, hex_size, "%08x", instr);
+  if (out_instr_size) {
+    *out_instr_size = sizeof(uint32_t);
+  }
+
+  *object = NULL;
+  if (!bytecode.IsNull()) {
+    *object = &Object::Handle();
+    const ObjectPool& pool = ObjectPool::Handle(bytecode.object_pool());
+    if (!GetLoadedObjectAt(pc, pool, *object)) {
+      *object = NULL;
+    }
+  }
+#else
+  UNREACHABLE();
+#endif
+}
+
+void KernelBytecodeDisassembler::Disassemble(uword start,
+                                             uword end,
+                                             DisassemblyFormatter* formatter,
+                                             const Code& bytecode) {
+#if !defined(PRODUCT)
+  const Code::Comments& comments =
+      bytecode.IsNull() ? Code::Comments::New(0) : bytecode.comments();
+  ASSERT(formatter != NULL);
+  char hex_buffer[kHexadecimalBufferSize];  // Instruction in hexadecimal form.
+  char human_buffer[kUserReadableBufferSize];  // Human-readable instruction.
+  uword pc = start;
+  intptr_t comment_finger = 0;
+  GrowableArray<const Function*> inlined_functions;
+  GrowableArray<TokenPosition> token_positions;
+  while (pc < end) {
+    const intptr_t offset = pc - start;
+    const intptr_t old_comment_finger = comment_finger;
+    while (comment_finger < comments.Length() &&
+           comments.PCOffsetAt(comment_finger) <= offset) {
+      formatter->Print(
+          "        ;; %s\n",
+          String::Handle(comments.CommentAt(comment_finger)).ToCString());
+      comment_finger++;
+    }
+    if (old_comment_finger != comment_finger) {
+      char str[4000];
+      BufferFormatter f(str, sizeof(str));
+      // Comment emitted, emit inlining information.
+      bytecode.GetInlinedFunctionsAtInstruction(offset, &inlined_functions,
+                                                &token_positions);
+      // Skip top scope function printing (last entry in 'inlined_functions').
+      bool first = true;
+      for (intptr_t i = 1; i < inlined_functions.length(); i++) {
+        const char* name = inlined_functions[i]->ToQualifiedCString();
+        if (first) {
+          f.Print("        ;; Inlined [%s", name);
+          first = false;
+        } else {
+          f.Print(" -> %s", name);
+        }
+      }
+      if (!first) {
+        f.Print("]\n");
+        formatter->Print(str);
+      }
+    }
+    int instruction_length;
+    Object* object;
+    DecodeInstruction(hex_buffer, sizeof(hex_buffer), human_buffer,
+                      sizeof(human_buffer), &instruction_length, bytecode,
+                      &object, pc);
+    formatter->ConsumeInstruction(bytecode, hex_buffer, sizeof(hex_buffer),
+                                  human_buffer, sizeof(human_buffer), object,
+                                  pc);
+    pc += instruction_length;
+  }
+#else
+  UNREACHABLE();
+#endif
+}
+
+void KernelBytecodeDisassembler::Disassemble(const Function& function) {
+#if !defined(PRODUCT)
+  ASSERT(function.HasBytecode());
+  const char* function_fullname = function.ToFullyQualifiedCString();
+  Zone* zone = Thread::Current()->zone();
+  const Code& bytecode = Code::Handle(zone, function.Bytecode());
+  THR_Print("Bytecode for function '%s' {\n", function_fullname);
+  const Instructions& instr = Instructions::Handle(bytecode.instructions());
+  uword start = instr.PayloadStart();
+  DisassembleToStdout stdout_formatter;
+  LogBlock lb;
+  Disassemble(start, start + instr.Size(), &stdout_formatter, bytecode);
+  THR_Print("}\n");
+
+  const ObjectPool& object_pool =
+      ObjectPool::Handle(zone, bytecode.GetObjectPool());
+  object_pool.DebugPrint();
+#else
+  UNREACHABLE();
+#endif
+}
+
+}  // namespace dart
+
+#endif  // defined(DART_USE_INTERPRETER)
diff --git a/runtime/vm/compiler/assembler/disassembler_kbc.h b/runtime/vm/compiler/assembler/disassembler_kbc.h
new file mode 100644
index 0000000..b3e2408
--- /dev/null
+++ b/runtime/vm/compiler/assembler/disassembler_kbc.h
@@ -0,0 +1,89 @@
+// Copyright (c) 2018, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#ifndef RUNTIME_VM_COMPILER_ASSEMBLER_DISASSEMBLER_KBC_H_
+#define RUNTIME_VM_COMPILER_ASSEMBLER_DISASSEMBLER_KBC_H_
+
+#include "vm/globals.h"
+#if defined(DART_USE_INTERPRETER)
+
+#include "vm/compiler/assembler/disassembler.h"
+
+namespace dart {
+
+// Disassemble instructions.
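+//
+// Example usage (a sketch; assumes the function's bytecode has already been
+// loaded, e.g. by BytecodeMetadataHelper::CopyBytecode):
+//
+//   if (function.HasBytecode()) {
+//     KernelBytecodeDisassembler::Disassemble(function);
+//   }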
+class KernelBytecodeDisassembler : public AllStatic {
+ public:
+  // Disassemble instructions between start and end.
+  // (The assumption is that start is at a valid instruction.)
+  static void Disassemble(uword start,
+                          uword end,
+                          DisassemblyFormatter* formatter,
+                          const Code& bytecode);
+
+  static void Disassemble(uword start,
+                          uword end,
+                          DisassemblyFormatter* formatter) {
+    Disassemble(start, end, formatter, Code::Handle());
+  }
+
+  static void Disassemble(uword start, uword end, const Code& bytecode) {
+#if !defined(PRODUCT)
+    DisassembleToStdout stdout_formatter;
+    LogBlock lb;
+    Disassemble(start, end, &stdout_formatter, bytecode);
+#else
+    UNREACHABLE();
+#endif
+  }
+
+  static void Disassemble(uword start, uword end) {
+#if !defined(PRODUCT)
+    DisassembleToStdout stdout_formatter;
+    LogBlock lb;
+    Disassemble(start, end, &stdout_formatter);
+#else
+    UNREACHABLE();
+#endif
+  }
+
+  static void Disassemble(uword start,
+                          uword end,
+                          char* buffer,
+                          uintptr_t buffer_size) {
+#if !defined(PRODUCT)
+    DisassembleToMemory memory_formatter(buffer, buffer_size);
+    LogBlock lb;
+    Disassemble(start, end, &memory_formatter);
+#else
+    UNREACHABLE();
+#endif
+  }
+
+  // Decodes one instruction.
+  // Writes a hexadecimal representation into the hex_buffer and a
+  // human-readable representation into the human_buffer.
+  // Writes the length of the decoded instruction in bytes in out_instr_len.
+  static void DecodeInstruction(char* hex_buffer,
+                                intptr_t hex_size,
+                                char* human_buffer,
+                                intptr_t human_size,
+                                int* out_instr_len,
+                                const Code& bytecode,
+                                Object** object,
+                                uword pc);
+
+  static void Disassemble(const Function& function);
+
+ private:
+  static const int kHexadecimalBufferSize = 32;
+  static const int kUserReadableBufferSize = 256;
+};
+
+}  // namespace dart
+
+#endif  // defined(DART_USE_INTERPRETER)
+
+#endif  // RUNTIME_VM_COMPILER_ASSEMBLER_DISASSEMBLER_KBC_H_
diff --git a/runtime/vm/compiler/compiler_sources.gni b/runtime/vm/compiler/compiler_sources.gni
index af0ec3f..376f43b 100644
--- a/runtime/vm/compiler/compiler_sources.gni
+++ b/runtime/vm/compiler/compiler_sources.gni
@@ -26,6 +26,8 @@
   "assembler/disassembler_arm.cc",
   "assembler/disassembler_arm64.cc",
   "assembler/disassembler_dbc.cc",
+  "assembler/disassembler_kbc.cc",
+  "assembler/disassembler_kbc.h",
   "assembler/disassembler_x86.cc",
   "backend/block_scheduler.cc",
   "backend/block_scheduler.h",
diff --git a/runtime/vm/compiler/frontend/kernel_binary_flowgraph.cc b/runtime/vm/compiler/frontend/kernel_binary_flowgraph.cc
index 5d5b373..0e85f68 100644
--- a/runtime/vm/compiler/frontend/kernel_binary_flowgraph.cc
+++ b/runtime/vm/compiler/frontend/kernel_binary_flowgraph.cc
@@ -4,6 +4,7 @@
 
 #include "vm/compiler/frontend/kernel_binary_flowgraph.h"
 #include "vm/compiler/aot/precompiler.h"
+#include "vm/compiler/assembler/disassembler_kbc.h"
 #include "vm/compiler/frontend/prologue_builder.h"
 #include "vm/compiler/jit/compiler.h"
 #include "vm/longjump.h"
@@ -14,6 +15,11 @@
 #if !defined(DART_PRECOMPILED_RUNTIME)
 
 namespace dart {
+
+#if defined(DART_USE_INTERPRETER)
+DEFINE_FLAG(bool, dump_kernel_bytecode, false, "Dump kernel bytecode");
+#endif  // defined(DART_USE_INTERPRETER)
+
 namespace kernel {
 
 #define Z (zone_)
@@ -907,6 +913,262 @@
   return InferredTypeMetadata(cid, nullable);
 }
 
+#if defined(DART_USE_INTERPRETER)
+void BytecodeMetadataHelper::CopyBytecode(const Function& function) {
+  // TODO(regis): Avoid copying bytecode from mapped kernel binary.
+  const intptr_t node_offset = function.kernel_offset();
+  const intptr_t md_offset = GetNextMetadataPayloadOffset(node_offset);
+  if (md_offset < 0) {
+    return;
+  }
+
+  AlternativeReadingScope alt(&builder_->reader_, &H.metadata_payloads(),
+                              md_offset - MetadataPayloadOffset);
+
+  // Read bytecode.
+  intptr_t bytecode_size = builder_->reader_.ReadUInt();
+  intptr_t bytecode_offset = builder_->reader_.offset();
+  uint8_t* bytecode_data = builder_->reader_.CopyDataIntoZone(
+      builder_->zone_, bytecode_offset, bytecode_size);
+
+  // This enum and the code below reading the constant pool from kernel must be
+  // kept in sync with pkg/vm/lib/bytecode/constant_pool.dart.
+  enum ConstantPoolTag {
+    kInvalid,
+    kNull,
+    kString,
+    kInt,
+    kDouble,
+    kBool,
+    kArgDesc,
+    kICData,
+    kStaticICData,
+    kField,
+    kFieldOffset,
+    kClass,
+    kTypeArgumentsFieldOffset,
+    kTearOff,
+    kType,
+    kTypeArguments,
+    kList,
+    kInstance,
+    kSymbol,
+    kTypeArgumentsForInstanceAllocation,
+  };
+
+  // Read object pool.
+  builder_->reader_.set_offset(bytecode_offset + bytecode_size);
+  intptr_t obj_count = builder_->reader_.ReadListLength();
+  const ObjectPool& obj_pool =
+      ObjectPool::Handle(builder_->zone_, ObjectPool::New(obj_count));
+  Object& obj = Object::Handle(builder_->zone_);
+  Object& elem = Object::Handle(builder_->zone_);
+  Array& array = Array::Handle(builder_->zone_);
+  Field& field = Field::Handle(builder_->zone_);
+  String& name = String::Handle(builder_->zone_);
+  for (intptr_t i = 0; i < obj_count; ++i) {
+    const intptr_t tag = builder_->ReadTag();
+    switch (tag) {
+      case ConstantPoolTag::kInvalid:
+        UNREACHABLE();
+      case ConstantPoolTag::kNull:
+        obj = Object::null();
+        break;
+      case ConstantPoolTag::kString:
+        obj = H.DartString(builder_->ReadStringReference()).raw();
+        ASSERT(obj.IsString());
+        obj = H.Canonicalize(String::Cast(obj));
+        break;
+      case ConstantPoolTag::kInt: {
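+        // The 64-bit value is encoded as two 32-bit halves, low half first
+        // (the kDouble case below uses the same encoding).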
+        uint32_t low_bits = builder_->ReadUInt32();
+        int64_t value = builder_->ReadUInt32();
+        value = (value << 32) | low_bits;
+        obj = Integer::New(value);
+      } break;
+      case ConstantPoolTag::kDouble: {
+        uint32_t low_bits = builder_->ReadUInt32();
+        uint64_t bits = builder_->ReadUInt32();
+        bits = (bits << 32) | low_bits;
+        double value = bit_cast<double, uint64_t>(bits);
+        obj = Double::New(value);
+      } break;
+      case ConstantPoolTag::kBool:
+        if (builder_->ReadUInt() == 1) {
+          obj = Bool::True().raw();
+        } else {
+          obj = Bool::False().raw();
+        }
+        break;
+      case ConstantPoolTag::kArgDesc: {
+        intptr_t num_arguments = builder_->ReadUInt();
+        intptr_t num_type_args = builder_->ReadUInt();
+        intptr_t num_arg_names = builder_->ReadListLength();
+        if (num_arg_names == 0) {
+          obj = ArgumentsDescriptor::New(num_type_args, num_arguments);
+        } else {
+          array = Array::New(num_arg_names);
+          for (intptr_t j = 0; j < num_arg_names; j++) {
+            array.SetAt(j, H.DartSymbolPlain(builder_->ReadStringReference()));
+          }
+          obj = ArgumentsDescriptor::New(num_type_args, num_arguments, array);
+        }
+      } break;
+      case ConstantPoolTag::kICData: {
+        NameIndex target = builder_->ReadCanonicalNameReference();
+        name = H.DartProcedureName(target).raw();
+        intptr_t arg_desc_index = builder_->ReadUInt();
+        ASSERT(arg_desc_index < i);
+        array ^= obj_pool.ObjectAt(arg_desc_index);
+        // TODO(regis): Should num_args_tested be explicitly provided?
+        obj = ICData::New(function, name,
+                          array,  // Arguments descriptor.
+                          Thread::kNoDeoptId, 1 /* num_args_tested */,
+                          ICData::RebindRule::kInstance);
+#if defined(TAG_IC_DATA)
+        ICData::Cast(obj).set_tag(Instruction::kInstanceCall);
+#endif
+      } break;
+      case ConstantPoolTag::kStaticICData: {
+        NameIndex target = builder_->ReadCanonicalNameReference();
+        if (H.IsConstructor(target)) {
+          name = H.DartConstructorName(target).raw();
+          elem = H.LookupConstructorByKernelConstructor(target);
+        } else {
+          name = H.DartProcedureName(target).raw();
+          elem = H.LookupStaticMethodByKernelProcedure(target);
+        }
+        ASSERT(elem.IsFunction());
+        intptr_t arg_desc_index = builder_->ReadUInt();
+        ASSERT(arg_desc_index < i);
+        array ^= obj_pool.ObjectAt(arg_desc_index);
+        obj = ICData::New(function, name,
+                          array,  // Arguments descriptor.
+                          Thread::kNoDeoptId, 0 /* num_args_tested */,
+                          ICData::RebindRule::kStatic);
+        ICData::Cast(obj).AddTarget(Function::Cast(elem));
+#if defined(TAG_IC_DATA)
+        ICData::Cast(obj).set_tag(Instruction::kStaticCall);
+#endif
+      } break;
+      case ConstantPoolTag::kField:
+        obj =
+            H.LookupFieldByKernelField(builder_->ReadCanonicalNameReference());
+        ASSERT(obj.IsField());
+        break;
+      case ConstantPoolTag::kFieldOffset:
+        obj =
+            H.LookupFieldByKernelField(builder_->ReadCanonicalNameReference());
+        ASSERT(obj.IsField());
+        obj = Smi::New(Field::Cast(obj).Offset() / kWordSize);
+        break;
+      case ConstantPoolTag::kClass:
+        obj =
+            H.LookupClassByKernelClass(builder_->ReadCanonicalNameReference());
+        ASSERT(obj.IsClass());
+        break;
+      case ConstantPoolTag::kTypeArgumentsFieldOffset:
+        obj =
+            H.LookupClassByKernelClass(builder_->ReadCanonicalNameReference());
+        ASSERT(obj.IsClass());
+        obj = Smi::New(Class::Cast(obj).type_arguments_field_offset() /
+                       kWordSize);
+        break;
+      case ConstantPoolTag::kTearOff:
+        obj = H.LookupStaticMethodByKernelProcedure(
+            builder_->ReadCanonicalNameReference());
+        ASSERT(obj.IsFunction());
+        obj = Function::Cast(obj).ImplicitClosureFunction();
+        ASSERT(obj.IsFunction());
+        obj = Function::Cast(obj).ImplicitStaticClosure();
+        ASSERT(obj.IsInstance());
+        obj = H.Canonicalize(Instance::Cast(obj));
+        break;
+      case ConstantPoolTag::kType:
+        UNIMPLEMENTED();  // Encoding is under discussion with CFE team.
+        obj = builder_->type_translator_.BuildType().raw();
+        ASSERT(obj.IsAbstractType());
+        break;
+      case ConstantPoolTag::kTypeArguments:
+        UNIMPLEMENTED();  // Encoding is under discussion with CFE team.
+        obj = builder_->type_translator_
+                  .BuildTypeArguments(builder_->ReadListLength())
+                  .raw();
+        ASSERT(obj.IsNull() || obj.IsTypeArguments());
+        break;
+      case ConstantPoolTag::kList: {
+        obj = builder_->type_translator_.BuildType().raw();
+        ASSERT(obj.IsAbstractType());
+        const intptr_t length = builder_->ReadListLength();
+        array = Array::New(length, AbstractType::Cast(obj));
+        for (intptr_t j = 0; j < length; j++) {
+          intptr_t elem_index = builder_->ReadUInt();
+          ASSERT(elem_index < i);
+          elem = obj_pool.ObjectAt(elem_index);
+          array.SetAt(j, elem);
+        }
+        obj = H.Canonicalize(array);
+        ASSERT(!obj.IsNull());
+      } break;
+      case ConstantPoolTag::kInstance: {
+        obj =
+            H.LookupClassByKernelClass(builder_->ReadCanonicalNameReference());
+        ASSERT(obj.IsClass());
+        obj = Instance::New(Class::Cast(obj), Heap::kOld);
+        intptr_t elem_index = builder_->ReadUInt();
+        ASSERT(elem_index < i);
+        elem = obj_pool.ObjectAt(elem_index);
+        if (!elem.IsNull()) {
+          ASSERT(elem.IsTypeArguments());
+          Instance::Cast(obj).SetTypeArguments(TypeArguments::Cast(elem));
+        }
+        intptr_t num_fields = builder_->ReadUInt();
+        for (intptr_t j = 0; j < num_fields; j++) {
+          NameIndex field_name = builder_->ReadCanonicalNameReference();
+          ASSERT(H.IsField(field_name));
+          field = H.LookupFieldByKernelField(field_name);
+          intptr_t elem_index = builder_->ReadUInt();
+          ASSERT(elem_index < i);
+          elem = obj_pool.ObjectAt(elem_index);
+          Instance::Cast(obj).SetField(field, elem);
+        }
+        obj = H.Canonicalize(Instance::Cast(obj));
+      } break;
+      case ConstantPoolTag::kSymbol:
+        obj = H.DartSymbolPlain(builder_->ReadStringReference()).raw();
+        ASSERT(String::Cast(obj).IsSymbol());
+        break;
+      case ConstantPoolTag::kTypeArgumentsForInstanceAllocation: {
+        obj =
+            H.LookupClassByKernelClass(builder_->ReadCanonicalNameReference());
+        ASSERT(obj.IsClass());
+        intptr_t elem_index = builder_->ReadUInt();
+        ASSERT(elem_index < i);
+        elem = obj_pool.ObjectAt(elem_index);
+        ASSERT(elem.IsNull() || elem.IsTypeArguments());
+        elem = Type::New(Class::Cast(obj), TypeArguments::Cast(elem),
+                         TokenPosition::kNoSource);
+        elem = ClassFinalizer::FinalizeType(Class::Cast(obj), Type::Cast(elem));
+        obj = Type::Cast(elem).arguments();
+      } break;
+      default:
+        UNREACHABLE();
+    }
+    obj_pool.SetTypeAt(i, ObjectPool::kTaggedObject);
+    obj_pool.SetObjectAt(i, obj);
+  }
+
+  const Code& bytecode = Code::Handle(
+      builder_->zone_,
+      Code::FinalizeBytecode(reinterpret_cast<void*>(bytecode_data),
+                             bytecode_size, obj_pool));
+  function.AttachBytecode(bytecode);
+
+  if (FLAG_dump_kernel_bytecode) {
+    KernelBytecodeDisassembler::Disassemble(function);
+  }
+}
+#endif  // defined(DART_USE_INTERPRETER)
+
 StreamingScopeBuilder::StreamingScopeBuilder(ParsedFunction* parsed_function)
     : result_(NULL),
       parsed_function_(parsed_function),
@@ -5788,6 +6050,17 @@
 
   SetOffset(kernel_offset);
 
+#if defined(DART_USE_INTERPRETER)
+  // TODO(regis): Clean up this logic of when to compile.
+  // If the bytecode was previously loaded, we really want to compile.
+  if (!function.HasBytecode()) {
+    bytecode_metadata_helper_.CopyBytecode(function);
+    if (function.HasBytecode()) {
+      return NULL;
+    }
+  }
+#endif
+
   // We need to read out the NSM-forwarder bit before we can build scopes.
   switch (function.kind()) {
     case RawFunction::kImplicitClosureFunction:
@@ -10684,6 +10957,9 @@
         procedure_attributes_metadata_helper_.SetMetadataMappings(
             offset + kUInt32Size, mappings_num);
       }
+    } else if (H.StringEquals(tag, BytecodeMetadataHelper::tag())) {
+      bytecode_metadata_helper_.SetMetadataMappings(offset + kUInt32Size,
+                                                    mappings_num);
     }
   }
 }
diff --git a/runtime/vm/compiler/frontend/kernel_binary_flowgraph.h b/runtime/vm/compiler/frontend/kernel_binary_flowgraph.h
index 591bd04..77d8ce6 100644
--- a/runtime/vm/compiler/frontend/kernel_binary_flowgraph.h
+++ b/runtime/vm/compiler/frontend/kernel_binary_flowgraph.h
@@ -664,6 +664,19 @@
                     ProcedureAttributesMetadata* metadata);
 };
 
+// Helper class which provides access to bytecode metadata.
+class BytecodeMetadataHelper : public MetadataHelper {
+ public:
+  static const char* tag() { return "vm.bytecode"; }
+
+  explicit BytecodeMetadataHelper(StreamingFlowGraphBuilder* builder)
+      : MetadataHelper(builder) {}
+
+#if defined(DART_USE_INTERPRETER)
+  void CopyBytecode(const Function& function);
+#endif
+};
+
 class StreamingDartTypeTranslator {
  public:
   StreamingDartTypeTranslator(StreamingFlowGraphBuilder* builder,
@@ -1179,6 +1192,7 @@
         direct_call_metadata_helper_(this),
         inferred_type_metadata_helper_(this),
         procedure_attributes_metadata_helper_(this),
+        bytecode_metadata_helper_(this),
         metadata_scanned_(false) {}
 
   StreamingFlowGraphBuilder(TranslationHelper* translation_helper,
@@ -1201,6 +1215,7 @@
         direct_call_metadata_helper_(this),
         inferred_type_metadata_helper_(this),
         procedure_attributes_metadata_helper_(this),
+        bytecode_metadata_helper_(this),
         metadata_scanned_(false) {}
 
   StreamingFlowGraphBuilder(TranslationHelper* translation_helper,
@@ -1223,6 +1238,7 @@
         direct_call_metadata_helper_(this),
         inferred_type_metadata_helper_(this),
         procedure_attributes_metadata_helper_(this),
+        bytecode_metadata_helper_(this),
         metadata_scanned_(false) {}
 
   virtual ~StreamingFlowGraphBuilder() {}
@@ -1552,6 +1568,7 @@
   DirectCallMetadataHelper direct_call_metadata_helper_;
   InferredTypeMetadataHelper inferred_type_metadata_helper_;
   ProcedureAttributesMetadataHelper procedure_attributes_metadata_helper_;
+  BytecodeMetadataHelper bytecode_metadata_helper_;
   bool metadata_scanned_;
 
   friend class ClassHelper;
@@ -1559,6 +1576,7 @@
   friend class ConstructorHelper;
   friend class DirectCallMetadataHelper;
   friend class ProcedureAttributesMetadataHelper;
+  friend class BytecodeMetadataHelper;
   friend class FieldHelper;
   friend class FunctionNodeHelper;
   friend class InferredTypeMetadataHelper;
diff --git a/runtime/vm/compiler/jit/compiler.cc b/runtime/vm/compiler/jit/compiler.cc
index 81113f6..18225e6 100644
--- a/runtime/vm/compiler/jit/compiler.cc
+++ b/runtime/vm/compiler/jit/compiler.cc
@@ -161,7 +161,11 @@
         /* not building var desc */ NULL,
         /* not inlining */ NULL, optimized, osr_id);
     FlowGraph* graph = builder.BuildGraph();
+#if defined(DART_USE_INTERPRETER)
+    ASSERT((graph != NULL) || parsed_function->function().HasBytecode());
+#else
     ASSERT(graph != NULL);
+#endif
     return graph;
   }
   FlowGraphBuilder builder(*parsed_function, ic_data_array,
@@ -255,6 +259,14 @@
     }
     Exceptions::PropagateError(Error::Cast(result));
   }
+#if defined(DART_USE_INTERPRETER)
+  // TODO(regis): Revisit.
+  if (!function.HasCode() && function.HasBytecode()) {
+    // Function was not actually compiled, but its bytecode was loaded.
+    // Verify that InterpretCall stub code was installed.
+    ASSERT(function.CurrentCode() == StubCode::InterpretCall_entry()->code());
+  }
+#endif
 }
 
 bool Compiler::CanOptimizeFunction(Thread* thread, const Function& function) {
@@ -816,6 +828,13 @@
             zone, parsed_function(), *ic_data_array, osr_id(), optimized());
       }
 
+#if defined(DART_USE_INTERPRETER)
+      // TODO(regis): Revisit.
+      if (flow_graph == NULL && function.HasBytecode()) {
+        return Code::null();
+      }
+#endif
+
       const bool print_flow_graph =
           (FLAG_print_flow_graph ||
            (optimized() && FLAG_print_flow_graph_optimized)) &&
@@ -997,6 +1016,14 @@
     }
 
     const Code& result = Code::Handle(helper.Compile(pipeline));
+
+#if defined(DART_USE_INTERPRETER)
+    // TODO(regis): Revisit.
+    if (result.IsNull() && function.HasBytecode()) {
+      return Object::null();
+    }
+#endif
+
     if (!result.IsNull()) {
       if (!optimized) {
         function.SetWasCompiled(true);
diff --git a/runtime/vm/constants_dbc.h b/runtime/vm/constants_dbc.h
index b30f42c..2c94483 100644
--- a/runtime/vm/constants_dbc.h
+++ b/runtime/vm/constants_dbc.h
@@ -953,7 +953,7 @@
     const char* names[] = {
 #define NAME(name, encoding, op1, op2, op3) #name,
         BYTECODES_LIST(NAME)
-#undef DECLARE_BYTECODE
+#undef NAME
     };
     return names[DecodeOpcode(instr)];
   }
diff --git a/runtime/vm/constants_kbc.h b/runtime/vm/constants_kbc.h
new file mode 100644
index 0000000..52ceebf
--- /dev/null
+++ b/runtime/vm/constants_kbc.h
@@ -0,0 +1,1090 @@
+// Copyright (c) 2018, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#ifndef RUNTIME_VM_CONSTANTS_KBC_H_
+#define RUNTIME_VM_CONSTANTS_KBC_H_
+
+#include "platform/assert.h"
+#include "platform/globals.h"
+#include "platform/utils.h"
+
+namespace dart {
+
+// clang-format off
+// List of KernelBytecode instructions.
+//
+// INTERPRETER STATE
+//
+//      current frame info (see stack_frame_kbc.h for layout)
+//        v-----^-----v
+//   ~----+----~ ~----+-------+-------+-~ ~-+-------+-------+-~
+//   ~    |    ~ ~    | FP[0] | FP[1] | ~ ~ | SP[-1]| SP[0] |
+//   ~----+----~ ~----+-------+-------+-~ ~-+-------+-------+-~
+//                    ^                             ^
+//                    FP                            SP
+//
+//
+// The state of execution is captured in a few interpreter registers:
+//
+//   FP - base of the current frame
+//   SP - top of the stack (TOS) for the current frame
+//   PP - object pool for the currently executing function
+//
+// Frame info stored below FP additionally contains pointers to the currently
+// executing function and code (see stack_frame_kbc.h for more information).
+//
+// In unoptimized code, most bytecodes take their operands implicitly from the
+// stack and store results back on the stack. Constant operands are usually
+// taken from the object pool by index.
+//
+// ENCODING
+//
+// Each instruction is a 32-bit integer with opcode stored in the least
+// significant byte. The following operand encodings are used:
+//
+//   0........8.......16.......24.......32
+//   +--------+--------+--------+--------+
+//   | opcode |~~~~~~~~~~~~~~~~~~~~~~~~~~|   0: no operands
+//   +--------+--------+--------+--------+
+//
+//   +--------+--------+--------+--------+
+//   | opcode |    A   |~~~~~~~~~~~~~~~~~|   A: single unsigned 8-bit operand
+//   +--------+--------+--------+--------+
+//
+//   +--------+--------+--------+--------+
+//   | opcode |    A   |        D        | A_D: unsigned 8-bit operand and
+//   +--------+--------+--------+--------+      unsigned 16-bit operand
+//
+//   +--------+--------+--------+--------+
+//   | opcode |    A   |        X        | A_X: unsigned 8-bit operand and
+//   +--------+--------+--------+--------+      signed 16-bit operand
+//
+//   +--------+--------+--------+--------+
+//   | opcode |~~~~~~~~|        D        |   D: unsigned 16-bit operand
+//   +--------+--------+--------+--------+
+//
+//   +--------+--------+--------+--------+
+//   | opcode |~~~~~~~~|        X        |   X: signed 16-bit operand
+//   +--------+--------+--------+--------+
+//
+//   +--------+--------+--------+--------+
+//   | opcode |    A   |    B   |    C   | A_B_C: 3 unsigned 8-bit operands
+//   +--------+--------+--------+--------+
+//
+//   +--------+--------+--------+--------+
+//   | opcode |    A   |    B   |    Y   | A_B_Y: 2 unsigned 8-bit operands
+//   +--------+--------+--------+--------+        1 signed 8-bit operand
+//
+//   +--------+--------+--------+--------+
+//   | opcode |             T            |   T: signed 24-bit operand
+//   +--------+--------+--------+--------+
+//
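+// For example, a minimal decoding sketch (variable names are illustrative;
+// the KBC disassembler decodes operands the same way):
+//
+//   const uint8_t opcode = instr & 0xFF;
+//   const int32_t a = (instr >> 8) & 0xFF;                // unsigned A
+//   const int32_t d = instr >> 16;                        // unsigned D
+//   const int32_t x = static_cast<int16_t>(instr >> 16);  // signed X
+//   const int32_t t = static_cast<int32_t>(instr) >> 8;   // signed T
+//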
+//
+// INSTRUCTIONS
+//
+//  - Trap
+//
+//    Unreachable instruction.
+//
+//  - Nop D
+//
+//    This instruction does nothing. It may refer to an object in the constant
+//    pool that may be decoded by other instructions.
+//
+//  - Compile
+//
+//    Compile current function and start executing newly produced code
+//    (used to implement LazyCompileStub);
+//
+//  - Intrinsic id
+//
+//    Execute the intrinsic with the given id. If the intrinsic returns true,
+//    then return from the current function to the caller, passing the value
+//    produced by the intrinsic as a result;
+//
+//  - Drop1; DropR n; Drop n
+//
+//    Drop 1 or n values from the stack; if the instruction is DropR, push the
+//    first dropped value back onto the stack;
+//
+//  - Jump target
+//
+//    Jump to the given target. Target is specified as offset from the PC of the
+//    jump instruction.
+//
+//  - Return R; ReturnTOS
+//
+//    Return to the caller using either a value from the given register or a
+//    value from the top-of-stack as a result.
+//
+//    Note: the return instruction knows how many arguments to remove from the
+//    stack because it can look at the call instruction at the caller's PC and
+//    take the argument count from it.
+//
+//  - Move rA, rX
+//
+//    FP[rA] <- FP[rX]
+//    Note: rX is signed so it can be used to address parameters which are
+//    at negative indices with respect to FP.
+//
+//  - Swap rA, rX
+//
+//    FP[rA], FP[rX] <- FP[rX], FP[rA]
+//    Note: rX is signed so it can be used to address parameters which are
+//    at negative indices with respect to FP.
+//
+//  - Push rX
+//
+//    Push FP[rX] to the stack.
+//
+//  - LoadConstant rA, D; PushConstant D
+//
+//    Load value at index D from constant pool into FP[rA] or push it onto the
+//    stack.
+//
+//  - StoreLocal rX; PopLocal rX
+//
+//    Store top of the stack into FP[rX] and pop it if needed.
+//
+//  - StaticCall ArgC, D
+//
+//    Invoke function in SP[0] with arguments SP[-(1+ArgC)], ..., SP[-1] and
+//    argument descriptor PP[D], which indicates whether the first argument
+//    is a type argument vector.
+//
+//  - IndirectStaticCall ArgC, D
+//
+//    Invoke the function given by the ICData in SP[0] with arguments
+//    SP[-(1+ArgC)], ..., SP[-1] and argument descriptor PP[D], which
+//    indicates whether the first argument is a type argument vector.
+//
+//  - InstanceCall<N> ArgC, D; InstanceCall<N>Opt ArgC, D
+//
+//    Lookup and invoke method with N checked arguments using ICData in PP[D]
+//    with arguments SP[-(1+ArgC)], ..., SP[-1].
+//    The ICData indicates whether the first argument is a type argument vector.
+//
+//  - NativeCall ArgA, ArgB, ArgC
+//
+//    Invoke native function at pool[ArgB] with argc_tag at pool[ArgC] using
+//    wrapper at pool[ArgA].
+//
+//  - PushPolymorphicInstanceCall ArgC, D
+//
+//    Skips 2*D + 1 instructions and pushes a function object onto the stack
+//    if one can be found as follows. Otherwise skips only 2*D instructions.
+//    The function is looked up in the IC data encoded in the following 2*D
+//    Nop instructions. The Nop instructions should be arranged in pairs with
+//    the first being the cid, and the second being the function to push if
+//    the cid matches the cid in the pair.
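+//
+//    For example (a sketch), a call site with a single cid/target pair
+//    (D = 1) would be laid out as:
+//
+//        PushPolymorphicInstanceCall ArgC, 1
+//        Nop (cid)     ;; class id to match
+//        Nop (target)  ;; function to push when the cid matches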
+//
+//  - PushPolymorphicInstanceCallByRange ArgC, D
+//
+//    Skips 3*D + 1 instructions and pushes a function object onto the stack
+//    if one can be found as follows. Otherwise skips only 3*D instructions.
+//    The function is looked up in the IC data encoded in the following 3*D
+//    Nop instructions. The Nop instructions should be arranged in triples with
+//    the first being the start cid, the second being the number of cids, and
+//    the third being the function to push if the cid is in the range given
+//    by the first two Nop instructions.
+//
+//  - OneByteStringFromCharCode rA, rX
+//
+//    Load the one-character symbol with the char code given by the Smi
+//    in FP[rX] into FP[rA].
+//
+//  - StringToCharCode rA, rX
+//
+//    Load and smi-encode the single char code of the string in FP[rX] into
+//    FP[rA]. If the string's length is not 1, load smi -1 instead.
+//
+//  - AddTOS; SubTOS; MulTOS; BitOrTOS; BitAndTOS; EqualTOS; LessThanTOS;
+//    GreaterThanTOS;
+//
+//    Smi fast-path for a corresponding method. Checks if SP[0] and SP[-1] are
+//    both Smis and the result of SP[0] <op> SP[-1] is a Smi. If so, pops the
+//    operands, pushes the result on the stack, and skips the next instruction
+//    (which implements the slow-path fallback).
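+//
+//    For example (a sketch), 'a + b' on likely-Smi operands could be emitted
+//    as:
+//
+//        Push r0
+//        Push r1
+//        AddTOS              ;; fast path; skips the call on success
+//        InstanceCall2 2, D  ;; slow path: generic '+' dispatch via PP[D]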
+//
+//  - Add, Sub, Mul, Div, Mod, Shl, Shr rA, rB, rC
+//
+//    Arithmetic operations on Smis. FP[rA] <- FP[rB] op FP[rC].
+//    If these instructions can trigger a deoptimization, the following
+//    instruction should be Deopt. If no deoptimization should be triggered,
+//    the immediately following instruction is skipped. These instructions
+//    expect their operands to be Smis, but don't check that they are.
+//
+//  - Smi<op>TOS
+//
+//    Performs SP[0] <op> SP[-1], pops operands and pushes result on the stack.
+//    Assumes SP[0] and SP[-1] are both smis and the result is a Smi.
+//
+//  - ShlImm rA, rB, rC
+//
+//    FP[rA] <- FP[rB] << rC. Shifts the Smi in FP[rB] left by rC. rC is
+//    assumed to be a legal positive number by which left-shifting is possible.
+//
+//  - Min, Max rA, rB, rC
+//
+//    FP[rA] <- {min, max}(FP[rB], FP[rC]). Assumes that FP[rB], and FP[rC] are
+//    Smis.
+//
+//  - Neg rA, rD
+//
+//    FP[rA] <- -FP[rD]. Assumes FP[rD] is a Smi. If there is no overflow the
+//    immediately following instruction is skipped.
+//
+//  - DMin, DMax, DAdd, DSub, DMul, DDiv, DPow, DMod rA, rB, rC
+//
+//    Arithmetic operations on unboxed doubles. FP[rA] <- FP[rB] op FP[rC].
+//
+//  - DNeg, DCos, DSin, DSqrt rA, rD
+//
+//    FP[rA] <- op(FP[rD]). Assumes FP[rD] is an unboxed double.
+//
+//  - DTruncate, DFloor, DCeil rA, rD
+//
+//    Applies trunc(), floor(), or ceil() to the unboxed double in FP[rD], and
+//    stores the result in FP[rA].
+//
+//  - DoubleToFloat, FloatToDouble rA, rD
+//
+//    Convert the unboxed float or double in FP[rD] as indicated, and store the
+//    result in FP[rA].
+//
+//  - DoubleIsNaN rA, rD
+//
+//    If the unboxed double in FP[rD] is a NaN, then writes Bool::True().raw()
+//    into FP[rA], and Bool::False().raw() otherwise.
+//
+//  - DoubleIsInfinite rA, rD
+//
+//    If the unboxed double in FP[rD] is + or - infinity, then
+//    writes Bool::True().raw() into FP[rA], and Bool::False().raw() otherwise.
+//
+//  - BitOr, BitAnd, BitXor rA, rB, rC
+//
+//    FP[rA] <- FP[rB] op FP[rC]. These instructions expect their operands to be
+//    Smis, but don't check that they are.
+//
+//  - BitNot rA, rD
+//
+//    FP[rA] <- ~FP[rD]. As above, assumes FP[rD] is a Smi.
+//
+//  - WriteIntoDouble rA, rD
+//
+//    Box the double in FP[rD] using the box in FP[rA].
+//
+//  - UnboxDouble rA, rD
+//
+//    Unbox the double in FP[rD] into FP[rA]. Assumes FP[rD] is a double.
+//
+//  - CheckedUnboxDouble rA, rD
+//
+//    Unboxes FP[rD] into FP[rA] and skips the following instruction if FP[rD]
+//    is a double or a Smi (i.e. the slow path runs only for other types).
+//    When FP[rD] is a Smi, converts it to a double.
+//
+//  - UnboxInt32 rA, rB, C
+//
+//    Unboxes the integer in FP[rB] into FP[rA]. If C == 1, the value may be
+//    truncated. If FP[rA] is successfully unboxed the following instruction is
+//    skipped.
+//
+//  - BoxInt32 rA, rD
+//
+//    Boxes the unboxed signed 32-bit integer in FP[rD] into FP[rA].
+//
+//  - BoxUint32 rA, rD
+//
+//    Boxes the unboxed unsigned 32-bit integer in FP[rD] into FP[rA].
+//
+//  - SmiToDouble rA, rD
+//
+//    Convert the Smi in FP[rD] to an unboxed double in FP[rA].
+//
+//  - DoubleToSmi rA, rD
+//
+//    If the unboxed double in FP[rD] can be converted to a Smi in FP[rA], then
+//    this instruction does so, and skips the following instruction. Otherwise,
+//    the following instruction is not skipped.
+//
+//  - StoreStaticTOS D
+//
+//    Stores TOS into the static field PP[D].
+//
+//  - PushStatic
+//
+//    Pushes value of the static field PP[D] on to the stack.
+//
+//  - InitStaticTOS
+//
+//    Takes static field from TOS and ensures that it is initialized.
+//
+//  - If<Cond>(Num)TOS
+//    If<Cond>(Num) rA, rD
+//
+//    Cond is either NeStrict or EqStrict
+//
+//    Skips the next instruction unless the given condition holds. 'Num'
+//    variants perform a number check, while non-Num variants just compare
+//    RawObject pointers.
+//
+//    Used to implement conditional jump:
+//
+//        IfNeStrictTOS
+//        Jump T         ;; jump if not equal
+//
+//  - If<Cond>Null rA
+//
+//    Cond is Eq or Ne. Skips the next instruction unless the given condition
+//    holds.
+//
+//  - If<Cond> rA, rD
+//
+//    Cond is Le, Lt, Ge, Gt, unsigned variants ULe, ULt, UGe, UGt, and
+//    unboxed double variants DEq, DNe, DLe, DLt, DGe, DGt.
+//    Skips the next instruction unless FP[rA] <Cond> FP[rD]. Assumes that
+//    FP[rA] and FP[rD] are Smis or unboxed doubles as indicated by <Cond>.
+//
+//  - IfSmi<Cond>TOS
+//
+//    Cond is Lt, Le, Ge, Gt.
+//    Skips the next instruction unless SP[-1] <Cond> SP[-0].
+//    Both SP[-1] and SP[-0] are expected to be Smis.
+//
+//  - CreateArrayTOS
+//
+//    Allocate array of length SP[0] with type arguments SP[-1].
+//
+//  - CreateArrayOpt rA, rB, rC
+//
+//    Try to allocate a new array where FP[rB] is the length, and FP[rC] is the
+//    type. If allocation is successful, the result is stored in FP[rA], and
+//    the next four instructions, which should be the
+//    (Push type; Push length; AllocateTOS; PopLocal) slow path, are skipped.
+//
+//  - Allocate D
+//
+//    Allocate object of class PP[D] with no type arguments.
+//
+//  - AllocateOpt rA, D
+//
+//    Try allocating an object with tags in PP[D] with no type arguments.
+//    If allocation is successful, the result is stored in FP[rA], and
+//    the next two instructions, which should be the (Allocate class; PopLocal)
+//    slow path, are skipped.
+//
+//  - AllocateT
+//
+//    Allocate object of class SP[0] with type arguments SP[-1].
+//
+//  - AllocateTOpt rA, D
+//
+//    Similar to AllocateOpt with the difference that the offset of the
+//    type arguments in the resulting object is taken from the D field of the
+//    following Nop instruction, and on success 4 instructions are skipped and
+//    the object at the top of the stack is popped.
+//
+//  - StoreIndexedTOS
+//
+//    Store SP[0] into array SP[-2] at index SP[-1]. No typechecking is done.
+//    SP[-2] is assumed to be a RawArray, SP[-1] to be a smi.
+//
+//  - StoreIndexed rA, rB, rC
+//
+//    Store FP[rC] into array FP[rA] at index FP[rB]. No typechecking is done.
+//    FP[rA] is assumed to be a RawArray, FP[rB] to be a smi.
+//
+//  - StoreIndexed{N}{Type} rA, rB, rC
+//
+//    Where Type is Float32, Float64, Uint8, or OneByteString
+//    Where N is '', '4', or '8'. N may only be '4' for Float32 and '8' for
+//    Float64.
+//
+//    Store the unboxed double or tagged Smi in FP[rC] into the typed data array
+//    at FP[rA] at index FP[rB]. If N is not '', the index is assumed to be
+//    already scaled by N.
+//
+//  - StoreIndexedExternalUint8 rA, rB, rC
+//
+//    Similar to StoreIndexedUint8 but FP[rA] is an external typed data array.
+//
+//  - NoSuchMethod
+//
+//    Performs noSuchMethod handling.
+//
+//  - TailCall
+//
+//    Unwinds the current frame, populates the arguments descriptor register
+//    with SP[-1] and tail calls the code in SP[-0].
+//
+//  - TailCallOpt  rA, rD
+//
+//    Unwinds the current frame, populates the arguments descriptor register
+//    with rA and tail calls the code in rD.
+//
+//  - LoadArgDescriptor
+//
+//    Loads the caller-provided argument descriptor and pushes it onto the
+//    stack.
+//
+//  - LoadArgDescriptorOpt rA
+//
+//    Loads the caller-provided argument descriptor into FP[rA].
+//
+//  - LoadFpRelativeSlot rD
+//
+//    Loads from FP using the negative index of SP[-0]+rD.
+//    It is assumed that SP[-0] is a Smi.
+//
+//  - LoadFpRelativeSlotOpt  rA, rB, rY
+//
+//    Loads from FP using the negative index of FP[rB]+rY and stores the result
+//    into rA.
+//    It is assumed that rY is a Smi.
+//
+//  - StoreFpRelativeSlot rD
+//
+//    Stores SP[-0] by indexing into FP using the negative index of SP[-1]+rD.
+//    It is assumed that SP[-1] is a Smi.
+//
+//  - StoreFpRelativeSlotOpt  rA, rB, rY
+//
+//    Stores rA by indexing into FP using the negative index of FP[rB]+rY.
+//    It is assumed that rY is a Smi.
+//
+//  - LoadIndexedTOS
+//
+//    Loads from array SP[-1] at index SP[-0].
+//    It is assumed that SP[-0] is a Smi.
+//
+//  - LoadIndexed rA, rB, rC
+//
+//    Loads from array FP[rB] at index FP[rC] into FP[rA]. No typechecking is
+//    done. FP[rB] is assumed to be a RawArray, and to contain a Smi at FP[rC].
+//
+//  - LoadIndexed{N}{Type} rA, rB, rC
+//
+//    Where Type is Float32, Float64, OneByteString, TwoByteString, Uint8,
+//    Int8, and N is '', '4', or '8'. N may only be '4' for Float32, and may
+//    only be '8' for Float64.
+//
+//    Loads from typed data array FP[rB] at index FP[rC] into an unboxed double,
+//    or tagged Smi in FP[rA] as indicated by the type in the name. If N is not
+//    '', the index is assumed to be already scaled by N.
+//
+//  - LoadIndexedExternal{Int8, Uint8} rA, rB, rC
+//
+//    Loads from the external typed data array FP[rB] at index FP[rC] into
+//    FP[rA]. No typechecking is done.
+//
+//  - StoreField rA, B, rC
+//
+//    Store value FP[rC] into object FP[rA] at offset (in words) B.
+//
+//  - StoreFieldExt rA, rD
+//
+//    Store value FP[rD] into object FP[rA] at offset (in words)
+//    stored in the following Nop instruction. Used to access fields with
+//    large offsets.
+//
+//  - StoreFieldTOS D
+//
+//    Store value SP[0] into object SP[-1] at offset (in words) PP[D].
+//
+//  - LoadField rA, rB, C
+//
+//    Load value at offset (in words) C from object FP[rB] into FP[rA].
+//
+//  - LoadFieldExt rA, rD
+//
+//    Load value from object FP[rD] at offset (in words) stored in the
+//    following Nop instruction into FP[rA]. Used to access fields with
+//    large offsets.
+//
+//  - LoadUntagged rA, rB, C
+//
+//    Like LoadField, but assumes that FP[rB] is untagged.
+//
+//  - LoadFieldTOS D
+//
+//    Push value at offset (in words) PP[D] from object SP[0].
+//
+//  - BooleanNegateTOS
+//
+//    SP[0] = !SP[0]
+//
+//  - BooleanNegate rA, rD
+//
+//    FP[rA] = !FP[rD]
+//
+//  - Throw A
+//
+//    Throw (Rethrow if A != 0) exception. Exception object and stack object
+//    are taken from TOS.
+//
+//  - Entry rD
+//
+//    Function prologue for the function
+//        rD - number of local slots to reserve;
+//
+//  - EntryOptional A, B, C
+//
+//    Function prologue for the function with optional or named arguments:
+//        A - expected number of positional arguments;
+//        B - number of optional arguments;
+//        C - number of named arguments;
+//
+//    Only one of B and C can be not 0.
+//
+//    If B is not 0 then EntryOptional bytecode is followed by B LoadConstant
+//    bytecodes specifying default values for optional arguments.
+//
+//    If C is not 0 then EntryOptional is followed by 2 * C LoadConstant
+//    bytecodes.
+//    The bytecode at 2 * i specifies the name of the i-th named argument, and
+//    the one at 2 * i + 1 its default value. The rA part of each LoadConstant
+//    bytecode specifies the location of the parameter on the stack. Named
+//    arguments are sorted alphabetically to enable linear matching, similar
+//    to how function prologues are implemented on other architectures.
+//
+//    Note: unlike the Entry bytecode, EntryOptional does not set up the frame
+//    for local variables; this is done by a separate Frame bytecode.
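+//
+//    For example (a sketch), a function f(p, {q: 7}) could begin with:
+//
+//        EntryOptional  1, 0, 1
+//        LoadConstant   r1, kX  ;; kX: the name 'q' in the constant pool
+//        LoadConstant   r1, kY  ;; kY: the default value 7
+//        Frame          D       ;; locals are set up separately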
+//
+//  - EntryOptimized rD
+//
+//    Function prologue for optimized functions.
+//        rD - number of local slots to reserve for registers;
+//
+//    Note: reserved slots are not initialized because optimized code
+//    has stack maps attached to call sites.
+//
+//  - HotCheck A, D
+//
+//    Increment current function's usage counter by A and check if it
+//    exceeds D. If it does, trigger (re)optimization of the current
+//    function.
+//
+//  - Frame D
+//
+//    Reserve and initialize with null space for D local variables.
+//
+//  - SetFrame A
+//
+//    Reinitialize SP assuming that current frame has size A.
+//    Used to drop temporaries from the stack in the exception handler.
+//
+//  - AllocateContext D
+//
+//    Allocate a Context object with room for D context variables.
+//
+//  - AllocateUninitializedContext rA, D
+//
+//    Allocates an uninitialized context for D variables, and places the result
+//    in FP[rA]. On success, skips the next 2 instructions, which should be the
+//    slow path (AllocateContext D; PopLocal rA).
+//
+//  - CloneContext
+//
+//    Clone context stored in TOS.
+//
+//  - MoveSpecial rA, D
+//
+//    Copy special values from inside interpreter to FP[rA]. Currently only
+//    used to pass exception object (D = 0) and stack trace object (D = 1) to
+//    catch handler.
+//
+//  - InstantiateType D
+//
+//    Instantiate type PP[D] with instantiator type arguments SP[-1] and
+//    function type arguments SP[0].
+//
+//  - InstantiateTypeArgumentsTOS D
+//
+//    Instantiate type arguments PP[D] with instantiator type arguments SP[-1]
+//    and function type arguments SP[0].
+//
+//  - InstanceOf
+//
+//    Test if instance SP[-4] with instantiator type arguments SP[-3] and
+//    function type arguments SP[-2] is a subtype of type SP[-1] using
+//    SubtypeTestCache SP[0], with result placed at top of stack.
+//
+//  - AssertAssignable A, D
+//
+//    Assert that instance SP[-4] is assignable to variable named SP[0] of
+//    type SP[-1] with instantiator type arguments SP[-3] and function type
+//    arguments SP[-2] using SubtypeTestCache PP[D].
+//    If A is 1, then the instance may be a Smi.
+//
+//  - AssertSubtype
+//
+//    Asserts that one type is a subtype of another. Throws a TypeError
+//    otherwise. The stack has the following arguments on it:
+//
+//        SP[-4]  instantiator type args
+//        SP[-3]  function type args
+//        SP[-2]  sub_type
+//        SP[-1]  super_type
+//        SP[-0]  dst_name
+//
+//    All 5 arguments are consumed from the stack and no result is pushed.
+//
+//  - BadTypeError
+//
+//    If SP[-4] is non-null, throws a BadType error by calling into the runtime.
+//    Assumes that the stack is arranged the same as for AssertAssignable.
+//
+//  - AssertBoolean A
+//
+//    Assert that TOS is a boolean (A = 1) or that TOS is not null (A = 0).
+//
+//  - TestSmi rA, rD
+//
+//    If FP[rA] & FP[rD] != 0, then skip the next instruction. FP[rA] and FP[rD]
+//    must be Smis.
+//
+//  - TestCids rA, D
+//
+//    The next D instructions must be Nops whose D field encodes a class id. If
+//    the class id of FP[rA] matches, jump to PC + N + 1 if the matching Nop's
+//    A != 0 or PC + N + 2 if the matching Nop's A = 0. If no match is found,
+//    jump to PC + N.
+//
+//  - CheckSmi rA
+//
+//    If FP[rA] is a Smi, then skip the next instruction.
+//
+//  - CheckEitherNonSmi rA, rD
+//
+//    If either FP[rA] or FP[rD] is not a Smi, then skip the next instruction.
+//
+//  - CheckClassId rA, D
+//
+//    If the class id in FP[rA] matches the class id D, then skip the
+//    following instruction.
+//
+//  - CheckClassIdRange rA, D
+//
+//    The next instruction is a Nop that encodes S, the size of the class-id
+//    range. If the class id in FP[rA] is between D and D + S, then skip the
+//    following instruction.
+//
+//  - CheckBitTest rA, D
+//
+//    Skips the next 3 instructions if the object at FP[rA] is a valid class for
+//    a dense switch with low cid encoded in the following Nop instruction, and
+//    the cid mask encoded in the Nop instruction after that, or if D == 1 and
+//    FP[rA] is a Smi. Skips 2 instructions otherwise.
+//
+//  - CheckCids rA, rB, rC
+//
+//    Skips rC + 1 instructions if the object at FP[rA] is a Smi and
+//    rB == 1, or if FP[rA]'s cid is found in the array of cids encoded by the
+//    following rC Nop instructions. Otherwise skips only rC instructions.
+//
+//  - CheckCidsByRange rA, rB, rC
+//
+//    Skips rC + 1 instructions if the object at FP[rA] is a Smi and rB ==
+//    1, or if FP[rA]'s cid is found in the array of cid ranges encoded by the
+//    following rC Nop instructions. The cid ranges from a (inclusive) to b
+//    (exclusive) are encoded as pairs (a, b - a). Otherwise skips only rC
+//    instructions.
+//
+//  - CheckStack
+//
+//    Compare SP against isolate stack limit and call StackOverflow handler if
+//    necessary.
+//
+//  - CheckStackAlwaysExit
+//
+//    Unconditionally call StackOverflow handler.
+//
+//  - CheckFunctionTypeArgs A, D
+//
+//    Check for a passed-in type argument vector of length A and
+//    store it at FP[D].
+//
+//  - DebugStep, DebugBreak A
+//
+//    Debugger support. DebugBreak is a bytecode that can be patched into the
+//    instruction stream to trigger an in-place breakpoint.
+//
+//    When patching an instance or static call with DebugBreak, we set A to
+//    match the patched call's argument count so that Return instructions
+//    continue to work.
+//
+// TODO(vegorov) the way we replace calls with DebugBreak does not work
+//               with our smi fast paths because DebugBreak is simply skipped.
+//
+//  - LoadClassIdTOS, LoadClassId rA, D
+//
+//    LoadClassIdTOS loads the class id from the object at SP[0] and stores it
+//    to SP[0]. LoadClassId loads the class id from FP[rA] and stores it to
+//    FP[D].
+//
+//  - Deopt ArgC, D
+//
+//    If D != 0 then trigger eager deoptimization with deopt id (D - 1).
+//    If D == 0 then trigger lazy deoptimization.
+//
+//    The meaning of the operand ArgC (encoded as the A operand) matches that
+//    of an ArgC operand in call instructions. This is needed because we could
+//    potentially patch call instructions with a lazy deopt and we need to
+//    ensure that any Return/ReturnTOS instructions returning from the patched
+//    calls will continue to function, e.g. in bytecode sequences like
+//
+//    InstanceCall ... <- lazy deopt inside first call
+//    InstanceCall ... <- patches second call with Deopt
+//
+// BYTECODE LIST FORMAT
+//
+// KernelBytecode list below is specified using the following format:
+//
+//     V(BytecodeName, OperandForm, Op1, Op2, Op3)
+//
+// - OperandForm specifies operand encoding and should be one of 0, A, T, A_D,
+//   A_X, X, D, A_B_C, or A_B_Y (see ENCODING section above).
+//
+// - Op1, Op2, Op3 specify operand meaning. Possible values:
+//
+//     ___ ignored / non-existent operand
+//     num immediate operand
+//     lit constant literal from object pool
+//     reg register (unsigned FP relative local)
+//     xeg x-register (signed FP relative local)
+//     tgt jump target relative to the PC of the current instruction
+//
+// TODO(vegorov) jump targets should be encoded relative to PC of the next
+//               instruction because PC is incremented immediately after fetch
+//               and before decoding.
+//
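+// For example, the entry
+//
+//     V(LoadConstant,                        A_D, reg, lit, ___)
+//
+// describes a bytecode named LoadConstant that uses the A_D encoding, whose
+// A operand is an FP-relative register and whose D operand is the index of a
+// constant literal in the object pool.
+//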
+#define KERNEL_BYTECODES_LIST(V)                                               \
+  V(Trap,                                  0, ___, ___, ___)                   \
+  V(Nop,                                 A_D, num, lit, ___)                   \
+  V(Compile,                               0, ___, ___, ___)                   \
+  V(HotCheck,                            A_D, num, num, ___)                   \
+  V(Intrinsic,                             A, num, ___, ___)                   \
+  V(Drop1,                                 0, ___, ___, ___)                   \
+  V(DropR,                                 A, num, ___, ___)                   \
+  V(Drop,                                  A, num, ___, ___)                   \
+  V(Jump,                                  T, tgt, ___, ___)                   \
+  V(Return,                                A, reg, ___, ___)                   \
+  V(ReturnTOS,                             0, ___, ___, ___)                   \
+  V(Move,                                A_X, reg, xeg, ___)                   \
+  V(Swap,                                A_X, reg, xeg, ___)                   \
+  V(Push,                                  X, xeg, ___, ___)                   \
+  V(LoadConstant,                        A_D, reg, lit, ___)                   \
+  V(LoadClassId,                         A_D, reg, reg, ___)                   \
+  V(LoadClassIdTOS,                        0, ___, ___, ___)                   \
+  V(PushConstant,                          D, lit, ___, ___)                   \
+  V(StoreLocal,                            X, xeg, ___, ___)                   \
+  V(PopLocal,                              X, xeg, ___, ___)                   \
+  V(IndirectStaticCall,                  A_D, num, num, ___)                   \
+  V(StaticCall,                          A_D, num, num, ___)                   \
+  V(InstanceCall1,                       A_D, num, num, ___)                   \
+  V(InstanceCall2,                       A_D, num, num, ___)                   \
+  V(InstanceCall1Opt,                    A_D, num, num, ___)                   \
+  V(InstanceCall2Opt,                    A_D, num, num, ___)                   \
+  V(PushPolymorphicInstanceCall,         A_D, num, num, ___)                   \
+  V(PushPolymorphicInstanceCallByRange,  A_D, num, num, ___)                   \
+  V(NativeCall,                        A_B_C, num, num, num)                   \
+  V(OneByteStringFromCharCode,           A_X, reg, xeg, ___)                   \
+  V(StringToCharCode,                    A_X, reg, xeg, ___)                   \
+  V(AddTOS,                                0, ___, ___, ___)                   \
+  V(SubTOS,                                0, ___, ___, ___)                   \
+  V(MulTOS,                                0, ___, ___, ___)                   \
+  V(BitOrTOS,                              0, ___, ___, ___)                   \
+  V(BitAndTOS,                             0, ___, ___, ___)                   \
+  V(EqualTOS,                              0, ___, ___, ___)                   \
+  V(LessThanTOS,                           0, ___, ___, ___)                   \
+  V(GreaterThanTOS,                        0, ___, ___, ___)                   \
+  V(SmiAddTOS,                             0, ___, ___, ___)                   \
+  V(SmiSubTOS,                             0, ___, ___, ___)                   \
+  V(SmiMulTOS,                             0, ___, ___, ___)                   \
+  V(SmiBitAndTOS,                          0, ___, ___, ___)                   \
+  V(Add,                               A_B_C, reg, reg, reg)                   \
+  V(Sub,                               A_B_C, reg, reg, reg)                   \
+  V(Mul,                               A_B_C, reg, reg, reg)                   \
+  V(Div,                               A_B_C, reg, reg, reg)                   \
+  V(Mod,                               A_B_C, reg, reg, reg)                   \
+  V(Shl,                               A_B_C, reg, reg, reg)                   \
+  V(Shr,                               A_B_C, reg, reg, reg)                   \
+  V(ShlImm,                            A_B_C, reg, reg, num)                   \
+  V(Neg,                                 A_D, reg, reg, ___)                   \
+  V(BitOr,                             A_B_C, reg, reg, reg)                   \
+  V(BitAnd,                            A_B_C, reg, reg, reg)                   \
+  V(BitXor,                            A_B_C, reg, reg, reg)                   \
+  V(BitNot,                              A_D, reg, reg, ___)                   \
+  V(Min,                               A_B_C, reg, reg, reg)                   \
+  V(Max,                               A_B_C, reg, reg, reg)                   \
+  V(WriteIntoDouble,                     A_D, reg, reg, ___)                   \
+  V(UnboxDouble,                         A_D, reg, reg, ___)                   \
+  V(CheckedUnboxDouble,                  A_D, reg, reg, ___)                   \
+  V(UnboxInt32,                        A_B_C, reg, reg, num)                   \
+  V(BoxInt32,                            A_D, reg, reg, ___)                   \
+  V(BoxUint32,                           A_D, reg, reg, ___)                   \
+  V(SmiToDouble,                         A_D, reg, reg, ___)                   \
+  V(DoubleToSmi,                         A_D, reg, reg, ___)                   \
+  V(DAdd,                              A_B_C, reg, reg, reg)                   \
+  V(DSub,                              A_B_C, reg, reg, reg)                   \
+  V(DMul,                              A_B_C, reg, reg, reg)                   \
+  V(DDiv,                              A_B_C, reg, reg, reg)                   \
+  V(DNeg,                                A_D, reg, reg, ___)                   \
+  V(DSqrt,                               A_D, reg, reg, ___)                   \
+  V(DMin,                              A_B_C, reg, reg, reg)                   \
+  V(DMax,                              A_B_C, reg, reg, reg)                   \
+  V(DCos,                                A_D, reg, reg, ___)                   \
+  V(DSin,                                A_D, reg, reg, ___)                   \
+  V(DPow,                              A_B_C, reg, reg, reg)                   \
+  V(DMod,                              A_B_C, reg, reg, reg)                   \
+  V(DTruncate,                           A_D, reg, reg, ___)                   \
+  V(DFloor,                              A_D, reg, reg, ___)                   \
+  V(DCeil,                               A_D, reg, reg, ___)                   \
+  V(DoubleToFloat,                       A_D, reg, reg, ___)                   \
+  V(FloatToDouble,                       A_D, reg, reg, ___)                   \
+  V(DoubleIsNaN,                           A, reg, ___, ___)                   \
+  V(DoubleIsInfinite,                      A, reg, ___, ___)                   \
+  V(StoreStaticTOS,                        D, lit, ___, ___)                   \
+  V(PushStatic,                            D, lit, ___, ___)                   \
+  V(InitStaticTOS,                         0, ___, ___, ___)                   \
+  V(IfNeStrictTOS,                         0, ___, ___, ___)                   \
+  V(IfEqStrictTOS,                         0, ___, ___, ___)                   \
+  V(IfNeStrictNumTOS,                      0, ___, ___, ___)                   \
+  V(IfEqStrictNumTOS,                      0, ___, ___, ___)                   \
+  V(IfSmiLtTOS,                            0, ___, ___, ___)                   \
+  V(IfSmiLeTOS,                            0, ___, ___, ___)                   \
+  V(IfSmiGeTOS,                            0, ___, ___, ___)                   \
+  V(IfSmiGtTOS,                            0, ___, ___, ___)                   \
+  V(IfNeStrict,                          A_D, reg, reg, ___)                   \
+  V(IfEqStrict,                          A_D, reg, reg, ___)                   \
+  V(IfLe,                                A_D, reg, reg, ___)                   \
+  V(IfLt,                                A_D, reg, reg, ___)                   \
+  V(IfGe,                                A_D, reg, reg, ___)                   \
+  V(IfGt,                                A_D, reg, reg, ___)                   \
+  V(IfULe,                               A_D, reg, reg, ___)                   \
+  V(IfULt,                               A_D, reg, reg, ___)                   \
+  V(IfUGe,                               A_D, reg, reg, ___)                   \
+  V(IfUGt,                               A_D, reg, reg, ___)                   \
+  V(IfDNe,                               A_D, reg, reg, ___)                   \
+  V(IfDEq,                               A_D, reg, reg, ___)                   \
+  V(IfDLe,                               A_D, reg, reg, ___)                   \
+  V(IfDLt,                               A_D, reg, reg, ___)                   \
+  V(IfDGe,                               A_D, reg, reg, ___)                   \
+  V(IfDGt,                               A_D, reg, reg, ___)                   \
+  V(IfNeStrictNum,                       A_D, reg, reg, ___)                   \
+  V(IfEqStrictNum,                       A_D, reg, reg, ___)                   \
+  V(IfEqNull,                              A, reg, ___, ___)                   \
+  V(IfNeNull,                              A, reg, ___, ___)                   \
+  V(CreateArrayTOS,                        0, ___, ___, ___)                   \
+  V(CreateArrayOpt,                    A_B_C, reg, reg, reg)                   \
+  V(Allocate,                              D, lit, ___, ___)                   \
+  V(AllocateT,                             0, ___, ___, ___)                   \
+  V(AllocateOpt,                         A_D, reg, lit, ___)                   \
+  V(AllocateTOpt,                        A_D, reg, lit, ___)                   \
+  V(StoreIndexedTOS,                       0, ___, ___, ___)                   \
+  V(StoreIndexed,                      A_B_C, reg, reg, reg)                   \
+  V(StoreIndexedUint8,                 A_B_C, reg, reg, reg)                   \
+  V(StoreIndexedExternalUint8,         A_B_C, reg, reg, reg)                   \
+  V(StoreIndexedOneByteString,         A_B_C, reg, reg, reg)                   \
+  V(StoreIndexedUint32,                A_B_C, reg, reg, reg)                   \
+  V(StoreIndexedFloat32,               A_B_C, reg, reg, reg)                   \
+  V(StoreIndexed4Float32,              A_B_C, reg, reg, reg)                   \
+  V(StoreIndexedFloat64,               A_B_C, reg, reg, reg)                   \
+  V(StoreIndexed8Float64,              A_B_C, reg, reg, reg)                   \
+  V(NoSuchMethod,                          0, ___, ___, ___)                   \
+  V(TailCall,                              0, ___, ___, ___)                   \
+  V(TailCallOpt,                         A_D, reg, reg, ___)                   \
+  V(LoadArgDescriptor,                     0, ___, ___, ___)                   \
+  V(LoadArgDescriptorOpt,                  A, reg, ___, ___)                   \
+  V(LoadFpRelativeSlot,                    X, reg, ___, ___)                   \
+  V(LoadFpRelativeSlotOpt,             A_B_Y, reg, reg, reg)                   \
+  V(StoreFpRelativeSlot,                   X, reg, ___, ___)                   \
+  V(StoreFpRelativeSlotOpt,            A_B_Y, reg, reg, reg)                   \
+  V(LoadIndexedTOS,                        0, ___, ___, ___)                   \
+  V(LoadIndexed,                       A_B_C, reg, reg, reg)                   \
+  V(LoadIndexedUint8,                  A_B_C, reg, reg, reg)                   \
+  V(LoadIndexedInt8,                   A_B_C, reg, reg, reg)                   \
+  V(LoadIndexedInt32,                  A_B_C, reg, reg, reg)                   \
+  V(LoadIndexedUint32,                 A_B_C, reg, reg, reg)                   \
+  V(LoadIndexedExternalUint8,          A_B_C, reg, reg, reg)                   \
+  V(LoadIndexedExternalInt8,           A_B_C, reg, reg, reg)                   \
+  V(LoadIndexedFloat32,                A_B_C, reg, reg, reg)                   \
+  V(LoadIndexed4Float32,               A_B_C, reg, reg, reg)                   \
+  V(LoadIndexedFloat64,                A_B_C, reg, reg, reg)                   \
+  V(LoadIndexed8Float64,               A_B_C, reg, reg, reg)                   \
+  V(LoadIndexedOneByteString,          A_B_C, reg, reg, reg)                   \
+  V(LoadIndexedTwoByteString,          A_B_C, reg, reg, reg)                   \
+  V(StoreField,                        A_B_C, reg, num, reg)                   \
+  V(StoreFieldExt,                       A_D, reg, reg, ___)                   \
+  V(StoreFieldTOS,                         D, lit, ___, ___)                   \
+  V(LoadField,                         A_B_C, reg, reg, num)                   \
+  V(LoadFieldExt,                        A_D, reg, reg, ___)                   \
+  V(LoadUntagged,                      A_B_C, reg, reg, num)                   \
+  V(LoadFieldTOS,                          D, lit, ___, ___)                   \
+  V(BooleanNegateTOS,                      0, ___, ___, ___)                   \
+  V(BooleanNegate,                       A_D, reg, reg, ___)                   \
+  V(Throw,                                 A, num, ___, ___)                   \
+  V(Entry,                                 D, num, ___, ___)                   \
+  V(EntryOptional,                     A_B_C, num, num, num)                   \
+  V(EntryOptimized,                      A_D, num, num, ___)                   \
+  V(Frame,                                 D, num, ___, ___)                   \
+  V(SetFrame,                              A, num, ___, ___)                   \
+  V(AllocateContext,                       D, num, ___, ___)                   \
+  V(AllocateUninitializedContext,        A_D, reg, num, ___)                   \
+  V(CloneContext,                          0, ___, ___, ___)                   \
+  V(MoveSpecial,                         A_D, reg, num, ___)                   \
+  V(InstantiateType,                       D, lit, ___, ___)                   \
+  V(InstantiateTypeArgumentsTOS,         A_D, num, lit, ___)                   \
+  V(InstanceOf,                            0, ___, ___, ___)                   \
+  V(BadTypeError,                          0, ___, ___, ___)                   \
+  V(AssertAssignable,                    A_D, num, lit, ___)                   \
+  V(AssertSubtype,                         0, ___, ___, ___)                   \
+  V(AssertBoolean,                         A, num, ___, ___)                   \
+  V(TestSmi,                             A_D, reg, reg, ___)                   \
+  V(TestCids,                            A_D, reg, num, ___)                   \
+  V(CheckSmi,                              A, reg, ___, ___)                   \
+  V(CheckEitherNonSmi,                   A_D, reg, reg, ___)                   \
+  V(CheckClassId,                        A_D, reg, num, ___)                   \
+  V(CheckClassIdRange,                   A_D, reg, num, ___)                   \
+  V(CheckBitTest,                        A_D, reg, num, ___)                   \
+  V(CheckCids,                         A_B_C, reg, num, num)                   \
+  V(CheckCidsByRange,                  A_B_C, reg, num, num)                   \
+  V(CheckStack,                            0, ___, ___, ___)                   \
+  V(CheckStackAlwaysExit,                  0, ___, ___, ___)                   \
+  V(CheckFunctionTypeArgs,               A_D, num, num, ___)                   \
+  V(DebugStep,                             0, ___, ___, ___)                   \
+  V(DebugBreak,                            A, num, ___, ___)                   \
+  V(Deopt,                               A_D, num, num, ___)                   \
+  V(DeoptRewind,                           0, ___, ___, ___)
+
+// clang-format on
+
+typedef uint32_t KBCInstr;
+
+class KernelBytecode {
+ public:
+  enum Opcode {
+#define DECLARE_BYTECODE(name, encoding, op1, op2, op3) k##name,
+    KERNEL_BYTECODES_LIST(DECLARE_BYTECODE)
+#undef DECLARE_BYTECODE
+  };
+
+  static const char* NameOf(KBCInstr instr) {
+    const char* names[] = {
+#define NAME(name, encoding, op1, op2, op3) #name,
+        KERNEL_BYTECODES_LIST(NAME)
+#undef NAME
+    };
+    return names[DecodeOpcode(instr)];
+  }
+
+  static const intptr_t kOpShift = 0;
+  static const intptr_t kAShift = 8;
+  static const intptr_t kAMask = 0xFF;
+  static const intptr_t kBShift = 16;
+  static const intptr_t kBMask = 0xFF;
+  static const intptr_t kCShift = 24;
+  static const intptr_t kCMask = 0xFF;
+  static const intptr_t kDShift = 16;
+  static const intptr_t kDMask = 0xFFFF;
+  static const intptr_t kYShift = 24;
+  static const intptr_t kYMask = 0xFF;
+
+  static KBCInstr Encode(Opcode op, uintptr_t a, uintptr_t b, uintptr_t c) {
+    ASSERT((a & kAMask) == a);
+    ASSERT((b & kBMask) == b);
+    ASSERT((c & kCMask) == c);
+    return op | (a << kAShift) | (b << kBShift) | (c << kCShift);
+  }
+
+  static KBCInstr Encode(Opcode op, uintptr_t a, uintptr_t d) {
+    ASSERT((a & kAMask) == a);
+    ASSERT((d & kDMask) == d);
+    return op | (a << kAShift) | (d << kDShift);
+  }
+
+  static KBCInstr EncodeSigned(Opcode op, uintptr_t a, intptr_t x) {
+    ASSERT((a & kAMask) == a);
+    ASSERT((x << kDShift) >> kDShift == x);
+    return op | (a << kAShift) | (x << kDShift);
+  }
+
+  static KBCInstr EncodeSigned(Opcode op, intptr_t x) {
+    ASSERT((x << kAShift) >> kAShift == x);
+    return op | (x << kAShift);
+  }
+
+  static KBCInstr Encode(Opcode op) { return op; }
+
+  DART_FORCE_INLINE static uint8_t DecodeA(KBCInstr bc) {
+    return (bc >> kAShift) & kAMask;
+  }
+
+  DART_FORCE_INLINE static uint8_t DecodeB(KBCInstr bc) {
+    return (bc >> kBShift) & kBMask;
+  }
+
+  DART_FORCE_INLINE static uint16_t DecodeD(KBCInstr bc) {
+    return (bc >> kDShift) & kDMask;
+  }
+
+  DART_FORCE_INLINE static Opcode DecodeOpcode(KBCInstr bc) {
+    return static_cast<Opcode>(bc & 0xFF);
+  }
+
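+  // For example (illustrative only), an A_D instruction round-trips through
+  // Encode and the decoders as follows:
+  //
+  //   const KBCInstr instr =
+  //       KernelBytecode::Encode(KernelBytecode::kLoadConstant, 2, 7);
+  //   ASSERT(KernelBytecode::DecodeOpcode(instr) ==
+  //          KernelBytecode::kLoadConstant);
+  //   ASSERT(KernelBytecode::DecodeA(instr) == 2);  // A = bits 8..15.
+  //   ASSERT(KernelBytecode::DecodeD(instr) == 7);  // D = bits 16..31.
+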
+  DART_FORCE_INLINE static bool IsTrap(KBCInstr instr) {
+    return DecodeOpcode(instr) == KernelBytecode::kTrap;
+  }
+
+  DART_FORCE_INLINE static bool IsCallOpcode(KBCInstr instr) {
+    switch (DecodeOpcode(instr)) {
+      case KernelBytecode::kStaticCall:
+      case KernelBytecode::kIndirectStaticCall:
+      case KernelBytecode::kInstanceCall1:
+      case KernelBytecode::kInstanceCall2:
+      case KernelBytecode::kInstanceCall1Opt:
+      case KernelBytecode::kInstanceCall2Opt:
+      case KernelBytecode::kDebugBreak:
+        return true;
+
+      default:
+        return false;
+    }
+  }
+
+  DART_FORCE_INLINE static bool IsFastSmiOpcode(KBCInstr instr) {
+    switch (DecodeOpcode(instr)) {
+      case KernelBytecode::kAddTOS:
+      case KernelBytecode::kSubTOS:
+      case KernelBytecode::kMulTOS:
+      case KernelBytecode::kBitOrTOS:
+      case KernelBytecode::kBitAndTOS:
+      case KernelBytecode::kEqualTOS:
+      case KernelBytecode::kLessThanTOS:
+      case KernelBytecode::kGreaterThanTOS:
+        return true;
+
+      default:
+        return false;
+    }
+  }
+
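+  // The argument count of a call is carried in its A operand; a DebugBreak
+  // that patched a call mirrors that count (see the DebugBreak documentation
+  // in constants_kbc.h), which is why IsCallOpcode accepts kDebugBreak here.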
+  DART_FORCE_INLINE static uint8_t DecodeArgc(KBCInstr call) {
+    ASSERT(IsCallOpcode(call));
+    return (call >> 8) & 0xFF;
+  }
+
+  static KBCInstr At(uword pc) { return *reinterpret_cast<KBCInstr*>(pc); }
+
+ private:
+  DISALLOW_ALLOCATION();
+  DISALLOW_IMPLICIT_CONSTRUCTORS(KernelBytecode);
+};
+
+}  // namespace dart
+
+#endif  // RUNTIME_VM_CONSTANTS_KBC_H_
diff --git a/runtime/vm/dart_entry.cc b/runtime/vm/dart_entry.cc
index 860f672..15ab7b8 100644
--- a/runtime/vm/dart_entry.cc
+++ b/runtime/vm/dart_entry.cc
@@ -8,6 +8,7 @@
 #include "vm/class_finalizer.h"
 #include "vm/compiler/jit/compiler.h"
 #include "vm/debugger.h"
+#include "vm/interpreter.h"
 #include "vm/object_store.h"
 #include "vm/resolver.h"
 #include "vm/runtime_entry.h"
@@ -113,11 +114,32 @@
   ASSERT(thread->IsMutatorThread());
   ScopedIsolateStackLimits stack_limit(thread, current_sp);
   if (!function.HasCode()) {
+#if defined(DART_USE_INTERPRETER)
+    // The function is not compiled yet. Interpret it if it has bytecode.
+    // The bytecode is loaded as part of an aborted compilation step.
+    if (!function.HasBytecode()) {
+      const Object& result =
+          Object::Handle(zone, Compiler::CompileFunction(thread, function));
+      if (result.IsError()) {
+        return Error::Cast(result).raw();
+      }
+    }
+    if (!function.HasCode() && function.HasBytecode()) {
+      const Code& bytecode = Code::Handle(zone, function.Bytecode());
+      ASSERT(!bytecode.IsNull());
+      ASSERT(thread->no_callback_scope_depth() == 0);
+      SuspendLongJumpScope suspend_long_jump_scope(thread);
+      TransitionToGenerated transition(thread);
+      return Interpreter::Current()->Call(bytecode, arguments_descriptor,
+                                          arguments, thread);
+    }
+#else
     const Object& result =
         Object::Handle(zone, Compiler::CompileFunction(thread, function));
     if (result.IsError()) {
       return Error::Cast(result).raw();
     }
+#endif
   }
 // Now Call the invoke stub which will invoke the dart function.
 #if !defined(TARGET_ARCH_DBC)
diff --git a/runtime/vm/dart_entry.h b/runtime/vm/dart_entry.h
index b518523..0898a93 100644
--- a/runtime/vm/dart_entry.h
+++ b/runtime/vm/dart_entry.h
@@ -125,6 +125,8 @@
   friend class SnapshotWriter;
   friend class Serializer;
   friend class Deserializer;
+  friend class Interpreter;
+  friend class InterpreterHelpers;
   friend class Simulator;
   friend class SimulatorHelpers;
   DISALLOW_COPY_AND_ASSIGN(ArgumentsDescriptor);
diff --git a/runtime/vm/interpreter.cc b/runtime/vm/interpreter.cc
new file mode 100644
index 0000000..725c385
--- /dev/null
+++ b/runtime/vm/interpreter.cc
@@ -0,0 +1,4118 @@
+// Copyright (c) 2018, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include <setjmp.h>  // NOLINT
+#include <stdlib.h>
+
+#include "vm/globals.h"
+#if defined(DART_USE_INTERPRETER)
+
+#include "vm/interpreter.h"
+
+#include "vm/compiler/assembler/assembler.h"
+#include "vm/compiler/assembler/disassembler_kbc.h"
+#include "vm/compiler/jit/compiler.h"
+#include "vm/constants_kbc.h"
+#include "vm/cpu.h"
+#include "vm/dart_entry.h"
+#include "vm/debugger.h"
+#include "vm/lockers.h"
+#include "vm/native_arguments.h"
+#include "vm/native_entry.h"
+#include "vm/object.h"
+#include "vm/object_store.h"
+#include "vm/os_thread.h"
+#include "vm/stack_frame_kbc.h"
+#include "vm/symbols.h"
+
+namespace dart {
+
+DEFINE_FLAG(uint64_t,
+            trace_interpreter_after,
+            ULLONG_MAX,
+            "Trace interpreter execution after instruction count reached.");
+
+#define LIKELY(cond) __builtin_expect((cond), 1)
+#define UNLIKELY(cond) __builtin_expect((cond), 0)
+
+// InterpreterSetjmpBuffers are linked together, and the last created one
+// is referenced by the Interpreter. When an exception is thrown, the exception
+// runtime looks at where to jump and finds the corresponding
+// InterpreterSetjmpBuffer based on the stack pointer of the exception handler.
+// The runtime then does a Longjmp on that buffer to return to the interpreter.
+class InterpreterSetjmpBuffer {
+ public:
+  void Longjmp() {
+    // "This" is now the last setjmp buffer.
+    interpreter_->set_last_setjmp_buffer(this);
+    longjmp(buffer_, 1);
+  }
+
+  explicit InterpreterSetjmpBuffer(Interpreter* interpreter) {
+    interpreter_ = interpreter;
+    link_ = interpreter->last_setjmp_buffer();
+    interpreter->set_last_setjmp_buffer(this);
+    fp_ = interpreter->fp_;
+  }
+
+  ~InterpreterSetjmpBuffer() {
+    ASSERT(interpreter_->last_setjmp_buffer() == this);
+    interpreter_->set_last_setjmp_buffer(link_);
+  }
+
+  InterpreterSetjmpBuffer* link() const { return link_; }
+
+  uword fp() const { return reinterpret_cast<uword>(fp_); }
+
+  jmp_buf buffer_;
+
+ private:
+  RawObject** fp_;
+  Interpreter* interpreter_;
+  InterpreterSetjmpBuffer* link_;
+
+  friend class Interpreter;
+
+  DISALLOW_ALLOCATION();
+  DISALLOW_COPY_AND_ASSIGN(InterpreterSetjmpBuffer);
+};
+
+DART_FORCE_INLINE static RawObject** SavedCallerFP(RawObject** FP) {
+  return reinterpret_cast<RawObject**>(FP[kKBCSavedCallerFpSlotFromFp]);
+}
+
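+// Returns a pointer to the first of the argc arguments of the current frame.
+// The caller pushes the arguments immediately below the fixed frame, so the
+// i-th argument can be read as FrameArguments(FP, argc)[i], as the intrinsic
+// handlers below do.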
+DART_FORCE_INLINE static RawObject** FrameArguments(RawObject** FP,
+                                                    intptr_t argc) {
+  return FP - (kKBCDartFrameFixedSize + argc);
+}
+
+#define RAW_CAST(Type, val) (InterpreterHelpers::CastTo##Type(val))
+
+class InterpreterHelpers {
+ public:
+#define DEFINE_CASTS(Type)                                                     \
+  DART_FORCE_INLINE static Raw##Type* CastTo##Type(RawObject* obj) {           \
+    ASSERT((k##Type##Cid == kSmiCid) ? !obj->IsHeapObject()                    \
+                                     : obj->Is##Type());                       \
+    return reinterpret_cast<Raw##Type*>(obj);                                  \
+  }
+  CLASS_LIST(DEFINE_CASTS)
+#undef DEFINE_CASTS
+
+  DART_FORCE_INLINE static RawSmi* GetClassIdAsSmi(RawObject* obj) {
+    return Smi::New(obj->IsHeapObject() ? obj->GetClassId()
+                                        : static_cast<intptr_t>(kSmiCid));
+  }
+
+  DART_FORCE_INLINE static intptr_t GetClassId(RawObject* obj) {
+    return obj->IsHeapObject() ? obj->GetClassId()
+                               : static_cast<intptr_t>(kSmiCid);
+  }
+
+  DART_FORCE_INLINE static void IncrementUsageCounter(RawFunction* f) {
+    f->ptr()->usage_counter_++;
+  }
+
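+  // Bumps the call count slot of an ICData entries array in place. The count
+  // is stored as a Smi whose tag bit is zero, so adding Smi::RawValue(1) to
+  // the raw bits yields the raw bits of the incremented Smi without boxing.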
+  DART_FORCE_INLINE static void IncrementICUsageCount(RawObject** entries,
+                                                      intptr_t offset,
+                                                      intptr_t args_tested) {
+    const intptr_t count_offset = ICData::CountIndexFor(args_tested);
+    const intptr_t raw_smi_old =
+        reinterpret_cast<intptr_t>(entries[offset + count_offset]);
+    const intptr_t raw_smi_new = raw_smi_old + Smi::RawValue(1);
+    *reinterpret_cast<intptr_t*>(&entries[offset + count_offset]) = raw_smi_new;
+  }
+
+  DART_FORCE_INLINE static bool IsStrictEqualWithNumberCheck(RawObject* lhs,
+                                                             RawObject* rhs) {
+    if (lhs == rhs) {
+      return true;
+    }
+
+    if (lhs->IsHeapObject() && rhs->IsHeapObject()) {
+      const intptr_t lhs_cid = lhs->GetClassId();
+      const intptr_t rhs_cid = rhs->GetClassId();
+      if (lhs_cid == rhs_cid) {
+        switch (lhs_cid) {
+          case kDoubleCid:
+            return (bit_cast<uint64_t, double>(
+                        static_cast<RawDouble*>(lhs)->ptr()->value_) ==
+                    bit_cast<uint64_t, double>(
+                        static_cast<RawDouble*>(rhs)->ptr()->value_));
+
+          case kMintCid:
+            return (static_cast<RawMint*>(lhs)->ptr()->value_ ==
+                    static_cast<RawMint*>(rhs)->ptr()->value_);
+
+          case kBigintCid:
+            return (DLRT_BigintCompare(static_cast<RawBigint*>(lhs),
+                                       static_cast<RawBigint*>(rhs)) == 0);
+        }
+      }
+    }
+
+    return false;
+  }
+
+  template <typename T>
+  DART_FORCE_INLINE static T* Untag(T* tagged) {
+    return tagged->ptr();
+  }
+
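+  // Checks that index is a Smi within [0, length). Both values are raw Smis,
+  // so comparing (and sign-testing) the tagged bit patterns directly
+  // preserves the ordering of the untagged values.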
+  DART_FORCE_INLINE static bool CheckIndex(RawSmi* index, RawSmi* length) {
+    return !index->IsHeapObject() && (reinterpret_cast<intptr_t>(index) >= 0) &&
+           (reinterpret_cast<intptr_t>(index) <
+            reinterpret_cast<intptr_t>(length));
+  }
+
+  DART_FORCE_INLINE static intptr_t ArgDescTypeArgsLen(RawArray* argdesc) {
+    return Smi::Value(*reinterpret_cast<RawSmi**>(
+        reinterpret_cast<uword>(argdesc->ptr()) +
+        Array::element_offset(ArgumentsDescriptor::kTypeArgsLenIndex)));
+  }
+
+  DART_FORCE_INLINE static intptr_t ArgDescArgCount(RawArray* argdesc) {
+    return Smi::Value(*reinterpret_cast<RawSmi**>(
+        reinterpret_cast<uword>(argdesc->ptr()) +
+        Array::element_offset(ArgumentsDescriptor::kCountIndex)));
+  }
+
+  DART_FORCE_INLINE static intptr_t ArgDescPosCount(RawArray* argdesc) {
+    return Smi::Value(*reinterpret_cast<RawSmi**>(
+        reinterpret_cast<uword>(argdesc->ptr()) +
+        Array::element_offset(ArgumentsDescriptor::kPositionalCountIndex)));
+  }
+
+  static bool ObjectArraySetIndexed(Thread* thread,
+                                    RawObject** FP,
+                                    RawObject** result) {
+    return !thread->isolate()->type_checks() &&
+           ObjectArraySetIndexedUnchecked(thread, FP, result);
+  }
+
+  static bool ObjectArraySetIndexedUnchecked(Thread* thread,
+                                             RawObject** FP,
+                                             RawObject** result) {
+    RawObject** args = FrameArguments(FP, 3);
+    RawSmi* index = static_cast<RawSmi*>(args[1]);
+    RawArray* array = static_cast<RawArray*>(args[0]);
+    if (CheckIndex(index, array->ptr()->length_)) {
+      array->StorePointer(array->ptr()->data() + Smi::Value(index), args[2]);
+      return true;
+    }
+    return false;
+  }
+
+  static bool ObjectArrayGetIndexed(Thread* thread,
+                                    RawObject** FP,
+                                    RawObject** result) {
+    RawObject** args = FrameArguments(FP, 2);
+    RawSmi* index = static_cast<RawSmi*>(args[1]);
+    RawArray* array = static_cast<RawArray*>(args[0]);
+    if (CheckIndex(index, array->ptr()->length_)) {
+      *result = array->ptr()->data()[Smi::Value(index)];
+      return true;
+    }
+    return false;
+  }
+
+  static bool GrowableArraySetIndexed(Thread* thread,
+                                      RawObject** FP,
+                                      RawObject** result) {
+    return !thread->isolate()->type_checks() &&
+           GrowableArraySetIndexedUnchecked(thread, FP, result);
+  }
+
+  static bool GrowableArraySetIndexedUnchecked(Thread* thread,
+                                               RawObject** FP,
+                                               RawObject** result) {
+    RawObject** args = FrameArguments(FP, 3);
+    RawSmi* index = static_cast<RawSmi*>(args[1]);
+    RawGrowableObjectArray* array =
+        static_cast<RawGrowableObjectArray*>(args[0]);
+    if (CheckIndex(index, array->ptr()->length_)) {
+      RawArray* data = array->ptr()->data_;
+      data->StorePointer(data->ptr()->data() + Smi::Value(index), args[2]);
+      return true;
+    }
+    return false;
+  }
+
+  static bool GrowableArrayGetIndexed(Thread* thread,
+                                      RawObject** FP,
+                                      RawObject** result) {
+    RawObject** args = FrameArguments(FP, 2);
+    RawSmi* index = static_cast<RawSmi*>(args[1]);
+    RawGrowableObjectArray* array =
+        static_cast<RawGrowableObjectArray*>(args[0]);
+    if (CheckIndex(index, array->ptr()->length_)) {
+      *result = array->ptr()->data_->ptr()->data()[Smi::Value(index)];
+      return true;
+    }
+    return false;
+  }
+
+  static bool Double_getIsNan(Thread* thread,
+                              RawObject** FP,
+                              RawObject** result) {
+    RawObject** args = FrameArguments(FP, 1);
+    RawDouble* d = static_cast<RawDouble*>(args[0]);
+    *result =
+        isnan(d->ptr()->value_) ? Bool::True().raw() : Bool::False().raw();
+    return true;
+  }
+
+  static bool Double_getIsInfinite(Thread* thread,
+                                   RawObject** FP,
+                                   RawObject** result) {
+    RawObject** args = FrameArguments(FP, 1);
+    RawDouble* d = static_cast<RawDouble*>(args[0]);
+    *result =
+        isinf(d->ptr()->value_) ? Bool::True().raw() : Bool::False().raw();
+    return true;
+  }
+
+  static bool ObjectEquals(Thread* thread, RawObject** FP, RawObject** result) {
+    RawObject** args = FrameArguments(FP, 2);
+    *result = args[0] == args[1] ? Bool::True().raw() : Bool::False().raw();
+    return true;
+  }
+
+  static bool ObjectRuntimeType(Thread* thread,
+                                RawObject** FP,
+                                RawObject** result) {
+    RawObject** args = FrameArguments(FP, 1);
+    const intptr_t cid = GetClassId(args[0]);
+    if (cid == kClosureCid) {
+      return false;
+    }
+    if (cid < kNumPredefinedCids) {
+      if (cid == kDoubleCid) {
+        *result = thread->isolate()->object_store()->double_type();
+        return true;
+      } else if (RawObject::IsStringClassId(cid)) {
+        *result = thread->isolate()->object_store()->string_type();
+        return true;
+      } else if (RawObject::IsIntegerClassId(cid)) {
+        *result = thread->isolate()->object_store()->int_type();
+        return true;
+      }
+    }
+    RawClass* cls = thread->isolate()->class_table()->At(cid);
+    if (cls->ptr()->num_type_arguments_ != 0) {
+      return false;
+    }
+    RawType* typ = cls->ptr()->canonical_type_;
+    if (typ == Object::null()) {
+      return false;
+    }
+    *result = static_cast<RawObject*>(typ);
+    return true;
+  }
+
+  static bool GetDoubleOperands(RawObject** args, double* d1, double* d2) {
+    RawObject* obj2 = args[1];
+    if (!obj2->IsHeapObject()) {
+      *d2 =
+          static_cast<double>(reinterpret_cast<intptr_t>(obj2) >> kSmiTagSize);
+    } else if (obj2->GetClassId() == kDoubleCid) {
+      RawDouble* obj2d = static_cast<RawDouble*>(obj2);
+      *d2 = obj2d->ptr()->value_;
+    } else {
+      return false;
+    }
+    RawDouble* obj1 = static_cast<RawDouble*>(args[0]);
+    *d1 = obj1->ptr()->value_;
+    return true;
+  }
+
+  static RawObject* AllocateDouble(Thread* thread, double value) {
+    const intptr_t instance_size = Double::InstanceSize();
+    const uword start =
+        thread->heap()->new_space()->TryAllocateInTLAB(thread, instance_size);
+    if (LIKELY(start != 0)) {
+      uword tags = 0;
+      tags = RawObject::ClassIdTag::update(kDoubleCid, tags);
+      tags = RawObject::SizeTag::update(instance_size, tags);
+      // Also writes zero in the hash_ field.
+      *reinterpret_cast<uword*>(start + Double::tags_offset()) = tags;
+      *reinterpret_cast<double*>(start + Double::value_offset()) = value;
+      return reinterpret_cast<RawObject*>(start + kHeapObjectTag);
+    }
+    return NULL;
+  }
+
+  static bool Double_add(Thread* thread, RawObject** FP, RawObject** result) {
+    double d1, d2;
+    if (!GetDoubleOperands(FrameArguments(FP, 2), &d1, &d2)) {
+      return false;
+    }
+    RawObject* new_double = AllocateDouble(thread, d1 + d2);
+    if (new_double != NULL) {
+      *result = new_double;
+      return true;
+    }
+    return false;
+  }
+
+  static bool Double_mul(Thread* thread, RawObject** FP, RawObject** result) {
+    double d1, d2;
+    if (!GetDoubleOperands(FrameArguments(FP, 2), &d1, &d2)) {
+      return false;
+    }
+    RawObject* new_double = AllocateDouble(thread, d1 * d2);
+    if (new_double != NULL) {
+      *result = new_double;
+      return true;
+    }
+    return false;
+  }
+
+  static bool Double_sub(Thread* thread, RawObject** FP, RawObject** result) {
+    double d1, d2;
+    if (!GetDoubleOperands(FrameArguments(FP, 2), &d1, &d2)) {
+      return false;
+    }
+    RawObject* new_double = AllocateDouble(thread, d1 - d2);
+    if (new_double != NULL) {
+      *result = new_double;
+      return true;
+    }
+    return false;
+  }
+
+  static bool Double_div(Thread* thread, RawObject** FP, RawObject** result) {
+    double d1, d2;
+    if (!GetDoubleOperands(FrameArguments(FP, 2), &d1, &d2)) {
+      return false;
+    }
+    RawObject* new_double = AllocateDouble(thread, d1 / d2);
+    if (new_double != NULL) {
+      *result = new_double;
+      return true;
+    }
+    return false;
+  }
+
+  static bool Double_greaterThan(Thread* thread,
+                                 RawObject** FP,
+                                 RawObject** result) {
+    double d1, d2;
+    if (!GetDoubleOperands(FrameArguments(FP, 2), &d1, &d2)) {
+      return false;
+    }
+    *result = d1 > d2 ? Bool::True().raw() : Bool::False().raw();
+    return true;
+  }
+
+  static bool Double_greaterEqualThan(Thread* thread,
+                                      RawObject** FP,
+                                      RawObject** result) {
+    double d1, d2;
+    if (!GetDoubleOperands(FrameArguments(FP, 2), &d1, &d2)) {
+      return false;
+    }
+    *result = d1 >= d2 ? Bool::True().raw() : Bool::False().raw();
+    return true;
+  }
+
+  static bool Double_lessThan(Thread* thread,
+                              RawObject** FP,
+                              RawObject** result) {
+    double d1, d2;
+    if (!GetDoubleOperands(FrameArguments(FP, 2), &d1, &d2)) {
+      return false;
+    }
+    *result = d1 < d2 ? Bool::True().raw() : Bool::False().raw();
+    return true;
+  }
+
+  static bool Double_equal(Thread* thread, RawObject** FP, RawObject** result) {
+    double d1, d2;
+    if (!GetDoubleOperands(FrameArguments(FP, 2), &d1, &d2)) {
+      return false;
+    }
+    *result = d1 == d2 ? Bool::True().raw() : Bool::False().raw();
+    return true;
+  }
+
+  static bool Double_lessEqualThan(Thread* thread,
+                                   RawObject** FP,
+                                   RawObject** result) {
+    double d1, d2;
+    if (!GetDoubleOperands(FrameArguments(FP, 2), &d1, &d2)) {
+      return false;
+    }
+    *result = d1 <= d2 ? Bool::True().raw() : Bool::False().raw();
+    return true;
+  }
+
+  static bool ClearAsyncThreadStack(Thread* thread,
+                                    RawObject** FP,
+                                    RawObject** result) {
+    thread->clear_async_stack_trace();
+    *result = Object::null();
+    return true;
+  }
+
+  static bool SetAsyncThreadStackTrace(Thread* thread,
+                                       RawObject** FP,
+                                       RawObject** result) {
+    RawObject** args = FrameArguments(FP, 1);
+    thread->set_raw_async_stack_trace(
+        reinterpret_cast<RawStackTrace*>(args[0]));
+    *result = Object::null();
+    return true;
+  }
+
+  DART_FORCE_INLINE static RawCode* FrameCode(RawObject** FP) {
+    ASSERT(GetClassId(FP[kKBCPcMarkerSlotFromFp]) == kCodeCid);
+    return static_cast<RawCode*>(FP[kKBCPcMarkerSlotFromFp]);
+  }
+
+  DART_FORCE_INLINE static void SetFrameCode(RawObject** FP, RawCode* code) {
+    ASSERT(GetClassId(code) == kCodeCid);
+    FP[kKBCPcMarkerSlotFromFp] = code;
+  }
+
+  DART_FORCE_INLINE static uint8_t* GetTypedData(RawObject* obj,
+                                                 RawObject* index) {
+    ASSERT(RawObject::IsTypedDataClassId(obj->GetClassId()));
+    RawTypedData* array = reinterpret_cast<RawTypedData*>(obj);
+    const intptr_t byte_offset = Smi::Value(RAW_CAST(Smi, index));
+    ASSERT(byte_offset >= 0);
+    return array->ptr()->data() + byte_offset;
+  }
+};
+
+DART_FORCE_INLINE static uint32_t* SavedCallerPC(RawObject** FP) {
+  return reinterpret_cast<uint32_t*>(FP[kKBCSavedCallerPcSlotFromFp]);
+}
+
+DART_FORCE_INLINE static RawFunction* FrameFunction(RawObject** FP) {
+  RawFunction* function = static_cast<RawFunction*>(FP[kKBCFunctionSlotFromFp]);
+  ASSERT(InterpreterHelpers::GetClassId(function) == kFunctionCid ||
+         InterpreterHelpers::GetClassId(function) == kNullCid);
+  return function;
+}
+
+IntrinsicHandler Interpreter::intrinsics_[Interpreter::kIntrinsicCount];
+
+// One-time initialization of the interpreter's intrinsic handler table.
+void Interpreter::InitOnce() {
+  for (intptr_t i = 0; i < kIntrinsicCount; i++) {
+    intrinsics_[i] = 0;
+  }
+
+  intrinsics_[kObjectArraySetIndexedIntrinsic] =
+      InterpreterHelpers::ObjectArraySetIndexed;
+  intrinsics_[kObjectArraySetIndexedUncheckedIntrinsic] =
+      InterpreterHelpers::ObjectArraySetIndexedUnchecked;
+  intrinsics_[kObjectArrayGetIndexedIntrinsic] =
+      InterpreterHelpers::ObjectArrayGetIndexed;
+  intrinsics_[kGrowableArraySetIndexedIntrinsic] =
+      InterpreterHelpers::GrowableArraySetIndexed;
+  intrinsics_[kGrowableArraySetIndexedUncheckedIntrinsic] =
+      InterpreterHelpers::GrowableArraySetIndexedUnchecked;
+  intrinsics_[kGrowableArrayGetIndexedIntrinsic] =
+      InterpreterHelpers::GrowableArrayGetIndexed;
+  intrinsics_[kObjectEqualsIntrinsic] = InterpreterHelpers::ObjectEquals;
+  intrinsics_[kObjectRuntimeTypeIntrinsic] =
+      InterpreterHelpers::ObjectRuntimeType;
+
+  intrinsics_[kDouble_getIsNaNIntrinsic] = InterpreterHelpers::Double_getIsNan;
+  intrinsics_[kDouble_getIsInfiniteIntrinsic] =
+      InterpreterHelpers::Double_getIsInfinite;
+  intrinsics_[kDouble_addIntrinsic] = InterpreterHelpers::Double_add;
+  intrinsics_[kDouble_mulIntrinsic] = InterpreterHelpers::Double_mul;
+  intrinsics_[kDouble_subIntrinsic] = InterpreterHelpers::Double_sub;
+  intrinsics_[kDouble_divIntrinsic] = InterpreterHelpers::Double_div;
+  intrinsics_[kDouble_greaterThanIntrinsic] =
+      InterpreterHelpers::Double_greaterThan;
+  intrinsics_[kDouble_greaterEqualThanIntrinsic] =
+      InterpreterHelpers::Double_greaterEqualThan;
+  intrinsics_[kDouble_lessThanIntrinsic] = InterpreterHelpers::Double_lessThan;
+  intrinsics_[kDouble_equalIntrinsic] = InterpreterHelpers::Double_equal;
+  intrinsics_[kDouble_lessEqualThanIntrinsic] =
+      InterpreterHelpers::Double_lessEqualThan;
+  intrinsics_[kClearAsyncThreadStackTraceIntrinsic] =
+      InterpreterHelpers::ClearAsyncThreadStack;
+  intrinsics_[kSetAsyncThreadStackTraceIntrinsic] =
+      InterpreterHelpers::SetAsyncThreadStackTrace;
+}
+
+Interpreter::Interpreter()
+    : stack_(NULL), fp_(NULL), pp_(NULL), argdesc_(NULL) {
+  // Set up interpreter support first. Some of this information is needed to
+  // set up the architecture state.
+  // We allocate the stack here; its size is computed as the sum of the size
+  // specified by the user and the buffer space needed for handling stack
+  // overflow exceptions. To be safe against potential stack underflows, we
+  // also add some underflow buffer space.
+  stack_ = new uintptr_t[(OSThread::GetSpecifiedStackSize() +
+                          OSThread::kStackSizeBuffer +
+                          kInterpreterStackUnderflowSize) /
+                         sizeof(uintptr_t)];
+  // Low address.
+  stack_base_ =
+      reinterpret_cast<uword>(stack_) + kInterpreterStackUnderflowSize;
+  // High address.
+  stack_limit_ = stack_base_ + OSThread::GetSpecifiedStackSize();
+
+  last_setjmp_buffer_ = NULL;
+  top_exit_frame_info_ = 0;
+
+  DEBUG_ONLY(icount_ = 0);
+}
+
+Interpreter::~Interpreter() {
+  delete[] stack_;
+  Isolate* isolate = Isolate::Current();
+  if (isolate != NULL) {
+    isolate->set_interpreter(NULL);
+  }
+}
+
+// Get the active Interpreter for the current isolate.
+Interpreter* Interpreter::Current() {
+  Interpreter* interpreter = Isolate::Current()->interpreter();
+  if (interpreter == NULL) {
+    interpreter = new Interpreter();
+    Isolate::Current()->set_interpreter(interpreter);
+  }
+  return interpreter;
+}
+
+#if defined(DEBUG)
+// Returns true if tracing of executed instructions is enabled.
+DART_FORCE_INLINE bool Interpreter::IsTracingExecution() const {
+  return icount_ > FLAG_trace_interpreter_after;
+}
+
+// Prints bytecode instruction at given pc for instruction tracing.
+DART_NOINLINE void Interpreter::TraceInstruction(uint32_t* pc) const {
+  THR_Print("%" Pu64 " ", icount_);
+  if (FLAG_support_disassembler) {
+    KernelBytecodeDisassembler::Disassemble(reinterpret_cast<uword>(pc),
+                                            reinterpret_cast<uword>(pc + 1));
+  } else {
+    THR_Print("Disassembler not supported in this mode.\n");
+  }
+}
+#endif  // defined(DEBUG)
+
+// Calls into the Dart runtime are based on this interface.
+typedef void (*InterpreterRuntimeCall)(NativeArguments arguments);
+
+// Calls to leaf Dart runtime functions are based on this interface.
+typedef intptr_t (*InterpreterLeafRuntimeCall)(intptr_t r0,
+                                               intptr_t r1,
+                                               intptr_t r2,
+                                               intptr_t r3);
+
+// Calls to leaf float Dart runtime functions are based on this interface.
+typedef double (*InterpreterLeafFloatRuntimeCall)(double d0, double d1);
+
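+// Builds an artificial exit frame (function, code marker, saved pc, saved fp)
+// on top of the interpreter stack and records it in the thread's
+// top_exit_frame_info before calling out of the interpreter.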
+void Interpreter::Exit(Thread* thread,
+                       RawObject** base,
+                       RawObject** frame,
+                       uint32_t* pc) {
+  frame[0] = Function::null();
+  frame[1] = Code::null();
+  frame[2] = reinterpret_cast<RawObject*>(pc);
+  frame[3] = reinterpret_cast<RawObject*>(base);
+  fp_ = frame + kKBCDartFrameFixedSize;
+  thread->set_top_exit_frame_info(reinterpret_cast<uword>(fp_));
+}
+
+// TODO(vegorov): Investigate advantages of using
+// __builtin_s{add,sub,mul}_overflow() intrinsics here and below.
+// Note that they may clobber the output location even when there is overflow:
+// https://gcc.gnu.org/onlinedocs/gcc/Integer-Overflow-Builtins.html
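+//
+// A rough sketch of what a builtin-based variant could look like (working
+// around the clobbering caveat by writing through *out only on success):
+//
+//   static bool SignedAddWithOverflow(int32_t lhs, int32_t rhs,
+//                                     intptr_t* out) {
+//     int32_t result;
+//     if (__builtin_sadd_overflow(lhs, rhs, &result)) {
+//       return true;  // Overflow: *out is left untouched.
+//     }
+//     *out = result;
+//     return false;
+//   }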
+DART_FORCE_INLINE static bool SignedAddWithOverflow(int32_t lhs,
+                                                    int32_t rhs,
+                                                    intptr_t* out) {
+  intptr_t res = 1;
+#if defined(HOST_ARCH_IA32)
+  asm volatile(
+      "add %2, %1\n"
+      "jo 1f;\n"
+      "xor %0, %0\n"
+      "mov %1, 0(%3)\n"
+      "1: "
+      : "+r"(res), "+r"(lhs)
+      : "r"(rhs), "r"(out)
+      : "cc");
+#elif defined(HOST_ARCH_X64)
+  int64_t tmp;
+  asm volatile(
+      "addl %[rhs], %[lhs]\n"
+      "jo 1f;\n"
+      "xor %[res], %[res]\n"
+      "movslq %[lhs], %[tmp]\n"
+      "mov %[tmp], 0(%[out])\n"
+      "1: "
+      : [res] "+r"(res), [lhs] "+r"(lhs), [tmp] "=&r"(tmp)
+      : [rhs] "r"(rhs), [out] "r"(out)
+      : "cc");
+#elif defined(HOST_ARCH_ARM)
+  asm volatile(
+      "adds %1, %1, %2;\n"
+      "bvs 1f;\n"
+      "mov %0, #0;\n"
+      "str %1, [%3, #0]\n"
+      "1:"
+      : "+r"(res), "+r"(lhs)
+      : "r"(rhs), "r"(out)
+      : "cc");
+#elif defined(HOST_ARCH_ARM64)
+  asm volatile(
+      "adds %w1, %w1, %w2;\n"
+      "bvs 1f;\n"
+      "sxtw %x1, %w1;\n"
+      "mov %0, #0;\n"
+      "str %x1, [%3, #0]\n"
+      "1:"
+      : "+r"(res), "+r"(lhs)
+      : "r"(rhs), "r"(out)
+      : "cc");
+#else
+#error "Unsupported platform"
+#endif
+  return (res != 0);
+}
+
+DART_FORCE_INLINE static bool SignedSubWithOverflow(int32_t lhs,
+                                                    int32_t rhs,
+                                                    intptr_t* out) {
+  intptr_t res = 1;
+#if defined(HOST_ARCH_IA32)
+  asm volatile(
+      "sub %2, %1\n"
+      "jo 1f;\n"
+      "xor %0, %0\n"
+      "mov %1, 0(%3)\n"
+      "1: "
+      : "+r"(res), "+r"(lhs)
+      : "r"(rhs), "r"(out)
+      : "cc");
+#elif defined(HOST_ARCH_X64)
+  int64_t tmp;
+  asm volatile(
+      "subl %[rhs], %[lhs]\n"
+      "jo 1f;\n"
+      "xor %[res], %[res]\n"
+      "movslq %[lhs], %[tmp]\n"
+      "mov %[tmp], 0(%[out])\n"
+      "1: "
+      : [res] "+r"(res), [lhs] "+r"(lhs), [tmp] "=&r"(tmp)
+      : [rhs] "r"(rhs), [out] "r"(out)
+      : "cc");
+#elif defined(HOST_ARCH_ARM)
+  asm volatile(
+      "subs %1, %1, %2;\n"
+      "bvs 1f;\n"
+      "mov %0, #0;\n"
+      "str %1, [%3, #0]\n"
+      "1:"
+      : "+r"(res), "+r"(lhs)
+      : "r"(rhs), "r"(out)
+      : "cc");
+#elif defined(HOST_ARCH_ARM64)
+  asm volatile(
+      "subs %w1, %w1, %w2;\n"
+      "bvs 1f;\n"
+      "sxtw %x1, %w1;\n"
+      "mov %0, #0;\n"
+      "str %x1, [%3, #0]\n"
+      "1:"
+      : "+r"(res), "+r"(lhs)
+      : "r"(rhs), "r"(out)
+      : "cc");
+#else
+#error "Unsupported platform"
+#endif
+  return (res != 0);
+}
+
+DART_FORCE_INLINE static bool SignedMulWithOverflow(int32_t lhs,
+                                                    int32_t rhs,
+                                                    intptr_t* out) {
+  intptr_t res = 1;
+#if defined(HOST_ARCH_IA32)
+  asm volatile(
+      "imul %2, %1\n"
+      "jo 1f;\n"
+      "xor %0, %0\n"
+      "mov %1, 0(%3)\n"
+      "1: "
+      : "+r"(res), "+r"(lhs)
+      : "r"(rhs), "r"(out)
+      : "cc");
+#elif defined(HOST_ARCH_X64)
+  int64_t tmp;
+  asm volatile(
+      "imull %[rhs], %[lhs]\n"
+      "jo 1f;\n"
+      "xor %[res], %[res]\n"
+      "movslq %[lhs], %[tmp]\n"
+      "mov %[tmp], 0(%[out])\n"
+      "1: "
+      : [res] "+r"(res), [lhs] "+r"(lhs), [tmp] "=&r"(tmp)
+      : [rhs] "r"(rhs), [out] "r"(out)
+      : "cc");
+#elif defined(HOST_ARCH_ARM)
+  asm volatile(
+      "smull %1, ip, %1, %2;\n"
+      "cmp ip, %1, ASR #31;\n"
+      "bne 1f;\n"
+      "mov %0, $0;\n"
+      "str %1, [%3, #0]\n"
+      "1:"
+      : "+r"(res), "+r"(lhs)
+      : "r"(rhs), "r"(out)
+      : "cc", "r12");
+#elif defined(HOST_ARCH_ARM64)
+  int64_t prod_lo = 0;
+  asm volatile(
+      "smull %x1, %w2, %w3\n"
+      "asr %x2, %x1, #63\n"
+      "cmp %x2, %x1, ASR #31;\n"
+      "bne 1f;\n"
+      "mov %0, #0;\n"
+      "str %x1, [%4, #0]\n"
+      "1:"
+      : "=r"(res), "+r"(prod_lo), "+r"(lhs)
+      : "r"(rhs), "r"(out)
+      : "cc");
+#else
+#error "Unsupported platform"
+#endif
+  return (res != 0);
+}
+
+DART_FORCE_INLINE static bool AreBothSmis(intptr_t a, intptr_t b) {
+  return ((a | b) & kHeapObjectTag) == 0;
+}
+
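+// Smi fast-path helpers. Smis carry a clear kHeapObjectTag bit, so a tagged
+// Smi is the untagged value shifted left by one. For multiplication,
+// untagging one operand ((rhs) >> 1) keeps the product correctly tagged:
+// (2a) * b == 2(a*b). Like the Signed*WithOverflow helpers, these macros
+// return true if the fast path fails and false on success; the comparisons
+// and bitwise operations below can never fail.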
+#define SMI_MUL(lhs, rhs, pres) SignedMulWithOverflow((lhs), (rhs) >> 1, pres)
+#define SMI_COND(cond, lhs, rhs, pres)                                         \
+  ((*(pres) = ((lhs cond rhs) ? true_value : false_value)), false)
+#define SMI_EQ(lhs, rhs, pres) SMI_COND(==, lhs, rhs, pres)
+#define SMI_LT(lhs, rhs, pres) SMI_COND(<, lhs, rhs, pres)
+#define SMI_GT(lhs, rhs, pres) SMI_COND(>, lhs, rhs, pres)
+#define SMI_BITOR(lhs, rhs, pres) ((*(pres) = (lhs | rhs)), false)
+#define SMI_BITAND(lhs, rhs, pres) ((*(pres) = ((lhs) & (rhs))), false)
+#define SMI_BITXOR(lhs, rhs, pres) ((*(pres) = ((lhs) ^ (rhs))), false)
+
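+// Exits the current interpreter frame (recording the exit frame info) and
+// invokes the runtime entry |target| with the given NativeArguments.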
+void Interpreter::CallRuntime(Thread* thread,
+                              RawObject** base,
+                              RawObject** exit_frame,
+                              uint32_t* pc,
+                              intptr_t argc_tag,
+                              RawObject** args,
+                              RawObject** result,
+                              uword target) {
+  Exit(thread, base, exit_frame, pc);
+  NativeArguments native_args(thread, argc_tag, args, result);
+  reinterpret_cast<RuntimeFunction>(target)(native_args);
+}
+
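+// Sets up a minimal (synthetic) Dart frame on top of the expression stack so
+// that leaf runtime entries such as DeoptimizeCopyFrame see a regular frame.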
+DART_FORCE_INLINE static void EnterSyntheticFrame(RawObject*** FP,
+                                                  RawObject*** SP,
+                                                  uint32_t* pc) {
+  RawObject** fp = *SP + kKBCDartFrameFixedSize;
+  fp[kKBCPcMarkerSlotFromFp] = 0;
+  fp[kKBCSavedCallerPcSlotFromFp] = reinterpret_cast<RawObject*>(pc);
+  fp[kKBCSavedCallerFpSlotFromFp] = reinterpret_cast<RawObject*>(*FP);
+  *FP = fp;
+  *SP = fp - 1;
+}
+
+DART_FORCE_INLINE static void LeaveSyntheticFrame(RawObject*** FP,
+                                                  RawObject*** SP) {
+  RawObject** fp = *FP;
+  *FP = reinterpret_cast<RawObject**>(fp[kKBCSavedCallerFpSlotFromFp]);
+  *SP = fp - kKBCDartFrameFixedSize;
+}
+
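+// Invokes a function that has machine code, compiling it first if necessary.
+// If compilation produced bytecode instead, sets up an interpreter frame so
+// that the dispatch loop interprets it. Returns false if an exception was
+// thrown during the invocation.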
+DART_NOINLINE bool Interpreter::InvokeCompiled(Thread* thread,
+                                               RawFunction* function,
+                                               RawArray* argdesc,
+                                               RawObject** call_base,
+                                               RawObject** call_top,
+                                               uint32_t** pc,
+                                               RawObject*** FP,
+                                               RawObject*** SP) {
+  InterpreterSetjmpBuffer buffer(this);
+  if (!setjmp(buffer.buffer_)) {
+#if defined(USING_SIMULATOR) || defined(TARGET_ARCH_DBC)
+    // TODO(regis): Revisit.
+    UNIMPLEMENTED();
+#endif
+    ASSERT(thread->vm_tag() == VMTag::kDartTagId);
+    ASSERT(thread->execution_state() == Thread::kThreadInGenerated);
+    if (!Function::HasCode(function)) {
+      ASSERT(!Function::HasBytecode(function));
+      call_top[1] = 0;  // Code result.
+      call_top[2] = function;
+      CallRuntime(thread, *FP, call_top + 3, *pc, 1, call_top + 2, call_top + 1,
+                  reinterpret_cast<uword>(DRT_CompileFunction));
+    }
+    if (Function::HasCode(function)) {
+      RawCode* code = function->ptr()->code_;
+      ASSERT(code != StubCode::LazyCompile_entry()->code());
+      // TODO(regis): Do we really need a stub? Try to invoke directly.
+
+      // On success, returns a RawInstance.  On failure, a RawError.
+      typedef RawObject* (*invokestub)(RawCode* code, RawArray* argdesc,
+                                       RawObject** arg0, Thread* thread);
+      invokestub entrypoint = reinterpret_cast<invokestub>(
+          StubCode::InvokeDartCodeFromBytecode_entry()->EntryPoint());
+      *call_base = entrypoint(code, argdesc, call_base, thread);
+      // Result is at call_base.
+      *SP = call_base;
+    } else {
+      ASSERT(Function::HasBytecode(function));
+      // Bytecode was loaded in the above compilation step.
+      // Stay in interpreter.
+      RawCode* bytecode = function->ptr()->bytecode_;
+      RawObject** callee_fp = call_top + kKBCDartFrameFixedSize;
+      callee_fp[kKBCPcMarkerSlotFromFp] = bytecode;
+      callee_fp[kKBCSavedCallerPcSlotFromFp] =
+          reinterpret_cast<RawObject*>(*pc);
+      callee_fp[kKBCSavedCallerFpSlotFromFp] =
+          reinterpret_cast<RawObject*>(*FP);
+      pp_ = bytecode->ptr()->object_pool_;
+      *pc = reinterpret_cast<uint32_t*>(bytecode->ptr()->entry_point_);
+      pc_ = reinterpret_cast<uword>(*pc);  // For the profiler.
+      *FP = callee_fp;
+      *SP = *FP - 1;
+      // Dispatch will interpret function.
+    }
+    ASSERT(thread->vm_tag() == VMTag::kDartTagId);
+    ASSERT(thread->execution_state() == Thread::kThreadInGenerated);
+    thread->set_top_exit_frame_info(0);
+    return true;
+  } else {
+    return false;
+  }
+}
+
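+// Invokes the function of the callee frame, either through its compiled code
+// (see InvokeCompiled above) or by entering a new interpreter frame.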
+DART_FORCE_INLINE void Interpreter::Invoke(Thread* thread,
+                                           RawObject** call_base,
+                                           RawObject** call_top,
+                                           uint32_t** pc,
+                                           RawObject*** FP,
+                                           RawObject*** SP) {
+  RawObject** callee_fp = call_top + kKBCDartFrameFixedSize;
+
+  RawFunction* function = FrameFunction(callee_fp);
+  if (Function::HasCode(function) || !Function::HasBytecode(function)) {
+    // TODO(regis): If the function is a dispatcher, execute the dispatch here.
+    if (!InvokeCompiled(thread, function, argdesc_, call_base, call_top, pc, FP,
+                        SP)) {
+      // Handle the exception: restore interpreter state saved by Longjmp.
+      *FP = reinterpret_cast<RawObject**>(fp_);
+      *pc = reinterpret_cast<uint32_t*>(pc_);
+      pp_ = InterpreterHelpers::FrameCode(*FP)->ptr()->object_pool_;
+      *SP = *FP - 1;
+    }
+  } else {
+    RawCode* bytecode = function->ptr()->bytecode_;
+    callee_fp[kKBCPcMarkerSlotFromFp] = bytecode;
+    callee_fp[kKBCSavedCallerPcSlotFromFp] = reinterpret_cast<RawObject*>(*pc);
+    callee_fp[kKBCSavedCallerFpSlotFromFp] = reinterpret_cast<RawObject*>(*FP);
+    pp_ = bytecode->ptr()->object_pool_;
+    *pc = reinterpret_cast<uint32_t*>(bytecode->ptr()->entry_point_);
+    pc_ = reinterpret_cast<uword>(*pc);  // For the profiler.
+    *FP = callee_fp;
+    *SP = *FP - 1;
+  }
+}
+
+void Interpreter::InlineCacheMiss(int checked_args,
+                                  Thread* thread,
+                                  RawICData* icdata,
+                                  RawObject** args,
+                                  RawObject** top,
+                                  uint32_t* pc,
+                                  RawObject** FP,
+                                  RawObject** SP) {
+  RawObject** result = top;
+  RawObject** miss_handler_args = top + 1;
+  for (intptr_t i = 0; i < checked_args; i++) {
+    miss_handler_args[i] = args[i];
+  }
+  miss_handler_args[checked_args] = icdata;
+  RuntimeFunction handler = NULL;
+  switch (checked_args) {
+    case 1:
+      handler = DRT_InlineCacheMissHandlerOneArg;
+      break;
+    case 2:
+      handler = DRT_InlineCacheMissHandlerTwoArgs;
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+
+  // Handler arguments: arguments to check and an ICData object.
+  const intptr_t miss_handler_argc = checked_args + 1;
+  RawObject** exit_frame = miss_handler_args + miss_handler_argc;
+  CallRuntime(thread, FP, exit_frame, pc, miss_handler_argc, miss_handler_args,
+              result, reinterpret_cast<uword>(handler));
+}
+
+DART_FORCE_INLINE void Interpreter::InstanceCall1(Thread* thread,
+                                                  RawICData* icdata,
+                                                  RawObject** call_base,
+                                                  RawObject** top,
+                                                  uint32_t** pc,
+                                                  RawObject*** FP,
+                                                  RawObject*** SP,
+                                                  bool optimized) {
+  ASSERT(icdata->GetClassId() == kICDataCid);
+
+  const intptr_t kCheckedArgs = 1;
+  RawObject** args = call_base;
+  RawArray* cache = icdata->ptr()->ic_data_->ptr();
+
+  const intptr_t type_args_len =
+      InterpreterHelpers::ArgDescTypeArgsLen(icdata->ptr()->args_descriptor_);
+  const intptr_t receiver_idx = type_args_len > 0 ? 1 : 0;
+  RawSmi* receiver_cid =
+      InterpreterHelpers::GetClassIdAsSmi(args[receiver_idx]);
+
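+  // Linear search of the IC data cache: each entry holds kCheckedArgs class
+  // ids followed by the target function and a usage count.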
+  bool found = false;
+  const intptr_t length = Smi::Value(cache->length_);
+  intptr_t i;
+  for (i = 0; i < (length - (kCheckedArgs + 2)); i += (kCheckedArgs + 2)) {
+    if (cache->data()[i + 0] == receiver_cid) {
+      top[0] = cache->data()[i + kCheckedArgs];
+      found = true;
+      break;
+    }
+  }
+
+  argdesc_ = icdata->ptr()->args_descriptor_;
+
+  if (found) {
+    if (!optimized) {
+      InterpreterHelpers::IncrementICUsageCount(cache->data(), i, kCheckedArgs);
+    }
+  } else {
+    InlineCacheMiss(kCheckedArgs, thread, icdata, call_base + receiver_idx, top,
+                    *pc, *FP, *SP);
+  }
+
+  Invoke(thread, call_base, top, pc, FP, SP);
+}
+
+DART_FORCE_INLINE void Interpreter::InstanceCall2(Thread* thread,
+                                                  RawICData* icdata,
+                                                  RawObject** call_base,
+                                                  RawObject** top,
+                                                  uint32_t** pc,
+                                                  RawObject*** FP,
+                                                  RawObject*** SP,
+                                                  bool optimized) {
+  ASSERT(icdata->GetClassId() == kICDataCid);
+
+  const intptr_t kCheckedArgs = 2;
+  RawObject** args = call_base;
+  RawArray* cache = icdata->ptr()->ic_data_->ptr();
+
+  const intptr_t type_args_len =
+      InterpreterHelpers::ArgDescTypeArgsLen(icdata->ptr()->args_descriptor_);
+  const intptr_t receiver_idx = type_args_len > 0 ? 1 : 0;
+  RawSmi* receiver_cid =
+      InterpreterHelpers::GetClassIdAsSmi(args[receiver_idx]);
+  RawSmi* arg0_cid =
+      InterpreterHelpers::GetClassIdAsSmi(args[receiver_idx + 1]);
+
+  bool found = false;
+  const intptr_t length = Smi::Value(cache->length_);
+  intptr_t i;
+  for (i = 0; i < (length - (kCheckedArgs + 2)); i += (kCheckedArgs + 2)) {
+    if ((cache->data()[i + 0] == receiver_cid) &&
+        (cache->data()[i + 1] == arg0_cid)) {
+      top[0] = cache->data()[i + kCheckedArgs];
+      found = true;
+      break;
+    }
+  }
+
+  argdesc_ = icdata->ptr()->args_descriptor_;
+
+  if (found) {
+    if (!optimized) {
+      InterpreterHelpers::IncrementICUsageCount(cache->data(), i, kCheckedArgs);
+    }
+  } else {
+    InlineCacheMiss(kCheckedArgs, thread, icdata, call_base + receiver_idx, top,
+                    *pc, *FP, *SP);
+  }
+
+  Invoke(thread, call_base, top, pc, FP, SP);
+}
+
+DART_FORCE_INLINE void Interpreter::PrepareForTailCall(
+    RawCode* code,
+    RawImmutableArray* args_desc,
+    RawObject** FP,
+    RawObject*** SP,
+    uint32_t** pc) {
+  // Drop all stack locals.
+  *SP = FP - 1;
+
+  // Replace the callee with the new [code].
+  FP[kKBCFunctionSlotFromFp] = Object::null();
+  FP[kKBCPcMarkerSlotFromFp] = code;
+  *pc = reinterpret_cast<uint32_t*>(code->ptr()->entry_point_);
+  pc_ = reinterpret_cast<uword>(pc);  // For the profiler.
+  pp_ = code->ptr()->object_pool_;
+  argdesc_ = args_desc;
+}
+
+// Note: functions below are marked DART_NOINLINE to recover performance on
+// ARM where inlining these functions into the interpreter loop seemed to cause
+// some code quality issues.
+static DART_NOINLINE bool InvokeRuntime(Thread* thread,
+                                        Interpreter* interpreter,
+                                        RuntimeFunction drt,
+                                        const NativeArguments& args) {
+  InterpreterSetjmpBuffer buffer(interpreter);
+  if (!setjmp(buffer.buffer_)) {
+    thread->set_vm_tag(reinterpret_cast<uword>(drt));
+    drt(args);
+    thread->set_vm_tag(VMTag::kDartTagId);
+    thread->set_top_exit_frame_info(0);
+    return true;
+  } else {
+    return false;
+  }
+}
+
+static DART_NOINLINE bool InvokeNative(Thread* thread,
+                                       Interpreter* interpreter,
+                                       NativeFunctionWrapper wrapper,
+                                       Dart_NativeFunction function,
+                                       Dart_NativeArguments args) {
+  InterpreterSetjmpBuffer buffer(interpreter);
+  if (!setjmp(buffer.buffer_)) {
+    thread->set_vm_tag(reinterpret_cast<uword>(function));
+    wrapper(args, function);
+    thread->set_vm_tag(VMTag::kDartTagId);
+    thread->set_top_exit_frame_info(0);
+    return true;
+  } else {
+    return false;
+  }
+}
+
+// Note:
+// All macro helpers are intended to be used only inside Interpreter::Call.
+
+// Counts and prints executed bytecode instructions (in DEBUG mode).
+#if defined(DEBUG)
+#define TRACE_INSTRUCTION                                                      \
+  icount_++;                                                                   \
+  if (IsTracingExecution()) {                                                  \
+    TraceInstruction(pc - 1);                                                  \
+  }
+#else
+#define TRACE_INSTRUCTION
+#endif  // defined(DEBUG)
+
+// Decode opcode and A part of the given value and dispatch to the
+// corresponding bytecode handler.
+#define DISPATCH_OP(val)                                                       \
+  do {                                                                         \
+    op = (val);                                                                \
+    rA = ((op >> 8) & 0xFF);                                                   \
+    TRACE_INSTRUCTION                                                          \
+    goto* dispatch[op & 0xFF];                                                 \
+  } while (0)
+
+// Fetch next operation from PC, increment program counter and dispatch.
+#define DISPATCH() DISPATCH_OP(*pc++)
+
+// Define entry point that handles bytecode Name with the given operand format.
+#define BYTECODE(Name, Operands)                                               \
+  BYTECODE_HEADER(Name, DECLARE_##Operands, DECODE_##Operands)
+
+#define BYTECODE_HEADER(Name, Declare, Decode)                                 \
+  Declare;                                                                     \
+  bc##Name : Decode
+
+// Helpers to decode common instruction formats. Used in conjunction with
+// BYTECODE() macro.
+#define DECLARE_A_B_C                                                          \
+  uint16_t rB, rC;                                                             \
+  USE(rB);                                                                     \
+  USE(rC)
+#define DECODE_A_B_C                                                           \
+  rB = ((op >> KernelBytecode::kBShift) & KernelBytecode::kBMask);             \
+  rC = ((op >> KernelBytecode::kCShift) & KernelBytecode::kCMask);
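+// For example (a sketch, assuming the B and C shift/mask constants in
+// constants_kbc.h select bits 16-23 and 24-31): for the op word 0xCCBBAA07,
+// DISPATCH_OP extracts opcode 0x07 and rA 0xAA, and DECODE_A_B_C extracts
+// rB 0xBB and rC 0xCC.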
+
+#define DECLARE_A_B_Y                                                          \
+  uint16_t rB;                                                                 \
+  int8_t rY;                                                                   \
+  USE(rB);                                                                     \
+  USE(rY)
+#define DECODE_A_B_Y                                                           \
+  rB = ((op >> KernelBytecode::kBShift) & KernelBytecode::kBMask);             \
+  rY = ((op >> KernelBytecode::kYShift) & KernelBytecode::kYMask);
+
+#define DECLARE_0
+#define DECODE_0
+
+#define DECLARE_A
+#define DECODE_A
+
+#define DECLARE___D                                                            \
+  uint32_t rD;                                                                 \
+  USE(rD)
+#define DECODE___D rD = (op >> KernelBytecode::kDShift);
+
+#define DECLARE_A_D DECLARE___D
+#define DECODE_A_D DECODE___D
+
+#define DECLARE_A_X                                                            \
+  int32_t rD;                                                                  \
+  USE(rD)
+#define DECODE_A_X rD = (static_cast<int32_t>(op) >> KernelBytecode::kDShift);
+
+#define SMI_FASTPATH_ICDATA_INC                                                \
+  do {                                                                         \
+    ASSERT(KernelBytecode::IsCallOpcode(*pc));                                 \
+    const uint16_t kidx = KernelBytecode::DecodeD(*pc);                        \
+    const RawICData* icdata = RAW_CAST(ICData, LOAD_CONSTANT(kidx));           \
+    RawObject** entries = icdata->ptr()->ic_data_->ptr()->data();              \
+    InterpreterHelpers::IncrementICUsageCount(entries, 0, 2);                  \
+  } while (0);
+
+// Declare a bytecode handler for a smi operation (e.g. AddTOS) with the
+// given result type and the given behavior specified as a function that
+// takes the left and right operands and a result slot, and returns true if
+// the fast path fails (e.g. on overflow) and false on success.
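+// For example, for AddTOS the stack holds [..., lhs, rhs]; on success the
+// result overwrites the lhs slot, SP drops by one, and the generic call
+// bytecode that follows is skipped.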
+#define SMI_FASTPATH_TOS(ResultT, Func)                                        \
+  {                                                                            \
+    const intptr_t lhs = reinterpret_cast<intptr_t>(SP[-1]);                   \
+    const intptr_t rhs = reinterpret_cast<intptr_t>(SP[-0]);                   \
+    ResultT* slot = reinterpret_cast<ResultT*>(SP - 1);                        \
+    if (LIKELY(!thread->isolate()->single_step()) &&                           \
+        LIKELY(AreBothSmis(lhs, rhs) && !Func(lhs, rhs, slot))) {              \
+      SMI_FASTPATH_ICDATA_INC;                                                 \
+      /* Fast path succeeded. Skip the generic call that follows. */           \
+      pc++;                                                                    \
+      /* We dropped 2 arguments and pushed the result. */                      \
+      SP--;                                                                    \
+    }                                                                          \
+  }
+
+// Skip the next instruction if there is no overflow.
+#define SMI_OP_CHECK(ResultT, Func)                                            \
+  {                                                                            \
+    const intptr_t lhs = reinterpret_cast<intptr_t>(FP[rB]);                   \
+    const intptr_t rhs = reinterpret_cast<intptr_t>(FP[rC]);                   \
+    ResultT* slot = reinterpret_cast<ResultT*>(&FP[rA]);                       \
+    if (LIKELY(!Func(lhs, rhs, slot))) {                                       \
+      /* Success. Skip the instruction that follows. */                        \
+      pc++;                                                                    \
+    }                                                                          \
+  }
+
+// Do not check for overflow.
+#define SMI_OP_NOCHECK(ResultT, Func)                                          \
+  {                                                                            \
+    const intptr_t lhs = reinterpret_cast<intptr_t>(FP[rB]);                   \
+    const intptr_t rhs = reinterpret_cast<intptr_t>(FP[rC]);                   \
+    ResultT* slot = reinterpret_cast<ResultT*>(&FP[rA]);                       \
+    Func(lhs, rhs, slot);                                                      \
+  }
+
+// Exception handling helper. Gets handler FP and PC from the Interpreter where
+// they were stored by Interpreter::Longjmp and proceeds to execute the handler.
+// Corner case: the handler PC can be a fake marker that marks the entry
+// frame, which means the exception was not handled in Dart code. In this
+// case we return the caught exception from Interpreter::Call.
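+// The fake marker is encoded as (argc << 2) | 2 when the entry frame is set
+// up in Interpreter::Call, so testing bit 1 of the PC identifies it.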
+#define HANDLE_EXCEPTION                                                       \
+  do {                                                                         \
+    FP = reinterpret_cast<RawObject**>(fp_);                                   \
+    pc = reinterpret_cast<uint32_t*>(pc_);                                     \
+    if ((reinterpret_cast<uword>(pc) & 2) != 0) { /* Entry frame? */           \
+      fp_ = reinterpret_cast<RawObject**>(fp_[0]);                             \
+      thread->set_top_exit_frame_info(reinterpret_cast<uword>(fp_));           \
+      thread->set_top_resource(top_resource);                                  \
+      thread->set_vm_tag(vm_tag);                                              \
+      return special_[kExceptionSpecialIndex];                                 \
+    }                                                                          \
+    pp_ = InterpreterHelpers::FrameCode(FP)->ptr()->object_pool_;              \
+    goto DispatchAfterException;                                               \
+  } while (0)
+
+#define HANDLE_RETURN                                                          \
+  do {                                                                         \
+    pp_ = InterpreterHelpers::FrameCode(FP)->ptr()->object_pool_;              \
+  } while (0)
+
+// Runtime call helpers: handle invocation and potential exception after return.
+#define INVOKE_RUNTIME(Func, Args)                                             \
+  if (!InvokeRuntime(thread, this, Func, Args)) {                              \
+    HANDLE_EXCEPTION;                                                          \
+  } else {                                                                     \
+    HANDLE_RETURN;                                                             \
+  }
+
+#define INVOKE_NATIVE(Wrapper, Func, Args)                                     \
+  if (!InvokeNative(thread, this, Wrapper, Func, Args)) {                      \
+    HANDLE_EXCEPTION;                                                          \
+  } else {                                                                     \
+    HANDLE_RETURN;                                                             \
+  }
+
+#define LOAD_CONSTANT(index) (pp_->ptr()->data()[(index)].raw_obj_)
+
+// Returns true if deoptimization succeeds.
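+// Deoptimization proceeds in three steps: DeoptimizeCopyFrame computes the
+// unoptimized frame size, DeoptimizeFillFrame fills the new frame, and
+// DRT_DeoptimizeMaterialize rematerializes objects eliminated by the
+// optimizer, leaving the number of materialization arguments on the stack.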
+DART_FORCE_INLINE bool Interpreter::Deoptimize(Thread* thread,
+                                               uint32_t** pc,
+                                               RawObject*** FP,
+                                               RawObject*** SP,
+                                               bool is_lazy) {
+  // Note: frame translation will take care of preserving the result at the
+  // top of the stack. See CompilerDeoptInfo::CreateDeoptInfo.
+
+  // Make sure we preserve SP[0] when entering synthetic frame below.
+  (*SP)++;
+
+  // Leaf runtime function DeoptimizeCopyFrame expects a Dart frame.
+  // The code in this frame may not cause GC.
+  // DeoptimizeCopyFrame and DeoptimizeFillFrame are leaf runtime calls.
+  EnterSyntheticFrame(FP, SP, *pc - (is_lazy ? 1 : 0));
+  const intptr_t frame_size_in_bytes =
+      DLRT_DeoptimizeCopyFrame(reinterpret_cast<uword>(*FP), is_lazy ? 1 : 0);
+  LeaveSyntheticFrame(FP, SP);
+
+  *SP = *FP + (frame_size_in_bytes / kWordSize);
+  EnterSyntheticFrame(FP, SP, *pc - (is_lazy ? 1 : 0));
+  DLRT_DeoptimizeFillFrame(reinterpret_cast<uword>(*FP));
+
+  // We are now inside a valid frame.
+  {
+    *++(*SP) = 0;  // Space for the result: number of materialization args.
+    Exit(thread, *FP, *SP + 1, /*pc=*/0);
+    NativeArguments native_args(thread, 0, *SP, *SP);
+    if (!InvokeRuntime(thread, this, DRT_DeoptimizeMaterialize, native_args)) {
+      return false;
+    }
+  }
+  const intptr_t materialization_arg_count =
+      Smi::Value(RAW_CAST(Smi, *(*SP)--)) / kWordSize;
+
+  // Restore caller PC.
+  *pc = SavedCallerPC(*FP);
+  pc_ = reinterpret_cast<uword>(*pc);  // For the profiler.
+
+  // Check if it is a fake PC marking the entry frame.
+  ASSERT((reinterpret_cast<uword>(*pc) & 2) == 0);
+
+  // Restore SP, FP and PP.
+  // Unoptimized frame SP is one below FrameArguments(...) because
+  // FrameArguments(...) returns a pointer to the first argument.
+  *SP = FrameArguments(*FP, materialization_arg_count) - 1;
+  *FP = SavedCallerFP(*FP);
+
+  // Restore pp.
+  pp_ = InterpreterHelpers::FrameCode(*FP)->ptr()->object_pool_;
+
+  return true;
+}
+
+RawObject* Interpreter::Call(const Code& code,
+                             const Array& arguments_descriptor,
+                             const Array& arguments,
+                             Thread* thread) {
+  // Dispatch table used to interpret bytecode: contains the addresses of the
+  // labels of the bytecode handlers, which are defined below.
+  static const void* dispatch[] = {
+#define TARGET(name, fmt, fmta, fmtb, fmtc) &&bc##name,
+      KERNEL_BYTECODES_LIST(TARGET)
+#undef TARGET
+  };
+
+  // Interpreter state (see constants_kbc.h for high-level overview).
+  uint32_t* pc;    // Program Counter: points to the next op to execute.
+  RawObject** FP;  // Frame Pointer.
+  RawObject** SP;  // Stack Pointer.
+
+  uint32_t op;  // Currently executing op.
+  uint16_t rA;  // A component of the currently executing op.
+
+  if (fp_ == NULL) {
+    fp_ = reinterpret_cast<RawObject**>(stack_);
+  }
+
+  // Save current VM tag and mark thread as executing Dart code.
+  const uword vm_tag = thread->vm_tag();
+  thread->set_vm_tag(VMTag::kDartTagId);  // TODO(regis): kDartBytecodeTagId?
+
+  // Save current top stack resource and reset the list.
+  StackResource* top_resource = thread->top_resource();
+  thread->set_top_resource(NULL);
+
+  // Setup entry frame:
+  //
+  //                        ^
+  //                        |  previous Dart frames
+  //       ~~~~~~~~~~~~~~~  |
+  //       | ........... | -+
+  // fp_ > |             |     saved top_exit_frame_info
+  //       | arg 0       | -+
+  //       ~~~~~~~~~~~~~~~  |
+  //                         > incoming arguments
+  //       ~~~~~~~~~~~~~~~  |
+  //       | arg 1       | -+
+  //       | function    | -+
+  //       | code        |  |
+  //       | callee PC   | ---> special fake PC marking an entry frame
+  //  SP > | fp_         |  |
+  //  FP > | ........... |   > normal Dart frame (see stack_frame_kbc.h)
+  //                        |
+  //                        v
+  //
+  FP = fp_ + 1 + arguments.Length() + kKBCDartFrameFixedSize;
+  SP = FP - 1;
+
+  // Save outer top_exit_frame_info.
+  fp_[0] = reinterpret_cast<RawObject*>(thread->top_exit_frame_info());
+  thread->set_top_exit_frame_info(0);
+
+  // Copy arguments and setup the Dart frame.
+  const intptr_t argc = arguments.Length();
+  for (intptr_t i = 0; i < argc; i++) {
+    fp_[1 + i] = arguments.At(i);
+  }
+
+  FP[kKBCFunctionSlotFromFp] = code.function();
+  FP[kKBCPcMarkerSlotFromFp] = code.raw();
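+  // Fake return address: (argc << 2) | 2 marks this as an entry frame (see
+  // HANDLE_EXCEPTION and the diagram above).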
+  FP[kKBCSavedCallerPcSlotFromFp] =
+      reinterpret_cast<RawObject*>((argc << 2) | 2);
+  FP[kKBCSavedCallerFpSlotFromFp] = reinterpret_cast<RawObject*>(fp_);
+
+  // Load argument descriptor.
+  argdesc_ = arguments_descriptor.raw();
+
+  // Ready to start executing bytecode. Load entry point and corresponding
+  // object pool.
+  pc = reinterpret_cast<uint32_t*>(code.raw()->ptr()->entry_point_);
+  pc_ = reinterpret_cast<uword>(pc);  // For the profiler.
+  pp_ = code.object_pool();
+
+  // Cache some frequently used values in the frame.
+  RawBool* true_value = Bool::True().raw();
+  RawBool* false_value = Bool::False().raw();
+  RawObject* null_value = Object::null();
+
+#if defined(DEBUG)
+  Function& function_h = Function::Handle();
+#endif
+
+  // Enter the dispatch loop.
+  DISPATCH();
+
+  // KernelBytecode handlers (see constants_kbc.h for bytecode descriptions).
+  {
+    BYTECODE(Entry, A_D);
+    const uint16_t num_locals = rD;
+
+    // Initialize locals with null and set SP.
+    for (intptr_t i = 0; i < num_locals; i++) {
+      FP[i] = null_value;
+    }
+    SP = FP + num_locals - 1;
+
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(EntryOptional, A_B_C);
+    // TODO(regis): Recover deleted code.
+    // See https://dart-review.googlesource.com/c/sdk/+/25320
+    UNIMPLEMENTED();
+
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(EntryOptimized, A_D);
+    const uint16_t num_registers = rD;
+
+    // Reserve space for registers used by the optimized code.
+    SP = FP + num_registers - 1;
+
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(Frame, A_D);
+    // Initialize locals with null and increment SP.
+    const uint16_t num_locals = rD;
+    for (intptr_t i = 1; i <= num_locals; i++) {
+      SP[i] = null_value;
+    }
+    SP += num_locals;
+
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(SetFrame, A);
+    SP = FP + rA - 1;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(Compile, 0);
+    FP[0] = argdesc_;
+    FP[1] = FrameFunction(FP);
+    FP[2] = 0;
+
+    UNIMPLEMENTED();  // TODO(regis): Revisit.
+
+    Exit(thread, FP, FP + 3, pc);
+    NativeArguments args(thread, 1, FP + 1, FP + 2);
+    INVOKE_RUNTIME(DRT_CompileFunction, args);
+    {
+      // Function should be compiled now, dispatch to its entry point.
+      RawCode* code = FrameFunction(FP)->ptr()->code_;
+      InterpreterHelpers::SetFrameCode(FP, code);
+      pp_ = code->ptr()->object_pool_;
+      pc = reinterpret_cast<uint32_t*>(code->ptr()->entry_point_);
+      pc_ = reinterpret_cast<uword>(pc);  // For the profiler.
+      argdesc_ = static_cast<RawArray*>(FP[0]);
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(HotCheck, A_D);
+    const uint8_t increment = rA;
+    const uint16_t threshold = rD;
+    RawFunction* f = FrameFunction(FP);
+    int32_t counter = f->ptr()->usage_counter_;
+    // Note: we don't increment usage counter in the prologue of optimized
+    // functions.
+    if (increment) {
+      counter += increment;
+      f->ptr()->usage_counter_ = counter;
+    }
+    if (UNLIKELY(counter >= threshold)) {
+      FP[0] = f;
+      FP[1] = 0;
+
+      // Save the args descriptor which came in.
+      FP[2] = argdesc_;
+
+      UNIMPLEMENTED();  // TODO(regis): Revisit.
+
+      // Make DRT_OptimizeInvokedFunction see a stub as its caller for
+      // consistency with the other architectures, and to avoid needing to
+      // generate a stackmap for the HotCheck pc.
+      const StubEntry* stub = StubCode::OptimizeFunction_entry();
+      FP[kKBCPcMarkerSlotFromFp] = stub->code();
+      pc = reinterpret_cast<uint32_t*>(stub->EntryPoint());
+
+      Exit(thread, FP, FP + 3, pc);
+      NativeArguments args(thread, 1, /*argv=*/FP, /*retval=*/FP + 1);
+      INVOKE_RUNTIME(DRT_OptimizeInvokedFunction, args);
+      {
+        // DRT_OptimizeInvokedFunction returns the code object to execute.
+        ASSERT(FP[1]->GetClassId() == kFunctionCid);
+        RawFunction* function = static_cast<RawFunction*>(FP[1]);
+        RawCode* code = function->ptr()->code_;
+        InterpreterHelpers::SetFrameCode(FP, code);
+
+        // Restore args descriptor which came in.
+        argdesc_ = Array::RawCast(FP[2]);
+
+        pp_ = code->ptr()->object_pool_;
+        pc = reinterpret_cast<uint32_t*>(function->ptr()->entry_point_);
+        pc_ = reinterpret_cast<uword>(pc);  // For the profiler.
+      }
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(CheckStack, A);
+    {
+      // TODO(regis): Support a second stack limit or can we share the DBC one?
+#if 0
+      if (reinterpret_cast<uword>(SP) >= thread->stack_limit()) {
+        Exit(thread, FP, SP + 1, pc);
+        NativeArguments args(thread, 0, NULL, NULL);
+        INVOKE_RUNTIME(DRT_StackOverflow, args);
+      }
+#endif
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(CheckStackAlwaysExit, A);
+    {
+      Exit(thread, FP, SP + 1, pc);
+      NativeArguments args(thread, 0, NULL, NULL);
+      INVOKE_RUNTIME(DRT_StackOverflow, args);
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(CheckFunctionTypeArgs, A_D);
+    const uint16_t declared_type_args_len = rA;
+    const uint16_t first_stack_local_index = rD;
+
+    // Decode arguments descriptor's type args len.
+    const intptr_t type_args_len =
+        InterpreterHelpers::ArgDescTypeArgsLen(argdesc_);
+    if ((type_args_len != declared_type_args_len) && (type_args_len != 0)) {
+      goto ClosureNoSuchMethod;
+    }
+    if (type_args_len > 0) {
+      // Decode arguments descriptor's argument count (excluding type args).
+      const intptr_t arg_count = InterpreterHelpers::ArgDescArgCount(argdesc_);
+      // Copy passed-in type args to first local slot.
+      FP[first_stack_local_index] = *FrameArguments(FP, arg_count + 1);
+    } else if (declared_type_args_len > 0) {
+      FP[first_stack_local_index] = Object::null();
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DebugStep, A);
+    if (thread->isolate()->single_step()) {
+      Exit(thread, FP, SP + 1, pc);
+      NativeArguments args(thread, 0, NULL, NULL);
+      INVOKE_RUNTIME(DRT_SingleStepHandler, args);
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DebugBreak, A);
+#if !defined(PRODUCT)
+    {
+      const uint32_t original_bc =
+          static_cast<uint32_t>(reinterpret_cast<uintptr_t>(
+              thread->isolate()->debugger()->GetPatchedStubAddress(
+                  reinterpret_cast<uword>(pc))));
+
+      SP[1] = null_value;
+      Exit(thread, FP, SP + 2, pc);
+      NativeArguments args(thread, 0, NULL, SP + 1);
+      INVOKE_RUNTIME(DRT_BreakpointRuntimeHandler, args)
+      DISPATCH_OP(original_bc);
+    }
+#else
+    // There should be no debug breaks in product mode.
+    UNREACHABLE();
+#endif
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(InstantiateType, A_D);
+    // Stack: instantiator type args, function type args
+    RawObject* type = LOAD_CONSTANT(rD);
+    SP[1] = type;
+    SP[2] = SP[-1];
+    SP[3] = SP[0];
+    Exit(thread, FP, SP + 4, pc);
+    {
+      NativeArguments args(thread, 3, SP + 1, SP - 1);
+      INVOKE_RUNTIME(DRT_InstantiateType, args);
+    }
+    SP -= 1;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(InstantiateTypeArgumentsTOS, A_D);
+    // Stack: instantiator type args, function type args
+    RawTypeArguments* type_arguments =
+        static_cast<RawTypeArguments*>(LOAD_CONSTANT(rD));
+
+    RawObject* instantiator_type_args = SP[-1];
+    RawObject* function_type_args = SP[0];
+    // If both instantiators are null and the type argument vector
+    // instantiated from null becomes a vector of dynamic (i.e. rA != 0),
+    // then use null as the instantiated type arguments.
+    if ((rA == 0) || (null_value != instantiator_type_args) ||
+        (null_value != function_type_args)) {
+      // First lookup in the cache.
+      RawArray* instantiations = type_arguments->ptr()->instantiations_;
+      for (intptr_t i = 0;
+           instantiations->ptr()->data()[i] != NULL;  // kNoInstantiator
+           i += 3) {  // kInstantiationSizeInWords
+        if ((instantiations->ptr()->data()[i] == instantiator_type_args) &&
+            (instantiations->ptr()->data()[i + 1] == function_type_args)) {
+          // Found in the cache.
+          SP[-1] = instantiations->ptr()->data()[i + 2];
+          goto InstantiateTypeArgumentsTOSDone;
+        }
+      }
+
+      // Cache lookup failed, call runtime.
+      SP[1] = type_arguments;
+      SP[2] = instantiator_type_args;
+      SP[3] = function_type_args;
+
+      Exit(thread, FP, SP + 4, pc);
+      NativeArguments args(thread, 3, SP + 1, SP - 1);
+      INVOKE_RUNTIME(DRT_InstantiateTypeArguments, args);
+    }
+
+  InstantiateTypeArgumentsTOSDone:
+    SP -= 1;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(Throw, A);
+    {
+      SP[1] = 0;  // Space for result.
+      Exit(thread, FP, SP + 2, pc);
+      if (rA == 0) {  // Throw
+        NativeArguments args(thread, 1, SP, SP + 1);
+        INVOKE_RUNTIME(DRT_Throw, args);
+      } else {  // ReThrow
+        NativeArguments args(thread, 2, SP - 1, SP + 1);
+        INVOKE_RUNTIME(DRT_ReThrow, args);
+      }
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(Drop1, 0);
+    SP--;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(Drop, 0);
+    SP -= rA;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DropR, 0);
+    RawObject* result = SP[0];
+    SP -= rA;
+    SP[0] = result;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(LoadConstant, A_D);
+    FP[rA] = LOAD_CONSTANT(rD);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(PushConstant, __D);
+    *++SP = LOAD_CONSTANT(rD);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(Push, A_X);
+    *++SP = FP[rD];
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(Move, A_X);
+    FP[rA] = FP[rD];
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(Swap, A_X);
+    RawObject* tmp = FP[rD];
+    FP[rD] = FP[rA];
+    FP[rA] = tmp;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(StoreLocal, A_X);
+    FP[rD] = *SP;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(PopLocal, A_X);
+    FP[rD] = *SP--;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(MoveSpecial, A_D);
+    FP[rA] = special_[rD];
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(BooleanNegateTOS, 0);
+    SP[0] = (SP[0] == true_value) ? false_value : true_value;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(BooleanNegate, A_D);
+    FP[rA] = (FP[rD] == true_value) ? false_value : true_value;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(IndirectStaticCall, A_D);
+
+    // Check if single stepping.
+    if (thread->isolate()->single_step()) {
+      Exit(thread, FP, SP + 1, pc);
+      NativeArguments args(thread, 0, NULL, NULL);
+      INVOKE_RUNTIME(DRT_SingleStepHandler, args);
+    }
+
+    // Invoke target function.
+    {
+      const uint16_t argc = rA;
+      // Look up the function in the ICData.
+      RawObject* ic_data_obj = SP[0];
+      RawICData* ic_data = RAW_CAST(ICData, ic_data_obj);
+      RawObject** data = ic_data->ptr()->ic_data_->ptr()->data();
+      InterpreterHelpers::IncrementICUsageCount(data, 0, 0);
+      SP[0] = data[ICData::TargetIndexFor(ic_data->ptr()->state_bits_ & 0x3)];
+      RawObject** call_base = SP - argc;
+      RawObject** call_top = SP;  // *SP contains function
+      argdesc_ = static_cast<RawArray*>(LOAD_CONSTANT(rD));
+      Invoke(thread, call_base, call_top, &pc, &FP, &SP);
+    }
+
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(StaticCall, A_D);
+    const uint16_t argc = rA;
+    RawObject** call_base = SP - argc;
+    RawObject** call_top = SP;  // *SP contains function
+    argdesc_ = static_cast<RawArray*>(LOAD_CONSTANT(rD));
+    Invoke(thread, call_base, call_top, &pc, &FP, &SP);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(InstanceCall1, A_D);
+
+    // Check if single stepping.
+    if (thread->isolate()->single_step()) {
+      Exit(thread, FP, SP + 1, pc);
+      NativeArguments args(thread, 0, NULL, NULL);
+      INVOKE_RUNTIME(DRT_SingleStepHandler, args);
+    }
+
+    {
+      const uint16_t argc = rA;
+      const uint16_t kidx = rD;
+
+      RawObject** call_base = SP - argc + 1;
+      RawObject** call_top = SP + 1;
+
+      RawICData* icdata = RAW_CAST(ICData, LOAD_CONSTANT(kidx));
+      InterpreterHelpers::IncrementUsageCounter(
+          RAW_CAST(Function, icdata->ptr()->owner_));
+      InstanceCall1(thread, icdata, call_base, call_top, &pc, &FP, &SP,
+                    false /* optimized */);
+    }
+
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(InstanceCall2, A_D);
+    if (thread->isolate()->single_step()) {
+      Exit(thread, FP, SP + 1, pc);
+      NativeArguments args(thread, 0, NULL, NULL);
+      INVOKE_RUNTIME(DRT_SingleStepHandler, args);
+    }
+
+    {
+      const uint16_t argc = rA;
+      const uint16_t kidx = rD;
+
+      RawObject** call_base = SP - argc + 1;
+      RawObject** call_top = SP + 1;
+
+      RawICData* icdata = RAW_CAST(ICData, LOAD_CONSTANT(kidx));
+      InterpreterHelpers::IncrementUsageCounter(
+          RAW_CAST(Function, icdata->ptr()->owner_));
+      InstanceCall2(thread, icdata, call_base, call_top, &pc, &FP, &SP,
+                    false /* optimized */);
+    }
+
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(InstanceCall1Opt, A_D);
+
+    {
+      const uint16_t argc = rA;
+      const uint16_t kidx = rD;
+
+      RawObject** call_base = SP - argc + 1;
+      RawObject** call_top = SP + 1;
+
+      RawICData* icdata = RAW_CAST(ICData, LOAD_CONSTANT(kidx));
+      InterpreterHelpers::IncrementUsageCounter(FrameFunction(FP));
+      InstanceCall1(thread, icdata, call_base, call_top, &pc, &FP, &SP,
+                    true /* optimized */);
+    }
+
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(InstanceCall2Opt, A_D);
+
+    {
+      const uint16_t argc = rA;
+      const uint16_t kidx = rD;
+
+      RawObject** call_base = SP - argc + 1;
+      RawObject** call_top = SP + 1;
+
+      RawICData* icdata = RAW_CAST(ICData, LOAD_CONSTANT(kidx));
+      InterpreterHelpers::IncrementUsageCounter(FrameFunction(FP));
+      InstanceCall2(thread, icdata, call_base, call_top, &pc, &FP, &SP,
+                    true /* optimized */);
+    }
+
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(PushPolymorphicInstanceCall, A_D);
+    const uint8_t argc = rA;
+    const intptr_t cids_length = rD;
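+    // The cid/target table is encoded inline in the bytecode stream right
+    // after this instruction; on a hit, the extra pc++ below is assumed to
+    // skip a slow-path call that follows the table.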
+    RawObject** args = SP - argc + 1;
+    const intptr_t receiver_cid = InterpreterHelpers::GetClassId(args[0]);
+    for (intptr_t i = 0; i < 2 * cids_length; i += 2) {
+      const intptr_t icdata_cid = KernelBytecode::DecodeD(*(pc + i));
+      if (receiver_cid == icdata_cid) {
+        RawFunction* target = RAW_CAST(
+            Function, LOAD_CONSTANT(KernelBytecode::DecodeD(*(pc + i + 1))));
+        *++SP = target;
+        pc++;
+        break;
+      }
+    }
+    pc += 2 * cids_length;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(PushPolymorphicInstanceCallByRange, A_D);
+    const uint8_t argc = rA;
+    const intptr_t cids_length = rD;
+    RawObject** args = SP - argc + 1;
+    const intptr_t receiver_cid = InterpreterHelpers::GetClassId(args[0]);
+    for (intptr_t i = 0; i < 3 * cids_length; i += 3) {
+      // Note unsigned types to get an unsigned range compare.
+      const uintptr_t cid_start = KernelBytecode::DecodeD(*(pc + i));
+      const uintptr_t cids = KernelBytecode::DecodeD(*(pc + i + 1));
+      if (receiver_cid - cid_start < cids) {
+        RawFunction* target = RAW_CAST(
+            Function, LOAD_CONSTANT(KernelBytecode::DecodeD(*(pc + i + 2))));
+        *++SP = target;
+        pc++;
+        break;
+      }
+    }
+    pc += 3 * cids_length;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(NativeCall, A_B_C);
+    NativeFunctionWrapper trampoline =
+        reinterpret_cast<NativeFunctionWrapper>(LOAD_CONSTANT(rA));
+    Dart_NativeFunction function =
+        reinterpret_cast<Dart_NativeFunction>(LOAD_CONSTANT(rB));
+    intptr_t argc_tag = reinterpret_cast<intptr_t>(LOAD_CONSTANT(rC));
+    const intptr_t num_arguments = NativeArguments::ArgcBits::decode(argc_tag);
+
+    *++SP = null_value;  // Result slot.
+
+    RawObject** incoming_args = SP - num_arguments;
+    RawObject** return_slot = SP;
+    Exit(thread, FP, SP, pc);
+    NativeArguments args(thread, argc_tag, incoming_args, return_slot);
+    INVOKE_NATIVE(trampoline, function,
+                  reinterpret_cast<Dart_NativeArguments>(&args));
+
+    *(SP - num_arguments) = *return_slot;
+    SP -= num_arguments;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(OneByteStringFromCharCode, A_X);
+    const intptr_t char_code = Smi::Value(RAW_CAST(Smi, FP[rD]));
+    ASSERT(char_code >= 0);
+    ASSERT(char_code <= 255);
+    RawString** strings = Symbols::PredefinedAddress();
+    const intptr_t index = char_code + Symbols::kNullCharCodeSymbolOffset;
+    FP[rA] = strings[index];
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(StringToCharCode, A_X);
+    RawOneByteString* str = RAW_CAST(OneByteString, FP[rD]);
+    if (str->ptr()->length_ == Smi::New(1)) {
+      FP[rA] = Smi::New(str->ptr()->data()[0]);
+    } else {
+      FP[rA] = Smi::New(-1);
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(AddTOS, A_B_C);
+    SMI_FASTPATH_TOS(intptr_t, SignedAddWithOverflow);
+    DISPATCH();
+  }
+  {
+    BYTECODE(SubTOS, A_B_C);
+    SMI_FASTPATH_TOS(intptr_t, SignedSubWithOverflow);
+    DISPATCH();
+  }
+  {
+    BYTECODE(MulTOS, A_B_C);
+    SMI_FASTPATH_TOS(intptr_t, SMI_MUL);
+    DISPATCH();
+  }
+  {
+    BYTECODE(BitOrTOS, A_B_C);
+    SMI_FASTPATH_TOS(intptr_t, SMI_BITOR);
+    DISPATCH();
+  }
+  {
+    BYTECODE(BitAndTOS, A_B_C);
+    SMI_FASTPATH_TOS(intptr_t, SMI_BITAND);
+    DISPATCH();
+  }
+  {
+    BYTECODE(EqualTOS, A_B_C);
+    SMI_FASTPATH_TOS(RawObject*, SMI_EQ);
+    DISPATCH();
+  }
+  {
+    BYTECODE(LessThanTOS, A_B_C);
+    SMI_FASTPATH_TOS(RawObject*, SMI_LT);
+    DISPATCH();
+  }
+  {
+    BYTECODE(GreaterThanTOS, A_B_C);
+    SMI_FASTPATH_TOS(RawObject*, SMI_GT);
+    DISPATCH();
+  }
+  {
+    BYTECODE(SmiAddTOS, 0);
+    RawSmi* left = Smi::RawCast(SP[-1]);
+    RawSmi* right = Smi::RawCast(SP[-0]);
+    SP--;
+    SP[0] = Smi::New(Smi::Value(left) + Smi::Value(right));
+    DISPATCH();
+  }
+  {
+    BYTECODE(SmiSubTOS, 0);
+    RawSmi* left = Smi::RawCast(SP[-1]);
+    RawSmi* right = Smi::RawCast(SP[-0]);
+    SP--;
+    SP[0] = Smi::New(Smi::Value(left) - Smi::Value(right));
+    DISPATCH();
+  }
+  {
+    BYTECODE(SmiMulTOS, 0);
+    RawSmi* left = Smi::RawCast(SP[-1]);
+    RawSmi* right = Smi::RawCast(SP[-0]);
+    SP--;
+    SP[0] = Smi::New(Smi::Value(left) * Smi::Value(right));
+    DISPATCH();
+  }
+  {
+    BYTECODE(SmiBitAndTOS, 0);
+    RawSmi* left = Smi::RawCast(SP[-1]);
+    RawSmi* right = Smi::RawCast(SP[-0]);
+    SP--;
+    SP[0] = Smi::New(Smi::Value(left) & Smi::Value(right));
+    DISPATCH();
+  }
+  {
+    BYTECODE(Add, A_B_C);
+    SMI_OP_CHECK(intptr_t, SignedAddWithOverflow);
+    DISPATCH();
+  }
+  {
+    BYTECODE(Sub, A_B_C);
+    SMI_OP_CHECK(intptr_t, SignedSubWithOverflow);
+    DISPATCH();
+  }
+  {
+    BYTECODE(Mul, A_B_C);
+    SMI_OP_CHECK(intptr_t, SMI_MUL);
+    DISPATCH();
+  }
+  {
+    BYTECODE(Neg, A_D);
+    const intptr_t value = reinterpret_cast<intptr_t>(FP[rD]);
+    intptr_t* out = reinterpret_cast<intptr_t*>(&FP[rA]);
+    if (LIKELY(!SignedSubWithOverflow(0, value, out))) {
+      pc++;
+    }
+    DISPATCH();
+  }
+  {
+    BYTECODE(BitOr, A_B_C);
+    SMI_OP_NOCHECK(intptr_t, SMI_BITOR);
+    DISPATCH();
+  }
+  {
+    BYTECODE(BitAnd, A_B_C);
+    SMI_OP_NOCHECK(intptr_t, SMI_BITAND);
+    DISPATCH();
+  }
+  {
+    BYTECODE(BitXor, A_B_C);
+    SMI_OP_NOCHECK(intptr_t, SMI_BITXOR);
+    DISPATCH();
+  }
+  {
+    BYTECODE(BitNot, A_D);
+    const intptr_t value = reinterpret_cast<intptr_t>(FP[rD]);
+    *reinterpret_cast<intptr_t*>(&FP[rA]) = ~value & (~kSmiTagMask);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(Div, A_B_C);
+    const intptr_t rhs = reinterpret_cast<intptr_t>(FP[rC]);
+    if (rhs != 0) {
+      const intptr_t lhs = reinterpret_cast<intptr_t>(FP[rB]);
+      const intptr_t res = (lhs >> kSmiTagSize) / (rhs >> kSmiTagSize);
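+      // 0x40000000 is the one quotient that cannot be retagged as a Smi: it
+      // arises from kSmiMin / -1 (assuming 31-bit Smi payloads here).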
+      const intptr_t untaggable = 0x40000000L;
+      if (res != untaggable) {
+        *reinterpret_cast<intptr_t*>(&FP[rA]) = res << kSmiTagSize;
+        pc++;
+      }
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(Mod, A_B_C);
+    const intptr_t rhs = reinterpret_cast<intptr_t>(FP[rC]);
+    if (rhs != 0) {
+      const intptr_t lhs = reinterpret_cast<intptr_t>(FP[rB]);
+      const intptr_t res = ((lhs >> kSmiTagSize) % (rhs >> kSmiTagSize))
+                           << kSmiTagSize;
+      *reinterpret_cast<intptr_t*>(&FP[rA]) =
+          (res < 0) ? ((rhs < 0) ? (res - rhs) : (res + rhs)) : res;
+      pc++;
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(Shl, A_B_C);
+    const intptr_t rhs = reinterpret_cast<intptr_t>(FP[rC]) >> kSmiTagSize;
+    const int kBitsPerInt32 = 32;
+    if (static_cast<uintptr_t>(rhs) < kBitsPerInt32) {
+      const int32_t lhs = reinterpret_cast<intptr_t>(FP[rB]);
+      const int32_t res = lhs << rhs;
+      if (lhs == (res >> rhs)) {
+        *reinterpret_cast<intptr_t*>(&FP[rA]) = static_cast<intptr_t>(res);
+        pc++;
+      }
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(Shr, A_B_C);
+    const intptr_t rhs = reinterpret_cast<intptr_t>(FP[rC]) >> kSmiTagSize;
+    if (rhs >= 0) {
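+      // Clamp the shift amount to 31; assuming values in 32-bit Smi range,
+      // an arithmetic shift by 31 or more leaves only the sign.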
+      const intptr_t shift_amount = (rhs >= 32) ? (32 - 1) : rhs;
+      const intptr_t lhs = reinterpret_cast<intptr_t>(FP[rB]) >> kSmiTagSize;
+      *reinterpret_cast<intptr_t*>(&FP[rA]) = (lhs >> shift_amount)
+                                              << kSmiTagSize;
+      pc++;
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(ShlImm, A_B_C);
+    const uint8_t shift = rC;
+    const intptr_t lhs = reinterpret_cast<intptr_t>(FP[rB]);
+    FP[rA] = reinterpret_cast<RawObject*>(lhs << shift);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(Min, A_B_C);
+    const intptr_t lhs = reinterpret_cast<intptr_t>(FP[rB]);
+    const intptr_t rhs = reinterpret_cast<intptr_t>(FP[rC]);
+    FP[rA] = reinterpret_cast<RawObject*>((lhs < rhs) ? lhs : rhs);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(Max, A_B_C);
+    const intptr_t lhs = reinterpret_cast<intptr_t>(FP[rB]);
+    const intptr_t rhs = reinterpret_cast<intptr_t>(FP[rC]);
+    FP[rA] = reinterpret_cast<RawObject*>((lhs > rhs) ? lhs : rhs);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(UnboxInt32, A_B_C);
+    const intptr_t box_cid = InterpreterHelpers::GetClassId(FP[rB]);
+    const bool may_truncate = rC == 1;
+    if (box_cid == kSmiCid) {
+      const intptr_t value = reinterpret_cast<intptr_t>(FP[rB]) >> kSmiTagSize;
+      const int32_t value32 = static_cast<int32_t>(value);
+      if (may_truncate || (value == static_cast<intptr_t>(value32))) {
+        FP[rA] = reinterpret_cast<RawObject*>(value);
+        pc++;
+      }
+    } else if (box_cid == kMintCid) {
+      RawMint* mint = RAW_CAST(Mint, FP[rB]);
+      const int64_t value = mint->ptr()->value_;
+      const int32_t value32 = static_cast<int32_t>(value);
+      if (may_truncate || (value == static_cast<int64_t>(value32))) {
+        FP[rA] = reinterpret_cast<RawObject*>(value);
+        pc++;
+      }
+    }
+    DISPATCH();
+  }
+
+#if defined(ARCH_IS_64_BIT)
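+  // On 64-bit architectures, unboxed doubles and int32 values fit in a frame
+  // slot, so the handlers below store raw bit patterns directly in FP slots
+  // via bit_cast instead of allocating boxes.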
+  {
+    BYTECODE(WriteIntoDouble, A_D);
+    const double value = bit_cast<double, RawObject*>(FP[rD]);
+    RawDouble* box = RAW_CAST(Double, FP[rA]);
+    box->ptr()->value_ = value;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(UnboxDouble, A_D);
+    const RawDouble* box = RAW_CAST(Double, FP[rD]);
+    FP[rA] = bit_cast<RawObject*, double>(box->ptr()->value_);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(CheckedUnboxDouble, A_D);
+    const intptr_t box_cid = InterpreterHelpers::GetClassId(FP[rD]);
+    if (box_cid == kSmiCid) {
+      const intptr_t value = reinterpret_cast<intptr_t>(FP[rD]) >> kSmiTagSize;
+      const double result = static_cast<double>(value);
+      FP[rA] = bit_cast<RawObject*, double>(result);
+      pc++;
+    } else if (box_cid == kDoubleCid) {
+      const RawDouble* box = RAW_CAST(Double, FP[rD]);
+      FP[rA] = bit_cast<RawObject*, double>(box->ptr()->value_);
+      pc++;
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DoubleToSmi, A_D);
+    const double value = bit_cast<double, RawObject*>(FP[rD]);
+    if (!isnan(value)) {
+      const intptr_t result = static_cast<intptr_t>(value);
+      if ((result <= Smi::kMaxValue) && (result >= Smi::kMinValue)) {
+        FP[rA] = reinterpret_cast<RawObject*>(result << kSmiTagSize);
+        pc++;
+      }
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(SmiToDouble, A_D);
+    const intptr_t value = reinterpret_cast<intptr_t>(FP[rD]) >> kSmiTagSize;
+    const double result = static_cast<double>(value);
+    FP[rA] = bit_cast<RawObject*, double>(result);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DAdd, A_B_C);
+    const double lhs = bit_cast<double, RawObject*>(FP[rB]);
+    const double rhs = bit_cast<double, RawObject*>(FP[rC]);
+    FP[rA] = bit_cast<RawObject*, double>(lhs + rhs);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DSub, A_B_C);
+    const double lhs = bit_cast<double, RawObject*>(FP[rB]);
+    const double rhs = bit_cast<double, RawObject*>(FP[rC]);
+    FP[rA] = bit_cast<RawObject*, double>(lhs - rhs);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DMul, A_B_C);
+    const double lhs = bit_cast<double, RawObject*>(FP[rB]);
+    const double rhs = bit_cast<double, RawObject*>(FP[rC]);
+    FP[rA] = bit_cast<RawObject*, double>(lhs * rhs);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DDiv, A_B_C);
+    const double lhs = bit_cast<double, RawObject*>(FP[rB]);
+    const double rhs = bit_cast<double, RawObject*>(FP[rC]);
+    const double result = lhs / rhs;
+    FP[rA] = bit_cast<RawObject*, double>(result);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DNeg, A_D);
+    const double value = bit_cast<double, RawObject*>(FP[rD]);
+    FP[rA] = bit_cast<RawObject*, double>(-value);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DSqrt, A_D);
+    const double value = bit_cast<double, RawObject*>(FP[rD]);
+    FP[rA] = bit_cast<RawObject*, double>(sqrt(value));
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DSin, A_D);
+    const double value = bit_cast<double, RawObject*>(FP[rD]);
+    FP[rA] = bit_cast<RawObject*, double>(sin(value));
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DCos, A_D);
+    const double value = bit_cast<double, RawObject*>(FP[rD]);
+    FP[rA] = bit_cast<RawObject*, double>(cos(value));
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DPow, A_B_C);
+    const double lhs = bit_cast<double, RawObject*>(FP[rB]);
+    const double rhs = bit_cast<double, RawObject*>(FP[rC]);
+    const double result = pow(lhs, rhs);
+    FP[rA] = bit_cast<RawObject*, double>(result);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DMod, A_B_C);
+    const double lhs = bit_cast<double, RawObject*>(FP[rB]);
+    const double rhs = bit_cast<double, RawObject*>(FP[rC]);
+    const double result = DartModulo(lhs, rhs);
+    FP[rA] = bit_cast<RawObject*, double>(result);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DMin, A_B_C);
+    const double lhs = bit_cast<double, RawObject*>(FP[rB]);
+    const double rhs = bit_cast<double, RawObject*>(FP[rC]);
+    FP[rA] = bit_cast<RawObject*, double>(fmin(lhs, rhs));
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DMax, A_B_C);
+    const double lhs = bit_cast<double, RawObject*>(FP[rB]);
+    const double rhs = bit_cast<double, RawObject*>(FP[rC]);
+    FP[rA] = bit_cast<RawObject*, double>(fmax(lhs, rhs));
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DTruncate, A_D);
+    const double value = bit_cast<double, RawObject*>(FP[rD]);
+    FP[rA] = bit_cast<RawObject*, double>(trunc(value));
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DFloor, A_D);
+    const double value = bit_cast<double, RawObject*>(FP[rD]);
+    FP[rA] = bit_cast<RawObject*, double>(floor(value));
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DCeil, A_D);
+    const double value = bit_cast<double, RawObject*>(FP[rD]);
+    FP[rA] = bit_cast<RawObject*, double>(ceil(value));
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DoubleToFloat, A_D);
+    const double value = bit_cast<double, RawObject*>(FP[rD]);
+    const float valuef = static_cast<float>(value);
+    *reinterpret_cast<float*>(&FP[rA]) = valuef;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(FloatToDouble, A_D);
+    const float valuef = *reinterpret_cast<float*>(&FP[rD]);
+    const double value = static_cast<double>(valuef);
+    FP[rA] = bit_cast<RawObject*, double>(value);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DoubleIsNaN, A);
+    const double v = bit_cast<double, RawObject*>(FP[rA]);
+    if (!isnan(v)) {
+      pc++;
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DoubleIsInfinite, A);
+    const double v = bit_cast<double, RawObject*>(FP[rA]);
+    if (!isinf(v)) {
+      pc++;
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(LoadIndexedFloat32, A_B_C);
+    uint8_t* data = InterpreterHelpers::GetTypedData(FP[rB], FP[rC]);
+    const uint32_t value = *reinterpret_cast<uint32_t*>(data);
+    const uint64_t value64 = value;
+    FP[rA] = reinterpret_cast<RawObject*>(value64);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(LoadIndexed4Float32, A_B_C);
+    ASSERT(RawObject::IsTypedDataClassId(FP[rB]->GetClassId()));
+    RawTypedData* array = reinterpret_cast<RawTypedData*>(FP[rB]);
+    RawSmi* index = RAW_CAST(Smi, FP[rC]);
+    ASSERT(InterpreterHelpers::CheckIndex(index, array->ptr()->length_));
+    const uint32_t value =
+        reinterpret_cast<uint32_t*>(array->ptr()->data())[Smi::Value(index)];
+    const uint64_t value64 = value;  // Zero-extend to clear the high bits.
+    FP[rA] = reinterpret_cast<RawObject*>(value64);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(LoadIndexedFloat64, A_B_C);
+    uint8_t* data = InterpreterHelpers::GetTypedData(FP[rB], FP[rC]);
+    *reinterpret_cast<uint64_t*>(&FP[rA]) = *reinterpret_cast<uint64_t*>(data);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(LoadIndexed8Float64, A_B_C);
+    ASSERT(RawObject::IsTypedDataClassId(FP[rB]->GetClassId()));
+    RawTypedData* array = reinterpret_cast<RawTypedData*>(FP[rB]);
+    RawSmi* index = RAW_CAST(Smi, FP[rC]);
+    ASSERT(InterpreterHelpers::CheckIndex(index, array->ptr()->length_));
+    const int64_t value =
+        reinterpret_cast<int64_t*>(array->ptr()->data())[Smi::Value(index)];
+    FP[rA] = reinterpret_cast<RawObject*>(value);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(StoreIndexedFloat32, A_B_C);
+    uint8_t* data = InterpreterHelpers::GetTypedData(FP[rA], FP[rB]);
+    const uint64_t value = reinterpret_cast<uint64_t>(FP[rC]);
+    const uint32_t value32 = value;
+    *reinterpret_cast<uint32_t*>(data) = value32;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(StoreIndexed4Float32, A_B_C);
+    ASSERT(RawObject::IsTypedDataClassId(FP[rA]->GetClassId()));
+    RawTypedData* array = reinterpret_cast<RawTypedData*>(FP[rA]);
+    RawSmi* index = RAW_CAST(Smi, FP[rB]);
+    ASSERT(InterpreterHelpers::CheckIndex(index, array->ptr()->length_));
+    const uint64_t value = reinterpret_cast<uint64_t>(FP[rC]);
+    const uint32_t value32 = value;
+    reinterpret_cast<uint32_t*>(array->ptr()->data())[Smi::Value(index)] =
+        value32;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(StoreIndexedFloat64, A_B_C);
+    uint8_t* data = InterpreterHelpers::GetTypedData(FP[rA], FP[rB]);
+    *reinterpret_cast<uint64_t*>(data) = reinterpret_cast<uint64_t>(FP[rC]);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(StoreIndexed8Float64, A_B_C);
+    ASSERT(RawObject::IsTypedDataClassId(FP[rA]->GetClassId()));
+    RawTypedData* array = reinterpret_cast<RawTypedData*>(FP[rA]);
+    RawSmi* index = RAW_CAST(Smi, FP[rB]);
+    ASSERT(InterpreterHelpers::CheckIndex(index, array->ptr()->length_));
+    const int64_t value = reinterpret_cast<int64_t>(FP[rC]);
+    reinterpret_cast<int64_t*>(array->ptr()->data())[Smi::Value(index)] = value;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(BoxInt32, A_D);
+    // The casts sign-extend the low 32 bits into the high 32 bits.
+    const intptr_t value = reinterpret_cast<intptr_t>(FP[rD]);
+    const int32_t value32 = static_cast<int32_t>(value);
+    FP[rA] = Smi::New(static_cast<intptr_t>(value32));
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(BoxUint32, A_D);
+    // The casts zero out the high 32 bits.
+    const uintptr_t value = reinterpret_cast<uintptr_t>(FP[rD]);
+    const uint32_t value32 = static_cast<uint32_t>(value);
+    FP[rA] = Smi::New(static_cast<intptr_t>(value32));
+    DISPATCH();
+  }
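+
+  // Note: the unboxed double and int32/uint32 bytecodes above rely on a
+  // word-sized FP slot holding a full 64-bit payload, so they are compiled
+  // only on 64-bit targets. The stubs below cover 32-bit targets, where
+  // these instructions are either not yet implemented or presumed never to
+  // be emitted (UNIMPLEMENTED vs UNREACHABLE).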
+#else   // defined(ARCH_IS_64_BIT)
+  {
+    BYTECODE(WriteIntoDouble, A_D);
+    UNIMPLEMENTED();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(UnboxDouble, A_D);
+    UNIMPLEMENTED();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(CheckedUnboxDouble, A_D);
+    UNIMPLEMENTED();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DoubleToSmi, A_D);
+    UNREACHABLE();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(SmiToDouble, A_D);
+    UNIMPLEMENTED();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DAdd, A_B_C);
+    UNIMPLEMENTED();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DSub, A_B_C);
+    UNIMPLEMENTED();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DMul, A_B_C);
+    UNIMPLEMENTED();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DDiv, A_B_C);
+    UNIMPLEMENTED();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DNeg, A_D);
+    UNIMPLEMENTED();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DSqrt, A_D);
+    UNREACHABLE();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DSin, A_D);
+    UNREACHABLE();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DCos, A_D);
+    UNREACHABLE();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DPow, A_B_C);
+    UNREACHABLE();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DMod, A_B_C);
+    UNREACHABLE();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DMin, A_B_C);
+    UNREACHABLE();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DMax, A_B_C);
+    UNREACHABLE();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DTruncate, A_D);
+    UNREACHABLE();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DFloor, A_D);
+    UNREACHABLE();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DCeil, A_D);
+    UNREACHABLE();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DoubleToFloat, A_D);
+    UNREACHABLE();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(FloatToDouble, A_D);
+    UNREACHABLE();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DoubleIsNaN, A_D);
+    UNREACHABLE();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DoubleIsInfinite, A_D);
+    UNREACHABLE();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(LoadIndexedFloat32, A_B_C);
+    UNREACHABLE();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(LoadIndexed4Float32, A_B_C);
+    UNREACHABLE();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(LoadIndexedFloat64, A_B_C);
+    UNREACHABLE();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(LoadIndexed8Float64, A_B_C);
+    UNREACHABLE();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(StoreIndexedFloat32, A_B_C);
+    UNREACHABLE();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(StoreIndexed4Float32, A_B_C);
+    UNREACHABLE();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(StoreIndexedFloat64, A_B_C);
+    UNREACHABLE();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(StoreIndexed8Float64, A_B_C);
+    UNREACHABLE();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(BoxInt32, A_D);
+    UNREACHABLE();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(BoxUint32, A_D);
+    UNREACHABLE();
+    DISPATCH();
+  }
+#endif  // defined(ARCH_IS_64_BIT)
+
+  // Return and return-like instructions (Intrinsic).
+  {
+    RawObject* result;  // Result to return to the caller.
+
+    BYTECODE(Intrinsic, A);
+    // Try invoking the intrinsic handler. If it succeeds (returns true),
+    // return the value it produced to the caller.
+    result = null_value;
+    if (!intrinsics_[rA](thread, FP, &result)) {
+      DISPATCH();
+    }
+    goto ReturnImpl;
+
+    BYTECODE(Return, A);
+    result = FP[rA];
+    goto ReturnImpl;
+
+    BYTECODE(ReturnTOS, 0);
+    result = *SP;
+    // Fall through to the ReturnImpl.
+
+  ReturnImpl:
+    // Restore caller PC.
+    pc = SavedCallerPC(FP);
+    pc_ = reinterpret_cast<uword>(pc);  // For the profiler.
+
+    // Check if it is a fake PC marking the entry frame.
+    if ((reinterpret_cast<uword>(pc) & 2) != 0) {
+      const intptr_t argc = reinterpret_cast<uword>(pc) >> 2;
+      fp_ = reinterpret_cast<RawObject**>(FrameArguments(FP, argc + 1)[0]);
+      thread->set_top_exit_frame_info(reinterpret_cast<uword>(fp_));
+      thread->set_top_resource(top_resource);
+      thread->set_vm_tag(vm_tag);
+      return result;
+    }
+
+    // Look at the caller to determine how many arguments to pop.
+    const uint8_t argc = KernelBytecode::DecodeArgc(pc[-1]);
+
+    // Restore SP, FP and PP. Push result and dispatch.
+    SP = FrameArguments(FP, argc);
+    FP = SavedCallerFP(FP);
+    pp_ = InterpreterHelpers::FrameCode(FP)->ptr()->object_pool_;
+    *SP = result;
+    DISPATCH();
+  }
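+
+  // Note on the entry-frame marker checked in ReturnImpl above: genuine
+  // return PCs point into word-aligned bytecode, so bit 1 is never set in
+  // them. The entry sequence presumably stores (argc << 2) | 2 as the fake
+  // caller PC; e.g. an entry frame with 3 arguments is marked by the value
+  // 14, from which argc is recovered with a shift right by 2.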
+
+  {
+    BYTECODE(StoreStaticTOS, A_D);
+    RawField* field = reinterpret_cast<RawField*>(LOAD_CONSTANT(rD));
+    RawInstance* value = static_cast<RawInstance*>(*SP--);
+    field->StorePointer(&field->ptr()->value_.static_value_, value);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(PushStatic, A_D);
+    RawField* field = reinterpret_cast<RawField*>(LOAD_CONSTANT(rD));
+    // Note: field is also on the stack, hence no increment.
+    *SP = field->ptr()->value_.static_value_;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(StoreField, A_B_C);
+    const uint16_t offset_in_words = rB;
+    const uint16_t value_reg = rC;
+
+    RawInstance* instance = reinterpret_cast<RawInstance*>(FP[rA]);
+    RawObject* value = FP[value_reg];
+
+    instance->StorePointer(
+        reinterpret_cast<RawObject**>(instance->ptr()) + offset_in_words,
+        value);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(StoreFieldExt, A_D);
+    // The offset is in the following nop instruction, which is skipped.
+    const uint16_t offset_in_words = KernelBytecode::DecodeD(*pc++);
+    RawInstance* instance = reinterpret_cast<RawInstance*>(FP[rA]);
+    RawObject* value = FP[rD];
+
+    instance->StorePointer(
+        reinterpret_cast<RawObject**>(instance->ptr()) + offset_in_words,
+        value);
+    DISPATCH();
+  }
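+
+  // Note: StoreFieldExt above and LoadFieldExt below use a wide-operand
+  // scheme: the 16-bit D operand of a trailing Nop carries the field
+  // offset, allowing offsets too large for the narrow operands of
+  // StoreField and LoadField. The *pc++ read consumes that Nop, so it is
+  // never dispatched as an instruction.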
+
+  {
+    BYTECODE(StoreFieldTOS, A_D);
+    const uword offset_in_words =
+        static_cast<uword>(Smi::Value(RAW_CAST(Smi, LOAD_CONSTANT(rD))));
+    RawInstance* instance = reinterpret_cast<RawInstance*>(SP[-1]);
+    RawObject* value = reinterpret_cast<RawObject*>(SP[0]);
+    SP -= 2;  // Drop instance and value.
+    instance->StorePointer(
+        reinterpret_cast<RawObject**>(instance->ptr()) + offset_in_words,
+        value);
+
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(LoadField, A_B_C);
+    const uint16_t instance_reg = rB;
+    const uint16_t offset_in_words = rC;
+    RawInstance* instance = reinterpret_cast<RawInstance*>(FP[instance_reg]);
+    FP[rA] = reinterpret_cast<RawObject**>(instance->ptr())[offset_in_words];
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(LoadFieldExt, A_D);
+    // The offset is in the following nop instruction, which is skipped.
+    const uint16_t offset_in_words = KernelBytecode::DecodeD(*pc++);
+    const uint16_t instance_reg = rD;
+    RawInstance* instance = reinterpret_cast<RawInstance*>(FP[instance_reg]);
+    FP[rA] = reinterpret_cast<RawObject**>(instance->ptr())[offset_in_words];
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(LoadUntagged, A_B_C);
+    const uint16_t instance_reg = rB;
+    const uint16_t offset_in_words = rC;
+    RawInstance* instance = reinterpret_cast<RawInstance*>(FP[instance_reg]);
+    FP[rA] = reinterpret_cast<RawObject**>(instance)[offset_in_words];
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(LoadFieldTOS, __D);
+    const uword offset_in_words =
+        static_cast<uword>(Smi::Value(RAW_CAST(Smi, LOAD_CONSTANT(rD))));
+    RawInstance* instance = static_cast<RawInstance*>(SP[0]);
+    SP[0] = reinterpret_cast<RawObject**>(instance->ptr())[offset_in_words];
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(InitStaticTOS, 0);
+    RawField* field = static_cast<RawField*>(*SP--);
+    RawObject* value = field->ptr()->value_.static_value_;
+    if ((value == Object::sentinel().raw()) ||
+        (value == Object::transition_sentinel().raw())) {
+      // Note: SP[1] already contains the field object.
+      SP[2] = 0;
+      Exit(thread, FP, SP + 3, pc);
+      NativeArguments args(thread, 1, SP + 1, SP + 2);
+      INVOKE_RUNTIME(DRT_InitStaticField, args);
+    }
+    DISPATCH();
+  }
+
+  // TODO(vegorov): Allocation bytecodes can benefit from the new-space
+  // allocation fast path that does not transition into the runtime system.
+  {
+    BYTECODE(AllocateUninitializedContext, A_D);
+    const uint16_t num_context_variables = rD;
+    const intptr_t instance_size = Context::InstanceSize(num_context_variables);
+    const uword start =
+        thread->heap()->new_space()->TryAllocateInTLAB(thread, instance_size);
+    if (LIKELY(start != 0)) {
+      uint32_t tags = 0;
+      tags = RawObject::ClassIdTag::update(kContextCid, tags);
+      tags = RawObject::SizeTag::update(instance_size, tags);
+      // Also writes 0 in the hash_ field of the header.
+      *reinterpret_cast<uword*>(start + Array::tags_offset()) = tags;
+      *reinterpret_cast<uword*>(start + Context::num_variables_offset()) =
+          num_context_variables;
+      FP[rA] = reinterpret_cast<RawObject*>(start + kHeapObjectTag);
+      pc += 2;
+    }
+    DISPATCH();
+  }
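+
+  // Note on the fast-path pattern used above and in AllocateOpt,
+  // AllocateTOpt and CreateArrayOpt below: when the TLAB allocation
+  // succeeds, pc is advanced past the following instructions, which are
+  // presumably the slow-path sequence emitted by the compiler (here the
+  // AllocateContext call below); when TryAllocateInTLAB returns 0,
+  // execution falls through into that slow path.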
+
+  {
+    BYTECODE(AllocateContext, A_D);
+    const uint16_t num_context_variables = rD;
+    {
+      *++SP = 0;
+      SP[1] = Smi::New(num_context_variables);
+      Exit(thread, FP, SP + 2, pc);
+      NativeArguments args(thread, 1, SP + 1, SP);
+      INVOKE_RUNTIME(DRT_AllocateContext, args);
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(CloneContext, A);
+    {
+      SP[1] = SP[0];  // Context to clone.
+      Exit(thread, FP, SP + 2, pc);
+      NativeArguments args(thread, 1, SP + 1, SP);
+      INVOKE_RUNTIME(DRT_CloneContext, args);
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(AllocateOpt, A_D);
+    const uword tags =
+        static_cast<uword>(Smi::Value(RAW_CAST(Smi, LOAD_CONSTANT(rD))));
+    const intptr_t instance_size = RawObject::SizeTag::decode(tags);
+    const uword start =
+        thread->heap()->new_space()->TryAllocateInTLAB(thread, instance_size);
+    if (LIKELY(start != 0)) {
+      // Writes both the tags and the initial identity hash on 64 bit platforms.
+      *reinterpret_cast<uword*>(start + Instance::tags_offset()) = tags;
+      for (intptr_t current_offset = sizeof(RawInstance);
+           current_offset < instance_size; current_offset += kWordSize) {
+        *reinterpret_cast<RawObject**>(start + current_offset) = null_value;
+      }
+      FP[rA] = reinterpret_cast<RawObject*>(start + kHeapObjectTag);
+      pc += 2;
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(Allocate, A_D);
+    SP[1] = 0;                  // Space for the result.
+    SP[2] = LOAD_CONSTANT(rD);  // Class object.
+    SP[3] = null_value;         // Type arguments.
+    Exit(thread, FP, SP + 4, pc);
+    NativeArguments args(thread, 2, SP + 2, SP + 1);
+    INVOKE_RUNTIME(DRT_AllocateObject, args);
+    SP++;  // Result is in SP[1].
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(AllocateTOpt, A_D);
+    const uword tags = Smi::Value(RAW_CAST(Smi, LOAD_CONSTANT(rD)));
+    const intptr_t instance_size = RawObject::SizeTag::decode(tags);
+    const uword start =
+        thread->heap()->new_space()->TryAllocateInTLAB(thread, instance_size);
+    if (LIKELY(start != 0)) {
+      RawObject* type_args = SP[0];
+      const intptr_t type_args_offset = KernelBytecode::DecodeD(*pc);
+      // Writes both the tags and the initial identity hash on 64 bit platforms.
+      *reinterpret_cast<uword*>(start + Instance::tags_offset()) = tags;
+      for (intptr_t current_offset = sizeof(RawInstance);
+           current_offset < instance_size; current_offset += kWordSize) {
+        *reinterpret_cast<RawObject**>(start + current_offset) = null_value;
+      }
+      *reinterpret_cast<RawObject**>(start + type_args_offset) = type_args;
+      FP[rA] = reinterpret_cast<RawObject*>(start + kHeapObjectTag);
+      SP -= 1;  // Consume the type arguments on the stack.
+      pc += 4;
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(AllocateT, 0);
+    SP[1] = SP[-0];  // Class object.
+    SP[2] = SP[-1];  // Type arguments.
+    Exit(thread, FP, SP + 3, pc);
+    NativeArguments args(thread, 2, SP + 1, SP - 1);
+    INVOKE_RUNTIME(DRT_AllocateObject, args);
+    SP -= 1;  // Result is in SP - 1.
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(CreateArrayOpt, A_B_C);
+    if (LIKELY(!FP[rB]->IsHeapObject())) {
+      const intptr_t length = Smi::Value(RAW_CAST(Smi, FP[rB]));
+      if (LIKELY(static_cast<uintptr_t>(length) <= Array::kMaxElements)) {
+        const intptr_t fixed_size_plus_alignment_padding =
+            sizeof(RawArray) + kObjectAlignment - 1;
+        const intptr_t instance_size =
+            (fixed_size_plus_alignment_padding + length * kWordSize) &
+            ~(kObjectAlignment - 1);
+        const uword start = thread->heap()->new_space()->TryAllocateInTLAB(
+            thread, instance_size);
+        if (LIKELY(start != 0)) {
+          const intptr_t cid = kArrayCid;
+          uword tags = 0;
+          if (LIKELY(instance_size <= RawObject::SizeTag::kMaxSizeTag)) {
+            tags = RawObject::SizeTag::update(instance_size, tags);
+          }
+          tags = RawObject::ClassIdTag::update(cid, tags);
+          // Writes both the tags and the initial identity hash on 64 bit
+          // platforms.
+          *reinterpret_cast<uword*>(start + Instance::tags_offset()) = tags;
+          *reinterpret_cast<RawObject**>(start + Array::length_offset()) =
+              FP[rB];
+          *reinterpret_cast<RawObject**>(
+              start + Array::type_arguments_offset()) = FP[rC];
+          RawObject** data =
+              reinterpret_cast<RawObject**>(start + Array::data_offset());
+          for (intptr_t i = 0; i < length; i++) {
+            data[i] = null_value;
+          }
+          FP[rA] = reinterpret_cast<RawObject*>(start + kHeapObjectTag);
+          pc += 4;
+        }
+      }
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(CreateArrayTOS, 0);
+    SP[1] = SP[-0];  // Length.
+    SP[2] = SP[-1];  // Type.
+    Exit(thread, FP, SP + 3, pc);
+    NativeArguments args(thread, 2, SP + 1, SP - 1);
+    INVOKE_RUNTIME(DRT_AllocateArray, args);
+    SP -= 1;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(InstanceOf, 0);
+    // Stack: instance, instantiator type args, function type args, type, cache
+    RawInstance* instance = static_cast<RawInstance*>(SP[-4]);
+    RawTypeArguments* instantiator_type_arguments =
+        static_cast<RawTypeArguments*>(SP[-3]);
+    RawTypeArguments* function_type_arguments =
+        static_cast<RawTypeArguments*>(SP[-2]);
+    RawAbstractType* type = static_cast<RawAbstractType*>(SP[-1]);
+    RawSubtypeTestCache* cache = static_cast<RawSubtypeTestCache*>(SP[0]);
+
+    if (cache != null_value) {
+      const intptr_t cid = InterpreterHelpers::GetClassId(instance);
+
+      RawTypeArguments* instance_type_arguments =
+          static_cast<RawTypeArguments*>(null_value);
+      RawObject* instance_cid_or_function;
+      if (cid == kClosureCid) {
+        RawClosure* closure = static_cast<RawClosure*>(instance);
+        if (closure->ptr()->function_type_arguments_ != TypeArguments::null()) {
+          // Cache cannot be used for generic closures.
+          goto InstanceOfCallRuntime;
+        }
+        instance_type_arguments = closure->ptr()->instantiator_type_arguments_;
+        instance_cid_or_function = closure->ptr()->function_;
+      } else {
+        instance_cid_or_function = Smi::New(cid);
+
+        RawClass* instance_class = thread->isolate()->class_table()->At(cid);
+        if (instance_class->ptr()->num_type_arguments_ < 0) {
+          goto InstanceOfCallRuntime;
+        } else if (instance_class->ptr()->num_type_arguments_ > 0) {
+          instance_type_arguments = reinterpret_cast<RawTypeArguments**>(
+              instance->ptr())[instance_class->ptr()
+                                   ->type_arguments_field_offset_in_words_];
+        }
+      }
+
+      for (RawObject** entries = cache->ptr()->cache_->ptr()->data();
+           entries[0] != null_value;
+           entries += SubtypeTestCache::kTestEntryLength) {
+        if ((entries[SubtypeTestCache::kInstanceClassIdOrFunction] ==
+             instance_cid_or_function) &&
+            (entries[SubtypeTestCache::kInstanceTypeArguments] ==
+             instance_type_arguments) &&
+            (entries[SubtypeTestCache::kInstantiatorTypeArguments] ==
+             instantiator_type_arguments) &&
+            (entries[SubtypeTestCache::kFunctionTypeArguments] ==
+             function_type_arguments)) {
+          SP[-4] = entries[SubtypeTestCache::kTestResult];
+          goto InstanceOfOk;
+        }
+      }
+    }
+
+  // clang-format off
+  InstanceOfCallRuntime:
+    {
+      SP[1] = instance;
+      SP[2] = type;
+      SP[3] = instantiator_type_arguments;
+      SP[4] = function_type_arguments;
+      SP[5] = cache;
+      Exit(thread, FP, SP + 6, pc);
+      NativeArguments native_args(thread, 5, SP + 1, SP - 4);
+      INVOKE_RUNTIME(DRT_Instanceof, native_args);
+    }
+    // clang-format on
+
+  InstanceOfOk:
+    SP -= 4;
+    DISPATCH();
+  }
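+
+  // Note: the SubtypeTestCache probed above is a linear, null-terminated
+  // array of fixed-size entries. A sketch of one entry, using the slot
+  // indices referenced in the loop:
+  //
+  //   entries[kInstanceClassIdOrFunction]  Smi cid, or the closure function
+  //   entries[kInstanceTypeArguments]      instance type arguments
+  //   entries[kInstantiatorTypeArguments]  instantiator type arguments
+  //   entries[kFunctionTypeArguments]      function type arguments
+  //   entries[kTestResult]                 cached true/false result
+  //
+  // On a miss, DRT_Instanceof both computes the answer and is expected to
+  // extend the cache so that later checks hit.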
+
+  {
+    BYTECODE(BadTypeError, 0);
+    // Stack: instance, instantiator type args, function type args, type, name
+    RawObject** args = SP - 4;
+    if (args[0] != null_value) {
+      SP[1] = args[0];  // instance.
+      SP[2] = args[4];  // name.
+      SP[3] = args[3];  // type.
+      Exit(thread, FP, SP + 4, pc);
+      NativeArguments native_args(thread, 3, SP + 1, SP - 4);
+      INVOKE_RUNTIME(DRT_BadTypeError, native_args);
+      UNREACHABLE();
+    }
+    SP -= 4;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(AssertAssignable, A_D);
+    // Stack: instance, instantiator type args, function type args, type, name
+    RawObject** args = SP - 4;
+    const bool may_be_smi = (rA == 1);
+    const bool is_smi =
+        ((reinterpret_cast<intptr_t>(args[0]) & kSmiTagMask) == kSmiTag);
+    const bool smi_ok = is_smi && may_be_smi;
+    if (!smi_ok && (args[0] != null_value)) {
+      RawSubtypeTestCache* cache =
+          static_cast<RawSubtypeTestCache*>(LOAD_CONSTANT(rD));
+      if (cache != null_value) {
+        RawInstance* instance = static_cast<RawInstance*>(args[0]);
+        RawTypeArguments* instantiator_type_arguments =
+            static_cast<RawTypeArguments*>(args[1]);
+        RawTypeArguments* function_type_arguments =
+            static_cast<RawTypeArguments*>(args[2]);
+
+        const intptr_t cid = InterpreterHelpers::GetClassId(instance);
+
+        RawTypeArguments* instance_type_arguments =
+            static_cast<RawTypeArguments*>(null_value);
+        RawObject* instance_cid_or_function;
+        if (cid == kClosureCid) {
+          RawClosure* closure = static_cast<RawClosure*>(instance);
+          if (closure->ptr()->function_type_arguments_ !=
+              TypeArguments::null()) {
+            // Cache cannot be used for generic closures.
+            goto AssertAssignableCallRuntime;
+          }
+          instance_type_arguments =
+              closure->ptr()->instantiator_type_arguments_;
+          instance_cid_or_function = closure->ptr()->function_;
+        } else {
+          instance_cid_or_function = Smi::New(cid);
+
+          RawClass* instance_class = thread->isolate()->class_table()->At(cid);
+          if (instance_class->ptr()->num_type_arguments_ < 0) {
+            goto AssertAssignableCallRuntime;
+          } else if (instance_class->ptr()->num_type_arguments_ > 0) {
+            instance_type_arguments = reinterpret_cast<RawTypeArguments**>(
+                instance->ptr())[instance_class->ptr()
+                                     ->type_arguments_field_offset_in_words_];
+          }
+        }
+
+        for (RawObject** entries = cache->ptr()->cache_->ptr()->data();
+             entries[0] != null_value;
+             entries += SubtypeTestCache::kTestEntryLength) {
+          if ((entries[SubtypeTestCache::kInstanceClassIdOrFunction] ==
+               instance_cid_or_function) &&
+              (entries[SubtypeTestCache::kInstanceTypeArguments] ==
+               instance_type_arguments) &&
+              (entries[SubtypeTestCache::kInstantiatorTypeArguments] ==
+               instantiator_type_arguments) &&
+              (entries[SubtypeTestCache::kFunctionTypeArguments] ==
+               function_type_arguments)) {
+            if (true_value == entries[SubtypeTestCache::kTestResult]) {
+              goto AssertAssignableOk;
+            } else {
+              break;
+            }
+          }
+        }
+      }
+
+    AssertAssignableCallRuntime:
+      SP[1] = args[0];  // instance
+      SP[2] = args[3];  // type
+      SP[3] = args[1];  // instantiator type args
+      SP[4] = args[2];  // function type args
+      SP[5] = args[4];  // name
+      SP[6] = cache;
+      Exit(thread, FP, SP + 7, pc);
+      NativeArguments native_args(thread, 6, SP + 1, SP - 4);
+      INVOKE_RUNTIME(DRT_TypeCheck, native_args);
+    }
+
+  AssertAssignableOk:
+    SP -= 4;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(AssertSubtype, A);
+    RawObject** args = SP - 4;
+
+    // TODO(kustermann): Implement fast case for common arguments.
+
+    // The arguments on the stack look like:
+    //     args[0]  instantiator type args
+    //     args[1]  function type args
+    //     args[2]  sub_type
+    //     args[3]  super_type
+    //     args[4]  name
+
+    // Reserve a result slot; it is unused, since the negative case throws
+    // an exception.
+    SP++;
+    RawObject** result_slot = SP;
+
+    Exit(thread, FP, SP + 1, pc);
+    NativeArguments native_args(thread, 5, args, result_slot);
+    INVOKE_RUNTIME(DRT_SubtypeCheck, native_args);
+
+    // Result slot not used anymore.
+    SP--;
+
+    // Drop all arguments.
+    SP -= 5;
+
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(AssertBoolean, A);
+    RawObject* value = SP[0];
+    if (rA) {  // Should we perform type check?
+      if ((value == true_value) || (value == false_value)) {
+        goto AssertBooleanOk;
+      }
+    } else if (value != null_value) {
+      goto AssertBooleanOk;
+    }
+
+    // Assertion failed.
+    {
+      SP[1] = SP[0];  // instance
+      Exit(thread, FP, SP + 2, pc);
+      NativeArguments args(thread, 1, SP + 1, SP);
+      INVOKE_RUNTIME(DRT_NonBoolTypeError, args);
+    }
+
+  AssertBooleanOk:
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(TestSmi, A_D);
+    intptr_t left = reinterpret_cast<intptr_t>(RAW_CAST(Smi, FP[rA]));
+    intptr_t right = reinterpret_cast<intptr_t>(RAW_CAST(Smi, FP[rD]));
+    if ((left & right) != 0) {
+      pc++;
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(TestCids, A_D);
+    const intptr_t cid = InterpreterHelpers::GetClassId(FP[rA]);
+    const intptr_t num_cases = rD;
+    for (intptr_t i = 0; i < num_cases; i++) {
+      ASSERT(KernelBytecode::DecodeOpcode(pc[i]) == KernelBytecode::kNop);
+      intptr_t test_target = KernelBytecode::DecodeA(pc[i]);
+      intptr_t test_cid = KernelBytecode::DecodeD(pc[i]);
+      if (cid == test_cid) {
+        if (test_target != 0) {
+          pc += 1;  // Match true.
+        } else {
+          pc += 2;  // Match false.
+        }
+        break;
+      }
+    }
+    pc += num_cases;
+    DISPATCH();
+  }
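+
+  // Note: the rD Nops that follow a TestCids instruction form a case table;
+  // each Nop encodes one case, with A as the branch direction and D as the
+  // cid to match. With no match, execution falls through to the first
+  // instruction after the table; a match skips one (A != 0) or two (A == 0)
+  // further instructions, which are presumably the jumps for the true and
+  // false outcomes emitted by the compiler.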
+
+  {
+    BYTECODE(CheckSmi, 0);
+    intptr_t obj = reinterpret_cast<intptr_t>(FP[rA]);
+    if ((obj & kSmiTagMask) == kSmiTag) {
+      pc++;
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(CheckEitherNonSmi, A_D);
+    const intptr_t obj1 = reinterpret_cast<intptr_t>(FP[rA]);
+    const intptr_t obj2 = reinterpret_cast<intptr_t>(FP[rD]);
+    const intptr_t tag = (obj1 | obj2) & kSmiTagMask;
+    if (tag != kSmiTag) {
+      pc++;
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(CheckClassId, A_D);
+    const intptr_t actual_cid =
+        reinterpret_cast<intptr_t>(FP[rA]) >> kSmiTagSize;
+    const intptr_t desired_cid = rD;
+    pc += (actual_cid == desired_cid) ? 1 : 0;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(CheckClassIdRange, A_D);
+    const intptr_t actual_cid =
+        reinterpret_cast<intptr_t>(FP[rA]) >> kSmiTagSize;
+    const uintptr_t cid_start = rD;
+    const uintptr_t cid_range = KernelBytecode::DecodeD(*pc);
+    // Unsigned comparison.  Skip either just the nop or both the nop and the
+    // following instruction.
+    pc += (actual_cid - cid_start <= cid_range) ? 2 : 1;
+    DISPATCH();
+  }
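+
+  // Note: the single unsigned comparison above is a two-sided range check.
+  // For example, with cid_start = 100 and cid_range = 5, any cid in
+  // 100..105 satisfies actual_cid - cid_start <= 5, while a cid below 100
+  // wraps around to a huge unsigned value and fails the same test.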
+
+  {
+    BYTECODE(CheckBitTest, A_D);
+    const intptr_t raw_value = reinterpret_cast<intptr_t>(FP[rA]);
+    const bool is_smi = ((raw_value & kSmiTagMask) == kSmiTag);
+    const intptr_t cid_min = KernelBytecode::DecodeD(*pc);
+    const intptr_t cid_mask = Smi::Value(
+        RAW_CAST(Smi, LOAD_CONSTANT(KernelBytecode::DecodeD(*(pc + 1)))));
+    if (LIKELY(!is_smi)) {
+      const intptr_t cid_max = Utils::HighestBit(cid_mask) + cid_min;
+      const intptr_t cid = InterpreterHelpers::GetClassId(FP[rA]);
+      // The cid is in-bounds, and the bit is set in the mask.
+      if ((cid >= cid_min) && (cid <= cid_max) &&
+          ((cid_mask & (1 << (cid - cid_min))) != 0)) {
+        pc += 3;
+      } else {
+        pc += 2;
+      }
+    } else {
+      const bool may_be_smi = (rD == 1);
+      pc += (may_be_smi ? 3 : 2);
+    }
+    DISPATCH();
+  }
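+
+  // Note: CheckBitTest encodes a sparse cid set as a base cid plus a bit
+  // mask. For example, with cid_min = 50 and cid_mask = 0b101, cids 50 and
+  // 52 pass the test and skip both trailing operand words plus the next
+  // instruction (pc += 3); any other cid skips only the operand words
+  // (pc += 2) and lands on that instruction, presumably a jump to the slow
+  // or deoptimization path emitted by the compiler.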
+
+  {
+    BYTECODE(CheckCids, A_B_C);
+    const intptr_t raw_value = reinterpret_cast<intptr_t>(FP[rA]);
+    const bool is_smi = ((raw_value & kSmiTagMask) == kSmiTag);
+    const bool may_be_smi = (rB == 1);
+    const intptr_t cids_length = rC;
+    if (LIKELY(!is_smi)) {
+      const intptr_t cid = InterpreterHelpers::GetClassId(FP[rA]);
+      for (intptr_t i = 0; i < cids_length; i++) {
+        const intptr_t desired_cid = KernelBytecode::DecodeD(*(pc + i));
+        if (cid == desired_cid) {
+          pc++;
+          break;
+        }
+      }
+      pc += cids_length;
+    } else {
+      pc += cids_length;
+      pc += (may_be_smi ? 1 : 0);
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(CheckCidsByRange, A_B_C);
+    const intptr_t raw_value = reinterpret_cast<intptr_t>(FP[rA]);
+    const bool is_smi = ((raw_value & kSmiTagMask) == kSmiTag);
+    const bool may_be_smi = (rB == 1);
+    const intptr_t cids_length = rC;
+    if (LIKELY(!is_smi)) {
+      const intptr_t cid = InterpreterHelpers::GetClassId(FP[rA]);
+      for (intptr_t i = 0; i < cids_length; i += 2) {
+        // Note unsigned type to get unsigned range check below.
+        const uintptr_t cid_start = KernelBytecode::DecodeD(*(pc + i));
+        const uintptr_t cids = KernelBytecode::DecodeD(*(pc + i + 1));
+        if (cid - cid_start < cids) {
+          pc++;
+          break;
+        }
+      }
+      pc += cids_length;
+    } else {
+      pc += cids_length;
+      pc += (may_be_smi ? 1 : 0);
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(IfEqStrictTOS, 0);
+    SP -= 2;
+    if (SP[1] != SP[2]) {
+      pc++;
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(IfNeStrictTOS, 0);
+    SP -= 2;
+    if (SP[1] == SP[2]) {
+      pc++;
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(IfEqStrictNumTOS, 0);
+    if (thread->isolate()->single_step()) {
+      Exit(thread, FP, SP + 1, pc);
+      NativeArguments args(thread, 0, NULL, NULL);
+      INVOKE_RUNTIME(DRT_SingleStepHandler, args);
+    }
+
+    SP -= 2;
+    if (!InterpreterHelpers::IsStrictEqualWithNumberCheck(SP[1], SP[2])) {
+      pc++;
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(IfNeStrictNumTOS, 0);
+    if (thread->isolate()->single_step()) {
+      Exit(thread, FP, SP + 1, pc);
+      NativeArguments args(thread, 0, NULL, NULL);
+      INVOKE_RUNTIME(DRT_SingleStepHandler, args);
+    }
+
+    SP -= 2;
+    if (InterpreterHelpers::IsStrictEqualWithNumberCheck(SP[1], SP[2])) {
+      pc++;
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(IfSmiLtTOS, 0);
+    RawSmi* left = Smi::RawCast(SP[-1]);
+    RawSmi* right = Smi::RawCast(SP[-0]);
+    if (!(Smi::Value(left) < Smi::Value(right))) {
+      pc++;
+    }
+    SP -= 2;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(IfSmiLeTOS, 0);
+    RawSmi* left = Smi::RawCast(SP[-1]);
+    RawSmi* right = Smi::RawCast(SP[-0]);
+    if (!(Smi::Value(left) <= Smi::Value(right))) {
+      pc++;
+    }
+    SP -= 2;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(IfSmiGeTOS, 0);
+    RawSmi* left = Smi::RawCast(SP[-1]);
+    RawSmi* right = Smi::RawCast(SP[-0]);
+    if (!(Smi::Value(left) >= Smi::Value(right))) {
+      pc++;
+    }
+    SP -= 2;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(IfSmiGtTOS, 0);
+    RawSmi* left = Smi::RawCast(SP[-1]);
+    RawSmi* right = Smi::RawCast(SP[-0]);
+    if (!(Smi::Value(left) > Smi::Value(right))) {
+      pc++;
+    }
+    SP -= 2;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(IfEqStrict, A_D);
+    RawObject* lhs = FP[rA];
+    RawObject* rhs = FP[rD];
+    if (lhs != rhs) {
+      pc++;
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(IfNeStrict, A_D);
+    RawObject* lhs = FP[rA];
+    RawObject* rhs = FP[rD];
+    if (lhs == rhs) {
+      pc++;
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(IfLe, A_D);
+    const intptr_t lhs = reinterpret_cast<intptr_t>(FP[rA]);
+    const intptr_t rhs = reinterpret_cast<intptr_t>(FP[rD]);
+    if (lhs > rhs) {
+      pc++;
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(IfLt, A_D);
+    const intptr_t lhs = reinterpret_cast<intptr_t>(FP[rA]);
+    const intptr_t rhs = reinterpret_cast<intptr_t>(FP[rD]);
+    if (lhs >= rhs) {
+      pc++;
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(IfGe, A_D);
+    const intptr_t lhs = reinterpret_cast<intptr_t>(FP[rA]);
+    const intptr_t rhs = reinterpret_cast<intptr_t>(FP[rD]);
+    if (lhs < rhs) {
+      pc++;
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(IfGt, A_D);
+    const intptr_t lhs = reinterpret_cast<intptr_t>(FP[rA]);
+    const intptr_t rhs = reinterpret_cast<intptr_t>(FP[rD]);
+    if (lhs <= rhs) {
+      pc++;
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(IfULe, A_D);
+    const uintptr_t lhs = reinterpret_cast<uintptr_t>(FP[rA]);
+    const uintptr_t rhs = reinterpret_cast<uintptr_t>(FP[rD]);
+    if (lhs > rhs) {
+      pc++;
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(IfULt, A_D);
+    const uintptr_t lhs = reinterpret_cast<uintptr_t>(FP[rA]);
+    const uintptr_t rhs = reinterpret_cast<uintptr_t>(FP[rD]);
+    if (lhs >= rhs) {
+      pc++;
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(IfUGe, A_D);
+    const uintptr_t lhs = reinterpret_cast<uintptr_t>(FP[rA]);
+    const uintptr_t rhs = reinterpret_cast<uintptr_t>(FP[rD]);
+    if (lhs < rhs) {
+      pc++;
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(IfUGt, A_D);
+    const uintptr_t lhs = reinterpret_cast<uintptr_t>(FP[rA]);
+    const uintptr_t rhs = reinterpret_cast<uintptr_t>(FP[rD]);
+    if (lhs <= rhs) {
+      pc++;
+    }
+    DISPATCH();
+  }
+
+#if defined(ARCH_IS_64_BIT)
+  {
+    BYTECODE(IfDEq, A_D);
+    const double lhs = bit_cast<double, RawObject*>(FP[rA]);
+    const double rhs = bit_cast<double, RawObject*>(FP[rD]);
+    pc += (lhs == rhs) ? 0 : 1;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(IfDNe, A_D);
+    const double lhs = bit_cast<double, RawObject*>(FP[rA]);
+    const double rhs = bit_cast<double, RawObject*>(FP[rD]);
+    pc += (lhs != rhs) ? 0 : 1;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(IfDLe, A_D);
+    const double lhs = bit_cast<double, RawObject*>(FP[rA]);
+    const double rhs = bit_cast<double, RawObject*>(FP[rD]);
+    pc += (lhs <= rhs) ? 0 : 1;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(IfDLt, A_D);
+    const double lhs = bit_cast<double, RawObject*>(FP[rA]);
+    const double rhs = bit_cast<double, RawObject*>(FP[rD]);
+    pc += (lhs < rhs) ? 0 : 1;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(IfDGe, A_D);
+    const double lhs = bit_cast<double, RawObject*>(FP[rA]);
+    const double rhs = bit_cast<double, RawObject*>(FP[rD]);
+    pc += (lhs >= rhs) ? 0 : 1;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(IfDGt, A_D);
+    const double lhs = bit_cast<double, RawObject*>(FP[rA]);
+    const double rhs = bit_cast<double, RawObject*>(FP[rD]);
+    pc += (lhs > rhs) ? 0 : 1;
+    DISPATCH();
+  }
+#else   // defined(ARCH_IS_64_BIT)
+  {
+    BYTECODE(IfDEq, A_D);
+    UNREACHABLE();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(IfDNe, A_D);
+    UNREACHABLE();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(IfDLe, A_D);
+    UNREACHABLE();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(IfDLt, A_D);
+    UNREACHABLE();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(IfDGe, A_D);
+    UNREACHABLE();
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(IfDGt, A_D);
+    UNREACHABLE();
+    DISPATCH();
+  }
+#endif  // defined(ARCH_IS_64_BIT)
+
+  {
+    BYTECODE(IfEqStrictNum, A_D);
+    RawObject* lhs = FP[rA];
+    RawObject* rhs = FP[rD];
+    if (!InterpreterHelpers::IsStrictEqualWithNumberCheck(lhs, rhs)) {
+      pc++;
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(IfNeStrictNum, A_D);
+    RawObject* lhs = FP[rA];
+    RawObject* rhs = FP[rD];
+    if (InterpreterHelpers::IsStrictEqualWithNumberCheck(lhs, rhs)) {
+      pc++;
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(IfEqNull, A);
+    if (FP[rA] != null_value) {
+      pc++;
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(IfNeNull, A);
+    if (FP[rA] == null_value) {
+      pc++;
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(Jump, 0);
+    const int32_t target = static_cast<int32_t>(op) >> 8;
+    pc += (target - 1);
+    DISPATCH();
+  }
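+
+  // Note: the Jump target is the signed 24-bit value in the upper bits of
+  // the instruction word, recovered by the arithmetic shift above. Because
+  // pc already points one word past the Jump when the handler runs, the -1
+  // makes the offset relative to the Jump itself; e.g. a decoded target of
+  // 1 transfers control to the instruction immediately after the Jump.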
+
+  {
+    BYTECODE(LoadClassId, A_D);
+    const uint16_t object_reg = rD;
+    RawObject* obj = static_cast<RawObject*>(FP[object_reg]);
+    FP[rA] = InterpreterHelpers::GetClassIdAsSmi(obj);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(LoadClassIdTOS, 0);
+    RawObject* obj = static_cast<RawObject*>(SP[0]);
+    SP[0] = InterpreterHelpers::GetClassIdAsSmi(obj);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(StoreIndexedTOS, 0);
+    SP -= 3;
+    RawArray* array = RAW_CAST(Array, SP[1]);
+    RawSmi* index = RAW_CAST(Smi, SP[2]);
+    RawObject* value = SP[3];
+    ASSERT(InterpreterHelpers::CheckIndex(index, array->ptr()->length_));
+    array->StorePointer(array->ptr()->data() + Smi::Value(index), value);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(StoreIndexed, A_B_C);
+    RawArray* array = RAW_CAST(Array, FP[rA]);
+    RawSmi* index = RAW_CAST(Smi, FP[rB]);
+    RawObject* value = FP[rC];
+    ASSERT(InterpreterHelpers::CheckIndex(index, array->ptr()->length_));
+    array->StorePointer(array->ptr()->data() + Smi::Value(index), value);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(StoreIndexedUint8, A_B_C);
+    uint8_t* data = InterpreterHelpers::GetTypedData(FP[rA], FP[rB]);
+    *data = Smi::Value(RAW_CAST(Smi, FP[rC]));
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(StoreIndexedExternalUint8, A_B_C);
+    uint8_t* array = reinterpret_cast<uint8_t*>(FP[rA]);
+    RawSmi* index = RAW_CAST(Smi, FP[rB]);
+    RawSmi* value = RAW_CAST(Smi, FP[rC]);
+    array[Smi::Value(index)] = Smi::Value(value);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(StoreIndexedOneByteString, A_B_C);
+    RawOneByteString* array = RAW_CAST(OneByteString, FP[rA]);
+    RawSmi* index = RAW_CAST(Smi, FP[rB]);
+    RawSmi* value = RAW_CAST(Smi, FP[rC]);
+    ASSERT(InterpreterHelpers::CheckIndex(index, array->ptr()->length_));
+    array->ptr()->data()[Smi::Value(index)] = Smi::Value(value);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(StoreIndexedUint32, A_B_C);
+    uint8_t* data = InterpreterHelpers::GetTypedData(FP[rA], FP[rB]);
+    const uintptr_t value = reinterpret_cast<uintptr_t>(FP[rC]);
+    *reinterpret_cast<uint32_t*>(data) = static_cast<uint32_t>(value);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(TailCall, 0);
+    RawCode* code = RAW_CAST(Code, SP[-0]);
+    RawImmutableArray* args_desc = RAW_CAST(ImmutableArray, SP[-1]);
+    PrepareForTailCall(code, args_desc, FP, &SP, &pc);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(TailCallOpt, A_D);
+    RawImmutableArray* args_desc = RAW_CAST(ImmutableArray, FP[rA]);
+    RawCode* code = RAW_CAST(Code, FP[rD]);
+    PrepareForTailCall(code, args_desc, FP, &SP, &pc);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(LoadArgDescriptor, 0);
+    SP++;
+    SP[0] = argdesc_;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(LoadArgDescriptorOpt, A);
+    FP[rA] = argdesc_;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(NoSuchMethod, 0);
+    goto ClosureNoSuchMethod;
+  }
+
+  {
+    BYTECODE(LoadFpRelativeSlot, A_X);
+    RawSmi* index = RAW_CAST(Smi, SP[-0]);
+    const int16_t offset = rD;
+    SP[-0] = FP[-(Smi::Value(index) + offset)];
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(LoadFpRelativeSlotOpt, A_B_Y);
+    RawSmi* index = RAW_CAST(Smi, FP[rB]);
+    const int8_t offset = rY;
+    FP[rA] = FP[-(Smi::Value(index) + offset)];
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(StoreFpRelativeSlot, A_X);
+    RawSmi* index = RAW_CAST(Smi, SP[-1]);
+    const int16_t offset = rD;
+    FP[-(Smi::Value(index) + offset) - 0] = SP[-0];
+    SP--;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(StoreFpRelativeSlotOpt, A_B_Y);
+    RawSmi* index = RAW_CAST(Smi, FP[rB]);
+    const int8_t offset = rY;
+    FP[-(Smi::Value(index) + offset) - 0] = FP[rA];
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(LoadIndexedTOS, 0);
+    // Currently this instruction is only emitted if it's safe to do so.
+    ASSERT(!SP[0]->IsHeapObject());
+    ASSERT(SP[-1]->IsArray() || SP[-1]->IsImmutableArray());
+
+    const intptr_t index_scale = rA;
+    RawSmi* index = RAW_CAST(Smi, SP[-0]);
+    RawArray* array = Array::RawCast(SP[-1]);
+
+    ASSERT(InterpreterHelpers::CheckIndex(index, array->ptr()->length_));
+    SP[-1] = array->ptr()->data()[Smi::Value(index) << index_scale];
+    SP--;
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(LoadIndexed, A_B_C);
+    RawObject* obj = FP[rB];
+    ASSERT(obj->IsArray() || obj->IsImmutableArray());
+    RawArray* array = reinterpret_cast<RawArray*>(obj);
+    RawSmi* index = RAW_CAST(Smi, FP[rC]);
+    ASSERT(InterpreterHelpers::CheckIndex(index, array->ptr()->length_));
+    FP[rA] = array->ptr()->data()[Smi::Value(index)];
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(LoadIndexedUint8, A_B_C);
+    uint8_t* data = InterpreterHelpers::GetTypedData(FP[rB], FP[rC]);
+    FP[rA] = Smi::New(*data);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(LoadIndexedInt8, A_B_C);
+    uint8_t* data = InterpreterHelpers::GetTypedData(FP[rB], FP[rC]);
+    FP[rA] = Smi::New(*reinterpret_cast<int8_t*>(data));
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(LoadIndexedUint32, A_B_C);
+    const uint8_t* data = InterpreterHelpers::GetTypedData(FP[rB], FP[rC]);
+    const uint32_t value = *reinterpret_cast<const uint32_t*>(data);
+    FP[rA] = reinterpret_cast<RawObject*>(value);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(LoadIndexedInt32, A_B_C);
+    const uint8_t* data = InterpreterHelpers::GetTypedData(FP[rB], FP[rC]);
+    const int32_t value = *reinterpret_cast<const int32_t*>(data);
+    FP[rA] = reinterpret_cast<RawObject*>(value);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(LoadIndexedExternalUint8, A_B_C);
+    uint8_t* data = reinterpret_cast<uint8_t*>(FP[rB]);
+    RawSmi* index = RAW_CAST(Smi, FP[rC]);
+    FP[rA] = Smi::New(data[Smi::Value(index)]);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(LoadIndexedExternalInt8, A_B_C);
+    int8_t* data = reinterpret_cast<int8_t*>(FP[rB]);
+    RawSmi* index = RAW_CAST(Smi, FP[rC]);
+    FP[rA] = Smi::New(data[Smi::Value(index)]);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(LoadIndexedOneByteString, A_B_C);
+    RawOneByteString* array = RAW_CAST(OneByteString, FP[rB]);
+    RawSmi* index = RAW_CAST(Smi, FP[rC]);
+    ASSERT(InterpreterHelpers::CheckIndex(index, array->ptr()->length_));
+    FP[rA] = Smi::New(array->ptr()->data()[Smi::Value(index)]);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(LoadIndexedTwoByteString, A_B_C);
+    RawTwoByteString* array = RAW_CAST(TwoByteString, FP[rB]);
+    RawSmi* index = RAW_CAST(Smi, FP[rC]);
+    ASSERT(InterpreterHelpers::CheckIndex(index, array->ptr()->length_));
+    FP[rA] = Smi::New(array->ptr()->data()[Smi::Value(index)]);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(Deopt, A_D);
+    const bool is_lazy = rD == 0;
+    if (!Deoptimize(thread, &pc, &FP, &SP, is_lazy)) {
+      HANDLE_EXCEPTION;
+    }
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(DeoptRewind, 0);
+    pc = reinterpret_cast<uint32_t*>(thread->resume_pc());
+    if (!Deoptimize(thread, &pc, &FP, &SP, false /* eager */)) {
+      HANDLE_EXCEPTION;
+    }
+    {
+      Exit(thread, FP, SP + 1, pc);
+      NativeArguments args(thread, 0, NULL, NULL);
+      INVOKE_RUNTIME(DRT_RewindPostDeopt, args);
+    }
+    UNREACHABLE();  // DRT_RewindPostDeopt does not exit normally.
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(Nop, 0);
+    DISPATCH();
+  }
+
+  {
+    BYTECODE(Trap, 0);
+    UNIMPLEMENTED();
+    DISPATCH();
+  }
+
+  // Helper used to handle noSuchMethod on closures.
+  {
+  ClosureNoSuchMethod:
+#if defined(DEBUG)
+    function_h ^= FrameFunction(FP);
+    ASSERT(function_h.IsNull() || function_h.IsClosureFunction());
+#endif
+
+    // Restore caller context as we are going to throw NoSuchMethod.
+    pc = SavedCallerPC(FP);
+
+    const bool has_dart_caller = (reinterpret_cast<uword>(pc) & 2) == 0;
+    const intptr_t argc = has_dart_caller ? KernelBytecode::DecodeArgc(pc[-1])
+                                          : (reinterpret_cast<uword>(pc) >> 2);
+    const bool has_function_type_args =
+        has_dart_caller && InterpreterHelpers::ArgDescTypeArgsLen(argdesc_) > 0;
+
+    SP = FrameArguments(FP, 0);
+    RawObject** args = SP - argc;
+    FP = SavedCallerFP(FP);
+    if (has_dart_caller) {
+      pp_ = InterpreterHelpers::FrameCode(FP)->ptr()->object_pool_;
+    }
+
+    *++SP = null_value;
+    *++SP = args[has_function_type_args ? 1 : 0];  // Closure object.
+    *++SP = argdesc_;
+    *++SP = null_value;  // Array of arguments (will be filled).
+
+    // Allocate array of arguments.
+    {
+      SP[1] = Smi::New(argc);  // length
+      SP[2] = null_value;      // type
+      Exit(thread, FP, SP + 3, pc);
+      NativeArguments native_args(thread, 2, SP + 1, SP);
+      if (!InvokeRuntime(thread, this, DRT_AllocateArray, native_args)) {
+        HANDLE_EXCEPTION;
+      } else if (has_dart_caller) {
+        HANDLE_RETURN;
+      }
+
+      // Copy arguments into the newly allocated array.
+      RawArray* array = static_cast<RawArray*>(SP[0]);
+      ASSERT(array->GetClassId() == kArrayCid);
+      for (intptr_t i = 0; i < argc; i++) {
+        array->ptr()->data()[i] = args[i];
+      }
+    }
+
+    // Invoke noSuchMethod passing down closure, argument descriptor and
+    // array of arguments.
+    {
+      Exit(thread, FP, SP + 1, pc);
+      NativeArguments native_args(thread, 3, SP - 2, SP - 3);
+      INVOKE_RUNTIME(DRT_InvokeClosureNoSuchMethod, native_args);
+      UNREACHABLE();
+    }
+
+    DISPATCH();
+  }
+
+  // Single dispatch point used by exception handling macros.
+  {
+  DispatchAfterException:
+    DISPATCH();
+  }
+
+  UNREACHABLE();
+  return 0;
+}
+
+void Interpreter::JumpToFrame(uword pc, uword sp, uword fp, Thread* thread) {
+  // Walk over all setjmp buffers (interpreted --> C++ transitions)
+  // and try to find the setjmp associated with the interpreted frame pointer.
+  InterpreterSetjmpBuffer* buf = last_setjmp_buffer();
+  while ((buf->link() != NULL) && (buf->link()->fp() > fp)) {
+    buf = buf->link();
+  }
+  ASSERT(buf != NULL);
+  ASSERT(last_setjmp_buffer() == buf);
+
+  // The C++ caller has not cleaned up the stack memory of C++ frames.
+  // Prepare for unwinding frames by destroying all the stack resources
+  // in the previous C++ frames.
+  StackResource::Unwind(thread);
+
+  // Set the tag.
+  thread->set_vm_tag(VMTag::kDartTagId);
+  // Clear top exit frame.
+  thread->set_top_exit_frame_info(0);
+
+  fp_ = reinterpret_cast<RawObject**>(fp);
+
+  if (pc == StubCode::RunExceptionHandler_entry()->EntryPoint()) {
+    // The RunExceptionHandler stub is a placeholder.  We implement
+    // its behavior here.
+    RawObject* raw_exception = thread->active_exception();
+    RawObject* raw_stacktrace = thread->active_stacktrace();
+    ASSERT(raw_exception != Object::null());
+    special_[kExceptionSpecialIndex] = raw_exception;
+    special_[kStackTraceSpecialIndex] = raw_stacktrace;
+    pc_ = thread->resume_pc();
+  } else {
+    pc_ = pc;
+  }
+
+  buf->Longjmp();
+  UNREACHABLE();
+}
+
+void Interpreter::VisitObjectPointers(ObjectPointerVisitor* visitor) {
+  visitor->VisitPointer(reinterpret_cast<RawObject**>(&pp_));
+  visitor->VisitPointer(reinterpret_cast<RawObject**>(&argdesc_));
+}
+
+}  // namespace dart
+
+#endif  // defined(DART_USE_INTERPRETER)
diff --git a/runtime/vm/interpreter.h b/runtime/vm/interpreter.h
new file mode 100644
index 0000000..def46e6
--- /dev/null
+++ b/runtime/vm/interpreter.h
@@ -0,0 +1,199 @@
+// Copyright (c) 2018, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#ifndef RUNTIME_VM_INTERPRETER_H_
+#define RUNTIME_VM_INTERPRETER_H_
+
+#include "vm/compiler/method_recognizer.h"
+#include "vm/constants_kbc.h"
+
+namespace dart {
+
+class Isolate;
+class RawObject;
+class InterpreterSetjmpBuffer;
+class Thread;
+class Code;
+class Array;
+class RawICData;
+class RawImmutableArray;
+class RawArray;
+class RawObjectPool;
+class RawFunction;
+class ObjectPointerVisitor;
+
+// Interpreter intrinsic handler. It is invoked on entry to the intrinsified
+// function via the Intrinsic bytecode, before the frame is set up.
+// If the handler returns true, the Intrinsic bytecode works as a return
+// instruction returning the value in result. Otherwise, the interpreter
+// proceeds to execute the body of the function.
+typedef bool (*IntrinsicHandler)(Thread* thread,
+                                 RawObject** FP,
+                                 RawObject** result);
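+
+// A hypothetical handler sketch, for illustration only (the name and the
+// argument layout assumed below are not part of this interface):
+//
+//   static bool Intrinsic_GetArrayLength(Thread* thread,
+//                                        RawObject** FP,
+//                                        RawObject** result) {
+//     RawArray* array = static_cast<RawArray*>(FP[0]);  // Assumed receiver.
+//     *result = array->ptr()->length_;                  // Already a Smi.
+//     return true;  // Intrinsic bytecode then returns *result.
+//   }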
+
+class Interpreter {
+ public:
+  static const uword kInterpreterStackUnderflowSize = 0x80;
+
+  Interpreter();
+  ~Interpreter();
+
+  // The currently executing Interpreter instance, which is associated with
+  // the current isolate.
+  static Interpreter* Current();
+
+  // Low address (KBC stack grows up).
+  uword stack_base() const { return stack_base_; }
+  // High address (KBC stack grows up).
+  uword stack_limit() const { return stack_limit_; }
+
+  // The thread's top_exit_frame_info refers to a Dart frame in the interpreter
+  // stack. The interpreter's top_exit_frame_info refers to a C++ frame in the
+  // native stack.
+  uword top_exit_frame_info() const { return top_exit_frame_info_; }
+  void set_top_exit_frame_info(uword value) { top_exit_frame_info_ = value; }
+
+  // Call on program start.
+  static void InitOnce();
+
+  RawObject* Call(const Code& code,
+                  const Array& arguments_descriptor,
+                  const Array& arguments,
+                  Thread* thread);
+
+  void JumpToFrame(uword pc, uword sp, uword fp, Thread* thread);
+
+  uword get_sp() const { return reinterpret_cast<uword>(fp_); }  // Yes, fp_.
+  uword get_fp() const { return reinterpret_cast<uword>(fp_); }
+  uword get_pc() const { return pc_; }
+
+  enum IntrinsicId {
+#define V(test_class_name, test_function_name, enum_name, type, fp)            \
+  k##enum_name##Intrinsic,
+    ALL_INTRINSICS_LIST(V) GRAPH_INTRINSICS_LIST(V)
+#undef V
+        kIntrinsicCount,
+  };
+
+  static bool IsSupportedIntrinsic(IntrinsicId id) {
+    return intrinsics_[id] != NULL;
+  }
+
+  enum SpecialIndex {
+    kExceptionSpecialIndex,
+    kStackTraceSpecialIndex,
+    kSpecialIndexCount
+  };
+
+  void VisitObjectPointers(ObjectPointerVisitor* visitor);
+
+ private:
+  uintptr_t* stack_;
+  uword stack_base_;
+  uword stack_limit_;
+
+  RawObject** fp_;
+  uword pc_;
+  DEBUG_ONLY(uint64_t icount_;)
+
+  InterpreterSetjmpBuffer* last_setjmp_buffer_;
+  uword top_exit_frame_info_;
+
+  RawObjectPool* pp_;  // Pool Pointer.
+  RawArray* argdesc_;  // Arguments descriptor: used to pass information
+                       // between the call instruction and the function entry.
+  RawObject* special_[kSpecialIndexCount];
+
+  static IntrinsicHandler intrinsics_[kIntrinsicCount];
+
+  void Exit(Thread* thread,
+            RawObject** base,
+            RawObject** exit_frame,
+            uint32_t* pc);
+
+  void CallRuntime(Thread* thread,
+                   RawObject** base,
+                   RawObject** exit_frame,
+                   uint32_t* pc,
+                   intptr_t argc_tag,
+                   RawObject** args,
+                   RawObject** result,
+                   uword target);
+
+  void Invoke(Thread* thread,
+              RawObject** call_base,
+              RawObject** call_top,
+              uint32_t** pc,
+              RawObject*** FP,
+              RawObject*** SP);
+
+  bool InvokeCompiled(Thread* thread,
+                      RawFunction* function,
+                      RawArray* argdesc,
+                      RawObject** call_base,
+                      RawObject** call_top,
+                      uint32_t** pc,
+                      RawObject*** FP,
+                      RawObject*** SP);
+
+  bool Deoptimize(Thread* thread,
+                  uint32_t** pc,
+                  RawObject*** FP,
+                  RawObject*** SP,
+                  bool is_lazy);
+
+  void InlineCacheMiss(int checked_args,
+                       Thread* thread,
+                       RawICData* icdata,
+                       RawObject** call_base,
+                       RawObject** top,
+                       uint32_t* pc,
+                       RawObject** FP,
+                       RawObject** SP);
+
+  void InstanceCall1(Thread* thread,
+                     RawICData* icdata,
+                     RawObject** call_base,
+                     RawObject** call_top,
+                     uint32_t** pc,
+                     RawObject*** FP,
+                     RawObject*** SP,
+                     bool optimized);
+
+  void InstanceCall2(Thread* thread,
+                     RawICData* icdata,
+                     RawObject** call_base,
+                     RawObject** call_top,
+                     uint32_t** pc,
+                     RawObject*** FP,
+                     RawObject*** SP,
+                     bool optimized);
+
+  void PrepareForTailCall(RawCode* code,
+                          RawImmutableArray* args_desc,
+                          RawObject** FP,
+                          RawObject*** SP,
+                          uint32_t** pc);
+
+#if !defined(PRODUCT)
+  // Returns true if tracing of executed instructions is enabled.
+  bool IsTracingExecution() const;
+
+  // Prints bytecode instruction at given pc for instruction tracing.
+  void TraceInstruction(uint32_t* pc) const;
+#endif  // !defined(PRODUCT)
+
+  // Longjmp support for exceptions.
+  InterpreterSetjmpBuffer* last_setjmp_buffer() { return last_setjmp_buffer_; }
+  void set_last_setjmp_buffer(InterpreterSetjmpBuffer* buffer) {
+    last_setjmp_buffer_ = buffer;
+  }
+
+  friend class InterpreterSetjmpBuffer;
+  DISALLOW_COPY_AND_ASSIGN(Interpreter);
+};
+
+}  // namespace dart
+
+#endif  // RUNTIME_VM_INTERPRETER_H_
diff --git a/runtime/vm/isolate.cc b/runtime/vm/isolate.cc
index c3dcbe0..10ec3e4 100644
--- a/runtime/vm/isolate.cc
+++ b/runtime/vm/isolate.cc
@@ -21,6 +21,7 @@
 #include "vm/flags.h"
 #include "vm/heap.h"
 #include "vm/image_snapshot.h"
+#include "vm/interpreter.h"
 #include "vm/isolate_reload.h"
 #include "vm/kernel_isolate.h"
 #include "vm/lockers.h"
@@ -905,6 +906,7 @@
       library_tag_handler_(NULL),
       api_state_(NULL),
       random_(),
+      interpreter_(NULL),
       simulator_(NULL),
       mutex_(new Mutex(NOT_IN_PRODUCT("Isolate::mutex_"))),
       symbols_mutex_(new Mutex(NOT_IN_PRODUCT("Isolate::symbols_mutex_"))),
@@ -980,6 +982,9 @@
   delete heap_;
   delete object_store_;
   delete api_state_;
+#if defined(DART_USE_INTERPRETER)
+  delete interpreter_;
+#endif
 #if defined(USING_SIMULATOR)
   delete simulator_;
 #endif
@@ -1938,6 +1943,12 @@
   }
 #endif  // !defined(DART_PRECOMPILED_RUNTIME)
 
+#if defined(DART_USE_INTERPRETER)
+  if (interpreter() != NULL) {
+    interpreter()->VisitObjectPointers(visitor);
+  }
+#endif  // defined(DART_USE_INTERPRETER)
+
 #if defined(TARGET_ARCH_DBC)
   if (simulator() != NULL) {
     simulator()->VisitObjectPointers(visitor);
diff --git a/runtime/vm/isolate.h b/runtime/vm/isolate.h
index 66f75f9..bf2f38c 100644
--- a/runtime/vm/isolate.h
+++ b/runtime/vm/isolate.h
@@ -38,6 +38,7 @@
 class HandleVisitor;
 class Heap;
 class ICData;
+class Interpreter;
 class IsolateProfilerData;
 class IsolateReloadContext;
 class IsolateSpawnState;
@@ -403,6 +404,9 @@
 
   Random* random() { return &random_; }
 
+  Interpreter* interpreter() const { return interpreter_; }
+  void set_interpreter(Interpreter* value) { interpreter_ = value; }
+
   Simulator* simulator() const { return simulator_; }
   void set_simulator(Simulator* value) { simulator_ = value; }
 
@@ -936,6 +940,7 @@
   Dart_LibraryTagHandler library_tag_handler_;
   ApiState* api_state_;
   Random random_;
+  Interpreter* interpreter_;
   Simulator* simulator_;
   Mutex* mutex_;          // Protects compiler stats.
   Mutex* symbols_mutex_;  // Protects concurrent access to the symbol table.
diff --git a/runtime/vm/native_arguments.h b/runtime/vm/native_arguments.h
index f7151e3..910e2d6 100644
--- a/runtime/vm/native_arguments.h
+++ b/runtime/vm/native_arguments.h
@@ -96,13 +96,8 @@
 
   RawObject* ArgAt(int index) const {
     ASSERT((index >= 0) && (index < ArgCount()));
-#if defined(TARGET_ARCH_DBC)
-    // On DBC stack is growing upwards, in reverse direction from all other
-    // architectures.
-    RawObject** arg_ptr = &(argv_[index]);
-#else
-    RawObject** arg_ptr = &(argv_[-index]);
-#endif
+    RawObject** arg_ptr =
+        &(argv_[ReverseArgOrderBit::decode(argc_tag_) ? index : -index]);
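+    // With reversed order (upward-growing interpreter and DBC stacks),
+    // argv_ points at the first argument and ArgAt(i) reads argv_[i];
+    // otherwise later arguments live at lower addresses and ArgAt(i)
+    // reads argv_[-i].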
     // Tell MemorySanitizer the RawObject* was initialized (by generated code).
     MSAN_UNPOISON(arg_ptr, kWordSize);
     return *arg_ptr;
@@ -205,23 +200,32 @@
   enum ArgcTagBits {
     kArgcBit = 0,
     kArgcSize = 24,
-    kFunctionBit = 24,
+    kFunctionBit = kArgcBit + kArgcSize,
     kFunctionSize = 3,
+    kReverseArgOrderBit = kFunctionBit + kFunctionSize,
+    kReverseArgOrderSize = 1,
   };
   class ArgcBits : public BitField<intptr_t, int32_t, kArgcBit, kArgcSize> {};
   class FunctionBits
       : public BitField<intptr_t, int, kFunctionBit, kFunctionSize> {};
+  class ReverseArgOrderBit
+      : public BitField<intptr_t, bool, kReverseArgOrderBit, 1> {};
   friend class Api;
   friend class BootstrapNatives;
+  friend class Interpreter;
   friend class Simulator;
 
-#if defined(TARGET_ARCH_DBC)
-  // Allow simulator to create NativeArguments on the stack.
+#if defined(TARGET_ARCH_DBC) || defined(DART_USE_INTERPRETER)
+  // Allow simulator and interpreter to create NativeArguments in reverse order
+  // on the stack.
   NativeArguments(Thread* thread,
                   int argc_tag,
                   RawObject** argv,
                   RawObject** retval)
-      : thread_(thread), argc_tag_(argc_tag), argv_(argv), retval_(retval) {}
+      : thread_(thread),
+        argc_tag_(ReverseArgOrderBit::update(true, argc_tag)),
+        argv_(argv),
+        retval_(retval) {}
 #endif
 
   // Since this function is passed a RawObject directly, we need to be
diff --git a/runtime/vm/object.cc b/runtime/vm/object.cc
index 1e84c31..3476896 100644
--- a/runtime/vm/object.cc
+++ b/runtime/vm/object.cc
@@ -5592,10 +5592,46 @@
 }
 
 bool Function::HasCode() const {
+  NoSafepointScope no_safepoint;
   ASSERT(raw_ptr()->code_ != Code::null());
+#if defined(DART_USE_INTERPRETER)
+  return raw_ptr()->code_ != StubCode::LazyCompile_entry()->code() &&
+         raw_ptr()->code_ != StubCode::InterpretCall_entry()->code();
+#else
   return raw_ptr()->code_ != StubCode::LazyCompile_entry()->code();
+#endif
 }
 
+#if defined(DART_USE_INTERPRETER)
+void Function::AttachBytecode(const Code& value) const {
+  DEBUG_ASSERT(IsMutatorOrAtSafepoint());
+  // Finish setting up code before activating it.
+  value.set_owner(*this);
+  StorePointer(&raw_ptr()->bytecode_, value.raw());
+
+  // We should not have loaded the bytecode if the function had code.
+  ASSERT(!HasCode());
+
+  // Set the code entry_point to the InterpretCall stub.
+  SetInstructions(Code::Handle(StubCode::InterpretCall_entry()->code()));
+}
+
+bool Function::HasBytecode() const {
+  return raw_ptr()->bytecode_ != Code::null();
+}
+
+bool Function::HasCode(RawFunction* function) {
+  NoSafepointScope no_safepoint;
+  ASSERT(function->ptr()->code_ != Code::null());
+  return function->ptr()->code_ != StubCode::LazyCompile_entry()->code() &&
+         function->ptr()->code_ != StubCode::InterpretCall_entry()->code();
+}
+
+bool Function::HasBytecode(RawFunction* function) {
+  return function->ptr()->bytecode_ != Code::null();
+}
+#endif
+
 void Function::ClearCode() const {
 #if defined(DART_PRECOMPILED_RUNTIME)
   UNREACHABLE();
@@ -14565,6 +14601,64 @@
 }
 #endif  // !defined(DART_PRECOMPILED_RUNTIME)
 
+#if defined(DART_USE_INTERPRETER)
+RawCode* Code::FinalizeBytecode(void* bytecode_data,
+                                intptr_t bytecode_size,
+                                const ObjectPool& object_pool,
+                                CodeStatistics* stats /* = nullptr */) {
+  // Allocate the Code and Instructions objects.  Code is allocated first
+  // because a GC during allocation of the code will leave the instruction
+  // pages read-only.
+  const intptr_t pointer_offset_count = 0;  // No fixups in bytecode.
+  Code& code = Code::ZoneHandle(Code::New(pointer_offset_count));
+  Instructions& instrs = Instructions::ZoneHandle(
+      Instructions::New(bytecode_size, true /* has_single_entry_point */));
+  INC_STAT(Thread::Current(), total_instr_size, bytecode_size);
+  INC_STAT(Thread::Current(), total_code_size, bytecode_size);
+
+  // Copy the bytecode data into the instruction area. No fixups to apply.
+  MemoryRegion instrs_region(reinterpret_cast<void*>(instrs.PayloadStart()),
+                             instrs.Size());
+  MemoryRegion bytecode_region(bytecode_data, bytecode_size);
+  // TODO(regis): Avoid copying bytecode.
+  instrs_region.CopyFrom(0, bytecode_region);
+
+  // TODO(regis): Keep following lines or not?
+  code.set_compile_timestamp(OS::GetCurrentMonotonicMicros());
+  // TODO(regis): Do we need to notify CodeObservers for bytecode too?
+  // If so, provide a better name using ToLibNamePrefixedQualifiedCString().
+  CodeObservers::NotifyAll("bytecode", instrs.PayloadStart(),
+                           0 /* prologue_offset */, instrs.Size(),
+                           false /* optimized */);
+  {
+    NoSafepointScope no_safepoint;
+
+    // Hook up Code and Instructions objects.
+    code.SetActiveInstructions(instrs);
+    code.set_instructions(instrs);
+    code.set_is_alive(true);
+
+    // Set object pool in Instructions object.
+    INC_STAT(Thread::Current(), total_code_size,
+             object_pool.Length() * sizeof(uintptr_t));
+    code.set_object_pool(object_pool.raw());
+
+    if (FLAG_write_protect_code) {
+      uword address = RawObject::ToAddr(instrs.raw());
+      VirtualMemory::Protect(reinterpret_cast<void*>(address),
+                             instrs.raw()->Size(), VirtualMemory::kReadExecute);
+    }
+  }
+  // No Code::Comments to set. Default is zero-length Comments.
+  // No prologue was ever entered, optimistically assume nothing was ever
+  // pushed onto the stack.
+  code.SetPrologueOffset(bytecode_size);  // TODO(regis): Correct?
+  INC_STAT(Thread::Current(), total_code_size,
+           code.comments().comments_.Length());
+  return code.raw();
+}
+#endif  // defined(DART_USE_INTERPRETER)
+
 bool Code::SlowFindRawCodeVisitor::FindObject(RawObject* raw_obj) const {
   return RawCode::ContainsPC(raw_obj, pc_);
 }
diff --git a/runtime/vm/object.h b/runtime/vm/object.h
index 6a99622..6784a13 100644
--- a/runtime/vm/object.h
+++ b/runtime/vm/object.h
@@ -2229,6 +2229,10 @@
   }
   void set_unoptimized_code(const Code& value) const;
   bool HasCode() const;
+#if defined(DART_USE_INTERPRETER)
+  static bool HasCode(RawFunction* function);
+  static bool HasBytecode(RawFunction* function);
+#endif
 
   static intptr_t code_offset() { return OFFSET_OF(RawFunction, code_); }
 
@@ -2236,6 +2240,12 @@
     return OFFSET_OF(RawFunction, entry_point_);
   }
 
+#if defined(DART_USE_INTERPRETER)
+  void AttachBytecode(const Code& bytecode) const;
+  RawCode* Bytecode() const { return raw_ptr()->bytecode_; }
+  bool HasBytecode() const;
+#endif
+
   virtual intptr_t Hash() const;
 
   // Returns true if there is at least one debugger breakpoint
@@ -4915,6 +4925,12 @@
                                Assembler* assembler,
                                bool optimized,
                                CodeStatistics* stats = nullptr);
+#if defined(DART_USE_INTERPRETER)
+  static RawCode* FinalizeBytecode(void* bytecode_data,
+                                   intptr_t bytecode_size,
+                                   const ObjectPool& object_pool,
+                                   CodeStatistics* stats = nullptr);
+#endif
 #endif
   static RawCode* LookupCode(uword pc);
   static RawCode* LookupCodeInVmIsolate(uword pc);
diff --git a/runtime/vm/raw_object.h b/runtime/vm/raw_object.h
index e0128b1..2e3e2f1 100644
--- a/runtime/vm/raw_object.h
+++ b/runtime/vm/raw_object.h
@@ -260,6 +260,8 @@
   friend class object;                                                         \
   friend class RawObject;                                                      \
   friend class Heap;                                                           \
+  friend class Interpreter;                                                    \
+  friend class InterpreterHelpers;                                             \
   friend class Simulator;                                                      \
   friend class SimulatorHelpers;                                               \
   DISALLOW_ALLOCATION();                                                       \
@@ -725,6 +727,8 @@
   friend class CodeLookupTableBuilder;  // profiler
   friend class NativeEntry;             // GetClassId
   friend class WritePointerVisitor;     // GetClassId
+  friend class Interpreter;
+  friend class InterpreterHelpers;
   friend class Simulator;
   friend class SimulatorHelpers;
   friend class ObjectLocator;
@@ -926,6 +930,9 @@
   RawObject** to_no_code() {
     return reinterpret_cast<RawObject**>(&ptr()->ic_data_array_);
   }
+#if defined(DART_USE_INTERPRETER)
+  RawCode* bytecode_;  // Bytecode for interpretation, wrapped in Code.
+#endif
   RawCode* code_;  // Currently active code. Accessed from generated code.
   NOT_IN_PRECOMPILED(RawCode* unoptimized_code_);  // Unoptimized code, keep it
                                                    // after optimization.
diff --git a/runtime/vm/runtime_entry.cc b/runtime/vm/runtime_entry.cc
index fca1d73..aadb840 100644
--- a/runtime/vm/runtime_entry.cc
+++ b/runtime/vm/runtime_entry.cc
@@ -15,6 +15,7 @@
 #include "vm/exceptions.h"
 #include "vm/flags.h"
 #include "vm/instructions.h"
+#include "vm/interpreter.h"
 #include "vm/kernel_isolate.h"
 #include "vm/message.h"
 #include "vm/message_handler.h"
@@ -1716,6 +1717,42 @@
   arguments.SetReturn(result);
 }
 
+// Interpret a function call. Should be called only for uncompiled functions.
+// Arg0: function object
+// Arg1: ICData or MegamorphicCache
+// Arg2: arguments descriptor array
+// Arg3: arguments array
+DEFINE_RUNTIME_ENTRY(InterpretCall, 4) {
+#if defined(DART_USE_INTERPRETER)
+  const Function& function = Function::CheckedHandle(zone, arguments.ArgAt(0));
+  // TODO(regis): Use icdata.
+  // const Object& ic_data_or_cache = Object::Handle(zone, arguments.ArgAt(1));
+  const Array& orig_arguments_desc =
+      Array::CheckedHandle(zone, arguments.ArgAt(2));
+  const Array& orig_arguments = Array::CheckedHandle(zone, arguments.ArgAt(3));
+  ASSERT(!function.HasCode());
+  ASSERT(function.HasBytecode());
+  const Code& bytecode = Code::Handle(zone, function.Bytecode());
+  Object& result = Object::Handle(zone);
+  Interpreter* interpreter = Interpreter::Current();
+  ASSERT(interpreter != NULL);
+  {
+    TransitionToGenerated transition(thread);
+    result = interpreter->Call(bytecode, orig_arguments_desc, orig_arguments,
+                               thread);
+  }
+  if (result.IsError()) {
+    if (result.IsLanguageError()) {
+      Exceptions::ThrowCompileTimeError(LanguageError::Cast(result));
+      UNREACHABLE();
+    }
+    Exceptions::PropagateError(Error::Cast(result));
+  }
+#else
+  UNREACHABLE();
+#endif
+}
+
 #if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
 // The following code is used to stress test
 //  - deoptimization
diff --git a/runtime/vm/runtime_entry_list.h b/runtime/vm/runtime_entry_list.h
index b8ed437..d9c7367 100644
--- a/runtime/vm/runtime_entry_list.h
+++ b/runtime/vm/runtime_entry_list.h
@@ -45,6 +45,7 @@
   V(UpdateFieldCid)                                                            \
   V(InitStaticField)                                                           \
   V(CompileFunction)                                                           \
+  V(InterpretCall)                                                             \
   V(MonomorphicMiss)                                                           \
   V(SingleTargetMiss)                                                          \
   V(UnlinkedCall)
diff --git a/runtime/vm/stack_frame_kbc.h b/runtime/vm/stack_frame_kbc.h
new file mode 100644
index 0000000..c4e81b5
--- /dev/null
+++ b/runtime/vm/stack_frame_kbc.h
@@ -0,0 +1,68 @@
+// Copyright (c) 2018, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#ifndef RUNTIME_VM_STACK_FRAME_KBC_H_
+#define RUNTIME_VM_STACK_FRAME_KBC_H_
+
+namespace dart {
+
+/* Kernel Bytecode Frame Layout
+
+IMPORTANT: The KBC stack grows upwards, which is different from all other
+architectures. This enables efficient addressing of locals via an unsigned
+index.
+
+               |                    | <- TOS
+Callee frame   | ...                |
+               | saved FP           |    (FP of current frame)
+               | saved PC           |    (PC of current frame)
+               | code object        |
+               | function object    |
+               +--------------------+
+Current frame  | ...               T| <- SP of current frame
+               | ...               T|
+               | first local       T| <- FP of current frame
+               | caller's FP       *|
+               | caller's PC       *|
+               | code object       T|    (current frame's code object)
+               | function object   T|    (current frame's function object)
+               +--------------------+
+Caller frame   | last parameter     | <- SP of caller frame
+               |  ...               |
+
+               T against a slot indicates it needs to be traversed during GC.
+               * against a slot indicates that it can be traversed during GC
+                 because it will look like a smi to the visitor.
+*/
+
+static const int kKBCDartFrameFixedSize = 4;  // Function, Code, PC, FP
+static const int kKBCSavedPcSlotFromSp = 3;
+
+static const int kKBCFirstObjectSlotFromFp = -4;  // Used by GC.
+static const int kKBCLastFixedObjectSlotFromFp = -3;
+
+static const int kKBCSavedCallerFpSlotFromFp = -1;
+static const int kKBCSavedCallerPpSlotFromFp = kKBCSavedCallerFpSlotFromFp;
+static const int kKBCSavedCallerPcSlotFromFp = -2;
+static const int kKBCCallerSpSlotFromFp = -kKBCDartFrameFixedSize - 1;
+static const int kKBCPcMarkerSlotFromFp = -3;
+static const int kKBCFunctionSlotFromFp = -4;
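+
+// For example, given the FP of an interpreter frame, the frame's function
+// object lives at FP[kKBCFunctionSlotFromFp] and the caller's frame pointer
+// at FP[kKBCSavedCallerFpSlotFromFp].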
+
+// Note: These constants don't match actual KBC behavior, because setting
+// kKBCFirstLocalSlotFromFp to 0 breaks assumptions spread across the code.
+// Instead, for the purposes of local variable allocation, we pretend that
+// KBC behaves like other architectures (stack growing downwards) and fix
+// these indices later, during code generation in the backend.
+static const int kKBCParamEndSlotFromFp = 4;  // One slot past last parameter.
+static const int kKBCFirstLocalSlotFromFp = -1;
+static const int kKBCExitLinkSlotFromEntryFp = 0;
+
+// Value of the stack limit that is used to cause an interrupt.
+// Note that the KBC stack grows upwards, so the interrupt limit is 0, unlike
+// on all other architectures.
+static const uword kKBCInterruptStackLimit = 0;
+
+}  // namespace dart
+
+#endif  // RUNTIME_VM_STACK_FRAME_KBC_H_
diff --git a/runtime/vm/stub_code.h b/runtime/vm/stub_code.h
index 4fd3d6a..01ffc6f 100644
--- a/runtime/vm/stub_code.h
+++ b/runtime/vm/stub_code.h
@@ -28,8 +28,11 @@
   V(DeoptForRewind)                                                            \
   V(UpdateStoreBuffer)                                                         \
   V(PrintStopMessage)                                                          \
+  V(AllocateArray)                                                             \
+  V(AllocateContext)                                                           \
   V(CallToRuntime)                                                             \
   V(LazyCompile)                                                               \
+  V(InterpretCall)                                                             \
   V(CallBootstrapNative)                                                       \
   V(CallNoScopeNative)                                                         \
   V(CallAutoScopeNative)                                                       \
@@ -37,6 +40,7 @@
   V(CallStaticFunction)                                                        \
   V(OptimizeFunction)                                                          \
   V(InvokeDartCode)                                                            \
+  V(InvokeDartCodeFromBytecode)                                                \
   V(DebugStepCheck)                                                            \
   V(UnlinkedCall)                                                              \
   V(MonomorphicMiss)                                                           \
@@ -52,8 +56,6 @@
   V(OptimizedIdenticalWithNumberCheck)                                         \
   V(ICCallBreakpoint)                                                          \
   V(RuntimeCallBreakpoint)                                                     \
-  V(AllocateArray)                                                             \
-  V(AllocateContext)                                                           \
   V(OneArgCheckInlineCache)                                                    \
   V(TwoArgsCheckInlineCache)                                                   \
   V(SmiAddInlineCache)                                                         \
diff --git a/runtime/vm/stub_code_arm.cc b/runtime/vm/stub_code_arm.cc
index 8f51b9f..a97ccc5 100644
--- a/runtime/vm/stub_code_arm.cc
+++ b/runtime/vm/stub_code_arm.cc
@@ -886,6 +886,10 @@
   __ Ret();
 }
 
+void StubCode::GenerateInvokeDartCodeFromBytecodeStub(Assembler* assembler) {
+  __ Unimplemented("Interpreter not yet supported");
+}
+
 // Called for inline allocation of contexts.
 // Input:
 //   R1: number of context variables.
@@ -1678,6 +1682,10 @@
   __ bx(R2);
 }
 
+void StubCode::GenerateInterpretCallStub(Assembler* assembler) {
+  __ Unimplemented("Interpreter not yet supported");
+}
+
 // R9: Contains an ICData.
 void StubCode::GenerateICCallBreakpointStub(Assembler* assembler) {
   __ EnterStubFrame();
diff --git a/runtime/vm/stub_code_arm64.cc b/runtime/vm/stub_code_arm64.cc
index f0354a8..df85dd5 100644
--- a/runtime/vm/stub_code_arm64.cc
+++ b/runtime/vm/stub_code_arm64.cc
@@ -943,6 +943,10 @@
   __ ret();
 }
 
+void StubCode::GenerateInvokeDartCodeFromBytecodeStub(Assembler* assembler) {
+  __ Unimplemented("Interpreter not yet supported");
+}
+
 // Called for inline allocation of contexts.
 // Input:
 //   R1: number of context variables.
@@ -1725,6 +1729,10 @@
   __ br(R2);
 }
 
+void StubCode::GenerateInterpretCallStub(Assembler* assembler) {
+  __ Unimplemented("Interpreter not yet supported");
+}
+
 // R5: Contains an ICData.
 void StubCode::GenerateICCallBreakpointStub(Assembler* assembler) {
   __ EnterStubFrame();
diff --git a/runtime/vm/stub_code_ia32.cc b/runtime/vm/stub_code_ia32.cc
index a632adf..feb5068 100644
--- a/runtime/vm/stub_code_ia32.cc
+++ b/runtime/vm/stub_code_ia32.cc
@@ -800,6 +800,10 @@
   __ ret();
 }
 
+void StubCode::GenerateInvokeDartCodeFromBytecodeStub(Assembler* assembler) {
+  __ Unimplemented("Interpreter not yet supported");
+}
+
 // Called for inline allocation of contexts.
 // Input:
 // EDX: number of context variables.
@@ -1606,6 +1610,10 @@
   __ jmp(EAX);
 }
 
+void StubCode::GenerateInterpretCallStub(Assembler* assembler) {
+  __ Unimplemented("Interpreter not yet supported");
+}
+
 // ECX: Contains an ICData.
 void StubCode::GenerateICCallBreakpointStub(Assembler* assembler) {
   __ EnterStubFrame();
diff --git a/runtime/vm/stub_code_x64.cc b/runtime/vm/stub_code_x64.cc
index fc8e781..d87f045 100644
--- a/runtime/vm/stub_code_x64.cc
+++ b/runtime/vm/stub_code_x64.cc
@@ -789,9 +789,10 @@
   __ pushq(RAX);
   __ movq(Address(THR, Thread::top_resource_offset()), Immediate(0));
   __ movq(RAX, Address(THR, Thread::top_exit_frame_info_offset()));
+  __ pushq(RAX);
+
   // The constant kExitLinkSlotFromEntryFp must be kept in sync with the
   // code below.
-  __ pushq(RAX);
 #if defined(DEBUG)
   {
     Label ok;
@@ -871,6 +872,146 @@
   __ ret();
 }
 
+// Called when invoking compiled Dart code from interpreted Dart code.
+// Input parameters:
+//   RSP : points to return address.
+//   RDI : target raw code
+//   RSI : arguments raw descriptor array.
+//   RDX : address of first argument.
+//   RCX : current thread.
+void StubCode::GenerateInvokeDartCodeFromBytecodeStub(Assembler* assembler) {
+#if defined(DART_USE_INTERPRETER)
+  // Save frame pointer coming in.
+  __ EnterFrame(0);
+
+  const Register kTargetCodeReg = CallingConventions::kArg1Reg;
+  const Register kArgDescReg = CallingConventions::kArg2Reg;
+  const Register kArg0Reg = CallingConventions::kArg3Reg;
+  const Register kThreadReg = CallingConventions::kArg4Reg;
+
+  // Push code object to PC marker slot.
+  __ pushq(Address(kThreadReg,
+                   Thread::invoke_dart_code_from_bytecode_stub_offset()));
+
+  // At this point, the stack looks like:
+  // | stub code object
+  // | saved RBP                                         | <-- RBP
+  // | saved PC (return to interpreter's InvokeCompiled) |
+
+  const intptr_t kInitialOffset = 2;
+  // Save arguments descriptor array, later replaced by Smi argument count.
+  const intptr_t kArgumentsDescOffset = -(kInitialOffset)*kWordSize;
+  __ pushq(kArgDescReg);
+
+  // Save C++ ABI callee-saved registers.
+  __ PushRegisters(CallingConventions::kCalleeSaveCpuRegisters,
+                   CallingConventions::kCalleeSaveXmmRegisters);
+
+  // If any additional (or fewer) values are pushed, the offsets in
+  // kExitLinkSlotFromEntryFp will need to be changed.
+
+  // Set up THR, which caches the current thread in Dart code.
+  if (THR != kThreadReg) {
+    __ movq(THR, kThreadReg);
+  }
+
+  // Save the current VMTag on the stack.
+  __ movq(RAX, Assembler::VMTagAddress());
+  __ pushq(RAX);
+
+  // Mark that the thread is executing Dart code.
+  __ movq(Assembler::VMTagAddress(), Immediate(VMTag::kDartTagId));
+
+  // Save top resource and top exit frame info. Use RAX as a temporary register.
+  // StackFrameIterator reads the top exit frame info saved in this frame.
+  __ movq(RAX, Address(THR, Thread::top_resource_offset()));
+  __ pushq(RAX);
+  __ movq(Address(THR, Thread::top_resource_offset()), Immediate(0));
+  __ movq(RAX, Address(THR, Thread::top_exit_frame_info_offset()));
+  __ pushq(RAX);
+
+  // The constant kExitLinkSlotFromEntryFp must be kept in sync with the
+  // code below.
+#if defined(DEBUG)
+  {
+    Label ok;
+    __ leaq(RAX, Address(RBP, kExitLinkSlotFromEntryFp * kWordSize));
+    __ cmpq(RAX, RSP);
+    __ j(EQUAL, &ok);
+    __ Stop("kExitLinkSlotFromEntryFp mismatch");
+    __ Bind(&ok);
+  }
+#endif
+
+  __ movq(Address(THR, Thread::top_exit_frame_info_offset()), Immediate(0));
+
+  // Load arguments descriptor array into R10, which is passed to Dart code.
+  __ movq(R10, kArgDescReg);
+
+  // Push arguments. At this point we only need to preserve kTargetCodeReg.
+  ASSERT(kTargetCodeReg != RDX);
+
+  // Load number of arguments into RBX and adjust count for type arguments.
+  __ movq(RBX, FieldAddress(R10, ArgumentsDescriptor::count_offset()));
+  __ cmpq(FieldAddress(R10, ArgumentsDescriptor::type_args_len_offset()),
+          Immediate(0));
+  Label args_count_ok;
+  __ j(EQUAL, &args_count_ok, Assembler::kNearJump);
+  __ addq(RBX, Immediate(Smi::RawValue(1)));  // Include the type arguments.
+  __ Bind(&args_count_ok);
+  // Save number of arguments as Smi on stack, replacing saved ArgumentsDesc.
+  __ movq(Address(RBP, kArgumentsDescOffset), RBX);
+  __ SmiUntag(RBX);
+
+  // Compute address of first argument into RDX.
+  ASSERT(kArg0Reg == RDX);
+
+  // Set up arguments for the Dart call.
+  Label push_arguments;
+  Label done_push_arguments;
+  __ j(ZERO, &done_push_arguments, Assembler::kNearJump);
+  __ LoadImmediate(RAX, Immediate(0));
+  __ Bind(&push_arguments);
+  __ pushq(Address(RDX, RAX, TIMES_8, 0));
+  __ incq(RAX);
+  __ cmpq(RAX, RBX);
+  __ j(LESS, &push_arguments, Assembler::kNearJump);
+  __ Bind(&done_push_arguments);
+
+  // Call the Dart code entrypoint.
+  __ xorq(PP, PP);  // GC-safe value into PP.
+  __ movq(CODE_REG, kTargetCodeReg);
+  __ movq(kTargetCodeReg, FieldAddress(CODE_REG, Code::entry_point_offset()));
+  __ call(kTargetCodeReg);  // R10 is the arguments descriptor array.
+
+  // Read the saved number of passed arguments as Smi.
+  __ movq(RDX, Address(RBP, kArgumentsDescOffset));
+
+  // Get rid of the arguments pushed on the stack. RDX is a Smi
+  // (count << 1), so scaling by TIMES_4 pops count * kWordSize bytes.
+  __ leaq(RSP, Address(RSP, RDX, TIMES_4, 0));
+
+  // Restore the saved top exit frame info and top resource back into the
+  // Isolate structure.
+  __ popq(Address(THR, Thread::top_exit_frame_info_offset()));
+  __ popq(Address(THR, Thread::top_resource_offset()));
+
+  // Restore the current VMTag from the stack.
+  __ popq(Assembler::VMTagAddress());
+
+  // Restore C++ ABI callee-saved registers.
+  __ PopRegisters(CallingConventions::kCalleeSaveCpuRegisters,
+                  CallingConventions::kCalleeSaveXmmRegisters);
+  __ set_constant_pool_allowed(false);
+
+  // Restore the frame pointer.
+  __ LeaveFrame();
+
+  __ ret();
+#else
+  __ Stop("Not using interpreter");
+#endif
+}
+
 // Called for inline allocation of contexts.
 // Input:
 // R10: number of context variables.
@@ -1647,7 +1788,7 @@
 }
 
 // Stub for compiling a function and jumping to the compiled code.
-// RCX: IC-Data (for methods).
+// RBX: IC-Data (for methods).
 // R10: Arguments descriptor.
 // RAX: Function.
 void StubCode::GenerateLazyCompileStub(Assembler* assembler) {
@@ -1661,9 +1802,46 @@
   __ popq(R10);  // Restore arguments descriptor array.
   __ LeaveStubFrame();
 
+  // When using the interpreter, the function's code may now point to the
+  // InterpretCall stub. Make sure RAX, R10, and RBX are preserved.
   __ movq(CODE_REG, FieldAddress(RAX, Function::code_offset()));
-  __ movq(RAX, FieldAddress(RAX, Function::entry_point_offset()));
-  __ jmp(RAX);
+  __ movq(RCX, FieldAddress(RAX, Function::entry_point_offset()));
+  __ jmp(RCX);
+}
+
+// Stub for interpreting a function call.
+// RBX: IC-Data (for methods).
+// R10: Arguments descriptor.
+// RAX: Function.
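+// Packs the original arguments into an array and calls the InterpretCall
+// runtime entry, which executes the function's bytecode.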
+void StubCode::GenerateInterpretCallStub(Assembler* assembler) {
+#if defined(DART_USE_INTERPRETER)
+  __ EnterStubFrame();
+  __ movq(RDI, FieldAddress(R10, ArgumentsDescriptor::count_offset()));
+  __ pushq(Immediate(0));  // Set up space on the stack for the result.
+  __ pushq(RAX);           // Function.
+  __ pushq(RBX);           // ICData/MegamorphicCache.
+  __ pushq(R10);           // Arguments descriptor array.
+
+  // Adjust arguments count.
+  __ cmpq(FieldAddress(R10, ArgumentsDescriptor::type_args_len_offset()),
+          Immediate(0));
+  __ movq(R10, RDI);
+  Label args_count_ok;
+  __ j(EQUAL, &args_count_ok, Assembler::kNearJump);
+  __ addq(R10, Immediate(Smi::RawValue(1)));  // Include the type arguments.
+  __ Bind(&args_count_ok);
+
+  // R10: Smi-tagged arguments array length.
+  PushArrayOfArguments(assembler);
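+  // The values pushed above (function, ICData/MegamorphicCache, arguments
+  // descriptor, and arguments array) are the four arguments of
+  // DEFINE_RUNTIME_ENTRY(InterpretCall, 4).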
+  const intptr_t kNumArgs = 4;
+  __ CallRuntime(kInterpretCallRuntimeEntry, kNumArgs);
+  __ Drop(kNumArgs);
+  __ popq(RAX);  // Return value.
+  __ LeaveStubFrame();
+  __ ret();
+#else
+  __ Stop("Not using interpreter");
+#endif
 }
 
 // RBX: Contains an ICData.
diff --git a/runtime/vm/thread.h b/runtime/vm/thread.h
index 06655b0..802233a 100644
--- a/runtime/vm/thread.h
+++ b/runtime/vm/thread.h
@@ -89,6 +89,8 @@
     StubCode::FixAllocationStubTarget_entry()->code(), NULL)                   \
   V(RawCode*, invoke_dart_code_stub_,                                          \
     StubCode::InvokeDartCode_entry()->code(), NULL)                            \
+  V(RawCode*, invoke_dart_code_from_bytecode_stub_,                            \
+    StubCode::InvokeDartCodeFromBytecode_entry()->code(), NULL)                \
   V(RawCode*, call_to_runtime_stub_, StubCode::CallToRuntime_entry()->code(),  \
     NULL)                                                                      \
   V(RawCode*, monomorphic_miss_stub_,                                          \
@@ -871,6 +873,7 @@
 #undef REUSABLE_FRIEND_DECLARATION
 
   friend class ApiZone;
+  friend class Interpreter;
   friend class InterruptChecker;
   friend class Isolate;
   friend class IsolateTestHelper;
diff --git a/runtime/vm/vm_sources.gni b/runtime/vm/vm_sources.gni
index 65be0c4..f15b129 100644
--- a/runtime/vm/vm_sources.gni
+++ b/runtime/vm/vm_sources.gni
@@ -48,7 +48,9 @@
   "compiler_stats.h",
   "constants_arm.h",
   "constants_arm64.h",
+  "constants_dbc.h",
   "constants_ia32.h",
+  "constants_kbc.h",
   "constants_x64.h",
   "cpu.h",
   "cpu_arm.cc",
@@ -129,6 +131,8 @@
   "instructions_ia32.h",
   "instructions_x64.cc",
   "instructions_x64.h",
+  "interpreter.cc",
+  "interpreter.h",
   "isolate.cc",
   "isolate.h",
   "isolate_reload.cc",
@@ -296,7 +300,9 @@
   "stack_frame.h",
   "stack_frame_arm.h",
   "stack_frame_arm64.h",
+  "stack_frame_dbc",
   "stack_frame_ia32.h",
+  "stack_frame_kbc",
   "stack_frame_x64.h",
   "stack_trace.cc",
   "stack_trace.h",