[vm] Decouple assemblers from runtime.

This is the next step towards preventing the compiler from directly peeking
into the runtime and instead having it interact with the runtime through a
well-defined surface. The goal of the refactoring is to locate all places
where the compiler accesses runtime information and to partition those
accesses into two categories:

- creating objects in the host runtime (e.g. allocating strings, numbers,
etc.) during compilation;
- accessing properties of the target runtime (e.g. offsets of fields) to
embed them into the generated code (both are sketched below).
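
A rough sketch of the two categories, using code that appears in the diff
below (not a complete picture of the new API):

    // Category 1: create an object in the host runtime during compilation,
    // e.g. allocate a String for a code comment through the AllocateString
    // helper (see assembler.cc below).
    comments_.Add(
        new CodeComment(buffer_.GetPosition(), AllocateString(buffer)));

    // Category 2: read a property of the target runtime, e.g. a field
    // offset, and bake it into the generated code (see assembler_arm.cc).
    ldr(reg, FieldAddress(CODE_REG, target::Code::object_pool_offset()));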

This change introduces dart::compiler and dart::compiler::target namespaces.

All code in the compiler will gradually be moved into dart::compiler namespace.
One of the motivations for this change is to be able to prevent access to
globally defined host constants like kWordSize by shadowing them in the
dart::compiler namespace.

The nested namespace dart::compiler::target hosts all information about the
target runtime that the compiler may access, e.g. compiler::target::kWordSize
defines the word size of the target, which will eventually be allowed to
differ from the host word size (defined by dart::kWordSize).
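
A minimal sketch of the shadowing idea (the values and the poisoning
declaration are illustrative only, not the actual definitions):

    namespace dart {
    constexpr int kWordSize = sizeof(void*);  // host word size

    namespace compiler {
    namespace target {
    constexpr int kWordSize = 4;  // target word size, may differ from host
    }  // namespace target

    // Hypothetical poison: this shadowing declaration hides dart::kWordSize,
    // so an unqualified use of kWordSize inside dart::compiler fails to
    // compile and code must name dart::kWordSize or target::kWordSize
    // explicitly.
    class kWordSize;
    }  // namespace compiler
    }  // namespace dart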

The API for compiler-to-runtime interaction is placed into runtime_api.h
(included from assembler.h in the diff below).
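
Judging from the call sites in the diff, that surface includes free functions
roughly like the following (signatures reconstructed from usage, for
illustration only; the header itself is the authority):

    namespace dart {
    namespace compiler {
    // Host-runtime helpers used by the assemblers.
    bool IsOriginalObject(const Object& object);
    bool IsNotTemporaryScopedHandle(const Object& object);
    bool IsInOldSpace(const Object& object);
    intptr_t ObjectHash(const Object& object);
    void SetToNull(Object* object);
    Object& NewZoneHandle(Zone* zone, const Object& object);
    const Object& ToObject(const Function& function);
    const String& AllocateString(const char* buffer);

    namespace target {
    // Target-runtime queries.
    bool CanLoadFromThread(const Object& object, intptr_t* offset = nullptr);
    bool IsSmi(const Object& object);
    intptr_t ToRawSmi(const Object& object);
    }  // namespace target
    }  // namespace compiler
    }  // namespace dart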

Note that we still permit the runtime to access compiler internals directly;
decoupling in that direction is not part of this work.

Issue https://github.com/dart-lang/sdk/issues/31709

Change-Id: If4396d295879391becfa6c38d4802bbff81f5b20
Reviewed-on: https://dart-review.googlesource.com/c/90242
Commit-Queue: Vyacheslav Egorov <vegorov@google.com>
Reviewed-by: Martin Kustermann <kustermann@google.com>
diff --git a/runtime/vm/allocation.h b/runtime/vm/allocation.h
index 420b85e..834b28b 100644
--- a/runtime/vm/allocation.h
+++ b/runtime/vm/allocation.h
@@ -7,13 +7,13 @@
 
 #include "platform/allocation.h"
 #include "platform/assert.h"
-#include "vm/base_isolate.h"
 #include "vm/globals.h"
 
 namespace dart {
 
 // Forward declarations.
 class ThreadState;
+class Zone;
 
 // Stack resources subclass from this base class. The VM will ensure that the
 // destructors of these objects are called before the stack is unwound past the
diff --git a/runtime/vm/bitfield.h b/runtime/vm/bitfield.h
index af2ca2a..7d15b0a 100644
--- a/runtime/vm/bitfield.h
+++ b/runtime/vm/bitfield.h
@@ -5,6 +5,7 @@
 #ifndef RUNTIME_VM_BITFIELD_H_
 #define RUNTIME_VM_BITFIELD_H_
 
+#include "platform/assert.h"
 #include "platform/globals.h"
 
 namespace dart {
@@ -16,6 +17,9 @@
 template <typename S, typename T, int position, int size>
 class BitField {
  public:
+  static_assert((sizeof(S) * kBitsPerByte) >= (position + size),
+                "BitField does not fit into the type.");
+
   static const intptr_t kNextBit = position + size;
 
   // Tells whether the provided value fits into the bit field.
@@ -39,7 +43,6 @@
 
   // Returns an S with the bit field value encoded.
   static S encode(T value) {
-    COMPILE_ASSERT((sizeof(S) * kBitsPerByte) >= (position + size));
     ASSERT(is_valid(value));
     return static_cast<S>(value) << position;
   }
diff --git a/runtime/vm/bitmap.h b/runtime/vm/bitmap.h
index 40a589c..469153b 100644
--- a/runtime/vm/bitmap.h
+++ b/runtime/vm/bitmap.h
@@ -6,15 +6,11 @@
 #define RUNTIME_VM_BITMAP_H_
 
 #include "vm/allocation.h"
-#include "vm/isolate.h"
+#include "vm/thread_state.h"
 #include "vm/zone.h"
 
 namespace dart {
 
-// Forward declarations.
-class RawStackMap;
-class StackMap;
-
 // BitmapBuilder is used to build a bitmap. The implementation is optimized
 // for a dense set of small bit maps without a fixed upper bound (e.g: a
 // pointer map description of a stack).
@@ -23,7 +19,8 @@
   BitmapBuilder()
       : length_(0),
         data_size_in_bytes_(kInitialSizeInBytes),
-        data_(Thread::Current()->zone()->Alloc<uint8_t>(kInitialSizeInBytes)) {
+        data_(ThreadState::Current()->zone()->Alloc<uint8_t>(
+            kInitialSizeInBytes)) {
     memset(data_, 0, kInitialSizeInBytes);
   }
 
diff --git a/runtime/vm/class_id.h b/runtime/vm/class_id.h
new file mode 100644
index 0000000..90b1988
--- /dev/null
+++ b/runtime/vm/class_id.h
@@ -0,0 +1,184 @@
+// Copyright (c) 2019, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#ifndef RUNTIME_VM_CLASS_ID_H_
+#define RUNTIME_VM_CLASS_ID_H_
+
+// This header defines the list of VM implementation classes and their ids.
+//
+// Note: we assume that all builds of Dart VM use exactly the same class ids
+// for these classes.
+
+namespace dart {
+
+#define CLASS_LIST_NO_OBJECT_NOR_STRING_NOR_ARRAY(V)                           \
+  V(Class)                                                                     \
+  V(PatchClass)                                                                \
+  V(Function)                                                                  \
+  V(ClosureData)                                                               \
+  V(SignatureData)                                                             \
+  V(RedirectionData)                                                           \
+  V(Field)                                                                     \
+  V(Script)                                                                    \
+  V(Library)                                                                   \
+  V(Namespace)                                                                 \
+  V(KernelProgramInfo)                                                         \
+  V(Code)                                                                      \
+  V(Bytecode)                                                                  \
+  V(Instructions)                                                              \
+  V(ObjectPool)                                                                \
+  V(PcDescriptors)                                                             \
+  V(CodeSourceMap)                                                             \
+  V(StackMap)                                                                  \
+  V(LocalVarDescriptors)                                                       \
+  V(ExceptionHandlers)                                                         \
+  V(Context)                                                                   \
+  V(ContextScope)                                                              \
+  V(SingleTargetCache)                                                         \
+  V(UnlinkedCall)                                                              \
+  V(ICData)                                                                    \
+  V(MegamorphicCache)                                                          \
+  V(SubtypeTestCache)                                                          \
+  V(Error)                                                                     \
+  V(ApiError)                                                                  \
+  V(LanguageError)                                                             \
+  V(UnhandledException)                                                        \
+  V(UnwindError)                                                               \
+  V(Instance)                                                                  \
+  V(LibraryPrefix)                                                             \
+  V(TypeArguments)                                                             \
+  V(AbstractType)                                                              \
+  V(Type)                                                                      \
+  V(TypeRef)                                                                   \
+  V(TypeParameter)                                                             \
+  V(Closure)                                                                   \
+  V(Number)                                                                    \
+  V(Integer)                                                                   \
+  V(Smi)                                                                       \
+  V(Mint)                                                                      \
+  V(Double)                                                                    \
+  V(Bool)                                                                      \
+  V(GrowableObjectArray)                                                       \
+  V(Float32x4)                                                                 \
+  V(Int32x4)                                                                   \
+  V(Float64x2)                                                                 \
+  V(TypedData)                                                                 \
+  V(ExternalTypedData)                                                         \
+  V(Capability)                                                                \
+  V(ReceivePort)                                                               \
+  V(SendPort)                                                                  \
+  V(StackTrace)                                                                \
+  V(RegExp)                                                                    \
+  V(WeakProperty)                                                              \
+  V(MirrorReference)                                                           \
+  V(LinkedHashMap)                                                             \
+  V(UserTag)
+
+#define CLASS_LIST_ARRAYS(V)                                                   \
+  V(Array)                                                                     \
+  V(ImmutableArray)
+
+#define CLASS_LIST_STRINGS(V)                                                  \
+  V(String)                                                                    \
+  V(OneByteString)                                                             \
+  V(TwoByteString)                                                             \
+  V(ExternalOneByteString)                                                     \
+  V(ExternalTwoByteString)
+
+#define CLASS_LIST_TYPED_DATA(V)                                               \
+  V(Int8Array)                                                                 \
+  V(Uint8Array)                                                                \
+  V(Uint8ClampedArray)                                                         \
+  V(Int16Array)                                                                \
+  V(Uint16Array)                                                               \
+  V(Int32Array)                                                                \
+  V(Uint32Array)                                                               \
+  V(Int64Array)                                                                \
+  V(Uint64Array)                                                               \
+  V(Float32Array)                                                              \
+  V(Float64Array)                                                              \
+  V(Float32x4Array)                                                            \
+  V(Int32x4Array)                                                              \
+  V(Float64x2Array)
+
+#define DART_CLASS_LIST_TYPED_DATA(V)                                          \
+  V(Int8)                                                                      \
+  V(Uint8)                                                                     \
+  V(Uint8Clamped)                                                              \
+  V(Int16)                                                                     \
+  V(Uint16)                                                                    \
+  V(Int32)                                                                     \
+  V(Uint32)                                                                    \
+  V(Int64)                                                                     \
+  V(Uint64)                                                                    \
+  V(Float32)                                                                   \
+  V(Float64)                                                                   \
+  V(Float32x4)                                                                 \
+  V(Int32x4)                                                                   \
+  V(Float64x2)
+
+#define CLASS_LIST_FOR_HANDLES(V)                                              \
+  CLASS_LIST_NO_OBJECT_NOR_STRING_NOR_ARRAY(V)                                 \
+  V(Array)                                                                     \
+  V(String)
+
+#define CLASS_LIST_NO_OBJECT(V)                                                \
+  CLASS_LIST_NO_OBJECT_NOR_STRING_NOR_ARRAY(V)                                 \
+  CLASS_LIST_ARRAYS(V)                                                         \
+  CLASS_LIST_STRINGS(V)
+
+#define CLASS_LIST(V)                                                          \
+  V(Object)                                                                    \
+  CLASS_LIST_NO_OBJECT(V)
+
+enum ClassId {
+  // Illegal class id.
+  kIllegalCid = 0,
+
+  // A sentinel used by the vm service's heap snapshots to represent references
+  // from the stack.
+  kStackCid = 1,
+
+  // The following entries describe classes for pseudo-objects in the heap
+  // that should never be reachable from live objects. Free list elements
+  // maintain the free list for old space, and forwarding corpses are used to
+  // implement one-way become.
+  kFreeListElement,
+  kForwardingCorpse,
+
+// List of Ids for predefined classes.
+#define DEFINE_OBJECT_KIND(clazz) k##clazz##Cid,
+  CLASS_LIST(DEFINE_OBJECT_KIND)
+#undef DEFINE_OBJECT_KIND
+
+// clang-format off
+#define DEFINE_OBJECT_KIND(clazz) kTypedData##clazz##Cid,
+  CLASS_LIST_TYPED_DATA(DEFINE_OBJECT_KIND)
+#undef DEFINE_OBJECT_KIND
+
+#define DEFINE_OBJECT_KIND(clazz) kTypedData##clazz##ViewCid,
+  CLASS_LIST_TYPED_DATA(DEFINE_OBJECT_KIND)
+#undef DEFINE_OBJECT_KIND
+
+  kByteDataViewCid,
+
+#define DEFINE_OBJECT_KIND(clazz) kExternalTypedData##clazz##Cid,
+  CLASS_LIST_TYPED_DATA(DEFINE_OBJECT_KIND)
+#undef DEFINE_OBJECT_KIND
+
+  kByteBufferCid,
+  // clang-format on
+
+  // The following entries do not describe a predefined class, but instead
+  // are class indexes for pre-allocated instances (Null, dynamic and Void).
+  kNullCid,
+  kDynamicCid,
+  kVoidCid,
+
+  kNumPredefinedCids,
+};
+
+}  // namespace dart
+
+#endif  // RUNTIME_VM_CLASS_ID_H_
diff --git a/runtime/vm/clustered_snapshot.cc b/runtime/vm/clustered_snapshot.cc
index 27d1244..3c9d19a 100644
--- a/runtime/vm/clustered_snapshot.cc
+++ b/runtime/vm/clustered_snapshot.cc
@@ -1633,8 +1633,8 @@
     uint8_t* entry_bits = pool->ptr()->entry_bits();
     for (intptr_t i = 0; i < length; i++) {
       auto entry_type = ObjectPool::TypeBits::decode(entry_bits[i]);
-      if ((entry_type == ObjectPool::kTaggedObject) ||
-          (entry_type == ObjectPool::kNativeEntryData)) {
+      if ((entry_type == ObjectPool::EntryType::kTaggedObject) ||
+          (entry_type == ObjectPool::EntryType::kNativeEntryData)) {
         s->Push(pool->ptr()->data()[i].raw_obj_);
       }
     }
@@ -1665,7 +1665,7 @@
         s->Write<uint8_t>(entry_bits[j]);
         RawObjectPool::Entry& entry = pool->ptr()->data()[j];
         switch (ObjectPool::TypeBits::decode(entry_bits[j])) {
-          case ObjectPool::kTaggedObject: {
+          case ObjectPool::EntryType::kTaggedObject: {
 #if !defined(TARGET_ARCH_DBC)
             if ((entry.raw_obj_ == StubCode::CallNoScopeNative().raw()) ||
                 (entry.raw_obj_ == StubCode::CallAutoScopeNative().raw())) {
@@ -1679,11 +1679,11 @@
             s->WriteElementRef(entry.raw_obj_, j);
             break;
           }
-          case ObjectPool::kImmediate: {
+          case ObjectPool::EntryType::kImmediate: {
             s->Write<intptr_t>(entry.raw_value_);
             break;
           }
-          case ObjectPool::kNativeEntryData: {
+          case ObjectPool::EntryType::kNativeEntryData: {
             RawObject* raw = entry.raw_obj_;
             RawTypedData* raw_data = reinterpret_cast<RawTypedData*>(raw);
             // kNativeEntryData object pool entries are for linking natives for
@@ -1699,8 +1699,8 @@
             s->WriteElementRef(raw, j);
             break;
           }
-          case ObjectPool::kNativeFunction:
-          case ObjectPool::kNativeFunctionWrapper: {
+          case ObjectPool::EntryType::kNativeFunction:
+          case ObjectPool::EntryType::kNativeFunctionWrapper: {
             // Write nothing. Will initialize with the lazy link entry.
             break;
           }
@@ -1746,21 +1746,21 @@
         pool->ptr()->entry_bits()[j] = entry_bits;
         RawObjectPool::Entry& entry = pool->ptr()->data()[j];
         switch (ObjectPool::TypeBits::decode(entry_bits)) {
-          case ObjectPool::kNativeEntryData:
-          case ObjectPool::kTaggedObject:
+          case ObjectPool::EntryType::kNativeEntryData:
+          case ObjectPool::EntryType::kTaggedObject:
             entry.raw_obj_ = d->ReadRef();
             break;
-          case ObjectPool::kImmediate:
+          case ObjectPool::EntryType::kImmediate:
             entry.raw_value_ = d->Read<intptr_t>();
             break;
-          case ObjectPool::kNativeFunction: {
+          case ObjectPool::EntryType::kNativeFunction: {
             // Read nothing. Initialize with the lazy link entry.
             uword new_entry = NativeEntry::LinkNativeCallEntry();
             entry.raw_value_ = static_cast<intptr_t>(new_entry);
             break;
           }
 #if defined(TARGET_ARCH_DBC)
-          case ObjectPool::kNativeFunctionWrapper: {
+          case ObjectPool::EntryType::kNativeFunctionWrapper: {
             // Read nothing. Initialize with the lazy link entry.
             uword new_entry = NativeEntry::BootstrapNativeCallWrapperEntry();
             entry.raw_value_ = static_cast<intptr_t>(new_entry);
@@ -5715,12 +5715,13 @@
     auto& entry = Object::Handle(zone);
     auto& smi = Smi::Handle(zone);
     for (intptr_t i = 0; i < pool.Length(); i++) {
-      if (pool.TypeAt(i) == ObjectPool::kTaggedObject) {
+      if (pool.TypeAt(i) == ObjectPool::EntryType::kTaggedObject) {
         entry = pool.ObjectAt(i);
         if (entry.raw() == StubCode::UnlinkedCall().raw()) {
           smi = Smi::FromAlignedAddress(
               StubCode::UnlinkedCall().MonomorphicEntryPoint());
-          pool.SetTypeAt(i, ObjectPool::kImmediate, ObjectPool::kPatchable);
+          pool.SetTypeAt(i, ObjectPool::EntryType::kImmediate,
+                         ObjectPool::Patchability::kPatchable);
           pool.SetObjectAt(i, smi);
         }
       }
diff --git a/runtime/vm/code_entry_kind.h b/runtime/vm/code_entry_kind.h
new file mode 100644
index 0000000..52b6e9e
--- /dev/null
+++ b/runtime/vm/code_entry_kind.h
@@ -0,0 +1,44 @@
+// Copyright (c) 2019, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#ifndef RUNTIME_VM_CODE_ENTRY_KIND_H_
+#define RUNTIME_VM_CODE_ENTRY_KIND_H_
+
+namespace dart {
+
+// Compiled functions might have several different entry points, which either
+// perform additional checking on entry into the function or skip some of the
+// checks normally performed on entry.
+//
+// Which checks are performed and skipped depends on the function and VM mode.
+enum class CodeEntryKind {
+  // Normal entry into the function.
+  //
+  // Usually such entries perform type checks for all parameters that are not
+  // guaranteed to be type checked on the caller side. This can happen if a
+  // parameter's type depends on a type parameter of the enclosing class.
+  kNormal,
+
+  // Unchecked entry into the function.
+  //
+  // These entries usually skip most of the type checks that normal entries
+  // perform and are used when the optimizing compiler can prove that those
+  // checks are not needed at a specific call site.
+  kUnchecked,
+
+  // Monomorphic entry into the function.
+  //
+  // In AOT mode we might patch the call site to directly invoke the target
+  // function, which then validates that it is invoked with the expected type
+  // of the receiver. This validation is handled by the monomorphic entry,
+  // which then falls through to the normal entry.
+  kMonomorphic,
+
+  // Similar to monomorphic entry but with a fallthrough into unchecked entry.
+  kMonomorphicUnchecked,
+};
+
+}  // namespace dart
+
+#endif  // RUNTIME_VM_CODE_ENTRY_KIND_H_
diff --git a/runtime/vm/code_patcher.h b/runtime/vm/code_patcher.h
index 9b31dcd..6aa62af 100644
--- a/runtime/vm/code_patcher.h
+++ b/runtime/vm/code_patcher.h
@@ -12,17 +12,12 @@
 namespace dart {
 
 // Forward declaration.
-class Array;
 class Code;
-class ExternalLabel;
-class Function;
 class ICData;
 class RawArray;
 class RawCode;
 class RawFunction;
-class RawICData;
 class RawObject;
-class String;
 
 // Stack-allocated class to create a scope where the specified region
 // [address, address + size] has write access enabled. This is used
diff --git a/runtime/vm/code_patcher_x64.cc b/runtime/vm/code_patcher_x64.cc
index 1794125..f53cf2a 100644
--- a/runtime/vm/code_patcher_x64.cc
+++ b/runtime/vm/code_patcher_x64.cc
@@ -378,11 +378,13 @@
     } else {
       FATAL1("Failed to decode at %" Px, pc);
     }
-    ASSERT(object_pool_.TypeAt(target_index_) == ObjectPool::kImmediate);
+    ASSERT(object_pool_.TypeAt(target_index_) ==
+           ObjectPool::EntryType::kImmediate);
   }
 
   void SetTarget(const Code& target) const {
-    ASSERT(object_pool_.TypeAt(target_index()) == ObjectPool::kImmediate);
+    ASSERT(object_pool_.TypeAt(target_index()) ==
+           ObjectPool::EntryType::kImmediate);
     object_pool_.SetRawValueAt(target_index(), target.MonomorphicEntryPoint());
   }
 
diff --git a/runtime/vm/compiler/aot/precompiler.cc b/runtime/vm/compiler/aot/precompiler.cc
index 3784b1b..49c5b8f 100644
--- a/runtime/vm/compiler/aot/precompiler.cc
+++ b/runtime/vm/compiler/aot/precompiler.cc
@@ -198,7 +198,7 @@
       // Since we keep the object pool until the end of AOT compilation, it
       // will hang on to its entries until the very end. Therefore we have
       // to use handles which survive that long, so we use [zone_] here.
-      global_object_pool_wrapper_.InitializeWithZone(zone_);
+      global_object_pool_builder_.InitializeWithZone(zone_);
     }
 
     {
@@ -238,50 +238,50 @@
         const Code& code = StubCode::InterpretCall();
         const ObjectPool& stub_pool = ObjectPool::Handle(code.object_pool());
 
-        global_object_pool_wrapper()->Reset();
-        global_object_pool_wrapper()->InitializeFrom(stub_pool);
+        global_object_pool_builder()->Reset();
+        stub_pool.CopyInto(global_object_pool_builder());
 
         // We have two global code objects we need to re-generate with the new
         // global object pool, namely the
         //   - megamorphic miss handler code and the
         //   - build method extractor code
         MegamorphicCacheTable::ReInitMissHandlerCode(
-            isolate_, global_object_pool_wrapper());
+            isolate_, global_object_pool_builder());
 
         auto& stub_code = Code::Handle();
 
         stub_code =
-            StubCode::GetBuildMethodExtractorStub(global_object_pool_wrapper());
+            StubCode::GetBuildMethodExtractorStub(global_object_pool_builder());
         I->object_store()->set_build_method_extractor_code(stub_code);
 
         stub_code =
             StubCode::BuildIsolateSpecificNullErrorSharedWithFPURegsStub(
-                global_object_pool_wrapper());
+                global_object_pool_builder());
         I->object_store()->set_null_error_stub_with_fpu_regs_stub(stub_code);
 
         stub_code =
             StubCode::BuildIsolateSpecificNullErrorSharedWithoutFPURegsStub(
-                global_object_pool_wrapper());
+                global_object_pool_builder());
         I->object_store()->set_null_error_stub_without_fpu_regs_stub(stub_code);
 
         stub_code =
             StubCode::BuildIsolateSpecificStackOverflowSharedWithFPURegsStub(
-                global_object_pool_wrapper());
+                global_object_pool_builder());
         I->object_store()->set_stack_overflow_stub_with_fpu_regs_stub(
             stub_code);
 
         stub_code =
             StubCode::BuildIsolateSpecificStackOverflowSharedWithoutFPURegsStub(
-                global_object_pool_wrapper());
+                global_object_pool_builder());
         I->object_store()->set_stack_overflow_stub_without_fpu_regs_stub(
             stub_code);
 
         stub_code = StubCode::BuildIsolateSpecificWriteBarrierWrappersStub(
-            global_object_pool_wrapper());
+            global_object_pool_builder());
         I->object_store()->set_write_barrier_wrappers_stub(stub_code);
 
         stub_code = StubCode::BuildIsolateSpecificArrayWriteBarrierStub(
-            global_object_pool_wrapper());
+            global_object_pool_builder());
         I->object_store()->set_array_write_barrier_stub(stub_code);
       }
 
@@ -303,10 +303,10 @@
         // Now we generate the actual object pool instance and attach it to the
         // object store. The AOT runtime will use it from there in the enter
         // dart code stub.
-        const auto& pool =
-            ObjectPool::Handle(global_object_pool_wrapper()->MakeObjectPool());
+        const auto& pool = ObjectPool::Handle(
+            ObjectPool::NewFromBuilder(*global_object_pool_builder()));
         I->object_store()->set_global_object_pool(pool);
-        global_object_pool_wrapper()->Reset();
+        global_object_pool_builder()->Reset();
 
         if (FLAG_print_gop) {
           THR_Print("Global object pool:\n");
@@ -527,7 +527,7 @@
 
 void Precompiler::ProcessFunction(const Function& function) {
   const intptr_t gop_offset =
-      FLAG_use_bare_instructions ? global_object_pool_wrapper()->CurrentLength()
+      FLAG_use_bare_instructions ? global_object_pool_builder()->CurrentLength()
                                  : 0;
 
   if (!function.HasCode()) {
@@ -594,9 +594,10 @@
   String& selector = String::Handle(Z);
   if (FLAG_use_bare_instructions) {
     for (intptr_t i = gop_offset;
-         i < global_object_pool_wrapper()->CurrentLength(); i++) {
-      const auto& wrapper_entry = global_object_pool_wrapper()->EntryAt(i);
-      if (wrapper_entry.type() == ObjectPool::kTaggedObject) {
+         i < global_object_pool_builder()->CurrentLength(); i++) {
+      const auto& wrapper_entry = global_object_pool_builder()->EntryAt(i);
+      if (wrapper_entry.type() ==
+          compiler::ObjectPoolBuilderEntry::kTaggedObject) {
         const auto& entry = *wrapper_entry.obj_;
         AddCalleesOfHelper(entry, &selector, &cls);
       }
@@ -605,7 +606,7 @@
     const auto& pool = ObjectPool::Handle(Z, code.object_pool());
     auto& entry = Object::Handle(Z);
     for (intptr_t i = 0; i < pool.Length(); i++) {
-      if (pool.TypeAt(i) == ObjectPool::kTaggedObject) {
+      if (pool.TypeAt(i) == ObjectPool::EntryType::kTaggedObject) {
         entry = pool.ObjectAt(i);
         AddCalleesOfHelper(entry, &selector, &cls);
       }
@@ -881,7 +882,7 @@
         }
         const intptr_t gop_offset =
             FLAG_use_bare_instructions
-                ? global_object_pool_wrapper()->CurrentLength()
+                ? global_object_pool_builder()->CurrentLength()
                 : 0;
         ASSERT(Dart::vm_snapshot_kind() != Snapshot::kFullAOT);
         const Function& initializer =
@@ -2032,7 +2033,9 @@
 
     void SwitchPool(const ObjectPool& pool) {
       for (intptr_t i = 0; i < pool.Length(); i++) {
-        if (pool.TypeAt(i) != ObjectPool::kTaggedObject) continue;
+        if (pool.TypeAt(i) != ObjectPool::EntryType::kTaggedObject) {
+          continue;
+        }
         entry_ = pool.ObjectAt(i);
         if (entry_.IsICData()) {
           // The only IC calls generated by precompilation are for switchable
@@ -2372,12 +2375,12 @@
 
       ASSERT(!FLAG_use_bare_instructions || precompiler_ != nullptr);
 
-      ObjectPoolWrapper object_pool;
-      ObjectPoolWrapper* active_object_pool_wrapper =
+      ObjectPoolBuilder object_pool;
+      ObjectPoolBuilder* active_object_pool_builder =
           FLAG_use_bare_instructions
-              ? precompiler_->global_object_pool_wrapper()
+              ? precompiler_->global_object_pool_builder()
               : &object_pool;
-      Assembler assembler(active_object_pool_wrapper, use_far_branches);
+      Assembler assembler(active_object_pool_builder, use_far_branches);
 
       CodeStatistics* function_stats = NULL;
       if (FLAG_print_instruction_stats) {
diff --git a/runtime/vm/compiler/aot/precompiler.h b/runtime/vm/compiler/aot/precompiler.h
index 9454afc..742a8eb 100644
--- a/runtime/vm/compiler/aot/precompiler.h
+++ b/runtime/vm/compiler/aot/precompiler.h
@@ -247,9 +247,9 @@
     return get_runtime_type_is_unique_;
   }
 
-  ObjectPoolWrapper* global_object_pool_wrapper() {
+  compiler::ObjectPoolBuilder* global_object_pool_builder() {
     ASSERT(FLAG_use_bare_instructions);
-    return &global_object_pool_wrapper_;
+    return &global_object_pool_builder_;
   }
 
   static Precompiler* Instance() { return singleton_; }
@@ -331,7 +331,7 @@
   intptr_t dropped_type_count_;
   intptr_t dropped_library_count_;
 
-  ObjectPoolWrapper global_object_pool_wrapper_;
+  compiler::ObjectPoolBuilder global_object_pool_builder_;
   GrowableObjectArray& libraries_;
   const GrowableObjectArray& pending_functions_;
   SymbolSet sent_selectors_;
diff --git a/runtime/vm/compiler/assembler/assembler.cc b/runtime/vm/compiler/assembler/assembler.cc
index f945e3f..f6c6175 100644
--- a/runtime/vm/compiler/assembler/assembler.cc
+++ b/runtime/vm/compiler/assembler/assembler.cc
@@ -28,6 +28,8 @@
 DEFINE_FLAG(bool, use_far_branches, false, "Enable far branches for ARM.");
 #endif
 
+namespace compiler {
+
 static uword NewContents(intptr_t capacity) {
   Zone* zone = Thread::Current()->zone();
   uword result = zone->AllocUnsafe(capacity);
@@ -163,14 +165,16 @@
   return count;
 }
 
+#if defined(TARGET_ARCH_IA32)
 void AssemblerBuffer::EmitObject(const Object& object) {
   // Since we are going to store the handle as part of the fixup information
   // the handle needs to be a zone handle.
-  ASSERT(object.IsNotTemporaryScopedHandle());
-  ASSERT(object.IsOld());
+  ASSERT(IsNotTemporaryScopedHandle(object));
+  ASSERT(IsInOldSpace(object));
   EmitFixup(new PatchCodeWithHandle(pointer_offsets_, object));
-  cursor_ += kWordSize;  // Reserve space for pointer.
+  cursor_ += target::kWordSize;  // Reserve space for pointer.
 }
+#endif
 
 // Shared macros are implemented here.
 void AssemblerBase::Unimplemented(const char* message) {
@@ -207,8 +211,7 @@
     va_end(args);
 
     comments_.Add(
-        new CodeComment(buffer_.GetPosition(),
-                        String::ZoneHandle(String::New(buffer, Heap::kOld))));
+        new CodeComment(buffer_.GetPosition(), AllocateString(buffer)));
   }
 }
 
@@ -216,46 +219,27 @@
   return FLAG_code_comments || FLAG_disassemble || FLAG_disassemble_optimized;
 }
 
-const Code::Comments& AssemblerBase::GetCodeComments() const {
-  Code::Comments& comments = Code::Comments::New(comments_.length());
-
-  for (intptr_t i = 0; i < comments_.length(); i++) {
-    comments.SetPCOffsetAt(i, comments_[i]->pc_offset());
-    comments.SetCommentAt(i, comments_[i]->comment());
-  }
-
-  return comments;
+#if !defined(TARGET_ARCH_DBC)
+void Assembler::Stop(const char* message) {
+  Comment("Stop: %s", message);
+  Breakpoint();
 }
+#endif
 
 intptr_t ObjIndexPair::Hashcode(Key key) {
-  if (key.type() != ObjectPool::kTaggedObject) {
+  if (key.type() != ObjectPoolBuilderEntry::kTaggedObject) {
     return key.raw_value_;
   }
-  if (key.obj_->IsNull()) {
-    return 2011;
-  }
-  if (key.obj_->IsString() || key.obj_->IsNumber()) {
-    return Instance::Cast(*key.obj_).CanonicalizeHash();
-  }
-  if (key.obj_->IsCode()) {
-    // Instructions don't move during compaction.
-    return Code::Cast(*key.obj_).PayloadStart();
-  }
-  if (key.obj_->IsFunction()) {
-    return Function::Cast(*key.obj_).Hash();
-  }
-  if (key.obj_->IsField()) {
-    return String::HashRawSymbol(Field::Cast(*key.obj_).name());
-  }
-  // Unlikely.
-  return key.obj_->GetClassId();
+
+  return ObjectHash(*key.obj_);
 }
-void ObjectPoolWrapper::Reset() {
+
+void ObjectPoolBuilder::Reset() {
   // Null out the handles we've accumulated.
   for (intptr_t i = 0; i < object_pool_.length(); ++i) {
-    if (object_pool_[i].type() == ObjectPool::kTaggedObject) {
-      *const_cast<Object*>(object_pool_[i].obj_) = Object::null();
-      *const_cast<Object*>(object_pool_[i].equivalence_) = Object::null();
+    if (object_pool_[i].type() == ObjectPoolBuilderEntry::kTaggedObject) {
+      SetToNull(const_cast<Object*>(object_pool_[i].obj_));
+      SetToNull(const_cast<Object*>(object_pool_[i].equivalence_));
     }
   }
 
@@ -263,65 +247,38 @@
   object_pool_index_table_.Clear();
 }
 
-void ObjectPoolWrapper::InitializeFrom(const ObjectPool& other) {
-  ASSERT(object_pool_.length() == 0);
-
-  for (intptr_t i = 0; i < other.Length(); i++) {
-    auto type = other.TypeAt(i);
-    auto patchable = other.PatchableAt(i);
-    switch (type) {
-      case ObjectPool::kTaggedObject: {
-        ObjectPoolWrapperEntry entry(&Object::ZoneHandle(other.ObjectAt(i)),
-                                     patchable);
-        AddObject(entry);
-        break;
-      }
-      case ObjectPool::kImmediate:
-      case ObjectPool::kNativeFunction:
-      case ObjectPool::kNativeFunctionWrapper: {
-        ObjectPoolWrapperEntry entry(other.RawValueAt(i), type, patchable);
-        AddObject(entry);
-        break;
-      }
-      default:
-        UNREACHABLE();
-    }
-  }
-
-  ASSERT(CurrentLength() == other.Length());
+intptr_t ObjectPoolBuilder::AddObject(
+    const Object& obj,
+    ObjectPoolBuilderEntry::Patchability patchable) {
+  ASSERT(IsNotTemporaryScopedHandle(obj));
+  return AddObject(ObjectPoolBuilderEntry(&obj, patchable));
 }
 
-intptr_t ObjectPoolWrapper::AddObject(const Object& obj,
-                                      ObjectPool::Patchability patchable) {
-  ASSERT(obj.IsNotTemporaryScopedHandle());
-  return AddObject(ObjectPoolWrapperEntry(&obj, patchable));
+intptr_t ObjectPoolBuilder::AddImmediate(uword imm) {
+  return AddObject(
+      ObjectPoolBuilderEntry(imm, ObjectPoolBuilderEntry::kImmediate,
+                             ObjectPoolBuilderEntry::kNotPatchable));
 }
 
-intptr_t ObjectPoolWrapper::AddImmediate(uword imm) {
-  return AddObject(ObjectPoolWrapperEntry(imm, ObjectPool::kImmediate,
-                                          ObjectPool::kNotPatchable));
-}
-
-intptr_t ObjectPoolWrapper::AddObject(ObjectPoolWrapperEntry entry) {
-  ASSERT((entry.type() != ObjectPool::kTaggedObject) ||
-         (entry.obj_->IsNotTemporaryScopedHandle() &&
+intptr_t ObjectPoolBuilder::AddObject(ObjectPoolBuilderEntry entry) {
+  ASSERT((entry.type() != ObjectPoolBuilderEntry::kTaggedObject) ||
+         (IsNotTemporaryScopedHandle(*entry.obj_) &&
           (entry.equivalence_ == NULL ||
-           entry.equivalence_->IsNotTemporaryScopedHandle())));
+           IsNotTemporaryScopedHandle(*entry.equivalence_))));
 
-  if (entry.type() == ObjectPool::kTaggedObject) {
+  if (entry.type() == ObjectPoolBuilderEntry::kTaggedObject) {
     // If the owner of the object pool wrapper specified a specific zone we
     // should use, we'll do so.
     if (zone_ != NULL) {
-      entry.obj_ = &Object::ZoneHandle(zone_, entry.obj_->raw());
+      entry.obj_ = &NewZoneHandle(zone_, *entry.obj_);
       if (entry.equivalence_ != NULL) {
-        entry.equivalence_ =
-            &Object::ZoneHandle(zone_, entry.equivalence_->raw());
+        entry.equivalence_ = &NewZoneHandle(zone_, *entry.equivalence_);
       }
     }
   }
 
   object_pool_.Add(entry);
-  if (entry.patchable() == ObjectPool::kNotPatchable) {
+  if (entry.patchable() == ObjectPoolBuilderEntry::kNotPatchable) {
     // The object isn't patchable. Record the index for fast lookup.
     object_pool_index_table_.Insert(
         ObjIndexPair(entry, object_pool_.length() - 1));
@@ -329,10 +286,10 @@
   return object_pool_.length() - 1;
 }
 
-intptr_t ObjectPoolWrapper::FindObject(ObjectPoolWrapperEntry entry) {
+intptr_t ObjectPoolBuilder::FindObject(ObjectPoolBuilderEntry entry) {
   // If the object is not patchable, check if we've already got it in the
   // object pool.
-  if (entry.patchable() == ObjectPool::kNotPatchable) {
+  if (entry.patchable() == ObjectPoolBuilderEntry::kNotPatchable) {
     intptr_t idx = object_pool_index_table_.LookupValue(entry);
     if (idx != ObjIndexPair::kNoIndex) {
       return idx;
@@ -341,54 +298,40 @@
   return AddObject(entry);
 }
 
-intptr_t ObjectPoolWrapper::FindObject(const Object& obj,
-                                       ObjectPool::Patchability patchable) {
-  return FindObject(ObjectPoolWrapperEntry(&obj, patchable));
+intptr_t ObjectPoolBuilder::FindObject(
+    const Object& obj,
+    ObjectPoolBuilderEntry::Patchability patchable) {
+  return FindObject(ObjectPoolBuilderEntry(&obj, patchable));
 }
 
-intptr_t ObjectPoolWrapper::FindObject(const Object& obj,
+intptr_t ObjectPoolBuilder::FindObject(const Object& obj,
                                        const Object& equivalence) {
+  return FindObject(ObjectPoolBuilderEntry(
+      &obj, &equivalence, ObjectPoolBuilderEntry::kNotPatchable));
+}
+
+intptr_t ObjectPoolBuilder::FindImmediate(uword imm) {
   return FindObject(
-      ObjectPoolWrapperEntry(&obj, &equivalence, ObjectPool::kNotPatchable));
+      ObjectPoolBuilderEntry(imm, ObjectPoolBuilderEntry::kImmediate,
+                             ObjectPoolBuilderEntry::kNotPatchable));
 }
 
-intptr_t ObjectPoolWrapper::FindImmediate(uword imm) {
-  return FindObject(ObjectPoolWrapperEntry(imm, ObjectPool::kImmediate,
-                                           ObjectPool::kNotPatchable));
-}
-
-intptr_t ObjectPoolWrapper::FindNativeFunction(
+intptr_t ObjectPoolBuilder::FindNativeFunction(
     const ExternalLabel* label,
-    ObjectPool::Patchability patchable) {
-  return FindObject(ObjectPoolWrapperEntry(
-      label->address(), ObjectPool::kNativeFunction, patchable));
+    ObjectPoolBuilderEntry::Patchability patchable) {
+  return FindObject(ObjectPoolBuilderEntry(
+      label->address(), ObjectPoolBuilderEntry::kNativeFunction, patchable));
 }
 
-intptr_t ObjectPoolWrapper::FindNativeFunctionWrapper(
+intptr_t ObjectPoolBuilder::FindNativeFunctionWrapper(
     const ExternalLabel* label,
-    ObjectPool::Patchability patchable) {
-  return FindObject(ObjectPoolWrapperEntry(
-      label->address(), ObjectPool::kNativeFunctionWrapper, patchable));
+    ObjectPoolBuilderEntry::Patchability patchable) {
+  return FindObject(ObjectPoolBuilderEntry(
+      label->address(), ObjectPoolBuilderEntry::kNativeFunctionWrapper,
+      patchable));
 }
 
-RawObjectPool* ObjectPoolWrapper::MakeObjectPool() {
-  intptr_t len = object_pool_.length();
-  if (len == 0) {
-    return Object::empty_object_pool().raw();
-  }
-  const ObjectPool& result = ObjectPool::Handle(ObjectPool::New(len));
-  for (intptr_t i = 0; i < len; ++i) {
-    auto type = object_pool_[i].type();
-    auto patchable = object_pool_[i].patchable();
-    result.SetTypeAt(i, type, patchable);
-    if (type == ObjectPool::kTaggedObject) {
-      result.SetObjectAt(i, *object_pool_[i].obj_);
-    } else {
-      result.SetRawValueAt(i, object_pool_[i].raw_value_);
-    }
-  }
-  return result.raw();
-}
+}  // namespace compiler
 
 }  // namespace dart
 
diff --git a/runtime/vm/compiler/assembler/assembler.h b/runtime/vm/compiler/assembler/assembler.h
index cf70a99..2a5feb5 100644
--- a/runtime/vm/compiler/assembler/assembler.h
+++ b/runtime/vm/compiler/assembler/assembler.h
@@ -7,11 +7,11 @@
 
 #include "platform/assert.h"
 #include "vm/allocation.h"
+#include "vm/compiler/assembler/object_pool_builder.h"
+#include "vm/compiler/runtime_api.h"
 #include "vm/globals.h"
 #include "vm/growable_array.h"
 #include "vm/hash_map.h"
-#include "vm/object.h"
-#include "vm/thread.h"
 
 namespace dart {
 
@@ -19,11 +19,14 @@
 DECLARE_FLAG(bool, use_far_branches);
 #endif
 
+class MemoryRegion;
+
+namespace compiler {
+
 // Forward declarations.
 class Assembler;
 class AssemblerFixup;
 class AssemblerBuffer;
-class MemoryRegion;
 
 class Label : public ZoneAllocated {
  public:
@@ -45,12 +48,12 @@
   // for unused labels.
   intptr_t Position() const {
     ASSERT(!IsUnused());
-    return IsBound() ? -position_ - kWordSize : position_ - kWordSize;
+    return IsBound() ? -position_ - kBias : position_ - kBias;
   }
 
   intptr_t LinkPosition() const {
     ASSERT(IsLinked());
-    return position_ - kWordSize;
+    return position_ - kBias;
   }
 
   intptr_t NearPosition() {
@@ -69,6 +72,12 @@
 #else
   static const int kMaxUnresolvedBranches = 1;  // Unused on non-Intel.
 #endif
+  // Zero position_ means unused (neither bound nor linked to).
+  // Thus we offset actual positions by the given bias to prevent zero
+  // positions from occurring.
+  // Note: we use target::kWordSize as a bias because on ARM
+  // there are assertions that check that distance is aligned.
+  static constexpr int kBias = 4;
 
   intptr_t position_;
   intptr_t unresolved_;
@@ -79,13 +88,13 @@
   void BindTo(intptr_t position) {
     ASSERT(!IsBound());
     ASSERT(!HasNear());
-    position_ = -position - kWordSize;
+    position_ = -position - kBias;
     ASSERT(IsBound());
   }
 
   void LinkTo(intptr_t position) {
     ASSERT(!IsBound());
-    position_ = position + kWordSize;
+    position_ = position + kBias;
     ASSERT(IsLinked());
   }
 
@@ -185,8 +194,10 @@
     return *pointer_offsets_;
   }
 
+#if defined(TARGET_ARCH_IA32)
   // Emit an object pointer directly in the code.
   void EmitObject(const Object& object);
+#endif
 
   // Emit a fixup at the current location.
   void EmitFixup(AssemblerFixup* fixup) {
@@ -285,173 +296,23 @@
   friend class AssemblerFixup;
 };
 
-struct ObjectPoolWrapperEntry {
-  ObjectPoolWrapperEntry() : raw_value_(), entry_bits_(0), equivalence_() {}
-  ObjectPoolWrapperEntry(const Object* obj, ObjectPool::Patchability patchable)
-      : obj_(obj),
-        entry_bits_(ObjectPool::TypeBits::encode(ObjectPool::kTaggedObject) |
-                    ObjectPool::PatchableBit::encode(patchable)),
-        equivalence_(obj) {}
-  ObjectPoolWrapperEntry(const Object* obj,
-                         const Object* eqv,
-                         ObjectPool::Patchability patchable)
-      : obj_(obj),
-        entry_bits_(ObjectPool::TypeBits::encode(ObjectPool::kTaggedObject) |
-                    ObjectPool::PatchableBit::encode(patchable)),
-        equivalence_(eqv) {}
-  ObjectPoolWrapperEntry(uword value,
-                         ObjectPool::EntryType info,
-                         ObjectPool::Patchability patchable)
-      : raw_value_(value),
-        entry_bits_(ObjectPool::TypeBits::encode(info) |
-                    ObjectPool::PatchableBit::encode(patchable)),
-        equivalence_() {}
-
-  ObjectPool::EntryType type() const {
-    return ObjectPool::TypeBits::decode(entry_bits_);
-  }
-
-  ObjectPool::Patchability patchable() const {
-    return ObjectPool::PatchableBit::decode(entry_bits_);
-  }
-
-  union {
-    const Object* obj_;
-    uword raw_value_;
-  };
-  uint8_t entry_bits_;
-  const Object* equivalence_;
-};
-
-// Pair type parameter for DirectChainedHashMap used for the constant pool.
-class ObjIndexPair {
- public:
-  // Typedefs needed for the DirectChainedHashMap template.
-  typedef ObjectPoolWrapperEntry Key;
-  typedef intptr_t Value;
-  typedef ObjIndexPair Pair;
-
-  static const intptr_t kNoIndex = -1;
-
-  ObjIndexPair()
-      : key_(static_cast<uword>(NULL),
-             ObjectPool::kTaggedObject,
-             ObjectPool::kPatchable),
-        value_(kNoIndex) {}
-
-  ObjIndexPair(Key key, Value value) : value_(value) {
-    key_.entry_bits_ = key.entry_bits_;
-    if (key.type() == ObjectPool::kTaggedObject) {
-      key_.obj_ = key.obj_;
-      key_.equivalence_ = key.equivalence_;
-    } else {
-      key_.raw_value_ = key.raw_value_;
-    }
-  }
-
-  static Key KeyOf(Pair kv) { return kv.key_; }
-
-  static Value ValueOf(Pair kv) { return kv.value_; }
-
-  static intptr_t Hashcode(Key key);
-
-  static inline bool IsKeyEqual(Pair kv, Key key) {
-    if (kv.key_.entry_bits_ != key.entry_bits_) return false;
-    if (kv.key_.type() == ObjectPool::kTaggedObject) {
-      return (kv.key_.obj_->raw() == key.obj_->raw()) &&
-             (kv.key_.equivalence_->raw() == key.equivalence_->raw());
-    }
-    return kv.key_.raw_value_ == key.raw_value_;
-  }
-
- private:
-  Key key_;
-  Value value_;
-};
-
-class ObjectPoolWrapper : public ValueObject {
- public:
-  ObjectPoolWrapper() : zone_(nullptr) {}
-  ~ObjectPoolWrapper() {
-    if (zone_ != nullptr) {
-      Reset();
-      zone_ = nullptr;
-    }
-  }
-
-  // Clears all existing entries in this object pool builder.
-  //
-  // Note: Any code which has been compiled via this builder might use offsets
-  // into the pool which are not correct anymore.
-  void Reset();
-
-  // Initializes this object pool builder from [other].
-  //
-  // All entries from [other] will be populated, including their
-  // kind/patchability bits.
-  void InitializeFrom(const ObjectPool& other);
-
-  // Initialize this object pool builder with a [zone].
-  //
-  // Any objects added later on will be referenced using handles from [zone].
-  void InitializeWithZone(Zone* zone) {
-    ASSERT(object_pool_.length() == 0);
-    ASSERT(zone_ == nullptr && zone != nullptr);
-    zone_ = zone;
-  }
-
-  intptr_t AddObject(
-      const Object& obj,
-      ObjectPool::Patchability patchable = ObjectPool::kNotPatchable);
-  intptr_t AddImmediate(uword imm);
-
-  intptr_t FindObject(
-      const Object& obj,
-      ObjectPool::Patchability patchable = ObjectPool::kNotPatchable);
-  intptr_t FindObject(const Object& obj, const Object& equivalence);
-  intptr_t FindImmediate(uword imm);
-  intptr_t FindNativeFunction(const ExternalLabel* label,
-                              ObjectPool::Patchability patchable);
-  intptr_t FindNativeFunctionWrapper(const ExternalLabel* label,
-                                     ObjectPool::Patchability patchable);
-
-  RawObjectPool* MakeObjectPool();
-
-  intptr_t CurrentLength() { return object_pool_.length(); }
-  ObjectPoolWrapperEntry& EntryAt(intptr_t i) { return object_pool_[i]; }
-
- private:
-  intptr_t AddObject(ObjectPoolWrapperEntry entry);
-  intptr_t FindObject(ObjectPoolWrapperEntry entry);
-
-  // Objects and jump targets.
-  GrowableArray<ObjectPoolWrapperEntry> object_pool_;
-
-  // Hashmap for fast lookup in object pool.
-  DirectChainedHashMap<ObjIndexPair> object_pool_index_table_;
-
-  // The zone used for allocating the handles we keep in the map and array (or
-  // NULL, in which case allocations happen using the zone active at the point
-  // of insertion).
-  Zone* zone_;
-};
-
 enum RestorePP { kRestoreCallerPP, kKeepCalleePP };
 
 class AssemblerBase : public StackResource {
  public:
-  explicit AssemblerBase(ObjectPoolWrapper* object_pool_wrapper)
-      : StackResource(Thread::Current()),
+  explicit AssemblerBase(ObjectPoolBuilder* object_pool_builder)
+      : StackResource(ThreadState::Current()),
         prologue_offset_(-1),
         has_single_entry_point_(true),
-        object_pool_wrapper_(object_pool_wrapper) {}
+        object_pool_builder_(object_pool_builder) {}
   virtual ~AssemblerBase() {}
 
   intptr_t CodeSize() const { return buffer_.Size(); }
 
   uword CodeAddress(intptr_t offset) { return buffer_.Address(offset); }
 
-  ObjectPoolWrapper& object_pool_wrapper() { return *object_pool_wrapper_; }
+  bool HasObjectPoolBuilder() const { return object_pool_builder_ != nullptr; }
+  ObjectPoolBuilder& object_pool_builder() { return *object_pool_builder_; }
 
   intptr_t prologue_offset() const { return prologue_offset_; }
   bool has_single_entry_point() const { return has_single_entry_point_; }
@@ -459,8 +320,6 @@
   void Comment(const char* format, ...) PRINTF_ATTRIBUTE(2, 3);
   static bool EmittingComments();
 
-  const Code::Comments& GetCodeComments() const;
-
   void Unimplemented(const char* message);
   void Untested(const char* message);
   void Unreachable(const char* message);
@@ -478,19 +337,6 @@
     return buffer_.pointer_offsets();
   }
 
-  RawObjectPool* MakeObjectPool() {
-    if (object_pool_wrapper_ != nullptr) {
-      return object_pool_wrapper_->MakeObjectPool();
-    }
-    return ObjectPool::null();
-  }
-
- protected:
-  AssemblerBuffer buffer_;  // Contains position independent code.
-  int32_t prologue_offset_;
-  bool has_single_entry_point_;
-
- private:
   class CodeComment : public ZoneAllocated {
    public:
     CodeComment(intptr_t pc_offset, const String& comment)
@@ -506,10 +352,20 @@
     DISALLOW_COPY_AND_ASSIGN(CodeComment);
   };
 
+  const GrowableArray<CodeComment*>& comments() const { return comments_; }
+
+ protected:
+  AssemblerBuffer buffer_;  // Contains position independent code.
+  int32_t prologue_offset_;
+  bool has_single_entry_point_;
+
+ private:
   GrowableArray<CodeComment*> comments_;
-  ObjectPoolWrapper* object_pool_wrapper_;
+  ObjectPoolBuilder* object_pool_builder_;
 };
 
+}  // namespace compiler
+
 }  // namespace dart
 
 #if defined(TARGET_ARCH_IA32)
@@ -526,4 +382,11 @@
 #error Unknown architecture.
 #endif
 
+namespace dart {
+using compiler::Assembler;
+using compiler::ExternalLabel;
+using compiler::Label;
+using compiler::ObjectPoolBuilder;
+}  // namespace dart
+
 #endif  // RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_H_
diff --git a/runtime/vm/compiler/assembler/assembler_arm.cc b/runtime/vm/compiler/assembler/assembler_arm.cc
index 5a58868..d441d61 100644
--- a/runtime/vm/compiler/assembler/assembler_arm.cc
+++ b/runtime/vm/compiler/assembler/assembler_arm.cc
@@ -5,15 +5,13 @@
 #include "vm/globals.h"  // NOLINT
 #if defined(TARGET_ARCH_ARM) && !defined(DART_PRECOMPILED_RUNTIME)
 
+#define SHOULD_NOT_INCLUDE_RUNTIME
+
+#include "vm/class_id.h"
 #include "vm/compiler/assembler/assembler.h"
 #include "vm/compiler/backend/locations.h"
 #include "vm/cpu.h"
 #include "vm/instructions.h"
-#include "vm/longjump.h"
-#include "vm/runtime_entry.h"
-#include "vm/simulator.h"
-#include "vm/stack_frame.h"
-#include "vm/stub_code.h"
 
 // An extra check since we are assuming the existence of /proc/cpuinfo below.
 #if !defined(USING_SIMULATOR) && !defined(__linux__) && !defined(ANDROID) &&   \
@@ -27,9 +25,26 @@
 DECLARE_FLAG(bool, inline_alloc);
 DECLARE_FLAG(bool, precompiled_mode);
 
-Assembler::Assembler(ObjectPoolWrapper* object_pool_wrapper,
+namespace compiler {
+
+#ifndef PRODUCT
+using target::ClassHeapStats;
+#endif
+using target::ClassTable;
+using target::Double;
+using target::Float32x4;
+using target::Float64x2;
+using target::Heap;
+using target::Instance;
+using target::Instructions;
+using target::Isolate;
+using target::ObjectPool;
+using target::RawObject;
+using target::Thread;
+
+Assembler::Assembler(ObjectPoolBuilder* object_pool_builder,
                      bool use_far_branches)
-    : AssemblerBase(object_pool_wrapper),
+    : AssemblerBase(object_pool_builder),
       use_far_branches_(use_far_branches),
       constant_pool_allowed_(false) {
   generate_invoke_write_barrier_wrapper_ = [&](Condition cond, Register reg) {
@@ -37,7 +52,7 @@
         cond);
     blx(LR, cond);
   };
-  invoke_array_write_barrier_ = [&](Condition cond) {
+  generate_invoke_array_write_barrier_ = [&](Condition cond) {
     ldr(LR, Address(THR, Thread::array_write_barrier_entry_point_offset()),
         cond);
     blx(LR, cond);
@@ -469,7 +484,7 @@
   ASSERT(rd2 == rd + 1);
   if (TargetCPUFeatures::arm_version() == ARMv5TE) {
     ldr(rd, Address(rn, offset), cond);
-    ldr(rd2, Address(rn, offset + kWordSize), cond);
+    ldr(rd2, Address(rn, offset + target::kWordSize), cond);
   } else {
     EmitMemOpAddressMode3(cond, B7 | B6 | B4, rd, Address(rn, offset));
   }
@@ -484,7 +499,7 @@
   ASSERT(rd2 == rd + 1);
   if (TargetCPUFeatures::arm_version() == ARMv5TE) {
     str(rd, Address(rn, offset), cond);
-    str(rd2, Address(rn, offset + kWordSize), cond);
+    str(rd2, Address(rn, offset + target::kWordSize), cond);
   } else {
     EmitMemOpAddressMode3(cond, B7 | B6 | B5 | B4, rd, Address(rn, offset));
   }
@@ -1391,12 +1406,12 @@
 void Assembler::Drop(intptr_t stack_elements) {
   ASSERT(stack_elements >= 0);
   if (stack_elements > 0) {
-    AddImmediate(SP, stack_elements * kWordSize);
+    AddImmediate(SP, stack_elements * target::kWordSize);
   }
 }
 
 intptr_t Assembler::FindImmediate(int32_t imm) {
-  return object_pool_wrapper().FindImmediate(imm);
+  return object_pool_builder().FindImmediate(imm);
 }
 
 // Uses a code sequence that can easily be decoded.
@@ -1442,7 +1457,7 @@
                           Instructions::HeaderSize() - kHeapObjectTag;
   mov(R0, Operand(PC));
   AddImmediate(R0, -offset);
-  ldr(IP, FieldAddress(CODE_REG, Code::saved_instructions_offset()));
+  ldr(IP, FieldAddress(CODE_REG, target::Code::saved_instructions_offset()));
   cmp(R0, Operand(IP));
   b(&instructions_ok, EQ);
   bkpt(1);
@@ -1453,14 +1468,15 @@
 }
 
 void Assembler::RestoreCodePointer() {
-  ldr(CODE_REG, Address(FP, compiler_frame_layout.code_from_fp * kWordSize));
+  ldr(CODE_REG,
+      Address(FP, target::frame_layout.code_from_fp * target::kWordSize));
   CheckCodePointer();
 }
 
 void Assembler::LoadPoolPointer(Register reg) {
   // Load new pool pointer.
   CheckCodePointer();
-  ldr(reg, FieldAddress(CODE_REG, Code::object_pool_offset()));
+  ldr(reg, FieldAddress(CODE_REG, target::Code::object_pool_offset()));
   set_constant_pool_allowed(reg == PP);
 }
 
@@ -1469,15 +1485,14 @@
 }
 
 bool Assembler::CanLoadFromObjectPool(const Object& object) const {
-  ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
-  ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
-  ASSERT(!Thread::CanLoadFromThread(object));
+  ASSERT(IsOriginalObject(object));
+  ASSERT(!target::CanLoadFromThread(object));
   if (!constant_pool_allowed()) {
     return false;
   }
 
-  ASSERT(object.IsNotTemporaryScopedHandle());
-  ASSERT(object.IsOld());
+  ASSERT(IsNotTemporaryScopedHandle(object));
+  ASSERT(IsInOldSpace(object));
   return true;
 }
 
@@ -1486,21 +1501,21 @@
                                  Condition cond,
                                  bool is_unique,
                                  Register pp) {
-  ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
-  ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
-  if (Thread::CanLoadFromThread(object)) {
+  ASSERT(IsOriginalObject(object));
+  intptr_t offset = 0;
+  if (target::CanLoadFromThread(object, &offset)) {
     // Load common VM constants from the thread. This works also in places where
     // no constant pool is set up (e.g. intrinsic code).
-    ldr(rd, Address(THR, Thread::OffsetFromThread(object)), cond);
-  } else if (object.IsSmi()) {
+    ldr(rd, Address(THR, offset), cond);
+  } else if (target::IsSmi(object)) {
     // Relocation doesn't apply to Smis.
-    LoadImmediate(rd, reinterpret_cast<int32_t>(object.raw()), cond);
+    LoadImmediate(rd, target::ToRawSmi(object), cond);
   } else if (CanLoadFromObjectPool(object)) {
     // Make sure that class CallPattern is able to decode this load from the
     // object pool.
     const int32_t offset = ObjectPool::element_offset(
-        is_unique ? object_pool_wrapper().AddObject(object)
-                  : object_pool_wrapper().FindObject(object));
+        is_unique ? object_pool_builder().AddObject(object)
+                  : object_pool_builder().FindObject(object));
     LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, pp, cond);
   } else {
     UNREACHABLE();
@@ -1520,33 +1535,31 @@
 void Assembler::LoadFunctionFromCalleePool(Register dst,
                                            const Function& function,
                                            Register new_pp) {
-  const int32_t offset =
-      ObjectPool::element_offset(object_pool_wrapper().FindObject(function));
+  const int32_t offset = ObjectPool::element_offset(
+      object_pool_builder().FindObject(ToObject(function)));
   LoadWordFromPoolOffset(dst, offset - kHeapObjectTag, new_pp, AL);
 }
 
 void Assembler::LoadNativeEntry(Register rd,
                                 const ExternalLabel* label,
-                                ObjectPool::Patchability patchable,
+                                ObjectPoolBuilderEntry::Patchability patchable,
                                 Condition cond) {
   const int32_t offset = ObjectPool::element_offset(
-      object_pool_wrapper().FindNativeFunction(label, patchable));
+      object_pool_builder().FindNativeFunction(label, patchable));
   LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, PP, cond);
 }
 
 void Assembler::PushObject(const Object& object) {
-  ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
-  ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
+  ASSERT(IsOriginalObject(object));
   LoadObject(IP, object);
   Push(IP);
 }
 
 void Assembler::CompareObject(Register rn, const Object& object) {
-  ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
-  ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
+  ASSERT(IsOriginalObject(object));
   ASSERT(rn != IP);
-  if (object.IsSmi()) {
-    CompareImmediate(rn, reinterpret_cast<int32_t>(object.raw()));
+  if (target::IsSmi(object)) {
+    CompareImmediate(rn, target::ToRawSmi(object));
   } else {
     LoadObject(IP, object);
     cmp(rn, Operand(IP));
@@ -1559,12 +1572,15 @@
                                       Label* label,
                                       CanBeSmi value_can_be_smi,
                                       BarrierFilterMode how_to_jump) {
-  COMPILE_ASSERT((kNewObjectAlignmentOffset == kWordSize) &&
-                 (kOldObjectAlignmentOffset == 0));
+  COMPILE_ASSERT((target::ObjectAlignment::kNewObjectAlignmentOffset ==
+                  target::kWordSize) &&
+                 (target::ObjectAlignment::kOldObjectAlignmentOffset == 0));
   // For the value we are only interested in the new/old bit and the tag bit.
   // AND the new bit with the tag bit. The resulting bit will be 0 for a Smi.
   if (value_can_be_smi == kValueCanBeSmi) {
-    and_(IP, value, Operand(value, LSL, kObjectAlignmentLog2 - 1));
+    and_(
+        IP, value,
+        Operand(value, LSL, target::ObjectAlignment::kObjectAlignmentLog2 - 1));
     // And the result with the negated space bit of the object.
     bic(IP, IP, Operand(object));
   } else {
@@ -1576,7 +1592,7 @@
 #endif
     bic(IP, value, Operand(object));
   }
-  tst(IP, Operand(kNewObjectAlignmentOffset));
+  tst(IP, Operand(target::ObjectAlignment::kNewObjectAlignmentOffset));
   if (how_to_jump != kNoJump) {
     b(label, how_to_jump == kJumpToNoUpdate ? EQ : NE);
   }
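
The filter works because a tagged heap pointer carries its space in one address bit: new-space objects sit kWordSize past double-word alignment, old-space objects sit exactly on it, and the tag bit distinguishes pointers from Smis. Shifting the tag bit up onto the new-space bit and masking with the negated object turns "value is a new-space object and object is old" into a single bit test. A host-side check of the arithmetic, using illustrative 32-bit constants (the real ones come from target::ObjectAlignment):

  #include <cassert>
  #include <cstdint>

  constexpr uint32_t kWordSize = 4;
  constexpr uint32_t kObjectAlignmentLog2 = 3;       // 8-byte alignment
  constexpr uint32_t kNewObjectAlignmentOffset = 4;  // == kWordSize
  constexpr uint32_t kHeapObjectTag = 1;

  // Mirrors: and_(IP, value, Operand(value, LSL, kObjectAlignmentLog2 - 1));
  //          bic(IP, IP, Operand(object));
  //          tst(IP, Operand(kNewObjectAlignmentOffset));
  bool BarrierNeeded(uint32_t object, uint32_t value) {
    uint32_t ip = value & (value << (kObjectAlignmentLog2 - 1));
    ip &= ~object;  // clear the bit if the object is itself in new space
    return (ip & kNewObjectAlignmentOffset) != 0;
  }

  int main() {
    const uint32_t old_obj = 0x1000 | kHeapObjectTag;                // old space
    const uint32_t new_obj = (0x2000 + kWordSize) | kHeapObjectTag;  // new space
    const uint32_t smi = 0x42 << 1;                                  // tag bit 0
    assert(BarrierNeeded(old_obj, new_obj));   // new value into old object
    assert(!BarrierNeeded(new_obj, new_obj));  // new->new: skip
    assert(!BarrierNeeded(old_obj, smi));      // Smi value: skip
    assert(!BarrierNeeded(old_obj, old_obj));  // old->old: skip
    return 0;
  }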
@@ -1627,8 +1643,8 @@
     BranchIfSmi(value, &done);
   }
   if (!lr_reserved) Push(LR);
-  ldrb(TMP, FieldAddress(object, Object::tags_offset()));
-  ldrb(LR, FieldAddress(value, Object::tags_offset()));
+  ldrb(TMP, FieldAddress(object, target::Object::tags_offset()));
+  ldrb(LR, FieldAddress(value, target::Object::tags_offset()));
   and_(TMP, LR, Operand(TMP, LSR, RawObject::kBarrierOverlapShift));
   ldr(LR, Address(THR, Thread::write_barrier_mask_offset()));
   tst(TMP, Operand(LR));
@@ -1691,8 +1707,8 @@
     BranchIfSmi(value, &done);
   }
   if (!lr_reserved) Push(LR);
-  ldrb(TMP, FieldAddress(object, Object::tags_offset()));
-  ldrb(LR, FieldAddress(value, Object::tags_offset()));
+  ldrb(TMP, FieldAddress(object, target::Object::tags_offset()));
+  ldrb(LR, FieldAddress(value, target::Object::tags_offset()));
   and_(TMP, LR, Operand(TMP, LSR, RawObject::kBarrierOverlapShift));
   ldr(LR, Address(THR, Thread::write_barrier_mask_offset()));
   tst(TMP, Operand(LR));
@@ -1704,7 +1720,7 @@
     // allocator.
     UNIMPLEMENTED();
   }
-  invoke_array_write_barrier_(NE);
+  generate_invoke_array_write_barrier_(NE);
   if (!lr_reserved) Pop(LR);
   Bind(&done);
 }
@@ -1740,10 +1756,8 @@
 void Assembler::StoreIntoObjectNoBarrier(Register object,
                                          const Address& dest,
                                          const Object& value) {
-  ASSERT(!value.IsICData() || ICData::Cast(value).IsOriginal());
-  ASSERT(!value.IsField() || Field::Cast(value).IsOriginal());
-  ASSERT(value.IsSmi() || value.InVMHeap() ||
-         (value.IsOld() && value.IsNotTemporaryScopedHandle()));
+  ASSERT(IsOriginalObject(value));
+  ASSERT(IsNotTemporaryScopedHandle(value));
   // No store buffer update.
   LoadObject(IP, value);
   str(IP, dest);
@@ -1767,8 +1781,7 @@
 void Assembler::StoreIntoObjectNoBarrierOffset(Register object,
                                                int32_t offset,
                                                const Object& value) {
-  ASSERT(!value.IsICData() || ICData::Cast(value).IsOriginal());
-  ASSERT(!value.IsField() || Field::Cast(value).IsOriginal());
+  ASSERT(IsOriginalObject(value));
   int32_t ignored = 0;
   if (Address::CanHoldStoreOffset(kWord, offset - kHeapObjectTag, &ignored)) {
     StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value);
@@ -1789,11 +1802,11 @@
   ASSERT(value_odd == value_even + 1);
   Label init_loop;
   Bind(&init_loop);
-  AddImmediate(begin, 2 * kWordSize);
+  AddImmediate(begin, 2 * target::kWordSize);
   cmp(begin, Operand(end));
-  strd(value_even, value_odd, begin, -2 * kWordSize, LS);
+  strd(value_even, value_odd, begin, -2 * target::kWordSize, LS);
   b(&init_loop, CC);
-  str(value_even, Address(begin, -2 * kWordSize), HI);
+  str(value_even, Address(begin, -2 * target::kWordSize), HI);
 #if defined(DEBUG)
   Label done;
   StoreIntoObjectFilter(object, value_even, &done, kValueCanBeSmi,
@@ -1814,13 +1827,13 @@
                                                   Register value_odd) {
   ASSERT(value_odd == value_even + 1);
   intptr_t current_offset = begin_offset;
-  while (current_offset + kWordSize < end_offset) {
+  while (current_offset + target::kWordSize < end_offset) {
     strd(value_even, value_odd, base, current_offset);
-    current_offset += 2 * kWordSize;
+    current_offset += 2 * target::kWordSize;
   }
   while (current_offset < end_offset) {
     str(value_even, Address(base, current_offset));
-    current_offset += kWordSize;
+    current_offset += target::kWordSize;
   }
 #if defined(DEBUG)
   Label done;
@@ -1849,7 +1862,7 @@
   ASSERT(RawObject::kClassIdTagPos == 16);
   ASSERT(RawObject::kClassIdTagSize == 16);
   const intptr_t class_id_offset =
-      Object::tags_offset() + RawObject::kClassIdTagPos / kBitsPerByte;
+      target::Object::tags_offset() + RawObject::kClassIdTagPos / kBitsPerByte;
   ldrh(result, FieldAddress(object, class_id_offset), cond);
 }
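
Since the class id occupies bits 16..31 of the tag word (per the asserts above), a halfword load at byte offset kClassIdTagPos / kBitsPerByte extracts it directly on a little-endian target. A quick check of that equivalence (assumes a little-endian host, matching the little-endian target the generated code runs on):

  #include <cassert>
  #include <cstdint>
  #include <cstring>

  int main() {
    const uint32_t kClassIdTagPos = 16;  // asserted in the code above
    const uint32_t kBitsPerByte = 8;
    const uint32_t tags = (1234u << kClassIdTagPos) | 0xBEEF;  // cid = 1234

    // ldrh at byte offset kClassIdTagPos / kBitsPerByte (== 2) reads
    // exactly bits 16..31 on a little-endian machine.
    uint16_t cid;
    memcpy(&cid, reinterpret_cast<const uint8_t*>(&tags) +
                     kClassIdTagPos / kBitsPerByte, sizeof(cid));
    assert(cid == 1234);
    return 0;
  }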
 
@@ -1859,7 +1872,7 @@
   const intptr_t offset =
       Isolate::class_table_offset() + ClassTable::table_offset();
   LoadFromOffset(kWord, result, result, offset);
-  ldr(result, Address(result, class_id, LSL, kSizeOfClassPairLog2));
+  ldr(result, Address(result, class_id, LSL, ClassTable::kSizeOfClassPairLog2));
 }
 
 void Assembler::CompareClassId(Register object,
@@ -1883,7 +1896,7 @@
 void Assembler::BailoutIfInvalidBranchOffset(int32_t offset) {
   if (!CanEncodeBranchDistance(offset)) {
     ASSERT(!use_far_branches());
-    Thread::Current()->long_jump_base()->Jump(1, Object::branch_offset_error());
+    BailoutWithBranchOffsetError();
   }
 }
 
@@ -2317,11 +2330,11 @@
 }
 
 void Assembler::Push(Register rd, Condition cond) {
-  str(rd, Address(SP, -kWordSize, Address::PreIndex), cond);
+  str(rd, Address(SP, -target::kWordSize, Address::PreIndex), cond);
 }
 
 void Assembler::Pop(Register rd, Condition cond) {
-  ldr(rd, Address(SP, kWordSize, Address::PostIndex), cond);
+  ldr(rd, Address(SP, target::kWordSize, Address::PostIndex), cond);
 }
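
Push and Pop lean on the ARM pre-index and post-index writeback modes: the push adjusts SP before the store, the pop loads before adjusting. A tiny simulation of the two writeback orders over a byte array, assuming target::kWordSize == 4 (names here are illustrative):

  #include <cassert>
  #include <cstdint>
  #include <cstring>

  constexpr int kWordSize = 4;
  constexpr int kStackSize = 64;

  // str rd, [sp, #-4]!  -- pre-index: writeback first, then store.
  void PushWord(uint8_t* stack, int* sp, uint32_t rd) {
    *sp -= kWordSize;
    memcpy(stack + *sp, &rd, kWordSize);
  }

  // ldr rd, [sp], #4  -- post-index: load first, then writeback.
  uint32_t PopWord(const uint8_t* stack, int* sp) {
    uint32_t rd;
    memcpy(&rd, stack + *sp, kWordSize);
    *sp += kWordSize;
    return rd;
  }

  int main() {
    uint8_t stack[kStackSize];
    int sp = kStackSize;
    PushWord(stack, &sp, 0xCAFE);
    assert(sp == kStackSize - kWordSize);
    assert(PopWord(stack, &sp) == 0xCAFE && sp == kStackSize);
    return 0;
  }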
 
 void Assembler::PushList(RegList regs, Condition cond) {
@@ -2538,13 +2551,13 @@
 }
 
 void Assembler::Branch(const Code& target,
-                       ObjectPool::Patchability patchable,
+                       ObjectPoolBuilderEntry::Patchability patchable,
                        Register pp,
                        Condition cond) {
   const int32_t offset = ObjectPool::element_offset(
-      object_pool_wrapper().FindObject(target, patchable));
+      object_pool_builder().FindObject(ToObject(target), patchable));
   LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag, pp, cond);
-  Branch(FieldAddress(CODE_REG, Code::entry_point_offset()), cond);
+  Branch(FieldAddress(CODE_REG, target::Code::entry_point_offset()), cond);
 }
 
 void Assembler::Branch(const Address& address, Condition cond) {
@@ -2552,22 +2565,22 @@
 }
 
 void Assembler::BranchLink(const Code& target,
-                           ObjectPool::Patchability patchable,
-                           Code::EntryKind entry_kind) {
+                           ObjectPoolBuilderEntry::Patchability patchable,
+                           CodeEntryKind entry_kind) {
   // Make sure that class CallPattern is able to patch the label referred
   // to by this code sequence.
   // For added code robustness, use 'blx lr' in a patchable sequence and
   // use 'blx ip' in a non-patchable sequence (see other BranchLink flavors).
   const int32_t offset = ObjectPool::element_offset(
-      object_pool_wrapper().FindObject(target, patchable));
+      object_pool_builder().FindObject(ToObject(target), patchable));
   LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag, PP, AL);
-  ldr(LR, FieldAddress(CODE_REG, Code::entry_point_offset(entry_kind)));
+  ldr(LR, FieldAddress(CODE_REG, target::Code::entry_point_offset(entry_kind)));
   blx(LR);  // Use blx instruction so that the return branch prediction works.
 }
 
 void Assembler::BranchLinkPatchable(const Code& target,
-                                    Code::EntryKind entry_kind) {
-  BranchLink(target, ObjectPool::kPatchable, entry_kind);
+                                    CodeEntryKind entry_kind) {
+  BranchLink(target, ObjectPoolBuilderEntry::kPatchable, entry_kind);
 }
 
 void Assembler::BranchLinkToRuntime() {
@@ -2586,15 +2599,15 @@
 
 void Assembler::BranchLinkWithEquivalence(const Code& target,
                                           const Object& equivalence,
-                                          Code::EntryKind entry_kind) {
+                                          CodeEntryKind entry_kind) {
   // Make sure that class CallPattern is able to patch the label referred
   // to by this code sequence.
   // For added code robustness, use 'blx lr' in a patchable sequence and
   // use 'blx ip' in a non-patchable sequence (see other BranchLink flavors).
   const int32_t offset = ObjectPool::element_offset(
-      object_pool_wrapper().FindObject(target, equivalence));
+      object_pool_builder().FindObject(ToObject(target), equivalence));
   LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag, PP, AL);
-  ldr(LR, FieldAddress(CODE_REG, Code::entry_point_offset(entry_kind)));
+  ldr(LR, FieldAddress(CODE_REG, target::Code::entry_point_offset(entry_kind)));
   blx(LR);  // Use blx instruction so that the return branch prediction works.
 }
 
@@ -2838,10 +2851,10 @@
   } else {
     LoadFromOffset(kWord, tmp1, src, Double::value_offset() - kHeapObjectTag);
     LoadFromOffset(kWord, tmp2, src,
-                   Double::value_offset() + kWordSize - kHeapObjectTag);
+                   Double::value_offset() + target::kWordSize - kHeapObjectTag);
     StoreToOffset(kWord, tmp1, dst, Double::value_offset() - kHeapObjectTag);
     StoreToOffset(kWord, tmp2, dst,
-                  Double::value_offset() + kWordSize - kHeapObjectTag);
+                  Double::value_offset() + target::kWordSize - kHeapObjectTag);
   }
 }
 
@@ -2858,25 +2871,29 @@
   } else {
     LoadFromOffset(
         kWord, tmp1, src,
-        (Float32x4::value_offset() + 0 * kWordSize) - kHeapObjectTag);
+        (Float32x4::value_offset() + 0 * target::kWordSize) - kHeapObjectTag);
     LoadFromOffset(
         kWord, tmp2, src,
-        (Float32x4::value_offset() + 1 * kWordSize) - kHeapObjectTag);
-    StoreToOffset(kWord, tmp1, dst,
-                  (Float32x4::value_offset() + 0 * kWordSize) - kHeapObjectTag);
-    StoreToOffset(kWord, tmp2, dst,
-                  (Float32x4::value_offset() + 1 * kWordSize) - kHeapObjectTag);
+        (Float32x4::value_offset() + 1 * target::kWordSize) - kHeapObjectTag);
+    StoreToOffset(
+        kWord, tmp1, dst,
+        (Float32x4::value_offset() + 0 * target::kWordSize) - kHeapObjectTag);
+    StoreToOffset(
+        kWord, tmp2, dst,
+        (Float32x4::value_offset() + 1 * target::kWordSize) - kHeapObjectTag);
 
     LoadFromOffset(
         kWord, tmp1, src,
-        (Float32x4::value_offset() + 2 * kWordSize) - kHeapObjectTag);
+        (Float32x4::value_offset() + 2 * target::kWordSize) - kHeapObjectTag);
     LoadFromOffset(
         kWord, tmp2, src,
-        (Float32x4::value_offset() + 3 * kWordSize) - kHeapObjectTag);
-    StoreToOffset(kWord, tmp1, dst,
-                  (Float32x4::value_offset() + 2 * kWordSize) - kHeapObjectTag);
-    StoreToOffset(kWord, tmp2, dst,
-                  (Float32x4::value_offset() + 3 * kWordSize) - kHeapObjectTag);
+        (Float32x4::value_offset() + 3 * target::kWordSize) - kHeapObjectTag);
+    StoreToOffset(
+        kWord, tmp1, dst,
+        (Float32x4::value_offset() + 2 * target::kWordSize) - kHeapObjectTag);
+    StoreToOffset(
+        kWord, tmp2, dst,
+        (Float32x4::value_offset() + 3 * target::kWordSize) - kHeapObjectTag);
   }
 }
 
@@ -2893,25 +2910,29 @@
   } else {
     LoadFromOffset(
         kWord, tmp1, src,
-        (Float64x2::value_offset() + 0 * kWordSize) - kHeapObjectTag);
+        (Float64x2::value_offset() + 0 * target::kWordSize) - kHeapObjectTag);
     LoadFromOffset(
         kWord, tmp2, src,
-        (Float64x2::value_offset() + 1 * kWordSize) - kHeapObjectTag);
-    StoreToOffset(kWord, tmp1, dst,
-                  (Float64x2::value_offset() + 0 * kWordSize) - kHeapObjectTag);
-    StoreToOffset(kWord, tmp2, dst,
-                  (Float64x2::value_offset() + 1 * kWordSize) - kHeapObjectTag);
+        (Float64x2::value_offset() + 1 * target::kWordSize) - kHeapObjectTag);
+    StoreToOffset(
+        kWord, tmp1, dst,
+        (Float64x2::value_offset() + 0 * target::kWordSize) - kHeapObjectTag);
+    StoreToOffset(
+        kWord, tmp2, dst,
+        (Float64x2::value_offset() + 1 * target::kWordSize) - kHeapObjectTag);
 
     LoadFromOffset(
         kWord, tmp1, src,
-        (Float64x2::value_offset() + 2 * kWordSize) - kHeapObjectTag);
+        (Float64x2::value_offset() + 2 * target::kWordSize) - kHeapObjectTag);
     LoadFromOffset(
         kWord, tmp2, src,
-        (Float64x2::value_offset() + 3 * kWordSize) - kHeapObjectTag);
-    StoreToOffset(kWord, tmp1, dst,
-                  (Float64x2::value_offset() + 2 * kWordSize) - kHeapObjectTag);
-    StoreToOffset(kWord, tmp2, dst,
-                  (Float64x2::value_offset() + 3 * kWordSize) - kHeapObjectTag);
+        (Float64x2::value_offset() + 3 * target::kWordSize) - kHeapObjectTag);
+    StoreToOffset(
+        kWord, tmp1, dst,
+        (Float64x2::value_offset() + 2 * target::kWordSize) - kHeapObjectTag);
+    StoreToOffset(
+        kWord, tmp2, dst,
+        (Float64x2::value_offset() + 3 * target::kWordSize) - kHeapObjectTag);
   }
 }
 
@@ -3145,7 +3166,7 @@
   // kVolatileCpuRegCount +1 for PP, -1 because even though LR is volatile,
   // it is pushed ahead of FP.
   const intptr_t kPushedRegistersSize =
-      kDartVolatileCpuRegCount * kWordSize + kPushedFpuRegisterSize;
+      kDartVolatileCpuRegCount * target::kWordSize + kPushedFpuRegisterSize;
   AddImmediate(SP, FP, -kPushedRegistersSize);
 
   // Restore all volatile FPU registers.
@@ -3208,8 +3229,8 @@
 
 void Assembler::LeaveDartFrame() {
   if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
-    ldr(PP,
-        Address(FP, compiler_frame_layout.saved_caller_pp_from_fp * kWordSize));
+    ldr(PP, Address(FP, target::frame_layout.saved_caller_pp_from_fp *
+                            target::kWordSize));
   }
   set_constant_pool_allowed(false);
 
@@ -3220,8 +3241,8 @@
 
 void Assembler::LeaveDartFrameAndReturn() {
   if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
-    ldr(PP,
-        Address(FP, compiler_frame_layout.saved_caller_pp_from_fp * kWordSize));
+    ldr(PP, Address(FP, target::frame_layout.saved_caller_pp_from_fp *
+                            target::kWordSize));
   }
   set_constant_pool_allowed(false);
 
@@ -3285,15 +3306,12 @@
 }
 
 void Assembler::IncrementAllocationStats(Register stats_addr_reg,
-                                         intptr_t cid,
-                                         Heap::Space space) {
+                                         intptr_t cid) {
   ASSERT(stats_addr_reg != kNoRegister);
   ASSERT(stats_addr_reg != TMP);
   ASSERT(cid > 0);
   const uword count_field_offset =
-      (space == Heap::kNew)
-          ? ClassHeapStats::allocated_since_gc_new_space_offset()
-          : ClassHeapStats::allocated_since_gc_old_space_offset();
+      ClassHeapStats::allocated_since_gc_new_space_offset();
   const Address& count_address = Address(stats_addr_reg, count_field_offset);
   ldr(TMP, count_address);
   AddImmediate(TMP, 1);
@@ -3301,18 +3319,13 @@
 }
 
 void Assembler::IncrementAllocationStatsWithSize(Register stats_addr_reg,
-                                                 Register size_reg,
-                                                 Heap::Space space) {
+                                                 Register size_reg) {
   ASSERT(stats_addr_reg != kNoRegister);
   ASSERT(stats_addr_reg != TMP);
   const uword count_field_offset =
-      (space == Heap::kNew)
-          ? ClassHeapStats::allocated_since_gc_new_space_offset()
-          : ClassHeapStats::allocated_since_gc_old_space_offset();
+      ClassHeapStats::allocated_since_gc_new_space_offset();
   const uword size_field_offset =
-      (space == Heap::kNew)
-          ? ClassHeapStats::allocated_size_since_gc_new_space_offset()
-          : ClassHeapStats::allocated_size_since_gc_old_space_offset();
+      ClassHeapStats::allocated_size_since_gc_new_space_offset();
   const Address& count_address = Address(stats_addr_reg, count_field_offset);
   const Address& size_address = Address(stats_addr_reg, size_field_offset);
   ldr(TMP, count_address);
@@ -3329,13 +3342,13 @@
                             Register instance_reg,
                             Register temp_reg) {
   ASSERT(failure != NULL);
-  const intptr_t instance_size = cls.instance_size();
+  const intptr_t instance_size = target::Class::GetInstanceSize(cls);
   if (FLAG_inline_alloc && Heap::IsAllocatableInNewSpace(instance_size)) {
+    const classid_t cid = target::Class::GetId(cls);
     ASSERT(instance_reg != temp_reg);
     ASSERT(temp_reg != IP);
     ASSERT(instance_size != 0);
-    NOT_IN_PRODUCT(LoadAllocationStatsAddress(temp_reg, cls.id()));
-    NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
+    NOT_IN_PRODUCT(LoadAllocationStatsAddress(temp_reg, cid));
     ldr(instance_reg, Address(THR, Thread::top_offset()));
     // TODO(koda): Protect against unsigned overflow here.
     AddImmediateSetFlags(instance_reg, instance_reg, instance_size);
@@ -3358,15 +3371,12 @@
     ASSERT(instance_size >= kHeapObjectTag);
     AddImmediate(instance_reg, -instance_size + kHeapObjectTag);
 
-    uint32_t tags = 0;
-    tags = RawObject::SizeTag::update(instance_size, tags);
-    ASSERT(cls.id() != kIllegalCid);
-    tags = RawObject::ClassIdTag::update(cls.id(), tags);
-    tags = RawObject::NewBit::update(true, tags);
+    const uint32_t tags =
+        target::MakeTagWordForNewSpaceObject(cid, instance_size);
     LoadImmediate(IP, tags);
-    str(IP, FieldAddress(instance_reg, Object::tags_offset()));
+    str(IP, FieldAddress(instance_reg, target::Object::tags_offset()));
 
-    NOT_IN_PRODUCT(IncrementAllocationStats(temp_reg, cls.id(), space));
+    NOT_IN_PRODUCT(IncrementAllocationStats(temp_reg, cid));
   } else {
     b(failure);
   }
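
MakeTagWordForNewSpaceObject folds the three BitField updates the removed code did by hand (SizeTag, ClassIdTag, NewBit) into one target-side helper. A sketch of the composition; the class-id position matches the asserts used elsewhere in this file, but the size-tag and new-bit positions below are assumptions for illustration, not the VM's authoritative layout:

  #include <cassert>
  #include <cstdint>

  constexpr uint32_t kSizeTagPos = 8;           // assumed position
  constexpr uint32_t kObjectAlignmentLog2 = 3;  // sizes in 8-byte units
  constexpr uint32_t kClassIdTagPos = 16;       // asserted in this file
  constexpr uint32_t kNewBit = 1u << 3;         // assumed position

  uint32_t MakeTagWordSketch(uint32_t cid, uint32_t instance_size) {
    const uint32_t scaled = instance_size >> kObjectAlignmentLog2;
    return (scaled << kSizeTagPos) | (cid << kClassIdTagPos) | kNewBit;
  }

  int main() {
    // cid 42, 16-byte instance: size field 2, class id 42, new bit set.
    assert(MakeTagWordSketch(42, 16) == ((2u << 8) | (42u << 16) | (1u << 3)));
    return 0;
  }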
@@ -3381,7 +3391,6 @@
                                  Register temp2) {
   if (FLAG_inline_alloc && Heap::IsAllocatableInNewSpace(instance_size)) {
     NOT_IN_PRODUCT(LoadAllocationStatsAddress(temp1, cid));
-    NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
     // Potential new object start.
     ldr(instance, Address(THR, Thread::top_offset()));
     AddImmediateSetFlags(end_address, instance, instance_size);
@@ -3406,15 +3415,14 @@
 
     // Initialize the tags.
     // instance: new object start as a tagged pointer.
-    uint32_t tags = 0;
-    tags = RawObject::ClassIdTag::update(cid, tags);
-    tags = RawObject::SizeTag::update(instance_size, tags);
-    tags = RawObject::NewBit::update(true, tags);
+    const uint32_t tags =
+        target::MakeTagWordForNewSpaceObject(cid, instance_size);
     LoadImmediate(temp2, tags);
-    str(temp2, FieldAddress(instance, Array::tags_offset()));  // Store tags.
+    str(temp2,
+        FieldAddress(instance, target::Object::tags_offset()));  // Store tags.
 
     LoadImmediate(temp2, instance_size);
-    NOT_IN_PRODUCT(IncrementAllocationStatsWithSize(temp1, temp2, space));
+    NOT_IN_PRODUCT(IncrementAllocationStatsWithSize(temp1, temp2));
   } else {
     b(failure);
   }
@@ -3430,18 +3438,6 @@
   pattern.set_distance(offset_into_target);
 }
 
-void Assembler::Stop(const char* message) {
-  if (FLAG_print_stop_message) {
-    PushList((1 << R0) | (1 << IP) | (1 << LR));  // Preserve R0, IP, LR.
-    LoadImmediate(R0, reinterpret_cast<int32_t>(message));
-    // PrintStopMessage() preserves all registers.
-    ExternalLabel label(StubCode::PrintStopMessage().EntryPoint());
-    BranchLink(&label);
-    PopList((1 << R0) | (1 << IP) | (1 << LR));  // Restore R0, IP, LR.
-  }
-  bkpt(Instr::kStopMessageCode);
-}
-
 Address Assembler::ElementAddressForIntIndex(bool is_load,
                                              bool is_external,
                                              intptr_t cid,
@@ -3610,6 +3606,7 @@
   return fpu_reg_names[reg];
 }
 
+}  // namespace compiler
 }  // namespace dart
 
 #endif  // defined(TARGET_ARCH_ARM) && !defined(DART_PRECOMPILED_RUNTIME)
diff --git a/runtime/vm/compiler/assembler/assembler_arm.h b/runtime/vm/compiler/assembler/assembler_arm.h
index 8214e49..3f8820e 100644
--- a/runtime/vm/compiler/assembler/assembler_arm.h
+++ b/runtime/vm/compiler/assembler/assembler_arm.h
@@ -13,17 +13,49 @@
 
 #include "platform/assert.h"
 #include "platform/utils.h"
+#include "vm/code_entry_kind.h"
+#include "vm/compiler/runtime_api.h"
 #include "vm/constants_arm.h"
 #include "vm/cpu.h"
 #include "vm/hash_map.h"
-#include "vm/object.h"
 #include "vm/simulator.h"
 
 namespace dart {
 
 // Forward declarations.
-class RuntimeEntry;
+class FlowGraphCompiler;
 class RegisterSet;
+class RuntimeEntry;
+
+// TODO(vegorov) these enumerations are temporarily moved out of the
+// compiler namespace to make refactoring easier.
+enum OperandSize {
+  kByte,
+  kUnsignedByte,
+  kHalfword,
+  kUnsignedHalfword,
+  kWord,
+  kUnsignedWord,
+  kWordPair,
+  kSWord,
+  kDWord,
+  kRegList,
+};
+
+// Load/store multiple addressing mode.
+enum BlockAddressMode {
+  // bit encoding P U W
+  DA = (0 | 0 | 0) << 21,    // decrement after
+  IA = (0 | 4 | 0) << 21,    // increment after
+  DB = (8 | 0 | 0) << 21,    // decrement before
+  IB = (8 | 4 | 0) << 21,    // increment before
+  DA_W = (0 | 0 | 1) << 21,  // decrement after with writeback to base
+  IA_W = (0 | 4 | 1) << 21,  // increment after with writeback to base
+  DB_W = (8 | 0 | 1) << 21,  // decrement before with writeback to base
+  IB_W = (8 | 4 | 1) << 21   // increment before with writeback to base
+};
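
The enumerators pack ARM's P (pre/post), U (up/down) and W (writeback) bits into their instruction positions: bits 24, 23 and 21, so (8 | 4 | 1) << 21 sets exactly those three. Compile-time checks of the encoding (standalone names, to avoid clashing with the enum):

  #include <cstdint>

  constexpr uint32_t P(uint32_t mode) { return (mode >> 24) & 1; }
  constexpr uint32_t U(uint32_t mode) { return (mode >> 23) & 1; }
  constexpr uint32_t W(uint32_t mode) { return (mode >> 21) & 1; }

  constexpr uint32_t kIA = (0 | 4 | 0) << 21;    // increment after
  constexpr uint32_t kDB_W = (8 | 0 | 1) << 21;  // decrement before + wb

  static_assert(P(kIA) == 0 && U(kIA) == 1 && W(kIA) == 0,
                "IA: post-index, upward, no writeback");
  static_assert(P(kDB_W) == 1 && U(kDB_W) == 0 && W(kDB_W) == 1,
                "DB_W: pre-index, downward, writeback");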
+
+namespace compiler {
 
 // Instruction encoding bits.
 enum {
@@ -182,32 +214,6 @@
   friend class Address;
 };
 
-enum OperandSize {
-  kByte,
-  kUnsignedByte,
-  kHalfword,
-  kUnsignedHalfword,
-  kWord,
-  kUnsignedWord,
-  kWordPair,
-  kSWord,
-  kDWord,
-  kRegList,
-};
-
-// Load/store multiple addressing mode.
-enum BlockAddressMode {
-  // bit encoding P U W
-  DA = (0 | 0 | 0) << 21,    // decrement after
-  IA = (0 | 4 | 0) << 21,    // increment after
-  DB = (8 | 0 | 0) << 21,    // decrement before
-  IB = (8 | 4 | 0) << 21,    // increment before
-  DA_W = (0 | 0 | 1) << 21,  // decrement after with writeback to base
-  IA_W = (0 | 4 | 1) << 21,  // increment after with writeback to base
-  DB_W = (8 | 0 | 1) << 21,  // decrement before with writeback to base
-  IB_W = (8 | 4 | 1) << 21   // increment before with writeback to base
-};
-
 class Address : public ValueObject {
  public:
   enum OffsetKind {
@@ -339,7 +345,7 @@
 
 class Assembler : public AssemblerBase {
  public:
-  explicit Assembler(ObjectPoolWrapper* object_pool_wrapper,
+  explicit Assembler(ObjectPoolBuilder* object_pool_builder,
                      bool use_far_branches = false);
   ~Assembler() {}
 
@@ -654,31 +660,31 @@
   void blx(Register rm, Condition cond = AL);
 
   void Branch(const Code& code,
-              ObjectPool::Patchability patchable = ObjectPool::kNotPatchable,
+              ObjectPoolBuilderEntry::Patchability patchable =
+                  ObjectPoolBuilderEntry::kNotPatchable,
               Register pp = PP,
               Condition cond = AL);
 
   void Branch(const Address& address, Condition cond = AL);
 
-  void BranchLink(
-      const Code& code,
-      ObjectPool::Patchability patchable = ObjectPool::kNotPatchable,
-      Code::EntryKind entry_kind = Code::EntryKind::kNormal);
+  void BranchLink(const Code& code,
+                  ObjectPoolBuilderEntry::Patchability patchable =
+                      ObjectPoolBuilderEntry::kNotPatchable,
+                  CodeEntryKind entry_kind = CodeEntryKind::kNormal);
   void BranchLinkToRuntime();
 
   void CallNullErrorShared(bool save_fpu_registers);
 
   // Branch and link to an entry address. Call sequence can be patched.
-  void BranchLinkPatchable(
-      const Code& code,
-      Code::EntryKind entry_kind = Code::EntryKind::kNormal);
+  void BranchLinkPatchable(const Code& code,
+                           CodeEntryKind entry_kind = CodeEntryKind::kNormal);
 
   // Emit a call that shares its object pool entries with other calls
   // that have the same equivalence marker.
   void BranchLinkWithEquivalence(
       const Code& code,
       const Object& equivalence,
-      Code::EntryKind entry_kind = Code::EntryKind::kNormal);
+      CodeEntryKind entry_kind = CodeEntryKind::kNormal);
 
   // Branch and link to [base + offset]. Call sequence is never patched.
   void BranchLinkOffset(Register base, int32_t offset);
@@ -754,7 +760,7 @@
                                   Register new_pp);
   void LoadNativeEntry(Register dst,
                        const ExternalLabel* label,
-                       ObjectPool::Patchability patchable,
+                       ObjectPoolBuilderEntry::Patchability patchable,
                        Condition cond = AL);
   void PushObject(const Object& object);
   void CompareObject(Register rn, const Object& object);
@@ -1028,12 +1034,9 @@
   // allocation stats. These are separate assembler macros so we can
   // avoid a dependent load too nearby the load of the table address.
   void LoadAllocationStatsAddress(Register dest, intptr_t cid);
-  void IncrementAllocationStats(Register stats_addr,
-                                intptr_t cid,
-                                Heap::Space space);
+  void IncrementAllocationStats(Register stats_addr, intptr_t cid);
   void IncrementAllocationStatsWithSize(Register stats_addr_reg,
-                                        Register size_reg,
-                                        Heap::Space space);
+                                        Register size_reg);
 
   Address ElementAddressForIntIndex(bool is_load,
                                     bool is_external,
@@ -1117,7 +1120,7 @@
   // On some other platforms, we draw a distinction between safe and unsafe
   // smis.
   static bool IsSafe(const Object& object) { return true; }
-  static bool IsSafeSmi(const Object& object) { return object.IsSmi(); }
+  static bool IsSafeSmi(const Object& object) { return target::IsSmi(object); }
 
   bool constant_pool_allowed() const { return constant_pool_allowed_; }
   void set_constant_pool_allowed(bool b) { constant_pool_allowed_ = b; }
@@ -1276,15 +1279,23 @@
                              CanBeSmi can_be_smi,
                              BarrierFilterMode barrier_filter_mode);
 
-  friend class FlowGraphCompiler;
+  friend class dart::FlowGraphCompiler;
   std::function<void(Condition, Register)>
       generate_invoke_write_barrier_wrapper_;
-  std::function<void(Condition)> invoke_array_write_barrier_;
+  std::function<void(Condition)> generate_invoke_array_write_barrier_;
 
   DISALLOW_ALLOCATION();
   DISALLOW_COPY_AND_ASSIGN(Assembler);
 };
 
+}  // namespace compiler
+
+// TODO(vegorov) temporarily export commonly used classes into the dart
+// namespace to ease migration.
+using compiler::Address;
+using compiler::FieldAddress;
+using compiler::Operand;
+
 }  // namespace dart
 
 #endif  // RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_ARM_H_
diff --git a/runtime/vm/compiler/assembler/assembler_arm64.cc b/runtime/vm/compiler/assembler/assembler_arm64.cc
index 8e5cb81..b41c7a4 100644
--- a/runtime/vm/compiler/assembler/assembler_arm64.cc
+++ b/runtime/vm/compiler/assembler/assembler_arm64.cc
@@ -5,15 +5,13 @@
 #include "vm/globals.h"  // NOLINT
 #if defined(TARGET_ARCH_ARM64) && !defined(DART_PRECOMPILED_RUNTIME)
 
+#define SHOULD_NOT_INCLUDE_RUNTIME
+
 #include "vm/compiler/assembler/assembler.h"
 #include "vm/compiler/backend/locations.h"
 #include "vm/cpu.h"
 #include "vm/instructions.h"
-#include "vm/longjump.h"
-#include "vm/runtime_entry.h"
 #include "vm/simulator.h"
-#include "vm/stack_frame.h"
-#include "vm/stub_code.h"
 
 namespace dart {
 
@@ -23,16 +21,33 @@
 
 DEFINE_FLAG(bool, use_far_branches, false, "Always use far branches");
 
-Assembler::Assembler(ObjectPoolWrapper* object_pool_wrapper,
+namespace compiler {
+
+#ifndef PRODUCT
+using target::ClassHeapStats;
+#endif
+using target::ClassTable;
+using target::Double;
+using target::Float32x4;
+using target::Float64x2;
+using target::Heap;
+using target::Instance;
+using target::Instructions;
+using target::Isolate;
+using target::ObjectPool;
+using target::RawObject;
+using target::Thread;
+
+Assembler::Assembler(ObjectPoolBuilder* object_pool_builder,
                      bool use_far_branches)
-    : AssemblerBase(object_pool_wrapper),
+    : AssemblerBase(object_pool_builder),
       use_far_branches_(use_far_branches),
       constant_pool_allowed_(false) {
   generate_invoke_write_barrier_wrapper_ = [&](Register reg) {
     ldr(LR, Address(THR, Thread::write_barrier_wrappers_thread_offset(reg)));
     blr(LR);
   };
-  invoke_array_write_barrier_ = [&]() {
+  generate_invoke_array_write_barrier_ = [&]() {
     ldr(LR, Address(THR, Thread::array_write_barrier_entry_point_offset()));
     blr(LR);
   };
@@ -227,13 +242,6 @@
   label->BindTo(bound_pc);
 }
 
-void Assembler::Stop(const char* message) {
-  if (FLAG_print_stop_message) {
-    UNIMPLEMENTED();
-  }
-  brk(Instr::kStopMessageCode);
-}
-
 static int CountLeadingZeros(uint64_t value, int width) {
   ASSERT((width == 32) || (width == 64));
   if (value == 0) {
@@ -363,7 +371,7 @@
 
 void Assembler::LoadPoolPointer(Register pp) {
   CheckCodePointer();
-  ldr(pp, FieldAddress(CODE_REG, Code::object_pool_offset()));
+  ldr(pp, FieldAddress(CODE_REG, target::Code::object_pool_offset()));
 
   // When in the PP register, the pool pointer is untagged. When we
   // push it on the stack with TagAndPushPP it is tagged again. PopAndUntagPP
@@ -456,35 +464,34 @@
 }
 
 intptr_t Assembler::FindImmediate(int64_t imm) {
-  return object_pool_wrapper().FindImmediate(imm);
+  return object_pool_builder().FindImmediate(imm);
 }
 
 bool Assembler::CanLoadFromObjectPool(const Object& object) const {
-  ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
-  ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
-  ASSERT(!Thread::CanLoadFromThread(object));
+  ASSERT(IsOriginalObject(object));
+  ASSERT(!target::CanLoadFromThread(object));
   if (!constant_pool_allowed()) {
     return false;
   }
 
   // TODO(zra, kmillikin): Also load other large immediates from the object
   // pool.
-  if (object.IsSmi()) {
-    ASSERT(Smi::IsValid(Smi::Value(reinterpret_cast<RawSmi*>(object.raw()))));
+  if (target::IsSmi(object)) {
     // If the raw smi does not fit into a 32-bit signed int, then we'll keep
     // the raw value in the object pool.
-    return !Utils::IsInt(32, reinterpret_cast<int64_t>(object.raw()));
+    return !Utils::IsInt(32, target::ToRawSmi(object));
   }
-  ASSERT(object.IsNotTemporaryScopedHandle());
-  ASSERT(object.IsOld());
+  ASSERT(IsNotTemporaryScopedHandle(object));
+  ASSERT(IsInOldSpace(object));
   return true;
 }
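
So on ARM64 a Smi stays inline unless its raw tagged bits overflow a signed 32-bit immediate. A sketch of that width test, assuming the usual one-bit Smi tag in bit 0 (IsInt32 hand-mirrors Utils::IsInt(32, ...)):

  #include <cassert>
  #include <cstdint>

  // True iff |value| is representable as a signed 32-bit integer.
  bool IsInt32(int64_t value) { return value == static_cast<int32_t>(value); }

  int main() {
    // The raw value is the payload shifted left past the tag bit; only
    // raw values outside the int32 range go to the object pool.
    const int64_t small_raw = int64_t{1} << 20;  // fits: stays inline
    const int64_t large_raw = int64_t{1} << 40;  // does not: object pool
    assert(IsInt32(small_raw));
    assert(!IsInt32(large_raw));
    return 0;
  }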
 
-void Assembler::LoadNativeEntry(Register dst,
-                                const ExternalLabel* label,
-                                ObjectPool::Patchability patchable) {
+void Assembler::LoadNativeEntry(
+    Register dst,
+    const ExternalLabel* label,
+    ObjectPoolBuilderEntry::Patchability patchable) {
   const int32_t offset = ObjectPool::element_offset(
-      object_pool_wrapper().FindNativeFunction(label, patchable));
+      object_pool_builder().FindNativeFunction(label, patchable));
   LoadWordFromPoolOffset(dst, offset);
 }
 
@@ -495,18 +502,18 @@
 void Assembler::LoadObjectHelper(Register dst,
                                  const Object& object,
                                  bool is_unique) {
-  ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
-  ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
-  if (Thread::CanLoadFromThread(object)) {
-    ldr(dst, Address(THR, Thread::OffsetFromThread(object)));
+  ASSERT(IsOriginalObject(object));
+  word offset = 0;
+  if (target::CanLoadFromThread(object, &offset)) {
+    ldr(dst, Address(THR, offset));
   } else if (CanLoadFromObjectPool(object)) {
     const int32_t offset = ObjectPool::element_offset(
-        is_unique ? object_pool_wrapper().AddObject(object)
-                  : object_pool_wrapper().FindObject(object));
+        is_unique ? object_pool_builder().AddObject(object)
+                  : object_pool_builder().FindObject(object));
     LoadWordFromPoolOffset(dst, offset);
   } else {
-    ASSERT(object.IsSmi());
-    LoadImmediate(dst, reinterpret_cast<int64_t>(object.raw()));
+    ASSERT(target::IsSmi(object));
+    LoadImmediate(dst, target::ToRawSmi(object));
   }
 }
 
@@ -515,8 +522,8 @@
                                            Register new_pp) {
   ASSERT(!constant_pool_allowed());
   ASSERT(new_pp != PP);
-  const int32_t offset =
-      ObjectPool::element_offset(object_pool_wrapper().FindObject(function));
+  const int32_t offset = ObjectPool::element_offset(
+      object_pool_builder().FindObject(ToObject(function)));
   ASSERT(Address::CanHoldOffset(offset));
   ldr(dst, Address(new_pp, offset));
 }
@@ -530,17 +537,17 @@
 }
 
 void Assembler::CompareObject(Register reg, const Object& object) {
-  ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
-  ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
-  if (Thread::CanLoadFromThread(object)) {
-    ldr(TMP, Address(THR, Thread::OffsetFromThread(object)));
+  ASSERT(IsOriginalObject(object));
+  word offset = 0;
+  if (target::CanLoadFromThread(object, &offset)) {
+    ldr(TMP, Address(THR, offset));
     CompareRegisters(reg, TMP);
   } else if (CanLoadFromObjectPool(object)) {
     LoadObject(TMP, object);
     CompareRegisters(reg, TMP);
   } else {
-    ASSERT(object.IsSmi());
-    CompareImmediate(reg, reinterpret_cast<int64_t>(object.raw()));
+    ASSERT(target::IsSmi(object));
+    CompareImmediate(reg, target::ToRawSmi(object));
   }
 }
 
@@ -639,24 +646,24 @@
 
 void Assembler::Branch(const Code& target,
                        Register pp,
-                       ObjectPool::Patchability patchable) {
+                       ObjectPoolBuilderEntry::Patchability patchable) {
   const int32_t offset = ObjectPool::element_offset(
-      object_pool_wrapper().FindObject(target, patchable));
+      object_pool_builder().FindObject(ToObject(target), patchable));
   LoadWordFromPoolOffset(CODE_REG, offset, pp);
-  ldr(TMP, FieldAddress(CODE_REG, Code::entry_point_offset()));
+  ldr(TMP, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
   br(TMP);
 }
 
 void Assembler::BranchPatchable(const Code& code) {
-  Branch(code, PP, ObjectPool::kPatchable);
+  Branch(code, PP, ObjectPoolBuilderEntry::kPatchable);
 }
 
 void Assembler::BranchLink(const Code& target,
-                           ObjectPool::Patchability patchable) {
+                           ObjectPoolBuilderEntry::Patchability patchable) {
   const int32_t offset = ObjectPool::element_offset(
-      object_pool_wrapper().FindObject(target, patchable));
+      object_pool_builder().FindObject(ToObject(target), patchable));
   LoadWordFromPoolOffset(CODE_REG, offset);
-  ldr(TMP, FieldAddress(CODE_REG, Code::entry_point_offset()));
+  ldr(TMP, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
   blr(TMP);
 }
 
@@ -668,9 +675,9 @@
 void Assembler::BranchLinkWithEquivalence(const Code& target,
                                           const Object& equivalence) {
   const int32_t offset = ObjectPool::element_offset(
-      object_pool_wrapper().FindObject(target, equivalence));
+      object_pool_builder().FindObject(ToObject(target), equivalence));
   LoadWordFromPoolOffset(CODE_REG, offset);
-  ldr(TMP, FieldAddress(CODE_REG, Code::entry_point_offset()));
+  ldr(TMP, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
   blr(TMP);
 }
 
@@ -927,8 +934,9 @@
                                       Label* label,
                                       CanBeSmi value_can_be_smi,
                                       BarrierFilterMode how_to_jump) {
-  COMPILE_ASSERT((kNewObjectAlignmentOffset == kWordSize) &&
-                 (kOldObjectAlignmentOffset == 0));
+  COMPILE_ASSERT((target::ObjectAlignment::kNewObjectAlignmentOffset ==
+                  target::kWordSize) &&
+                 (target::ObjectAlignment::kOldObjectAlignmentOffset == 0));
 
   // Write-barrier triggers if the value is in the new space (has bit set) and
   // the object is in the old space (has bit cleared).
@@ -945,14 +953,15 @@
   } else {
     // For the value we are only interested in the new/old bit and the tag bit.
     // AND the new bit with the tag bit. The resulting bit will be 0 for a Smi.
-    and_(TMP, value, Operand(value, LSL, kNewObjectBitPosition));
+    and_(TMP, value,
+         Operand(value, LSL, target::ObjectAlignment::kNewObjectBitPosition));
     // And the result with the negated space bit of the object.
     bic(TMP, TMP, Operand(object));
   }
   if (how_to_jump == kJumpToNoUpdate) {
-    tbz(label, TMP, kNewObjectBitPosition);
+    tbz(label, TMP, target::ObjectAlignment::kNewObjectBitPosition);
   } else {
-    tbnz(label, TMP, kNewObjectBitPosition);
+    tbnz(label, TMP, target::ObjectAlignment::kNewObjectBitPosition);
   }
 }
 
@@ -997,8 +1006,8 @@
   if (can_be_smi == kValueCanBeSmi) {
     BranchIfSmi(value, &done);
   }
-  ldr(TMP, FieldAddress(object, Object::tags_offset()), kUnsignedByte);
-  ldr(TMP2, FieldAddress(value, Object::tags_offset()), kUnsignedByte);
+  ldr(TMP, FieldAddress(object, target::Object::tags_offset()), kUnsignedByte);
+  ldr(TMP2, FieldAddress(value, target::Object::tags_offset()), kUnsignedByte);
   and_(TMP, TMP2, Operand(TMP, LSR, RawObject::kBarrierOverlapShift));
   tst(TMP, Operand(BARRIER_MASK));
   b(&done, ZERO);
@@ -1058,8 +1067,8 @@
   if (can_be_smi == kValueCanBeSmi) {
     BranchIfSmi(value, &done);
   }
-  ldr(TMP, FieldAddress(object, Object::tags_offset()), kUnsignedByte);
-  ldr(TMP2, FieldAddress(value, Object::tags_offset()), kUnsignedByte);
+  ldr(TMP, FieldAddress(object, target::Object::tags_offset()), kUnsignedByte);
+  ldr(TMP2, FieldAddress(value, target::Object::tags_offset()), kUnsignedByte);
   and_(TMP, TMP2, Operand(TMP, LSR, RawObject::kBarrierOverlapShift));
   tst(TMP, Operand(BARRIER_MASK));
   b(&done, ZERO);
@@ -1072,7 +1081,7 @@
     // allocator.
     UNIMPLEMENTED();
   }
-  invoke_array_write_barrier_();
+  generate_invoke_array_write_barrier_();
   if (!lr_reserved) Pop(LR);
   Bind(&done);
 }
@@ -1104,10 +1113,8 @@
 void Assembler::StoreIntoObjectNoBarrier(Register object,
                                          const Address& dest,
                                          const Object& value) {
-  ASSERT(!value.IsICData() || ICData::Cast(value).IsOriginal());
-  ASSERT(!value.IsField() || Field::Cast(value).IsOriginal());
-  ASSERT(value.IsSmi() || value.InVMHeap() ||
-         (value.IsOld() && value.IsNotTemporaryScopedHandle()));
+  ASSERT(IsOriginalObject(value));
+  ASSERT(IsNotTemporaryScopedHandle(value));
   // No store buffer update.
   LoadObject(TMP2, value);
   str(TMP2, dest);
@@ -1128,7 +1135,7 @@
   ASSERT(RawObject::kClassIdTagPos == 16);
   ASSERT(RawObject::kClassIdTagSize == 16);
   const intptr_t class_id_offset =
-      Object::tags_offset() + RawObject::kClassIdTagPos / kBitsPerByte;
+      target::Object::tags_offset() + RawObject::kClassIdTagPos / kBitsPerByte;
   LoadFromOffset(result, object, class_id_offset - kHeapObjectTag,
                  kUnsignedHalfword);
 }
@@ -1139,7 +1146,7 @@
   const intptr_t offset =
       Isolate::class_table_offset() + ClassTable::table_offset();
   LoadFromOffset(result, result, offset);
-  ASSERT(kSizeOfClassPairLog2 == 4);
+  ASSERT(ClassTable::kSizeOfClassPairLog2 == 4);
   add(class_id, class_id, Operand(class_id));
   ldr(result, Address(result, class_id, UXTX, Address::Scaled));
 }
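
Entries are 16-byte class pairs (kSizeOfClassPairLog2 == 4), but the scaled addressing mode on a 64-bit load only multiplies the index by 8; doubling class_id first makes up the factor of two. The arithmetic, as a host-side check:

  #include <cassert>
  #include <cstdint>

  int main() {
    const uint64_t kSizeOfClassPairLog2 = 4;  // 16-byte entries
    for (uint64_t cid = 0; cid < 1000; ++cid) {
      // add(class_id, class_id, Operand(class_id)) doubles the index...
      const uint64_t doubled = cid + cid;
      // ...and Address::Scaled on a 64-bit ldr multiplies by 8.
      assert(doubled * 8 == cid << kSizeOfClassPairLog2);
    }
    return 0;
  }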
@@ -1180,7 +1187,8 @@
 }
 
 void Assembler::RestoreCodePointer() {
-  ldr(CODE_REG, Address(FP, compiler_frame_layout.code_from_fp * kWordSize));
+  ldr(CODE_REG, Address(FP, compiler::target::frame_layout.code_from_fp *
+                                target::kWordSize));
   CheckCodePointer();
 }
 
@@ -1200,7 +1208,7 @@
   const intptr_t entry_offset =
       CodeSize() + Instructions::HeaderSize() - kHeapObjectTag;
   adr(R0, Immediate(-entry_offset));
-  ldr(TMP, FieldAddress(CODE_REG, Code::saved_instructions_offset()));
+  ldr(TMP, FieldAddress(CODE_REG, target::Code::saved_instructions_offset()));
   cmp(R0, Operand(TMP));
   b(&instructions_ok, EQ);
   brk(1);
@@ -1299,7 +1307,8 @@
     if (restore_pp == kRestoreCallerPP) {
       // Restore and untag PP.
       LoadFromOffset(PP, FP,
-                     compiler_frame_layout.saved_caller_pp_from_fp * kWordSize);
+                     compiler::target::frame_layout.saved_caller_pp_from_fp *
+                         target::kWordSize);
       sub(PP, PP, Operand(kHeapObjectTag));
     }
   }
@@ -1337,10 +1346,10 @@
   // and ensure proper alignment of the stack frame.
   // We need to restore it before restoring registers.
   const intptr_t kPushedRegistersSize =
-      kDartVolatileCpuRegCount * kWordSize +
-      kDartVolatileFpuRegCount * kWordSize +
-      (compiler_frame_layout.dart_fixed_frame_size - 2) *
-          kWordSize;  // From EnterStubFrame (excluding PC / FP)
+      kDartVolatileCpuRegCount * target::kWordSize +
+      kDartVolatileFpuRegCount * target::kWordSize +
+      (compiler::target::frame_layout.dart_fixed_frame_size - 2) *
+          target::kWordSize;  // From EnterStubFrame (excluding PC / FP)
   AddImmediate(SP, FP, -kPushedRegistersSize);
   for (int i = kDartLastVolatileCpuReg; i >= kDartFirstVolatileCpuReg; i--) {
     const Register reg = static_cast<Register>(i);
@@ -1415,10 +1424,9 @@
   b(trace, NE);
 }
 
-void Assembler::UpdateAllocationStats(intptr_t cid, Heap::Space space) {
+void Assembler::UpdateAllocationStats(intptr_t cid) {
   ASSERT(cid > 0);
-  intptr_t counter_offset =
-      ClassTable::CounterOffsetFor(cid, space == Heap::kNew);
+  intptr_t counter_offset = ClassTable::CounterOffsetFor(cid, /*is_new=*/true);
   LoadIsolate(TMP2);
   intptr_t table_offset =
       Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid);
@@ -1429,19 +1437,13 @@
   str(TMP, Address(TMP2, 0));
 }
 
-void Assembler::UpdateAllocationStatsWithSize(intptr_t cid,
-                                              Register size_reg,
-                                              Heap::Space space) {
+void Assembler::UpdateAllocationStatsWithSize(intptr_t cid, Register size_reg) {
   ASSERT(cid > 0);
   const uword class_offset = ClassTable::ClassOffsetFor(cid);
   const uword count_field_offset =
-      (space == Heap::kNew)
-          ? ClassHeapStats::allocated_since_gc_new_space_offset()
-          : ClassHeapStats::allocated_since_gc_old_space_offset();
+      ClassHeapStats::allocated_since_gc_new_space_offset();
   const uword size_field_offset =
-      (space == Heap::kNew)
-          ? ClassHeapStats::allocated_size_since_gc_new_space_offset()
-          : ClassHeapStats::allocated_size_since_gc_old_space_offset();
+      ClassHeapStats::allocated_size_since_gc_new_space_offset();
   LoadIsolate(TMP2);
   intptr_t table_offset =
       Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid);
@@ -1462,18 +1464,19 @@
                             Register top_reg,
                             bool tag_result) {
   ASSERT(failure != NULL);
-  const intptr_t instance_size = cls.instance_size();
+  const intptr_t instance_size = target::Class::GetInstanceSize(cls);
   if (FLAG_inline_alloc && Heap::IsAllocatableInNewSpace(instance_size)) {
     // If this allocation is traced, program will jump to failure path
     // (i.e. the allocation stub) which will allocate the object and trace the
     // allocation call site.
-    NOT_IN_PRODUCT(
-        MaybeTraceAllocation(cls.id(), /*temp_reg=*/top_reg, failure));
+    const classid_t cid = target::Class::GetId(cls);
+    NOT_IN_PRODUCT(MaybeTraceAllocation(cid, /*temp_reg=*/top_reg, failure));
 
     const Register kEndReg = TMP;
 
     // instance_reg: potential next object start.
-    RELEASE_ASSERT((Thread::top_offset() + kWordSize) == Thread::end_offset());
+    RELEASE_ASSERT((Thread::top_offset() + target::kWordSize) ==
+                   Thread::end_offset());
     ldp(instance_reg, kEndReg,
         Address(THR, Thread::top_offset(), Address::PairOffset));
 
@@ -1486,18 +1489,14 @@
     // next object start and store the class in the class field of object.
     str(top_reg, Address(THR, Thread::top_offset()));
 
-    NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
-    NOT_IN_PRODUCT(UpdateAllocationStats(cls.id(), space));
+    NOT_IN_PRODUCT(UpdateAllocationStats(cid));
 
-    uint32_t tags = 0;
-    tags = RawObject::SizeTag::update(instance_size, tags);
-    ASSERT(cls.id() != kIllegalCid);
-    tags = RawObject::ClassIdTag::update(cls.id(), tags);
-    tags = RawObject::NewBit::update(true, tags);
+    const uint32_t tags =
+        target::MakeTagWordForNewSpaceObject(cid, instance_size);
     // Extends the 32 bit tags with zeros, which is the uninitialized
     // hash code.
     LoadImmediate(TMP, tags);
-    StoreToOffset(TMP, instance_reg, Object::tags_offset());
+    StoreToOffset(TMP, instance_reg, target::Object::tags_offset());
 
     if (tag_result) {
       AddImmediate(instance_reg, kHeapObjectTag);
@@ -1519,7 +1518,6 @@
     // (i.e. the allocation stub) which will allocate the object and trace the
     // allocation call site.
     NOT_IN_PRODUCT(MaybeTraceAllocation(cid, temp1, failure));
-    NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
     // Potential new object start.
     ldr(instance, Address(THR, Thread::top_offset()));
     AddImmediateSetFlags(end_address, instance, instance_size);
@@ -1537,18 +1535,16 @@
     str(end_address, Address(THR, Thread::top_offset()));
     add(instance, instance, Operand(kHeapObjectTag));
     LoadImmediate(temp2, instance_size);
-    NOT_IN_PRODUCT(UpdateAllocationStatsWithSize(cid, temp2, space));
+    NOT_IN_PRODUCT(UpdateAllocationStatsWithSize(cid, temp2));
 
     // Initialize the tags.
     // instance: new object start as a tagged pointer.
-    uint32_t tags = 0;
-    tags = RawObject::ClassIdTag::update(cid, tags);
-    tags = RawObject::SizeTag::update(instance_size, tags);
-    tags = RawObject::NewBit::update(true, tags);
+    const uint32_t tags =
+        target::MakeTagWordForNewSpaceObject(cid, instance_size);
     // Extends the 32 bit tags with zeros, which is the uninitialized
     // hash code.
     LoadImmediate(temp2, tags);
-    str(temp2, FieldAddress(instance, Array::tags_offset()));  // Store tags.
+    str(temp2, FieldAddress(instance, target::Object::tags_offset()));
   } else {
     b(failure);
   }
@@ -1777,6 +1773,8 @@
   }
 }
 
+}  // namespace compiler
+
 }  // namespace dart
 
 #endif  // defined(TARGET_ARCH_ARM64) && !defined(DART_PRECOMPILED_RUNTIME)
diff --git a/runtime/vm/compiler/assembler/assembler_arm64.h b/runtime/vm/compiler/assembler/assembler_arm64.h
index 28198ec..ed2c5a5 100644
--- a/runtime/vm/compiler/assembler/assembler_arm64.h
+++ b/runtime/vm/compiler/assembler/assembler_arm64.h
@@ -13,18 +13,20 @@
 
 #include "platform/assert.h"
 #include "platform/utils.h"
+#include "vm/class_id.h"
 #include "vm/constants_arm64.h"
 #include "vm/hash_map.h"
-#include "vm/longjump.h"
-#include "vm/object.h"
 #include "vm/simulator.h"
 
 namespace dart {
 
 // Forward declarations.
+class FlowGraphCompiler;
 class RuntimeEntry;
 class RegisterSet;
 
+namespace compiler {
+
 class Immediate : public ValueObject {
  public:
   explicit Immediate(int64_t value) : value_(value) {}
@@ -426,7 +428,7 @@
 
 class Assembler : public AssemblerBase {
  public:
-  explicit Assembler(ObjectPoolWrapper* object_pool_wrapper,
+  explicit Assembler(ObjectPoolBuilder* object_pool_builder,
                      bool use_far_branches = false);
   ~Assembler() {}
 
@@ -445,7 +447,7 @@
   void Drop(intptr_t stack_elements) {
     ASSERT(stack_elements >= 0);
     if (stack_elements > 0) {
-      add(SP, SP, Operand(stack_elements * kWordSize));
+      add(SP, SP, Operand(stack_elements * target::kWordSize));
     }
   }
 
@@ -497,7 +499,7 @@
   // On some other platforms, we draw a distinction between safe and unsafe
   // smis.
   static bool IsSafe(const Object& object) { return true; }
-  static bool IsSafeSmi(const Object& object) { return object.IsSmi(); }
+  static bool IsSafeSmi(const Object& object) { return target::IsSmi(object); }
 
   // Addition and subtraction.
   // For add and sub, to use CSP for rn, o must be of type Operand::Extend.
@@ -1256,19 +1258,19 @@
   }
   void Push(Register reg) {
     ASSERT(reg != PP);  // Only push PP with TagAndPushPP().
-    str(reg, Address(SP, -1 * kWordSize, Address::PreIndex));
+    str(reg, Address(SP, -1 * target::kWordSize, Address::PreIndex));
   }
   void Pop(Register reg) {
     ASSERT(reg != PP);  // Only pop PP with PopAndUntagPP().
-    ldr(reg, Address(SP, 1 * kWordSize, Address::PostIndex));
+    ldr(reg, Address(SP, 1 * target::kWordSize, Address::PostIndex));
   }
   void PushPair(Register low, Register high) {
     ASSERT((low != PP) && (high != PP));
-    stp(low, high, Address(SP, -2 * kWordSize, Address::PairPreIndex));
+    stp(low, high, Address(SP, -2 * target::kWordSize, Address::PairPreIndex));
   }
   void PopPair(Register low, Register high) {
     ASSERT((low != PP) && (high != PP));
-    ldp(low, high, Address(SP, 2 * kWordSize, Address::PairPostIndex));
+    ldp(low, high, Address(SP, 2 * target::kWordSize, Address::PairPostIndex));
   }
   void PushFloat(VRegister reg) {
     fstrs(reg, Address(SP, -1 * kFloatSize, Address::PreIndex));
@@ -1291,16 +1293,17 @@
   void TagAndPushPP() {
     // Add the heap object tag back to PP before putting it on the stack.
     add(TMP, PP, Operand(kHeapObjectTag));
-    str(TMP, Address(SP, -1 * kWordSize, Address::PreIndex));
+    str(TMP, Address(SP, -1 * target::kWordSize, Address::PreIndex));
   }
   void TagAndPushPPAndPcMarker() {
     COMPILE_ASSERT(CODE_REG != TMP2);
     // Add the heap object tag back to PP before putting it on the stack.
     add(TMP2, PP, Operand(kHeapObjectTag));
-    stp(TMP2, CODE_REG, Address(SP, -2 * kWordSize, Address::PairPreIndex));
+    stp(TMP2, CODE_REG,
+        Address(SP, -2 * target::kWordSize, Address::PairPreIndex));
   }
   void PopAndUntagPP() {
-    ldr(PP, Address(SP, 1 * kWordSize, Address::PostIndex));
+    ldr(PP, Address(SP, 1 * target::kWordSize, Address::PostIndex));
     sub(PP, PP, Operand(kHeapObjectTag));
     // The caller of PopAndUntagPP() must explicitly allow use of popped PP.
     set_constant_pool_allowed(false);
@@ -1353,15 +1356,16 @@
 
   void Branch(const Code& code,
               Register pp,
-              ObjectPool::Patchability patchable = ObjectPool::kNotPatchable);
+              ObjectPoolBuilderEntry::Patchability patchable =
+                  ObjectPoolBuilderEntry::kNotPatchable);
   void BranchPatchable(const Code& code);
 
-  void BranchLink(
-      const Code& code,
-      ObjectPool::Patchability patchable = ObjectPool::kNotPatchable);
+  void BranchLink(const Code& code,
+                  ObjectPoolBuilderEntry::Patchability patchable =
+                      ObjectPoolBuilderEntry::kNotPatchable);
 
   void BranchLinkPatchable(const Code& code) {
-    BranchLink(code, ObjectPool::kPatchable);
+    BranchLink(code, ObjectPoolBuilderEntry::kPatchable);
   }
   void BranchLinkToRuntime();
 
@@ -1481,7 +1485,7 @@
   bool CanLoadFromObjectPool(const Object& object) const;
   void LoadNativeEntry(Register dst,
                        const ExternalLabel* label,
-                       ObjectPool::Patchability patchable);
+                       ObjectPoolBuilderEntry::Patchability patchable);
   void LoadFunctionFromCalleePool(Register dst,
                                   const Function& function,
                                   Register new_pp);
@@ -1538,11 +1542,9 @@
 
   void MonomorphicCheckedEntry();
 
-  void UpdateAllocationStats(intptr_t cid, Heap::Space space);
+  void UpdateAllocationStats(intptr_t cid);
 
-  void UpdateAllocationStatsWithSize(intptr_t cid,
-                                     Register size_reg,
-                                     Heap::Space space);
+  void UpdateAllocationStatsWithSize(intptr_t cid, Register size_reg);
 
   // If allocation tracing for |cid| is enabled, will jump to |trace| label,
   // which will allocate in the runtime where tracing occurs.
@@ -1761,8 +1763,7 @@
   int32_t EncodeImm19BranchOffset(int64_t imm, int32_t instr) {
     if (!CanEncodeImm19BranchOffset(imm)) {
       ASSERT(!use_far_branches());
-      Thread::Current()->long_jump_base()->Jump(1,
-                                                Object::branch_offset_error());
+      BailoutWithBranchOffsetError();
     }
     const int32_t imm32 = static_cast<int32_t>(imm);
     const int32_t off = (((imm32 >> 2) << kImm19Shift) & kImm19Mask);
@@ -1777,8 +1778,7 @@
   int32_t EncodeImm14BranchOffset(int64_t imm, int32_t instr) {
     if (!CanEncodeImm14BranchOffset(imm)) {
       ASSERT(!use_far_branches());
-      Thread::Current()->long_jump_base()->Jump(1,
-                                                Object::branch_offset_error());
+      BailoutWithBranchOffsetError();
     }
     const int32_t imm32 = static_cast<int32_t>(imm);
     const int32_t off = (((imm32 >> 2) << kImm14Shift) & kImm14Mask);
@@ -2239,14 +2239,21 @@
                              CanBeSmi can_be_smi,
                              BarrierFilterMode barrier_filter_mode);
 
-  friend class FlowGraphCompiler;
+  friend class dart::FlowGraphCompiler;
   std::function<void(Register reg)> generate_invoke_write_barrier_wrapper_;
-  std::function<void()> invoke_array_write_barrier_;
+  std::function<void()> generate_invoke_array_write_barrier_;
 
   DISALLOW_ALLOCATION();
   DISALLOW_COPY_AND_ASSIGN(Assembler);
 };
 
+}  // namespace compiler
+
+using compiler::Address;
+using compiler::FieldAddress;
+using compiler::Immediate;
+using compiler::Operand;
+
 }  // namespace dart
 
 #endif  // RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_ARM64_H_
diff --git a/runtime/vm/compiler/assembler/assembler_arm64_test.cc b/runtime/vm/compiler/assembler/assembler_arm64_test.cc
index e3625d0..2817361 100644
--- a/runtime/vm/compiler/assembler/assembler_arm64_test.cc
+++ b/runtime/vm/compiler/assembler/assembler_arm64_test.cc
@@ -12,7 +12,7 @@
 #include "vm/virtual_memory.h"
 
 namespace dart {
-
+namespace compiler {
 #define __ assembler->
 
 ASSEMBLER_TEST_GENERATE(Simple, assembler) {
@@ -339,12 +339,13 @@
 ASSEMBLER_TEST_GENERATE(SimpleLoadStore, assembler) {
   __ SetupDartSP();
 
-  __ sub(CSP, CSP, Operand(2 * kWordSize));  // Must not access beyond CSP.
+  __ sub(CSP, CSP,
+         Operand(2 * target::kWordSize));  // Must not access beyond CSP.
 
   __ movz(R0, Immediate(43), 0);
   __ movz(R1, Immediate(42), 0);
-  __ str(R1, Address(SP, -1 * kWordSize, Address::PreIndex));
-  __ ldr(R0, Address(SP, 1 * kWordSize, Address::PostIndex));
+  __ str(R1, Address(SP, -1 * target::kWordSize, Address::PreIndex));
+  __ ldr(R0, Address(SP, 1 * target::kWordSize, Address::PostIndex));
   __ RestoreCSP();
   __ ret();
 }
@@ -373,16 +374,17 @@
 ASSEMBLER_TEST_GENERATE(LoadStoreLargeIndex, assembler) {
   __ SetupDartSP();
 
-  __ sub(CSP, CSP, Operand(32 * kWordSize));  // Must not access beyond CSP.
+  __ sub(CSP, CSP,
+         Operand(32 * target::kWordSize));  // Must not access beyond CSP.
 
   __ movz(R0, Immediate(43), 0);
   __ movz(R1, Immediate(42), 0);
   // Largest negative offset that can fit in the signed 9-bit immediate field.
-  __ str(R1, Address(SP, -32 * kWordSize, Address::PreIndex));
+  __ str(R1, Address(SP, -32 * target::kWordSize, Address::PreIndex));
   // Largest positive kWordSize aligned offset that we can fit.
-  __ ldr(R0, Address(SP, 31 * kWordSize, Address::PostIndex));
+  __ ldr(R0, Address(SP, 31 * target::kWordSize, Address::PostIndex));
   // Correction.
-  __ add(SP, SP, Operand(kWordSize));  // Restore SP.
+  __ add(SP, SP, Operand(target::kWordSize));  // Restore SP.
   __ RestoreCSP();
   __ ret();
 }
@@ -396,10 +398,10 @@
   __ SetupDartSP();
   __ movz(R0, Immediate(43), 0);
   __ movz(R1, Immediate(42), 0);
-  __ sub(SP, SP, Operand(512 * kWordSize));
+  __ sub(SP, SP, Operand(512 * target::kWordSize));
   __ andi(CSP, SP, Immediate(~15));  // Must not access beyond CSP.
-  __ str(R1, Address(SP, 512 * kWordSize, Address::Offset));
-  __ add(SP, SP, Operand(512 * kWordSize));
+  __ str(R1, Address(SP, 512 * target::kWordSize, Address::Offset));
+  __ add(SP, SP, Operand(512 * target::kWordSize));
   __ ldr(R0, Address(SP));
   __ RestoreCSP();
   __ ret();
@@ -419,10 +421,10 @@
   // This should sign extend R2, and add to SP to get address,
   // i.e. SP - kWordSize.
   __ str(R1, Address(SP, R2, SXTW));
-  __ sub(SP, SP, Operand(kWordSize));
+  __ sub(SP, SP, Operand(target::kWordSize));
   __ andi(CSP, SP, Immediate(~15));  // Must not access beyond CSP.
   __ ldr(R0, Address(SP));
-  __ add(SP, SP, Operand(kWordSize));
+  __ add(SP, SP, Operand(target::kWordSize));
   __ RestoreCSP();
   __ ret();
 }
@@ -437,12 +439,12 @@
   __ movz(R0, Immediate(43), 0);
   __ movz(R1, Immediate(42), 0);
   __ movz(R2, Immediate(10), 0);
-  __ sub(SP, SP, Operand(10 * kWordSize));
+  __ sub(SP, SP, Operand(10 * target::kWordSize));
   __ andi(CSP, SP, Immediate(~15));  // Must not access beyond CSP.
   // Store R1 into SP + R2 * kWordSize.
   __ str(R1, Address(SP, R2, UXTX, Address::Scaled));
   __ ldr(R0, Address(SP, R2, UXTX, Address::Scaled));
-  __ add(SP, SP, Operand(10 * kWordSize));
+  __ add(SP, SP, Operand(10 * target::kWordSize));
   __ RestoreCSP();
   __ ret();
 }
@@ -455,7 +457,8 @@
 ASSEMBLER_TEST_GENERATE(LoadSigned32Bit, assembler) {
   __ SetupDartSP();
 
-  __ sub(CSP, CSP, Operand(2 * kWordSize));  // Must not access beyond CSP.
+  __ sub(CSP, CSP,
+         Operand(2 * target::kWordSize));  // Must not access beyond CSP.
 
   __ LoadImmediate(R1, 0xffffffff);
   __ str(R1, Address(SP, -4, Address::PreIndex, kWord), kWord);
@@ -473,12 +476,13 @@
 ASSEMBLER_TEST_GENERATE(SimpleLoadStorePair, assembler) {
   __ SetupDartSP();
 
-  __ sub(CSP, CSP, Operand(2 * kWordSize));  // Must not access beyond CSP.
+  __ sub(CSP, CSP,
+         Operand(2 * target::kWordSize));  // Must not access beyond CSP.
 
   __ LoadImmediate(R2, 43);
   __ LoadImmediate(R3, 42);
-  __ stp(R2, R3, Address(SP, -2 * kWordSize, Address::PairPreIndex));
-  __ ldp(R0, R1, Address(SP, 2 * kWordSize, Address::PairPostIndex));
+  __ stp(R2, R3, Address(SP, -2 * target::kWordSize, Address::PairPreIndex));
+  __ ldp(R0, R1, Address(SP, 2 * target::kWordSize, Address::PairPostIndex));
   __ sub(R0, R0, Operand(R1));
   __ RestoreCSP();
   __ ret();
@@ -493,11 +497,11 @@
   __ SetupDartSP();
   __ LoadImmediate(R2, 43);
   __ LoadImmediate(R3, 42);
-  __ sub(SP, SP, Operand(4 * kWordSize));
+  __ sub(SP, SP, Operand(4 * target::kWordSize));
   __ andi(CSP, SP, Immediate(~15));  // Must not access beyond CSP.
-  __ stp(R2, R3, Address::Pair(SP, 2 * kWordSize));
-  __ ldp(R0, R1, Address::Pair(SP, 2 * kWordSize));
-  __ add(SP, SP, Operand(4 * kWordSize));
+  __ stp(R2, R3, Address::Pair(SP, 2 * target::kWordSize));
+  __ ldp(R0, R1, Address::Pair(SP, 2 * target::kWordSize));
+  __ add(SP, SP, Operand(4 * target::kWordSize));
   __ sub(R0, R0, Operand(R1));
   __ RestoreCSP();
   __ ret();
@@ -2520,11 +2524,12 @@
 ASSEMBLER_TEST_GENERATE(FldrdFstrdPrePostIndex, assembler) {
   __ SetupDartSP();
 
-  __ sub(CSP, CSP, Operand(2 * kWordSize));  // Must not access beyond CSP.
+  __ sub(CSP, CSP,
+         Operand(2 * target::kWordSize));  // Must not access beyond CSP.
 
   __ LoadDImmediate(V1, 42.0);
-  __ fstrd(V1, Address(SP, -1 * kWordSize, Address::PreIndex));
-  __ fldrd(V0, Address(SP, 1 * kWordSize, Address::PostIndex));
+  __ fstrd(V1, Address(SP, -1 * target::kWordSize, Address::PreIndex));
+  __ fldrd(V0, Address(SP, 1 * target::kWordSize, Address::PostIndex));
   __ RestoreCSP();
   __ ret();
 }
@@ -2537,12 +2542,13 @@
 ASSEMBLER_TEST_GENERATE(FldrsFstrsPrePostIndex, assembler) {
   __ SetupDartSP();
 
-  __ sub(CSP, CSP, Operand(2 * kWordSize));  // Must not access beyond CSP.
+  __ sub(CSP, CSP,
+         Operand(2 * target::kWordSize));  // Must not access beyond CSP.
 
   __ LoadDImmediate(V1, 42.0);
   __ fcvtsd(V2, V1);
-  __ fstrs(V2, Address(SP, -1 * kWordSize, Address::PreIndex));
-  __ fldrs(V3, Address(SP, 1 * kWordSize, Address::PostIndex));
+  __ fstrs(V2, Address(SP, -1 * target::kWordSize, Address::PreIndex));
+  __ fldrs(V3, Address(SP, 1 * target::kWordSize, Address::PostIndex));
   __ fcvtds(V0, V3);
   __ RestoreCSP();
   __ ret();
@@ -2556,7 +2562,8 @@
 ASSEMBLER_TEST_GENERATE(FldrqFstrqPrePostIndex, assembler) {
   __ SetupDartSP();
 
-  __ sub(CSP, CSP, Operand(2 * kWordSize));  // Must not access beyond CSP.
+  __ sub(CSP, CSP,
+         Operand(2 * target::kWordSize));  // Must not access beyond CSP.
 
   __ LoadDImmediate(V1, 21.0);
   __ LoadDImmediate(V2, 21.0);
@@ -2564,9 +2571,9 @@
   __ Push(R1);
   __ PushDouble(V1);
   __ PushDouble(V2);
-  __ fldrq(V3, Address(SP, 2 * kWordSize, Address::PostIndex));
+  __ fldrq(V3, Address(SP, 2 * target::kWordSize, Address::PostIndex));
   __ Pop(R0);
-  __ fstrq(V3, Address(SP, -2 * kWordSize, Address::PreIndex));
+  __ fstrq(V3, Address(SP, -2 * target::kWordSize, Address::PreIndex));
   __ PopDouble(V0);
   __ PopDouble(V1);
   __ faddd(V0, V0, V1);
@@ -2720,11 +2727,11 @@
   __ SetupDartSP();
   __ LoadDImmediate(V0, 43.0);
   __ LoadDImmediate(V1, 42.0);
-  __ AddImmediate(SP, SP, -1 * kWordSize);
+  __ AddImmediate(SP, SP, -1 * target::kWordSize);
   __ add(R2, SP, Operand(1));
   __ fstrd(V1, Address(R2, -1));
   __ fldrd(V0, Address(R2, -1));
-  __ AddImmediate(SP, 1 * kWordSize);
+  __ AddImmediate(SP, 1 * target::kWordSize);
   __ RestoreCSP();
   __ ret();
 }
@@ -2737,16 +2744,17 @@
 ASSEMBLER_TEST_GENERATE(FldrdFstrdLargeIndex, assembler) {
   __ SetupDartSP();
 
-  __ sub(CSP, CSP, Operand(32 * kWordSize));  // Must not access beyond CSP.
+  __ sub(CSP, CSP,
+         Operand(32 * target::kWordSize));  // Must not access beyond CSP.
 
   __ LoadDImmediate(V0, 43.0);
   __ LoadDImmediate(V1, 42.0);
   // Largest negative offset that can fit in the signed 9-bit immediate field.
-  __ fstrd(V1, Address(SP, -32 * kWordSize, Address::PreIndex));
+  __ fstrd(V1, Address(SP, -32 * target::kWordSize, Address::PreIndex));
   // Largest positive kWordSize aligned offset that we can fit.
-  __ fldrd(V0, Address(SP, 31 * kWordSize, Address::PostIndex));
+  __ fldrd(V0, Address(SP, 31 * target::kWordSize, Address::PostIndex));
   // Correction.
-  __ add(SP, SP, Operand(kWordSize));  // Restore SP.
+  __ add(SP, SP, Operand(target::kWordSize));  // Restore SP.
   __ RestoreCSP();
   __ ret();
 }
@@ -2760,10 +2768,10 @@
   __ SetupDartSP();
   __ LoadDImmediate(V0, 43.0);
   __ LoadDImmediate(V1, 42.0);
-  __ sub(SP, SP, Operand(512 * kWordSize));
+  __ sub(SP, SP, Operand(512 * target::kWordSize));
   __ andi(CSP, SP, Immediate(~15));  // Must not access beyond CSP.
-  __ fstrd(V1, Address(SP, 512 * kWordSize, Address::Offset));
-  __ add(SP, SP, Operand(512 * kWordSize));
+  __ fstrd(V1, Address(SP, 512 * target::kWordSize, Address::Offset));
+  __ add(SP, SP, Operand(512 * target::kWordSize));
   __ fldrd(V0, Address(SP));
   __ RestoreCSP();
   __ ret();
@@ -2783,10 +2791,10 @@
   // This should sign extend R2, and add to SP to get address,
   // i.e. SP - kWordSize.
   __ fstrd(V1, Address(SP, R2, SXTW));
-  __ sub(SP, SP, Operand(kWordSize));
+  __ sub(SP, SP, Operand(target::kWordSize));
   __ andi(CSP, SP, Immediate(~15));  // Must not access beyond CSP.
   __ fldrd(V0, Address(SP));
-  __ add(SP, SP, Operand(kWordSize));
+  __ add(SP, SP, Operand(target::kWordSize));
   __ RestoreCSP();
   __ ret();
 }
@@ -2801,12 +2809,12 @@
   __ LoadDImmediate(V0, 43.0);
   __ LoadDImmediate(V1, 42.0);
   __ movz(R2, Immediate(10), 0);
-  __ sub(SP, SP, Operand(10 * kWordSize));
+  __ sub(SP, SP, Operand(10 * target::kWordSize));
   __ andi(CSP, SP, Immediate(~15));  // Must not access beyond CSP.
   // Store V1 into SP + R2 * kWordSize.
   __ fstrd(V1, Address(SP, R2, UXTX, Address::Scaled));
   __ fldrd(V0, Address(SP, R2, UXTX, Address::Scaled));
-  __ add(SP, SP, Operand(10 * kWordSize));
+  __ add(SP, SP, Operand(10 * target::kWordSize));
   __ RestoreCSP();
   __ ret();
 }
@@ -4107,6 +4115,7 @@
   __ ret();
 }
 
+}  // namespace compiler
 }  // namespace dart
 
 #endif  // defined(TARGET_ARCH_ARM64)
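
These tests lean on the ARM64 pre-/post-index addressing modes: a pre-indexed str writes back to the base register before the store, a post-indexed ldr loads first and writes back afterwards, so a pair with opposite offsets leaves SP where it started. A small sketch of those semantics (plain C++, not VM code; word size assumed to be 8):

#include <cassert>
#include <cstdint>
#include <map>

constexpr int64_t kWordSize = 8;  // assumed 64-bit target

// Word-addressed toy memory.
using Memory = std::map<int64_t, int64_t>;

// str Rt, [base, #offset]! -- write back first, then store at new base.
void StrPreIndex(Memory* mem, int64_t* base, int64_t offset, int64_t value) {
  *base += offset;
  (*mem)[*base] = value;
}

// ldr Rt, [base], #offset -- load at old base, then write back.
int64_t LdrPostIndex(Memory* mem, int64_t* base, int64_t offset) {
  const int64_t value = (*mem)[*base];
  *base += offset;
  return value;
}

int main() {
  Memory mem;
  int64_t sp = 0x1000;
  StrPreIndex(&mem, &sp, -1 * kWordSize, 42);       // push 42, SP -= 8
  int64_t r0 = LdrPostIndex(&mem, &sp, kWordSize);  // pop it, SP += 8
  assert(r0 == 42);
  assert(sp == 0x1000);  // net SP change is zero, as the tests rely on
  return 0;
}
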
diff --git a/runtime/vm/compiler/assembler/assembler_arm_test.cc b/runtime/vm/compiler/assembler/assembler_arm_test.cc
index 2809d58..4e69f74 100644
--- a/runtime/vm/compiler/assembler/assembler_arm_test.cc
+++ b/runtime/vm/compiler/assembler/assembler_arm_test.cc
@@ -12,6 +12,7 @@
 #include "vm/virtual_memory.h"
 
 namespace dart {
+namespace compiler {
 
 #define __ assembler->
 
@@ -262,11 +263,11 @@
   if (TargetCPUFeatures::vfp_supported()) {
     __ LoadImmediate(R0, bit_cast<int32_t, float>(12.3f));
     __ mov(R2, Operand(SP));
-    __ str(R0, Address(SP, (-kWordSize * 30), Address::PreIndex));
-    __ vldrs(S0, Address(R2, (-kWordSize * 30)));
+    __ str(R0, Address(SP, (-target::kWordSize * 30), Address::PreIndex));
+    __ vldrs(S0, Address(R2, (-target::kWordSize * 30)));
     __ vadds(S0, S0, S0);
-    __ vstrs(S0, Address(R2, (-kWordSize * 30)));
-    __ ldr(R0, Address(SP, (kWordSize * 30), Address::PostIndex));
+    __ vstrs(S0, Address(R2, (-target::kWordSize * 30)));
+    __ ldr(R0, Address(SP, (target::kWordSize * 30), Address::PostIndex));
   }
   __ bx(LR);
 }
@@ -286,11 +287,11 @@
     __ mov(R2, Operand(SP));
     // Expressing __str(R0, Address(SP, (-kWordSize * 32), Address::PreIndex));
     // as:
-    __ mov(R1, Operand(kWordSize));
+    __ mov(R1, Operand(target::kWordSize));
     __ str(R0, Address(SP, R1, LSL, 5, Address::NegPreIndex));
-    __ vldrs(S0, Address(R2, (-kWordSize * 32)));
+    __ vldrs(S0, Address(R2, (-target::kWordSize * 32)));
     __ vadds(S0, S0, S0);
-    __ vstrs(S0, Address(R2, (-kWordSize * 32)));
+    __ vstrs(S0, Address(R2, (-target::kWordSize * 32)));
     // Expressing __ldr(R0, Address(SP, (kWordSize * 32), Address::PostIndex));
     // as:
     __ ldr(R0, Address(SP, R1, LSL, 5, Address::PostIndex));
@@ -313,13 +314,13 @@
     __ LoadImmediate(R0, Utils::Low32Bits(value));
     __ LoadImmediate(R1, Utils::High32Bits(value));
     __ mov(R2, Operand(SP));
-    __ str(R0, Address(SP, (-kWordSize * 30), Address::PreIndex));
-    __ str(R1, Address(R2, (-kWordSize * 29)));
-    __ vldrd(D0, Address(R2, (-kWordSize * 30)));
+    __ str(R0, Address(SP, (-target::kWordSize * 30), Address::PreIndex));
+    __ str(R1, Address(R2, (-target::kWordSize * 29)));
+    __ vldrd(D0, Address(R2, (-target::kWordSize * 30)));
     __ vaddd(D0, D0, D0);
-    __ vstrd(D0, Address(R2, (-kWordSize * 30)));
-    __ ldr(R1, Address(R2, (-kWordSize * 29)));
-    __ ldr(R0, Address(SP, (kWordSize * 30), Address::PostIndex));
+    __ vstrd(D0, Address(R2, (-target::kWordSize * 30)));
+    __ ldr(R1, Address(R2, (-target::kWordSize * 29)));
+    __ ldr(R0, Address(SP, (target::kWordSize * 30), Address::PostIndex));
   }
   __ bx(LR);
 }
@@ -1082,8 +1083,8 @@
 
   __ mov(R1, Operand(0x11));
   __ mov(R2, Operand(SP));
-  __ str(R1, Address(SP, (-kWordSize * 30), Address::PreIndex));
-  __ ldrh(R0, Address(R2, (-kWordSize * 30)));
+  __ str(R1, Address(SP, (-target::kWordSize * 30), Address::PreIndex));
+  __ ldrh(R0, Address(R2, (-target::kWordSize * 30)));
   __ cmp(R0, Operand(0x11));
   __ b(&Test1, EQ);
   __ mov(R0, Operand(1));
@@ -1091,8 +1092,8 @@
   __ Bind(&Test1);
 
   __ mov(R0, Operand(0x22));
-  __ strh(R0, Address(R2, (-kWordSize * 30)));
-  __ ldrh(R1, Address(R2, (-kWordSize * 30)));
+  __ strh(R0, Address(R2, (-target::kWordSize * 30)));
+  __ ldrh(R1, Address(R2, (-target::kWordSize * 30)));
   __ cmp(R1, Operand(0x22));
   __ b(&Test2, EQ);
   __ mov(R0, Operand(1));
@@ -1100,7 +1101,7 @@
   __ Bind(&Test2);
 
   __ mov(R0, Operand(0));
-  __ AddImmediate(R2, (-kWordSize * 30));
+  __ AddImmediate(R2, (-target::kWordSize * 30));
   __ strh(R0, Address(R2));
   __ ldrh(R1, Address(R2));
   __ cmp(R1, Operand(0));
@@ -1111,7 +1112,7 @@
 
   __ mov(R0, Operand(0));
   __ Bind(&Done);
-  __ ldr(R1, Address(SP, (kWordSize * 30), Address::PostIndex));
+  __ ldr(R1, Address(SP, (target::kWordSize * 30), Address::PostIndex));
   __ bx(LR);
 }
 
@@ -1124,9 +1125,9 @@
 ASSEMBLER_TEST_GENERATE(Ldrsb, assembler) {
   __ mov(R1, Operand(0xFF));
   __ mov(R2, Operand(SP));
-  __ str(R1, Address(SP, (-kWordSize * 30), Address::PreIndex));
-  __ ldrsb(R0, Address(R2, (-kWordSize * 30)));
-  __ ldr(R1, Address(SP, (kWordSize * 30), Address::PostIndex));
+  __ str(R1, Address(SP, (-target::kWordSize * 30), Address::PreIndex));
+  __ ldrsb(R0, Address(R2, (-target::kWordSize * 30)));
+  __ ldr(R1, Address(SP, (target::kWordSize * 30), Address::PostIndex));
   __ bx(LR);
 }
 
@@ -1139,9 +1140,9 @@
 ASSEMBLER_TEST_GENERATE(Ldrb, assembler) {
   __ mov(R1, Operand(0xFF));
   __ mov(R2, Operand(SP));
-  __ str(R1, Address(SP, (-kWordSize * 30), Address::PreIndex));
-  __ ldrb(R0, Address(R2, (-kWordSize * 30)));
-  __ ldr(R1, Address(SP, (kWordSize * 30), Address::PostIndex));
+  __ str(R1, Address(SP, (-target::kWordSize * 30), Address::PreIndex));
+  __ ldrb(R0, Address(R2, (-target::kWordSize * 30)));
+  __ ldr(R1, Address(SP, (target::kWordSize * 30), Address::PostIndex));
   __ bx(LR);
 }
 
@@ -1154,9 +1155,9 @@
 ASSEMBLER_TEST_GENERATE(Ldrsh, assembler) {
   __ mov(R1, Operand(0xFF));
   __ mov(R2, Operand(SP));
-  __ str(R1, Address(SP, (-kWordSize * 30), Address::PreIndex));
-  __ ldrsh(R0, Address(R2, (-kWordSize * 30)));
-  __ ldr(R1, Address(SP, (kWordSize * 30), Address::PostIndex));
+  __ str(R1, Address(SP, (-target::kWordSize * 30), Address::PreIndex));
+  __ ldrsh(R0, Address(R2, (-target::kWordSize * 30)));
+  __ ldr(R1, Address(SP, (target::kWordSize * 30), Address::PostIndex));
   __ bx(LR);
 }
 
@@ -1169,9 +1170,9 @@
 ASSEMBLER_TEST_GENERATE(Ldrh1, assembler) {
   __ mov(R1, Operand(0xFF));
   __ mov(R2, Operand(SP));
-  __ str(R1, Address(SP, (-kWordSize * 30), Address::PreIndex));
-  __ ldrh(R0, Address(R2, (-kWordSize * 30)));
-  __ ldr(R1, Address(SP, (kWordSize * 30), Address::PostIndex));
+  __ str(R1, Address(SP, (-target::kWordSize * 30), Address::PreIndex));
+  __ ldrh(R0, Address(R2, (-target::kWordSize * 30)));
+  __ ldr(R1, Address(SP, (target::kWordSize * 30), Address::PostIndex));
   __ bx(LR);
 }
 
@@ -1183,12 +1184,12 @@
 
 ASSEMBLER_TEST_GENERATE(Ldrd, assembler) {
   __ mov(IP, Operand(SP));
-  __ sub(SP, SP, Operand(kWordSize * 30));
+  __ sub(SP, SP, Operand(target::kWordSize * 30));
   __ strd(R2, R3, SP, 0);
-  __ strd(R0, R1, IP, (-kWordSize * 28));
-  __ ldrd(R2, R3, IP, (-kWordSize * 28));
+  __ strd(R0, R1, IP, (-target::kWordSize * 28));
+  __ ldrd(R2, R3, IP, (-target::kWordSize * 28));
   __ ldrd(R0, R1, SP, 0);
-  __ add(SP, SP, Operand(kWordSize * 30));
+  __ add(SP, SP, Operand(target::kWordSize * 30));
   __ sub(R0, R0, Operand(R2));
   __ add(R1, R1, Operand(R3));
   __ bx(LR);
@@ -1215,12 +1216,12 @@
   __ Push(R0);  // Make room, so we can decrement after.
   __ stm(DA_W, SP, (1 << R0 | 1 << R1 | 1 << R2 | 1 << R3));
   __ str(R2, Address(SP));                 // Should be a free slot.
-  __ ldr(R9, Address(SP, 1 * kWordSize));  // R0.  R9 = +1.
-  __ ldr(IP, Address(SP, 2 * kWordSize));  // R1.
+  __ ldr(R9, Address(SP, 1 * target::kWordSize));  // R0.  R9 = +1.
+  __ ldr(IP, Address(SP, 2 * target::kWordSize));  // R1.
   __ sub(R9, R9, Operand(IP));             // -R1. R9 = -6.
-  __ ldr(IP, Address(SP, 3 * kWordSize));  // R2.
+  __ ldr(IP, Address(SP, 3 * target::kWordSize));  // R2.
   __ add(R9, R9, Operand(IP));             // +R2. R9 = +5.
-  __ ldr(IP, Address(SP, 4 * kWordSize));  // R3.
+  __ ldr(IP, Address(SP, 4 * target::kWordSize));  // R3.
   __ sub(R9, R9, Operand(IP));             // -R3. R9 = -26.
   __ ldm(IB_W, SP, (1 << R0 | 1 << R1 | 1 << R2 | 1 << R3));
   // Same operations again. But this time from the restore registers.
@@ -1245,9 +1246,9 @@
 
 ASSEMBLER_TEST_GENERATE(AddressShiftStrLSL1NegOffset, assembler) {
   __ mov(R2, Operand(42));
-  __ mov(R1, Operand(kWordSize));
+  __ mov(R1, Operand(target::kWordSize));
   __ str(R2, Address(SP, R1, LSL, 1, Address::NegOffset));
-  __ ldr(R0, Address(SP, (-kWordSize * 2), Address::Offset));
+  __ ldr(R0, Address(SP, (-target::kWordSize * 2), Address::Offset));
   __ bx(LR);
 }
 
@@ -1259,8 +1260,8 @@
 
 ASSEMBLER_TEST_GENERATE(AddressShiftLdrLSL5NegOffset, assembler) {
   __ mov(R2, Operand(42));
-  __ mov(R1, Operand(kWordSize));
-  __ str(R2, Address(SP, (-kWordSize * 32), Address::Offset));
+  __ mov(R1, Operand(target::kWordSize));
+  __ str(R2, Address(SP, (-target::kWordSize * 32), Address::Offset));
   __ ldr(R0, Address(SP, R1, LSL, 5, Address::NegOffset));
   __ bx(LR);
 }
@@ -1273,9 +1274,9 @@
 
 ASSEMBLER_TEST_GENERATE(AddressShiftStrLRS1NegOffset, assembler) {
   __ mov(R2, Operand(42));
-  __ mov(R1, Operand(kWordSize * 2));
+  __ mov(R1, Operand(target::kWordSize * 2));
   __ str(R2, Address(SP, R1, LSR, 1, Address::NegOffset));
-  __ ldr(R0, Address(SP, -kWordSize, Address::Offset));
+  __ ldr(R0, Address(SP, -target::kWordSize, Address::Offset));
   __ bx(LR);
 }
 
@@ -1287,8 +1288,8 @@
 
 ASSEMBLER_TEST_GENERATE(AddressShiftLdrLRS1NegOffset, assembler) {
   __ mov(R2, Operand(42));
-  __ mov(R1, Operand(kWordSize * 2));
-  __ str(R2, Address(SP, -kWordSize, Address::Offset));
+  __ mov(R1, Operand(target::kWordSize * 2));
+  __ str(R2, Address(SP, -target::kWordSize, Address::Offset));
   __ ldr(R0, Address(SP, R1, LSR, 1, Address::NegOffset));
   __ bx(LR);
 }
@@ -1301,10 +1302,10 @@
 
 ASSEMBLER_TEST_GENERATE(AddressShiftStrLSLNegPreIndex, assembler) {
   __ mov(R2, Operand(42));
-  __ mov(R1, Operand(kWordSize));
+  __ mov(R1, Operand(target::kWordSize));
   __ mov(R3, Operand(SP));
   __ str(R2, Address(SP, R1, LSL, 5, Address::NegPreIndex));
-  __ ldr(R0, Address(R3, (-kWordSize * 32), Address::Offset));
+  __ ldr(R0, Address(R3, (-target::kWordSize * 32), Address::Offset));
   __ mov(SP, Operand(R3));
   __ bx(LR);
 }
@@ -1317,8 +1318,8 @@
 
 ASSEMBLER_TEST_GENERATE(AddressShiftLdrLSLNegPreIndex, assembler) {
   __ mov(R2, Operand(42));
-  __ mov(R1, Operand(kWordSize));
-  __ str(R2, Address(SP, (-kWordSize * 32), Address::PreIndex));
+  __ mov(R1, Operand(target::kWordSize));
+  __ str(R2, Address(SP, (-target::kWordSize * 32), Address::PreIndex));
   __ ldr(R0, Address(SP, R1, LSL, 5, Address::PostIndex));
   __ bx(LR);
 }
@@ -3845,6 +3846,7 @@
   __ Ret();
 }
 
+}  // namespace compiler
 }  // namespace dart
 
 #endif  // defined TARGET_ARCH_ARM
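
Several of the ARM tests above express an offset too large for the immediate field by loading kWordSize into a register and scaling it with LSL 5, relying on base + (index << 5) equalling base + index * 32. A sketch of that equivalence (illustrative values, not VM code):

#include <cassert>
#include <cstdint>

// Effective address of Address(base, index, LSL, shift), with optional
// negation as in the NegOffset/NegPreIndex modes.
int64_t EffectiveAddress(int64_t base, int64_t index, int shift, bool neg) {
  const int64_t scaled = index << shift;
  return neg ? base - scaled : base + scaled;
}

int main() {
  constexpr int64_t kWordSize = 4;  // 32-bit ARM target assumed
  const int64_t sp = 0x8000;
  // R1 = kWordSize; Address(SP, R1, LSL, 5, NegOffset) ...
  const int64_t via_shift = EffectiveAddress(sp, kWordSize, 5, /*neg=*/true);
  // ... reaches the same slot as the immediate form -kWordSize * 32.
  assert(via_shift == sp + (-kWordSize * 32));
  return 0;
}
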
diff --git a/runtime/vm/compiler/assembler/assembler_dbc.cc b/runtime/vm/compiler/assembler/assembler_dbc.cc
index a7643b6..141c36a 100644
--- a/runtime/vm/compiler/assembler/assembler_dbc.cc
+++ b/runtime/vm/compiler/assembler/assembler_dbc.cc
@@ -5,19 +5,19 @@
 #include "vm/globals.h"  // NOLINT
 #if defined(TARGET_ARCH_DBC)
 
+#define SHOULD_NOT_INCLUDE_RUNTIME
+
 #include "vm/compiler/assembler/assembler.h"
 #include "vm/cpu.h"
 #include "vm/longjump.h"
-#include "vm/runtime_entry.h"
 #include "vm/simulator.h"
-#include "vm/stack_frame.h"
-#include "vm/stub_code.h"
 
 namespace dart {
-
 DECLARE_FLAG(bool, check_code_pointer);
 DECLARE_FLAG(bool, inline_alloc);
 
+namespace compiler {
+
 void Assembler::InitializeMemoryWithBreakpoints(uword data, intptr_t length) {
   const uword end = data + length;
   while (data < end) {
@@ -72,11 +72,11 @@
 }
 
 const char* Assembler::RegisterName(Register reg) {
-  return Thread::Current()->zone()->PrintToString("R%d", reg);
+  return ThreadState::Current()->zone()->PrintToString("R%d", reg);
 }
 
 const char* Assembler::FpuRegisterName(FpuRegister reg) {
-  return Thread::Current()->zone()->PrintToString("F%d", reg);
+  return ThreadState::Current()->zone()->PrintToString("F%d", reg);
 }
 
 static int32_t EncodeJump(int32_t relative_pc) {
@@ -125,9 +125,11 @@
 }
 
 intptr_t Assembler::AddConstant(const Object& obj) {
-  return object_pool_wrapper().FindObject(Object::ZoneHandle(obj.raw()));
+  return object_pool_builder().FindObject(
+      NewZoneHandle(ThreadState::Current()->zone(), obj));
 }
 
+}  // namespace compiler
 }  // namespace dart
 
 #endif  // defined TARGET_ARCH_DBC
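
AddConstant above now routes through object_pool_builder().FindObject(...). Conceptually — and this is a deliberately simplified, hypothetical sketch, with std::string standing in for object handles and no patchability or entry-type bits — FindObject returns a stable pool index per distinct constant, appending an entry on first use:

#include <cassert>
#include <cstdint>
#include <string>
#include <unordered_map>
#include <vector>

class ObjectPoolBuilderSketch {
 public:
  // Return the pool index for obj, appending a new entry on first use.
  intptr_t FindObject(const std::string& obj) {
    auto it = index_.find(obj);
    if (it != index_.end()) return it->second;  // deduplicated
    const intptr_t index = static_cast<intptr_t>(entries_.size());
    entries_.push_back(obj);
    index_.emplace(obj, index);
    return index;
  }

 private:
  std::vector<std::string> entries_;
  std::unordered_map<std::string, intptr_t> index_;
};

int main() {
  ObjectPoolBuilderSketch pool;
  assert(pool.FindObject("#42") == 0);
  assert(pool.FindObject("'foo'") == 1);
  assert(pool.FindObject("#42") == 0);  // same constant, same slot
  return 0;
}

Deduplication is what makes the returned index a stable identity for a constant across all of the generated code that refers to it.
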
diff --git a/runtime/vm/compiler/assembler/assembler_dbc.h b/runtime/vm/compiler/assembler/assembler_dbc.h
index 9787191..3c57878 100644
--- a/runtime/vm/compiler/assembler/assembler_dbc.h
+++ b/runtime/vm/compiler/assembler/assembler_dbc.h
@@ -14,11 +14,12 @@
 #include "vm/constants_dbc.h"
 #include "vm/cpu.h"
 #include "vm/hash_map.h"
-#include "vm/object.h"
 #include "vm/simulator.h"
 
 namespace dart {
 
+namespace compiler {
+
 // Dummy declaration to make things compile.
 class Address : public ValueObject {
  private:
@@ -27,9 +28,9 @@
 
 class Assembler : public AssemblerBase {
  public:
-  explicit Assembler(ObjectPoolWrapper* object_pool_wrapper,
+  explicit Assembler(ObjectPoolBuilder* object_pool_builder,
                      bool use_far_branches = false)
-      : AssemblerBase(object_pool_wrapper) {}
+      : AssemblerBase(object_pool_builder) {}
   ~Assembler() {}
 
   void Bind(Label* label);
@@ -99,6 +100,10 @@
   DISALLOW_COPY_AND_ASSIGN(Assembler);
 };
 
+}  // namespace compiler
+
+using compiler::Address;
+
 }  // namespace dart
 
 #endif  // RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_DBC_H_
diff --git a/runtime/vm/compiler/assembler/assembler_dbc_test.cc b/runtime/vm/compiler/assembler/assembler_dbc_test.cc
index 7652b45..c87d7aa 100644
--- a/runtime/vm/compiler/assembler/assembler_dbc_test.cc
+++ b/runtime/vm/compiler/assembler/assembler_dbc_test.cc
@@ -12,6 +12,7 @@
 #include "vm/unit_test.h"
 
 namespace dart {
+namespace compiler {
 
 static RawObject* ExecuteTest(const Code& code) {
   const intptr_t kTypeArgsLen = 0;
@@ -68,8 +69,8 @@
 
 static void MakeDummyInstanceCall(Assembler* assembler, const Object& result) {
   // Make a dummy function.
-  ObjectPoolWrapper object_pool_wrapper;
-  Assembler _assembler_(&object_pool_wrapper);
+  ObjectPoolBuilder object_pool_builder;
+  Assembler _assembler_(&object_pool_builder);
   GenerateDummyCode(&_assembler_, result);
   const char* dummy_function_name = "dummy_instance_function";
   const Function& dummy_instance_function =
@@ -134,7 +135,7 @@
   __ Frame(2);
   __ Move(0, -kParamEndSlotFromFp - 1);
   __ Move(1, -kParamEndSlotFromFp - 2);
-  __ StoreField(0, GrowableObjectArray::data_offset() / kWordSize, 1);
+  __ StoreField(0, GrowableObjectArray::data_offset() / target::kWordSize, 1);
   __ Return(0);
 }
 
@@ -2513,6 +2514,7 @@
 
 #endif  // defined(ARCH_IS_64_BIT)
 
+}  // namespace compiler
 }  // namespace dart
 
 #endif  // defined(TARGET_ARCH_DBC)
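
The StoreField change above divides a byte offset by target::kWordSize because DBC's StoreField addresses fields by word-sized slot index rather than by byte offset. A tiny sketch of the arithmetic (the offset below is illustrative, not the real GrowableObjectArray layout):

#include <cassert>
#include <cstdint>

constexpr intptr_t kWordSize = 8;            // assumed 64-bit target
constexpr intptr_t kDataOffsetInBytes = 16;  // hypothetical field offset

int main() {
  // A byte offset from the object layout converts exactly to a slot
  // index when the field is word-aligned.
  static_assert(kDataOffsetInBytes % kWordSize == 0,
                "field must be word-aligned");
  const intptr_t slot = kDataOffsetInBytes / kWordSize;
  assert(slot == 2);
  return 0;
}
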
diff --git a/runtime/vm/compiler/assembler/assembler_ia32.cc b/runtime/vm/compiler/assembler/assembler_ia32.cc
index 97823b4..f1ef6b1 100644
--- a/runtime/vm/compiler/assembler/assembler_ia32.cc
+++ b/runtime/vm/compiler/assembler/assembler_ia32.cc
@@ -5,20 +5,30 @@
 #include "vm/globals.h"  // NOLINT
 #if defined(TARGET_ARCH_IA32)
 
+#define SHOULD_NOT_INCLUDE_RUNTIME
+
+#include "vm/class_id.h"
 #include "vm/compiler/assembler/assembler.h"
 #include "vm/cpu.h"
-#include "vm/heap/heap.h"
 #include "vm/instructions.h"
-#include "vm/memory_region.h"
-#include "vm/runtime_entry.h"
-#include "vm/stack_frame.h"
-#include "vm/stub_code.h"
 
 namespace dart {
 
 #if !defined(DART_PRECOMPILED_RUNTIME)
-
 DECLARE_FLAG(bool, inline_alloc);
+#endif
+
+namespace compiler {
+
+using target::ClassTable;
+using target::Heap;
+using target::Instance;
+using target::Instructions;
+using target::Isolate;
+using target::RawObject;
+using target::Thread;
+
+#if !defined(DART_PRECOMPILED_RUNTIME)
 
 class DirectCallRelocation : public AssemblerFixup {
  public:
@@ -34,8 +44,7 @@
 
 int32_t Assembler::jit_cookie() {
   if (jit_cookie_ == 0) {
-    jit_cookie_ =
-        static_cast<int32_t>(Isolate::Current()->random()->NextUInt32());
+    jit_cookie_ = CreateJitCookie();
   }
   return jit_cookie_;
 }
@@ -1759,7 +1768,7 @@
 void Assembler::Drop(intptr_t stack_elements) {
   ASSERT(stack_elements >= 0);
   if (stack_elements > 0) {
-    addl(ESP, Immediate(stack_elements * kWordSize));
+    addl(ESP, Immediate(stack_elements * target::kWordSize));
   }
 }
 
@@ -1770,17 +1779,18 @@
 void Assembler::LoadObject(Register dst,
                            const Object& object,
                            bool movable_referent) {
-  ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
-  ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
+  ASSERT(IsOriginalObject(object));
+
   // movable_referent: some references to VM heap objects may be patched with
   // references to isolate-local objects (e.g., optimized static calls).
   // We need to track such references since the latter may move during
   // compaction.
-  if (object.IsSmi() || (object.InVMHeap() && !movable_referent)) {
-    movl(dst, Immediate(reinterpret_cast<int32_t>(object.raw())));
+  if (target::CanEmbedAsRawPointerInGeneratedCode(object) &&
+      !movable_referent) {
+    movl(dst, Immediate(target::ToRawPointer(object)));
   } else {
-    ASSERT(object.IsNotTemporaryScopedHandle());
-    ASSERT(object.IsOld());
+    ASSERT(IsNotTemporaryScopedHandle(object));
+    ASSERT(IsInOldSpace(object));
     AssemblerBuffer::EnsureCapacity ensured(&buffer_);
     EmitUint8(0xB8 + dst);
     buffer_.EmitObject(object);
@@ -1788,25 +1798,23 @@
 }
 
 void Assembler::LoadObjectSafely(Register dst, const Object& object) {
-  ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
-  ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
-  if (Assembler::IsSafe(object)) {
-    LoadObject(dst, object);
-  } else {
-    int32_t cookie = jit_cookie();
-    movl(dst, Immediate(reinterpret_cast<int32_t>(object.raw()) ^ cookie));
+  ASSERT(IsOriginalObject(object));
+  if (target::IsSmi(object) && !IsSafeSmi(object)) {
+    const int32_t cookie = jit_cookie();
+    movl(dst, Immediate(target::ToRawSmi(object) ^ cookie));
     xorl(dst, Immediate(cookie));
+  } else {
+    LoadObject(dst, object);
   }
 }
 
 void Assembler::PushObject(const Object& object) {
-  ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
-  ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
-  if (object.IsSmi() || object.InVMHeap()) {
-    pushl(Immediate(reinterpret_cast<int32_t>(object.raw())));
+  ASSERT(IsOriginalObject(object));
+  if (target::CanEmbedAsRawPointerInGeneratedCode(object)) {
+    pushl(Immediate(target::ToRawPointer(object)));
   } else {
-    ASSERT(object.IsNotTemporaryScopedHandle());
-    ASSERT(object.IsOld());
+    ASSERT(IsNotTemporaryScopedHandle(object));
+    ASSERT(IsInOldSpace(object));
     AssemblerBuffer::EnsureCapacity ensured(&buffer_);
     EmitUint8(0x68);
     buffer_.EmitObject(object);
@@ -1814,13 +1822,12 @@
 }
 
 void Assembler::CompareObject(Register reg, const Object& object) {
-  ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
-  ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
-  if (object.IsSmi() || object.InVMHeap()) {
-    cmpl(reg, Immediate(reinterpret_cast<int32_t>(object.raw())));
+  ASSERT(IsOriginalObject(object));
+  if (target::CanEmbedAsRawPointerInGeneratedCode(object)) {
+    cmpl(reg, Immediate(target::ToRawPointer(object)));
   } else {
-    ASSERT(object.IsNotTemporaryScopedHandle());
-    ASSERT(object.IsOld());
+    ASSERT(IsNotTemporaryScopedHandle(object));
+    ASSERT(IsInOldSpace(object));
     AssemblerBuffer::EnsureCapacity ensured(&buffer_);
     if (reg == EAX) {
       EmitUint8(0x05 + (7 << 3));
@@ -1846,8 +1853,9 @@
     Stop("Unexpected Smi!");
     Bind(&okay);
 #endif
-    COMPILE_ASSERT((kNewObjectAlignmentOffset == kWordSize) &&
-                   (kOldObjectAlignmentOffset == 0));
+    COMPILE_ASSERT((target::ObjectAlignment::kNewObjectAlignmentOffset ==
+                    target::kWordSize) &&
+                   (target::ObjectAlignment::kOldObjectAlignmentOffset == 0));
     // Write-barrier triggers if the value is in the new space (has bit set) and
     // the object is in the old space (has bit cleared).
     // To check that we could compute value & ~object and skip the write barrier
@@ -1856,9 +1864,9 @@
     // ~value | object instead and skip the write barrier if the bit is set.
     notl(value);
     orl(value, object);
-    testl(value, Immediate(kNewObjectAlignmentOffset));
+    testl(value, Immediate(target::ObjectAlignment::kNewObjectAlignmentOffset));
   } else {
-    ASSERT(kNewObjectAlignmentOffset == 4);
+    ASSERT(target::ObjectAlignment::kNewObjectAlignmentOffset == 4);
     ASSERT(kHeapObjectTag == 1);
     // Detect value being ...101 and object being ...001.
     andl(value, Immediate(7));
@@ -1951,28 +1959,18 @@
   Bind(&done);
 }
 
-void Assembler::UnverifiedStoreOldObject(const Address& dest,
-                                         const Object& value) {
-  ASSERT(!value.IsICData() || ICData::Cast(value).IsOriginal());
-  ASSERT(!value.IsField() || Field::Cast(value).IsOriginal());
-  ASSERT(value.IsOld());
-  ASSERT(!value.InVMHeap());
-  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
-  EmitUint8(0xC7);
-  EmitOperand(0, dest);
-  buffer_.EmitObject(value);
-}
-
 void Assembler::StoreIntoObjectNoBarrier(Register object,
                                          const Address& dest,
                                          const Object& value) {
-  ASSERT(!value.IsICData() || ICData::Cast(value).IsOriginal());
-  ASSERT(!value.IsField() || Field::Cast(value).IsOriginal());
-  if (value.IsSmi() || value.InVMHeap()) {
-    Immediate imm_value(reinterpret_cast<int32_t>(value.raw()));
+  ASSERT(IsOriginalObject(value));
+  if (target::CanEmbedAsRawPointerInGeneratedCode(value)) {
+    Immediate imm_value(target::ToRawPointer(value));
     movl(dest, imm_value);
   } else {
-    UnverifiedStoreOldObject(dest, value);
+    AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+    EmitUint8(0xC7);
+    EmitOperand(0, dest);
+    buffer_.EmitObject(value);
   }
   // No store buffer update.
 }
@@ -1989,14 +1987,14 @@
 }
 
 void Assembler::ZeroInitSmiField(const Address& dest) {
-  Immediate zero(Smi::RawValue(0));
+  Immediate zero(target::ToRawSmi(0));
   movl(dest, zero);
 }
 
 void Assembler::IncrementSmiField(const Address& dest, int32_t increment) {
   // Note: FlowGraphCompiler::EdgeCounterIncrementSizeInBytes depends on
   // the length of this instruction sequence.
-  Immediate inc_imm(Smi::RawValue(increment));
+  Immediate inc_imm(target::ToRawSmi(increment));
   addl(dest, inc_imm);
 }
 
@@ -2006,7 +2004,7 @@
   pushl(Immediate(Utils::High32Bits(constant)));
   pushl(Immediate(Utils::Low32Bits(constant)));
   movsd(dst, Address(ESP, 0));
-  addl(ESP, Immediate(2 * kWordSize));
+  addl(ESP, Immediate(2 * target::kWordSize));
 }
 
 void Assembler::FloatNegate(XmmRegister f) {
@@ -2105,7 +2103,7 @@
   // and ensure proper alignment of the stack frame.
   // We need to restore it before restoring registers.
   const intptr_t kPushedRegistersSize =
-      kNumberOfVolatileCpuRegisters * kWordSize +
+      kNumberOfVolatileCpuRegisters * target::kWordSize +
       kNumberOfVolatileXmmRegisters * kFpuRegisterSize;
   leal(ESP, Address(EBP, -kPushedRegistersSize));
 
@@ -2133,8 +2131,8 @@
 }
 
 void Assembler::Call(const Code& target, bool movable_target) {
-  LoadObject(CODE_REG, target, movable_target);
-  call(FieldAddress(CODE_REG, Code::entry_point_offset()));
+  LoadObject(CODE_REG, ToObject(target), movable_target);
+  call(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
 }
 
 void Assembler::CallToRuntime() {
@@ -2142,12 +2140,12 @@
 }
 
 void Assembler::Jmp(const Code& target) {
-  const ExternalLabel label(target.EntryPoint());
+  const ExternalLabel label(target::Code::EntryPointOf(target));
   jmp(&label);
 }
 
 void Assembler::J(Condition condition, const Code& target) {
-  const ExternalLabel label(target.EntryPoint());
+  const ExternalLabel label(target::Code::EntryPointOf(target));
   j(condition, &label);
 }
 
@@ -2201,18 +2199,17 @@
       Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid);
   movl(temp_reg, Address(temp_reg, table_offset));
   state_address = Address(temp_reg, state_offset);
-  testb(state_address, Immediate(ClassHeapStats::TraceAllocationMask()));
+  testb(state_address,
+        Immediate(target::ClassHeapStats::TraceAllocationMask()));
   // We are tracing for this class, jump to the trace label which will use
   // the allocation stub.
   j(NOT_ZERO, trace, near_jump);
 }
 
-void Assembler::UpdateAllocationStats(intptr_t cid,
-                                      Register temp_reg,
-                                      Heap::Space space) {
+void Assembler::UpdateAllocationStats(intptr_t cid, Register temp_reg) {
   ASSERT(cid > 0);
   intptr_t counter_offset =
-      ClassTable::CounterOffsetFor(cid, space == Heap::kNew);
+      ClassTable::CounterOffsetFor(cid, /*is_new_space=*/true);
   ASSERT(temp_reg != kNoRegister);
   LoadIsolate(temp_reg);
   intptr_t table_offset =
@@ -2223,23 +2220,21 @@
 
 void Assembler::UpdateAllocationStatsWithSize(intptr_t cid,
                                               Register size_reg,
-                                              Register temp_reg,
-                                              Heap::Space space) {
+                                              Register temp_reg) {
   ASSERT(cid > 0);
   ASSERT(cid < kNumPredefinedCids);
-  UpdateAllocationStats(cid, temp_reg, space);
-  intptr_t size_offset = ClassTable::SizeOffsetFor(cid, space == Heap::kNew);
+  UpdateAllocationStats(cid, temp_reg);
+  intptr_t size_offset = ClassTable::SizeOffsetFor(cid, /*is_new_space=*/true);
   addl(Address(temp_reg, size_offset), size_reg);
 }
 
 void Assembler::UpdateAllocationStatsWithSize(intptr_t cid,
                                               intptr_t size_in_bytes,
-                                              Register temp_reg,
-                                              Heap::Space space) {
+                                              Register temp_reg) {
   ASSERT(cid > 0);
   ASSERT(cid < kNumPredefinedCids);
-  UpdateAllocationStats(cid, temp_reg, space);
-  intptr_t size_offset = ClassTable::SizeOffsetFor(cid, space == Heap::kNew);
+  UpdateAllocationStats(cid, temp_reg);
+  intptr_t size_offset = ClassTable::SizeOffsetFor(cid, /*is_new_space=*/true);
   addl(Address(temp_reg, size_offset), Immediate(size_in_bytes));
 }
 #endif  // !PRODUCT
@@ -2251,14 +2246,13 @@
                             Register temp_reg) {
   ASSERT(failure != NULL);
   ASSERT(temp_reg != kNoRegister);
-  const intptr_t instance_size = cls.instance_size();
+  const intptr_t instance_size = target::Class::GetInstanceSize(cls);
   if (FLAG_inline_alloc && Heap::IsAllocatableInNewSpace(instance_size)) {
     // If this allocation is traced, program will jump to failure path
     // (i.e. the allocation stub) which will allocate the object and trace the
     // allocation call site.
-    NOT_IN_PRODUCT(
-        MaybeTraceAllocation(cls.id(), temp_reg, failure, near_jump));
-    NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
+    const classid_t cid = target::Class::GetId(cls);
+    NOT_IN_PRODUCT(MaybeTraceAllocation(cid, temp_reg, failure, near_jump));
     movl(instance_reg, Address(THR, Thread::top_offset()));
     addl(instance_reg, Immediate(instance_size));
     // instance_reg: potential next object start.
@@ -2267,15 +2261,13 @@
     // Successfully allocated the object, now update top to point to
     // next object start and store the class in the class field of object.
     movl(Address(THR, Thread::top_offset()), instance_reg);
-    NOT_IN_PRODUCT(UpdateAllocationStats(cls.id(), temp_reg, space));
+    NOT_IN_PRODUCT(UpdateAllocationStats(cid, temp_reg));
     ASSERT(instance_size >= kHeapObjectTag);
     subl(instance_reg, Immediate(instance_size - kHeapObjectTag));
-    uint32_t tags = 0;
-    tags = RawObject::SizeTag::update(instance_size, tags);
-    ASSERT(cls.id() != kIllegalCid);
-    tags = RawObject::ClassIdTag::update(cls.id(), tags);
-    tags = RawObject::NewBit::update(true, tags);
-    movl(FieldAddress(instance_reg, Object::tags_offset()), Immediate(tags));
+    const uint32_t tags =
+        target::MakeTagWordForNewSpaceObject(cid, instance_size);
+    movl(FieldAddress(instance_reg, target::Object::tags_offset()),
+         Immediate(tags));
   } else {
     jmp(failure);
   }
@@ -2295,7 +2287,6 @@
     // (i.e. the allocation stub) which will allocate the object and trace the
     // allocation call site.
     NOT_IN_PRODUCT(MaybeTraceAllocation(cid, temp_reg, failure, near_jump));
-    NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
     movl(instance, Address(THR, Thread::top_offset()));
     movl(end_address, instance);
 
@@ -2312,22 +2303,20 @@
     // next object start and initialize the object.
     movl(Address(THR, Thread::top_offset()), end_address);
     addl(instance, Immediate(kHeapObjectTag));
-    NOT_IN_PRODUCT(
-        UpdateAllocationStatsWithSize(cid, instance_size, temp_reg, space));
+    NOT_IN_PRODUCT(UpdateAllocationStatsWithSize(cid, instance_size, temp_reg));
 
     // Initialize the tags.
-    uint32_t tags = 0;
-    tags = RawObject::ClassIdTag::update(cid, tags);
-    tags = RawObject::SizeTag::update(instance_size, tags);
-    tags = RawObject::NewBit::update(true, tags);
-    movl(FieldAddress(instance, Object::tags_offset()), Immediate(tags));
+    const uint32_t tags =
+        target::MakeTagWordForNewSpaceObject(cid, instance_size);
+    movl(FieldAddress(instance, target::Object::tags_offset()),
+         Immediate(tags));
   } else {
     jmp(failure);
   }
 }
 
 void Assembler::PushCodeObject() {
-  ASSERT(code_.IsNotTemporaryScopedHandle());
+  ASSERT(IsNotTemporaryScopedHandle(code_));
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x68);
   buffer_.EmitObject(code_);
@@ -2363,17 +2352,6 @@
   EnterDartFrame(0);
 }
 
-void Assembler::Stop(const char* message) {
-  if (FLAG_print_stop_message) {
-    pushl(EAX);  // Preserve EAX.
-    movl(EAX, Immediate(reinterpret_cast<int32_t>(message)));
-    Call(StubCode::PrintStopMessage());  // Passing message in EAX.
-    popl(EAX);                           // Restore EAX.
-  }
-  // Emit the int3 instruction.
-  int3();  // Execution can be resumed with the 'cont' command in gdb.
-}
-
 void Assembler::EmitOperand(int rm, const Operand& operand) {
   ASSERT(rm >= 0 && rm < 8);
   const intptr_t length = operand.length_;
@@ -2461,7 +2439,7 @@
   ASSERT(RawObject::kClassIdTagPos == 16);
   ASSERT(RawObject::kClassIdTagSize == 16);
   const intptr_t class_id_offset =
-      Object::tags_offset() + RawObject::kClassIdTagPos / kBitsPerByte;
+      target::Object::tags_offset() + RawObject::kClassIdTagPos / kBitsPerByte;
   movzxw(result, FieldAddress(object, class_id_offset));
 }
 
@@ -2471,7 +2449,7 @@
   const intptr_t offset =
       Isolate::class_table_offset() + ClassTable::table_offset();
   movl(result, Address(result, offset));
-  ASSERT(kSizeOfClassPairLog2 == 3);
+  ASSERT(ClassTable::kSizeOfClassPairLog2 == 3);
   movl(result, Address(result, class_id, TIMES_8, 0));
 }
 
@@ -2490,7 +2468,7 @@
   ASSERT(RawObject::kClassIdTagPos == 16);
   ASSERT(RawObject::kClassIdTagSize == 16);
   const intptr_t class_id_offset =
-      Object::tags_offset() + RawObject::kClassIdTagPos / kBitsPerByte;
+      target::Object::tags_offset() + RawObject::kClassIdTagPos / kBitsPerByte;
 
   // Untag optimistically. Tag bit is shifted into the CARRY.
   SmiUntag(object);
@@ -2542,7 +2520,7 @@
     jmp(&join, Assembler::kNearJump);
 
     Bind(&smi);
-    movl(result, Immediate(Smi::RawValue(kSmiCid)));
+    movl(result, Immediate(target::ToRawSmi(kSmiCid)));
 
     Bind(&join);
   } else {
@@ -2623,6 +2601,7 @@
   return xmm_reg_names[reg];
 }
 
+}  // namespace compiler
 }  // namespace dart
 
 #endif  // defined(TARGET_ARCH_IA32)
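
The allocation hunks in this file replace the open-coded tag computation (the SizeTag, ClassIdTag and NewBit updates on the removed lines) with a single target::MakeTagWordForNewSpaceObject call. A sketch of what such a helper packs together; the class-id position matches the asserts visible in this file, while the other positions and widths are assumptions:

#include <cassert>
#include <cstdint>

constexpr uint32_t kNewBitPos = 0;      // assumed
constexpr uint32_t kSizeTagPos = 8;     // assumed
constexpr uint32_t kClassIdTagPos = 16; // matches RawObject::kClassIdTagPos

uint32_t MakeTagWordForNewSpaceObject(uint32_t cid, uint32_t size_units) {
  uint32_t tags = 0;
  tags |= size_units << kSizeTagPos;  // was RawObject::SizeTag::update
  tags |= cid << kClassIdTagPos;      // was RawObject::ClassIdTag::update
  tags |= 1u << kNewBitPos;           // was RawObject::NewBit::update(true)
  return tags;
}

int main() {
  const uint32_t tags = MakeTagWordForNewSpaceObject(/*cid=*/42, /*size=*/4);
  assert((tags >> kClassIdTagPos) == 42);       // class id recoverable
  assert(((tags >> kSizeTagPos) & 0xFF) == 4);  // size recoverable
  assert((tags & (1u << kNewBitPos)) != 0);     // new-space bit set
  return 0;
}

Folding the field updates into one target:: helper keeps the bit-field layout of the target's object header out of the assembler entirely.
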
diff --git a/runtime/vm/compiler/assembler/assembler_ia32.h b/runtime/vm/compiler/assembler/assembler_ia32.h
index 9fcc3eb..1b110d7 100644
--- a/runtime/vm/compiler/assembler/assembler_ia32.h
+++ b/runtime/vm/compiler/assembler/assembler_ia32.h
@@ -13,11 +13,10 @@
 #include "platform/utils.h"
 #include "vm/constants_ia32.h"
 #include "vm/constants_x86.h"
+#include "vm/pointer_tagging.h"
 
 namespace dart {
-
-// Forward declarations.
-class RuntimeEntry;
+namespace compiler {
 
 class Immediate : public ValueObject {
  public:
@@ -222,11 +221,11 @@
 
 class Assembler : public AssemblerBase {
  public:
-  explicit Assembler(ObjectPoolWrapper* object_pool_wrapper,
+  explicit Assembler(ObjectPoolBuilder* object_pool_builder,
                      bool use_far_branches = false)
-      : AssemblerBase(object_pool_wrapper),
+      : AssemblerBase(object_pool_builder),
         jit_cookie_(0),
-        code_(Code::ZoneHandle()) {
+        code_(NewZoneHandle(ThreadState::Current()->zone())) {
     // This mode is only needed and implemented for ARM.
     ASSERT(!use_far_branches);
   }
@@ -689,7 +688,7 @@
                                            intptr_t extra_disp = 0);
 
   static Address VMTagAddress() {
-    return Address(THR, Thread::vm_tag_offset());
+    return Address(THR, target::Thread::vm_tag_offset());
   }
 
   /*
@@ -774,18 +773,14 @@
                             Label* trace,
                             bool near_jump);
 
-  void UpdateAllocationStats(intptr_t cid,
-                             Register temp_reg,
-                             Heap::Space space);
+  void UpdateAllocationStats(intptr_t cid, Register temp_reg);
 
   void UpdateAllocationStatsWithSize(intptr_t cid,
                                      Register size_reg,
-                                     Register temp_reg,
-                                     Heap::Space space);
+                                     Register temp_reg);
   void UpdateAllocationStatsWithSize(intptr_t cid,
                                      intptr_t instance_size,
-                                     Register temp_reg,
-                                     Heap::Space space);
+                                     Register temp_reg);
 
   // Inlined allocation of an instance of class 'cls', code has no runtime
   // calls. Jump to 'failure' if the instance cannot be allocated here.
@@ -814,29 +809,23 @@
   static const char* RegisterName(Register reg);
   static const char* FpuRegisterName(FpuRegister reg);
 
-  // Smis that do not fit into 17 bits (16 bits of payload) are unsafe.
+  // Check if the given value is an integer that can be directly
+  // embedded into the code without additional XORing with jit_cookie.
+  // We consider 16-bit integers, powers of two and corresponding masks
+  // as safe values that can be embedded into the code object.
   static bool IsSafeSmi(const Object& object) {
-    if (!object.IsSmi()) {
-      return false;
+    int64_t value;
+    if (HasIntegerValue(object, &value)) {
+      return Utils::IsInt(16, value) || Utils::IsPowerOfTwo(value) ||
+             Utils::IsPowerOfTwo(value + 1);
     }
-
-    if (Utils::IsInt(17, reinterpret_cast<intptr_t>(object.raw()))) {
-      return true;
-    }
-
-    // Single bit smis (powers of two) and corresponding masks are safe.
-    const intptr_t value = Smi::Cast(object).Value();
-    if (Utils::IsPowerOfTwo(value) || Utils::IsPowerOfTwo(value + 1)) {
-      return true;
-    }
-
     return false;
   }
   static bool IsSafe(const Object& object) {
-    return !object.IsSmi() || IsSafeSmi(object);
+    return !target::IsSmi(object) || IsSafeSmi(object);
   }
 
-  void set_code_object(const Code& code) { code_ ^= code.raw(); }
+  Object& GetSelfHandle() const { return code_; }
 
   void PushCodeObject();
 
@@ -880,12 +869,10 @@
                              CanBeSmi can_be_smi,
                              BarrierFilterMode barrier_filter_mode);
 
-  void UnverifiedStoreOldObject(const Address& dest, const Object& value);
-
   int32_t jit_cookie();
 
   int32_t jit_cookie_;
-  Code& code_;
+  Object& code_;
 
   DISALLOW_ALLOCATION();
   DISALLOW_COPY_AND_ASSIGN(Assembler);
@@ -916,6 +903,12 @@
   EmitUint8(0x66);
 }
 
+}  // namespace compiler
+
+using compiler::Address;
+using compiler::FieldAddress;
+using compiler::Immediate;
+
 }  // namespace dart
 
 #endif  // RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_IA32_H_
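
The IsSafeSmi rewrite above and LoadObjectSafely in the corresponding .cc work as a pair: constants that pass the predicate are emitted directly, while everything else is XOR-masked with the JIT cookie so the raw bits never sit verbatim in executable memory. A self-contained sketch (the Utils helpers are reimplemented locally and the cookie value is a placeholder):

#include <cassert>
#include <cstdint>

bool IsInt(int bits, int64_t value) {
  const int64_t limit = int64_t{1} << (bits - 1);
  return -limit <= value && value < limit;
}

bool IsPowerOfTwo(int64_t value) {
  return value > 0 && (value & (value - 1)) == 0;
}

// Mirrors the predicate in the header: 16-bit values, powers of two and
// the corresponding masks are considered safe to embed directly.
bool IsSafeSmiValue(int64_t value) {
  return IsInt(16, value) || IsPowerOfTwo(value) || IsPowerOfTwo(value + 1);
}

int main() {
  assert(IsSafeSmiValue(12345));                   // fits in 16 bits
  assert(IsSafeSmiValue(int64_t{1} << 20));        // power of two
  assert(IsSafeSmiValue((int64_t{1} << 20) - 1));  // mask
  assert(!IsSafeSmiValue(0x12345678));             // arbitrary wide constant

  // Unsafe constants go through LoadObjectSafely's XOR dance: the raw
  // bits never appear verbatim in the instruction stream, yet XOR with
  // the same cookie restores them at run time.
  const int32_t cookie = 0x5A5A5A5A;  // stands in for jit_cookie()
  const int32_t raw = 0x12345678;
  assert(((raw ^ cookie) ^ cookie) == raw);
  return 0;
}
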
diff --git a/runtime/vm/compiler/assembler/assembler_ia32_test.cc b/runtime/vm/compiler/assembler/assembler_ia32_test.cc
index 154cd89..26d3784 100644
--- a/runtime/vm/compiler/assembler/assembler_ia32_test.cc
+++ b/runtime/vm/compiler/assembler/assembler_ia32_test.cc
@@ -19,6 +19,7 @@
 #endif
 
 namespace dart {
+namespace compiler {
 
 #define __ assembler->
 
@@ -36,7 +37,7 @@
 }
 
 ASSEMBLER_TEST_GENERATE(ReadArgument, assembler) {
-  __ movl(EAX, Address(ESP, kWordSize));
+  __ movl(EAX, Address(ESP, target::kWordSize));
   __ ret();
 }
 
@@ -54,21 +55,21 @@
   __ movl(EAX, Address(EBP, 0));
   __ movl(EAX, Address(EAX, 0));
 
-  __ movl(EAX, Address(ESP, kWordSize));
-  __ movl(EAX, Address(EBP, kWordSize));
-  __ movl(EAX, Address(EAX, kWordSize));
+  __ movl(EAX, Address(ESP, target::kWordSize));
+  __ movl(EAX, Address(EBP, target::kWordSize));
+  __ movl(EAX, Address(EAX, target::kWordSize));
 
-  __ movl(EAX, Address(ESP, -kWordSize));
-  __ movl(EAX, Address(EBP, -kWordSize));
-  __ movl(EAX, Address(EAX, -kWordSize));
+  __ movl(EAX, Address(ESP, -target::kWordSize));
+  __ movl(EAX, Address(EBP, -target::kWordSize));
+  __ movl(EAX, Address(EAX, -target::kWordSize));
 
-  __ movl(EAX, Address(ESP, 256 * kWordSize));
-  __ movl(EAX, Address(EBP, 256 * kWordSize));
-  __ movl(EAX, Address(EAX, 256 * kWordSize));
+  __ movl(EAX, Address(ESP, 256 * target::kWordSize));
+  __ movl(EAX, Address(EBP, 256 * target::kWordSize));
+  __ movl(EAX, Address(EAX, 256 * target::kWordSize));
 
-  __ movl(EAX, Address(ESP, -256 * kWordSize));
-  __ movl(EAX, Address(EBP, -256 * kWordSize));
-  __ movl(EAX, Address(EAX, -256 * kWordSize));
+  __ movl(EAX, Address(ESP, -256 * target::kWordSize));
+  __ movl(EAX, Address(EBP, -256 * target::kWordSize));
+  __ movl(EAX, Address(EAX, -256 * target::kWordSize));
 
   __ movl(EAX, Address(EAX, TIMES_1));
   __ movl(EAX, Address(EAX, TIMES_2));
@@ -78,11 +79,11 @@
   __ movl(EAX, Address(EBP, TIMES_2));
   __ movl(EAX, Address(EAX, TIMES_2));
 
-  __ movl(EAX, Address(EBP, TIMES_2, kWordSize));
-  __ movl(EAX, Address(EAX, TIMES_2, kWordSize));
+  __ movl(EAX, Address(EBP, TIMES_2, target::kWordSize));
+  __ movl(EAX, Address(EAX, TIMES_2, target::kWordSize));
 
-  __ movl(EAX, Address(EBP, TIMES_2, 256 * kWordSize));
-  __ movl(EAX, Address(EAX, TIMES_2, 256 * kWordSize));
+  __ movl(EAX, Address(EBP, TIMES_2, 256 * target::kWordSize));
+  __ movl(EAX, Address(EAX, TIMES_2, 256 * target::kWordSize));
 
   __ movl(EAX, Address(EAX, EBP, TIMES_2, 0));
   __ movl(EAX, Address(EAX, EAX, TIMES_2, 0));
@@ -91,19 +92,19 @@
   __ movl(EAX, Address(ESP, EBP, TIMES_2, 0));
   __ movl(EAX, Address(ESP, EAX, TIMES_2, 0));
 
-  __ movl(EAX, Address(EAX, EBP, TIMES_2, kWordSize));
-  __ movl(EAX, Address(EAX, EAX, TIMES_2, kWordSize));
-  __ movl(EAX, Address(EBP, EBP, TIMES_2, kWordSize));
-  __ movl(EAX, Address(EBP, EAX, TIMES_2, kWordSize));
-  __ movl(EAX, Address(ESP, EBP, TIMES_2, kWordSize));
-  __ movl(EAX, Address(ESP, EAX, TIMES_2, kWordSize));
+  __ movl(EAX, Address(EAX, EBP, TIMES_2, target::kWordSize));
+  __ movl(EAX, Address(EAX, EAX, TIMES_2, target::kWordSize));
+  __ movl(EAX, Address(EBP, EBP, TIMES_2, target::kWordSize));
+  __ movl(EAX, Address(EBP, EAX, TIMES_2, target::kWordSize));
+  __ movl(EAX, Address(ESP, EBP, TIMES_2, target::kWordSize));
+  __ movl(EAX, Address(ESP, EAX, TIMES_2, target::kWordSize));
 
-  __ movl(EAX, Address(EAX, EBP, TIMES_2, 256 * kWordSize));
-  __ movl(EAX, Address(EAX, EAX, TIMES_2, 256 * kWordSize));
-  __ movl(EAX, Address(EBP, EBP, TIMES_2, 256 * kWordSize));
-  __ movl(EAX, Address(EBP, EAX, TIMES_2, 256 * kWordSize));
-  __ movl(EAX, Address(ESP, EBP, TIMES_2, 256 * kWordSize));
-  __ movl(EAX, Address(ESP, EAX, TIMES_2, 256 * kWordSize));
+  __ movl(EAX, Address(EAX, EBP, TIMES_2, 256 * target::kWordSize));
+  __ movl(EAX, Address(EAX, EAX, TIMES_2, 256 * target::kWordSize));
+  __ movl(EAX, Address(EBP, EBP, TIMES_2, 256 * target::kWordSize));
+  __ movl(EAX, Address(EBP, EAX, TIMES_2, 256 * target::kWordSize));
+  __ movl(EAX, Address(ESP, EBP, TIMES_2, 256 * target::kWordSize));
+  __ movl(EAX, Address(ESP, EAX, TIMES_2, 256 * target::kWordSize));
 }
 
 ASSEMBLER_TEST_RUN(AddressingModes, test) {
@@ -376,11 +377,11 @@
 }
 
 ASSEMBLER_TEST_GENERATE(AddressBinOp, assembler) {
-  __ movl(EAX, Address(ESP, kWordSize));
-  __ addl(EAX, Address(ESP, kWordSize));
+  __ movl(EAX, Address(ESP, target::kWordSize));
+  __ addl(EAX, Address(ESP, target::kWordSize));
   __ incl(EAX);
-  __ subl(EAX, Address(ESP, kWordSize));
-  __ imull(EAX, Address(ESP, kWordSize));
+  __ subl(EAX, Address(ESP, target::kWordSize));
+  __ imull(EAX, Address(ESP, target::kWordSize));
   __ ret();
 }
 
@@ -480,7 +481,7 @@
 }
 
 ASSEMBLER_TEST_GENERATE(BitScanReverse, assembler) {
-  __ movl(ECX, Address(ESP, kWordSize));
+  __ movl(ECX, Address(ESP, target::kWordSize));
   __ movl(EAX, Immediate(666));  // Marker for conditional write.
   __ bsrl(EAX, ECX);
   __ ret();
@@ -538,7 +539,7 @@
   __ movzxb(EAX, Address(ESP, 0));  // EAX = 0xff
   __ movsxw(EBX, Address(ESP, 0));  // EBX = -1
   __ movzxw(ECX, Address(ESP, 0));  // ECX = 0xffff
-  __ addl(ESP, Immediate(kWordSize));
+  __ addl(ESP, Immediate(target::kWordSize));
 
   __ addl(EBX, ECX);
   __ addl(EAX, EBX);
@@ -584,7 +585,7 @@
   __ pushl(Immediate(0x1C));
   __ xorl(ECX, Address(ESP, 0));  // 0x65B.
   __ popl(EAX);                   // Discard.
-  __ movl(EAX, Address(ESP, kWordSize));
+  __ movl(EAX, Address(ESP, target::kWordSize));
   __ movl(EDX, Immediate(0xB0));
   __ orl(Address(EAX, 0), EDX);
   __ movl(EAX, ECX);
@@ -762,7 +763,7 @@
   __ Bind(&donetest13);
 
   Label donetest14;
-  __ subl(ESP, Immediate(kWordSize));
+  __ subl(ESP, Immediate(target::kWordSize));
   __ movl(Address(ESP, 0), Immediate(0x80000000));
   __ movl(EAX, Immediate(0));
   __ movl(ECX, Immediate(3));
@@ -772,10 +773,10 @@
   __ j(EQUAL, &donetest14);
   __ int3();
   __ Bind(&donetest14);
-  __ addl(ESP, Immediate(kWordSize));
+  __ addl(ESP, Immediate(target::kWordSize));
 
   Label donetest15;
-  __ subl(ESP, Immediate(kWordSize));
+  __ subl(ESP, Immediate(target::kWordSize));
   __ movl(Address(ESP, 0), Immediate(0xFF000000));
   __ movl(EAX, Immediate(-1));
   __ movl(ECX, Immediate(2));
@@ -785,7 +786,7 @@
   __ j(EQUAL, &donetest15);
   __ int3();
   __ Bind(&donetest15);
-  __ addl(ESP, Immediate(kWordSize));
+  __ addl(ESP, Immediate(target::kWordSize));
 
   Label donetest16;
   __ movl(EDX, Immediate(0x80000000));
@@ -2645,10 +2646,10 @@
 ASSEMBLER_TEST_GENERATE(SingleFPOperationsStack, assembler) {
   __ movl(EAX, Immediate(bit_cast<int32_t, float>(12.3f)));
   __ movd(XMM0, EAX);
-  __ addss(XMM0, Address(ESP, kWordSize));  // 15.7f
-  __ mulss(XMM0, Address(ESP, kWordSize));  // 53.38f
-  __ subss(XMM0, Address(ESP, kWordSize));  // 49.98f
-  __ divss(XMM0, Address(ESP, kWordSize));  // 14.7f
+  __ addss(XMM0, Address(ESP, target::kWordSize));  // 15.7f
+  __ mulss(XMM0, Address(ESP, target::kWordSize));  // 53.38f
+  __ subss(XMM0, Address(ESP, target::kWordSize));  // 49.98f
+  __ divss(XMM0, Address(ESP, target::kWordSize));  // 14.7f
   __ pushl(EAX);
   __ movss(Address(ESP, 0), XMM0);
   __ flds(Address(ESP, 0));
@@ -2689,7 +2690,7 @@
   __ movsd(XMM6, XMM5);
   __ movsd(XMM7, XMM6);
   __ movl(Address(ESP, 0), Immediate(0));
-  __ movl(Address(ESP, kWordSize), Immediate(0));
+  __ movl(Address(ESP, target::kWordSize), Immediate(0));
   __ movsd(XMM0, Address(ESP, 0));
   __ movsd(Address(ESP, 0), XMM7);
   __ movsd(XMM7, Address(ESP, 0));
@@ -2701,7 +2702,7 @@
   __ movaps(XMM1, XMM2);
   __ movaps(XMM0, XMM1);
   __ movl(Address(ESP, 0), Immediate(0));
-  __ movl(Address(ESP, kWordSize), Immediate(0));
+  __ movl(Address(ESP, target::kWordSize), Immediate(0));
   __ movsd(Address(ESP, 0), XMM0);
   __ fldl(Address(ESP, 0));
   __ popl(EAX);
@@ -2755,7 +2756,7 @@
   __ pushl(EAX);
   __ fldl(Address(ESP, 0));
   __ movl(Address(ESP, 0), Immediate(0));
-  __ movl(Address(ESP, kWordSize), Immediate(0));
+  __ movl(Address(ESP, target::kWordSize), Immediate(0));
   __ fstpl(Address(ESP, 0));
   __ popl(EAX);
   __ popl(EDX);
@@ -2844,10 +2845,10 @@
   __ popl(EAX);
   __ popl(EAX);
 
-  __ addsd(XMM0, Address(ESP, kWordSize));  // 15.7
-  __ mulsd(XMM0, Address(ESP, kWordSize));  // 53.38
-  __ subsd(XMM0, Address(ESP, kWordSize));  // 49.98
-  __ divsd(XMM0, Address(ESP, kWordSize));  // 14.7
+  __ addsd(XMM0, Address(ESP, target::kWordSize));  // 15.7
+  __ mulsd(XMM0, Address(ESP, target::kWordSize));  // 53.38
+  __ subsd(XMM0, Address(ESP, target::kWordSize));  // 49.98
+  __ divsd(XMM0, Address(ESP, target::kWordSize));  // 14.7
 
   __ pushl(EAX);
   __ pushl(EAX);
@@ -2913,7 +2914,7 @@
 }
 
 ASSEMBLER_TEST_GENERATE(IntToDoubleConversion2, assembler) {
-  __ filds(Address(ESP, kWordSize));
+  __ filds(Address(ESP, target::kWordSize));
   __ ret();
 }
 
@@ -3004,7 +3005,7 @@
 }
 
 ASSEMBLER_TEST_GENERATE(FloatToIntConversionRound, assembler) {
-  __ movsd(XMM1, Address(ESP, kWordSize));
+  __ movsd(XMM1, Address(ESP, target::kWordSize));
   __ cvtss2si(EDX, XMM1);
   __ movl(EAX, EDX);
   __ ret();
@@ -3025,7 +3026,7 @@
 }
 
 ASSEMBLER_TEST_GENERATE(FloatToIntConversionTrunc, assembler) {
-  __ movsd(XMM1, Address(ESP, kWordSize));
+  __ movsd(XMM1, Address(ESP, target::kWordSize));
   __ cvttss2si(EDX, XMM1);
   __ movl(EAX, EDX);
   __ ret();
@@ -3286,7 +3287,7 @@
 }
 
 ASSEMBLER_TEST_GENERATE(DoubleToIntConversionRound, assembler) {
-  __ movsd(XMM3, Address(ESP, kWordSize));
+  __ movsd(XMM3, Address(ESP, target::kWordSize));
   __ cvtsd2si(EAX, XMM3);
   __ ret();
 }
@@ -3305,7 +3306,7 @@
 }
 
 ASSEMBLER_TEST_GENERATE(DoubleToIntConversionTrunc, assembler) {
-  __ movsd(XMM3, Address(ESP, kWordSize));
+  __ movsd(XMM3, Address(ESP, target::kWordSize));
   __ cvttsd2si(EAX, XMM3);
   __ ret();
 }
@@ -3324,7 +3325,7 @@
 }
 
 ASSEMBLER_TEST_GENERATE(DoubleToDoubleTrunc, assembler) {
-  __ movsd(XMM3, Address(ESP, kWordSize));
+  __ movsd(XMM3, Address(ESP, target::kWordSize));
   __ roundsd(XMM2, XMM3, Assembler::kRoundToZero);
   __ pushl(EAX);
   __ pushl(EAX);
@@ -3386,7 +3387,7 @@
 }
 
 ASSEMBLER_TEST_GENERATE(Sine, assembler) {
-  __ flds(Address(ESP, kWordSize));
+  __ flds(Address(ESP, target::kWordSize));
   __ fsin();
   __ ret();
 }
@@ -3403,7 +3404,7 @@
 }
 
 ASSEMBLER_TEST_GENERATE(Cosine, assembler) {
-  __ flds(Address(ESP, kWordSize));
+  __ flds(Address(ESP, target::kWordSize));
   __ fcos();
   __ ret();
 }
@@ -3420,9 +3421,9 @@
 }
 
 ASSEMBLER_TEST_GENERATE(SinCos, assembler) {
-  __ fldl(Address(ESP, kWordSize));
+  __ fldl(Address(ESP, target::kWordSize));
   __ fsincos();
-  __ subl(ESP, Immediate(2 * kWordSize));
+  __ subl(ESP, Immediate(2 * target::kWordSize));
   __ fstpl(Address(ESP, 0));  // cos result.
   __ movsd(XMM0, Address(ESP, 0));
   __ fstpl(Address(ESP, 0));  // sin result.
@@ -3430,7 +3431,7 @@
   __ subsd(XMM1, XMM0);  // sin - cos.
   __ movsd(Address(ESP, 0), XMM1);
   __ fldl(Address(ESP, 0));
-  __ addl(ESP, Immediate(2 * kWordSize));
+  __ addl(ESP, Immediate(2 * target::kWordSize));
   __ ret();
 }
 
@@ -3456,7 +3457,7 @@
 }
 
 ASSEMBLER_TEST_GENERATE(Tangent, assembler) {
-  __ fldl(Address(ESP, kWordSize));
+  __ fldl(Address(ESP, target::kWordSize));
   __ fptan();
   __ ffree(0);
   __ fincstp();
@@ -3477,7 +3478,7 @@
 }
 
 ASSEMBLER_TEST_GENERATE(SquareRootFloat, assembler) {
-  __ movss(XMM0, Address(ESP, kWordSize));
+  __ movss(XMM0, Address(ESP, target::kWordSize));
   __ sqrtss(XMM1, XMM0);
   __ pushl(EAX);
   __ movss(Address(ESP, 0), XMM1);
@@ -3502,7 +3503,7 @@
 }
 
 ASSEMBLER_TEST_GENERATE(SquareRootDouble, assembler) {
-  __ movsd(XMM0, Address(ESP, kWordSize));
+  __ movsd(XMM0, Address(ESP, target::kWordSize));
   __ sqrtsd(XMM1, XMM0);
   __ pushl(EAX);
   __ pushl(EAX);
@@ -3563,7 +3564,7 @@
 }
 
 ASSEMBLER_TEST_GENERATE(FloatNegate, assembler) {
-  __ movss(XMM0, Address(ESP, kWordSize));
+  __ movss(XMM0, Address(ESP, target::kWordSize));
   __ FloatNegate(XMM0);
   __ pushl(EAX);
   __ movss(Address(ESP, 0), XMM0);
@@ -3588,7 +3589,7 @@
 }
 
 ASSEMBLER_TEST_GENERATE(DoubleNegate, assembler) {
-  __ movsd(XMM0, Address(ESP, kWordSize));
+  __ movsd(XMM0, Address(ESP, target::kWordSize));
   __ DoubleNegate(XMM0);
   __ pushl(EAX);
   __ pushl(EAX);
@@ -3617,8 +3618,8 @@
 }
 
 ASSEMBLER_TEST_GENERATE(LongMulReg, assembler) {
-  __ movl(ECX, Address(ESP, kWordSize));
-  __ movl(EAX, Address(ESP, 2 * kWordSize));
+  __ movl(ECX, Address(ESP, target::kWordSize));
+  __ movl(EAX, Address(ESP, 2 * target::kWordSize));
   __ imull(ECX);
   __ ret();
 }
@@ -3638,8 +3639,8 @@
 }
 
 ASSEMBLER_TEST_GENERATE(LongMulAddress, assembler) {
-  __ movl(EAX, Address(ESP, 2 * kWordSize));
-  __ imull(Address(ESP, kWordSize));
+  __ movl(EAX, Address(ESP, 2 * target::kWordSize));
+  __ imull(Address(ESP, target::kWordSize));
   __ ret();
 }
 
@@ -3657,8 +3658,8 @@
 }
 
 ASSEMBLER_TEST_GENERATE(LongUnsignedMulReg, assembler) {
-  __ movl(ECX, Address(ESP, kWordSize));
-  __ movl(EAX, Address(ESP, 2 * kWordSize));
+  __ movl(ECX, Address(ESP, target::kWordSize));
+  __ movl(EAX, Address(ESP, 2 * target::kWordSize));
   __ mull(ECX);
   __ ret();
 }
@@ -3683,8 +3684,8 @@
 }
 
 ASSEMBLER_TEST_GENERATE(LongUnsignedMulAddress, assembler) {
-  __ movl(EAX, Address(ESP, 2 * kWordSize));
-  __ mull(Address(ESP, kWordSize));
+  __ movl(EAX, Address(ESP, 2 * target::kWordSize));
+  __ mull(Address(ESP, target::kWordSize));
   __ ret();
 }
 
@@ -3710,10 +3711,10 @@
 ASSEMBLER_TEST_GENERATE(LongAddReg, assembler) {
   // Preserve clobbered callee-saved register (EBX).
   __ pushl(EBX);
-  __ movl(EAX, Address(ESP, 2 * kWordSize));  // left low.
-  __ movl(EDX, Address(ESP, 3 * kWordSize));  // left high.
-  __ movl(ECX, Address(ESP, 4 * kWordSize));  // right low.
-  __ movl(EBX, Address(ESP, 5 * kWordSize));  // right high
+  __ movl(EAX, Address(ESP, 2 * target::kWordSize));  // left low.
+  __ movl(EDX, Address(ESP, 3 * target::kWordSize));  // left high.
+  __ movl(ECX, Address(ESP, 4 * target::kWordSize));  // right low.
+  __ movl(EBX, Address(ESP, 5 * target::kWordSize));  // right high.
   __ addl(EAX, ECX);
   __ adcl(EDX, EBX);
   __ popl(EBX);
@@ -3744,10 +3745,10 @@
 }
 
 ASSEMBLER_TEST_GENERATE(LongAddAddress, assembler) {
-  __ movl(EAX, Address(ESP, 1 * kWordSize));  // left low.
-  __ movl(EDX, Address(ESP, 2 * kWordSize));  // left high.
-  __ addl(EAX, Address(ESP, 3 * kWordSize));  // low.
-  __ adcl(EDX, Address(ESP, 4 * kWordSize));  // high.
+  __ movl(EAX, Address(ESP, 1 * target::kWordSize));  // left low.
+  __ movl(EDX, Address(ESP, 2 * target::kWordSize));  // left high.
+  __ addl(EAX, Address(ESP, 3 * target::kWordSize));  // low.
+  __ adcl(EDX, Address(ESP, 4 * target::kWordSize));  // high.
   // Result is in EAX/EDX.
   __ ret();
 }
@@ -3773,10 +3774,10 @@
 ASSEMBLER_TEST_GENERATE(LongSubReg, assembler) {
   // Preserve clobbered callee-saved register (EBX).
   __ pushl(EBX);
-  __ movl(EAX, Address(ESP, 2 * kWordSize));  // left low.
-  __ movl(EDX, Address(ESP, 3 * kWordSize));  // left high.
-  __ movl(ECX, Address(ESP, 4 * kWordSize));  // right low.
-  __ movl(EBX, Address(ESP, 5 * kWordSize));  // right high
+  __ movl(EAX, Address(ESP, 2 * target::kWordSize));  // left low.
+  __ movl(EDX, Address(ESP, 3 * target::kWordSize));  // left high.
+  __ movl(ECX, Address(ESP, 4 * target::kWordSize));  // right low.
+  __ movl(EBX, Address(ESP, 5 * target::kWordSize));  // right high.
   __ subl(EAX, ECX);
   __ sbbl(EDX, EBX);
   __ popl(EBX);
@@ -3807,10 +3808,10 @@
 }
 
 ASSEMBLER_TEST_GENERATE(LongSubAddress, assembler) {
-  __ movl(EAX, Address(ESP, 1 * kWordSize));  // left low.
-  __ movl(EDX, Address(ESP, 2 * kWordSize));  // left high.
-  __ subl(EAX, Address(ESP, 3 * kWordSize));  // low.
-  __ sbbl(EDX, Address(ESP, 4 * kWordSize));  // high.
+  __ movl(EAX, Address(ESP, 1 * target::kWordSize));  // left low.
+  __ movl(EDX, Address(ESP, 2 * target::kWordSize));  // left high.
+  __ subl(EAX, Address(ESP, 3 * target::kWordSize));  // low.
+  __ sbbl(EDX, Address(ESP, 4 * target::kWordSize));  // high.
   // Result is in EAX/EDX.
   __ ret();
 }
@@ -3836,18 +3837,18 @@
 ASSEMBLER_TEST_GENERATE(LongSubAddress2, assembler) {
   // Preserve clobbered callee-saved register (EBX).
   __ pushl(EBX);
-  __ movl(EAX, Address(ESP, 2 * kWordSize));  // left low.
-  __ movl(EDX, Address(ESP, 3 * kWordSize));  // left high.
-  __ movl(ECX, Address(ESP, 4 * kWordSize));  // right low.
-  __ movl(EBX, Address(ESP, 5 * kWordSize));  // right high
-  __ subl(ESP, Immediate(2 * kWordSize));
-  __ movl(Address(ESP, 0 * kWordSize), EAX);  // left low.
-  __ movl(Address(ESP, 1 * kWordSize), EDX);  // left high.
-  __ subl(Address(ESP, 0 * kWordSize), ECX);
-  __ sbbl(Address(ESP, 1 * kWordSize), EBX);
-  __ movl(EAX, Address(ESP, 0 * kWordSize));
-  __ movl(EDX, Address(ESP, 1 * kWordSize));
-  __ addl(ESP, Immediate(2 * kWordSize));
+  __ movl(EAX, Address(ESP, 2 * target::kWordSize));  // left low.
+  __ movl(EDX, Address(ESP, 3 * target::kWordSize));  // left high.
+  __ movl(ECX, Address(ESP, 4 * target::kWordSize));  // right low.
+  __ movl(EBX, Address(ESP, 5 * target::kWordSize));  // right high.
+  __ subl(ESP, Immediate(2 * target::kWordSize));
+  __ movl(Address(ESP, 0 * target::kWordSize), EAX);  // left low.
+  __ movl(Address(ESP, 1 * target::kWordSize), EDX);  // left high.
+  __ subl(Address(ESP, 0 * target::kWordSize), ECX);
+  __ sbbl(Address(ESP, 1 * target::kWordSize), EBX);
+  __ movl(EAX, Address(ESP, 0 * target::kWordSize));
+  __ movl(EDX, Address(ESP, 1 * target::kWordSize));
+  __ addl(ESP, Immediate(2 * target::kWordSize));
   __ popl(EBX);
   // Result is in EAX/EDX.
   __ ret();
@@ -3884,18 +3885,18 @@
 ASSEMBLER_TEST_GENERATE(LongAddAddress2, assembler) {
   // Preserve clobbered callee-saved register (EBX).
   __ pushl(EBX);
-  __ movl(EAX, Address(ESP, 2 * kWordSize));  // left low.
-  __ movl(EDX, Address(ESP, 3 * kWordSize));  // left high.
-  __ movl(ECX, Address(ESP, 4 * kWordSize));  // right low.
-  __ movl(EBX, Address(ESP, 5 * kWordSize));  // right high
-  __ subl(ESP, Immediate(2 * kWordSize));
-  __ movl(Address(ESP, 0 * kWordSize), EAX);  // left low.
-  __ movl(Address(ESP, 1 * kWordSize), EDX);  // left high.
-  __ addl(Address(ESP, 0 * kWordSize), ECX);
-  __ adcl(Address(ESP, 1 * kWordSize), EBX);
-  __ movl(EAX, Address(ESP, 0 * kWordSize));
-  __ movl(EDX, Address(ESP, 1 * kWordSize));
-  __ addl(ESP, Immediate(2 * kWordSize));
+  __ movl(EAX, Address(ESP, 2 * target::kWordSize));  // left low.
+  __ movl(EDX, Address(ESP, 3 * target::kWordSize));  // left high.
+  __ movl(ECX, Address(ESP, 4 * target::kWordSize));  // right low.
+  __ movl(EBX, Address(ESP, 5 * target::kWordSize));  // right high.
+  __ subl(ESP, Immediate(2 * target::kWordSize));
+  __ movl(Address(ESP, 0 * target::kWordSize), EAX);  // left low.
+  __ movl(Address(ESP, 1 * target::kWordSize), EDX);  // left high.
+  __ addl(Address(ESP, 0 * target::kWordSize), ECX);
+  __ adcl(Address(ESP, 1 * target::kWordSize), EBX);
+  __ movl(EAX, Address(ESP, 0 * target::kWordSize));
+  __ movl(EDX, Address(ESP, 1 * target::kWordSize));
+  __ addl(ESP, Immediate(2 * target::kWordSize));
   __ popl(EBX);
   // Result is in EAX/EDX.
   __ ret();
@@ -3931,7 +3932,7 @@
 
 // Testing only the lower 64-bit value of 'cvtdq2pd'.
 ASSEMBLER_TEST_GENERATE(IntegerToDoubleConversion, assembler) {
-  __ movsd(XMM1, Address(ESP, kWordSize));
+  __ movsd(XMM1, Address(ESP, target::kWordSize));
   __ cvtdq2pd(XMM2, XMM1);
   __ pushl(EAX);
   __ pushl(EAX);
@@ -3962,21 +3963,21 @@
 
 // Implement with truncation.
 ASSEMBLER_TEST_GENERATE(FPUStoreLong, assembler) {
-  __ fldl(Address(ESP, kWordSize));
+  __ fldl(Address(ESP, target::kWordSize));
   __ pushl(EAX);
   __ pushl(EAX);
   __ fnstcw(Address(ESP, 0));
   __ movzxw(EAX, Address(ESP, 0));
   __ orl(EAX, Immediate(0x0c00));
-  __ movw(Address(ESP, kWordSize), EAX);
-  __ fldcw(Address(ESP, kWordSize));
+  __ movw(Address(ESP, target::kWordSize), EAX);
+  __ fldcw(Address(ESP, target::kWordSize));
   __ pushl(EAX);
   __ pushl(EAX);
   __ fistpl(Address(ESP, 0));
   __ popl(EAX);
   __ popl(EDX);
   __ fldcw(Address(ESP, 0));
-  __ addl(ESP, Immediate(kWordSize * 2));
+  __ addl(ESP, Immediate(target::kWordSize * 2));
   __ ret();
 }
 
@@ -4014,7 +4015,7 @@
 }
 
 ASSEMBLER_TEST_GENERATE(XorpdZeroing, assembler) {
-  __ movsd(XMM0, Address(ESP, kWordSize));
+  __ movsd(XMM0, Address(ESP, target::kWordSize));
   __ xorpd(XMM0, XMM0);
   __ pushl(EAX);
   __ pushl(EAX);
@@ -4042,7 +4043,7 @@
 }
 
 ASSEMBLER_TEST_GENERATE(Pxor, assembler) {
-  __ movsd(XMM0, Address(ESP, kWordSize));
+  __ movsd(XMM0, Address(ESP, target::kWordSize));
   __ pxor(XMM0, XMM0);
   __ pushl(EAX);
   __ pushl(EAX);
@@ -4070,7 +4071,7 @@
 }
 
 ASSEMBLER_TEST_GENERATE(Orpd, assembler) {
-  __ movsd(XMM0, Address(ESP, kWordSize));
+  __ movsd(XMM0, Address(ESP, target::kWordSize));
   __ xorpd(XMM1, XMM1);
   __ DoubleNegate(XMM1);
   __ orpd(XMM0, XMM1);
@@ -4103,7 +4104,7 @@
 
 ASSEMBLER_TEST_GENERATE(Pextrd0, assembler) {
   if (TargetCPUFeatures::sse4_1_supported()) {
-    __ movsd(XMM0, Address(ESP, kWordSize));
+    __ movsd(XMM0, Address(ESP, target::kWordSize));
     __ pextrd(EAX, XMM0, Immediate(0));
   }
   __ ret();
@@ -4123,7 +4124,7 @@
 
 ASSEMBLER_TEST_GENERATE(Pextrd1, assembler) {
   if (TargetCPUFeatures::sse4_1_supported()) {
-    __ movsd(XMM0, Address(ESP, kWordSize));
+    __ movsd(XMM0, Address(ESP, target::kWordSize));
     __ pextrd(EAX, XMM0, Immediate(1));
   }
   __ ret();
@@ -4143,7 +4144,7 @@
 
 ASSEMBLER_TEST_GENERATE(Pmovsxdq, assembler) {
   if (TargetCPUFeatures::sse4_1_supported()) {
-    __ movsd(XMM0, Address(ESP, kWordSize));
+    __ movsd(XMM0, Address(ESP, target::kWordSize));
     __ pmovsxdq(XMM0, XMM0);
     __ pextrd(EAX, XMM0, Immediate(1));
   }
@@ -4165,7 +4166,7 @@
 
 ASSEMBLER_TEST_GENERATE(Pcmpeqq, assembler) {
   if (TargetCPUFeatures::sse4_1_supported()) {
-    __ movsd(XMM0, Address(ESP, kWordSize));
+    __ movsd(XMM0, Address(ESP, target::kWordSize));
     __ xorpd(XMM1, XMM1);
     __ pcmpeqq(XMM0, XMM1);
     __ movd(EAX, XMM0);
@@ -4188,7 +4189,7 @@
 }
 
 ASSEMBLER_TEST_GENERATE(AndPd, assembler) {
-  __ movsd(XMM0, Address(ESP, kWordSize));
+  __ movsd(XMM0, Address(ESP, target::kWordSize));
   __ andpd(XMM0, XMM0);
   __ pushl(EAX);
   __ pushl(EAX);
@@ -4216,7 +4217,7 @@
 }
 
 ASSEMBLER_TEST_GENERATE(Movq, assembler) {
-  __ movq(XMM0, Address(ESP, kWordSize));
+  __ movq(XMM0, Address(ESP, target::kWordSize));
   __ subl(ESP, Immediate(kDoubleSize));
   __ movq(Address(ESP, 0), XMM0);
   __ fldl(Address(ESP, 0));
@@ -4238,7 +4239,7 @@
 }
 
 ASSEMBLER_TEST_GENERATE(DoubleAbs, assembler) {
-  __ movsd(XMM0, Address(ESP, kWordSize));
+  __ movsd(XMM0, Address(ESP, target::kWordSize));
   __ DoubleAbs(XMM0);
   __ pushl(EAX);
   __ pushl(EAX);
@@ -4270,7 +4271,7 @@
 }
 
 ASSEMBLER_TEST_GENERATE(ExtractSignBits, assembler) {
-  __ movsd(XMM0, Address(ESP, kWordSize));
+  __ movsd(XMM0, Address(ESP, target::kWordSize));
   __ movmskpd(EAX, XMM0);
   __ andl(EAX, Immediate(0x1));
   __ ret();
@@ -4296,7 +4297,7 @@
   // Preserve clobbered callee-saved register (EBX).
   __ pushl(EBX);
 
-  __ movl(EDX, Address(ESP, 2 * kWordSize));
+  __ movl(EDX, Address(ESP, 2 * target::kWordSize));
   __ xorl(EAX, EAX);
   __ movl(EBX, Immediate(1));
   __ movl(ECX, Immediate(-1));
@@ -4332,8 +4333,8 @@
 
 // Return 1 if overflow, 0 if no overflow.
 ASSEMBLER_TEST_GENERATE(ConditionalMovesNoOverflow, assembler) {
-  __ movl(EDX, Address(ESP, 1 * kWordSize));
-  __ addl(EDX, Address(ESP, 2 * kWordSize));
+  __ movl(EDX, Address(ESP, 1 * target::kWordSize));
+  __ addl(EDX, Address(ESP, 2 * target::kWordSize));
   __ movl(EAX, Immediate(1));
   __ movl(ECX, Immediate(0));
   __ cmovno(EAX, ECX);
@@ -4360,7 +4361,7 @@
 ASSEMBLER_TEST_GENERATE(ConditionalMovesEqual, assembler) {
   __ xorl(EAX, EAX);
   __ movl(ECX, Immediate(1));
-  __ movl(EDX, Address(ESP, 1 * kWordSize));
+  __ movl(EDX, Address(ESP, 1 * target::kWordSize));
   __ cmpl(EDX, Immediate(785));
   __ cmove(EAX, ECX);
   __ ret();
@@ -4385,7 +4386,7 @@
 ASSEMBLER_TEST_GENERATE(ConditionalMovesNotEqual, assembler) {
   __ xorl(EAX, EAX);
   __ movl(ECX, Immediate(1));
-  __ movl(EDX, Address(ESP, 1 * kWordSize));
+  __ movl(EDX, Address(ESP, 1 * target::kWordSize));
   __ cmpl(EDX, Immediate(785));
   __ cmovne(EAX, ECX);
   __ ret();
@@ -4409,8 +4410,8 @@
 ASSEMBLER_TEST_GENERATE(ConditionalMovesCompare, assembler) {
   __ movl(EDX, Immediate(1));   // Greater equal.
   __ movl(ECX, Immediate(-1));  // Less
-  __ movl(EAX, Address(ESP, 1 * kWordSize));
-  __ cmpl(EAX, Address(ESP, 2 * kWordSize));
+  __ movl(EAX, Address(ESP, 1 * target::kWordSize));
+  __ cmpl(EAX, Address(ESP, 2 * target::kWordSize));
   __ cmovlessl(EAX, ECX);
   __ cmovgel(EAX, EDX);
   __ ret();
@@ -4620,9 +4621,9 @@
   __ pushl(ESI);
   __ pushl(EDI);
   __ pushl(ECX);
-  __ movl(ESI, Address(ESP, 4 * kWordSize));  // from.
-  __ movl(EDI, Address(ESP, 5 * kWordSize));  // to.
-  __ movl(ECX, Address(ESP, 6 * kWordSize));  // count.
+  __ movl(ESI, Address(ESP, 4 * target::kWordSize));  // from.
+  __ movl(EDI, Address(ESP, 5 * target::kWordSize));  // to.
+  __ movl(ECX, Address(ESP, 6 * target::kWordSize));  // count.
   __ rep_movsb();
   __ popl(ECX);
   __ popl(EDI);
@@ -4657,9 +4658,9 @@
 // Called from assembler_test.cc.
 ASSEMBLER_TEST_GENERATE(StoreIntoObject, assembler) {
   __ pushl(THR);
-  __ movl(EAX, Address(ESP, 2 * kWordSize));
-  __ movl(ECX, Address(ESP, 3 * kWordSize));
-  __ movl(THR, Address(ESP, 4 * kWordSize));
+  __ movl(EAX, Address(ESP, 2 * target::kWordSize));
+  __ movl(ECX, Address(ESP, 3 * target::kWordSize));
+  __ movl(THR, Address(ESP, 4 * target::kWordSize));
   __ pushl(EAX);
   __ StoreIntoObject(ECX, FieldAddress(ECX, GrowableObjectArray::data_offset()),
                      EAX);
@@ -4804,6 +4805,7 @@
                Address(ESP, 0),
                __ popl(EAX))
 
+}  // namespace compiler
 }  // namespace dart
 
 #endif  // defined TARGET_ARCH_IA32
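
The IA32 test changes above are a mechanical rename of kWordSize to target::kWordSize, but the rename is what makes cross-word-size compilation possible later. A minimal standalone sketch of the shadowing scheme; the namespace layout mirrors the SDK, while both definitions here are illustrative rather than the real runtime_api.h ones:

#include <cstddef>
#include <cstdio>

namespace dart {

// Host word size: the machine the compiler itself runs on.
constexpr size_t kWordSize = sizeof(void*);

namespace compiler {
namespace target {

// Target word size: the machine the generated code will run on. Hard-wired
// to 4 here to stand in for an IA32 target; the SDK derives it per target.
constexpr size_t kWordSize = 4;

}  // namespace target

void Describe() {
  // Inside dart::compiler, target::kWordSize resolves to the nested
  // namespace, independent of the host's dart::kWordSize.
  std::printf("host=%zu target=%zu\n", dart::kWordSize, target::kWordSize);
}

}  // namespace compiler
}  // namespace dart

int main() {
  dart::compiler::Describe();
  return 0;
}
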
diff --git a/runtime/vm/compiler/assembler/assembler_test.cc b/runtime/vm/compiler/assembler/assembler_test.cc
index 77947ed..7a7b54a 100644
--- a/runtime/vm/compiler/assembler/assembler_test.cc
+++ b/runtime/vm/compiler/assembler/assembler_test.cc
@@ -11,7 +11,9 @@
 
 namespace dart {
 
+namespace compiler {
 ASSEMBLER_TEST_EXTERN(StoreIntoObject);
+}  // namespace compiler
 
 ASSEMBLER_TEST_RUN(StoreIntoObject, test) {
 #define TEST_CODE(value, growable_array, thread)                               \
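
The one-line move of ASSEMBLER_TEST_EXTERN above matters because an extern declaration binds to the namespace it appears in: the generator is now defined inside dart::compiler, so a declaration left in plain dart:: would name a different, undefined symbol. A simplified, hypothetical expansion of the macro machinery:

namespace dart {
namespace compiler {

struct Assembler {};  // Stand-in; the real class lives in the per-arch header.

// Roughly what ASSEMBLER_TEST_GENERATE(StoreIntoObject, assembler) produces,
// now inside dart::compiler because the generator files moved there.
inline void GenerateStoreIntoObject(Assembler* /*assembler*/) {
  // Emits the test code in the real macro expansion.
}

}  // namespace compiler

// The runner can stay in dart:: and qualify the call explicitly, which is
// why only the extern declaration needed to move into the nested namespace.
inline void RunStoreIntoObjectTest(compiler::Assembler* assembler) {
  compiler::GenerateStoreIntoObject(assembler);
}

}  // namespace dart

int main() {
  dart::compiler::Assembler assembler;
  dart::RunStoreIntoObjectTest(&assembler);
  return 0;
}
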
diff --git a/runtime/vm/compiler/assembler/assembler_x64.cc b/runtime/vm/compiler/assembler/assembler_x64.cc
index 4ac2d43..af6b564 100644
--- a/runtime/vm/compiler/assembler/assembler_x64.cc
+++ b/runtime/vm/compiler/assembler/assembler_x64.cc
@@ -5,34 +5,43 @@
 #include "vm/globals.h"  // NOLINT
 #if defined(TARGET_ARCH_X64)
 
+#define SHOULD_NOT_INCLUDE_RUNTIME
+
+#include "vm/class_id.h"
 #include "vm/compiler/assembler/assembler.h"
 #include "vm/compiler/backend/locations.h"
-#include "vm/cpu.h"
-#include "vm/heap/heap.h"
 #include "vm/instructions.h"
-#include "vm/memory_region.h"
-#include "vm/runtime_entry.h"
-#include "vm/stack_frame.h"
-#include "vm/stub_code.h"
 
 namespace dart {
 
 #if !defined(DART_PRECOMPILED_RUNTIME)
-
 DECLARE_FLAG(bool, check_code_pointer);
 DECLARE_FLAG(bool, inline_alloc);
 DECLARE_FLAG(bool, precompiled_mode);
+#endif
 
-Assembler::Assembler(ObjectPoolWrapper* object_pool_wrapper,
+namespace compiler {
+
+using target::ClassTable;
+using target::Heap;
+using target::Instance;
+using target::Instructions;
+using target::Isolate;
+using target::RawObject;
+using target::Thread;
+
+#if !defined(DART_PRECOMPILED_RUNTIME)
+
+Assembler::Assembler(ObjectPoolBuilder* object_pool_builder,
                      bool use_far_branches)
-    : AssemblerBase(object_pool_wrapper), constant_pool_allowed_(false) {
+    : AssemblerBase(object_pool_builder), constant_pool_allowed_(false) {
   // Far branching mode is only needed and implemented for ARM.
   ASSERT(!use_far_branches);
 
   generate_invoke_write_barrier_wrapper_ = [&](Register reg) {
     call(Address(THR, Thread::write_barrier_wrappers_thread_offset(reg)));
   };
-  invoke_array_write_barrier_ = [&]() {
+  generate_invoke_array_write_barrier_ = [&]() {
     call(Address(THR, Thread::array_write_barrier_entry_point_offset()));
   };
 }
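
The constructor above installs both write-barrier invocations as stored std::function callbacks rather than calling stub code directly, which is part of what lets the assembler build without runtime headers. A freestanding sketch of the pattern; the register type, offsets, and printed call sequences are stand-ins:

#include <cstdio>
#include <functional>

using Register = int;  // Stand-in for dart::Register.

class AssemblerSketch {
 public:
  AssemblerSketch() {
    // Default behavior: indirect through a per-thread slot, so nothing here
    // references StubCode at assembly time.
    generate_invoke_write_barrier_wrapper_ = [](Register reg) {
      std::printf("call [THR + wrapper_offset(r%d)]\n", reg);
    };
  }

  void StoreIntoObject(Register object) {
    // ... emit the fast-path filter, then:
    generate_invoke_write_barrier_wrapper_(object);
  }

  // A client (e.g. the flow graph compiler) may replace the callback to emit
  // a different call sequence without the assembler knowing about it.
  std::function<void(Register)> generate_invoke_write_barrier_wrapper_;
};

int main() {
  AssemblerSketch assembler;
  assembler.StoreIntoObject(/*object=*/2);
  assembler.generate_invoke_write_barrier_wrapper_ = [](Register reg) {
    std::printf("call WriteBarrierWrapper(r%d) some other way\n", reg);
  };
  assembler.StoreIntoObject(/*object=*/2);
  return 0;
}
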
@@ -48,11 +57,12 @@
   EmitLabel(label, kSize);
 }
 
-void Assembler::LoadNativeEntry(Register dst,
-                                const ExternalLabel* label,
-                                ObjectPool::Patchability patchable) {
-  const int32_t offset = ObjectPool::element_offset(
-      object_pool_wrapper().FindNativeFunction(label, patchable));
+void Assembler::LoadNativeEntry(
+    Register dst,
+    const ExternalLabel* label,
+    ObjectPoolBuilderEntry::Patchability patchable) {
+  const int32_t offset = target::ObjectPool::element_offset(
+      object_pool_builder().FindNativeFunction(label, patchable));
   LoadWordFromPoolOffset(dst, offset - kHeapObjectTag);
 }
 
@@ -66,32 +76,33 @@
   call(TMP);
 }
 
-void Assembler::CallPatchable(const Code& target, Code::EntryKind entry_kind) {
+void Assembler::CallPatchable(const Code& target, CodeEntryKind entry_kind) {
   ASSERT(constant_pool_allowed());
-  const intptr_t idx =
-      object_pool_wrapper().AddObject(target, ObjectPool::kPatchable);
-  const int32_t offset = ObjectPool::element_offset(idx);
+  const intptr_t idx = object_pool_builder().AddObject(
+      ToObject(target), ObjectPoolBuilderEntry::kPatchable);
+  const int32_t offset = target::ObjectPool::element_offset(idx);
   LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag);
-  call(FieldAddress(CODE_REG, Code::entry_point_offset(entry_kind)));
+  call(FieldAddress(CODE_REG, target::Code::entry_point_offset(entry_kind)));
 }
 
 void Assembler::CallWithEquivalence(const Code& target,
                                     const Object& equivalence,
-                                    Code::EntryKind entry_kind) {
+                                    CodeEntryKind entry_kind) {
   ASSERT(constant_pool_allowed());
-  const intptr_t idx = object_pool_wrapper().FindObject(target, equivalence);
-  const int32_t offset = ObjectPool::element_offset(idx);
+  const intptr_t idx =
+      object_pool_builder().FindObject(ToObject(target), equivalence);
+  const int32_t offset = target::ObjectPool::element_offset(idx);
   LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag);
-  call(FieldAddress(CODE_REG, Code::entry_point_offset(entry_kind)));
+  call(FieldAddress(CODE_REG, target::Code::entry_point_offset(entry_kind)));
 }
 
 void Assembler::Call(const Code& target) {
   ASSERT(constant_pool_allowed());
-  const intptr_t idx =
-      object_pool_wrapper().FindObject(target, ObjectPool::kNotPatchable);
-  const int32_t offset = ObjectPool::element_offset(idx);
+  const intptr_t idx = object_pool_builder().FindObject(
+      ToObject(target), ObjectPoolBuilderEntry::kNotPatchable);
+  const int32_t offset = target::ObjectPool::element_offset(idx);
   LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag);
-  call(FieldAddress(CODE_REG, Code::entry_point_offset()));
+  call(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
 }
 
 void Assembler::CallToRuntime() {
@@ -925,21 +936,21 @@
 
 void Assembler::JmpPatchable(const Code& target, Register pp) {
   ASSERT((pp != PP) || constant_pool_allowed());
-  const intptr_t idx =
-      object_pool_wrapper().AddObject(target, ObjectPool::kPatchable);
-  const int32_t offset = ObjectPool::element_offset(idx);
+  const intptr_t idx = object_pool_builder().AddObject(
+      ToObject(target), ObjectPoolBuilderEntry::kPatchable);
+  const int32_t offset = target::ObjectPool::element_offset(idx);
   movq(CODE_REG, Address::AddressBaseImm32(pp, offset - kHeapObjectTag));
-  movq(TMP, FieldAddress(CODE_REG, Code::entry_point_offset()));
+  movq(TMP, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
   jmp(TMP);
 }
 
 void Assembler::Jmp(const Code& target, Register pp) {
   ASSERT((pp != PP) || constant_pool_allowed());
-  const intptr_t idx =
-      object_pool_wrapper().FindObject(target, ObjectPool::kNotPatchable);
-  const int32_t offset = ObjectPool::element_offset(idx);
+  const intptr_t idx = object_pool_builder().FindObject(
+      ToObject(target), ObjectPoolBuilderEntry::kNotPatchable);
+  const int32_t offset = target::ObjectPool::element_offset(idx);
   movq(CODE_REG, FieldAddress(pp, offset));
-  movq(TMP, FieldAddress(CODE_REG, Code::entry_point_offset()));
+  movq(TMP, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
   jmp(TMP);
 }
 
@@ -1078,26 +1089,23 @@
     }
     return;
   }
-  addq(RSP, Immediate(stack_elements * kWordSize));
+  addq(RSP, Immediate(stack_elements * target::kWordSize));
 }
 
 bool Assembler::CanLoadFromObjectPool(const Object& object) const {
-  ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
-  ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
-  ASSERT(!Thread::CanLoadFromThread(object));
+  ASSERT(IsOriginalObject(object));
+  ASSERT(!target::CanLoadFromThread(object));
   if (!constant_pool_allowed()) {
     return false;
   }
 
-  // TODO(zra, kmillikin): Also load other large immediates from the object
-  // pool
-  if (object.IsSmi()) {
+  if (target::IsSmi(object)) {
     // If the raw smi does not fit into a 32-bit signed int, then we'll keep
     // the raw value in the object pool.
-    return !Utils::IsInt(32, reinterpret_cast<int64_t>(object.raw()));
+    return !Utils::IsInt(32, target::ToRawSmi(object));
   }
-  ASSERT(object.IsNotTemporaryScopedHandle());
-  ASSERT(object.IsOld());
+  ASSERT(IsNotTemporaryScopedHandle(object));
+  ASSERT(IsInOldSpace(object));
   return true;
 }
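
The rewritten smi check keeps the old policy: a smi whose tagged bits fit a signed 32-bit immediate is materialized inline, anything larger goes to the object pool. A sketch of that predicate, assuming the usual one-bit smi tag (kSmiTagShift == 1, as asserted further down in this file); ToRawSmi and IsInt are modeled here, not the SDK definitions:

#include <cstdint>

// Tagged smi encoding on a 64-bit target: value << 1, low bit clear.
constexpr int64_t ToRawSmi(int64_t value) {
  return value << 1;
}

constexpr bool IsInt(int bits, int64_t value) {
  const int64_t limit = int64_t{1} << (bits - 1);
  return (-limit <= value) && (value < limit);
}

// Mirrors the decision in CanLoadFromObjectPool: only spill a smi to the
// object pool when its raw tagged form does not fit a 32-bit immediate.
constexpr bool NeedsPoolEntry(int64_t smi_value) {
  return !IsInt(32, ToRawSmi(smi_value));
}

int main() {
  static_assert(!NeedsPoolEntry(1000), "small smi: inline movq imm32");
  static_assert(NeedsPoolEntry(int64_t{1} << 40), "large smi: pool load");
  return 0;
}
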
 
@@ -1115,18 +1123,19 @@
 void Assembler::LoadObjectHelper(Register dst,
                                  const Object& object,
                                  bool is_unique) {
-  ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
-  ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
-  if (Thread::CanLoadFromThread(object)) {
-    movq(dst, Address(THR, Thread::OffsetFromThread(object)));
+  ASSERT(IsOriginalObject(object));
+
+  target::word offset_from_thread;
+  if (target::CanLoadFromThread(object, &offset_from_thread)) {
+    movq(dst, Address(THR, offset_from_thread));
   } else if (CanLoadFromObjectPool(object)) {
-    const intptr_t idx = is_unique ? object_pool_wrapper().AddObject(object)
-                                   : object_pool_wrapper().FindObject(object);
-    const int32_t offset = ObjectPool::element_offset(idx);
+    const intptr_t idx = is_unique ? object_pool_builder().AddObject(object)
+                                   : object_pool_builder().FindObject(object);
+    const int32_t offset = target::ObjectPool::element_offset(idx);
     LoadWordFromPoolOffset(dst, offset - kHeapObjectTag);
   } else {
-    ASSERT(object.IsSmi());
-    LoadImmediate(dst, Immediate(reinterpret_cast<int64_t>(object.raw())));
+    ASSERT(target::IsSmi(object));
+    LoadImmediate(dst, Immediate(target::ToRawSmi(object)));
   }
 }
 
@@ -1135,9 +1144,9 @@
                                            Register new_pp) {
   ASSERT(!constant_pool_allowed());
   ASSERT(new_pp != PP);
-  const intptr_t idx =
-      object_pool_wrapper().FindObject(function, ObjectPool::kNotPatchable);
-  const int32_t offset = ObjectPool::element_offset(idx);
+  const intptr_t idx = object_pool_builder().FindObject(
+      ToObject(function), ObjectPoolBuilderEntry::kNotPatchable);
+  const int32_t offset = target::ObjectPool::element_offset(idx);
   movq(dst, Address::AddressBaseImm32(new_pp, offset - kHeapObjectTag));
 }
 
@@ -1150,52 +1159,55 @@
 }
 
 void Assembler::StoreObject(const Address& dst, const Object& object) {
-  ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
-  ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
-  if (Thread::CanLoadFromThread(object)) {
-    movq(TMP, Address(THR, Thread::OffsetFromThread(object)));
+  ASSERT(IsOriginalObject(object));
+
+  target::word offset_from_thread;
+  if (target::CanLoadFromThread(object, &offset_from_thread)) {
+    movq(TMP, Address(THR, offset_from_thread));
     movq(dst, TMP);
   } else if (CanLoadFromObjectPool(object)) {
     LoadObject(TMP, object);
     movq(dst, TMP);
   } else {
-    ASSERT(object.IsSmi());
-    MoveImmediate(dst, Immediate(reinterpret_cast<int64_t>(object.raw())));
+    ASSERT(target::IsSmi(object));
+    MoveImmediate(dst, Immediate(target::ToRawSmi(object)));
   }
 }
 
 void Assembler::PushObject(const Object& object) {
-  ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
-  ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
-  if (Thread::CanLoadFromThread(object)) {
-    pushq(Address(THR, Thread::OffsetFromThread(object)));
+  ASSERT(IsOriginalObject(object));
+
+  target::word offset_from_thread;
+  if (target::CanLoadFromThread(object, &offset_from_thread)) {
+    pushq(Address(THR, offset_from_thread));
   } else if (CanLoadFromObjectPool(object)) {
     LoadObject(TMP, object);
     pushq(TMP);
   } else {
-    ASSERT(object.IsSmi());
-    PushImmediate(Immediate(reinterpret_cast<int64_t>(object.raw())));
+    ASSERT(target::IsSmi(object));
+    PushImmediate(Immediate(target::ToRawSmi(object)));
   }
 }
 
 void Assembler::CompareObject(Register reg, const Object& object) {
-  ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
-  ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
-  if (Thread::CanLoadFromThread(object)) {
-    cmpq(reg, Address(THR, Thread::OffsetFromThread(object)));
+  ASSERT(IsOriginalObject(object));
+
+  target::word offset_from_thread;
+  if (target::CanLoadFromThread(object, &offset_from_thread)) {
+    cmpq(reg, Address(THR, offset_from_thread));
   } else if (CanLoadFromObjectPool(object)) {
-    const intptr_t idx =
-        object_pool_wrapper().FindObject(object, ObjectPool::kNotPatchable);
-    const int32_t offset = ObjectPool::element_offset(idx);
+    const intptr_t idx = object_pool_builder().FindObject(
+        object, ObjectPoolBuilderEntry::kNotPatchable);
+    const int32_t offset = target::ObjectPool::element_offset(idx);
     cmpq(reg, Address(PP, offset - kHeapObjectTag));
   } else {
-    ASSERT(object.IsSmi());
-    CompareImmediate(reg, Immediate(reinterpret_cast<int64_t>(object.raw())));
+    ASSERT(target::IsSmi(object));
+    CompareImmediate(reg, Immediate(target::ToRawSmi(object)));
   }
 }
 
 intptr_t Assembler::FindImmediate(int64_t imm) {
-  return object_pool_wrapper().FindImmediate(imm);
+  return object_pool_builder().FindImmediate(imm);
 }
 
 void Assembler::LoadImmediate(Register reg, const Immediate& imm) {
@@ -1204,7 +1216,8 @@
   } else if (imm.is_int32() || !constant_pool_allowed()) {
     movq(reg, imm);
   } else {
-    int32_t offset = ObjectPool::element_offset(FindImmediate(imm.value()));
+    int32_t offset =
+        target::ObjectPool::element_offset(FindImmediate(imm.value()));
     LoadWordFromPoolOffset(reg, offset - kHeapObjectTag);
   }
 }
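
LoadImmediate above shows the recurring shape of pool-based loads in this file: ask the pool builder for an index, turn the index into a byte offset, subtract the heap-object tag, and load PP-relative. A compact model of that arithmetic; the header size and deduplication strategy below are placeholders for the real target::ObjectPool layout:

#include <cstdint>
#include <vector>

namespace sketch {

constexpr intptr_t kWordSize = 8;       // x64 target word.
constexpr intptr_t kHeapObjectTag = 1;  // Tagged pointers are off by one.

// Toy pool builder: deduplicates 64-bit immediates and hands out indices,
// the way object_pool_builder().FindImmediate() does for the real pool.
class PoolBuilderSketch {
 public:
  intptr_t FindImmediate(int64_t imm) {
    for (intptr_t i = 0; i < static_cast<intptr_t>(entries_.size()); i++) {
      if (entries_[i] == imm) return i;  // Reuse an existing slot.
    }
    entries_.push_back(imm);
    return static_cast<intptr_t>(entries_.size()) - 1;
  }

 private:
  std::vector<int64_t> entries_;
};

// Index -> byte offset from the pool's untagged base. kHeaderSize is a
// placeholder; the real layout comes from target::ObjectPool.
constexpr intptr_t kHeaderSize = 2 * kWordSize;
constexpr int32_t ElementOffset(intptr_t index) {
  return static_cast<int32_t>(kHeaderSize + index * kWordSize);
}

// What the slow path of LoadImmediate computes: PP holds a *tagged* pool
// pointer, so the displacement is offset - kHeapObjectTag.
inline int32_t PoolDisplacement(PoolBuilderSketch* pool, int64_t imm) {
  return ElementOffset(pool->FindImmediate(imm)) - kHeapObjectTag;
}

}  // namespace sketch

int main() {
  sketch::PoolBuilderSketch pool;
  const int32_t d0 = sketch::PoolDisplacement(&pool, 0x1234567890);
  const int32_t d1 = sketch::PoolDisplacement(&pool, 0x1234567890);
  return (d0 == d1) ? 0 : 1;  // The same immediate reuses the same slot.
}
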
@@ -1224,8 +1237,9 @@
                                       Label* label,
                                       CanBeSmi can_be_smi,
                                       BarrierFilterMode how_to_jump) {
-  COMPILE_ASSERT((kNewObjectAlignmentOffset == kWordSize) &&
-                 (kOldObjectAlignmentOffset == 0));
+  COMPILE_ASSERT((target::ObjectAlignment::kNewObjectAlignmentOffset ==
+                  target::kWordSize) &&
+                 (target::ObjectAlignment::kOldObjectAlignmentOffset == 0));
 
   if (can_be_smi == kValueIsNotSmi) {
 #if defined(DEBUG)
@@ -1242,7 +1256,7 @@
     // ~value | object instead and skip the write barrier if the bit is set.
     notl(value);
     orl(value, object);
-    testl(value, Immediate(kNewObjectAlignmentOffset));
+    testl(value, Immediate(target::ObjectAlignment::kNewObjectAlignmentOffset));
   } else {
     ASSERT(kHeapObjectTag == 1);
     // Detect value being ...1001 and object being ...0001.
@@ -1278,13 +1292,13 @@
     testq(value, Immediate(kSmiTagMask));
     j(ZERO, &done, kNearJump);
   }
-  movb(TMP, FieldAddress(object, Object::tags_offset()));
+  movb(TMP, FieldAddress(object, target::Object::tags_offset()));
   shrl(TMP, Immediate(RawObject::kBarrierOverlapShift));
   andl(TMP, Address(THR, Thread::write_barrier_mask_offset()));
-  testb(FieldAddress(value, Object::tags_offset()), TMP);
+  testb(FieldAddress(value, target::Object::tags_offset()), TMP);
   j(ZERO, &done, kNearJump);
 
-  Register objectForCall = object;
+  Register object_for_call = object;
   if (value != kWriteBarrierValueReg) {
     // Unlikely. Only non-graph intrinsics.
     // TODO(rmacnak): Shuffle registers in intrinsics.
@@ -1292,16 +1306,16 @@
     if (object == kWriteBarrierValueReg) {
       COMPILE_ASSERT(RBX != kWriteBarrierValueReg);
       COMPILE_ASSERT(RCX != kWriteBarrierValueReg);
-      objectForCall = (value == RBX) ? RCX : RBX;
-      pushq(objectForCall);
-      movq(objectForCall, object);
+      object_for_call = (value == RBX) ? RCX : RBX;
+      pushq(object_for_call);
+      movq(object_for_call, object);
     }
     movq(kWriteBarrierValueReg, value);
   }
-  generate_invoke_write_barrier_wrapper_(objectForCall);
+  generate_invoke_write_barrier_wrapper_(object_for_call);
   if (value != kWriteBarrierValueReg) {
     if (object == kWriteBarrierValueReg) {
-      popq(objectForCall);
+      popq(object_for_call);
     }
     popq(kWriteBarrierValueReg);
   }
@@ -1330,10 +1344,10 @@
     testq(value, Immediate(kSmiTagMask));
     j(ZERO, &done, kNearJump);
   }
-  movb(TMP, FieldAddress(object, Object::tags_offset()));
-  shrl(TMP, Immediate(RawObject::kBarrierOverlapShift));
+  movb(TMP, FieldAddress(object, target::Object::tags_offset()));
+  shrl(TMP, Immediate(target::RawObject::kBarrierOverlapShift));
   andl(TMP, Address(THR, Thread::write_barrier_mask_offset()));
-  testb(FieldAddress(value, Object::tags_offset()), TMP);
+  testb(FieldAddress(value, target::Object::tags_offset()), TMP);
   j(ZERO, &done, kNearJump);
 
   if ((object != kWriteBarrierObjectReg) || (value != kWriteBarrierValueReg) ||
@@ -1344,7 +1358,7 @@
     UNIMPLEMENTED();
   }
 
-  invoke_array_write_barrier_();
+  generate_invoke_array_write_barrier_();
 
   Bind(&done);
 }
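
The filter emitted above skips the barrier call whenever (object->tags() >> kBarrierOverlapShift) & mask & value->tags() is zero, where the mask comes from the thread. A boolean model of that test with invented bit positions, chosen only so the overlap trick is visible; the real layout lives in RawObject:

#include <cassert>
#include <cstdint>

// Invented tag bits for illustration; the SDK packs these differently.
constexpr uint32_t kNewBit = 1u << 3;  // Object is in new space.
constexpr uint32_t kOldAndNotRememberedBit = 1u << 4;
constexpr uint32_t kBarrierOverlapShift = 1;  // Aligns the two bits above.

// Mirrors the emitted movb/shrl/andl/testb sequence: the barrier fires only
// when the store might create an old->new pointer that the remembered set
// does not yet know about.
inline bool NeedsWriteBarrier(uint32_t object_tags,
                              uint32_t value_tags,
                              uint32_t thread_mask) {
  return ((object_tags >> kBarrierOverlapShift) & thread_mask & value_tags) !=
         0;
}

int main() {
  const uint32_t mask = kNewBit;  // Hypothetical non-marking-phase mask.
  // Old, not-yet-remembered object storing a new-space value: barrier fires.
  assert(NeedsWriteBarrier(kOldAndNotRememberedBit, kNewBit, mask));
  // Storing an old value never fires with this mask.
  assert(!NeedsWriteBarrier(kOldAndNotRememberedBit, 0, mask));
  return 0;
}
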
@@ -1367,8 +1381,6 @@
 void Assembler::StoreIntoObjectNoBarrier(Register object,
                                          const Address& dest,
                                          const Object& value) {
-  ASSERT(!value.IsICData() || ICData::Cast(value).IsOriginal());
-  ASSERT(!value.IsField() || Field::Cast(value).IsOriginal());
   StoreObject(dest, value);
 }
 
@@ -1384,32 +1396,17 @@
 }
 
 void Assembler::ZeroInitSmiField(const Address& dest) {
-  Immediate zero(Smi::RawValue(0));
+  Immediate zero(target::ToRawSmi(0));
   movq(dest, zero);
 }
 
 void Assembler::IncrementSmiField(const Address& dest, int64_t increment) {
   // Note: FlowGraphCompiler::EdgeCounterIncrementSizeInBytes depends on
   // the length of this instruction sequence.
-  Immediate inc_imm(Smi::RawValue(increment));
+  Immediate inc_imm(target::ToRawSmi(increment));
   addq(dest, inc_imm);
 }
 
-void Assembler::Stop(const char* message) {
-  if (FLAG_print_stop_message) {
-    int64_t message_address = reinterpret_cast<int64_t>(message);
-    pushq(TMP);  // Preserve TMP register.
-    pushq(RDI);  // Preserve RDI register.
-    LoadImmediate(RDI, Immediate(message_address));
-    ExternalLabel label(StubCode::PrintStopMessage().EntryPoint());
-    call(&label);
-    popq(RDI);  // Restore RDI register.
-    popq(TMP);  // Restore TMP register.
-  }
-  // Emit the int3 instruction.
-  int3();  // Execution can be resumed with the 'cont' command in gdb.
-}
-
 void Assembler::Bind(Label* label) {
   intptr_t bound = buffer_.Size();
   ASSERT(!label->IsBound());  // Labels can only be bound once.
@@ -1537,10 +1534,10 @@
   const intptr_t kPushedXmmRegistersCount =
       RegisterSet::RegisterCount(CallingConventions::kVolatileXmmRegisters);
   const intptr_t kPushedRegistersSize =
-      kPushedCpuRegistersCount * kWordSize +
+      kPushedCpuRegistersCount * target::kWordSize +
       kPushedXmmRegistersCount * kFpuRegisterSize +
-      (compiler_frame_layout.dart_fixed_frame_size - 2) *
-          kWordSize;  // From EnterStubFrame (excluding PC / FP)
+      (target::frame_layout.dart_fixed_frame_size - 2) *
+          target::kWordSize;  // From EnterStubFrame (excluding PC / FP)
 
   leaq(RSP, Address(RBP, -kPushedRegistersSize));
 
@@ -1565,13 +1562,14 @@
 }
 
 void Assembler::RestoreCodePointer() {
-  movq(CODE_REG, Address(RBP, compiler_frame_layout.code_from_fp * kWordSize));
+  movq(CODE_REG,
+       Address(RBP, target::frame_layout.code_from_fp * target::kWordSize));
 }
 
 void Assembler::LoadPoolPointer(Register pp) {
   // Load new pool pointer.
   CheckCodePointer();
-  movq(pp, FieldAddress(CODE_REG, Code::object_pool_offset()));
+  movq(pp, FieldAddress(CODE_REG, target::Code::object_pool_offset()));
   set_constant_pool_allowed(pp == PP);
 }
 
@@ -1597,8 +1595,8 @@
   // Restore caller's PP register that was pushed in EnterDartFrame.
   if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
     if (restore_pp == kRestoreCallerPP) {
-      movq(PP, Address(RBP, (compiler_frame_layout.saved_caller_pp_from_fp *
-                             kWordSize)));
+      movq(PP, Address(RBP, (target::frame_layout.saved_caller_pp_from_fp *
+                             target::kWordSize)));
     }
   }
   set_constant_pool_allowed(false);
@@ -1627,7 +1625,7 @@
     leaq(RAX, Address::AddressRIPRelative(-header_to_rip_offset));
     ASSERT(CodeSize() == (header_to_rip_offset - header_to_entry_offset));
   }
-  cmpq(RAX, FieldAddress(CODE_REG, Code::saved_instructions_offset()));
+  cmpq(RAX, FieldAddress(CODE_REG, target::Code::saved_instructions_offset()));
   j(EQUAL, &instructions_ok);
   int3();
   Bind(&instructions_ok);
@@ -1711,16 +1709,16 @@
       Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid);
   movq(temp_reg, Address(temp_reg, table_offset));
   testb(Address(temp_reg, state_offset),
-        Immediate(ClassHeapStats::TraceAllocationMask()));
+        Immediate(target::ClassHeapStats::TraceAllocationMask()));
   // We are tracing for this class, jump to the trace label which will use
   // the allocation stub.
   j(NOT_ZERO, trace, near_jump);
 }
 
-void Assembler::UpdateAllocationStats(intptr_t cid, Heap::Space space) {
+void Assembler::UpdateAllocationStats(intptr_t cid) {
   ASSERT(cid > 0);
   intptr_t counter_offset =
-      ClassTable::CounterOffsetFor(cid, space == Heap::kNew);
+      ClassTable::CounterOffsetFor(cid, /*is_new_space=*/true);
   Register temp_reg = TMP;
   LoadIsolate(temp_reg);
   intptr_t table_offset =
@@ -1729,25 +1727,22 @@
   incq(Address(temp_reg, counter_offset));
 }
 
-void Assembler::UpdateAllocationStatsWithSize(intptr_t cid,
-                                              Register size_reg,
-                                              Heap::Space space) {
+void Assembler::UpdateAllocationStatsWithSize(intptr_t cid, Register size_reg) {
   ASSERT(cid > 0);
   ASSERT(cid < kNumPredefinedCids);
-  UpdateAllocationStats(cid, space);
+  UpdateAllocationStats(cid);
   Register temp_reg = TMP;
-  intptr_t size_offset = ClassTable::SizeOffsetFor(cid, space == Heap::kNew);
+  intptr_t size_offset = ClassTable::SizeOffsetFor(cid, /*is_new_space=*/true);
   addq(Address(temp_reg, size_offset), size_reg);
 }
 
 void Assembler::UpdateAllocationStatsWithSize(intptr_t cid,
-                                              intptr_t size_in_bytes,
-                                              Heap::Space space) {
+                                              intptr_t size_in_bytes) {
   ASSERT(cid > 0);
   ASSERT(cid < kNumPredefinedCids);
-  UpdateAllocationStats(cid, space);
+  UpdateAllocationStats(cid);
   Register temp_reg = TMP;
-  intptr_t size_offset = ClassTable::SizeOffsetFor(cid, space == Heap::kNew);
+  intptr_t size_offset = ClassTable::SizeOffsetFor(cid, /*is_new_space=*/true);
   addq(Address(temp_reg, size_offset), Immediate(size_in_bytes));
 }
 #endif  // !PRODUCT
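
Dropping Heap::Space from these signatures works because the inline allocation paths only ever allocate in new space; old-space allocation always goes through the runtime, which records its own stats. A sketch of the narrowed interface with a placeholder offset computation:

#include <cstdint>

// Placeholder; the real values come from target::ClassTable.
constexpr intptr_t CounterOffsetFor(intptr_t cid, bool is_new_space) {
  return cid * 16 + (is_new_space ? 0 : 8);
}

class AllocationStatsSketch {
 public:
  // Old shape: UpdateAllocationStats(cid, space). The space parameter
  // carried no information on this path, so the new shape drops it and
  // hard-codes new space.
  void UpdateAllocationStats(intptr_t cid) {
    last_counter_offset_ = CounterOffsetFor(cid, /*is_new_space=*/true);
    // ... emit LoadIsolate + incq(Address(temp, counter_offset)) here.
  }

  intptr_t last_counter_offset_ = 0;
};

int main() {
  AllocationStatsSketch stats;
  stats.UpdateAllocationStats(/*cid=*/42);
  return stats.last_counter_offset_ == CounterOffsetFor(42, true) ? 0 : 1;
}
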
@@ -1758,13 +1753,13 @@
                             Register instance_reg,
                             Register temp) {
   ASSERT(failure != NULL);
-  const intptr_t instance_size = cls.instance_size();
+  const intptr_t instance_size = target::Class::GetInstanceSize(cls);
   if (FLAG_inline_alloc && Heap::IsAllocatableInNewSpace(instance_size)) {
+    const classid_t cid = target::Class::GetId(cls);
     // If this allocation is traced, program will jump to failure path
     // (i.e. the allocation stub) which will allocate the object and trace the
     // allocation call site.
-    NOT_IN_PRODUCT(MaybeTraceAllocation(cls.id(), failure, near_jump));
-    NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
+    NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, near_jump));
     movq(instance_reg, Address(THR, Thread::top_offset()));
     addq(instance_reg, Immediate(instance_size));
     // instance_reg: potential next object start.
@@ -1773,17 +1768,14 @@
     // Successfully allocated the object, now update top to point to
     // next object start and store the class in the class field of object.
     movq(Address(THR, Thread::top_offset()), instance_reg);
-    NOT_IN_PRODUCT(UpdateAllocationStats(cls.id(), space));
+    NOT_IN_PRODUCT(UpdateAllocationStats(cid));
     ASSERT(instance_size >= kHeapObjectTag);
     AddImmediate(instance_reg, Immediate(kHeapObjectTag - instance_size));
-    uint32_t tags = 0;
-    tags = RawObject::SizeTag::update(instance_size, tags);
-    ASSERT(cls.id() != kIllegalCid);
-    tags = RawObject::ClassIdTag::update(cls.id(), tags);
-    tags = RawObject::NewBit::update(true, tags);
+    const uint32_t tags =
+        target::MakeTagWordForNewSpaceObject(cid, instance_size);
     // Extends the 32 bit tags with zeros, which is the uninitialized
     // hash code.
-    MoveImmediate(FieldAddress(instance_reg, Object::tags_offset()),
+    MoveImmediate(FieldAddress(instance_reg, target::Object::tags_offset()),
                   Immediate(tags));
   } else {
     jmp(failure);
@@ -1803,7 +1795,6 @@
     // (i.e. the allocation stub) which will allocate the object and trace the
     // allocation call site.
     NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, near_jump));
-    NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
     movq(instance, Address(THR, Thread::top_offset()));
     movq(end_address, instance);
 
@@ -1820,17 +1811,16 @@
     // next object start and initialize the object.
     movq(Address(THR, Thread::top_offset()), end_address);
     addq(instance, Immediate(kHeapObjectTag));
-    NOT_IN_PRODUCT(UpdateAllocationStatsWithSize(cid, instance_size, space));
+    NOT_IN_PRODUCT(UpdateAllocationStatsWithSize(cid, instance_size));
 
     // Initialize the tags.
     // instance: new object start as a tagged pointer.
-    uint32_t tags = 0;
-    tags = RawObject::ClassIdTag::update(cid, tags);
-    tags = RawObject::SizeTag::update(instance_size, tags);
-    tags = RawObject::NewBit::update(true, tags);
+    const uint32_t tags =
+        target::MakeTagWordForNewSpaceObject(cid, instance_size);
     // Extends the 32 bit tags with zeros, which is the uninitialized
     // hash code.
-    movq(FieldAddress(instance, Array::tags_offset()), Immediate(tags));
+    movq(FieldAddress(instance, target::Object::tags_offset()),
+         Immediate(tags));
   } else {
     jmp(failure);
   }
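
target::MakeTagWordForNewSpaceObject folds the three update() calls visible in the deleted lines into a single target-side helper. A self-contained sketch built on a BitField like the one in vm/bitfield.h; the field positions and widths below are invented for illustration:

#include <cstdint>

// Simplified BitField in the spirit of vm/bitfield.h.
template <typename S, typename T, int position, int size>
struct BitField {
  static constexpr S encode(T value) {
    return static_cast<S>(value) << position;
  }
  static constexpr S update(T value, S original) {
    constexpr S mask = ((S{1} << size) - 1) << position;
    return (original & ~mask) | encode(value);
  }
};

// Invented layout; the real positions live in RawObject.
using SizeTag = BitField<uint32_t, uint32_t, 0, 8>;
using ClassIdTag = BitField<uint32_t, uint32_t, 8, 16>;
using NewBit = BitField<uint32_t, bool, 24, 1>;

// Counterpart of target::MakeTagWordForNewSpaceObject: one helper replaces
// the three update() calls deleted from TryAllocate/TryAllocateArray.
constexpr uint32_t MakeTagWordForNewSpaceObject(uint32_t cid, uint32_t size) {
  uint32_t tags = 0;
  tags = SizeTag::update(size, tags);
  tags = ClassIdTag::update(cid, tags);
  tags = NewBit::update(true, tags);
  return tags;
}

int main() {
  constexpr uint32_t tags =
      MakeTagWordForNewSpaceObject(/*cid=*/7, /*size=*/16);
  static_assert((tags & 0xFF) == 16, "size field");
  static_assert(((tags >> 8) & 0xFFFF) == 7, "cid field");
  static_assert(((tags >> 24) & 1) == 1, "new bit");
  return 0;
}
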
@@ -1974,6 +1964,7 @@
 }
 
 void Assembler::LoadClassId(Register result, Register object) {
+  using target::Object;
   ASSERT(RawObject::kClassIdTagPos == 16);
   ASSERT(RawObject::kClassIdTagSize == 16);
   ASSERT(sizeof(classid_t) == sizeof(uint16_t));
@@ -1988,7 +1979,7 @@
   const intptr_t offset =
       Isolate::class_table_offset() + ClassTable::table_offset();
   movq(result, Address(result, offset));
-  ASSERT(kSizeOfClassPairLog2 == 4);
+  ASSERT(ClassTable::kSizeOfClassPairLog2 == 4);
   // TIMES_16 is not a real scale factor on x64, so we double the class id
   // and use TIMES_8.
   addq(class_id, class_id);
@@ -2006,6 +1997,7 @@
 void Assembler::SmiUntagOrCheckClass(Register object,
                                      intptr_t class_id,
                                      Label* is_smi) {
+  using target::Object;
   ASSERT(kSmiTagShift == 1);
   ASSERT(RawObject::kClassIdTagPos == 16);
   ASSERT(RawObject::kClassIdTagSize == 16);
@@ -2060,7 +2052,7 @@
     jmp(&join, Assembler::kNearJump);
 
     Bind(&smi);
-    movq(result, Immediate(Smi::RawValue(kSmiCid)));
+    movq(result, Immediate(target::ToRawSmi(kSmiCid)));
 
     Bind(&join);
   } else {
@@ -2074,6 +2066,10 @@
   }
 }
 
+Address Assembler::VMTagAddress() {
+  return Address(THR, Thread::vm_tag_offset());
+}
+
 Address Assembler::ElementAddressForIntIndex(bool is_external,
                                              intptr_t cid,
                                              intptr_t index_scale,
@@ -2146,6 +2142,7 @@
   return cpu_reg_names[reg];
 }
 
+}  // namespace compiler
 }  // namespace dart
 
 #endif  // defined(TARGET_ARCH_X64)
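
Moving VMTagAddress out of line (the hunk above, plus the matching header hunk below) is the decoupling in miniature: once the body leaves the header, the header no longer needs Thread's field offsets and can drop vm/object.h. A schematic before/after with placeholder types and a made-up offset:

// --- header (sketch): no Thread definition needed any more ---
class Address;  // Forward declaration is enough for a declaration.
struct AssemblerDecl {
  static Address VMTagAddress();  // Declaration only.
};

// --- .cc (sketch): the only place that needs the real offset ---
class Address {
 public:
  Address(int base, int disp) : base_(base), disp_(disp) {}
  int base_, disp_;
};

constexpr int THR = 14;            // Placeholder register number.
constexpr int kVmTagOffset = 0x58;  // Placeholder; the real value comes
                                    // from Thread in the runtime.

Address AssemblerDecl::VMTagAddress() {
  return Address(THR, kVmTagOffset);
}

int main() {
  return AssemblerDecl::VMTagAddress().disp_ == kVmTagOffset ? 0 : 1;
}
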
diff --git a/runtime/vm/compiler/assembler/assembler_x64.h b/runtime/vm/compiler/assembler/assembler_x64.h
index 3353820..c69bef3 100644
--- a/runtime/vm/compiler/assembler/assembler_x64.h
+++ b/runtime/vm/compiler/assembler/assembler_x64.h
@@ -16,12 +16,14 @@
 #include "vm/constants_x64.h"
 #include "vm/constants_x86.h"
 #include "vm/hash_map.h"
-#include "vm/object.h"
+#include "vm/pointer_tagging.h"
 
 namespace dart {
 
 // Forward declarations.
-class RuntimeEntry;
+class FlowGraphCompiler;
+
+namespace compiler {
 
 class Immediate : public ValueObject {
  public:
@@ -278,7 +280,7 @@
 
 class Assembler : public AssemblerBase {
  public:
-  explicit Assembler(ObjectPoolWrapper* object_pool_wrapper,
+  explicit Assembler(ObjectPoolBuilder* object_pool_builder,
                      bool use_far_branches = false);
 
   ~Assembler() {}
@@ -686,7 +688,7 @@
   void LoadUniqueObject(Register dst, const Object& obj);
   void LoadNativeEntry(Register dst,
                        const ExternalLabel* label,
-                       ObjectPool::Patchability patchable);
+                       ObjectPoolBuilderEntry::Patchability patchable);
   void LoadFunctionFromCalleePool(Register dst,
                                   const Function& function,
                                   Register new_pp);
@@ -694,7 +696,7 @@
   void Jmp(const Code& code, Register pp = PP);
   void J(Condition condition, const Code& code, Register pp);
   void CallPatchable(const Code& code,
-                     Code::EntryKind entry_kind = Code::EntryKind::kNormal);
+                     CodeEntryKind entry_kind = CodeEntryKind::kNormal);
   void Call(const Code& stub_entry);
   void CallToRuntime();
 
@@ -702,10 +704,9 @@
 
   // Emit a call that shares its object pool entries with other calls
   // that have the same equivalence marker.
-  void CallWithEquivalence(
-      const Code& code,
-      const Object& equivalence,
-      Code::EntryKind entry_kind = Code::EntryKind::kNormal);
+  void CallWithEquivalence(const Code& code,
+                           const Object& equivalence,
+                           CodeEntryKind entry_kind = CodeEntryKind::kNormal);
 
   // Unaware of write barrier (use StoreInto* methods for storing to objects).
   // TODO(koda): Add StackAddress/HeapAddress types to prevent misuse.
@@ -774,8 +775,6 @@
   void LeaveCallRuntimeFrame();
 
   void CallRuntime(const RuntimeEntry& entry, intptr_t argument_count);
-  void CallRuntimeSavingRegisters(const RuntimeEntry& entry,
-                                  intptr_t argument_count);
 
   // Call runtime function. Reserves shadow space on the stack before calling
   // if platform ABI requires that. Does not restore RSP after the call itself.
@@ -871,14 +870,10 @@
 
   void MonomorphicCheckedEntry();
 
-  void UpdateAllocationStats(intptr_t cid, Heap::Space space);
+  void UpdateAllocationStats(intptr_t cid);
 
-  void UpdateAllocationStatsWithSize(intptr_t cid,
-                                     Register size_reg,
-                                     Heap::Space space);
-  void UpdateAllocationStatsWithSize(intptr_t cid,
-                                     intptr_t instance_size,
-                                     Heap::Space space);
+  void UpdateAllocationStatsWithSize(intptr_t cid, Register size_reg);
+  void UpdateAllocationStatsWithSize(intptr_t cid, intptr_t instance_size);
 
   // If allocation tracing for |cid| is enabled, will jump to |trace| label,
   // which will allocate in the runtime where tracing occurs.
@@ -940,14 +935,12 @@
                                            Register array,
                                            Register index);
 
-  static Address VMTagAddress() {
-    return Address(THR, Thread::vm_tag_offset());
-  }
+  static Address VMTagAddress();
 
   // On some other platforms, we draw a distinction between safe and unsafe
   // smis.
   static bool IsSafe(const Object& object) { return true; }
-  static bool IsSafeSmi(const Object& object) { return object.IsSmi(); }
+  static bool IsSafeSmi(const Object& object) { return target::IsSmi(object); }
 
  private:
   bool constant_pool_allowed_;
@@ -1046,14 +1039,9 @@
   // Unaware of write barrier (use StoreInto* methods for storing to objects).
   void MoveImmediate(const Address& dst, const Immediate& imm);
 
-  void ComputeCounterAddressesForCid(intptr_t cid,
-                                     Heap::Space space,
-                                     Address* count_address,
-                                     Address* size_address);
-
-  friend class FlowGraphCompiler;
+  friend class dart::FlowGraphCompiler;
   std::function<void(Register reg)> generate_invoke_write_barrier_wrapper_;
-  std::function<void()> invoke_array_write_barrier_;
+  std::function<void()> generate_invoke_array_write_barrier_;
 
   DISALLOW_ALLOCATION();
   DISALLOW_COPY_AND_ASSIGN(Assembler);
@@ -1106,6 +1094,13 @@
   EmitUint8(0x66);
 }
 
+}  // namespace compiler
+
+using compiler::Address;
+using compiler::FieldAddress;
+using compiler::Immediate;
+using compiler::Label;
+
 }  // namespace dart
 
 #endif  // RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_X64_H_
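
The using-declarations added at the bottom of the header are a migration aid: runtime code that still writes dart::Address keeps compiling while the canonical home of these types becomes dart::compiler. A tiny demonstration of the mechanism:

namespace dart {
namespace compiler {

class Label {
 public:
  bool IsBound() const { return bound_; }
  void Bind() { bound_ = true; }

 private:
  bool bound_ = false;
};

}  // namespace compiler

// Hoist the name, mirroring `using compiler::Label;` in assembler_x64.h.
using compiler::Label;

}  // namespace dart

int main() {
  dart::Label label;           // Unqualified use keeps working...
  dart::compiler::Label same;  // ...while the canonical name is nested.
  label.Bind();
  return label.IsBound() && !same.IsBound() ? 0 : 1;
}
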
diff --git a/runtime/vm/compiler/assembler/assembler_x64_test.cc b/runtime/vm/compiler/assembler/assembler_x64_test.cc
index c06b96a..9e380a4 100644
--- a/runtime/vm/compiler/assembler/assembler_x64_test.cc
+++ b/runtime/vm/compiler/assembler/assembler_x64_test.cc
@@ -11,6 +11,7 @@
 #include "vm/virtual_memory.h"
 
 namespace dart {
+namespace compiler {
 
 #define __ assembler->
 
@@ -72,33 +73,33 @@
   __ movq(RAX, Address(R13, 0));
   __ movq(R10, Address(RAX, 0));
 
-  __ movq(RAX, Address(RSP, kWordSize));
-  __ movq(RAX, Address(RBP, kWordSize));
-  __ movq(RAX, Address(RAX, kWordSize));
-  __ movq(RAX, Address(R10, kWordSize));
-  __ movq(RAX, Address(R12, kWordSize));
-  __ movq(RAX, Address(R13, kWordSize));
+  __ movq(RAX, Address(RSP, target::kWordSize));
+  __ movq(RAX, Address(RBP, target::kWordSize));
+  __ movq(RAX, Address(RAX, target::kWordSize));
+  __ movq(RAX, Address(R10, target::kWordSize));
+  __ movq(RAX, Address(R12, target::kWordSize));
+  __ movq(RAX, Address(R13, target::kWordSize));
 
-  __ movq(RAX, Address(RSP, -kWordSize));
-  __ movq(RAX, Address(RBP, -kWordSize));
-  __ movq(RAX, Address(RAX, -kWordSize));
-  __ movq(RAX, Address(R10, -kWordSize));
-  __ movq(RAX, Address(R12, -kWordSize));
-  __ movq(RAX, Address(R13, -kWordSize));
+  __ movq(RAX, Address(RSP, -target::kWordSize));
+  __ movq(RAX, Address(RBP, -target::kWordSize));
+  __ movq(RAX, Address(RAX, -target::kWordSize));
+  __ movq(RAX, Address(R10, -target::kWordSize));
+  __ movq(RAX, Address(R12, -target::kWordSize));
+  __ movq(RAX, Address(R13, -target::kWordSize));
 
-  __ movq(RAX, Address(RSP, 256 * kWordSize));
-  __ movq(RAX, Address(RBP, 256 * kWordSize));
-  __ movq(RAX, Address(RAX, 256 * kWordSize));
-  __ movq(RAX, Address(R10, 256 * kWordSize));
-  __ movq(RAX, Address(R12, 256 * kWordSize));
-  __ movq(RAX, Address(R13, 256 * kWordSize));
+  __ movq(RAX, Address(RSP, 256 * target::kWordSize));
+  __ movq(RAX, Address(RBP, 256 * target::kWordSize));
+  __ movq(RAX, Address(RAX, 256 * target::kWordSize));
+  __ movq(RAX, Address(R10, 256 * target::kWordSize));
+  __ movq(RAX, Address(R12, 256 * target::kWordSize));
+  __ movq(RAX, Address(R13, 256 * target::kWordSize));
 
-  __ movq(RAX, Address(RSP, -256 * kWordSize));
-  __ movq(RAX, Address(RBP, -256 * kWordSize));
-  __ movq(RAX, Address(RAX, -256 * kWordSize));
-  __ movq(RAX, Address(R10, -256 * kWordSize));
-  __ movq(RAX, Address(R12, -256 * kWordSize));
-  __ movq(RAX, Address(R13, -256 * kWordSize));
+  __ movq(RAX, Address(RSP, -256 * target::kWordSize));
+  __ movq(RAX, Address(RBP, -256 * target::kWordSize));
+  __ movq(RAX, Address(RAX, -256 * target::kWordSize));
+  __ movq(RAX, Address(R10, -256 * target::kWordSize));
+  __ movq(RAX, Address(R12, -256 * target::kWordSize));
+  __ movq(RAX, Address(R13, -256 * target::kWordSize));
 
   __ movq(RAX, Address(RAX, TIMES_1, 0));
   __ movq(RAX, Address(RAX, TIMES_2, 0));
@@ -111,17 +112,17 @@
   __ movq(RAX, Address(R12, TIMES_2, 0));
   __ movq(RAX, Address(R13, TIMES_2, 0));
 
-  __ movq(RAX, Address(RBP, TIMES_2, kWordSize));
-  __ movq(RAX, Address(RAX, TIMES_2, kWordSize));
-  __ movq(RAX, Address(R10, TIMES_2, kWordSize));
-  __ movq(RAX, Address(R12, TIMES_2, kWordSize));
-  __ movq(RAX, Address(R13, TIMES_2, kWordSize));
+  __ movq(RAX, Address(RBP, TIMES_2, target::kWordSize));
+  __ movq(RAX, Address(RAX, TIMES_2, target::kWordSize));
+  __ movq(RAX, Address(R10, TIMES_2, target::kWordSize));
+  __ movq(RAX, Address(R12, TIMES_2, target::kWordSize));
+  __ movq(RAX, Address(R13, TIMES_2, target::kWordSize));
 
-  __ movq(RAX, Address(RBP, TIMES_2, 256 * kWordSize));
-  __ movq(RAX, Address(RAX, TIMES_2, 256 * kWordSize));
-  __ movq(RAX, Address(R10, TIMES_2, 256 * kWordSize));
-  __ movq(RAX, Address(R12, TIMES_2, 256 * kWordSize));
-  __ movq(RAX, Address(R13, TIMES_2, 256 * kWordSize));
+  __ movq(RAX, Address(RBP, TIMES_2, 256 * target::kWordSize));
+  __ movq(RAX, Address(RAX, TIMES_2, 256 * target::kWordSize));
+  __ movq(RAX, Address(R10, TIMES_2, 256 * target::kWordSize));
+  __ movq(RAX, Address(R12, TIMES_2, 256 * target::kWordSize));
+  __ movq(RAX, Address(R13, TIMES_2, 256 * target::kWordSize));
 
   __ movq(RAX, Address(RAX, RBP, TIMES_2, 0));
   __ movq(RAX, Address(RAX, RAX, TIMES_2, 0));
@@ -159,77 +160,77 @@
   __ movq(RAX, Address(R13, R12, TIMES_2, 0));
   __ movq(RAX, Address(R13, R13, TIMES_2, 0));
 
-  __ movq(RAX, Address(RAX, RBP, TIMES_2, kWordSize));
-  __ movq(RAX, Address(RAX, RAX, TIMES_2, kWordSize));
-  __ movq(RAX, Address(RAX, R10, TIMES_2, kWordSize));
-  __ movq(RAX, Address(RAX, R12, TIMES_2, kWordSize));
-  __ movq(RAX, Address(RAX, R13, TIMES_2, kWordSize));
+  __ movq(RAX, Address(RAX, RBP, TIMES_2, target::kWordSize));
+  __ movq(RAX, Address(RAX, RAX, TIMES_2, target::kWordSize));
+  __ movq(RAX, Address(RAX, R10, TIMES_2, target::kWordSize));
+  __ movq(RAX, Address(RAX, R12, TIMES_2, target::kWordSize));
+  __ movq(RAX, Address(RAX, R13, TIMES_2, target::kWordSize));
 
-  __ movq(RAX, Address(RBP, RBP, TIMES_2, kWordSize));
-  __ movq(RAX, Address(RBP, RAX, TIMES_2, kWordSize));
-  __ movq(RAX, Address(RBP, R10, TIMES_2, kWordSize));
-  __ movq(RAX, Address(RBP, R12, TIMES_2, kWordSize));
-  __ movq(RAX, Address(RBP, R13, TIMES_2, kWordSize));
+  __ movq(RAX, Address(RBP, RBP, TIMES_2, target::kWordSize));
+  __ movq(RAX, Address(RBP, RAX, TIMES_2, target::kWordSize));
+  __ movq(RAX, Address(RBP, R10, TIMES_2, target::kWordSize));
+  __ movq(RAX, Address(RBP, R12, TIMES_2, target::kWordSize));
+  __ movq(RAX, Address(RBP, R13, TIMES_2, target::kWordSize));
 
-  __ movq(RAX, Address(RSP, RBP, TIMES_2, kWordSize));
-  __ movq(RAX, Address(RSP, RAX, TIMES_2, kWordSize));
-  __ movq(RAX, Address(RSP, R10, TIMES_2, kWordSize));
-  __ movq(RAX, Address(RSP, R12, TIMES_2, kWordSize));
-  __ movq(RAX, Address(RSP, R13, TIMES_2, kWordSize));
+  __ movq(RAX, Address(RSP, RBP, TIMES_2, target::kWordSize));
+  __ movq(RAX, Address(RSP, RAX, TIMES_2, target::kWordSize));
+  __ movq(RAX, Address(RSP, R10, TIMES_2, target::kWordSize));
+  __ movq(RAX, Address(RSP, R12, TIMES_2, target::kWordSize));
+  __ movq(RAX, Address(RSP, R13, TIMES_2, target::kWordSize));
 
-  __ movq(RAX, Address(R10, RBP, TIMES_2, kWordSize));
-  __ movq(RAX, Address(R10, RAX, TIMES_2, kWordSize));
-  __ movq(RAX, Address(R10, R10, TIMES_2, kWordSize));
-  __ movq(RAX, Address(R10, R12, TIMES_2, kWordSize));
-  __ movq(RAX, Address(R10, R13, TIMES_2, kWordSize));
+  __ movq(RAX, Address(R10, RBP, TIMES_2, target::kWordSize));
+  __ movq(RAX, Address(R10, RAX, TIMES_2, target::kWordSize));
+  __ movq(RAX, Address(R10, R10, TIMES_2, target::kWordSize));
+  __ movq(RAX, Address(R10, R12, TIMES_2, target::kWordSize));
+  __ movq(RAX, Address(R10, R13, TIMES_2, target::kWordSize));
 
-  __ movq(RAX, Address(R12, RBP, TIMES_2, kWordSize));
-  __ movq(RAX, Address(R12, RAX, TIMES_2, kWordSize));
-  __ movq(RAX, Address(R12, R10, TIMES_2, kWordSize));
-  __ movq(RAX, Address(R12, R12, TIMES_2, kWordSize));
-  __ movq(RAX, Address(R12, R13, TIMES_2, kWordSize));
+  __ movq(RAX, Address(R12, RBP, TIMES_2, target::kWordSize));
+  __ movq(RAX, Address(R12, RAX, TIMES_2, target::kWordSize));
+  __ movq(RAX, Address(R12, R10, TIMES_2, target::kWordSize));
+  __ movq(RAX, Address(R12, R12, TIMES_2, target::kWordSize));
+  __ movq(RAX, Address(R12, R13, TIMES_2, target::kWordSize));
 
-  __ movq(RAX, Address(R13, RBP, TIMES_2, kWordSize));
-  __ movq(RAX, Address(R13, RAX, TIMES_2, kWordSize));
-  __ movq(RAX, Address(R13, R10, TIMES_2, kWordSize));
-  __ movq(RAX, Address(R13, R12, TIMES_2, kWordSize));
-  __ movq(RAX, Address(R13, R13, TIMES_2, kWordSize));
+  __ movq(RAX, Address(R13, RBP, TIMES_2, target::kWordSize));
+  __ movq(RAX, Address(R13, RAX, TIMES_2, target::kWordSize));
+  __ movq(RAX, Address(R13, R10, TIMES_2, target::kWordSize));
+  __ movq(RAX, Address(R13, R12, TIMES_2, target::kWordSize));
+  __ movq(RAX, Address(R13, R13, TIMES_2, target::kWordSize));
 
-  __ movq(RAX, Address(RAX, RBP, TIMES_2, 256 * kWordSize));
-  __ movq(RAX, Address(RAX, RAX, TIMES_2, 256 * kWordSize));
-  __ movq(RAX, Address(RAX, R10, TIMES_2, 256 * kWordSize));
-  __ movq(RAX, Address(RAX, R12, TIMES_2, 256 * kWordSize));
-  __ movq(RAX, Address(RAX, R13, TIMES_2, 256 * kWordSize));
+  __ movq(RAX, Address(RAX, RBP, TIMES_2, 256 * target::kWordSize));
+  __ movq(RAX, Address(RAX, RAX, TIMES_2, 256 * target::kWordSize));
+  __ movq(RAX, Address(RAX, R10, TIMES_2, 256 * target::kWordSize));
+  __ movq(RAX, Address(RAX, R12, TIMES_2, 256 * target::kWordSize));
+  __ movq(RAX, Address(RAX, R13, TIMES_2, 256 * target::kWordSize));
 
-  __ movq(RAX, Address(RBP, RBP, TIMES_2, 256 * kWordSize));
-  __ movq(RAX, Address(RBP, RAX, TIMES_2, 256 * kWordSize));
-  __ movq(RAX, Address(RBP, R10, TIMES_2, 256 * kWordSize));
-  __ movq(RAX, Address(RBP, R12, TIMES_2, 256 * kWordSize));
-  __ movq(RAX, Address(RBP, R13, TIMES_2, 256 * kWordSize));
+  __ movq(RAX, Address(RBP, RBP, TIMES_2, 256 * target::kWordSize));
+  __ movq(RAX, Address(RBP, RAX, TIMES_2, 256 * target::kWordSize));
+  __ movq(RAX, Address(RBP, R10, TIMES_2, 256 * target::kWordSize));
+  __ movq(RAX, Address(RBP, R12, TIMES_2, 256 * target::kWordSize));
+  __ movq(RAX, Address(RBP, R13, TIMES_2, 256 * target::kWordSize));
 
-  __ movq(RAX, Address(RSP, RBP, TIMES_2, 256 * kWordSize));
-  __ movq(RAX, Address(RSP, RAX, TIMES_2, 256 * kWordSize));
-  __ movq(RAX, Address(RSP, R10, TIMES_2, 256 * kWordSize));
-  __ movq(RAX, Address(RSP, R12, TIMES_2, 256 * kWordSize));
-  __ movq(RAX, Address(RSP, R13, TIMES_2, 256 * kWordSize));
+  __ movq(RAX, Address(RSP, RBP, TIMES_2, 256 * target::kWordSize));
+  __ movq(RAX, Address(RSP, RAX, TIMES_2, 256 * target::kWordSize));
+  __ movq(RAX, Address(RSP, R10, TIMES_2, 256 * target::kWordSize));
+  __ movq(RAX, Address(RSP, R12, TIMES_2, 256 * target::kWordSize));
+  __ movq(RAX, Address(RSP, R13, TIMES_2, 256 * target::kWordSize));
 
-  __ movq(RAX, Address(R10, RBP, TIMES_2, 256 * kWordSize));
-  __ movq(RAX, Address(R10, RAX, TIMES_2, 256 * kWordSize));
-  __ movq(RAX, Address(R10, R10, TIMES_2, 256 * kWordSize));
-  __ movq(RAX, Address(R10, R12, TIMES_2, 256 * kWordSize));
-  __ movq(RAX, Address(R10, R13, TIMES_2, 256 * kWordSize));
+  __ movq(RAX, Address(R10, RBP, TIMES_2, 256 * target::kWordSize));
+  __ movq(RAX, Address(R10, RAX, TIMES_2, 256 * target::kWordSize));
+  __ movq(RAX, Address(R10, R10, TIMES_2, 256 * target::kWordSize));
+  __ movq(RAX, Address(R10, R12, TIMES_2, 256 * target::kWordSize));
+  __ movq(RAX, Address(R10, R13, TIMES_2, 256 * target::kWordSize));
 
-  __ movq(RAX, Address(R12, RBP, TIMES_2, 256 * kWordSize));
-  __ movq(RAX, Address(R12, RAX, TIMES_2, 256 * kWordSize));
-  __ movq(RAX, Address(R12, R10, TIMES_2, 256 * kWordSize));
-  __ movq(RAX, Address(R12, R12, TIMES_2, 256 * kWordSize));
-  __ movq(RAX, Address(R12, R13, TIMES_2, 256 * kWordSize));
+  __ movq(RAX, Address(R12, RBP, TIMES_2, 256 * target::kWordSize));
+  __ movq(RAX, Address(R12, RAX, TIMES_2, 256 * target::kWordSize));
+  __ movq(RAX, Address(R12, R10, TIMES_2, 256 * target::kWordSize));
+  __ movq(RAX, Address(R12, R12, TIMES_2, 256 * target::kWordSize));
+  __ movq(RAX, Address(R12, R13, TIMES_2, 256 * target::kWordSize));
 
-  __ movq(RAX, Address(R13, RBP, TIMES_2, 256 * kWordSize));
-  __ movq(RAX, Address(R13, RAX, TIMES_2, 256 * kWordSize));
-  __ movq(RAX, Address(R13, R10, TIMES_2, 256 * kWordSize));
-  __ movq(RAX, Address(R13, R12, TIMES_2, 256 * kWordSize));
-  __ movq(RAX, Address(R13, R13, TIMES_2, 256 * kWordSize));
+  __ movq(RAX, Address(R13, RBP, TIMES_2, 256 * target::kWordSize));
+  __ movq(RAX, Address(R13, RAX, TIMES_2, 256 * target::kWordSize));
+  __ movq(RAX, Address(R13, R10, TIMES_2, 256 * target::kWordSize));
+  __ movq(RAX, Address(R13, R12, TIMES_2, 256 * target::kWordSize));
+  __ movq(RAX, Address(R13, R13, TIMES_2, 256 * target::kWordSize));
 
   __ movq(RAX, Address::AddressBaseImm32(RSP, 0));
   __ movq(RAX, Address::AddressBaseImm32(RBP, 0));
@@ -239,19 +240,19 @@
   __ movq(RAX, Address::AddressBaseImm32(R13, 0));
   __ movq(R10, Address::AddressBaseImm32(RAX, 0));
 
-  __ movq(RAX, Address::AddressBaseImm32(RSP, kWordSize));
-  __ movq(RAX, Address::AddressBaseImm32(RBP, kWordSize));
-  __ movq(RAX, Address::AddressBaseImm32(RAX, kWordSize));
-  __ movq(RAX, Address::AddressBaseImm32(R10, kWordSize));
-  __ movq(RAX, Address::AddressBaseImm32(R12, kWordSize));
-  __ movq(RAX, Address::AddressBaseImm32(R13, kWordSize));
+  __ movq(RAX, Address::AddressBaseImm32(RSP, target::kWordSize));
+  __ movq(RAX, Address::AddressBaseImm32(RBP, target::kWordSize));
+  __ movq(RAX, Address::AddressBaseImm32(RAX, target::kWordSize));
+  __ movq(RAX, Address::AddressBaseImm32(R10, target::kWordSize));
+  __ movq(RAX, Address::AddressBaseImm32(R12, target::kWordSize));
+  __ movq(RAX, Address::AddressBaseImm32(R13, target::kWordSize));
 
-  __ movq(RAX, Address::AddressBaseImm32(RSP, -kWordSize));
-  __ movq(RAX, Address::AddressBaseImm32(RBP, -kWordSize));
-  __ movq(RAX, Address::AddressBaseImm32(RAX, -kWordSize));
-  __ movq(RAX, Address::AddressBaseImm32(R10, -kWordSize));
-  __ movq(RAX, Address::AddressBaseImm32(R12, -kWordSize));
-  __ movq(RAX, Address::AddressBaseImm32(R13, -kWordSize));
+  __ movq(RAX, Address::AddressBaseImm32(RSP, -target::kWordSize));
+  __ movq(RAX, Address::AddressBaseImm32(RBP, -target::kWordSize));
+  __ movq(RAX, Address::AddressBaseImm32(RAX, -target::kWordSize));
+  __ movq(RAX, Address::AddressBaseImm32(R10, -target::kWordSize));
+  __ movq(RAX, Address::AddressBaseImm32(R12, -target::kWordSize));
+  __ movq(RAX, Address::AddressBaseImm32(R13, -target::kWordSize));
 }
 
 ASSEMBLER_TEST_RUN(AddressingModes, test) {
@@ -1163,7 +1164,7 @@
   __ movzxb(RAX, Address(RSP, 0));  // RAX = 0xff
   __ movsxw(R8, Address(RSP, 0));   // R8 = -1
   __ movzxw(RCX, Address(RSP, 0));  // RCX = 0xffff
-  __ addq(RSP, Immediate(kWordSize));
+  __ addq(RSP, Immediate(target::kWordSize));
 
   __ addq(R8, RCX);
   __ addq(RAX, R8);
@@ -1189,9 +1190,9 @@
 ASSEMBLER_TEST_GENERATE(MoveExtend32Memory, assembler) {
   __ pushq(Immediate(0xffffffff));
   __ pushq(Immediate(0x7fffffff));
-  __ movsxd(RDX, Address(RSP, kWordSize));
+  __ movsxd(RDX, Address(RSP, target::kWordSize));
   __ movsxd(RAX, Address(RSP, 0));
-  __ addq(RSP, Immediate(kWordSize * 2));
+  __ addq(RSP, Immediate(target::kWordSize * 2));
 
   __ addq(RAX, RDX);
   __ ret();
@@ -1218,7 +1219,7 @@
   __ movq(RCX, Immediate(-1));
   __ movw(Address(RAX, 0), RCX);
   __ movzxw(RAX, Address(RAX, 0));  // RAX = 0xffff
-  __ addq(RSP, Immediate(kWordSize));
+  __ addq(RSP, Immediate(target::kWordSize));
   __ ret();
 }
 
@@ -1298,7 +1299,7 @@
   __ movzxw(R8, Address(R8, 0));  // 0xffff
   __ xorq(RAX, RAX);
   __ addq(RAX, R8);  // RAX = 0xffff
-  __ addq(RSP, Immediate(kWordSize));
+  __ addq(RSP, Immediate(target::kWordSize));
   __ ret();
 }
 
@@ -1615,10 +1616,10 @@
   __ pushq(CallingConventions::kArg3Reg);
   __ pushq(CallingConventions::kArg2Reg);
   __ pushq(CallingConventions::kArg1Reg);
-  __ movq(R10, Address(RSP, 0 * kWordSize));  // al.
-  __ addq(R10, Address(RSP, 2 * kWordSize));  // bl.
-  __ movq(RAX, Address(RSP, 1 * kWordSize));  // ah.
-  __ adcq(RAX, Address(RSP, 3 * kWordSize));  // bh.
+  __ movq(R10, Address(RSP, 0 * target::kWordSize));  // al.
+  __ addq(R10, Address(RSP, 2 * target::kWordSize));  // bl.
+  __ movq(RAX, Address(RSP, 1 * target::kWordSize));  // ah.
+  __ adcq(RAX, Address(RSP, 3 * target::kWordSize));  // bh.
   // RAX = high64(ah:al + bh:bl).
   __ Drop(4);
   __ ret();
@@ -1711,10 +1712,10 @@
   __ pushq(CallingConventions::kArg3Reg);
   __ pushq(CallingConventions::kArg2Reg);
   __ pushq(CallingConventions::kArg1Reg);
-  __ movq(R10, Address(RSP, 0 * kWordSize));  // al.
-  __ subq(R10, Address(RSP, 2 * kWordSize));  // bl.
-  __ movq(RAX, Address(RSP, 1 * kWordSize));  // ah.
-  __ sbbq(RAX, Address(RSP, 3 * kWordSize));  // bh.
+  __ movq(R10, Address(RSP, 0 * target::kWordSize));  // al.
+  __ subq(R10, Address(RSP, 2 * target::kWordSize));  // bl.
+  __ movq(RAX, Address(RSP, 1 * target::kWordSize));  // ah.
+  __ sbbq(RAX, Address(RSP, 3 * target::kWordSize));  // bh.
   // RAX = high64(ah:al - bh:bl).
   __ Drop(4);
   __ ret();
@@ -5417,9 +5418,9 @@
   __ pushq(CallingConventions::kArg1Reg);     // from.
   __ pushq(CallingConventions::kArg2Reg);     // to.
   __ pushq(CallingConventions::kArg3Reg);     // count.
-  __ movq(RSI, Address(RSP, 2 * kWordSize));  // from.
-  __ movq(RDI, Address(RSP, 1 * kWordSize));  // to.
-  __ movq(RCX, Address(RSP, 0 * kWordSize));  // count.
+  __ movq(RSI, Address(RSP, 2 * target::kWordSize));  // from.
+  __ movq(RDI, Address(RSP, 1 * target::kWordSize));  // to.
+  __ movq(RCX, Address(RSP, 0 * target::kWordSize));  // count.
   __ rep_movsb();
   // Remove saved arguments.
   __ popq(RAX);
@@ -5835,6 +5836,8 @@
                __ pushq(RAX),
                Address(RSP, 0),
                __ popq(RAX))
+
+}  // namespace compiler
 }  // namespace dart
 
 #endif  // defined TARGET_ARCH_X64
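
Note on the assembler test changes above: every bare kWordSize in the
addressing-mode tests is now qualified as target::kWordSize. A minimal
sketch of how such a shadowing constant could be declared (the
conditional and the concrete values are assumptions for illustration,
not part of this change):

  namespace dart {
  namespace compiler {
  namespace target {
  // Word size of the generated code; the values below are assumed.
  #if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_ARM)
  const intptr_t kWordSize = 4;
  #else
  const intptr_t kWordSize = 8;
  #endif
  }  // namespace target
  }  // namespace compiler
  }  // namespace dart
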
diff --git a/runtime/vm/compiler/assembler/disassembler.h b/runtime/vm/compiler/assembler/disassembler.h
index f23f08c..a58339a 100644
--- a/runtime/vm/compiler/assembler/disassembler.h
+++ b/runtime/vm/compiler/assembler/disassembler.h
@@ -9,6 +9,7 @@
 #include "vm/compiler/assembler/assembler.h"
 #include "vm/globals.h"
 #include "vm/log.h"
+#include "vm/object.h"
 
 namespace dart {
 
diff --git a/runtime/vm/compiler/assembler/disassembler_arm.cc b/runtime/vm/compiler/assembler/disassembler_arm.cc
index d3880c0..f5266ae 100644
--- a/runtime/vm/compiler/assembler/disassembler_arm.cc
+++ b/runtime/vm/compiler/assembler/disassembler_arm.cc
@@ -655,16 +655,6 @@
         case 7: {
           if ((instr->Bits(21, 2) == 0x1) && (instr->ConditionField() == AL)) {
             Format(instr, "bkpt #'imm12_4");
-            if (instr->BkptField() == Instr::kStopMessageCode) {
-              const char* message = "Stop messages not enabled";
-              if (FLAG_print_stop_message) {
-                message = *reinterpret_cast<const char**>(
-                    reinterpret_cast<intptr_t>(instr) - Instr::kInstrSize);
-              }
-              buffer_pos_ += Utils::SNPrint(current_position_in_buffer(),
-                                            remaining_size_in_buffer(),
-                                            " ; \"%s\"", message);
-            }
           } else {
             // Format(instr, "smc'cond");
             Unknown(instr);  // Not used.
diff --git a/runtime/vm/compiler/assembler/disassembler_arm64.cc b/runtime/vm/compiler/assembler/disassembler_arm64.cc
index 2abe750..e074048 100644
--- a/runtime/vm/compiler/assembler/disassembler_arm64.cc
+++ b/runtime/vm/compiler/assembler/disassembler_arm64.cc
@@ -885,16 +885,6 @@
   } else if ((instr->Bits(0, 2) == 0) && (instr->Bits(2, 3) == 0) &&
              (instr->Bits(21, 3) == 1)) {
     Format(instr, "brk 'imm16");
-    if (instr->Imm16Field() == Instr::kStopMessageCode) {
-      const char* message = "Stop messages not enabled";
-      if (FLAG_print_stop_message) {
-        message = *reinterpret_cast<const char**>(
-            reinterpret_cast<intptr_t>(instr) - 2 * Instr::kInstrSize);
-      }
-      buffer_pos_ +=
-          Utils::SNPrint(current_position_in_buffer(),
-                         remaining_size_in_buffer(), " ; \"%s\"", message);
-    }
   } else if ((instr->Bits(0, 2) == 0) && (instr->Bits(2, 3) == 0) &&
              (instr->Bits(21, 3) == 2)) {
     Format(instr, "hlt 'imm16");
diff --git a/runtime/vm/compiler/assembler/disassembler_kbc.cc b/runtime/vm/compiler/assembler/disassembler_kbc.cc
index 151429a..e0c7f8a 100644
--- a/runtime/vm/compiler/assembler/disassembler_kbc.cc
+++ b/runtime/vm/compiler/assembler/disassembler_kbc.cc
@@ -234,7 +234,7 @@
   KBCInstr instr = KernelBytecode::At(pc);
   if (HasLoadFromPool(instr)) {
     uint16_t index = KernelBytecode::DecodeD(instr);
-    if (object_pool.TypeAt(index) == ObjectPool::kTaggedObject) {
+    if (object_pool.TypeAt(index) == ObjectPool::EntryType::kTaggedObject) {
       *obj = object_pool.ObjectAt(index);
       return true;
     }
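
The enumerators of ObjectPool's entry type are now spelled through the
EntryType scope. Presumably the enum was turned into (or nested as) a
scoped enumeration mirroring ObjectPoolBuilderEntry::EntryType from the
new builder header; a sketch, with the underlying type assumed:

  enum class EntryType : uint8_t {
    kTaggedObject,
    kImmediate,
    kNativeFunction,
    kNativeFunctionWrapper,
    kNativeEntryData,
  };
  // Use sites must now qualify the enumerator:
  //   object_pool.TypeAt(index) == ObjectPool::EntryType::kTaggedObject
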
diff --git a/runtime/vm/compiler/assembler/disassembler_test.cc b/runtime/vm/compiler/assembler/disassembler_test.cc
index fa208ec..2ced70e 100644
--- a/runtime/vm/compiler/assembler/disassembler_test.cc
+++ b/runtime/vm/compiler/assembler/disassembler_test.cc
@@ -14,8 +14,8 @@
 #if !defined(PRODUCT) && !defined(TARGET_ARCH_DBC)
 
 ISOLATE_UNIT_TEST_CASE(Disassembler) {
-  ObjectPoolWrapper object_pool_wrapper;
-  Assembler assembler(&object_pool_wrapper);
+  ObjectPoolBuilder object_pool_builder;
+  Assembler assembler(&object_pool_builder);
 
   // The used instructions work on all platforms.
   Register reg = static_cast<Register>(0);
diff --git a/runtime/vm/compiler/assembler/disassembler_x86.cc b/runtime/vm/compiler/assembler/disassembler_x86.cc
index 01e0c0c..fb00c01 100644
--- a/runtime/vm/compiler/assembler/disassembler_x86.cc
+++ b/runtime/vm/compiler/assembler/disassembler_x86.cc
@@ -308,7 +308,7 @@
   }
 
   const char* NameOfCPURegister(int reg) const {
-    return Assembler::RegisterName(static_cast<Register>(reg));
+    return compiler::Assembler::RegisterName(static_cast<Register>(reg));
   }
 
   const char* NameOfByteCPURegister(int reg) const {
@@ -344,7 +344,6 @@
   const char* TwoByteMnemonic(uint8_t opcode);
   int TwoByteOpcodeInstruction(uint8_t* data);
   int Print660F38Instruction(uint8_t* data);
-  void CheckPrintStop(uint8_t* data);
 
   int F6F7Instruction(uint8_t* data);
   int ShiftInstruction(uint8_t* data);
@@ -1229,20 +1228,6 @@
   }
 }
 
-// Called when disassembling test eax, 0xXXXXX.
-void DisassemblerX64::CheckPrintStop(uint8_t* data) {
-#if defined(TARGET_ARCH_IA32)
-  // Recognize stop pattern.
-  if (*data == 0xCC) {
-    const char* message = "Stop messages not enabled";
-    if (FLAG_print_stop_message) {
-      message = *reinterpret_cast<const char**>(data - 4);
-    }
-    Print("  STOP:'%s'", message);
-  }
-#endif
-}
-
 // Handle all two-byte opcodes, which start with 0x0F.
 // These instructions may be affected by an 0x66, 0xF2, or 0xF3 prefix.
 // We do not use any three-byte opcodes, which start with 0x0F38 or 0x0F3A.
@@ -1854,12 +1839,8 @@
 
       case 0xA9: {
         data++;
-        bool check_for_stop = operand_size() == DOUBLEWORD_SIZE;
         Print("test%s %s,", operand_size_code(), Rax());
         data += PrintImmediate(data, operand_size());
-        if (check_for_stop) {
-          CheckPrintStop(data);
-        }
         break;
       }
       case 0xD1:  // fall through
diff --git a/runtime/vm/compiler/assembler/object_pool_builder.h b/runtime/vm/compiler/assembler/object_pool_builder.h
new file mode 100644
index 0000000..2202954
--- /dev/null
+++ b/runtime/vm/compiler/assembler/object_pool_builder.h
@@ -0,0 +1,187 @@
+// Copyright (c) 2012, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#ifndef RUNTIME_VM_COMPILER_ASSEMBLER_OBJECT_POOL_BUILDER_H_
+#define RUNTIME_VM_COMPILER_ASSEMBLER_OBJECT_POOL_BUILDER_H_
+
+#include "platform/globals.h"
+#include "vm/bitfield.h"
+#include "vm/hash_map.h"
+
+namespace dart {
+
+class Object;
+
+namespace compiler {
+
+class ExternalLabel;
+
+bool IsSameObject(const Object& a, const Object& b);
+
+struct ObjectPoolBuilderEntry {
+  enum Patchability {
+    kPatchable,
+    kNotPatchable,
+  };
+
+  enum EntryType {
+    kTaggedObject,
+    kImmediate,
+    kNativeFunction,
+    kNativeFunctionWrapper,
+    kNativeEntryData,
+  };
+
+  using TypeBits = BitField<uint8_t, EntryType, 0, 7>;
+  using PatchableBit = BitField<uint8_t, Patchability, TypeBits::kNextBit, 1>;
+
+  static inline uint8_t EncodeTraits(EntryType type, Patchability patchable) {
+    return TypeBits::encode(type) | PatchableBit::encode(patchable);
+  }
+
+  ObjectPoolBuilderEntry() : raw_value_(), entry_bits_(0), equivalence_() {}
+  ObjectPoolBuilderEntry(const Object* obj, Patchability patchable)
+      : ObjectPoolBuilderEntry(obj, obj, patchable) {}
+  ObjectPoolBuilderEntry(const Object* obj,
+                         const Object* eqv,
+                         Patchability patchable)
+      : obj_(obj),
+        entry_bits_(EncodeTraits(kTaggedObject, patchable)),
+        equivalence_(eqv) {}
+  ObjectPoolBuilderEntry(uword value, EntryType info, Patchability patchable)
+      : raw_value_(value),
+        entry_bits_(EncodeTraits(info, patchable)),
+        equivalence_() {}
+
+  EntryType type() const { return TypeBits::decode(entry_bits_); }
+
+  Patchability patchable() const { return PatchableBit::decode(entry_bits_); }
+
+  union {
+    const Object* obj_;
+    uword raw_value_;
+  };
+  uint8_t entry_bits_;
+  const Object* equivalence_;
+};
+
+// Pair type parameter for DirectChainedHashMap used for the constant pool.
+class ObjIndexPair {
+ public:
+  // Typedefs needed for the DirectChainedHashMap template.
+  typedef ObjectPoolBuilderEntry Key;
+  typedef intptr_t Value;
+  typedef ObjIndexPair Pair;
+
+  static const intptr_t kNoIndex = -1;
+
+  ObjIndexPair()
+      : key_(reinterpret_cast<uword>(nullptr),
+             ObjectPoolBuilderEntry::kTaggedObject,
+             ObjectPoolBuilderEntry::kPatchable),
+        value_(kNoIndex) {}
+
+  ObjIndexPair(Key key, Value value) : value_(value) {
+    key_.entry_bits_ = key.entry_bits_;
+    if (key.type() == ObjectPoolBuilderEntry::kTaggedObject) {
+      key_.obj_ = key.obj_;
+      key_.equivalence_ = key.equivalence_;
+    } else {
+      key_.raw_value_ = key.raw_value_;
+    }
+  }
+
+  static Key KeyOf(Pair kv) { return kv.key_; }
+
+  static Value ValueOf(Pair kv) { return kv.value_; }
+
+  static intptr_t Hashcode(Key key);
+
+  static inline bool IsKeyEqual(Pair kv, Key key) {
+    if (kv.key_.entry_bits_ != key.entry_bits_) return false;
+    if (kv.key_.type() == ObjectPoolBuilderEntry::kTaggedObject) {
+      return IsSameObject(*kv.key_.obj_, *key.obj_) &&
+             IsSameObject(*kv.key_.equivalence_, *key.equivalence_);
+    }
+    return kv.key_.raw_value_ == key.raw_value_;
+  }
+
+ private:
+  Key key_;
+  Value value_;
+};
+
+class ObjectPoolBuilder : public ValueObject {
+ public:
+  ObjectPoolBuilder() : zone_(nullptr) {}
+  ~ObjectPoolBuilder() {
+    if (zone_ != nullptr) {
+      Reset();
+      zone_ = nullptr;
+    }
+  }
+
+  // Clears all existing entries in this object pool builder.
+  //
+  // Note: Any code that has been compiled via this builder might use offsets
+  // into the pool that are no longer correct.
+  void Reset();
+
+  // Initialize this object pool builder with a [zone].
+  //
+  // Any objects added later on will be referenced using handles from [zone].
+  void InitializeWithZone(Zone* zone) {
+    ASSERT(object_pool_.length() == 0);
+    ASSERT(zone_ == nullptr && zone != nullptr);
+    zone_ = zone;
+  }
+
+  intptr_t AddObject(const Object& obj,
+                     ObjectPoolBuilderEntry::Patchability patchable =
+                         ObjectPoolBuilderEntry::kNotPatchable);
+  intptr_t AddImmediate(uword imm);
+
+  intptr_t FindObject(const Object& obj,
+                      ObjectPoolBuilderEntry::Patchability patchable =
+                          ObjectPoolBuilderEntry::kNotPatchable);
+  intptr_t FindObject(const Object& obj, const Object& equivalence);
+  intptr_t FindImmediate(uword imm);
+  intptr_t FindNativeFunction(const ExternalLabel* label,
+                              ObjectPoolBuilderEntry::Patchability patchable);
+  intptr_t FindNativeFunctionWrapper(
+      const ExternalLabel* label,
+      ObjectPoolBuilderEntry::Patchability patchable);
+
+  intptr_t CurrentLength() const { return object_pool_.length(); }
+  ObjectPoolBuilderEntry& EntryAt(intptr_t i) { return object_pool_[i]; }
+  const ObjectPoolBuilderEntry& EntryAt(intptr_t i) const {
+    return object_pool_[i];
+  }
+
+  intptr_t AddObject(ObjectPoolBuilderEntry entry);
+
+ private:
+  intptr_t FindObject(ObjectPoolBuilderEntry entry);
+
+  // Objects and jump targets.
+  GrowableArray<ObjectPoolBuilderEntry> object_pool_;
+
+  // Hashmap for fast lookup in object pool.
+  DirectChainedHashMap<ObjIndexPair> object_pool_index_table_;
+
+  // The zone used for allocating the handles we keep in the map and array (or
+  // nullptr, in which case allocations happen using the zone active at the
+  // point of insertion).
+  Zone* zone_;
+};
+
+}  // namespace compiler
+
+}  // namespace dart
+
+namespace dart {
+using compiler::ObjectPoolBuilder;
+}
+
+#endif  // RUNTIME_VM_COMPILER_ASSEMBLER_OBJECT_POOL_BUILDER_H_
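
A minimal usage sketch for the builder declared above, assuming a Zone
is at hand as elsewhere in the VM and that the Find* methods add an
entry when none is present yet (objects and values here are
illustrative only):

  compiler::ObjectPoolBuilder builder;
  builder.InitializeWithZone(zone);  // Handles now live in [zone].

  // AddObject appends; patchable entries are presumably kept distinct
  // so that each call site can be patched independently.
  const intptr_t obj_index = builder.AddObject(
      Object::null_object(), compiler::ObjectPoolBuilderEntry::kPatchable);
  const intptr_t imm_index = builder.FindImmediate(0x10);

  // Each index denotes a stable slot in the ObjectPool eventually built
  // from this builder.
  ASSERT(builder.CurrentLength() == 2);
  ASSERT(obj_index != imm_index);
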
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler.cc b/runtime/vm/compiler/backend/flow_graph_compiler.cc
index bda03b2..66bedcf 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler.cc
@@ -80,7 +80,8 @@
     if (it.CurrentLocation().IsInvalid() &&
         it.CurrentValue()->definition()->IsPushArgument()) {
       it.SetCurrentLocation(Location::StackSlot(
-          compiler_frame_layout.FrameSlotForVariableIndex(-*stack_height)));
+          compiler::target::frame_layout.FrameSlotForVariableIndex(
+              -*stack_height)));
       (*stack_height)++;
     }
   }
@@ -356,7 +357,8 @@
 #if defined(DART_PRECOMPILER)
 static intptr_t LocationToStackIndex(const Location& src) {
   ASSERT(src.HasStackIndex());
-  return -compiler_frame_layout.VariableIndexForFrameSlot(src.stack_index());
+  return -compiler::target::frame_layout.VariableIndexForFrameSlot(
+      src.stack_index());
 }
 
 static CatchEntryMove CatchEntryMoveFor(Assembler* assembler,
@@ -369,7 +371,7 @@
       return CatchEntryMove();
     }
     const intptr_t pool_index =
-        assembler->object_pool_wrapper().FindObject(src.constant());
+        assembler->object_pool_builder().FindObject(src.constant());
     return CatchEntryMove::FromSlot(CatchEntryMove::SourceKind::kConstant,
                                     pool_index, dst_index);
   }
@@ -1073,7 +1075,7 @@
     info.scope_id = 0;
     info.begin_pos = TokenPosition::kMinSource;
     info.end_pos = TokenPosition::kMinSource;
-    info.set_index(compiler_frame_layout.FrameSlotForVariable(
+    info.set_index(compiler::target::frame_layout.FrameSlotForVariable(
         parsed_function().current_context_var()));
     var_descs.SetVar(0, Symbols::CurrentContextVar(), &info);
   }
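
Every compiler_frame_layout reference in the compiler becomes
compiler::target::frame_layout. For orientation, a sketch of the two
index mappings used throughout the hunks below, with the field meanings
inferred from the call sites (parameters have positive variable indices,
locals have indices <= 0):

  struct FrameLayout {
    intptr_t param_end_from_fp;    // FP-relative slot past the parameters.
    intptr_t first_local_from_fp;  // FP-relative slot of the first local.

    intptr_t FrameSlotForVariableIndex(intptr_t index) const {
      return index <= 0 ? index + first_local_from_fp
                        : index + param_end_from_fp;
    }
    intptr_t VariableIndexForFrameSlot(intptr_t slot) const {
      return slot <= first_local_from_fp ? slot - first_local_from_fp
                                         : slot - param_end_from_fp;
    }
  };
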
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_arm.cc b/runtime/vm/compiler/backend/flow_graph_compiler_arm.cc
index 12e0b4b..6064ec7 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_arm.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_arm.cc
@@ -47,10 +47,11 @@
     const auto& array_stub =
         Code::ZoneHandle(object_store->array_write_barrier_stub());
     if (!array_stub.InVMHeap()) {
-      assembler_->invoke_array_write_barrier_ = [&](Condition condition) {
-        AddPcRelativeCallStubTarget(array_stub);
-        assembler_->GenerateUnRelocatedPcRelativeCall(condition);
-      };
+      assembler_->generate_invoke_array_write_barrier_ =
+          [&](Condition condition) {
+            AddPcRelativeCallStubTarget(array_stub);
+            assembler_->GenerateUnRelocatedPcRelativeCall(condition);
+          };
     }
   }
 }
@@ -770,11 +771,11 @@
   // (see runtime/vm/runtime_entry.cc:TypeCheck).  It will use pattern matching
   // on the call site to find out at which pool index the destination name is
   // located.
-  const intptr_t sub_type_cache_index = __ object_pool_wrapper().AddObject(
+  const intptr_t sub_type_cache_index = __ object_pool_builder().AddObject(
       Object::null_object(), ObjectPool::Patchability::kPatchable);
   const intptr_t sub_type_cache_offset =
       ObjectPool::element_offset(sub_type_cache_index) - kHeapObjectTag;
-  const intptr_t dst_name_index = __ object_pool_wrapper().AddObject(
+  const intptr_t dst_name_index = __ object_pool_builder().AddObject(
       dst_name, ObjectPool::Patchability::kPatchable);
   ASSERT((sub_type_cache_index + 1) == dst_name_index);
   ASSERT(__ constant_pool_allowed());
@@ -809,9 +810,9 @@
   const Code& build_method_extractor = Code::ZoneHandle(
       isolate()->object_store()->build_method_extractor_code());
 
-  const intptr_t stub_index = __ object_pool_wrapper().AddObject(
+  const intptr_t stub_index = __ object_pool_builder().AddObject(
       build_method_extractor, ObjectPool::Patchability::kNotPatchable);
-  const intptr_t function_index = __ object_pool_wrapper().AddObject(
+  const intptr_t function_index = __ object_pool_builder().AddObject(
       extracted_method, ObjectPool::Patchability::kNotPatchable);
 
   // We use a custom pool register to preserve caller PP.
@@ -882,8 +883,8 @@
     }
     __ CompareImmediate(R3, GetOptimizationThreshold());
     ASSERT(function_reg == R8);
-    __ Branch(StubCode::OptimizeFunction(), ObjectPool::kNotPatchable, new_pp,
-              GE);
+    __ Branch(StubCode::OptimizeFunction(),
+              compiler::ObjectPoolBuilderEntry::kNotPatchable, new_pp, GE);
   }
   __ Comment("Enter frame");
   if (flow_graph().IsCompiledForOsr()) {
@@ -906,7 +907,7 @@
 
     intptr_t args_desc_slot = -1;
     if (parsed_function().has_arg_desc_var()) {
-      args_desc_slot = compiler_frame_layout.FrameSlotForVariable(
+      args_desc_slot = compiler::target::frame_layout.FrameSlotForVariable(
           parsed_function().arg_desc_var());
     }
 
@@ -916,7 +917,7 @@
     }
     for (intptr_t i = 0; i < num_locals; ++i) {
       const intptr_t slot_index =
-          compiler_frame_layout.FrameSlotForVariableIndex(-i);
+          compiler::target::frame_layout.FrameSlotForVariableIndex(-i);
       Register value_reg = slot_index == args_desc_slot ? ARGS_DESC_REG : R0;
       __ StoreToOffset(kWord, value_reg, FP, slot_index * kWordSize);
     }
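
The type-check hunk above relies on an adjacency invariant: the subtype
test cache and the destination name must land in consecutive pool slots,
because the runtime (runtime_entry.cc:TypeCheck) pattern matches the
call site and reads the name at cache index + 1. Sketched against the
builder API, with pool and dst_name standing in for the real handles:

  const intptr_t cache_index = pool.AddObject(
      Object::null_object(), compiler::ObjectPoolBuilderEntry::kPatchable);
  const intptr_t name_index = pool.AddObject(
      dst_name, compiler::ObjectPoolBuilderEntry::kPatchable);
  // AddObject appends rather than deduplicating, so the two entries are
  // adjacent even if equal objects already sit elsewhere in the pool.
  ASSERT(name_index == cache_index + 1);
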
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc b/runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc
index 7e50e57..08be0de 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc
@@ -44,7 +44,7 @@
     const auto& array_stub =
         Code::ZoneHandle(object_store->array_write_barrier_stub());
     if (!array_stub.InVMHeap()) {
-      assembler_->invoke_array_write_barrier_ = [&]() {
+      assembler_->generate_invoke_array_write_barrier_ = [&]() {
         AddPcRelativeCallStubTarget(array_stub);
         assembler_->GenerateUnRelocatedPcRelativeCall();
       };
@@ -748,11 +748,11 @@
   // (see runtime/vm/runtime_entry.cc:TypeCheck).  It will use pattern matching
   // on the call site to find out at which pool index the destination name is
   // located.
-  const intptr_t sub_type_cache_index = __ object_pool_wrapper().AddObject(
+  const intptr_t sub_type_cache_index = __ object_pool_builder().AddObject(
       Object::null_object(), ObjectPool::Patchability::kPatchable);
   const intptr_t sub_type_cache_offset =
       ObjectPool::element_offset(sub_type_cache_index);
-  const intptr_t dst_name_index = __ object_pool_wrapper().AddObject(
+  const intptr_t dst_name_index = __ object_pool_builder().AddObject(
       dst_name, ObjectPool::Patchability::kPatchable);
   ASSERT((sub_type_cache_index + 1) == dst_name_index);
   ASSERT(__ constant_pool_allowed());
@@ -786,9 +786,9 @@
   const Code& build_method_extractor = Code::ZoneHandle(
       isolate()->object_store()->build_method_extractor_code());
 
-  const intptr_t stub_index = __ object_pool_wrapper().AddObject(
+  const intptr_t stub_index = __ object_pool_builder().AddObject(
       build_method_extractor, ObjectPool::Patchability::kNotPatchable);
-  const intptr_t function_index = __ object_pool_wrapper().AddObject(
+  const intptr_t function_index = __ object_pool_builder().AddObject(
       extracted_method, ObjectPool::Patchability::kNotPatchable);
 
   // We use a custom pool register to preserve caller PP.
@@ -918,7 +918,7 @@
 
     intptr_t args_desc_slot = -1;
     if (parsed_function().has_arg_desc_var()) {
-      args_desc_slot = compiler_frame_layout.FrameSlotForVariable(
+      args_desc_slot = compiler::target::frame_layout.FrameSlotForVariable(
           parsed_function().arg_desc_var());
     }
 
@@ -928,7 +928,7 @@
     }
     for (intptr_t i = 0; i < num_locals; ++i) {
       const intptr_t slot_index =
-          compiler_frame_layout.FrameSlotForVariableIndex(-i);
+          compiler::target::frame_layout.FrameSlotForVariableIndex(-i);
       Register value_reg = slot_index == args_desc_slot ? ARGS_DESC_REG : R0;
       __ StoreToOffset(value_reg, FP, slot_index * kWordSize);
     }
@@ -1111,7 +1111,7 @@
   ASSERT(ic_data.NumArgsTested() == 1);
   const Code& initial_stub = StubCode::ICCallThroughFunction();
 
-  auto& op = __ object_pool_wrapper();
+  auto& op = __ object_pool_builder();
 
   __ Comment("SwitchableCall");
   __ LoadFromOffset(R0, SP, (ic_data.CountWithoutTypeArgs() - 1) * kWordSize);
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_dbc.cc b/runtime/vm/compiler/backend/flow_graph_compiler_dbc.cc
index b778688..09c490bd 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_dbc.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_dbc.cc
@@ -112,7 +112,8 @@
     builder->AddCopy(
         NULL,
         Location::StackSlot(
-            compiler_frame_layout.FrameSlotForVariableIndex(-stack_height)),
+            compiler::target::frame_layout.FrameSlotForVariableIndex(
+                -stack_height)),
         slot_ix++);
   }
 
@@ -282,7 +283,7 @@
 }
 
 void FlowGraphCompiler::GenerateGetterIntrinsic(intptr_t offset) {
-  __ Move(0, -(1 + compiler_frame_layout.param_end_from_fp));
+  __ Move(0, -(1 + compiler::target::frame_layout.param_end_from_fp));
   ASSERT(offset % kWordSize == 0);
   if (Utils::IsInt(8, offset / kWordSize)) {
     __ LoadField(0, 0, offset / kWordSize);
@@ -294,8 +295,8 @@
 }
 
 void FlowGraphCompiler::GenerateSetterIntrinsic(intptr_t offset) {
-  __ Move(0, -(2 + compiler_frame_layout.param_end_from_fp));
-  __ Move(1, -(1 + compiler_frame_layout.param_end_from_fp));
+  __ Move(0, -(2 + compiler::target::frame_layout.param_end_from_fp));
+  __ Move(1, -(1 + compiler::target::frame_layout.param_end_from_fp));
   ASSERT(offset % kWordSize == 0);
   if (Utils::IsInt(8, offset / kWordSize)) {
     __ StoreField(0, offset / kWordSize, 1);
@@ -328,8 +329,9 @@
     if (parsed_function().has_arg_desc_var()) {
      // TODO(kustermann): If the dbc simulator put the args_desc_ into the
       // _special_regs, we could replace these 3 with the MoveSpecial bytecode.
-      const intptr_t slot_index = compiler_frame_layout.FrameSlotForVariable(
-          parsed_function().arg_desc_var());
+      const intptr_t slot_index =
+          compiler::target::frame_layout.FrameSlotForVariable(
+              parsed_function().arg_desc_var());
       __ LoadArgDescriptor();
       __ StoreLocal(LocalVarIndex(0, slot_index));
       __ Drop(1);
@@ -369,7 +371,8 @@
     // Only allow access to the arguments (which have in the non-inverted stack
     // positive indices).
     ASSERT(source.base_reg() == FPREG);
-    ASSERT(source.stack_index() > compiler_frame_layout.param_end_from_fp);
+    ASSERT(source.stack_index() >
+           compiler::target::frame_layout.param_end_from_fp);
     __ Move(destination.reg(), -source.stack_index());
   } else if (source.IsRegister() && destination.IsRegister()) {
     __ Move(destination.reg(), source.reg());
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_ia32.cc b/runtime/vm/compiler/backend/flow_graph_compiler_ia32.cc
index 84f3c29..ba4960e 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_ia32.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_ia32.cc
@@ -806,7 +806,7 @@
 
     intptr_t args_desc_slot = -1;
     if (parsed_function().has_arg_desc_var()) {
-      args_desc_slot = compiler_frame_layout.FrameSlotForVariable(
+      args_desc_slot = compiler::target::frame_layout.FrameSlotForVariable(
           parsed_function().arg_desc_var());
     }
 
@@ -818,7 +818,7 @@
     }
     for (intptr_t i = 0; i < num_locals; ++i) {
       const intptr_t slot_index =
-          compiler_frame_layout.FrameSlotForVariableIndex(-i);
+          compiler::target::frame_layout.FrameSlotForVariableIndex(-i);
       Register value_reg = slot_index == args_desc_slot ? ARGS_DESC_REG : EAX;
       __ movl(Address(EBP, slot_index * kWordSize), value_reg);
     }
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_x64.cc b/runtime/vm/compiler/backend/flow_graph_compiler_x64.cc
index a466458..1c07f8a 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_x64.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_x64.cc
@@ -43,7 +43,7 @@
     const auto& array_stub =
         Code::ZoneHandle(object_store->array_write_barrier_stub());
     if (!array_stub.InVMHeap()) {
-      assembler_->invoke_array_write_barrier_ = [&]() {
+      assembler_->generate_invoke_array_write_barrier_ = [&]() {
         AddPcRelativeCallStubTarget(array_stub);
         assembler_->GenerateUnRelocatedPcRelativeCall();
       };
@@ -761,12 +761,12 @@
   // (see runtime/vm/runtime_entry.cc:TypeCheck).  It will use pattern matching
   // on the call site to find out at which pool index the destination name is
   // located.
-  const intptr_t sub_type_cache_index = __ object_pool_wrapper().AddObject(
-      Object::null_object(), ObjectPool::Patchability::kPatchable);
+  const intptr_t sub_type_cache_index = __ object_pool_builder().AddObject(
+      Object::null_object(), compiler::ObjectPoolBuilderEntry::kPatchable);
   const intptr_t sub_type_cache_offset =
       ObjectPool::element_offset(sub_type_cache_index) - kHeapObjectTag;
-  const intptr_t dst_name_index = __ object_pool_wrapper().AddObject(
-      dst_name, ObjectPool::Patchability::kPatchable);
+  const intptr_t dst_name_index = __ object_pool_builder().AddObject(
+      dst_name, compiler::ObjectPoolBuilderEntry::kPatchable);
   ASSERT((sub_type_cache_index + 1) == dst_name_index);
   ASSERT(__ constant_pool_allowed());
 
@@ -805,10 +805,10 @@
       isolate()->object_store()->build_method_extractor_code());
   ASSERT(!build_method_extractor.IsNull());
 
-  const intptr_t stub_index = __ object_pool_wrapper().AddObject(
-      build_method_extractor, ObjectPool::Patchability::kNotPatchable);
-  const intptr_t function_index = __ object_pool_wrapper().AddObject(
-      extracted_method, ObjectPool::Patchability::kNotPatchable);
+  const intptr_t stub_index = __ object_pool_builder().AddObject(
+      build_method_extractor, compiler::ObjectPoolBuilderEntry::kNotPatchable);
+  const intptr_t function_index = __ object_pool_builder().AddObject(
+      extracted_method, compiler::ObjectPoolBuilderEntry::kNotPatchable);
 
   // We use a custom pool register to preserve caller PP.
   Register kPoolReg = RAX;
@@ -901,7 +901,7 @@
 
     intptr_t args_desc_slot = -1;
     if (parsed_function().has_arg_desc_var()) {
-      args_desc_slot = compiler_frame_layout.FrameSlotForVariable(
+      args_desc_slot = compiler::target::frame_layout.FrameSlotForVariable(
           parsed_function().arg_desc_var());
     }
 
@@ -911,7 +911,7 @@
     }
     for (intptr_t i = 0; i < num_locals; ++i) {
       const intptr_t slot_index =
-          compiler_frame_layout.FrameSlotForVariableIndex(-i);
+          compiler::target::frame_layout.FrameSlotForVariableIndex(-i);
       Register value_reg = slot_index == args_desc_slot ? ARGS_DESC_REG : RAX;
       __ movq(Address(RBP, slot_index * kWordSize), value_reg);
     }
@@ -1307,13 +1307,13 @@
     } else {
       ASSERT(destination.IsStackSlot());
       ASSERT((destination.base_reg() != FPREG) ||
-             ((-compiler_frame_layout.VariableIndexForFrameSlot(
+             ((-compiler::target::frame_layout.VariableIndexForFrameSlot(
                   destination.stack_index())) < compiler_->StackSize()));
       __ movq(destination.ToStackSlotAddress(), source.reg());
     }
   } else if (source.IsStackSlot()) {
     ASSERT((source.base_reg() != FPREG) ||
-           ((-compiler_frame_layout.VariableIndexForFrameSlot(
+           ((-compiler::target::frame_layout.VariableIndexForFrameSlot(
                 source.stack_index())) < compiler_->StackSize()));
     if (destination.IsRegister()) {
       __ movq(destination.reg(), source.ToStackSlotAddress());
diff --git a/runtime/vm/compiler/backend/il.cc b/runtime/vm/compiler/backend/il.cc
index 22d35d5..029a337 100644
--- a/runtime/vm/compiler/backend/il.cc
+++ b/runtime/vm/compiler/backend/il.cc
@@ -4537,7 +4537,7 @@
                                                FlowGraphCompiler* compiler) {
   const String& function_name = check_null->function_name();
   const intptr_t name_index =
-      compiler->assembler()->object_pool_wrapper().FindObject(function_name);
+      compiler->assembler()->object_pool_builder().FindObject(function_name);
   compiler->AddNullCheck(compiler->assembler()->CodeSize(),
                          check_null->token_pos(), name_index);
 }
diff --git a/runtime/vm/compiler/backend/il_arm.cc b/runtime/vm/compiler/backend/il_arm.cc
index 363f19a..6c3ea35 100644
--- a/runtime/vm/compiler/backend/il_arm.cc
+++ b/runtime/vm/compiler/backend/il_arm.cc
@@ -127,7 +127,8 @@
   Label stack_ok;
   __ Comment("Stack Check");
   const intptr_t fp_sp_dist =
-      (compiler_frame_layout.first_local_from_fp + 1 - compiler->StackSize()) *
+      (compiler::target::frame_layout.first_local_from_fp + 1 -
+       compiler->StackSize()) *
       kWordSize;
   ASSERT(fp_sp_dist <= 0);
   __ sub(R2, SP, Operand(FP));
@@ -282,9 +283,8 @@
 
 void LoadLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   const Register result = locs()->out(0).reg();
-  __ LoadFromOffset(
-      kWord, result, FP,
-      compiler_frame_layout.FrameOffsetInBytesForVariable(&local()));
+  __ LoadFromOffset(kWord, result, FP,
+                    compiler::target::FrameOffsetInBytesForVariable(&local()));
 }
 
 LocationSummary* StoreLocalInstr::MakeLocationSummary(Zone* zone,
@@ -297,9 +297,8 @@
   const Register value = locs()->in(0).reg();
   const Register result = locs()->out(0).reg();
   ASSERT(result == value);  // Assert that register assignment is correct.
-  __ StoreToOffset(
-      kWord, value, FP,
-      compiler_frame_layout.FrameOffsetInBytesForVariable(&local()));
+  __ StoreToOffset(kWord, value, FP,
+                   compiler::target::FrameOffsetInBytesForVariable(&local()));
 }
 
 LocationSummary* ConstantInstr::MakeLocationSummary(Zone* zone,
@@ -967,9 +966,10 @@
   }
   __ LoadImmediate(R1, argc_tag);
   ExternalLabel label(entry);
-  __ LoadNativeEntry(
-      R9, &label,
-      link_lazily() ? ObjectPool::kPatchable : ObjectPool::kNotPatchable);
+  __ LoadNativeEntry(R9, &label,
+                     link_lazily()
+                         ? compiler::ObjectPoolBuilderEntry::kPatchable
+                         : compiler::ObjectPoolBuilderEntry::kNotPatchable);
   if (link_lazily()) {
     compiler->GeneratePatchableCall(token_pos(), *stub,
                                     RawPcDescriptors::kOther, locs());
@@ -2965,21 +2965,22 @@
   // Restore SP from FP as we are coming from a throw and the code for
   // popping arguments has not been run.
   const intptr_t fp_sp_dist =
-      (compiler_frame_layout.first_local_from_fp + 1 - compiler->StackSize()) *
+      (compiler::target::frame_layout.first_local_from_fp + 1 -
+       compiler->StackSize()) *
       kWordSize;
   ASSERT(fp_sp_dist <= 0);
   __ AddImmediate(SP, FP, fp_sp_dist);
 
   if (!compiler->is_optimizing()) {
     if (raw_exception_var_ != nullptr) {
-      __ StoreToOffset(kWord, kExceptionObjectReg, FP,
-                       compiler_frame_layout.FrameOffsetInBytesForVariable(
-                           raw_exception_var_));
+      __ StoreToOffset(
+          kWord, kExceptionObjectReg, FP,
+          compiler::target::FrameOffsetInBytesForVariable(raw_exception_var_));
     }
     if (raw_stacktrace_var_ != nullptr) {
-      __ StoreToOffset(kWord, kStackTraceObjectReg, FP,
-                       compiler_frame_layout.FrameOffsetInBytesForVariable(
-                           raw_stacktrace_var_));
+      __ StoreToOffset(
+          kWord, kStackTraceObjectReg, FP,
+          compiler::target::FrameOffsetInBytesForVariable(raw_stacktrace_var_));
     }
   }
 }
@@ -3029,11 +3030,9 @@
     compiler->pending_deoptimization_env_ = env;
 
     if (using_shared_stub) {
-      uword entry_point_offset =
-          instruction()->locs()->live_registers()->FpuRegisterCount() > 0
-              ? Thread::stack_overflow_shared_with_fpu_regs_entry_point_offset()
-              : Thread::
-                    stack_overflow_shared_without_fpu_regs_entry_point_offset();
+      const uword entry_point_offset =
+          Thread::stack_overflow_shared_stub_entry_point_offset(
+              instruction()->locs()->live_registers()->FpuRegisterCount() > 0);
       __ ldr(LR, Address(THR, entry_point_offset));
       __ blx(LR);
       compiler->RecordSafepoint(instruction()->locs(), kNumSlowPathArgs);
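
The shared stack-overflow stub selection above is now behind a single
Thread helper. Its body is presumably just the old ternary folded into
one place; a sketch using only names that appear in this diff:

  static uword stack_overflow_shared_stub_entry_point_offset(
      bool with_fpu_regs) {
    return with_fpu_regs
               ? stack_overflow_shared_with_fpu_regs_entry_point_offset()
               : stack_overflow_shared_without_fpu_regs_entry_point_offset();
  }
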
diff --git a/runtime/vm/compiler/backend/il_arm64.cc b/runtime/vm/compiler/backend/il_arm64.cc
index b9f06c7..ba567b0 100644
--- a/runtime/vm/compiler/backend/il_arm64.cc
+++ b/runtime/vm/compiler/backend/il_arm64.cc
@@ -127,7 +127,8 @@
   Label stack_ok;
   __ Comment("Stack Check");
   const intptr_t fp_sp_dist =
-      (compiler_frame_layout.first_local_from_fp + 1 - compiler->StackSize()) *
+      (compiler::target::frame_layout.first_local_from_fp + 1 -
+       compiler->StackSize()) *
       kWordSize;
   ASSERT(fp_sp_dist <= 0);
   __ sub(R2, SP, Operand(FP));
@@ -281,9 +282,8 @@
 
 void LoadLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   const Register result = locs()->out(0).reg();
-  __ LoadFromOffset(
-      result, FP,
-      compiler_frame_layout.FrameOffsetInBytesForVariable(&local()));
+  __ LoadFromOffset(result, FP,
+                    compiler::target::FrameOffsetInBytesForVariable(&local()));
 }
 
 LocationSummary* StoreLocalInstr::MakeLocationSummary(Zone* zone,
@@ -296,8 +296,8 @@
   const Register value = locs()->in(0).reg();
   const Register result = locs()->out(0).reg();
   ASSERT(result == value);  // Assert that register assignment is correct.
-  __ StoreToOffset(
-      value, FP, compiler_frame_layout.FrameOffsetInBytesForVariable(&local()));
+  __ StoreToOffset(value, FP,
+                   compiler::target::FrameOffsetInBytesForVariable(&local()));
 }
 
 LocationSummary* ConstantInstr::MakeLocationSummary(Zone* zone,
@@ -857,9 +857,9 @@
   }
   __ LoadImmediate(R1, argc_tag);
   ExternalLabel label(entry);
-  __ LoadNativeEntry(
-      R5, &label,
-      link_lazily() ? ObjectPool::kPatchable : ObjectPool::kNotPatchable);
+  __ LoadNativeEntry(R5, &label,
+                     link_lazily() ? ObjectPool::Patchability::kPatchable
+                                   : ObjectPool::Patchability::kNotPatchable);
   if (link_lazily()) {
     compiler->GeneratePatchableCall(token_pos(), *stub,
                                     RawPcDescriptors::kOther, locs());
@@ -2653,21 +2653,22 @@
   // Restore SP from FP as we are coming from a throw and the code for
   // popping arguments has not been run.
   const intptr_t fp_sp_dist =
-      (compiler_frame_layout.first_local_from_fp + 1 - compiler->StackSize()) *
+      (compiler::target::frame_layout.first_local_from_fp + 1 -
+       compiler->StackSize()) *
       kWordSize;
   ASSERT(fp_sp_dist <= 0);
   __ AddImmediate(SP, FP, fp_sp_dist);
 
   if (!compiler->is_optimizing()) {
     if (raw_exception_var_ != nullptr) {
-      __ StoreToOffset(kExceptionObjectReg, FP,
-                       compiler_frame_layout.FrameOffsetInBytesForVariable(
-                           raw_exception_var_));
+      __ StoreToOffset(
+          kExceptionObjectReg, FP,
+          compiler::target::FrameOffsetInBytesForVariable(raw_exception_var_));
     }
     if (raw_stacktrace_var_ != nullptr) {
-      __ StoreToOffset(kStackTraceObjectReg, FP,
-                       compiler_frame_layout.FrameOffsetInBytesForVariable(
-                           raw_stacktrace_var_));
+      __ StoreToOffset(
+          kStackTraceObjectReg, FP,
+          compiler::target::FrameOffsetInBytesForVariable(raw_stacktrace_var_));
     }
   }
 }
@@ -2731,12 +2732,9 @@
         __ GenerateUnRelocatedPcRelativeCall();
 
       } else {
-        uword entry_point_offset =
-            locs->live_registers()->FpuRegisterCount() > 0
-                ? Thread::
-                      stack_overflow_shared_with_fpu_regs_entry_point_offset()
-                : Thread::
-                      stack_overflow_shared_without_fpu_regs_entry_point_offset();
+        const uword entry_point_offset =
+            Thread::stack_overflow_shared_stub_entry_point_offset(
+                locs->live_registers()->FpuRegisterCount() > 0);
         __ ldr(LR, Address(THR, entry_point_offset));
         __ blr(LR);
       }
diff --git a/runtime/vm/compiler/backend/il_dbc.cc b/runtime/vm/compiler/backend/il_dbc.cc
index c44cd43..70f8e2b 100644
--- a/runtime/vm/compiler/backend/il_dbc.cc
+++ b/runtime/vm/compiler/backend/il_dbc.cc
@@ -332,14 +332,14 @@
 EMIT_NATIVE_CODE(LoadLocal, 0) {
   ASSERT(!compiler->is_optimizing());
   const intptr_t slot_index =
-      compiler_frame_layout.FrameSlotForVariable(&local());
+      compiler::target::frame_layout.FrameSlotForVariable(&local());
   __ Push(LocalVarIndex(0, slot_index));
 }
 
 EMIT_NATIVE_CODE(StoreLocal, 0) {
   ASSERT(!compiler->is_optimizing());
   const intptr_t slot_index =
-      compiler_frame_layout.FrameSlotForVariable(&local());
+      compiler::target::frame_layout.FrameSlotForVariable(&local());
   if (HasTemp()) {
     __ StoreLocal(LocalVarIndex(0, slot_index));
   } else {
@@ -984,13 +984,13 @@
 
   const ExternalLabel trampoline_label(reinterpret_cast<uword>(trampoline));
   const intptr_t trampoline_kidx =
-      __ object_pool_wrapper().FindNativeFunctionWrapper(
-          &trampoline_label, ObjectPool::kPatchable);
+      __ object_pool_builder().FindNativeFunctionWrapper(
+          &trampoline_label, ObjectPool::Patchability::kPatchable);
   const ExternalLabel label(reinterpret_cast<uword>(function));
-  const intptr_t target_kidx = __ object_pool_wrapper().FindNativeFunction(
-      &label, ObjectPool::kPatchable);
+  const intptr_t target_kidx = __ object_pool_builder().FindNativeFunction(
+      &label, ObjectPool::Patchability::kPatchable);
   const intptr_t argc_tag_kidx =
-      __ object_pool_wrapper().FindImmediate(static_cast<uword>(argc_tag));
+      __ object_pool_builder().FindImmediate(static_cast<uword>(argc_tag));
   __ NativeCall(trampoline_kidx, target_kidx, argc_tag_kidx);
   compiler->RecordSafepoint(locs());
   compiler->AddCurrentDescriptor(RawPcDescriptors::kOther, DeoptId::kNone,
@@ -1199,13 +1199,13 @@
   if (!compiler->is_optimizing()) {
     if (raw_exception_var_ != nullptr) {
       __ MoveSpecial(
-          LocalVarIndex(0, compiler_frame_layout.FrameSlotForVariable(
+          LocalVarIndex(0, compiler::target::frame_layout.FrameSlotForVariable(
                                raw_exception_var_)),
           Simulator::kExceptionSpecialIndex);
     }
     if (raw_stacktrace_var_ != nullptr) {
       __ MoveSpecial(
-          LocalVarIndex(0, compiler_frame_layout.FrameSlotForVariable(
+          LocalVarIndex(0, compiler::target::frame_layout.FrameSlotForVariable(
                                raw_stacktrace_var_)),
           Simulator::kStackTraceSpecialIndex);
     }
diff --git a/runtime/vm/compiler/backend/il_ia32.cc b/runtime/vm/compiler/backend/il_ia32.cc
index e1cce1c..3f6ee68 100644
--- a/runtime/vm/compiler/backend/il_ia32.cc
+++ b/runtime/vm/compiler/backend/il_ia32.cc
@@ -118,7 +118,8 @@
   __ Comment("Stack Check");
   Label done;
   const intptr_t fp_sp_dist =
-      (compiler_frame_layout.first_local_from_fp + 1 - compiler->StackSize()) *
+      (compiler::target::frame_layout.first_local_from_fp + 1 -
+       compiler->StackSize()) *
       kWordSize;
   ASSERT(fp_sp_dist <= 0);
   __ movl(EDI, ESP);
@@ -136,7 +137,7 @@
                                                      bool opt) const {
   const intptr_t kNumInputs = 0;
   const intptr_t stack_index =
-      compiler_frame_layout.FrameSlotForVariable(&local());
+      compiler::target::frame_layout.FrameSlotForVariable(&local());
   return LocationSummary::Make(zone, kNumInputs,
                                Location::StackSlot(stack_index),
                                LocationSummary::kNoCall);
@@ -158,9 +159,9 @@
   Register value = locs()->in(0).reg();
   Register result = locs()->out(0).reg();
   ASSERT(result == value);  // Assert that register assignment is correct.
-  __ movl(Address(EBP, compiler_frame_layout.FrameOffsetInBytesForVariable(
-                           &local())),
-          value);
+  __ movl(
+      Address(EBP, compiler::target::FrameOffsetInBytesForVariable(&local())),
+      value);
 }
 
 LocationSummary* ConstantInstr::MakeLocationSummary(Zone* zone,
@@ -2544,19 +2545,20 @@
   // Restore ESP from EBP as we are coming from a throw and the code for
   // popping arguments has not been run.
   const intptr_t fp_sp_dist =
-      (compiler_frame_layout.first_local_from_fp + 1 - compiler->StackSize()) *
+      (compiler::target::frame_layout.first_local_from_fp + 1 -
+       compiler->StackSize()) *
       kWordSize;
   ASSERT(fp_sp_dist <= 0);
   __ leal(ESP, Address(EBP, fp_sp_dist));
 
   if (!compiler->is_optimizing()) {
     if (raw_exception_var_ != nullptr) {
-      __ movl(Address(EBP, compiler_frame_layout.FrameOffsetInBytesForVariable(
+      __ movl(Address(EBP, compiler::target::FrameOffsetInBytesForVariable(
                                raw_exception_var_)),
               kExceptionObjectReg);
     }
     if (raw_stacktrace_var_ != nullptr) {
-      __ movl(Address(EBP, compiler_frame_layout.FrameOffsetInBytesForVariable(
+      __ movl(Address(EBP, compiler::target::FrameOffsetInBytesForVariable(
                                raw_stacktrace_var_)),
               kStackTraceObjectReg);
     }
@@ -5989,8 +5991,8 @@
   Register target_reg = locs()->temp_slot(0)->reg();
 
   // Load code object from frame.
-  __ movl(target_reg,
-          Address(EBP, compiler_frame_layout.code_from_fp * kWordSize));
+  __ movl(target_reg, Address(EBP, compiler::target::frame_layout.code_from_fp *
+                                       kWordSize));
   // Load instructions object (active_instructions and Code::entry_point() may
   // not point to this instruction object any more; see Code::DisableDartCode).
   __ movl(target_reg,
diff --git a/runtime/vm/compiler/backend/il_x64.cc b/runtime/vm/compiler/backend/il_x64.cc
index f53888d..aa045cf 100644
--- a/runtime/vm/compiler/backend/il_x64.cc
+++ b/runtime/vm/compiler/backend/il_x64.cc
@@ -118,7 +118,8 @@
   __ Comment("Stack Check");
   Label done;
   const intptr_t fp_sp_dist =
-      (compiler_frame_layout.first_local_from_fp + 1 - compiler->StackSize()) *
+      (compiler::target::frame_layout.first_local_from_fp + 1 -
+       compiler->StackSize()) *
       kWordSize;
   ASSERT(fp_sp_dist <= 0);
   __ movq(RDI, RSP);
@@ -238,7 +239,7 @@
                                                      bool opt) const {
   const intptr_t kNumInputs = 0;
   const intptr_t stack_index =
-      compiler_frame_layout.FrameSlotForVariable(&local());
+      compiler::target::frame_layout.FrameSlotForVariable(&local());
   return LocationSummary::Make(zone, kNumInputs,
                                Location::StackSlot(stack_index),
                                LocationSummary::kNoCall);
@@ -260,9 +261,9 @@
   Register value = locs()->in(0).reg();
   Register result = locs()->out(0).reg();
   ASSERT(result == value);  // Assert that register assignment is correct.
-  __ movq(Address(RBP, compiler_frame_layout.FrameOffsetInBytesForVariable(
-                           &local())),
-          value);
+  __ movq(
+      Address(RBP, compiler::target::FrameOffsetInBytesForVariable(&local())),
+      value);
 }
 
 LocationSummary* ConstantInstr::MakeLocationSummary(Zone* zone,
@@ -853,7 +854,8 @@
   if (link_lazily()) {
     stub = &StubCode::CallBootstrapNative();
     ExternalLabel label(NativeEntry::LinkNativeCallEntry());
-    __ LoadNativeEntry(RBX, &label, ObjectPool::kPatchable);
+    __ LoadNativeEntry(RBX, &label,
+                       compiler::ObjectPoolBuilderEntry::kPatchable);
     compiler->GeneratePatchableCall(token_pos(), *stub,
                                     RawPcDescriptors::kOther, locs());
   } else {
@@ -865,7 +867,8 @@
       stub = &StubCode::CallNoScopeNative();
     }
     const ExternalLabel label(reinterpret_cast<uword>(native_c_function()));
-    __ LoadNativeEntry(RBX, &label, ObjectPool::kNotPatchable);
+    __ LoadNativeEntry(RBX, &label,
+                       compiler::ObjectPoolBuilderEntry::kNotPatchable);
     compiler->GenerateCall(token_pos(), *stub, RawPcDescriptors::kOther,
                            locs());
   }
@@ -2668,19 +2671,20 @@
   // Restore RSP from RBP as we are coming from a throw and the code for
   // popping arguments has not been run.
   const intptr_t fp_sp_dist =
-      (compiler_frame_layout.first_local_from_fp + 1 - compiler->StackSize()) *
+      (compiler::target::frame_layout.first_local_from_fp + 1 -
+       compiler->StackSize()) *
       kWordSize;
   ASSERT(fp_sp_dist <= 0);
   __ leaq(RSP, Address(RBP, fp_sp_dist));
 
   if (!compiler->is_optimizing()) {
     if (raw_exception_var_ != nullptr) {
-      __ movq(Address(RBP, compiler_frame_layout.FrameOffsetInBytesForVariable(
+      __ movq(Address(RBP, compiler::target::FrameOffsetInBytesForVariable(
                                raw_exception_var_)),
               kExceptionObjectReg);
     }
     if (raw_stacktrace_var_ != nullptr) {
-      __ movq(Address(RBP, compiler_frame_layout.FrameOffsetInBytesForVariable(
+      __ movq(Address(RBP, compiler::target::FrameOffsetInBytesForVariable(
                                raw_stacktrace_var_)),
               kStackTraceObjectReg);
     }
@@ -2730,11 +2734,9 @@
     compiler->pending_deoptimization_env_ = env;
 
     if (using_shared_stub) {
-      uword entry_point_offset =
-          instruction()->locs()->live_registers()->FpuRegisterCount() > 0
-              ? Thread::stack_overflow_shared_with_fpu_regs_entry_point_offset()
-              : Thread::
-                    stack_overflow_shared_without_fpu_regs_entry_point_offset();
+      const uword entry_point_offset =
+          Thread::stack_overflow_shared_stub_entry_point_offset(
+              instruction()->locs()->live_registers()->FpuRegisterCount() > 0);
       __ call(Address(THR, entry_point_offset));
       compiler->RecordSafepoint(instruction()->locs(), kNumSlowPathArgs);
       compiler->RecordCatchEntryMoves();
@@ -6185,7 +6187,7 @@
     ASSERT(__ CodeSize() == entry_to_rip_offset);
   }
 
-  // Load from [current frame pointer] + compiler_frame_layout.code_from_fp.
+  // Load from FP + compiler::target::frame_layout.code_from_fp.
 
   // Calculate the final absolute address.
   if (offset()->definition()->representation() == kTagged) {
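
For the NativeCall lowering above: lazy linking stores a patchable pool
entry that initially points at the lazy-link stub, and the runtime later
patches that same slot with the resolved native function. A sketch of
the patchability choice, mirroring the x64 hunk:

  if (link_lazily()) {
    // The slot starts out as LinkNativeCallEntry and is rewritten by
    // the runtime on the first call, hence kPatchable.
    ExternalLabel label(NativeEntry::LinkNativeCallEntry());
    __ LoadNativeEntry(RBX, &label,
                       compiler::ObjectPoolBuilderEntry::kPatchable);
  } else {
    // The target is final, so the slot may be shared and never changes.
    const ExternalLabel label(reinterpret_cast<uword>(native_c_function()));
    __ LoadNativeEntry(RBX, &label,
                       compiler::ObjectPoolBuilderEntry::kNotPatchable);
  }
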
diff --git a/runtime/vm/compiler/backend/linearscan.cc b/runtime/vm/compiler/backend/linearscan.cc
index 15184b1..b09079d 100644
--- a/runtime/vm/compiler/backend/linearscan.cc
+++ b/runtime/vm/compiler/backend/linearscan.cc
@@ -474,7 +474,7 @@
   assigned_location().Print();
   if (spill_slot_.HasStackIndex()) {
     const intptr_t stack_slot =
-        -compiler_frame_layout.VariableIndexForFrameSlot(
+        -compiler::target::frame_layout.VariableIndexForFrameSlot(
             spill_slot_.stack_index());
     THR_Print(" allocated spill slot: %" Pd "", stack_slot);
   }
@@ -750,7 +750,8 @@
     }
 #endif  // defined(TARGET_ARCH_DBC)
     if (param->base_reg() == FPREG) {
-      slot_index = compiler_frame_layout.FrameSlotForVariableIndex(-slot_index);
+      slot_index =
+          compiler::target::frame_layout.FrameSlotForVariableIndex(-slot_index);
     }
     range->set_assigned_location(
         Location::StackSlot(slot_index, param->base_reg()));
@@ -793,7 +794,8 @@
   ConvertAllUses(range);
   Location spill_slot = range->spill_slot();
   if (spill_slot.IsStackSlot() && spill_slot.base_reg() == FPREG &&
-      spill_slot.stack_index() <= compiler_frame_layout.first_local_from_fp) {
+      spill_slot.stack_index() <=
+          compiler::target::frame_layout.first_local_from_fp) {
     // On entry to the function, range is stored on the stack above the FP in
     // the same space which is used for spill slots. Update spill slot state to
     // reflect that and prevent register allocator from reusing this space as a
@@ -2039,15 +2041,16 @@
   // Assign spill slot to the range.
   if (register_kind_ == Location::kRegister) {
     const intptr_t slot_index =
-        compiler_frame_layout.FrameSlotForVariableIndex(-idx);
+        compiler::target::frame_layout.FrameSlotForVariableIndex(-idx);
     range->set_spill_slot(Location::StackSlot(slot_index));
   } else {
     // We use the index of the slot with the lowest address as an index for the
     // FPU register spill slot. In terms of indexes this relation is inverted:
     // so we have to take the highest index.
-    const intptr_t slot_idx = compiler_frame_layout.FrameSlotForVariableIndex(
-        -(cpu_spill_slot_count_ + idx * kDoubleSpillFactor +
-          (kDoubleSpillFactor - 1)));
+    const intptr_t slot_idx =
+        compiler::target::frame_layout.FrameSlotForVariableIndex(
+            -(cpu_spill_slot_count_ + idx * kDoubleSpillFactor +
+              (kDoubleSpillFactor - 1)));
 
     Location location;
     if ((range->representation() == kUnboxedFloat32x4) ||
@@ -2069,7 +2072,7 @@
   Location spill_slot = range->spill_slot();
   intptr_t stack_index = spill_slot.stack_index();
   if (spill_slot.base_reg() == FPREG) {
-    stack_index = -compiler_frame_layout.VariableIndexForFrameSlot(
+    stack_index = -compiler::target::frame_layout.VariableIndexForFrameSlot(
         spill_slot.stack_index());
   }
   ASSERT(stack_index >= 0);
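
The linearscan call sites above rely on FrameSlotForVariableIndex and
VariableIndexForFrameSlot being inverses, which is why the negations at the
call sites cancel. An illustrative round trip (the index value is
hypothetical):

    const intptr_t var_index = -1;  // some local variable index
    const intptr_t slot =
        compiler::target::frame_layout.FrameSlotForVariableIndex(var_index);
    ASSERT(compiler::target::frame_layout.VariableIndexForFrameSlot(slot) ==
           var_index);
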
diff --git a/runtime/vm/compiler/backend/locations.cc b/runtime/vm/compiler/backend/locations.cc
index 569be42..a06b4b8 100644
--- a/runtime/vm/compiler/backend/locations.cc
+++ b/runtime/vm/compiler/backend/locations.cc
@@ -233,20 +233,20 @@
     intptr_t index = cpu_reg_slots[reg()];
     ASSERT(index >= 0);
     return Location::StackSlot(
-        compiler_frame_layout.FrameSlotForVariableIndex(-index));
+        compiler::target::frame_layout.FrameSlotForVariableIndex(-index));
   } else if (IsFpuRegister()) {
     intptr_t index = fpu_reg_slots[fpu_reg()];
     ASSERT(index >= 0);
     switch (def->representation()) {
       case kUnboxedDouble:
         return Location::DoubleStackSlot(
-            compiler_frame_layout.FrameSlotForVariableIndex(-index));
+            compiler::target::frame_layout.FrameSlotForVariableIndex(-index));
 
       case kUnboxedFloat32x4:
       case kUnboxedInt32x4:
       case kUnboxedFloat64x2:
         return Location::QuadStackSlot(
-            compiler_frame_layout.FrameSlotForVariableIndex(-index));
+            compiler::target::frame_layout.FrameSlotForVariableIndex(-index));
 
       default:
         UNREACHABLE();
@@ -258,7 +258,7 @@
     intptr_t index_hi;
 
     if (value_pair->At(0).IsRegister()) {
-      index_lo = compiler_frame_layout.FrameSlotForVariableIndex(
+      index_lo = compiler::target::frame_layout.FrameSlotForVariableIndex(
           -cpu_reg_slots[value_pair->At(0).reg()]);
     } else {
       ASSERT(value_pair->At(0).IsStackSlot());
@@ -266,7 +266,7 @@
     }
 
     if (value_pair->At(1).IsRegister()) {
-      index_hi = compiler_frame_layout.FrameSlotForVariableIndex(
+      index_hi = compiler::target::frame_layout.FrameSlotForVariableIndex(
           -cpu_reg_slots[value_pair->At(1).reg()]);
     } else {
       ASSERT(value_pair->At(1).IsStackSlot());
diff --git a/runtime/vm/compiler/backend/locations.h b/runtime/vm/compiler/backend/locations.h
index 8da9934..8be7779 100644
--- a/runtime/vm/compiler/backend/locations.h
+++ b/runtime/vm/compiler/backend/locations.h
@@ -7,6 +7,7 @@
 
 #include "vm/allocation.h"
 #include "vm/bitfield.h"
+#include "vm/bitmap.h"
 #include "vm/compiler/assembler/assembler.h"
 #include "vm/log.h"
 
@@ -17,7 +18,6 @@
 class Definition;
 class PairLocation;
 class Value;
-struct FrameLayout;
 
 enum Representation {
   kNoRepresentation,
diff --git a/runtime/vm/compiler/compiler_pass.h b/runtime/vm/compiler/compiler_pass.h
index 1e0f613..8166644 100644
--- a/runtime/vm/compiler/compiler_pass.h
+++ b/runtime/vm/compiler/compiler_pass.h
@@ -52,6 +52,7 @@
 class Precompiler;
 class SpeculativeInliningPolicy;
+class Thread;
 class TimelineStream;
 
 struct CompilerPassState {
   CompilerPassState(Thread* thread,
diff --git a/runtime/vm/compiler/compiler_sources.gni b/runtime/vm/compiler/compiler_sources.gni
index 1b6e95f..24d3c2c 100644
--- a/runtime/vm/compiler/compiler_sources.gni
+++ b/runtime/vm/compiler/compiler_sources.gni
@@ -29,6 +29,7 @@
   "assembler/disassembler_kbc.cc",
   "assembler/disassembler_kbc.h",
   "assembler/disassembler_x86.cc",
+  "assembler/object_pool_builder.h",
   "backend/block_scheduler.cc",
   "backend/block_scheduler.h",
   "backend/branch_optimizer.cc",
@@ -119,6 +120,8 @@
   "method_recognizer.h",
   "relocation.cc",
   "relocation.h",
+  "runtime_api.cc",
+  "runtime_api.h",
 ]
 
 compiler_sources_tests = [
diff --git a/runtime/vm/compiler/frontend/bytecode_flow_graph_builder.cc b/runtime/vm/compiler/frontend/bytecode_flow_graph_builder.cc
index 3562378..71688da 100644
--- a/runtime/vm/compiler/frontend/bytecode_flow_graph_builder.cc
+++ b/runtime/vm/compiler/frontend/bytecode_flow_graph_builder.cc
@@ -597,7 +597,7 @@
     store_type_args += B->LoadArgDescriptor();
     store_type_args += B->LoadNativeField(Slot::ArgumentsDescriptor_count());
     store_type_args += B->LoadFpRelativeSlot(
-        kWordSize * (1 + compiler_frame_layout.param_end_from_fp));
+        kWordSize * (1 + compiler::target::frame_layout.param_end_from_fp));
     store_type_args +=
         B->StoreLocalRaw(TokenPosition::kNoSource, type_args_var);
     store_type_args += B->Drop();
@@ -1364,7 +1364,7 @@
 }
 
 static bool IsICDataEntry(const ObjectPool& object_pool, intptr_t index) {
-  if (object_pool.TypeAt(index) != ObjectPool::kTaggedObject) {
+  if (object_pool.TypeAt(index) != ObjectPool::EntryType::kTaggedObject) {
     return false;
   }
   RawObject* entry = object_pool.ObjectAt(index);
diff --git a/runtime/vm/compiler/frontend/bytecode_reader.cc b/runtime/vm/compiler/frontend/bytecode_reader.cc
index 9f705c5..271ccdd 100644
--- a/runtime/vm/compiler/frontend/bytecode_reader.cc
+++ b/runtime/vm/compiler/frontend/bytecode_reader.cc
@@ -478,7 +478,8 @@
         // InstanceField constant occupies 2 entries.
         // The first entry is used for field offset.
         obj = Smi::New(field.Offset() / kWordSize);
-        pool.SetTypeAt(i, ObjectPool::kTaggedObject, ObjectPool::kNotPatchable);
+        pool.SetTypeAt(i, ObjectPool::EntryType::kTaggedObject,
+                       ObjectPool::Patchability::kNotPatchable);
         pool.SetObjectAt(i, obj);
         ++i;
         ASSERT(i < obj_count);
@@ -562,8 +563,8 @@
       case ConstantPoolTag::kNativeEntry: {
         name = ReadString();
         obj = NativeEntry(function, name);
-        pool.SetTypeAt(i, ObjectPool::kNativeEntryData,
-                       ObjectPool::kNotPatchable);
+        pool.SetTypeAt(i, ObjectPool::EntryType::kNativeEntryData,
+                       ObjectPool::Patchability::kNotPatchable);
         pool.SetObjectAt(i, obj);
         continue;
       }
@@ -620,7 +621,8 @@
         array ^= pool.ObjectAt(arg_desc_index);
         // InterfaceCall constant occupies 2 entries.
         // The first entry is used for selector name.
-        pool.SetTypeAt(i, ObjectPool::kTaggedObject, ObjectPool::kNotPatchable);
+        pool.SetTypeAt(i, ObjectPool::EntryType::kTaggedObject,
+                       ObjectPool::Patchability::kNotPatchable);
         pool.SetObjectAt(i, name);
         ++i;
         ASSERT(i < obj_count);
@@ -630,7 +632,8 @@
       default:
         UNREACHABLE();
     }
-    pool.SetTypeAt(i, ObjectPool::kTaggedObject, ObjectPool::kNotPatchable);
+    pool.SetTypeAt(i, ObjectPool::EntryType::kTaggedObject,
+                   ObjectPool::Patchability::kNotPatchable);
     pool.SetObjectAt(i, obj);
   }
 }
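
The pool entry tags are now written with their enum type spelled out
(EntryType, Patchability), which suggests ObjectPool's nested enums became
scoped enumerations. Under that assumption, the old unqualified enumerator
names no longer resolve:

    // before (assumed to no longer compile):
    //   pool.SetTypeAt(i, ObjectPool::kTaggedObject, ObjectPool::kNotPatchable);
    // after:
    pool.SetTypeAt(i, ObjectPool::EntryType::kTaggedObject,
                   ObjectPool::Patchability::kNotPatchable);
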
diff --git a/runtime/vm/compiler/frontend/kernel_binary_flowgraph.cc b/runtime/vm/compiler/frontend/kernel_binary_flowgraph.cc
index 280ee7c..1af6d53 100644
--- a/runtime/vm/compiler/frontend/kernel_binary_flowgraph.cc
+++ b/runtime/vm/compiler/frontend/kernel_binary_flowgraph.cc
@@ -692,15 +692,15 @@
       body += LoadLocal(parsed_function()->current_context_var());
       body += B->LoadNativeField(
           Slot::GetContextVariableSlotFor(thread(), *scopes()->this_variable));
-      body += B->StoreFpRelativeSlot(kWordSize *
-                                     compiler_frame_layout.param_end_from_fp);
+      body += B->StoreFpRelativeSlot(
+          kWordSize * compiler::target::frame_layout.param_end_from_fp);
     } else {
       body += LoadLocal(parsed_function()->current_context_var());
       body += B->LoadNativeField(
           Slot::GetContextVariableSlotFor(thread(), *scopes()->this_variable));
       body += B->StoreFpRelativeSlot(
-          kWordSize *
-          (compiler_frame_layout.param_end_from_fp + function.NumParameters()));
+          kWordSize * (compiler::target::frame_layout.param_end_from_fp +
+                       function.NumParameters()));
     }
   }
 
@@ -804,8 +804,8 @@
     loop_body += LoadLocal(argument_count);
     loop_body += LoadLocal(index);
     loop_body += B->SmiBinaryOp(Token::kSUB, /*truncate=*/true);
-    loop_body += B->LoadFpRelativeSlot(kWordSize *
-                                       compiler_frame_layout.param_end_from_fp);
+    loop_body += B->LoadFpRelativeSlot(
+        kWordSize * compiler::target::frame_layout.param_end_from_fp);
     loop_body += StoreIndexed(kArrayCid);
 
     // ++i
diff --git a/runtime/vm/compiler/frontend/prologue_builder.cc b/runtime/vm/compiler/frontend/prologue_builder.cc
index 1ec7fd2..1149d15 100644
--- a/runtime/vm/compiler/frontend/prologue_builder.cc
+++ b/runtime/vm/compiler/frontend/prologue_builder.cc
@@ -181,8 +181,8 @@
   for (; param < num_fixed_params; ++param) {
     copy_args_prologue += LoadLocal(optional_count_var);
     copy_args_prologue += LoadFpRelativeSlot(
-        kWordSize *
-        (compiler_frame_layout.param_end_from_fp + num_fixed_params - param));
+        kWordSize * (compiler::target::frame_layout.param_end_from_fp +
+                     num_fixed_params - param));
     copy_args_prologue +=
         StoreLocalRaw(TokenPosition::kNoSource, ParameterVariable(param));
     copy_args_prologue += Drop();
@@ -201,8 +201,8 @@
       Fragment good(supplied);
       good += LoadLocal(optional_count_var);
       good += LoadFpRelativeSlot(
-          kWordSize *
-          (compiler_frame_layout.param_end_from_fp + num_fixed_params - param));
+          kWordSize * (compiler::target::frame_layout.param_end_from_fp +
+                       num_fixed_params - param));
       good += StoreLocalRaw(TokenPosition::kNoSource, ParameterVariable(param));
       good += Drop();
 
@@ -286,7 +286,7 @@
       Fragment good(supplied);
 
       {
-        // fp[compiler_frame_layout.param_end_from_fp + (count_var - pos)]
+        // fp[target::frame_layout.param_end_from_fp + (count_var - pos)]
         good += LoadLocal(count_var);
         {
           // pos = arg_desc[names_offset + arg_desc_name_index + positionOffset]
@@ -299,8 +299,8 @@
           good += LoadIndexed(/* index_scale = */ kWordSize);
         }
         good += SmiBinaryOp(Token::kSUB, /* truncate= */ true);
-        good += LoadFpRelativeSlot(kWordSize *
-                                   compiler_frame_layout.param_end_from_fp);
+        good += LoadFpRelativeSlot(
+            kWordSize * compiler::target::frame_layout.param_end_from_fp);
 
         // Copy down.
         good += StoreLocalRaw(TokenPosition::kNoSource,
@@ -407,7 +407,7 @@
   store_type_args += LoadArgDescriptor();
   store_type_args += LoadNativeField(Slot::ArgumentsDescriptor_count());
   store_type_args += LoadFpRelativeSlot(
-      kWordSize * (1 + compiler_frame_layout.param_end_from_fp));
+      kWordSize * (1 + compiler::target::frame_layout.param_end_from_fp));
   store_type_args += StoreLocal(TokenPosition::kNoSource, type_args_var);
   store_type_args += Drop();
 
diff --git a/runtime/vm/compiler/intrinsifier.h b/runtime/vm/compiler/intrinsifier.h
index 23d5436..694518e 100644
--- a/runtime/vm/compiler/intrinsifier.h
+++ b/runtime/vm/compiler/intrinsifier.h
@@ -12,14 +12,19 @@
 namespace dart {
 
 // Forward declarations.
+namespace compiler {
 class Assembler;
 class Label;
+}  // namespace compiler
 class FlowGraphCompiler;
 class Function;
 class TargetEntryInstr;
 class ParsedFunction;
 class FlowGraph;
 
+using compiler::Assembler;
+using compiler::Label;
+
 class Intrinsifier : public AllStatic {
  public:
   static bool Intrinsify(const ParsedFunction& parsed_function,
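
The intrinsifier header above shows the migration pattern used throughout
this change: move a class into dart::compiler, forward declare it there, and
hoist it back with a using-declaration so unqualified references keep
compiling while files are ported. In miniature:

    namespace compiler {
    class Assembler;  // now lives in dart::compiler
    }  // namespace compiler
    using compiler::Assembler;  // legacy unqualified uses still resolve
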
diff --git a/runtime/vm/compiler/intrinsifier_arm.cc b/runtime/vm/compiler/intrinsifier_arm.cc
index 8b51488..beb9d4e 100644
--- a/runtime/vm/compiler/intrinsifier_arm.cc
+++ b/runtime/vm/compiler/intrinsifier_arm.cc
@@ -109,7 +109,6 @@
       sizeof(Raw##type_name) + kObjectAlignment - 1;                           \
   __ AddImmediate(R2, fixed_size_plus_alignment_padding);                      \
   __ bic(R2, R2, Operand(kObjectAlignment - 1));                               \
-  NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);                              \
   __ ldr(R0, Address(THR, Thread::top_offset()));                              \
                                                                                \
   /* R2: allocation size. */                                                   \
@@ -176,7 +175,7 @@
   __ b(&init_loop, CC);                                                        \
   __ str(R8, Address(R3, -2 * kWordSize), HI);                                 \
                                                                                \
-  NOT_IN_PRODUCT(__ IncrementAllocationStatsWithSize(R4, R2, space));          \
+  NOT_IN_PRODUCT(__ IncrementAllocationStatsWithSize(R4, R2));                 \
   __ Ret();                                                                    \
   __ Bind(normal_ir_body);
 
@@ -1943,7 +1942,6 @@
   __ bic(length_reg, length_reg, Operand(kObjectAlignment - 1));
 
   const intptr_t cid = kOneByteStringCid;
-  NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
   __ ldr(R0, Address(THR, Thread::top_offset()));
 
   // length_reg: allocation size.
@@ -1993,7 +1991,7 @@
   __ LoadImmediate(TMP, 0);
   __ StoreIntoObjectNoBarrier(R0, FieldAddress(R0, String::hash_offset()), TMP);
 
-  NOT_IN_PRODUCT(__ IncrementAllocationStatsWithSize(R4, R2, space));
+  NOT_IN_PRODUCT(__ IncrementAllocationStatsWithSize(R4, R2));
   __ b(ok);
 
   __ Bind(&fail);
diff --git a/runtime/vm/compiler/intrinsifier_arm64.cc b/runtime/vm/compiler/intrinsifier_arm64.cc
index 16f8f0b..79a6cc4 100644
--- a/runtime/vm/compiler/intrinsifier_arm64.cc
+++ b/runtime/vm/compiler/intrinsifier_arm64.cc
@@ -127,7 +127,6 @@
       sizeof(Raw##type_name) + kObjectAlignment - 1;                           \
   __ AddImmediate(R2, fixed_size_plus_alignment_padding);                      \
   __ andi(R2, R2, Immediate(~(kObjectAlignment - 1)));                         \
-  NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);                              \
   __ ldr(R0, Address(THR, Thread::top_offset()));                              \
                                                                                \
   /* R2: allocation size. */                                                   \
@@ -146,7 +145,7 @@
   /* next object start and initialize the object. */                           \
   __ str(R1, Address(THR, Thread::top_offset()));                              \
   __ AddImmediate(R0, kHeapObjectTag);                                         \
-  NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, R2, space));            \
+  NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, R2));                   \
   /* Initialize the tags. */                                                   \
   /* R0: new object start as a tagged pointer. */                              \
   /* R1: new object end address. */                                            \
@@ -2009,7 +2008,6 @@
   __ andi(length_reg, length_reg, Immediate(~(kObjectAlignment - 1)));
 
   const intptr_t cid = kOneByteStringCid;
-  NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
   __ ldr(R0, Address(THR, Thread::top_offset()));
 
   // length_reg: allocation size.
@@ -2028,7 +2026,7 @@
   // next object start and initialize the object.
   __ str(R1, Address(THR, Thread::top_offset()));
   __ AddImmediate(R0, kHeapObjectTag);
-  NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, R2, space));
+  NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, R2));
 
   // Initialize the tags.
   // R0: new object start as a tagged pointer.
diff --git a/runtime/vm/compiler/intrinsifier_ia32.cc b/runtime/vm/compiler/intrinsifier_ia32.cc
index ff8de4f..4229e6e 100644
--- a/runtime/vm/compiler/intrinsifier_ia32.cc
+++ b/runtime/vm/compiler/intrinsifier_ia32.cc
@@ -113,7 +113,6 @@
       sizeof(Raw##type_name) + kObjectAlignment - 1;                           \
   __ leal(EDI, Address(EDI, scale_factor, fixed_size_plus_alignment_padding)); \
   __ andl(EDI, Immediate(-kObjectAlignment));                                  \
-  NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);                              \
   __ movl(EAX, Address(THR, Thread::top_offset()));                            \
   __ movl(EBX, EAX);                                                           \
                                                                                \
@@ -132,7 +131,7 @@
   /* next object start and initialize the object. */                           \
   __ movl(Address(THR, Thread::top_offset()), EBX);                            \
   __ addl(EAX, Immediate(kHeapObjectTag));                                     \
-  NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, EDI, ECX, space));      \
+  NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, EDI, ECX));             \
                                                                                \
   /* Initialize the tags. */                                                   \
   /* EAX: new object start as a tagged pointer. */                             \
@@ -1936,7 +1935,6 @@
   __ andl(EDI, Immediate(-kObjectAlignment));
 
   const intptr_t cid = kOneByteStringCid;
-  NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
   __ movl(EAX, Address(THR, Thread::top_offset()));
   __ movl(EBX, EAX);
 
@@ -1956,7 +1954,7 @@
   __ movl(Address(THR, Thread::top_offset()), EBX);
   __ addl(EAX, Immediate(kHeapObjectTag));
 
-  NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, EDI, ECX, space));
+  NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, EDI, ECX));
 
   // Initialize the tags.
   // EAX: new object start as a tagged pointer.
diff --git a/runtime/vm/compiler/intrinsifier_x64.cc b/runtime/vm/compiler/intrinsifier_x64.cc
index ac56407..e313634 100644
--- a/runtime/vm/compiler/intrinsifier_x64.cc
+++ b/runtime/vm/compiler/intrinsifier_x64.cc
@@ -114,7 +114,6 @@
       sizeof(Raw##type_name) + kObjectAlignment - 1;                           \
   __ leaq(RDI, Address(RDI, scale_factor, fixed_size_plus_alignment_padding)); \
   __ andq(RDI, Immediate(-kObjectAlignment));                                  \
-  NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);                              \
   __ movq(RAX, Address(THR, Thread::top_offset()));                            \
   __ movq(RCX, RAX);                                                           \
                                                                                \
@@ -133,7 +132,7 @@
   /* next object start and initialize the object. */                           \
   __ movq(Address(THR, Thread::top_offset()), RCX);                            \
   __ addq(RAX, Immediate(kHeapObjectTag));                                     \
-  NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, RDI, space));           \
+  NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, RDI));                  \
   /* Initialize the tags. */                                                   \
   /* RAX: new object start as a tagged pointer. */                             \
   /* RCX: new object end address. */                                           \
@@ -1965,7 +1964,6 @@
   __ andq(RDI, Immediate(-kObjectAlignment));
 
   const intptr_t cid = kOneByteStringCid;
-  NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
   __ movq(RAX, Address(THR, Thread::top_offset()));
 
   // RDI: allocation size.
@@ -1984,7 +1982,7 @@
   // next object start and initialize the object.
   __ movq(Address(THR, Thread::top_offset()), RCX);
   __ addq(RAX, Immediate(kHeapObjectTag));
-  NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, RDI, space));
+  NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, RDI));
 
   // Initialize the tags.
   // RAX: new object start as a tagged pointer.
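
The same edit repeats across the arm, arm64, ia32 and x64 intrinsifiers: the
local Heap::Space variable pinned to Heap::kNew disappears and the
allocation-stats calls drop their trailing space argument. The likely reading
is that the assembler hooks now attribute intrinsic allocations to new space
unconditionally, making the parameter dead. Assumed shape of the simplified
hook (register operands vary per architecture):

    // Assumption: stats are always attributed to Heap::kNew now.
    void UpdateAllocationStatsWithSize(intptr_t cid, Register size_reg);
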
diff --git a/runtime/vm/compiler/jit/compiler.cc b/runtime/vm/compiler/jit/compiler.cc
index 22acf86..2fa6596 100644
--- a/runtime/vm/compiler/jit/compiler.cc
+++ b/runtime/vm/compiler/jit/compiler.cc
@@ -125,7 +125,6 @@
     FLAG_deoptimize_alot = false;  // Used in some tests.
     FLAG_deoptimize_every = 0;     // Used in some tests.
     FLAG_load_deferred_eagerly = true;
-    FLAG_print_stop_message = false;
     FLAG_use_osr = false;
 #endif
   }
@@ -667,8 +666,8 @@
 
       ASSERT(pass_state.inline_id_to_function.length() ==
              pass_state.caller_inline_id.length());
-      ObjectPoolWrapper object_pool_wrapper;
-      Assembler assembler(&object_pool_wrapper, use_far_branches);
+      ObjectPoolBuilder object_pool_builder;
+      Assembler assembler(&object_pool_builder, use_far_branches);
       FlowGraphCompiler graph_compiler(
           &assembler, flow_graph, *parsed_function(), optimized(),
           &speculative_policy, pass_state.inline_id_to_function,
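
ObjectPoolWrapper becomes ObjectPoolBuilder, matching the
assembler/object_pool_builder.h entry added to compiler_sources.gni above;
the assembler accessor is renamed to match, as in the deopt builder further
down:

    intptr_t index = assembler_->object_pool_builder().FindObject(obj);
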
diff --git a/runtime/vm/compiler/runtime_api.cc b/runtime/vm/compiler/runtime_api.cc
new file mode 100644
index 0000000..ccdc912
--- /dev/null
+++ b/runtime/vm/compiler/runtime_api.cc
@@ -0,0 +1,321 @@
+// Copyright (c) 2019, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/compiler/runtime_api.h"
+
+#if !defined(DART_PRECOMPILED_RUNTIME)
+
+#include "vm/longjump.h"
+#include "vm/object.h"
+
+namespace dart {
+namespace compiler {
+
+bool IsSameObject(const Object& a, const Object& b) {
+  return a.raw() == b.raw();
+}
+
+bool IsNotTemporaryScopedHandle(const Object& obj) {
+  return obj.IsNotTemporaryScopedHandle();
+}
+
+bool IsInOldSpace(const Object& obj) {
+  return obj.IsOld();
+}
+
+intptr_t ObjectHash(const Object& obj) {
+  if (obj.IsNull()) {
+    return 2011;
+  }
+  if (obj.IsString() || obj.IsNumber()) {
+    return Instance::Cast(obj).CanonicalizeHash();
+  }
+  if (obj.IsCode()) {
+    // Instructions don't move during compaction.
+    return Code::Cast(obj).PayloadStart();
+  }
+  if (obj.IsFunction()) {
+    return Function::Cast(obj).Hash();
+  }
+  if (obj.IsField()) {
+    return dart::String::HashRawSymbol(Field::Cast(obj).name());
+  }
+  // Unlikely.
+  return obj.GetClassId();
+}
+
+void SetToNull(Object* obj) {
+  *obj = Object::null();
+}
+
+Object& NewZoneHandle(Zone* zone) {
+  return Object::ZoneHandle(zone, Object::null());
+}
+
+Object& NewZoneHandle(Zone* zone, const Object& obj) {
+  return Object::ZoneHandle(zone, obj.raw());
+}
+
+bool IsOriginalObject(const Object& object) {
+  if (object.IsICData()) {
+    return ICData::Cast(object).IsOriginal();
+  } else if (object.IsField()) {
+    return Field::Cast(object).IsOriginal();
+  }
+  return true;
+}
+
+const String& AllocateString(const char* buffer) {
+  return String::ZoneHandle(String::New(buffer));
+}
+
+bool HasIntegerValue(const dart::Object& object, int64_t* value) {
+  if (object.IsInteger()) {
+    *value = Integer::Cast(object).AsInt64Value();
+    return true;
+  }
+  return false;
+}
+
+int32_t CreateJitCookie() {
+  return static_cast<int32_t>(Isolate::Current()->random()->NextUInt32());
+}
+
+void BailoutWithBranchOffsetError() {
+  Thread::Current()->long_jump_base()->Jump(1, Object::branch_offset_error());
+}
+
+namespace target {
+
+uint32_t MakeTagWordForNewSpaceObject(classid_t cid, uword instance_size) {
+  return dart::RawObject::SizeTag::encode(instance_size) |
+         dart::RawObject::ClassIdTag::encode(cid) |
+         dart::RawObject::NewBit::encode(true);
+}
+
+word Object::tags_offset() {
+  return dart::Object::tags_offset();
+}
+
+const word RawObject::kClassIdTagPos = dart::RawObject::kClassIdTagPos;
+
+const word RawObject::kClassIdTagSize = dart::RawObject::kClassIdTagSize;
+
+const word RawObject::kBarrierOverlapShift =
+    dart::RawObject::kBarrierOverlapShift;
+
+intptr_t ObjectPool::element_offset(intptr_t index) {
+  return dart::ObjectPool::element_offset(index);
+}
+
+classid_t Class::GetId(const dart::Class& handle) {
+  return handle.id();
+}
+
+uword Class::GetInstanceSize(const dart::Class& handle) {
+  return handle.instance_size();
+}
+
+word Instance::DataOffsetFor(intptr_t cid) {
+  return dart::Instance::DataOffsetFor(cid);
+}
+
+bool Heap::IsAllocatableInNewSpace(intptr_t instance_size) {
+  return dart::Heap::IsAllocatableInNewSpace(instance_size);
+}
+
+word Thread::top_offset() {
+  return dart::Thread::top_offset();
+}
+
+word Thread::end_offset() {
+  return dart::Thread::end_offset();
+}
+
+word Thread::isolate_offset() {
+  return dart::Thread::isolate_offset();
+}
+
+#if !defined(TARGET_ARCH_DBC)
+word Thread::call_to_runtime_entry_point_offset() {
+  return dart::Thread::call_to_runtime_entry_point_offset();
+}
+
+word Thread::null_error_shared_with_fpu_regs_entry_point_offset() {
+  return dart::Thread::null_error_shared_with_fpu_regs_entry_point_offset();
+}
+
+word Thread::null_error_shared_without_fpu_regs_entry_point_offset() {
+  return dart::Thread::null_error_shared_without_fpu_regs_entry_point_offset();
+}
+
+word Thread::monomorphic_miss_entry_offset() {
+  return dart::Thread::monomorphic_miss_entry_offset();
+}
+
+word Thread::write_barrier_mask_offset() {
+  return dart::Thread::write_barrier_mask_offset();
+}
+
+word Thread::write_barrier_entry_point_offset() {
+  return dart::Thread::write_barrier_entry_point_offset();
+}
+
+word Thread::array_write_barrier_entry_point_offset() {
+  return dart::Thread::array_write_barrier_entry_point_offset();
+}
+#endif  // !defined(TARGET_ARCH_DBC)
+
+#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64) ||                  \
+    defined(TARGET_ARCH_X64)
+word Thread::write_barrier_wrappers_thread_offset(intptr_t regno) {
+  return dart::Thread::write_barrier_wrappers_thread_offset(
+      static_cast<Register>(regno));
+}
+#endif
+
+word Thread::vm_tag_offset() {
+  return dart::Thread::vm_tag_offset();
+}
+
+#define DECLARE_CONSTANT_OFFSET_GETTER(name)                                   \
+  word Thread::name##_address_offset() {                                       \
+    return dart::Thread::name##_address_offset();                              \
+  }
+THREAD_XMM_CONSTANT_LIST(DECLARE_CONSTANT_OFFSET_GETTER)
+#undef DECLARE_CONSTANT_OFFSET_GETTER
+
+word Isolate::class_table_offset() {
+  return dart::Isolate::class_table_offset();
+}
+
+word ClassTable::table_offset() {
+  return dart::ClassTable::table_offset();
+}
+
+word ClassTable::ClassOffsetFor(intptr_t cid) {
+  return dart::ClassTable::ClassOffsetFor(cid);
+}
+
+#if !defined(PRODUCT)
+word ClassTable::StateOffsetFor(intptr_t cid) {
+  return dart::ClassTable::StateOffsetFor(cid);
+}
+
+word ClassTable::TableOffsetFor(intptr_t cid) {
+  return dart::ClassTable::TableOffsetFor(cid);
+}
+
+word ClassTable::CounterOffsetFor(intptr_t cid, bool is_new) {
+  return dart::ClassTable::CounterOffsetFor(cid, is_new);
+}
+
+word ClassTable::SizeOffsetFor(intptr_t cid, bool is_new) {
+  return dart::ClassTable::SizeOffsetFor(cid, is_new);
+}
+#endif  // !defined(PRODUCT)
+
+const word ClassTable::kSizeOfClassPairLog2 = dart::kSizeOfClassPairLog2;
+
+const intptr_t Instructions::kPolymorphicEntryOffset =
+    dart::Instructions::kPolymorphicEntryOffset;
+
+const intptr_t Instructions::kMonomorphicEntryOffset =
+    dart::Instructions::kMonomorphicEntryOffset;
+
+intptr_t Instructions::HeaderSize() {
+  return dart::Instructions::HeaderSize();
+}
+
+intptr_t Code::object_pool_offset() {
+  return dart::Code::object_pool_offset();
+}
+
+intptr_t Code::saved_instructions_offset() {
+  return dart::Code::saved_instructions_offset();
+}
+
+intptr_t Code::entry_point_offset(CodeEntryKind kind) {
+  return dart::Code::entry_point_offset(kind);
+}
+
+#if !defined(PRODUCT)
+word ClassHeapStats::TraceAllocationMask() {
+  return dart::ClassHeapStats::TraceAllocationMask();
+}
+
+word ClassHeapStats::state_offset() {
+  return dart::ClassHeapStats::state_offset();
+}
+
+word ClassHeapStats::allocated_since_gc_new_space_offset() {
+  return dart::ClassHeapStats::allocated_since_gc_new_space_offset();
+}
+
+word ClassHeapStats::allocated_size_since_gc_new_space_offset() {
+  return dart::ClassHeapStats::allocated_size_since_gc_new_space_offset();
+}
+#endif  // !defined(PRODUCT)
+
+word Double::value_offset() {
+  return dart::Double::value_offset();
+}
+
+word Float32x4::value_offset() {
+  return dart::Float32x4::value_offset();
+}
+
+word Float64x2::value_offset() {
+  return dart::Float64x2::value_offset();
+}
+
+bool IsSmi(const dart::Object& a) {
+  return a.IsSmi();
+}
+
+word ToRawSmi(const dart::Object& a) {
+  ASSERT(a.IsSmi());
+  return reinterpret_cast<word>(a.raw());
+}
+
+word ToRawSmi(intptr_t value) {
+  return dart::Smi::RawValue(value);
+}
+
+bool CanLoadFromThread(const dart::Object& object,
+                       word* offset /* = nullptr */) {
+  if (dart::Thread::CanLoadFromThread(object)) {
+    if (offset != nullptr) {
+      *offset = dart::Thread::OffsetFromThread(object);
+    }
+    return true;
+  }
+  return false;
+}
+
+#if defined(TARGET_ARCH_IA32)
+uword Code::EntryPointOf(const dart::Code& code) {
+  static_assert(kHostWordSize == kWordSize,
+                "Can't embed raw pointers to runtime objects when host and "
+                "target word sizes are different");
+  return code.EntryPoint();
+}
+
+bool CanEmbedAsRawPointerInGeneratedCode(const dart::Object& obj) {
+  return obj.IsSmi() || obj.InVMHeap();
+}
+
+word ToRawPointer(const dart::Object& a) {
+  static_assert(kHostWordSize == kWordSize,
+                "Can't embed raw pointers to runtime objects when host and "
+                "target word sizes are different");
+  return reinterpret_cast<word>(a.raw());
+}
+#endif  // defined(TARGET_ARCH_IA32)
+
+}  // namespace target
+}  // namespace compiler
+}  // namespace dart
+
+#endif  // !defined(DART_PRECOMPILED_RUNTIME)
diff --git a/runtime/vm/compiler/runtime_api.h b/runtime/vm/compiler/runtime_api.h
new file mode 100644
index 0000000..d129708
--- /dev/null
+++ b/runtime/vm/compiler/runtime_api.h
@@ -0,0 +1,350 @@
+// Copyright (c) 2019, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#ifndef RUNTIME_VM_COMPILER_RUNTIME_API_H_
+#define RUNTIME_VM_COMPILER_RUNTIME_API_H_
+
+// This header defines the API that the compiler can use to interact with the
+// underlying Dart runtime that it is embedded into.
+//
+// The compiler is not allowed to interact with any objects directly - it can
+// only use classes like dart::Object, dart::Code, dart::Function and similar
+// as opaque handles. All interactions should go through the helper methods
+// provided by this header.
+//
+// This header also provides ways to get word sizes, the frame layout and
+// field offsets for the target runtime. Note that these can differ from
+// those on the host. Helpers providing access to these values live in the
+// compiler::target namespace.
+
+#include "platform/globals.h"
+#include "vm/allocation.h"
+#include "vm/bitfield.h"
+#include "vm/code_entry_kind.h"
+#include "vm/frame_layout.h"
+#include "vm/pointer_tagging.h"
+
+namespace dart {
+
+// Forward declarations.
+class Class;
+class Code;
+class Function;
+class LocalVariable;
+class Object;
+class String;
+class Zone;
+namespace compiler {
+class Assembler;
+}
+
+namespace compiler {
+
+// Host word sizes.
+//
+// Code in the compiler namespace should not use kWordSize and derived
+// constants directly because the word size on host and target might
+// be different.
+//
+// To prevent this we introduce variables that shadow these constants and
+// cause compilation errors when they are used.
+//
+// target::kWordSize and target::ObjectAlignment give access to the word
+// size and object alignment offsets for the target.
+//
+// Similarly kHostWordSize gives access to the host word size.
+class InvalidClass {};
+extern InvalidClass kWordSize;
+extern InvalidClass kWordSizeLog2;
+extern InvalidClass kNewObjectAlignmentOffset;
+extern InvalidClass kOldObjectAlignmentOffset;
+extern InvalidClass kNewObjectBitPosition;
+extern InvalidClass kObjectAlignment;
+extern InvalidClass kObjectAlignmentLog2;
+extern InvalidClass kObjectAlignmentMask;
+
+static constexpr intptr_t kHostWordSize = dart::kWordSize;
+static constexpr intptr_t kHostWordSizeLog2 = dart::kWordSizeLog2;
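+
+// For example (illustrative): inside dart::compiler,
+//
+//   intptr_t size = kWordSize;          // error: kWordSize is an InvalidClass
+//   intptr_t size = target::kWordSize;  // OK, explicit about the target
+//   intptr_t size = dart::kWordSize;    // OK, explicit about the host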
+
+//
+// Object handles.
+//
+
+// Create an empty handle.
+Object& NewZoneHandle(Zone* zone);
+
+// Clone the given handle.
+Object& NewZoneHandle(Zone* zone, const Object&);
+
+// Returns true if [a] and [b] are the same object.
+bool IsSameObject(const Object& a, const Object& b);
+
+// Returns true if the given handle is a zone handle or one of the global
+// cached handles.
+bool IsNotTemporaryScopedHandle(const Object& obj);
+
+// Returns true if [obj] resides in old space.
+bool IsInOldSpace(const Object& obj);
+
+// Returns true if [object] is not a Field/ICData clone.
+//
+// Used to assert that we are not embedding pointers to cloned objects used
+// by the background compiler into object pools or code.
+bool IsOriginalObject(const Object& object);
+
+// Clear the given handle.
+void SetToNull(Object* obj);
+
+// Helper functions to upcast handles.
+//
+// Note: compiler code cannot include object.h so it cannot see that Object is
+// a superclass of Code or Function - thus we have to cast these pointers using
+// reinterpret_cast.
+inline const Object& ToObject(const Code& handle) {
+  return *reinterpret_cast<const Object*>(&handle);
+}
+
+inline const Object& ToObject(const Function& handle) {
+  return *reinterpret_cast<const Object*>(&handle);
+}
+
+// Returns some hash value for the given object.
+//
+// Note: the returned hash value does not necessarily match Object.get:hashCode
+// or the canonical hash.
+intptr_t ObjectHash(const Object& obj);
+
+// If the given object represents a Dart integer, returns true and sets
+// [value] to the value of the integer.
+bool HasIntegerValue(const dart::Object& obj, int64_t* value);
+
+// Creates a random cookie to be used for masking constants embedded in the
+// generated code.
+int32_t CreateJitCookie();
+
+class RuntimeEntry : public ValueObject {
+ public:
+  virtual ~RuntimeEntry() {}
+  virtual void Call(compiler::Assembler* assembler,
+                    intptr_t argument_count) const = 0;
+};
+
+// Allocate a string object with the given content in the runtime heap.
+const String& AllocateString(const char* buffer);
+
+DART_NORETURN void BailoutWithBranchOffsetError();
+
+// compiler::target namespace contains information about the target platform:
+//
+//    - word sizes and derived constants
+//    - offsets of fields
+//    - sizes of structures
+namespace target {
+
+// Currently we define target::word to match dart::word, which represents a
+// host word.
+//
+// Once the refactoring of the compiler is complete we will switch
+// target::word to be independent of the host word.
+typedef dart::word word;
+typedef dart::uword uword;
+
+static constexpr word kWordSize = dart::kWordSize;
+static constexpr word kWordSizeLog2 = dart::kWordSizeLog2;
+static_assert((1 << kWordSizeLog2) == kWordSize,
+              "kWordSizeLog2 should match kWordSize");
+
+using ObjectAlignment = dart::ObjectAlignment<kWordSize, kWordSizeLog2>;
+
+// Information about the frame layout that the compiler should be targeting.
+extern FrameLayout frame_layout;
+
+// Returns the FP-relative offset in bytes at which [variable] can be found
+// (assumes [variable] is not captured).
+inline int FrameOffsetInBytesForVariable(const LocalVariable* variable) {
+  return frame_layout.FrameSlotForVariable(variable) * kWordSize;
+}
+
+// Encode tag word for a heap allocated object with the given class id and
+// size.
+//
+// Note: even on 64-bit platforms we only use lower 32-bits of the tag word.
+uint32_t MakeTagWordForNewSpaceObject(classid_t cid, uword instance_size);
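+//
+// Illustrative expansion (size value hypothetical): for a 16-byte instance
+// the returned tag is
+//   SizeTag::encode(16) | ClassIdTag::encode(cid) | NewBit::encode(true).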
+
+//
+// Target specific information about objects.
+//
+
+// Returns true if the given object can be represented as a Smi on the
+// target platform.
+bool IsSmi(const dart::Object& a);
+
+// Return raw Smi representation of the given object for the target platform.
+word ToRawSmi(const dart::Object& a);
+
+// Return raw Smi representation of the given integer value for the target
+// platform.
+//
+// Note: this method assumes that the caller has validated that the value is
+// representable as a Smi.
+word ToRawSmi(intptr_t value);
+
+// If the given object can be loaded from the thread on the target then
+// return true and set offset (if provided) to the offset from the
+// thread pointer to a field that contains the object.
+bool CanLoadFromThread(const dart::Object& object, word* offset = nullptr);
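+//
+// Typical use at a code generation site (illustrative; the assembler syntax
+// is assumed):
+//
+//   word offset;
+//   if (CanLoadFromThread(object, &offset)) {
+//     __ movq(dst, Address(THR, offset));
+//   }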
+
+// On IA32 we can embed raw pointers into generated code.
+#if defined(TARGET_ARCH_IA32)
+// Returns true if the pointer to the given object can be directly embedded
+// into the generated code (because the object is immortal and immovable).
+bool CanEmbedAsRawPointerInGeneratedCode(const dart::Object& obj);
+
+// Returns raw pointer value for the given object. Should only be invoked
+// if CanEmbedAsRawPointerInGeneratedCode returns true.
+word ToRawPointer(const dart::Object& a);
+#endif  // defined(TARGET_ARCH_IA32)
+
+//
+// Target specific offsets and constants.
+//
+// Currently we use the same names for classes, constants and getters to make
+// migration easier.
+
+class RawObject : public AllStatic {
+ public:
+  static const word kClassIdTagPos;
+  static const word kClassIdTagSize;
+  static const word kBarrierOverlapShift;
+};
+
+class Object : public AllStatic {
+ public:
+  // Offset of the tags word.
+  static word tags_offset();
+};
+
+class ObjectPool : public AllStatic {
+ public:
+  // Return offset to the element with the given [index] in the object pool.
+  static intptr_t element_offset(intptr_t index);
+};
+
+class Class : public AllStatic {
+ public:
+  // Return class id of the given class on the target.
+  static classid_t GetId(const dart::Class& handle);
+
+  // Return instance size for the given class on the target.
+  static uword GetInstanceSize(const dart::Class& handle);
+};
+
+class Instance : public AllStatic {
+ public:
+  static word DataOffsetFor(intptr_t cid);
+};
+
+class Double : public AllStatic {
+ public:
+  static word value_offset();
+};
+
+class Float32x4 : public AllStatic {
+ public:
+  static word value_offset();
+};
+
+class Float64x2 : public AllStatic {
+ public:
+  static word value_offset();
+};
+
+class Thread : public AllStatic {
+ public:
+  static word top_offset();
+  static word end_offset();
+  static word isolate_offset();
+  static word call_to_runtime_entry_point_offset();
+  static word null_error_shared_with_fpu_regs_entry_point_offset();
+  static word null_error_shared_without_fpu_regs_entry_point_offset();
+  static word write_barrier_mask_offset();
+  static word monomorphic_miss_entry_offset();
+  static word write_barrier_wrappers_thread_offset(intptr_t regno);
+  static word array_write_barrier_entry_point_offset();
+  static word write_barrier_entry_point_offset();
+  static word vm_tag_offset();
+
+#define THREAD_XMM_CONSTANT_LIST(V)                                            \
+  V(float_not)                                                                 \
+  V(float_negate)                                                              \
+  V(float_absolute)                                                            \
+  V(float_zerow)                                                               \
+  V(double_negate)                                                             \
+  V(double_abs)
+
+#define DECLARE_CONSTANT_OFFSET_GETTER(name)                                   \
+  static word name##_address_offset();
+  THREAD_XMM_CONSTANT_LIST(DECLARE_CONSTANT_OFFSET_GETTER)
+#undef DECLARE_CONSTANT_OFFSET_GETTER
+};
+
+class Isolate : public AllStatic {
+ public:
+  static word class_table_offset();
+};
+
+class ClassTable : public AllStatic {
+ public:
+  static word table_offset();
+  static word ClassOffsetFor(intptr_t cid);
+#if !defined(PRODUCT)
+  static word StateOffsetFor(intptr_t cid);
+  static word TableOffsetFor(intptr_t cid);
+  static word CounterOffsetFor(intptr_t cid, bool is_new);
+  static word SizeOffsetFor(intptr_t cid, bool is_new);
+#endif  // !defined(PRODUCT)
+  static const word kSizeOfClassPairLog2;
+};
+
+#if !defined(PRODUCT)
+class ClassHeapStats : public AllStatic {
+ public:
+  static word TraceAllocationMask();
+  static word state_offset();
+  static word allocated_since_gc_new_space_offset();
+  static word allocated_size_since_gc_new_space_offset();
+};
+#endif  // !defined(PRODUCT)
+
+class Instructions : public AllStatic {
+ public:
+  static const intptr_t kPolymorphicEntryOffset;
+  static const intptr_t kMonomorphicEntryOffset;
+  static intptr_t HeaderSize();
+};
+
+class Code : public AllStatic {
+ public:
+#if defined(TARGET_ARCH_IA32)
+  static uword EntryPointOf(const dart::Code& code);
+#endif  // defined(TARGET_ARCH_IA32)
+
+  static intptr_t object_pool_offset();
+  static intptr_t entry_point_offset(
+      CodeEntryKind kind = CodeEntryKind::kNormal);
+  static intptr_t saved_instructions_offset();
+};
+
+class Heap : public AllStatic {
+ public:
+  // Return true if an object with the given instance size is allocatable
+  // in new space on the target.
+  static bool IsAllocatableInNewSpace(intptr_t instance_size);
+};
+
+}  // namespace target
+}  // namespace compiler
+}  // namespace dart
+
+#endif  // RUNTIME_VM_COMPILER_RUNTIME_API_H_
diff --git a/runtime/vm/constants_arm.h b/runtime/vm/constants_arm.h
index 08380f8..4e272c5 100644
--- a/runtime/vm/constants_arm.h
+++ b/runtime/vm/constants_arm.h
@@ -496,7 +496,6 @@
       ((AL << kConditionShift) | (0x32 << 20) | (0xf << 12));
 
   static const int32_t kBreakPointCode = 0xdeb0;      // For breakpoint.
-  static const int32_t kStopMessageCode = 0xdeb1;     // For Stop(message).
   static const int32_t kSimulatorBreakCode = 0xdeb2;  // For breakpoint in sim.
   static const int32_t kSimulatorRedirectCode = 0xca11;  // For redirection.
 
diff --git a/runtime/vm/constants_arm64.h b/runtime/vm/constants_arm64.h
index 6067a37..0e2d6ea 100644
--- a/runtime/vm/constants_arm64.h
+++ b/runtime/vm/constants_arm64.h
@@ -882,7 +882,6 @@
 
   // Reserved brk and hlt instruction codes.
   static const int32_t kBreakPointCode = 0xdeb0;      // For breakpoint.
-  static const int32_t kStopMessageCode = 0xdeb1;     // For Stop(message).
   static const int32_t kSimulatorBreakCode = 0xdeb2;  // For breakpoint in sim.
   static const int32_t kSimulatorRedirectCode = 0xca11;  // For redirection.
 
diff --git a/runtime/vm/dart.cc b/runtime/vm/dart.cc
index f39694d..96c2426 100644
--- a/runtime/vm/dart.cc
+++ b/runtime/vm/dart.cc
@@ -95,8 +95,8 @@
 #if defined(TARGET_ARCH_ARM)
   // These offsets are embedded in precompiled instructions. We need simarm
   // (compiler) and arm (runtime) to agree.
-  CHECK_OFFSET(Thread::stack_limit_offset(), 28);
-  CHECK_OFFSET(Thread::object_null_offset(), 88);
+  CHECK_OFFSET(Thread::stack_limit_offset(), 36);
+  CHECK_OFFSET(Thread::object_null_offset(), 96);
   CHECK_OFFSET(SingleTargetCache::upper_limit_offset(), 14);
   CHECK_OFFSET(Isolate::object_store_offset(), 20);
   NOT_IN_PRODUCT(CHECK_OFFSET(sizeof(ClassHeapStats), 168));
@@ -104,8 +104,8 @@
 #if defined(TARGET_ARCH_ARM64)
   // These offsets are embedded in precompiled instructions. We need simarm64
   // (compiler) and arm64 (runtime) to agree.
-  CHECK_OFFSET(Thread::stack_limit_offset(), 56);
-  CHECK_OFFSET(Thread::object_null_offset(), 168);
+  CHECK_OFFSET(Thread::stack_limit_offset(), 72);
+  CHECK_OFFSET(Thread::object_null_offset(), 184);
   CHECK_OFFSET(SingleTargetCache::upper_limit_offset(), 26);
   CHECK_OFFSET(Isolate::object_store_offset(), 40);
   NOT_IN_PRODUCT(CHECK_OFFSET(sizeof(ClassHeapStats), 288));
diff --git a/runtime/vm/deopt_instructions.cc b/runtime/vm/deopt_instructions.cc
index 02d2991..0a48371 100644
--- a/runtime/vm/deopt_instructions.cc
+++ b/runtime/vm/deopt_instructions.cc
@@ -1057,12 +1057,12 @@
       materializations_() {}
 
 intptr_t DeoptInfoBuilder::FindOrAddObjectInTable(const Object& obj) const {
-  return assembler_->object_pool_wrapper().FindObject(obj);
+  return assembler_->object_pool_builder().FindObject(obj);
 }
 
 intptr_t DeoptInfoBuilder::CalculateStackIndex(
     const Location& source_loc) const {
-  intptr_t index = -compiler_frame_layout.VariableIndexForFrameSlot(
+  intptr_t index = -compiler::target::frame_layout.VariableIndexForFrameSlot(
       source_loc.stack_index());
   return index < 0 ? index + num_args_
                    : index + num_args_ + kDartFrameFixedSize;
diff --git a/runtime/vm/flag_list.h b/runtime/vm/flag_list.h
index ea449f3..401a296 100644
--- a/runtime/vm/flag_list.h
+++ b/runtime/vm/flag_list.h
@@ -147,7 +147,6 @@
     "Print live ranges after allocation.")                                     \
   R(print_stacktrace_at_api_error, false, bool, false,                         \
     "Attempt to print a native stack trace when an API error is created.")     \
-  C(print_stop_message, false, false, bool, false, "Print stop message.")      \
   D(print_variable_descriptors, bool, false,                                   \
     "Print variable descriptors in disassembly.")                              \
   R(profiler, false, bool, false, "Enable the profiler.")                      \
diff --git a/runtime/vm/frame_layout.h b/runtime/vm/frame_layout.h
new file mode 100644
index 0000000..f77da40
--- /dev/null
+++ b/runtime/vm/frame_layout.h
@@ -0,0 +1,69 @@
+// Copyright (c) 2019, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#ifndef RUNTIME_VM_FRAME_LAYOUT_H_
+#define RUNTIME_VM_FRAME_LAYOUT_H_
+
+#include "platform/assert.h"
+
+// The FrameLayout structure captures configuration-specific properties of the
+// frame layout used by the runtime system and the compiler.
+//
+// The runtime system uses runtime_frame_layout, defined in stack_frame.h.
+// The compiler uses compiler::target::frame_layout, defined in runtime_api.h.
+
+namespace dart {
+
+// Forward declarations.
+class LocalVariable;
+
+struct FrameLayout {
+  // The offset (in words) from FP to the first object.
+  int first_object_from_fp;
+
+  // The offset (in words) from FP to the last fixed object.
+  int last_fixed_object_from_fp;
+
+  // The offset (in words) from FP to the slot past the last parameter.
+  int param_end_from_fp;
+
+  // The offset (in words) from FP to the first local.
+  int first_local_from_fp;
+
+  // The fixed size of the frame.
+  int dart_fixed_frame_size;
+
+  // The offset (in words) from FP to the saved pool (if applicable).
+  int saved_caller_pp_from_fp;
+
+  // The offset (in words) from FP to the code object (if applicable).
+  int code_from_fp;
+
+  // The number of fixed slots below the saved PC.
+  int saved_below_pc() const { return -first_local_from_fp; }
+
+  // Returns the FP-relative frame slot (an offset in words) at which
+  // [variable] can be found (assumes [variable] is not captured).
+  int FrameSlotForVariable(const LocalVariable* variable) const;
+
+  // Returns the FP-relative frame slot at which [variable_index] can be
+  // found (assumes [variable_index] comes from [LocalVariable::index()] of a
+  // variable that is not captured).
+  int FrameSlotForVariableIndex(int index) const;
+
+  // Returns the variable index from a FP-relative index.
+  intptr_t VariableIndexForFrameSlot(intptr_t frame_slot) const {
+    if (frame_slot <= first_local_from_fp) {
+      return frame_slot - first_local_from_fp;
+    } else {
+      ASSERT(frame_slot > param_end_from_fp);
+      return frame_slot - param_end_from_fp;
+    }
+  }
+
+  // Called to initialize the stack frame layout during startup.
+  static void Init();
+};
+
+}  // namespace dart
+
+#endif  // RUNTIME_VM_FRAME_LAYOUT_H_
diff --git a/runtime/vm/handle_visitor.h b/runtime/vm/handle_visitor.h
new file mode 100644
index 0000000..8e8a2a1
--- /dev/null
+++ b/runtime/vm/handle_visitor.h
@@ -0,0 +1,33 @@
+// Copyright (c) 2011, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#ifndef RUNTIME_VM_HANDLE_VISITOR_H_
+#define RUNTIME_VM_HANDLE_VISITOR_H_
+
+#include "vm/allocation.h"
+#include "vm/flags.h"
+#include "vm/os.h"
+
+namespace dart {
+
+class Thread;
+
+class HandleVisitor {
+ public:
+  explicit HandleVisitor(Thread* thread) : thread_(thread) {}
+  virtual ~HandleVisitor() {}
+
+  Thread* thread() const { return thread_; }
+
+  virtual void VisitHandle(uword addr) = 0;
+
+ private:
+  Thread* thread_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(HandleVisitor);
+};
+
+}  // namespace dart
+
+#endif  // RUNTIME_VM_HANDLE_VISITOR_H_
diff --git a/runtime/vm/handles.cc b/runtime/vm/handles.cc
index 3c429ef..b276a08 100644
--- a/runtime/vm/handles.cc
+++ b/runtime/vm/handles.cc
@@ -84,7 +84,7 @@
 #endif
 }
 
-HandleScope::HandleScope(Thread* thread) : ThreadStackResource(thread) {
+HandleScope::HandleScope(ThreadState* thread) : StackResource(thread) {
   Initialize();
 }
 
diff --git a/runtime/vm/handles.h b/runtime/vm/handles.h
index 522aa02..2b3de59 100644
--- a/runtime/vm/handles.h
+++ b/runtime/vm/handles.h
@@ -8,7 +8,6 @@
 #include "vm/allocation.h"
 #include "vm/flags.h"
 #include "vm/os.h"
-#include "vm/thread_stack_resource.h"
 
 namespace dart {
 
@@ -49,25 +48,10 @@
 
 // Forward declarations.
 class ObjectPointerVisitor;
-class Thread;
+class HandleVisitor;
 
 DECLARE_FLAG(bool, verify_handles);
 
-class HandleVisitor {
- public:
-  explicit HandleVisitor(Thread* thread) : thread_(thread) {}
-  virtual ~HandleVisitor() {}
-
-  Thread* thread() const { return thread_; }
-
-  virtual void VisitHandle(uword addr) = 0;
-
- private:
-  Thread* thread_;
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(HandleVisitor);
-};
-
 template <int kHandleSizeInWords, int kHandlesPerChunk, int kOffsetOfRawPtr>
 class Handles {
  public:
@@ -108,7 +92,6 @@
   // Returns true if specified handle is a zone handle.
   static bool IsZoneHandle(uword handle);
 
- protected:
   // Allocates space for a scoped handle.
   uword AllocateScopedHandle() {
     if (scoped_blocks_->IsFull()) {
@@ -117,6 +100,7 @@
     return scoped_blocks_->AllocateHandle();
   }
 
+ protected:
   // Returns a count of active handles (used for testing purposes).
   int CountScopedHandles() const;
   int CountZoneHandles() const;
@@ -224,7 +208,7 @@
   friend class HandleScope;
   friend class Dart;
   friend class ObjectStore;
-  friend class Thread;
+  friend class ThreadState;
   DISALLOW_ALLOCATION();
   DISALLOW_COPY_AND_ASSIGN(Handles);
 };
@@ -279,9 +263,9 @@
 //   code that creates some scoped handles.
 //   ....
 // }
-class HandleScope : public ThreadStackResource {
+class HandleScope : public StackResource {
  public:
-  explicit HandleScope(Thread* thread);
+  explicit HandleScope(ThreadState* thread);
   ~HandleScope();
 
  private:
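
HandleScope is loosened to take a ThreadState and to derive from plain
StackResource, so handle scopes can be opened from compiler code that only
carries a ThreadState. Existing call sites keep working unchanged (sketch,
assuming Thread derives from ThreadState as the other hunks in this change
suggest):

    void Example(Thread* thread) {
      HandleScope scope(thread);  // Thread* converts to ThreadState*
      // ... allocate scoped handles ...
    }
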
diff --git a/runtime/vm/heap/freelist.cc b/runtime/vm/heap/freelist.cc
index a2012d5..0da39c2 100644
--- a/runtime/vm/heap/freelist.cc
+++ b/runtime/vm/heap/freelist.cc
@@ -270,7 +270,7 @@
     OS::PrintErr(
         "small %3d [%8d bytes] : "
         "%8" Pd " objs; %8.1f KB; %8.1f cum KB\n",
-        i, i * kObjectAlignment, list_length,
+        i, static_cast<int>(i * kObjectAlignment), list_length,
         list_bytes / static_cast<double>(KB),
         small_bytes / static_cast<double>(KB));
   }
diff --git a/runtime/vm/heap/heap.h b/runtime/vm/heap/heap.h
index 23a02d6..746ec7f 100644
--- a/runtime/vm/heap/heap.h
+++ b/runtime/vm/heap/heap.h
@@ -5,6 +5,10 @@
 #ifndef RUNTIME_VM_HEAP_HEAP_H_
 #define RUNTIME_VM_HEAP_HEAP_H_
 
+#if defined(SHOULD_NOT_INCLUDE_RUNTIME)
+#error "Should not include runtime"
+#endif
+
 #include "platform/assert.h"
 #include "vm/allocation.h"
 #include "vm/flags.h"
diff --git a/runtime/vm/heap/verifier.h b/runtime/vm/heap/verifier.h
index d811be6..23ca6b4 100644
--- a/runtime/vm/heap/verifier.h
+++ b/runtime/vm/heap/verifier.h
@@ -7,6 +7,7 @@
 
 #include "vm/flags.h"
 #include "vm/globals.h"
+#include "vm/handle_visitor.h"
 #include "vm/handles.h"
 #include "vm/thread.h"
 #include "vm/visitor.h"
diff --git a/runtime/vm/instructions_arm.cc b/runtime/vm/instructions_arm.cc
index 2f8152a..785f8fb 100644
--- a/runtime/vm/instructions_arm.cc
+++ b/runtime/vm/instructions_arm.cc
@@ -232,7 +232,7 @@
     intptr_t index = ObjectPool::IndexFromOffset(offset);
     const ObjectPool& pool = ObjectPool::Handle(code.object_pool());
     if (!pool.IsNull()) {
-      if (pool.TypeAt(index) == ObjectPool::kTaggedObject) {
+      if (pool.TypeAt(index) == ObjectPool::EntryType::kTaggedObject) {
         *obj = pool.ObjectAt(index);
         return true;
       }
@@ -331,7 +331,8 @@
 }
 
 void BareSwitchableCallPattern::SetTarget(const Code& target) const {
-  ASSERT(object_pool_.TypeAt(target_pool_index_) == ObjectPool::kImmediate);
+  ASSERT(object_pool_.TypeAt(target_pool_index_) ==
+         ObjectPool::EntryType::kImmediate);
   object_pool_.SetRawValueAt(target_pool_index_,
                              target.MonomorphicEntryPoint());
 }
diff --git a/runtime/vm/instructions_arm.h b/runtime/vm/instructions_arm.h
index 9f2d4c3..37b7cef 100644
--- a/runtime/vm/instructions_arm.h
+++ b/runtime/vm/instructions_arm.h
@@ -10,12 +10,21 @@
 #error Do not include instructions_arm.h directly; use instructions.h instead.
 #endif
 
+#include "vm/allocation.h"
+#include "vm/compiler/assembler/assembler.h"
 #include "vm/constants_arm.h"
-#include "vm/native_entry.h"
-#include "vm/object.h"
+#include "vm/native_function.h"
 
 namespace dart {
 
+class Code;
+class ICData;
+class Object;
+class ObjectPool;
+class RawCode;
+class RawICData;
+class RawObject;
+
 class InstructionPattern : public AllStatic {
  public:
   // Decodes a load sequence ending at 'end' (the last instruction of the
diff --git a/runtime/vm/instructions_arm64.cc b/runtime/vm/instructions_arm64.cc
index a5e9543..ea9a118 100644
--- a/runtime/vm/instructions_arm64.cc
+++ b/runtime/vm/instructions_arm64.cc
@@ -12,6 +12,7 @@
 #include "vm/constants_arm64.h"
 #include "vm/cpu.h"
 #include "vm/object.h"
+#include "vm/reverse_pc_lookup_cache.h"
 
 namespace dart {
 
@@ -318,7 +319,7 @@
       intptr_t index = ObjectPool::IndexFromOffset(offset - kHeapObjectTag);
       const ObjectPool& pool = ObjectPool::Handle(code.object_pool());
       if (!pool.IsNull()) {
-        if (pool.TypeAt(index) == ObjectPool::kTaggedObject) {
+        if (pool.TypeAt(index) == ObjectPool::EntryType::kTaggedObject) {
           *obj = pool.ObjectAt(index);
           return true;
         }
@@ -443,7 +444,8 @@
 }
 
 void BareSwitchableCallPattern::SetTarget(const Code& target) const {
-  ASSERT(object_pool_.TypeAt(target_pool_index_) == ObjectPool::kImmediate);
+  ASSERT(object_pool_.TypeAt(target_pool_index_) ==
+         ObjectPool::EntryType::kImmediate);
   object_pool_.SetRawValueAt(target_pool_index_,
                              target.MonomorphicEntryPoint());
 }
diff --git a/runtime/vm/instructions_arm64.h b/runtime/vm/instructions_arm64.h
index 88e523a..bfa1ae3 100644
--- a/runtime/vm/instructions_arm64.h
+++ b/runtime/vm/instructions_arm64.h
@@ -10,13 +10,19 @@
 #error Do not include instructions_arm64.h directly; use instructions.h instead.
 #endif
 
+#include "vm/allocation.h"
+#include "vm/compiler/assembler/assembler.h"
 #include "vm/constants_arm64.h"
-#include "vm/native_entry.h"
-#include "vm/object.h"
-#include "vm/reverse_pc_lookup_cache.h"
+#include "vm/native_function.h"
 
 namespace dart {
 
+class Code;
+class ObjectPool;
+class ICData;
+class RawICData;
+class RawCode;
+
 class InstructionPattern : public AllStatic {
  public:
   // Decodes a load sequence ending at 'end' (the last instruction of the
diff --git a/runtime/vm/instructions_dbc.cc b/runtime/vm/instructions_dbc.cc
index 377343b..b303e95 100644
--- a/runtime/vm/instructions_dbc.cc
+++ b/runtime/vm/instructions_dbc.cc
@@ -43,7 +43,7 @@
   Instr instr = SimulatorBytecode::At(pc);
   if (HasLoadFromPool(instr)) {
     uint16_t index = SimulatorBytecode::DecodeD(instr);
-    if (object_pool.TypeAt(index) == ObjectPool::kTaggedObject) {
+    if (object_pool.TypeAt(index) == ObjectPool::EntryType::kTaggedObject) {
       *obj = object_pool.ObjectAt(index);
       return true;
     }
diff --git a/runtime/vm/instructions_ia32.h b/runtime/vm/instructions_ia32.h
index 2bf8510..ae82c3c 100644
--- a/runtime/vm/instructions_ia32.h
+++ b/runtime/vm/instructions_ia32.h
@@ -12,15 +12,9 @@
 
 #include "vm/allocation.h"
 #include "vm/cpu.h"
-#include "vm/object.h"
 
 namespace dart {
 
-// Forward declarations.
-class RawClass;
-class Immediate;
-class RawObject;
-
 // Template class for all instruction pattern classes.
 // P has to specify a static pattern and a pattern length method.
 template <class P>
diff --git a/runtime/vm/instructions_x64.cc b/runtime/vm/instructions_x64.cc
index d95e027..70ed255 100644
--- a/runtime/vm/instructions_x64.cc
+++ b/runtime/vm/instructions_x64.cc
@@ -38,7 +38,7 @@
         intptr_t index = IndexFromPPLoadDisp32(pc + 3);
         const ObjectPool& pool = ObjectPool::Handle(code.object_pool());
         if (!pool.IsNull()) {
-          if (pool.TypeAt(index) == ObjectPool::kTaggedObject) {
+          if (pool.TypeAt(index) == ObjectPool::EntryType::kTaggedObject) {
             *obj = pool.ObjectAt(index);
             return true;
           }
@@ -48,7 +48,7 @@
         intptr_t index = IndexFromPPLoadDisp8(pc + 3);
         const ObjectPool& pool = ObjectPool::Handle(code.object_pool());
         if (!pool.IsNull()) {
-          if (pool.TypeAt(index) == ObjectPool::kTaggedObject) {
+          if (pool.TypeAt(index) == ObjectPool::EntryType::kTaggedObject) {
             *obj = pool.ObjectAt(index);
             return true;
           }
diff --git a/runtime/vm/instructions_x64.h b/runtime/vm/instructions_x64.h
index 056f48f..da8f077 100644
--- a/runtime/vm/instructions_x64.h
+++ b/runtime/vm/instructions_x64.h
@@ -7,19 +7,13 @@
 #define RUNTIME_VM_INSTRUCTIONS_X64_H_
 
 #ifndef RUNTIME_VM_INSTRUCTIONS_H_
-#error Do not include instructions_ia32.h directly; use instructions.h instead.
+#error "Do not include instructions_x64.h directly; use instructions.h instead."
 #endif
 
 #include "vm/allocation.h"
-#include "vm/object.h"
 
 namespace dart {
 
-// Forward declarations.
-class RawClass;
-class Immediate;
-class RawObject;
-
 intptr_t IndexFromPPLoadDisp8(uword start);
 intptr_t IndexFromPPLoadDisp32(uword start);
 
diff --git a/runtime/vm/isolate.cc b/runtime/vm/isolate.cc
index 9008ab2..ffc3f65 100644
--- a/runtime/vm/isolate.cc
+++ b/runtime/vm/isolate.cc
@@ -89,10 +89,8 @@
     FLAG_random_seed = 0x44617274;  // "Dart"
 #if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
     FLAG_load_deferred_eagerly = true;
-    FLAG_print_stop_message = false;  // Embedds addresses in instructions.
 #else
     COMPILE_ASSERT(FLAG_load_deferred_eagerly);
-    COMPILE_ASSERT(!FLAG_print_stop_message);
 #endif
   }
 }
diff --git a/runtime/vm/isolate.h b/runtime/vm/isolate.h
index 36b4d8d..a7961bb 100644
--- a/runtime/vm/isolate.h
+++ b/runtime/vm/isolate.h
@@ -5,6 +5,10 @@
 #ifndef RUNTIME_VM_ISOLATE_H_
 #define RUNTIME_VM_ISOLATE_H_
 
+#if defined(SHOULD_NOT_INCLUDE_RUNTIME)
+#error "Should not include runtime"
+#endif
+
 #include "include/dart_api.h"
 #include "platform/assert.h"
 #include "platform/atomic.h"
diff --git a/runtime/vm/log.h b/runtime/vm/log.h
index 1c4a6d0..467b95c 100644
--- a/runtime/vm/log.h
+++ b/runtime/vm/log.h
@@ -11,8 +11,8 @@
 
 namespace dart {
 
+class Isolate;
 class LogBlock;
-class Thread;
 
 #if defined(_MSC_VER)
 #define THR_Print(format, ...) Log::Current()->Print(format, __VA_ARGS__)
diff --git a/runtime/vm/megamorphic_cache_table.cc b/runtime/vm/megamorphic_cache_table.cc
index eba6c5f..4595b9c 100644
--- a/runtime/vm/megamorphic_cache_table.cc
+++ b/runtime/vm/megamorphic_cache_table.cc
@@ -52,13 +52,13 @@
 void MegamorphicCacheTable::InitMissHandler(Isolate* isolate) {
   // The miss handler for a class ID not found in the table is invoked as a
   // normal Dart function.
-  ObjectPoolWrapper object_pool_wrapper;
+  ObjectPoolBuilder object_pool_builder;
   const Code& code = Code::Handle(
-      StubCode::Generate("_stub_MegamorphicMiss", &object_pool_wrapper,
+      StubCode::Generate("_stub_MegamorphicMiss", &object_pool_builder,
                          StubCode::GenerateMegamorphicMissStub));
 
   const auto& object_pool =
-      ObjectPool::Handle(object_pool_wrapper.MakeObjectPool());
+      ObjectPool::Handle(ObjectPool::NewFromBuilder(object_pool_builder));
   code.set_object_pool(object_pool.raw());
 
   // When FLAG_lazy_dispatchers=false, this stub can be on the stack during
@@ -88,7 +88,7 @@
 }
 
 void MegamorphicCacheTable::ReInitMissHandlerCode(Isolate* isolate,
-                                                  ObjectPoolWrapper* wrapper) {
+                                                  ObjectPoolBuilder* wrapper) {
   ASSERT(FLAG_precompiled_mode && FLAG_use_bare_instructions);
 
   const Code& code = Code::Handle(StubCode::Generate(
diff --git a/runtime/vm/megamorphic_cache_table.h b/runtime/vm/megamorphic_cache_table.h
index 8d7788a..437fff7 100644
--- a/runtime/vm/megamorphic_cache_table.h
+++ b/runtime/vm/megamorphic_cache_table.h
@@ -9,11 +9,14 @@
 
 namespace dart {
 
+namespace compiler {
+class ObjectPoolBuilder;
+}
+
 class Array;
 class Function;
 class Isolate;
 class ObjectPointerVisitor;
-class ObjectPoolWrapper;
 class RawArray;
 class RawFunction;
 class RawCode;
@@ -33,7 +36,7 @@
   // re-generate the handler to ensure it uses the common object pool.
   NOT_IN_PRECOMPILED(
       static void ReInitMissHandlerCode(Isolate* isolate,
-                                        ObjectPoolWrapper* wrapper));
+                                        compiler::ObjectPoolBuilder* wrapper));
 
   static RawMegamorphicCache* Lookup(Isolate* isolate,
                                      const String& name,
diff --git a/runtime/vm/native_entry.h b/runtime/vm/native_entry.h
index b98e2f4..0892258 100644
--- a/runtime/vm/native_entry.h
+++ b/runtime/vm/native_entry.h
@@ -8,14 +8,13 @@
 #include "platform/memory_sanitizer.h"
 
 #include "vm/allocation.h"
-#include "vm/compiler/assembler/assembler.h"
 #include "vm/exceptions.h"
 #include "vm/heap/verifier.h"
 #include "vm/log.h"
 #include "vm/native_arguments.h"
+#include "vm/native_function.h"
 #include "vm/runtime_entry.h"
 
-#include "include/dart_api.h"
 
 namespace dart {
 
@@ -23,22 +22,6 @@
 class Class;
 class String;
 
-// We have three variants of native functions:
-//  - bootstrap natives, which are called directly from stub code. The callee is
-//    responsible for safepoint transitions and setting up handle scopes as
-//    needed. Only VM-defined natives are bootstrap natives; they cannot be
-//    defined by embedders or native extensions.
-//  - no scope natives, which are called through a wrapper function. The wrapper
-//    function handles the safepoint transition. The callee is responsible for
-//    setting up API scopes as needed.
-//  - auto scope natives, which are called through a wrapper function. The
-//    wrapper function handles the safepoint transition and sets up an API
-//    scope.
-
-typedef void (*NativeFunction)(NativeArguments* arguments);
-typedef void (*NativeFunctionWrapper)(Dart_NativeArguments args,
-                                      Dart_NativeFunction func);
-
 #ifdef DEBUG
 #define TRACE_NATIVE_CALL(format, name)                                        \
   if (FLAG_trace_natives) {                                                    \
diff --git a/runtime/vm/native_function.h b/runtime/vm/native_function.h
new file mode 100644
index 0000000..f5db8fe
--- /dev/null
+++ b/runtime/vm/native_function.h
@@ -0,0 +1,35 @@
+// Copyright (c) 2011, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#ifndef RUNTIME_VM_NATIVE_FUNCTION_H_
+#define RUNTIME_VM_NATIVE_FUNCTION_H_
+
+#include "vm/allocation.h"
+
+#include "include/dart_api.h"
+
+namespace dart {
+
+// Forward declarations.
+class NativeArguments;
+
+// We have three variants of native functions:
+//  - bootstrap natives, which are called directly from stub code. The callee is
+//    responsible for safepoint transitions and setting up handle scopes as
+//    needed. Only VM-defined natives are bootstrap natives; they cannot be
+//    defined by embedders or native extensions.
+//  - no scope natives, which are called through a wrapper function. The wrapper
+//    function handles the safepoint transition. The callee is responsible for
+//    setting up API scopes as needed.
+//  - auto scope natives, which are called through a wrapper function. The
+//    wrapper function handles the safepoint transition and sets up an API
+//    scope.
+
+typedef void (*NativeFunction)(NativeArguments* arguments);
+typedef void (*NativeFunctionWrapper)(Dart_NativeArguments args,
+                                      Dart_NativeFunction func);
+
+}  // namespace dart
+
+#endif  // RUNTIME_VM_NATIVE_FUNCTION_H_
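
A sketch of functions matching the two typedefs above, with hypothetical names (nothing below is part of this change): a bootstrap native is invoked directly and owns its safepoint transitions, while the wrapper shape is what no-scope and auto-scope natives are dispatched through.

    // Hypothetical bootstrap native matching the NativeFunction typedef; the
    // callee is responsible for safepoint transitions and handle scopes.
    void DN_ExampleNative(NativeArguments* arguments) {
      // ... read arguments and store the result ...
    }

    // Hypothetical wrapper matching NativeFunctionWrapper; it performs the
    // safepoint transition and then dispatches to |func|.
    void ExampleNoScopeWrapper(Dart_NativeArguments args,
                               Dart_NativeFunction func) {
      // ... transition to native state, then invoke func(args) ...
    }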
diff --git a/runtime/vm/object.cc b/runtime/vm/object.cc
index 581dbc6..eaced86 100644
--- a/runtime/vm/object.cc
+++ b/runtime/vm/object.cc
@@ -12142,13 +12142,65 @@
     result ^= raw;
     result.SetLength(len);
     for (intptr_t i = 0; i < len; i++) {
-      result.SetTypeAt(i, ObjectPool::kImmediate, ObjectPool::kPatchable);
+      result.SetTypeAt(i, ObjectPool::EntryType::kImmediate,
+                       ObjectPool::Patchability::kPatchable);
     }
   }
 
   return result.raw();
 }
 
+#if !defined(DART_PRECOMPILED_RUNTIME)
+RawObjectPool* ObjectPool::NewFromBuilder(
+    const compiler::ObjectPoolBuilder& builder) {
+  const intptr_t len = builder.CurrentLength();
+  if (len == 0) {
+    return Object::empty_object_pool().raw();
+  }
+  const ObjectPool& result = ObjectPool::Handle(ObjectPool::New(len));
+  for (intptr_t i = 0; i < len; i++) {
+    auto entry = builder.EntryAt(i);
+    auto type = entry.type();
+    auto patchable = entry.patchable();
+    result.SetTypeAt(i, type, patchable);
+    if (type == EntryType::kTaggedObject) {
+      result.SetObjectAt(i, *entry.obj_);
+    } else {
+      result.SetRawValueAt(i, entry.raw_value_);
+    }
+  }
+  return result.raw();
+}
+
+void ObjectPool::CopyInto(compiler::ObjectPoolBuilder* builder) const {
+  ASSERT(builder->CurrentLength() == 0);
+
+  for (intptr_t i = 0; i < Length(); i++) {
+    auto type = TypeAt(i);
+    auto patchable = PatchableAt(i);
+    switch (type) {
+      case compiler::ObjectPoolBuilderEntry::kTaggedObject: {
+        compiler::ObjectPoolBuilderEntry entry(&Object::ZoneHandle(ObjectAt(i)),
+                                               patchable);
+        builder->AddObject(entry);
+        break;
+      }
+      case compiler::ObjectPoolBuilderEntry::kImmediate:
+      case compiler::ObjectPoolBuilderEntry::kNativeFunction:
+      case compiler::ObjectPoolBuilderEntry::kNativeFunctionWrapper: {
+        compiler::ObjectPoolBuilderEntry entry(RawValueAt(i), type, patchable);
+        builder->AddObject(entry);
+        break;
+      }
+      default:
+        UNREACHABLE();
+    }
+  }
+
+  ASSERT(builder->CurrentLength() == Length());
+}
+#endif
+
 const char* ObjectPool::ToCString() const {
   Zone* zone = Thread::Current()->zone();
   return zone->PrintToString("ObjectPool len:%" Pd, Length());
@@ -12159,13 +12211,14 @@
   for (intptr_t i = 0; i < Length(); i++) {
     intptr_t offset = OffsetFromIndex(i);
     THR_Print("  %" Pd " PP+0x%" Px ": ", i, offset);
-    if ((TypeAt(i) == kTaggedObject) || (TypeAt(i) == kNativeEntryData)) {
+    if ((TypeAt(i) == EntryType::kTaggedObject) ||
+        (TypeAt(i) == EntryType::kNativeEntryData)) {
       RawObject* obj = ObjectAt(i);
       THR_Print("0x%" Px " %s (obj)\n", reinterpret_cast<uword>(obj),
                 Object::Handle(obj).ToCString());
-    } else if (TypeAt(i) == kNativeFunction) {
+    } else if (TypeAt(i) == EntryType::kNativeFunction) {
       THR_Print("0x%" Px " (native function)\n", RawValueAt(i));
-    } else if (TypeAt(i) == kNativeFunctionWrapper) {
+    } else if (TypeAt(i) == EntryType::kNativeFunctionWrapper) {
       THR_Print("0x%" Px " (native function wrapper)\n", RawValueAt(i));
     } else {
       THR_Print("0x%" Px " (raw)\n", RawValueAt(i));
@@ -14226,6 +14279,19 @@
   const Code::Comments& comments_;
   String& string_;
 };
+
+static const Code::Comments& CreateCommentsFrom(
+    compiler::Assembler* assembler) {
+  const auto& comments = assembler->comments();
+  Code::Comments& result = Code::Comments::New(comments.length());
+
+  for (intptr_t i = 0; i < comments.length(); i++) {
+    result.SetPCOffsetAt(i, comments[i]->pc_offset());
+    result.SetCommentAt(i, comments[i]->comment());
+  }
+
+  return result;
+}
 #endif
 
 RawCode* Code::FinalizeCode(const char* name,
@@ -14242,7 +14308,10 @@
   ASSERT(assembler != NULL);
   const auto object_pool =
       pool_attachment == PoolAttachment::kAttachPool
-          ? &ObjectPool::Handle(assembler->MakeObjectPool())
+          ? &ObjectPool::Handle(assembler->HasObjectPoolBuilder()
+                                    ? ObjectPool::NewFromBuilder(
+                                          assembler->object_pool_builder())
+                                    : ObjectPool::empty_object_pool().raw())
           : nullptr;
 
   // Allocate the Code and Instructions objects.  Code is allocated first
@@ -14251,7 +14320,7 @@
   intptr_t pointer_offset_count = assembler->CountPointerOffsets();
   Code& code = Code::ZoneHandle(Code::New(pointer_offset_count));
 #ifdef TARGET_ARCH_IA32
-  assembler->set_code_object(code);
+  assembler->GetSelfHandle() = code.raw();
 #endif
   Instructions& instrs = Instructions::ZoneHandle(Instructions::New(
       assembler->CodeSize(), assembler->has_single_entry_point(),
@@ -14314,7 +14383,7 @@
 #endif
 
 #ifndef PRODUCT
-  const Code::Comments& comments = assembler->GetCodeComments();
+  const Code::Comments& comments = CreateCommentsFrom(assembler);
 
   code.set_compile_timestamp(OS::GetCurrentMonotonicMicros());
   CodeCommentsWrapper comments_wrapper(comments);
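
With this change the assembler no longer produces a finished ObjectPool; it only records entries into a compiler::ObjectPoolBuilder, and the runtime materializes the pool via ObjectPool::NewFromBuilder. A condensed sketch of the flow, using only names from this diff (surrounding setup elided):

    compiler::ObjectPoolBuilder builder;
    compiler::Assembler assembler(&builder);
    // ... emit instructions; pool entries accumulate in |builder| ...
    const ObjectPool& pool =
        ObjectPool::Handle(ObjectPool::NewFromBuilder(builder));
    code.set_object_pool(pool.raw());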
diff --git a/runtime/vm/object.h b/runtime/vm/object.h
index cea3fda..b2cadbf 100644
--- a/runtime/vm/object.h
+++ b/runtime/vm/object.h
@@ -5,12 +5,19 @@
 #ifndef RUNTIME_VM_OBJECT_H_
 #define RUNTIME_VM_OBJECT_H_
 
+#if defined(SHOULD_NOT_INCLUDE_RUNTIME)
+#error "Should not include runtime"
+#endif
+
 #include <tuple>
 #include "include/dart_api.h"
 #include "platform/assert.h"
 #include "platform/utils.h"
 #include "vm/bitmap.h"
+#include "vm/code_entry_kind.h"
+#include "vm/compiler/assembler/object_pool_builder.h"
 #include "vm/compiler/method_recognizer.h"
+#include "vm/compiler/runtime_api.h"
 #include "vm/dart.h"
 #include "vm/flags.h"
 #include "vm/globals.h"
@@ -29,6 +36,10 @@
 namespace dart {
 
 // Forward declarations.
+namespace compiler {
+class Assembler;
+}
+
 namespace kernel {
 class Program;
 class TreeNode;
@@ -39,7 +50,6 @@
 #undef DEFINE_FORWARD_DECLARATION
 class Api;
 class ArgumentsDescriptor;
-class Assembler;
 class Closure;
 class Code;
 class DeoptInstr;
@@ -4052,26 +4062,15 @@
 // with it which is stored in-inline after all the entries.
 class ObjectPool : public Object {
  public:
-  enum EntryType {
-    kTaggedObject,
-    kImmediate,
-    kNativeFunction,
-    kNativeFunctionWrapper,
-    kNativeEntryData,
-  };
-
-  enum Patchability {
-    kPatchable,
-    kNotPatchable,
-  };
-
-  class TypeBits : public BitField<uint8_t, EntryType, 0, 7> {};
-  class PatchableBit
-      : public BitField<uint8_t, Patchability, TypeBits::kNextBit, 1> {};
+  using EntryType = compiler::ObjectPoolBuilderEntry::EntryType;
+  using Patchability = compiler::ObjectPoolBuilderEntry::Patchability;
+  using TypeBits = compiler::ObjectPoolBuilderEntry::TypeBits;
+  using PatchableBit = compiler::ObjectPoolBuilderEntry::PatchableBit;
 
   struct Entry {
     Entry() : raw_value_(), type_() {}
-    explicit Entry(const Object* obj) : obj_(obj), type_(kTaggedObject) {}
+    explicit Entry(const Object* obj)
+        : obj_(obj), type_(EntryType::kTaggedObject) {}
     Entry(uword value, EntryType info) : raw_value_(value), type_(info) {}
     union {
       const Object* obj_;
@@ -4109,23 +4108,23 @@
   }
 
   RawObject* ObjectAt(intptr_t index) const {
-    ASSERT((TypeAt(index) == kTaggedObject) ||
-           (TypeAt(index) == kNativeEntryData));
+    ASSERT((TypeAt(index) == EntryType::kTaggedObject) ||
+           (TypeAt(index) == EntryType::kNativeEntryData));
     return EntryAddr(index)->raw_obj_;
   }
   void SetObjectAt(intptr_t index, const Object& obj) const {
-    ASSERT((TypeAt(index) == kTaggedObject) ||
-           (TypeAt(index) == kNativeEntryData) ||
-           (TypeAt(index) == kImmediate && obj.IsSmi()));
+    ASSERT((TypeAt(index) == EntryType::kTaggedObject) ||
+           (TypeAt(index) == EntryType::kNativeEntryData) ||
+           (TypeAt(index) == EntryType::kImmediate && obj.IsSmi()));
     StorePointer(&EntryAddr(index)->raw_obj_, obj.raw());
   }
 
   uword RawValueAt(intptr_t index) const {
-    ASSERT(TypeAt(index) != kTaggedObject);
+    ASSERT(TypeAt(index) != EntryType::kTaggedObject);
     return EntryAddr(index)->raw_value_;
   }
   void SetRawValueAt(intptr_t index, uword raw_value) const {
-    ASSERT(TypeAt(index) != kTaggedObject);
+    ASSERT(TypeAt(index) != EntryType::kTaggedObject);
     StoreNonPointer(&EntryAddr(index)->raw_value_, raw_value);
   }
 
@@ -4150,8 +4149,12 @@
                                  (len * kBytesPerElement));
   }
 
+  static RawObjectPool* NewFromBuilder(
+      const compiler::ObjectPoolBuilder& builder);
   static RawObjectPool* New(intptr_t len);
 
+  void CopyInto(compiler::ObjectPoolBuilder* builder) const;
+
   // Returns the pool index from the offset relative to a tagged RawObjectPool*,
   // adjusting for the tag-bit.
   static intptr_t IndexFromOffset(intptr_t offset) {
@@ -4731,12 +4734,7 @@
     return OFFSET_OF(RawCode, instructions_);
   }
 
-  enum class EntryKind {
-    kNormal,
-    kUnchecked,
-    kMonomorphic,
-    kMonomorphicUnchecked,
-  };
+  using EntryKind = CodeEntryKind;
 
   static intptr_t entry_point_offset(EntryKind kind = EntryKind::kNormal) {
     switch (kind) {
@@ -4755,9 +4753,9 @@
 
   static intptr_t function_entry_point_offset(EntryKind kind) {
     switch (kind) {
-      case Code::EntryKind::kNormal:
+      case EntryKind::kNormal:
         return Function::entry_point_offset();
-      case Code::EntryKind::kUnchecked:
+      case EntryKind::kUnchecked:
         return Function::unchecked_entry_point_offset();
       default:
         ASSERT(false && "Invalid entry kind.");
@@ -5050,13 +5048,13 @@
   // `Object::set_object_pool()`.
   static RawCode* FinalizeCode(const Function& function,
                                FlowGraphCompiler* compiler,
-                               Assembler* assembler,
+                               compiler::Assembler* assembler,
                                PoolAttachment pool_attachment,
                                bool optimized = false,
                                CodeStatistics* stats = nullptr);
   static RawCode* FinalizeCode(const char* name,
                                FlowGraphCompiler* compiler,
-                               Assembler* assembler,
+                               compiler::Assembler* assembler,
                                PoolAttachment pool_attachment,
                                bool optimized,
                                CodeStatistics* stats = nullptr);
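
Because EntryType and Patchability are now aliases for the builder's scoped enums, call sites must spell the constants with an explicit EntryType:: qualifier. A minimal sketch of scanning a pool under the new spelling (this mirrors the mechanical call-site updates below):

    const ObjectPool& pool = ObjectPool::Handle(code.object_pool());
    Object& obj = Object::Handle();
    for (intptr_t i = 0; i < pool.Length(); i++) {
      if (pool.TypeAt(i) == ObjectPool::EntryType::kTaggedObject) {
        obj = pool.ObjectAt(i);  // only tagged entries hold heap objects
      }
    }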
diff --git a/runtime/vm/object_reload.cc b/runtime/vm/object_reload.cc
index c519034..4082a1b 100644
--- a/runtime/vm/object_reload.cc
+++ b/runtime/vm/object_reload.cc
@@ -101,7 +101,7 @@
   Object& object = Object::Handle(zone);
   for (intptr_t i = 0; i < Length(); i++) {
     ObjectPool::EntryType entry_type = TypeAt(i);
-    if (entry_type != ObjectPool::kTaggedObject) {
+    if (entry_type != ObjectPool::EntryType::kTaggedObject) {
       continue;
     }
     object = ObjectAt(i);
diff --git a/runtime/vm/object_service.cc b/runtime/vm/object_service.cc
index f83dd85..c0c1152 100644
--- a/runtime/vm/object_service.cc
+++ b/runtime/vm/object_service.cc
@@ -628,27 +628,27 @@
       JSONObject jsentry(stream);
       jsentry.AddProperty("offset", OffsetFromIndex(i));
       switch (TypeAt(i)) {
-        case ObjectPool::kTaggedObject:
+        case ObjectPool::EntryType::kTaggedObject:
           obj = ObjectAt(i);
           jsentry.AddProperty("kind", "Object");
           jsentry.AddProperty("value", obj);
           break;
-        case ObjectPool::kImmediate:
+        case ObjectPool::EntryType::kImmediate:
           imm = RawValueAt(i);
           jsentry.AddProperty("kind", "Immediate");
           jsentry.AddProperty64("value", imm);
           break;
-        case ObjectPool::kNativeEntryData:
+        case ObjectPool::EntryType::kNativeEntryData:
           obj = ObjectAt(i);
           jsentry.AddProperty("kind", "NativeEntryData");
           jsentry.AddProperty("value", obj);
           break;
-        case ObjectPool::kNativeFunction:
+        case ObjectPool::EntryType::kNativeFunction:
           imm = RawValueAt(i);
           jsentry.AddProperty("kind", "NativeFunction");
           jsentry.AddProperty64("value", imm);
           break;
-        case ObjectPool::kNativeFunctionWrapper:
+        case ObjectPool::EntryType::kNativeFunctionWrapper:
           imm = RawValueAt(i);
           jsentry.AddProperty("kind", "NativeFunctionWrapper");
           jsentry.AddProperty64("value", imm);
diff --git a/runtime/vm/object_test.cc b/runtime/vm/object_test.cc
index c43828a..cb5d990 100644
--- a/runtime/vm/object_test.cc
+++ b/runtime/vm/object_test.cc
@@ -2470,8 +2470,8 @@
 // Test for Code and Instruction object creation.
 ISOLATE_UNIT_TEST_CASE(Code) {
   extern void GenerateIncrement(Assembler * assembler);
-  ObjectPoolWrapper object_pool_wrapper;
-  Assembler _assembler_(&object_pool_wrapper);
+  ObjectPoolBuilder object_pool_builder;
+  Assembler _assembler_(&object_pool_builder);
   GenerateIncrement(&_assembler_);
   const Function& function = Function::Handle(CreateFunction("Test_Code"));
   Code& code = Code::Handle(Code::FinalizeCode(
@@ -2492,8 +2492,8 @@
       MallocHooks::stack_trace_collection_enabled();
   MallocHooks::set_stack_trace_collection_enabled(false);
   extern void GenerateIncrement(Assembler * assembler);
-  ObjectPoolWrapper object_pool_wrapper;
-  Assembler _assembler_(&object_pool_wrapper);
+  ObjectPoolBuilder object_pool_builder;
+  Assembler _assembler_(&object_pool_builder);
   GenerateIncrement(&_assembler_);
   const Function& function = Function::Handle(CreateFunction("Test_Code"));
   Code& code = Code::Handle(Code::FinalizeCode(
@@ -2519,8 +2519,8 @@
   extern void GenerateEmbedStringInCode(Assembler * assembler, const char* str);
   const char* kHello = "Hello World!";
   word expected_length = static_cast<word>(strlen(kHello));
-  ObjectPoolWrapper object_pool_wrapper;
-  Assembler _assembler_(&object_pool_wrapper);
+  ObjectPoolBuilder object_pool_builder;
+  Assembler _assembler_(&object_pool_builder);
   GenerateEmbedStringInCode(&_assembler_, kHello);
   const Function& function =
       Function::Handle(CreateFunction("Test_EmbedStringInCode"));
@@ -2542,8 +2542,8 @@
 ISOLATE_UNIT_TEST_CASE(EmbedSmiInCode) {
   extern void GenerateEmbedSmiInCode(Assembler * assembler, intptr_t value);
   const intptr_t kSmiTestValue = 5;
-  ObjectPoolWrapper object_pool_wrapper;
-  Assembler _assembler_(&object_pool_wrapper);
+  ObjectPoolBuilder object_pool_builder;
+  Assembler _assembler_(&object_pool_builder);
   GenerateEmbedSmiInCode(&_assembler_, kSmiTestValue);
   const Function& function =
       Function::Handle(CreateFunction("Test_EmbedSmiInCode"));
@@ -2560,8 +2560,8 @@
 ISOLATE_UNIT_TEST_CASE(EmbedSmiIn64BitCode) {
   extern void GenerateEmbedSmiInCode(Assembler * assembler, intptr_t value);
   const intptr_t kSmiTestValue = DART_INT64_C(5) << 32;
-  ObjectPoolWrapper object_pool_wrapper;
-  Assembler _assembler_(&object_pool_wrapper);
+  ObjectPoolBuilder object_pool_builder;
+  Assembler _assembler_(&object_pool_builder);
   GenerateEmbedSmiInCode(&_assembler_, kSmiTestValue);
   const Function& function =
       Function::Handle(CreateFunction("Test_EmbedSmiIn64BitCode"));
@@ -2591,8 +2591,8 @@
                                     TokenPosition::kNoSource, true);
 
   extern void GenerateIncrement(Assembler * assembler);
-  ObjectPoolWrapper object_pool_wrapper;
-  Assembler _assembler_(&object_pool_wrapper);
+  ObjectPoolBuilder object_pool_builder;
+  Assembler _assembler_(&object_pool_builder);
   GenerateIncrement(&_assembler_);
   Code& code = Code::Handle(
       Code::FinalizeCode(Function::Handle(CreateFunction("Test_Code")), nullptr,
@@ -2633,8 +2633,8 @@
   descriptors ^= builder->FinalizePcDescriptors(0);
 
   extern void GenerateIncrement(Assembler * assembler);
-  ObjectPoolWrapper object_pool_wrapper;
-  Assembler _assembler_(&object_pool_wrapper);
+  ObjectPoolBuilder object_pool_builder;
+  Assembler _assembler_(&object_pool_builder);
   GenerateIncrement(&_assembler_);
   Code& code = Code::Handle(
       Code::FinalizeCode(Function::Handle(CreateFunction("Test_Code")), nullptr,
@@ -2696,8 +2696,8 @@
   descriptors ^= builder->FinalizePcDescriptors(0);
 
   extern void GenerateIncrement(Assembler * assembler);
-  ObjectPoolWrapper object_pool_wrapper;
-  Assembler _assembler_(&object_pool_wrapper);
+  ObjectPoolBuilder object_pool_builder;
+  Assembler _assembler_(&object_pool_builder);
   GenerateIncrement(&_assembler_);
   Code& code = Code::Handle(
       Code::FinalizeCode(Function::Handle(CreateFunction("Test_Code")), nullptr,
diff --git a/runtime/vm/os.h b/runtime/vm/os.h
index bb8bc1f..e987b5c 100644
--- a/runtime/vm/os.h
+++ b/runtime/vm/os.h
@@ -13,7 +13,6 @@
 namespace dart {
 
 // Forward declarations.
-class Isolate;
 class Zone;
 
 // Interface to the underlying OS platform.
diff --git a/runtime/vm/os_thread.h b/runtime/vm/os_thread.h
index 811bb07..a82ef6f 100644
--- a/runtime/vm/os_thread.h
+++ b/runtime/vm/os_thread.h
@@ -44,12 +44,11 @@
 
  private:
   explicit BaseThread(bool is_os_thread) : is_os_thread_(is_os_thread) {}
-  ~BaseThread() {}
+  virtual ~BaseThread() {}
 
   bool is_os_thread_;
 
   friend class ThreadState;
-  friend class Thread;
   friend class OSThread;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(BaseThread);
diff --git a/runtime/vm/pointer_tagging.h b/runtime/vm/pointer_tagging.h
new file mode 100644
index 0000000..411b6b0
--- /dev/null
+++ b/runtime/vm/pointer_tagging.h
@@ -0,0 +1,70 @@
+// Copyright (c) 2019, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#ifndef RUNTIME_VM_POINTER_TAGGING_H_
+#define RUNTIME_VM_POINTER_TAGGING_H_
+
+#include "platform/globals.h"  // For kWordSize and kWordSizeLog2.
+
+// This header defines constants associated with pointer tagging:
+//
+//    * which bits determine whether or not this is a Smi value or a heap
+//      pointer;
+//    * which bits determine whether this is a pointer into a new or an old
+//      space.
+
+namespace dart {
+
+// Dart VM aligns all objects by 2 words in the old space and misaligns them
+// in new space. This allows distinguishing new and old pointers by their bits.
+//
+// Note: these bits depend on the word size.
+template <intptr_t word_size, intptr_t word_size_log2>
+struct ObjectAlignment {
+  // Alignment offsets are used to determine object age.
+  static constexpr intptr_t kNewObjectAlignmentOffset = word_size;
+  static constexpr intptr_t kOldObjectAlignmentOffset = 0;
+  static constexpr intptr_t kNewObjectBitPosition = word_size_log2;
+
+  // Object sizes are aligned to kObjectAlignment.
+  static constexpr intptr_t kObjectAlignment = 2 * word_size;
+  static constexpr intptr_t kObjectAlignmentLog2 = word_size_log2 + 1;
+  static constexpr intptr_t kObjectAlignmentMask = kObjectAlignment - 1;
+};
+
+using HostObjectAlignment = ObjectAlignment<kWordSize, kWordSizeLog2>;
+
+static constexpr intptr_t kNewObjectAlignmentOffset =
+    HostObjectAlignment::kNewObjectAlignmentOffset;
+static constexpr intptr_t kOldObjectAlignmentOffset =
+    HostObjectAlignment::kOldObjectAlignmentOffset;
+static constexpr intptr_t kNewObjectBitPosition =
+    HostObjectAlignment::kNewObjectBitPosition;
+static constexpr intptr_t kObjectAlignment =
+    HostObjectAlignment::kObjectAlignment;
+static constexpr intptr_t kObjectAlignmentLog2 =
+    HostObjectAlignment::kObjectAlignmentLog2;
+static constexpr intptr_t kObjectAlignmentMask =
+    HostObjectAlignment::kObjectAlignmentMask;
+
+// On all targets heap pointers are tagged with a set least significant bit.
+//
+// To recover the address of the actual heap object, kHeapObjectTag needs to
+// be subtracted from the tagged pointer value.
+//
+// Smis (small integers) have the least significant bit cleared.
+//
+// To recover the integer value, the tagged pointer value needs to be shifted
+// right by kSmiTagShift.
+enum {
+  kSmiTag = 0,
+  kHeapObjectTag = 1,
+  kSmiTagSize = 1,
+  kSmiTagMask = 1,
+  kSmiTagShift = 1,
+};
+
+}  // namespace dart
+
+#endif  // RUNTIME_VM_POINTER_TAGGING_H_
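
The template exists so that these constants can later be instantiated for a target word size that differs from the host's. As an illustration of how the constants compose, a few hypothetical helpers (not part of this change):

    // A value is a Smi iff its least significant bit is clear.
    inline bool IsSmiValue(uword tagged) {
      return (tagged & kSmiTagMask) == kSmiTag;
    }

    // Recover the real address of a heap object by stripping the tag bit.
    inline uword UntaggedAddress(uword tagged) {
      return tagged - kHeapObjectTag;
    }

    // New-space objects are misaligned by one word, so this bit of the
    // address distinguishes new pointers from old ones.
    inline bool IsNewPointer(uword tagged) {
      return (tagged & kNewObjectAlignmentOffset) != 0;
    }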
diff --git a/runtime/vm/raw_object.cc b/runtime/vm/raw_object.cc
index e0d7945..cce58b2 100644
--- a/runtime/vm/raw_object.cc
+++ b/runtime/vm/raw_object.cc
@@ -572,8 +572,8 @@
   for (intptr_t i = 0; i < length; ++i) {
     ObjectPool::EntryType entry_type =
         ObjectPool::TypeBits::decode(entry_bits[i]);
-    if ((entry_type == ObjectPool::kTaggedObject) ||
-        (entry_type == ObjectPool::kNativeEntryData)) {
+    if ((entry_type == ObjectPool::EntryType::kTaggedObject) ||
+        (entry_type == ObjectPool::EntryType::kNativeEntryData)) {
       visitor->VisitPointer(&entries[i].raw_obj_);
     }
   }
diff --git a/runtime/vm/raw_object.h b/runtime/vm/raw_object.h
index 145bf46..2cea309 100644
--- a/runtime/vm/raw_object.h
+++ b/runtime/vm/raw_object.h
@@ -5,12 +5,18 @@
 #ifndef RUNTIME_VM_RAW_OBJECT_H_
 #define RUNTIME_VM_RAW_OBJECT_H_
 
+#if defined(SHOULD_NOT_INCLUDE_RUNTIME)
+#error "Should not include runtime"
+#endif
+
 #include "platform/assert.h"
 #include "platform/atomic.h"
+#include "vm/class_id.h"
 #include "vm/compiler/method_recognizer.h"
 #include "vm/exceptions.h"
 #include "vm/globals.h"
 #include "vm/object_graph.h"
+#include "vm/pointer_tagging.h"
 #include "vm/snapshot.h"
 #include "vm/token.h"
 #include "vm/token_position.h"
@@ -20,127 +26,6 @@
 // For now there are no compressed pointers.
 typedef RawObject* RawCompressed;
 
-// Macrobatics to define the Object hierarchy of VM implementation classes.
-#define CLASS_LIST_NO_OBJECT_NOR_STRING_NOR_ARRAY(V)                           \
-  V(Class)                                                                     \
-  V(PatchClass)                                                                \
-  V(Function)                                                                  \
-  V(ClosureData)                                                               \
-  V(SignatureData)                                                             \
-  V(RedirectionData)                                                           \
-  V(Field)                                                                     \
-  V(Script)                                                                    \
-  V(Library)                                                                   \
-  V(Namespace)                                                                 \
-  V(KernelProgramInfo)                                                         \
-  V(Code)                                                                      \
-  V(Bytecode)                                                                  \
-  V(Instructions)                                                              \
-  V(ObjectPool)                                                                \
-  V(PcDescriptors)                                                             \
-  V(CodeSourceMap)                                                             \
-  V(StackMap)                                                                  \
-  V(LocalVarDescriptors)                                                       \
-  V(ExceptionHandlers)                                                         \
-  V(Context)                                                                   \
-  V(ContextScope)                                                              \
-  V(SingleTargetCache)                                                         \
-  V(UnlinkedCall)                                                              \
-  V(ICData)                                                                    \
-  V(MegamorphicCache)                                                          \
-  V(SubtypeTestCache)                                                          \
-  V(Error)                                                                     \
-  V(ApiError)                                                                  \
-  V(LanguageError)                                                             \
-  V(UnhandledException)                                                        \
-  V(UnwindError)                                                               \
-  V(Instance)                                                                  \
-  V(LibraryPrefix)                                                             \
-  V(TypeArguments)                                                             \
-  V(AbstractType)                                                              \
-  V(Type)                                                                      \
-  V(TypeRef)                                                                   \
-  V(TypeParameter)                                                             \
-  V(Closure)                                                                   \
-  V(Number)                                                                    \
-  V(Integer)                                                                   \
-  V(Smi)                                                                       \
-  V(Mint)                                                                      \
-  V(Double)                                                                    \
-  V(Bool)                                                                      \
-  V(GrowableObjectArray)                                                       \
-  V(Float32x4)                                                                 \
-  V(Int32x4)                                                                   \
-  V(Float64x2)                                                                 \
-  V(TypedData)                                                                 \
-  V(ExternalTypedData)                                                         \
-  V(Capability)                                                                \
-  V(ReceivePort)                                                               \
-  V(SendPort)                                                                  \
-  V(StackTrace)                                                                \
-  V(RegExp)                                                                    \
-  V(WeakProperty)                                                              \
-  V(MirrorReference)                                                           \
-  V(LinkedHashMap)                                                             \
-  V(UserTag)
-
-#define CLASS_LIST_ARRAYS(V)                                                   \
-  V(Array)                                                                     \
-  V(ImmutableArray)
-
-#define CLASS_LIST_STRINGS(V)                                                  \
-  V(String)                                                                    \
-  V(OneByteString)                                                             \
-  V(TwoByteString)                                                             \
-  V(ExternalOneByteString)                                                     \
-  V(ExternalTwoByteString)
-
-#define CLASS_LIST_TYPED_DATA(V)                                               \
-  V(Int8Array)                                                                 \
-  V(Uint8Array)                                                                \
-  V(Uint8ClampedArray)                                                         \
-  V(Int16Array)                                                                \
-  V(Uint16Array)                                                               \
-  V(Int32Array)                                                                \
-  V(Uint32Array)                                                               \
-  V(Int64Array)                                                                \
-  V(Uint64Array)                                                               \
-  V(Float32Array)                                                              \
-  V(Float64Array)                                                              \
-  V(Float32x4Array)                                                            \
-  V(Int32x4Array)                                                              \
-  V(Float64x2Array)
-
-#define DART_CLASS_LIST_TYPED_DATA(V)                                          \
-  V(Int8)                                                                      \
-  V(Uint8)                                                                     \
-  V(Uint8Clamped)                                                              \
-  V(Int16)                                                                     \
-  V(Uint16)                                                                    \
-  V(Int32)                                                                     \
-  V(Uint32)                                                                    \
-  V(Int64)                                                                     \
-  V(Uint64)                                                                    \
-  V(Float32)                                                                   \
-  V(Float64)                                                                   \
-  V(Float32x4)                                                                 \
-  V(Int32x4)                                                                   \
-  V(Float64x2)
-
-#define CLASS_LIST_FOR_HANDLES(V)                                              \
-  CLASS_LIST_NO_OBJECT_NOR_STRING_NOR_ARRAY(V)                                 \
-  V(Array)                                                                     \
-  V(String)
-
-#define CLASS_LIST_NO_OBJECT(V)                                                \
-  CLASS_LIST_NO_OBJECT_NOR_STRING_NOR_ARRAY(V)                                 \
-  CLASS_LIST_ARRAYS(V)                                                         \
-  CLASS_LIST_STRINGS(V)
-
-#define CLASS_LIST(V)                                                          \
-  V(Object)                                                                    \
-  CLASS_LIST_NO_OBJECT(V)
-
 // Forward declarations.
 class Isolate;
 #define DEFINE_FORWARD_DECLARATION(clazz) class Raw##clazz;
@@ -148,51 +33,6 @@
 #undef DEFINE_FORWARD_DECLARATION
 class CodeStatistics;
 
-enum ClassId {
-  // Illegal class id.
-  kIllegalCid = 0,
-
-  // A sentinel used by the vm service's heap snapshots to represent references
-  // from the stack.
-  kStackCid = 1,
-
-  // The following entries describes classes for pseudo-objects in the heap
-  // that should never be reachable from live objects. Free list elements
-  // maintain the free list for old space, and forwarding corpses are used to
-  // implement one-way become.
-  kFreeListElement,
-  kForwardingCorpse,
-
-// List of Ids for predefined classes.
-#define DEFINE_OBJECT_KIND(clazz) k##clazz##Cid,
-  CLASS_LIST(DEFINE_OBJECT_KIND)
-#undef DEFINE_OBJECT_KIND
-
-#define DEFINE_OBJECT_KIND(clazz) kTypedData##clazz##Cid,
-      CLASS_LIST_TYPED_DATA(DEFINE_OBJECT_KIND)
-#undef DEFINE_OBJECT_KIND
-
-#define DEFINE_OBJECT_KIND(clazz) kTypedData##clazz##ViewCid,
-          CLASS_LIST_TYPED_DATA(DEFINE_OBJECT_KIND)
-#undef DEFINE_OBJECT_KIND
-
-              kByteDataViewCid,
-
-#define DEFINE_OBJECT_KIND(clazz) kExternalTypedData##clazz##Cid,
-  CLASS_LIST_TYPED_DATA(DEFINE_OBJECT_KIND)
-#undef DEFINE_OBJECT_KIND
-
-      kByteBufferCid,
-
-  // The following entries do not describe a predefined class, but instead
-  // are class indexes for pre-allocated instances (Null, dynamic and Void).
-  kNullCid,
-  kDynamicCid,
-  kVoidCid,
-
-  kNumPredefinedCids,
-};
-
 #define VISIT_FROM(type, first)                                                \
   type* from() { return reinterpret_cast<type*>(&ptr()->first); }
 
@@ -215,25 +55,6 @@
 #define ASSERT_NOTHING_TO_VISIT(Type)                                          \
   ASSERT(SIZE_OF_RETURNED_VALUE(Raw##Type, NothingToVisit) == sizeof(int))
 
-enum ObjectAlignment {
-  // Alignment offsets are used to determine object age.
-  kNewObjectAlignmentOffset = kWordSize,
-  kOldObjectAlignmentOffset = 0,
-  kNewObjectBitPosition = kWordSizeLog2,
-  // Object sizes are aligned to kObjectAlignment.
-  kObjectAlignment = 2 * kWordSize,
-  kObjectAlignmentLog2 = kWordSizeLog2 + 1,
-  kObjectAlignmentMask = kObjectAlignment - 1,
-};
-
-enum {
-  kSmiTag = 0,
-  kHeapObjectTag = 1,
-  kSmiTagSize = 1,
-  kSmiTagMask = 1,
-  kSmiTagShift = 1,
-};
-
 enum TypedDataElementType {
 #define V(name) k##name##Element,
   CLASS_LIST_TYPED_DATA(V)
diff --git a/runtime/vm/runtime_entry.h b/runtime/vm/runtime_entry.h
index e0c9e56..a5a9f60 100644
--- a/runtime/vm/runtime_entry.h
+++ b/runtime/vm/runtime_entry.h
@@ -6,6 +6,9 @@
 #define RUNTIME_VM_RUNTIME_ENTRY_H_
 
 #include "vm/allocation.h"
+#if !defined(DART_PRECOMPILED_RUNTIME)
+#include "vm/compiler/runtime_api.h"
+#endif
 #include "vm/flags.h"
 #include "vm/heap/safepoint.h"
 #include "vm/native_arguments.h"
@@ -13,14 +16,18 @@
 
 namespace dart {
 
-class Assembler;
-
 typedef void (*RuntimeFunction)(NativeArguments arguments);
 
+#if !defined(DART_PRECOMPILED_RUNTIME)
+using BaseRuntimeEntry = compiler::RuntimeEntry;
+#else
+using BaseRuntimeEntry = ValueObject;
+#endif
+
 // Class RuntimeEntry is used to encapsulate runtime functions, it includes
 // the entry point for the runtime function and the number of arguments expected
 // by the function.
-class RuntimeEntry : public ValueObject {
+class RuntimeEntry : public BaseRuntimeEntry {
  public:
   RuntimeEntry(const char* name,
                RuntimeFunction function,
@@ -41,8 +48,8 @@
   uword GetEntryPoint() const;
 
   // Generate code to call the runtime entry.
-  NOT_IN_PRECOMPILED(void Call(Assembler* assembler, intptr_t argument_count)
-                         const);
+  NOT_IN_PRECOMPILED(void Call(compiler::Assembler* assembler,
+                               intptr_t argument_count) const);
 
   static uword InterpretCallEntry();
   static RawObject* InterpretCall(RawFunction* function,
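
In JIT configurations RuntimeEntry now inherits its compiler-facing surface from compiler::RuntimeEntry, and code generation takes a compiler::Assembler. A hypothetical call site using the Call signature declared above (the helper name and chosen entry are illustrative):

    void EmitStackOverflowCheck(compiler::Assembler* assembler) {
      // Generates the call sequence for the entry; 0 arguments expected.
      kStackOverflowRuntimeEntry.Call(assembler, /*argument_count=*/0);
    }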
diff --git a/runtime/vm/simulator_arm.cc b/runtime/vm/simulator_arm.cc
index 992dd24..1047955 100644
--- a/runtime/vm/simulator_arm.cc
+++ b/runtime/vm/simulator_arm.cc
@@ -1551,20 +1551,10 @@
             // Format(instr, "bkpt #'imm12_4");
             SimulatorDebugger dbg(this);
             int32_t imm = instr->BkptField();
-            if (imm == Instr::kStopMessageCode) {
-              const char* message = "Stop messages not enabled";
-              if (FLAG_print_stop_message) {
-                message = *reinterpret_cast<const char**>(
-                    reinterpret_cast<intptr_t>(instr) - Instr::kInstrSize);
-              }
-              set_pc(get_pc() + Instr::kInstrSize);
-              dbg.Stop(instr, message);
-            } else {
-              char buffer[32];
-              snprintf(buffer, sizeof(buffer), "bkpt #0x%x", imm);
-              set_pc(get_pc() + Instr::kInstrSize);
-              dbg.Stop(instr, buffer);
-            }
+            char buffer[32];
+            snprintf(buffer, sizeof(buffer), "bkpt #0x%x", imm);
+            set_pc(get_pc() + Instr::kInstrSize);
+            dbg.Stop(instr, buffer);
           } else {
             // Format(instr, "smc'cond");
             UnimplementedInstruction(instr);
diff --git a/runtime/vm/simulator_arm64.cc b/runtime/vm/simulator_arm64.cc
index 82f12b1..d7240cb 100644
--- a/runtime/vm/simulator_arm64.cc
+++ b/runtime/vm/simulator_arm64.cc
@@ -1647,20 +1647,10 @@
     // Format(instr, "brk 'imm16");
     SimulatorDebugger dbg(this);
     int32_t imm = instr->Imm16Field();
-    if (imm == Instr::kStopMessageCode) {
-      const char* message = "Stop messages not enabled";
-      if (FLAG_print_stop_message) {
-        message = *reinterpret_cast<const char**>(
-            reinterpret_cast<intptr_t>(instr) - 2 * Instr::kInstrSize);
-      }
-      set_pc(get_pc() + Instr::kInstrSize);
-      dbg.Stop(instr, message);
-    } else {
-      char buffer[32];
-      snprintf(buffer, sizeof(buffer), "brk #0x%x", imm);
-      set_pc(get_pc() + Instr::kInstrSize);
-      dbg.Stop(instr, buffer);
-    }
+    char buffer[32];
+    snprintf(buffer, sizeof(buffer), "brk #0x%x", imm);
+    set_pc(get_pc() + Instr::kInstrSize);
+    dbg.Stop(instr, buffer);
   } else if ((instr->Bits(0, 2) == 0) && (instr->Bits(2, 3) == 0) &&
              (instr->Bits(21, 3) == 2)) {
     // Format(instr, "hlt 'imm16");
diff --git a/runtime/vm/stack_frame.cc b/runtime/vm/stack_frame.cc
index 5f174f8..900061b 100644
--- a/runtime/vm/stack_frame.cc
+++ b/runtime/vm/stack_frame.cc
@@ -6,6 +6,7 @@
 
 #include "platform/memory_sanitizer.h"
 #include "vm/compiler/assembler/assembler.h"
+#include "vm/compiler/runtime_api.h"
 #include "vm/deopt_instructions.h"
 #include "vm/heap/become.h"
 #include "vm/isolate.h"
@@ -56,7 +57,14 @@
     /*.code_from_fp = */ 0,             // No saved CODE
 };
 
-FrameLayout compiler_frame_layout = invalid_frame_layout;
+namespace compiler {
+
+namespace target {
+FrameLayout frame_layout = invalid_frame_layout;
+}
+
+}  // namespace compiler
+
 FrameLayout runtime_frame_layout = invalid_frame_layout;
 
 int FrameLayout::FrameSlotForVariable(const LocalVariable* variable) const {
@@ -75,15 +83,15 @@
 
 void FrameLayout::Init() {
   // By default we use frames with CODE_REG/PP in the frame.
-  compiler_frame_layout = default_frame_layout;
+  compiler::target::frame_layout = default_frame_layout;
   runtime_frame_layout = default_frame_layout;
 
   if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
-    compiler_frame_layout = bare_instructions_frame_layout;
+    compiler::target::frame_layout = bare_instructions_frame_layout;
   }
 #if defined(DART_PRECOMPILED_RUNTIME)
   if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
-    compiler_frame_layout = invalid_frame_layout;
+    compiler::target::frame_layout = invalid_frame_layout;
     runtime_frame_layout = bare_instructions_frame_layout;
   }
 #endif
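
The old compiler_frame_layout global becomes compiler::target::frame_layout, making it explicit that the layout describes the target. A representative use, mirroring the stub-code call sites later in this diff:

    // FP-relative byte offset of the first local under the target layout.
    const intptr_t first_local_offset =
        compiler::target::frame_layout.first_local_from_fp * kWordSize;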
diff --git a/runtime/vm/stack_frame.h b/runtime/vm/stack_frame.h
index c8b59b9..bf67520 100644
--- a/runtime/vm/stack_frame.h
+++ b/runtime/vm/stack_frame.h
@@ -6,6 +6,7 @@
 #define RUNTIME_VM_STACK_FRAME_H_
 
 #include "vm/allocation.h"
+#include "vm/frame_layout.h"
 #include "vm/interpreter.h"
 #include "vm/object.h"
 #include "vm/stack_frame_kbc.h"
@@ -32,61 +33,6 @@
 class RawContext;
 class LocalVariable;
 
-struct FrameLayout {
-  // The offset (in words) from FP to the first object.
-  int first_object_from_fp;
-
-  // The offset (in words) from FP to the last fixed object.
-  int last_fixed_object_from_fp;
-
-  // The offset (in words) from FP to the first local.
-  int param_end_from_fp;
-
-  // The offset (in words) from FP to the first local.
-  int first_local_from_fp;
-
-  // The fixed size of the frame.
-  int dart_fixed_frame_size;
-
-  // The offset (in words) from FP to the saved pool (if applicable).
-  int saved_caller_pp_from_fp;
-
-  // The offset (in words) from FP to the code object (if applicable).
-  int code_from_fp;
-
-  // The number of fixed slots below the saved PC.
-  int saved_below_pc() const { return -first_local_from_fp; }
-
-  // Returns the FP-relative index where [variable] can be found (assumes
-  // [variable] is not captured), in words.
-  int FrameSlotForVariable(const LocalVariable* variable) const;
-
-  // Returns the FP-relative index where [variable_index] can be found (assumes
-  // [variable_index] comes from a [LocalVariable::index()], which is not
-  // captured).
-  int FrameSlotForVariableIndex(int index) const;
-
-  // Returns the FP-relative index where [variable] can be found (assumes
-  // [variable] is not captured), in bytes.
-  int FrameOffsetInBytesForVariable(const LocalVariable* variable) const {
-    return FrameSlotForVariable(variable) * kWordSize;
-  }
-
-  // Returns the variable index from a FP-relative index.
-  intptr_t VariableIndexForFrameSlot(intptr_t frame_slot) const {
-    if (frame_slot <= first_local_from_fp) {
-      return frame_slot - first_local_from_fp;
-    } else {
-      ASSERT(frame_slot > param_end_from_fp);
-      return frame_slot - param_end_from_fp;
-    }
-  }
-
-  // Called to initialize the stack frame layout during startup.
-  static void Init();
-};
-
-extern FrameLayout compiler_frame_layout;
 extern FrameLayout runtime_frame_layout;
 
 // Generic stack frame.
diff --git a/runtime/vm/stub_code.cc b/runtime/vm/stub_code.cc
index 57a718c..6e90207 100644
--- a/runtime/vm/stub_code.cc
+++ b/runtime/vm/stub_code.cc
@@ -20,6 +20,8 @@
 
 namespace dart {
 
+using compiler::ObjectPoolBuilder;
+
 DEFINE_FLAG(bool, disassemble_stubs, false, "Disassemble generated stubs.");
 DECLARE_FLAG(bool, precompiled_mode);
 
@@ -45,20 +47,21 @@
 
 #define STUB_CODE_GENERATE(name)                                               \
   entries_[k##name##Index] = Code::ReadOnlyHandle();                           \
-  *entries_[k##name##Index] = Generate("_stub_" #name, &object_pool_wrapper,   \
+  *entries_[k##name##Index] = Generate("_stub_" #name, &object_pool_builder,   \
                                        StubCode::Generate##name##Stub);
 
 #define STUB_CODE_SET_OBJECT_POOL(name)                                        \
   entries_[k##name##Index]->set_object_pool(object_pool.raw());
 
 void StubCode::Init() {
-  ObjectPoolWrapper object_pool_wrapper;
+  ObjectPoolBuilder object_pool_builder;
 
   // Generate all the stubs.
   VM_STUB_CODE_LIST(STUB_CODE_GENERATE);
 
   const ObjectPool& object_pool =
-      ObjectPool::Handle(object_pool_wrapper.MakeObjectPool());
+      ObjectPool::Handle(ObjectPool::NewFromBuilder(object_pool_builder));
+
   VM_STUB_CODE_LIST(STUB_CODE_SET_OBJECT_POOL)
 }
 
@@ -74,9 +77,9 @@
 #undef STUB_CODE_CLEANUP
 
 RawCode* StubCode::Generate(const char* name,
-                            ObjectPoolWrapper* object_pool_wrapper,
+                            ObjectPoolBuilder* object_pool_builder,
                             void (*GenerateStub)(Assembler* assembler)) {
-  Assembler assembler(object_pool_wrapper);
+  Assembler assembler(object_pool_builder);
   GenerateStub(&assembler);
   const Code& code = Code::Handle(Code::FinalizeCode(
       name, nullptr, &assembler, Code::PoolAttachment::kNotAttachPool,
@@ -163,13 +166,13 @@
   Code& stub = Code::Handle(zone, cls.allocation_stub());
 #if !defined(DART_PRECOMPILED_RUNTIME)
   if (stub.IsNull()) {
-    ObjectPoolWrapper object_pool_wrapper;
+    ObjectPoolBuilder object_pool_builder;
     Precompiler* precompiler = Precompiler::Instance();
 
-    ObjectPoolWrapper* wrapper =
+    ObjectPoolBuilder* wrapper =
         FLAG_use_bare_instructions && precompiler != NULL
-            ? precompiler->global_object_pool_wrapper()
-            : &object_pool_wrapper;
+            ? precompiler->global_object_pool_builder()
+            : &object_pool_builder;
 
     const auto pool_attachment =
         FLAG_precompiled_mode && FLAG_use_bare_instructions
@@ -238,10 +241,10 @@
 }
 
 #if !defined(TARGET_ARCH_DBC) && !defined(TARGET_ARCH_IA32)
-RawCode* StubCode::GetBuildMethodExtractorStub(ObjectPoolWrapper* pool) {
+RawCode* StubCode::GetBuildMethodExtractorStub(ObjectPoolBuilder* pool) {
 #if !defined(DART_PRECOMPILED_RUNTIME)
-  ObjectPoolWrapper object_pool_wrapper;
-  Assembler assembler(pool != nullptr ? pool : &object_pool_wrapper);
+  ObjectPoolBuilder object_pool_builder;
+  Assembler assembler(pool != nullptr ? pool : &object_pool_builder);
   StubCode::GenerateBuildMethodExtractorStub(&assembler);
 
   const char* name = "BuildMethodExtractor";
@@ -250,9 +253,7 @@
       /*optimized=*/false));
 
   if (pool == nullptr) {
-    const ObjectPool& object_pool =
-        ObjectPool::Handle(object_pool_wrapper.MakeObjectPool());
-    stub.set_object_pool(object_pool.raw());
+    stub.set_object_pool(ObjectPool::NewFromBuilder(object_pool_builder));
   }
 
 #ifndef PRODUCT
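
All VM-global stubs share one object pool: each stub is generated against the same ObjectPoolBuilder with no pool attached, and a single pool is materialized and attached afterwards. The StubCode::Init pattern reduced to its essentials (stub name and generator are placeholders):

    compiler::ObjectPoolBuilder builder;
    const Code& stub = Code::Handle(
        StubCode::Generate("_stub_Example", &builder, GenerateExampleStub));
    const ObjectPool& pool =
        ObjectPool::Handle(ObjectPool::NewFromBuilder(builder));
    stub.set_object_pool(pool.raw());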
diff --git a/runtime/vm/stub_code.h b/runtime/vm/stub_code.h
index f551484..aa148e3 100644
--- a/runtime/vm/stub_code.h
+++ b/runtime/vm/stub_code.h
@@ -7,6 +7,7 @@
 
 #include "vm/allocation.h"
 #include "vm/compiler/assembler/assembler.h"
+#include "vm/object.h"
 
 namespace dart {
 
@@ -14,7 +15,6 @@
 class Code;
 class Isolate;
 class ObjectPointerVisitor;
-class ObjectPoolWrapper;
 class RawCode;
 class SnapshotReader;
 class SnapshotWriter;
@@ -152,8 +152,8 @@
   static RawCode* GetAllocationStubForClass(const Class& cls);
 
 #if !defined(TARGET_ARCH_DBC) && !defined(TARGET_ARCH_IA32)
-  static RawCode* GetBuildMethodExtractorStub(ObjectPoolWrapper* pool);
-  static void GenerateBuildMethodExtractorStub(Assembler* assembler);
+  static RawCode* GetBuildMethodExtractorStub(ObjectPoolBuilder* pool);
+  static void GenerateBuildMethodExtractorStub(compiler::Assembler* assembler);
 #endif
 
   static const Code& UnoptimizedStaticCallEntry(intptr_t num_args_tested);
@@ -171,7 +171,7 @@
 
 #if !defined(DART_PRECOMPILED_RUNTIME)
 #define GENERATE_STUB(name)                                                    \
-  static RawCode* BuildIsolateSpecific##name##Stub(ObjectPoolWrapper* opw) {   \
+  static RawCode* BuildIsolateSpecific##name##Stub(ObjectPoolBuilder* opw) {   \
     return StubCode::Generate("_iso_stub_" #name, opw,                         \
                               StubCode::Generate##name##Stub);                 \
   }
@@ -193,35 +193,37 @@
 
 #if !defined(DART_PRECOMPILED_RUNTIME)
 #define STUB_CODE_GENERATE(name)                                               \
-  static void Generate##name##Stub(Assembler* assembler);
+  static void Generate##name##Stub(compiler::Assembler* assembler);
   VM_STUB_CODE_LIST(STUB_CODE_GENERATE)
 #undef STUB_CODE_GENERATE
 
   // Generate the stub and finalize the generated code into the stub
   // code executable area.
-  static RawCode* Generate(const char* name,
-                           ObjectPoolWrapper* object_pool_wrapper,
-                           void (*GenerateStub)(Assembler* assembler));
+  static RawCode* Generate(
+      const char* name,
+      ObjectPoolBuilder* object_pool_builder,
+      void (*GenerateStub)(compiler::Assembler* assembler));
 
-  static void GenerateSharedStub(Assembler* assembler,
+  static void GenerateSharedStub(compiler::Assembler* assembler,
                                  bool save_fpu_registers,
                                  const RuntimeEntry* target,
                                  intptr_t self_code_stub_offset_from_thread,
                                  bool allow_return);
 
-  static void GenerateMegamorphicMissStub(Assembler* assembler);
-  static void GenerateAllocationStubForClass(Assembler* assembler,
+  static void GenerateMegamorphicMissStub(compiler::Assembler* assembler);
+  static void GenerateAllocationStubForClass(compiler::Assembler* assembler,
                                              const Class& cls);
   static void GenerateNArgsCheckInlineCacheStub(
-      Assembler* assembler,
+      compiler::Assembler* assembler,
       intptr_t num_args,
       const RuntimeEntry& handle_ic_miss,
       Token::Kind kind,
       bool optimized = false,
       bool exactness_check = false);
-  static void GenerateUsageCounterIncrement(Assembler* assembler,
+  static void GenerateUsageCounterIncrement(compiler::Assembler* assembler,
                                             Register temp_reg);
-  static void GenerateOptimizedUsageCounterIncrement(Assembler* assembler);
+  static void GenerateOptimizedUsageCounterIncrement(
+      compiler::Assembler* assembler);
 #endif  // !defined(DART_PRECOMPILED_RUNTIME)
 };
 
diff --git a/runtime/vm/stub_code_arm.cc b/runtime/vm/stub_code_arm.cc
index 6709e31..5d0f2d8 100644
--- a/runtime/vm/stub_code_arm.cc
+++ b/runtime/vm/stub_code_arm.cc
@@ -160,7 +160,8 @@
   const auto& closure_allocation_stub =
       Code::ZoneHandle(Z, StubCode::GetAllocationStubForClass(closure_class));
 
-  const intptr_t kReceiverOffset = compiler_frame_layout.param_end_from_fp + 1;
+  const intptr_t kReceiverOffset =
+      compiler::target::frame_layout.param_end_from_fp + 1;
 
   const auto& context_allocation_stub = StubCode::AllocateContext();
 
@@ -569,13 +570,13 @@
   // The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry
   // and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls.
   const intptr_t saved_result_slot_from_fp =
-      compiler_frame_layout.first_local_from_fp + 1 -
+      compiler::target::frame_layout.first_local_from_fp + 1 -
       (kNumberOfCpuRegisters - R0);
   const intptr_t saved_exception_slot_from_fp =
-      compiler_frame_layout.first_local_from_fp + 1 -
+      compiler::target::frame_layout.first_local_from_fp + 1 -
       (kNumberOfCpuRegisters - R0);
   const intptr_t saved_stacktrace_slot_from_fp =
-      compiler_frame_layout.first_local_from_fp + 1 -
+      compiler::target::frame_layout.first_local_from_fp + 1 -
       (kNumberOfCpuRegisters - R1);
   // Result in R0 is preserved as part of pushing all registers below.
 
@@ -643,13 +644,14 @@
   __ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1);  // Pass last FP in R0.
   if (kind == kLazyDeoptFromReturn) {
     // Restore result into R1.
-    __ ldr(R1,
-           Address(FP, compiler_frame_layout.first_local_from_fp * kWordSize));
+    __ ldr(R1, Address(FP, compiler::target::frame_layout.first_local_from_fp *
+                               kWordSize));
   } else if (kind == kLazyDeoptFromThrow) {
     // Restore exception into R1 and stacktrace into R2.
-    __ ldr(R1,
-           Address(FP, compiler_frame_layout.first_local_from_fp * kWordSize));
-    __ ldr(R2, Address(FP, (compiler_frame_layout.first_local_from_fp - 1) *
+    __ ldr(R1, Address(FP, compiler::target::frame_layout.first_local_from_fp *
+                               kWordSize));
+    __ ldr(R2, Address(FP, (compiler::target::frame_layout.first_local_from_fp -
+                            1) *
                                kWordSize));
   }
   // Code above cannot cause GC.
@@ -755,7 +757,8 @@
   // Load the receiver.
   __ ldr(R2, FieldAddress(R4, ArgumentsDescriptor::count_offset()));
   __ add(IP, FP, Operand(R2, LSL, 1));  // R2 is Smi.
-  __ ldr(R8, Address(IP, compiler_frame_layout.param_end_from_fp * kWordSize));
+  __ ldr(R8, Address(IP, compiler::target::frame_layout.param_end_from_fp *
+                             kWordSize));
 
   // Preserve IC data and arguments descriptor.
   __ PushList((1 << R4) | (1 << R9));
@@ -827,7 +830,6 @@
   __ bic(R9, R9, Operand(kObjectAlignment - 1));
 
   // R9: Allocation size.
-  NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
   // Potential new object start.
   __ ldr(R0, Address(THR, Thread::top_offset()));
   __ adds(NOTFP, R0, Operand(R9));  // Potential next object start.
@@ -886,7 +888,7 @@
   // data area to be initialized.
   // NOTFP: new object end address.
   // R9: allocation size.
-  NOT_IN_PRODUCT(__ IncrementAllocationStatsWithSize(R3, R9, space));
+  NOT_IN_PRODUCT(__ IncrementAllocationStatsWithSize(R3, R9));
 
   __ LoadObject(R8, Object::null_object());
   __ mov(R9, Operand(R8));
@@ -1067,7 +1069,6 @@
     // R1: number of context variables.
     // R2: object size.
     const intptr_t cid = kContextCid;
-    NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
     __ ldr(R0, Address(THR, Thread::top_offset()));
     __ add(R3, R2, Operand(R0));
     // Check if the allocation fits into the remaining space.
@@ -1142,7 +1143,7 @@
     Label loop;
     __ AddImmediate(NOTFP, R0, Context::variable_offset(0) - kHeapObjectTag);
     __ InitializeFieldsNoBarrier(R0, NOTFP, R3, R8, R9);
-    NOT_IN_PRODUCT(__ IncrementAllocationStatsWithSize(R4, R2, space));
+    NOT_IN_PRODUCT(__ IncrementAllocationStatsWithSize(R4, R2));
 
     // Done allocating and initializing the context.
     // R0: new object.
@@ -1412,7 +1413,6 @@
 
     // Allocate the object and update top to point to
     // next object start and initialize the allocated object.
-    NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
 
     RELEASE_ASSERT((Thread::top_offset() + kWordSize) == Thread::end_offset());
     __ ldrd(kInstanceReg, kEndReg, THR, Thread::top_offset());
@@ -1466,8 +1466,7 @@
     }
 
     // Update allocation stats.
-    NOT_IN_PRODUCT(
-        __ IncrementAllocationStats(kAllocationStatsReg, cls.id(), space));
+    NOT_IN_PRODUCT(__ IncrementAllocationStats(kAllocationStatsReg, cls.id()));
 
     __ Ret();
     __ Bind(&slow_case);
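
Editorial note: every rewrite in this file reads frame-slot indices from the
target-specific descriptor instead of the global compiler_frame_layout. A sketch of the
assumed shape, showing only the members used in these hunks (the helper body is
illustrative, not the committed definition):

  namespace compiler {
  namespace target {

  struct FrameLayout {
    // FP-relative index (in words) of the last parameter slot.
    intptr_t param_end_from_fp;
    // FP-relative index (in words) of the first local slot.
    intptr_t first_local_from_fp;
    // Illustrative helper: index of the slot just below the saved PC.
    intptr_t saved_below_pc() const { return param_end_from_fp + 1; }
  };

  extern FrameLayout frame_layout;

  }  // namespace target
  }  // namespace compiler
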
diff --git a/runtime/vm/stub_code_arm64.cc b/runtime/vm/stub_code_arm64.cc
index 182a355f..bca7db2 100644
--- a/runtime/vm/stub_code_arm64.cc
+++ b/runtime/vm/stub_code_arm64.cc
@@ -182,7 +182,8 @@
   const auto& closure_allocation_stub =
       Code::ZoneHandle(Z, StubCode::GetAllocationStubForClass(closure_class));
 
-  const intptr_t kReceiverOffset = compiler_frame_layout.param_end_from_fp + 1;
+  const intptr_t kReceiverOffset =
+      compiler::target::frame_layout.param_end_from_fp + 1;
 
   const auto& context_allocation_stub = StubCode::AllocateContext();
 
@@ -619,13 +620,13 @@
   // The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry
   // and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls.
   const intptr_t saved_result_slot_from_fp =
-      compiler_frame_layout.first_local_from_fp + 1 -
+      compiler::target::frame_layout.first_local_from_fp + 1 -
       (kNumberOfCpuRegisters - R0);
   const intptr_t saved_exception_slot_from_fp =
-      compiler_frame_layout.first_local_from_fp + 1 -
+      compiler::target::frame_layout.first_local_from_fp + 1 -
       (kNumberOfCpuRegisters - R0);
   const intptr_t saved_stacktrace_slot_from_fp =
-      compiler_frame_layout.first_local_from_fp + 1 -
+      compiler::target::frame_layout.first_local_from_fp + 1 -
       (kNumberOfCpuRegisters - R1);
   // Result in R0 is preserved as part of pushing all registers below.
 
@@ -686,14 +687,15 @@
   __ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1);
   if (kind == kLazyDeoptFromReturn) {
     // Restore result into R1.
-    __ LoadFromOffset(R1, FP,
-                      compiler_frame_layout.first_local_from_fp * kWordSize);
+    __ LoadFromOffset(
+        R1, FP, compiler::target::frame_layout.first_local_from_fp * kWordSize);
   } else if (kind == kLazyDeoptFromThrow) {
     // Restore exception into R1 and stacktrace into R2.
-    __ LoadFromOffset(R1, FP,
-                      compiler_frame_layout.first_local_from_fp * kWordSize);
     __ LoadFromOffset(
-        R2, FP, (compiler_frame_layout.first_local_from_fp - 1) * kWordSize);
+        R1, FP, compiler::target::frame_layout.first_local_from_fp * kWordSize);
+    __ LoadFromOffset(
+        R2, FP,
+        (compiler::target::frame_layout.first_local_from_fp - 1) * kWordSize);
   }
   // Code above cannot cause GC.
   // There is a Dart Frame on the stack. We must restore PP and leave frame.
@@ -802,8 +804,8 @@
   // Load the receiver.
   __ LoadFieldFromOffset(R2, R4, ArgumentsDescriptor::count_offset());
   __ add(TMP, FP, Operand(R2, LSL, 2));  // R2 is Smi.
-  __ LoadFromOffset(R6, TMP,
-                    compiler_frame_layout.param_end_from_fp * kWordSize);
+  __ LoadFromOffset(
+      R6, TMP, compiler::target::frame_layout.param_end_from_fp * kWordSize);
 
   // Preserve IC data and arguments descriptor.
   __ Push(R5);
@@ -872,8 +874,6 @@
   const intptr_t cid = kArrayCid;
   NOT_IN_PRODUCT(__ MaybeTraceAllocation(kArrayCid, R4, &slow_case));
 
-  NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
-
   // Calculate and align allocation size.
   // Load new object start and calculate next object start.
   // R1: array element type.
@@ -907,7 +907,7 @@
   // R7: potential next object start.
   __ str(R7, Address(THR, Thread::top_offset()));
   __ add(R0, R0, Operand(kHeapObjectTag));
-  NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, R3, space));
+  NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, R3));
 
   // R0: new object start as a tagged pointer.
   // R1: array element type.
@@ -1290,7 +1290,6 @@
     // R1: number of context variables.
     // R2: object size.
     const intptr_t cid = kContextCid;
-    NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
     __ ldr(R0, Address(THR, Thread::top_offset()));
     __ add(R3, R2, Operand(R0));
     // Check if the allocation fits into the remaining space.
@@ -1314,7 +1313,7 @@
     // R3: next object start.
     __ str(R3, Address(THR, Thread::top_offset()));
     __ add(R0, R0, Operand(kHeapObjectTag));
-    NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, R2, space));
+    NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, R2));
 
     // Calculate the size tag.
     // R0: new object.
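
Editorial note: the dropped Heap::Space argument reflects that these call sites always
passed Heap::kNew, so the local `space` temporaries go away too. Assumed post-change
declarations matching the arm/arm64 call sites above (a sketch, not the committed
headers):

  // New space is implied; no Heap::Space parameter.
  void IncrementAllocationStats(Register stats_addr_reg, intptr_t cid);
  void IncrementAllocationStatsWithSize(Register stats_addr_reg,
                                        Register size_reg);
  void UpdateAllocationStatsWithSize(intptr_t cid, Register size_reg);
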
diff --git a/runtime/vm/stub_code_arm64_test.cc b/runtime/vm/stub_code_arm64_test.cc
index 6d4af28..5dbe70d 100644
--- a/runtime/vm/stub_code_arm64_test.cc
+++ b/runtime/vm/stub_code_arm64_test.cc
@@ -55,8 +55,8 @@
                                               const Code& code);
   const int length = 10;
   const char* kName = "Test_CallRuntimeStubCode";
-  ObjectPoolWrapper object_pool_wrapper;
-  Assembler assembler(&object_pool_wrapper);
+  ObjectPoolBuilder object_pool_builder;
+  Assembler assembler(&object_pool_builder);
   GenerateCallToCallRuntimeStub(&assembler, length);
   const Code& code = Code::Handle(
       Code::FinalizeCode(*CreateFunction("Test_CallRuntimeStubCode"), nullptr,
@@ -96,8 +96,8 @@
   intptr_t rhs_index_value = 2;
   intptr_t length_value = 2;
   const char* kName = "Test_CallLeafRuntimeStubCode";
-  ObjectPoolWrapper object_pool_wrapper;
-  Assembler assembler(&object_pool_wrapper);
+  ObjectPoolBuilder object_pool_builder;
+  Assembler assembler(&object_pool_builder);
   GenerateCallToCallLeafRuntimeStub(&assembler, str_value, lhs_index_value,
                                     rhs_index_value, length_value);
   const Code& code = Code::Handle(Code::FinalizeCode(
diff --git a/runtime/vm/stub_code_arm_test.cc b/runtime/vm/stub_code_arm_test.cc
index c1699b7..85224e3 100644
--- a/runtime/vm/stub_code_arm_test.cc
+++ b/runtime/vm/stub_code_arm_test.cc
@@ -54,8 +54,8 @@
                                               const Code& code);
   const int length = 10;
   const char* kName = "Test_CallRuntimeStubCode";
-  ObjectPoolWrapper object_pool_wrapper;
-  Assembler assembler(&object_pool_wrapper);
+  ObjectPoolBuilder object_pool_builder;
+  Assembler assembler(&object_pool_builder);
   GenerateCallToCallRuntimeStub(&assembler, length);
   const Code& code = Code::Handle(
       Code::FinalizeCode(*CreateFunction("Test_CallRuntimeStubCode"), nullptr,
@@ -94,8 +94,8 @@
   intptr_t rhs_index_value = 2;
   intptr_t length_value = 2;
   const char* kName = "Test_CallLeafRuntimeStubCode";
-  ObjectPoolWrapper object_pool_wrapper;
-  Assembler assembler(&object_pool_wrapper);
+  ObjectPoolBuilder object_pool_builder;
+  Assembler assembler(&object_pool_builder);
   GenerateCallToCallLeafRuntimeStub(&assembler, str_value, lhs_index_value,
                                     rhs_index_value, length_value);
   const Code& code = Code::Handle(Code::FinalizeCode(
diff --git a/runtime/vm/stub_code_ia32.cc b/runtime/vm/stub_code_ia32.cc
index 4c59d09..fdefce3 100644
--- a/runtime/vm/stub_code_ia32.cc
+++ b/runtime/vm/stub_code_ia32.cc
@@ -381,13 +381,13 @@
   // The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry
   // and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls.
   const intptr_t saved_result_slot_from_fp =
-      compiler_frame_layout.first_local_from_fp + 1 -
+      compiler::target::frame_layout.first_local_from_fp + 1 -
       (kNumberOfCpuRegisters - EAX);
   const intptr_t saved_exception_slot_from_fp =
-      compiler_frame_layout.first_local_from_fp + 1 -
+      compiler::target::frame_layout.first_local_from_fp + 1 -
       (kNumberOfCpuRegisters - EAX);
   const intptr_t saved_stacktrace_slot_from_fp =
-      compiler_frame_layout.first_local_from_fp + 1 -
+      compiler::target::frame_layout.first_local_from_fp + 1 -
       (kNumberOfCpuRegisters - EDX);
   // Result in EAX is preserved as part of pushing all registers below.
 
@@ -447,14 +447,18 @@
   __ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1);
   if (kind == kLazyDeoptFromReturn) {
     // Restore result into EBX.
-    __ movl(EBX, Address(EBP, compiler_frame_layout.first_local_from_fp *
-                                  kWordSize));
+    __ movl(EBX,
+            Address(EBP, compiler::target::frame_layout.first_local_from_fp *
+                             kWordSize));
   } else if (kind == kLazyDeoptFromThrow) {
     // Restore exception into EBX and stacktrace into ECX.
-    __ movl(EBX, Address(EBP, compiler_frame_layout.first_local_from_fp *
-                                  kWordSize));
-    __ movl(ECX, Address(EBP, (compiler_frame_layout.first_local_from_fp - 1) *
-                                  kWordSize));
+    __ movl(EBX,
+            Address(EBP, compiler::target::frame_layout.first_local_from_fp *
+                             kWordSize));
+    __ movl(
+        ECX,
+        Address(EBP, (compiler::target::frame_layout.first_local_from_fp - 1) *
+                         kWordSize));
   }
   // Code above cannot cause GC.
   __ LeaveFrame();
@@ -628,7 +632,6 @@
   // EBX: allocation size.
 
   const intptr_t cid = kArrayCid;
-  NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
   __ movl(EAX, Address(THR, Thread::top_offset()));
   __ addl(EBX, EAX);
   __ j(CARRY, &slow_case);
@@ -646,7 +649,7 @@
   __ movl(Address(THR, Thread::top_offset()), EBX);
   __ subl(EBX, EAX);
   __ addl(EAX, Immediate(kHeapObjectTag));
-  NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, EBX, EDI, space));
+  NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, EBX, EDI));
 
   // Initialize the tags.
   // EAX: new object start as a tagged pointer.
@@ -864,7 +867,6 @@
     // Now allocate the object.
     // EDX: number of context variables.
     const intptr_t cid = kContextCid;
-    NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
     __ movl(EAX, Address(THR, Thread::top_offset()));
     __ addl(EBX, EAX);
     // Check if the allocation fits into the remaining space.
@@ -893,7 +895,7 @@
     __ subl(EBX, EAX);
     __ addl(EAX, Immediate(kHeapObjectTag));
     // Generate isolate-independent code to allow sharing between isolates.
-    NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, EBX, EDI, space));
+    NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, EBX, EDI));
 
     // Calculate the size tag.
     // EAX: new object.
@@ -1142,7 +1144,6 @@
     // Allocate the object and update top to point to
     // next object start and initialize the allocated object.
     // EDX: instantiated type arguments (if is_cls_parameterized).
-    NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
     __ movl(EAX, Address(THR, Thread::top_offset()));
     __ leal(EBX, Address(EAX, instance_size));
     // Check if the allocation fits into the remaining space.
@@ -1155,7 +1156,7 @@
       __ j(ABOVE_EQUAL, &slow_case);
     }
     __ movl(Address(THR, Thread::top_offset()), EBX);
-    NOT_IN_PRODUCT(__ UpdateAllocationStats(cls.id(), ECX, space));
+    NOT_IN_PRODUCT(__ UpdateAllocationStats(cls.id(), ECX));
 
     // EAX: new object start (untagged).
     // EBX: next object start.
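
Editorial note: the ia32 variants still thread an explicit scratch register (EDI, ECX at
the call sites above) through the stats helpers, unlike x64. Assumed declarations
matching those call sites (a sketch under that assumption):

  void UpdateAllocationStats(intptr_t cid, Register temp_reg);
  void UpdateAllocationStatsWithSize(intptr_t cid,
                                     Register size_reg,
                                     Register temp_reg);
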
diff --git a/runtime/vm/stub_code_x64.cc b/runtime/vm/stub_code_x64.cc
index bcc2198..346ee01 100644
--- a/runtime/vm/stub_code_x64.cc
+++ b/runtime/vm/stub_code_x64.cc
@@ -171,7 +171,8 @@
   const auto& closure_allocation_stub =
       Code::ZoneHandle(Z, StubCode::GetAllocationStubForClass(closure_class));
 
-  const intptr_t kReceiverOffset = compiler_frame_layout.param_end_from_fp + 1;
+  const intptr_t kReceiverOffsetInWords =
+      compiler::target::frame_layout.param_end_from_fp + 1;
 
   const auto& context_allocation_stub = StubCode::AllocateContext();
 
@@ -182,7 +183,8 @@
   __ movq(RCX, Address(THR, Thread::object_null_offset()));
   __ cmpq(RDX, Immediate(0));
   __ j(EQUAL, &no_type_args, Assembler::kNearJump);
-  __ movq(RAX, Address(RBP, kWordSize * kReceiverOffset));
+  __ movq(RAX,
+          Address(RBP, compiler::target::kWordSize * kReceiverOffsetInWords));
   __ movq(RCX, Address(RAX, RDX, TIMES_1, 0));
   __ Bind(&no_type_args);
   __ pushq(RCX);
@@ -213,7 +215,8 @@
   }
 
   // Store receiver in context
-  __ movq(RSI, Address(RBP, kWordSize * kReceiverOffset));
+  __ movq(RSI,
+          Address(RBP, compiler::target::kWordSize * kReceiverOffsetInWords));
   __ StoreIntoObject(RAX, FieldAddress(RAX, Context::variable_offset(0)), RSI);
 
   // Push context.
@@ -563,13 +566,13 @@
   // The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry
   // and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls.
   const intptr_t saved_result_slot_from_fp =
-      compiler_frame_layout.first_local_from_fp + 1 -
+      compiler::target::frame_layout.first_local_from_fp + 1 -
       (kNumberOfCpuRegisters - RAX);
   const intptr_t saved_exception_slot_from_fp =
-      compiler_frame_layout.first_local_from_fp + 1 -
+      compiler::target::frame_layout.first_local_from_fp + 1 -
       (kNumberOfCpuRegisters - RAX);
   const intptr_t saved_stacktrace_slot_from_fp =
-      compiler_frame_layout.first_local_from_fp + 1 -
+      compiler::target::frame_layout.first_local_from_fp + 1 -
       (kNumberOfCpuRegisters - RDX);
   // Result in RAX is preserved as part of pushing all registers below.
 
@@ -635,15 +638,19 @@
   __ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1);
   if (kind == kLazyDeoptFromReturn) {
     // Restore result into RBX.
-    __ movq(RBX, Address(RBP, compiler_frame_layout.first_local_from_fp *
-                                  kWordSize));
+    __ movq(RBX,
+            Address(RBP, compiler::target::frame_layout.first_local_from_fp *
+                             kWordSize));
   } else if (kind == kLazyDeoptFromThrow) {
     // Restore exception into RBX.
-    __ movq(RBX, Address(RBP, compiler_frame_layout.first_local_from_fp *
-                                  kWordSize));
+    __ movq(RBX,
+            Address(RBP, compiler::target::frame_layout.first_local_from_fp *
+                             kWordSize));
     // Restore stacktrace into RDX.
-    __ movq(RDX, Address(RBP, (compiler_frame_layout.first_local_from_fp - 1) *
-                                  kWordSize));
+    __ movq(
+        RDX,
+        Address(RBP, (compiler::target::frame_layout.first_local_from_fp - 1) *
+                         kWordSize));
   }
   // Code above cannot cause GC.
   // There is a Dart Frame on the stack. We must restore PP and leave frame.
@@ -756,8 +763,9 @@
   __ movq(RAX, FieldAddress(R10, ArgumentsDescriptor::count_offset()));
   // Three words (saved pp, saved fp, stub's pc marker)
   // in the stack above the return address.
-  __ movq(RAX, Address(RSP, RAX, TIMES_4,
-                       compiler_frame_layout.saved_below_pc() * kWordSize));
+  __ movq(RAX,
+          Address(RSP, RAX, TIMES_4,
+                  compiler::target::frame_layout.saved_below_pc() * kWordSize));
   // Preserve IC data and arguments descriptor.
   __ pushq(RBX);
   __ pushq(R10);
@@ -826,7 +834,6 @@
   __ andq(RDI, Immediate(-kObjectAlignment));
 
   const intptr_t cid = kArrayCid;
-  NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
   __ movq(RAX, Address(THR, Thread::top_offset()));
 
   // RDI: allocation size.
@@ -845,7 +852,7 @@
   // next object start and initialize the object.
   __ movq(Address(THR, Thread::top_offset()), RCX);
   __ addq(RAX, Immediate(kHeapObjectTag));
-  NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, RDI, space));
+  NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, RDI));
   // Initialize the tags.
   // RAX: new object start as a tagged pointer.
   // RDI: allocation size.
@@ -1229,7 +1236,6 @@
     // Now allocate the object.
     // R10: number of context variables.
     const intptr_t cid = kContextCid;
-    NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
     __ movq(RAX, Address(THR, Thread::top_offset()));
     __ addq(R13, RAX);
     // Check if the allocation fits into the remaining space.
@@ -1253,7 +1259,7 @@
     __ subq(R13, RAX);
     __ addq(RAX, Immediate(kHeapObjectTag));
     // Generate isolate-independent code to allow sharing between isolates.
-    NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, R13, space));
+    NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, R13));
 
     // Calculate the size tag.
     // RAX: new object.
@@ -1546,7 +1552,6 @@
     // Allocate the object and update top to point to
     // next object start and initialize the allocated object.
     // RDX: instantiated type arguments (if is_cls_parameterized).
-    NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
     __ movq(RAX, Address(THR, Thread::top_offset()));
     __ leaq(RBX, Address(RAX, instance_size));
     // Check if the allocation fits into the remaining space.
@@ -1559,7 +1564,7 @@
       __ j(ABOVE_EQUAL, &slow_case);
     }
     __ movq(Address(THR, Thread::top_offset()), RBX);
-    NOT_IN_PRODUCT(__ UpdateAllocationStats(cls.id(), space));
+    NOT_IN_PRODUCT(__ UpdateAllocationStats(cls.id()));
 
     // RAX: new object start (untagged).
     // RBX: next object start.
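
Editorial note: the x64 hunks also rename kReceiverOffset to kReceiverOffsetInWords and
scale it by compiler::target::kWordSize, making the word-vs-byte units explicit. An
illustration of why the qualified constant matters once host and target diverge (the
concrete sizes below are a hypothetical cross-compilation scenario):

  // e.g. a 64-bit host compiling for a 32-bit target:
  //   dart::kWordSize                   == 8   (host)
  //   dart::compiler::target::kWordSize == 4   (target)
  // Frame math baked into generated code must use the target value:
  const intptr_t receiver_offset_in_bytes =
      compiler::target::kWordSize * kReceiverOffsetInWords;
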
diff --git a/runtime/vm/stub_code_x64_test.cc b/runtime/vm/stub_code_x64_test.cc
index 7739541..53917bb 100644
--- a/runtime/vm/stub_code_x64_test.cc
+++ b/runtime/vm/stub_code_x64_test.cc
@@ -55,8 +55,8 @@
                                               const Code& code);
   const int length = 10;
   const char* kName = "Test_CallRuntimeStubCode";
-  ObjectPoolWrapper object_pool_wrapper;
-  Assembler assembler(&object_pool_wrapper);
+  ObjectPoolBuilder object_pool_builder;
+  Assembler assembler(&object_pool_builder);
   GenerateCallToCallRuntimeStub(&assembler, length);
   const Code& code = Code::Handle(
       Code::FinalizeCode(*CreateFunction("Test_CallRuntimeStubCode"), nullptr,
@@ -96,8 +96,8 @@
   intptr_t rhs_index_value = 2;
   intptr_t length_value = 2;
   const char* kName = "Test_CallLeafRuntimeStubCode";
-  ObjectPoolWrapper object_pool_wrapper;
-  Assembler assembler(&object_pool_wrapper);
+  ObjectPoolBuilder object_pool_builder;
+  Assembler assembler(&object_pool_builder);
   GenerateCallToCallLeafRuntimeStub(&assembler, str_value, lhs_index_value,
                                     rhs_index_value, length_value);
   const Code& code = Code::Handle(Code::FinalizeCode(
diff --git a/runtime/vm/thread.cc b/runtime/vm/thread.cc
index d4b98cc..866af84 100644
--- a/runtime/vm/thread.cc
+++ b/runtime/vm/thread.cc
@@ -82,7 +82,6 @@
       api_top_scope_(NULL),
       no_callback_scope_depth_(0),
 #if defined(DEBUG)
-      top_handle_scope_(NULL),
       no_safepoint_scope_depth_(0),
 #endif
       reusable_handles_(),
@@ -858,50 +857,6 @@
   return total;
 }
 
-bool Thread::IsValidZoneHandle(Dart_Handle object) const {
-  Zone* zone = this->zone();
-  while (zone != NULL) {
-    if (zone->handles()->IsValidZoneHandle(reinterpret_cast<uword>(object))) {
-      return true;
-    }
-    zone = zone->previous();
-  }
-  return false;
-}
-
-intptr_t Thread::CountZoneHandles() const {
-  intptr_t count = 0;
-  Zone* zone = this->zone();
-  while (zone != NULL) {
-    count += zone->handles()->CountZoneHandles();
-    zone = zone->previous();
-  }
-  ASSERT(count >= 0);
-  return count;
-}
-
-bool Thread::IsValidScopedHandle(Dart_Handle object) const {
-  Zone* zone = this->zone();
-  while (zone != NULL) {
-    if (zone->handles()->IsValidScopedHandle(reinterpret_cast<uword>(object))) {
-      return true;
-    }
-    zone = zone->previous();
-  }
-  return false;
-}
-
-intptr_t Thread::CountScopedHandles() const {
-  intptr_t count = 0;
-  Zone* zone = this->zone();
-  while (zone != NULL) {
-    count += zone->handles()->CountScopedHandles();
-    zone = zone->previous();
-  }
-  ASSERT(count >= 0);
-  return count;
-}
-
 int Thread::ZoneSizeInBytes() const {
   int total = 0;
   ApiLocalScope* scope = api_top_scope_;
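
Editorial note: these four walkers are moved, not deleted; they reappear verbatim on
ThreadState in the thread_state.cc hunk below. Callers are unchanged because Thread
derives from ThreadState, e.g. (hypothetical helper, shown only to illustrate the
dispatch):

  bool IsLiveHandle(Thread* thread, Dart_Handle handle) {
    // Resolves through the inherited ThreadState implementations.
    return thread->IsValidZoneHandle(handle) ||
           thread->IsValidScopedHandle(handle);
  }
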
diff --git a/runtime/vm/thread.h b/runtime/vm/thread.h
index a4fbdbc..847aa85 100644
--- a/runtime/vm/thread.h
+++ b/runtime/vm/thread.h
@@ -5,6 +5,10 @@
 #ifndef RUNTIME_VM_THREAD_H_
 #define RUNTIME_VM_THREAD_H_
 
+#if defined(SHOULD_NOT_INCLUDE_RUNTIME)
+#error "Should not include runtime"
+#endif
+
 #include "include/dart_api.h"
 #include "platform/assert.h"
 #include "platform/atomic.h"
@@ -16,6 +20,7 @@
 #include "vm/heap/pointer_block.h"
 #include "vm/os_thread.h"
 #include "vm/runtime_entry_list.h"
+#include "vm/thread_stack_resource.h"
 #include "vm/thread_state.h"
 
 namespace dart {
@@ -294,6 +299,14 @@
     return ++stack_overflow_count_;
   }
 
+#if !defined(TARGET_ARCH_DBC)
+  static uword stack_overflow_shared_stub_entry_point_offset(bool fpu_regs) {
+    return fpu_regs
+               ? stack_overflow_shared_with_fpu_regs_entry_point_offset()
+               : stack_overflow_shared_without_fpu_regs_entry_point_offset();
+  }
+#endif
+
   TaskKind task_kind() const { return task_kind_; }
 
   // Retrieves and clears the stack overflow flags.  These are set by
@@ -449,20 +462,6 @@
   bool bump_allocate() const { return bump_allocate_; }
   void set_bump_allocate(bool b) { bump_allocate_ = b; }
 
-  HandleScope* top_handle_scope() const {
-#if defined(DEBUG)
-    return top_handle_scope_;
-#else
-    return 0;
-#endif
-  }
-
-  void set_top_handle_scope(HandleScope* handle_scope) {
-#if defined(DEBUG)
-    top_handle_scope_ = handle_scope;
-#endif
-  }
-
   int32_t no_safepoint_scope_depth() const {
 #if defined(DEBUG)
     return no_safepoint_scope_depth_;
@@ -706,7 +705,7 @@
     execution_state_ = static_cast<uint32_t>(state);
   }
 
-  bool MayAllocateHandles() {
+  virtual bool MayAllocateHandles() {
     return (execution_state() == kThreadInVM) ||
            (execution_state() == kThreadInGenerated);
   }
@@ -764,10 +763,6 @@
   bool IsValidHandle(Dart_Handle object) const;
   bool IsValidLocalHandle(Dart_Handle object) const;
   intptr_t CountLocalHandles() const;
-  bool IsValidZoneHandle(Dart_Handle object) const;
-  intptr_t CountZoneHandles() const;
-  bool IsValidScopedHandle(Dart_Handle object) const;
-  intptr_t CountScopedHandles() const;
   int ZoneSizeInBytes() const;
   void UnwindScopes(uword stack_marker);
 
@@ -851,7 +846,6 @@
   ApiLocalScope* api_top_scope_;
   int32_t no_callback_scope_depth_;
 #if defined(DEBUG)
-  HandleScope* top_handle_scope_;
   int32_t no_safepoint_scope_depth_;
 #endif
   VMHandles reusable_handles_;
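
Editorial note: the new include guard gives the decoupling teeth. A translation unit that
defines SHOULD_NOT_INCLUDE_RUNTIME before its includes can no longer reach thread.h, or
any header that transitively pulls it in. A sketch of the intended usage in a
compiler-only source file:

  // At the top of a compiler-only .cc file:
  #define SHOULD_NOT_INCLUDE_RUNTIME

  #include "vm/compiler/assembler/assembler.h"  // OK, compiler-side header.
  // #include "vm/thread.h"  // would now fail:
  //                         // #error "Should not include runtime"
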
diff --git a/runtime/vm/thread_state.cc b/runtime/vm/thread_state.cc
index d5e37e3..00deaf7 100644
--- a/runtime/vm/thread_state.cc
+++ b/runtime/vm/thread_state.cc
@@ -4,6 +4,7 @@
 
 #include "vm/thread_state.h"
 
+#include "vm/handles_impl.h"
 #include "vm/zone.h"
 
 namespace dart {
@@ -38,4 +39,48 @@
   return false;
 }
 
+bool ThreadState::IsValidZoneHandle(Dart_Handle object) const {
+  Zone* zone = this->zone();
+  while (zone != NULL) {
+    if (zone->handles()->IsValidZoneHandle(reinterpret_cast<uword>(object))) {
+      return true;
+    }
+    zone = zone->previous();
+  }
+  return false;
+}
+
+intptr_t ThreadState::CountZoneHandles() const {
+  intptr_t count = 0;
+  Zone* zone = this->zone();
+  while (zone != NULL) {
+    count += zone->handles()->CountZoneHandles();
+    zone = zone->previous();
+  }
+  ASSERT(count >= 0);
+  return count;
+}
+
+bool ThreadState::IsValidScopedHandle(Dart_Handle object) const {
+  Zone* zone = this->zone();
+  while (zone != NULL) {
+    if (zone->handles()->IsValidScopedHandle(reinterpret_cast<uword>(object))) {
+      return true;
+    }
+    zone = zone->previous();
+  }
+  return false;
+}
+
+intptr_t ThreadState::CountScopedHandles() const {
+  intptr_t count = 0;
+  Zone* zone = this->zone();
+  while (zone != NULL) {
+    count += zone->handles()->CountScopedHandles();
+    zone = zone->previous();
+  }
+  ASSERT(count >= 0);
+  return count;
+}
+
 }  // namespace dart
diff --git a/runtime/vm/thread_state.h b/runtime/vm/thread_state.h
index 85106d2..90cd58e 100644
--- a/runtime/vm/thread_state.h
+++ b/runtime/vm/thread_state.h
@@ -5,10 +5,12 @@
 #ifndef RUNTIME_VM_THREAD_STATE_H_
 #define RUNTIME_VM_THREAD_STATE_H_
 
+#include "include/dart_api.h"
 #include "vm/os_thread.h"
 
 namespace dart {
 
+class HandleScope;
 class LongJumpScope;
 class Zone;
 
@@ -35,7 +37,7 @@
   }
 
   explicit ThreadState(bool is_os_thread);
-  ~ThreadState();
+  virtual ~ThreadState();
 
   // OSThread corresponding to this thread.
   OSThread* os_thread() const { return os_thread_; }
@@ -72,6 +74,27 @@
   LongJumpScope* long_jump_base() const { return long_jump_base_; }
   void set_long_jump_base(LongJumpScope* value) { long_jump_base_ = value; }
 
+  bool IsValidZoneHandle(Dart_Handle object) const;
+  intptr_t CountZoneHandles() const;
+  bool IsValidScopedHandle(Dart_Handle object) const;
+  intptr_t CountScopedHandles() const;
+
+  virtual bool MayAllocateHandles() = 0;
+
+  HandleScope* top_handle_scope() const {
+#if defined(DEBUG)
+    return top_handle_scope_;
+#else
+    return nullptr;
+#endif
+  }
+
+  void set_top_handle_scope(HandleScope* handle_scope) {
+#if defined(DEBUG)
+    top_handle_scope_ = handle_scope;
+#endif
+  }
+
  private:
   void set_zone(Zone* zone) { zone_ = zone; }
 
@@ -82,6 +105,11 @@
   StackResource* top_resource_ = nullptr;
   LongJumpScope* long_jump_base_ = nullptr;
 
+  // This field is only used in DEBUG builds, but we don't exclude it,
+  // because that would give RELEASE and DEBUG builds different offsets
+  // for the Thread fields that are accessed from generated code.
+  HandleScope* top_handle_scope_ = nullptr;
+
   friend class ApiZone;
   friend class StackZone;
 };
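
Editorial note: the comment on top_handle_scope_ is the key constraint here. Generated
code addresses Thread fields by fixed byte offsets, so a DEBUG-only field would shift
every later field between build modes. A generic illustration of the hazard (hypothetical
struct, not VM code):

  #include <cstdint>

  struct Example {
  #if defined(DEBUG)
    void* debug_only_;  // present only in DEBUG builds...
  #endif
    uintptr_t top_;     // ...so this field's offset differs per build mode,
                        // while generated code bakes in a single offset.
  };
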
diff --git a/runtime/vm/type_testing_stubs.h b/runtime/vm/type_testing_stubs.h
index 2f091d1..7c1913a 100644
--- a/runtime/vm/type_testing_stubs.h
+++ b/runtime/vm/type_testing_stubs.h
@@ -10,7 +10,6 @@
 
 namespace dart {
 
-class ObjectPoolWrapper;
 
 class TypeTestingStubNamer {
  public:
diff --git a/runtime/vm/unit_test.h b/runtime/vm/unit_test.h
index 6c53746..e8f1391 100644
--- a/runtime/vm/unit_test.h
+++ b/runtime/vm/unit_test.h
@@ -92,8 +92,8 @@
       bool use_far_branches = false;                                           \
       LongJumpScope jump;                                                      \
       if (setjmp(*jump.Set()) == 0) {                                          \
-        ObjectPoolWrapper object_pool_wrapper;                                 \
-        Assembler assembler(&object_pool_wrapper, use_far_branches);           \
+        ObjectPoolBuilder object_pool_builder;                                 \
+        Assembler assembler(&object_pool_builder, use_far_branches);           \
         AssemblerTest test("" #name, &assembler);                              \
         AssemblerTestGenerate##name(test.assembler());                         \
         test.Assemble();                                                       \
@@ -105,8 +105,8 @@
     const Error& error = Error::Handle(Thread::Current()->sticky_error());     \
     if (error.raw() == Object::branch_offset_error().raw()) {                  \
       bool use_far_branches = true;                                            \
-      ObjectPoolWrapper object_pool_wrapper;                                   \
-      Assembler assembler(&object_pool_wrapper, use_far_branches);             \
+      ObjectPoolBuilder object_pool_builder;                                   \
+      Assembler assembler(&object_pool_builder, use_far_branches);             \
       AssemblerTest test("" #name, &assembler);                                \
       AssemblerTestGenerate##name(test.assembler());                           \
       test.Assemble();                                                         \
@@ -198,7 +198,9 @@
 namespace dart {
 
 // Forward declarations.
+namespace compiler {
 class Assembler;
+}
 class CodeGenerator;
 class VirtualMemory;
 
diff --git a/runtime/vm/vm_sources.gni b/runtime/vm/vm_sources.gni
index 9e4dc96..9732770 100644
--- a/runtime/vm/vm_sources.gni
+++ b/runtime/vm/vm_sources.gni
@@ -22,12 +22,14 @@
   "bootstrap_natives.h",
   "class_finalizer.cc",
   "class_finalizer.h",
+  "class_id.h",
   "class_table.cc",
   "class_table.h",
   "clustered_snapshot.cc",
   "clustered_snapshot.h",
   "code_descriptors.cc",
   "code_descriptors.h",
+  "code_entry_kind.h",
   "code_observers.cc",
   "code_observers.h",
   "code_patcher.cc",
@@ -94,9 +96,11 @@
   "flag_list.h",
   "flags.cc",
   "flags.h",
+  "frame_layout.h",
   "gdb_helpers.cc",
   "globals.h",
   "growable_array.h",
+  "handle_visitor.h",
   "handles.cc",
   "handles.h",
   "handles_impl.h",
@@ -202,6 +206,7 @@
   "os_win.cc",
   "parser.cc",
   "parser.h",
+  "pointer_tagging.h",
   "port.cc",
   "port.h",
   "proccpuinfo.cc",