[vm/compiler] Refactor representation of stores and loads in IL.

Make StoreInstanceField and LoadField instructions use a single uniform
abstraction: Slot (previously called NativeFieldDesc), which represents
either a real Dart field (i.e. a field that has a corresponding Field
object) or a native VM field that does not have one.

This refactoring eliminates raw stores/loads that previously used bare
offsets: now we always know which kind of slot we are accessing, which
yields better aliasing information.
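
For illustration, a minimal sketch of what call sites look like after this
change (hedged: Z, instance, value, field, parsed_function and token_pos are
assumed to be in scope; only Slot::Get and the two constructors below are
taken from this CL):

    // Look up the canonical Slot for a Dart field; the Slot carries the
    // offset plus the metadata that used to live on the instruction.
    const Slot& slot = Slot::Get(field, parsed_function);
    Definition* load = new (Z)
        LoadFieldInstr(instance->CopyWithType(), slot, token_pos);
    Definition* store = new (Z) StoreInstanceFieldInstr(
        slot, instance->CopyWithType(), value, kEmitStoreBarrier, token_pos,
        StoreInstanceFieldInstr::Kind::kInitializing);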

Change-Id: I2f48332d58258219565bd961764e8cc9dd4d75ce
Reviewed-on: https://dart-review.googlesource.com/c/74582
Commit-Queue: Vyacheslav Egorov <vegorov@google.com>
Reviewed-by: Alexander Markov <alexmarkov@google.com>
diff --git a/pkg/vm/lib/bytecode/assembler.dart b/pkg/vm/lib/bytecode/assembler.dart
index 2bf1127..54a227b 100644
--- a/pkg/vm/lib/bytecode/assembler.dart
+++ b/pkg/vm/lib/bytecode/assembler.dart
@@ -363,8 +363,8 @@
     emitWord(_encodeD(Opcode.kAllocateContext, rd));
   }
 
-  void emitCloneContext() {
-    emitWord(_encode0(Opcode.kCloneContext));
+  void emitCloneContext(int rd) {
+    emitWord(_encodeD(Opcode.kCloneContext, rd));
   }
 
   void emitMoveSpecial(SpecialIndex ra, int rx) {
diff --git a/pkg/vm/lib/bytecode/dbc.dart b/pkg/vm/lib/bytecode/dbc.dart
index aef5aa9..b86bd57 100644
--- a/pkg/vm/lib/bytecode/dbc.dart
+++ b/pkg/vm/lib/bytecode/dbc.dart
@@ -175,7 +175,7 @@
   Opcode.kAllocateContext: const Format(
       Encoding.kD, const [Operand.imm, Operand.none, Operand.none]),
   Opcode.kCloneContext: const Format(
-      Encoding.k0, const [Operand.none, Operand.none, Operand.none]),
+      Encoding.kD, const [Operand.imm, Operand.none, Operand.none]),
   Opcode.kLoadContextParent: const Format(
       Encoding.k0, const [Operand.none, Operand.none, Operand.none]),
   Opcode.kStoreContextParent: const Format(
diff --git a/pkg/vm/lib/bytecode/gen_bytecode.dart b/pkg/vm/lib/bytecode/gen_bytecode.dart
index 36ef9d4..d2a0a23 100644
--- a/pkg/vm/lib/bytecode/gen_bytecode.dart
+++ b/pkg/vm/lib/bytecode/gen_bytecode.dart
@@ -2461,7 +2461,7 @@
 
       if (locals.currentContextSize > 0) {
         asm.emitPush(locals.contextVarIndexInFrame);
-        asm.emitCloneContext();
+        asm.emitCloneContext(locals.currentContextSize);
         asm.emitPopLocal(locals.contextVarIndexInFrame);
       }
 
diff --git a/pkg/vm/testcases/bytecode/async.dart.expect b/pkg/vm/testcases/bytecode/async.dart.expect
index 7b539ab..174ed5c 100644
--- a/pkg/vm/testcases/bytecode/async.dart.expect
+++ b/pkg/vm/testcases/bytecode/async.dart.expect
@@ -1015,7 +1015,7 @@
   Jump                 L5
 L3:
   Push                 r4
-  CloneContext
+  CloneContext         2
   PopLocal             r4
   Push                 r4
   Push                 r4
diff --git a/pkg/vm/testcases/bytecode/closures.dart.expect b/pkg/vm/testcases/bytecode/closures.dart.expect
index 9bef3f8..b76eb92 100644
--- a/pkg/vm/testcases/bytecode/closures.dart.expect
+++ b/pkg/vm/testcases/bytecode/closures.dart.expect
@@ -874,7 +874,7 @@
   InstanceCall         2, CP#24
   Drop1
   Push                 r0
-  CloneContext
+  CloneContext         1
   PopLocal             r0
   Push                 r0
   Push                 r0
diff --git a/runtime/vm/compiler/backend/compile_type.h b/runtime/vm/compiler/backend/compile_type.h
new file mode 100644
index 0000000..86f148a
--- /dev/null
+++ b/runtime/vm/compiler/backend/compile_type.h
@@ -0,0 +1,215 @@
+// Copyright (c) 2018, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#ifndef RUNTIME_VM_COMPILER_BACKEND_COMPILE_TYPE_H_
+#define RUNTIME_VM_COMPILER_BACKEND_COMPILE_TYPE_H_
+
+#include "vm/object.h"
+#include "vm/thread.h"
+
+namespace dart {
+
+class BufferFormatter;
+
+// CompileType describes the type of a value produced by a definition.
+//
+// It captures the following properties:
+//    - whether the value can potentially be null or is definitely not
+//      null;
+//    - the concrete class id of the value, or kDynamicCid if unknown
+//      statically;
+//    - an abstract supertype of the value; the concrete runtime type of
+//      the value is guaranteed to be a subtype of this type.
+
+// Values of CompileType form a lattice with the None type as the bottom and
+// the nullable Dynamic type as the top element. The Union method provides a
+// join operation for the lattice.
+class CompileType : public ZoneAllocated {
+ public:
+  static const bool kNullable = true;
+  static const bool kNonNullable = false;
+
+  CompileType(bool is_nullable, intptr_t cid, const AbstractType* type)
+      : is_nullable_(is_nullable), cid_(cid), type_(type) {}
+
+  CompileType(const CompileType& other)
+      : ZoneAllocated(),
+        is_nullable_(other.is_nullable_),
+        cid_(other.cid_),
+        type_(other.type_) {}
+
+  CompileType& operator=(const CompileType& other) {
+    is_nullable_ = other.is_nullable_;
+    cid_ = other.cid_;
+    type_ = other.type_;
+    return *this;
+  }
+
+  bool is_nullable() const { return is_nullable_; }
+
+  // Return a type such that the concrete runtime type of the value is
+  // guaranteed to be a subtype of it.
+  const AbstractType* ToAbstractType();
+
+  // Return a class id that is either kDynamicCid or the class id the value
+  // is guaranteed to have at runtime.
+  intptr_t ToCid();
+
+  // Return a class id that is either kDynamicCid or a class id such that
+  // the value is guaranteed at runtime to be either null or to have that
+  // class id.
+  intptr_t ToNullableCid();
+
+  // Return true if the value is guaranteed to be not-null or is known to be
+  // always null.
+  bool HasDecidableNullability();
+
+  // Return true if the value is known to be always null.
+  bool IsNull();
+
+  // Return true if this type is more specific than the given type.
+  bool IsMoreSpecificThan(const AbstractType& other);
+
+  // Return true if value of this type is assignable to a location of the
+  // given type.
+  bool IsAssignableTo(const AbstractType& type) {
+    bool is_instance;
+    return CanComputeIsInstanceOf(type, kNullable, &is_instance) && is_instance;
+  }
+
+  // Create a new CompileType representing the given combination of class id
+  // and abstract type. The pair is assumed to be coherent.
+  static CompileType Create(intptr_t cid, const AbstractType& type);
+
+  CompileType CopyNonNullable() const {
+    return CompileType(kNonNullable, kIllegalCid, type_);
+  }
+
+  static CompileType CreateNullable(bool is_nullable, intptr_t cid) {
+    return CompileType(is_nullable, cid, NULL);
+  }
+
+  // Create a new CompileType representing the given abstract type. By
+  // default values are assumed to be nullable.
+  static CompileType FromAbstractType(const AbstractType& type,
+                                      bool is_nullable = kNullable);
+
+  // Create a new CompileType representing a value with the given class id.
+  // Resulting CompileType is nullable only if cid is kDynamicCid or kNullCid.
+  static CompileType FromCid(intptr_t cid);
+
+  // Create the None CompileType. It is the bottom of the lattice and is used
+  // to represent the type of a phi that has not yet been inferred.
+  static CompileType None() {
+    return CompileType(kNullable, kIllegalCid, NULL);
+  }
+
+  // Create the Dynamic CompileType. It is the top of the lattice and is used
+  // to represent an unknown type.
+  static CompileType Dynamic();
+
+  static CompileType Null();
+
+  // Create non-nullable Bool type.
+  static CompileType Bool();
+
+  // Create non-nullable Int type.
+  static CompileType Int();
+
+  // Create nullable Int type.
+  static CompileType NullableInt();
+
+  // Create non-nullable Smi type.
+  static CompileType Smi();
+
+  // Create nullable Smi type.
+  static CompileType NullableSmi() {
+    return CreateNullable(kNullable, kSmiCid);
+  }
+
+  // Create nullable Mint type.
+  static CompileType NullableMint() {
+    return CreateNullable(kNullable, kMintCid);
+  }
+
+  // Create non-nullable Double type.
+  static CompileType Double();
+
+  // Create nullable Double type.
+  static CompileType NullableDouble();
+
+  // Create non-nullable String type.
+  static CompileType String();
+
+  // Perform a join operation over the type lattice.
+  void Union(CompileType* other);
+
+  // Refine the old type with a newly inferred type (it could be more or less
+  // specific, or even unrelated to the old type in the case of unreachable
+  // code). May return 'old_type', 'new_type', or create a new CompileType
+  // instance.
+  static CompileType* ComputeRefinedType(CompileType* old_type,
+                                         CompileType* new_type);
+
+  // Return true if this and other types are the same.
+  bool IsEqualTo(CompileType* other) {
+    return (is_nullable_ == other->is_nullable_) &&
+           (ToNullableCid() == other->ToNullableCid()) &&
+           (ToAbstractType()->Equals(*other->ToAbstractType()));
+  }
+
+  bool IsNone() const { return (cid_ == kIllegalCid) && (type_ == NULL); }
+
+  // Return true if value of this type is a non-nullable int.
+  bool IsInt() { return !is_nullable() && IsNullableInt(); }
+
+  // Return true if value of this type is a non-nullable double.
+  bool IsDouble() { return !is_nullable() && IsNullableDouble(); }
+
+  // Return true if value of this type is either int or null.
+  bool IsNullableInt() {
+    if ((cid_ == kSmiCid) || (cid_ == kMintCid)) {
+      return true;
+    }
+    if ((cid_ == kIllegalCid) || (cid_ == kDynamicCid)) {
+      return (type_ != NULL) && ((type_->IsIntType() || type_->IsSmiType()));
+    }
+    return false;
+  }
+
+  // Return true if value of this type is either Smi or null.
+  bool IsNullableSmi() {
+    if (cid_ == kSmiCid) {
+      return true;
+    }
+    if ((cid_ == kIllegalCid) || (cid_ == kDynamicCid)) {
+      return type_ != nullptr && type_->IsSmiType();
+    }
+    return false;
+  }
+
+  // Return true if value of this type is either double or null.
+  bool IsNullableDouble() {
+    if (cid_ == kDoubleCid) {
+      return true;
+    }
+    if ((cid_ == kIllegalCid) || (cid_ == kDynamicCid)) {
+      return (type_ != NULL) && type_->IsDoubleType();
+    }
+    return false;
+  }
+
+  void PrintTo(BufferFormatter* f) const;
+  const char* ToCString() const;
+
+ private:
+  bool CanComputeIsInstanceOf(const AbstractType& type,
+                              bool is_nullable,
+                              bool* is_instance);
+
+  bool is_nullable_;
+  intptr_t cid_;
+  const AbstractType* type_;
+};
+
+}  // namespace dart
+
+#endif  // RUNTIME_VM_COMPILER_BACKEND_COMPILE_TYPE_H_
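
A small worked example of the lattice operations declared above (a sketch:
the exact join results are assumptions based on the comments, not verified
against the type propagator):

    CompileType t = CompileType::Int();   // non-nullable int
    CompileType n = CompileType::Null();  // the type of null
    t.Union(&n);                          // in-place join over the lattice
    // After joining with Null the value may be null, so only the nullable
    // predicate is expected to hold.
    ASSERT(t.IsNullableInt());
    ASSERT(!t.IsInt());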
diff --git a/runtime/vm/compiler/backend/constant_propagator.cc b/runtime/vm/compiler/backend/constant_propagator.cc
index 01e7854..9342167 100644
--- a/runtime/vm/compiler/backend/constant_propagator.cc
+++ b/runtime/vm/compiler/backend/constant_propagator.cc
@@ -796,8 +796,7 @@
 
 void ConstantPropagator::VisitLoadField(LoadFieldInstr* instr) {
   Value* instance = instr->instance();
-  if ((instr->native_field() != nullptr) &&
-      (instr->native_field()->kind() == NativeFieldDesc::kArray_length) &&
+  if ((instr->slot().kind() == Slot::Kind::kArray_length) &&
       instance->definition()->OriginalDefinition()->IsCreateArray()) {
     Value* num_elements = instance->definition()
                               ->OriginalDefinition()
diff --git a/runtime/vm/compiler/backend/flow_graph.cc b/runtime/vm/compiler/backend/flow_graph.cc
index de1c670..1be6986 100644
--- a/runtime/vm/compiler/backend/flow_graph.cc
+++ b/runtime/vm/compiler/backend/flow_graph.cc
@@ -549,10 +549,9 @@
   const Class& cls = Class::Handle(
       zone(), Isolate::Current()->class_table()->At(receiver_cid));
 
-  Definition* load_type_args = new (zone())
-      LoadFieldInstr(call->Receiver()->CopyWithType(),
-                     NativeFieldDesc::GetTypeArgumentsFieldFor(zone(), cls),
-                     call->token_pos());
+  Definition* load_type_args = new (zone()) LoadFieldInstr(
+      call->Receiver()->CopyWithType(),
+      Slot::GetTypeArgumentsSlotFor(thread(), cls), call->token_pos());
   InsertBefore(call, load_type_args, call->env(), FlowGraph::kValue);
 
   const AbstractType& type =
diff --git a/runtime/vm/compiler/backend/il.cc b/runtime/vm/compiler/backend/il.cc
index 23a4b47..d7f3af6 100644
--- a/runtime/vm/compiler/backend/il.cc
+++ b/runtime/vm/compiler/backend/il.cc
@@ -735,103 +735,19 @@
   return mask;
 }
 
-const NativeFieldDesc* NativeFieldDesc::Get(Kind kind) {
-  static const NativeFieldDesc fields[] = {
-#define IMMUTABLE true
-#define MUTABLE false
-#define DEFINE_NATIVE_FIELD(ClassName, FieldName, cid, mutability)             \
-  NativeFieldDesc(k##ClassName##_##FieldName, ClassName::FieldName##_offset(), \
-                  k##cid##Cid, mutability),
-
-      NATIVE_FIELDS_LIST(DEFINE_NATIVE_FIELD)
-
-#undef DEFINE_FIELD
-#undef MUTABLE
-#undef IMMUTABLE
-  };
-
-  return &fields[kind];
-}
-
-const NativeFieldDesc* NativeFieldDesc::GetLengthFieldForArrayCid(
-    intptr_t array_cid) {
-  if (RawObject::IsExternalTypedDataClassId(array_cid) ||
-      RawObject::IsTypedDataClassId(array_cid)) {
-    return Get(kTypedData_length);
-  }
-
-  switch (array_cid) {
-    case kGrowableObjectArrayCid:
-      return Get(kGrowableObjectArray_length);
-
-    case kOneByteStringCid:
-    case kTwoByteStringCid:
-    case kExternalOneByteStringCid:
-    case kExternalTwoByteStringCid:
-      return Get(kString_length);
-
-    case kArrayCid:
-    case kImmutableArrayCid:
-      return Get(kArray_length);
-
-    default:
-      UNREACHABLE();
-      return nullptr;
-  }
-}
-
-const NativeFieldDesc* NativeFieldDesc::GetTypeArgumentsField(Zone* zone,
-                                                              intptr_t offset) {
-  // TODO(vegorov) consider caching type arguments fields for specific classes
-  // in some sort of a flow-graph specific cache.
-  ASSERT(offset != Class::kNoTypeArguments);
-  return new (zone) NativeFieldDesc(kTypeArguments, offset, kDynamicCid,
-                                    /*immutable=*/true);
-}
-
-const NativeFieldDesc* NativeFieldDesc::GetTypeArgumentsFieldFor(
-    Zone* zone,
-    const Class& cls) {
-  return GetTypeArgumentsField(zone, cls.type_arguments_field_offset());
-}
-
-RawAbstractType* NativeFieldDesc::type() const {
-  if (cid() == kSmiCid) {
-    return Type::SmiType();
-  }
-
-  return Type::DynamicType();
-}
-
-const char* NativeFieldDesc::name() const {
-  switch (kind()) {
-#define HANDLE_CASE(ClassName, FieldName, cid, mutability)                     \
-  case k##ClassName##_##FieldName:                                             \
-    return #ClassName "." #FieldName;
-
-    NATIVE_FIELDS_LIST(HANDLE_CASE)
-
-#undef HANDLE_CASE
-    case kTypeArguments:
-      return ":type_arguments";
-  }
-  UNREACHABLE();
-  return nullptr;
-}
-
 bool LoadFieldInstr::IsUnboxedLoad() const {
-  return FLAG_unbox_numeric_fields && (field() != NULL) &&
-         FlowGraphCompiler::IsUnboxedField(*field());
+  return FLAG_unbox_numeric_fields && slot().IsDartField() &&
+         FlowGraphCompiler::IsUnboxedField(slot().field());
 }
 
 bool LoadFieldInstr::IsPotentialUnboxedLoad() const {
-  return FLAG_unbox_numeric_fields && (field() != NULL) &&
-         FlowGraphCompiler::IsPotentialUnboxedField(*field());
+  return FLAG_unbox_numeric_fields && slot().IsDartField() &&
+         FlowGraphCompiler::IsPotentialUnboxedField(slot().field());
 }
 
 Representation LoadFieldInstr::representation() const {
   if (IsUnboxedLoad()) {
-    const intptr_t cid = field()->UnboxedFieldCid();
+    const intptr_t cid = slot().field().UnboxedFieldCid();
     switch (cid) {
       case kDoubleCid:
         return kUnboxedDouble;
@@ -847,20 +763,20 @@
 }
 
 bool StoreInstanceFieldInstr::IsUnboxedStore() const {
-  return FLAG_unbox_numeric_fields && !field().IsNull() &&
-         FlowGraphCompiler::IsUnboxedField(field());
+  return FLAG_unbox_numeric_fields && slot().IsDartField() &&
+         FlowGraphCompiler::IsUnboxedField(slot().field());
 }
 
 bool StoreInstanceFieldInstr::IsPotentialUnboxedStore() const {
-  return FLAG_unbox_numeric_fields && !field().IsNull() &&
-         FlowGraphCompiler::IsPotentialUnboxedField(field());
+  return FLAG_unbox_numeric_fields && slot().IsDartField() &&
+         FlowGraphCompiler::IsPotentialUnboxedField(slot().field());
 }
 
 Representation StoreInstanceFieldInstr::RequiredInputRepresentation(
     intptr_t index) const {
   ASSERT((index == 0) || (index == 1));
   if ((index == 1) && IsUnboxedStore()) {
-    const intptr_t cid = field().UnboxedFieldCid();
+    const intptr_t cid = slot().field().UnboxedFieldCid();
     switch (cid) {
       case kDoubleCid:
         return kUnboxedDouble;
@@ -963,12 +879,7 @@
 bool LoadFieldInstr::AttributesEqual(Instruction* other) const {
   LoadFieldInstr* other_load = other->AsLoadField();
   ASSERT(other_load != NULL);
-  if (field() != NULL) {
-    return (other_load->field() != NULL) &&
-           (field()->raw() == other_load->field()->raw());
-  }
-  return (other_load->field() == NULL) &&
-         (offset_in_bytes() == other_load->offset_in_bytes());
+  return &this->slot_ == &other_load->slot_;
 }
 
 Instruction* InitStaticFieldInstr::Canonicalize(FlowGraph* flow_graph) {
@@ -2573,26 +2484,37 @@
 }
 
 bool LoadFieldInstr::IsImmutableLengthLoad() const {
-  if (native_field() != nullptr) {
-    switch (native_field()->kind()) {
-      case NativeFieldDesc::kArray_length:
-      case NativeFieldDesc::kTypedData_length:
-      case NativeFieldDesc::kString_length:
-        return true;
-      case NativeFieldDesc::kGrowableObjectArray_length:
-        return false;
+  switch (slot().kind()) {
+    case Slot::Kind::kArray_length:
+    case Slot::Kind::kTypedData_length:
+    case Slot::Kind::kString_length:
+      return true;
+    case Slot::Kind::kGrowableObjectArray_length:
+      return false;
 
-      // Not length loads.
-      case NativeFieldDesc::kLinkedHashMap_index:
-      case NativeFieldDesc::kLinkedHashMap_data:
-      case NativeFieldDesc::kLinkedHashMap_hash_mask:
-      case NativeFieldDesc::kLinkedHashMap_used_data:
-      case NativeFieldDesc::kLinkedHashMap_deleted_keys:
-      case NativeFieldDesc::kArgumentsDescriptor_type_args_len:
-      case NativeFieldDesc::kTypeArguments:
-        return false;
-    }
+    // Not length loads.
+    case Slot::Kind::kLinkedHashMap_index:
+    case Slot::Kind::kLinkedHashMap_data:
+    case Slot::Kind::kLinkedHashMap_hash_mask:
+    case Slot::Kind::kLinkedHashMap_used_data:
+    case Slot::Kind::kLinkedHashMap_deleted_keys:
+    case Slot::Kind::kArgumentsDescriptor_type_args_len:
+    case Slot::Kind::kArgumentsDescriptor_positional_count:
+    case Slot::Kind::kArgumentsDescriptor_count:
+    case Slot::Kind::kTypeArguments:
+    case Slot::Kind::kGrowableObjectArray_data:
+    case Slot::Kind::kContext_parent:
+    case Slot::Kind::kClosure_context:
+    case Slot::Kind::kClosure_delayed_type_arguments:
+    case Slot::Kind::kClosure_function:
+    case Slot::Kind::kClosure_function_type_arguments:
+    case Slot::Kind::kClosure_instantiator_type_arguments:
+    case Slot::Kind::kClosure_hash:
+    case Slot::Kind::kCapturedVariable:
+    case Slot::Kind::kDartField:
+      return false;
   }
+  UNREACHABLE();
   return false;
 }
 
@@ -2622,42 +2544,54 @@
   return this;
 }
 
-bool LoadFieldInstr::Evaluate(const Object& instance, Object* result) {
-  if (native_field() != nullptr) {
-    switch (native_field()->kind()) {
-      case NativeFieldDesc::kArgumentsDescriptor_type_args_len:
-        if (instance.IsArray() && Array::Cast(instance).IsImmutable()) {
-          ArgumentsDescriptor desc(Array::Cast(instance));
-          *result = Smi::New(desc.TypeArgsLen());
-          return true;
-        }
-        return false;
+bool LoadFieldInstr::TryEvaluateLoad(const Object& instance,
+                                     const Slot& field,
+                                     Object* result) {
+  switch (field.kind()) {
+    case Slot::Kind::kDartField:
+      return TryEvaluateLoad(instance, field.field(), result);
 
-      default:
-        break;
-    }
+    case Slot::Kind::kArgumentsDescriptor_type_args_len:
+      if (instance.IsArray() && Array::Cast(instance).IsImmutable()) {
+        ArgumentsDescriptor desc(Array::Cast(instance));
+        *result = Smi::New(desc.TypeArgsLen());
+        return true;
+      }
+      return false;
+
+    default:
+      break;
   }
+  return false;
+}
 
-  if (field() == nullptr || !field()->is_final() || !instance.IsInstance()) {
+bool LoadFieldInstr::TryEvaluateLoad(const Object& instance,
+                                     const Field& field,
+                                     Object* result) {
+  if (!field.is_final() || !instance.IsInstance()) {
     return false;
   }
 
   // Check that instance really has the field which we
   // are trying to load from.
   Class& cls = Class::Handle(instance.clazz());
-  while (cls.raw() != Class::null() && cls.raw() != field()->Owner()) {
+  while (cls.raw() != Class::null() && cls.raw() != field.Owner()) {
     cls = cls.SuperClass();
   }
-  if (cls.raw() != field()->Owner()) {
+  if (cls.raw() != field.Owner()) {
     // Failed to find the field in class or its superclasses.
     return false;
   }
 
   // Object has the field: execute the load.
-  *result = Instance::Cast(instance).GetField(*field());
+  *result = Instance::Cast(instance).GetField(field);
   return true;
 }
 
+bool LoadFieldInstr::Evaluate(const Object& instance, Object* result) {
+  return TryEvaluateLoad(instance, slot(), result);
+}
+
 Definition* LoadFieldInstr::Canonicalize(FlowGraph* flow_graph) {
   if (!HasUses()) return nullptr;
 
@@ -2672,21 +2606,21 @@
         return call->ArgumentAt(1);
       }
     } else if (CreateArrayInstr* create_array = array->AsCreateArray()) {
-      if (native_field() == NativeFieldDesc::Array_length()) {
+      if (slot().kind() == Slot::Kind::kArray_length) {
         return create_array->num_elements()->definition();
       }
     } else if (LoadFieldInstr* load_array = array->AsLoadField()) {
       // For arrays with guarded lengths, replace the length load
       // with a constant.
-      if (const Field* field = load_array->field()) {
-        if (field->guarded_list_length() >= 0) {
+      const Slot& slot = load_array->slot();
+      if (slot.IsDartField()) {
+        if (slot.field().guarded_list_length() >= 0) {
           return flow_graph->GetConstant(
-              Smi::Handle(Smi::New(field->guarded_list_length())));
+              Smi::Handle(Smi::New(slot.field().guarded_list_length())));
         }
       }
     }
-  } else if (native_field() != nullptr &&
-             native_field()->kind() == NativeFieldDesc::kTypeArguments) {
+  } else if (slot().IsTypeArguments()) {
     Definition* array = instance()->definition()->OriginalDefinition();
     if (StaticCallInstr* call = array->AsStaticCall()) {
       if (call->is_known_list_constructor()) {
@@ -2698,18 +2632,24 @@
     } else if (CreateArrayInstr* create_array = array->AsCreateArray()) {
       return create_array->element_type()->definition();
     } else if (LoadFieldInstr* load_array = array->AsLoadField()) {
-      const Field* field = load_array->field();
-      // For trivially exact fields we know that type arguments match
-      // static type arguments exactly.
-      if ((field != nullptr) &&
-          field->static_type_exactness_state().IsTriviallyExact()) {
-        return flow_graph->GetConstant(TypeArguments::Handle(
-            AbstractType::Handle(field->type()).arguments()));
-      } else if (const NativeFieldDesc* native_field =
-                     load_array->native_field()) {
-        if (native_field == NativeFieldDesc::LinkedHashMap_data()) {
-          return flow_graph->constant_null();
+      const Slot& slot = load_array->slot();
+      switch (slot.kind()) {
+        case Slot::Kind::kDartField: {
+          // For trivially exact fields we know that type arguments match
+          // static type arguments exactly.
+          const Field& field = slot.field();
+          if (field.static_type_exactness_state().IsTriviallyExact()) {
+            return flow_graph->GetConstant(TypeArguments::Handle(
+                AbstractType::Handle(field.type()).arguments()));
+          }
+          break;
         }
+
+        case Slot::Kind::kLinkedHashMap_data:
+          return flow_graph->constant_null();
+
+        default:
+          break;
       }
     }
   }
@@ -2790,19 +2730,18 @@
   if (instantiator_type_args == nullptr) {
     if (LoadFieldInstr* load_type_args =
             instantiator_type_arguments()->definition()->AsLoadField()) {
-      if (load_type_args->native_field() != nullptr &&
-          load_type_args->native_field()->kind() ==
-              NativeFieldDesc::kTypeArguments) {
+      if (load_type_args->slot().IsTypeArguments()) {
         if (LoadFieldInstr* load_field = load_type_args->instance()
                                              ->definition()
                                              ->OriginalDefinition()
                                              ->AsLoadField()) {
-          if (load_field->field() != nullptr &&
-              load_field->field()
-                  ->static_type_exactness_state()
+          if (load_field->slot().IsDartField() &&
+              load_field->slot()
+                  .field()
+                  .static_type_exactness_state()
                   .IsHasExactSuperClass()) {
             instantiator_type_args = &TypeArguments::Handle(
-                Z, AbstractType::Handle(Z, load_field->field()->type())
+                Z, AbstractType::Handle(Z, load_field->slot().field().type())
                        .arguments());
           }
         }
@@ -2824,17 +2763,19 @@
       new_dst_type = TypeRef::Cast(new_dst_type).type();
     }
     new_dst_type = new_dst_type.Canonicalize();
+
+    // Successfully instantiated destination type: update the type attached
+    // to this instruction and set type arguments to null because we no
+    // longer need them (the type was instantiated).
     set_dst_type(new_dst_type);
+    instantiator_type_arguments()->BindTo(flow_graph->constant_null());
+    function_type_arguments()->BindTo(flow_graph->constant_null());
 
     if (new_dst_type.IsDynamicType() || new_dst_type.IsObjectType() ||
         (FLAG_eliminate_type_checks &&
          value()->Type()->IsAssignableTo(new_dst_type))) {
       return value()->definition();
     }
-
-    ConstantInstr* null_constant = flow_graph->constant_null();
-    instantiator_type_arguments()->BindTo(null_constant);
-    function_type_arguments()->BindTo(null_constant);
   }
   return this;
 }
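
The pointer comparison in LoadFieldInstr::AttributesEqual above deserves a
note: it is only sound if slots are canonicalized, i.e. Slot::Get returns
the same Slot instance for the same field within a compilation. A hedged
sketch of the invariant CSE relies on:

    // Two LoadField instructions read the same location iff their slots are
    // the same object; identity comparison is assumed to be sufficient
    // because slots are canonicalized.
    static bool SameLocation(LoadFieldInstr* a, LoadFieldInstr* b) {
      return &a->slot() == &b->slot();
    }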
diff --git a/runtime/vm/compiler/backend/il.h b/runtime/vm/compiler/backend/il.h
index dff225c..05bab1d 100644
--- a/runtime/vm/compiler/backend/il.h
+++ b/runtime/vm/compiler/backend/il.h
@@ -7,7 +7,9 @@
 
 #include "vm/allocation.h"
 #include "vm/code_descriptors.h"
+#include "vm/compiler/backend/compile_type.h"
 #include "vm/compiler/backend/locations.h"
+#include "vm/compiler/backend/slot.h"
 #include "vm/compiler/compiler_state.h"
 #include "vm/compiler/method_recognizer.h"
 #include "vm/flags.h"
@@ -42,203 +44,6 @@
 class UnboxIntegerInstr;
 class TypeUsageInfo;
 
-// CompileType describes type of the value produced by the definition.
-//
-// It captures the following properties:
-//    - whether value can potentially be null or it is definitely not null;
-//    - concrete class id of the value or kDynamicCid if unknown statically;
-//    - abstract super type of the value, concrete type of the value in runtime
-//      is guaranteed to be sub type of this type.
-//
-// Values of CompileType form a lattice with a None type as a bottom and a
-// nullable Dynamic type as a top element. Method Union provides a join
-// operation for the lattice.
-class CompileType : public ZoneAllocated {
- public:
-  static const bool kNullable = true;
-  static const bool kNonNullable = false;
-
-  CompileType(bool is_nullable, intptr_t cid, const AbstractType* type)
-      : is_nullable_(is_nullable), cid_(cid), type_(type) {}
-
-  CompileType(const CompileType& other)
-      : ZoneAllocated(),
-        is_nullable_(other.is_nullable_),
-        cid_(other.cid_),
-        type_(other.type_) {}
-
-  CompileType& operator=(const CompileType& other) {
-    is_nullable_ = other.is_nullable_;
-    cid_ = other.cid_;
-    type_ = other.type_;
-    return *this;
-  }
-
-  bool is_nullable() const { return is_nullable_; }
-
-  // Return type such that concrete value's type in runtime is guaranteed to
-  // be subtype of it.
-  const AbstractType* ToAbstractType();
-
-  // Return class id such that it is either kDynamicCid or in runtime
-  // value is guaranteed to have an equal class id.
-  intptr_t ToCid();
-
-  // Return class id such that it is either kDynamicCid or in runtime
-  // value is guaranteed to be either null or have an equal class id.
-  intptr_t ToNullableCid();
-
-  // Returns true if the value is guaranteed to be not-null or is known to be
-  // always null.
-  bool HasDecidableNullability();
-
-  // Returns true if the value is known to be always null.
-  bool IsNull();
-
-  // Returns true if this type is more specific than given type.
-  bool IsMoreSpecificThan(const AbstractType& other);
-
-  // Returns true if value of this type is assignable to a location of the
-  // given type.
-  bool IsAssignableTo(const AbstractType& type) {
-    bool is_instance;
-    return CanComputeIsInstanceOf(type, kNullable, &is_instance) && is_instance;
-  }
-
-  // Create a new CompileType representing given combination of class id and
-  // abstract type. The pair is assumed to be coherent.
-  static CompileType Create(intptr_t cid, const AbstractType& type);
-
-  CompileType CopyNonNullable() const {
-    return CompileType(kNonNullable, kIllegalCid, type_);
-  }
-
-  static CompileType CreateNullable(bool is_nullable, intptr_t cid) {
-    return CompileType(is_nullable, cid, NULL);
-  }
-
-  // Create a new CompileType representing given abstract type. By default
-  // values as assumed to be nullable.
-  static CompileType FromAbstractType(const AbstractType& type,
-                                      bool is_nullable = kNullable);
-
-  // Create a new CompileType representing a value with the given class id.
-  // Resulting CompileType is nullable only if cid is kDynamicCid or kNullCid.
-  static CompileType FromCid(intptr_t cid);
-
-  // Create None CompileType. It is the bottom of the lattice and is used to
-  // represent type of the phi that was not yet inferred.
-  static CompileType None() {
-    return CompileType(kNullable, kIllegalCid, NULL);
-  }
-
-  // Create Dynamic CompileType. It is the top of the lattice and is used to
-  // represent unknown type.
-  static CompileType Dynamic();
-
-  static CompileType Null();
-
-  // Create non-nullable Bool type.
-  static CompileType Bool();
-
-  // Create non-nullable Int type.
-  static CompileType Int();
-
-  // Create nullable Int type.
-  static CompileType NullableInt();
-
-  // Create non-nullable Smi type.
-  static CompileType Smi();
-
-  // Create nullable Smi type.
-  static CompileType NullableSmi() {
-    return CreateNullable(kNullable, kSmiCid);
-  }
-
-  // Create nullable Mint type.
-  static CompileType NullableMint() {
-    return CreateNullable(kNullable, kMintCid);
-  }
-
-  // Create non-nullable Double type.
-  static CompileType Double();
-
-  // Create nullable Double type.
-  static CompileType NullableDouble();
-
-  // Create non-nullable String type.
-  static CompileType String();
-
-  // Perform a join operation over the type lattice.
-  void Union(CompileType* other);
-
-  // Refine old type with newly inferred type (it could be more or less
-  // specific, or even unrelated to an old type in case of unreachable code).
-  // May return 'old_type', 'new_type' or create a new CompileType instance.
-  static CompileType* ComputeRefinedType(CompileType* old_type,
-                                         CompileType* new_type);
-
-  // Returns true if this and other types are the same.
-  bool IsEqualTo(CompileType* other) {
-    return (is_nullable_ == other->is_nullable_) &&
-           (ToNullableCid() == other->ToNullableCid()) &&
-           (ToAbstractType()->Equals(*other->ToAbstractType()));
-  }
-
-  bool IsNone() const { return (cid_ == kIllegalCid) && (type_ == NULL); }
-
-  // Returns true if value of this type is a non-nullable int.
-  bool IsInt() { return !is_nullable() && IsNullableInt(); }
-
-  // Returns true if value of this type is a non-nullable double.
-  bool IsDouble() { return !is_nullable() && IsNullableDouble(); }
-
-  // Returns true if value of this type is either int or null.
-  bool IsNullableInt() {
-    if ((cid_ == kSmiCid) || (cid_ == kMintCid)) {
-      return true;
-    }
-    if ((cid_ == kIllegalCid) || (cid_ == kDynamicCid)) {
-      return (type_ != NULL) && ((type_->IsIntType() || type_->IsSmiType()));
-    }
-    return false;
-  }
-
-  // Returns true if value of this type is either Smi or null.
-  bool IsNullableSmi() {
-    if (cid_ == kSmiCid) {
-      return true;
-    }
-    if ((cid_ == kIllegalCid) || (cid_ == kDynamicCid)) {
-      return type_ != nullptr && type_->IsSmiType();
-    }
-    return false;
-  }
-
-  // Returns true if value of this type is either double or null.
-  bool IsNullableDouble() {
-    if (cid_ == kDoubleCid) {
-      return true;
-    }
-    if ((cid_ == kIllegalCid) || (cid_ == kDynamicCid)) {
-      return (type_ != NULL) && type_->IsDoubleType();
-    }
-    return false;
-  }
-
-  void PrintTo(BufferFormatter* f) const;
-  const char* ToCString() const;
-
- private:
-  bool CanComputeIsInstanceOf(const AbstractType& type,
-                              bool is_nullable,
-                              bool* is_instance);
-
-  bool is_nullable_;
-  intptr_t cid_;
-  const AbstractType* type_;
-};
-
 class Value : public ZoneAllocated {
  public:
   // A forward iterator that allows removing the current value from the
@@ -2319,7 +2124,7 @@
   DISALLOW_COPY_AND_ASSIGN(ParameterInstr);
 };
 
-// Stores a tagged pointer to a slot accessable from a fixed register.  It has
+// Stores a tagged pointer to a slot accessible from a fixed register.  It has
 // the form:
 //
 //     base_reg[index + #constant] = value
@@ -2330,7 +2135,7 @@
 //
 // Currently this instruction pins the register to be FP.
 //
-// This lowlevel instruction is non-inlinable since it makes assumptons about
+// This low-level instruction is non-inlinable since it makes assumptions about
 // the frame.  This is asserted via `inliner.cc::CalleeGraphValidator`.
 class StoreIndexedUnsafeInstr : public TemplateDefinition<2, NoThrow> {
  public:
@@ -4297,51 +4102,76 @@
 
 enum StoreBarrierType { kNoStoreBarrier, kEmitStoreBarrier };
 
+// StoreInstanceField instruction represents a store of the given [value] into
+// the specified [slot] on the [instance] object. [emit_store_barrier] allows
+// the caller to specify whether the store needs a write barrier. [kind]
+// specifies whether this store is an initializing store, i.e. the first store
+// into a field after the allocation.
+//
+// In JIT mode a slot might be subject to the field unboxing optimization:
+// if field type profiling shows that this slot always contains a double or
+// SIMD value then the field becomes "unboxed". In this case, when storing
+// into such a field, we update the payload of the box referenced by the
+// field rather than updating the field itself.
+//
+// Note: even if [emit_store_barrier] is set to [kEmitStoreBarrier] the store
+// can still omit the barrier if it establishes that it is not needed.
+//
+// Note: stores generated from the constructor initializer list and from
+// field initializers *must* be marked as initializing. Initializing stores
+// into unboxed fields are responsible for allocating the mutable box which
+// is mutated by subsequent stores.
 class StoreInstanceFieldInstr : public TemplateDefinition<2, NoThrow> {
  public:
+  enum class Kind {
+    // The store is known to be the first store into a slot of an object
+    // after the object was allocated and before it escapes (e.g. stores in
+    // the constructor initializer list).
+    kInitializing,
+
+    // All other stores.
+    kOther,
+  };
+
+  StoreInstanceFieldInstr(const Slot& slot,
+                          Value* instance,
+                          Value* value,
+                          StoreBarrierType emit_store_barrier,
+                          TokenPosition token_pos,
+                          Kind kind = Kind::kOther)
+      : slot_(slot),
+        emit_store_barrier_(emit_store_barrier),
+        token_pos_(token_pos),
+        is_initialization_(kind == Kind::kInitializing) {
+    SetInputAt(kInstancePos, instance);
+    SetInputAt(kValuePos, value);
+  }
+
+  // Convenience constructor that looks up an IL Slot for the given [field].
   StoreInstanceFieldInstr(const Field& field,
                           Value* instance,
                           Value* value,
                           StoreBarrierType emit_store_barrier,
-                          TokenPosition token_pos)
-      : field_(field),
-        offset_in_bytes_(field.Offset()),
-        emit_store_barrier_(emit_store_barrier),
-        token_pos_(token_pos),
-        is_initialization_(false) {
-    SetInputAt(kInstancePos, instance);
-    SetInputAt(kValuePos, value);
-    CheckField(field);
-  }
-
-  StoreInstanceFieldInstr(intptr_t offset_in_bytes,
-                          Value* instance,
-                          Value* value,
-                          StoreBarrierType emit_store_barrier,
-                          TokenPosition token_pos)
-      : field_(Field::ZoneHandle()),
-        offset_in_bytes_(offset_in_bytes),
-        emit_store_barrier_(emit_store_barrier),
-        token_pos_(token_pos),
-        is_initialization_(false) {
-    SetInputAt(kInstancePos, instance);
-    SetInputAt(kValuePos, value);
-  }
+                          TokenPosition token_pos,
+                          const ParsedFunction* parsed_function,
+                          Kind kind = Kind::kOther)
+      : StoreInstanceFieldInstr(Slot::Get(field, parsed_function),
+                                instance,
+                                value,
+                                emit_store_barrier,
+                                token_pos,
+                                kind) {}
 
   DECLARE_INSTRUCTION(StoreInstanceField)
 
-  void set_is_initialization(bool value) { is_initialization_ = value; }
-
   enum { kInstancePos = 0, kValuePos = 1 };
 
   Value* instance() const { return inputs_[kInstancePos]; }
+  const Slot& slot() const { return slot_; }
   Value* value() const { return inputs_[kValuePos]; }
-  bool is_initialization() const { return is_initialization_; }
 
   virtual TokenPosition token_pos() const { return token_pos_; }
-
-  const Field& field() const { return field_; }
-  intptr_t offset_in_bytes() const { return offset_in_bytes_; }
+  bool is_initialization() const { return is_initialization_; }
 
   bool ShouldEmitStoreBarrier() const {
     if (instance()->definition() == value()->definition()) {
@@ -4373,7 +4203,6 @@
   virtual bool HasUnknownSideEffects() const { return false; }
 
   bool IsUnboxedStore() const;
-
   bool IsPotentialUnboxedStore() const;
 
   virtual Representation RequiredInputRepresentation(intptr_t index) const;
@@ -4383,6 +4212,8 @@
  private:
   friend class JitCallSpecializer;  // For ASSERT(initialization_).
 
+  intptr_t OffsetInBytes() const { return slot().offset_in_bytes(); }
+
   Assembler::CanBeSmi CanValueBeSmi() const {
     Isolate* isolate = Isolate::Current();
     if (isolate->type_checks() && !FLAG_strong) {
@@ -4398,12 +4229,11 @@
                               : Assembler::kValueIsNotSmi;
   }
 
-  const Field& field_;
-  intptr_t offset_in_bytes_;
+  const Slot& slot_;
   StoreBarrierType emit_store_barrier_;
   const TokenPosition token_pos_;
   // Marks initializing stores. E.g. in the constructor.
-  bool is_initialization_;
+  const bool is_initialization_;
 
   DISALLOW_COPY_AND_ASSIGN(StoreInstanceFieldInstr);
 };
@@ -4993,6 +4823,9 @@
   DISALLOW_COPY_AND_ASSIGN(AllocateObjectInstr);
 };
 
+// TODO(vegorov) the name of this instruction is confusing: at some point it
+// used to allocate uninitialized storage, but this is no longer true. These
+// days it allocates null-initialized storage.
 class AllocateUninitializedContextInstr
     : public TemplateAllocation<0, NoThrow> {
  public:
@@ -5036,7 +4869,7 @@
 class MaterializeObjectInstr : public Definition {
  public:
   MaterializeObjectInstr(AllocateObjectInstr* allocation,
-                         const ZoneGrowableArray<const Object*>& slots,
+                         const ZoneGrowableArray<const Slot*>& slots,
                          ZoneGrowableArray<Value*>* values)
       : allocation_(allocation),
         cls_(allocation->cls()),
@@ -5054,7 +4887,7 @@
   }
 
   MaterializeObjectInstr(AllocateUninitializedContextInstr* allocation,
-                         const ZoneGrowableArray<const Object*>& slots,
+                         const ZoneGrowableArray<const Slot*>& slots,
                          ZoneGrowableArray<Value*>* values)
       : allocation_(allocation),
         cls_(Class::ZoneHandle(Object::context_class())),
@@ -5077,8 +4910,7 @@
   intptr_t num_variables() const { return num_variables_; }
 
   intptr_t FieldOffsetAt(intptr_t i) const {
-    return slots_[i]->IsField() ? Field::Cast(*slots_[i]).Offset()
-                                : Smi::Cast(*slots_[i]).Value();
+    return slots_[i]->offset_in_bytes();
   }
 
   const Location& LocationAt(intptr_t i) { return locations_[i]; }
@@ -5122,7 +4954,7 @@
   Definition* allocation_;
   const Class& cls_;
   intptr_t num_variables_;
-  const ZoneGrowableArray<const Object*>& slots_;
+  const ZoneGrowableArray<const Slot*>& slots_;
   ZoneGrowableArray<Value*>* values_;
   Location* locations_;
 
@@ -5233,160 +5065,29 @@
   DISALLOW_COPY_AND_ASSIGN(LoadClassIdInstr);
 };
 
-#define NATIVE_FIELDS_LIST(V)                                                  \
-  V(Array, length, Smi, IMMUTABLE)                                             \
-  V(GrowableObjectArray, length, Smi, MUTABLE)                                 \
-  V(TypedData, length, Smi, IMMUTABLE)                                         \
-  V(String, length, Smi, IMMUTABLE)                                            \
-  V(LinkedHashMap, index, TypedDataUint32Array, MUTABLE)                       \
-  V(LinkedHashMap, data, Array, MUTABLE)                                       \
-  V(LinkedHashMap, hash_mask, Smi, MUTABLE)                                    \
-  V(LinkedHashMap, used_data, Smi, MUTABLE)                                    \
-  V(LinkedHashMap, deleted_keys, Smi, MUTABLE)                                 \
-  V(ArgumentsDescriptor, type_args_len, Smi, IMMUTABLE)
-
-class NativeFieldDesc : public ZoneAllocated {
- public:
-  // clang-format off
-  enum Kind {
-#define DECLARE_KIND(ClassName, FieldName, cid, mutability)                    \
-  k##ClassName##_##FieldName,
-    NATIVE_FIELDS_LIST(DECLARE_KIND)
-#undef DECLARE_KIND
-    kTypeArguments,
-  };
-  // clang-format on
-
-#define DEFINE_GETTER(ClassName, FieldName, cid, mutability)                   \
-  static const NativeFieldDesc* ClassName##_##FieldName() {                    \
-    return Get(k##ClassName##_##FieldName);                                    \
-  }
-
-  NATIVE_FIELDS_LIST(DEFINE_GETTER)
-#undef DEFINE_GETTER
-
-  static const NativeFieldDesc* Get(Kind kind);
-  static const NativeFieldDesc* GetLengthFieldForArrayCid(intptr_t array_cid);
-  static const NativeFieldDesc* GetTypeArgumentsField(Zone* zone,
-                                                      intptr_t offset);
-  static const NativeFieldDesc* GetTypeArgumentsFieldFor(Zone* zone,
-                                                         const Class& cls);
-
-  const char* name() const;
-
-  Kind kind() const { return kind_; }
-
-  intptr_t offset_in_bytes() const { return offset_in_bytes_; }
-
-  bool is_immutable() const { return immutable_; }
-
-  intptr_t cid() const { return cid_; }
-
-  RawAbstractType* type() const;
-
- private:
-  NativeFieldDesc(Kind kind,
-                  intptr_t offset_in_bytes,
-                  intptr_t cid,
-                  bool immutable)
-      : kind_(kind),
-        offset_in_bytes_(offset_in_bytes),
-        immutable_(immutable),
-        cid_(cid) {}
-
-  NativeFieldDesc(const NativeFieldDesc& other)
-      : NativeFieldDesc(other.kind_,
-                        other.offset_in_bytes_,
-                        other.immutable_,
-                        other.cid_) {}
-
-  const Kind kind_;
-  const intptr_t offset_in_bytes_;
-  const bool immutable_;
-
-  const intptr_t cid_;
-};
-
+// LoadFieldInstr represents a load from the given [slot] in the given
+// [instance].
+//
+// Note: if the slot was subject to the field unboxing optimization then this
+// load first loads the box stored in the field and then loads the contents
+// of the box.
 class LoadFieldInstr : public TemplateDefinition<1, NoThrow> {
  public:
-  LoadFieldInstr(Value* instance,
-                 intptr_t offset_in_bytes,
-                 const AbstractType& type,
-                 TokenPosition token_pos)
-      : offset_in_bytes_(offset_in_bytes),
-        type_(type),
-        result_cid_(kDynamicCid),
-        immutable_(false),
-        native_field_(nullptr),
-        field_(nullptr),
-        token_pos_(token_pos) {
-    ASSERT(offset_in_bytes >= 0);
-    // May be null if field is not an instance.
-    ASSERT(type_.IsZoneHandle() || type_.IsReadOnlyHandle());
+  LoadFieldInstr(Value* instance, const Slot& slot, TokenPosition token_pos)
+      : slot_(slot), token_pos_(token_pos) {
     SetInputAt(0, instance);
   }
 
-  LoadFieldInstr(Value* instance,
-                 const NativeFieldDesc* native_field,
-                 TokenPosition token_pos)
-      : offset_in_bytes_(native_field->offset_in_bytes()),
-        type_(AbstractType::ZoneHandle(native_field->type())),
-        result_cid_(native_field->cid()),
-        immutable_(native_field->is_immutable()),
-        native_field_(native_field),
-        field_(nullptr),
-        token_pos_(token_pos) {
-    ASSERT(offset_in_bytes_ >= 0);
-    // May be null if field is not an instance.
-    ASSERT(type_.IsZoneHandle() || type_.IsReadOnlyHandle());
-    SetInputAt(0, instance);
-  }
-
-  LoadFieldInstr(Value* instance,
-                 const Field* field,
-                 const AbstractType& type,
-                 TokenPosition token_pos,
-                 const ParsedFunction* parsed_function)
-      : offset_in_bytes_(field->Offset()),
-        type_(type),
-        result_cid_(kDynamicCid),
-        immutable_(false),
-        native_field_(nullptr),
-        field_(field),
-        token_pos_(token_pos) {
-    ASSERT(Class::Handle(field->Owner()).is_finalized());
-    ASSERT(field->IsZoneHandle());
-    // May be null if field is not an instance.
-    ASSERT(type.IsZoneHandle() || type.IsReadOnlyHandle());
-    SetInputAt(0, instance);
-
-    if (parsed_function != nullptr && field->guarded_cid() != kIllegalCid) {
-      if (!field->is_nullable() || (field->guarded_cid() == kNullCid)) {
-        set_result_cid(field->guarded_cid());
-      }
-      parsed_function->AddToGuardedFields(field);
-    }
-  }
-
-  void set_is_immutable(bool value) { immutable_ = value; }
-
   Value* instance() const { return inputs_[0]; }
-  intptr_t offset_in_bytes() const { return offset_in_bytes_; }
-  const AbstractType& type() const { return type_; }
-  void set_result_cid(intptr_t value) { result_cid_ = value; }
-  intptr_t result_cid() const { return result_cid_; }
-  virtual TokenPosition token_pos() const { return token_pos_; }
+  const Slot& slot() const { return slot_; }
 
-  const Field* field() const { return field_; }
+  virtual TokenPosition token_pos() const { return token_pos_; }
 
   virtual Representation representation() const;
 
   bool IsUnboxedLoad() const;
-
   bool IsPotentialUnboxedLoad() const;
 
-  const NativeFieldDesc* native_field() const { return native_field_; }
-
   DECLARE_INSTRUCTION(LoadField)
   virtual CompileType ComputeType() const;
 
@@ -5403,11 +5104,19 @@
   // instance has the field.
   bool Evaluate(const Object& instance_value, Object* result);
 
+  static bool TryEvaluateLoad(const Object& instance,
+                              const Field& field,
+                              Object* result);
+
+  static bool TryEvaluateLoad(const Object& instance,
+                              const Slot& field,
+                              Object* result);
+
   virtual Definition* Canonicalize(FlowGraph* flow_graph);
 
   static bool IsFixedLengthArrayCid(intptr_t cid);
 
-  virtual bool AllowsCSE() const { return immutable_; }
+  virtual bool AllowsCSE() const { return slot_.is_immutable(); }
   virtual bool HasUnknownSideEffects() const { return false; }
 
   virtual bool AttributesEqual(Instruction* other) const;
@@ -5415,13 +5124,9 @@
   PRINT_OPERANDS_TO_SUPPORT
 
  private:
-  const intptr_t offset_in_bytes_;
-  const AbstractType& type_;
-  intptr_t result_cid_;
-  bool immutable_;
+  intptr_t OffsetInBytes() const { return slot().offset_in_bytes(); }
 
-  const NativeFieldDesc* native_field_;
-  const Field* field_;
+  const Slot& slot_;
   const TokenPosition token_pos_;
 
   DISALLOW_COPY_AND_ASSIGN(LoadFieldInstr);
@@ -5507,16 +5212,25 @@
   DISALLOW_COPY_AND_ASSIGN(InstantiateTypeArgumentsInstr);
 };
 
+// [AllocateContext] instruction allocates a new Context object with space
+// for the given [context_variables].
 class AllocateContextInstr : public TemplateAllocation<0, NoThrow> {
  public:
-  AllocateContextInstr(TokenPosition token_pos, intptr_t num_context_variables)
-      : token_pos_(token_pos), num_context_variables_(num_context_variables) {}
+  AllocateContextInstr(TokenPosition token_pos,
+                       const GrowableArray<LocalVariable*>& context_variables)
+      : token_pos_(token_pos), context_variables_(context_variables) {}
 
   DECLARE_INSTRUCTION(AllocateContext)
   virtual CompileType ComputeType() const;
 
   virtual TokenPosition token_pos() const { return token_pos_; }
-  intptr_t num_context_variables() const { return num_context_variables_; }
+  const GrowableArray<LocalVariable*>& context_variables() const {
+    return context_variables_;
+  }
+
+  intptr_t num_context_variables() const {
+    return context_variables().length();
+  }
 
   virtual bool ComputeCanDeoptimize() const { return false; }
 
@@ -5524,14 +5238,14 @@
 
   virtual bool WillAllocateNewOrRemembered() const {
     return Heap::IsAllocatableInNewSpace(
-        Context::InstanceSize(num_context_variables_));
+        Context::InstanceSize(context_variables().length()));
   }
 
   PRINT_OPERANDS_TO_SUPPORT
 
  private:
   const TokenPosition token_pos_;
-  const intptr_t num_context_variables_;
+  const GrowableArray<LocalVariable*>& context_variables_;
 
   DISALLOW_COPY_AND_ASSIGN(AllocateContextInstr);
 };
@@ -5559,23 +5273,26 @@
   DISALLOW_COPY_AND_ASSIGN(InitStaticFieldInstr);
 };
 
+// [CloneContext] instruction clones the given Context object assuming that
+// it contains exactly the provided [context_variables].
 class CloneContextInstr : public TemplateDefinition<1, NoThrow> {
  public:
   CloneContextInstr(TokenPosition token_pos,
                     Value* context_value,
-                    intptr_t num_context_variables,
+                    const GrowableArray<LocalVariable*>& context_variables,
                     intptr_t deopt_id)
       : TemplateDefinition(deopt_id),
         token_pos_(token_pos),
-        num_context_variables_(num_context_variables) {
+        context_variables_(context_variables) {
     SetInputAt(0, context_value);
   }
 
-  static const intptr_t kUnknownContextSize = -1;
-
   virtual TokenPosition token_pos() const { return token_pos_; }
   Value* context_value() const { return inputs_[0]; }
-  intptr_t num_context_variables() const { return num_context_variables_; }
+
+  const GrowableArray<LocalVariable*>& context_variables() const {
+    return context_variables_;
+  }
 
   DECLARE_INSTRUCTION(CloneContext)
   virtual CompileType ComputeType() const;
@@ -5586,7 +5303,7 @@
 
  private:
   const TokenPosition token_pos_;
-  const intptr_t num_context_variables_;
+  const GrowableArray<LocalVariable*>& context_variables_;
 
   DISALLOW_COPY_AND_ASSIGN(CloneContextInstr);
 };
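
A sketch of constructing the reworked context instructions (hedged: Z,
token_pos, deopt_id and the context_variables array are assumed to be in
scope, e.g. collected from the local scope):

    // Both instructions now carry the full list of captured variables
    // instead of a bare count; num_context_variables() is derived from it.
    AllocateContextInstr* alloc =
        new (Z) AllocateContextInstr(token_pos, context_variables);
    CloneContextInstr* clone = new (Z) CloneContextInstr(
        token_pos, new (Z) Value(alloc), context_variables, deopt_id);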
diff --git a/runtime/vm/compiler/backend/il_arm.cc b/runtime/vm/compiler/backend/il_arm.cc
index 7f7b6de..ccd7847 100644
--- a/runtime/vm/compiler/backend/il_arm.cc
+++ b/runtime/vm/compiler/backend/il_arm.cc
@@ -2247,12 +2247,13 @@
   Label skip_store;
 
   const Register instance_reg = locs()->in(0).reg();
+  const intptr_t offset_in_bytes = OffsetInBytes();
 
   if (IsUnboxedStore() && compiler->is_optimizing()) {
     const DRegister value = EvenDRegisterOf(locs()->in(1).fpu_reg());
     const Register temp = locs()->temp(0).reg();
     const Register temp2 = locs()->temp(1).reg();
-    const intptr_t cid = field().UnboxedFieldCid();
+    const intptr_t cid = slot().field().UnboxedFieldCid();
 
     if (is_initialization()) {
       const Class* cls = NULL;
@@ -2272,10 +2273,10 @@
 
       BoxAllocationSlowPath::Allocate(compiler, this, *cls, temp, temp2);
       __ MoveRegister(temp2, temp);
-      __ StoreIntoObjectOffset(instance_reg, offset_in_bytes_, temp2,
+      __ StoreIntoObjectOffset(instance_reg, offset_in_bytes, temp2,
                                Assembler::kValueIsNotSmi);
     } else {
-      __ ldr(temp, FieldAddress(instance_reg, offset_in_bytes_));
+      __ ldr(temp, FieldAddress(instance_reg, offset_in_bytes));
     }
     switch (cid) {
       case kDoubleCid:
@@ -2316,7 +2317,7 @@
     Label store_float32x4;
     Label store_float64x2;
 
-    __ LoadObject(temp, Field::ZoneHandle(Z, field().Original()));
+    __ LoadObject(temp, Field::ZoneHandle(Z, slot().field().Original()));
 
     __ ldrh(temp2, FieldAddress(temp, Field::is_nullable_offset()));
     __ CompareImmediate(temp2, kNullCid);
@@ -2349,7 +2350,7 @@
     {
       __ Bind(&store_double);
       EnsureMutableBox(compiler, this, temp, compiler->double_class(),
-                       instance_reg, offset_in_bytes_, temp2);
+                       instance_reg, offset_in_bytes, temp2);
       __ CopyDoubleField(temp, value_reg, TMP, temp2, fpu_temp);
       __ b(&skip_store);
     }
@@ -2357,7 +2358,7 @@
     {
       __ Bind(&store_float32x4);
       EnsureMutableBox(compiler, this, temp, compiler->float32x4_class(),
-                       instance_reg, offset_in_bytes_, temp2);
+                       instance_reg, offset_in_bytes, temp2);
       __ CopyFloat32x4Field(temp, value_reg, TMP, temp2, fpu_temp);
       __ b(&skip_store);
     }
@@ -2365,7 +2366,7 @@
     {
       __ Bind(&store_float64x2);
       EnsureMutableBox(compiler, this, temp, compiler->float64x2_class(),
-                       instance_reg, offset_in_bytes_, temp2);
+                       instance_reg, offset_in_bytes, temp2);
       __ CopyFloat64x2Field(temp, value_reg, TMP, temp2, fpu_temp);
       __ b(&skip_store);
     }
@@ -2379,16 +2380,16 @@
     // by executing 'ret LR' directly. Therefore we cannot overwrite LR. (see
     // ReturnInstr::EmitNativeCode).
     ASSERT(!locs()->live_registers()->Contains(Location::RegisterLocation(LR)));
-    __ StoreIntoObjectOffset(instance_reg, offset_in_bytes_, value_reg,
+    __ StoreIntoObjectOffset(instance_reg, offset_in_bytes, value_reg,
                              CanValueBeSmi(),
                              /*lr_reserved=*/!compiler->intrinsic_mode());
   } else {
     if (locs()->in(1).IsConstant()) {
-      __ StoreIntoObjectNoBarrierOffset(instance_reg, offset_in_bytes_,
+      __ StoreIntoObjectNoBarrierOffset(instance_reg, offset_in_bytes,
                                         locs()->in(1).constant());
     } else {
       const Register value_reg = locs()->in(1).reg();
-      __ StoreIntoObjectNoBarrierOffset(instance_reg, offset_in_bytes_,
+      __ StoreIntoObjectNoBarrierOffset(instance_reg, offset_in_bytes,
                                         value_reg);
     }
   }
@@ -2603,8 +2604,8 @@
   if (IsUnboxedLoad() && compiler->is_optimizing()) {
     const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
     const Register temp = locs()->temp(0).reg();
-    __ LoadFieldFromOffset(kWord, temp, instance_reg, offset_in_bytes());
-    const intptr_t cid = field()->UnboxedFieldCid();
+    __ LoadFieldFromOffset(kWord, temp, instance_reg, OffsetInBytes());
+    const intptr_t cid = slot().field().UnboxedFieldCid();
     switch (cid) {
       case kDoubleCid:
         __ Comment("UnboxedDoubleLoadFieldInstr");
@@ -2639,7 +2640,7 @@
     Label load_float32x4;
     Label load_float64x2;
 
-    __ LoadObject(result_reg, Field::ZoneHandle(field()->Original()));
+    __ LoadObject(result_reg, Field::ZoneHandle(slot().field().Original()));
 
     FieldAddress field_cid_operand(result_reg, Field::guarded_cid_offset());
     FieldAddress field_nullability_operand(result_reg,
@@ -2672,7 +2673,7 @@
       __ Bind(&load_double);
       BoxAllocationSlowPath::Allocate(compiler, this, compiler->double_class(),
                                       result_reg, temp);
-      __ ldr(temp, FieldAddress(instance_reg, offset_in_bytes()));
+      __ ldr(temp, FieldAddress(instance_reg, OffsetInBytes()));
       __ CopyDoubleField(result_reg, temp, TMP, temp2, value);
       __ b(&done);
     }
@@ -2681,7 +2682,7 @@
       __ Bind(&load_float32x4);
       BoxAllocationSlowPath::Allocate(
           compiler, this, compiler->float32x4_class(), result_reg, temp);
-      __ ldr(temp, FieldAddress(instance_reg, offset_in_bytes()));
+      __ ldr(temp, FieldAddress(instance_reg, OffsetInBytes()));
       __ CopyFloat32x4Field(result_reg, temp, TMP, temp2, value);
       __ b(&done);
     }
@@ -2690,14 +2691,14 @@
       __ Bind(&load_float64x2);
       BoxAllocationSlowPath::Allocate(
           compiler, this, compiler->float64x2_class(), result_reg, temp);
-      __ ldr(temp, FieldAddress(instance_reg, offset_in_bytes()));
+      __ ldr(temp, FieldAddress(instance_reg, OffsetInBytes()));
       __ CopyFloat64x2Field(result_reg, temp, TMP, temp2, value);
       __ b(&done);
     }
 
     __ Bind(&load_pointer);
   }
-  __ LoadFieldFromOffset(kWord, result_reg, instance_reg, offset_in_bytes());
+  __ LoadFieldFromOffset(kWord, result_reg, instance_reg, OffsetInBytes());
   __ Bind(&done);
 }
 
diff --git a/runtime/vm/compiler/backend/il_arm64.cc b/runtime/vm/compiler/backend/il_arm64.cc
index b959d2f..c8b80a1 100644
--- a/runtime/vm/compiler/backend/il_arm64.cc
+++ b/runtime/vm/compiler/backend/il_arm64.cc
@@ -1641,7 +1641,7 @@
   Label* fail = (deopt != NULL) ? deopt : &fail_label;
 
   if (emit_full_guard) {
-    __ LoadObject(field_reg, Field::ZoneHandle(field().Original()));
+    __ LoadObject(field_reg, Field::ZoneHandle(field().Original()));
 
     FieldAddress field_cid_operand(field_reg, Field::guarded_cid_offset(),
                                    kUnsignedHalfword);
@@ -1936,12 +1936,13 @@
   Label skip_store;
 
   const Register instance_reg = locs()->in(0).reg();
+  const intptr_t offset_in_bytes = OffsetInBytes();
 
   if (IsUnboxedStore() && compiler->is_optimizing()) {
     const VRegister value = locs()->in(1).fpu_reg();
     const Register temp = locs()->temp(0).reg();
     const Register temp2 = locs()->temp(1).reg();
-    const intptr_t cid = field().UnboxedFieldCid();
+    const intptr_t cid = slot().field().UnboxedFieldCid();
 
     if (is_initialization()) {
       const Class* cls = NULL;
@@ -1961,11 +1962,11 @@
 
       BoxAllocationSlowPath::Allocate(compiler, this, *cls, temp, temp2);
       __ MoveRegister(temp2, temp);
-      __ StoreIntoObjectOffset(instance_reg, offset_in_bytes_, temp2,
+      __ StoreIntoObjectOffset(instance_reg, offset_in_bytes, temp2,
                                Assembler::kValueIsNotSmi,
                                /*lr_reserved=*/!compiler->intrinsic_mode());
     } else {
-      __ LoadFieldFromOffset(temp, instance_reg, offset_in_bytes_);
+      __ LoadFieldFromOffset(temp, instance_reg, offset_in_bytes);
     }
     switch (cid) {
       case kDoubleCid:
@@ -2003,7 +2004,7 @@
     Label store_float32x4;
     Label store_float64x2;
 
-    __ LoadObject(temp, Field::ZoneHandle(Z, field().Original()));
+    __ LoadObject(temp, Field::ZoneHandle(Z, slot().field().Original()));
 
     __ LoadFieldFromOffset(temp2, temp, Field::is_nullable_offset(),
                            kUnsignedHalfword);
@@ -2041,7 +2042,7 @@
     {
       __ Bind(&store_double);
       EnsureMutableBox(compiler, this, temp, compiler->double_class(),
-                       instance_reg, offset_in_bytes_, temp2);
+                       instance_reg, offset_in_bytes, temp2);
       __ LoadDFieldFromOffset(VTMP, value_reg, Double::value_offset());
       __ StoreDFieldToOffset(VTMP, temp, Double::value_offset());
       __ b(&skip_store);
@@ -2050,7 +2051,7 @@
     {
       __ Bind(&store_float32x4);
       EnsureMutableBox(compiler, this, temp, compiler->float32x4_class(),
-                       instance_reg, offset_in_bytes_, temp2);
+                       instance_reg, offset_in_bytes, temp2);
       __ LoadQFieldFromOffset(VTMP, value_reg, Float32x4::value_offset());
       __ StoreQFieldToOffset(VTMP, temp, Float32x4::value_offset());
       __ b(&skip_store);
@@ -2059,7 +2060,7 @@
     {
       __ Bind(&store_float64x2);
       EnsureMutableBox(compiler, this, temp, compiler->float64x2_class(),
-                       instance_reg, offset_in_bytes_, temp2);
+                       instance_reg, offset_in_bytes, temp2);
       __ LoadQFieldFromOffset(VTMP, value_reg, Float64x2::value_offset());
       __ StoreQFieldToOffset(VTMP, temp, Float64x2::value_offset());
       __ b(&skip_store);
@@ -2074,16 +2075,16 @@
     // by executing 'ret LR' directly. Therefore we cannot overwrite LR. (see
     // ReturnInstr::EmitNativeCode).
     ASSERT((kDartAvailableCpuRegs & (1 << LR)) == 0);
-    __ StoreIntoObjectOffset(instance_reg, offset_in_bytes_, value_reg,
+    __ StoreIntoObjectOffset(instance_reg, offset_in_bytes, value_reg,
                              CanValueBeSmi(),
                              /*lr_reserved=*/!compiler->intrinsic_mode());
   } else {
     if (locs()->in(1).IsConstant()) {
-      __ StoreIntoObjectOffsetNoBarrier(instance_reg, offset_in_bytes_,
+      __ StoreIntoObjectOffsetNoBarrier(instance_reg, offset_in_bytes,
                                         locs()->in(1).constant());
     } else {
       const Register value_reg = locs()->in(1).reg();
-      __ StoreIntoObjectOffsetNoBarrier(instance_reg, offset_in_bytes_,
+      __ StoreIntoObjectOffsetNoBarrier(instance_reg, offset_in_bytes,
                                         value_reg);
     }
   }
@@ -2294,8 +2295,8 @@
   if (IsUnboxedLoad() && compiler->is_optimizing()) {
     const VRegister result = locs()->out(0).fpu_reg();
     const Register temp = locs()->temp(0).reg();
-    __ LoadFieldFromOffset(temp, instance_reg, offset_in_bytes());
-    const intptr_t cid = field()->UnboxedFieldCid();
+    __ LoadFieldFromOffset(temp, instance_reg, OffsetInBytes());
+    const intptr_t cid = slot().field().UnboxedFieldCid();
     switch (cid) {
       case kDoubleCid:
         __ Comment("UnboxedDoubleLoadFieldInstr");
@@ -2323,7 +2324,7 @@
     Label load_float32x4;
     Label load_float64x2;
 
-    __ LoadObject(result_reg, Field::ZoneHandle(field()->Original()));
+    __ LoadObject(result_reg, Field::ZoneHandle(slot().field().Original()));
 
     FieldAddress field_cid_operand(result_reg, Field::guarded_cid_offset(),
                                    kUnsignedHalfword);
@@ -2357,7 +2358,7 @@
       __ Bind(&load_double);
       BoxAllocationSlowPath::Allocate(compiler, this, compiler->double_class(),
                                       result_reg, temp);
-      __ LoadFieldFromOffset(temp, instance_reg, offset_in_bytes());
+      __ LoadFieldFromOffset(temp, instance_reg, OffsetInBytes());
       __ LoadDFieldFromOffset(VTMP, temp, Double::value_offset());
       __ StoreDFieldToOffset(VTMP, result_reg, Double::value_offset());
       __ b(&done);
@@ -2367,7 +2368,7 @@
       __ Bind(&load_float32x4);
       BoxAllocationSlowPath::Allocate(
           compiler, this, compiler->float32x4_class(), result_reg, temp);
-      __ LoadFieldFromOffset(temp, instance_reg, offset_in_bytes());
+      __ LoadFieldFromOffset(temp, instance_reg, OffsetInBytes());
       __ LoadQFieldFromOffset(VTMP, temp, Float32x4::value_offset());
       __ StoreQFieldToOffset(VTMP, result_reg, Float32x4::value_offset());
       __ b(&done);
@@ -2377,7 +2378,7 @@
       __ Bind(&load_float64x2);
       BoxAllocationSlowPath::Allocate(
           compiler, this, compiler->float64x2_class(), result_reg, temp);
-      __ LoadFieldFromOffset(temp, instance_reg, offset_in_bytes());
+      __ LoadFieldFromOffset(temp, instance_reg, OffsetInBytes());
       __ LoadQFieldFromOffset(VTMP, temp, Float64x2::value_offset());
       __ StoreQFieldToOffset(VTMP, result_reg, Float64x2::value_offset());
       __ b(&done);
@@ -2385,7 +2386,7 @@
 
     __ Bind(&load_pointer);
   }
-  __ LoadFieldFromOffset(result_reg, instance_reg, offset_in_bytes());
+  __ LoadFieldFromOffset(result_reg, instance_reg, OffsetInBytes());
   __ Bind(&done);
 }
 
diff --git a/runtime/vm/compiler/backend/il_dbc.cc b/runtime/vm/compiler/backend/il_dbc.cc
index 419f64b..4e5509f 100644
--- a/runtime/vm/compiler/backend/il_dbc.cc
+++ b/runtime/vm/compiler/backend/il_dbc.cc
@@ -1087,34 +1087,34 @@
 
 EMIT_NATIVE_CODE(StoreInstanceField, 2) {
   ASSERT(!HasTemp());
-  ASSERT(offset_in_bytes() % kWordSize == 0);
+  ASSERT(OffsetInBytes() % kWordSize == 0);
   if (compiler->is_optimizing()) {
     const Register value = locs()->in(1).reg();
     const Register instance = locs()->in(0).reg();
-    if (Utils::IsInt(8, offset_in_bytes() / kWordSize)) {
-      __ StoreField(instance, offset_in_bytes() / kWordSize, value);
+    if (Utils::IsInt(8, OffsetInBytes() / kWordSize)) {
+      __ StoreField(instance, OffsetInBytes() / kWordSize, value);
     } else {
       __ StoreFieldExt(instance, value);
-      __ Nop(offset_in_bytes() / kWordSize);
+      __ Nop(OffsetInBytes() / kWordSize);
     }
   } else {
-    __ StoreFieldTOS(offset_in_bytes() / kWordSize);
+    __ StoreFieldTOS(OffsetInBytes() / kWordSize);
   }
 }
 
 EMIT_NATIVE_CODE(LoadField, 1, Location::RequiresRegister()) {
-  ASSERT(offset_in_bytes() % kWordSize == 0);
+  ASSERT(OffsetInBytes() % kWordSize == 0);
   if (compiler->is_optimizing()) {
     const Register result = locs()->out(0).reg();
     const Register instance = locs()->in(0).reg();
-    if (Utils::IsInt(8, offset_in_bytes() / kWordSize)) {
-      __ LoadField(result, instance, offset_in_bytes() / kWordSize);
+    if (Utils::IsInt(8, OffsetInBytes() / kWordSize)) {
+      __ LoadField(result, instance, OffsetInBytes() / kWordSize);
     } else {
       __ LoadFieldExt(result, instance);
-      __ Nop(offset_in_bytes() / kWordSize);
+      __ Nop(OffsetInBytes() / kWordSize);
     }
   } else {
-    __ LoadFieldTOS(offset_in_bytes() / kWordSize);
+    __ LoadFieldTOS(OffsetInBytes() / kWordSize);
   }
 }
 
diff --git a/runtime/vm/compiler/backend/il_ia32.cc b/runtime/vm/compiler/backend/il_ia32.cc
index b24af60..e28e999 100644
--- a/runtime/vm/compiler/backend/il_ia32.cc
+++ b/runtime/vm/compiler/backend/il_ia32.cc
@@ -1830,13 +1830,14 @@
   ASSERT(sizeof(classid_t) == kInt16Size);
   Label skip_store;
 
-  Register instance_reg = locs()->in(0).reg();
+  const Register instance_reg = locs()->in(0).reg();
+  const intptr_t offset_in_bytes = OffsetInBytes();
 
   if (IsUnboxedStore() && compiler->is_optimizing()) {
     XmmRegister value = locs()->in(1).fpu_reg();
     Register temp = locs()->temp(0).reg();
     Register temp2 = locs()->temp(1).reg();
-    const intptr_t cid = field().UnboxedFieldCid();
+    const intptr_t cid = slot().field().UnboxedFieldCid();
 
     if (is_initialization()) {
       const Class* cls = NULL;
@@ -1857,10 +1858,10 @@
       BoxAllocationSlowPath::Allocate(compiler, this, *cls, temp, temp2);
       __ movl(temp2, temp);
       __ StoreIntoObject(instance_reg,
-                         FieldAddress(instance_reg, offset_in_bytes_), temp2,
+                         FieldAddress(instance_reg, offset_in_bytes), temp2,
                          Assembler::kValueIsNotSmi);
     } else {
-      __ movl(temp, FieldAddress(instance_reg, offset_in_bytes_));
+      __ movl(temp, FieldAddress(instance_reg, offset_in_bytes));
     }
     switch (cid) {
       case kDoubleCid:
@@ -1900,7 +1901,7 @@
     Label store_float32x4;
     Label store_float64x2;
 
-    __ LoadObject(temp, Field::ZoneHandle(Z, field().Original()));
+    __ LoadObject(temp, Field::ZoneHandle(Z, slot().field().Original()));
 
     __ cmpw(FieldAddress(temp, Field::is_nullable_offset()),
             Immediate(kNullCid));
@@ -1933,7 +1934,7 @@
     {
       __ Bind(&store_double);
       EnsureMutableBox(compiler, this, temp, compiler->double_class(),
-                       instance_reg, offset_in_bytes_, temp2);
+                       instance_reg, offset_in_bytes, temp2);
       __ movsd(fpu_temp, FieldAddress(value_reg, Double::value_offset()));
       __ movsd(FieldAddress(temp, Double::value_offset()), fpu_temp);
       __ jmp(&skip_store);
@@ -1942,7 +1943,7 @@
     {
       __ Bind(&store_float32x4);
       EnsureMutableBox(compiler, this, temp, compiler->float32x4_class(),
-                       instance_reg, offset_in_bytes_, temp2);
+                       instance_reg, offset_in_bytes, temp2);
       __ movups(fpu_temp, FieldAddress(value_reg, Float32x4::value_offset()));
       __ movups(FieldAddress(temp, Float32x4::value_offset()), fpu_temp);
       __ jmp(&skip_store);
@@ -1951,7 +1952,7 @@
     {
       __ Bind(&store_float64x2);
       EnsureMutableBox(compiler, this, temp, compiler->float64x2_class(),
-                       instance_reg, offset_in_bytes_, temp2);
+                       instance_reg, offset_in_bytes, temp2);
       __ movups(fpu_temp, FieldAddress(value_reg, Float64x2::value_offset()));
       __ movups(FieldAddress(temp, Float64x2::value_offset()), fpu_temp);
       __ jmp(&skip_store);
@@ -1963,18 +1964,17 @@
   if (ShouldEmitStoreBarrier()) {
     Register value_reg = locs()->in(1).reg();
     __ StoreIntoObject(instance_reg,
-                       FieldAddress(instance_reg, offset_in_bytes_), value_reg,
+                       FieldAddress(instance_reg, offset_in_bytes), value_reg,
                        CanValueBeSmi());
   } else {
     if (locs()->in(1).IsConstant()) {
       __ StoreIntoObjectNoBarrier(instance_reg,
-                                  FieldAddress(instance_reg, offset_in_bytes_),
+                                  FieldAddress(instance_reg, offset_in_bytes),
                                   locs()->in(1).constant());
     } else {
       Register value_reg = locs()->in(1).reg();
-      __ StoreIntoObjectNoBarrier(instance_reg,
-                                  FieldAddress(instance_reg, offset_in_bytes_),
-                                  value_reg);
+      __ StoreIntoObjectNoBarrier(
+          instance_reg, FieldAddress(instance_reg, offset_in_bytes), value_reg);
     }
   }
   __ Bind(&skip_store);
@@ -2185,8 +2185,8 @@
   if (IsUnboxedLoad() && compiler->is_optimizing()) {
     XmmRegister result = locs()->out(0).fpu_reg();
     Register temp = locs()->temp(0).reg();
-    __ movl(temp, FieldAddress(instance_reg, offset_in_bytes()));
-    const intptr_t cid = field()->UnboxedFieldCid();
+    __ movl(temp, FieldAddress(instance_reg, OffsetInBytes()));
+    const intptr_t cid = slot().field().UnboxedFieldCid();
     switch (cid) {
       case kDoubleCid:
         __ Comment("UnboxedDoubleLoadFieldInstr");
@@ -2217,7 +2217,7 @@
     Label load_float32x4;
     Label load_float64x2;
 
-    __ LoadObject(result, Field::ZoneHandle(field()->Original()));
+    __ LoadObject(result, Field::ZoneHandle(slot().field().Original()));
 
     FieldAddress field_cid_operand(result, Field::guarded_cid_offset());
     FieldAddress field_nullability_operand(result, Field::is_nullable_offset());
@@ -2245,7 +2245,7 @@
       __ Bind(&load_double);
       BoxAllocationSlowPath::Allocate(compiler, this, compiler->double_class(),
                                       result, temp);
-      __ movl(temp, FieldAddress(instance_reg, offset_in_bytes()));
+      __ movl(temp, FieldAddress(instance_reg, OffsetInBytes()));
       __ movsd(value, FieldAddress(temp, Double::value_offset()));
       __ movsd(FieldAddress(result, Double::value_offset()), value);
       __ jmp(&done);
@@ -2255,7 +2255,7 @@
       __ Bind(&load_float32x4);
       BoxAllocationSlowPath::Allocate(
           compiler, this, compiler->float32x4_class(), result, temp);
-      __ movl(temp, FieldAddress(instance_reg, offset_in_bytes()));
+      __ movl(temp, FieldAddress(instance_reg, OffsetInBytes()));
       __ movups(value, FieldAddress(temp, Float32x4::value_offset()));
       __ movups(FieldAddress(result, Float32x4::value_offset()), value);
       __ jmp(&done);
@@ -2265,7 +2265,7 @@
       __ Bind(&load_float64x2);
       BoxAllocationSlowPath::Allocate(
           compiler, this, compiler->float64x2_class(), result, temp);
-      __ movl(temp, FieldAddress(instance_reg, offset_in_bytes()));
+      __ movl(temp, FieldAddress(instance_reg, OffsetInBytes()));
       __ movups(value, FieldAddress(temp, Float64x2::value_offset()));
       __ movups(FieldAddress(result, Float64x2::value_offset()), value);
       __ jmp(&done);
@@ -2273,7 +2273,7 @@
 
     __ Bind(&load_pointer);
   }
-  __ movl(result, FieldAddress(instance_reg, offset_in_bytes()));
+  __ movl(result, FieldAddress(instance_reg, OffsetInBytes()));
   __ Bind(&done);
 }
 
diff --git a/runtime/vm/compiler/backend/il_printer.cc b/runtime/vm/compiler/backend/il_printer.cc
index 0ad2ab2..a2a26ea 100644
--- a/runtime/vm/compiler/backend/il_printer.cc
+++ b/runtime/vm/compiler/backend/il_printer.cc
@@ -612,14 +612,8 @@
 }
 
 void StoreInstanceFieldInstr::PrintOperandsTo(BufferFormatter* f) const {
-  if (field().IsNull()) {
-    f->Print("{%" Pd "}, ", offset_in_bytes());
-  } else {
-    f->Print("%s {%" Pd "}, ", String::Handle(field().name()).ToCString(),
-             field().Offset());
-  }
   instance()->PrintTo(f);
-  f->Print(", ");
+  f->Print(" . %s = ", slot().Name());
   value()->PrintTo(f);
   if (!ShouldEmitStoreBarrier()) f->Print(", barrier removed");
 }
@@ -671,27 +665,14 @@
   f->Print("%s", String::Handle(cls_.ScrubbedName()).ToCString());
   for (intptr_t i = 0; i < InputCount(); i++) {
     f->Print(", ");
-    f->Print("%s: ", slots_[i]->ToCString());
+    f->Print("%s: ", slots_[i]->Name());
     InputAt(i)->PrintTo(f);
   }
 }
 
 void LoadFieldInstr::PrintOperandsTo(BufferFormatter* f) const {
   instance()->PrintTo(f);
-  f->Print(", %" Pd, offset_in_bytes());
-
-  if (field() != nullptr) {
-    f->Print(" {%s} %s", String::Handle(field()->name()).ToCString(),
-             field()->GuardedPropertiesAsCString());
-  }
-
-  if (native_field() != nullptr) {
-    f->Print(" {%s}", native_field()->name());
-  }
-
-  if (immutable_) {
-    f->Print(", immutable");
-  }
+  f->Print(" . %s%s", slot().Name(), slot().is_immutable() ? " {final}" : "");
 }
 
 void InstantiateTypeInstr::PrintOperandsTo(BufferFormatter* f) const {
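
For illustration (hypothetical values; the exact definition names depend on
the flow graph being printed), the rewritten printers emit operands like

    v2 . GrowableObjectArray.length = v7
    v2 . Array.length {final}

instead of the old offset-based forms such as "length {8}, v2, v7".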
diff --git a/runtime/vm/compiler/backend/il_x64.cc b/runtime/vm/compiler/backend/il_x64.cc
index 82e780a..64ed573 100644
--- a/runtime/vm/compiler/backend/il_x64.cc
+++ b/runtime/vm/compiler/backend/il_x64.cc
@@ -1957,13 +1957,14 @@
   ASSERT(sizeof(classid_t) == kInt16Size);
   Label skip_store;
 
-  Register instance_reg = locs()->in(0).reg();
+  const Register instance_reg = locs()->in(0).reg();
+  const intptr_t offset_in_bytes = OffsetInBytes();
 
   if (IsUnboxedStore() && compiler->is_optimizing()) {
     XmmRegister value = locs()->in(1).fpu_reg();
     Register temp = locs()->temp(0).reg();
     Register temp2 = locs()->temp(1).reg();
-    const intptr_t cid = field().UnboxedFieldCid();
+    const intptr_t cid = slot().field().UnboxedFieldCid();
 
     if (is_initialization()) {
       const Class* cls = NULL;
@@ -1984,10 +1985,10 @@
       BoxAllocationSlowPath::Allocate(compiler, this, *cls, temp, temp2);
       __ movq(temp2, temp);
       __ StoreIntoObject(instance_reg,
-                         FieldAddress(instance_reg, offset_in_bytes_), temp2,
+                         FieldAddress(instance_reg, offset_in_bytes), temp2,
                          Assembler::kValueIsNotSmi);
     } else {
-      __ movq(temp, FieldAddress(instance_reg, offset_in_bytes_));
+      __ movq(temp, FieldAddress(instance_reg, offset_in_bytes));
     }
     switch (cid) {
       case kDoubleCid:
@@ -2025,7 +2026,7 @@
     Label store_float32x4;
     Label store_float64x2;
 
-    __ LoadObject(temp, Field::ZoneHandle(Z, field().Original()));
+    __ LoadObject(temp, Field::ZoneHandle(Z, slot().field().Original()));
 
     __ cmpw(FieldAddress(temp, Field::is_nullable_offset()),
             Immediate(kNullCid));
@@ -2058,7 +2059,7 @@
     {
       __ Bind(&store_double);
       EnsureMutableBox(compiler, this, temp, compiler->double_class(),
-                       instance_reg, offset_in_bytes_, temp2);
+                       instance_reg, offset_in_bytes, temp2);
       __ movsd(fpu_temp, FieldAddress(value_reg, Double::value_offset()));
       __ movsd(FieldAddress(temp, Double::value_offset()), fpu_temp);
       __ jmp(&skip_store);
@@ -2067,7 +2068,7 @@
     {
       __ Bind(&store_float32x4);
       EnsureMutableBox(compiler, this, temp, compiler->float32x4_class(),
-                       instance_reg, offset_in_bytes_, temp2);
+                       instance_reg, offset_in_bytes, temp2);
       __ movups(fpu_temp, FieldAddress(value_reg, Float32x4::value_offset()));
       __ movups(FieldAddress(temp, Float32x4::value_offset()), fpu_temp);
       __ jmp(&skip_store);
@@ -2076,7 +2077,7 @@
     {
       __ Bind(&store_float64x2);
       EnsureMutableBox(compiler, this, temp, compiler->float64x2_class(),
-                       instance_reg, offset_in_bytes_, temp2);
+                       instance_reg, offset_in_bytes, temp2);
       __ movups(fpu_temp, FieldAddress(value_reg, Float64x2::value_offset()));
       __ movups(FieldAddress(temp, Float64x2::value_offset()), fpu_temp);
       __ jmp(&skip_store);
@@ -2088,18 +2089,17 @@
   if (ShouldEmitStoreBarrier()) {
     Register value_reg = locs()->in(1).reg();
     __ StoreIntoObject(instance_reg,
-                       FieldAddress(instance_reg, offset_in_bytes_), value_reg,
+                       FieldAddress(instance_reg, offset_in_bytes), value_reg,
                        CanValueBeSmi());
   } else {
     if (locs()->in(1).IsConstant()) {
       __ StoreIntoObjectNoBarrier(instance_reg,
-                                  FieldAddress(instance_reg, offset_in_bytes_),
+                                  FieldAddress(instance_reg, offset_in_bytes),
                                   locs()->in(1).constant());
     } else {
       Register value_reg = locs()->in(1).reg();
-      __ StoreIntoObjectNoBarrier(instance_reg,
-                                  FieldAddress(instance_reg, offset_in_bytes_),
-                                  value_reg);
+      __ StoreIntoObjectNoBarrier(
+          instance_reg, FieldAddress(instance_reg, offset_in_bytes), value_reg);
     }
   }
   __ Bind(&skip_store);
@@ -2311,8 +2311,8 @@
   if (IsUnboxedLoad() && compiler->is_optimizing()) {
     XmmRegister result = locs()->out(0).fpu_reg();
     Register temp = locs()->temp(0).reg();
-    __ movq(temp, FieldAddress(instance_reg, offset_in_bytes()));
-    intptr_t cid = field()->UnboxedFieldCid();
+    __ movq(temp, FieldAddress(instance_reg, OffsetInBytes()));
+    intptr_t cid = slot().field().UnboxedFieldCid();
     switch (cid) {
       case kDoubleCid:
         __ Comment("UnboxedDoubleLoadFieldInstr");
@@ -2343,7 +2343,7 @@
     Label load_float32x4;
     Label load_float64x2;
 
-    __ LoadObject(result, Field::ZoneHandle(field()->Original()));
+    __ LoadObject(result, Field::ZoneHandle(slot().field().Original()));
 
     FieldAddress field_cid_operand(result, Field::guarded_cid_offset());
     FieldAddress field_nullability_operand(result, Field::is_nullable_offset());
@@ -2371,7 +2371,7 @@
       __ Bind(&load_double);
       BoxAllocationSlowPath::Allocate(compiler, this, compiler->double_class(),
                                       result, temp);
-      __ movq(temp, FieldAddress(instance_reg, offset_in_bytes()));
+      __ movq(temp, FieldAddress(instance_reg, OffsetInBytes()));
       __ movsd(value, FieldAddress(temp, Double::value_offset()));
       __ movsd(FieldAddress(result, Double::value_offset()), value);
       __ jmp(&done);
@@ -2381,7 +2381,7 @@
       __ Bind(&load_float32x4);
       BoxAllocationSlowPath::Allocate(
           compiler, this, compiler->float32x4_class(), result, temp);
-      __ movq(temp, FieldAddress(instance_reg, offset_in_bytes()));
+      __ movq(temp, FieldAddress(instance_reg, OffsetInBytes()));
       __ movups(value, FieldAddress(temp, Float32x4::value_offset()));
       __ movups(FieldAddress(result, Float32x4::value_offset()), value);
       __ jmp(&done);
@@ -2391,7 +2391,7 @@
       __ Bind(&load_float64x2);
       BoxAllocationSlowPath::Allocate(
           compiler, this, compiler->float64x2_class(), result, temp);
-      __ movq(temp, FieldAddress(instance_reg, offset_in_bytes()));
+      __ movq(temp, FieldAddress(instance_reg, OffsetInBytes()));
       __ movups(value, FieldAddress(temp, Float64x2::value_offset()));
       __ movups(FieldAddress(result, Float64x2::value_offset()), value);
       __ jmp(&done);
@@ -2399,7 +2399,7 @@
 
     __ Bind(&load_pointer);
   }
-  __ movq(result, FieldAddress(instance_reg, offset_in_bytes()));
+  __ movq(result, FieldAddress(instance_reg, OffsetInBytes()));
   __ Bind(&done);
 }
 
diff --git a/runtime/vm/compiler/backend/inliner.cc b/runtime/vm/compiler/backend/inliner.cc
index e73115c..1398af1 100644
--- a/runtime/vm/compiler/backend/inliner.cc
+++ b/runtime/vm/compiler/backend/inliner.cc
@@ -674,10 +674,7 @@
           ASSERT(call_data->call->IsClosureCall());
           LoadFieldInstr* context_load = new (zone) LoadFieldInstr(
               new Value((*arguments)[first_arg_index]->definition()),
-              Closure::context_offset(),
-              AbstractType::ZoneHandle(zone, AbstractType::null()),
-              call_data->call->token_pos());
-          context_load->set_is_immutable(true);
+              Slot::Closure_context(), call_data->call->token_pos());
           context_load->set_ssa_temp_index(
               caller_graph->alloc_ssa_temp_index());
           context_load->InsertBefore(callee_entry->next());
@@ -2366,8 +2363,8 @@
                                        bool can_speculate) {
   // Insert array length load and bounds check.
   LoadFieldInstr* length = new (Z) LoadFieldInstr(
-      new (Z) Value(*array),
-      NativeFieldDesc::GetLengthFieldForArrayCid(array_cid), call->token_pos());
+      new (Z) Value(*array), Slot::GetLengthFieldForArrayCid(array_cid),
+      call->token_pos());
   *cursor = flow_graph->AppendTo(*cursor, length, NULL, FlowGraph::kValue);
 
   Instruction* bounds_check = NULL;
@@ -2383,10 +2380,9 @@
 
   if (array_cid == kGrowableObjectArrayCid) {
     // Insert data elements load.
-    LoadFieldInstr* elements = new (Z) LoadFieldInstr(
-        new (Z) Value(*array), GrowableObjectArray::data_offset(),
-        Object::dynamic_type(), call->token_pos());
-    elements->set_result_cid(kArrayCid);
+    LoadFieldInstr* elements = new (Z)
+        LoadFieldInstr(new (Z) Value(*array), Slot::GrowableObjectArray_data(),
+                       call->token_pos());
     *cursor = flow_graph->AppendTo(*cursor, elements, NULL, FlowGraph::kValue);
     // Load from the data from backing store which is a fixed-length array.
     *array = elements;
@@ -2482,10 +2478,11 @@
       case kArrayCid:
       case kGrowableObjectArrayCid: {
         const Class& instantiator_class = Class::Handle(Z, target.Owner());
-        LoadFieldInstr* load_type_args = new (Z) LoadFieldInstr(
-            new (Z) Value(array),
-            NativeFieldDesc::GetTypeArgumentsFieldFor(Z, instantiator_class),
-            call->token_pos());
+        LoadFieldInstr* load_type_args = new (Z)
+            LoadFieldInstr(new (Z) Value(array),
+                           Slot::GetTypeArgumentsSlotFor(flow_graph->thread(),
+                                                         instantiator_class),
+                           call->token_pos());
         cursor = flow_graph->AppendTo(cursor, load_type_args, NULL,
                                       FlowGraph::kValue);
         type_args = load_type_args;
@@ -2671,7 +2668,7 @@
 }
 
 static bool InlineGrowableArraySetter(FlowGraph* flow_graph,
-                                      intptr_t offset,
+                                      const Slot& field,
                                       StoreBarrierType store_barrier_type,
                                       Instruction* call,
                                       Definition* receiver,
@@ -2687,9 +2684,9 @@
   (*entry)->InheritDeoptTarget(Z, call);
 
   // This is an internal method, no need to check argument types.
-  StoreInstanceFieldInstr* store = new (Z) StoreInstanceFieldInstr(
-      offset, new (Z) Value(array), new (Z) Value(value), store_barrier_type,
-      call->token_pos());
+  StoreInstanceFieldInstr* store = new (Z)
+      StoreInstanceFieldInstr(field, new (Z) Value(array), new (Z) Value(value),
+                              store_barrier_type, call->token_pos());
   flow_graph->AppendTo(*entry, store, call->env(), FlowGraph::kEffect);
   *last = store;
 
@@ -2707,8 +2704,8 @@
   ASSERT(array_cid != kDynamicCid);
 
   LoadFieldInstr* length = new (Z) LoadFieldInstr(
-      new (Z) Value(array),
-      NativeFieldDesc::GetLengthFieldForArrayCid(array_cid), call->token_pos());
+      new (Z) Value(array), Slot::GetLengthFieldForArrayCid(array_cid),
+      call->token_pos());
   *cursor = flow_graph->AppendTo(*cursor, length, NULL, FlowGraph::kValue);
 
   intptr_t element_size = Instance::ElementSizeFor(array_cid);
@@ -3119,9 +3116,9 @@
                                               Definition* str,
                                               Definition* index,
                                               Instruction* cursor) {
-  LoadFieldInstr* length = new (Z) LoadFieldInstr(
-      new (Z) Value(str), NativeFieldDesc::GetLengthFieldForArrayCid(cid),
-      str->token_pos());
+  LoadFieldInstr* length = new (Z)
+      LoadFieldInstr(new (Z) Value(str), Slot::GetLengthFieldForArrayCid(cid),
+                     str->token_pos());
   cursor = flow_graph->AppendTo(cursor, length, NULL, FlowGraph::kValue);
 
   // Bounds check.
@@ -3858,16 +3855,16 @@
       ASSERT(call->IsStaticCall() ||
              (ic_data == NULL || ic_data->NumberOfChecksIs(1)));
       return InlineGrowableArraySetter(
-          flow_graph, GrowableObjectArray::data_offset(), kEmitStoreBarrier,
-          call, receiver, graph_entry, entry, last);
+          flow_graph, Slot::GrowableObjectArray_data(), kEmitStoreBarrier, call,
+          receiver, graph_entry, entry, last);
     case MethodRecognizer::kGrowableArraySetLength:
       ASSERT((receiver_cid == kGrowableObjectArrayCid) ||
              ((receiver_cid == kDynamicCid) && call->IsStaticCall()));
       ASSERT(call->IsStaticCall() ||
              (ic_data == NULL || ic_data->NumberOfChecksIs(1)));
       return InlineGrowableArraySetter(
-          flow_graph, GrowableObjectArray::length_offset(), kNoStoreBarrier,
-          call, receiver, graph_entry, entry, last);
+          flow_graph, Slot::GrowableObjectArray_length(), kNoStoreBarrier, call,
+          receiver, graph_entry, entry, last);
     case MethodRecognizer::kSmi_bitAndFromSmi:
       return InlineSmiBitAndFromSmi(flow_graph, call, receiver, graph_entry,
                                     entry, last);
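
A minimal toy sketch of this API shape (invented names and simplified types,
not the real IL classes): loads constructed by the inliner now carry a
canonical location descriptor instead of a raw offset, so consumers can ask
what is being loaded rather than only where.

    #include <cstdio>

    // Invented stand-ins for the real IL types.
    struct Slot {
      const char* name;
      long offset_in_bytes;
      bool is_immutable;
    };

    struct LoadFieldInstr {
      const Slot& slot;  // previously: a raw offset plus an optional Field*
      explicit LoadFieldInstr(const Slot& slot) : slot(slot) {}
    };

    int main() {
      static const Slot kArrayLength = {"Array.length", 8, true};
      LoadFieldInstr load(kArrayLength);
      std::printf("load %s @%ld (%s)\n", load.slot.name,
                  load.slot.offset_in_bytes,
                  load.slot.is_immutable ? "immutable" : "mutable");
      return 0;
    }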
diff --git a/runtime/vm/compiler/backend/range_analysis.cc b/runtime/vm/compiler/backend/range_analysis.cc
index c2a4bef..5b50229 100644
--- a/runtime/vm/compiler/backend/range_analysis.cc
+++ b/runtime/vm/compiler/backend/range_analysis.cc
@@ -2707,43 +2707,55 @@
 }
 
 void LoadFieldInstr::InferRange(RangeAnalysis* analysis, Range* range) {
-  if (native_field() != nullptr) {
-    switch (native_field()->kind()) {
-      case NativeFieldDesc::kArray_length:
-      case NativeFieldDesc::kGrowableObjectArray_length:
-        *range = Range(RangeBoundary::FromConstant(0),
-                       RangeBoundary::FromConstant(Array::kMaxElements));
-        break;
+  switch (slot().kind()) {
+    case Slot::Kind::kArray_length:
+    case Slot::Kind::kGrowableObjectArray_length:
+      *range = Range(RangeBoundary::FromConstant(0),
+                     RangeBoundary::FromConstant(Array::kMaxElements));
+      break;
 
-      case NativeFieldDesc::kTypedData_length:
-        *range = Range(RangeBoundary::FromConstant(0), RangeBoundary::MaxSmi());
-        break;
+    case Slot::Kind::kTypedData_length:
+      *range = Range(RangeBoundary::FromConstant(0), RangeBoundary::MaxSmi());
+      break;
 
-      case NativeFieldDesc::kString_length:
-        *range = Range(RangeBoundary::FromConstant(0),
-                       RangeBoundary::FromConstant(String::kMaxElements));
-        break;
+    case Slot::Kind::kString_length:
+      *range = Range(RangeBoundary::FromConstant(0),
+                     RangeBoundary::FromConstant(String::kMaxElements));
+      break;
 
-      case NativeFieldDesc::kLinkedHashMap_index:
-      case NativeFieldDesc::kLinkedHashMap_data:
-      case NativeFieldDesc::kTypeArguments:
-        // Not an integer valued field.
-        UNREACHABLE();
-        break;
+    case Slot::Kind::kDartField:
+    case Slot::Kind::kCapturedVariable:
+      // Use default value.
+      Definition::InferRange(analysis, range);
+      break;
 
-      case NativeFieldDesc::kLinkedHashMap_hash_mask:
-      case NativeFieldDesc::kLinkedHashMap_used_data:
-      case NativeFieldDesc::kLinkedHashMap_deleted_keys:
-        *range = Range(RangeBoundary::FromConstant(0), RangeBoundary::MaxSmi());
-        break;
+    case Slot::Kind::kLinkedHashMap_index:
+    case Slot::Kind::kLinkedHashMap_data:
+    case Slot::Kind::kGrowableObjectArray_data:
+    case Slot::Kind::kContext_parent:
+    case Slot::Kind::kTypeArguments:
+    case Slot::Kind::kClosure_context:
+    case Slot::Kind::kClosure_delayed_type_arguments:
+    case Slot::Kind::kClosure_function:
+    case Slot::Kind::kClosure_function_type_arguments:
+    case Slot::Kind::kClosure_instantiator_type_arguments:
+      // Not an integer valued field.
+      UNREACHABLE();
+      break;
 
-      case NativeFieldDesc::kArgumentsDescriptor_type_args_len:
-        *range = Range(RangeBoundary::FromConstant(0), RangeBoundary::MaxSmi());
-        break;
-    }
-    return;
+    case Slot::Kind::kClosure_hash:
+    case Slot::Kind::kLinkedHashMap_hash_mask:
+    case Slot::Kind::kLinkedHashMap_used_data:
+    case Slot::Kind::kLinkedHashMap_deleted_keys:
+      *range = Range(RangeBoundary::FromConstant(0), RangeBoundary::MaxSmi());
+      break;
+
+    case Slot::Kind::kArgumentsDescriptor_type_args_len:
+    case Slot::Kind::kArgumentsDescriptor_positional_count:
+    case Slot::Kind::kArgumentsDescriptor_count:
+      *range = Range(RangeBoundary::FromConstant(0), RangeBoundary::MaxSmi());
+      break;
   }
-  Definition::InferRange(analysis, range);
 }
 
 void LoadIndexedInstr::InferRange(RangeAnalysis* analysis, Range* range) {
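
A hedged aside on the shape of this change (toy code, invented kinds): an
exhaustive switch over the slot kind with no default clause means a newly
added kind that lacks a range rule is flagged by the C++ compiler (-Wswitch)
instead of silently inheriting a default.

    #include <cstdio>

    enum class Kind { kArray_length, kString_length, kDartField };

    struct Range {
      long long min, max;
    };

    Range InferRange(Kind kind) {
      switch (kind) {
        case Kind::kArray_length:
        case Kind::kString_length:
          return Range{0, 1LL << 30};
        case Kind::kDartField:
          return Range{-(1LL << 62), 1LL << 62};
      }
      return Range{0, 0};  // unreachable while the switch stays exhaustive
    }

    int main() {
      Range r = InferRange(Kind::kArray_length);
      std::printf("[%lld, %lld]\n", r.min, r.max);
      return 0;
    }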
diff --git a/runtime/vm/compiler/backend/redundancy_elimination.cc b/runtime/vm/compiler/backend/redundancy_elimination.cc
index f8f5ec8..f3d89f5 100644
--- a/runtime/vm/compiler/backend/redundancy_elimination.cc
+++ b/runtime/vm/compiler/backend/redundancy_elimination.cc
@@ -58,8 +58,10 @@
 // We distinguish the following aliases:
 //
 //   - for fields
-//     - *.f, *.@offs - field inside some object;
-//     - X.f, X.@offs - field inside an allocated object X;
+//     - *.f - field inside some object;
+//     - X.f - field inside an allocated object X;
+//     -   f - static field;
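+//
+//       For example, if v0 and v2 may refer to the same object, the places
+//       v0.f and v2.f both fall back to the wildcard alias *.f, so a store
+//       into either of them invalidates a forwarded value of the other.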
+//
 //   - for indexed accesses
 //     - *[*] - non-constant index inside some object;
 //     - *[C] - constant index inside some object;
@@ -103,14 +105,13 @@
   enum Kind {
     kNone,
 
-    // Field location. For instance fields is represented as a pair of a Field
-    // object and an instance (SSA definition) that is being accessed.
-    // For static fields instance is NULL.
-    kField,
+    // Static field location. Represented as a Field object with a
+    // nullptr instance.
+    kStaticField,
 
-    // VMField location. Represented as a pair of an instance (SSA definition)
-    // being accessed and offset to the field.
-    kVMField,
+    // Instance field location. Represented as a pair of an instance
+    // (SSA definition) and a Slot.
+    kInstanceField,
 
     // Indexed location with a non-constant index.
     kIndexed,
@@ -158,19 +159,14 @@
   // Construct a place from instruction if instruction accesses any place.
   // Otherwise constructs kNone place.
   Place(Instruction* instr, bool* is_load, bool* is_store)
-      : flags_(0), instance_(NULL), raw_selector_(0), id_(0) {
+      : flags_(0), instance_(nullptr), raw_selector_(0), id_(0) {
     switch (instr->tag()) {
       case Instruction::kLoadField: {
         LoadFieldInstr* load_field = instr->AsLoadField();
         set_representation(load_field->representation());
         instance_ = load_field->instance()->definition()->OriginalDefinition();
-        if (load_field->field() != NULL) {
-          set_kind(kField);
-          field_ = load_field->field();
-        } else {
-          set_kind(kVMField);
-          offset_in_bytes_ = load_field->offset_in_bytes();
-        }
+        set_kind(kInstanceField);
+        instance_field_ = &load_field->slot();
         *is_load = true;
         break;
       }
@@ -180,30 +176,25 @@
         set_representation(store->RequiredInputRepresentation(
             StoreInstanceFieldInstr::kValuePos));
         instance_ = store->instance()->definition()->OriginalDefinition();
-        if (!store->field().IsNull()) {
-          set_kind(kField);
-          field_ = &store->field();
-        } else {
-          set_kind(kVMField);
-          offset_in_bytes_ = store->offset_in_bytes();
-        }
+        set_kind(kInstanceField);
+        instance_field_ = &store->slot();
         *is_store = true;
         break;
       }
 
       case Instruction::kLoadStaticField:
-        set_kind(kField);
+        set_kind(kStaticField);
         set_representation(instr->AsLoadStaticField()->representation());
-        field_ = &instr->AsLoadStaticField()->StaticField();
+        static_field_ = &instr->AsLoadStaticField()->StaticField();
         *is_load = true;
         break;
 
       case Instruction::kStoreStaticField:
-        set_kind(kField);
+        set_kind(kStaticField);
         set_representation(
             instr->AsStoreStaticField()->RequiredInputRepresentation(
                 StoreStaticFieldInstr::kValuePos));
-        field_ = &instr->AsStoreStaticField()->field();
+        static_field_ = &instr->AsStoreStaticField()->field();
         *is_store = true;
         break;
 
@@ -233,6 +224,18 @@
     }
   }
 
+  bool IsConstant(Object* value) const {
+    switch (kind()) {
+      case kInstanceField:
+        return (instance() != nullptr) && instance()->IsConstant() &&
+               LoadFieldInstr::TryEvaluateLoad(
+                   instance()->AsConstant()->constant_value(), instance_field(),
+                   value);
+      default:
+        return false;
+    }
+  }
+
   // Create object representing *[*] alias.
   static Place* CreateAnyInstanceAnyIndexAlias(Zone* zone, intptr_t id) {
     return Wrap(
@@ -264,12 +267,12 @@
 
   bool DependsOnInstance() const {
     switch (kind()) {
-      case kField:
-      case kVMField:
+      case kInstanceField:
       case kIndexed:
       case kConstantIndexed:
         return true;
 
+      case kStaticField:
       case kNone:
         return false;
     }
@@ -325,14 +328,15 @@
     instance_ = def->OriginalDefinition();
   }
 
-  const Field& field() const {
-    ASSERT(kind() == kField);
-    return *field_;
+  const Field& static_field() const {
+    ASSERT(kind() == kStaticField);
+    ASSERT(static_field_->is_static());
+    return *static_field_;
   }
 
-  intptr_t offset_in_bytes() const {
-    ASSERT(kind() == kVMField);
-    return offset_in_bytes_;
+  const Slot& instance_field() const {
+    ASSERT(kind() == kInstanceField);
+    return *instance_field_;
   }
 
   Definition* index() const {
@@ -361,19 +365,16 @@
       case kNone:
         return "<none>";
 
-      case kField: {
-        const char* field_name = String::Handle(field().name()).ToCString();
-        if (field().is_static()) {
-          return Thread::Current()->zone()->PrintToString("<%s>", field_name);
-        } else {
-          return Thread::Current()->zone()->PrintToString(
-              "<%s.%s>", DefinitionName(instance()), field_name);
-        }
+      case kStaticField: {
+        const char* field_name =
+            String::Handle(static_field().name()).ToCString();
+        return Thread::Current()->zone()->PrintToString("<%s>", field_name);
       }
 
-      case kVMField:
+      case kInstanceField:
         return Thread::Current()->zone()->PrintToString(
-            "<%s.@%" Pd ">", DefinitionName(instance()), offset_in_bytes());
+            "<%s.%s[%p]>", DefinitionName(instance()), instance_field().Name(),
+            &instance_field());
 
       case kIndexed:
         return Thread::Current()->zone()->PrintToString(
@@ -397,8 +398,14 @@
   // Handle static finals as non-final with precompilation because
   // they may be reset to uninitialized after compilation.
   bool IsImmutableField() const {
-    return (kind() == kField) && field().is_final() &&
-           (!field().is_static() || !FLAG_fields_may_be_reset);
+    switch (kind()) {
+      case kInstanceField:
+        return instance_field().is_immutable();
+      case kStaticField:
+        return static_field().is_final() && !FLAG_fields_may_be_reset;
+      default:
+        return false;
+    }
   }
 
   intptr_t Hashcode() const {
@@ -427,14 +434,16 @@
       : flags_(flags), instance_(instance), raw_selector_(selector), id_(0) {}
 
   bool SameField(const Place* other) const {
-    return (kind() == kField)
-               ? (field().Original() == other->field().Original())
-               : (offset_in_bytes_ == other->offset_in_bytes_);
+    return (kind() == kStaticField)
+               ? (static_field().Original() == other->static_field().Original())
+               : (raw_selector_ == other->raw_selector_);
   }
 
   intptr_t FieldHashcode() const {
-    return (kind() == kField) ? reinterpret_cast<intptr_t>(field().Original())
-                              : offset_in_bytes_;
+    return (kind() == kStaticField)
+               ? String::Handle(Field::Handle(static_field().Original()).name())
+                     .Hash()
+               : raw_selector_;
   }
 
   void set_representation(Representation rep) {
@@ -549,8 +558,8 @@
   Definition* instance_;
   union {
     intptr_t raw_selector_;
-    const Field* field_;
-    intptr_t offset_in_bytes_;
+    const Field* static_field_;
+    const Slot* instance_field_;
     intptr_t index_constant_;
     Definition* index_;
   };
@@ -907,10 +916,13 @@
         }
         break;
 
-      case Place::kField:
-      case Place::kVMField:
+      case Place::kStaticField:
+        // Nothing to do.
+        break;
+
+      case Place::kInstanceField:
         if (CanBeAliased(alias->instance())) {
-          // X.f or X.@offs alias with *.f and *.@offs respectively.
+          // X.f aliases with *.f.
           CrossAlias(alias, alias->CopyWithoutInstance());
         }
         break;
@@ -925,30 +937,16 @@
   // occur in other functions.
   bool IsIndependentFromEffects(Place* place) {
     if (place->IsImmutableField()) {
-      // Note that we can't use LoadField's is_immutable attribute here because
-      // some VM-fields (those that have no corresponding Field object and
-      // accessed through offset alone) can share offset but have different
-      // immutability properties.
-      // One example is the length property of growable and fixed size list. If
-      // loads of these two properties occur in the same function for the same
-      // receiver then they will get the same expression number. However
-      // immutability of the length of fixed size list does not mean that
-      // growable list also has immutable property. Thus we will make a
-      // conservative assumption for the VM-properties.
-      // TODO(vegorov): disambiguate immutable and non-immutable VM-fields with
-      // the same offset e.g. through recognized kind.
       return true;
     }
 
-    return ((place->kind() == Place::kField) ||
-            (place->kind() == Place::kVMField)) &&
+    return (place->kind() == Place::kInstanceField) &&
            !CanBeAliased(place->instance());
   }
 
   // Returns true if there are direct loads from the given place.
   bool HasLoadsFromPlace(Definition* defn, const Place* place) {
-    ASSERT((place->kind() == Place::kField) ||
-           (place->kind() == Place::kVMField));
+    ASSERT(place->kind() == Place::kInstanceField);
 
     for (Value* use = defn->input_use_list(); use != NULL;
          use = use->next_use()) {
@@ -1127,8 +1125,7 @@
 }
 
 static bool IsPhiDependentPlace(Place* place) {
-  return ((place->kind() == Place::kField) ||
-          (place->kind() == Place::kVMField)) &&
+  return (place->kind() == Place::kInstanceField) &&
          (place->instance() != NULL) && place->instance()->IsPhi();
 }
 
@@ -1589,13 +1586,13 @@
         }
 
         // For object allocation forward initial values of the fields to
-        // subsequent loads. For skip final fields.  Final fields are
-        // initialized in constructor that potentially can be not inlined into
-        // the function that we are currently optimizing. However at the same
-        // time we assume that values of the final fields can be forwarded
-        // across side-effects. If we add 'null' as known values for these
-        // fields here we will incorrectly propagate this null across
-        // constructor invocation.
+        // subsequent loads, except for final fields of escaping objects.
+        // Final fields are initialized in a constructor, which potentially
+        // was not inlined into the function we are currently optimizing.
+        // However, at the same time we assume that values of final fields
+        // can be forwarded across side effects. If we added 'null' as a
+        // known value for these fields here, we would incorrectly propagate
+        // this null across the constructor invocation.
         AllocateObjectInstr* alloc = instr->AsAllocateObject();
         if ((alloc != NULL)) {
           for (Value* use = alloc->input_use_list(); use != NULL;
@@ -1610,10 +1607,10 @@
               // Found a load. Initialize current value of the field to null for
               // normal fields, or with type arguments.
 
-              // Forward for all fields for non-escaping objects and only
-              // non-final fields and type arguments for escaping ones.
+              // If the object escapes, do not forward final fields; see the
+              // comment above for an explanation.
               if (aliased_set_->CanBeAliased(alloc) &&
-                  (load->field() != NULL) && load->field()->is_final()) {
+                  load->slot().IsDartField() && load->slot().is_immutable()) {
                 continue;
               }
 
@@ -1622,7 +1619,8 @@
                 ASSERT(alloc->ArgumentCount() == 1);
                 intptr_t type_args_offset =
                     alloc->cls().type_arguments_field_offset();
-                if (load->offset_in_bytes() == type_args_offset) {
+                if (load->slot().IsTypeArguments() &&
+                    load->slot().offset_in_bytes() == type_args_offset) {
                   forward_def = alloc->PushArgumentAt(0)->value()->definition();
                 }
               }
@@ -2937,12 +2935,9 @@
 }
 
-// Add a field/offset to the list of fields if it is not yet present there.
+// Add a slot to the list of slots if it is not yet present there.
-static bool AddSlot(ZoneGrowableArray<const Object*>* slots,
-                    const Object& slot) {
-  ASSERT(slot.IsSmi() || slot.IsField());
-  ASSERT(!slot.IsField() || Field::Cast(slot).IsOriginal());
-  for (intptr_t i = 0; i < slots->length(); i++) {
-    if ((*slots)[i]->raw() == slot.raw()) {
+static bool AddSlot(ZoneGrowableArray<const Slot*>* slots, const Slot& slot) {
+  for (auto s : *slots) {
+    if (s == &slot) {
       return false;
     }
   }
@@ -2993,7 +2988,7 @@
 void AllocationSinking::CreateMaterializationAt(
     Instruction* exit,
     Definition* alloc,
-    const ZoneGrowableArray<const Object*>& slots) {
+    const ZoneGrowableArray<const Slot*>& slots) {
   ZoneGrowableArray<Value*>* values =
       new (Z) ZoneGrowableArray<Value*>(slots.length());
 
@@ -3003,20 +2998,14 @@
   Instruction* load_point = FirstMaterializationAt(exit);
 
   // Insert load instruction for every field.
-  for (intptr_t i = 0; i < slots.length(); i++) {
+  for (auto slot : slots) {
     LoadFieldInstr* load =
-        slots[i]->IsField()
-            ? new (Z) LoadFieldInstr(
-                  new (Z) Value(alloc), &Field::Cast(*slots[i]),
-                  AbstractType::ZoneHandle(Z), alloc->token_pos(), NULL)
-            : new (Z) LoadFieldInstr(
-                  new (Z) Value(alloc), Smi::Cast(*slots[i]).Value(),
-                  AbstractType::ZoneHandle(Z), alloc->token_pos());
-    flow_graph_->InsertBefore(load_point, load, NULL, FlowGraph::kValue);
+        new (Z) LoadFieldInstr(new (Z) Value(alloc), *slot, alloc->token_pos());
+    flow_graph_->InsertBefore(load_point, load, nullptr, FlowGraph::kValue);
     values->Add(new (Z) Value(load));
   }
 
-  MaterializeObjectInstr* mat = NULL;
+  MaterializeObjectInstr* mat = nullptr;
   if (alloc->IsAllocateObject()) {
     mat = new (Z)
         MaterializeObjectInstr(alloc->AsAllocateObject(), slots, values);
@@ -3026,7 +3015,7 @@
         alloc->AsAllocateUninitializedContext(), slots, values);
   }
 
-  flow_graph_->InsertBefore(exit, mat, NULL, FlowGraph::kValue);
+  flow_graph_->InsertBefore(exit, mat, nullptr, FlowGraph::kValue);
 
   // Replace all mentions of this allocation with a newly inserted
   // MaterializeObject instruction.
@@ -3112,27 +3101,21 @@
 
 void AllocationSinking::InsertMaterializations(Definition* alloc) {
   // Collect all fields that are written for this instance.
-  ZoneGrowableArray<const Object*>* slots =
-      new (Z) ZoneGrowableArray<const Object*>(5);
+  auto slots = new (Z) ZoneGrowableArray<const Slot*>(5);
 
   for (Value* use = alloc->input_use_list(); use != NULL;
        use = use->next_use()) {
     StoreInstanceFieldInstr* store = use->instruction()->AsStoreInstanceField();
     if ((store != NULL) && (store->instance()->definition() == alloc)) {
-      if (!store->field().IsNull()) {
-        AddSlot(slots, Field::ZoneHandle(Z, store->field().Original()));
-      } else {
-        AddSlot(slots, Smi::ZoneHandle(Z, Smi::New(store->offset_in_bytes())));
-      }
+      AddSlot(slots, store->slot());
     }
   }
 
   if (alloc->ArgumentCount() > 0) {
     AllocateObjectInstr* alloc_object = alloc->AsAllocateObject();
     ASSERT(alloc_object->ArgumentCount() == 1);
-    intptr_t type_args_offset =
-        alloc_object->cls().type_arguments_field_offset();
-    AddSlot(slots, Smi::ZoneHandle(Z, Smi::New(type_args_offset)));
+    AddSlot(slots, Slot::GetTypeArgumentsSlotFor(flow_graph_->thread(),
+                                                 alloc_object->cls()));
   }
 
   // Collect all instructions that mention this object in the environment.
diff --git a/runtime/vm/compiler/backend/redundancy_elimination.h b/runtime/vm/compiler/backend/redundancy_elimination.h
index 0e9e1dd..f759073 100644
--- a/runtime/vm/compiler/backend/redundancy_elimination.h
+++ b/runtime/vm/compiler/backend/redundancy_elimination.h
@@ -64,7 +64,7 @@
 
   void CreateMaterializationAt(Instruction* exit,
                                Definition* alloc,
-                               const ZoneGrowableArray<const Object*>& fields);
+                               const ZoneGrowableArray<const Slot*>& fields);
 
   void EliminateAllocation(Definition* alloc);
 
diff --git a/runtime/vm/compiler/backend/slot.cc b/runtime/vm/compiler/backend/slot.cc
new file mode 100644
index 0000000..ebc0c53
--- /dev/null
+++ b/runtime/vm/compiler/backend/slot.cc
@@ -0,0 +1,201 @@
+// Copyright (c) 2018, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/compiler/backend/slot.h"
+
+#ifndef DART_PRECOMPILED_RUNTIME
+
+#include "vm/compiler/compiler_state.h"
+#include "vm/hash_map.h"
+#include "vm/parser.h"
+#include "vm/scopes.h"
+
+namespace dart {
+
+// Canonicalization cache for Slot objects.
+//
+// This cache is attached to the CompilerState to ensure that we preserve
+// identity of Slot objects during each individual compilation.
+class SlotCache : public ZoneAllocated {
+ public:
+  // Returns an instance of SlotCache for the current compilation.
+  static SlotCache& Instance(Thread* thread) {
+    auto result = thread->compiler_state().slot_cache();
+    if (result == nullptr) {
+      result = new (thread->zone()) SlotCache(thread);
+      thread->compiler_state().set_slot_cache(result);
+    }
+    return *result;
+  }
+
+  const Slot& Canonicalize(const Slot& value) {
+    auto result = fields_.LookupValue(&value);
+    if (result == nullptr) {
+      result = new (zone_) Slot(value);
+      fields_.Insert(result);
+    }
+    return *result;
+  }
+
+ private:
+  explicit SlotCache(Thread* thread)
+      : zone_(thread->zone()), fields_(thread->zone()) {}
+
+  Zone* const zone_;
+  DirectChainedHashMap<PointerKeyValueTrait<const Slot> > fields_;
+};
+
+const Slot& Slot::GetNativeSlot(Kind kind) {
+  // There is a fixed, statically known number of native slots, so we cache
+  // them statically.
+  static const Slot fields[] = {
+#define FIELD_FINAL (IsImmutableBit::encode(true))
+#define FIELD_VAR (0)
+#define DEFINE_NATIVE_FIELD(ClassName, FieldName, cid, mutability)             \
+  Slot(Kind::k##ClassName##_##FieldName, FIELD_##mutability, k##cid##Cid,      \
+       ClassName::FieldName##_offset(), #ClassName "." #FieldName, nullptr),
+
+      NATIVE_SLOTS_LIST(DEFINE_NATIVE_FIELD)
+
+#undef DEFINE_NATIVE_FIELD
+#undef FIELD_VAR
+#undef FIELD_FINAL
+  };
+
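+  // Native slot kinds occupy the low values of Kind, so a kind can be used
+  // directly as an index into the table above.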
+  ASSERT(static_cast<uint8_t>(kind) < ARRAY_SIZE(fields));
+  return fields[static_cast<uint8_t>(kind)];
+}
+
+// Note: should only be called with cids of array-like classes.
+const Slot& Slot::GetLengthFieldForArrayCid(intptr_t array_cid) {
+  if (RawObject::IsExternalTypedDataClassId(array_cid) ||
+      RawObject::IsTypedDataClassId(array_cid)) {
+    return GetNativeSlot(Kind::kTypedData_length);
+  }
+
+  switch (array_cid) {
+    case kGrowableObjectArrayCid:
+      return GetNativeSlot(Kind::kGrowableObjectArray_length);
+
+    case kOneByteStringCid:
+    case kTwoByteStringCid:
+    case kExternalOneByteStringCid:
+    case kExternalTwoByteStringCid:
+      return GetNativeSlot(Kind::kString_length);
+
+    case kArrayCid:
+    case kImmutableArrayCid:
+      return GetNativeSlot(Kind::kArray_length);
+
+    default:
+      UNREACHABLE();
+      return GetNativeSlot(Kind::kArray_length);
+  }
+}
+
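+// Unlike the native slots above, type-arguments slots are not statically
+// enumerable (their offset depends on the class), so they are canonicalized
+// through the per-compilation SlotCache instead.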
+const Slot& Slot::GetTypeArgumentsSlotAt(Thread* thread, intptr_t offset) {
+  ASSERT(offset != Class::kNoTypeArguments);
+  return SlotCache::Instance(thread).Canonicalize(Slot(
+      Kind::kTypeArguments, IsImmutableBit::encode(true), kTypeArgumentsCid,
+      offset, ":type_arguments", /*static_type=*/nullptr));
+}
+
+const Slot& Slot::GetTypeArgumentsSlotFor(Thread* thread, const Class& cls) {
+  return GetTypeArgumentsSlotAt(thread, cls.type_arguments_field_offset());
+}
+
+const Slot& Slot::GetContextVariableSlotFor(Thread* thread,
+                                            const LocalVariable& variable) {
+  ASSERT(variable.is_captured());
+  // TODO(vegorov) Can't assign a static type to local variables because
+  // for captured parameters we generate code that first stores the
+  // variable into the context and then loads it back from the context to
+  // perform the type check.
+  return SlotCache::Instance(thread).Canonicalize(Slot(
+      Kind::kCapturedVariable,
+      IsImmutableBit::encode(variable.is_final()) | IsNullableBit::encode(true),
+      kDynamicCid, Context::variable_offset(variable.index().value()),
+      &variable.name(), /*static_type=*/nullptr));
+}
+
+const Slot& Slot::Get(const Field& field,
+                      const ParsedFunction* parsed_function) {
+  Thread* thread = Thread::Current();
+  Zone* zone = thread->zone();
+  intptr_t nullable_cid = kDynamicCid;
+  bool is_nullable = true;
+
+  if (thread->isolate()->use_field_guards() &&
+      field.guarded_cid() != kIllegalCid) {
+    ASSERT(parsed_function != nullptr);  // Need to record dependency.
+    nullable_cid = field.guarded_cid();
+    is_nullable = field.is_nullable();
+    parsed_function->AddToGuardedFields(&field);
+  }
+
+  return SlotCache::Instance(thread).Canonicalize(
+      Slot(Kind::kDartField,
+           IsImmutableBit::encode(field.is_final() || field.is_const()) |
+               IsNullableBit::encode(is_nullable),
+           nullable_cid, field.Offset(), &field,
+           &AbstractType::ZoneHandle(zone, field.type())));
+}
+
+CompileType Slot::ComputeCompileType() const {
+  return CompileType::CreateNullable(is_nullable(), nullable_cid());
+}
+
+const AbstractType& Slot::static_type() const {
+  return static_type_ != nullptr ? *static_type_ : Object::null_abstract_type();
+}
+
+const char* Slot::Name() const {
+  if (IsLocalVariable()) {
+    return DataAs<const String>()->ToCString();
+  } else if (IsDartField()) {
+    return String::Handle(field().name()).ToCString();
+  } else {
+    return DataAs<const char>();
+  }
+}
+
+bool Slot::Equals(const Slot* other) const {
+  if (kind_ != other->kind_) {
+    return false;
+  }
+
+  switch (kind_) {
+    case Kind::kTypeArguments:
+      return (offset_in_bytes_ == other->offset_in_bytes_);
+
+    case Kind::kCapturedVariable:
+      return (offset_in_bytes_ == other->offset_in_bytes_) &&
+             (flags_ == other->flags_) &&
+             (DataAs<const String>()->raw() ==
+              other->DataAs<const String>()->raw());
+
+    case Kind::kDartField:
+      return (offset_in_bytes_ == other->offset_in_bytes_) &&
+             other->DataAs<const Field>()->Original() ==
+                 DataAs<const Field>()->Original();
+
+    default:
+      UNREACHABLE();
+      return false;
+  }
+}
+
+intptr_t Slot::Hashcode() const {
+  intptr_t result = (static_cast<int8_t>(kind_) * 63 + offset_in_bytes_) * 31;
+  if (IsDartField()) {
+    result += String::Handle(DataAs<const Field>()->name()).Hash();
+  } else if (IsLocalVariable()) {
+    result += DataAs<const String>()->Hash();
+  }
+  return result;
+}
+
+}  // namespace dart
+
+#endif  // DART_PRECOMPILED_RUNTIME
diff --git a/runtime/vm/compiler/backend/slot.h b/runtime/vm/compiler/backend/slot.h
new file mode 100644
index 0000000..aef5482
--- /dev/null
+++ b/runtime/vm/compiler/backend/slot.h
@@ -0,0 +1,214 @@
+// Copyright (c) 2018, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+// Slot is an abstraction that describes a readable (and possibly writeable)
+// location within an object.
+//
+// In general slots follow the memory model for normal Dart fields - but they
+// can also be used to describe locations that don't have a corresponding
+// Field object, e.g. fields within native objects like arrays or contexts.
+//
+// Slot objects created by the compiler have an identity. If two slots F and G
+// are different then the compiler assumes that a store into F can't alias a
+// load from G and vice versa.
+//
+// All slots can be split into 4 categories:
+//
+//   - slots for fields of native classes (Array, Closure, etc);
+//   - slots for type arguments;
+//   - slots for captured variables;
+//   - slots for normal Dart fields (i.e. those that have a corresponding
+//     Field object).
+//
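+// For example, a store through the slot Slot::Array_length() is assumed not
+// to alias a load through Slot::GrowableObjectArray_length(): they are
+// distinct Slot objects, so the optimizer may treat such accesses as
+// independent. (An illustrative consequence of the identity rule above.)
+//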
+
+#ifndef RUNTIME_VM_COMPILER_BACKEND_SLOT_H_
+#define RUNTIME_VM_COMPILER_BACKEND_SLOT_H_
+
+#include "vm/compiler/backend/compile_type.h"
+#include "vm/thread.h"
+
+namespace dart {
+
+class LocalScope;
+class LocalVariable;
+class ParsedFunction;
+
+// List of slots that correspond to fields of native objects in the following
+// format:
+//
+//     V(class_name, field_name, exact_type, FINAL|VAR)
+//
+// - class_name and field_name specify the name of the host class and the name
+//   of the field respectively;
+// - exact_type specifies the exact type of the field (any load from this
+//   field would only yield instances of this type);
+// - the last component specifies whether the field behaves like a final field
+//   (i.e. it is initialized once at construction time and does not change
+//   after that) or like a non-final field.
+//
+// Note: native slots are expected to be non-nullable.
+#define NATIVE_SLOTS_LIST(V)                                                   \
+  V(Array, length, Smi, FINAL)                                                 \
+  V(Context, parent, Context, FINAL)                                           \
+  V(Closure, instantiator_type_arguments, TypeArguments, FINAL)                \
+  V(Closure, delayed_type_arguments, TypeArguments, FINAL)                     \
+  V(Closure, function_type_arguments, TypeArguments, FINAL)                    \
+  V(Closure, function, Function, FINAL)                                        \
+  V(Closure, context, Context, FINAL)                                          \
+  V(Closure, hash, Context, VAR)                                               \
+  V(GrowableObjectArray, length, Smi, VAR)                                     \
+  V(GrowableObjectArray, data, Array, VAR)                                     \
+  V(TypedData, length, Smi, FINAL)                                             \
+  V(String, length, Smi, FINAL)                                                \
+  V(LinkedHashMap, index, TypedDataUint32Array, VAR)                           \
+  V(LinkedHashMap, data, Array, VAR)                                           \
+  V(LinkedHashMap, hash_mask, Smi, VAR)                                        \
+  V(LinkedHashMap, used_data, Smi, VAR)                                        \
+  V(LinkedHashMap, deleted_keys, Smi, VAR)                                     \
+  V(ArgumentsDescriptor, type_args_len, Smi, FINAL)                            \
+  V(ArgumentsDescriptor, positional_count, Smi, FINAL)                         \
+  V(ArgumentsDescriptor, count, Smi, FINAL)
+
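+// As an illustration, the V(Array, length, Smi, FINAL) entry above expands
+// via the DECLARE_KIND and DEFINE_GETTER macros (both defined below) roughly
+// into:
+//
+//   Kind::kArray_length                        // enum member
+//   static const Slot& Array_length() { ... }  // convenience getter
+//
+// with the Slot instance itself constructed in Slot::GetNativeSlot.
+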
+// Slot is an abstraction that describes a readable (and possibly writeable)
+// location within an object.
+//
+// Slot objects returned by Slot::Get* methods have identity and can be
+// compared by pointer. If two slots are different they must not alias.
+// If two slots can alias, they must be represented by the same slot object.
+class Slot : public ZoneAllocated {
+ public:
+  // clang-format off
+  enum class Kind : uint8_t {
+    // Native slots are identified by their kind - each native slot has its own.
+#define DECLARE_KIND(ClassName, FieldName, cid, mutability)                    \
+  k##ClassName##_##FieldName,
+    NATIVE_SLOTS_LIST(DECLARE_KIND)
+#undef DECLARE_KIND
+
+    // A slot used to store type arguments.
+    kTypeArguments,
+
+    // A slot within a Context object that contains a value of a captured
+    // local variable.
+    kCapturedVariable,
+
+    // A slot that corresponds to a Dart field (has corresponding Field object).
+    kDartField,
+  };
+  // clang-format on
+
+  // Returns a slot that represents the length field for the given [array_cid].
+  static const Slot& GetLengthFieldForArrayCid(intptr_t array_cid);
+
+  // Returns a slot that represents the type arguments field at the given
+  // offset or for the given class.
+  //
+  // We do not distinguish type argument fields within disjoint
+  // class hierarchies: type argument fields at the same offset would be
+  // represented by the same Slot object. Type argument slots are final
+  // so disambiguating type argument fields does not improve alias analysis.
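+  //
+  // For instance, two unrelated generic classes that happen to store their
+  // type arguments at the same offset are represented by one slot object
+  // (Slot::Equals compares only the offset for this kind).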
+  static const Slot& GetTypeArgumentsSlotAt(Thread* thread, intptr_t offset);
+  static const Slot& GetTypeArgumentsSlotFor(Thread* thread, const Class& cls);
+
+  // Returns a slot that represents the given captured local variable.
+  static const Slot& GetContextVariableSlotFor(Thread* thread,
+                                               const LocalVariable& var);
+
+  // Returns a slot that represents the given Dart field.
+  static const Slot& Get(const Field& field,
+                         const ParsedFunction* parsed_function);
+
+  // Convenience getters for native slots.
+#define DEFINE_GETTER(ClassName, FieldName, cid, mutability)                   \
+  static const Slot& ClassName##_##FieldName() {                               \
+    return GetNativeSlot(Kind::k##ClassName##_##FieldName);                    \
+  }
+
+  NATIVE_SLOTS_LIST(DEFINE_GETTER)
+#undef DEFINE_GETTER
+
+  Kind kind() const { return kind_; }
+  bool IsDartField() const { return kind() == Kind::kDartField; }
+  bool IsLocalVariable() const { return kind() == Kind::kCapturedVariable; }
+  bool IsTypeArguments() const { return kind() == Kind::kTypeArguments; }
+
+  const char* Name() const;
+
+  intptr_t offset_in_bytes() const { return offset_in_bytes_; }
+
+  bool is_immutable() const { return IsImmutableBit::decode(flags_); }
+
+  intptr_t nullable_cid() const { return cid_; }
+  bool is_nullable() const { return IsNullableBit::decode(flags_); }
+
+  // Static type of the slot, if any.
+  //
+  // A value that is read from the slot is guaranteed to be assignable to its
+  // static type.
+  const AbstractType& static_type() const;
+
+  // More precise type information about values that can be read from this slot.
+  CompileType ComputeCompileType() const;
+
+  const Field& field() const {
+    ASSERT(IsDartField());
+    ASSERT(data_ != nullptr);
+    return *DataAs<const Field>();
+  }
+
+  bool Equals(const Slot* other) const;
+  intptr_t Hashcode() const;
+
+ private:
+  Slot(Kind kind,
+       int8_t bits,
+       int16_t cid,
+       intptr_t offset_in_bytes,
+       const void* data,
+       const AbstractType* static_type)
+      : kind_(kind),
+        flags_(bits),
+        cid_(cid),
+        offset_in_bytes_(offset_in_bytes),
+        data_(data),
+        static_type_(static_type) {}
+
+  Slot(const Slot& other)
+      : Slot(other.kind_,
+             other.flags_,
+             other.cid_,
+             other.offset_in_bytes_,
+             other.data_,
+             other.static_type_) {}
+
+  using IsImmutableBit = BitField<int8_t, bool, 0, 1>;
+  using IsNullableBit = BitField<int8_t, bool, 1, 1>;
+
+  template <typename T>
+  const T* DataAs() const {
+    return static_cast<const T*>(data_);
+  }
+
+  static const Slot& GetNativeSlot(Kind kind);
+
+  const Kind kind_;
+  const int8_t flags_;  // is_immutable, is_nullable
+  const int16_t cid_;   // Concrete cid of a value or kDynamicCid.
+
+  const intptr_t offset_in_bytes_;
+
+  // Kind dependent data:
+  //   - name as a Dart String object for local variables;
+  //   - name as a C string for native slots;
+  //   - Field object for Dart fields;
+  const void* data_;
+
+  const AbstractType* static_type_;
+
+  friend class SlotCache;
+};
+
+}  // namespace dart
+
+#endif  // RUNTIME_VM_COMPILER_BACKEND_SLOT_H_
diff --git a/runtime/vm/compiler/backend/type_propagator.cc b/runtime/vm/compiler/backend/type_propagator.cc
index 7133e1f..ed928f7 100644
--- a/runtime/vm/compiler/backend/type_propagator.cc
+++ b/runtime/vm/compiler/backend/type_propagator.cc
@@ -1319,36 +1319,29 @@
 }
 
 CompileType LoadFieldInstr::ComputeType() const {
-  // Type may be null if the field is a VM field, e.g. context parent.
-  // Keep it as null for debug purposes and do not return dynamic in production
-  // mode, since misuse of the type would remain undetected.
-  if (type().IsNull()) {
-    return CompileType::Dynamic();
+  const AbstractType& field_type = slot().static_type();
+  CompileType compile_type_cid = slot().ComputeCompileType();
+  if (field_type.raw() == AbstractType::null()) {
+    return compile_type_cid;
   }
 
   const Isolate* isolate = Isolate::Current();
-  bool is_nullable = CompileType::kNullable;
   intptr_t cid = kDynamicCid;
   const AbstractType* abstract_type = NULL;
   if (isolate->can_use_strong_mode_types() ||
       (isolate->type_checks() &&
-       (type().IsFunctionType() || type().HasTypeClass()))) {
+       (field_type.IsFunctionType() || field_type.HasTypeClass()))) {
     cid = kIllegalCid;  // Abstract type is known, calculate cid lazily.
-    abstract_type = &type();
+    abstract_type = &field_type;
     TraceStrongModeType(this, *abstract_type);
   }
-  if ((field_ != NULL) && (field_->guarded_cid() != kIllegalCid) &&
-      (field_->guarded_cid() != kDynamicCid)) {
-    cid = field_->guarded_cid();
-    is_nullable = field_->is_nullable();
-    abstract_type = nullptr;  // Cid is known, calculate abstract type lazily.
-  } else {
-    cid = result_cid_;
-    if ((cid != kIllegalCid) && (cid != kDynamicCid)) {
-      abstract_type = nullptr;  // Cid is known, calculate abstract type lazily.
-    }
+
+  if (compile_type_cid.ToNullableCid() != kDynamicCid) {
+    abstract_type = nullptr;
   }
-  return CompileType(is_nullable, cid, abstract_type);
+
+  return CompileType(compile_type_cid.is_nullable(),
+                     compile_type_cid.ToNullableCid(), abstract_type);
 }
 
 CompileType LoadCodeUnitsInstr::ComputeType() const {
diff --git a/runtime/vm/compiler/call_specializer.cc b/runtime/vm/compiler/call_specializer.cc
index c07b6f4..577b3af 100644
--- a/runtime/vm/compiler/call_specializer.cc
+++ b/runtime/vm/compiler/call_specializer.cc
@@ -894,19 +894,17 @@
 
 void CallSpecializer::InlineImplicitInstanceGetter(Definition* call,
                                                    const Field& field) {
+  const Slot& slot = Slot::Get(field, &flow_graph()->parsed_function());
   LoadFieldInstr* load = new (Z) LoadFieldInstr(
-      new (Z) Value(call->ArgumentAt(0)), &field,
-      AbstractType::ZoneHandle(Z, field.type()), call->token_pos(),
-      isolate()->use_field_guards() ? &flow_graph()->parsed_function() : NULL);
-  load->set_is_immutable(field.is_final());
+      new (Z) Value(call->ArgumentAt(0)), slot, call->token_pos());
 
   // Discard the environment from the original instruction because the load
   // can't deoptimize.
   call->RemoveEnvironment();
   ReplaceCall(call, load);
 
-  if (load->result_cid() != kDynamicCid) {
-    // Reset value types if guarded_cid was used.
+  if (load->slot().nullable_cid() != kDynamicCid) {
+    // Reset value types if we know concrete cid.
     for (Value::Iterator it(load->input_use_list()); !it.Done(); it.Advance()) {
       it.Current()->SetReachingType(NULL);
     }
@@ -1034,10 +1032,10 @@
       if (!dst_type.IsInstantiated()) {
         const Class& owner = Class::Handle(Z, field.Owner());
         if (owner.NumTypeArguments() > 0) {
-          instantiator_type_args = new (Z) LoadFieldInstr(
-              new (Z) Value(instr->ArgumentAt(0)),
-              NativeFieldDesc::GetTypeArgumentsFieldFor(zone(), owner),
-              instr->token_pos());
+          instantiator_type_args = new (Z)
+              LoadFieldInstr(new (Z) Value(instr->ArgumentAt(0)),
+                             Slot::GetTypeArgumentsSlotFor(thread(), owner),
+                             instr->token_pos());
           InsertBefore(instr, instantiator_type_args, instr->env(),
                        FlowGraph::kValue);
         }
@@ -1056,15 +1054,10 @@
 
   // Field guard was detached.
   ASSERT(instr->FirstArgIndex() == 0);
-  StoreInstanceFieldInstr* store = new (Z)
-      StoreInstanceFieldInstr(field, new (Z) Value(instr->ArgumentAt(0)),
-                              new (Z) Value(instr->ArgumentAt(1)),
-                              kEmitStoreBarrier, instr->token_pos());
-
-  ASSERT(I->use_field_guards() || !store->IsUnboxedStore());
-  if (I->use_field_guards() && store->IsUnboxedStore()) {
-    flow_graph()->parsed_function().AddToGuardedFields(&field);
-  }
+  StoreInstanceFieldInstr* store = new (Z) StoreInstanceFieldInstr(
+      field, new (Z) Value(instr->ArgumentAt(0)),
+      new (Z) Value(instr->ArgumentAt(1)), kEmitStoreBarrier,
+      instr->token_pos(), &flow_graph()->parsed_function());
 
   // Discard the environment from the original instruction because the store
   // can't deoptimize.
diff --git a/runtime/vm/compiler/compiler_sources.gni b/runtime/vm/compiler/compiler_sources.gni
index 54fc581..afc73c3 100644
--- a/runtime/vm/compiler/compiler_sources.gni
+++ b/runtime/vm/compiler/compiler_sources.gni
@@ -37,6 +37,7 @@
   "backend/code_statistics.h",
+  "backend/compile_type.h",
   "backend/constant_propagator.cc",
   "backend/constant_propagator.h",
   "backend/flow_graph.cc",
   "backend/flow_graph.h",
   "backend/flow_graph_compiler.cc",
@@ -69,6 +70,8 @@
   "backend/range_analysis.h",
   "backend/redundancy_elimination.cc",
   "backend/redundancy_elimination.h",
+  "backend/slot.cc",
+  "backend/slot.h",
   "backend/type_propagator.cc",
   "backend/type_propagator.h",
   "call_specializer.cc",
@@ -77,6 +80,7 @@
   "cha.h",
   "compiler_pass.cc",
   "compiler_pass.h",
+  "compiler_state.cc",
   "compiler_state.h",
   "frontend/base_flow_graph_builder.cc",
   "frontend/base_flow_graph_builder.h",
diff --git a/runtime/vm/compiler/compiler_state.cc b/runtime/vm/compiler/compiler_state.cc
new file mode 100644
index 0000000..6d4bbc2
--- /dev/null
+++ b/runtime/vm/compiler/compiler_state.cc
@@ -0,0 +1,78 @@
+// Copyright (c) 2018, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/compiler/compiler_state.h"
+
+#ifndef DART_PRECOMPILED_RUNTIME
+
+#include <functional>
+
+#include "vm/scopes.h"
+
+namespace dart {
+
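+// Returns (*array_slot)[index], allocating the array on first use, growing it
+// with nullptr entries up to |index| and materializing the element via
+// |create| if it is still absent.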
+template <typename T>
+T* PutIfAbsent(Thread* thread,
+               ZoneGrowableArray<T*>** array_slot,
+               intptr_t index,
+               std::function<T*()> create) {
+  auto array = *array_slot;
+
+  if (array == nullptr) {
+    Zone* const Z = thread->zone();
+    *array_slot = array = new (Z) ZoneGrowableArray<T*>(Z, index + 1);
+  }
+
+  while (array->length() <= index) {
+    array->Add(nullptr);
+  }
+
+  if (array->At(index) == nullptr) {
+    (*array)[index] = create();
+  }
+  return array->At(index);
+}
+
+LocalVariable* CompilerState::GetDummyCapturedVariable(intptr_t index) {
+  return PutIfAbsent<LocalVariable>(
+      thread(), &dummy_captured_vars_, index, [&]() {
+        Zone* const Z = thread()->zone();
+        const AbstractType& dynamic_type =
+            AbstractType::ZoneHandle(Z, Type::DynamicType());
+        const String& name = String::ZoneHandle(
+            Z, Symbols::NewFormatted(thread(), ":context_var%" Pd, index));
+        LocalVariable* var = new (Z)
+            LocalVariable(TokenPosition::kNoSource, TokenPosition::kNoSource,
+                          name, dynamic_type, /*param_type=*/nullptr);
+        var->set_is_captured();
+        var->set_index(VariableIndex(index));
+        return var;
+      });
+}
+
+const GrowableArray<LocalVariable*>& CompilerState::GetDummyContextVariables(
+    intptr_t num_context_variables) {
+  return PutIfAbsent<LocalScope>(
+             thread(), &dummy_scopes_, num_context_variables,
+             [&]() {
+               Zone* const Z = thread()->zone();
+
+               LocalScope* scope = new (Z) LocalScope(
+                   /*parent=*/NULL, /*function_level=*/0, /*loop_level=*/0);
+               scope->set_context_level(0);
+
+               for (intptr_t i = 0; i < num_context_variables; i++) {
+                 LocalVariable* var = GetDummyCapturedVariable(i);
+                 scope->AddVariable(var);
+                 scope->AddContextVariable(var);
+               }
+
+               return scope;
+             })
+      ->context_variables();
+}
+
+}  // namespace dart
+
+#endif  // DART_PRECOMPILED_RUNTIME
diff --git a/runtime/vm/compiler/compiler_state.h b/runtime/vm/compiler/compiler_state.h
index 5c5584e..e47dad0 100644
--- a/runtime/vm/compiler/compiler_state.h
+++ b/runtime/vm/compiler/compiler_state.h
@@ -10,6 +10,10 @@
 
 namespace dart {
 
+class LocalScope;
+class LocalVariable;
+class SlotCache;
+
 // Deoptimization Id logic.
 //
 // Deoptimization ids are used to refer to deoptimization points, at which
@@ -79,10 +83,41 @@
     return Thread::Current()->compiler_state();
   }
 
+  SlotCache* slot_cache() const { return slot_cache_; }
+  void set_slot_cache(SlotCache* cache) { slot_cache_ = cache; }
+
+  // Create a dummy list of local variables representing a context object
+  // with the given number of captured variables.
+  //
+  // Used during bytecode to IL translation because AllocateContext and
+  // CloneContext IL instructions need a list of local variables and bytecode
+  // does not record this information.
+  const GrowableArray<LocalVariable*>& GetDummyContextVariables(
+      intptr_t num_context_variables);
+
+  // Create a dummy LocalVariable that represents a captured local variable
+  // at the given index.
+  //
+  // Used during bytecode to IL translation because StoreInstanceField and
+  // LoadField IL instructions need Slot, which can only be created from a
+  // LocalVariable.
+  //
+  // This function returns the same variable when it is called with the
+  // same index.
+  LocalVariable* GetDummyCapturedVariable(intptr_t index);
+
  private:
   CHA cha_;
   intptr_t deopt_id_ = 0;
 
+  // Cache for Slot objects created during compilation (see slot.h).
+  SlotCache* slot_cache_ = nullptr;
+
+  // Caches for dummy LocalVariables and LocalScopes created during
+  // bytecode to IL translation.
+  ZoneGrowableArray<LocalScope*>* dummy_scopes_ = nullptr;
+  ZoneGrowableArray<LocalVariable*>* dummy_captured_vars_ = nullptr;
+
   CompilerState* previous_;
 };
 
diff --git a/runtime/vm/compiler/frontend/base_flow_graph_builder.cc b/runtime/vm/compiler/frontend/base_flow_graph_builder.cc
index e78cc28..4e54a2b 100644
--- a/runtime/vm/compiler/frontend/base_flow_graph_builder.cc
+++ b/runtime/vm/compiler/frontend/base_flow_graph_builder.cc
@@ -114,7 +114,7 @@
   ASSERT(delta >= 0);
   Fragment instructions = LoadLocal(parsed_function_->current_context_var());
   while (delta-- > 0) {
-    instructions += LoadField(Context::parent_offset());
+    instructions += LoadNativeField(Slot::Context_parent());
   }
   return instructions;
 }
@@ -244,7 +244,7 @@
   TargetEntryInstr* neq_entry;
 
   test += LoadArgDescriptor();
-  test += LoadNativeField(NativeFieldDesc::ArgumentsDescriptor_type_args_len());
+  test += LoadNativeField(Slot::ArgumentsDescriptor_type_args_len());
   test += IntConstant(num_type_args);
   test += BranchIfEqual(&eq_entry, &neq_entry);
 
@@ -267,7 +267,7 @@
   TargetEntryInstr* present_entry;
 
   test += LoadLocal(closure);
-  test += LoadField(Closure::delayed_type_arguments_offset());
+  test += LoadNativeField(Slot::Closure_delayed_type_arguments());
   test += Constant(Object::empty_type_arguments());
   test += BranchIfEqual(&absent_entry, &present_entry);
 
@@ -303,22 +303,6 @@
   }
 }
 
-Fragment BaseFlowGraphBuilder::LoadField(const Field& field) {
-  LoadFieldInstr* load = new (Z) LoadFieldInstr(
-      Pop(), &MayCloneField(field), AbstractType::ZoneHandle(Z, field.type()),
-      TokenPosition::kNoSource, parsed_function_);
-  Push(load);
-  return Fragment(load);
-}
-
-Fragment BaseFlowGraphBuilder::LoadField(intptr_t offset, intptr_t class_id) {
-  LoadFieldInstr* load = new (Z) LoadFieldInstr(
-      Pop(), offset, AbstractType::ZoneHandle(Z), TokenPosition::kNoSource);
-  load->set_result_cid(class_id);
-  Push(load);
-  return Fragment(load);
-}
-
 Fragment BaseFlowGraphBuilder::LoadIndexed(intptr_t index_scale) {
   Value* index = Pop();
   Value* array = Pop();
@@ -329,8 +313,11 @@
   return Fragment(instr);
 }
 
-Fragment BaseFlowGraphBuilder::LoadNativeField(
-    const NativeFieldDesc* native_field) {
+Fragment BaseFlowGraphBuilder::LoadField(const Field& field) {
+  return LoadNativeField(Slot::Get(MayCloneField(field), parsed_function_));
+}
+
+Fragment BaseFlowGraphBuilder::LoadNativeField(const Slot& native_field) {
   LoadFieldInstr* load =
       new (Z) LoadFieldInstr(Pop(), native_field, TokenPosition::kNoSource);
   Push(load);
@@ -381,14 +368,14 @@
 
 Fragment BaseFlowGraphBuilder::StoreInstanceField(
     TokenPosition position,
-    intptr_t offset,
+    const Slot& field,
     StoreBarrierType emit_store_barrier) {
   Value* value = Pop();
   if (value->BindsToConstant()) {
     emit_store_barrier = kNoStoreBarrier;
   }
   StoreInstanceFieldInstr* store = new (Z) StoreInstanceFieldInstr(
-      offset, Pop(), value, emit_store_barrier, position);
+      field, Pop(), value, emit_store_barrier, position);
   return Fragment(store);
 }
 
@@ -401,10 +388,11 @@
     emit_store_barrier = kNoStoreBarrier;
   }
 
-  StoreInstanceFieldInstr* store = new (Z)
-      StoreInstanceFieldInstr(MayCloneField(field), Pop(), value,
-                              emit_store_barrier, TokenPosition::kNoSource);
-  store->set_is_initialization(is_initialization_store);
+  StoreInstanceFieldInstr* store = new (Z) StoreInstanceFieldInstr(
+      MayCloneField(field), Pop(), value, emit_store_barrier,
+      TokenPosition::kNoSource, parsed_function_,
+      is_initialization_store ? StoreInstanceFieldInstr::Kind::kInitializing
+                              : StoreInstanceFieldInstr::Kind::kOther);
 
   return Fragment(store);
 }
@@ -475,7 +463,7 @@
     instructions += LoadContextAt(variable->owner()->context_level());
     instructions += LoadLocal(value);
     instructions += StoreInstanceField(
-        position, Context::variable_offset(variable->index().value()));
+        position, Slot::GetContextVariableSlotFor(thread_, *variable));
     return instructions;
   }
   return StoreLocalRaw(position, variable);
@@ -676,9 +664,10 @@
   return Fragment(negate);
 }
 
-Fragment BaseFlowGraphBuilder::AllocateContext(intptr_t size) {
+Fragment BaseFlowGraphBuilder::AllocateContext(
+    const GrowableArray<LocalVariable*>& context_variables) {
   AllocateContextInstr* allocate =
-      new (Z) AllocateContextInstr(TokenPosition::kNoSource, size);
+      new (Z) AllocateContextInstr(TokenPosition::kNoSource, context_variables);
   Push(allocate);
   return Fragment(allocate);
 }
diff --git a/runtime/vm/compiler/frontend/base_flow_graph_builder.h b/runtime/vm/compiler/frontend/base_flow_graph_builder.h
index feaaa71..69ff503 100644
--- a/runtime/vm/compiler/frontend/base_flow_graph_builder.h
+++ b/runtime/vm/compiler/frontend/base_flow_graph_builder.h
@@ -133,8 +133,7 @@
         inlining_unchecked_entry_(inlining_unchecked_entry) {}
 
   Fragment LoadField(const Field& field);
-  Fragment LoadField(intptr_t offset, intptr_t class_id = kDynamicCid);
-  Fragment LoadNativeField(const NativeFieldDesc* native_field);
+  Fragment LoadNativeField(const Slot& native_field);
   Fragment LoadIndexed(intptr_t index_scale);
 
   void SetTempIndex(Definition* definition);
@@ -148,7 +147,7 @@
   const Field& MayCloneField(const Field& field);
   Fragment StoreInstanceField(
       TokenPosition position,
-      intptr_t offset,
+      const Slot& field,
       StoreBarrierType emit_store_barrier = kEmitStoreBarrier);
   Fragment StoreInstanceField(
       const Field& field,
@@ -262,7 +261,7 @@
 
   Fragment AssertBool(TokenPosition position);
   Fragment BooleanNegate();
-  Fragment AllocateContext(intptr_t size);
+  Fragment AllocateContext(
+      const GrowableArray<LocalVariable*>& context_variables);
   Fragment CreateArray();
   Fragment InstantiateType(const AbstractType& type);
   Fragment InstantiateTypeArguments(const TypeArguments& type_arguments);
diff --git a/runtime/vm/compiler/frontend/bytecode_flow_graph_builder.cc b/runtime/vm/compiler/frontend/bytecode_flow_graph_builder.cc
index ecf4f80..a6e5498 100644
--- a/runtime/vm/compiler/frontend/bytecode_flow_graph_builder.cc
+++ b/runtime/vm/compiler/frontend/bytecode_flow_graph_builder.cc
@@ -374,14 +374,15 @@
   throw_no_such_method_ = B->BuildThrowNoSuchMethod();
 
   check_args += B->LoadArgDescriptor();
-  check_args += B->LoadField(ArgumentsDescriptor::positional_count_offset());
+  check_args +=
+      B->LoadNativeField(Slot::ArgumentsDescriptor_positional_count());
   check_args += B->IntConstant(num_fixed_params);
   TargetEntryInstr *success1, *fail1;
   check_args += B->BranchIfEqual(&success1, &fail1);
   check_args = Fragment(check_args.entry, success1);
 
   check_args += B->LoadArgDescriptor();
-  check_args += B->LoadField(ArgumentsDescriptor::count_offset());
+  check_args += B->LoadNativeField(Slot::ArgumentsDescriptor_count());
   check_args += B->IntConstant(num_fixed_params);
   TargetEntryInstr *success2, *fail2;
   check_args += B->BranchIfEqual(&success2, &fail2);
@@ -567,8 +568,8 @@
   // If expect_type_args, a non-zero length must match the declaration length.
   TargetEntryInstr *then, *fail;
   setup_type_args += B->LoadArgDescriptor();
-  setup_type_args += B->LoadNativeField(NativeFieldDesc::Get(
-      NativeFieldDesc::kArgumentsDescriptor_type_args_len));
+  setup_type_args +=
+      B->LoadNativeField(Slot::ArgumentsDescriptor_type_args_len());
 
   if (expected_num_type_args != 0) {
     JoinEntryInstr* join2 = B->BuildJoinEntry();
@@ -594,7 +595,7 @@
 
     Fragment store_type_args(then2);
     store_type_args += B->LoadArgDescriptor();
-    store_type_args += B->LoadField(ArgumentsDescriptor::count_offset());
+    store_type_args += B->LoadNativeField(Slot::ArgumentsDescriptor_count());
     store_type_args += B->LoadFpRelativeSlot(
         kWordSize * (1 + compiler_frame_layout.param_end_from_fp));
     store_type_args +=
@@ -780,15 +781,21 @@
     UNIMPLEMENTED();  // TODO(alexmarkov): interpreter
   }
 
-  code_ += B->AllocateContext(DecodeOperandD().value());
+  auto& context_variables = CompilerState::Current().GetDummyContextVariables(
+      DecodeOperandD().value());
+  code_ += B->AllocateContext(context_variables);
 }
 
 void BytecodeFlowGraphBuilder::BuildCloneContext() {
+  if (is_generating_interpreter()) {
+    UNIMPLEMENTED();  // TODO(alexmarkov): interpreter
+  }
+
   LoadStackSlots(1);
-  // TODO(alexmarkov): Pass context_size and use it in compiled mode.
+  auto& context_variables = CompilerState::Current().GetDummyContextVariables(
+      DecodeOperandD().value());
   CloneContextInstr* clone_instruction = new (Z) CloneContextInstr(
-      TokenPosition::kNoSource, Pop(), CloneContextInstr::kUnknownContextSize,
-      B->GetNextDeoptId());
+      TokenPosition::kNoSource, Pop(), context_variables, B->GetNextDeoptId());
   code_ <<= clone_instruction;
   B->Push(clone_instruction);
 }
@@ -798,6 +805,24 @@
   code_ += B->CreateArray();
 }
 
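+// Maps the offset of a raw _Closure field (as referenced from bytecode) to
+// the corresponding native Slot.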
+const Slot& ClosureSlotByField(const Field& field) {
+  const intptr_t offset = field.Offset();
+  if (offset == Closure::instantiator_type_arguments_offset()) {
+    return Slot::Closure_instantiator_type_arguments();
+  } else if (offset == Closure::function_type_arguments_offset()) {
+    return Slot::Closure_function_type_arguments();
+  } else if (offset == Closure::delayed_type_arguments_offset()) {
+    return Slot::Closure_delayed_type_arguments();
+  } else if (offset == Closure::function_offset()) {
+    return Slot::Closure_function();
+  } else if (offset == Closure::context_offset()) {
+    return Slot::Closure_context();
+  } else {
+    RELEASE_ASSERT(offset == Closure::hash_offset());
+    return Slot::Closure_hash();
+  }
+}
+
 void BytecodeFlowGraphBuilder::BuildStoreFieldTOS() {
   if (is_generating_interpreter()) {
     UNIMPLEMENTED();  // TODO(alexmarkov): interpreter
@@ -812,8 +837,7 @@
 
   if (field.Owner() == isolate()->object_store()->closure_class()) {
     // Stores to _Closure fields are lower-level.
-    // TODO(alexmarkov): use NativeFieldDesc
-    code_ += B->StoreInstanceField(position_, field.Offset());
+    code_ += B->StoreInstanceField(position_, ClosureSlotByField(field));
   } else {
     // The rest of the StoreFieldTOS are for field initializers.
     // TODO(alexmarkov): Consider adding a flag to StoreFieldTOS or even
@@ -837,8 +861,7 @@
 
   if (field.Owner() == isolate()->object_store()->closure_class()) {
     // Loads from _Closure fields are lower-level.
-    // TODO(alexmarkov): use NativeFieldDesc
-    code_ += B->LoadField(field.Offset());
+    code_ += B->LoadNativeField(ClosureSlotByField(field));
   } else {
     code_ += B->LoadField(field);
   }
@@ -847,15 +870,13 @@
 void BytecodeFlowGraphBuilder::BuildStoreContextParent() {
   LoadStackSlots(2);
 
-  // TODO(alexmarkov): use NativeFieldDesc
-  code_ += B->StoreInstanceField(position_, Context::parent_offset());
+  code_ += B->StoreInstanceField(position_, Slot::Context_parent());
 }
 
 void BytecodeFlowGraphBuilder::BuildLoadContextParent() {
   LoadStackSlots(1);
 
-  // TODO(alexmarkov): use NativeFieldDesc
-  code_ += B->LoadField(Context::parent_offset());
+  code_ += B->LoadNativeField(Slot::Context_parent());
 }
 
 void BytecodeFlowGraphBuilder::BuildStoreContextVar() {
@@ -866,9 +887,12 @@
   LoadStackSlots(2);
   Operand var_index = DecodeOperandD();
 
-  // TODO(alexmarkov): use NativeFieldDesc
-  code_ += B->StoreInstanceField(position_,
-                                 Context::variable_offset(var_index.value()));
+  // TODO(alexmarkov) provide context_id in bytecode to disambiguate variables
+  // in different contexts
+  auto var =
+      CompilerState::Current().GetDummyCapturedVariable(var_index.value());
+  code_ += B->StoreInstanceField(
+      position_, Slot::GetContextVariableSlotFor(thread(), *var));
 }
 
 void BytecodeFlowGraphBuilder::BuildLoadContextVar() {
@@ -879,8 +903,11 @@
   LoadStackSlots(1);
   Operand var_index = DecodeOperandD();
 
-  // TODO(alexmarkov): use NativeFieldDesc
-  code_ += B->LoadField(Context::variable_offset(var_index.value()));
+  // TODO(alexmarkov) provide context_id in bytecode to disambiguate variables
+  // in different contexts
+  auto var =
+      CompilerState::Current().GetDummyCapturedVariable(var_index.value());
+  code_ += B->LoadNativeField(Slot::GetContextVariableSlotFor(thread(), *var));
 }
 
 void BytecodeFlowGraphBuilder::BuildLoadTypeArgumentsField() {
@@ -892,8 +919,7 @@
   const intptr_t offset =
       Smi::Cast(ConstantAt(DecodeOperandD()).value()).Value() * kWordSize;
 
-  code_ +=
-      B->LoadNativeField(NativeFieldDesc::GetTypeArgumentsField(Z, offset));
+  code_ += B->LoadNativeField(Slot::GetTypeArgumentsSlotAt(thread(), offset));
 }
 
 void BytecodeFlowGraphBuilder::BuildStoreStaticTOS() {
@@ -1035,8 +1061,7 @@
 
   TargetEntryInstr *is_zero, *is_not_zero;
   code_ += B->LoadArgDescriptor();
-  code_ += B->LoadNativeField(NativeFieldDesc::Get(
-      NativeFieldDesc::kArgumentsDescriptor_type_args_len));
+  code_ += B->LoadNativeField(Slot::ArgumentsDescriptor_type_args_len());
   code_ += B->IntConstant(0);
   code_ += B->BranchIfEqual(&is_zero, &is_not_zero);
 
diff --git a/runtime/vm/compiler/frontend/kernel_binary_flowgraph.cc b/runtime/vm/compiler/frontend/kernel_binary_flowgraph.cc
index 4f8b6dd..61d755e 100644
--- a/runtime/vm/compiler/frontend/kernel_binary_flowgraph.cc
+++ b/runtime/vm/compiler/frontend/kernel_binary_flowgraph.cc
@@ -567,8 +567,9 @@
     // closed-over receiver.
     body +=
         LoadLocal(parsed_function()->node_sequence()->scope()->VariableAt(0));
-    body += LoadField(Closure::context_offset());
-    body += flow_graph_builder_->LoadField(Context::variable_offset(0));
+    body += LoadNativeField(Slot::Closure_context());
+    body += LoadNativeField(
+        Slot::GetContextVariableSlotFor(thread(), *scopes()->this_variable));
     body += PushArgument();
   }
 
@@ -681,15 +682,17 @@
   if (is_implicit_closure_function && !function.is_static()) {
     if (parsed_function()->has_arg_desc_var()) {
       body += B->LoadArgDescriptor();
-      body += LoadField(ArgumentsDescriptor::count_offset());
+      body += LoadNativeField(Slot::ArgumentsDescriptor_count());
       body += LoadLocal(parsed_function()->current_context_var());
-      body += B->LoadField(Context::variable_offset(0));
+      body += B->LoadNativeField(
+          Slot::GetContextVariableSlotFor(thread(), *scopes()->this_variable));
       body += B->StoreFpRelativeSlot(kWordSize *
                                      compiler_frame_layout.param_end_from_fp);
       body += Drop();
     } else {
       body += LoadLocal(parsed_function()->current_context_var());
-      body += B->LoadField(Context::variable_offset(0));
+      body += B->LoadNativeField(
+          Slot::GetContextVariableSlotFor(thread(), *scopes()->this_variable));
       body += B->StoreFpRelativeSlot(
           kWordSize *
           (compiler_frame_layout.param_end_from_fp + function.NumParameters()));
@@ -728,7 +731,7 @@
 
   if (function.HasOptionalParameters()) {
     body += B->LoadArgDescriptor();
-    body += LoadField(ArgumentsDescriptor::count_offset());
+    body += LoadNativeField(Slot::ArgumentsDescriptor_count());
   } else {
     body += IntConstant(function.NumParameters());
   }
@@ -841,7 +844,8 @@
       body += Constant(type);
     } else {
       body += LoadLocal(parsed_function()->current_context_var());
-      body += B->LoadField(Context::variable_offset(0));
+      body += B->LoadNativeField(
+          Slot::GetContextVariableSlotFor(thread(), *scopes()->this_variable));
     }
   } else {
     LocalScope* scope = parsed_function()->node_sequence()->scope();
@@ -1372,7 +1376,7 @@
       prologue += LoadLocal(fn_type_args);
       prologue += PushArgument();
       prologue += LoadLocal(closure);
-      prologue += LoadField(Closure::function_type_arguments_offset());
+      prologue += LoadNativeField(Slot::Closure_function_type_arguments());
       prologue += PushArgument();
       prologue += IntConstant(dart_function.NumParentTypeParameters());
       prologue += PushArgument();
@@ -1393,7 +1397,7 @@
       prologue += Drop();
     } else {
       prologue += LoadLocal(closure);
-      prologue += LoadField(Closure::function_type_arguments_offset());
+      prologue += LoadNativeField(Slot::Closure_function_type_arguments());
       prologue += StoreLocal(TokenPosition::kNoSource, fn_type_args);
       prologue += Drop();
     }
@@ -1477,10 +1481,9 @@
 Fragment StreamingFlowGraphBuilder::SetupCapturedParameters(
     const Function& dart_function) {
   Fragment body;
-  intptr_t context_size =
-      parsed_function()->node_sequence()->scope()->num_context_variables();
-  if (context_size > 0) {
-    body += flow_graph_builder_->PushContext(context_size);
+  const LocalScope* scope = parsed_function()->node_sequence()->scope();
+  if (scope->num_context_variables() > 0) {
+    body += flow_graph_builder_->PushContext(scope);
     LocalVariable* context = MakeTemporary();
 
     // Copy captured parameters from the stack into the context.
@@ -1506,7 +1509,7 @@
         body += LoadLocal(&raw_parameter);
         body += flow_graph_builder_->StoreInstanceField(
             TokenPosition::kNoSource,
-            Context::variable_offset(variable->index().value()));
+            Slot::GetContextVariableSlotFor(thread(), *variable));
         body += NullConstant();
         body += StoreLocal(TokenPosition::kNoSource, &raw_parameter);
         body += Drop();
@@ -2549,12 +2552,13 @@
   return flow_graph_builder_->AllocateObject(klass, closure_function);
 }
 
-Fragment StreamingFlowGraphBuilder::AllocateContext(intptr_t size) {
-  return flow_graph_builder_->AllocateContext(size);
+Fragment StreamingFlowGraphBuilder::AllocateContext(
+    const GrowableArray<LocalVariable*>& context_variables) {
+  return flow_graph_builder_->AllocateContext(context_variables);
 }
 
-Fragment StreamingFlowGraphBuilder::LoadField(intptr_t offset) {
-  return flow_graph_builder_->LoadField(offset);
+Fragment StreamingFlowGraphBuilder::LoadNativeField(const Slot& field) {
+  return flow_graph_builder_->LoadNativeField(field);
 }
 
 Fragment StreamingFlowGraphBuilder::StoreLocal(TokenPosition position,
@@ -2567,11 +2571,6 @@
   return flow_graph_builder_->StoreStaticField(position, field);
 }
 
-Fragment StreamingFlowGraphBuilder::StoreInstanceField(TokenPosition position,
-                                                       intptr_t offset) {
-  return flow_graph_builder_->StoreInstanceField(position, offset);
-}
-
 Fragment StreamingFlowGraphBuilder::StringInterpolate(TokenPosition position) {
   return flow_graph_builder_->StringInterpolate(position);
 }
@@ -2611,8 +2610,8 @@
 }
 
 Fragment StreamingFlowGraphBuilder::CloneContext(
-    intptr_t num_context_variables) {
-  return flow_graph_builder_->CloneContext(num_context_variables);
+    const GrowableArray<LocalVariable*>& context_variables) {
+  return flow_graph_builder_->CloneContext(context_variables);
 }
 
 Fragment StreamingFlowGraphBuilder::TranslateFinallyFinalizers(
@@ -2793,8 +2792,8 @@
 
 Fragment StreamingFlowGraphBuilder::EnterScope(
     intptr_t kernel_offset,
-    intptr_t* num_context_variables) {
-  return flow_graph_builder_->EnterScope(kernel_offset, num_context_variables);
+    const LocalScope** scope /* = nullptr */) {
+  return flow_graph_builder_->EnterScope(kernel_offset, scope);
 }
 
 Fragment StreamingFlowGraphBuilder::ExitScope(intptr_t kernel_offset) {
@@ -3746,7 +3745,7 @@
   if (is_unchecked_closure_call) {
     // Lookup the function in the closure.
     instructions += LoadLocal(receiver_temp);
-    instructions += LoadField(Closure::function_offset());
+    instructions += LoadNativeField(Slot::Closure_function());
     if (parsed_function()->function().is_debuggable()) {
       ASSERT(!parsed_function()->function().is_native());
       instructions += DebugStepCheck(position);
@@ -4846,38 +4845,40 @@
 
   instructions += LoadLocal(new_closure);
   instructions += LoadLocal(type_args_vec);
-  instructions += StoreInstanceField(TokenPosition::kNoSource,
-                                     Closure::delayed_type_arguments_offset());
-
+  instructions += flow_graph_builder_->StoreInstanceField(
+      TokenPosition::kNoSource, Slot::Closure_delayed_type_arguments());
   instructions += Drop();  // Drop type args.
 
   // Copy over the target function.
   instructions += LoadLocal(new_closure);
   instructions += LoadLocal(original_closure);
-  instructions += LoadField(Closure::function_offset());
   instructions +=
-      StoreInstanceField(TokenPosition::kNoSource, Closure::function_offset());
+      flow_graph_builder_->LoadNativeField(Slot::Closure_function());
+  instructions += flow_graph_builder_->StoreInstanceField(
+      TokenPosition::kNoSource, Slot::Closure_function());
 
   // Copy over the instantiator type arguments.
   instructions += LoadLocal(new_closure);
   instructions += LoadLocal(original_closure);
-  instructions += LoadField(Closure::instantiator_type_arguments_offset());
-  instructions += StoreInstanceField(
-      TokenPosition::kNoSource, Closure::instantiator_type_arguments_offset());
+  instructions += flow_graph_builder_->LoadNativeField(
+      Slot::Closure_instantiator_type_arguments());
+  instructions += flow_graph_builder_->StoreInstanceField(
+      TokenPosition::kNoSource, Slot::Closure_instantiator_type_arguments());
 
   // Copy over the function type arguments.
   instructions += LoadLocal(new_closure);
   instructions += LoadLocal(original_closure);
-  instructions += LoadField(Closure::function_type_arguments_offset());
-  instructions += StoreInstanceField(TokenPosition::kNoSource,
-                                     Closure::function_type_arguments_offset());
+  instructions += flow_graph_builder_->LoadNativeField(
+      Slot::Closure_function_type_arguments());
+  instructions += flow_graph_builder_->StoreInstanceField(
+      TokenPosition::kNoSource, Slot::Closure_function_type_arguments());
 
   // Copy over the context.
   instructions += LoadLocal(new_closure);
   instructions += LoadLocal(original_closure);
-  instructions += LoadField(Closure::context_offset());
-  instructions +=
-      StoreInstanceField(TokenPosition::kNoSource, Closure::context_offset());
+  instructions += flow_graph_builder_->LoadNativeField(Slot::Closure_context());
+  instructions += flow_graph_builder_->StoreInstanceField(
+      TokenPosition::kNoSource, Slot::Closure_context());
 
   instructions += DropTempsPreserveTop(1);  // Drop old closure.
 
@@ -5112,8 +5113,8 @@
 
   loop_depth_inc();
 
-  intptr_t num_context_variables = 0;
-  declarations += EnterScope(offset, &num_context_variables);
+  const LocalScope* context_scope = nullptr;
+  declarations += EnterScope(offset, &context_scope);
 
   intptr_t list_length = ReadListLength();  // read number of variables.
   for (intptr_t i = 0; i < list_length; ++i) {
@@ -5149,7 +5150,9 @@
     // the context object (at same depth) which ensures the next iteration of
     // the body gets a fresh set of [ForStatement] variables (with the old
     // (possibly updated) values).
-    if (num_context_variables > 0) body += CloneContext(num_context_variables);
+    if (context_scope->num_context_variables() > 0) {
+      body += CloneContext(context_scope->context_variables());
+    }
 
     body += updates;
     JoinEntryInstr* join = BuildJoinEntry();
@@ -5990,8 +5993,7 @@
     instructions += LoadLocal(closure);
     instructions += LoadInstantiatorTypeArguments();
     instructions += flow_graph_builder_->StoreInstanceField(
-        TokenPosition::kNoSource,
-        Closure::instantiator_type_arguments_offset());
+        TokenPosition::kNoSource, Slot::Closure_instantiator_type_arguments());
   }
 
   // TODO(30455): We only need to save these if the closure uses any captured
@@ -5999,23 +6001,23 @@
   instructions += LoadLocal(closure);
   instructions += LoadFunctionTypeArguments();
   instructions += flow_graph_builder_->StoreInstanceField(
-      TokenPosition::kNoSource, Closure::function_type_arguments_offset());
+      TokenPosition::kNoSource, Slot::Closure_function_type_arguments());
 
   instructions += LoadLocal(closure);
   instructions += Constant(Object::empty_type_arguments());
   instructions += flow_graph_builder_->StoreInstanceField(
-      TokenPosition::kNoSource, Closure::delayed_type_arguments_offset());
+      TokenPosition::kNoSource, Slot::Closure_delayed_type_arguments());
 
   // Store the function and the context in the closure.
   instructions += LoadLocal(closure);
   instructions += Constant(function);
   instructions += flow_graph_builder_->StoreInstanceField(
-      TokenPosition::kNoSource, Closure::function_offset());
+      TokenPosition::kNoSource, Slot::Closure_function());
 
   instructions += LoadLocal(closure);
   instructions += LoadLocal(parsed_function()->current_context_var());
   instructions += flow_graph_builder_->StoreInstanceField(
-      TokenPosition::kNoSource, Closure::context_offset());
+      TokenPosition::kNoSource, Slot::Closure_context());
 
   return instructions;
 }
diff --git a/runtime/vm/compiler/frontend/kernel_binary_flowgraph.h b/runtime/vm/compiler/frontend/kernel_binary_flowgraph.h
index c4d2538..644ab79 100644
--- a/runtime/vm/compiler/frontend/kernel_binary_flowgraph.h
+++ b/runtime/vm/compiler/frontend/kernel_binary_flowgraph.h
@@ -56,6 +56,8 @@
  private:
   bool optimizing();
 
+  Thread* thread() const { return flow_graph_builder_->thread_; }
+
   FlowGraph* BuildGraphOfFieldInitializer();
   FlowGraph* BuildGraphOfFieldAccessor(LocalVariable* setter_value);
   void SetupDefaultParameterValues();
@@ -273,11 +275,11 @@
                           const Class& klass,
                           intptr_t argument_count);
   Fragment AllocateObject(const Class& klass, const Function& closure_function);
-  Fragment AllocateContext(intptr_t size);
-  Fragment LoadField(intptr_t offset);
+  Fragment AllocateContext(
+      const GrowableArray<LocalVariable*>& context_variables);
+  Fragment LoadNativeField(const Slot& field);
   Fragment StoreLocal(TokenPosition position, LocalVariable* variable);
   Fragment StoreStaticField(TokenPosition position, const Field& field);
-  Fragment StoreInstanceField(TokenPosition position, intptr_t offset);
   Fragment StringInterpolate(TokenPosition position);
   Fragment StringInterpolateSingle(TokenPosition position);
   Fragment ThrowTypeError();
@@ -287,7 +289,7 @@
   Fragment CreateArray();
   Fragment StoreIndexed(intptr_t class_id);
   Fragment CheckStackOverflow(TokenPosition position);
-  Fragment CloneContext(intptr_t num_context_variables);
+  Fragment CloneContext(const GrowableArray<LocalVariable*>& context_variables);
   Fragment TranslateFinallyFinalizers(TryFinallyBlock* outer_finally,
                                       intptr_t target_context_depth);
   Fragment BranchIfTrue(TargetEntryInstr** then_entry,
@@ -326,7 +328,7 @@
   Fragment CheckVariableTypeInCheckedMode(const AbstractType& dst_type,
                                           const String& name_symbol);
   Fragment EnterScope(intptr_t kernel_offset,
-                      intptr_t* num_context_variables = NULL);
+                      const LocalScope** scope = nullptr);
   Fragment ExitScope(intptr_t kernel_offset);
 
   TestFragment TranslateConditionForControl();
diff --git a/runtime/vm/compiler/frontend/kernel_to_il.cc b/runtime/vm/compiler/frontend/kernel_to_il.cc
index b8b5bff..e0420dc 100644
--- a/runtime/vm/compiler/frontend/kernel_to_il.cc
+++ b/runtime/vm/compiler/frontend/kernel_to_il.cc
@@ -68,17 +68,17 @@
 
 FlowGraphBuilder::~FlowGraphBuilder() {}
 
-Fragment FlowGraphBuilder::EnterScope(intptr_t kernel_offset,
-                                      intptr_t* num_context_variables) {
+Fragment FlowGraphBuilder::EnterScope(
+    intptr_t kernel_offset,
+    const LocalScope** context_scope /* = nullptr */) {
   Fragment instructions;
-  const intptr_t context_size =
-      scopes_->scopes.Lookup(kernel_offset)->num_context_variables();
-  if (context_size > 0) {
-    instructions += PushContext(context_size);
+  const LocalScope* scope = scopes_->scopes.Lookup(kernel_offset);
+  if (scope->num_context_variables() > 0) {
+    instructions += PushContext(scope);
     instructions += Drop();
   }
-  if (num_context_variables != NULL) {
-    *num_context_variables = context_size;
+  if (context_scope != nullptr) {
+    *context_scope = scope;
   }
   return instructions;
 }
@@ -106,14 +106,14 @@
   return instructions;
 }
 
-Fragment FlowGraphBuilder::PushContext(int size) {
-  ASSERT(size > 0);
-  Fragment instructions = AllocateContext(size);
+Fragment FlowGraphBuilder::PushContext(const LocalScope* scope) {
+  ASSERT(scope->num_context_variables() > 0);
+  Fragment instructions = AllocateContext(scope->context_variables());
   LocalVariable* context = MakeTemporary();
   instructions += LoadLocal(context);
   instructions += LoadLocal(parsed_function_->current_context_var());
   instructions +=
-      StoreInstanceField(TokenPosition::kNoSource, Context::parent_offset());
+      StoreInstanceField(TokenPosition::kNoSource, Slot::Context_parent());
   instructions += StoreLocal(TokenPosition::kNoSource,
                              parsed_function_->current_context_var());
   ++context_depth_;
@@ -142,7 +142,7 @@
     ASSERT(!parsed_function_->function().IsFactory());
     instructions += LoadLocal(scopes_->this_variable);
     instructions += LoadNativeField(
-        NativeFieldDesc::GetTypeArgumentsFieldFor(Z, *active_class_.klass));
+        Slot::GetTypeArgumentsSlotFor(thread_, *active_class_.klass));
   } else {
     instructions += NullConstant();
   }
@@ -267,7 +267,7 @@
     LocalVariable* closure_parameter = scope->VariableAt(0);
     ASSERT(!closure_parameter->is_captured());
     instructions += LoadLocal(closure_parameter);
-    instructions += LoadField(Closure::context_offset());
+    instructions += LoadNativeField(Slot::Closure_context());
     instructions += StoreLocal(TokenPosition::kNoSource, context_variable);
     instructions += Drop();
   }
@@ -277,14 +277,14 @@
     instructions += LoadLocal(raw_exception_var);
     instructions += StoreInstanceField(
         TokenPosition::kNoSource,
-        Context::variable_offset(exception_var->index().value()));
+        Slot::GetContextVariableSlotFor(thread_, *exception_var));
   }
   if (stacktrace_var->is_captured()) {
     instructions += LoadLocal(context_variable);
     instructions += LoadLocal(raw_stacktrace_var);
     instructions += StoreInstanceField(
         TokenPosition::kNoSource,
-        Context::variable_offset(stacktrace_var->index().value()));
+        Slot::GetContextVariableSlotFor(thread_, *stacktrace_var));
   }
 
   // :saved_try_context_var can be captured in the context of
@@ -332,13 +332,14 @@
   return CheckStackOverflow(position, loop_depth_);
 }
 
-Fragment FlowGraphBuilder::CloneContext(intptr_t num_context_variables) {
+Fragment FlowGraphBuilder::CloneContext(
+    const GrowableArray<LocalVariable*>& context_variables) {
   LocalVariable* context_variable = parsed_function_->current_context_var();
 
   Fragment instructions = LoadLocal(context_variable);
 
   CloneContextInstr* clone_instruction = new (Z) CloneContextInstr(
-      TokenPosition::kNoSource, Pop(), num_context_variables, GetNextDeoptId());
+      TokenPosition::kNoSource, Pop(), context_variables, GetNextDeoptId());
   instructions <<= clone_instruction;
   Push(clone_instruction);
 
@@ -424,7 +425,7 @@
     Fragment instructions;
     instructions += LoadContextAt(variable->owner()->context_level());
     instructions +=
-        LoadField(Context::variable_offset(variable->index().value()));
+        LoadNativeField(Slot::GetContextVariableSlotFor(thread_, *variable));
     return instructions;
   } else {
     return BaseFlowGraphBuilder::LoadLocal(variable);
@@ -763,7 +764,7 @@
     case MethodRecognizer::kStringBaseLength:
     case MethodRecognizer::kStringBaseIsEmpty:
       body += LoadLocal(scopes_->this_variable);
-      body += LoadNativeField(NativeFieldDesc::String_length());
+      body += LoadNativeField(Slot::String_length());
       if (kind == MethodRecognizer::kStringBaseIsEmpty) {
         body += IntConstant(0);
         body += StrictCompare(Token::kEQ_STRICT);
@@ -771,16 +772,16 @@
       break;
     case MethodRecognizer::kGrowableArrayLength:
       body += LoadLocal(scopes_->this_variable);
-      body += LoadNativeField(NativeFieldDesc::GrowableObjectArray_length());
+      body += LoadNativeField(Slot::GrowableObjectArray_length());
       break;
     case MethodRecognizer::kObjectArrayLength:
     case MethodRecognizer::kImmutableArrayLength:
       body += LoadLocal(scopes_->this_variable);
-      body += LoadNativeField(NativeFieldDesc::Array_length());
+      body += LoadNativeField(Slot::Array_length());
       break;
     case MethodRecognizer::kTypedDataLength:
       body += LoadLocal(scopes_->this_variable);
-      body += LoadNativeField(NativeFieldDesc::TypedData_length());
+      body += LoadNativeField(Slot::TypedData_length());
       break;
     case MethodRecognizer::kClassIDgetID:
       body += LoadLocal(first_parameter);
@@ -788,8 +789,8 @@
       break;
     case MethodRecognizer::kGrowableArrayCapacity:
       body += LoadLocal(scopes_->this_variable);
-      body += LoadField(GrowableObjectArray::data_offset(), kArrayCid);
-      body += LoadNativeField(NativeFieldDesc::Array_length());
+      body += LoadNativeField(Slot::GrowableObjectArray_data());
+      body += LoadNativeField(Slot::Array_length());
       break;
     case MethodRecognizer::kListFactory: {
       // factory List<E>([int length]) {
@@ -801,8 +802,7 @@
       TargetEntryInstr *allocate_non_growable, *allocate_growable;
 
       body += LoadArgDescriptor();
-      body +=
-          LoadField(ArgumentsDescriptor::positional_count_offset(), kSmiCid);
+      body += LoadNativeField(Slot::ArgumentsDescriptor_positional_count());
       body += IntConstant(2);
       body += BranchIfStrictEqual(&allocate_non_growable, &allocate_growable);
 
@@ -863,59 +863,59 @@
       break;
     case MethodRecognizer::kLinkedHashMap_getIndex:
       body += LoadLocal(scopes_->this_variable);
-      body += LoadNativeField(NativeFieldDesc::LinkedHashMap_index());
+      body += LoadNativeField(Slot::LinkedHashMap_index());
       break;
     case MethodRecognizer::kLinkedHashMap_setIndex:
       body += LoadLocal(scopes_->this_variable);
       body += LoadLocal(first_parameter);
       body += StoreInstanceField(TokenPosition::kNoSource,
-                                 LinkedHashMap::index_offset());
+                                 Slot::LinkedHashMap_index());
       body += NullConstant();
       break;
     case MethodRecognizer::kLinkedHashMap_getData:
       body += LoadLocal(scopes_->this_variable);
-      body += LoadNativeField(NativeFieldDesc::LinkedHashMap_data());
+      body += LoadNativeField(Slot::LinkedHashMap_data());
       break;
     case MethodRecognizer::kLinkedHashMap_setData:
       body += LoadLocal(scopes_->this_variable);
       body += LoadLocal(first_parameter);
       body += StoreInstanceField(TokenPosition::kNoSource,
-                                 LinkedHashMap::data_offset());
+                                 Slot::LinkedHashMap_data());
       body += NullConstant();
       break;
     case MethodRecognizer::kLinkedHashMap_getHashMask:
       body += LoadLocal(scopes_->this_variable);
-      body += LoadNativeField(NativeFieldDesc::LinkedHashMap_hash_mask());
+      body += LoadNativeField(Slot::LinkedHashMap_hash_mask());
       break;
     case MethodRecognizer::kLinkedHashMap_setHashMask:
       body += LoadLocal(scopes_->this_variable);
       body += LoadLocal(first_parameter);
-      body += StoreInstanceField(TokenPosition::kNoSource,
-                                 LinkedHashMap::hash_mask_offset(),
-                                 kNoStoreBarrier);
+      body +=
+          StoreInstanceField(TokenPosition::kNoSource,
+                             Slot::LinkedHashMap_hash_mask(), kNoStoreBarrier);
       body += NullConstant();
       break;
     case MethodRecognizer::kLinkedHashMap_getUsedData:
       body += LoadLocal(scopes_->this_variable);
-      body += LoadNativeField(NativeFieldDesc::LinkedHashMap_used_data());
+      body += LoadNativeField(Slot::LinkedHashMap_used_data());
       break;
     case MethodRecognizer::kLinkedHashMap_setUsedData:
       body += LoadLocal(scopes_->this_variable);
       body += LoadLocal(first_parameter);
-      body += StoreInstanceField(TokenPosition::kNoSource,
-                                 LinkedHashMap::used_data_offset(),
-                                 kNoStoreBarrier);
+      body +=
+          StoreInstanceField(TokenPosition::kNoSource,
+                             Slot::LinkedHashMap_used_data(), kNoStoreBarrier);
       body += NullConstant();
       break;
     case MethodRecognizer::kLinkedHashMap_getDeletedKeys:
       body += LoadLocal(scopes_->this_variable);
-      body += LoadNativeField(NativeFieldDesc::LinkedHashMap_deleted_keys());
+      body += LoadNativeField(Slot::LinkedHashMap_deleted_keys());
       break;
     case MethodRecognizer::kLinkedHashMap_setDeletedKeys:
       body += LoadLocal(scopes_->this_variable);
       body += LoadLocal(first_parameter);
       body += StoreInstanceField(TokenPosition::kNoSource,
-                                 LinkedHashMap::deleted_keys_offset(),
+                                 Slot::LinkedHashMap_deleted_keys(),
                                  kNoStoreBarrier);
       body += NullConstant();
       break;
@@ -938,6 +938,42 @@
   return body + Return(TokenPosition::kNoSource, omit_result_type_check);
 }
 
+static Type& GetCanonicalType(Zone* Z, const Class& klass) {
+  ASSERT(!klass.IsNull());
+  // Note that if klass is _Closure, the returned type will be _Closure,
+  // and not the signature type.
+  Type& type = Type::ZoneHandle(Z, klass.CanonicalType());
+  if (!type.IsNull()) {
+    return type;
+  }
+  type = Type::New(klass, TypeArguments::Handle(Z, klass.type_parameters()),
+                   klass.token_pos());
+  if (klass.is_type_finalized()) {
+    type ^= ClassFinalizer::FinalizeType(klass, type);
+    // Note that the receiver type may now be a malbounded type.
+    klass.SetCanonicalType(type);
+  }
+  return type;
+}
+
+static const LocalScope* MakeImplicitClosureScope(Zone* Z,
+                                                  const Function& function) {
+  Class& klass = Class::Handle(Z, function.Owner());
+  Type& klass_type = GetCanonicalType(Z, klass);
+
+  LocalVariable* this_variable = new (Z)
+      LocalVariable(TokenPosition::kNoSource, TokenPosition::kNoSource,
+                    Symbols::This(), klass_type, /*param_type=*/nullptr);
+
+  this_variable->set_is_captured();
+  LocalScope* scope = new (Z) LocalScope(NULL, 0, 0);
+  scope->set_context_level(0);
+  scope->AddVariable(this_variable);
+  scope->AddContextVariable(this_variable);
+  return scope;
+}
+
 Fragment FlowGraphBuilder::BuildImplicitClosureCreation(
     const Function& target) {
   Fragment fragment;
@@ -950,9 +986,8 @@
   if (!target.HasInstantiatedSignature(kCurrentClass)) {
     fragment += LoadLocal(closure);
     fragment += LoadInstantiatorTypeArguments();
-    fragment +=
-        StoreInstanceField(TokenPosition::kNoSource,
-                           Closure::instantiator_type_arguments_offset());
+    fragment += StoreInstanceField(TokenPosition::kNoSource,
+                                   Slot::Closure_instantiator_type_arguments());
   }
 
   // The function signature cannot have uninstantiated function type parameters,
@@ -960,31 +995,36 @@
   ASSERT(target.HasInstantiatedSignature(kFunctions));
 
   // Allocate a context that closes over `this`.
-  fragment += AllocateContext(1);
+  // Note: this must be kept in sync with ScopeBuilder::BuildScopes.
+  const LocalScope* implicit_closure_scope =
+      MakeImplicitClosureScope(Z, target);
+  fragment += AllocateContext(implicit_closure_scope->context_variables());
   LocalVariable* context = MakeTemporary();
 
   // Store the function and the context in the closure.
   fragment += LoadLocal(closure);
   fragment += Constant(target);
   fragment +=
-      StoreInstanceField(TokenPosition::kNoSource, Closure::function_offset());
+      StoreInstanceField(TokenPosition::kNoSource, Slot::Closure_function());
 
   fragment += LoadLocal(closure);
   fragment += LoadLocal(context);
   fragment +=
-      StoreInstanceField(TokenPosition::kNoSource, Closure::context_offset());
+      StoreInstanceField(TokenPosition::kNoSource, Slot::Closure_context());
 
   fragment += LoadLocal(closure);
   fragment += Constant(Object::empty_type_arguments());
   fragment += StoreInstanceField(TokenPosition::kNoSource,
-                                 Closure::delayed_type_arguments_offset());
+                                 Slot::Closure_delayed_type_arguments());
 
   // The context is on top of the operand stack.  Store `this`.  The context
   // doesn't need a parent pointer because it doesn't close over anything
   // else.
   fragment += LoadLocal(scopes_->this_variable);
-  fragment +=
-      StoreInstanceField(TokenPosition::kNoSource, Context::variable_offset(0));
+  fragment += StoreInstanceField(
+      TokenPosition::kNoSource,
+      Slot::GetContextVariableSlotFor(
+          thread_, *implicit_closure_scope->context_variables()[0]));
 
   return fragment;
 }
@@ -1367,7 +1407,7 @@
   if (is_closure_call) {
     // Lookup the function in the closure.
     body += LoadLocal(closure);
-    body += LoadField(Closure::function_offset());
+    body += LoadNativeField(Slot::Closure_function());
 
     body += ClosureCall(TokenPosition::kNoSource, descriptor.TypeArgsLen(),
                         descriptor.Count(), argument_names);
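
The changes in this file are mechanical: every raw-offset load or store in the
flow-graph builder becomes a Slot-based one. A minimal before/after sketch,
using only calls that appear in the hunks above (an illustrative fragment, not
standalone code):

    // Before: a bare byte offset plus a class-id hint; the optimizer
    // cannot tell which field is read and must assume it may alias.
    body += LoadField(GrowableObjectArray::data_offset(), kArrayCid);
    body += LoadNativeField(NativeFieldDesc::Array_length());

    // After: both loads go through Slots, which pair the offset with
    // type and immutability information usable by alias analysis.
    body += LoadNativeField(Slot::GrowableObjectArray_data());
    body += LoadNativeField(Slot::Array_length());
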
diff --git a/runtime/vm/compiler/frontend/kernel_to_il.h b/runtime/vm/compiler/frontend/kernel_to_il.h
index 2d26d18..4a95aa3 100644
--- a/runtime/vm/compiler/frontend/kernel_to_il.h
+++ b/runtime/vm/compiler/frontend/kernel_to_il.h
@@ -67,12 +67,12 @@
                               LocalVariable* first_parameter);
 
   Fragment EnterScope(intptr_t kernel_offset,
-                      intptr_t* num_context_variables = NULL);
+                      const LocalScope** scope = nullptr);
   Fragment ExitScope(intptr_t kernel_offset);
 
   Fragment AdjustContextTo(int depth);
 
-  Fragment PushContext(int size);
+  Fragment PushContext(const LocalScope* scope);
   Fragment PopContext();
 
   Fragment LoadInstantiatorTypeArguments();
@@ -90,7 +90,7 @@
                            bool is_synthesized);
   Fragment TryCatch(int try_handler_index);
   Fragment CheckStackOverflowInPrologue(TokenPosition position);
-  Fragment CloneContext(intptr_t num_context_variables);
+  Fragment CloneContext(const GrowableArray<LocalVariable*>& context_variables);
 
   Fragment InstanceCall(
       TokenPosition position,
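
With these signatures, callers thread the scope itself (and hence its list of
captured variables) through instead of a bare count. A hypothetical call site
consistent with the new declarations (variable names are illustrative):

    const LocalScope* scope = nullptr;
    instructions += EnterScope(kernel_offset, &scope);
    // ... body of the loop that needs a fresh context per iteration ...
    instructions += CloneContext(scope->context_variables());
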
diff --git a/runtime/vm/compiler/frontend/prologue_builder.cc b/runtime/vm/compiler/frontend/prologue_builder.cc
index a63ae30..a08c6db 100644
--- a/runtime/vm/compiler/frontend/prologue_builder.cc
+++ b/runtime/vm/compiler/frontend/prologue_builder.cc
@@ -95,8 +95,7 @@
   // If expect_type_args, a non-zero length must match the declaration length.
   TargetEntryInstr *then, *fail;
   check_type_args += LoadArgDescriptor();
-  check_type_args += LoadNativeField(NativeFieldDesc::Get(
-      NativeFieldDesc::kArgumentsDescriptor_type_args_len));
+  check_type_args += LoadNativeField(Slot::ArgumentsDescriptor_type_args_len());
   if (expect_type_args) {
     JoinEntryInstr* join2 = BuildJoinEntry();
 
@@ -146,11 +145,11 @@
 
   copy_args_prologue += LoadArgDescriptor();
   copy_args_prologue +=
-      LoadField(ArgumentsDescriptor::positional_count_offset());
+      LoadNativeField(Slot::ArgumentsDescriptor_positional_count());
   LocalVariable* positional_count_var = MakeTemporary();
 
   copy_args_prologue += LoadArgDescriptor();
-  copy_args_prologue += LoadField(ArgumentsDescriptor::count_offset());
+  copy_args_prologue += LoadNativeField(Slot::ArgumentsDescriptor_count());
   LocalVariable* count_var = MakeTemporary();
 
   // Ensure the caller provided at least [min_num_pos_args] arguments.
@@ -363,7 +362,7 @@
   JoinEntryInstr* done = BuildJoinEntry();
 
   check_args += LoadArgDescriptor();
-  check_args += LoadField(ArgumentsDescriptor::count_offset());
+  check_args += LoadNativeField(Slot::ArgumentsDescriptor_count());
   LocalVariable* count = MakeTemporary();
 
   TargetEntryInstr *then, *fail;
@@ -374,7 +373,7 @@
   TargetEntryInstr *then2, *fail2;
   Fragment check_len(then);
   check_len += LoadArgDescriptor();
-  check_len += LoadField(ArgumentsDescriptor::positional_count_offset());
+  check_len += LoadNativeField(Slot::ArgumentsDescriptor_positional_count());
   check_len += BranchIfEqual(&then2, &fail2);
 
   Fragment(fail) + Goto(nsm);
@@ -394,7 +393,7 @@
  // (both load/store happen on the copied-down places).
   Fragment populate_context;
   populate_context += LoadLocal(closure_parameter);
-  populate_context += LoadField(Closure::context_offset());
+  populate_context += LoadNativeField(Slot::Closure_context());
   populate_context += StoreLocal(TokenPosition::kNoSource, context);
   populate_context += Drop();
   return populate_context;
@@ -407,7 +406,7 @@
 
   Fragment store_type_args;
   store_type_args += LoadArgDescriptor();
-  store_type_args += LoadField(ArgumentsDescriptor::count_offset());
+  store_type_args += LoadNativeField(Slot::ArgumentsDescriptor_count());
   store_type_args += LoadFpRelativeSlot(
       kWordSize * (1 + compiler_frame_layout.param_end_from_fp));
   store_type_args += StoreLocal(TokenPosition::kNoSource, type_args_var);
@@ -430,7 +429,7 @@
     Fragment use_delayed_type_args;
     use_delayed_type_args += LoadLocal(closure);
     use_delayed_type_args +=
-        LoadField(Closure::delayed_type_arguments_offset());
+        LoadNativeField(Slot::Closure_delayed_type_arguments());
     use_delayed_type_args +=
         StoreLocal(TokenPosition::kNoSource, type_args_var);
     use_delayed_type_args += Drop();
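
Every ArgumentsDescriptor read in the prologue now goes through a dedicated
Slot (count, positional_count, type_args_len), so repeated reads of the same
descriptor field should be visible to the optimizer as redundant. The
recurring pattern, lifted from the hunks above:

    copy_args_prologue += LoadArgDescriptor();
    copy_args_prologue += LoadNativeField(Slot::ArgumentsDescriptor_count());
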
diff --git a/runtime/vm/compiler/frontend/scope_builder.cc b/runtime/vm/compiler/frontend/scope_builder.cc
index 14f2e15..410e903 100644
--- a/runtime/vm/compiler/frontend/scope_builder.cc
+++ b/runtime/vm/compiler/frontend/scope_builder.cc
@@ -99,11 +99,11 @@
     result_->this_variable =
         MakeVariable(TokenPosition::kNoSource, TokenPosition::kNoSource,
                      Symbols::This(), klass_type);
-    result_->this_variable->set_index(VariableIndex(0));
     result_->this_variable->set_is_captured();
     enclosing_scope = new (Z) LocalScope(NULL, 0, 0);
     enclosing_scope->set_context_level(0);
     enclosing_scope->AddVariable(result_->this_variable);
+    enclosing_scope->AddContextVariable(result_->this_variable);
   } else if (function.IsLocalFunction()) {
     enclosing_scope = LocalScope::RestoreOuterScope(
         ContextScope::Handle(Z, function.context_scope()));
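
Dropping the explicit set_index(VariableIndex(0)) here is safe because
AddContextVariable assigns indices densely by list position, so the first
variable added to a scope's context automatically gets index 0 (see the
scopes.cc hunk later in this diff). The resulting invariant, sketched:

    // After these two calls `this` is context variable 0 of the scope.
    enclosing_scope->AddVariable(result_->this_variable);
    enclosing_scope->AddContextVariable(result_->this_variable);
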
diff --git a/runtime/vm/compiler/intrinsifier.cc b/runtime/vm/compiler/intrinsifier.cc
index e90d100..bf445f5 100644
--- a/runtime/vm/compiler/intrinsifier.cc
+++ b/runtime/vm/compiler/intrinsifier.cc
@@ -480,10 +480,9 @@
 static void PrepareIndexedOp(BlockBuilder* builder,
                              Definition* array,
                              Definition* index,
-                             intptr_t length_offset) {
+                             const Slot& length_field) {
   Definition* length = builder->AddDefinition(new LoadFieldInstr(
-      new Value(array), length_offset, Type::ZoneHandle(Type::SmiType()),
-      TokenPosition::kNoSource));
+      new Value(array), length_field, TokenPosition::kNoSource));
   builder->AddInstruction(new CheckArrayBoundInstr(
       new Value(length), new Value(index), DeoptId::kNone));
 }
@@ -497,14 +496,8 @@
   Definition* index = builder.AddParameter(1);
   Definition* array = builder.AddParameter(2);
 
-  intptr_t length_offset = Array::length_offset();
-  if (RawObject::IsTypedDataClassId(array_cid)) {
-    length_offset = TypedData::length_offset();
-  } else if (RawObject::IsExternalTypedDataClassId(array_cid)) {
-    length_offset = ExternalTypedData::length_offset();
-  }
-
-  PrepareIndexedOp(&builder, array, index, length_offset);
+  PrepareIndexedOp(&builder, array, index,
+                   Slot::GetLengthFieldForArrayCid(array_cid));
 
   if (RawObject::IsExternalTypedDataClassId(array_cid)) {
     array = builder.AddDefinition(new LoadUntaggedInstr(
@@ -581,14 +574,8 @@
   Definition* index = builder.AddParameter(2);
   Definition* array = builder.AddParameter(3);
 
-  intptr_t length_offset = Array::length_offset();
-  if (RawObject::IsTypedDataClassId(array_cid)) {
-    length_offset = TypedData::length_offset();
-  } else if (RawObject::IsExternalTypedDataClassId(array_cid)) {
-    length_offset = ExternalTypedData::length_offset();
-  }
-
-  PrepareIndexedOp(&builder, array, index, length_offset);
+  PrepareIndexedOp(&builder, array, index,
+                   Slot::GetLengthFieldForArrayCid(array_cid));
 
   // Value check/conversion.
   switch (array_cid) {
@@ -782,7 +769,7 @@
 
   Definition* index = builder.AddParameter(1);
   Definition* str = builder.AddParameter(2);
-  PrepareIndexedOp(&builder, str, index, String::length_offset());
+  PrepareIndexedOp(&builder, str, index, Slot::String_length());
 
   // For external strings: Load external data.
   if (cid == kExternalOneByteStringCid) {
@@ -907,37 +894,37 @@
                                MethodRecognizer::kFloat32x4ShuffleW);
 }
 
-static bool BuildLoadField(FlowGraph* flow_graph, intptr_t offset) {
+static bool BuildLoadField(FlowGraph* flow_graph, const Slot& field) {
   GraphEntryInstr* graph_entry = flow_graph->graph_entry();
   auto normal_entry = graph_entry->normal_entry();
   BlockBuilder builder(flow_graph, normal_entry);
 
   Definition* array = builder.AddParameter(1);
 
-  Definition* length = builder.AddDefinition(new LoadFieldInstr(
-      new Value(array), offset, Type::ZoneHandle(), builder.TokenPos()));
+  Definition* length = builder.AddDefinition(
+      new LoadFieldInstr(new Value(array), field, builder.TokenPos()));
   builder.AddIntrinsicReturn(new Value(length));
   return true;
 }
 
 bool Intrinsifier::Build_ObjectArrayLength(FlowGraph* flow_graph) {
-  return BuildLoadField(flow_graph, Array::length_offset());
+  return BuildLoadField(flow_graph, Slot::Array_length());
 }
 
 bool Intrinsifier::Build_ImmutableArrayLength(FlowGraph* flow_graph) {
-  return BuildLoadField(flow_graph, Array::length_offset());
+  return BuildLoadField(flow_graph, Slot::Array_length());
 }
 
 bool Intrinsifier::Build_GrowableArrayLength(FlowGraph* flow_graph) {
-  return BuildLoadField(flow_graph, GrowableObjectArray::length_offset());
+  return BuildLoadField(flow_graph, Slot::GrowableObjectArray_length());
 }
 
 bool Intrinsifier::Build_StringBaseLength(FlowGraph* flow_graph) {
-  return BuildLoadField(flow_graph, String::length_offset());
+  return BuildLoadField(flow_graph, Slot::String_length());
 }
 
 bool Intrinsifier::Build_TypedDataLength(FlowGraph* flow_graph) {
-  return BuildLoadField(flow_graph, TypedData::length_offset());
+  return BuildLoadField(flow_graph, Slot::TypedData_length());
 }
 
 bool Intrinsifier::Build_GrowableArrayCapacity(FlowGraph* flow_graph) {
@@ -947,12 +934,10 @@
 
   Definition* array = builder.AddParameter(1);
 
-  Definition* backing_store = builder.AddDefinition(
-      new LoadFieldInstr(new Value(array), GrowableObjectArray::data_offset(),
-                         Type::ZoneHandle(), builder.TokenPos()));
-  Definition* capacity = builder.AddDefinition(
-      new LoadFieldInstr(new Value(backing_store), Array::length_offset(),
-                         Type::ZoneHandle(), builder.TokenPos()));
+  Definition* backing_store = builder.AddDefinition(new LoadFieldInstr(
+      new Value(array), Slot::GrowableObjectArray_data(), builder.TokenPos()));
+  Definition* capacity = builder.AddDefinition(new LoadFieldInstr(
+      new Value(backing_store), Slot::Array_length(), builder.TokenPos()));
   builder.AddIntrinsicReturn(new Value(capacity));
   return true;
 }
@@ -966,11 +951,11 @@
   Definition* growable_array = builder.AddParameter(2);
 
   PrepareIndexedOp(&builder, growable_array, index,
-                   GrowableObjectArray::length_offset());
+                   Slot::GrowableObjectArray_length());
 
-  Definition* backing_store = builder.AddDefinition(new LoadFieldInstr(
-      new Value(growable_array), GrowableObjectArray::data_offset(),
-      Type::ZoneHandle(), builder.TokenPos()));
+  Definition* backing_store = builder.AddDefinition(
+      new LoadFieldInstr(new Value(growable_array),
+                         Slot::GrowableObjectArray_data(), builder.TokenPos()));
   Definition* result = builder.AddDefinition(new LoadIndexedInstr(
       new Value(backing_store), new Value(index),
       Instance::ElementSizeFor(kArrayCid),  // index scale
@@ -996,7 +981,7 @@
   Definition* index = builder.AddParameter(2);
   Definition* array = builder.AddParameter(3);
 
-  PrepareIndexedOp(&builder, array, index, Array::length_offset());
+  PrepareIndexedOp(&builder, array, index, Slot::Array_length());
 
   builder.AddInstruction(new StoreIndexedInstr(
       new Value(array), new Value(index), new Value(value), kEmitStoreBarrier,
@@ -1026,12 +1011,10 @@
   Definition* index = builder.AddParameter(2);
   Definition* array = builder.AddParameter(3);
 
-  PrepareIndexedOp(&builder, array, index,
-                   GrowableObjectArray::length_offset());
+  PrepareIndexedOp(&builder, array, index, Slot::GrowableObjectArray_length());
 
-  Definition* backing_store = builder.AddDefinition(
-      new LoadFieldInstr(new Value(array), GrowableObjectArray::data_offset(),
-                         Type::ZoneHandle(), builder.TokenPos()));
+  Definition* backing_store = builder.AddDefinition(new LoadFieldInstr(
+      new Value(array), Slot::GrowableObjectArray_data(), builder.TokenPos()));
 
   builder.AddInstruction(new StoreIndexedInstr(
       new Value(backing_store), new Value(index), new Value(value),
@@ -1058,7 +1041,7 @@
                                              *value_check, builder.TokenPos()));
 
   builder.AddInstruction(new StoreInstanceFieldInstr(
-      GrowableObjectArray::data_offset(), new Value(growable_array),
+      Slot::GrowableObjectArray_data(), new Value(growable_array),
       new Value(data), kEmitStoreBarrier, builder.TokenPos()));
   // Return null.
   Definition* null_def = builder.AddNullDefinition();
@@ -1077,7 +1060,7 @@
   builder.AddInstruction(
       new CheckSmiInstr(new Value(length), DeoptId::kNone, builder.TokenPos()));
   builder.AddInstruction(new StoreInstanceFieldInstr(
-      GrowableObjectArray::length_offset(), new Value(growable_array),
+      Slot::GrowableObjectArray_length(), new Value(growable_array),
       new Value(length), kNoStoreBarrier, builder.TokenPos()));
   Definition* null_def = builder.AddNullDefinition();
   builder.AddIntrinsicReturn(new Value(null_def));
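
Slot::GetLengthFieldForArrayCid subsumes the per-cid offset selection deleted
above. A hypothetical sketch of such a helper, assuming typed-data length
slots exist alongside the getters used in this file (the real implementation
lives in the new Slot machinery, which is not part of this excerpt):

    // Hypothetical sketch; the real mapping may cover more class ids.
    const Slot& Slot::GetLengthFieldForArrayCid(intptr_t array_cid) {
      if (RawObject::IsExternalTypedDataClassId(array_cid)) {
        return Slot::ExternalTypedData_length();  // assumed getter
      }
      if (RawObject::IsTypedDataClassId(array_cid)) {
        return Slot::TypedData_length();
      }
      return Slot::Array_length();
    }
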
diff --git a/runtime/vm/compiler/jit/jit_call_specializer.cc b/runtime/vm/compiler/jit/jit_call_specializer.cc
index cbc5395..3b97b71 100644
--- a/runtime/vm/compiler/jit/jit_call_specializer.cc
+++ b/runtime/vm/compiler/jit/jit_call_specializer.cc
@@ -185,7 +185,7 @@
     // usage count of at least 1/kGetterSetterRatio of the getter usage count.
     // This is to avoid unboxing fields where the setter is never or rarely
     // executed.
-    const Field& field = instr->field();
+    const Field& field = instr->slot().field();
     const String& field_name = String::Handle(Z, field.name());
     const Class& owner = Class::Handle(Z, field.Owner());
     const Function& getter =
@@ -233,23 +233,24 @@
 // allocation and explicit initializing stores.
 // If context_value is not NULL then the newly allocated context is populated
 // with values copied from it, otherwise it is initialized with null.
-void JitCallSpecializer::LowerContextAllocation(Definition* alloc,
-                                                intptr_t num_context_variables,
-                                                Value* context_value) {
+void JitCallSpecializer::LowerContextAllocation(
+    Definition* alloc,
+    const GrowableArray<LocalVariable*>& context_variables,
+    Value* context_value) {
   ASSERT(alloc->IsAllocateContext() || alloc->IsCloneContext());
 
   AllocateUninitializedContextInstr* replacement =
       new AllocateUninitializedContextInstr(alloc->token_pos(),
-                                            num_context_variables);
+                                            context_variables.length());
   alloc->ReplaceWith(replacement, current_iterator());
 
   Definition* cursor = replacement;
 
   Value* initial_value;
   if (context_value != NULL) {
-    LoadFieldInstr* load = new (Z)
-        LoadFieldInstr(context_value->CopyWithType(Z), Context::parent_offset(),
-                       AbstractType::ZoneHandle(Z), alloc->token_pos());
+    LoadFieldInstr* load =
+        new (Z) LoadFieldInstr(context_value->CopyWithType(Z),
+                               Slot::Context_parent(), alloc->token_pos());
     flow_graph()->InsertAfter(cursor, load, NULL, FlowGraph::kValue);
     cursor = load;
     initial_value = new (Z) Value(load);
@@ -257,20 +258,18 @@
     initial_value = new (Z) Value(flow_graph()->constant_null());
   }
   StoreInstanceFieldInstr* store = new (Z) StoreInstanceFieldInstr(
-      Context::parent_offset(), new (Z) Value(replacement), initial_value,
-      kNoStoreBarrier, alloc->token_pos());
-  // Storing into uninitialized memory; remember to prevent dead store
-  // elimination and ensure proper GC barrier.
-  store->set_is_initialization(true);
-  flow_graph()->InsertAfter(cursor, store, NULL, FlowGraph::kEffect);
+      Slot::Context_parent(), new (Z) Value(replacement), initial_value,
+      kNoStoreBarrier, alloc->token_pos(),
+      StoreInstanceFieldInstr::Kind::kInitializing);
+  flow_graph()->InsertAfter(cursor, store, nullptr, FlowGraph::kEffect);
   cursor = replacement;
 
-  for (intptr_t i = 0; i < num_context_variables; ++i) {
-    if (context_value != NULL) {
+  for (auto variable : context_variables) {
+    const auto& field = Slot::GetContextVariableSlotFor(thread(), *variable);
+    if (context_value != nullptr) {
       LoadFieldInstr* load = new (Z) LoadFieldInstr(
-          context_value->CopyWithType(Z), Context::variable_offset(i),
-          AbstractType::ZoneHandle(Z), alloc->token_pos());
-      flow_graph()->InsertAfter(cursor, load, NULL, FlowGraph::kValue);
+          context_value->CopyWithType(Z), field, alloc->token_pos());
+      flow_graph()->InsertAfter(cursor, load, nullptr, FlowGraph::kValue);
       cursor = load;
       initial_value = new (Z) Value(load);
     } else {
@@ -278,27 +277,19 @@
     }
 
     store = new (Z) StoreInstanceFieldInstr(
-        Context::variable_offset(i), new (Z) Value(replacement), initial_value,
-        kNoStoreBarrier, alloc->token_pos());
-    // Storing into uninitialized memory; remember to prevent dead store
-    // elimination and ensure proper GC barrier.
-    store->set_is_initialization(true);
-    flow_graph()->InsertAfter(cursor, store, NULL, FlowGraph::kEffect);
+        field, new (Z) Value(replacement), initial_value, kNoStoreBarrier,
+        alloc->token_pos(), StoreInstanceFieldInstr::Kind::kInitializing);
+    flow_graph()->InsertAfter(cursor, store, nullptr, FlowGraph::kEffect);
     cursor = store;
   }
 }
 
 void JitCallSpecializer::VisitAllocateContext(AllocateContextInstr* instr) {
-  LowerContextAllocation(instr, instr->num_context_variables(), NULL);
+  LowerContextAllocation(instr, instr->context_variables(), nullptr);
 }
 
 void JitCallSpecializer::VisitCloneContext(CloneContextInstr* instr) {
-  if (instr->num_context_variables() ==
-      CloneContextInstr::kUnknownContextSize) {
-    return;
-  }
-
-  LowerContextAllocation(instr, instr->num_context_variables(),
+  LowerContextAllocation(instr, instr->context_variables(),
                          instr->context_value());
 }
 
diff --git a/runtime/vm/compiler/jit/jit_call_specializer.h b/runtime/vm/compiler/jit/jit_call_specializer.h
index bbb00e3..77c72bd 100644
--- a/runtime/vm/compiler/jit/jit_call_specializer.h
+++ b/runtime/vm/compiler/jit/jit_call_specializer.h
@@ -30,9 +30,10 @@
 
   virtual bool TryOptimizeStaticCallUsingStaticTypes(StaticCallInstr* call);
 
-  void LowerContextAllocation(Definition* instr,
-                              intptr_t num_context_variables,
-                              Value* context_value);
+  void LowerContextAllocation(
+      Definition* instr,
+      const GrowableArray<LocalVariable*>& context_variables,
+      Value* context_value);
 
   void ReplaceWithStaticCall(InstanceCallInstr* instr,
                              const ICData& unary_checks,
diff --git a/runtime/vm/constants_kbc.h b/runtime/vm/constants_kbc.h
index 2bec12a..9c23e1f 100644
--- a/runtime/vm/constants_kbc.h
+++ b/runtime/vm/constants_kbc.h
@@ -159,9 +159,9 @@
 //
 //    Allocate a Context object with D context variables.
 //
-//  - CloneContext
+//  - CloneContext D
 //
-//    Clone context stored in TOS.
+//    Clone Context object stored in TOS assuming it has D context variables.
 //
 //  - LoadContextParent
 //
@@ -403,7 +403,7 @@
   V(AllocateT,                             0, ___, ___, ___)                   \
   V(CreateArrayTOS,                        0, ___, ___, ___)                   \
   V(AllocateContext,                       D, num, ___, ___)                   \
-  V(CloneContext,                          0, ___, ___, ___)                   \
+  V(CloneContext,                          D, num, ___, ___)                   \
   V(LoadContextParent,                     0, ___, ___, ___)                   \
   V(StoreContextParent,                    0, ___, ___, ___)                   \
   V(LoadContextVar,                        D, num, ___, ___)                   \
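
A toy model of the CloneContext D semantics described in the comment above;
this is a standalone illustration only, not the actual interpreter (which
allocates contexts on the Dart heap):

    #include <vector>

    struct Ctx {
      Ctx* parent = nullptr;
      std::vector<void*> vars;
    };

    // Pop a context from TOS, copy its parent pointer and exactly D
    // variable slots, and push the shallow copy.
    Ctx* CloneContextOp(const Ctx& tos, int d) {
      Ctx* copy = new Ctx;
      copy->parent = tos.parent;
      copy->vars.assign(tos.vars.begin(), tos.vars.begin() + d);
      return copy;
    }
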
diff --git a/runtime/vm/scopes.cc b/runtime/vm/scopes.cc
index 0af2a71..91b3caa 100644
--- a/runtime/vm/scopes.cc
+++ b/runtime/vm/scopes.cc
@@ -29,11 +29,11 @@
       function_level_(function_level),
       loop_level_(loop_level),
       context_level_(LocalScope::kUnitializedContextLevel),
-      num_context_variables_(0),
       begin_token_pos_(TokenPosition::kNoSourcePos),
       end_token_pos_(TokenPosition::kNoSourcePos),
       variables_(),
       labels_(),
+      context_variables_(),
       referenced_() {
   // Hook this node into the children of the parent, unless the parent has a
   // different function_level, since the local scope of a nested function can
@@ -149,7 +149,7 @@
   // code generation time how far to walk up the context chain in order to
   // access the variable from the current context level.
   if ((*context_owner) == NULL) {
-    ASSERT(num_context_variables_ == 0);
+    ASSERT(num_context_variables() == 0);
     // This scope becomes the current context owner.
     set_context_level(1);
     *context_owner = this;
@@ -157,7 +157,7 @@
     // The captured variable is in a child scope of the context owner and we do
     // not share contexts.
     // This scope will allocate and chain a new context.
-    ASSERT(num_context_variables_ == 0);
+    ASSERT(num_context_variables() == 0);
     // This scope becomes the current context owner.
     set_context_level((*context_owner)->context_level() + 1);
     *context_owner = this;
@@ -165,7 +165,7 @@
     ASSERT(FLAG_share_enclosing_context);
     // The captured variable is at a deeper loop level than the current context.
     // This scope will allocate and chain a new context.
-    ASSERT(num_context_variables_ == 0);
+    ASSERT(num_context_variables() == 0);
     // This scope becomes the current context owner.
     set_context_level((*context_owner)->context_level() + 1);
     *context_owner = this;
@@ -178,8 +178,13 @@
       ASSERT(context_level() == (*context_owner)->context_level());
     }
   }
-  variable->set_index(
-      VariableIndex((*context_owner)->num_context_variables_++));
+
+  (*context_owner)->AddContextVariable(variable);
+}
+
+void LocalScope::AddContextVariable(LocalVariable* variable) {
+  variable->set_index(VariableIndex(context_variables_.length()));
+  context_variables_.Add(variable);
 }
 
 VariableIndex LocalScope::AllocateVariables(VariableIndex first_parameter_index,
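
CaptureVariable now funnels all index assignment through AddContextVariable,
so a context-allocated variable's index always equals its position in
context_variables_. An illustrative check of that invariant (not code from
this CL):

    for (intptr_t i = 0; i < scope->num_context_variables(); ++i) {
      ASSERT(scope->context_variables()[i]->index().value() == i);
    }
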
diff --git a/runtime/vm/scopes.h b/runtime/vm/scopes.h
index 18d447c..38abf8f 100644
--- a/runtime/vm/scopes.h
+++ b/runtime/vm/scopes.h
@@ -312,14 +312,25 @@
   TokenPosition end_token_pos() const { return end_token_pos_; }
   void set_end_token_pos(TokenPosition value) { end_token_pos_ = value; }
 
+  // Return the list of variables allocated in the context and belonging to this
+  // scope and to its children at the same loop level.
+  const GrowableArray<LocalVariable*>& context_variables() const {
+    return context_variables_;
+  }
+
   // The number of variables allocated in the context and belonging to this
   // scope and to its children at the same loop level.
-  int num_context_variables() const { return num_context_variables_; }
+  int num_context_variables() const { return context_variables().length(); }
 
   // Add a variable to the scope. Returns false if a variable with the
   // same name is already present.
   bool AddVariable(LocalVariable* variable);
 
+  // Add a variable to the scope as a context-allocated variable and assign
+  // it an index within the context. Does not check if the scope already
+  // contains this variable or a variable with the same name.
+  void AddContextVariable(LocalVariable* var);
+
   // Insert a formal parameter variable to the scope at the given position,
   // possibly in front of aliases already added with AddVariable.
   // Returns false if a variable with the same name is already present.
@@ -446,12 +457,14 @@
   int function_level_;         // Reflects the nesting level of local functions.
   int loop_level_;             // Reflects the loop nesting level.
   int context_level_;          // Reflects the level of the runtime context.
-  int num_context_variables_;  // Only set if this scope is a context owner.
   TokenPosition begin_token_pos_;  // Token index of beginning of scope.
   TokenPosition end_token_pos_;    // Token index of end of scope.
   GrowableArray<LocalVariable*> variables_;
   GrowableArray<SourceLabel*> labels_;
 
+  // List of variables allocated in the context owned by this scope.
+  GrowableArray<LocalVariable*> context_variables_;
+
   // List of names referenced in this scope and its children that
   // are not resolved to local variables.
   GrowableArray<NameReference*> referenced_;
diff --git a/runtime/vm/type_testing_stubs.cc b/runtime/vm/type_testing_stubs.cc
index a7a83b8..1f849fd 100644
--- a/runtime/vm/type_testing_stubs.cc
+++ b/runtime/vm/type_testing_stubs.cc
@@ -630,9 +630,9 @@
     if (cid != kDynamicCid) {
       const Class& instance_klass =
           Class::Handle(Isolate::Current()->class_table()->At(cid));
-      if (instance_klass.IsGeneric() &&
+      if (load_field->slot().IsTypeArguments() && instance_klass.IsGeneric() &&
           instance_klass.type_arguments_field_offset() ==
-              load_field->offset_in_bytes()) {
+              load_field->slot().offset_in_bytes()) {
         // This is a subset of Case c) above, namely forwarding the type
         // argument vector.
         //