Version 2.14.0-76.0.dev

Merge commit '5d636c62612ef362fff0b237ec66f69ad54c480d' into 'dev'
diff --git a/runtime/vm/compiler/assembler/assembler_arm.cc b/runtime/vm/compiler/assembler/assembler_arm.cc
index 42d0440..460ef6f 100644
--- a/runtime/vm/compiler/assembler/assembler_arm.cc
+++ b/runtime/vm/compiler/assembler/assembler_arm.cc
@@ -2716,6 +2716,32 @@
 }
 
 void Assembler::LoadFromOffset(Register reg,
+                               const Address& address,
+                               OperandSize size,
+                               Condition cond) {
+  switch (size) {
+    case kByte:
+      ldrsb(reg, address, cond);
+      break;
+    case kUnsignedByte:
+      ldrb(reg, address, cond);
+      break;
+    case kTwoBytes:
+      ldrsh(reg, address, cond);
+      break;
+    case kUnsignedTwoBytes:
+      ldrh(reg, address, cond);
+      break;
+    case kUnsignedFourBytes:
+    case kFourBytes:
+      ldr(reg, address, cond);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+void Assembler::LoadFromOffset(Register reg,
                                Register base,
                                int32_t offset,
                                OperandSize size,
@@ -2728,25 +2754,7 @@
     base = IP;
     offset = offset & offset_mask;
   }
-  switch (size) {
-    case kByte:
-      ldrsb(reg, Address(base, offset), cond);
-      break;
-    case kUnsignedByte:
-      ldrb(reg, Address(base, offset), cond);
-      break;
-    case kTwoBytes:
-      ldrsh(reg, Address(base, offset), cond);
-      break;
-    case kUnsignedTwoBytes:
-      ldrh(reg, Address(base, offset), cond);
-      break;
-    case kFourBytes:
-      ldr(reg, Address(base, offset), cond);
-      break;
-    default:
-      UNREACHABLE();
-  }
+  LoadFromOffset(reg, Address(base, offset), size, cond);
 }
 
 void Assembler::LoadFromStack(Register dst, intptr_t depth) {
@@ -2765,6 +2773,28 @@
 }
 
 void Assembler::StoreToOffset(Register reg,
+                              const Address& address,
+                              OperandSize size,
+                              Condition cond) {
+  switch (size) {
+    case kUnsignedByte:
+    case kByte:
+      strb(reg, address, cond);
+      break;
+    case kUnsignedTwoBytes:
+    case kTwoBytes:
+      strh(reg, address, cond);
+      break;
+    case kUnsignedFourBytes:
+    case kFourBytes:
+      str(reg, address, cond);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+void Assembler::StoreToOffset(Register reg,
                               Register base,
                               int32_t offset,
                               OperandSize size,
@@ -2778,19 +2808,7 @@
     base = IP;
     offset = offset & offset_mask;
   }
-  switch (size) {
-    case kByte:
-      strb(reg, Address(base, offset), cond);
-      break;
-    case kTwoBytes:
-      strh(reg, Address(base, offset), cond);
-      break;
-    case kFourBytes:
-      str(reg, Address(base, offset), cond);
-      break;
-    default:
-      UNREACHABLE();
-  }
+  StoreToOffset(reg, Address(base, offset), size, cond);
 }
 
 void Assembler::LoadSFromOffset(SRegister reg,
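
A host-C++ sketch of the extension semantics the new Address-based dispatch
implements may help: each case mirrors the matching ARM instruction (ldrsb and
ldrsh sign-extend, ldrb and ldrh zero-extend, and both four-byte cases map to a
plain ldr), while the register+offset overload above now only rewrites
unencodable offsets through IP and delegates. Types and names below are local
stand-ins, not the VM's API.

    #include <cstdint>
    #include <cstring>

    enum OperandSize {
      kByte, kUnsignedByte, kTwoBytes, kUnsignedTwoBytes, kFourBytes
    };

    // Semantic model of the size-dispatched load the assembler emits.
    int32_t LoadFromAddress(const void* address, OperandSize size) {
      switch (size) {
        case kByte:             { int8_t v;   std::memcpy(&v, address, 1); return v; }
        case kUnsignedByte:     { uint8_t v;  std::memcpy(&v, address, 1); return v; }
        case kTwoBytes:         { int16_t v;  std::memcpy(&v, address, 2); return v; }
        case kUnsignedTwoBytes: { uint16_t v; std::memcpy(&v, address, 2); return v; }
        case kFourBytes:        { int32_t v;  std::memcpy(&v, address, 4); return v; }
      }
      return 0;  // not reached for the sizes above
    }
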
diff --git a/runtime/vm/compiler/assembler/assembler_arm.h b/runtime/vm/compiler/assembler/assembler_arm.h
index accbb47..2e995e1 100644
--- a/runtime/vm/compiler/assembler/assembler_arm.h
+++ b/runtime/vm/compiler/assembler/assembler_arm.h
@@ -928,6 +928,15 @@
   intptr_t FindImmediate(int32_t imm);
   bool CanLoadFromObjectPool(const Object& object) const;
   void LoadFromOffset(Register reg,
+                      const Address& address,
+                      OperandSize type,
+                      Condition cond);
+  void LoadFromOffset(Register reg,
+                      const Address& address,
+                      OperandSize type = kFourBytes) override {
+    LoadFromOffset(reg, address, type, AL);
+  }
+  void LoadFromOffset(Register reg,
                       Register base,
                       int32_t offset,
                       OperandSize type = kFourBytes,
@@ -970,6 +979,15 @@
   void CompareToStack(Register src, intptr_t depth);
 
   void StoreToOffset(Register reg,
+                     const Address& address,
+                     OperandSize type,
+                     Condition cond);
+  void StoreToOffset(Register reg,
+                     const Address& address,
+                     OperandSize type = kFourBytes) override {
+    StoreToOffset(reg, address, type, AL);
+  }
+  void StoreToOffset(Register reg,
                      Register base,
                      int32_t offset,
                      OperandSize type = kFourBytes,
diff --git a/runtime/vm/compiler/assembler/assembler_arm64.cc b/runtime/vm/compiler/assembler/assembler_arm64.cc
index c54c816..1cc89a4 100644
--- a/runtime/vm/compiler/assembler/assembler_arm64.cc
+++ b/runtime/vm/compiler/assembler/assembler_arm64.cc
@@ -812,11 +812,11 @@
                                int32_t offset,
                                OperandSize sz) {
   if (Address::CanHoldOffset(offset, Address::Offset, sz)) {
-    ldr(dest, Address(base, offset, Address::Offset, sz), sz);
+    LoadFromOffset(dest, Address(base, offset, Address::Offset, sz), sz);
   } else {
     ASSERT(base != TMP2);
     AddImmediate(TMP2, base, offset);
-    ldr(dest, Address(TMP2), sz);
+    LoadFromOffset(dest, Address(TMP2), sz);
   }
 }
 
@@ -856,11 +856,11 @@
                               OperandSize sz) {
   ASSERT(base != TMP2);
   if (Address::CanHoldOffset(offset, Address::Offset, sz)) {
-    str(src, Address(base, offset, Address::Offset, sz), sz);
+    StoreToOffset(src, Address(base, offset, Address::Offset, sz), sz);
   } else {
     ASSERT(src != TMP2);
     AddImmediate(TMP2, base, offset);
-    str(src, Address(TMP2), sz);
+    StoreToOffset(src, Address(TMP2), sz);
   }
 }
 
diff --git a/runtime/vm/compiler/assembler/assembler_arm64.h b/runtime/vm/compiler/assembler/assembler_arm64.h
index 949711e..52873c6 100644
--- a/runtime/vm/compiler/assembler/assembler_arm64.h
+++ b/runtime/vm/compiler/assembler/assembler_arm64.h
@@ -1685,7 +1685,9 @@
 
   void LoadFromOffset(Register dest,
                       const Address& address,
-                      OperandSize sz = kEightBytes);
+                      OperandSize sz = kEightBytes) override {
+    ldr(dest, address, sz);
+  }
   void LoadFromOffset(Register dest,
                       Register base,
                       int32_t offset,
@@ -1735,6 +1737,11 @@
   void CompareToStack(Register src, intptr_t depth);
 
   void StoreToOffset(Register src,
+                     const Address& address,
+                     OperandSize sz = kEightBytes) override {
+    str(src, address, sz);
+  }
+  void StoreToOffset(Register src,
                      Register base,
                      int32_t offset,
                      OperandSize sz = kEightBytes);
diff --git a/runtime/vm/compiler/assembler/assembler_base.h b/runtime/vm/compiler/assembler/assembler_base.h
index 1ce7b58..4b81856 100644
--- a/runtime/vm/compiler/assembler/assembler_base.h
+++ b/runtime/vm/compiler/assembler/assembler_base.h
@@ -199,10 +199,18 @@
 #endif
 };
 
+// For declaring default sizes in AssemblerBase.
+#if defined(TARGET_ARCH_IS_64_BIT)
+constexpr OperandSize kWordBytes = kEightBytes;
+#else
+constexpr OperandSize kWordBytes = kFourBytes;
+#endif
+
 // Forward declarations.
 class Assembler;
 class AssemblerFixup;
 class AssemblerBuffer;
+class Address;
 
 class Label : public ZoneAllocated {
  public:
@@ -553,6 +561,13 @@
 
   virtual void Breakpoint() = 0;
 
+  virtual void LoadFromOffset(Register dst,
+                              const Address& address,
+                              OperandSize sz = kWordBytes) = 0;
+  virtual void StoreToOffset(Register src,
+                             const Address& address,
+                             OperandSize sz = kWordBytes) = 0;
+
   intptr_t InsertAlignedRelocation(BSS::Relocation reloc);
 
   void Unimplemented(const char* message);
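
The new kWordBytes constant lets AssemblerBase declare architecture-neutral
virtual load/store entry points whose default operand size is a full target
word. A minimal sketch of the pattern, with simplified signatures standing in
for the real Register/Address/OperandSize ones:

    #if defined(TARGET_ARCH_IS_64_BIT)
    constexpr int kWordBytes = 8;  // 64-bit targets default to 8-byte accesses
    #else
    constexpr int kWordBytes = 4;  // 32-bit targets default to 4-byte accesses
    #endif

    struct AssemblerBase {
      virtual ~AssemblerBase() = default;
      // Shared compiler code can emit a word-sized load or store without
      // knowing which concrete assembler it is driving.
      virtual void LoadFromOffset(int dst, const void* address,
                                  int sz = kWordBytes) = 0;
      virtual void StoreToOffset(int src, const void* address,
                                 int sz = kWordBytes) = 0;
    };

One C++ subtlety: default arguments bind to the static type of the call, so a
call through AssemblerBase uses kWordBytes even where an override (as in
assembler_arm.h above, which re-declares kFourBytes) states its own default.
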
diff --git a/runtime/vm/compiler/assembler/assembler_ia32.cc b/runtime/vm/compiler/assembler/assembler_ia32.cc
index 56c4f3e..427cb95 100644
--- a/runtime/vm/compiler/assembler/assembler_ia32.cc
+++ b/runtime/vm/compiler/assembler/assembler_ia32.cc
@@ -182,6 +182,12 @@
   FATAL("Use movzxb or movsxb instead.");
 }
 
+void Assembler::movb(const Address& dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x88);
+  EmitOperand(src, dst);
+}
+
 void Assembler::movb(const Address& dst, ByteRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x88);
@@ -1774,6 +1780,7 @@
       return movsxw(reg, address);
     case kUnsignedTwoBytes:
       return movzxw(reg, address);
+    case kUnsignedFourBytes:
     case kFourBytes:
       return movl(reg, address);
     default:
@@ -1782,6 +1789,25 @@
   }
 }
 
+void Assembler::StoreToOffset(Register reg,
+                              const Address& address,
+                              OperandSize sz) {
+  switch (sz) {
+    case kByte:
+    case kUnsignedByte:
+      return movb(address, reg);
+    case kTwoBytes:
+    case kUnsignedTwoBytes:
+      return movw(address, reg);
+    case kFourBytes:
+    case kUnsignedFourBytes:
+      return movl(address, reg);
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
 void Assembler::LoadFromStack(Register dst, intptr_t depth) {
   ASSERT(depth >= 0);
   movl(dst, Address(ESP, depth * target::kWordSize));
diff --git a/runtime/vm/compiler/assembler/assembler_ia32.h b/runtime/vm/compiler/assembler/assembler_ia32.h
index 4cc8cd5..8b91fbd 100644
--- a/runtime/vm/compiler/assembler/assembler_ia32.h
+++ b/runtime/vm/compiler/assembler/assembler_ia32.h
@@ -272,6 +272,7 @@
   void movsxb(Register dst, ByteRegister src);
   void movsxb(Register dst, const Address& src);
   void movb(Register dst, const Address& src);
+  void movb(const Address& dst, Register src);
   void movb(const Address& dst, ByteRegister src);
   void movb(const Address& dst, const Immediate& imm);
 
@@ -590,7 +591,7 @@
   // Arch-specific LoadFromOffset to choose the right operation for [sz].
   void LoadFromOffset(Register dst,
                       const Address& address,
-                      OperandSize sz = kFourBytes);
+                      OperandSize sz = kFourBytes) override;
   void LoadFromOffset(Register dst,
                       Register base,
                       int32_t offset,
@@ -631,6 +632,21 @@
     LoadCompressedField(
         dst, FieldAddress(base, index, TIMES_COMPRESSED_WORD_SIZE, offset));
   }
+  void StoreToOffset(Register src,
+                     const Address& address,
+                     OperandSize sz = kFourBytes) override;
+  void StoreToOffset(Register src,
+                     Register base,
+                     int32_t offset,
+                     OperandSize sz = kFourBytes) {
+    StoreToOffset(src, Address(base, offset), sz);
+  }
+  void StoreFieldToOffset(Register src,
+                          Register base,
+                          int32_t offset,
+                          OperandSize sz = kFourBytes) {
+    StoreToOffset(src, FieldAddress(base, offset), sz);
+  }
   void LoadFromStack(Register dst, intptr_t depth);
   void StoreToStack(Register src, intptr_t depth);
   void CompareToStack(Register src, intptr_t depth);
diff --git a/runtime/vm/compiler/assembler/assembler_x64.cc b/runtime/vm/compiler/assembler/assembler_x64.cc
index 274a5d8..344097e 100644
--- a/runtime/vm/compiler/assembler/assembler_x64.cc
+++ b/runtime/vm/compiler/assembler/assembler_x64.cc
@@ -1646,6 +1646,8 @@
     case kUnsignedTwoBytes:
       return movzxw(reg, address);
     case kFourBytes:
+      return movsxd(reg, address);
+    case kUnsignedFourBytes:
       return movl(reg, address);
     case kEightBytes:
       return movq(reg, address);
@@ -1655,6 +1657,27 @@
   }
 }
 
+void Assembler::StoreToOffset(Register reg,
+                              const Address& address,
+                              OperandSize sz) {
+  switch (sz) {
+    case kByte:
+    case kUnsignedByte:
+      return movb(address, reg);
+    case kTwoBytes:
+    case kUnsignedTwoBytes:
+      return movw(address, reg);
+    case kFourBytes:
+    case kUnsignedFourBytes:
+      return movl(address, reg);
+    case kEightBytes:
+      return movq(address, reg);
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
 void Assembler::EnterFrame(intptr_t frame_size) {
   if (prologue_offset_ == -1) {
     prologue_offset_ = CodeSize();
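
Note the behavioral refinement above: kFourBytes loads on x64 now sign-extend
via movsxd, while the new kUnsignedFourBytes case uses movl, which implicitly
zeroes the upper 32 bits of the destination register. A host-C++ sketch of the
difference:

    #include <cstdint>
    #include <cstring>

    int64_t LoadFourBytes(const void* p, bool is_signed) {
      uint32_t raw;
      std::memcpy(&raw, p, sizeof(raw));
      return is_signed
                 ? static_cast<int64_t>(static_cast<int32_t>(raw))  // movsxd
                 : static_cast<int64_t>(raw);                       // movl
    }
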
diff --git a/runtime/vm/compiler/assembler/assembler_x64.h b/runtime/vm/compiler/assembler/assembler_x64.h
index 7627c588..f9c4ff1 100644
--- a/runtime/vm/compiler/assembler/assembler_x64.h
+++ b/runtime/vm/compiler/assembler/assembler_x64.h
@@ -935,7 +935,7 @@
   // Arch-specific LoadFromOffset to choose the right operation for [sz].
   void LoadFromOffset(Register dst,
                       const Address& address,
-                      OperandSize sz = kEightBytes);
+                      OperandSize sz = kEightBytes) override;
   void LoadFromOffset(Register dst,
                       Register base,
                       int32_t offset,
@@ -976,14 +976,20 @@
     LoadCompressed(
         dst, FieldAddress(base, index, TIMES_COMPRESSED_WORD_SIZE, offset));
   }
+  void StoreToOffset(Register src,
+                     const Address& address,
+                     OperandSize sz = kEightBytes) override;
+  void StoreToOffset(Register src,
+                     Register base,
+                     int32_t offset,
+                     OperandSize sz = kEightBytes) {
+    StoreToOffset(src, Address(base, offset), sz);
+  }
   void StoreFieldToOffset(Register src,
                           Register base,
                           int32_t offset,
                           OperandSize sz = kEightBytes) {
-    if (sz != kEightBytes) {
-      UNIMPLEMENTED();
-    }
-    StoreMemoryValue(src, base, offset - kHeapObjectTag);
+    StoreToOffset(src, FieldAddress(base, offset), sz);
   }
   void LoadFromStack(Register dst, intptr_t depth);
   void StoreToStack(Register src, intptr_t depth);
diff --git a/runtime/vm/compiler/backend/constant_propagator.cc b/runtime/vm/compiler/backend/constant_propagator.cc
index 868e0e2..9f9b3f4 100644
--- a/runtime/vm/compiler/backend/constant_propagator.cc
+++ b/runtime/vm/compiler/backend/constant_propagator.cc
@@ -288,8 +288,6 @@
 void ConstantPropagator::VisitCheckEitherNonSmi(CheckEitherNonSmiInstr* instr) {
 }
 
-void ConstantPropagator::VisitStoreUntagged(StoreUntaggedInstr* instr) {}
-
 void ConstantPropagator::VisitStoreIndexedUnsafe(
     StoreIndexedUnsafeInstr* instr) {}
 
diff --git a/runtime/vm/compiler/backend/il.cc b/runtime/vm/compiler/backend/il.cc
index 834bc87..f6b9929 100644
--- a/runtime/vm/compiler/backend/il.cc
+++ b/runtime/vm/compiler/backend/il.cc
@@ -972,24 +972,10 @@
 }
 
 Representation LoadFieldInstr::representation() const {
-  if (slot().representation() != kTagged) {
-    return slot().representation();
-  } else if (IsUnboxedDartFieldLoad()) {
-    const Field& field = slot().field();
-    const intptr_t cid = field.UnboxedFieldCid();
-    switch (cid) {
-      case kDoubleCid:
-        return kUnboxedDouble;
-      case kFloat32x4Cid:
-        return kUnboxedFloat32x4;
-      case kFloat64x2Cid:
-        return kUnboxedFloat64x2;
-      default:
-        UNREACHABLE();
-        break;
-    }
+  if (IsUnboxedDartFieldLoad()) {
+    return FlowGraph::UnboxedFieldRepresentationOf(slot().field());
   }
-  return kTagged;
+  return slot().representation();
 }
 
 AllocateUninitializedContextInstr::AllocateUninitializedContextInstr(
@@ -1022,24 +1008,27 @@
                              locs(), deopt_id());
 }
 
-bool StoreInstanceFieldInstr::IsUnboxedStore() const {
-  return slot().IsDartField() &&
+bool StoreInstanceFieldInstr::IsUnboxedDartFieldStore() const {
+  return slot().representation() == kTagged && slot().IsDartField() &&
          FlowGraphCompiler::IsUnboxedField(slot().field());
 }
 
-bool StoreInstanceFieldInstr::IsPotentialUnboxedStore() const {
-  return slot().IsDartField() &&
+bool StoreInstanceFieldInstr::IsPotentialUnboxedDartFieldStore() const {
+  return slot().representation() == kTagged && slot().IsDartField() &&
          FlowGraphCompiler::IsPotentialUnboxedField(slot().field());
 }
 
 Representation StoreInstanceFieldInstr::RequiredInputRepresentation(
     intptr_t index) const {
   ASSERT((index == 0) || (index == 1));
-  if ((index == 1) && IsUnboxedStore()) {
-    const Field& field = slot().field();
-    return FlowGraph::UnboxedFieldRepresentationOf(field);
+  if (index == 0) {
+    // The instance is always tagged.
+    return kTagged;
   }
-  return kTagged;
+  if (IsUnboxedDartFieldStore()) {
+    return FlowGraph::UnboxedFieldRepresentationOf(slot().field());
+  }
+  return slot().representation();
 }
 
 Instruction* StoreInstanceFieldInstr::Canonicalize(FlowGraph* flow_graph) {
@@ -1048,7 +1037,7 @@
   // Context objects can be allocated uninitialized as a performance
   // optimization in JIT mode - however in AOT mode we always allocate them
   // null initialized.
-  if (is_initialization_ &&
+  if (is_initialization_ && slot().representation() == kTagged &&
       (!slot().IsContextSlot() ||
        !instance()->definition()->IsAllocateUninitializedContext()) &&
       value()->BindsToConstantNull()) {
@@ -2680,6 +2669,10 @@
       return false;
 
     // Not length loads.
+#define UNBOXED_NATIVE_SLOT_CASE(Class, Untagged, Field, Rep, IsFinal)         \
+  case Slot::Kind::k##Class##_##Field:
+      UNBOXED_NATIVE_SLOTS_LIST(UNBOXED_NATIVE_SLOT_CASE)
+#undef UNBOXED_NATIVE_SLOT_CASE
     case Slot::Kind::kLinkedHashMap_index:
     case Slot::Kind::kLinkedHashMap_data:
     case Slot::Kind::kLinkedHashMap_hash_mask:
@@ -2702,14 +2695,10 @@
     case Slot::Kind::kClosure_instantiator_type_arguments:
     case Slot::Kind::kClosure_hash:
     case Slot::Kind::kClosureData_default_type_arguments:
-    case Slot::Kind::kClosureData_default_type_arguments_kind:
     case Slot::Kind::kCapturedVariable:
     case Slot::Kind::kDartField:
     case Slot::Kind::kFunction_data:
-    case Slot::Kind::kFunction_kind_tag:
-    case Slot::Kind::kFunction_packed_fields:
     case Slot::Kind::kFunction_signature:
-    case Slot::Kind::kFunctionType_packed_fields:
     case Slot::Kind::kFunctionType_parameter_names:
     case Slot::Kind::kFunctionType_parameter_types:
     case Slot::Kind::kFunctionType_type_parameters:
@@ -2717,7 +2706,6 @@
     case Slot::Kind::kType_arguments:
     case Slot::Kind::kTypeArgumentsIndex:
     case Slot::Kind::kTypeParameter_bound:
-    case Slot::Kind::kTypeParameter_flags:
     case Slot::Kind::kTypeParameter_name:
     case Slot::Kind::kUnhandledException_exception:
     case Slot::Kind::kUnhandledException_stacktrace:
diff --git a/runtime/vm/compiler/backend/il.h b/runtime/vm/compiler/backend/il.h
index 46a5d13..50300c3 100644
--- a/runtime/vm/compiler/backend/il.h
+++ b/runtime/vm/compiler/backend/il.h
@@ -432,7 +432,6 @@
   M(AllocateTypedData, _)                                                      \
   M(LoadField, _)                                                              \
   M(LoadUntagged, kNoGC)                                                       \
-  M(StoreUntagged, kNoGC)                                                      \
   M(LoadClassId, kNoGC)                                                        \
   M(InstantiateType, _)                                                        \
   M(InstantiateTypeArguments, _)                                               \
@@ -5301,6 +5300,16 @@
 // field initializers *must* be marked as initializing. Initializing stores
 // into unboxed fields are responsible for allocating the mutable box which
 // would be mutated by subsequent stores.
+//
+// Note: If the value to store is an unboxed derived pointer (e.g. pointer to
+// start of internal typed data array backing) then this instruction cannot be
+// moved across instructions which can trigger GC, to ensure that
+//
+//    LoadUntagged + Arithmetic + StoreInstanceField
+//
+// are performed as an effectively atomic set of instructions.
+//
+// See kernel_to_il.cc:BuildTypedDataViewFactoryConstructor.
 class StoreInstanceFieldInstr : public TemplateInstruction<2, NoThrow> {
  public:
   enum class Kind {
@@ -5345,8 +5354,9 @@
 
   virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
     // In AOT unbox is done based on TFA, therefore it was proven to be correct
-    // and it can never deoptmize.
-    return (IsUnboxedStore() && CompilerState::Current().is_aot())
+    // and it can never deoptimize.
+    return (slot().representation() != kTagged ||
+            (IsUnboxedDartFieldStore() && CompilerState::Current().is_aot()))
                ? kNotSpeculative
                : kGuardInputs;
   }
@@ -5363,6 +5373,10 @@
   bool is_initialization() const { return is_initialization_; }
 
   bool ShouldEmitStoreBarrier() const {
+    if (RepresentationUtils::IsUnboxed(slot().representation())) {
+      // The target field is native and unboxed, so not traversed by the GC.
+      return false;
+    }
     if (instance()->definition() == value()->definition()) {
       // `x.slot = x` cannot create an old->new or old&marked->old&unmarked
       // reference.
@@ -5381,7 +5395,7 @@
   }
 
   virtual bool CanTriggerGC() const {
-    return IsUnboxedStore() || IsPotentialUnboxedStore();
+    return IsUnboxedDartFieldStore() || IsPotentialUnboxedDartFieldStore();
   }
 
   virtual bool ComputeCanDeoptimize() const { return false; }
@@ -5394,8 +5408,14 @@
   // are marked as having no side-effects.
   virtual bool HasUnknownSideEffects() const { return false; }
 
-  bool IsUnboxedStore() const;
-  bool IsPotentialUnboxedStore() const;
+  // Returns whether this instruction is an unboxed store into a _boxed_ Dart
+  // field. Unboxed Dart fields are handled similarly to unboxed native fields.
+  bool IsUnboxedDartFieldStore() const;
+
+  // Returns whether this instruction is a potential unboxed store into a
+  // _boxed_ Dart field. Unboxed Dart fields are handled similarly to unboxed
+  // native fields.
+  bool IsPotentialUnboxedDartFieldStore() const;
 
   virtual Representation RequiredInputRepresentation(intptr_t index) const;
 
@@ -6443,56 +6463,6 @@
   DISALLOW_COPY_AND_ASSIGN(LoadUntaggedInstr);
 };
 
-// Stores an untagged value into the given object.
-//
-// If the untagged value is a derived pointer (e.g. pointer to start of internal
-// typed data array backing) then this instruction cannot be moved across
-// instructions which can trigger GC, to ensure that
-//
-//    LoadUntaggeed + Arithmetic + StoreUntagged
-//
-// are performed atomically
-//
-// See kernel_to_il.cc:BuildTypedDataViewFactoryConstructor.
-class StoreUntaggedInstr : public TemplateInstruction<2, NoThrow> {
- public:
-  StoreUntaggedInstr(Value* object, Value* value, intptr_t offset)
-      : offset_(offset) {
-    SetInputAt(0, object);
-    SetInputAt(1, value);
-  }
-
-  DECLARE_INSTRUCTION(StoreUntagged)
-
-  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
-    ASSERT(idx == 0 || idx == 1);
-    // The object may be tagged or untagged (for external objects).
-    if (idx == 0) return kNoRepresentation;
-    return kUntagged;
-  }
-
-  Value* object() const { return inputs_[0]; }
-  Value* value() const { return inputs_[1]; }
-  intptr_t offset() const { return offset_; }
-
-  virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }
-  virtual bool ComputeCanDeoptimize() const { return false; }
-  virtual bool HasUnknownSideEffects() const { return false; }
-  virtual bool AttributesEqual(const Instruction& other) const {
-    return other.AsStoreUntagged()->offset_ == offset_;
-  }
-
-  intptr_t offset_from_tagged() const {
-    const bool is_tagged = object()->definition()->representation() == kTagged;
-    return offset() - (is_tagged ? kHeapObjectTag : 0);
-  }
-
- private:
-  intptr_t offset_;
-
-  DISALLOW_COPY_AND_ASSIGN(StoreUntaggedInstr);
-};
-
 class LoadClassIdInstr : public TemplateDefinition<1, NoThrow, Pure> {
  public:
   explicit LoadClassIdInstr(Value* object,
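
The ShouldEmitStoreBarrier change above adds an early exit for unboxed slots:
an unboxed field holds raw bits the GC never scans, so a store into it can
never create a heap reference the write barrier would need to record. A
simplified sketch of the resulting decision, with the instruction's queries
reduced to boolean parameters:

    enum Representation { kTagged, kUnboxedInt64 /* other unboxed reps */ };

    bool ShouldEmitStoreBarrier(Representation slot_rep,
                                bool value_is_the_instance,
                                bool value_cannot_be_heap_object) {
      if (slot_rep != kTagged) {
        return false;  // raw bits; the GC does not traverse this slot
      }
      if (value_is_the_instance) {
        return false;  // x.slot = x adds no old->new or marked->unmarked edge
      }
      if (value_cannot_be_heap_object) {
        return false;  // e.g. the value is known to be a Smi
      }
      return true;
    }
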
diff --git a/runtime/vm/compiler/backend/il_arm.cc b/runtime/vm/compiler/backend/il_arm.cc
index 70f7fb0..1709f7b 100644
--- a/runtime/vm/compiler/backend/il_arm.cc
+++ b/runtime/vm/compiler/backend/il_arm.cc
@@ -1753,10 +1753,6 @@
   }
 }
 
-DEFINE_BACKEND(StoreUntagged, (NoLocation, Register obj, Register value)) {
-  __ StoreToOffset(value, obj, instr->offset_from_tagged());
-}
-
 static bool CanBeImmediateIndex(Value* value,
                                 intptr_t cid,
                                 bool is_external,
@@ -2736,40 +2732,48 @@
                                                               bool opt) const {
   const intptr_t kNumInputs = 2;
   const intptr_t kNumTemps =
-      ((IsUnboxedStore() && opt) ? (FLAG_precompiled_mode ? 0 : 2)
-                                 : (IsPotentialUnboxedStore() ? 3 : 0));
-  LocationSummary* summary = new (zone)
-      LocationSummary(zone, kNumInputs, kNumTemps,
-                      (!FLAG_precompiled_mode &&
-                       ((IsUnboxedStore() && opt && is_initialization()) ||
-                        IsPotentialUnboxedStore()))
-                          ? LocationSummary::kCallOnSlowPath
-                          : LocationSummary::kNoCall);
+      ((IsUnboxedDartFieldStore() && opt)
+           ? (FLAG_precompiled_mode ? 0 : 2)
+           : (IsPotentialUnboxedDartFieldStore() ? 3 : 0));
+  LocationSummary* summary = new (zone) LocationSummary(
+      zone, kNumInputs, kNumTemps,
+      (!FLAG_precompiled_mode &&
+       ((IsUnboxedDartFieldStore() && opt && is_initialization()) ||
+        IsPotentialUnboxedDartFieldStore()))
+          ? LocationSummary::kCallOnSlowPath
+          : LocationSummary::kNoCall);
 
-  summary->set_in(0, Location::RequiresRegister());
-  if (IsUnboxedStore() && opt) {
-    if (slot().field().is_non_nullable_integer()) {
-      ASSERT(FLAG_precompiled_mode);
-      summary->set_in(1, Location::Pair(Location::RequiresRegister(),
-                                        Location::RequiresRegister()));
+  summary->set_in(kInstancePos, Location::RequiresRegister());
+  if (slot().representation() != kTagged) {
+    ASSERT(RepresentationUtils::IsUnboxedInteger(slot().representation()));
+    const size_t value_size =
+        RepresentationUtils::ValueSize(slot().representation());
+    if (value_size <= compiler::target::kWordSize) {
+      summary->set_in(kValuePos, Location::RequiresRegister());
     } else {
-      summary->set_in(1, Location::RequiresFpuRegister());
+      ASSERT(value_size <= 2 * compiler::target::kWordSize);
+      summary->set_in(kValuePos, Location::Pair(Location::RequiresRegister(),
+                                                Location::RequiresRegister()));
     }
+  } else if (IsUnboxedDartFieldStore() && opt) {
+    summary->set_in(kValuePos, Location::RequiresFpuRegister());
     if (!FLAG_precompiled_mode) {
       summary->set_temp(0, Location::RequiresRegister());
       summary->set_temp(1, Location::RequiresRegister());
     }
-  } else if (IsPotentialUnboxedStore()) {
-    summary->set_in(1, ShouldEmitStoreBarrier() ? Location::WritableRegister()
-                                                : Location::RequiresRegister());
+  } else if (IsPotentialUnboxedDartFieldStore()) {
+    summary->set_in(kValuePos, ShouldEmitStoreBarrier()
+                                   ? Location::WritableRegister()
+                                   : Location::RequiresRegister());
     summary->set_temp(0, Location::RequiresRegister());
     summary->set_temp(1, Location::RequiresRegister());
     summary->set_temp(2, opt ? Location::RequiresFpuRegister()
                              : Location::FpuRegisterLocation(Q1));
   } else {
-    summary->set_in(1, ShouldEmitStoreBarrier()
-                           ? Location::RegisterLocation(kWriteBarrierValueReg)
-                           : LocationRegisterOrConstant(value()));
+    summary->set_in(kValuePos,
+                    ShouldEmitStoreBarrier()
+                        ? Location::RegisterLocation(kWriteBarrierValueReg)
+                        : LocationRegisterOrConstant(value()));
   }
   return summary;
 }
@@ -2801,24 +2805,34 @@
 
   compiler::Label skip_store;
 
-  const Register instance_reg = locs()->in(0).reg();
+  const Register instance_reg = locs()->in(kInstancePos).reg();
   const intptr_t offset_in_bytes = OffsetInBytes();
   ASSERT(offset_in_bytes > 0);  // Field is finalized and points after header.
 
-  if (IsUnboxedStore() && compiler->is_optimizing()) {
-    if (slot().field().is_non_nullable_integer()) {
-      const PairLocation* value_pair = locs()->in(1).AsPairLocation();
-      const Register value_lo = value_pair->At(0).reg();
-      const Register value_hi = value_pair->At(1).reg();
-      __ Comment("UnboxedIntegerStoreInstanceFieldInstr");
-      __ StoreFieldToOffset(value_lo, instance_reg, offset_in_bytes);
-      __ StoreFieldToOffset(value_hi, instance_reg,
-                            offset_in_bytes + compiler::target::kWordSize);
-      return;
+  if (slot().representation() != kTagged) {
+    auto const rep = slot().representation();
+    ASSERT(RepresentationUtils::IsUnboxedInteger(rep));
+    const size_t value_size = RepresentationUtils::ValueSize(rep);
+    __ Comment("NativeUnboxedStoreInstanceFieldInstr");
+    if (value_size <= compiler::target::kWordSize) {
+      const Register value = locs()->in(kValuePos).reg();
+      __ StoreFieldToOffset(value, instance_reg, offset_in_bytes,
+                            RepresentationUtils::OperandSize(rep));
+    } else {
+      auto const in_pair = locs()->in(kValuePos).AsPairLocation();
+      const Register in_lo = in_pair->At(0).reg();
+      const Register in_hi = in_pair->At(1).reg();
+      const intptr_t offset_lo = OffsetInBytes() - kHeapObjectTag;
+      const intptr_t offset_hi = offset_lo + compiler::target::kWordSize;
+      __ StoreToOffset(in_lo, instance_reg, offset_lo);
+      __ StoreToOffset(in_hi, instance_reg, offset_hi);
     }
+    return;
+  }
 
+  if (IsUnboxedDartFieldStore() && compiler->is_optimizing()) {
     const intptr_t cid = slot().field().UnboxedFieldCid();
-    const DRegister value = EvenDRegisterOf(locs()->in(1).fpu_reg());
+    const DRegister value = EvenDRegisterOf(locs()->in(kValuePos).fpu_reg());
 
     if (FLAG_precompiled_mode) {
       switch (cid) {
@@ -2894,8 +2908,8 @@
     return;
   }
 
-  if (IsPotentialUnboxedStore()) {
-    const Register value_reg = locs()->in(1).reg();
+  if (IsPotentialUnboxedDartFieldStore()) {
+    const Register value_reg = locs()->in(kValuePos).reg();
     const Register temp = locs()->temp(0).reg();
     const Register temp2 = locs()->temp(1).reg();
     const DRegister fpu_temp = EvenDRegisterOf(locs()->temp(2).fpu_reg());
@@ -2903,7 +2917,7 @@
     if (ShouldEmitStoreBarrier()) {
       // Value input is a writable register and should be manually preserved
       // across allocation slow-path.
-      locs()->live_registers()->Add(locs()->in(1), kTagged);
+      locs()->live_registers()->Add(locs()->in(kValuePos), kTagged);
     }
 
     compiler::Label store_pointer;
@@ -2942,8 +2956,8 @@
     __ b(&store_pointer);
 
     if (!compiler->is_optimizing()) {
-      locs()->live_registers()->Add(locs()->in(0));
-      locs()->live_registers()->Add(locs()->in(1));
+      locs()->live_registers()->Add(locs()->in(kInstancePos));
+      locs()->live_registers()->Add(locs()->in(kValuePos));
     }
 
     {
@@ -2974,15 +2988,15 @@
   }
 
   if (ShouldEmitStoreBarrier()) {
-    const Register value_reg = locs()->in(1).reg();
+    const Register value_reg = locs()->in(kValuePos).reg();
     __ StoreIntoObjectOffset(instance_reg, offset_in_bytes, value_reg,
                              CanValueBeSmi());
   } else {
-    if (locs()->in(1).IsConstant()) {
+    if (locs()->in(kValuePos).IsConstant()) {
       __ StoreIntoObjectNoBarrierOffset(instance_reg, offset_in_bytes,
-                                        locs()->in(1).constant());
+                                        locs()->in(kValuePos).constant());
     } else {
-      const Register value_reg = locs()->in(1).reg();
+      const Register value_reg = locs()->in(kValuePos).reg();
       __ StoreIntoObjectNoBarrierOffset(instance_reg, offset_in_bytes,
                                         value_reg);
     }
@@ -3234,34 +3248,21 @@
   const Register instance_reg = locs()->in(0).reg();
   if (slot().representation() != kTagged) {
     ASSERT(!calls_initializer());
-    switch (slot().representation()) {
-      case kUnboxedInt64: {
-        auto const out_pair = locs()->out(0).AsPairLocation();
-        const Register out_lo = out_pair->At(0).reg();
-        const Register out_hi = out_pair->At(1).reg();
-        const intptr_t offset_lo = OffsetInBytes() - kHeapObjectTag;
-        const intptr_t offset_hi = offset_lo + compiler::target::kWordSize;
-        __ Comment("UnboxedInt64LoadFieldInstr");
-        __ LoadFromOffset(out_lo, instance_reg, offset_lo);
-        __ LoadFromOffset(out_hi, instance_reg, offset_hi);
-        break;
-      }
-      case kUnboxedUint32: {
-        const Register result = locs()->out(0).reg();
-        __ Comment("UnboxedUint32LoadFieldInstr");
-        __ LoadFieldFromOffset(result, instance_reg, OffsetInBytes());
-        break;
-      }
-      case kUnboxedUint8: {
-        const Register result = locs()->out(0).reg();
-        __ Comment("UnboxedUint8LoadFieldInstr");
-        __ LoadFieldFromOffset(result, instance_reg, OffsetInBytes(),
-                               compiler::kUnsignedByte);
-        break;
-      }
-      default:
-        UNIMPLEMENTED();
-        break;
+    auto const rep = slot().representation();
+    const size_t value_size = RepresentationUtils::ValueSize(rep);
+    __ Comment("NativeUnboxedLoadFieldInstr");
+    if (value_size <= compiler::target::kWordSize) {
+      auto const result = locs()->out(0).reg();
+      __ LoadFieldFromOffset(result, instance_reg, OffsetInBytes(),
+                             RepresentationUtils::OperandSize(rep));
+    } else {
+      auto const out_pair = locs()->out(0).AsPairLocation();
+      const Register out_lo = out_pair->At(0).reg();
+      const Register out_hi = out_pair->At(1).reg();
+      const intptr_t offset_lo = OffsetInBytes() - kHeapObjectTag;
+      const intptr_t offset_hi = offset_lo + compiler::target::kWordSize;
+      __ LoadFromOffset(out_lo, instance_reg, offset_lo);
+      __ LoadFromOffset(out_hi, instance_reg, offset_hi);
     }
     return;
   }
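
On 32-bit ARM a value wider than the target word (kUnboxedInt64) arrives as a
register pair and is stored as two word writes. The single kHeapObjectTag
subtraction is deliberate: the raw StoreToOffset helper, unlike
StoreFieldToOffset, does not untag the object pointer. A host-C++ sketch of
the layout math, assuming the usual tag bias of 1 and a little-endian 32-bit
target:

    #include <cstdint>
    #include <cstring>

    constexpr intptr_t kHeapObjectTag = 1;  // assumed tagged-pointer bias
    constexpr intptr_t kWordSize = 4;       // 32-bit target word

    // instance is the tagged pointer (object address + kHeapObjectTag), so
    // subtracting the tag from the field offset cancels the bias exactly once.
    void StoreInt64Field(uint8_t* instance, intptr_t field_offset,
                         uint32_t lo, uint32_t hi) {
      const intptr_t offset_lo = field_offset - kHeapObjectTag;
      const intptr_t offset_hi = offset_lo + kWordSize;
      std::memcpy(instance + offset_lo, &lo, sizeof(lo));  // low word first (LE)
      std::memcpy(instance + offset_hi, &hi, sizeof(hi));
    }
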
diff --git a/runtime/vm/compiler/backend/il_arm64.cc b/runtime/vm/compiler/backend/il_arm64.cc
index d375c59..4bbe8ca 100644
--- a/runtime/vm/compiler/backend/il_arm64.cc
+++ b/runtime/vm/compiler/backend/il_arm64.cc
@@ -1658,10 +1658,6 @@
   }
 }
 
-DEFINE_BACKEND(StoreUntagged, (NoLocation, Register obj, Register value)) {
-  __ StoreToOffset(value, obj, instr->offset_from_tagged());
-}
-
 static bool CanBeImmediateIndex(Value* value, intptr_t cid, bool is_external) {
   ConstantInstr* constant = value->definition()->AsConstant();
   if ((constant == NULL) || !constant->value().IsSmi()) {
@@ -2381,38 +2377,40 @@
 LocationSummary* StoreInstanceFieldInstr::MakeLocationSummary(Zone* zone,
                                                               bool opt) const {
   const intptr_t kNumInputs = 2;
-  const intptr_t kNumTemps = (IsUnboxedStore() && opt)
+  const intptr_t kNumTemps = (IsUnboxedDartFieldStore() && opt)
                                  ? (FLAG_precompiled_mode ? 0 : 2)
-                                 : (IsPotentialUnboxedStore() ? 2 : 0);
-  LocationSummary* summary = new (zone)
-      LocationSummary(zone, kNumInputs, kNumTemps,
-                      (!FLAG_precompiled_mode &&
-                       ((IsUnboxedStore() && opt && is_initialization()) ||
-                        IsPotentialUnboxedStore()))
-                          ? LocationSummary::kCallOnSlowPath
-                          : LocationSummary::kNoCall);
+                                 : (IsPotentialUnboxedDartFieldStore() ? 2 : 0);
+  LocationSummary* summary = new (zone) LocationSummary(
+      zone, kNumInputs, kNumTemps,
+      (!FLAG_precompiled_mode &&
+       ((IsUnboxedDartFieldStore() && opt && is_initialization()) ||
+        IsPotentialUnboxedDartFieldStore()))
+          ? LocationSummary::kCallOnSlowPath
+          : LocationSummary::kNoCall);
 
-  summary->set_in(0, Location::RequiresRegister());
-  if (IsUnboxedStore() && opt) {
-    if (slot().field().is_non_nullable_integer()) {
-      ASSERT(FLAG_precompiled_mode);
-      summary->set_in(1, Location::RequiresRegister());
-    } else {
-      summary->set_in(1, Location::RequiresFpuRegister());
-    }
+  summary->set_in(kInstancePos, Location::RequiresRegister());
+  if (slot().representation() != kTagged) {
+    ASSERT(RepresentationUtils::IsUnboxedInteger(slot().representation()));
+    ASSERT(RepresentationUtils::ValueSize(slot().representation()) <=
+           compiler::target::kWordSize);
+    summary->set_in(kValuePos, Location::RequiresRegister());
+  } else if (IsUnboxedDartFieldStore() && opt) {
+    summary->set_in(kValuePos, Location::RequiresFpuRegister());
     if (!FLAG_precompiled_mode) {
       summary->set_temp(0, Location::RequiresRegister());
       summary->set_temp(1, Location::RequiresRegister());
     }
-  } else if (IsPotentialUnboxedStore()) {
-    summary->set_in(1, ShouldEmitStoreBarrier() ? Location::WritableRegister()
-                                                : Location::RequiresRegister());
+  } else if (IsPotentialUnboxedDartFieldStore()) {
+    summary->set_in(kValuePos, ShouldEmitStoreBarrier()
+                                   ? Location::WritableRegister()
+                                   : Location::RequiresRegister());
     summary->set_temp(0, Location::RequiresRegister());
     summary->set_temp(1, Location::RequiresRegister());
   } else {
-    summary->set_in(1, ShouldEmitStoreBarrier()
-                           ? Location::RegisterLocation(kWriteBarrierValueReg)
-                           : LocationRegisterOrConstant(value()));
+    summary->set_in(kValuePos,
+                    ShouldEmitStoreBarrier()
+                        ? Location::RegisterLocation(kWriteBarrierValueReg)
+                        : LocationRegisterOrConstant(value()));
   }
   return summary;
 }
@@ -2424,19 +2422,22 @@
 
   compiler::Label skip_store;
 
-  const Register instance_reg = locs()->in(0).reg();
+  const Register instance_reg = locs()->in(kInstancePos).reg();
   const intptr_t offset_in_bytes = OffsetInBytes();
   ASSERT(offset_in_bytes > 0);  // Field is finalized and points after header.
 
-  if (IsUnboxedStore() && compiler->is_optimizing()) {
-    if (slot().field().is_non_nullable_integer()) {
-      const Register value = locs()->in(1).reg();
-      __ Comment("UnboxedIntegerStoreInstanceFieldInstr");
-      __ StoreFieldToOffset(value, instance_reg, offset_in_bytes);
-      return;
-    }
+  if (slot().representation() != kTagged) {
+    ASSERT(RepresentationUtils::IsUnboxedInteger(slot().representation()));
+    const Register value = locs()->in(kValuePos).reg();
+    __ Comment("NativeUnboxedStoreInstanceFieldInstr");
+    __ StoreFieldToOffset(
+        value, instance_reg, offset_in_bytes,
+        RepresentationUtils::OperandSize(slot().representation()));
+    return;
+  }
 
-    const VRegister value = locs()->in(1).fpu_reg();
+  if (IsUnboxedDartFieldStore() && compiler->is_optimizing()) {
+    const VRegister value = locs()->in(kValuePos).fpu_reg();
     const intptr_t cid = slot().field().UnboxedFieldCid();
 
     if (FLAG_precompiled_mode) {
@@ -2504,15 +2505,15 @@
     return;
   }
 
-  if (IsPotentialUnboxedStore()) {
-    const Register value_reg = locs()->in(1).reg();
+  if (IsPotentialUnboxedDartFieldStore()) {
+    const Register value_reg = locs()->in(kValuePos).reg();
     const Register temp = locs()->temp(0).reg();
     const Register temp2 = locs()->temp(1).reg();
 
     if (ShouldEmitStoreBarrier()) {
       // Value input is a writable register and should be manually preserved
       // across allocation slow-path.
-      locs()->live_registers()->Add(locs()->in(1), kTagged);
+      locs()->live_registers()->Add(locs()->in(kValuePos), kTagged);
     }
 
     compiler::Label store_pointer;
@@ -2551,8 +2552,8 @@
     __ b(&store_pointer);
 
     if (!compiler->is_optimizing()) {
-      locs()->live_registers()->Add(locs()->in(0));
-      locs()->live_registers()->Add(locs()->in(1));
+      locs()->live_registers()->Add(locs()->in(kInstancePos));
+      locs()->live_registers()->Add(locs()->in(kValuePos));
     }
 
     {
@@ -2587,7 +2588,7 @@
 
   const bool compressed = slot().is_compressed();
   if (ShouldEmitStoreBarrier()) {
-    const Register value_reg = locs()->in(1).reg();
+    const Register value_reg = locs()->in(kValuePos).reg();
     if (!compressed) {
       __ StoreIntoObjectOffset(instance_reg, offset_in_bytes, value_reg,
                                CanValueBeSmi());
@@ -2596,16 +2597,16 @@
                                          value_reg, CanValueBeSmi());
     }
   } else {
-    if (locs()->in(1).IsConstant()) {
+    if (locs()->in(kValuePos).IsConstant()) {
+      const auto& value = locs()->in(kValuePos).constant();
       if (!compressed) {
-        __ StoreIntoObjectOffsetNoBarrier(instance_reg, offset_in_bytes,
-                                          locs()->in(1).constant());
+        __ StoreIntoObjectOffsetNoBarrier(instance_reg, offset_in_bytes, value);
       } else {
-        __ StoreCompressedIntoObjectOffsetNoBarrier(
-            instance_reg, offset_in_bytes, locs()->in(1).constant());
+        __ StoreCompressedIntoObjectOffsetNoBarrier(instance_reg,
+                                                    offset_in_bytes, value);
       }
     } else {
-      const Register value_reg = locs()->in(1).reg();
+      const Register value_reg = locs()->in(kValuePos).reg();
       if (!compressed) {
         __ StoreIntoObjectOffsetNoBarrier(instance_reg, offset_in_bytes,
                                           value_reg);
@@ -2852,26 +2853,11 @@
 
   const Register instance_reg = locs()->in(0).reg();
   if (slot().representation() != kTagged) {
-    const Register result_reg = locs()->out(0).reg();
-    switch (slot().representation()) {
-      case kUnboxedInt64:
-        __ Comment("UnboxedInt64LoadFieldInstr");
-        __ LoadFieldFromOffset(result_reg, instance_reg, OffsetInBytes());
-        break;
-      case kUnboxedUint32:
-        __ Comment("UnboxedUint32LoadFieldInstr");
-        __ LoadFieldFromOffset(result_reg, instance_reg, OffsetInBytes(),
-                               compiler::kUnsignedFourBytes);
-        break;
-      case kUnboxedUint8:
-        __ Comment("UnboxedUint8LoadFieldInstr");
-        __ LoadFieldFromOffset(result_reg, instance_reg, OffsetInBytes(),
-                               compiler::kUnsignedByte);
-        break;
-      default:
-        UNIMPLEMENTED();
-        break;
-    }
+    const Register result = locs()->out(0).reg();
+    __ Comment("NativeUnboxedLoadFieldInstr");
+    __ LoadFieldFromOffset(
+        result, instance_reg, OffsetInBytes(),
+        RepresentationUtils::OperandSize(slot().representation()));
     return;
   }
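
On arm64 the per-representation switch collapses into one LoadFieldFromOffset
call keyed by RepresentationUtils::OperandSize. The mapping it relies on,
reconstructed from the cases this change deletes (enum values below are local
stand-ins for the VM's definitions):

    enum Representation { kUnboxedUint8, kUnboxedUint32, kUnboxedInt64 };
    enum OperandSize { kUnsignedByte, kUnsignedFourBytes, kEightBytes };

    OperandSize OperandSizeOf(Representation rep) {
      switch (rep) {
        case kUnboxedUint8:  return kUnsignedByte;       // was kUnsignedByte
        case kUnboxedUint32: return kUnsignedFourBytes;  // was kUnsignedFourBytes
        case kUnboxedInt64:  return kEightBytes;         // was the default load
      }
      return kEightBytes;  // not reached for the representations above
    }
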
 
diff --git a/runtime/vm/compiler/backend/il_ia32.cc b/runtime/vm/compiler/backend/il_ia32.cc
index 4cd399c..c0cfbe2 100644
--- a/runtime/vm/compiler/backend/il_ia32.cc
+++ b/runtime/vm/compiler/backend/il_ia32.cc
@@ -1435,10 +1435,6 @@
   }
 }
 
-DEFINE_BACKEND(StoreUntagged, (NoLocation, Register obj, Register value)) {
-  __ movl(compiler::Address(obj, instr->offset_from_tagged()), value);
-}
-
 LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone,
                                                        bool opt) const {
   const intptr_t kNumInputs = 2;
@@ -2102,30 +2098,44 @@
                                                               bool opt) const {
   const intptr_t kNumInputs = 2;
   const intptr_t kNumTemps =
-      (IsUnboxedStore() && opt) ? 2 : ((IsPotentialUnboxedStore()) ? 3 : 0);
-  LocationSummary* summary = new (zone)
-      LocationSummary(zone, kNumInputs, kNumTemps,
-                      ((IsUnboxedStore() && opt && is_initialization()) ||
-                       IsPotentialUnboxedStore())
-                          ? LocationSummary::kCallOnSlowPath
-                          : LocationSummary::kNoCall);
+      (IsUnboxedDartFieldStore() && opt)
+          ? 2
+          : ((IsPotentialUnboxedDartFieldStore()) ? 3 : 0);
+  LocationSummary* summary = new (zone) LocationSummary(
+      zone, kNumInputs, kNumTemps,
+      ((IsUnboxedDartFieldStore() && opt && is_initialization()) ||
+       IsPotentialUnboxedDartFieldStore())
+          ? LocationSummary::kCallOnSlowPath
+          : LocationSummary::kNoCall);
 
-  summary->set_in(0, Location::RequiresRegister());
-  if (IsUnboxedStore() && opt) {
-    summary->set_in(1, Location::RequiresFpuRegister());
+  summary->set_in(kInstancePos, Location::RequiresRegister());
+  if (slot().representation() != kTagged) {
+    ASSERT(RepresentationUtils::IsUnboxedInteger(slot().representation()));
+    const size_t value_size =
+        RepresentationUtils::ValueSize(slot().representation());
+    if (value_size <= compiler::target::kWordSize) {
+      summary->set_in(kValuePos, Location::RequiresRegister());
+    } else {
+      ASSERT(value_size <= 2 * compiler::target::kWordSize);
+      summary->set_in(kValuePos, Location::Pair(Location::RequiresRegister(),
+                                                Location::RequiresRegister()));
+    }
+  } else if (IsUnboxedDartFieldStore() && opt) {
+    summary->set_in(kValuePos, Location::RequiresFpuRegister());
     summary->set_temp(0, Location::RequiresRegister());
     summary->set_temp(1, Location::RequiresRegister());
-  } else if (IsPotentialUnboxedStore()) {
-    summary->set_in(1, ShouldEmitStoreBarrier() ? Location::WritableRegister()
-                                                : Location::RequiresRegister());
+  } else if (IsPotentialUnboxedDartFieldStore()) {
+    summary->set_in(kValuePos, ShouldEmitStoreBarrier()
+                                   ? Location::WritableRegister()
+                                   : Location::RequiresRegister());
     summary->set_temp(0, Location::RequiresRegister());
     summary->set_temp(1, Location::RequiresRegister());
     summary->set_temp(2, opt ? Location::RequiresFpuRegister()
                              : Location::FpuRegisterLocation(XMM1));
   } else {
-    summary->set_in(1, ShouldEmitStoreBarrier()
-                           ? Location::WritableRegister()
-                           : LocationRegisterOrConstant(value()));
+    summary->set_in(kValuePos, ShouldEmitStoreBarrier()
+                                   ? Location::WritableRegister()
+                                   : LocationRegisterOrConstant(value()));
   }
   return summary;
 }
@@ -2158,12 +2168,33 @@
 
   compiler::Label skip_store;
 
-  const Register instance_reg = locs()->in(0).reg();
+  const Register instance_reg = locs()->in(kInstancePos).reg();
   const intptr_t offset_in_bytes = OffsetInBytes();
   ASSERT(offset_in_bytes > 0);  // Field is finalized and points after header.
 
-  if (IsUnboxedStore() && compiler->is_optimizing()) {
-    XmmRegister value = locs()->in(1).fpu_reg();
+  if (slot().representation() != kTagged) {
+    auto const rep = slot().representation();
+    ASSERT(RepresentationUtils::IsUnboxedInteger(rep));
+    const size_t value_size = RepresentationUtils::ValueSize(rep);
+    __ Comment("NativeUnboxedStoreInstanceFieldInstr");
+    if (value_size <= compiler::target::kWordSize) {
+      const Register value = locs()->in(kValuePos).reg();
+      __ StoreFieldToOffset(value, instance_reg, offset_in_bytes,
+                            RepresentationUtils::OperandSize(rep));
+    } else {
+      auto const in_pair = locs()->in(kValuePos).AsPairLocation();
+      const Register in_lo = in_pair->At(0).reg();
+      const Register in_hi = in_pair->At(1).reg();
+      const intptr_t offset_lo = OffsetInBytes() - kHeapObjectTag;
+      const intptr_t offset_hi = offset_lo + compiler::target::kWordSize;
+      __ StoreToOffset(in_lo, instance_reg, offset_lo);
+      __ StoreToOffset(in_hi, instance_reg, offset_hi);
+    }
+    return;
+  }
+
+  if (IsUnboxedDartFieldStore() && compiler->is_optimizing()) {
+    XmmRegister value = locs()->in(kValuePos).fpu_reg();
     Register temp = locs()->temp(0).reg();
     Register temp2 = locs()->temp(1).reg();
     const intptr_t cid = slot().field().UnboxedFieldCid();
@@ -2213,9 +2244,9 @@
     return;
   }
 
-  if (IsPotentialUnboxedStore()) {
+  if (IsPotentialUnboxedDartFieldStore()) {
     __ Comment("PotentialUnboxedStore");
-    Register value_reg = locs()->in(1).reg();
+    Register value_reg = locs()->in(kValuePos).reg();
     Register temp = locs()->temp(0).reg();
     Register temp2 = locs()->temp(1).reg();
     FpuRegister fpu_temp = locs()->temp(2).fpu_reg();
@@ -2224,7 +2255,7 @@
       // Value input is a writable register and should be manually preserved
       // across allocation slow-path.  Add it to live_registers set which
       // determines which registers to preserve.
-      locs()->live_registers()->Add(locs()->in(1), kTagged);
+      locs()->live_registers()->Add(locs()->in(kValuePos), kTagged);
     }
 
     compiler::Label store_pointer;
@@ -2258,8 +2289,8 @@
     __ jmp(&store_pointer);
 
     if (!compiler->is_optimizing()) {
-      locs()->live_registers()->Add(locs()->in(0));
-      locs()->live_registers()->Add(locs()->in(1));
+      locs()->live_registers()->Add(locs()->in(kInstancePos));
+      locs()->live_registers()->Add(locs()->in(kValuePos));
     }
 
     {
@@ -2298,17 +2329,17 @@
   }
 
   if (ShouldEmitStoreBarrier()) {
-    Register value_reg = locs()->in(1).reg();
+    Register value_reg = locs()->in(kValuePos).reg();
     __ StoreIntoObject(instance_reg,
                        compiler::FieldAddress(instance_reg, offset_in_bytes),
                        value_reg, CanValueBeSmi());
   } else {
-    if (locs()->in(1).IsConstant()) {
+    if (locs()->in(kValuePos).IsConstant()) {
       __ StoreIntoObjectNoBarrier(
           instance_reg, compiler::FieldAddress(instance_reg, offset_in_bytes),
-          locs()->in(1).constant());
+          locs()->in(kValuePos).constant());
     } else {
-      Register value_reg = locs()->in(1).reg();
+      Register value_reg = locs()->in(kValuePos).reg();
       __ StoreIntoObjectNoBarrier(
           instance_reg, compiler::FieldAddress(instance_reg, offset_in_bytes),
           value_reg);
@@ -2545,34 +2576,22 @@
 
   const Register instance_reg = locs()->in(0).reg();
   if (slot().representation() != kTagged) {
-    switch (slot().representation()) {
-      case kUnboxedInt64: {
-        auto const out_pair = locs()->out(0).AsPairLocation();
-        const Register out_lo = out_pair->At(0).reg();
-        const Register out_hi = out_pair->At(1).reg();
-        const intptr_t offset_lo = OffsetInBytes();
-        const intptr_t offset_hi = offset_lo + compiler::target::kWordSize;
-        __ Comment("UnboxedInt64LoadFieldInstr");
-        __ movl(out_lo, compiler::FieldAddress(instance_reg, offset_lo));
-        __ movl(out_hi, compiler::FieldAddress(instance_reg, offset_hi));
-        break;
-      }
-      case kUnboxedUint32: {
-        const Register result = locs()->out(0).reg();
-        __ Comment("UnboxedUint32LoadFieldInstr");
-        __ movl(result, compiler::FieldAddress(instance_reg, OffsetInBytes()));
-        break;
-      }
-      case kUnboxedUint8: {
-        const Register result = locs()->out(0).reg();
-        __ Comment("UnboxedUint8LoadFieldInstr");
-        __ movzxb(result,
-                  compiler::FieldAddress(instance_reg, OffsetInBytes()));
-        break;
-      }
-      default:
-        UNIMPLEMENTED();
-        break;
+    ASSERT(!calls_initializer());
+    auto const rep = slot().representation();
+    const size_t value_size = RepresentationUtils::ValueSize(rep);
+    __ Comment("NativeUnboxedLoadFieldInstr");
+    if (value_size <= compiler::target::kWordSize) {
+      auto const result = locs()->out(0).reg();
+      __ LoadFieldFromOffset(result, instance_reg, OffsetInBytes(),
+                             RepresentationUtils::OperandSize(rep));
+    } else {
+      auto const out_pair = locs()->out(0).AsPairLocation();
+      const Register out_lo = out_pair->At(0).reg();
+      const Register out_hi = out_pair->At(1).reg();
+      const intptr_t offset_lo = OffsetInBytes() - kHeapObjectTag;
+      const intptr_t offset_hi = offset_lo + compiler::target::kWordSize;
+      __ LoadFromOffset(out_lo, instance_reg, offset_lo);
+      __ LoadFromOffset(out_hi, instance_reg, offset_hi);
     }
     return;
   }
diff --git a/runtime/vm/compiler/backend/il_x64.cc b/runtime/vm/compiler/backend/il_x64.cc
index 03d9556..82faaac 100644
--- a/runtime/vm/compiler/backend/il_x64.cc
+++ b/runtime/vm/compiler/backend/il_x64.cc
@@ -1629,10 +1629,6 @@
   }
 }
 
-DEFINE_BACKEND(StoreUntagged, (NoLocation, Register obj, Register value)) {
-  __ movq(compiler::Address(obj, instr->offset_from_tagged()), value);
-}
-
 class BoxAllocationSlowPath : public TemplateSlowPathCode<Instruction> {
  public:
   BoxAllocationSlowPath(Instruction* instruction,
@@ -2434,40 +2430,42 @@
 LocationSummary* StoreInstanceFieldInstr::MakeLocationSummary(Zone* zone,
                                                               bool opt) const {
   const intptr_t kNumInputs = 2;
-  const intptr_t kNumTemps = (IsUnboxedStore() && opt)
+  const intptr_t kNumTemps = (IsUnboxedDartFieldStore() && opt)
                                  ? (FLAG_precompiled_mode ? 0 : 2)
-                                 : (IsPotentialUnboxedStore() ? 3 : 0);
-  LocationSummary* summary = new (zone)
-      LocationSummary(zone, kNumInputs, kNumTemps,
-                      (!FLAG_precompiled_mode &&
-                       ((IsUnboxedStore() && opt && is_initialization()) ||
-                        IsPotentialUnboxedStore()))
-                          ? LocationSummary::kCallOnSlowPath
-                          : LocationSummary::kNoCall);
+                                 : (IsPotentialUnboxedDartFieldStore() ? 3 : 0);
+  LocationSummary* summary = new (zone) LocationSummary(
+      zone, kNumInputs, kNumTemps,
+      (!FLAG_precompiled_mode &&
+       ((IsUnboxedDartFieldStore() && opt && is_initialization()) ||
+        IsPotentialUnboxedDartFieldStore()))
+          ? LocationSummary::kCallOnSlowPath
+          : LocationSummary::kNoCall);
 
-  summary->set_in(0, Location::RequiresRegister());
-  if (IsUnboxedStore() && opt) {
-    if (slot().field().is_non_nullable_integer()) {
-      ASSERT(FLAG_precompiled_mode);
-      summary->set_in(1, Location::RequiresRegister());
-    } else {
-      summary->set_in(1, Location::RequiresFpuRegister());
-    }
+  summary->set_in(kInstancePos, Location::RequiresRegister());
+  if (slot().representation() != kTagged) {
+    ASSERT(RepresentationUtils::IsUnboxedInteger(slot().representation()));
+    ASSERT(RepresentationUtils::ValueSize(slot().representation()) <=
+           compiler::target::kWordSize);
+    summary->set_in(kValuePos, Location::RequiresRegister());
+  } else if (IsUnboxedDartFieldStore() && opt) {
+    summary->set_in(kValuePos, Location::RequiresFpuRegister());
     if (!FLAG_precompiled_mode) {
       summary->set_temp(0, Location::RequiresRegister());
       summary->set_temp(1, Location::RequiresRegister());
     }
-  } else if (IsPotentialUnboxedStore()) {
-    summary->set_in(1, ShouldEmitStoreBarrier() ? Location::WritableRegister()
-                                                : Location::RequiresRegister());
+  } else if (IsPotentialUnboxedDartFieldStore()) {
+    summary->set_in(kValuePos, ShouldEmitStoreBarrier()
+                                   ? Location::WritableRegister()
+                                   : Location::RequiresRegister());
     summary->set_temp(0, Location::RequiresRegister());
     summary->set_temp(1, Location::RequiresRegister());
     summary->set_temp(2, opt ? Location::RequiresFpuRegister()
                              : Location::FpuRegisterLocation(XMM1));
   } else {
-    summary->set_in(1, ShouldEmitStoreBarrier()
-                           ? Location::RegisterLocation(kWriteBarrierValueReg)
-                           : LocationRegisterOrConstant(value()));
+    summary->set_in(kValuePos,
+                    ShouldEmitStoreBarrier()
+                        ? Location::RegisterLocation(kWriteBarrierValueReg)
+                        : LocationRegisterOrConstant(value()));
   }
   return summary;
 }
@@ -2498,19 +2496,22 @@
 
   compiler::Label skip_store;
 
-  const Register instance_reg = locs()->in(0).reg();
+  const Register instance_reg = locs()->in(kInstancePos).reg();
   const intptr_t offset_in_bytes = OffsetInBytes();
   ASSERT(offset_in_bytes > 0);  // Field is finalized and points after header.
 
-  if (IsUnboxedStore() && compiler->is_optimizing()) {
-    if (slot().field().is_non_nullable_integer()) {
-      const Register value = locs()->in(1).reg();
-      __ Comment("UnboxedIntegerStoreInstanceFieldInstr");
-      __ movq(compiler::FieldAddress(instance_reg, offset_in_bytes), value);
-      return;
-    }
+  if (slot().representation() != kTagged) {
+    ASSERT(RepresentationUtils::IsUnboxedInteger(slot().representation()));
+    const Register value = locs()->in(kValuePos).reg();
+    __ Comment("NativeUnboxedStoreInstanceFieldInstr");
+    __ StoreFieldToOffset(
+        value, instance_reg, offset_in_bytes,
+        RepresentationUtils::OperandSize(slot().representation()));
+    return;
+  }
 
-    XmmRegister value = locs()->in(1).fpu_reg();
+  if (IsUnboxedDartFieldStore() && compiler->is_optimizing()) {
+    XmmRegister value = locs()->in(kValuePos).fpu_reg();
     const intptr_t cid = slot().field().UnboxedFieldCid();
 
     // Real unboxed field
@@ -2584,8 +2585,8 @@
     return;
   }
 
-  if (IsPotentialUnboxedStore()) {
-    Register value_reg = locs()->in(1).reg();
+  if (IsPotentialUnboxedDartFieldStore()) {
+    Register value_reg = locs()->in(kValuePos).reg();
     Register temp = locs()->temp(0).reg();
     Register temp2 = locs()->temp(1).reg();
     FpuRegister fpu_temp = locs()->temp(2).fpu_reg();
@@ -2593,7 +2594,7 @@
     if (ShouldEmitStoreBarrier()) {
       // Value input is a writable register and should be manually preserved
       // across allocation slow-path.
-      locs()->live_registers()->Add(locs()->in(1), kTagged);
+      locs()->live_registers()->Add(locs()->in(kValuePos), kTagged);
     }
 
     compiler::Label store_pointer;
@@ -2627,8 +2628,8 @@
     __ jmp(&store_pointer);
 
     if (!compiler->is_optimizing()) {
-      locs()->live_registers()->Add(locs()->in(0));
-      locs()->live_registers()->Add(locs()->in(1));
+      locs()->live_registers()->Add(locs()->in(kInstancePos));
+      locs()->live_registers()->Add(locs()->in(kValuePos));
     }
 
     {
@@ -2668,7 +2669,7 @@
 
   const bool compressed = slot().is_compressed();
   if (ShouldEmitStoreBarrier()) {
-    Register value_reg = locs()->in(1).reg();
+    Register value_reg = locs()->in(kValuePos).reg();
     if (!compressed) {
       __ StoreIntoObject(instance_reg,
                          compiler::FieldAddress(instance_reg, offset_in_bytes),
@@ -2679,18 +2680,19 @@
           value_reg, CanValueBeSmi());
     }
   } else {
-    if (locs()->in(1).IsConstant()) {
+    if (locs()->in(kValuePos).IsConstant()) {
+      const auto& value = locs()->in(kValuePos).constant();
       if (!compressed) {
         __ StoreIntoObjectNoBarrier(
             instance_reg, compiler::FieldAddress(instance_reg, offset_in_bytes),
-            locs()->in(1).constant());
+            value);
       } else {
         __ StoreCompressedIntoObjectNoBarrier(
             instance_reg, compiler::FieldAddress(instance_reg, offset_in_bytes),
-            locs()->in(1).constant());
+            value);
       }
     } else {
-      Register value_reg = locs()->in(1).reg();
+      Register value_reg = locs()->in(kValuePos).reg();
       if (!compressed) {
         __ StoreIntoObjectNoBarrier(
             instance_reg, compiler::FieldAddress(instance_reg, offset_in_bytes),
@@ -2943,25 +2945,10 @@
   const Register instance_reg = locs()->in(0).reg();
   if (slot().representation() != kTagged) {
     const Register result = locs()->out(0).reg();
-    switch (slot().representation()) {
-      case kUnboxedInt64:
-        __ Comment("UnboxedInt64LoadFieldInstr");
-        __ movq(result, compiler::FieldAddress(instance_reg, OffsetInBytes()));
-        break;
-      case kUnboxedUint32:
-        __ Comment("UnboxedUint32LoadFieldInstr");
-        __ movl(result, compiler::FieldAddress(instance_reg, OffsetInBytes()));
-        break;
-      case kUnboxedUint8: {
-        __ Comment("UnboxedUint8LoadFieldInstr");
-        __ movzxb(result,
-                  compiler::FieldAddress(instance_reg, OffsetInBytes()));
-        break;
-      }
-      default:
-        UNIMPLEMENTED();
-        break;
-    }
+    __ Comment("NativeUnboxedLoadFieldInstr");
+    __ LoadFieldFromOffset(
+        result, instance_reg, OffsetInBytes(),
+        RepresentationUtils::OperandSize(slot().representation()));
     return;
   }
 
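The net effect in il_x64.cc: both the unboxed-slot load and store paths now funnel through a single size-dispatched assembler entry point instead of a hand-written switch per representation. A minimal sketch of the pattern (EmitUnboxedFieldLoad is a hypothetical wrapper for illustration; LoadFieldFromOffset and RepresentationUtils::OperandSize are the entry points actually used above):

    // Map the representation to an operand size once; the assembler then
    // selects the matching mov variant (movq / movl / movzxb on x64).
    static void EmitUnboxedFieldLoad(compiler::Assembler* assembler,
                                     Register result,
                                     Register instance,
                                     intptr_t offset_in_bytes,
                                     Representation rep) {
      assembler->LoadFieldFromOffset(
          result, instance, offset_in_bytes,
          RepresentationUtils::OperandSize(rep));
    }
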
diff --git a/runtime/vm/compiler/backend/locations.cc b/runtime/vm/compiler/backend/locations.cc
index 858150d..abcbf3f 100644
--- a/runtime/vm/compiler/backend/locations.cc
+++ b/runtime/vm/compiler/backend/locations.cc
@@ -59,6 +59,33 @@
 #undef REP_SIZEOF_CLAUSE
 #undef REP_IN_SET_CLAUSE
 
+compiler::OperandSize RepresentationUtils::OperandSize(Representation rep) {
+  if (rep == kTagged || rep == kUntagged) {
+    return compiler::kObjectBytes;
+  }
+  ASSERT(IsUnboxedInteger(rep));
+  switch (ValueSize(rep)) {
+    case 8:
+      ASSERT_EQUAL(compiler::target::kWordSize, 8);
+      return compiler::kEightBytes;
+    case 4:
+      return IsUnsigned(rep) ? compiler::kUnsignedFourBytes
+                             : compiler::kFourBytes;
+    case 2:
+      // No kUnboxed{Uint,Int}16 yet.
+      UNIMPLEMENTED();
+      break;
+    case 1:
+      if (!IsUnsigned(rep)) {
+        // No kUnboxedInt8 yet.
+        UNIMPLEMENTED();
+      }
+      return compiler::kUnsignedByte;
+  }
+  UNREACHABLE();
+  return compiler::kObjectBytes;
+}
+
 const char* Location::RepresentationToCString(Representation repr) {
   switch (repr) {
 #define REPR_CASE(Name, __, ___)                                               \
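For reference, the mapping the new OperandSize() helper implements, summarized from the switch above (no behavior beyond what the code shows):

    // kTagged / kUntagged            -> kObjectBytes
    // 8-byte unboxed integers        -> kEightBytes (64-bit targets only,
    //                                   per the ASSERT_EQUAL)
    // 4-byte unboxed integers        -> kUnsignedFourBytes / kFourBytes
    // 1-byte unsigned integers       -> kUnsignedByte
    // 2-byte and 1-byte signed reps  -> UNIMPLEMENTED() until
    //                                   kUnboxed{Uint,Int}16 / kUnboxedInt8
    //                                   representations exist
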
diff --git a/runtime/vm/compiler/backend/locations.h b/runtime/vm/compiler/backend/locations.h
index c758233..a8ea373 100644
--- a/runtime/vm/compiler/backend/locations.h
+++ b/runtime/vm/compiler/backend/locations.h
@@ -77,6 +77,8 @@
 
   // Whether the values described by this representation are unsigned integers.
   static bool IsUnsigned(Representation rep);
+
+  static compiler::OperandSize OperandSize(Representation rep);
 };
 
 // 'UnboxedFfiIntPtr' should be able to hold a pointer of the target word-size.
diff --git a/runtime/vm/compiler/backend/range_analysis.cc b/runtime/vm/compiler/backend/range_analysis.cc
index f38dc9b2..3992c8b 100644
--- a/runtime/vm/compiler/backend/range_analysis.cc
+++ b/runtime/vm/compiler/backend/range_analysis.cc
@@ -2822,14 +2822,13 @@
       UNREACHABLE();
       break;
 
-    case Slot::Kind::kClosureData_default_type_arguments_kind:
-    case Slot::Kind::kFunction_kind_tag:
-    case Slot::Kind::kFunction_packed_fields:
-    case Slot::Kind::kTypeParameter_flags:
+#define UNBOXED_NATIVE_SLOT_CASE(Class, Untagged, Field, Rep, IsFinal)         \
+  case Slot::Kind::k##Class##_##Field:
+      UNBOXED_NATIVE_SLOTS_LIST(UNBOXED_NATIVE_SLOT_CASE)
+#undef UNBOXED_NATIVE_SLOT_CASE
       *range = Range::Full(RepresentationToRangeSize(slot().representation()));
       break;
 
-    case Slot::Kind::kFunctionType_packed_fields:
     case Slot::Kind::kClosure_hash:
     case Slot::Kind::kLinkedHashMap_hash_mask:
     case Slot::Kind::kLinkedHashMap_used_data:
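The hand-maintained case list is replaced with cases generated from UNBOXED_NATIVE_SLOTS_LIST, so any slot added to that list (including the new Pointer and TypedDataBase entries below) automatically receives full-range information here. After preprocessing, the macro yields one case label per list entry; for example:

    // UNBOXED_NATIVE_SLOT_CASE applied to
    //   V(Function, UntaggedFunction, kind_tag, Uint32, FINAL)
    // expands to:
    case Slot::Kind::kFunction_kind_tag:
    // ...and likewise for every other entry, all sharing:
    *range = Range::Full(RepresentationToRangeSize(slot().representation()));
    break;
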
diff --git a/runtime/vm/compiler/backend/redundancy_elimination.cc b/runtime/vm/compiler/backend/redundancy_elimination.cc
index 99176ee..920a8cf 100644
--- a/runtime/vm/compiler/backend/redundancy_elimination.cc
+++ b/runtime/vm/compiler/backend/redundancy_elimination.cc
@@ -1452,7 +1452,6 @@
     case Instruction::kStoreStaticField:
     case Instruction::kStoreIndexed:
     case Instruction::kStoreIndexedUnsafe:
-    case Instruction::kStoreUntagged:
       return true;
     default:
       return instr->HasUnknownSideEffects() || instr->MayThrow();
diff --git a/runtime/vm/compiler/backend/slot.h b/runtime/vm/compiler/backend/slot.h
index 4465d4b..3292e7f 100644
--- a/runtime/vm/compiler/backend/slot.h
+++ b/runtime/vm/compiler/backend/slot.h
@@ -135,6 +135,8 @@
   V(Function, UntaggedFunction, kind_tag, Uint32, FINAL)                       \
   V(Function, UntaggedFunction, packed_fields, Uint32, FINAL)                  \
   V(FunctionType, UntaggedFunctionType, packed_fields, Uint32, FINAL)          \
+  V(Pointer, UntaggedPointer, data_field, FfiIntPtr, FINAL)                    \
+  V(TypedDataBase, UntaggedTypedDataBase, data_field, IntPtr, VAR)             \
   V(TypeParameter, UntaggedTypeParameter, flags, Uint8, FINAL)
 
 // For uses that do not need the exact_type (boxed) or representation (unboxed)
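Each row of this V-list declares one statically unboxed native slot; the two new entries are what let the flow-graph builders below replace raw StoreUntagged stores of Pointer and TypedDataBase data pointers with typed StoreNativeField stores. In rough terms, since the generating macros live elsewhere in slot.h, an entry provides (a sketch of intent, not the literal expansion):

    // V(Pointer, UntaggedPointer, data_field, FfiIntPtr, FINAL) yields:
    //   Slot::Kind::kPointer_data_field  - enum value, as matched in
    //                                      range_analysis.cc above
    //   Slot::Pointer_data_field()       - accessor, as used in
    //                                      kernel_to_il.cc below
    // with an unboxed FfiIntPtr representation. FINAL marks the slot as
    // never reassigned; the TypedDataBase entry is VAR because a view's
    // inner data pointer is updated after allocation.
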
diff --git a/runtime/vm/compiler/frontend/base_flow_graph_builder.cc b/runtime/vm/compiler/frontend/base_flow_graph_builder.cc
index ecdb481..4d789d4 100644
--- a/runtime/vm/compiler/frontend/base_flow_graph_builder.cc
+++ b/runtime/vm/compiler/frontend/base_flow_graph_builder.cc
@@ -392,13 +392,6 @@
   return Fragment(load);
 }
 
-Fragment BaseFlowGraphBuilder::StoreUntagged(intptr_t offset) {
-  Value* value = Pop();
-  Value* object = Pop();
-  auto store = new (Z) StoreUntaggedInstr(object, value, offset);
-  return Fragment(store);
-}
-
 Fragment BaseFlowGraphBuilder::ConvertUntaggedToUnboxed(
     Representation to_representation) {
   ASSERT(to_representation == kUnboxedIntPtr ||
@@ -508,9 +501,9 @@
   }
 }
 
-Fragment BaseFlowGraphBuilder::StoreInstanceField(
+Fragment BaseFlowGraphBuilder::StoreNativeField(
     TokenPosition position,
-    const Slot& field,
+    const Slot& slot,
     StoreInstanceFieldInstr::Kind
         kind /* = StoreInstanceFieldInstr::Kind::kOther */,
     StoreBarrierType emit_store_barrier /* = kEmitStoreBarrier */) {
@@ -519,7 +512,7 @@
     emit_store_barrier = kNoStoreBarrier;
   }
   StoreInstanceFieldInstr* store =
-      new (Z) StoreInstanceFieldInstr(field, Pop(), value, emit_store_barrier,
+      new (Z) StoreInstanceFieldInstr(slot, Pop(), value, emit_store_barrier,
                                       InstructionSource(position), kind);
   return Fragment(store);
 }
@@ -529,16 +522,9 @@
     StoreInstanceFieldInstr::Kind
         kind /* = StoreInstanceFieldInstr::Kind::kOther */,
     StoreBarrierType emit_store_barrier) {
-  Value* value = Pop();
-  if (value->BindsToConstant()) {
-    emit_store_barrier = kNoStoreBarrier;
-  }
-
-  StoreInstanceFieldInstr* store = new (Z) StoreInstanceFieldInstr(
-      MayCloneField(Z, field), Pop(), value, emit_store_barrier,
-      InstructionSource(), parsed_function_, kind);
-
-  return Fragment(store);
+  return StoreNativeField(TokenPosition::kNoSource,
+                          Slot::Get(MayCloneField(Z, field), parsed_function_),
+                          kind, emit_store_barrier);
 }
 
 Fragment BaseFlowGraphBuilder::StoreInstanceFieldGuarded(
@@ -573,7 +559,8 @@
           new (Z) GuardFieldTypeInstr(Pop(), field_clone, GetNextDeoptId());
     }
   }
-  instructions += StoreInstanceField(field_clone, kind);
+  instructions +=
+      StoreNativeField(Slot::Get(field_clone, parsed_function_), kind);
   return instructions;
 }
 
@@ -656,7 +643,7 @@
     LocalVariable* value = MakeTemporary();
     instructions += LoadContextAt(variable->owner()->context_level());
     instructions += LoadLocal(value);
-    instructions += StoreInstanceField(
+    instructions += StoreNativeField(
         position, Slot::GetContextVariableSlotFor(thread_, *variable));
     return instructions;
   }
@@ -1020,20 +1007,20 @@
 
   code += LoadLocal(context);
   code += LoadLocal(pointer);
-  code += StoreInstanceField(TokenPosition::kNoSource, *context_slots[0]);
+  code += StoreNativeField(*context_slots[0]);
 
   code += AllocateClosure(TokenPosition::kNoSource, target);
   LocalVariable* closure = MakeTemporary();
 
   code += LoadLocal(closure);
   code += LoadLocal(context);
-  code += StoreInstanceField(TokenPosition::kNoSource, Slot::Closure_context(),
-                             StoreInstanceFieldInstr::Kind::kInitializing);
+  code += StoreNativeField(Slot::Closure_context(),
+                           StoreInstanceFieldInstr::Kind::kInitializing);
 
   code += LoadLocal(closure);
   code += Constant(target);
-  code += StoreInstanceField(TokenPosition::kNoSource, Slot::Closure_function(),
-                             StoreInstanceFieldInstr::Kind::kInitializing);
+  code += StoreNativeField(Slot::Closure_function(),
+                           StoreInstanceFieldInstr::Kind::kInitializing);
 
   // Drop address and context.
   code += DropTempsPreserveTop(2);
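With StoreUntagged removed and StoreInstanceField(const Field&) forwarding through Slot::Get(MayCloneField(...)), every field store in the builders now flows through StoreNativeField. A minimal sketch of the resulting idiom, using slots and builder methods shown above (the free-standing helper is hypothetical; in the VM this code lives inside the builder itself):

    // Stack discipline: push the instance, push the value, emit one store.
    Fragment StoreClosureContext(BaseFlowGraphBuilder* b,
                                 LocalVariable* closure,
                                 LocalVariable* context) {
      Fragment code;
      code += b->LoadLocal(closure);  // instance
      code += b->LoadLocal(context);  // value
      code += b->StoreNativeField(Slot::Closure_context(),
                                  StoreInstanceFieldInstr::Kind::kInitializing);
      return code;
    }
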
diff --git a/runtime/vm/compiler/frontend/base_flow_graph_builder.h b/runtime/vm/compiler/frontend/base_flow_graph_builder.h
index 917e820..ae5bcb3 100644
--- a/runtime/vm/compiler/frontend/base_flow_graph_builder.h
+++ b/runtime/vm/compiler/frontend/base_flow_graph_builder.h
@@ -171,7 +171,6 @@
                        AlignmentType alignment = kAlignedAccess);
 
   Fragment LoadUntagged(intptr_t offset);
-  Fragment StoreUntagged(intptr_t offset);
   Fragment ConvertUntaggedToUnboxed(Representation to);
   Fragment ConvertUnboxedToUntagged(Representation from);
   Fragment UnboxSmiToIntptr();
@@ -192,12 +191,20 @@
   Fragment GuardFieldLength(const Field& field, intptr_t deopt_id);
   Fragment GuardFieldClass(const Field& field, intptr_t deopt_id);
   static const Field& MayCloneField(Zone* zone, const Field& field);
-  Fragment StoreInstanceField(
+  Fragment StoreNativeField(
       TokenPosition position,
-      const Slot& field,
+      const Slot& slot,
       StoreInstanceFieldInstr::Kind kind =
           StoreInstanceFieldInstr::Kind::kOther,
       StoreBarrierType emit_store_barrier = kEmitStoreBarrier);
+  Fragment StoreNativeField(
+      const Slot& slot,
+      StoreInstanceFieldInstr::Kind kind =
+          StoreInstanceFieldInstr::Kind::kOther,
+      StoreBarrierType emit_store_barrier = kEmitStoreBarrier) {
+    return StoreNativeField(TokenPosition::kNoSource, slot, kind,
+                            emit_store_barrier);
+  }
   Fragment StoreInstanceField(
       const Field& field,
       StoreInstanceFieldInstr::Kind kind =
diff --git a/runtime/vm/compiler/frontend/kernel_binary_flowgraph.cc b/runtime/vm/compiler/frontend/kernel_binary_flowgraph.cc
index 47e9439..583f59b 100644
--- a/runtime/vm/compiler/frontend/kernel_binary_flowgraph.cc
+++ b/runtime/vm/compiler/frontend/kernel_binary_flowgraph.cc
@@ -668,8 +668,7 @@
         // Copy the parameter from the stack to the context.
         body += LoadLocal(context);
         body += LoadLocal(&raw_parameter);
-        body += flow_graph_builder_->StoreInstanceField(
-            TokenPosition::kNoSource,
+        body += flow_graph_builder_->StoreNativeField(
             Slot::GetContextVariableSlotFor(thread(), *variable),
             StoreInstanceFieldInstr::Kind::kInitializing);
       }
@@ -3804,8 +3803,8 @@
 
   instructions += LoadLocal(new_closure);
   instructions += LoadLocal(type_args_vec);
-  instructions += flow_graph_builder_->StoreInstanceField(
-      TokenPosition::kNoSource, Slot::Closure_delayed_type_arguments(),
+  instructions += flow_graph_builder_->StoreNativeField(
+      Slot::Closure_delayed_type_arguments(),
       StoreInstanceFieldInstr::Kind::kInitializing);
   instructions += Drop();  // Drop type args.
 
@@ -3814,17 +3813,16 @@
   instructions += LoadLocal(original_closure);
   instructions +=
       flow_graph_builder_->LoadNativeField(Slot::Closure_function());
-  instructions += flow_graph_builder_->StoreInstanceField(
-      TokenPosition::kNoSource, Slot::Closure_function(),
-      StoreInstanceFieldInstr::Kind::kInitializing);
+  instructions += flow_graph_builder_->StoreNativeField(
+      Slot::Closure_function(), StoreInstanceFieldInstr::Kind::kInitializing);
 
   // Copy over the instantiator type arguments.
   instructions += LoadLocal(new_closure);
   instructions += LoadLocal(original_closure);
   instructions += flow_graph_builder_->LoadNativeField(
       Slot::Closure_instantiator_type_arguments());
-  instructions += flow_graph_builder_->StoreInstanceField(
-      TokenPosition::kNoSource, Slot::Closure_instantiator_type_arguments(),
+  instructions += flow_graph_builder_->StoreNativeField(
+      Slot::Closure_instantiator_type_arguments(),
       StoreInstanceFieldInstr::Kind::kInitializing);
 
   // Copy over the function type arguments.
@@ -3832,17 +3830,16 @@
   instructions += LoadLocal(original_closure);
   instructions += flow_graph_builder_->LoadNativeField(
       Slot::Closure_function_type_arguments());
-  instructions += flow_graph_builder_->StoreInstanceField(
-      TokenPosition::kNoSource, Slot::Closure_function_type_arguments(),
+  instructions += flow_graph_builder_->StoreNativeField(
+      Slot::Closure_function_type_arguments(),
       StoreInstanceFieldInstr::Kind::kInitializing);
 
   // Copy over the context.
   instructions += LoadLocal(new_closure);
   instructions += LoadLocal(original_closure);
   instructions += flow_graph_builder_->LoadNativeField(Slot::Closure_context());
-  instructions += flow_graph_builder_->StoreInstanceField(
-      TokenPosition::kNoSource, Slot::Closure_context(),
-      StoreInstanceFieldInstr::Kind::kInitializing);
+  instructions += flow_graph_builder_->StoreNativeField(
+      Slot::Closure_context(), StoreInstanceFieldInstr::Kind::kInitializing);
 
   instructions += DropTempsPreserveTop(1);  // Drop old closure.
 
@@ -4981,8 +4978,8 @@
   if (!function.HasInstantiatedSignature(kCurrentClass)) {
     instructions += LoadLocal(closure);
     instructions += LoadInstantiatorTypeArguments();
-    instructions += flow_graph_builder_->StoreInstanceField(
-        TokenPosition::kNoSource, Slot::Closure_instantiator_type_arguments(),
+    instructions += flow_graph_builder_->StoreNativeField(
+        Slot::Closure_instantiator_type_arguments(),
         StoreInstanceFieldInstr::Kind::kInitializing);
   }
 
@@ -4990,8 +4987,8 @@
   // type parameters.
   instructions += LoadLocal(closure);
   instructions += LoadFunctionTypeArguments();
-  instructions += flow_graph_builder_->StoreInstanceField(
-      TokenPosition::kNoSource, Slot::Closure_function_type_arguments(),
+  instructions += flow_graph_builder_->StoreNativeField(
+      Slot::Closure_function_type_arguments(),
       StoreInstanceFieldInstr::Kind::kInitializing);
 
   if (function.IsGeneric()) {
@@ -4999,23 +4996,21 @@
     // delayed_type_arguments.
     instructions += LoadLocal(closure);
     instructions += Constant(Object::empty_type_arguments());
-    instructions += flow_graph_builder_->StoreInstanceField(
-        TokenPosition::kNoSource, Slot::Closure_delayed_type_arguments(),
+    instructions += flow_graph_builder_->StoreNativeField(
+        Slot::Closure_delayed_type_arguments(),
         StoreInstanceFieldInstr::Kind::kInitializing);
   }
 
   // Store the function and the context in the closure.
   instructions += LoadLocal(closure);
   instructions += Constant(function);
-  instructions += flow_graph_builder_->StoreInstanceField(
-      TokenPosition::kNoSource, Slot::Closure_function(),
-      StoreInstanceFieldInstr::Kind::kInitializing);
+  instructions += flow_graph_builder_->StoreNativeField(
+      Slot::Closure_function(), StoreInstanceFieldInstr::Kind::kInitializing);
 
   instructions += LoadLocal(closure);
   instructions += LoadLocal(parsed_function()->current_context_var());
-  instructions += flow_graph_builder_->StoreInstanceField(
-      TokenPosition::kNoSource, Slot::Closure_context(),
-      StoreInstanceFieldInstr::Kind::kInitializing);
+  instructions += flow_graph_builder_->StoreNativeField(
+      Slot::Closure_context(), StoreInstanceFieldInstr::Kind::kInitializing);
 
   return instructions;
 }
diff --git a/runtime/vm/compiler/frontend/kernel_to_il.cc b/runtime/vm/compiler/frontend/kernel_to_il.cc
index 94867c4..3d4e86f 100644
--- a/runtime/vm/compiler/frontend/kernel_to_il.cc
+++ b/runtime/vm/compiler/frontend/kernel_to_il.cc
@@ -130,9 +130,8 @@
   LocalVariable* context = MakeTemporary();
   instructions += LoadLocal(context);
   instructions += LoadLocal(parsed_function_->current_context_var());
-  instructions +=
-      StoreInstanceField(TokenPosition::kNoSource, Slot::Context_parent(),
-                         StoreInstanceFieldInstr::Kind::kInitializing);
+  instructions += StoreNativeField(
+      Slot::Context_parent(), StoreInstanceFieldInstr::Kind::kInitializing);
   instructions += StoreLocal(TokenPosition::kNoSource,
                              parsed_function_->current_context_var());
   ++context_depth_;
@@ -268,15 +267,13 @@
   if (exception_var->is_captured()) {
     instructions += LoadLocal(context_variable);
     instructions += LoadLocal(raw_exception_var);
-    instructions += StoreInstanceField(
-        TokenPosition::kNoSource,
+    instructions += StoreNativeField(
         Slot::GetContextVariableSlotFor(thread_, *exception_var));
   }
   if (stacktrace_var->is_captured()) {
     instructions += LoadLocal(context_variable);
     instructions += LoadLocal(raw_stacktrace_var);
-    instructions += StoreInstanceField(
-        TokenPosition::kNoSource,
+    instructions += StoreNativeField(
         Slot::GetContextVariableSlotFor(thread_, *stacktrace_var));
   }
 
@@ -1171,8 +1168,7 @@
       ASSERT(function.NumParameters() == 2);
       body += LoadLocal(parsed_function_->RawParameterVariable(0));
       body += LoadLocal(parsed_function_->RawParameterVariable(1));
-      body += StoreInstanceField(TokenPosition::kNoSource,
-                                 Slot::LinkedHashMap_index());
+      body += StoreNativeField(Slot::LinkedHashMap_index());
       body += NullConstant();
       break;
     case MethodRecognizer::kLinkedHashMap_getData:
@@ -1184,8 +1180,7 @@
       ASSERT(function.NumParameters() == 2);
       body += LoadLocal(parsed_function_->RawParameterVariable(0));
       body += LoadLocal(parsed_function_->RawParameterVariable(1));
-      body += StoreInstanceField(TokenPosition::kNoSource,
-                                 Slot::LinkedHashMap_data());
+      body += StoreNativeField(Slot::LinkedHashMap_data());
       body += NullConstant();
       break;
     case MethodRecognizer::kLinkedHashMap_getHashMask:
@@ -1197,9 +1192,9 @@
       ASSERT(function.NumParameters() == 2);
       body += LoadLocal(parsed_function_->RawParameterVariable(0));
       body += LoadLocal(parsed_function_->RawParameterVariable(1));
-      body += StoreInstanceField(
-          TokenPosition::kNoSource, Slot::LinkedHashMap_hash_mask(),
-          StoreInstanceFieldInstr::Kind::kOther, kNoStoreBarrier);
+      body += StoreNativeField(Slot::LinkedHashMap_hash_mask(),
+                               StoreInstanceFieldInstr::Kind::kOther,
+                               kNoStoreBarrier);
       body += NullConstant();
       break;
     case MethodRecognizer::kLinkedHashMap_getUsedData:
@@ -1211,9 +1206,9 @@
       ASSERT(function.NumParameters() == 2);
       body += LoadLocal(parsed_function_->RawParameterVariable(0));
       body += LoadLocal(parsed_function_->RawParameterVariable(1));
-      body += StoreInstanceField(
-          TokenPosition::kNoSource, Slot::LinkedHashMap_used_data(),
-          StoreInstanceFieldInstr::Kind::kOther, kNoStoreBarrier);
+      body += StoreNativeField(Slot::LinkedHashMap_used_data(),
+                               StoreInstanceFieldInstr::Kind::kOther,
+                               kNoStoreBarrier);
       body += NullConstant();
       break;
     case MethodRecognizer::kLinkedHashMap_getDeletedKeys:
@@ -1225,9 +1220,9 @@
       ASSERT(function.NumParameters() == 2);
       body += LoadLocal(parsed_function_->RawParameterVariable(0));
       body += LoadLocal(parsed_function_->RawParameterVariable(1));
-      body += StoreInstanceField(
-          TokenPosition::kNoSource, Slot::LinkedHashMap_deleted_keys(),
-          StoreInstanceFieldInstr::Kind::kOther, kNoStoreBarrier);
+      body += StoreNativeField(Slot::LinkedHashMap_deleted_keys(),
+                               StoreInstanceFieldInstr::Kind::kOther,
+                               kNoStoreBarrier);
       body += NullConstant();
       break;
     case MethodRecognizer::kWeakProperty_getKey:
@@ -1239,8 +1234,7 @@
       ASSERT(function.NumParameters() == 2);
       body += LoadLocal(parsed_function_->RawParameterVariable(0));
       body += LoadLocal(parsed_function_->RawParameterVariable(1));
-      body += StoreInstanceField(TokenPosition::kNoSource,
-                                 Slot::WeakProperty_key());
+      body += StoreNativeField(Slot::WeakProperty_key());
       body += NullConstant();
       break;
     case MethodRecognizer::kWeakProperty_getValue:
@@ -1252,8 +1246,7 @@
       ASSERT(function.NumParameters() == 2);
       body += LoadLocal(parsed_function_->RawParameterVariable(0));
       body += LoadLocal(parsed_function_->RawParameterVariable(1));
-      body += StoreInstanceField(TokenPosition::kNoSource,
-                                 Slot::WeakProperty_value());
+      body += StoreNativeField(Slot::WeakProperty_value());
       body += NullConstant();
       break;
     case MethodRecognizer::kUtf8DecoderScan:
@@ -1358,8 +1351,7 @@
         body += LoadLocal(pointer);
         body += LoadLocal(address);
         body += UnboxTruncate(kUnboxedFfiIntPtr);
-        body += ConvertUnboxedToUntagged(kUnboxedFfiIntPtr);
-        body += StoreUntagged(compiler::target::Pointer::data_field_offset());
+        body += StoreNativeField(Slot::Pointer_data_field());
         body += DropTempsPreserveTop(1);  // Drop [address] keep [pointer].
       }
       body += DropTempsPreserveTop(1);  // Drop [arg_offset].
@@ -1476,8 +1468,7 @@
       body += CheckNullOptimized(TokenPosition::kNoSource,
                                  String::ZoneHandle(Z, function.name()));
       body += UnboxTruncate(kUnboxedFfiIntPtr);
-      body += ConvertUnboxedToUntagged(kUnboxedFfiIntPtr);
-      body += StoreUntagged(compiler::target::Pointer::data_field_offset());
+      body += StoreNativeField(Slot::Pointer_data_field());
     } break;
     case MethodRecognizer::kFfiGetAddress: {
       ASSERT(function.NumParameters() == 1);
@@ -1522,20 +1513,20 @@
 
   body += LoadLocal(view_object);
   body += LoadLocal(typed_data);
-  body += StoreInstanceField(token_pos, Slot::TypedDataView_data(),
-                             StoreInstanceFieldInstr::Kind::kInitializing);
+  body += StoreNativeField(token_pos, Slot::TypedDataView_data(),
+                           StoreInstanceFieldInstr::Kind::kInitializing);
 
   body += LoadLocal(view_object);
   body += LoadLocal(offset_in_bytes);
-  body += StoreInstanceField(token_pos, Slot::TypedDataView_offset_in_bytes(),
-                             StoreInstanceFieldInstr::Kind::kInitializing,
-                             kNoStoreBarrier);
+  body += StoreNativeField(token_pos, Slot::TypedDataView_offset_in_bytes(),
+                           StoreInstanceFieldInstr::Kind::kInitializing,
+                           kNoStoreBarrier);
 
   body += LoadLocal(view_object);
   body += LoadLocal(length);
-  body += StoreInstanceField(token_pos, Slot::TypedDataBase_length(),
-                             StoreInstanceFieldInstr::Kind::kInitializing,
-                             kNoStoreBarrier);
+  body += StoreNativeField(token_pos, Slot::TypedDataBase_length(),
+                           StoreInstanceFieldInstr::Kind::kInitializing,
+                           kNoStoreBarrier);
 
   // Update the inner pointer.
   //
@@ -1548,8 +1539,7 @@
   body += LoadLocal(offset_in_bytes);
   body += UnboxSmiToIntptr();
   body += AddIntptrIntegers();
-  body += ConvertUnboxedToUntagged(kUnboxedIntPtr);
-  body += StoreUntagged(compiler::target::TypedDataBase::data_field_offset());
+  body += StoreNativeField(Slot::TypedDataBase_data_field());
 
   return body;
 }
@@ -1601,9 +1591,8 @@
   if (!target.HasInstantiatedSignature(kCurrentClass)) {
     fragment += LoadLocal(closure);
     fragment += LoadInstantiatorTypeArguments();
-    fragment += StoreInstanceField(
-        TokenPosition::kNoSource, Slot::Closure_instantiator_type_arguments(),
-        StoreInstanceFieldInstr::Kind::kInitializing);
+    fragment += StoreNativeField(Slot::Closure_instantiator_type_arguments(),
+                                 StoreInstanceFieldInstr::Kind::kInitializing);
   }
 
   // The function cannot be local and have parent generic functions.
@@ -1619,32 +1608,28 @@
   // Store the function and the context in the closure.
   fragment += LoadLocal(closure);
   fragment += Constant(target);
-  fragment +=
-      StoreInstanceField(TokenPosition::kNoSource, Slot::Closure_function(),
-                         StoreInstanceFieldInstr::Kind::kInitializing);
+  fragment += StoreNativeField(Slot::Closure_function(),
+                               StoreInstanceFieldInstr::Kind::kInitializing);
 
   fragment += LoadLocal(closure);
   fragment += LoadLocal(context);
-  fragment +=
-      StoreInstanceField(TokenPosition::kNoSource, Slot::Closure_context(),
-                         StoreInstanceFieldInstr::Kind::kInitializing);
+  fragment += StoreNativeField(Slot::Closure_context(),
+                               StoreInstanceFieldInstr::Kind::kInitializing);
 
   if (target.IsGeneric()) {
     // Only generic functions need to have properly initialized
     // delayed_type_arguments.
     fragment += LoadLocal(closure);
     fragment += Constant(Object::empty_type_arguments());
-    fragment += StoreInstanceField(
-        TokenPosition::kNoSource, Slot::Closure_delayed_type_arguments(),
-        StoreInstanceFieldInstr::Kind::kInitializing);
+    fragment += StoreNativeField(Slot::Closure_delayed_type_arguments(),
+                                 StoreInstanceFieldInstr::Kind::kInitializing);
   }
 
   // The context is on top of the operand stack.  Store `this`.  The context
   // doesn't need a parent pointer because it doesn't close over anything
   // else.
   fragment += LoadLocal(parsed_function_->receiver_var());
-  fragment += StoreInstanceField(
-      TokenPosition::kNoSource,
+  fragment += StoreNativeField(
       Slot::GetContextVariableSlotFor(
           thread_, *implicit_closure_scope->context_variables()[0]),
       StoreInstanceFieldInstr::Kind::kInitializing);
@@ -3711,15 +3696,15 @@
 
   body += LoadLocal(error_instance);
   body += LoadLocal(CurrentException());
-  body += StoreInstanceField(
-      TokenPosition::kNoSource, Slot::UnhandledException_exception(),
-      StoreInstanceFieldInstr::Kind::kInitializing, kNoStoreBarrier);
+  body += StoreNativeField(Slot::UnhandledException_exception(),
+                           StoreInstanceFieldInstr::Kind::kInitializing,
+                           kNoStoreBarrier);
 
   body += LoadLocal(error_instance);
   body += LoadLocal(CurrentStackTrace());
-  body += StoreInstanceField(
-      TokenPosition::kNoSource, Slot::UnhandledException_stacktrace(),
-      StoreInstanceFieldInstr::Kind::kInitializing, kNoStoreBarrier);
+  body += StoreNativeField(Slot::UnhandledException_stacktrace(),
+                           StoreInstanceFieldInstr::Kind::kInitializing,
+                           kNoStoreBarrier);
 
   return body;
 }
@@ -3761,8 +3746,7 @@
   code += LoadLocal(pointer);
   code += LoadLocal(address);
   code += UnboxTruncate(kUnboxedFfiIntPtr);
-  code += ConvertUnboxedToUntagged(kUnboxedFfiIntPtr);
-  code += StoreUntagged(compiler::target::Pointer::data_field_offset());
+  code += StoreNativeField(Slot::Pointer_data_field());
   code += StoreLocal(TokenPosition::kNoSource, result);
   code += Drop();  // StoreLocal^
   code += Drop();  // address
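The recurring rewrite in this file is the same pattern each time: because Slot::Pointer_data_field() carries the unboxed FfiIntPtr representation, the explicit untagged conversion and raw offset store collapse into one typed store. Condensed directly from the hunks above:

    // Before: unbox, reinterpret as untagged, store at a raw byte offset.
    body += UnboxTruncate(kUnboxedFfiIntPtr);
    body += ConvertUnboxedToUntagged(kUnboxedFfiIntPtr);
    body += StoreUntagged(compiler::target::Pointer::data_field_offset());

    // After: unbox, then store through the slot, which knows both the
    // offset and the representation.
    body += UnboxTruncate(kUnboxedFfiIntPtr);
    body += StoreNativeField(Slot::Pointer_data_field());
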
diff --git a/runtime/vm/compiler/jit/jit_call_specializer.cc b/runtime/vm/compiler/jit/jit_call_specializer.cc
index 58115df..191e6bb 100644
--- a/runtime/vm/compiler/jit/jit_call_specializer.cc
+++ b/runtime/vm/compiler/jit/jit_call_specializer.cc
@@ -164,7 +164,7 @@
 
 void JitCallSpecializer::VisitStoreInstanceField(
     StoreInstanceFieldInstr* instr) {
-  if (instr->IsUnboxedStore()) {
+  if (instr->IsUnboxedDartFieldStore()) {
     // Determine if this field should be unboxed based on the usage of getter
     // and setter functions: The heuristic requires that the setter has a
     // usage count of at least 1/kGetterSetterRatio of the getter usage count.
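The rename separates the two flavors of unboxed stores: IsUnboxedDartFieldStore() still covers optimizer-driven unboxing of Dart instance fields, while slots with a non-kTagged representation are now handled unconditionally, before any optimization check. Condensed from StoreInstanceFieldInstr::EmitNativeCode in il_x64.cc above (its early returns rendered as a chain for brevity):

    if (slot().representation() != kTagged) {
      // Native unboxed slot: operand size comes from the representation.
    } else if (IsUnboxedDartFieldStore() && compiler->is_optimizing()) {
      // Unboxed Dart field: the value arrives in an FPU register.
    } else if (IsPotentialUnboxedDartFieldStore()) {
      // May unbox at runtime, guarded by the field's class id.
    } else {
      // Plain tagged store, with or without a write barrier.
    }
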
diff --git a/tools/VERSION b/tools/VERSION
index 1ffa9e2..19e4d49 100644
--- a/tools/VERSION
+++ b/tools/VERSION
@@ -27,5 +27,5 @@
 MAJOR 2
 MINOR 14
 PATCH 0
-PRERELEASE 75
+PRERELEASE 76
 PRERELEASE_PATCH 0
\ No newline at end of file