Version 2.12.0-54.0.dev

Merge commit 'e7a2fbe692a761ca2dae2a2643bbcce4b58089ad' into 'dev'
diff --git a/pkg/dartdev/lib/src/commands/compile.dart b/pkg/dartdev/lib/src/commands/compile.dart
index c0fdea3..6edd82b 100644
--- a/pkg/dartdev/lib/src/commands/compile.dart
+++ b/pkg/dartdev/lib/src/commands/compile.dart
@@ -92,12 +92,15 @@
       return 1;
     }
 
-    VmInteropHandler.run(sdk.dart2jsSnapshot, [
-      '--libraries-spec=$librariesPath',
-      if (argResults.enabledExperiments.isNotEmpty)
-        "--enable-experiment=${argResults.enabledExperiments.join(',')}",
-      ...argResults.arguments,
-    ]);
+    VmInteropHandler.run(
+        sdk.dart2jsSnapshot,
+        [
+          '--libraries-spec=$librariesPath',
+          if (argResults.enabledExperiments.isNotEmpty)
+            "--enable-experiment=${argResults.enabledExperiments.join(',')}",
+          ...argResults.arguments,
+        ],
+        packageConfigOverride: null);
 
     return 0;
   }
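
Note: this call, and the pub.dart calls below, pass `packageConfigOverride: null` explicitly; as the main.cc hunk further down shows, a null override defers to `Options::packages_file()` and the VM's default package resolution.
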
diff --git a/pkg/dartdev/lib/src/commands/pub.dart b/pkg/dartdev/lib/src/commands/pub.dart
index d77dce6..e6162a5 100644
--- a/pkg/dartdev/lib/src/commands/pub.dart
+++ b/pkg/dartdev/lib/src/commands/pub.dart
@@ -52,7 +52,7 @@
     log.trace('$command ${args.first}');
 
     // Call 'pub help'
-    VmInteropHandler.run(command, args);
+    VmInteropHandler.run(command, args, packageConfigOverride: null);
   }
 
   @override
@@ -83,7 +83,7 @@
     }
 
     log.trace('$command ${args.join(' ')}');
-    VmInteropHandler.run(command, args);
+    VmInteropHandler.run(command, args, packageConfigOverride: null);
     return 0;
   }
 }
diff --git a/pkg/dartdev/lib/src/commands/run.dart b/pkg/dartdev/lib/src/commands/run.dart
index 8d92c83..80ca891 100644
--- a/pkg/dartdev/lib/src/commands/run.dart
+++ b/pkg/dartdev/lib/src/commands/run.dart
@@ -191,18 +191,42 @@
     }
 
     String path;
+    String packageConfigOverride;
+
     try {
-      path = await getExecutableForCommand(mainCommand);
+      final filename = maybeUriToFilename(mainCommand);
+      if (File(filename).existsSync()) {
+        // TODO(sigurdm): getExecutableForCommand is able to figure this out,
+        // but does not return a package config override.
+        path = filename;
+        packageConfigOverride = null;
+      } else {
+        path = await getExecutableForCommand(mainCommand);
+        packageConfigOverride =
+            join(current, '.dart_tool', 'package_config.json');
+      }
     } on CommandResolutionFailedException catch (e) {
       log.stderr(e.message);
       return errorExitCode;
     }
-
-    VmInteropHandler.run(path, runArgs);
+    VmInteropHandler.run(
+      path,
+      runArgs,
+      packageConfigOverride: packageConfigOverride,
+    );
     return 0;
   }
 }
 
+/// Tries to parse [maybeUri] as a file URI. Returns the resulting file path,
+/// or [maybeUri] itself if parsing fails.
+String maybeUriToFilename(String maybeUri) {
+  try {
+    return Uri.parse(maybeUri).toFilePath();
+  } catch (_) {
+    return maybeUri;
+  }
+}
+
 class _DebuggingSession {
   Future<bool> start(
       String host, String port, bool disableServiceAuthCodes) async {
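
A usage sketch for the `maybeUriToFilename` helper above (illustrative, not part of the change). On a POSIX system:

    maybeUriToFilename('file:///tmp/main.dart'); // => '/tmp/main.dart'
    maybeUriToFilename('bar:main');              // => 'bar:main'

The second call lands in the catch branch because `Uri.toFilePath()` throws for a non-file scheme, so package-executable specs such as 'bar:main' reach `getExecutableForCommand` unchanged and are resolved by pub.
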
diff --git a/pkg/dartdev/lib/src/commands/test.dart b/pkg/dartdev/lib/src/commands/test.dart
index 2e34c97..b5b7c6a 100644
--- a/pkg/dartdev/lib/src/commands/test.dart
+++ b/pkg/dartdev/lib/src/commands/test.dart
@@ -5,6 +5,7 @@
 import 'dart:async';
 
 import 'package:args/args.dart';
+import 'package:path/path.dart';
 import 'package:pub/pub.dart';
 
 import '../core.dart';
@@ -43,7 +44,9 @@
     try {
       final testExecutable = await getExecutableForCommand('test:test');
       log.trace('dart $testExecutable ${argResults.rest.join(' ')}');
-      VmInteropHandler.run(testExecutable, argResults.rest);
+      VmInteropHandler.run(testExecutable, argResults.rest,
+          packageConfigOverride:
+              join(current, '.dart_tool', 'package_config.json'));
       return 0;
     } on CommandResolutionFailedException catch (e) {
       print(e.message);
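
Illustration with a hypothetical path: if `dart test` is invoked from /home/me/app, then

    join(current, '.dart_tool', 'package_config.json')
      == '/home/me/app/.dart_tool/package_config.json'

so the spawned test runner resolves packages against the invoking project rather than against the location of the resolved executable.
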
diff --git a/pkg/dartdev/lib/src/vm_interop_handler.dart b/pkg/dartdev/lib/src/vm_interop_handler.dart
index fcf419c..6ff7bfa 100644
--- a/pkg/dartdev/lib/src/vm_interop_handler.dart
+++ b/pkg/dartdev/lib/src/vm_interop_handler.dart
@@ -4,6 +4,8 @@
 
 import 'dart:isolate';
 
+import 'package:meta/meta.dart';
+
 /// Contains methods used to communicate DartDev results back to the VM.
 abstract class VmInteropHandler {
   /// Initializes [VmInteropHandler] to utilize [port] to communicate with the
@@ -11,14 +13,22 @@
   static void initialize(SendPort port) => _port = port;
 
   /// Notifies the VM to run [script] with [args] upon DartDev exit.
-  static void run(String script, List<String> args) {
+  ///
+  /// If [packageConfigOverride] is given, the VM uses it to locate the
+  /// package config when running [script].
+  static void run(
+    String script,
+    List<String> args, {
+    @required String packageConfigOverride,
+  }) {
     assert(_port != null);
     if (_port == null) return;
-    final message = List<dynamic>.filled(3, null)
-      ..[0] = _kResultRun
-      ..[1] = script
+    final message = <dynamic>[
+      _kResultRun,
+      script,
+      packageConfigOverride,
       // Copy the list so it doesn't get GC'd underneath us.
-      ..[2] = args.toList();
+      args.toList()
+    ];
     _port.send(message);
   }
 
@@ -27,9 +37,7 @@
   static void exit(int exitCode) {
     assert(_port != null);
     if (_port == null) return;
-    final message = List<dynamic>.filled(2, null)
-      ..[0] = _kResultExit
-      ..[1] = exitCode;
+    final message = <dynamic>[_kResultExit, exitCode];
     _port.send(message);
   }
 
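Sketch of the run-result message layout after this change; the C++ side (DartDevResultCallback in runtime/bin/dartdev_isolate.cc below) unpacks the array with the same indices:

    [0] _kResultRun            int32 tag
    [1] script                 String
    [2] packageConfigOverride  String or null (the new slot)
    [3] args                   List<String> (copied via args.toList())
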
diff --git a/pkg/dartdev/test/commands/run_test.dart b/pkg/dartdev/test/commands/run_test.dart
index 324e3f5..9d2125c 100644
--- a/pkg/dartdev/test/commands/run_test.dart
+++ b/pkg/dartdev/test/commands/run_test.dart
@@ -88,6 +88,39 @@
     expect(result.exitCode, 0);
   });
 
+  test('from path-dependency with cyclic dependency', () {
+    p = project(name: 'foo');
+    final bar = TestProject(name: 'bar');
+    p.file('pubspec.yaml', '''
+name: foo
+environment:
+  sdk: '>=2.9.0 <3.0.0'
+
+dependencies: { 'bar': {'path': '${bar.dir.path}'}}
+''');
+    p.file('lib/foo.dart', r'''
+import 'package:bar/bar.dart';
+final b = "FOO $bar";
+''');
+
+    try {
+      bar.file('lib/bar.dart', 'final bar = "BAR";');
+
+      bar.file('bin/main.dart', r'''
+import 'package:foo/foo.dart';
+void main(List<String> args) => print("$b $args");
+''');
+
+      ProcessResult result = p.runSync('run', ['bar:main', '--arg1', 'arg2']);
+
+      expect(result.stderr, isEmpty);
+      expect(result.stdout, contains('FOO BAR [--arg1, arg2]'));
+      expect(result.exitCode, 0);
+    } finally {
+      bar.dispose();
+    }
+  });
+
   test('with absolute file path', () async {
     p = project();
     p.file('main.dart', 'void main(args) { print(args); }');
@@ -101,8 +134,8 @@
     ]);
 
     // --enable-experiment and main.dart should not be passed.
-    expect(result.stdout, equals('[--argument1, argument2]\n'));
     expect(result.stderr, isEmpty);
+    expect(result.stdout, equals('[--argument1, argument2]\n'));
     expect(result.exitCode, 0);
   });
 
@@ -118,8 +151,8 @@
     ]);
 
     // --enable-experiment and main.dart should not be passed.
-    expect(result.stdout, equals('[--argument1, argument2]\n'));
     expect(result.stderr, isEmpty);
+    expect(result.stdout, equals('[--argument1, argument2]\n'));
     expect(result.exitCode, 0);
   });
 
diff --git a/pkg/dartdev/test/utils.dart b/pkg/dartdev/test/utils.dart
index bbc46df..a8de016 100644
--- a/pkg/dartdev/test/utils.dart
+++ b/pkg/dartdev/test/utils.dart
@@ -17,20 +17,23 @@
 const String dartVersionFilePrefix2_9 = '// @dart = 2.9\n';
 
 TestProject project(
-        {String mainSrc, String analysisOptions, bool logAnalytics = false}) =>
+        {String mainSrc,
+        String analysisOptions,
+        bool logAnalytics = false,
+        String name = TestProject._defaultProjectName}) =>
     TestProject(
         mainSrc: mainSrc,
         analysisOptions: analysisOptions,
-        logAnalytics: logAnalytics);
+        logAnalytics: logAnalytics,
+        name: name);
 
 class TestProject {
-  static String get defaultProjectName => 'dartdev_temp';
+  static const String _defaultProjectName = 'dartdev_temp';
 
   Directory dir;
 
   String get dirPath => dir.path;
 
-  String get name => defaultProjectName;
+  final String name;
 
   String get relativeFilePath => 'lib/main.dart';
 
@@ -39,9 +42,10 @@
   TestProject({
     String mainSrc,
     String analysisOptions,
+    this.name = _defaultProjectName,
     this.logAnalytics = false,
   }) {
-    dir = Directory.systemTemp.createTempSync('dartdev');
+    dir = Directory.systemTemp.createTempSync(name);
     file('pubspec.yaml', '''
 name: $name
 environment:
diff --git a/runtime/bin/dartdev_isolate.cc b/runtime/bin/dartdev_isolate.cc
index 7b11213..ffedb72 100644
--- a/runtime/bin/dartdev_isolate.cc
+++ b/runtime/bin/dartdev_isolate.cc
@@ -40,6 +40,7 @@
 DartDevIsolate::DartDev_Result DartDevIsolate::DartDevRunner::result_ =
     DartDevIsolate::DartDev_Result_Unknown;
 char** DartDevIsolate::DartDevRunner::script_ = nullptr;
+char** DartDevIsolate::DartDevRunner::package_config_override_ = nullptr;
 std::unique_ptr<char*[], void (*)(char*[])>
     DartDevIsolate::DartDevRunner::argv_ =
         std::unique_ptr<char*[], void (*)(char**)>(nullptr, [](char**) {});
@@ -80,12 +81,12 @@
 
 void DartDevIsolate::DartDevRunner::Run(
     Dart_IsolateGroupCreateCallback create_isolate,
-    const char* packages_file,
+    char** packages_file,
     char** script,
     CommandLineOptions* dart_options) {
   create_isolate_ = create_isolate;
   dart_options_ = dart_options;
-  packages_file_ = packages_file;
+  package_config_override_ = packages_file;
   script_ = script;
 
   MonitorLocker locker(monitor_);
@@ -111,18 +112,33 @@
 void DartDevIsolate::DartDevRunner::DartDevResultCallback(
     Dart_Port dest_port_id,
     Dart_CObject* message) {
+  // These messages are produced in pkg/dartdev/lib/src/vm_interop_handler.dart.
   ASSERT(message->type == Dart_CObject_kArray);
   int32_t type = GetArrayItem(message, 0)->value.as_int32;
   switch (type) {
     case DartDevIsolate::DartDev_Result_Run: {
       result_ = DartDevIsolate::DartDev_Result_Run;
       ASSERT(GetArrayItem(message, 1)->type == Dart_CObject_kString);
+      auto item2 = GetArrayItem(message, 2);
+
+      ASSERT(item2->type == Dart_CObject_kString ||
+             item2->type == Dart_CObject_kNull);
+
       if (*script_ != nullptr) {
         free(*script_);
       }
+      if (*package_config_override_ != nullptr) {
+        free(*package_config_override_);
+        *package_config_override_ = nullptr;
+      }
       *script_ = Utils::StrDup(GetArrayItem(message, 1)->value.as_string);
-      ASSERT(GetArrayItem(message, 2)->type == Dart_CObject_kArray);
-      Dart_CObject* args = GetArrayItem(message, 2);
+
+      if (item2->type == Dart_CObject_kString) {
+        *package_config_override_ = Utils::StrDup(item2->value.as_string);
+      }
+
+      ASSERT(GetArrayItem(message, 3)->type == Dart_CObject_kArray);
+      Dart_CObject* args = GetArrayItem(message, 3);
       argc_ = args->value.as_array.length;
       Dart_CObject** dart_args = args->value.as_array.values;
 
@@ -244,7 +260,7 @@
 
 DartDevIsolate::DartDev_Result DartDevIsolate::RunDartDev(
     Dart_IsolateGroupCreateCallback create_isolate,
-    const char* packages_file,
+    char** packages_file,
     char** script,
     CommandLineOptions* dart_options) {
   runner_.Run(create_isolate, packages_file, script, dart_options);
diff --git a/runtime/bin/dartdev_isolate.h b/runtime/bin/dartdev_isolate.h
index 7145c9b..8fd0bd9 100644
--- a/runtime/bin/dartdev_isolate.h
+++ b/runtime/bin/dartdev_isolate.h
@@ -52,7 +52,7 @@
   // values.
   static DartDev_Result RunDartDev(
       Dart_IsolateGroupCreateCallback create_isolate,
-      const char* packages_file,
+      char** packages_file,
       char** script,
       CommandLineOptions* dart_options);
 
@@ -62,7 +62,7 @@
     DartDevRunner() {}
 
     void Run(Dart_IsolateGroupCreateCallback create_isolate,
-             const char* packages_file,
+             char** package_config_override,
              char** script,
              CommandLineOptions* dart_options);
 
@@ -76,6 +76,7 @@
 
     static DartDev_Result result_;
     static char** script_;
+    static char** package_config_override_;
     static std::unique_ptr<char*[], void (*)(char**)> argv_;
     static intptr_t argc_;
 
diff --git a/runtime/bin/ffi_unit_test/BUILD.gn b/runtime/bin/ffi_unit_test/BUILD.gn
index d7c93ba..2a7b371 100644
--- a/runtime/bin/ffi_unit_test/BUILD.gn
+++ b/runtime/bin/ffi_unit_test/BUILD.gn
@@ -54,7 +54,9 @@
 }
 
 config("define_target_os_ios") {
-  defines = [ "TARGET_OS_IOS" ]
+  # This is TARGET_OS_MACOS_IOS instead of TARGET_OS_IOS because the latter is
+  # defined by Xcode already. See https://dartbug.com/24453.
+  defines = [ "TARGET_OS_MACOS_IOS" ]
 }
 
 config("define_target_os_linux") {
diff --git a/runtime/bin/main.cc b/runtime/bin/main.cc
index a453899..76d3b51 100644
--- a/runtime/bin/main.cc
+++ b/runtime/bin/main.cc
@@ -899,7 +899,9 @@
   file->Release();
 }
 
-bool RunMainIsolate(const char* script_name, CommandLineOptions* dart_options) {
+void RunMainIsolate(const char* script_name,
+                    const char* package_config_override,
+                    CommandLineOptions* dart_options) {
   // Call CreateIsolateGroupAndSetup which creates an isolate and loads up
   // the specified application script.
   char* error = NULL;
@@ -908,7 +910,9 @@
   Dart_IsolateFlagsInitialize(&flags);
 
   Dart_Isolate isolate = CreateIsolateGroupAndSetupHelper(
-      /* is_main_isolate */ true, script_name, "main", Options::packages_file(),
+      /* is_main_isolate */ true, script_name, "main",
+      Options::packages_file() == nullptr ? package_config_override
+                                          : Options::packages_file(),
       &flags, NULL /* callback_data */, &error, &exit_code);
 
   if (isolate == NULL) {
@@ -1035,9 +1039,6 @@
 
   // Shutdown the isolate.
   Dart_ShutdownIsolate();
-
-  // No restart.
-  return false;
 }
 
 #undef CHECK_RESULT
@@ -1076,6 +1077,8 @@
 
 void main(int argc, char** argv) {
   char* script_name = nullptr;
+  // Allows the dartdev isolate to point the VM at the desired package config.
+  char* package_config_override = nullptr;
   const int EXTRA_VM_ARGUMENTS = 10;
   CommandLineOptions vm_options(argc + EXTRA_VM_ARGUMENTS);
   CommandLineOptions dart_options(argc + EXTRA_VM_ARGUMENTS);
@@ -1290,7 +1293,7 @@
   if (DartDevIsolate::should_run_dart_dev() && !Options::disable_dart_dev() &&
       Options::gen_snapshot_kind() == SnapshotKind::kNone) {
     DartDevIsolate::DartDev_Result dartdev_result = DartDevIsolate::RunDartDev(
-        CreateIsolateGroupAndSetup, Options::packages_file(), &script_name,
+        CreateIsolateGroupAndSetup, &package_config_override, &script_name,
         &dart_options);
     ASSERT(dartdev_result != DartDevIsolate::DartDev_Result_Unknown);
     ran_dart_dev = true;
@@ -1310,9 +1313,7 @@
       Platform::Exit(kErrorExitCode);
     } else {
       // Run the main isolate until we aren't told to restart.
-      while (RunMainIsolate(script_name, &dart_options)) {
-        Syslog::PrintErr("Restarting VM\n");
-      }
+      RunMainIsolate(script_name, package_config_override, &dart_options);
     }
   }
 
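Note the precedence established above: an explicit `--packages` flag (surfaced as `Options::packages_file()`) still wins, and the dartdev-supplied `package_config_override` is consulted only when no such flag was given.
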
diff --git a/runtime/vm/compiler/asm_intrinsifier_arm.cc b/runtime/vm/compiler/asm_intrinsifier_arm.cc
index 22eff5a..26a7f91 100644
--- a/runtime/vm/compiler/asm_intrinsifier_arm.cc
+++ b/runtime/vm/compiler/asm_intrinsifier_arm.cc
@@ -1213,8 +1213,8 @@
   if (TargetCPUFeatures::vfp_supported()) {
     __ ldr(R0, Address(SP, 0 * target::kWordSize));
     // R1 <- value[0:31], R2 <- value[32:63]
-    __ LoadFieldFromOffset(kWord, R1, R0, target::Double::value_offset());
-    __ LoadFieldFromOffset(kWord, R2, R0,
+    __ LoadFieldFromOffset(R1, R0, target::Double::value_offset());
+    __ LoadFieldFromOffset(R2, R0,
                            target::Double::value_offset() + target::kWordSize);
 
     // If the low word isn't 0, then it isn't infinity.
@@ -1382,13 +1382,13 @@
       disp_0 + target::Instance::ElementSizeFor(kTypedDataUint32ArrayCid);
 
   __ LoadImmediate(R0, a_int32_value);
-  __ LoadFromOffset(kWord, R2, R1, disp_0 - kHeapObjectTag);
-  __ LoadFromOffset(kWord, R3, R1, disp_1 - kHeapObjectTag);
+  __ LoadFieldFromOffset(R2, R1, disp_0);
+  __ LoadFieldFromOffset(R3, R1, disp_1);
   __ mov(R8, Operand(0));  // Zero extend unsigned _state[kSTATE_HI].
   // Unsigned 32-bit multiply and 64-bit accumulate into R8:R3.
   __ umlal(R3, R8, R0, R2);  // R8:R3 <- R8:R3 + R0 * R2.
-  __ StoreToOffset(kWord, R3, R1, disp_0 - kHeapObjectTag);
-  __ StoreToOffset(kWord, R8, R1, disp_1 - kHeapObjectTag);
+  __ StoreFieldToOffset(R3, R1, disp_0);
+  __ StoreFieldToOffset(R8, R1, disp_1);
   ASSERT(target::ToRawSmi(0) == 0);
   __ eor(R0, R0, Operand(R0));
   __ Ret();
@@ -1466,25 +1466,22 @@
   __ b(&not_double, NE);
 
   __ LoadIsolate(R0);
-  __ LoadFromOffset(kWord, R0, R0,
-                    target::Isolate::cached_object_store_offset());
-  __ LoadFromOffset(kWord, R0, R0, target::ObjectStore::double_type_offset());
+  __ LoadFromOffset(R0, R0, target::Isolate::cached_object_store_offset());
+  __ LoadFromOffset(R0, R0, target::ObjectStore::double_type_offset());
   __ Ret();
 
   __ Bind(&not_double);
   JumpIfNotInteger(assembler, R1, R0, &not_integer);
   __ LoadIsolate(R0);
-  __ LoadFromOffset(kWord, R0, R0,
-                    target::Isolate::cached_object_store_offset());
-  __ LoadFromOffset(kWord, R0, R0, target::ObjectStore::int_type_offset());
+  __ LoadFromOffset(R0, R0, target::Isolate::cached_object_store_offset());
+  __ LoadFromOffset(R0, R0, target::ObjectStore::int_type_offset());
   __ Ret();
 
   __ Bind(&not_integer);
   JumpIfNotString(assembler, R1, R0, &use_declaration_type);
   __ LoadIsolate(R0);
-  __ LoadFromOffset(kWord, R0, R0,
-                    target::Isolate::cached_object_store_offset());
-  __ LoadFromOffset(kWord, R0, R0, target::ObjectStore::string_type_offset());
+  __ LoadFromOffset(R0, R0, target::Isolate::cached_object_store_offset());
+  __ LoadFromOffset(R0, R0, target::ObjectStore::string_type_offset());
   __ Ret();
 
   __ Bind(&use_declaration_type);
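
The `OperandSize` renames above recur mechanically through the remaining ARM and ARM64 assembler files in this commit:

    kHalfword         -> kTwoBytes
    kUnsignedHalfword -> kUnsignedTwoBytes
    kWord             -> kFourBytes
    kUnsignedWord     -> kUnsignedFourBytes
    kDoubleWord       -> kEightBytes (ARM64)

On ARM, the size argument of LoadFromOffset/StoreToOffset also moves to a defaulted trailing parameter (kFourBytes) so the common four-byte case can omit it.
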
diff --git a/runtime/vm/compiler/asm_intrinsifier_arm64.cc b/runtime/vm/compiler/asm_intrinsifier_arm64.cc
index a54be20..14857e9 100644
--- a/runtime/vm/compiler/asm_intrinsifier_arm64.cc
+++ b/runtime/vm/compiler/asm_intrinsifier_arm64.cc
@@ -1556,8 +1556,8 @@
   __ LoadClassById(R2, R1);
   __ ldr(
       R3,
-      FieldAddress(R2, target::Class::num_type_arguments_offset(), kHalfword),
-      kHalfword);
+      FieldAddress(R2, target::Class::num_type_arguments_offset(), kTwoBytes),
+      kTwoBytes);
   __ CompareImmediate(R3, 0);
   __ b(normal_ir_body, NE);
 
@@ -1597,8 +1597,8 @@
   __ LoadClassById(scratch, cid1);
   __ ldr(scratch,
          FieldAddress(scratch, target::Class::num_type_arguments_offset(),
-                      kHalfword),
-         kHalfword);
+                      kTwoBytes),
+         kTwoBytes);
   __ cbnz(normal_ir_body, scratch);
   __ b(equal);
 
@@ -1654,7 +1654,8 @@
 void AsmIntrinsifier::String_getHashCode(Assembler* assembler,
                                          Label* normal_ir_body) {
   __ ldr(R0, Address(SP, 0 * target::kWordSize));
-  __ ldr(R0, FieldAddress(R0, target::String::hash_offset()), kUnsignedWord);
+  __ ldr(R0, FieldAddress(R0, target::String::hash_offset()),
+         kUnsignedFourBytes);
   __ adds(R0, R0, Operand(R0));  // Smi tag the hash code, setting Z flag.
   __ b(normal_ir_body, EQ);
   __ ret();
@@ -1729,8 +1730,8 @@
 void AsmIntrinsifier::Object_getHash(Assembler* assembler,
                                      Label* normal_ir_body) {
   __ ldr(R0, Address(SP, 0 * target::kWordSize));
-  __ ldr(R0, FieldAddress(R0, target::String::hash_offset(), kWord),
-         kUnsignedWord);
+  __ ldr(R0, FieldAddress(R0, target::String::hash_offset(), kFourBytes),
+         kUnsignedFourBytes);
   __ SmiTag(R0);
   __ ret();
 }
@@ -1740,8 +1741,8 @@
   __ ldr(R0, Address(SP, 1 * target::kWordSize));  // Object.
   __ ldr(R1, Address(SP, 0 * target::kWordSize));  // Value.
   __ SmiUntag(R1);
-  __ str(R1, FieldAddress(R0, target::String::hash_offset(), kWord),
-         kUnsignedWord);
+  __ str(R1, FieldAddress(R0, target::String::hash_offset(), kFourBytes),
+         kUnsignedFourBytes);
   __ ret();
 }
 
@@ -1795,10 +1796,10 @@
 
   // this.codeUnitAt(i + start)
   __ ldr(R10, Address(R0, 0),
-         receiver_cid == kOneByteStringCid ? kUnsignedByte : kUnsignedHalfword);
+         receiver_cid == kOneByteStringCid ? kUnsignedByte : kUnsignedTwoBytes);
   // other.codeUnitAt(i)
   __ ldr(R11, Address(R2, 0),
-         other_cid == kOneByteStringCid ? kUnsignedByte : kUnsignedHalfword);
+         other_cid == kOneByteStringCid ? kUnsignedByte : kUnsignedTwoBytes);
   __ cmp(R10, Operand(R11));
   __ b(return_false, NE);
 
@@ -1883,7 +1884,7 @@
   __ b(normal_ir_body, NE);
   ASSERT(kSmiTagShift == 1);
   __ AddImmediate(R0, target::TwoByteString::data_offset() - kHeapObjectTag);
-  __ ldr(R1, Address(R0, R1), kUnsignedHalfword);
+  __ ldr(R1, Address(R0, R1), kUnsignedTwoBytes);
   __ CompareImmediate(R1, target::Symbols::kNumberOfOneCharCodeSymbols);
   __ b(normal_ir_body, GE);
   __ ldr(R0, Address(THR, target::Thread::predefined_symbols_address_offset()));
@@ -1910,7 +1911,8 @@
                                                 Label* normal_ir_body) {
   Label compute_hash;
   __ ldr(R1, Address(SP, 0 * target::kWordSize));  // OneByteString object.
-  __ ldr(R0, FieldAddress(R1, target::String::hash_offset()), kUnsignedWord);
+  __ ldr(R0, FieldAddress(R1, target::String::hash_offset()),
+         kUnsignedFourBytes);
   __ adds(R0, R0, Operand(R0));  // Smi tag the hash code, setting Z flag.
   __ b(&compute_hash, EQ);
   __ ret();  // Return if already computed.
@@ -1963,7 +1965,8 @@
   // return hash_ == 0 ? 1 : hash_;
   __ Bind(&done);
   __ csinc(R0, R0, ZR, NE);  // R0 <- (R0 != 0) ? R0 : (ZR + 1).
-  __ str(R0, FieldAddress(R1, target::String::hash_offset()), kUnsignedWord);
+  __ str(R0, FieldAddress(R1, target::String::hash_offset()),
+         kUnsignedFourBytes);
   __ SmiTag(R0);
   __ ret();
 }
@@ -2127,7 +2130,7 @@
   __ SmiUntag(R2);
   __ AddImmediate(R3, R0,
                   target::TwoByteString::data_offset() - kHeapObjectTag);
-  __ str(R2, Address(R3, R1), kUnsignedHalfword);
+  __ str(R2, Address(R3, R1), kUnsignedTwoBytes);
   __ ret();
 }
 
@@ -2200,8 +2203,8 @@
     __ AddImmediate(R0, 1);
     __ AddImmediate(R1, 1);
   } else if (string_cid == kTwoByteStringCid) {
-    __ ldr(R3, Address(R0), kUnsignedHalfword);
-    __ ldr(R4, Address(R1), kUnsignedHalfword);
+    __ ldr(R3, Address(R0), kUnsignedTwoBytes);
+    __ ldr(R4, Address(R1), kUnsignedTwoBytes);
     __ AddImmediate(R0, 2);
     __ AddImmediate(R1, 2);
   } else {
diff --git a/runtime/vm/compiler/asm_intrinsifier_ia32.cc b/runtime/vm/compiler/asm_intrinsifier_ia32.cc
index c1e0863..feef3e5 100644
--- a/runtime/vm/compiler/asm_intrinsifier_ia32.cc
+++ b/runtime/vm/compiler/asm_intrinsifier_ia32.cc
@@ -1910,7 +1910,8 @@
   __ cmpl(length_reg, Immediate(0));
   __ j(LESS, failure);
 
-  NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, EAX, failure, false));
+  NOT_IN_PRODUCT(
+      __ MaybeTraceAllocation(cid, EAX, failure, Assembler::kFarJump));
   if (length_reg != EDI) {
     __ movl(EDI, length_reg);
   }
diff --git a/runtime/vm/compiler/asm_intrinsifier_x64.cc b/runtime/vm/compiler/asm_intrinsifier_x64.cc
index 816f2fc..ac7112c 100644
--- a/runtime/vm/compiler/asm_intrinsifier_x64.cc
+++ b/runtime/vm/compiler/asm_intrinsifier_x64.cc
@@ -1935,7 +1935,7 @@
   __ cmpq(length_reg, Immediate(0));
   __ j(LESS, failure);
 
-  NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, failure, false));
+  NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, failure, Assembler::kFarJump));
   if (length_reg != RDI) {
     __ movq(RDI, length_reg);
   }
diff --git a/runtime/vm/compiler/assembler/assembler_arm.cc b/runtime/vm/compiler/assembler/assembler_arm.cc
index 8417f7c..a71c36b 100644
--- a/runtime/vm/compiler/assembler/assembler_arm.cc
+++ b/runtime/vm/compiler/assembler/assembler_arm.cc
@@ -567,18 +567,17 @@
                                             Register tmp1,
                                             bool enter_safepoint) {
   // Save exit frame information to enable stack walking.
-  StoreToOffset(kWord, exit_frame_fp, THR,
+  StoreToOffset(exit_frame_fp, THR,
                 target::Thread::top_exit_frame_info_offset());
 
-  StoreToOffset(kWord, exit_through_ffi, THR,
+  StoreToOffset(exit_through_ffi, THR,
                 target::Thread::exit_through_ffi_offset());
   Register tmp2 = exit_through_ffi;
 
   // Mark that the thread is executing native code.
-  StoreToOffset(kWord, destination_address, THR,
-                target::Thread::vm_tag_offset());
+  StoreToOffset(destination_address, THR, target::Thread::vm_tag_offset());
   LoadImmediate(tmp1, target::Thread::native_execution_state());
-  StoreToOffset(kWord, tmp1, THR, target::Thread::execution_state_offset());
+  StoreToOffset(tmp1, THR, target::Thread::execution_state_offset());
 
   if (enter_safepoint) {
     EnterSafepoint(tmp1, tmp2);
@@ -640,15 +639,14 @@
 
   // Mark that the thread is executing Dart code.
   LoadImmediate(state, target::Thread::vm_tag_dart_id());
-  StoreToOffset(kWord, state, THR, target::Thread::vm_tag_offset());
+  StoreToOffset(state, THR, target::Thread::vm_tag_offset());
   LoadImmediate(state, target::Thread::generated_execution_state());
-  StoreToOffset(kWord, state, THR, target::Thread::execution_state_offset());
+  StoreToOffset(state, THR, target::Thread::execution_state_offset());
 
   // Reset exit frame information in Isolate's mutator thread structure.
   LoadImmediate(state, 0);
-  StoreToOffset(kWord, state, THR,
-                target::Thread::top_exit_frame_info_offset());
-  StoreToOffset(kWord, state, THR, target::Thread::exit_through_ffi_offset());
+  StoreToOffset(state, THR, target::Thread::top_exit_frame_info_offset());
+  StoreToOffset(state, THR, target::Thread::exit_through_ffi_offset());
 }
 
 void Assembler::clrex() {
@@ -1214,11 +1212,11 @@
     case kByte:
     case kUnsignedByte:
       return 0;
-    case kHalfword:
-    case kUnsignedHalfword:
+    case kTwoBytes:
+    case kUnsignedTwoBytes:
       return 1;
-    case kWord:
-    case kUnsignedWord:
+    case kFourBytes:
+    case kUnsignedFourBytes:
       return 2;
     case kWordPair:
       return 3;
@@ -1388,14 +1386,14 @@
       code = 1 | (idx << 1);
       break;
     }
-    case kHalfword:
-    case kUnsignedHalfword: {
+    case kTwoBytes:
+    case kUnsignedTwoBytes: {
       ASSERT((idx >= 0) && (idx < 4));
       code = 2 | (idx << 2);
       break;
     }
-    case kWord:
-    case kUnsignedWord: {
+    case kFourBytes:
+    case kUnsignedFourBytes: {
       ASSERT((idx >= 0) && (idx < 2));
       code = 4 | (idx << 3);
       break;
@@ -1524,7 +1522,7 @@
   const int32_t offset =
       target::ObjectPool::element_offset(index) - kHeapObjectTag;
   int32_t offset_mask = 0;
-  if (Address::CanHoldLoadOffset(kWord, offset, &offset_mask)) {
+  if (Address::CanHoldLoadOffset(kFourBytes, offset, &offset_mask)) {
     ldr(rd, Address(pp, offset), cond);
   } else {
     int32_t offset_hi = offset & ~offset_mask;  // signed
@@ -1839,7 +1837,8 @@
                                       CanBeSmi can_value_be_smi,
                                       bool lr_reserved) {
   int32_t ignored = 0;
-  if (Address::CanHoldStoreOffset(kWord, offset - kHeapObjectTag, &ignored)) {
+  if (Address::CanHoldStoreOffset(kFourBytes, offset - kHeapObjectTag,
+                                  &ignored)) {
     StoreIntoObject(object, FieldAddress(object, offset), value,
                     can_value_be_smi, lr_reserved);
   } else {
@@ -1880,7 +1879,8 @@
                                                int32_t offset,
                                                Register value) {
   int32_t ignored = 0;
-  if (Address::CanHoldStoreOffset(kWord, offset - kHeapObjectTag, &ignored)) {
+  if (Address::CanHoldStoreOffset(kFourBytes, offset - kHeapObjectTag,
+                                  &ignored)) {
     StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value);
   } else {
     Register base = object == R9 ? R8 : R9;
@@ -1896,7 +1896,8 @@
                                                const Object& value) {
   ASSERT(IsOriginalObject(value));
   int32_t ignored = 0;
-  if (Address::CanHoldStoreOffset(kWord, offset - kHeapObjectTag, &ignored)) {
+  if (Address::CanHoldStoreOffset(kFourBytes, offset - kHeapObjectTag,
+                                  &ignored)) {
     StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value);
   } else {
     Register base = object == R9 ? R8 : R9;
@@ -2011,7 +2012,7 @@
       target::Isolate::cached_class_table_table_offset();
 
   LoadIsolate(result);
-  LoadFromOffset(kWord, result, result, table_offset);
+  LoadFromOffset(result, result, table_offset);
   ldr(result, Address(result, class_id, LSL, target::kWordSizeLog2));
 }
 
@@ -2210,13 +2211,13 @@
     case kArrayCid:
     case kImmutableArrayCid:
     case kTypeArgumentsCid:
-      return kWord;
+      return kFourBytes;
     case kOneByteStringCid:
     case kExternalOneByteStringCid:
       return kByte;
     case kTwoByteStringCid:
     case kExternalTwoByteStringCid:
-      return kHalfword;
+      return kTwoBytes;
     case kTypedDataInt8ArrayCid:
       return kByte;
     case kTypedDataUint8ArrayCid:
@@ -2225,13 +2226,13 @@
     case kExternalTypedDataUint8ClampedArrayCid:
       return kUnsignedByte;
     case kTypedDataInt16ArrayCid:
-      return kHalfword;
+      return kTwoBytes;
     case kTypedDataUint16ArrayCid:
-      return kUnsignedHalfword;
+      return kUnsignedTwoBytes;
     case kTypedDataInt32ArrayCid:
-      return kWord;
+      return kFourBytes;
     case kTypedDataUint32ArrayCid:
-      return kUnsignedWord;
+      return kUnsignedFourBytes;
     case kTypedDataInt64ArrayCid:
     case kTypedDataUint64ArrayCid:
       return kDWord;
@@ -2257,15 +2258,15 @@
                                 int32_t* offset_mask) {
   switch (size) {
     case kByte:
-    case kHalfword:
-    case kUnsignedHalfword:
+    case kTwoBytes:
+    case kUnsignedTwoBytes:
     case kWordPair: {
       *offset_mask = 0xff;
       return Utils::IsAbsoluteUint(8, offset);  // Addressing mode 3.
     }
     case kUnsignedByte:
-    case kWord:
-    case kUnsignedWord: {
+    case kFourBytes:
+    case kUnsignedFourBytes: {
       *offset_mask = 0xfff;
       return Utils::IsAbsoluteUint(12, offset);  // Addressing mode 2.
     }
@@ -2290,16 +2291,16 @@
                                  int32_t offset,
                                  int32_t* offset_mask) {
   switch (size) {
-    case kHalfword:
-    case kUnsignedHalfword:
+    case kTwoBytes:
+    case kUnsignedTwoBytes:
     case kWordPair: {
       *offset_mask = 0xff;
       return Utils::IsAbsoluteUint(8, offset);  // Addressing mode 3.
     }
     case kByte:
     case kUnsignedByte:
-    case kWord:
-    case kUnsignedWord: {
+    case kFourBytes:
+    case kUnsignedFourBytes: {
       *offset_mask = 0xfff;
       return Utils::IsAbsoluteUint(12, offset);  // Addressing mode 2.
     }
@@ -2639,7 +2640,7 @@
 void Assembler::BranchLinkOffset(Register base, int32_t offset) {
   ASSERT(base != PC);
   ASSERT(base != IP);
-  LoadFromOffset(kWord, IP, base, offset);
+  LoadFromOffset(IP, base, offset);
   blx(IP);  // Use blx instruction so that the return branch prediction works.
 }
 
@@ -2698,10 +2699,10 @@
   }
 }
 
-void Assembler::LoadFromOffset(OperandSize size,
-                               Register reg,
+void Assembler::LoadFromOffset(Register reg,
                                Register base,
                                int32_t offset,
+                               OperandSize size,
                                Condition cond) {
   ASSERT(size != kWordPair);
   int32_t offset_mask = 0;
@@ -2718,13 +2719,13 @@
     case kUnsignedByte:
       ldrb(reg, Address(base, offset), cond);
       break;
-    case kHalfword:
+    case kTwoBytes:
       ldrsh(reg, Address(base, offset), cond);
       break;
-    case kUnsignedHalfword:
+    case kUnsignedTwoBytes:
       ldrh(reg, Address(base, offset), cond);
       break;
-    case kWord:
+    case kFourBytes:
       ldr(reg, Address(base, offset), cond);
       break;
     default:
@@ -2732,10 +2733,10 @@
   }
 }
 
-void Assembler::StoreToOffset(OperandSize size,
-                              Register reg,
+void Assembler::StoreToOffset(Register reg,
                               Register base,
                               int32_t offset,
+                              OperandSize size,
                               Condition cond) {
   ASSERT(size != kWordPair);
   int32_t offset_mask = 0;
@@ -2750,10 +2751,10 @@
     case kByte:
       strb(reg, Address(base, offset), cond);
       break;
-    case kHalfword:
+    case kTwoBytes:
       strh(reg, Address(base, offset), cond);
       break;
-    case kWord:
+    case kFourBytes:
       str(reg, Address(base, offset), cond);
       break;
     default:
@@ -2844,16 +2845,12 @@
     LoadDFromOffset(dtmp, src, target::Double::value_offset() - kHeapObjectTag);
     StoreDToOffset(dtmp, dst, target::Double::value_offset() - kHeapObjectTag);
   } else {
-    LoadFromOffset(kWord, tmp1, src,
-                   target::Double::value_offset() - kHeapObjectTag);
-    LoadFromOffset(
-        kWord, tmp2, src,
-        target::Double::value_offset() + target::kWordSize - kHeapObjectTag);
-    StoreToOffset(kWord, tmp1, dst,
-                  target::Double::value_offset() - kHeapObjectTag);
-    StoreToOffset(
-        kWord, tmp2, dst,
-        target::Double::value_offset() + target::kWordSize - kHeapObjectTag);
+    LoadFieldFromOffset(tmp1, src, target::Double::value_offset());
+    LoadFieldFromOffset(tmp2, src,
+                        target::Double::value_offset() + target::kWordSize);
+    StoreFieldToOffset(tmp1, dst, target::Double::value_offset());
+    StoreFieldToOffset(tmp2, dst,
+                       target::Double::value_offset() + target::kWordSize);
   }
 }
 
@@ -2868,31 +2865,23 @@
     StoreMultipleDToOffset(dtmp, 2, dst,
                            target::Float32x4::value_offset() - kHeapObjectTag);
   } else {
-    LoadFromOffset(kWord, tmp1, src,
-                   (target::Float32x4::value_offset() + 0 * target::kWordSize) -
-                       kHeapObjectTag);
-    LoadFromOffset(kWord, tmp2, src,
-                   (target::Float32x4::value_offset() + 1 * target::kWordSize) -
-                       kHeapObjectTag);
-    StoreToOffset(kWord, tmp1, dst,
-                  (target::Float32x4::value_offset() + 0 * target::kWordSize) -
-                      kHeapObjectTag);
-    StoreToOffset(kWord, tmp2, dst,
-                  (target::Float32x4::value_offset() + 1 * target::kWordSize) -
-                      kHeapObjectTag);
+    LoadFieldFromOffset(
+        tmp1, src, target::Float32x4::value_offset() + 0 * target::kWordSize);
+    LoadFieldFromOffset(
+        tmp2, src, target::Float32x4::value_offset() + 1 * target::kWordSize);
+    StoreFieldToOffset(
+        tmp1, dst, target::Float32x4::value_offset() + 0 * target::kWordSize);
+    StoreFieldToOffset(
+        tmp2, dst, target::Float32x4::value_offset() + 1 * target::kWordSize);
 
-    LoadFromOffset(kWord, tmp1, src,
-                   (target::Float32x4::value_offset() + 2 * target::kWordSize) -
-                       kHeapObjectTag);
-    LoadFromOffset(kWord, tmp2, src,
-                   (target::Float32x4::value_offset() + 3 * target::kWordSize) -
-                       kHeapObjectTag);
-    StoreToOffset(kWord, tmp1, dst,
-                  (target::Float32x4::value_offset() + 2 * target::kWordSize) -
-                      kHeapObjectTag);
-    StoreToOffset(kWord, tmp2, dst,
-                  (target::Float32x4::value_offset() + 3 * target::kWordSize) -
-                      kHeapObjectTag);
+    LoadFieldFromOffset(
+        tmp1, src, target::Float32x4::value_offset() + 2 * target::kWordSize);
+    LoadFieldFromOffset(
+        tmp2, src, target::Float32x4::value_offset() + 3 * target::kWordSize);
+    StoreFieldToOffset(
+        tmp1, dst, target::Float32x4::value_offset() + 2 * target::kWordSize);
+    StoreFieldToOffset(
+        tmp2, dst, target::Float32x4::value_offset() + 3 * target::kWordSize);
   }
 }
 
@@ -2907,31 +2896,23 @@
     StoreMultipleDToOffset(dtmp, 2, dst,
                            target::Float64x2::value_offset() - kHeapObjectTag);
   } else {
-    LoadFromOffset(kWord, tmp1, src,
-                   (target::Float64x2::value_offset() + 0 * target::kWordSize) -
-                       kHeapObjectTag);
-    LoadFromOffset(kWord, tmp2, src,
-                   (target::Float64x2::value_offset() + 1 * target::kWordSize) -
-                       kHeapObjectTag);
-    StoreToOffset(kWord, tmp1, dst,
-                  (target::Float64x2::value_offset() + 0 * target::kWordSize) -
-                      kHeapObjectTag);
-    StoreToOffset(kWord, tmp2, dst,
-                  (target::Float64x2::value_offset() + 1 * target::kWordSize) -
-                      kHeapObjectTag);
+    LoadFieldFromOffset(
+        tmp1, src, target::Float64x2::value_offset() + 0 * target::kWordSize);
+    LoadFieldFromOffset(
+        tmp2, src, target::Float64x2::value_offset() + 1 * target::kWordSize);
+    StoreFieldToOffset(
+        tmp1, dst, target::Float64x2::value_offset() + 0 * target::kWordSize);
+    StoreFieldToOffset(
+        tmp2, dst, target::Float64x2::value_offset() + 1 * target::kWordSize);
 
-    LoadFromOffset(kWord, tmp1, src,
-                   (target::Float64x2::value_offset() + 2 * target::kWordSize) -
-                       kHeapObjectTag);
-    LoadFromOffset(kWord, tmp2, src,
-                   (target::Float64x2::value_offset() + 3 * target::kWordSize) -
-                       kHeapObjectTag);
-    StoreToOffset(kWord, tmp1, dst,
-                  (target::Float64x2::value_offset() + 2 * target::kWordSize) -
-                      kHeapObjectTag);
-    StoreToOffset(kWord, tmp2, dst,
-                  (target::Float64x2::value_offset() + 3 * target::kWordSize) -
-                      kHeapObjectTag);
+    LoadFieldFromOffset(
+        tmp1, src, target::Float64x2::value_offset() + 2 * target::kWordSize);
+    LoadFieldFromOffset(
+        tmp2, src, target::Float64x2::value_offset() + 3 * target::kWordSize);
+    StoreFieldToOffset(
+        tmp1, dst, target::Float64x2::value_offset() + 2 * target::kWordSize);
+    StoreFieldToOffset(
+        tmp2, dst, target::Float64x2::value_offset() + 3 * target::kWordSize);
   }
 }
 
@@ -3553,8 +3534,8 @@
   ASSERT(array != IP);
   ASSERT(index != IP);
   const Register base = is_load ? IP : index;
-  if ((offset != 0) || (is_load && (size == kByte)) || (size == kHalfword) ||
-      (size == kUnsignedHalfword) || (size == kSWord) || (size == kDWord) ||
+  if ((offset != 0) || (is_load && (size == kByte)) || (size == kTwoBytes) ||
+      (size == kUnsignedTwoBytes) || (size == kSWord) || (size == kDWord) ||
       (size == kRegList)) {
     if (shift < 0) {
       ASSERT(shift == -1);
diff --git a/runtime/vm/compiler/assembler/assembler_arm.h b/runtime/vm/compiler/assembler/assembler_arm.h
index 71c5428f..2d6540a 100644
--- a/runtime/vm/compiler/assembler/assembler_arm.h
+++ b/runtime/vm/compiler/assembler/assembler_arm.h
@@ -32,21 +32,6 @@
 class RegisterSet;
 class RuntimeEntry;
 
-// TODO(vegorov) these enumerations are temporarily moved out of compiler
-// namespace to make refactoring easier.
-enum OperandSize {
-  kByte,
-  kUnsignedByte,
-  kHalfword,
-  kUnsignedHalfword,
-  kWord,
-  kUnsignedWord,
-  kWordPair,
-  kSWord,
-  kDWord,
-  kRegList,
-};
-
 // Load/store multiple addressing mode.
 enum BlockAddressMode {
   // bit encoding P U W
@@ -398,14 +383,17 @@
   }
 
   void Bind(Label* label);
-  void Jump(Label* label) { b(label); }
+  // Unconditional jump to a given label. [distance] is ignored on ARM.
+  void Jump(Label* label, JumpDistance distance = kFarJump) { b(label); }
+  // Unconditional jump to a given address in memory.
+  void Jump(const Address& address) { Branch(address); }
 
   void LoadField(Register dst, FieldAddress address) { ldr(dst, address); }
   void LoadMemoryValue(Register dst, Register base, int32_t offset) {
-    LoadFromOffset(kWord, dst, base, offset, AL);
+    LoadFromOffset(dst, base, offset);
   }
   void StoreMemoryValue(Register src, Register base, int32_t offset) {
-    StoreToOffset(kWord, src, base, offset, AL);
+    StoreToOffset(src, base, offset);
   }
   void LoadAcquire(Register dst, Register address, int32_t offset = 0) {
     ldr(dst, Address(address, offset));
@@ -834,7 +822,7 @@
 
   void LoadIsolate(Register rd);
 
-  // Load word from pool from the given offset using encoding that
+  // Load word from the pool at the given index using encoding that
   // InstructionPattern::DecodeLoadWordFromPool can decode.
   void LoadWordFromPoolIndex(Register rd,
                              intptr_t index,
@@ -930,29 +918,41 @@
 
   intptr_t FindImmediate(int32_t imm);
   bool CanLoadFromObjectPool(const Object& object) const;
-  void LoadFromOffset(OperandSize type,
-                      Register reg,
+  void LoadFromOffset(Register reg,
                       Register base,
                       int32_t offset,
+                      OperandSize type = kFourBytes,
                       Condition cond = AL);
-  void LoadFieldFromOffset(OperandSize type,
-                           Register reg,
+  void LoadFieldFromOffset(Register reg,
                            Register base,
                            int32_t offset,
+                           OperandSize type = kFourBytes,
                            Condition cond = AL) {
-    LoadFromOffset(type, reg, base, offset - kHeapObjectTag, cond);
+    LoadFromOffset(reg, base, offset - kHeapObjectTag, type, cond);
   }
-  void StoreToOffset(OperandSize type,
-                     Register reg,
+  // For loading indexed payloads out of tagged objects like Arrays. If the
+  // payload elements are word-sized, use TIMES_HALF_WORD_SIZE when the value
+  // in [index] is a Smi and TIMES_WORD_SIZE when it is unboxed.
+  void LoadIndexedPayload(Register reg,
+                          Register base,
+                          int32_t payload_start,
+                          Register index,
+                          ScaleFactor scale,
+                          OperandSize type = kFourBytes) {
+    add(reg, base, Operand(index, LSL, scale));
+    LoadFromOffset(reg, reg, payload_start - kHeapObjectTag, type);
+  }
+  void StoreToOffset(Register reg,
                      Register base,
                      int32_t offset,
+                     OperandSize type = kFourBytes,
                      Condition cond = AL);
-  void StoreFieldToOffset(OperandSize type,
-                          Register reg,
+  void StoreFieldToOffset(Register reg,
                           Register base,
                           int32_t offset,
+                          OperandSize type = kFourBytes,
                           Condition cond = AL) {
-    StoreToOffset(type, reg, base, offset - kHeapObjectTag, cond);
+    StoreToOffset(reg, base, offset - kHeapObjectTag, type, cond);
   }
   void LoadSFromOffset(SRegister reg,
                        Register base,
@@ -1012,7 +1012,13 @@
   void PopNativeCalleeSavedRegisters();
 
   void CompareRegisters(Register rn, Register rm) { cmp(rn, Operand(rm)); }
-  void BranchIf(Condition condition, Label* label) { b(label, condition); }
+  // Branches to the given label if the condition holds.
+  // [distance] is ignored on ARM.
+  void BranchIf(Condition condition,
+                Label* label,
+                JumpDistance distance = kFarJump) {
+    b(label, condition);
+  }
 
   void MoveRegister(Register rd, Register rm, Condition cond = AL);
 
diff --git a/runtime/vm/compiler/assembler/assembler_arm64.cc b/runtime/vm/compiler/assembler/assembler_arm64.cc
index 67d07c8..c897811 100644
--- a/runtime/vm/compiler/assembler/assembler_arm64.cc
+++ b/runtime/vm/compiler/assembler/assembler_arm64.cc
@@ -680,11 +680,11 @@
                                      Register rn,
                                      int64_t imm,
                                      OperandSize sz) {
-  ASSERT(sz == kDoubleWord || sz == kWord);
+  ASSERT(sz == kEightBytes || sz == kFourBytes);
   Operand op;
   if (Operand::CanHold(imm, kXRegSizeInBits, &op) == Operand::Immediate) {
     // Handles imm == kMinInt64.
-    if (sz == kDoubleWord) {
+    if (sz == kEightBytes) {
       adds(dest, rn, op);
     } else {
       addsw(dest, rn, op);
@@ -692,7 +692,7 @@
   } else if (Operand::CanHold(-imm, kXRegSizeInBits, &op) ==
              Operand::Immediate) {
     ASSERT(imm != kMinInt64);  // Would cause erroneous overflow detection.
-    if (sz == kDoubleWord) {
+    if (sz == kEightBytes) {
       subs(dest, rn, op);
     } else {
       subsw(dest, rn, op);
@@ -701,7 +701,7 @@
     // TODO(zra): Try adding top 12 bits, then bottom 12 bits.
     ASSERT(rn != TMP2);
     LoadImmediate(TMP2, imm);
-    if (sz == kDoubleWord) {
+    if (sz == kEightBytes) {
       adds(dest, rn, Operand(TMP2));
     } else {
       addsw(dest, rn, Operand(TMP2));
@@ -714,10 +714,10 @@
                                      int64_t imm,
                                      OperandSize sz) {
   Operand op;
-  ASSERT(sz == kDoubleWord || sz == kWord);
+  ASSERT(sz == kEightBytes || sz == kFourBytes);
   if (Operand::CanHold(imm, kXRegSizeInBits, &op) == Operand::Immediate) {
     // Handles imm == kMinInt64.
-    if (sz == kDoubleWord) {
+    if (sz == kEightBytes) {
       subs(dest, rn, op);
     } else {
       subsw(dest, rn, op);
@@ -725,7 +725,7 @@
   } else if (Operand::CanHold(-imm, kXRegSizeInBits, &op) ==
              Operand::Immediate) {
     ASSERT(imm != kMinInt64);  // Would cause erroneous overflow detection.
-    if (sz == kDoubleWord) {
+    if (sz == kEightBytes) {
       adds(dest, rn, op);
     } else {
       addsw(dest, rn, op);
@@ -734,7 +734,7 @@
     // TODO(zra): Try subtracting top 12 bits, then bottom 12 bits.
     ASSERT(rn != TMP2);
     LoadImmediate(TMP2, imm);
-    if (sz == kDoubleWord) {
+    if (sz == kEightBytes) {
       subs(dest, rn, Operand(TMP2));
     } else {
       subsw(dest, rn, Operand(TMP2));
@@ -1141,7 +1141,7 @@
 void Assembler::ExtractClassIdFromTags(Register result, Register tags) {
   ASSERT(target::ObjectLayout::kClassIdTagPos == 16);
   ASSERT(target::ObjectLayout::kClassIdTagSize == 16);
-  LsrImmediate(result, tags, target::ObjectLayout::kClassIdTagPos, kWord);
+  LsrImmediate(result, tags, target::ObjectLayout::kClassIdTagPos, kFourBytes);
 }
 
 void Assembler::ExtractInstanceSizeFromTags(Register result, Register tags) {
@@ -1159,7 +1159,7 @@
       target::Object::tags_offset() +
       target::ObjectLayout::kClassIdTagPos / kBitsPerByte;
   LoadFromOffset(result, object, class_id_offset - kHeapObjectTag,
-                 kUnsignedHalfword);
+                 kUnsignedTwoBytes);
 }
 
 void Assembler::LoadClassById(Register result, Register class_id) {
@@ -2033,7 +2033,7 @@
 
 void Assembler::GenerateXCbzTbz(Register rn, Condition cond, Label* label) {
   constexpr int32_t bit_no = 63;
-  constexpr OperandSize sz = kDoubleWord;
+  constexpr OperandSize sz = kEightBytes;
   ASSERT(rn != CSP);
   switch (cond) {
     case EQ:  // equal
diff --git a/runtime/vm/compiler/assembler/assembler_arm64.h b/runtime/vm/compiler/assembler/assembler_arm64.h
index 377cc4e..27e0478 100644
--- a/runtime/vm/compiler/assembler/assembler_arm64.h
+++ b/runtime/vm/compiler/assembler/assembler_arm64.h
@@ -32,6 +32,50 @@
 
 namespace compiler {
 
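+// Returns the base-2 log of the operand width in bytes, e.g. 2 for
+// kFourBytes and 3 for kEightBytes.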
+static inline int Log2OperandSizeBytes(OperandSize os) {
+  switch (os) {
+    case kByte:
+    case kUnsignedByte:
+      return 0;
+    case kTwoBytes:
+    case kUnsignedTwoBytes:
+      return 1;
+    case kFourBytes:
+    case kUnsignedFourBytes:
+    case kSWord:
+      return 2;
+    case kEightBytes:
+    case kDWord:
+      return 3;
+    case kQWord:
+      return 4;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  return -1;
+}
+
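+// Whether a load of the given size sign-extends (as opposed to
+// zero-extending) its result.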
+static inline bool IsSignedOperand(OperandSize os) {
+  switch (os) {
+    case kByte:
+    case kTwoBytes:
+    case kFourBytes:
+      return true;
+    case kUnsignedByte:
+    case kUnsignedTwoBytes:
+    case kUnsignedFourBytes:
+    case kEightBytes:
+    case kSWord:
+    case kDWord:
+    case kQWord:
+      return false;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  return false;
+}
+
 class Immediate : public ValueObject {
  public:
   explicit Immediate(int64_t value) : value_(value) {}
@@ -135,7 +179,7 @@
   Address(Register rn,
           int32_t offset = 0,
           AddressType at = Offset,
-          OperandSize sz = kDoubleWord) {
+          OperandSize sz = kEightBytes) {
     ASSERT((rn != kNoRegister) && (rn != R31) && (rn != ZR));
     ASSERT(CanHoldOffset(offset, at, sz));
     log2sz_ = -1;
@@ -190,11 +234,11 @@
   Address(Register rn,
           Register offset,
           AddressType at,
-          OperandSize sz = kDoubleWord);
+          OperandSize sz = kEightBytes);
 
   static bool CanHoldOffset(int32_t offset,
                             AddressType at = Offset,
-                            OperandSize sz = kDoubleWord) {
+                            OperandSize sz = kEightBytes) {
     if (at == Offset) {
       // Offset fits in 12 bit unsigned and has right alignment for sz,
       // or fits in 9 bit signed offset with no alignment restriction.
@@ -230,7 +274,7 @@
   static Address Pair(Register rn,
                       int32_t offset = 0,
                       AddressType at = PairOffset,
-                      OperandSize sz = kDoubleWord) {
+                      OperandSize sz = kEightBytes) {
     return Address(rn, offset, at, sz);
   }
 
@@ -267,13 +311,13 @@
       case kArrayCid:
       case kImmutableArrayCid:
       case kTypeArgumentsCid:
-        return kWord;
+        return kFourBytes;
       case kOneByteStringCid:
       case kExternalOneByteStringCid:
         return kByte;
       case kTwoByteStringCid:
       case kExternalTwoByteStringCid:
-        return kHalfword;
+        return kTwoBytes;
       case kTypedDataInt8ArrayCid:
         return kByte;
       case kTypedDataUint8ArrayCid:
@@ -282,13 +326,13 @@
       case kExternalTypedDataUint8ClampedArrayCid:
         return kUnsignedByte;
       case kTypedDataInt16ArrayCid:
-        return kHalfword;
+        return kTwoBytes;
       case kTypedDataUint16ArrayCid:
-        return kUnsignedHalfword;
+        return kUnsignedTwoBytes;
       case kTypedDataInt32ArrayCid:
-        return kWord;
+        return kFourBytes;
       case kTypedDataUint32ArrayCid:
-        return kUnsignedWord;
+        return kUnsignedFourBytes;
       case kTypedDataInt64ArrayCid:
       case kTypedDataUint64ArrayCid:
         return kDWord;
@@ -326,11 +370,11 @@
 
 class FieldAddress : public Address {
  public:
-  FieldAddress(Register base, int32_t disp, OperandSize sz = kDoubleWord)
+  FieldAddress(Register base, int32_t disp, OperandSize sz = kEightBytes)
       : Address(base, disp - kHeapObjectTag, Offset, sz) {}
 
   // This addressing mode does not exist.
-  FieldAddress(Register base, Register disp, OperandSize sz = kDoubleWord);
+  FieldAddress(Register base, Register disp, OperandSize sz = kEightBytes);
 
   FieldAddress(const FieldAddress& other) : Address(other) {}
 
@@ -494,14 +538,20 @@
   }
 
   void Bind(Label* label);
-  void Jump(Label* label) { b(label); }
+  // Unconditional jump to a given label. [distance] is ignored on ARM.
+  void Jump(Label* label, JumpDistance distance = kFarJump) { b(label); }
+  // Unconditional jump to a given address in memory. Clobbers TMP.
+  void Jump(const Address& address) {
+    ldr(TMP, address);
+    br(TMP);
+  }
 
   void LoadField(Register dst, FieldAddress address) { ldr(dst, address); }
   void LoadMemoryValue(Register dst, Register base, int32_t offset) {
-    LoadFromOffset(dst, base, offset, kDoubleWord);
+    LoadFromOffset(dst, base, offset, kEightBytes);
   }
   void StoreMemoryValue(Register src, Register base, int32_t offset) {
-    StoreToOffset(src, base, offset, kDoubleWord);
+    StoreToOffset(src, base, offset, kEightBytes);
   }
   void LoadAcquire(Register dst, Register address, int32_t offset = 0) {
     if (offset != 0) {
@@ -579,54 +629,54 @@
   // For add and sub, to use CSP for rn, o must be of type Operand::Extend.
   // For an unmodified rm in this case, use Operand(rm, UXTX, 0);
   void add(Register rd, Register rn, Operand o) {
-    AddSubHelper(kDoubleWord, false, false, rd, rn, o);
+    AddSubHelper(kEightBytes, false, false, rd, rn, o);
   }
   void adds(Register rd, Register rn, Operand o) {
-    AddSubHelper(kDoubleWord, true, false, rd, rn, o);
+    AddSubHelper(kEightBytes, true, false, rd, rn, o);
   }
   void addw(Register rd, Register rn, Operand o) {
-    AddSubHelper(kWord, false, false, rd, rn, o);
+    AddSubHelper(kFourBytes, false, false, rd, rn, o);
   }
   void addsw(Register rd, Register rn, Operand o) {
-    AddSubHelper(kWord, true, false, rd, rn, o);
+    AddSubHelper(kFourBytes, true, false, rd, rn, o);
   }
   void sub(Register rd, Register rn, Operand o) {
-    AddSubHelper(kDoubleWord, false, true, rd, rn, o);
+    AddSubHelper(kEightBytes, false, true, rd, rn, o);
   }
   void subs(Register rd, Register rn, Operand o) {
-    AddSubHelper(kDoubleWord, true, true, rd, rn, o);
+    AddSubHelper(kEightBytes, true, true, rd, rn, o);
   }
   void subw(Register rd, Register rn, Operand o) {
-    AddSubHelper(kWord, false, true, rd, rn, o);
+    AddSubHelper(kFourBytes, false, true, rd, rn, o);
   }
   void subsw(Register rd, Register rn, Operand o) {
-    AddSubHelper(kWord, true, true, rd, rn, o);
+    AddSubHelper(kFourBytes, true, true, rd, rn, o);
   }
 
   // Addition and subtraction with carry.
   void adc(Register rd, Register rn, Register rm) {
-    AddSubWithCarryHelper(kDoubleWord, false, false, rd, rn, rm);
+    AddSubWithCarryHelper(kEightBytes, false, false, rd, rn, rm);
   }
   void adcs(Register rd, Register rn, Register rm) {
-    AddSubWithCarryHelper(kDoubleWord, true, false, rd, rn, rm);
+    AddSubWithCarryHelper(kEightBytes, true, false, rd, rn, rm);
   }
   void adcw(Register rd, Register rn, Register rm) {
-    AddSubWithCarryHelper(kWord, false, false, rd, rn, rm);
+    AddSubWithCarryHelper(kFourBytes, false, false, rd, rn, rm);
   }
   void adcsw(Register rd, Register rn, Register rm) {
-    AddSubWithCarryHelper(kWord, true, false, rd, rn, rm);
+    AddSubWithCarryHelper(kFourBytes, true, false, rd, rn, rm);
   }
   void sbc(Register rd, Register rn, Register rm) {
-    AddSubWithCarryHelper(kDoubleWord, false, true, rd, rn, rm);
+    AddSubWithCarryHelper(kEightBytes, false, true, rd, rn, rm);
   }
   void sbcs(Register rd, Register rn, Register rm) {
-    AddSubWithCarryHelper(kDoubleWord, true, true, rd, rn, rm);
+    AddSubWithCarryHelper(kEightBytes, true, true, rd, rn, rm);
   }
   void sbcw(Register rd, Register rn, Register rm) {
-    AddSubWithCarryHelper(kWord, false, true, rd, rn, rm);
+    AddSubWithCarryHelper(kFourBytes, false, true, rd, rn, rm);
   }
   void sbcsw(Register rd, Register rn, Register rm) {
-    AddSubWithCarryHelper(kWord, true, true, rd, rn, rm);
+    AddSubWithCarryHelper(kFourBytes, true, true, rd, rn, rm);
   }
 
   // PC relative immediate add. imm is in bytes.
@@ -640,7 +690,7 @@
            Register rn,
            int r_imm,
            int s_imm,
-           OperandSize size = kDoubleWord) {
+           OperandSize size = kEightBytes) {
     EmitBitfieldOp(BFM, rd, rn, r_imm, s_imm, size);
   }
 
@@ -649,7 +699,7 @@
             Register rn,
             int r_imm,
             int s_imm,
-            OperandSize size = kDoubleWord) {
+            OperandSize size = kEightBytes) {
     EmitBitfieldOp(SBFM, rd, rn, r_imm, s_imm, size);
   }
 
@@ -658,7 +708,7 @@
             Register rn,
             int r_imm,
             int s_imm,
-            OperandSize size = kDoubleWord) {
+            OperandSize size = kEightBytes) {
     EmitBitfieldOp(UBFM, rd, rn, r_imm, s_imm, size);
   }
 
@@ -668,8 +718,8 @@
            Register rn,
            int low_bit,
            int width,
-           OperandSize size = kDoubleWord) {
-    int wordsize = size == kDoubleWord ? 64 : 32;
+           OperandSize size = kEightBytes) {
+    int wordsize = size == kEightBytes ? 64 : 32;
     EmitBitfieldOp(BFM, rd, rn, -low_bit & (wordsize - 1), width - 1, size);
   }
 
@@ -679,7 +729,7 @@
              Register rn,
              int low_bit,
              int width,
-             OperandSize size = kDoubleWord) {
+             OperandSize size = kEightBytes) {
     EmitBitfieldOp(BFM, rd, rn, low_bit, low_bit + width - 1, size);
   }
 
@@ -690,8 +740,8 @@
              Register rn,
              int low_bit,
              int width,
-             OperandSize size = kDoubleWord) {
-    int wordsize = size == kDoubleWord ? 64 : 32;
+             OperandSize size = kEightBytes) {
+    int wordsize = size == kEightBytes ? 64 : 32;
     EmitBitfieldOp(SBFM, rd, rn, (wordsize - low_bit) & (wordsize - 1),
                    width - 1, size);
   }
@@ -702,7 +752,7 @@
             Register rn,
             int low_bit,
             int width,
-            OperandSize size = kDoubleWord) {
+            OperandSize size = kEightBytes) {
     EmitBitfieldOp(SBFM, rd, rn, low_bit, low_bit + width - 1, size);
   }
 
@@ -712,8 +762,8 @@
              Register rn,
              int low_bit,
              int width,
-             OperandSize size = kDoubleWord) {
-    int wordsize = size == kDoubleWord ? 64 : 32;
+             OperandSize size = kEightBytes) {
+    int wordsize = size == kEightBytes ? 64 : 32;
     ASSERT(width > 0);
     ASSERT(low_bit < wordsize);
     EmitBitfieldOp(UBFM, rd, rn, (-low_bit) & (wordsize - 1), width - 1, size);
@@ -725,38 +775,38 @@
             Register rn,
             int low_bit,
             int width,
-            OperandSize size = kDoubleWord) {
+            OperandSize size = kEightBytes) {
     EmitBitfieldOp(UBFM, rd, rn, low_bit, low_bit + width - 1, size);
   }
 
   // Sign extend byte->64 bit.
   void sxtb(Register rd, Register rn) {
-    EmitBitfieldOp(SBFM, rd, rn, 0, 7, kDoubleWord);
+    EmitBitfieldOp(SBFM, rd, rn, 0, 7, kEightBytes);
   }
 
   // Sign extend halfword->64 bit.
   void sxth(Register rd, Register rn) {
-    EmitBitfieldOp(SBFM, rd, rn, 0, 15, kDoubleWord);
+    EmitBitfieldOp(SBFM, rd, rn, 0, 15, kEightBytes);
   }
 
   // Sign extend word->64 bit.
   void sxtw(Register rd, Register rn) {
-    EmitBitfieldOp(SBFM, rd, rn, 0, 31, kDoubleWord);
+    EmitBitfieldOp(SBFM, rd, rn, 0, 31, kEightBytes);
   }
 
   // Zero/unsigned extend byte->64 bit.
   void uxtb(Register rd, Register rn) {
-    EmitBitfieldOp(UBFM, rd, rn, 0, 7, kDoubleWord);
+    EmitBitfieldOp(UBFM, rd, rn, 0, 7, kEightBytes);
   }
 
   // Zero/unsigned extend halfword->64 bit.
   void uxth(Register rd, Register rn) {
-    EmitBitfieldOp(UBFM, rd, rn, 0, 15, kDoubleWord);
+    EmitBitfieldOp(UBFM, rd, rn, 0, 15, kEightBytes);
   }
 
   // Zero/unsigned extend word->64 bit.
   void uxtw(Register rd, Register rn) {
-    EmitBitfieldOp(UBFM, rd, rn, 0, 31, kDoubleWord);
+    EmitBitfieldOp(UBFM, rd, rn, 0, 31, kEightBytes);
   }
 
   // Logical immediate operations.
@@ -765,153 +815,153 @@
     const bool immok =
         Operand::IsImmLogical(imm.value(), kXRegSizeInBits, &imm_op);
     ASSERT(immok);
-    EmitLogicalImmOp(ANDI, rd, rn, imm_op, kDoubleWord);
+    EmitLogicalImmOp(ANDI, rd, rn, imm_op, kEightBytes);
   }
   void orri(Register rd, Register rn, const Immediate& imm) {
     Operand imm_op;
     const bool immok =
         Operand::IsImmLogical(imm.value(), kXRegSizeInBits, &imm_op);
     ASSERT(immok);
-    EmitLogicalImmOp(ORRI, rd, rn, imm_op, kDoubleWord);
+    EmitLogicalImmOp(ORRI, rd, rn, imm_op, kEightBytes);
   }
   void eori(Register rd, Register rn, const Immediate& imm) {
     Operand imm_op;
     const bool immok =
         Operand::IsImmLogical(imm.value(), kXRegSizeInBits, &imm_op);
     ASSERT(immok);
-    EmitLogicalImmOp(EORI, rd, rn, imm_op, kDoubleWord);
+    EmitLogicalImmOp(EORI, rd, rn, imm_op, kEightBytes);
   }
   void andis(Register rd, Register rn, const Immediate& imm) {
     Operand imm_op;
     const bool immok =
         Operand::IsImmLogical(imm.value(), kXRegSizeInBits, &imm_op);
     ASSERT(immok);
-    EmitLogicalImmOp(ANDIS, rd, rn, imm_op, kDoubleWord);
+    EmitLogicalImmOp(ANDIS, rd, rn, imm_op, kEightBytes);
   }
 
   // Logical (shifted) register operations.
   void and_(Register rd, Register rn, Operand o) {
-    EmitLogicalShiftOp(AND, rd, rn, o, kDoubleWord);
+    EmitLogicalShiftOp(AND, rd, rn, o, kEightBytes);
   }
   void andw_(Register rd, Register rn, Operand o) {
-    EmitLogicalShiftOp(AND, rd, rn, o, kWord);
+    EmitLogicalShiftOp(AND, rd, rn, o, kFourBytes);
   }
   void bic(Register rd, Register rn, Operand o) {
-    EmitLogicalShiftOp(BIC, rd, rn, o, kDoubleWord);
+    EmitLogicalShiftOp(BIC, rd, rn, o, kEightBytes);
   }
   void orr(Register rd, Register rn, Operand o) {
-    EmitLogicalShiftOp(ORR, rd, rn, o, kDoubleWord);
+    EmitLogicalShiftOp(ORR, rd, rn, o, kEightBytes);
   }
   void orrw(Register rd, Register rn, Operand o) {
-    EmitLogicalShiftOp(ORR, rd, rn, o, kWord);
+    EmitLogicalShiftOp(ORR, rd, rn, o, kFourBytes);
   }
   void orn(Register rd, Register rn, Operand o) {
-    EmitLogicalShiftOp(ORN, rd, rn, o, kDoubleWord);
+    EmitLogicalShiftOp(ORN, rd, rn, o, kEightBytes);
   }
   void ornw(Register rd, Register rn, Operand o) {
-    EmitLogicalShiftOp(ORN, rd, rn, o, kWord);
+    EmitLogicalShiftOp(ORN, rd, rn, o, kFourBytes);
   }
   void eor(Register rd, Register rn, Operand o) {
-    EmitLogicalShiftOp(EOR, rd, rn, o, kDoubleWord);
+    EmitLogicalShiftOp(EOR, rd, rn, o, kEightBytes);
   }
   void eorw(Register rd, Register rn, Operand o) {
-    EmitLogicalShiftOp(EOR, rd, rn, o, kWord);
+    EmitLogicalShiftOp(EOR, rd, rn, o, kFourBytes);
   }
   void eon(Register rd, Register rn, Operand o) {
-    EmitLogicalShiftOp(EON, rd, rn, o, kDoubleWord);
+    EmitLogicalShiftOp(EON, rd, rn, o, kEightBytes);
   }
   void ands(Register rd, Register rn, Operand o) {
-    EmitLogicalShiftOp(ANDS, rd, rn, o, kDoubleWord);
+    EmitLogicalShiftOp(ANDS, rd, rn, o, kEightBytes);
   }
   void bics(Register rd, Register rn, Operand o) {
-    EmitLogicalShiftOp(BICS, rd, rn, o, kDoubleWord);
+    EmitLogicalShiftOp(BICS, rd, rn, o, kEightBytes);
   }
 
   // Count leading zero bits.
   void clz(Register rd, Register rn) {
-    EmitMiscDP1Source(CLZ, rd, rn, kDoubleWord);
+    EmitMiscDP1Source(CLZ, rd, rn, kEightBytes);
   }
 
   // Reverse bits.
   void rbit(Register rd, Register rn) {
-    EmitMiscDP1Source(RBIT, rd, rn, kDoubleWord);
+    EmitMiscDP1Source(RBIT, rd, rn, kEightBytes);
   }
 
   // Misc. arithmetic.
   void udiv(Register rd, Register rn, Register rm) {
-    EmitMiscDP2Source(UDIV, rd, rn, rm, kDoubleWord);
+    EmitMiscDP2Source(UDIV, rd, rn, rm, kEightBytes);
   }
   void sdiv(Register rd, Register rn, Register rm) {
-    EmitMiscDP2Source(SDIV, rd, rn, rm, kDoubleWord);
+    EmitMiscDP2Source(SDIV, rd, rn, rm, kEightBytes);
   }
   void lslv(Register rd, Register rn, Register rm) {
-    EmitMiscDP2Source(LSLV, rd, rn, rm, kDoubleWord);
+    EmitMiscDP2Source(LSLV, rd, rn, rm, kEightBytes);
   }
   void lsrv(Register rd, Register rn, Register rm) {
-    EmitMiscDP2Source(LSRV, rd, rn, rm, kDoubleWord);
+    EmitMiscDP2Source(LSRV, rd, rn, rm, kEightBytes);
   }
   void asrv(Register rd, Register rn, Register rm) {
-    EmitMiscDP2Source(ASRV, rd, rn, rm, kDoubleWord);
+    EmitMiscDP2Source(ASRV, rd, rn, rm, kEightBytes);
   }
   void lslvw(Register rd, Register rn, Register rm) {
-    EmitMiscDP2Source(LSLV, rd, rn, rm, kWord);
+    EmitMiscDP2Source(LSLV, rd, rn, rm, kFourBytes);
   }
   void lsrvw(Register rd, Register rn, Register rm) {
-    EmitMiscDP2Source(LSRV, rd, rn, rm, kWord);
+    EmitMiscDP2Source(LSRV, rd, rn, rm, kFourBytes);
   }
   void asrvw(Register rd, Register rn, Register rm) {
-    EmitMiscDP2Source(ASRV, rd, rn, rm, kWord);
+    EmitMiscDP2Source(ASRV, rd, rn, rm, kFourBytes);
   }
   void madd(Register rd,
             Register rn,
             Register rm,
             Register ra,
-            OperandSize sz = kDoubleWord) {
+            OperandSize sz = kEightBytes) {
     EmitMiscDP3Source(MADD, rd, rn, rm, ra, sz);
   }
   void msub(Register rd,
             Register rn,
             Register rm,
             Register ra,
-            OperandSize sz = kDoubleWord) {
+            OperandSize sz = kEightBytes) {
     EmitMiscDP3Source(MSUB, rd, rn, rm, ra, sz);
   }
   void smulh(Register rd,
              Register rn,
              Register rm,
-             OperandSize sz = kDoubleWord) {
+             OperandSize sz = kEightBytes) {
     EmitMiscDP3Source(SMULH, rd, rn, rm, R31, sz);
   }
   void umulh(Register rd,
              Register rn,
              Register rm,
-             OperandSize sz = kDoubleWord) {
+             OperandSize sz = kEightBytes) {
     EmitMiscDP3Source(UMULH, rd, rn, rm, R31, sz);
   }
   void umaddl(Register rd,
               Register rn,
               Register rm,
               Register ra,
-              OperandSize sz = kDoubleWord) {
+              OperandSize sz = kEightBytes) {
     EmitMiscDP3Source(UMADDL, rd, rn, rm, ra, sz);
   }
   void umull(Register rd,
              Register rn,
              Register rm,
-             OperandSize sz = kDoubleWord) {
+             OperandSize sz = kEightBytes) {
     EmitMiscDP3Source(UMADDL, rd, rn, rm, ZR, sz);
   }
   void smaddl(Register rd,
               Register rn,
               Register rm,
               Register ra,
-              OperandSize sz = kDoubleWord) {
+              OperandSize sz = kEightBytes) {
     EmitMiscDP3Source(SMADDL, rd, rn, rm, ra, sz);
   }
   void smull(Register rd,
              Register rn,
              Register rm,
-             OperandSize sz = kDoubleWord) {
+             OperandSize sz = kEightBytes) {
     EmitMiscDP3Source(SMADDL, rd, rn, rm, ZR, sz);
   }
 
@@ -919,26 +969,26 @@
   void movk(Register rd, const Immediate& imm, int hw_idx) {
     ASSERT(rd != CSP);
     const Register crd = ConcreteRegister(rd);
-    EmitMoveWideOp(MOVK, crd, imm, hw_idx, kDoubleWord);
+    EmitMoveWideOp(MOVK, crd, imm, hw_idx, kEightBytes);
   }
   void movn(Register rd, const Immediate& imm, int hw_idx) {
     ASSERT(rd != CSP);
     const Register crd = ConcreteRegister(rd);
-    EmitMoveWideOp(MOVN, crd, imm, hw_idx, kDoubleWord);
+    EmitMoveWideOp(MOVN, crd, imm, hw_idx, kEightBytes);
   }
   void movz(Register rd, const Immediate& imm, int hw_idx) {
     ASSERT(rd != CSP);
     const Register crd = ConcreteRegister(rd);
-    EmitMoveWideOp(MOVZ, crd, imm, hw_idx, kDoubleWord);
+    EmitMoveWideOp(MOVZ, crd, imm, hw_idx, kEightBytes);
   }
 
   // Loads and Stores.
-  void ldr(Register rt, Address a, OperandSize sz = kDoubleWord) {
+  void ldr(Register rt, Address a, OperandSize sz = kEightBytes) {
     ASSERT((a.type() != Address::PairOffset) &&
            (a.type() != Address::PairPostIndex) &&
            (a.type() != Address::PairPreIndex));
     if (a.type() == Address::PCOffset) {
-      ASSERT(sz == kDoubleWord);
+      ASSERT(sz == kEightBytes);
       EmitLoadRegLiteral(LDRpc, rt, a, sz);
     } else {
       if (IsSignedOperand(sz)) {
@@ -948,27 +998,27 @@
       }
     }
   }
-  void str(Register rt, Address a, OperandSize sz = kDoubleWord) {
+  void str(Register rt, Address a, OperandSize sz = kEightBytes) {
     ASSERT((a.type() != Address::PairOffset) &&
            (a.type() != Address::PairPostIndex) &&
            (a.type() != Address::PairPreIndex));
     EmitLoadStoreReg(STR, rt, a, sz);
   }
 
-  void ldp(Register rt, Register rt2, Address a, OperandSize sz = kDoubleWord) {
+  void ldp(Register rt, Register rt2, Address a, OperandSize sz = kEightBytes) {
     ASSERT((a.type() == Address::PairOffset) ||
            (a.type() == Address::PairPostIndex) ||
            (a.type() == Address::PairPreIndex));
     EmitLoadStoreRegPair(LDP, rt, rt2, a, sz);
   }
-  void stp(Register rt, Register rt2, Address a, OperandSize sz = kDoubleWord) {
+  void stp(Register rt, Register rt2, Address a, OperandSize sz = kEightBytes) {
     ASSERT((a.type() == Address::PairOffset) ||
            (a.type() == Address::PairPostIndex) ||
            (a.type() == Address::PairPreIndex));
     EmitLoadStoreRegPair(STP, rt, rt2, a, sz);
   }
 
-  void ldxr(Register rt, Register rn, OperandSize size = kDoubleWord) {
+  void ldxr(Register rt, Register rn, OperandSize size = kEightBytes) {
     // rt = value
     // rn = address
     EmitLoadStoreExclusive(LDXR, R31, rn, rt, size);
@@ -976,7 +1026,7 @@
   void stxr(Register rs,
             Register rt,
             Register rn,
-            OperandSize size = kDoubleWord) {
+            OperandSize size = kEightBytes) {
     // rs = status (1 = failure, 0 = success)
     // rt = value
     // rn = address
@@ -987,20 +1037,20 @@
     Emit(encoding);
   }
 
-  void ldar(Register rt, Register rn, OperandSize sz = kDoubleWord) {
+  void ldar(Register rt, Register rn, OperandSize sz = kEightBytes) {
     EmitLoadStoreExclusive(LDAR, R31, rn, rt, sz);
   }
 
-  void stlr(Register rt, Register rn, OperandSize sz = kDoubleWord) {
+  void stlr(Register rt, Register rn, OperandSize sz = kEightBytes) {
     EmitLoadStoreExclusive(STLR, R31, rn, rt, sz);
   }
 
   // Conditional select.
   void csel(Register rd, Register rn, Register rm, Condition cond) {
-    EmitConditionalSelect(CSEL, rd, rn, rm, cond, kDoubleWord);
+    EmitConditionalSelect(CSEL, rd, rn, rm, cond, kEightBytes);
   }
   void csinc(Register rd, Register rn, Register rm, Condition cond) {
-    EmitConditionalSelect(CSINC, rd, rn, rm, cond, kDoubleWord);
+    EmitConditionalSelect(CSINC, rd, rn, rm, cond, kEightBytes);
   }
   void cinc(Register rd, Register rn, Condition cond) {
     csinc(rd, rn, rn, InvertCondition(cond));
@@ -1009,7 +1059,7 @@
     csinc(rd, ZR, ZR, InvertCondition(cond));
   }
   void csinv(Register rd, Register rn, Register rm, Condition cond) {
-    EmitConditionalSelect(CSINV, rd, rn, rm, cond, kDoubleWord);
+    EmitConditionalSelect(CSINV, rd, rn, rm, cond, kEightBytes);
   }
   void cinv(Register rd, Register rn, Condition cond) {
     csinv(rd, rn, rn, InvertCondition(cond));
@@ -1018,11 +1068,11 @@
     csinv(rd, ZR, ZR, InvertCondition(cond));
   }
   void csneg(Register rd, Register rn, Register rm, Condition cond) {
-    EmitConditionalSelect(CSNEG, rd, rn, rm, cond, kDoubleWord);
+    EmitConditionalSelect(CSNEG, rd, rn, rm, cond, kEightBytes);
   }
   void cneg(Register rd, Register rn, Condition cond) {
     EmitConditionalSelect(CSNEG, rd, rn, rn, InvertCondition(cond),
-                          kDoubleWord);
+                          kEightBytes);
   }
 
   // Comparison.
@@ -1052,13 +1102,19 @@
   void b(int32_t offset) { EmitUnconditionalBranchOp(B, offset); }
   void bl(int32_t offset) { EmitUnconditionalBranchOp(BL, offset); }
 
-  void BranchIf(Condition condition, Label* label) { b(label, condition); }
+  // Branches to the given label if the condition holds.
+  // [distance] is ignored on ARM.
+  void BranchIf(Condition condition,
+                Label* label,
+                JumpDistance distance = kFarJump) {
+    b(label, condition);
+  }
 
-  void cbz(Label* label, Register rt, OperandSize sz = kDoubleWord) {
+  void cbz(Label* label, Register rt, OperandSize sz = kEightBytes) {
     EmitCompareAndBranch(CBZ, rt, label, sz);
   }
 
-  void cbnz(Label* label, Register rt, OperandSize sz = kDoubleWord) {
+  void cbnz(Label* label, Register rt, OperandSize sz = kEightBytes) {
     EmitCompareAndBranch(CBNZ, rt, label, sz);
   }
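
// A sketch of what the widened BranchIf signature enables: shared,
// architecture-independent code can pass a JumpDistance hint that IA32/X64
// honor and that this ARM64 backend accepts but ignores (label name
// illustrative):
//
//   __ BranchIf(NOT_EQUAL, &failed, kNearJump);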
 
@@ -1102,13 +1158,13 @@
     ASSERT(rn != R31);
     ASSERT(rn != CSP);
     const Register crn = ConcreteRegister(rn);
-    EmitFPIntCvtOp(FMOVSR, static_cast<Register>(vd), crn, kWord);
+    EmitFPIntCvtOp(FMOVSR, static_cast<Register>(vd), crn, kFourBytes);
   }
   void fmovrs(Register rd, VRegister vn) {
     ASSERT(rd != R31);
     ASSERT(rd != CSP);
     const Register crd = ConcreteRegister(rd);
-    EmitFPIntCvtOp(FMOVRS, crd, static_cast<Register>(vn), kWord);
+    EmitFPIntCvtOp(FMOVRS, crd, static_cast<Register>(vn), kFourBytes);
   }
   void fmovdr(VRegister vd, Register rn) {
     ASSERT(rn != R31);
@@ -1132,7 +1188,7 @@
     ASSERT(rn != R31);
     ASSERT(rn != CSP);
     const Register crn = ConcreteRegister(rn);
-    EmitFPIntCvtOp(SCVTFD, static_cast<Register>(vd), crn, kWord);
+    EmitFPIntCvtOp(SCVTFD, static_cast<Register>(vd), crn, kFourBytes);
   }
   void fcvtzds(Register rd, VRegister vn) {
     ASSERT(rd != R31);
@@ -1282,11 +1338,11 @@
   }
   void vdupw(VRegister vd, Register rn) {
     const VRegister vn = static_cast<VRegister>(rn);
-    EmitSIMDCopyOp(VDUPI, vd, vn, kWord, 0, 0);
+    EmitSIMDCopyOp(VDUPI, vd, vn, kFourBytes, 0, 0);
   }
   void vdupx(VRegister vd, Register rn) {
     const VRegister vn = static_cast<VRegister>(rn);
-    EmitSIMDCopyOp(VDUPI, vd, vn, kDoubleWord, 0, 0);
+    EmitSIMDCopyOp(VDUPI, vd, vn, kEightBytes, 0, 0);
   }
   void vdups(VRegister vd, VRegister vn, int32_t idx) {
     EmitSIMDCopyOp(VDUP, vd, vn, kSWord, 0, idx);
@@ -1296,11 +1352,11 @@
   }
   void vinsw(VRegister vd, int32_t didx, Register rn) {
     const VRegister vn = static_cast<VRegister>(rn);
-    EmitSIMDCopyOp(VINSI, vd, vn, kWord, 0, didx);
+    EmitSIMDCopyOp(VINSI, vd, vn, kFourBytes, 0, didx);
   }
   void vinsx(VRegister vd, int32_t didx, Register rn) {
     const VRegister vn = static_cast<VRegister>(rn);
-    EmitSIMDCopyOp(VINSI, vd, vn, kDoubleWord, 0, didx);
+    EmitSIMDCopyOp(VINSI, vd, vn, kEightBytes, 0, didx);
   }
   void vinss(VRegister vd, int32_t didx, VRegister vn, int32_t sidx) {
     EmitSIMDCopyOp(VINS, vd, vn, kSWord, sidx, didx);
@@ -1310,11 +1366,11 @@
   }
   void vmovrs(Register rd, VRegister vn, int32_t sidx) {
     const VRegister vd = static_cast<VRegister>(rd);
-    EmitSIMDCopyOp(VMOVW, vd, vn, kWord, 0, sidx);
+    EmitSIMDCopyOp(VMOVW, vd, vn, kFourBytes, 0, sidx);
   }
   void vmovrd(Register rd, VRegister vn, int32_t sidx) {
     const VRegister vd = static_cast<VRegister>(rd);
-    EmitSIMDCopyOp(VMOVX, vd, vn, kDoubleWord, 0, sidx);
+    EmitSIMDCopyOp(VMOVX, vd, vn, kEightBytes, 0, sidx);
   }
 
   // Aliases.
@@ -1339,10 +1395,10 @@
   void negs(Register rd, Register rm) { subs(rd, ZR, Operand(rm)); }
   void negsw(Register rd, Register rm) { subsw(rd, ZR, Operand(rm)); }
   void mul(Register rd, Register rn, Register rm) {
-    madd(rd, rn, rm, ZR, kDoubleWord);
+    madd(rd, rn, rm, ZR, kEightBytes);
   }
   void mulw(Register rd, Register rn, Register rm) {
-    madd(rd, rn, rm, ZR, kWord);
+    madd(rd, rn, rm, ZR, kFourBytes);
   }
   void Push(Register reg) {
     ASSERT(reg != PP);  // Only push PP with TagAndPushPP().
@@ -1400,17 +1456,17 @@
   void LslImmediate(Register rd,
                     Register rn,
                     int shift,
-                    OperandSize sz = kDoubleWord) {
+                    OperandSize sz = kEightBytes) {
     const int reg_size =
-        (sz == kDoubleWord) ? kXRegSizeInBits : kWRegSizeInBits;
+        (sz == kEightBytes) ? kXRegSizeInBits : kWRegSizeInBits;
     ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1, sz);
   }
   void LsrImmediate(Register rd,
                     Register rn,
                     int shift,
-                    OperandSize sz = kDoubleWord) {
+                    OperandSize sz = kEightBytes) {
     const int reg_size =
-        (sz == kDoubleWord) ? kXRegSizeInBits : kWRegSizeInBits;
+        (sz == kEightBytes) ? kXRegSizeInBits : kWRegSizeInBits;
     ubfm(rd, rn, shift, reg_size - 1, sz);
   }
   void AsrImmediate(Register rd, Register rn, int shift) {
@@ -1477,11 +1533,11 @@
   void AddImmediateSetFlags(Register dest,
                             Register rn,
                             int64_t imm,
-                            OperandSize sz = kDoubleWord);
+                            OperandSize sz = kEightBytes);
   void SubImmediateSetFlags(Register dest,
                             Register rn,
                             int64_t imm,
-                            OperandSize sz = kDoubleWord);
+                            OperandSize sz = kEightBytes);
   void AndImmediate(Register rd, Register rn, int64_t imm);
   void OrImmediate(Register rd, Register rn, int64_t imm);
   void XorImmediate(Register rd, Register rn, int64_t imm);
@@ -1489,15 +1545,30 @@
   void CompareImmediate(Register rn, int64_t imm);
 
   void LoadFromOffset(Register dest,
+                      const Address& address,
+                      OperandSize sz = kEightBytes);
+  void LoadFromOffset(Register dest,
                       Register base,
                       int32_t offset,
-                      OperandSize sz = kDoubleWord);
+                      OperandSize sz = kEightBytes);
   void LoadFieldFromOffset(Register dest,
                            Register base,
                            int32_t offset,
-                           OperandSize sz = kDoubleWord) {
+                           OperandSize sz = kEightBytes) {
     LoadFromOffset(dest, base, offset - kHeapObjectTag, sz);
   }
+  // For loading indexed payloads out of tagged objects like Arrays. If the
+  // payload objects are word-sized, use TIMES_HALF_WORD_SIZE if [index]
+  // holds a Smi and TIMES_WORD_SIZE if it holds an unboxed word.
+  void LoadIndexedPayload(Register dest,
+                          Register base,
+                          int32_t payload_offset,
+                          Register index,
+                          ScaleFactor scale,
+                          OperandSize sz = kEightBytes) {
+    add(dest, base, Operand(index, LSL, scale));
+    LoadFromOffset(dest, dest, payload_offset - kHeapObjectTag, sz);
+  }
   void LoadSFromOffset(VRegister dest, Register base, int32_t offset);
   void LoadDFromOffset(VRegister dest, Register base, int32_t offset);
   void LoadDFieldFromOffset(VRegister dest, Register base, int32_t offset) {
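
// A sketch of the new helper under the comment's guidance (register choices
// and the Array offset are illustrative): loading an eight-byte element of a
// Dart Array while the index register still holds a Smi. A Smi is the value
// shifted left by one, so TIMES_HALF_WORD_SIZE (a scale of four on ARM64)
// turns it into an eight-byte stride:
//
//   __ LoadIndexedPayload(R0, /*base=*/R1, target::Array::data_offset(),
//                         /*index=*/R2, TIMES_HALF_WORD_SIZE, kEightBytes);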
@@ -1511,11 +1582,11 @@
   void StoreToOffset(Register src,
                      Register base,
                      int32_t offset,
-                     OperandSize sz = kDoubleWord);
+                     OperandSize sz = kEightBytes);
   void StoreFieldToOffset(Register src,
                           Register base,
                           int32_t offset,
-                          OperandSize sz = kDoubleWord) {
+                          OperandSize sz = kEightBytes) {
     StoreToOffset(src, base, offset - kHeapObjectTag, sz);
   }
 
@@ -1876,7 +1947,7 @@
                              Register rm) {
     ASSERT((rd != R31) && (rn != R31) && (rm != R31));
     ASSERT((rd != CSP) && (rn != CSP) && (rm != CSP));
-    const int32_t size = (sz == kDoubleWord) ? B31 : 0;
+    const int32_t size = (sz == kEightBytes) ? B31 : 0;
     const int32_t s = set_flags ? B29 : 0;
     const int32_t op = subtract ? SBC : ADC;
     const int32_t encoding = op | size | s | Arm64Encode::Rd(rd) |
@@ -1890,8 +1961,9 @@
                        Operand o,
                        OperandSize sz,
                        bool set_flags) {
-    ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord));
-    const int32_t size = (sz == kDoubleWord) ? B31 : 0;
+    ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
+           (sz == kUnsignedFourBytes));
+    const int32_t size = (sz == kEightBytes) ? B31 : 0;
     const int32_t s = set_flags ? B29 : 0;
     const int32_t encoding = op | size | s | Arm64Encode::Rd(rd) |
                              Arm64Encode::Rn(rn) | o.encoding();
@@ -1906,13 +1978,13 @@
                       int r_imm,
                       int s_imm,
                       OperandSize size) {
-    if (size != kDoubleWord) {
-      ASSERT(size == kWord);
+    if (size != kEightBytes) {
+      ASSERT(size == kFourBytes);
       ASSERT(r_imm < 32 && s_imm < 32);
     } else {
       ASSERT(r_imm < 64 && s_imm < 64);
     }
-    const int32_t instr = op | (size == kDoubleWord ? Bitfield64 : 0);
+    const int32_t instr = op | (size == kEightBytes ? Bitfield64 : 0);
     const int32_t encoding = instr | Operand(0, s_imm, r_imm).encoding() |
                              Arm64Encode::Rd(rd) | Arm64Encode::Rn(rn);
     Emit(encoding);
@@ -1923,13 +1995,14 @@
                         Register rn,
                         Operand o,
                         OperandSize sz) {
-    ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord));
+    ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
+           (sz == kUnsignedFourBytes));
     ASSERT((rd != R31) && (rn != R31));
     ASSERT(rn != CSP);
     ASSERT((op == ANDIS) || (rd != ZR));   // op != ANDIS => rd != ZR.
     ASSERT((op != ANDIS) || (rd != CSP));  // op == ANDIS => rd != CSP.
     ASSERT(o.type() == Operand::BitfieldImm);
-    const int32_t size = (sz == kDoubleWord) ? B31 : 0;
+    const int32_t size = (sz == kEightBytes) ? B31 : 0;
     const int32_t encoding =
         op | size | Arm64Encode::Rd(rd) | Arm64Encode::Rn(rn) | o.encoding();
     Emit(encoding);
@@ -1940,11 +2013,12 @@
                           Register rn,
                           Operand o,
                           OperandSize sz) {
-    ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord));
+    ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
+           (sz == kUnsignedFourBytes));
     ASSERT((rd != R31) && (rn != R31));
     ASSERT((rd != CSP) && (rn != CSP));
     ASSERT(o.type() == Operand::Shifted);
-    const int32_t size = (sz == kDoubleWord) ? B31 : 0;
+    const int32_t size = (sz == kEightBytes) ? B31 : 0;
     const int32_t encoding =
         op | size | Arm64Encode::Rd(rd) | Arm64Encode::Rn(rn) | o.encoding();
     Emit(encoding);
@@ -1956,8 +2030,9 @@
                             Operand o,
                             OperandSize sz,
                             bool set_flags) {
-    ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord));
-    const int32_t size = (sz == kDoubleWord) ? B31 : 0;
+    ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
+           (sz == kUnsignedFourBytes));
+    const int32_t size = (sz == kEightBytes) ? B31 : 0;
     const int32_t s = set_flags ? B29 : 0;
     const int32_t encoding = op | size | s | Arm64Encode::Rd(rd) |
                              Arm64Encode::Rn(rn) | o.encoding();
@@ -2050,10 +2125,11 @@
     // EncodeImm19BranchOffset will longjump out if the offset does not fit in
     // 19 bits.
     const int32_t encoded_offset = EncodeImm19BranchOffset(imm, 0);
-    ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord));
+    ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
+           (sz == kUnsignedFourBytes));
     ASSERT(Utils::IsInt(21, imm) && ((imm & 0x3) == 0));
     ASSERT((rt != CSP) && (rt != R31));
-    const int32_t size = (sz == kDoubleWord) ? B31 : 0;
+    const int32_t size = (sz == kEightBytes) ? B31 : 0;
     const int32_t encoding = op | size | Arm64Encode::Rt(rt) | encoded_offset;
     Emit(encoding);
   }
@@ -2223,8 +2299,9 @@
                       int hw_idx,
                       OperandSize sz) {
     ASSERT((hw_idx >= 0) && (hw_idx <= 3));
-    ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord));
-    const int32_t size = (sz == kDoubleWord) ? B31 : 0;
+    ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
+           (sz == kUnsignedFourBytes));
+    const int32_t size = (sz == kEightBytes) ? B31 : 0;
     const int32_t encoding =
         op | size | Arm64Encode::Rd(rd) |
         (static_cast<int32_t>(hw_idx) << kHWShift) |
@@ -2236,9 +2313,9 @@
                               Register rs,
                               Register rn,
                               Register rt,
-                              OperandSize sz = kDoubleWord) {
-    ASSERT(sz == kDoubleWord || sz == kWord);
-    const int32_t size = B31 | (sz == kDoubleWord ? B30 : 0);
+                              OperandSize sz = kEightBytes) {
+    ASSERT(sz == kEightBytes || sz == kFourBytes);
+    const int32_t size = B31 | (sz == kEightBytes ? B30 : 0);
 
     ASSERT((rs != kNoRegister) && (rs != ZR));
     ASSERT((rn != kNoRegister) && (rn != ZR));
@@ -2269,10 +2346,11 @@
                           Register rt,
                           Address a,
                           OperandSize sz) {
-    ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord));
+    ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
+           (sz == kUnsignedFourBytes));
     ASSERT(a.log2sz_ == -1 || a.log2sz_ == Log2OperandSizeBytes(sz));
     ASSERT((rt != CSP) && (rt != R31));
-    const int32_t size = (sz == kDoubleWord) ? B30 : 0;
+    const int32_t size = (sz == kEightBytes) ? B30 : 0;
     const int32_t encoding = op | size | Arm64Encode::Rt(rt) | a.encoding();
     Emit(encoding);
   }
@@ -2286,19 +2364,20 @@
     ASSERT(a.can_writeback_to(rt) && a.can_writeback_to(rt2));
     ASSERT(op != LDP || rt != rt2);
 
-    ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord));
+    ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
+           (sz == kUnsignedFourBytes));
     ASSERT(a.log2sz_ == -1 || a.log2sz_ == Log2OperandSizeBytes(sz));
     ASSERT((rt != CSP) && (rt != R31));
     ASSERT((rt2 != CSP) && (rt2 != R31));
     int32_t opc = 0;
     switch (sz) {
-      case kDoubleWord:
+      case kEightBytes:
         opc = B31;
         break;
-      case kWord:
+      case kFourBytes:
         opc = B30;
         break;
-      case kUnsignedWord:
+      case kUnsignedFourBytes:
         opc = 0;
         break;
       default:
@@ -2325,8 +2404,9 @@
                          Register rn,
                          OperandSize sz) {
     ASSERT((rd != CSP) && (rn != CSP));
-    ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord));
-    const int32_t size = (sz == kDoubleWord) ? B31 : 0;
+    ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
+           (sz == kUnsignedFourBytes));
+    const int32_t size = (sz == kEightBytes) ? B31 : 0;
     const int32_t encoding =
         op | size | Arm64Encode::Rd(rd) | Arm64Encode::Rn(rn);
     Emit(encoding);
@@ -2338,8 +2418,9 @@
                          Register rm,
                          OperandSize sz) {
     ASSERT((rd != CSP) && (rn != CSP) && (rm != CSP));
-    ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord));
-    const int32_t size = (sz == kDoubleWord) ? B31 : 0;
+    ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
+           (sz == kUnsignedFourBytes));
+    const int32_t size = (sz == kEightBytes) ? B31 : 0;
     const int32_t encoding = op | size | Arm64Encode::Rd(rd) |
                              Arm64Encode::Rn(rn) | Arm64Encode::Rm(rm);
     Emit(encoding);
@@ -2352,8 +2433,9 @@
                          Register ra,
                          OperandSize sz) {
     ASSERT((rd != CSP) && (rn != CSP) && (rm != CSP) && (ra != CSP));
-    ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord));
-    const int32_t size = (sz == kDoubleWord) ? B31 : 0;
+    ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
+           (sz == kUnsignedFourBytes));
+    const int32_t size = (sz == kEightBytes) ? B31 : 0;
     const int32_t encoding = op | size | Arm64Encode::Rd(rd) |
                              Arm64Encode::Rn(rn) | Arm64Encode::Rm(rm) |
                              Arm64Encode::Ra(ra);
@@ -2367,8 +2449,9 @@
                              Condition cond,
                              OperandSize sz) {
     ASSERT((rd != CSP) && (rn != CSP) && (rm != CSP));
-    ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord));
-    const int32_t size = (sz == kDoubleWord) ? B31 : 0;
+    ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
+           (sz == kUnsignedFourBytes));
+    const int32_t size = (sz == kEightBytes) ? B31 : 0;
     const int32_t encoding = op | size | Arm64Encode::Rd(rd) |
                              Arm64Encode::Rn(rn) | Arm64Encode::Rm(rm) |
                              (static_cast<int32_t>(cond) << kSelCondShift);
@@ -2384,9 +2467,9 @@
   void EmitFPIntCvtOp(FPIntCvtOp op,
                       Register rd,
                       Register rn,
-                      OperandSize sz = kDoubleWord) {
-    ASSERT((sz == kDoubleWord) || (sz == kWord));
-    const int32_t sfield = (sz == kDoubleWord) ? B31 : 0;
+                      OperandSize sz = kEightBytes) {
+    ASSERT((sz == kEightBytes) || (sz == kFourBytes));
+    const int32_t sfield = (sz == kEightBytes) ? B31 : 0;
     const int32_t encoding =
         op | Arm64Encode::Rd(rd) | Arm64Encode::Rn(rn) | sfield;
     Emit(encoding);
diff --git a/runtime/vm/compiler/assembler/assembler_arm64_test.cc b/runtime/vm/compiler/assembler/assembler_arm64_test.cc
index ce1ba92..8cf9ac2 100644
--- a/runtime/vm/compiler/assembler/assembler_arm64_test.cc
+++ b/runtime/vm/compiler/assembler/assembler_arm64_test.cc
@@ -461,9 +461,9 @@
          Operand(2 * target::kWordSize));  // Must not access beyond CSP.
 
   __ LoadImmediate(R1, 0xffffffff);
-  __ str(R1, Address(SP, -4, Address::PreIndex, kWord), kWord);
-  __ ldr(R0, Address(SP), kWord);
-  __ ldr(R1, Address(SP, 4, Address::PostIndex, kWord), kWord);
+  __ str(R1, Address(SP, -4, Address::PreIndex, kFourBytes), kFourBytes);
+  __ ldr(R0, Address(SP), kFourBytes);
+  __ ldr(R1, Address(SP, 4, Address::PostIndex, kFourBytes), kFourBytes);
   __ RestoreCSP();
   __ ret();
 }
@@ -635,10 +635,10 @@
 
   Label retry;
   __ Bind(&retry);
-  __ ldxr(R0, SP, kWord);
+  __ ldxr(R0, SP, kFourBytes);
   // A 32-bit operation should ignore the high word of R0 that was pushed on
   // the stack.
-  __ stxr(TMP, R1, SP, kWord);  // IP == 0, success
+  __ stxr(TMP, R1, SP, kFourBytes);  // IP == 0, success
   __ cmp(TMP, Operand(0));
   __ b(&retry, NE);  // NE if a context switch occurred between ldxr and stxr.
   __ Pop(R0);        // 42 + 42 * 2**32
@@ -664,9 +664,9 @@
   __ movz(R0, Immediate(40), 0);
   __ movz(R1, Immediate(42), 0);
 
-  __ ldxr(R0, SP, kWord);
+  __ ldxr(R0, SP, kFourBytes);
   __ clrex();                   // Simulate a context switch.
-  __ stxr(TMP, R1, SP, kWord);  // IP == 1, failure
+  __ stxr(TMP, R1, SP, kFourBytes);  // IP == 1, failure
   __ Pop(R0);                   // 40
   __ add(R0, R0, Operand(TMP));
   __ RestoreCSP();
@@ -692,14 +692,14 @@
 
   // Test 64-bit ldar.
   __ PushImmediate(0x1122334455667788);
-  __ ldar(R1, SP, kDoubleWord);
+  __ ldar(R1, SP, kEightBytes);
   __ CompareImmediate(R1, 0x1122334455667788);
   __ BranchIf(NOT_EQUAL, &failed);
   __ Drop(1);
 
   // Test 32-bit ldar - must zero-extend.
   __ PushImmediate(0x1122334455667788);
-  __ ldar(R1, SP, kWord);
+  __ ldar(R1, SP, kFourBytes);
   __ CompareImmediate(R1, 0x55667788);
   __ BranchIf(NOT_EQUAL, &failed);
   __ Drop(1);
@@ -707,7 +707,7 @@
   // Test 64-bit stlr.
   __ PushImmediate(0);
   __ LoadImmediate(R1, 0x1122334455667788);
-  __ stlr(R1, SP, kDoubleWord);
+  __ stlr(R1, SP, kEightBytes);
   __ Pop(R1);
   __ CompareImmediate(R1, 0x1122334455667788);
   __ BranchIf(NOT_EQUAL, &failed);
@@ -715,7 +715,7 @@
   // Test 32-bit stlr.
   __ PushImmediate(0);
   __ LoadImmediate(R1, 0x1122334455667788);
-  __ stlr(R1, SP, kWord);
+  __ stlr(R1, SP, kFourBytes);
   __ Pop(R1);
   __ CompareImmediate(R1, 0x55667788);
   __ BranchIf(NOT_EQUAL, &failed);
@@ -1048,7 +1048,7 @@
 #define SHIFT_32_IMMEDIATE_TEST(macro_op, val, shift, expected)                \
   ASSEMBLER_TEST_GENERATE(macro_op##a_##val##_##shift, assembler) {            \
     __ LoadImmediate(R1, bit_cast<int32_t>(val));                              \
-    __ macro_op(R0, R1, (shift), kWord);                                       \
+    __ macro_op(R0, R1, (shift), kFourBytes);                                  \
     __ ret();                                                                  \
   }                                                                            \
                                                                                \
@@ -2373,7 +2373,7 @@
 }
 
 ASSEMBLER_TEST_GENERATE(LoadHalfWordUnaligned, assembler) {
-  __ ldr(R1, R0, kHalfword);
+  __ ldr(R1, R0, kTwoBytes);
   __ mov(R0, R1);
   __ ret();
 }
@@ -2396,7 +2396,7 @@
 }
 
 ASSEMBLER_TEST_GENERATE(LoadHalfWordUnsignedUnaligned, assembler) {
-  __ ldr(R1, R0, kUnsignedHalfword);
+  __ ldr(R1, R0, kUnsignedTwoBytes);
   __ mov(R0, R1);
   __ ret();
 }
@@ -2418,7 +2418,7 @@
 
 ASSEMBLER_TEST_GENERATE(StoreHalfWordUnaligned, assembler) {
   __ LoadImmediate(R1, 0xABCD);
-  __ str(R1, R0, kHalfword);
+  __ str(R1, R0, kTwoBytes);
   __ mov(R0, R1);
   __ ret();
 }
@@ -2446,7 +2446,7 @@
 }
 
 ASSEMBLER_TEST_GENERATE(LoadWordUnaligned, assembler) {
-  __ ldr(R1, R0, kUnsignedWord);
+  __ ldr(R1, R0, kUnsignedFourBytes);
   __ mov(R0, R1);
   __ ret();
 }
@@ -2476,7 +2476,7 @@
 
 ASSEMBLER_TEST_GENERATE(StoreWordUnaligned, assembler) {
   __ LoadImmediate(R1, 0x12345678);
-  __ str(R1, R0, kUnsignedWord);
+  __ str(R1, R0, kUnsignedFourBytes);
   __ mov(R0, R1);
   __ ret();
 }
diff --git a/runtime/vm/compiler/assembler/assembler_arm_test.cc b/runtime/vm/compiler/assembler/assembler_arm_test.cc
index 558895e..9199d5a 100644
--- a/runtime/vm/compiler/assembler/assembler_arm_test.cc
+++ b/runtime/vm/compiler/assembler/assembler_arm_test.cc
@@ -2024,7 +2024,7 @@
     __ mov(R0, Operand(8));
     __ vmovsr(S7, R0);
 
-    __ vaddqi(kHalfword, Q2, Q0, Q1);
+    __ vaddqi(kTwoBytes, Q2, Q0, Q1);
 
     __ vmovrs(R0, S8);
     __ vmovrs(R1, S9);
@@ -2065,7 +2065,7 @@
     __ mov(R0, Operand(8));
     __ vmovsr(S7, R0);
 
-    __ vaddqi(kWord, Q2, Q0, Q1);
+    __ vaddqi(kFourBytes, Q2, Q0, Q1);
 
     __ vmovrs(R0, S8);
     __ vmovrs(R1, S9);
@@ -2214,7 +2214,7 @@
     __ rsb(shift, shift, Operand(0));
     __ vmovsr(stemp0, shift);
     __ vshlqi(kWordPair, temp, out, temp);
-    __ vceqqi(kWord, out, temp, value);
+    __ vceqqi(kFourBytes, out, temp, value);
     // Low 64 bits of temp should be all 1's, otherwise temp != value and
     // we deopt.
     __ vmovrs(shift, sout0);
@@ -2269,7 +2269,7 @@
     __ rsb(shift, shift, Operand(0));
     __ vmovsr(stemp0, shift);
     __ vshlqi(kWordPair, temp, out, temp);
-    __ vceqqi(kWord, out, temp, value);
+    __ vceqqi(kFourBytes, out, temp, value);
     // Low 64 bits of temp should be all 1's, otherwise temp != value and
     // we deopt.
     __ vmovrs(shift, sout0);
@@ -2356,7 +2356,7 @@
     __ mov(R0, Operand(8));
     __ vmovsr(S7, R0);
 
-    __ vsubqi(kHalfword, Q2, Q1, Q0);
+    __ vsubqi(kTwoBytes, Q2, Q1, Q0);
 
     __ vmovrs(R0, S8);
     __ vmovrs(R1, S9);
@@ -2397,7 +2397,7 @@
     __ mov(R0, Operand(8));
     __ vmovsr(S7, R0);
 
-    __ vsubqi(kWord, Q2, Q1, Q0);
+    __ vsubqi(kFourBytes, Q2, Q1, Q0);
 
     __ vmovrs(R0, S8);
     __ vmovrs(R1, S9);
@@ -2508,7 +2508,7 @@
     __ mov(R0, Operand(8));
     __ vmovsr(S7, R0);
 
-    __ vmulqi(kHalfword, Q2, Q1, Q0);
+    __ vmulqi(kTwoBytes, Q2, Q1, Q0);
 
     __ vmovrs(R0, S8);
     __ vmovrs(R1, S9);
@@ -2549,7 +2549,7 @@
     __ mov(R0, Operand(8));
     __ vmovsr(S7, R0);
 
-    __ vmulqi(kWord, Q2, Q1, Q0);
+    __ vmulqi(kFourBytes, Q2, Q1, Q0);
 
     __ vmovrs(R0, S8);
     __ vmovrs(R1, S9);
@@ -3071,7 +3071,7 @@
     __ vmovsr(S5, R1);
 
     // Should copy 0xff to each byte of Q0.
-    __ vdup(kHalfword, Q0, D2, 1);
+    __ vdup(kTwoBytes, Q0, D2, 1);
 
     __ vmovrs(R0, S0);
     __ vmovrs(R1, S1);
@@ -3101,7 +3101,7 @@
     __ vmovsr(S5, R1);
 
     // Should copy 0xff to each byte of Q0.
-    __ vdup(kWord, Q0, D2, 1);
+    __ vdup(kFourBytes, Q0, D2, 1);
 
     __ vmovrs(R0, S0);
     __ vmovrs(R1, S1);
@@ -3173,7 +3173,7 @@
     __ mov(R0, Operand(40));
     __ vmovsr(S7, R0);
 
-    __ vceqqi(kWord, Q2, Q1, Q0);
+    __ vceqqi(kFourBytes, Q2, Q1, Q0);
 
     __ vmovrs(R0, S8);
     __ vmovrs(R1, S9);
@@ -3247,7 +3247,7 @@
     __ mov(R0, Operand(1));
     __ vmovsr(S7, R0);
 
-    __ vcgeqi(kWord, Q2, Q1, Q0);
+    __ vcgeqi(kFourBytes, Q2, Q1, Q0);
 
     __ vmovrs(R0, S8);
     __ vmovrs(R1, S9);
@@ -3288,7 +3288,7 @@
     __ mov(R0, Operand(1));
     __ vmovsr(S7, R0);
 
-    __ vcugeqi(kWord, Q2, Q1, Q0);
+    __ vcugeqi(kFourBytes, Q2, Q1, Q0);
 
     __ vmovrs(R0, S8);
     __ vmovrs(R1, S9);
@@ -3362,7 +3362,7 @@
     __ mov(R0, Operand(1));
     __ vmovsr(S7, R0);
 
-    __ vcgtqi(kWord, Q2, Q1, Q0);
+    __ vcgtqi(kFourBytes, Q2, Q1, Q0);
 
     __ vmovrs(R0, S8);
     __ vmovrs(R1, S9);
@@ -3403,7 +3403,7 @@
     __ mov(R0, Operand(1));
     __ vmovsr(S7, R0);
 
-    __ vcugtqi(kWord, Q2, Q1, Q0);
+    __ vcugtqi(kFourBytes, Q2, Q1, Q0);
 
     __ vmovrs(R0, S8);
     __ vmovrs(R1, S9);
diff --git a/runtime/vm/compiler/assembler/assembler_base.h b/runtime/vm/compiler/assembler/assembler_base.h
index 03f0cfc..d240da5 100644
--- a/runtime/vm/compiler/assembler/assembler_base.h
+++ b/runtime/vm/compiler/assembler/assembler_base.h
@@ -28,6 +28,25 @@
 
 namespace compiler {
 
+enum OperandSize {
+  // Architecture-independent constants.
+  kByte,
+  kUnsignedByte,
+  kTwoBytes,  // Halfword (ARM), w(ord) (Intel)
+  kUnsignedTwoBytes,
+  kFourBytes,  // Word (ARM), l(ong) (Intel)
+  kUnsignedFourBytes,
+  kEightBytes,  // DoubleWord (ARM), q(uadword) (Intel)
+  // ARM-specific constants.
+  kSWord,
+  kDWord,
+  // 32-bit ARM specific constants.
+  kWordPair,
+  kRegList,
+  // 64-bit ARM specific constants.
+  kQWord,
+};
+
 // Forward declarations.
 class Assembler;
 class AssemblerFixup;
@@ -330,6 +349,12 @@
         object_pool_builder_(object_pool_builder) {}
   virtual ~AssemblerBase();
 
+  // Used for near/far jumps on IA32/X64, ignored for ARM.
+  enum JumpDistance : bool {
+    kFarJump = false,
+    kNearJump = true,
+  };
+
   intptr_t CodeSize() const { return buffer_.Size(); }
 
   uword CodeAddress(intptr_t offset) { return buffer_.Address(offset); }
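
// The rename is mechanical; the old ARM-flavoured names map onto the new
// architecture-independent ones with no change to the emitted encodings:
//
//   kHalfword   -> kTwoBytes     kUnsignedHalfword -> kUnsignedTwoBytes
//   kWord       -> kFourBytes    kUnsignedWord     -> kUnsignedFourBytes
//   kDoubleWord -> kEightBytes
//
// Likewise, JumpDistance replaces the old per-assembler bool constants, so a
// near conditional jump on IA32/X64 is now spelled (label name illustrative):
//
//   __ j(NOT_ZERO, &slow_path, kNearJump);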
diff --git a/runtime/vm/compiler/assembler/assembler_ia32.cc b/runtime/vm/compiler/assembler/assembler_ia32.cc
index 06f27c2..facc34e 100644
--- a/runtime/vm/compiler/assembler/assembler_ia32.cc
+++ b/runtime/vm/compiler/assembler/assembler_ia32.cc
@@ -1665,7 +1665,7 @@
   EmitUint8(0xF4);
 }
 
-void Assembler::j(Condition condition, Label* label, bool near) {
+void Assembler::j(Condition condition, Label* label, JumpDistance distance) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   if (label->IsBound()) {
     static const int kShortSize = 2;
@@ -1680,7 +1680,7 @@
       EmitUint8(0x80 + condition);
       EmitInt32(offset - kLongSize);
     }
-  } else if (near) {
+  } else if (distance == kNearJump) {
     EmitUint8(0x70 + condition);
     EmitNearLabelLink(label);
   } else {
@@ -1710,7 +1710,7 @@
   EmitOperand(4, address);
 }
 
-void Assembler::jmp(Label* label, bool near) {
+void Assembler::jmp(Label* label, JumpDistance distance) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   if (label->IsBound()) {
     static const int kShortSize = 2;
@@ -1724,7 +1724,7 @@
       EmitUint8(0xE9);
       EmitInt32(offset - kLongSize);
     }
-  } else if (near) {
+  } else if (distance == kNearJump) {
     EmitUint8(0xEB);
     EmitNearLabelLink(label);
   } else {
@@ -1917,7 +1917,7 @@
     testl(value, Immediate(0xf));
   }
   Condition condition = how_to_jump == kJumpToNoUpdate ? NOT_ZERO : ZERO;
-  bool distance = how_to_jump == kJumpToNoUpdate ? kNearJump : kFarJump;
+  auto const distance = how_to_jump == kJumpToNoUpdate ? kNearJump : kFarJump;
   j(condition, label, distance);
 }
 
@@ -2429,7 +2429,7 @@
 void Assembler::MaybeTraceAllocation(intptr_t cid,
                                      Register temp_reg,
                                      Label* trace,
-                                     bool near_jump) {
+                                     JumpDistance distance) {
   ASSERT(cid > 0);
   Address state_address(kNoRegister, 0);
 
@@ -2446,13 +2446,13 @@
   cmpb(Address(temp_reg, class_offset), Immediate(0));
   // We are tracing for this class, jump to the trace label which will use
   // the allocation stub.
-  j(NOT_ZERO, trace, near_jump);
+  j(NOT_ZERO, trace, distance);
 }
 #endif  // !PRODUCT
 
 void Assembler::TryAllocate(const Class& cls,
                             Label* failure,
-                            bool near_jump,
+                            JumpDistance distance,
                             Register instance_reg,
                             Register temp_reg) {
   ASSERT(failure != NULL);
@@ -2464,12 +2464,12 @@
     // (i.e. the allocation stub) which will allocate the object and trace the
     // allocation call site.
     const classid_t cid = target::Class::GetId(cls);
-    NOT_IN_PRODUCT(MaybeTraceAllocation(cid, temp_reg, failure, near_jump));
+    NOT_IN_PRODUCT(MaybeTraceAllocation(cid, temp_reg, failure, distance));
     movl(instance_reg, Address(THR, target::Thread::top_offset()));
     addl(instance_reg, Immediate(instance_size));
     // instance_reg: potential next object start.
     cmpl(instance_reg, Address(THR, target::Thread::end_offset()));
-    j(ABOVE_EQUAL, failure, near_jump);
+    j(ABOVE_EQUAL, failure, distance);
     // Successfully allocated the object, now update top to point to
     // next object start and store the class in the class field of object.
     movl(Address(THR, target::Thread::top_offset()), instance_reg);
@@ -2487,7 +2487,7 @@
 void Assembler::TryAllocateArray(intptr_t cid,
                                  intptr_t instance_size,
                                  Label* failure,
-                                 bool near_jump,
+                                 JumpDistance distance,
                                  Register instance,
                                  Register end_address,
                                  Register temp_reg) {
@@ -2498,7 +2498,7 @@
     // If this allocation is traced, the program will jump to the failure path
     // (i.e. the allocation stub) which will allocate the object and trace the
     // allocation call site.
-    NOT_IN_PRODUCT(MaybeTraceAllocation(cid, temp_reg, failure, near_jump));
+    NOT_IN_PRODUCT(MaybeTraceAllocation(cid, temp_reg, failure, distance));
     movl(instance, Address(THR, target::Thread::top_offset()));
     movl(end_address, instance);
 
diff --git a/runtime/vm/compiler/assembler/assembler_ia32.h b/runtime/vm/compiler/assembler/assembler_ia32.h
index 25bdf4b..a927fec 100644
--- a/runtime/vm/compiler/assembler/assembler_ia32.h
+++ b/runtime/vm/compiler/assembler/assembler_ia32.h
@@ -238,9 +238,6 @@
   }
   ~Assembler() {}
 
-  static const bool kNearJump = true;
-  static const bool kFarJump = false;
-
   /*
    * Emit Machine Instructions.
    */
@@ -556,12 +553,12 @@
   void int3();
   void hlt();
 
-  void j(Condition condition, Label* label, bool near = kFarJump);
+  void j(Condition condition, Label* label, JumpDistance distance = kFarJump);
   void j(Condition condition, const ExternalLabel* label);
 
   void jmp(Register reg);
   void jmp(const Address& address);
-  void jmp(Label* label, bool near = kFarJump);
+  void jmp(Label* label, JumpDistance distance = kFarJump);
   void jmp(const ExternalLabel* label);
 
   void lock();
@@ -878,7 +875,7 @@
   void MaybeTraceAllocation(intptr_t cid,
                             Register temp_reg,
                             Label* trace,
-                            bool near_jump);
+                            JumpDistance distance);
 
   // Inlined allocation of an instance of class 'cls', code has no runtime
   // calls. Jump to 'failure' if the instance cannot be allocated here.
@@ -886,14 +883,14 @@
   // Only the tags field of the object is initialized.
   void TryAllocate(const Class& cls,
                    Label* failure,
-                   bool near_jump,
+                   JumpDistance distance,
                    Register instance_reg,
                    Register temp_reg);
 
   void TryAllocateArray(intptr_t cid,
                         intptr_t instance_size,
                         Label* failure,
-                        bool near_jump,
+                        JumpDistance distance,
                         Register instance,
                         Register end_address,
                         Register temp);
diff --git a/runtime/vm/compiler/assembler/assembler_x64.cc b/runtime/vm/compiler/assembler/assembler_x64.cc
index 4310ef2..f33e605 100644
--- a/runtime/vm/compiler/assembler/assembler_x64.cc
+++ b/runtime/vm/compiler/assembler/assembler_x64.cc
@@ -808,16 +808,17 @@
 
 void Assembler::MulImmediate(Register reg,
                              const Immediate& imm,
-                             OperandWidth width) {
+                             OperandSize width) {
+  ASSERT(width == kFourBytes || width == kEightBytes);
   if (imm.is_int32()) {
-    if (width == k32Bit) {
+    if (width == kFourBytes) {
       imull(reg, imm);
     } else {
       imulq(reg, imm);
     }
   } else {
     ASSERT(reg != TMP);
-    ASSERT(width != k32Bit);
+    ASSERT(width == kEightBytes);
     movq(TMP, imm);
     imulq(reg, TMP);
   }
@@ -967,7 +968,7 @@
   }
 }
 
-void Assembler::j(Condition condition, Label* label, bool near) {
+void Assembler::j(Condition condition, Label* label, JumpDistance distance) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   if (label->IsBound()) {
     static const int kShortSize = 2;
@@ -982,7 +983,7 @@
       EmitUint8(0x80 + condition);
       EmitInt32(offset - kLongSize);
     }
-  } else if (near) {
+  } else if (distance == kNearJump) {
     EmitUint8(0x70 + condition);
     EmitNearLabelLink(label);
   } else {
@@ -1000,7 +1001,7 @@
   Bind(&no_jump);
 }
 
-void Assembler::jmp(Label* label, bool near) {
+void Assembler::jmp(Label* label, JumpDistance distance) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   if (label->IsBound()) {
     static const int kShortSize = 2;
@@ -1014,7 +1015,7 @@
       EmitUint8(0xE9);
       EmitInt32(offset - kLongSize);
     }
-  } else if (near) {
+  } else if (distance == kNearJump) {
     EmitUint8(0xEB);
     EmitNearLabelLink(label);
   } else {
@@ -1072,28 +1073,29 @@
 
 void Assembler::AddImmediate(Register reg,
                              const Immediate& imm,
-                             OperandWidth width) {
+                             OperandSize width) {
+  ASSERT(width == kFourBytes || width == kEightBytes);
   const int64_t value = imm.value();
   if (value == 0) {
     return;
   }
   if ((value > 0) || (value == kMinInt64)) {
     if (value == 1) {
-      if (width == k32Bit) {
+      if (width == kFourBytes) {
         incl(reg);
       } else {
         incq(reg);
       }
     } else {
-      if (imm.is_int32() || (width == k32Bit && imm.is_uint32())) {
-        if (width == k32Bit) {
+      if (imm.is_int32() || (width == kFourBytes && imm.is_uint32())) {
+        if (width == kFourBytes) {
           addl(reg, imm);
         } else {
           addq(reg, imm);
         }
       } else {
         ASSERT(reg != TMP);
-        ASSERT(width != k32Bit);
+        ASSERT(width == kEightBytes);
         LoadImmediate(TMP, imm);
         addq(reg, TMP);
       }
@@ -1126,29 +1128,30 @@
 
 void Assembler::SubImmediate(Register reg,
                              const Immediate& imm,
-                             OperandWidth width) {
+                             OperandSize width) {
+  ASSERT(width == kFourBytes || width == kEightBytes);
   const int64_t value = imm.value();
   if (value == 0) {
     return;
   }
   if ((value > 0) || (value == kMinInt64) ||
-      (value == kMinInt32 && width == k32Bit)) {
+      (value == kMinInt32 && width == kFourBytes)) {
     if (value == 1) {
-      if (width == k32Bit) {
+      if (width == kFourBytes) {
         decl(reg);
       } else {
         decq(reg);
       }
     } else {
       if (imm.is_int32()) {
-        if (width == k32Bit) {
+        if (width == kFourBytes) {
           subl(reg, imm);
         } else {
           subq(reg, imm);
         }
       } else {
         ASSERT(reg != TMP);
-        ASSERT(width != k32Bit);
+        ASSERT(width == kEightBytes);
         LoadImmediate(TMP, imm);
         subq(reg, TMP);
       }
@@ -1361,7 +1364,7 @@
     testl(value, Immediate(0x1f));
   }
   Condition condition = how_to_jump == kJumpToNoUpdate ? NOT_ZERO : ZERO;
-  bool distance = how_to_jump == kJumpToNoUpdate ? kNearJump : kFarJump;
+  JumpDistance distance = how_to_jump == kJumpToNoUpdate ? kNearJump : kFarJump;
   j(condition, label, distance);
 }
 
@@ -1532,6 +1535,28 @@
   label->BindTo(bound);
 }
 
+void Assembler::LoadFromOffset(Register reg,
+                               const Address& address,
+                               OperandSize sz) {
+  switch (sz) {
+    case kByte:
+      return movsxb(reg, address);
+    case kUnsignedByte:
+      return movzxb(reg, address);
+    case kTwoBytes:
+      return movsxw(reg, address);
+    case kUnsignedTwoBytes:
+      return movzxw(reg, address);
+    case kFourBytes:
+      return movl(reg, address);
+    case kEightBytes:
+      return movq(reg, address);
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
 void Assembler::EnterFrame(intptr_t frame_size) {
   if (prologue_offset_ == -1) {
     prologue_offset_ = CodeSize();
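
// Illustratively, the new X64 overload selects the mov variant from the
// operand size, so signed sizes sign-extend into the full 64-bit register
// while unsigned sizes zero-extend (register and address are illustrative):
//
//   __ LoadFromOffset(RAX, Address(RSP, 0), kByte);          // movsxb
//   __ LoadFromOffset(RAX, Address(RSP, 0), kUnsignedByte);  // movzxb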
@@ -1580,9 +1605,8 @@
 #endif
 }
 
-void Assembler::PushRegisters(intptr_t cpu_register_set,
-                              intptr_t xmm_register_set) {
-  const intptr_t xmm_regs_count = RegisterSet::RegisterCount(xmm_register_set);
+void Assembler::PushRegisters(const RegisterSet& register_set) {
+  const intptr_t xmm_regs_count = register_set.FpuRegisterCount();
   if (xmm_regs_count > 0) {
     AddImmediate(RSP, Immediate(-xmm_regs_count * kFpuRegisterSize));
     // Store XMM registers with the lowest register number at the lowest
@@ -1590,7 +1614,7 @@
     intptr_t offset = 0;
     for (intptr_t i = 0; i < kNumberOfXmmRegisters; ++i) {
       XmmRegister xmm_reg = static_cast<XmmRegister>(i);
-      if (RegisterSet::Contains(xmm_register_set, xmm_reg)) {
+      if (register_set.ContainsFpuRegister(xmm_reg)) {
         movups(Address(RSP, offset), xmm_reg);
         offset += kFpuRegisterSize;
       }
@@ -1602,28 +1626,27 @@
   // in which the registers are encoded in the safe point's stack map.
   for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
     Register reg = static_cast<Register>(i);
-    if (RegisterSet::Contains(cpu_register_set, reg)) {
+    if (register_set.ContainsRegister(reg)) {
       pushq(reg);
     }
   }
 }
 
-void Assembler::PopRegisters(intptr_t cpu_register_set,
-                             intptr_t xmm_register_set) {
+void Assembler::PopRegisters(const RegisterSet& register_set) {
   for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
     Register reg = static_cast<Register>(i);
-    if (RegisterSet::Contains(cpu_register_set, reg)) {
+    if (register_set.ContainsRegister(reg)) {
       popq(reg);
     }
   }
 
-  const intptr_t xmm_regs_count = RegisterSet::RegisterCount(xmm_register_set);
+  const intptr_t xmm_regs_count = register_set.FpuRegisterCount();
   if (xmm_regs_count > 0) {
     // XMM registers have the lowest register number at the lowest address.
     intptr_t offset = 0;
     for (intptr_t i = 0; i < kNumberOfXmmRegisters; ++i) {
       XmmRegister xmm_reg = static_cast<XmmRegister>(i);
-      if (RegisterSet::Contains(xmm_register_set, xmm_reg)) {
+      if (register_set.ContainsFpuRegister(xmm_reg)) {
         movups(xmm_reg, Address(RSP, offset));
         offset += kFpuRegisterSize;
       }
@@ -1633,6 +1656,10 @@
   }
 }
 
+static const RegisterSet kVolatileRegisterSet(
+    CallingConventions::kVolatileCpuRegisters,
+    CallingConventions::kVolatileXmmRegisters);
+
 void Assembler::EnterCallRuntimeFrame(intptr_t frame_space) {
   Comment("EnterCallRuntimeFrame");
   EnterFrame(0);
@@ -1642,8 +1669,7 @@
   }
 
   // TODO(vegorov): avoid saving FpuTMP, it is used only as scratch.
-  PushRegisters(CallingConventions::kVolatileCpuRegisters,
-                CallingConventions::kVolatileXmmRegisters);
+  PushRegisters(kVolatileRegisterSet);
 
   ReserveAlignedFrameSpace(frame_space);
 }
@@ -1665,8 +1691,7 @@
   leaq(RSP, Address(RBP, -kPushedRegistersSize));
 
   // TODO(vegorov): avoid saving FpuTMP, it is used only as scratch.
-  PopRegisters(CallingConventions::kVolatileCpuRegisters,
-               CallingConventions::kVolatileXmmRegisters);
+  PopRegisters(kVolatileRegisterSet);
 
   LeaveStubFrame();
 }
@@ -1889,7 +1914,7 @@
 #ifndef PRODUCT
 void Assembler::MaybeTraceAllocation(intptr_t cid,
                                      Label* trace,
-                                     bool near_jump) {
+                                     JumpDistance distance) {
   ASSERT(cid > 0);
   const intptr_t shared_table_offset =
       target::Isolate::shared_class_table_offset();
@@ -1904,13 +1929,13 @@
   cmpb(Address(temp_reg, class_offset), Immediate(0));
   // We are tracing allocations for this class; jump to the trace label,
   // which will use the allocation stub.
-  j(NOT_ZERO, trace, near_jump);
+  j(NOT_ZERO, trace, distance);
 }
 #endif  // !PRODUCT
 
 void Assembler::TryAllocate(const Class& cls,
                             Label* failure,
-                            bool near_jump,
+                            JumpDistance distance,
                             Register instance_reg,
                             Register temp) {
   ASSERT(failure != NULL);
@@ -1921,12 +1946,12 @@
     // If this allocation is traced, the program will jump to the failure
     // path (i.e. the allocation stub), which will allocate the object and
     // trace the allocation call site.
-    NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, near_jump));
+    NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, distance));
     movq(instance_reg, Address(THR, target::Thread::top_offset()));
     addq(instance_reg, Immediate(instance_size));
     // instance_reg: potential next object start.
     cmpq(instance_reg, Address(THR, target::Thread::end_offset()));
-    j(ABOVE_EQUAL, failure, near_jump);
+    j(ABOVE_EQUAL, failure, distance);
     // Successfully allocated the object, now update top to point to
     // next object start and store the class in the class field of object.
     movq(Address(THR, target::Thread::top_offset()), instance_reg);
@@ -1946,7 +1971,7 @@
 void Assembler::TryAllocateArray(intptr_t cid,
                                  intptr_t instance_size,
                                  Label* failure,
-                                 bool near_jump,
+                                 JumpDistance distance,
                                  Register instance,
                                  Register end_address,
                                  Register temp) {
@@ -1956,7 +1981,7 @@
     // If this allocation is traced, the program will jump to the failure
     // path (i.e. the allocation stub), which will allocate the object and
     // trace the allocation call site.
-    NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, near_jump));
+    NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, distance));
     movq(instance, Address(THR, target::Thread::top_offset()));
     movq(end_address, instance);
 
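
A sketch of the save/restore pairing these hunks establish; it assumes only what the hunks show (the RegisterSet constructor takes the CPU mask first, then the XMM mask, and `__` is the usual assembler shorthand):

    // A single shared constant now pairs the save and the restore, so
    // EnterCallRuntimeFrame and LeaveCallRuntimeFrame cannot drift apart.
    static const RegisterSet kVolatileRegisterSet(
        CallingConventions::kVolatileCpuRegisters,   // CPU mask first,
        CallingConventions::kVolatileXmmRegisters);  // then the XMM mask.
    __ PushRegisters(kVolatileRegisterSet);
    __ PopRegisters(kVolatileRegisterSet);
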
diff --git a/runtime/vm/compiler/assembler/assembler_x64.h b/runtime/vm/compiler/assembler/assembler_x64.h
index f6576ff..0634382 100644
--- a/runtime/vm/compiler/assembler/assembler_x64.h
+++ b/runtime/vm/compiler/assembler/assembler_x64.h
@@ -27,6 +27,7 @@
 
 // Forward declarations.
 class FlowGraphCompiler;
+class RegisterSet;
 
 namespace compiler {
 
@@ -292,9 +293,6 @@
 
   ~Assembler() {}
 
-  static const bool kNearJump = true;
-  static const bool kFarJump = false;
-
   /*
    * Emit Machine Instructions.
    */
@@ -307,6 +305,7 @@
   void pushq(const Address& address) { EmitUnaryL(address, 0xFF, 6); }
   void pushq(const Immediate& imm);
   void PushImmediate(const Immediate& imm);
+  void PushImmediate(int64_t value) { PushImmediate(Immediate(value)); }
 
   void popq(Register reg);
   void popq(const Address& address) { EmitUnaryL(address, 0x8F, 0); }
@@ -614,16 +613,12 @@
   REGULAR_UNARY(dec, 0xFF, 1)
 #undef REGULAR_UNARY
 
-  // We could use kWord, kDoubleWord, and kQuadWord here, but it is rather
-  // confusing since the same sizes mean something different on ARM.
-  enum OperandWidth { k32Bit, k64Bit };
-
   void imull(Register reg, const Immediate& imm);
 
   void imulq(Register dst, const Immediate& imm);
   void MulImmediate(Register reg,
                     const Immediate& imm,
-                    OperandWidth width = k64Bit);
+                    OperandSize width = kEightBytes);
 
   void shll(Register reg, const Immediate& imm);
   void shll(Register operand, Register shifter);
@@ -653,10 +648,10 @@
   // 'size' indicates size in bytes and must be in the range 1..8.
   void nop(int size = 1);
 
-  void j(Condition condition, Label* label, bool near = kFarJump);
+  void j(Condition condition, Label* label, JumpDistance distance = kFarJump);
   void jmp(Register reg) { EmitUnaryL(reg, 0xFF, 4); }
   void jmp(const Address& address) { EmitUnaryL(address, 0xFF, 4); }
-  void jmp(Label* label, bool near = kFarJump);
+  void jmp(Label* label, JumpDistance distance = kFarJump);
   void jmp(const ExternalLabel* label);
   void jmp(const Code& code);
 
@@ -683,7 +678,11 @@
   // Methods for High-level operations and implemented on all architectures.
   void Ret() { ret(); }
   void CompareRegisters(Register a, Register b);
-  void BranchIf(Condition condition, Label* label) { j(condition, label); }
+  void BranchIf(Condition condition,
+                Label* label,
+                JumpDistance distance = kFarJump) {
+    j(condition, label, distance);
+  }
 
   // Issues a move instruction if 'to' is not the same as 'from'.
   void MoveRegister(Register to, Register from);
@@ -704,14 +703,16 @@
   // TODO(koda): Assert that these are not used for heap objects.
   void AddImmediate(Register reg,
                     const Immediate& imm,
-                    OperandWidth width = k64Bit);
-  void AddImmediate(Register reg, int32_t value, OperandWidth width = k64Bit) {
+                    OperandSize width = kEightBytes);
+  void AddImmediate(Register reg,
+                    int32_t value,
+                    OperandSize width = kEightBytes) {
     AddImmediate(reg, Immediate(value), width);
   }
   void AddImmediate(const Address& address, const Immediate& imm);
   void SubImmediate(Register reg,
                     const Immediate& imm,
-                    OperandWidth width = k64Bit);
+                    OperandSize width = kEightBytes);
   void SubImmediate(const Address& address, const Immediate& imm);
 
   void Drop(intptr_t stack_elements, Register tmp = TMP);
@@ -804,8 +805,8 @@
     cmpxchgl(address, reg);
   }
 
-  void PushRegisters(intptr_t cpu_register_set, intptr_t xmm_register_set);
-  void PopRegisters(intptr_t cpu_register_set, intptr_t xmm_register_set);
+  void PushRegisters(const RegisterSet& registers);
+  void PopRegisters(const RegisterSet& registers);
 
   void CheckCodePointer();
 
@@ -868,9 +869,42 @@
 
   void Align(int alignment, intptr_t offset);
   void Bind(Label* label);
-  void Jump(Label* label) { jmp(label); }
+  // Unconditional jump to a given label.
+  void Jump(Label* label, JumpDistance distance = kFarJump) {
+    jmp(label, distance);
+  }
+  // Unconditional jump to a given address in memory.
+  void Jump(const Address& address) { jmp(address); }
 
-  void LoadField(Register dst, FieldAddress address) { movq(dst, address); }
+  // Arch-specific LoadFromOffset to choose the right operation for [sz].
+  void LoadFromOffset(Register dst,
+                      const Address& address,
+                      OperandSize sz = kEightBytes);
+  void LoadFromOffset(Register dst,
+                      Register base,
+                      int32_t offset,
+                      OperandSize sz = kEightBytes) {
+    LoadFromOffset(dst, Address(base, offset), sz);
+  }
+  void LoadField(Register dst,
+                 FieldAddress address,
+                 OperandSize sz = kEightBytes) {
+    LoadFromOffset(dst, address, sz);
+  }
+  void LoadFieldFromOffset(Register dst,
+                           Register base,
+                           int32_t offset,
+                           OperandSize sz = kEightBytes) {
+    LoadFromOffset(dst, FieldAddress(base, offset), sz);
+  }
+  void LoadIndexedPayload(Register dst,
+                          Register base,
+                          int32_t payload_offset,
+                          Register index,
+                          ScaleFactor scale,
+                          OperandSize sz = kEightBytes) {
+    LoadFromOffset(dst, FieldAddress(base, index, scale, payload_offset), sz);
+  }
   void LoadMemoryValue(Register dst, Register base, int32_t offset) {
     movq(dst, Address(base, offset));
   }
@@ -956,7 +990,7 @@
 
   // If allocation tracing for |cid| is enabled, this jumps to the |trace|
   // label, which allocates in the runtime where tracing occurs.
-  void MaybeTraceAllocation(intptr_t cid, Label* trace, bool near_jump);
+  void MaybeTraceAllocation(intptr_t cid, Label* trace, JumpDistance distance);
 
   // Inlined allocation of an instance of class 'cls'; no runtime calls.
   // Jumps to 'failure' if the instance cannot be allocated here.
@@ -964,14 +998,14 @@
   // Only the tags field of the object is initialized.
   void TryAllocate(const Class& cls,
                    Label* failure,
-                   bool near_jump,
+                   JumpDistance distance,
                    Register instance_reg,
                    Register temp);
 
   void TryAllocateArray(intptr_t cid,
                         intptr_t instance_size,
                         Label* failure,
-                        bool near_jump,
+                        JumpDistance distance,
                         Register instance,
                         Register end_address,
                         Register temp);
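
A hedged sketch of the call-site shapes the revised header allows. It assumes kNearJump is the JumpDistance counterpart of the kFarJump default shown above, and RAX/RBX stand in for whatever registers a real caller would use:

    compiler::Label done;
    __ LoadFieldFromOffset(RAX, RBX, 16);              // movq (kEightBytes default)
    __ LoadFieldFromOffset(RAX, RBX, 16, kFourBytes);  // movl
    __ BranchIf(EQUAL, &done, kNearJump);  // distance is now a typed enum,
                                           // not an easily-swapped bool.
    __ Bind(&done);
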
diff --git a/runtime/vm/compiler/assembler/assembler_x64_test.cc b/runtime/vm/compiler/assembler/assembler_x64_test.cc
index 8c416ab..7a21b97 100644
--- a/runtime/vm/compiler/assembler/assembler_x64_test.cc
+++ b/runtime/vm/compiler/assembler/assembler_x64_test.cc
@@ -6,6 +6,7 @@
 #if defined(TARGET_ARCH_X64)
 
 #include "vm/compiler/assembler/assembler.h"
+#include "vm/compiler/backend/locations.h"
 #include "vm/cpu.h"
 #include "vm/os.h"
 #include "vm/unit_test.h"
@@ -3765,7 +3766,8 @@
   const intptr_t cpu_register_set = 0;
   const intptr_t fpu_register_set =
       ((1 << XMM10) | (1 << XMM11)) & CallingConventions::kVolatileXmmRegisters;
-  __ PushRegisters(cpu_register_set, fpu_register_set);
+  const RegisterSet register_set(cpu_register_set, fpu_register_set);
+  __ PushRegisters(register_set);
   __ movl(RAX, Immediate(0x2));
   __ movd(XMM10, RAX);
   __ shufps(XMM10, XMM10, Immediate(0x0));
@@ -3778,7 +3780,7 @@
   __ pushq(RAX);
   __ movss(Address(RSP, 0), XMM10);
   __ popq(RAX);
-  __ PopRegisters(cpu_register_set, fpu_register_set);
+  __ PopRegisters(register_set);
   __ ret();
 }
 
@@ -5878,7 +5880,7 @@
   }
   {
     __ LoadImmediate(RAX, Immediate(42));
-    __ MulImmediate(RAX, Immediate(kBillion), Assembler::k32Bit);
+    __ MulImmediate(RAX, Immediate(kBillion), kFourBytes);
     Label ok;
     __ CompareImmediate(RAX, Immediate((42 * kBillion) & 0xffffffffll));
     __ j(EQUAL, &ok);
@@ -5896,9 +5898,9 @@
   }
   {
     __ LoadImmediate(RAX, Immediate(kBillion));
-    __ AddImmediate(RAX, Immediate(kBillion), Assembler::k32Bit);
-    __ AddImmediate(RAX, Immediate(kBillion), Assembler::k32Bit);
-    __ AddImmediate(RAX, Immediate(kBillion), Assembler::k32Bit);
+    __ AddImmediate(RAX, Immediate(kBillion), kFourBytes);
+    __ AddImmediate(RAX, Immediate(kBillion), kFourBytes);
+    __ AddImmediate(RAX, Immediate(kBillion), kFourBytes);
     Label ok;
     __ CompareImmediate(RAX, Immediate((4 * kBillion) & 0xffffffffll));
     __ j(EQUAL, &ok);
@@ -5908,9 +5910,9 @@
   {
     __ LoadImmediate(RAX, Immediate(kBillion));
     __ AddImmediate(RAX, Immediate(static_cast<int32_t>(3 * kBillion)),
-                    Assembler::k32Bit);
-    __ AddImmediate(RAX, Immediate(kBillion), Assembler::k32Bit);
-    __ AddImmediate(RAX, Immediate(-kBillion), Assembler::k32Bit);
+                    kFourBytes);
+    __ AddImmediate(RAX, Immediate(kBillion), kFourBytes);
+    __ AddImmediate(RAX, Immediate(-kBillion), kFourBytes);
     Label ok;
     __ CompareImmediate(RAX, Immediate((4 * kBillion) & 0xffffffffll));
     __ j(EQUAL, &ok);
@@ -5928,9 +5930,9 @@
   }
   {
     __ LoadImmediate(RAX, Immediate(-kBillion));
-    __ SubImmediate(RAX, Immediate(kBillion), Assembler::k32Bit);
-    __ SubImmediate(RAX, Immediate(kBillion), Assembler::k32Bit);
-    __ SubImmediate(RAX, Immediate(kBillion), Assembler::k32Bit);
+    __ SubImmediate(RAX, Immediate(kBillion), kFourBytes);
+    __ SubImmediate(RAX, Immediate(kBillion), kFourBytes);
+    __ SubImmediate(RAX, Immediate(kBillion), kFourBytes);
     Label ok;
     __ CompareImmediate(RAX, Immediate((-4 * kBillion) & 0xffffffffll));
     __ j(EQUAL, &ok);
@@ -5939,10 +5941,9 @@
   }
   {
     __ LoadImmediate(RAX, Immediate(kBillion));
-    __ SubImmediate(RAX, Immediate((-3 * kBillion) & 0xffffffffll),
-                    Assembler::k32Bit);
-    __ SubImmediate(RAX, Immediate(kBillion), Assembler::k32Bit);
-    __ SubImmediate(RAX, Immediate(-kBillion), Assembler::k32Bit);
+    __ SubImmediate(RAX, Immediate((-3 * kBillion) & 0xffffffffll), kFourBytes);
+    __ SubImmediate(RAX, Immediate(kBillion), kFourBytes);
+    __ SubImmediate(RAX, Immediate(-kBillion), kFourBytes);
     Label ok;
     __ CompareImmediate(RAX, Immediate((4 * kBillion) & 0xffffffffll));
     __ j(EQUAL, &ok);
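
The test migration above doubles as the recipe for other bitmask call sites (RegisterSet comes from backend/locations.h, per the new include); a condensed sketch:

    // Old: two raw masks, easy to pass in the wrong order.
    //   __ PushRegisters(cpu_register_set, fpu_register_set);
    // New: wrap both masks once and reuse the set for push and pop.
    const RegisterSet register_set(cpu_register_set, fpu_register_set);
    __ PushRegisters(register_set);
    // ... code that clobbers the saved registers ...
    __ PopRegisters(register_set);
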
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler.cc b/runtime/vm/compiler/backend/flow_graph_compiler.cc
index 9f3015f..34df72b 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler.cc
@@ -2092,6 +2092,18 @@
 }
 
 #define __ assembler()->
+
+void FlowGraphCompiler::CheckClassIds(Register class_id_reg,
+                                      const GrowableArray<intptr_t>& class_ids,
+                                      compiler::Label* is_equal_lbl,
+                                      compiler::Label* is_not_equal_lbl) {
+  for (const auto& id : class_ids) {
+    __ CompareImmediate(class_id_reg, id);
+    __ BranchIf(EQUAL, is_equal_lbl);
+  }
+  __ Jump(is_not_equal_lbl);
+}
+
 void FlowGraphCompiler::EmitTestAndCall(const CallTargets& targets,
                                         const String& function_name,
                                         ArgumentsInfo args_info,
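
This shared definition replaces four per-architecture copies (removed in the hunks below); it works everywhere because it uses only the portable CompareImmediate/BranchIf/Jump macro-assembler operations. A sketch of a hypothetical call site, with kSmiCid/kMintCid standing in for a real cid list and class_id_reg for whatever register holds the loaded class id:

    GrowableArray<intptr_t> cids(2);
    cids.Add(kSmiCid);
    cids.Add(kMintCid);
    compiler::Label on_match, no_match;
    // Compares class_id_reg against each cid in turn; if none compares
    // equal, execution falls through to the Jump(no_match).
    CheckClassIds(class_id_reg, cids, &on_match, &no_match);
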
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_arm.cc b/runtime/vm/compiler/backend/flow_graph_compiler_arm.cc
index 5f07a68..759a411 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_arm.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_arm.cc
@@ -285,17 +285,6 @@
                                      is_not_instance_lbl);
 }
 
-void FlowGraphCompiler::CheckClassIds(Register class_id_reg,
-                                      const GrowableArray<intptr_t>& class_ids,
-                                      compiler::Label* is_equal_lbl,
-                                      compiler::Label* is_not_equal_lbl) {
-  for (intptr_t i = 0; i < class_ids.length(); i++) {
-    __ CompareImmediate(class_id_reg, class_ids[i]);
-    __ b(is_equal_lbl, EQ);
-  }
-  __ b(is_not_equal_lbl);
-}
-
 // Testing against an instantiated type with no arguments, without
 // SubtypeTestCache.
 // R0: instance being type checked (preserved).
@@ -663,15 +652,15 @@
   if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
     kPoolReg = PP;
   } else {
-    __ LoadFieldFromOffset(kWord, kPoolReg, CODE_REG,
+    __ LoadFieldFromOffset(kPoolReg, CODE_REG,
                            compiler::target::Code::object_pool_offset());
   }
   __ LoadImmediate(R4, type_arguments_field_offset);
   __ LoadFieldFromOffset(
-      kWord, R1, kPoolReg,
+      R1, kPoolReg,
       compiler::target::ObjectPool::element_offset(function_index));
   __ LoadFieldFromOffset(
-      kWord, CODE_REG, kPoolReg,
+      CODE_REG, kPoolReg,
       compiler::target::ObjectPool::element_offset(stub_index));
   __ Branch(compiler::FieldAddress(
       CODE_REG,
@@ -736,8 +725,7 @@
       const intptr_t slot_index =
           compiler::target::frame_layout.FrameSlotForVariableIndex(-i);
       Register value_reg = slot_index == args_desc_slot ? ARGS_DESC_REG : R0;
-      __ StoreToOffset(kWord, value_reg, FP,
-                       slot_index * compiler::target::kWordSize);
+      __ StoreToOffset(value_reg, FP, slot_index * compiler::target::kWordSize);
     }
   }
 
@@ -871,7 +859,7 @@
   bool old_use_far_branches = assembler_->use_far_branches();
   assembler_->set_use_far_branches(true);
 #endif  // DEBUG
-  __ LoadFieldFromOffset(kWord, R1, R0,
+  __ LoadFieldFromOffset(R1, R0,
                          compiler::target::Array::element_offset(edge_id));
   __ add(R1, R1, compiler::Operand(Smi::RawValue(1)));
   __ StoreIntoObjectNoBarrierOffset(
@@ -897,8 +885,7 @@
   // Pass the function explicitly, it is used in IC stub.
 
   __ LoadObject(R8, parsed_function().function());
-  __ LoadFromOffset(kWord, R0, SP,
-                    (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
+  __ LoadFromOffset(R0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
   __ LoadUniqueObject(R9, ic_data);
   GenerateDartCall(deopt_id, token_pos, stub, PcDescriptorsLayout::kIcCall,
                    locs, entry_kind);
@@ -915,8 +902,7 @@
   ASSERT(entry_kind == Code::EntryKind::kNormal ||
          entry_kind == Code::EntryKind::kUnchecked);
   ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
-  __ LoadFromOffset(kWord, R0, SP,
-                    (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
+  __ LoadFromOffset(R0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
   __ LoadUniqueObject(R9, ic_data);
   __ LoadUniqueObject(CODE_REG, stub);
   const intptr_t entry_point_offset =
@@ -946,7 +932,7 @@
 
   __ Comment("MegamorphicCall");
   // Load receiver into R0.
-  __ LoadFromOffset(kWord, R0, SP,
+  __ LoadFromOffset(R0, SP,
                     (args_desc.Count() - 1) * compiler::target::kWordSize);
   // Use same code pattern as instance call so it can be parsed by code patcher.
   if (FLAG_precompiled_mode) {
@@ -1019,7 +1005,7 @@
 
   __ Comment("InstanceCallAOT (%s)", switchable_call_mode);
   __ LoadFromOffset(
-      kWord, R0, SP,
+      R0, SP,
       (ic_data.SizeWithoutTypeArgs() - 1) * compiler::target::kWordSize);
   if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
     // The AOT runtime will replace the slot in the object pool with the
@@ -1207,8 +1193,7 @@
   __ Comment("EmitTestAndCall");
   // Load receiver into R0.
   __ LoadFromOffset(
-      kWord, R0, SP,
-      (count_without_type_args - 1) * compiler::target::kWordSize);
+      R0, SP, (count_without_type_args - 1) * compiler::target::kWordSize);
   __ LoadObject(R4, arguments_descriptor);
 }
 
@@ -1261,14 +1246,12 @@
     } else {
       ASSERT(destination.IsStackSlot());
       const intptr_t dest_offset = destination.ToStackSlotOffset();
-      __ StoreToOffset(kWord, source.reg(), destination.base_reg(),
-                       dest_offset);
+      __ StoreToOffset(source.reg(), destination.base_reg(), dest_offset);
     }
   } else if (source.IsStackSlot()) {
     if (destination.IsRegister()) {
       const intptr_t source_offset = source.ToStackSlotOffset();
-      __ LoadFromOffset(kWord, destination.reg(), source.base_reg(),
-                        source_offset);
+      __ LoadFromOffset(destination.reg(), source.base_reg(), source_offset);
     } else {
       ASSERT(destination.IsStackSlot());
       const intptr_t source_offset = source.ToStackSlotOffset();
@@ -1282,8 +1265,8 @@
       // temporary as we know we're in a ParallelMove.
       const Register temp_reg = LR;
 
-      __ LoadFromOffset(kWord, temp_reg, source.base_reg(), source_offset);
-      __ StoreToOffset(kWord, temp_reg, destination.base_reg(), dest_offset);
+      __ LoadFromOffset(temp_reg, source.base_reg(), source_offset);
+      __ StoreToOffset(temp_reg, destination.base_reg(), dest_offset);
     }
   } else if (source.IsFpuRegister()) {
     if (destination.IsFpuRegister()) {
@@ -1359,14 +1342,14 @@
   }
 }
 
-static OperandSize BytesToOperandSize(intptr_t bytes) {
+static compiler::OperandSize BytesToOperandSize(intptr_t bytes) {
   switch (bytes) {
     case 4:
-      return OperandSize::kWord;
+      return compiler::OperandSize::kFourBytes;
     case 2:
-      return OperandSize::kHalfword;
+      return compiler::OperandSize::kTwoBytes;
     case 1:
-      return OperandSize::kByte;
+      return compiler::OperandSize::kByte;
     default:
       UNIMPLEMENTED();
   }
@@ -1425,9 +1408,9 @@
       const auto& dst = destination.AsStack();
       ASSERT(!sign_or_zero_extend);
       ASSERT(dst_size <= 4);
-      const OperandSize op_size = BytesToOperandSize(dst_size);
-      __ StoreToOffset(op_size, src.reg_at(0), dst.base_register(),
-                       dst.offset_in_bytes());
+      auto const op_size = BytesToOperandSize(dst_size);
+      __ StoreToOffset(src.reg_at(0), dst.base_register(),
+                       dst.offset_in_bytes(), op_size);
     }
 
   } else if (source.IsFpuRegisters()) {
@@ -1485,9 +1468,9 @@
       const auto dst_reg = dst.reg_at(0);
       ASSERT(!sign_or_zero_extend);
       ASSERT(dst_size <= 4);
-      const OperandSize op_size = BytesToOperandSize(dst_size);
-      __ LoadFromOffset(op_size, dst_reg, src.base_register(),
-                        src.offset_in_bytes());
+      auto const op_size = BytesToOperandSize(dst_size);
+      __ LoadFromOffset(dst_reg, src.base_register(), src.offset_in_bytes(),
+                        op_size);
 
     } else if (destination.IsFpuRegisters()) {
       ASSERT(src_payload_type.Equals(dst_payload_type));
@@ -1665,8 +1648,8 @@
                                     intptr_t stack_offset) {
   ScratchRegisterScope tmp(this, reg);
   __ mov(tmp.reg(), compiler::Operand(reg));
-  __ LoadFromOffset(kWord, reg, base_reg, stack_offset);
-  __ StoreToOffset(kWord, tmp.reg(), base_reg, stack_offset);
+  __ LoadFromOffset(reg, base_reg, stack_offset);
+  __ StoreToOffset(tmp.reg(), base_reg, stack_offset);
 }
 
 void ParallelMoveResolver::Exchange(Register base_reg1,
@@ -1675,10 +1658,10 @@
                                     intptr_t stack_offset2) {
   ScratchRegisterScope tmp1(this, kNoRegister);
   ScratchRegisterScope tmp2(this, tmp1.reg());
-  __ LoadFromOffset(kWord, tmp1.reg(), base_reg1, stack_offset1);
-  __ LoadFromOffset(kWord, tmp2.reg(), base_reg2, stack_offset2);
-  __ StoreToOffset(kWord, tmp1.reg(), base_reg2, stack_offset2);
-  __ StoreToOffset(kWord, tmp2.reg(), base_reg1, stack_offset1);
+  __ LoadFromOffset(tmp1.reg(), base_reg1, stack_offset1);
+  __ LoadFromOffset(tmp2.reg(), base_reg2, stack_offset2);
+  __ StoreToOffset(tmp1.reg(), base_reg2, stack_offset2);
+  __ StoreToOffset(tmp2.reg(), base_reg1, stack_offset1);
 }
 
 void ParallelMoveResolver::SpillScratch(Register reg) {
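
On ARM, the word-sized loads and stores drop their leading kWord argument; the size now trails and defaults to the native word, matching the arm64 port. A before/after sketch:

    // Before: __ LoadFromOffset(kWord, R0, SP, offset);
    __ LoadFromOffset(R0, SP, offset);                           // word-sized
    __ LoadFromOffset(R0, SP, offset, compiler::kUnsignedByte);  // explicit size
    // Before: __ StoreToOffset(kWord, R1, FP, offset);
    __ StoreToOffset(R1, FP, offset);
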
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc b/runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc
index 803667f..8bed93e 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc
@@ -271,17 +271,6 @@
                                      is_not_instance_lbl);
 }
 
-void FlowGraphCompiler::CheckClassIds(Register class_id_reg,
-                                      const GrowableArray<intptr_t>& class_ids,
-                                      compiler::Label* is_equal_lbl,
-                                      compiler::Label* is_not_equal_lbl) {
-  for (intptr_t i = 0; i < class_ids.length(); i++) {
-    __ CompareImmediate(class_id_reg, class_ids[i]);
-    __ b(is_equal_lbl, EQ);
-  }
-  __ b(is_not_equal_lbl);
-}
-
 // Testing against an instantiated type with no arguments, without
 // SubtypeTestCache.
 // R0: instance being type checked (preserved).
@@ -653,13 +642,13 @@
            compiler::FieldAddress(CODE_REG, Code::owner_offset()));
 
     __ LoadFieldFromOffset(R7, function_reg, Function::usage_counter_offset(),
-                           kWord);
+                           compiler::kFourBytes);
     // Reoptimization of an optimized function is triggered by counting in
     // IC stubs, but not at the entry of the function.
     if (!is_optimizing()) {
       __ add(R7, R7, compiler::Operand(1));
       __ StoreFieldToOffset(R7, function_reg, Function::usage_counter_offset(),
-                            kWord);
+                            compiler::kFourBytes);
     }
     __ CompareImmediate(R7, GetOptimizationThreshold());
     ASSERT(function_reg == R6);
@@ -1305,16 +1294,16 @@
   }
 }
 
-static OperandSize BytesToOperandSize(intptr_t bytes) {
+static compiler::OperandSize BytesToOperandSize(intptr_t bytes) {
   switch (bytes) {
     case 8:
-      return OperandSize::kDoubleWord;
+      return compiler::OperandSize::kEightBytes;
     case 4:
-      return OperandSize::kWord;
+      return compiler::OperandSize::kFourBytes;
     case 2:
-      return OperandSize::kHalfword;
+      return compiler::OperandSize::kTwoBytes;
     case 1:
-      return OperandSize::kByte;
+      return compiler::OperandSize::kByte;
     default:
       UNIMPLEMENTED();
   }
@@ -1382,7 +1371,7 @@
       ASSERT(destination.IsStack());
       const auto& dst = destination.AsStack();
       ASSERT(!sign_or_zero_extend);
-      const OperandSize op_size = BytesToOperandSize(dst_size);
+      auto const op_size = BytesToOperandSize(dst_size);
       __ StoreToOffset(src.reg_at(0), dst.base_register(),
                        dst.offset_in_bytes(), op_size);
     }
@@ -1426,7 +1415,7 @@
       ASSERT(dst.num_regs() == 1);
       const auto dst_reg = dst.reg_at(0);
       ASSERT(!sign_or_zero_extend);
-      const OperandSize op_size = BytesToOperandSize(dst_size);
+      auto const op_size = BytesToOperandSize(dst_size);
       __ LoadFromOffset(dst_reg, src.base_register(), src.offset_in_bytes(),
                         op_size);
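
For reference, the operand-size renaming threaded through all of these files, reconstructed from the hunks above; the new names count bytes, so they read the same on every architecture:

    // Old ARM/ARM64 name               -> new compiler::OperandSize name
    // kByte, kUnsignedByte             -> kByte, kUnsignedByte (now compiler::)
    // kHalfword, kUnsignedHalfword     -> kTwoBytes, kUnsignedTwoBytes
    // kWord, kUnsignedWord             -> kFourBytes, kUnsignedFourBytes
    // kDoubleWord                      -> kEightBytes
    // Old x64 Assembler::k32Bit/k64Bit -> kFourBytes/kEightBytes
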
 
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_ia32.cc b/runtime/vm/compiler/backend/flow_graph_compiler_ia32.cc
index 3525a02..a7d6a6e 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_ia32.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_ia32.cc
@@ -300,17 +300,6 @@
                                      is_not_instance_lbl);
 }
 
-void FlowGraphCompiler::CheckClassIds(Register class_id_reg,
-                                      const GrowableArray<intptr_t>& class_ids,
-                                      compiler::Label* is_equal_lbl,
-                                      compiler::Label* is_not_equal_lbl) {
-  for (intptr_t i = 0; i < class_ids.length(); i++) {
-    __ cmpl(class_id_reg, compiler::Immediate(class_ids[i]));
-    __ j(EQUAL, is_equal_lbl);
-  }
-  __ jmp(is_not_equal_lbl);
-}
-
 // Testing against an instantiated type with no arguments, without
 // SubtypeTestCache.
 // EAX: instance to test against (preserved).
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_x64.cc b/runtime/vm/compiler/backend/flow_graph_compiler_x64.cc
index 80879b0..62efb18 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_x64.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_x64.cc
@@ -279,17 +279,6 @@
                                      is_not_instance_lbl);
 }
 
-void FlowGraphCompiler::CheckClassIds(Register class_id_reg,
-                                      const GrowableArray<intptr_t>& class_ids,
-                                      compiler::Label* is_equal_lbl,
-                                      compiler::Label* is_not_equal_lbl) {
-  for (intptr_t i = 0; i < class_ids.length(); i++) {
-    __ cmpl(class_id_reg, compiler::Immediate(class_ids[i]));
-    __ j(EQUAL, is_equal_lbl);
-  }
-  __ jmp(is_not_equal_lbl);
-}
-
 // Testing against an instantiated type with no arguments, without
 // SubtypeTestCache
 //
@@ -1135,13 +1124,11 @@
 #endif
 
   // TODO(vegorov): avoid saving non-volatile registers.
-  __ PushRegisters(locs->live_registers()->cpu_registers(),
-                   locs->live_registers()->fpu_registers());
+  __ PushRegisters(*locs->live_registers());
 }
 
 void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
-  __ PopRegisters(locs->live_registers()->cpu_registers(),
-                  locs->live_registers()->fpu_registers());
+  __ PopRegisters(*locs->live_registers());
 }
 
 #if defined(DEBUG)
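
A sketch of why the x64 save/restore shrinks to a single argument: live_registers() already yields a RegisterSet, so both register banks travel together and can no longer be mismatched at a call site:

    RegisterSet* live = locs->live_registers();
    __ PushRegisters(*live);  // saves the live CPU and XMM registers
    // ... slow-path runtime call that may clobber volatile registers ...
    __ PopRegisters(*live);   // restores both banks in the matching order
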
diff --git a/runtime/vm/compiler/backend/il_arm.cc b/runtime/vm/compiler/backend/il_arm.cc
index cbedc5b..b7fd68e 100644
--- a/runtime/vm/compiler/backend/il_arm.cc
+++ b/runtime/vm/compiler/backend/il_arm.cc
@@ -104,7 +104,7 @@
     case kTagged: {
       const auto out = locs()->out(0).reg();
       __ add(out, base_reg(), compiler::Operand(index, LSL, 1));
-      __ LoadFromOffset(kWord, out, out, offset());
+      __ LoadFromOffset(out, out, offset());
       break;
     }
     case kUnboxedInt64: {
@@ -112,9 +112,8 @@
       const auto out_hi = locs()->out(0).AsPairLocation()->At(1).reg();
 
       __ add(out_hi, base_reg(), compiler::Operand(index, LSL, 1));
-      __ LoadFromOffset(kWord, out_lo, out_hi, offset());
-      __ LoadFromOffset(kWord, out_hi, out_hi,
-                        offset() + compiler::target::kWordSize);
+      __ LoadFromOffset(out_lo, out_hi, offset());
+      __ LoadFromOffset(out_hi, out_hi, offset() + compiler::target::kWordSize);
       break;
     }
     case kUnboxedDouble: {
@@ -428,7 +427,7 @@
         } else {
           ASSERT(value.IsStackSlot());
           const intptr_t value_offset = value.ToStackSlotOffset();
-          __ LoadFromOffset(kWord, reg, value.base_reg(), value_offset);
+          __ LoadFromOffset(reg, value.base_reg(), value_offset);
         }
         pusher.PushRegister(compiler, reg);
       }
@@ -637,7 +636,7 @@
 
 void LoadLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   const Register result = locs()->out(0).reg();
-  __ LoadFromOffset(kWord, result, FP,
+  __ LoadFromOffset(result, FP,
                     compiler::target::FrameOffsetInBytesForVariable(&local()));
 }
 
@@ -651,7 +650,7 @@
   const Register value = locs()->in(0).reg();
   const Register result = locs()->out(0).reg();
   ASSERT(result == value);  // Assert that register assignment is correct.
-  __ StoreToOffset(kWord, value, FP,
+  __ StoreToOffset(value, FP,
                    compiler::target::FrameOffsetInBytesForVariable(&local()));
 }
 
@@ -719,7 +718,7 @@
     } else {
       __ LoadObject(tmp, value_);
     }
-    __ StoreToOffset(kWord, tmp, destination.base_reg(), dest_offset);
+    __ StoreToOffset(tmp, destination.base_reg(), dest_offset);
   }
 }
 
@@ -1377,8 +1376,7 @@
 
   // Restore top_resource.
   __ Pop(tmp);
-  __ StoreToOffset(kWord, tmp, THR,
-                   compiler::target::Thread::top_resource_offset());
+  __ StoreToOffset(tmp, THR, compiler::target::Thread::top_resource_offset());
 
   __ Pop(vm_tag_reg);
 
@@ -1466,24 +1464,24 @@
   }
 
   // Save the current VMTag on the stack.
-  __ LoadFromOffset(kWord, R0, THR, compiler::target::Thread::vm_tag_offset());
+  __ LoadFromOffset(R0, THR, compiler::target::Thread::vm_tag_offset());
   __ Push(R0);
 
   // Save top resource.
   const intptr_t top_resource_offset =
       compiler::target::Thread::top_resource_offset();
-  __ LoadFromOffset(kWord, R0, THR, top_resource_offset);
+  __ LoadFromOffset(R0, THR, top_resource_offset);
   __ Push(R0);
   __ LoadImmediate(R0, 0);
-  __ StoreToOffset(kWord, R0, THR, top_resource_offset);
+  __ StoreToOffset(R0, THR, top_resource_offset);
 
-  __ LoadFromOffset(kWord, R0, THR,
+  __ LoadFromOffset(R0, THR,
                     compiler::target::Thread::exit_through_ffi_offset());
   __ Push(R0);
 
   // Save top exit frame info. Don't set it to 0 yet;
   // TransitionNativeToGenerated will handle that.
-  __ LoadFromOffset(kWord, R0, THR,
+  __ LoadFromOffset(R0, THR,
                     compiler::target::Thread::top_exit_frame_info_offset());
   __ Push(R0);
 
@@ -1498,16 +1496,15 @@
   // handles.
 
   // Load the code object.
-  __ LoadFromOffset(kWord, R0, THR,
-                    compiler::target::Thread::callback_code_offset());
-  __ LoadFieldFromOffset(kWord, R0, R0,
+  __ LoadFromOffset(R0, THR, compiler::target::Thread::callback_code_offset());
+  __ LoadFieldFromOffset(R0, R0,
                          compiler::target::GrowableObjectArray::data_offset());
-  __ LoadFieldFromOffset(kWord, CODE_REG, R0,
+  __ LoadFieldFromOffset(CODE_REG, R0,
                          compiler::target::Array::data_offset() +
                              callback_id_ * compiler::target::kWordSize);
 
   // Put the code object in the reserved slot.
-  __ StoreToOffset(kWord, CODE_REG, FPREG,
+  __ StoreToOffset(CODE_REG, FPREG,
                    kPcMarkerSlotFromFp * compiler::target::kWordSize);
   if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
     __ SetupGlobalPoolAndDispatchTable();
@@ -1520,10 +1517,9 @@
 
   // Load a dummy return address which suggests that we are inside
   // InvokeDartCodeStub. This is how the stack walker detects an entry frame.
-  __ LoadFromOffset(kWord, LR, THR,
+  __ LoadFromOffset(LR, THR,
                     compiler::target::Thread::invoke_dart_code_stub_offset());
-  __ LoadFieldFromOffset(kWord, LR, LR,
-                         compiler::target::Code::entry_point_offset());
+  __ LoadFieldFromOffset(LR, LR, compiler::target::Code::entry_point_offset());
 
   FunctionEntryInstr::EmitNativeCode(compiler);
 }
@@ -1636,7 +1632,7 @@
   compiler::Label loop, loop_in;
 
   // Address of input bytes.
-  __ LoadFieldFromOffset(kWord, bytes_reg, bytes_reg,
+  __ LoadFieldFromOffset(bytes_reg, bytes_reg,
                          compiler::target::TypedDataBase::data_field_offset());
 
   // Table.
@@ -1684,11 +1680,9 @@
     decoder_reg = decoder_location.reg();
   }
   const auto scan_flags_field_offset = scan_flags_field_.offset_in_bytes();
-  __ LoadFieldFromOffset(kWord, flags_temp_reg, decoder_reg,
-                         scan_flags_field_offset);
+  __ LoadFieldFromOffset(flags_temp_reg, decoder_reg, scan_flags_field_offset);
   __ orr(flags_temp_reg, flags_temp_reg, compiler::Operand(flags_reg));
-  __ StoreFieldToOffset(kWord, flags_temp_reg, decoder_reg,
-                        scan_flags_field_offset);
+  __ StoreFieldToOffset(flags_temp_reg, decoder_reg, scan_flags_field_offset);
 }
 
 LocationSummary* LoadUntaggedInstr::MakeLocationSummary(Zone* zone,
@@ -1702,15 +1696,15 @@
   const Register obj = locs()->in(0).reg();
   const Register result = locs()->out(0).reg();
   if (object()->definition()->representation() == kUntagged) {
-    __ LoadFromOffset(kWord, result, obj, offset());
+    __ LoadFromOffset(result, obj, offset());
   } else {
     ASSERT(object()->definition()->representation() == kTagged);
-    __ LoadFieldFromOffset(kWord, result, obj, offset());
+    __ LoadFieldFromOffset(result, obj, offset());
   }
 }
 
 DEFINE_BACKEND(StoreUntagged, (NoLocation, Register obj, Register value)) {
-  __ StoreToOffset(kWord, value, obj, instr->offset_from_tagged());
+  __ StoreToOffset(value, obj, instr->offset_from_tagged());
 }
 
 Representation LoadIndexedInstr::representation() const {
@@ -2767,11 +2761,11 @@
       BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(),
                                       result, temp);
       __ eor(temp, temp, compiler::Operand(temp));
-      __ StoreToOffset(kWord, value, result,
-                       compiler::target::Mint::value_offset() - kHeapObjectTag);
-      __ StoreToOffset(kWord, temp, result,
-                       compiler::target::Mint::value_offset() - kHeapObjectTag +
-                           compiler::target::kWordSize);
+      __ StoreFieldToOffset(value, result,
+                            compiler::target::Mint::value_offset());
+      __ StoreFieldToOffset(
+          temp, result,
+          compiler::target::Mint::value_offset() + compiler::target::kWordSize);
       __ Bind(&done);
     }
   }
@@ -2856,11 +2850,9 @@
       const Register value_lo = value_pair->At(0).reg();
       const Register value_hi = value_pair->At(1).reg();
       __ Comment("UnboxedIntegerStoreInstanceFieldInstr");
-      __ StoreToOffset(kWord, value_lo, instance_reg,
-                       offset_in_bytes - kHeapObjectTag);
-      __ StoreToOffset(
-          kWord, value_hi, instance_reg,
-          offset_in_bytes - kHeapObjectTag + compiler::target::kWordSize);
+      __ StoreFieldToOffset(value_lo, instance_reg, offset_in_bytes);
+      __ StoreFieldToOffset(value_hi, instance_reg,
+                            offset_in_bytes + compiler::target::kWordSize);
       return;
     }
 
@@ -3059,10 +3051,10 @@
 
   compiler->used_static_fields().Add(&field());
 
-  __ LoadFromOffset(kWord, temp, THR,
+  __ LoadFromOffset(temp, THR,
                     compiler::target::Thread::field_table_values_offset());
   // Note: static fields ids won't be changed by hot-reload.
-  __ StoreToOffset(kWord, value, temp,
+  __ StoreToOffset(value, temp,
                    compiler::target::FieldTable::OffsetOf(field()));
 }
 
@@ -3292,21 +3284,21 @@
         const intptr_t offset_lo = OffsetInBytes() - kHeapObjectTag;
         const intptr_t offset_hi = offset_lo + compiler::target::kWordSize;
         __ Comment("UnboxedInt64LoadFieldInstr");
-        __ LoadFromOffset(kWord, out_lo, instance_reg, offset_lo);
-        __ LoadFromOffset(kWord, out_hi, instance_reg, offset_hi);
+        __ LoadFromOffset(out_lo, instance_reg, offset_lo);
+        __ LoadFromOffset(out_hi, instance_reg, offset_hi);
         break;
       }
       case kUnboxedUint32: {
         const Register result = locs()->out(0).reg();
         __ Comment("UnboxedUint32LoadFieldInstr");
-        __ LoadFieldFromOffset(kWord, result, instance_reg, OffsetInBytes());
+        __ LoadFieldFromOffset(result, instance_reg, OffsetInBytes());
         break;
       }
       case kUnboxedUint8: {
         const Register result = locs()->out(0).reg();
         __ Comment("UnboxedUint8LoadFieldInstr");
-        __ LoadFieldFromOffset(kUnsignedByte, result, instance_reg,
-                               OffsetInBytes());
+        __ LoadFieldFromOffset(result, instance_reg, OffsetInBytes(),
+                               compiler::kUnsignedByte);
         break;
       }
       default:
@@ -3346,7 +3338,7 @@
     }
 
     const Register temp = locs()->temp(0).reg();
-    __ LoadFieldFromOffset(kWord, temp, instance_reg, OffsetInBytes());
+    __ LoadFieldFromOffset(temp, instance_reg, OffsetInBytes());
     switch (cid) {
       case kDoubleCid:
         __ Comment("UnboxedDoubleLoadFieldInstr");
@@ -3446,7 +3438,7 @@
     __ Bind(&load_pointer);
   }
 
-  __ LoadFieldFromOffset(kWord, result_reg, instance_reg, OffsetInBytes());
+  __ LoadFieldFromOffset(result_reg, instance_reg, OffsetInBytes());
 
   if (calls_initializer()) {
     EmitNativeCodeForInitializerCall(compiler);
@@ -3704,12 +3696,12 @@
   if (!compiler->is_optimizing()) {
     if (raw_exception_var_ != nullptr) {
       __ StoreToOffset(
-          kWord, kExceptionObjectReg, FP,
+          kExceptionObjectReg, FP,
           compiler::target::FrameOffsetInBytesForVariable(raw_exception_var_));
     }
     if (raw_stacktrace_var_ != nullptr) {
       __ StoreToOffset(
-          kWord, kStackTraceObjectReg, FP,
+          kStackTraceObjectReg, FP,
           compiler::target::FrameOffsetInBytesForVariable(raw_stacktrace_var_));
     }
   }
@@ -4860,8 +4852,8 @@
     case kUnboxedInt64: {
       PairLocation* result = locs()->out(0).AsPairLocation();
       ASSERT(result->At(0).reg() != box);
-      __ LoadFieldFromOffset(kWord, result->At(0).reg(), box, ValueOffset());
-      __ LoadFieldFromOffset(kWord, result->At(1).reg(), box,
+      __ LoadFieldFromOffset(result->At(0).reg(), box, ValueOffset());
+      __ LoadFieldFromOffset(result->At(1).reg(), box,
                              ValueOffset() + compiler::target::kWordSize);
       break;
     }
@@ -4925,8 +4917,7 @@
   const Register result = locs()->out(0).reg();
   compiler::Label done;
   __ SmiUntag(result, value, &done);
-  __ LoadFieldFromOffset(kWord, result, value,
-                         compiler::target::Mint::value_offset());
+  __ LoadFieldFromOffset(result, value, compiler::target::Mint::value_offset());
   __ Bind(&done);
 }
 
@@ -5007,11 +4998,10 @@
       ASSERT(from_representation() == kUnboxedUint32);
       __ eor(temp, temp, compiler::Operand(temp));
     }
-    __ StoreToOffset(kWord, value, out,
-                     compiler::target::Mint::value_offset() - kHeapObjectTag);
-    __ StoreToOffset(kWord, temp, out,
-                     compiler::target::Mint::value_offset() - kHeapObjectTag +
-                         compiler::target::kWordSize);
+    __ StoreFieldToOffset(value, out, compiler::target::Mint::value_offset());
+    __ StoreFieldToOffset(
+        temp, out,
+        compiler::target::Mint::value_offset() + compiler::target::kWordSize);
     __ Bind(&done);
   }
 }
@@ -5097,11 +5087,11 @@
                                     out_reg, tmp);
   }
 
-  __ StoreToOffset(kWord, value_lo, out_reg,
-                   compiler::target::Mint::value_offset() - kHeapObjectTag);
-  __ StoreToOffset(kWord, value_hi, out_reg,
-                   compiler::target::Mint::value_offset() - kHeapObjectTag +
-                       compiler::target::kWordSize);
+  __ StoreFieldToOffset(value_lo, out_reg,
+                        compiler::target::Mint::value_offset());
+  __ StoreFieldToOffset(
+      value_hi, out_reg,
+      compiler::target::Mint::value_offset() + compiler::target::kWordSize);
   __ Bind(&done);
 }
 
@@ -5110,11 +5100,10 @@
                               Register result,
                               Register temp,
                               compiler::Label* deopt) {
-  __ LoadFieldFromOffset(kWord, result, mint,
-                         compiler::target::Mint::value_offset());
+  __ LoadFieldFromOffset(result, mint, compiler::target::Mint::value_offset());
   if (deopt != NULL) {
     __ LoadFieldFromOffset(
-        kWord, temp, mint,
+        temp, mint,
         compiler::target::Mint::value_offset() + compiler::target::kWordSize);
     __ cmp(temp,
            compiler::Operand(result, ASR, compiler::target::kBitsPerWord - 1));
@@ -5293,7 +5282,7 @@
       break;
     case SimdOpInstr::kFloat32x4Scale:
       __ vcvtsd(STMP, EvenDRegisterOf(left));
-      __ vdup(kWord, result, DTMP, 0);
+      __ vdup(compiler::kFourBytes, result, DTMP, 0);
       __ vmulqs(result, result, right);
       break;
     case SimdOpInstr::kInt32x4BitAnd:
@@ -5306,10 +5295,10 @@
       __ veorq(result, left, right);
       break;
     case SimdOpInstr::kInt32x4Add:
-      __ vaddqi(kWord, result, left, right);
+      __ vaddqi(compiler::kFourBytes, result, left, right);
       break;
     case SimdOpInstr::kInt32x4Sub:
-      __ vsubqi(kWord, result, left, right);
+      __ vsubqi(compiler::kFourBytes, result, left, right);
       break;
     default:
       UNREACHABLE();
@@ -5363,13 +5352,13 @@
     case SimdOpInstr::kInt32x4Shuffle:
     case SimdOpInstr::kFloat32x4Shuffle: {
       if (instr->mask() == 0x00) {
-        __ vdup(kWord, result, value.d(0), 0);
+        __ vdup(compiler::kFourBytes, result, value.d(0), 0);
       } else if (instr->mask() == 0x55) {
-        __ vdup(kWord, result, value.d(0), 1);
+        __ vdup(compiler::kFourBytes, result, value.d(0), 1);
       } else if (instr->mask() == 0xAA) {
-        __ vdup(kWord, result, value.d(1), 0);
+        __ vdup(compiler::kFourBytes, result, value.d(1), 0);
       } else if (instr->mask() == 0xFF) {
-        __ vdup(kWord, result, value.d(1), 1);
+        __ vdup(compiler::kFourBytes, result, value.d(1), 1);
       } else {
         // TODO(zra): Investigate better instruction sequences for other
         // shuffle masks.
@@ -5442,7 +5431,7 @@
   __ vcvtsd(STMP, value.d(0));
 
   // Splat across all lanes.
-  __ vdup(kWord, result, DTMP, 0);
+  __ vdup(compiler::kFourBytes, result, DTMP, 0);
 }
 
 DEFINE_EMIT(Float32x4Sqrt,
@@ -6985,10 +6974,10 @@
     // TODO(dartbug.com/33549): Clean this up when unboxed values
     // could be passed as arguments.
     __ StoreToOffset(
-        kWord, right_lo, THR,
+        right_lo, THR,
         compiler::target::Thread::unboxed_int64_runtime_arg_offset());
     __ StoreToOffset(
-        kWord, right_hi, THR,
+        right_hi, THR,
         compiler::target::Thread::unboxed_int64_runtime_arg_offset() +
             compiler::target::kWordSize);
   }
@@ -7126,10 +7115,10 @@
     // TODO(dartbug.com/33549): Clean this up when unboxed values
     // could be passed as arguments.
     __ StoreToOffset(
-        kWord, right_lo, THR,
+        right_lo, THR,
         compiler::target::Thread::unboxed_int64_runtime_arg_offset());
     __ StoreToOffset(
-        kWord, right_hi, THR,
+        right_hi, THR,
         compiler::target::Thread::unboxed_int64_runtime_arg_offset() +
             compiler::target::kWordSize);
   }
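
Several of the il_arm.cc hunks above swap manual tag arithmetic for the field helpers; the equivalence they rely on, as a sketch:

    // StoreFieldToOffset untags the object pointer internally, so
    __ StoreFieldToOffset(value, result, compiler::target::Mint::value_offset());
    // replaces the hand-written form
    //   __ StoreToOffset(value, result,
    //                    compiler::target::Mint::value_offset() - kHeapObjectTag);
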
diff --git a/runtime/vm/compiler/backend/il_arm64.cc b/runtime/vm/compiler/backend/il_arm64.cc
index 7cb909a..eeebf97 100644
--- a/runtime/vm/compiler/backend/il_arm64.cc
+++ b/runtime/vm/compiler/backend/il_arm64.cc
@@ -188,24 +188,24 @@
   __ Bind(&loop);
   switch (element_size_) {
     case 1:
-      __ ldr(temp_reg, src_address, kUnsignedByte);
-      __ str(temp_reg, dest_address, kUnsignedByte);
+      __ ldr(temp_reg, src_address, compiler::kUnsignedByte);
+      __ str(temp_reg, dest_address, compiler::kUnsignedByte);
       break;
     case 2:
-      __ ldr(temp_reg, src_address, kUnsignedHalfword);
-      __ str(temp_reg, dest_address, kUnsignedHalfword);
+      __ ldr(temp_reg, src_address, compiler::kUnsignedTwoBytes);
+      __ str(temp_reg, dest_address, compiler::kUnsignedTwoBytes);
       break;
     case 4:
-      __ ldr(temp_reg, src_address, kUnsignedWord);
-      __ str(temp_reg, dest_address, kUnsignedWord);
+      __ ldr(temp_reg, src_address, compiler::kUnsignedFourBytes);
+      __ str(temp_reg, dest_address, compiler::kUnsignedFourBytes);
       break;
     case 8:
-      __ ldr(temp_reg, src_address, kDoubleWord);
-      __ str(temp_reg, dest_address, kDoubleWord);
+      __ ldr(temp_reg, src_address, compiler::kEightBytes);
+      __ str(temp_reg, dest_address, compiler::kEightBytes);
       break;
     case 16:
-      __ ldp(temp_reg, temp_reg2, src_address, kDoubleWord);
-      __ stp(temp_reg, temp_reg2, dest_address, kDoubleWord);
+      __ ldp(temp_reg, temp_reg2, src_address, compiler::kEightBytes);
+      __ stp(temp_reg, temp_reg2, dest_address, compiler::kEightBytes);
       break;
   }
   __ subs(length_reg, length_reg, compiler::Operand(1));
@@ -1407,8 +1407,10 @@
   const Register str = locs()->in(0).reg();
   const Register result = locs()->out(0).reg();
   __ LoadFieldFromOffset(result, str, String::length_offset());
-  __ ldr(TMP, compiler::FieldAddress(str, OneByteString::data_offset(), kByte),
-         kUnsignedByte);
+  __ ldr(TMP,
+         compiler::FieldAddress(str, OneByteString::data_offset(),
+                                compiler::kByte),
+         compiler::kUnsignedByte);
   __ CompareImmediate(result, Smi::RawValue(1));
   __ LoadImmediate(result, -1);
   __ csel(result, TMP, result, EQ);
@@ -1498,10 +1500,11 @@
   // Read byte and increment pointer.
   __ ldr(temp_reg,
          compiler::Address(bytes_ptr_reg, 1, compiler::Address::PostIndex),
-         kUnsignedByte);
+         compiler::kUnsignedByte);
 
   // Update size and flags based on byte value.
-  __ ldr(temp_reg, compiler::Address(table_reg, temp_reg), kUnsignedByte);
+  __ ldr(temp_reg, compiler::Address(table_reg, temp_reg),
+         compiler::kUnsignedByte);
   __ orr(flags_reg, flags_reg, compiler::Operand(temp_reg));
   __ andi(temp_reg, temp_reg, compiler::Immediate(kSizeMask));
   __ add(size_reg, size_reg, compiler::Operand(temp_reg));
@@ -1675,21 +1678,21 @@
   switch (class_id()) {
     case kTypedDataInt32ArrayCid:
       ASSERT(representation() == kUnboxedInt32);
-      __ ldr(result, element_address, kWord);
+      __ ldr(result, element_address, compiler::kFourBytes);
       break;
     case kTypedDataUint32ArrayCid:
       ASSERT(representation() == kUnboxedUint32);
-      __ ldr(result, element_address, kUnsignedWord);
+      __ ldr(result, element_address, compiler::kUnsignedFourBytes);
       break;
     case kTypedDataInt64ArrayCid:
     case kTypedDataUint64ArrayCid:
       ASSERT(representation() == kUnboxedInt64);
-      __ ldr(result, element_address, kDoubleWord);
+      __ ldr(result, element_address, compiler::kEightBytes);
       break;
     case kTypedDataInt8ArrayCid:
       ASSERT(representation() == kUnboxedIntPtr);
       ASSERT(index_scale() == 1);
-      __ ldr(result, element_address, kByte);
+      __ ldr(result, element_address, compiler::kByte);
       break;
     case kTypedDataUint8ArrayCid:
     case kTypedDataUint8ClampedArrayCid:
@@ -1699,17 +1702,17 @@
     case kExternalOneByteStringCid:
       ASSERT(representation() == kUnboxedIntPtr);
       ASSERT(index_scale() == 1);
-      __ ldr(result, element_address, kUnsignedByte);
+      __ ldr(result, element_address, compiler::kUnsignedByte);
       break;
     case kTypedDataInt16ArrayCid:
       ASSERT(representation() == kUnboxedIntPtr);
-      __ ldr(result, element_address, kHalfword);
+      __ ldr(result, element_address, compiler::kTwoBytes);
       break;
     case kTypedDataUint16ArrayCid:
     case kTwoByteStringCid:
     case kExternalTwoByteStringCid:
       ASSERT(representation() == kUnboxedIntPtr);
-      __ ldr(result, element_address, kUnsignedHalfword);
+      __ ldr(result, element_address, compiler::kUnsignedTwoBytes);
       break;
     default:
       ASSERT(representation() == kTagged);
@@ -1736,7 +1739,7 @@
   // The string register points to the backing store for external strings.
   const Register str = locs()->in(0).reg();
   const Location index = locs()->in(1);
-  OperandSize sz = OperandSize::kByte;
+  compiler::OperandSize sz = compiler::kByte;
 
   Register result = locs()->out(0).reg();
   switch (class_id()) {
@@ -1744,13 +1747,13 @@
     case kExternalOneByteStringCid:
       switch (element_count()) {
         case 1:
-          sz = kUnsignedByte;
+          sz = compiler::kUnsignedByte;
           break;
         case 2:
-          sz = kUnsignedHalfword;
+          sz = compiler::kUnsignedTwoBytes;
           break;
         case 4:
-          sz = kUnsignedWord;
+          sz = compiler::kUnsignedFourBytes;
           break;
         default:
           UNREACHABLE();
@@ -1760,10 +1763,10 @@
     case kExternalTwoByteStringCid:
       switch (element_count()) {
         case 1:
-          sz = kUnsignedHalfword;
+          sz = compiler::kUnsignedTwoBytes;
           break;
         case 2:
-          sz = kUnsignedWord;
+          sz = compiler::kUnsignedFourBytes;
           break;
         default:
           UNREACHABLE();
@@ -1935,10 +1938,10 @@
       if (locs()->in(2).IsConstant()) {
         const Smi& constant = Smi::Cast(locs()->in(2).constant());
         __ LoadImmediate(TMP, static_cast<int8_t>(constant.Value()));
-        __ str(TMP, element_address, kUnsignedByte);
+        __ str(TMP, element_address, compiler::kUnsignedByte);
       } else {
         const Register value = locs()->in(2).reg();
-        __ str(value, element_address, kUnsignedByte);
+        __ str(value, element_address, compiler::kUnsignedByte);
       }
       break;
     }
@@ -1955,14 +1958,14 @@
           value = 0;
         }
         __ LoadImmediate(TMP, static_cast<int8_t>(value));
-        __ str(TMP, element_address, kUnsignedByte);
+        __ str(TMP, element_address, compiler::kUnsignedByte);
       } else {
         const Register value = locs()->in(2).reg();
         // Clamp to 0x00 or 0xFF respectively.
         __ CompareImmediate(value, 0xFF);
         __ csetm(TMP, GT);             // TMP = value > 0xFF ? -1 : 0.
         __ csel(TMP, value, TMP, LS);  // TMP = value in range ? value : TMP.
-        __ str(TMP, element_address, kUnsignedByte);
+        __ str(TMP, element_address, compiler::kUnsignedByte);
       }
       break;
     }
@@ -1971,19 +1974,19 @@
     case kTypedDataUint16ArrayCid: {
       ASSERT(RequiredInputRepresentation(2) == kUnboxedIntPtr);
       const Register value = locs()->in(2).reg();
-      __ str(value, element_address, kUnsignedHalfword);
+      __ str(value, element_address, compiler::kUnsignedTwoBytes);
       break;
     }
     case kTypedDataInt32ArrayCid:
     case kTypedDataUint32ArrayCid: {
       const Register value = locs()->in(2).reg();
-      __ str(value, element_address, kUnsignedWord);
+      __ str(value, element_address, compiler::kUnsignedFourBytes);
       break;
     }
     case kTypedDataInt64ArrayCid:
     case kTypedDataUint64ArrayCid: {
       const Register value = locs()->in(2).reg();
-      __ str(value, element_address, kDoubleWord);
+      __ str(value, element_address, compiler::kEightBytes);
       break;
     }
     case kTypedDataFloat32ArrayCid: {
@@ -2100,24 +2103,25 @@
     __ LoadObject(field_reg, Field::ZoneHandle((field().Original())));
 
     compiler::FieldAddress field_cid_operand(
-        field_reg, Field::guarded_cid_offset(), kUnsignedHalfword);
+        field_reg, Field::guarded_cid_offset(), compiler::kUnsignedTwoBytes);
     compiler::FieldAddress field_nullability_operand(
-        field_reg, Field::is_nullable_offset(), kUnsignedHalfword);
+        field_reg, Field::is_nullable_offset(), compiler::kUnsignedTwoBytes);
 
     if (value_cid == kDynamicCid) {
       LoadValueCid(compiler, value_cid_reg, value_reg);
       compiler::Label skip_length_check;
-      __ ldr(TMP, field_cid_operand, kUnsignedHalfword);
+      __ ldr(TMP, field_cid_operand, compiler::kUnsignedTwoBytes);
       __ CompareRegisters(value_cid_reg, TMP);
       __ b(&ok, EQ);
-      __ ldr(TMP, field_nullability_operand, kUnsignedHalfword);
+      __ ldr(TMP, field_nullability_operand, compiler::kUnsignedTwoBytes);
       __ CompareRegisters(value_cid_reg, TMP);
     } else if (value_cid == kNullCid) {
-      __ ldr(value_cid_reg, field_nullability_operand, kUnsignedHalfword);
+      __ ldr(value_cid_reg, field_nullability_operand,
+             compiler::kUnsignedTwoBytes);
       __ CompareImmediate(value_cid_reg, value_cid);
     } else {
       compiler::Label skip_length_check;
-      __ ldr(value_cid_reg, field_cid_operand, kUnsignedHalfword);
+      __ ldr(value_cid_reg, field_cid_operand, compiler::kUnsignedTwoBytes);
       __ CompareImmediate(value_cid_reg, value_cid);
     }
     __ b(&ok, EQ);
@@ -2131,17 +2135,18 @@
     if (!field().needs_length_check()) {
       // An uninitialized field can be handled inline. Check if the
       // field is still uninitialized.
-      __ ldr(TMP, field_cid_operand, kUnsignedHalfword);
+      __ ldr(TMP, field_cid_operand, compiler::kUnsignedTwoBytes);
       __ CompareImmediate(TMP, kIllegalCid);
       __ b(fail, NE);
 
       if (value_cid == kDynamicCid) {
-        __ str(value_cid_reg, field_cid_operand, kUnsignedHalfword);
-        __ str(value_cid_reg, field_nullability_operand, kUnsignedHalfword);
+        __ str(value_cid_reg, field_cid_operand, compiler::kUnsignedTwoBytes);
+        __ str(value_cid_reg, field_nullability_operand,
+               compiler::kUnsignedTwoBytes);
       } else {
         __ LoadImmediate(TMP, value_cid);
-        __ str(TMP, field_cid_operand, kUnsignedHalfword);
-        __ str(TMP, field_nullability_operand, kUnsignedHalfword);
+        __ str(TMP, field_cid_operand, compiler::kUnsignedTwoBytes);
+        __ str(TMP, field_nullability_operand, compiler::kUnsignedTwoBytes);
       }
 
       __ b(&ok);
@@ -2152,7 +2157,7 @@
       __ Bind(fail);
 
       __ LoadFieldFromOffset(TMP, field_reg, Field::guarded_cid_offset(),
-                             kUnsignedHalfword);
+                             compiler::kUnsignedTwoBytes);
       __ CompareImmediate(TMP, kDynamicCid);
       __ b(&ok, EQ);
 
@@ -2242,7 +2247,7 @@
     __ ldr(offset_reg,
            compiler::FieldAddress(
                field_reg, Field::guarded_list_length_in_object_offset_offset()),
-           kByte);
+           compiler::kByte);
     __ ldr(length_reg, compiler::FieldAddress(
                            field_reg, Field::guarded_list_length_offset()));
 
@@ -2495,27 +2500,27 @@
     __ LoadObject(temp, Field::ZoneHandle(Z, slot().field().Original()));
 
     __ LoadFieldFromOffset(temp2, temp, Field::is_nullable_offset(),
-                           kUnsignedHalfword);
+                           compiler::kUnsignedTwoBytes);
     __ CompareImmediate(temp2, kNullCid);
     __ b(&store_pointer, EQ);
 
     __ LoadFromOffset(temp2, temp, Field::kind_bits_offset() - kHeapObjectTag,
-                      kUnsignedByte);
+                      compiler::kUnsignedByte);
     __ tsti(temp2, compiler::Immediate(1 << Field::kUnboxingCandidateBit));
     __ b(&store_pointer, EQ);
 
     __ LoadFieldFromOffset(temp2, temp, Field::guarded_cid_offset(),
-                           kUnsignedHalfword);
+                           compiler::kUnsignedTwoBytes);
     __ CompareImmediate(temp2, kDoubleCid);
     __ b(&store_double, EQ);
 
     __ LoadFieldFromOffset(temp2, temp, Field::guarded_cid_offset(),
-                           kUnsignedHalfword);
+                           compiler::kUnsignedTwoBytes);
     __ CompareImmediate(temp2, kFloat32x4Cid);
     __ b(&store_float32x4, EQ);
 
     __ LoadFieldFromOffset(temp2, temp, Field::guarded_cid_offset(),
-                           kUnsignedHalfword);
+                           compiler::kUnsignedTwoBytes);
     __ CompareImmediate(temp2, kFloat64x2Cid);
     __ b(&store_float64x2, EQ);
 
@@ -2819,12 +2824,12 @@
       case kUnboxedUint32:
         __ Comment("UnboxedUint32LoadFieldInstr");
         __ LoadFieldFromOffset(result_reg, instance_reg, OffsetInBytes(),
-                               kUnsignedWord);
+                               compiler::kUnsignedFourBytes);
         break;
       case kUnboxedUint8:
         __ Comment("UnboxedUint8LoadFieldInstr");
         __ LoadFieldFromOffset(result_reg, instance_reg, OffsetInBytes(),
-                               kUnsignedByte);
+                               compiler::kUnsignedByte);
         break;
       default:
         UNIMPLEMENTED();
@@ -2889,23 +2894,23 @@
     __ LoadObject(result_reg, Field::ZoneHandle(slot().field().Original()));
 
     compiler::FieldAddress field_cid_operand(
-        result_reg, Field::guarded_cid_offset(), kUnsignedHalfword);
+        result_reg, Field::guarded_cid_offset(), compiler::kUnsignedTwoBytes);
     compiler::FieldAddress field_nullability_operand(
-        result_reg, Field::is_nullable_offset(), kUnsignedHalfword);
+        result_reg, Field::is_nullable_offset(), compiler::kUnsignedTwoBytes);
 
-    __ ldr(temp, field_nullability_operand, kUnsignedHalfword);
+    __ ldr(temp, field_nullability_operand, compiler::kUnsignedTwoBytes);
     __ CompareImmediate(temp, kNullCid);
     __ b(&load_pointer, EQ);
 
-    __ ldr(temp, field_cid_operand, kUnsignedHalfword);
+    __ ldr(temp, field_cid_operand, compiler::kUnsignedTwoBytes);
     __ CompareImmediate(temp, kDoubleCid);
     __ b(&load_double, EQ);
 
-    __ ldr(temp, field_cid_operand, kUnsignedHalfword);
+    __ ldr(temp, field_cid_operand, compiler::kUnsignedTwoBytes);
     __ CompareImmediate(temp, kFloat32x4Cid);
     __ b(&load_float32x4, EQ);
 
-    __ ldr(temp, field_cid_operand, kUnsignedHalfword);
+    __ ldr(temp, field_cid_operand, compiler::kUnsignedTwoBytes);
     __ CompareImmediate(temp, kFloat64x2Cid);
     __ b(&load_float64x2, EQ);
 
@@ -3334,10 +3339,10 @@
     intptr_t threshold =
         FLAG_optimization_counter_threshold * (loop_depth() + 1);
     __ LoadFieldFromOffset(TMP, function, Function::usage_counter_offset(),
-                           kWord);
+                           compiler::kFourBytes);
     __ add(TMP, TMP, compiler::Operand(1));
     __ StoreFieldToOffset(TMP, function, Function::usage_counter_offset(),
-                          kWord);
+                          compiler::kFourBytes);
     __ CompareImmediate(TMP, threshold);
     __ b(slow_path->osr_entry_label(), GE);
   }
@@ -4157,8 +4162,10 @@
   compiler::Label done;
   __ SmiUntag(result, value);
   __ BranchIfSmi(value, &done);
-  __ ldr(result, compiler::FieldAddress(value, Mint::value_offset(), kWord),
-         kWord);
+  __ ldr(
+      result,
+      compiler::FieldAddress(value, Mint::value_offset(), compiler::kFourBytes),
+      compiler::kFourBytes);
   __ LoadFieldFromOffset(result, value, Mint::value_offset());
   __ Bind(&done);
 }
@@ -6017,10 +6024,10 @@
   } else {
     switch (op_kind) {
       case Token::kSHR:
-        __ LsrImmediate(out, left, shift, kWord);
+        __ LsrImmediate(out, left, shift, compiler::kFourBytes);
         break;
       case Token::kSHL:
-        __ LslImmediate(out, left, shift, kWord);
+        __ LslImmediate(out, left, shift, compiler::kFourBytes);
         break;
       default:
         UNREACHABLE();
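
Note on the operand-size rename running through the arm64 backend hunks above: the ARM-style names (kWord, kUnsignedWord, kHalfword, kUnsignedHalfword, kDoubleWord) give way to byte-count names in the compiler namespace, which mean the same thing on 32-bit and 64-bit targets. A minimal sketch of the presumed mapping, inferred only from the substitutions visible in this diff (the enum name OperandSize is an assumption):

    // Sketch: presumed compiler::OperandSize correspondence.
    enum OperandSize {
      kByte,               // 1 byte, sign-extended   (name unchanged)
      kUnsignedByte,       // 1 byte, zero-extended   (name unchanged)
      kTwoBytes,           // 2 bytes, sign-extended  (was kHalfword)
      kUnsignedTwoBytes,   // 2 bytes, zero-extended  (was kUnsignedHalfword)
      kFourBytes,          // 4 bytes, sign-extended  (was kWord)
      kUnsignedFourBytes,  // 4 bytes, zero-extended  (was kUnsignedWord)
      kEightBytes,         // 8 bytes                 (was kDoubleWord)
    };
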
diff --git a/runtime/vm/compiler/backend/il_x64.cc b/runtime/vm/compiler/backend/il_x64.cc
index 50b1287..92d8205 100644
--- a/runtime/vm/compiler/backend/il_x64.cc
+++ b/runtime/vm/compiler/backend/il_x64.cc
@@ -337,6 +337,10 @@
   __ set_constant_pool_allowed(true);
 }
 
+static const RegisterSet kCalleeSaveRegistersSet(
+    CallingConventions::kCalleeSaveCpuRegisters,
+    CallingConventions::kCalleeSaveXmmRegisters);
+
 void NativeReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   EmitReturnMoves(compiler);
 
@@ -369,8 +373,7 @@
       /*enter_safepoint=*/!NativeCallbackTrampolines::Enabled());
 
   // Restore C++ ABI callee-saved registers.
-  __ PopRegisters(CallingConventions::kCalleeSaveCpuRegisters,
-                  CallingConventions::kCalleeSaveXmmRegisters);
+  __ PopRegisters(kCalleeSaveRegistersSet);
 
 #if defined(TARGET_OS_FUCHSIA)
   UNREACHABLE();  // Fuchsia does not allow dart:ffi.
@@ -1148,8 +1151,7 @@
   __ PushImmediate(compiler::Immediate(0));
 
   // Save ABI callee-saved registers.
-  __ PushRegisters(CallingConventions::kCalleeSaveCpuRegisters,
-                   CallingConventions::kCalleeSaveXmmRegisters);
+  __ PushRegisters(kCalleeSaveRegistersSet);
 
   // Load the address of DLRT_GetThreadForNativeCallback without using Thread.
   if (FLAG_precompiled_mode) {
@@ -4553,7 +4555,7 @@
   if (compiler->intrinsic_mode()) {
     __ TryAllocate(compiler->mint_class(),
                    compiler->intrinsic_slow_path_label(),
-                   /*near_jump=*/true, out, temp);
+                   compiler::Assembler::kNearJump, out, temp);
   } else if (locs()->call_on_shared_slow_path()) {
     auto object_store = compiler->isolate()->object_store();
     const bool live_fpu_regs = locs()->live_registers()->FpuRegisterCount() > 0;
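
The il_x64.cc hunks make two readability changes: the callee-save masks are bundled into a single RegisterSet (see the locations.h diff below), and the bare /*near_jump=*/true argument to TryAllocate becomes the named compiler::Assembler::kNearJump. A hedged sketch of the latter, assuming kNearJump and kFarJump are boolean constants on the x64 assembler (only kNearJump appears in this diff):

    // Sketch: named jump-distance constants instead of a bare bool.
    class Assembler : public AssemblerBase {
     public:
      static const bool kNearJump = true;
      static const bool kFarJump = false;  // Assumed counterpart.
      // ...
    };

    // Call sites then document themselves:
    //   __ TryAllocate(cls, slow_path, compiler::Assembler::kNearJump, out, temp);
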
diff --git a/runtime/vm/compiler/backend/locations.h b/runtime/vm/compiler/backend/locations.h
index 1d29a39..942e10a 100644
--- a/runtime/vm/compiler/backend/locations.h
+++ b/runtime/vm/compiler/backend/locations.h
@@ -538,6 +538,12 @@
     ASSERT(kNumberOfFpuRegisters <= (kWordSize * kBitsPerByte));
   }
 
+  explicit RegisterSet(intptr_t cpu_register_mask,
+                       intptr_t fpu_register_mask = 0)
+      : RegisterSet() {
+    AddTaggedRegisters(cpu_register_mask, fpu_register_mask);
+  }
+
   void AddAllNonReservedRegisters(bool include_fpu_registers) {
     for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
       if ((kReservedCpuRegisters & (1 << i)) != 0u) continue;
@@ -594,6 +600,22 @@
 #endif
   }
 
+  void AddTaggedRegisters(intptr_t cpu_register_mask,
+                          intptr_t fpu_register_mask) {
+    for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
+      if (Utils::TestBit(cpu_register_mask, i)) {
+        const Register reg = static_cast<Register>(i);
+        Add(Location::RegisterLocation(reg));
+      }
+    }
+    for (intptr_t i = 0; i < kNumberOfFpuRegisters; ++i) {
+      if (Utils::TestBit(fpu_register_mask, i)) {
+        const FpuRegister reg = static_cast<FpuRegister>(i);
+        Add(Location::FpuRegisterLocation(reg));
+      }
+    }
+  }
+
   void Add(Location loc, Representation rep = kTagged) {
     if (loc.IsRegister()) {
       cpu_registers_.Add(loc.reg());
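
The new RegisterSet constructor lets call sites build a set directly from calling-convention bit masks instead of passing two masks to PushRegisters/PopRegisters. A minimal usage sketch mirroring kCalleeSaveRegistersSet in il_x64.cc above:

    // Build the callee-save set once from the tagged register masks...
    static const RegisterSet kCalleeSaved(
        CallingConventions::kCalleeSaveCpuRegisters,
        CallingConventions::kCalleeSaveXmmRegisters);

    // ...then save and restore it around transitions into C++ code:
    __ PushRegisters(kCalleeSaved);
    // ... native call ...
    __ PopRegisters(kCalleeSaved);
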
diff --git a/runtime/vm/compiler/compiler_sources.gni b/runtime/vm/compiler/compiler_sources.gni
index 386dd86..c4572df 100644
--- a/runtime/vm/compiler/compiler_sources.gni
+++ b/runtime/vm/compiler/compiler_sources.gni
@@ -210,4 +210,5 @@
   "ffi/native_location_test.cc",
   "ffi/native_type_test.cc",
   "ffi/unit_test_custom_zone.cc",
+  "ffi/unit_test.cc",
 ]
diff --git a/runtime/vm/compiler/ffi/native_calling_convention_test.cc b/runtime/vm/compiler/ffi/native_calling_convention_test.cc
index 23ed777..aed5fc7 100644
--- a/runtime/vm/compiler/ffi/native_calling_convention_test.cc
+++ b/runtime/vm/compiler/ffi/native_calling_convention_test.cc
@@ -11,59 +11,6 @@
 namespace compiler {
 namespace ffi {
 
-#if defined(TARGET_ARCH_ARM)
-const char* kArch = "arm";
-#elif defined(TARGET_ARCH_ARM64)
-const char* kArch = "arm64";
-#elif defined(TARGET_ARCH_IA32)
-const char* kArch = "ia32";
-#elif defined(TARGET_ARCH_X64)
-const char* kArch = "x64";
-#endif
-
-#if defined(TARGET_OS_ANDROID)
-const char* kOs = "android";
-#elif defined(TARGET_OS_IOS)
-const char* kOs = "ios";
-#elif defined(TARGET_OS_LINUX)
-const char* kOs = "linux";
-#elif defined(TARGET_OS_MACOS)
-const char* kOs = "macos";
-#elif defined(TARGET_OS_WINDOWS)
-const char* kOs = "win";
-#endif
-
-void WriteToFile(char* path, const char* contents) {
-  FILE* file;
-  file = fopen(path, "w");
-  if (file != nullptr) {
-    fprintf(file, "%s", contents);
-  } else {
-    Syslog::Print("Error %d \n", errno);
-  }
-  fclose(file);
-}
-
-void ReadFromFile(char* path, char** buffer_pointer) {
-  FILE* file = fopen(path, "rb");
-  if (file == nullptr) {
-    Syslog::Print("Error %d \n", errno);
-    return;
-  }
-
-  fseek(file, 0, SEEK_END);
-  size_t size = ftell(file);
-  rewind(file);
-
-  char* buffer = reinterpret_cast<char*>(malloc(sizeof(char) * (size + 1)));
-
-  fread(buffer, 1, size, file);
-  buffer[size] = 0;
-
-  fclose(file);
-  *buffer_pointer = buffer;
-}
-
 void RunSignatureTest(dart::Zone* zone,
                       const char* name,
                       const NativeTypes& argument_types,
diff --git a/runtime/vm/compiler/ffi/unit_test.cc b/runtime/vm/compiler/ffi/unit_test.cc
new file mode 100644
index 0000000..044b398
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_test.cc
@@ -0,0 +1,68 @@
+// Copyright (c) 2020, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/compiler/ffi/unit_test.h"
+
+#include "platform/syslog.h"
+
+namespace dart {
+namespace compiler {
+namespace ffi {
+
+#if defined(TARGET_ARCH_ARM)
+const char* kArch = "arm";
+#elif defined(TARGET_ARCH_ARM64)
+const char* kArch = "arm64";
+#elif defined(TARGET_ARCH_IA32)
+const char* kArch = "ia32";
+#elif defined(TARGET_ARCH_X64)
+const char* kArch = "x64";
+#endif
+
+#if defined(TARGET_OS_ANDROID)
+const char* kOs = "android";
+#elif defined(TARGET_OS_MACOS_IOS)
+const char* kOs = "ios";
+#elif defined(TARGET_OS_LINUX)
+const char* kOs = "linux";
+#elif defined(TARGET_OS_MACOS)
+const char* kOs = "macos";
+#elif defined(TARGET_OS_WINDOWS)
+const char* kOs = "win";
+#endif
+
+void WriteToFile(char* path, const char* contents) {
+  FILE* file = fopen(path, "w");
+  if (file == nullptr) {
+    // Report and bail out; calling fclose(nullptr) is undefined behavior.
+    Syslog::Print("Error %d \n", errno);
+    return;
+  }
+  fprintf(file, "%s", contents);
+  fclose(file);
+}
+
+void ReadFromFile(char* path, char** buffer_pointer) {
+  FILE* file = fopen(path, "rb");
+  if (file == nullptr) {
+    Syslog::Print("Error %d \n", errno);
+    return;
+  }
+
+  fseek(file, 0, SEEK_END);
+  size_t size = ftell(file);
+  rewind(file);
+
+  char* buffer = reinterpret_cast<char*>(malloc(sizeof(char) * (size + 1)));
+
+  fread(buffer, 1, size, file);
+  buffer[size] = 0;
+
+  fclose(file);
+  *buffer_pointer = buffer;
+}
+
+}  // namespace ffi
+}  // namespace compiler
+}  // namespace dart
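
These helpers back the expectation-file tests: a test renders the computed native calling convention to text, then compares it against the matching unit_tests/<name>/<arch>_<os>.expect file (or rewrites that file when regenerating expectations). A sketch of that flow under stated assumptions; MatchesExpectation is illustrative and not part of this diff:

    // Sketch only: presumed shape of an expectation check built on the
    // helpers above (assumes <cstring> and <cstdlib>).
    bool MatchesExpectation(char* expect_path, const char* actual) {
      char* expected = nullptr;
      ReadFromFile(expect_path, &expected);
      if (expected == nullptr) return false;  // Missing or unreadable file.
      const bool matches = strcmp(expected, actual) == 0;
      free(expected);
      // When updating goldens, call WriteToFile(expect_path, actual) instead.
      return matches;
    }
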
diff --git a/runtime/vm/compiler/ffi/unit_test.h b/runtime/vm/compiler/ffi/unit_test.h
index 44c4f0d..5cb4da0 100644
--- a/runtime/vm/compiler/ffi/unit_test.h
+++ b/runtime/vm/compiler/ffi/unit_test.h
@@ -39,6 +39,13 @@
 namespace compiler {
 namespace ffi {
 
+extern const char* kArch;
+extern const char* kOs;
+
+void WriteToFile(char* path, const char* contents);
+
+void ReadFromFile(char* path, char** buffer_pointer);
+
 class TestCaseBase {
  public:
   explicit TestCaseBase(const char* name, const char* expectation);
diff --git a/runtime/vm/compiler/ffi/unit_tests/floatx10/arm64_ios.expect b/runtime/vm/compiler/ffi/unit_tests/floatx10/arm64_ios.expect
index 8f237e3..0a25996 100644
--- a/runtime/vm/compiler/ffi/unit_tests/floatx10/arm64_ios.expect
+++ b/runtime/vm/compiler/ffi/unit_tests/floatx10/arm64_ios.expect
@@ -7,6 +7,6 @@
 v6 float
 v7 float
 S+0 float
-S+8 float
+S+4 float
 =>
 v0 float
diff --git a/runtime/vm/compiler/ffi/unit_tests/int8x10/arm64_ios.expect b/runtime/vm/compiler/ffi/unit_tests/int8x10/arm64_ios.expect
index 04f72e5f..f8d22bc 100644
--- a/runtime/vm/compiler/ffi/unit_tests/int8x10/arm64_ios.expect
+++ b/runtime/vm/compiler/ffi/unit_tests/int8x10/arm64_ios.expect
@@ -1,12 +1,12 @@
-r0 int8
-r1 int8
-r2 int8
-r3 int8
-r4 int8
-r5 int8
-r6 int8
-r7 int8
+r0 int32[int8]
+r1 int32[int8]
+r2 int32[int8]
+r3 int32[int8]
+r4 int32[int8]
+r5 int32[int8]
+r6 int32[int8]
+r7 int32[int8]
 S+0 int8
-S+8 int8
+S+1 int8
 =>
-r0 int8
+r0 int32[int8]
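
Reading these expectation updates: "r0 int32[int8]" appears to denote an int8 value passed in the 32-bit view of the register (extended to int32), and the stack offsets shrink from 8-byte slots (S+8) to naturally aligned ones (S+4 for float, S+1 for int8), matching Apple's arm64 ABI, which packs stack arguments at their natural size and alignment instead of promoting each to a word-sized slot. A matching C prototype for the int8x10 case, written out purely for illustration:

    // Ten int8 arguments on arm64 iOS: a0 through a7 travel in w0-w7 (the
    // int32[int8] entries above); a8 and a9 land on the stack at byte
    // offsets S+0 and S+1.
    int8_t take10(int8_t a0, int8_t a1, int8_t a2, int8_t a3, int8_t a4,
                  int8_t a5, int8_t a6, int8_t a7, int8_t a8, int8_t a9);
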
diff --git a/runtime/vm/compiler/stub_code_compiler.cc b/runtime/vm/compiler/stub_code_compiler.cc
index d1bb15d..ede86e0 100644
--- a/runtime/vm/compiler/stub_code_compiler.cc
+++ b/runtime/vm/compiler/stub_code_compiler.cc
@@ -12,6 +12,7 @@
 
 #include "vm/compiler/stub_code_compiler.h"
 
+#include "vm/compiler/api/type_check_mode.h"
 #include "vm/compiler/assembler/assembler.h"
 
 #define __ assembler->
@@ -188,6 +189,237 @@
   __ Ret();
 }
 
+#if !defined(TARGET_ARCH_IA32)
+// The <X>TypeTestStubs are used to test whether a given value is of a given
+// type. All variants have the same calling convention:
+//
+// Inputs (from TypeTestABI struct):
+//   - kSubtypeTestCacheReg: RawSubtypeTestCache
+//   - kInstanceReg: instance to test against.
+//   - kInstantiatorTypeArgumentsReg : instantiator type arguments (if needed).
+//   - kFunctionTypeArgumentsReg : function type arguments (if needed).
+//
+// See GenerateSubtypeNTestCacheStub for registers that may need saving by the
+// caller.
+//
+// Output (from TypeTestABI struct):
+//   - kResultReg: checked instance.
+//
+// Throws if the check is unsuccessful.
+//
+// Note of warning: The caller will not populate CODE_REG, so we have no
+// access to the object pool.
+void StubCodeCompiler::GenerateDefaultTypeTestStub(Assembler* assembler) {
+  __ LoadFromOffset(CODE_REG, THR,
+                    target::Thread::slow_type_test_stub_offset());
+  __ Jump(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
+}
+
+// Used instead of DefaultTypeTestStub when null is assignable.
+void StubCodeCompiler::GenerateDefaultNullableTypeTestStub(
+    Assembler* assembler) {
+  Label done;
+
+  // Fast case for 'null'.
+  __ CompareObject(TypeTestABI::kInstanceReg, NullObject());
+  __ BranchIf(EQUAL, &done);
+
+  __ LoadFromOffset(CODE_REG, THR,
+                    target::Thread::slow_type_test_stub_offset());
+  __ Jump(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
+
+  __ Bind(&done);
+  __ Ret();
+}
+
+void StubCodeCompiler::GenerateTopTypeTypeTestStub(Assembler* assembler) {
+  __ Ret();
+}
+
+void StubCodeCompiler::GenerateUnreachableTypeTestStub(Assembler* assembler) {
+  __ Breakpoint();
+}
+
+static void BuildTypeParameterTypeTestStub(Assembler* assembler,
+                                           bool allow_null) {
+  Label done;
+
+  if (allow_null) {
+    __ CompareObject(TypeTestABI::kInstanceReg, NullObject());
+    __ BranchIf(EQUAL, &done, Assembler::kNearJump);
+  }
+
+  auto handle_case = [&](Register tav) {
+    // If the TAV is null, then resolving the type parameter gives the dynamic
+    // type, which is a top type.
+    __ CompareObject(tav, NullObject());
+    __ BranchIf(EQUAL, &done, Assembler::kNearJump);
+    // Resolve the type parameter to its instantiated type and tail call the
+    // instantiated type's TTS.
+    __ LoadFieldFromOffset(TypeTestABI::kScratchReg, TypeTestABI::kDstTypeReg,
+                           target::TypeParameter::index_offset(), kTwoBytes);
+    __ LoadIndexedPayload(TypeTestABI::kScratchReg, tav,
+                          target::TypeArguments::types_offset(),
+                          TypeTestABI::kScratchReg, TIMES_WORD_SIZE);
+    __ Jump(FieldAddress(
+        TypeTestABI::kScratchReg,
+        target::AbstractType::type_test_stub_entry_point_offset()));
+  };
+
+  Label function_type_param;
+  __ LoadFieldFromOffset(TypeTestABI::kScratchReg, TypeTestABI::kDstTypeReg,
+                         target::TypeParameter::parameterized_class_id_offset(),
+                         kUnsignedTwoBytes);
+  __ CompareImmediate(TypeTestABI::kScratchReg, kFunctionCid);
+  __ BranchIf(EQUAL, &function_type_param, Assembler::kNearJump);
+  handle_case(TypeTestABI::kInstantiatorTypeArgumentsReg);
+  __ Bind(&function_type_param);
+  handle_case(TypeTestABI::kFunctionTypeArgumentsReg);
+  __ Bind(&done);
+  __ Ret();
+}
+
+void StubCodeCompiler::GenerateNullableTypeParameterTypeTestStub(
+    Assembler* assembler) {
+  BuildTypeParameterTypeTestStub(assembler, /*allow_null=*/true);
+}
+
+void StubCodeCompiler::GenerateTypeParameterTypeTestStub(Assembler* assembler) {
+  BuildTypeParameterTypeTestStub(assembler, /*allow_null=*/false);
+}
+
+static void InvokeTypeCheckFromTypeTestStub(Assembler* assembler,
+                                            TypeCheckMode mode) {
+  __ PushObject(NullObject());  // Make room for result.
+  __ PushRegister(TypeTestABI::kInstanceReg);
+  __ PushRegister(TypeTestABI::kDstTypeReg);
+  __ PushRegister(TypeTestABI::kInstantiatorTypeArgumentsReg);
+  __ PushRegister(TypeTestABI::kFunctionTypeArgumentsReg);
+  __ PushObject(NullObject());
+  __ PushRegister(TypeTestABI::kSubtypeTestCacheReg);
+  __ PushImmediate(target::ToRawSmi(mode));
+  __ CallRuntime(kTypeCheckRuntimeEntry, 7);
+  __ Drop(1);  // mode
+  __ PopRegister(TypeTestABI::kSubtypeTestCacheReg);
+  __ Drop(1);  // dst_name
+  __ PopRegister(TypeTestABI::kFunctionTypeArgumentsReg);
+  __ PopRegister(TypeTestABI::kInstantiatorTypeArgumentsReg);
+  __ PopRegister(TypeTestABI::kDstTypeReg);
+  __ PopRegister(TypeTestABI::kInstanceReg);
+  __ Drop(1);  // Discard return value.
+}
+
+void StubCodeCompiler::GenerateLazySpecializeTypeTestStub(
+    Assembler* assembler) {
+  __ LoadFromOffset(CODE_REG, THR,
+                    target::Thread::lazy_specialize_type_test_stub_offset());
+  __ EnterStubFrame();
+  InvokeTypeCheckFromTypeTestStub(assembler, kTypeCheckFromLazySpecializeStub);
+  __ LeaveStubFrame();
+  __ Ret();
+}
+
+// Used instead of LazySpecializeTypeTestStub when null is assignable.
+void StubCodeCompiler::GenerateLazySpecializeNullableTypeTestStub(
+    Assembler* assembler) {
+  Label done;
+
+  __ CompareObject(TypeTestABI::kInstanceReg, NullObject());
+  __ BranchIf(EQUAL, &done);
+
+  __ LoadFromOffset(CODE_REG, THR,
+                    target::Thread::lazy_specialize_type_test_stub_offset());
+  __ EnterStubFrame();
+  InvokeTypeCheckFromTypeTestStub(assembler, kTypeCheckFromLazySpecializeStub);
+  __ LeaveStubFrame();
+
+  __ Bind(&done);
+  __ Ret();
+}
+
+void StubCodeCompiler::GenerateSlowTypeTestStub(Assembler* assembler) {
+  Label done, call_runtime;
+
+  if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
+    __ LoadFromOffset(CODE_REG, THR,
+                      target::Thread::slow_type_test_stub_offset());
+  }
+  __ EnterStubFrame();
+
+  // If the subtype-cache is null, it needs to be lazily created by the runtime.
+  __ CompareObject(TypeTestABI::kSubtypeTestCacheReg, NullObject());
+  __ BranchIf(EQUAL, &call_runtime, Assembler::kNearJump);
+
+  // If this is not a [Type] object, we'll go to the runtime.
+  Label is_simple_case, is_complex_case;
+  __ LoadClassId(TypeTestABI::kScratchReg, TypeTestABI::kDstTypeReg);
+  __ CompareImmediate(TypeTestABI::kScratchReg, kTypeCid);
+  __ BranchIf(NOT_EQUAL, &is_complex_case, Assembler::kNearJump);
+
+  // Check whether this [Type] is instantiated/uninstantiated.
+  __ LoadFieldFromOffset(TypeTestABI::kScratchReg, TypeTestABI::kDstTypeReg,
+                         target::Type::type_state_offset(), kByte);
+  __ CompareImmediate(
+      TypeTestABI::kScratchReg,
+      target::AbstractTypeLayout::kTypeStateFinalizedInstantiated);
+  __ BranchIf(NOT_EQUAL, &is_complex_case, Assembler::kNearJump);
+
+  // Check whether this [Type] is a function type.
+  __ LoadFieldFromOffset(TypeTestABI::kScratchReg, TypeTestABI::kDstTypeReg,
+                         target::Type::signature_offset());
+  __ CompareObject(TypeTestABI::kScratchReg, NullObject());
+  __ BranchIf(NOT_EQUAL, &is_complex_case, Assembler::kNearJump);
+
+  // This [Type] could be a FutureOr. Subtype2TestCache does not support Smi.
+  __ BranchIfSmi(TypeTestABI::kInstanceReg, &is_complex_case);
+
+  // Fall through to &is_simple_case
+
+  const RegisterSet caller_saved_registers(
+      TypeTestABI::kSubtypeTestCacheStubCallerSavedRegisters);
+
+  __ Bind(&is_simple_case);
+  {
+    __ PushRegisters(caller_saved_registers);
+    __ Call(StubCodeSubtype2TestCache());
+    __ CompareObject(TypeTestABI::kSubtypeTestCacheResultReg,
+                     CastHandle<Object>(TrueObject()));
+    __ PopRegisters(caller_saved_registers);
+    __ BranchIf(EQUAL, &done);  // Cache said: yes.
+    __ Jump(&call_runtime, Assembler::kNearJump);
+  }
+
+  __ Bind(&is_complex_case);
+  {
+    __ PushRegisters(caller_saved_registers);
+    __ Call(StubCodeSubtype6TestCache());
+    __ CompareObject(TypeTestABI::kSubtypeTestCacheResultReg,
+                     CastHandle<Object>(TrueObject()));
+    __ PopRegisters(caller_saved_registers);
+    __ BranchIf(EQUAL, &done);  // Cache said: yes.
+    // Fall through to &call_runtime
+  }
+
+  __ Bind(&call_runtime);
+
+  InvokeTypeCheckFromTypeTestStub(assembler, kTypeCheckFromSlowStub);
+
+  __ Bind(&done);
+  __ LeaveStubFrame();
+  __ Ret();
+}
+#else
+// Type testing stubs are not implemented on IA32.
+#define GENERATE_BREAKPOINT_STUB(Name)                                         \
+  void StubCodeCompiler::Generate##Name##Stub(Assembler* assembler) {          \
+    __ Breakpoint();                                                           \
+  }
+
+VM_TYPE_TESTING_STUB_CODE_LIST(GENERATE_BREAKPOINT_STUB)
+
+#undef GENERATE_BREAKPOINT_STUB
+#endif  // !defined(TARGET_ARCH_IA32)
+
 // The UnhandledException class lives in the VM isolate, so it cannot cache
 // an allocation stub for itself. Instead, we cache it in the stub code list.
 void StubCodeCompiler::GenerateAllocateUnhandledExceptionStub(
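
On IA32 the list macro stamps out one breakpoint body per stub. Written out by hand for a single entry (assuming DefaultTypeTest is among the VM_TYPE_TESTING_STUB_CODE_LIST names, as the generic block above suggests), the expansion is:

    // Hand expansion of GENERATE_BREAKPOINT_STUB(DefaultTypeTest):
    void StubCodeCompiler::GenerateDefaultTypeTestStub(Assembler* assembler) {
      __ Breakpoint();
    }
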
diff --git a/runtime/vm/compiler/stub_code_compiler_arm.cc b/runtime/vm/compiler/stub_code_compiler_arm.cc
index 0b759e0..995fa9e 100644
--- a/runtime/vm/compiler/stub_code_compiler_arm.cc
+++ b/runtime/vm/compiler/stub_code_compiler_arm.cc
@@ -84,18 +84,17 @@
 
   // Save exit frame information to enable stack walking as we are about
   // to transition to Dart VM C++ code.
-  __ StoreToOffset(kWord, FP, THR,
-                   target::Thread::top_exit_frame_info_offset());
+  __ StoreToOffset(FP, THR, target::Thread::top_exit_frame_info_offset());
 
   // Mark that the thread exited generated code through a runtime call.
   __ LoadImmediate(R8, target::Thread::exit_through_runtime_call());
-  __ StoreToOffset(kWord, R8, THR, target::Thread::exit_through_ffi_offset());
+  __ StoreToOffset(R8, THR, target::Thread::exit_through_ffi_offset());
 
 #if defined(DEBUG)
   {
     Label ok;
     // Check that we are always entering from Dart code.
-    __ LoadFromOffset(kWord, R8, THR, target::Thread::vm_tag_offset());
+    __ LoadFromOffset(R8, THR, target::Thread::vm_tag_offset());
     __ CompareImmediate(R8, VMTag::kDartTagId);
     __ b(&ok, EQ);
     __ Stop("Not coming from Dart code.");
@@ -104,7 +103,7 @@
 #endif
 
   // Mark that the thread is executing VM code.
-  __ StoreToOffset(kWord, R9, THR, target::Thread::vm_tag_offset());
+  __ StoreToOffset(R9, THR, target::Thread::vm_tag_offset());
 
   // Reserve space for arguments and align frame before entering C++ world.
   // target::NativeArguments are passed in registers.
@@ -138,15 +137,14 @@
 
   // Mark that the thread is executing Dart code.
   __ LoadImmediate(R2, VMTag::kDartTagId);
-  __ StoreToOffset(kWord, R2, THR, target::Thread::vm_tag_offset());
+  __ StoreToOffset(R2, THR, target::Thread::vm_tag_offset());
 
   // Mark that the thread has not exited generated Dart code.
   __ LoadImmediate(R2, 0);
-  __ StoreToOffset(kWord, R2, THR, target::Thread::exit_through_ffi_offset());
+  __ StoreToOffset(R2, THR, target::Thread::exit_through_ffi_offset());
 
   // Reset exit frame information in Isolate's mutator thread structure.
-  __ StoreToOffset(kWord, R2, THR,
-                   target::Thread::top_exit_frame_info_offset());
+  __ StoreToOffset(R2, THR, target::Thread::top_exit_frame_info_offset());
 
   // Restore the global object pool after returning from runtime (old space is
   // moving, so the GOP could have been relocated).
@@ -447,9 +445,8 @@
   COMPILE_ASSERT(!IsArgumentRegister(R8));
 
   // Load the code object.
-  __ LoadFromOffset(kWord, R5, THR,
-                    compiler::target::Thread::callback_code_offset());
-  __ LoadFieldFromOffset(kWord, R5, R5,
+  __ LoadFromOffset(R5, THR, compiler::target::Thread::callback_code_offset());
+  __ LoadFieldFromOffset(R5, R5,
                          compiler::target::GrowableObjectArray::data_offset());
   __ ldr(R5, __ ElementAddressForRegIndex(
                  /*is_load=*/true,
@@ -459,8 +456,7 @@
                  /*index_unboxed=*/false,
                  /*array=*/R5,
                  /*index=*/R4));
-  __ LoadFieldFromOffset(kWord, R5, R5,
-                         compiler::target::Code::entry_point_offset());
+  __ LoadFieldFromOffset(R5, R5, compiler::target::Code::entry_point_offset());
 
   // On entry to the function, there will be four extra slots on the stack:
   // saved THR, R4, R5 and the return address. The target will know to skip
@@ -528,18 +524,17 @@
 
   // Save exit frame information to enable stack walking as we are about
   // to transition to native code.
-  __ StoreToOffset(kWord, FP, THR,
-                   target::Thread::top_exit_frame_info_offset());
+  __ StoreToOffset(FP, THR, target::Thread::top_exit_frame_info_offset());
 
   // Mark that the thread exited generated code through a runtime call.
   __ LoadImmediate(R8, target::Thread::exit_through_runtime_call());
-  __ StoreToOffset(kWord, R8, THR, target::Thread::exit_through_ffi_offset());
+  __ StoreToOffset(R8, THR, target::Thread::exit_through_ffi_offset());
 
 #if defined(DEBUG)
   {
     Label ok;
     // Check that we are always entering from Dart code.
-    __ LoadFromOffset(kWord, R8, THR, target::Thread::vm_tag_offset());
+    __ LoadFromOffset(R8, THR, target::Thread::vm_tag_offset());
     __ CompareImmediate(R8, VMTag::kDartTagId);
     __ b(&ok, EQ);
     __ Stop("Not coming from Dart code.");
@@ -548,7 +543,7 @@
 #endif
 
   // Mark that the thread is executing native code.
-  __ StoreToOffset(kWord, R9, THR, target::Thread::vm_tag_offset());
+  __ StoreToOffset(R9, THR, target::Thread::vm_tag_offset());
 
   // Reserve space for the native arguments structure passed on the stack (the
   // outgoing pointer parameter to the native arguments structure is passed in
@@ -588,15 +583,14 @@
 
   // Mark that the thread is executing Dart code.
   __ LoadImmediate(R2, VMTag::kDartTagId);
-  __ StoreToOffset(kWord, R2, THR, target::Thread::vm_tag_offset());
+  __ StoreToOffset(R2, THR, target::Thread::vm_tag_offset());
 
   // Mark that the thread has not exited generated Dart code.
   __ LoadImmediate(R2, 0);
-  __ StoreToOffset(kWord, R2, THR, target::Thread::exit_through_ffi_offset());
+  __ StoreToOffset(R2, THR, target::Thread::exit_through_ffi_offset());
 
   // Reset exit frame information in Isolate's mutator thread structure.
-  __ StoreToOffset(kWord, R2, THR,
-                   target::Thread::top_exit_frame_info_offset());
+  __ StoreToOffset(R2, THR, target::Thread::top_exit_frame_info_offset());
 
   // Restore the global object pool after returning from runtime (old space is
   // moving, so the GOP could have been relocated).
@@ -1197,25 +1191,23 @@
 #endif
 
   // Save the current VMTag on the stack.
-  __ LoadFromOffset(kWord, R9, THR, target::Thread::vm_tag_offset());
+  __ LoadFromOffset(R9, THR, target::Thread::vm_tag_offset());
   __ Push(R9);
 
   // Save top resource and top exit frame info. Use R4-6 as temporary registers.
   // StackFrameIterator reads the top exit frame info saved in this frame.
-  __ LoadFromOffset(kWord, R4, THR, target::Thread::top_resource_offset());
+  __ LoadFromOffset(R4, THR, target::Thread::top_resource_offset());
   __ Push(R4);
   __ LoadImmediate(R8, 0);
-  __ StoreToOffset(kWord, R8, THR, target::Thread::top_resource_offset());
+  __ StoreToOffset(R8, THR, target::Thread::top_resource_offset());
 
-  __ LoadFromOffset(kWord, R8, THR, target::Thread::exit_through_ffi_offset());
+  __ LoadFromOffset(R8, THR, target::Thread::exit_through_ffi_offset());
   __ Push(R8);
   __ LoadImmediate(R8, 0);
-  __ StoreToOffset(kWord, R8, THR, target::Thread::exit_through_ffi_offset());
+  __ StoreToOffset(R8, THR, target::Thread::exit_through_ffi_offset());
 
-  __ LoadFromOffset(kWord, R9, THR,
-                    target::Thread::top_exit_frame_info_offset());
-  __ StoreToOffset(kWord, R8, THR,
-                   target::Thread::top_exit_frame_info_offset());
+  __ LoadFromOffset(R9, THR, target::Thread::top_exit_frame_info_offset());
+  __ StoreToOffset(R8, THR, target::Thread::top_exit_frame_info_offset());
 
   // target::frame_layout.exit_link_slot_from_entry_fp must be kept in sync
   // with the code below.
@@ -1231,7 +1223,7 @@
   // Mark that the thread is executing Dart code. Do this after initializing the
   // exit link for the profiler.
   __ LoadImmediate(R9, VMTag::kDartTagId);
-  __ StoreToOffset(kWord, R9, THR, target::Thread::vm_tag_offset());
+  __ StoreToOffset(R9, THR, target::Thread::vm_tag_offset());
 
   // Load arguments descriptor array into R4, which is passed to Dart code.
   __ ldr(R4, Address(R1, target::VMHandles::kOffsetOfRawPtrInHandle));
@@ -1282,16 +1274,15 @@
   // Restore the saved top exit frame info and top resource back into the
   // Isolate structure. Uses R9 as a temporary register for this.
   __ Pop(R9);
-  __ StoreToOffset(kWord, R9, THR,
-                   target::Thread::top_exit_frame_info_offset());
+  __ StoreToOffset(R9, THR, target::Thread::top_exit_frame_info_offset());
   __ Pop(R9);
-  __ StoreToOffset(kWord, R9, THR, target::Thread::exit_through_ffi_offset());
+  __ StoreToOffset(R9, THR, target::Thread::exit_through_ffi_offset());
   __ Pop(R9);
-  __ StoreToOffset(kWord, R9, THR, target::Thread::top_resource_offset());
+  __ StoreToOffset(R9, THR, target::Thread::top_resource_offset());
 
   // Restore the current VMTag from the stack.
   __ Pop(R4);
-  __ StoreToOffset(kWord, R4, THR, target::Thread::vm_tag_offset());
+  __ StoreToOffset(R4, THR, target::Thread::vm_tag_offset());
 
 #if defined(USING_SHADOW_CALL_STACK)
 #error Unimplemented
@@ -2082,7 +2073,7 @@
     // Update counter, ignore overflow.
     const intptr_t count_offset =
         target::ICData::CountIndexFor(num_args) * target::kWordSize;
-    __ LoadFromOffset(kWord, R1, R8, count_offset);
+    __ LoadFromOffset(R1, R8, count_offset);
     __ adds(R1, R1, Operand(target::ToRawSmi(1)));
     __ StoreIntoSmiField(Address(R8, count_offset), R1);
   }
@@ -2274,7 +2265,7 @@
   __ PushList(regs);
   // Push call arguments.
   for (intptr_t i = 0; i < num_args; i++) {
-    __ LoadFromOffset(kWord, TMP, R1, -i * target::kWordSize);
+    __ LoadFromOffset(TMP, R1, -i * target::kWordSize);
     __ Push(TMP);
   }
   // Pass IC data object.
@@ -2303,11 +2294,11 @@
       target::ICData::TargetIndexFor(num_args) * target::kWordSize;
   const intptr_t count_offset =
       target::ICData::CountIndexFor(num_args) * target::kWordSize;
-  __ LoadFromOffset(kWord, R0, R8, kIcDataOffset + target_offset);
+  __ LoadFromOffset(R0, R8, kIcDataOffset + target_offset);
 
   if (FLAG_optimization_counter_threshold >= 0) {
     __ Comment("Update caller's counter");
-    __ LoadFromOffset(kWord, R1, R8, kIcDataOffset + count_offset);
+    __ LoadFromOffset(R1, R8, kIcDataOffset + count_offset);
     // Ignore overflow.
     __ adds(R1, R1, Operand(target::ToRawSmi(1)));
     __ StoreIntoSmiField(Address(R8, kIcDataOffset + count_offset), R1);
@@ -2482,7 +2473,7 @@
 
   if (FLAG_optimization_counter_threshold >= 0) {
     // Increment count for this call, ignore overflow.
-    __ LoadFromOffset(kWord, R1, R8, count_offset);
+    __ LoadFromOffset(R1, R8, count_offset);
     __ adds(R1, R1, Operand(target::ToRawSmi(1)));
     __ StoreIntoSmiField(Address(R8, count_offset), R1);
   }
@@ -2492,7 +2483,7 @@
          FieldAddress(R9, target::CallSiteData::arguments_descriptor_offset()));
 
   // Get function and call it, if possible.
-  __ LoadFromOffset(kWord, R0, R8, target_offset);
+  __ LoadFromOffset(R0, R8, target_offset);
   __ ldr(CODE_REG, FieldAddress(R0, target::Function::code_offset()));
 
   __ Branch(Address(R0, R3));
@@ -2838,229 +2829,6 @@
   GenerateSubtypeNTestCacheStub(assembler, 6);
 }
 
-// The <X>TypeTestStubs are used to test whether a given value is of a given
-// type. All variants have the same calling convention:
-//
-// Inputs (from TypeTestABI struct):
-//   - kSubtypeTestCacheReg: RawSubtypeTestCache
-//   - kInstanceReg: instance to test against.
-//   - kInstantiatorTypeArgumentsReg : instantiator type arguments (if needed).
-//   - kFunctionTypeArgumentsReg : function type arguments (if needed).
-//
-// See GenerateSubtypeNTestCacheStub for registers that may need saving by the
-// caller.
-//
-// Output (from TypeTestABI struct):
-//   - kResultReg: checked instance.
-//
-// Throws if the check is unsuccessful.
-//
-// Note of warning: The caller will not populate CODE_REG and we have therefore
-// no access to the pool.
-void StubCodeCompiler::GenerateDefaultTypeTestStub(Assembler* assembler) {
-  __ ldr(CODE_REG, Address(THR, target::Thread::slow_type_test_stub_offset()));
-  __ Branch(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
-}
-
-// Used instead of DefaultTypeTestStub when null is assignable.
-void StubCodeCompiler::GenerateDefaultNullableTypeTestStub(
-    Assembler* assembler) {
-  Label done;
-
-  // Fast case for 'null'.
-  __ CompareObject(TypeTestABI::kInstanceReg, NullObject());
-  __ BranchIf(EQUAL, &done);
-
-  __ ldr(CODE_REG, Address(THR, target::Thread::slow_type_test_stub_offset()));
-  __ Branch(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
-
-  __ Bind(&done);
-  __ Ret();
-}
-
-void StubCodeCompiler::GenerateTopTypeTypeTestStub(Assembler* assembler) {
-  __ Ret();
-}
-
-void StubCodeCompiler::GenerateUnreachableTypeTestStub(Assembler* assembler) {
-  __ Breakpoint();
-}
-
-static void InvokeTypeCheckFromTypeTestStub(Assembler* assembler,
-                                            TypeCheckMode mode) {
-  __ PushObject(NullObject());  // Make room for result.
-  __ Push(TypeTestABI::kInstanceReg);
-  __ Push(TypeTestABI::kDstTypeReg);
-  __ Push(TypeTestABI::kInstantiatorTypeArgumentsReg);
-  __ Push(TypeTestABI::kFunctionTypeArgumentsReg);
-  __ PushObject(NullObject());
-  __ Push(TypeTestABI::kSubtypeTestCacheReg);
-  __ PushImmediate(target::ToRawSmi(mode));
-  __ CallRuntime(kTypeCheckRuntimeEntry, 7);
-  __ Drop(1);  // mode
-  __ Pop(TypeTestABI::kSubtypeTestCacheReg);
-  __ Drop(1);  // dst_name
-  __ Pop(TypeTestABI::kFunctionTypeArgumentsReg);
-  __ Pop(TypeTestABI::kInstantiatorTypeArgumentsReg);
-  __ Pop(TypeTestABI::kDstTypeReg);
-  __ Pop(TypeTestABI::kInstanceReg);
-  __ Drop(1);  // Discard return value.
-}
-
-void StubCodeCompiler::GenerateLazySpecializeTypeTestStub(
-    Assembler* assembler) {
-  __ ldr(CODE_REG,
-         Address(THR, target::Thread::lazy_specialize_type_test_stub_offset()));
-  __ EnterStubFrame();
-  InvokeTypeCheckFromTypeTestStub(assembler, kTypeCheckFromLazySpecializeStub);
-  __ LeaveStubFrame();
-  __ Ret();
-}
-
-// Used instead of LazySpecializeTypeTestStub when null is assignable.
-void StubCodeCompiler::GenerateLazySpecializeNullableTypeTestStub(
-    Assembler* assembler) {
-  Label done;
-
-  __ CompareObject(TypeTestABI::kInstanceReg, NullObject());
-  __ BranchIf(EQUAL, &done);
-
-  __ ldr(CODE_REG,
-         Address(THR, target::Thread::lazy_specialize_type_test_stub_offset()));
-  __ EnterStubFrame();
-  InvokeTypeCheckFromTypeTestStub(assembler, kTypeCheckFromLazySpecializeStub);
-  __ LeaveStubFrame();
-
-  __ Bind(&done);
-  __ Ret();
-}
-
-static void BuildTypeParameterTypeTestStub(Assembler* assembler,
-                                           bool allow_null) {
-  Label done;
-
-  if (allow_null) {
-    __ CompareObject(TypeTestABI::kInstanceReg, NullObject());
-    __ BranchIf(EQUAL, &done);
-  }
-
-  Label function_type_param;
-  __ ldrh(TypeTestABI::kScratchReg,
-          FieldAddress(TypeTestABI::kDstTypeReg,
-                       TypeParameter::parameterized_class_id_offset()));
-  __ cmp(TypeTestABI::kScratchReg, Operand(kFunctionCid));
-  __ BranchIf(EQUAL, &function_type_param);
-
-  auto handle_case = [&](Register tav) {
-    __ CompareObject(tav, NullObject());
-    __ BranchIf(EQUAL, &done);
-    __ ldrh(
-        TypeTestABI::kScratchReg,
-        FieldAddress(TypeTestABI::kDstTypeReg, TypeParameter::index_offset()));
-    __ add(TypeTestABI::kScratchReg, tav,
-           Operand(TypeTestABI::kScratchReg, LSL, 8));
-    __ ldr(TypeTestABI::kScratchReg,
-           FieldAddress(TypeTestABI::kScratchReg,
-                        target::TypeArguments::InstanceSize()));
-    __ Branch(FieldAddress(TypeTestABI::kScratchReg,
-                           AbstractType::type_test_stub_entry_point_offset()));
-  };
-
-  // Class type parameter: If dynamic we're done, otherwise dereference type
-  // parameter and tail call.
-  handle_case(TypeTestABI::kInstantiatorTypeArgumentsReg);
-
-  // Function type parameter: If dynamic we're done, otherwise dereference type
-  // parameter and tail call.
-  __ Bind(&function_type_param);
-  handle_case(TypeTestABI::kFunctionTypeArgumentsReg);
-
-  __ Bind(&done);
-  __ Ret();
-}
-
-void StubCodeCompiler::GenerateNullableTypeParameterTypeTestStub(
-    Assembler* assembler) {
-  BuildTypeParameterTypeTestStub(assembler, /*allow_null=*/true);
-}
-
-void StubCodeCompiler::GenerateTypeParameterTypeTestStub(Assembler* assembler) {
-  BuildTypeParameterTypeTestStub(assembler, /*allow_null=*/false);
-}
-
-void StubCodeCompiler::GenerateSlowTypeTestStub(Assembler* assembler) {
-  Label done, call_runtime;
-
-  if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
-    __ ldr(CODE_REG,
-           Address(THR, target::Thread::slow_type_test_stub_offset()));
-  }
-  __ EnterStubFrame();
-
-  // If the subtype-cache is null, it needs to be lazily-created by the runtime.
-  __ CompareObject(TypeTestABI::kSubtypeTestCacheReg, NullObject());
-  __ BranchIf(EQUAL, &call_runtime);
-
-  // If this is not a [Type] object, we'll go to the runtime.
-  Label is_simple_case, is_complex_case;
-  __ LoadClassId(TypeTestABI::kScratchReg, TypeTestABI::kDstTypeReg);
-  __ cmp(TypeTestABI::kScratchReg, Operand(kTypeCid));
-  __ BranchIf(NOT_EQUAL, &is_complex_case);
-
-  // Check whether this [Type] is instantiated/uninstantiated.
-  __ ldrb(TypeTestABI::kScratchReg,
-          FieldAddress(TypeTestABI::kDstTypeReg,
-                       target::Type::type_state_offset()));
-  __ cmp(TypeTestABI::kScratchReg,
-         Operand(target::AbstractTypeLayout::kTypeStateFinalizedInstantiated));
-  __ BranchIf(NOT_EQUAL, &is_complex_case);
-
-  // Check whether this [Type] is a function type.
-  __ ldr(
-      TypeTestABI::kScratchReg,
-      FieldAddress(TypeTestABI::kDstTypeReg, target::Type::signature_offset()));
-  __ CompareObject(TypeTestABI::kScratchReg, NullObject());
-  __ BranchIf(NOT_EQUAL, &is_complex_case);
-
-  // This [Type] could be a FutureOr. Subtype2TestCache does not support Smi.
-  __ BranchIfSmi(TypeTestABI::kInstanceReg, &is_complex_case);
-
-  // Fall through to &is_simple_case
-
-  const intptr_t kRegsToSave = (1 << TypeTestABI::kSubtypeTestCacheReg) |
-                               (1 << TypeTestABI::kDstTypeReg);
-
-  __ Bind(&is_simple_case);
-  {
-    __ PushList(kRegsToSave);
-    __ BranchLink(StubCodeSubtype2TestCache());
-    __ CompareObject(TypeTestABI::kSubtypeTestCacheResultReg,
-                     CastHandle<Object>(TrueObject()));
-    __ PopList(kRegsToSave);
-    __ BranchIf(EQUAL, &done);  // Cache said: yes.
-    __ Jump(&call_runtime);
-  }
-
-  __ Bind(&is_complex_case);
-  {
-    __ PushList(kRegsToSave);
-    __ BranchLink(StubCodeSubtype6TestCache());
-    __ CompareObject(TypeTestABI::kSubtypeTestCacheResultReg,
-                     CastHandle<Object>(TrueObject()));
-    __ PopList(kRegsToSave);
-    __ BranchIf(EQUAL, &done);  // Cache said: yes.
-    // Fall through to runtime_call
-  }
-
-  __ Bind(&call_runtime);
-
-  InvokeTypeCheckFromTypeTestStub(assembler, kTypeCheckFromSlowStub);
-
-  __ Bind(&done);
-  __ LeaveStubFrame();
-  __ Ret();
-}
-
 // Return the current stack pointer address, used to do stack alignment checks.
 void StubCodeCompiler::GenerateGetCStackPointerStub(Assembler* assembler) {
   __ mov(R0, Operand(SP));
@@ -3090,7 +2858,7 @@
   Label exit_through_non_ffi;
   Register tmp1 = R0, tmp2 = R1;
   // Check if we exited generated from FFI. If so do transition.
-  __ LoadFromOffset(kWord, tmp1, THR,
+  __ LoadFromOffset(tmp1, THR,
                     compiler::target::Thread::exit_through_ffi_offset());
   __ LoadImmediate(tmp2, target::Thread::exit_through_ffi());
   __ cmp(tmp1, Operand(tmp2));
@@ -3101,11 +2869,10 @@
 
   // Set the tag.
   __ LoadImmediate(R2, VMTag::kDartTagId);
-  __ StoreToOffset(kWord, R2, THR, target::Thread::vm_tag_offset());
+  __ StoreToOffset(R2, THR, target::Thread::vm_tag_offset());
   // Clear top exit frame.
   __ LoadImmediate(R2, 0);
-  __ StoreToOffset(kWord, R2, THR,
-                   target::Thread::top_exit_frame_info_offset());
+  __ StoreToOffset(R2, THR, target::Thread::top_exit_frame_info_offset());
   // Restore the pool pointer.
   __ RestoreCodePointer();
   if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
@@ -3123,20 +2890,20 @@
 // The arguments are stored in the Thread object.
 // Does not return.
 void StubCodeCompiler::GenerateRunExceptionHandlerStub(Assembler* assembler) {
-  __ LoadFromOffset(kWord, LR, THR, target::Thread::resume_pc_offset());
+  __ LoadFromOffset(LR, THR, target::Thread::resume_pc_offset());
 
   word offset_from_thread = 0;
   bool ok = target::CanLoadFromThread(NullObject(), &offset_from_thread);
   ASSERT(ok);
-  __ LoadFromOffset(kWord, R2, THR, offset_from_thread);
+  __ LoadFromOffset(R2, THR, offset_from_thread);
 
   // Exception object.
-  __ LoadFromOffset(kWord, R0, THR, target::Thread::active_exception_offset());
-  __ StoreToOffset(kWord, R2, THR, target::Thread::active_exception_offset());
+  __ LoadFromOffset(R0, THR, target::Thread::active_exception_offset());
+  __ StoreToOffset(R2, THR, target::Thread::active_exception_offset());
 
   // StackTrace object.
-  __ LoadFromOffset(kWord, R1, THR, target::Thread::active_stacktrace_offset());
-  __ StoreToOffset(kWord, R2, THR, target::Thread::active_stacktrace_offset());
+  __ LoadFromOffset(R1, THR, target::Thread::active_stacktrace_offset());
+  __ StoreToOffset(R2, THR, target::Thread::active_stacktrace_offset());
 
   __ bx(LR);  // Jump to the exception handler code.
 }
@@ -3150,7 +2917,7 @@
   __ Push(IP);
 
   // Load the deopt pc into LR.
-  __ LoadFromOffset(kWord, LR, THR, target::Thread::resume_pc_offset());
+  __ LoadFromOffset(LR, THR, target::Thread::resume_pc_offset());
   GenerateDeoptimizationSequence(assembler, kEagerDeopt);
 
   // After we have deoptimized, jump to the correct frame.
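
Throughout the ARM stub changes above, the leading kWord argument disappears from LoadFromOffset, StoreToOffset, and LoadFieldFromOffset. Read alongside the operand-size rename elsewhere in this diff, the plausible new shape is a register-first signature whose size parameter defaults to the four-byte word; the exact declarations are not shown here, so the sketch below is illustrative:

    // Illustrative ARM macro-assembler signatures inferred from call sites.
    void LoadFromOffset(Register dst, Register base, int32_t offset,
                        OperandSize sz = kFourBytes, Condition cond = AL);
    void StoreToOffset(Register src, Register base, int32_t offset,
                       OperandSize sz = kFourBytes, Condition cond = AL);
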
diff --git a/runtime/vm/compiler/stub_code_compiler_arm64.cc b/runtime/vm/compiler/stub_code_compiler_arm64.cc
index d64b50d..f64edbe 100644
--- a/runtime/vm/compiler/stub_code_compiler_arm64.cc
+++ b/runtime/vm/compiler/stub_code_compiler_arm64.cc
@@ -470,7 +470,7 @@
 
   // Build type_arguments vector (or null)
   Label no_type_args;
-  __ ldr(R3, Address(THR, target::Thread::object_null_offset()), kDoubleWord);
+  __ ldr(R3, Address(THR, target::Thread::object_null_offset()), kEightBytes);
   __ cmp(R4, Operand(0));
   __ b(&no_type_args, EQ);
   __ ldr(R0, Address(FP, kReceiverOffset * target::kWordSize));
@@ -1591,8 +1591,10 @@
     Label slow_case;
 
     // Load num. variable (int32) in the existing context.
-    __ ldr(R1, FieldAddress(R5, target::Context::num_variables_offset(), kWord),
-           kWord);
+    __ ldr(
+        R1,
+        FieldAddress(R5, target::Context::num_variables_offset(), kFourBytes),
+        kFourBytes);
 
     GenerateAllocateContextSpaceStub(assembler, &slow_case);
 
@@ -1693,12 +1695,12 @@
          target::ObjectAlignment::kNewObjectBitPosition);
 
   if (cards) {
-    __ LoadFieldFromOffset(TMP, R1, target::Object::tags_offset(), kWord);
+    __ LoadFieldFromOffset(TMP, R1, target::Object::tags_offset(), kFourBytes);
     __ tbnz(&remember_card, TMP, target::ObjectLayout::kCardRememberedBit);
   } else {
 #if defined(DEBUG)
     Label ok;
-    __ LoadFieldFromOffset(TMP, R1, target::Object::tags_offset(), kWord);
+    __ LoadFieldFromOffset(TMP, R1, target::Object::tags_offset(), kFourBytes);
     __ tbz(&ok, TMP, target::ObjectLayout::kCardRememberedBit);
     __ Stop("Wrong barrier");
     __ Bind(&ok);
@@ -1718,17 +1720,17 @@
   // background sweeper which is also manipulating this 32 bit word.
   Label retry;
   __ Bind(&retry);
-  __ ldxr(R2, R3, kWord);
+  __ ldxr(R2, R3, kFourBytes);
   __ AndImmediate(R2, R2,
                   ~(1 << target::ObjectLayout::kOldAndNotRememberedBit));
-  __ stxr(R4, R2, R3, kWord);
+  __ stxr(R4, R2, R3, kFourBytes);
   __ cbnz(&retry, R4);
 
   // Load the StoreBuffer block out of the thread. Then load top_ out of the
   // StoreBufferBlock and add the address to the pointers_.
   __ LoadFromOffset(R4, THR, target::Thread::store_buffer_block_offset());
   __ LoadFromOffset(R2, R4, target::StoreBufferBlock::top_offset(),
-                    kUnsignedWord);
+                    kUnsignedFourBytes);
   __ add(R3, R4, Operand(R2, LSL, target::kWordSizeLog2));
   __ StoreToOffset(R1, R3, target::StoreBufferBlock::pointers_offset());
 
@@ -1738,7 +1740,7 @@
   Label overflow;
   __ add(R2, R2, Operand(1));
   __ StoreToOffset(R2, R4, target::StoreBufferBlock::top_offset(),
-                   kUnsignedWord);
+                   kUnsignedFourBytes);
   __ CompareImmediate(R2, target::StoreBufferBlock::kSize);
   // Restore values.
   __ Pop(R4);
@@ -1771,20 +1773,20 @@
   __ sub(R3, R0, Operand(kHeapObjectTag));
   // R3: Untagged address of header word (ldxr/stxr do not support offsets).
   __ Bind(&marking_retry);
-  __ ldxr(R2, R3, kWord);
+  __ ldxr(R2, R3, kFourBytes);
   __ tbz(&lost_race, R2, target::ObjectLayout::kOldAndNotMarkedBit);
   __ AndImmediate(R2, R2, ~(1 << target::ObjectLayout::kOldAndNotMarkedBit));
-  __ stxr(R4, R2, R3, kWord);
+  __ stxr(R4, R2, R3, kFourBytes);
   __ cbnz(&marking_retry, R4);
 
   __ LoadFromOffset(R4, THR, target::Thread::marking_stack_block_offset());
   __ LoadFromOffset(R2, R4, target::MarkingStackBlock::top_offset(),
-                    kUnsignedWord);
+                    kUnsignedFourBytes);
   __ add(R3, R4, Operand(R2, LSL, target::kWordSizeLog2));
   __ StoreToOffset(R0, R3, target::MarkingStackBlock::pointers_offset());
   __ add(R2, R2, Operand(1));
   __ StoreToOffset(R2, R4, target::MarkingStackBlock::top_offset(),
-                   kUnsignedWord);
+                   kUnsignedFourBytes);
   __ CompareImmediate(R2, target::MarkingStackBlock::kSize);
   __ Pop(R4);  // Unspill.
   __ Pop(R3);  // Unspill.
@@ -1923,7 +1925,7 @@
           FieldAddress(kTypeOffestReg,
                        target::Class::
                            host_type_arguments_field_offset_in_words_offset()),
-          kWord);
+          kFourBytes);
 
       // Set the type arguments in the new object.
       __ StoreIntoObjectNoBarrier(
@@ -2141,10 +2143,10 @@
     __ LeaveStubFrame();
   }
   __ LoadFieldFromOffset(R7, func_reg, target::Function::usage_counter_offset(),
-                         kWord);
+                         kFourBytes);
   __ add(R7, R7, Operand(1));
   __ StoreFieldToOffset(R7, func_reg, target::Function::usage_counter_offset(),
-                        kWord);
+                        kFourBytes);
 }
 
 // Loads function into 'temp_reg'.
@@ -2160,11 +2162,11 @@
     ASSERT(temp_reg == R6);
     __ Comment("Increment function counter");
     __ LoadFieldFromOffset(func_reg, ic_reg, target::ICData::owner_offset());
-    __ LoadFieldFromOffset(R7, func_reg,
-                           target::Function::usage_counter_offset(), kWord);
+    __ LoadFieldFromOffset(
+        R7, func_reg, target::Function::usage_counter_offset(), kFourBytes);
     __ AddImmediate(R7, 1);
     __ StoreFieldToOffset(R7, func_reg,
-                          target::Function::usage_counter_offset(), kWord);
+                          target::Function::usage_counter_offset(), kFourBytes);
   }
 }
 
@@ -2295,7 +2297,7 @@
     // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
     __ LoadFromOffset(R6, R5,
                       target::ICData::state_bits_offset() - kHeapObjectTag,
-                      kUnsignedWord);
+                      kUnsignedFourBytes);
     ASSERT(target::ICData::NumArgsTestedShift() == 0);  // No shift needed.
     __ andi(R6, R6, Immediate(target::ICData::NumArgsTestedMask()));
     __ CompareImmediate(R6, num_args);
@@ -2609,7 +2611,7 @@
     // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
     __ LoadFromOffset(R6, R5,
                       target::ICData::state_bits_offset() - kHeapObjectTag,
-                      kUnsignedWord);
+                      kUnsignedFourBytes);
     ASSERT(target::ICData::NumArgsTestedShift() == 0);  // No shift needed.
     __ andi(R6, R6, Immediate(target::ICData::NumArgsTestedMask()));
     __ CompareImmediate(R6, 0);
@@ -2872,7 +2874,7 @@
       __ LoadFieldFromOffset(
           kScratchReg, kScratchReg,
           target::Class::host_type_arguments_field_offset_in_words_offset(),
-          kWord);
+          kFourBytes);
       __ CompareImmediate(kScratchReg, target::Class::kNoTypeArguments);
       __ b(&has_no_type_arguments, EQ);
       __ add(kScratchReg, TypeTestABI::kInstanceReg,
@@ -2982,239 +2984,6 @@
   GenerateSubtypeNTestCacheStub(assembler, 6);
 }
 
-// The <X>TypeTestStubs are used to test whether a given value is of a given
-// type. All variants have the same calling convention:
-//
-// Inputs (from TypeTestABI struct):
-//   - kSubtypeTestCacheReg: RawSubtypeTestCache
-//   - kInstanceReg: instance to test against.
-//   - kInstantiatorTypeArgumentsReg : instantiator type arguments (if needed).
-//   - kFunctionTypeArgumentsReg : function type arguments (if needed).
-//
-// See GenerateSubtypeNTestCacheStub for registers that may need saving by the
-// caller.
-//
-// Output (from TypeTestABI struct):
-//   - kResultReg: checked instance.
-//
-// Throws if the check is unsuccessful.
-//
-// Note of warning: The caller will not populate CODE_REG and we have therefore
-// no access to the pool.
-void StubCodeCompiler::GenerateDefaultTypeTestStub(Assembler* assembler) {
-  // Tail call the [SubtypeTestCache]-based implementation.
-  __ ldr(CODE_REG, Address(THR, target::Thread::slow_type_test_stub_offset()));
-  __ ldr(R9, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
-  __ br(R9);
-}
-
-// Used instead of DefaultTypeTestStub when null is assignable.
-void StubCodeCompiler::GenerateDefaultNullableTypeTestStub(
-    Assembler* assembler) {
-  Label done;
-
-  // Fast case for 'null'.
-  __ CompareObject(TypeTestABI::kInstanceReg, NullObject());
-  __ BranchIf(EQUAL, &done);
-
-  // Tail call the [SubtypeTestCache]-based implementation.
-  __ ldr(CODE_REG, Address(THR, target::Thread::slow_type_test_stub_offset()));
-  __ ldr(R9, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
-  __ br(R9);
-
-  __ Bind(&done);
-  __ Ret();
-}
-
-void StubCodeCompiler::GenerateTopTypeTypeTestStub(Assembler* assembler) {
-  __ Ret();
-}
-
-void StubCodeCompiler::GenerateUnreachableTypeTestStub(Assembler* assembler) {
-  __ Breakpoint();
-}
-
-static void InvokeTypeCheckFromTypeTestStub(Assembler* assembler,
-                                            TypeCheckMode mode) {
-  __ PushObject(NullObject());  // Make room for result.
-  __ Push(TypeTestABI::kInstanceReg);
-  __ Push(TypeTestABI::kDstTypeReg);
-  __ Push(TypeTestABI::kInstantiatorTypeArgumentsReg);
-  __ Push(TypeTestABI::kFunctionTypeArgumentsReg);
-  __ PushObject(NullObject());
-  __ Push(TypeTestABI::kSubtypeTestCacheReg);
-  __ PushImmediate(target::ToRawSmi(mode));
-  __ CallRuntime(kTypeCheckRuntimeEntry, 7);
-  __ Drop(1);  // mode
-  __ Pop(TypeTestABI::kSubtypeTestCacheReg);
-  __ Drop(1);  // dst_name
-  __ Pop(TypeTestABI::kFunctionTypeArgumentsReg);
-  __ Pop(TypeTestABI::kInstantiatorTypeArgumentsReg);
-  __ Pop(TypeTestABI::kDstTypeReg);
-  __ Pop(TypeTestABI::kInstanceReg);
-  __ Drop(1);  // Discard return value.
-}
-
-void StubCodeCompiler::GenerateLazySpecializeTypeTestStub(
-    Assembler* assembler) {
-  __ ldr(CODE_REG,
-         Address(THR, target::Thread::lazy_specialize_type_test_stub_offset()));
-  __ EnterStubFrame();
-  InvokeTypeCheckFromTypeTestStub(assembler, kTypeCheckFromLazySpecializeStub);
-  __ LeaveStubFrame();
-  __ Ret();
-}
-
-// Used instead of LazySpecializeTypeTestStub when null is assignable.
-void StubCodeCompiler::GenerateLazySpecializeNullableTypeTestStub(
-    Assembler* assembler) {
-  Label done;
-
-  __ CompareObject(TypeTestABI::kInstanceReg, NullObject());
-  __ BranchIf(EQUAL, &done);
-
-  __ ldr(CODE_REG,
-         Address(THR, target::Thread::lazy_specialize_type_test_stub_offset()));
-  __ EnterStubFrame();
-  InvokeTypeCheckFromTypeTestStub(assembler, kTypeCheckFromLazySpecializeStub);
-  __ LeaveStubFrame();
-
-  __ Bind(&done);
-  __ Ret();
-}
-
-static void BuildTypeParameterTypeTestStub(Assembler* assembler,
-                                           bool allow_null) {
-  Label done;
-
-  if (allow_null) {
-    __ CompareObject(TypeTestABI::kInstanceReg, NullObject());
-    __ BranchIf(EQUAL, &done);
-  }
-
-  Label function_type_param;
-  __ ldr(TypeTestABI::kScratchReg,
-         FieldAddress(TypeTestABI::kDstTypeReg,
-                      TypeParameter::parameterized_class_id_offset()),
-         kUnsignedHalfword);
-  __ cmp(TypeTestABI::kScratchReg, Operand(kFunctionCid));
-  __ BranchIf(EQUAL, &function_type_param);
-
-  auto handle_case = [&](Register tav) {
-    __ CompareObject(tav, NullObject());
-    __ BranchIf(EQUAL, &done);
-    __ ldr(
-        TypeTestABI::kScratchReg,
-        FieldAddress(TypeTestABI::kDstTypeReg, TypeParameter::index_offset()),
-        kUnsignedHalfword);
-    __ add(TypeTestABI::kScratchReg, tav,
-           Operand(TypeTestABI::kScratchReg, LSL, 8));
-    __ ldr(TypeTestABI::kScratchReg,
-           FieldAddress(TypeTestABI::kScratchReg,
-                        target::TypeArguments::InstanceSize()));
-    __ ldr(TypeTestABI::kScratchReg,
-           FieldAddress(TypeTestABI::kScratchReg,
-                        AbstractType::type_test_stub_entry_point_offset()));
-    __ br(TypeTestABI::kScratchReg);
-  };
-
-  // Class type parameter: If dynamic we're done, otherwise dereference type
-  // parameter and tail call.
-  handle_case(TypeTestABI::kInstantiatorTypeArgumentsReg);
-
-  // Function type parameter: If dynamic we're done, otherwise dereference type
-  // parameter and tail call.
-  __ Bind(&function_type_param);
-  handle_case(TypeTestABI::kFunctionTypeArgumentsReg);
-
-  __ Bind(&done);
-  __ Ret();
-}
-
-void StubCodeCompiler::GenerateNullableTypeParameterTypeTestStub(
-    Assembler* assembler) {
-  BuildTypeParameterTypeTestStub(assembler, /*allow_null=*/true);
-}
-
-void StubCodeCompiler::GenerateTypeParameterTypeTestStub(Assembler* assembler) {
-  BuildTypeParameterTypeTestStub(assembler, /*allow_null=*/false);
-}
-
-void StubCodeCompiler::GenerateSlowTypeTestStub(Assembler* assembler) {
-  Label done, call_runtime;
-
-  if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
-    __ ldr(CODE_REG,
-           Address(THR, target::Thread::slow_type_test_stub_offset()));
-  }
-  __ EnterStubFrame();
-
-  // If the subtype-cache is null, it needs to be lazily-created by the runtime.
-  __ CompareObject(TypeTestABI::kSubtypeTestCacheReg, NullObject());
-  __ BranchIf(EQUAL, &call_runtime);
-
-  // If this is not a [Type] object, we'll go to the runtime.
-  Label is_simple_case, is_complex_case;
-  __ LoadClassId(TypeTestABI::kScratchReg, TypeTestABI::kDstTypeReg);
-  __ cmp(TypeTestABI::kScratchReg, Operand(kTypeCid));
-  __ BranchIf(NOT_EQUAL, &is_complex_case);
-
-  // Check whether this [Type] is instantiated/uninstantiated.
-  __ ldr(TypeTestABI::kScratchReg,
-         FieldAddress(TypeTestABI::kDstTypeReg,
-                      target::Type::type_state_offset(), kByte),
-         kByte);
-  __ cmp(TypeTestABI::kScratchReg,
-         Operand(target::AbstractTypeLayout::kTypeStateFinalizedInstantiated));
-  __ BranchIf(NOT_EQUAL, &is_complex_case);
-
-  // Check whether this [Type] is a function type.
-  __ ldr(
-      TypeTestABI::kScratchReg,
-      FieldAddress(TypeTestABI::kDstTypeReg, target::Type::signature_offset()));
-  __ CompareObject(TypeTestABI::kScratchReg, NullObject());
-  __ BranchIf(NOT_EQUAL, &is_complex_case);
-
-  // This [Type] could be a FutureOr. Subtype2TestCache does not support Smi.
-  __ BranchIfSmi(TypeTestABI::kInstanceReg, &is_complex_case);
-
-  // Fall through to &is_simple_case
-
-  __ Bind(&is_simple_case);
-  {
-    __ PushPair(TypeTestABI::kFunctionTypeArgumentsReg,
-                TypeTestABI::kSubtypeTestCacheReg);
-    __ BranchLink(StubCodeSubtype2TestCache());
-    __ CompareObject(TypeTestABI::kSubtypeTestCacheResultReg,
-                     CastHandle<Object>(TrueObject()));
-    __ PopPair(TypeTestABI::kFunctionTypeArgumentsReg,
-               TypeTestABI::kSubtypeTestCacheReg);
-    __ BranchIf(EQUAL, &done);  // Cache said: yes.
-    __ Jump(&call_runtime);
-  }
-
-  __ Bind(&is_complex_case);
-  {
-    __ PushPair(TypeTestABI::kFunctionTypeArgumentsReg,
-                TypeTestABI::kSubtypeTestCacheReg);
-    __ BranchLink(StubCodeSubtype6TestCache());
-    __ CompareObject(TypeTestABI::kSubtypeTestCacheResultReg,
-                     CastHandle<Object>(TrueObject()));
-    __ PopPair(TypeTestABI::kFunctionTypeArgumentsReg,
-               TypeTestABI::kSubtypeTestCacheReg);
-    __ BranchIf(EQUAL, &done);  // Cache said: yes.
-    // Fall through to &call_runtime
-  }
-
-  __ Bind(&call_runtime);
-
-  InvokeTypeCheckFromTypeTestStub(assembler, kTypeCheckFromSlowStub);
-
-  __ Bind(&done);
-  __ LeaveStubFrame();
-  __ Ret();
-}
-
 void StubCodeCompiler::GenerateGetCStackPointerStub(Assembler* assembler) {
   __ mov(R0, CSP);
   __ ret();
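Note on the removal above: InvokeTypeCheckFromTypeTestStub built a seven-argument frame for kTypeCheckRuntimeEntry and unwound it symmetrically. A sketch of the slot layout, reconstructed from the removed push/pop sequence (the arm64 copy opens this removal block mid-function; its x64 twin appears in full further down):

    // Frame built by the removed InvokeTypeCheckFromTypeTestStub,
    // oldest slot first:
    //   [0] null                            -- result slot
    //   [1] kInstanceReg                    -- instance under test
    //   [2] kDstTypeReg                     -- destination type
    //   [3] kInstantiatorTypeArgumentsReg
    //   [4] kFunctionTypeArgumentsReg
    //   [5] null                            -- dst_name slot
    //   [6] kSubtypeTestCacheReg
    //   [7] mode (as a Smi)
    // CallRuntime(kTypeCheckRuntimeEntry, 7) reads slots 1-7 and may
    // replace the cache and the result slot; the epilogue then pops the
    // argument registers in reverse order and drops mode, dst_name, and
    // the result.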
@@ -3629,12 +3398,12 @@
   __ LoadClassIdMayBeSmi(R1, R0);
   __ ldr(R2,
          FieldAddress(R5, target::SingleTargetCache::lower_limit_offset(),
-                      kHalfword),
-         kUnsignedHalfword);
+                      kTwoBytes),
+         kUnsignedTwoBytes);
   __ ldr(R3,
          FieldAddress(R5, target::SingleTargetCache::upper_limit_offset(),
-                      kHalfword),
-         kUnsignedHalfword);
+                      kTwoBytes),
+         kUnsignedTwoBytes);
 
   __ cmp(R1, Operand(R2));
   __ b(&miss, LT);
diff --git a/runtime/vm/compiler/stub_code_compiler_ia32.cc b/runtime/vm/compiler/stub_code_compiler_ia32.cc
index efc4e36..b306279 100644
--- a/runtime/vm/compiler/stub_code_compiler_ia32.cc
+++ b/runtime/vm/compiler/stub_code_compiler_ia32.cc
@@ -1060,9 +1060,9 @@
   // EDX: number of context variables.
   __ cmpl(EBX, Address(THR, target::Thread::end_offset()));
 #if defined(DEBUG)
-  static const bool kJumpLength = Assembler::kFarJump;
+  static auto const kJumpLength = Assembler::kFarJump;
 #else
-  static const bool kJumpLength = Assembler::kNearJump;
+  static auto const kJumpLength = Assembler::kNearJump;
 #endif  // DEBUG
   __ j(ABOVE_EQUAL, slow_case, kJumpLength);
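The bool -> auto change matters because Assembler::kFarJump/kNearJump are evidently no longer boolean constants (later hunks pass them where a bare bool or /*near_jump=*/true used to go). A minimal standalone sketch of the difference, with a hypothetical JumpDistance enum standing in for the real assembler type:

    #include <cassert>

    enum JumpDistance { kFarJump, kNearJump };  // hypothetical stand-in

    int main() {
      static const bool as_bool = kNearJump;  // narrows: any nonzero -> true
      static auto const as_enum = kNearJump;  // deduces JumpDistance
      assert(as_bool);               // the enum's identity is lost
      assert(as_enum == kNearJump);  // still usable where JumpDistance is expected
      return 0;
    }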
 
@@ -2416,55 +2416,6 @@
   GenerateSubtypeNTestCacheStub(assembler, 6);
 }
 
-void StubCodeCompiler::GenerateDefaultTypeTestStub(Assembler* assembler) {
-  // Not implemented on ia32.
-  __ Breakpoint();
-}
-
-void StubCodeCompiler::GenerateDefaultNullableTypeTestStub(
-    Assembler* assembler) {
-  // Not implemented on ia32.
-  __ Breakpoint();
-}
-
-void StubCodeCompiler::GenerateTopTypeTypeTestStub(Assembler* assembler) {
-  // Not implemented on ia32.
-  __ Breakpoint();
-}
-
-void StubCodeCompiler::GenerateUnreachableTypeTestStub(Assembler* assembler) {
-  // Not implemented on ia32.
-  __ Breakpoint();
-}
-
-void StubCodeCompiler::GenerateLazySpecializeTypeTestStub(
-    Assembler* assembler) {
-  // Not implemented on ia32.
-  __ Breakpoint();
-}
-
-void StubCodeCompiler::GenerateLazySpecializeNullableTypeTestStub(
-    Assembler* assembler) {
-  // Not implemented on ia32.
-  __ Breakpoint();
-}
-
-void StubCodeCompiler::GenerateNullableTypeParameterTypeTestStub(
-    Assembler* assembler) {
-  // Not implemented on ia32.
-  __ Breakpoint();
-}
-
-void StubCodeCompiler::GenerateTypeParameterTypeTestStub(Assembler* assembler) {
-  // Not implemented on ia32.
-  __ Breakpoint();
-}
-
-void StubCodeCompiler::GenerateSlowTypeTestStub(Assembler* assembler) {
-  // Not implemented on ia32.
-  __ Breakpoint();
-}
-
 // Return the current stack pointer address, used to do stack alignment checks.
 // TOS + 0: return address
 // Result in EAX.
@@ -2939,7 +2890,8 @@
   Label call_runtime;
   __ pushl(AllocateTypedDataArrayABI::kLengthReg);
 
-  NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, ECX, &call_runtime, false));
+  NOT_IN_PRODUCT(
+      __ MaybeTraceAllocation(cid, ECX, &call_runtime, Assembler::kFarJump));
   __ movl(EDI, AllocateTypedDataArrayABI::kLengthReg);
   /* Check that length is a positive Smi. */
   /* EDI: requested array length argument. */
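The dropped trailing `false` becomes an explicit Assembler::kFarJump; the TryAllocate hunks in the x64 file below make the same substitution for /*near_jump=*/true. A hedged before/after sketch of the signature change, using stand-in types (the real assembler declarations may differ):

    #include <cstdint>

    enum class Register { ECX };
    struct Label {};
    enum JumpDistance { kFarJump, kNearJump };

    // Before: the jump distance was a positional bool, readable at the
    // call site only through a /*near_jump=*/ comment.
    void MaybeTraceAllocationOld(intptr_t cid, Register temp, Label* trace,
                                 bool near_jump);

    // After: a named enumerator documents itself and cannot be confused
    // with a neighbouring bool argument.
    void MaybeTraceAllocationNew(intptr_t cid, Register temp, Label* trace,
                                 JumpDistance distance);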
diff --git a/runtime/vm/compiler/stub_code_compiler_x64.cc b/runtime/vm/compiler/stub_code_compiler_x64.cc
index 676714f..17d7303 100644
--- a/runtime/vm/compiler/stub_code_compiler_x64.cc
+++ b/runtime/vm/compiler/stub_code_compiler_x64.cc
@@ -172,8 +172,9 @@
     std::function<void()> perform_runtime_call) {
   // We want the saved registers to appear like part of the caller's frame, so
   // we push them before calling EnterStubFrame.
-  __ PushRegisters(kDartAvailableCpuRegs,
-                   save_fpu_registers ? kAllFpuRegistersList : 0);
+  const RegisterSet saved_registers(
+      kDartAvailableCpuRegs, save_fpu_registers ? kAllFpuRegistersList : 0);
+  __ PushRegisters(saved_registers);
 
   const intptr_t kSavedCpuRegisterSlots =
       Utils::CountOneBitsWord(kDartAvailableCpuRegs);
@@ -197,8 +198,7 @@
   // Copy up the return address (in case it was changed).
   __ popq(TMP);
   __ movq(Address(RSP, kAllSavedRegistersSlots * target::kWordSize), TMP);
-  __ PopRegisters(kDartAvailableCpuRegs,
-                  save_fpu_registers ? kAllFpuRegistersList : 0);
+  __ PopRegisters(saved_registers);
   __ ret();
 }
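This hunk threads a single RegisterSet value through the matching PushRegisters/PopRegisters pair instead of repeating the CPU/FPU mask expressions at both sites, so a save and its restore can no longer drift apart. A minimal sketch of the shape, with assumed internals (the real RegisterSet lives elsewhere in the VM):

    #include <cstdint>

    class RegisterSet {
     public:
      RegisterSet(uintptr_t cpu_mask, uintptr_t fpu_mask)
          : cpu_mask_(cpu_mask), fpu_mask_(fpu_mask) {}
      uintptr_t cpu_mask() const { return cpu_mask_; }
      uintptr_t fpu_mask() const { return fpu_mask_; }

     private:
      uintptr_t cpu_mask_;
      uintptr_t fpu_mask_;
    };

    // Push and pop read the same object, so the saved set is stated
    // exactly once per stub.
    void PushRegisters(const RegisterSet& regs);
    void PopRegisters(const RegisterSet& regs);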
 
@@ -230,8 +230,7 @@
 void StubCodeCompiler::GenerateEnterSafepointStub(Assembler* assembler) {
   RegisterSet all_registers;
   all_registers.AddAllGeneralRegisters();
-  __ PushRegisters(all_registers.cpu_registers(),
-                   all_registers.fpu_registers());
+  __ PushRegisters(all_registers);
 
   __ EnterFrame(0);
   __ ReserveAlignedFrameSpace(0);
@@ -239,15 +238,14 @@
   __ CallCFunction(RAX);
   __ LeaveFrame();
 
-  __ PopRegisters(all_registers.cpu_registers(), all_registers.fpu_registers());
+  __ PopRegisters(all_registers);
   __ ret();
 }
 
 void StubCodeCompiler::GenerateExitSafepointStub(Assembler* assembler) {
   RegisterSet all_registers;
   all_registers.AddAllGeneralRegisters();
-  __ PushRegisters(all_registers.cpu_registers(),
-                   all_registers.fpu_registers());
+  __ PushRegisters(all_registers);
 
   __ EnterFrame(0);
   __ ReserveAlignedFrameSpace(0);
@@ -262,7 +260,7 @@
   __ CallCFunction(RAX);
   __ LeaveFrame();
 
-  __ PopRegisters(all_registers.cpu_registers(), all_registers.fpu_registers());
+  __ PopRegisters(all_registers);
   __ ret();
 }
 
@@ -292,6 +290,10 @@
 }
 
 #if !defined(DART_PRECOMPILER)
+static const RegisterSet kArgumentRegisterSet(
+    CallingConventions::kArgumentRegisters,
+    CallingConventions::kFpuArgumentRegisters);
+
 void StubCodeCompiler::GenerateJITCallbackTrampolines(
     Assembler* assembler,
     intptr_t next_callback_id) {
@@ -324,8 +326,7 @@
   __ pushq(RAX);
 
   // Save all registers which might hold arguments.
-  __ PushRegisters(CallingConventions::kArgumentRegisters,
-                   CallingConventions::kFpuArgumentRegisters);
+  __ PushRegisters(kArgumentRegisterSet);
 
   // Load the thread, verify the callback ID and exit the safepoint.
   //
@@ -346,8 +347,7 @@
   }
 
   // Restore the arguments.
-  __ PopRegisters(CallingConventions::kArgumentRegisters,
-                  CallingConventions::kFpuArgumentRegisters);
+  __ PopRegisters(kArgumentRegisterSet);
 
   // Restore the callback ID.
   __ popq(RAX);
@@ -765,9 +765,9 @@
   // RBX: address of first argument in array.
   Label loop, loop_condition;
 #if defined(DEBUG)
-  static const bool kJumpLength = Assembler::kFarJump;
+  static auto const kJumpLength = Assembler::kFarJump;
 #else
-  static const bool kJumpLength = Assembler::kNearJump;
+  static auto const kJumpLength = Assembler::kNearJump;
 #endif  // DEBUG
   __ jmp(&loop_condition, kJumpLength);
   __ Bind(&loop);
@@ -1138,9 +1138,9 @@
     __ Bind(&init_loop);
     __ cmpq(RDI, RCX);
 #if defined(DEBUG)
-    static const bool kJumpLength = Assembler::kFarJump;
+    static auto const kJumpLength = Assembler::kFarJump;
 #else
-    static const bool kJumpLength = Assembler::kNearJump;
+    static auto const kJumpLength = Assembler::kNearJump;
 #endif  // DEBUG
     __ j(ABOVE_EQUAL, &done, kJumpLength);
     // No generational barrier needed, since we are storing null.
@@ -1180,7 +1180,7 @@
   // For test purpose call allocation stub without inline allocation attempt.
   if (!FLAG_use_slow_path) {
     Label slow_case;
-    __ TryAllocate(compiler::MintClass(), &slow_case, /*near_jump=*/true,
+    __ TryAllocate(compiler::MintClass(), &slow_case, Assembler::kNearJump,
                    AllocateMintABI::kResultReg, AllocateMintABI::kTempReg);
     __ Ret();
 
@@ -1200,7 +1200,7 @@
   // For test purpose call allocation stub without inline allocation attempt.
   if (!FLAG_use_slow_path) {
     Label slow_case;
-    __ TryAllocate(compiler::MintClass(), &slow_case, /*near_jump=*/true,
+    __ TryAllocate(compiler::MintClass(), &slow_case, Assembler::kNearJump,
                    AllocateMintABI::kResultReg, AllocateMintABI::kTempReg);
     __ Ret();
 
@@ -1215,6 +1215,10 @@
       /*store_runtime_result_in_result_register=*/true);
 }
 
+static const RegisterSet kCalleeSavedRegisterSet(
+    CallingConventions::kCalleeSaveCpuRegisters,
+    CallingConventions::kCalleeSaveXmmRegisters);
+
 // Called when invoking Dart code from C++ (VM code).
 // Input parameters:
 //   RSP : points to return address.
@@ -1245,8 +1249,7 @@
   __ pushq(kArgDescReg);
 
   // Save C++ ABI callee-saved registers.
-  __ PushRegisters(CallingConventions::kCalleeSaveCpuRegisters,
-                   CallingConventions::kCalleeSaveXmmRegisters);
+  __ PushRegisters(kCalleeSavedRegisterSet);
 
   // If any additional (or fewer) values are pushed, the offsets in
   // target::frame_layout.exit_link_slot_from_entry_fp will need to be changed.
@@ -1356,8 +1359,7 @@
 #endif
 
   // Restore C++ ABI callee-saved registers.
-  __ PopRegisters(CallingConventions::kCalleeSaveCpuRegisters,
-                  CallingConventions::kCalleeSaveXmmRegisters);
+  __ PopRegisters(kCalleeSavedRegisterSet);
   __ set_constant_pool_allowed(false);
 
   // Restore the frame pointer.
@@ -1471,9 +1473,9 @@
       Label loop, entry;
       __ leaq(R13, FieldAddress(RAX, target::Context::variable_offset(0)));
 #if defined(DEBUG)
-      static const bool kJumpLength = Assembler::kFarJump;
+      static auto const kJumpLength = Assembler::kFarJump;
 #else
-      static const bool kJumpLength = Assembler::kNearJump;
+      static auto const kJumpLength = Assembler::kNearJump;
 #endif  // DEBUG
       __ jmp(&entry, kJumpLength);
       __ Bind(&loop);
@@ -1819,9 +1821,9 @@
       __ Bind(&init_loop);
       __ cmpq(kNextFieldReg, kNewTopReg);
 #if defined(DEBUG)
-      static const bool kJumpLength = Assembler::kFarJump;
+      static auto const kJumpLength = Assembler::kFarJump;
 #else
-      static const bool kJumpLength = Assembler::kNearJump;
+      static auto const kJumpLength = Assembler::kNearJump;
 #endif  // DEBUG
       __ j(ABOVE_EQUAL, &done, kJumpLength);
       __ StoreIntoObjectNoBarrier(RAX, Address(kNextFieldReg, 0), kNullReg);
@@ -2566,9 +2568,9 @@
   __ movzxb(RAX, Address(RAX, target::Isolate::single_step_offset()));
   __ cmpq(RAX, Immediate(0));
 #if defined(DEBUG)
-  static const bool kJumpLength = Assembler::kFarJump;
+  static auto const kJumpLength = Assembler::kFarJump;
 #else
-  static const bool kJumpLength = Assembler::kNearJump;
+  static auto const kJumpLength = Assembler::kNearJump;
 #endif  // DEBUG
   __ j(NOT_EQUAL, &stepping, kJumpLength);
   __ Bind(&done_stepping);
@@ -2936,220 +2938,6 @@
   GenerateSubtypeNTestCacheStub(assembler, 6);
 }
 
-// The <X>TypeTestStubs are used to test whether a given value is of a given
-// type. All variants have the same calling convention:
-//
-// Inputs (from TypeTestABI struct):
-//   - kSubtypeTestCacheReg: RawSubtypeTestCache
-//   - kInstanceReg: instance to test against.
-//   - kInstantiatorTypeArgumentsReg : instantiator type arguments (if needed).
-//   - kFunctionTypeArgumentsReg : function type arguments (if needed).
-//
-// See GenerateSubtypeNTestCacheStub for registers that may need saving by the
-// caller.
-//
-// Output (from TypeTestABI struct):
-//   - kResultReg: checked instance.
-//
-// Throws if the check is unsuccessful.
-//
-// Note of warning: The caller will not populate CODE_REG and we have therefore
-// no access to the pool.
-void StubCodeCompiler::GenerateDefaultTypeTestStub(Assembler* assembler) {
-  __ movq(CODE_REG, Address(THR, target::Thread::slow_type_test_stub_offset()));
-  __ jmp(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
-}
-
-// Used instead of DefaultTypeTestStub when null is assignable.
-void StubCodeCompiler::GenerateDefaultNullableTypeTestStub(
-    Assembler* assembler) {
-  Label done;
-
-  // Fast case for 'null'.
-  __ CompareObject(TypeTestABI::kInstanceReg, NullObject());
-  __ BranchIf(EQUAL, &done);
-
-  __ movq(CODE_REG, Address(THR, target::Thread::slow_type_test_stub_offset()));
-  __ jmp(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
-
-  __ Bind(&done);
-  __ Ret();
-}
-
-void StubCodeCompiler::GenerateTopTypeTypeTestStub(Assembler* assembler) {
-  __ Ret();
-}
-
-void StubCodeCompiler::GenerateUnreachableTypeTestStub(Assembler* assembler) {
-  __ Breakpoint();
-}
-
-static void InvokeTypeCheckFromTypeTestStub(Assembler* assembler,
-                                            TypeCheckMode mode) {
-  __ PushObject(NullObject());  // Make room for result.
-  __ pushq(TypeTestABI::kInstanceReg);
-  __ pushq(TypeTestABI::kDstTypeReg);
-  __ pushq(TypeTestABI::kInstantiatorTypeArgumentsReg);
-  __ pushq(TypeTestABI::kFunctionTypeArgumentsReg);
-  __ PushObject(NullObject());
-  __ pushq(TypeTestABI::kSubtypeTestCacheReg);
-  __ PushImmediate(Immediate(target::ToRawSmi(mode)));
-  __ CallRuntime(kTypeCheckRuntimeEntry, 7);
-  __ Drop(1);  // mode
-  __ popq(TypeTestABI::kSubtypeTestCacheReg);
-  __ Drop(1);
-  __ popq(TypeTestABI::kFunctionTypeArgumentsReg);
-  __ popq(TypeTestABI::kInstantiatorTypeArgumentsReg);
-  __ popq(TypeTestABI::kDstTypeReg);
-  __ popq(TypeTestABI::kInstanceReg);
-  __ Drop(1);  // Discard return value.
-}
-
-void StubCodeCompiler::GenerateLazySpecializeTypeTestStub(
-    Assembler* assembler) {
-  __ movq(
-      CODE_REG,
-      Address(THR, target::Thread::lazy_specialize_type_test_stub_offset()));
-  __ EnterStubFrame();
-  InvokeTypeCheckFromTypeTestStub(assembler, kTypeCheckFromLazySpecializeStub);
-  __ LeaveStubFrame();
-  __ Ret();
-}
-
-// Used instead of LazySpecializeTypeTestStub when null is assignable.
-void StubCodeCompiler::GenerateLazySpecializeNullableTypeTestStub(
-    Assembler* assembler) {
-  Label done;
-
-  // Fast case for 'null'.
-  __ CompareObject(TypeTestABI::kInstanceReg, NullObject());
-  __ BranchIf(EQUAL, &done);
-
-  __ movq(
-      CODE_REG,
-      Address(THR, target::Thread::lazy_specialize_type_test_stub_offset()));
-  __ EnterStubFrame();
-  InvokeTypeCheckFromTypeTestStub(assembler, kTypeCheckFromLazySpecializeStub);
-  __ LeaveStubFrame();
-
-  __ Bind(&done);
-  __ Ret();
-}
-
-static void BuildTypeParameterTypeTestStub(Assembler* assembler,
-                                           bool allow_null) {
-  Label done;
-
-  if (allow_null) {
-    __ CompareObject(TypeTestABI::kInstanceReg, NullObject());
-    __ BranchIf(EQUAL, &done);
-  }
-
-  Label function_type_param;
-  __ cmpw(FieldAddress(TypeTestABI::kDstTypeReg,
-                       TypeParameter::parameterized_class_id_offset()),
-          Immediate(kFunctionCid));
-  __ BranchIf(EQUAL, &function_type_param);
-
-  auto handle_case = [&](Register tav) {
-    __ CompareObject(tav, NullObject());
-    __ BranchIf(EQUAL, &done);
-    __ movzxw(
-        TypeTestABI::kScratchReg,
-        FieldAddress(TypeTestABI::kDstTypeReg, TypeParameter::index_offset()));
-    __ movq(TypeTestABI::kScratchReg,
-            FieldAddress(tav, TypeTestABI::kScratchReg, TIMES_8,
-                         target::TypeArguments::InstanceSize()));
-    __ jmp(FieldAddress(TypeTestABI::kScratchReg,
-                        AbstractType::type_test_stub_entry_point_offset()));
-  };
-
-  // Class type parameter: If dynamic we're done, otherwise dereference type
-  // parameter and tail call.
-  handle_case(TypeTestABI::kInstantiatorTypeArgumentsReg);
-
-  // Function type parameter: If dynamic we're done, otherwise dereference type
-  // parameter and tail call.
-  __ Bind(&function_type_param);
-  handle_case(TypeTestABI::kFunctionTypeArgumentsReg);
-
-  __ Bind(&done);
-  __ Ret();
-}
-
-void StubCodeCompiler::GenerateNullableTypeParameterTypeTestStub(
-    Assembler* assembler) {
-  BuildTypeParameterTypeTestStub(assembler, /*allow_null=*/true);
-}
-
-void StubCodeCompiler::GenerateTypeParameterTypeTestStub(Assembler* assembler) {
-  BuildTypeParameterTypeTestStub(assembler, /*allow_null=*/false);
-}
-
-void StubCodeCompiler::GenerateSlowTypeTestStub(Assembler* assembler) {
-  Label done, call_runtime;
-
-  if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
-    __ movq(CODE_REG,
-            Address(THR, target::Thread::slow_type_test_stub_offset()));
-  }
-  __ EnterStubFrame();
-
-  // If the subtype-cache is null, it needs to be lazily-created by the runtime.
-  __ CompareObject(TypeTestABI::kSubtypeTestCacheReg, NullObject());
-  __ BranchIf(EQUAL, &call_runtime);
-
-  // If this is not a [Type] object, we'll go to the runtime.
-  Label is_simple_case, is_complex_case;
-  __ LoadClassId(TypeTestABI::kScratchReg, TypeTestABI::kDstTypeReg);
-  __ cmpq(TypeTestABI::kScratchReg, Immediate(kTypeCid));
-  __ BranchIf(NOT_EQUAL, &is_complex_case);
-
-  // Check whether this [Type] is instantiated/uninstantiated.
-  __ cmpb(
-      FieldAddress(TypeTestABI::kDstTypeReg, target::Type::type_state_offset()),
-      Immediate(target::AbstractTypeLayout::kTypeStateFinalizedInstantiated));
-  __ BranchIf(NOT_EQUAL, &is_complex_case);
-
-  // Check whether this [Type] is a function type.
-  __ movq(
-      TypeTestABI::kScratchReg,
-      FieldAddress(TypeTestABI::kDstTypeReg, target::Type::signature_offset()));
-  __ CompareObject(TypeTestABI::kScratchReg, NullObject());
-  __ BranchIf(NOT_EQUAL, &is_complex_case);
-
-  // This [Type] could be a FutureOr. Subtype2TestCache does not support Smi.
-  __ BranchIfSmi(TypeTestABI::kInstanceReg, &is_complex_case);
-
-  // Fall through to &is_simple_case
-
-  __ Bind(&is_simple_case);
-  {
-    __ Call(StubCodeSubtype2TestCache());
-    __ CompareObject(TypeTestABI::kSubtypeTestCacheResultReg,
-                     CastHandle<Object>(TrueObject()));
-    __ BranchIf(EQUAL, &done);  // Cache said: yes.
-    __ Jump(&call_runtime);
-  }
-
-  __ Bind(&is_complex_case);
-  {
-    __ Call(StubCodeSubtype6TestCache());
-    __ CompareObject(TypeTestABI::kSubtypeTestCacheResultReg,
-                     CastHandle<Object>(TrueObject()));
-    __ BranchIf(EQUAL, &done);  // Cache said: yes.
-    // Fall through to &call_runtime
-  }
-
-  __ Bind(&call_runtime);
-
-  InvokeTypeCheckFromTypeTestStub(assembler, kTypeCheckFromSlowStub);
-
-  __ Bind(&done);
-  __ LeaveStubFrame();
-  __ Ret();
-}
-
 // Return the current stack pointer address, used to do stack alignment
 // checks.
 // TOS + 0: return address
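The x64 type-testing stubs deleted above mirror the arm64 deletions earlier and the ia32 Breakpoint placeholders; together with the VM_TYPE_TESTING_STUB_CODE_LIST extraction in stub_code_list.h below, this suggests the generators moved to a shared, architecture-independent home, though the receiving file is outside this diff. For reference, the cache-selection logic of the removed GenerateSlowTypeTestStub, restated as a hedged sketch (names mirror the removed code, not any real runtime API):

    #include <cstdint>

    typedef int32_t classid_t;
    const classid_t kTypeCid = 1;  // placeholder value

    // The 2-entry cache probe is only attempted for an instantiated,
    // non-function [Type] checked against a non-Smi instance; any other
    // combination takes the 6-entry probe, and either probe falls back
    // to the runtime type check on a cache miss.
    bool UseSimpleCache(classid_t dst_type_cid, bool is_instantiated,
                        bool is_function_type, bool instance_is_smi) {
      return dst_type_cid == kTypeCid && is_instantiated &&
             !is_function_type && !instance_is_smi;
    }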
@@ -3729,7 +3517,8 @@
   Label call_runtime;
   __ pushq(AllocateTypedDataArrayABI::kLengthReg);
 
-  NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, &call_runtime, false));
+  NOT_IN_PRODUCT(
+      __ MaybeTraceAllocation(cid, &call_runtime, Assembler::kFarJump));
   __ movq(RDI, AllocateTypedDataArrayABI::kLengthReg);
   /* Check that length is a positive Smi. */
   /* RDI: requested array length argument. */
diff --git a/runtime/vm/constants_arm.h b/runtime/vm/constants_arm.h
index 23b16b3..dd2f3a7 100644
--- a/runtime/vm/constants_arm.h
+++ b/runtime/vm/constants_arm.h
@@ -339,6 +339,10 @@
   // original value is needed after the call.
   static const Register kSubtypeTestCacheResultReg = kSubtypeTestCacheReg;
 
+  // Registers that need saving across SubtypeTestCacheStub calls.
+  static const intptr_t kSubtypeTestCacheStubCallerSavedRegisters =
+      (1 << kSubtypeTestCacheReg) | (1 << kDstTypeReg);
+
   static const intptr_t kAbiRegisters =
       (1 << kInstanceReg) | (1 << kDstTypeReg) |
       (1 << kInstantiatorTypeArgumentsReg) | (1 << kFunctionTypeArgumentsReg) |
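kSubtypeTestCacheStubCallerSavedRegisters uses the same convention as kAbiRegisters just below it: a register set is an intptr_t bitmask with bit n set for register number n. A standalone sketch with hypothetical register encodings:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int kSubtypeTestCacheReg = 3;  // hypothetical encodings
      const int kDstTypeReg = 8;
      const intptr_t saved =
          (1 << kSubtypeTestCacheReg) | (1 << kDstTypeReg);
      assert((saved & (1 << kDstTypeReg)) != 0);  // membership test
      assert((saved & (1 << 0)) == 0);            // register 0 is not saved
      return 0;
    }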
@@ -702,6 +706,24 @@
   kBranchOffsetMask = 0x00ffffff
 };
 
+enum ScaleFactor {
+  TIMES_1 = 0,
+  TIMES_2 = 1,
+  TIMES_4 = 2,
+  TIMES_8 = 3,
+  TIMES_16 = 4,
+// Don't use (dart::)kWordSizeLog2, as this must also work for cross-word-size
+// builds. If this header is included, we know the target is 32 bit.
+#if defined(TARGET_ARCH_IS_32_BIT)
+  // Used for Smi-boxed indices.
+  TIMES_HALF_WORD_SIZE = kInt32SizeLog2 - 1,
+  // Used for unboxed indices.
+  TIMES_WORD_SIZE = kInt32SizeLog2,
+#else
+#error "Unexpected word size"
+#endif
+};
+
 // The class Instr enables access to individual fields defined in the ARM
 // architecture instruction set encoding as described in figure A3-1.
 //
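TIMES_HALF_WORD_SIZE exists for Smi-boxed indices: with Dart's Smi tagging, a small integer i is stored as i << 1, so scaling the tagged value by half the word size yields exactly i * word size. A quick check of that arithmetic for the 32-bit case this header covers:

    #include <cassert>
    #include <cstdint>

    int main() {
      const intptr_t kWordSize = 4;           // 32-bit target
      const intptr_t kTimesHalfWordSize = 1;  // kInt32SizeLog2 - 1
      for (intptr_t i = 0; i < 1000; ++i) {
        const intptr_t tagged = i << 1;       // Smi-boxed index
        assert((tagged << kTimesHalfWordSize) == i * kWordSize);
      }
      return 0;
    }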
diff --git a/runtime/vm/constants_arm64.h b/runtime/vm/constants_arm64.h
index 898f10e..7b18369 100644
--- a/runtime/vm/constants_arm64.h
+++ b/runtime/vm/constants_arm64.h
@@ -171,6 +171,10 @@
   // registers above, for it is also used internally as kNullReg in those stubs.
   static const Register kSubtypeTestCacheResultReg = R7;
 
+  // Registers that need saving across SubtypeTestCacheStub calls.
+  static const intptr_t kSubtypeTestCacheStubCallerSavedRegisters =
+      (1 << kFunctionTypeArgumentsReg) | (1 << kSubtypeTestCacheReg);
+
   static const intptr_t kAbiRegisters =
       (1 << kInstanceReg) | (1 << kDstTypeReg) |
       (1 << kInstantiatorTypeArgumentsReg) | (1 << kFunctionTypeArgumentsReg) |
@@ -491,64 +495,6 @@
   B31 = (1 << 31),
 };
 
-enum OperandSize {
-  kByte,
-  kUnsignedByte,
-  kHalfword,
-  kUnsignedHalfword,
-  kWord,
-  kUnsignedWord,
-  kDoubleWord,
-  kSWord,
-  kDWord,
-  kQWord,
-};
-
-static inline int Log2OperandSizeBytes(OperandSize os) {
-  switch (os) {
-    case kByte:
-    case kUnsignedByte:
-      return 0;
-    case kHalfword:
-    case kUnsignedHalfword:
-      return 1;
-    case kWord:
-    case kUnsignedWord:
-    case kSWord:
-      return 2;
-    case kDoubleWord:
-    case kDWord:
-      return 3;
-    case kQWord:
-      return 4;
-    default:
-      UNREACHABLE();
-      break;
-  }
-  return -1;
-}
-
-static inline bool IsSignedOperand(OperandSize os) {
-  switch (os) {
-    case kByte:
-    case kHalfword:
-    case kWord:
-      return true;
-    case kUnsignedByte:
-    case kUnsignedHalfword:
-    case kUnsignedWord:
-    case kDoubleWord:
-    case kSWord:
-    case kDWord:
-    case kQWord:
-      return false;
-    default:
-      UNREACHABLE();
-      break;
-  }
-  return false;
-}
-
 // Opcodes from C3
 // C3.1.
 enum MainOp {
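OperandSize, Log2OperandSizeBytes, and IsSignedOperand leave this arm64-only header; the kHalfword -> kTwoBytes renames in the stub-compiler hunk near the top of this diff imply a shared, byte-count-named OperandSize now lives in a common header that is not part of this diff. A hedged sketch of the naming scheme those renames imply (only the kTwoBytes pair is confirmed here; the rest is extrapolated):

    enum OperandSize {
      kByte,
      kUnsignedByte,
      kTwoBytes,           // was kHalfword
      kUnsignedTwoBytes,   // was kUnsignedHalfword
      kFourBytes,          // was kWord (extrapolated)
      kUnsignedFourBytes,  // was kUnsignedWord (extrapolated)
      kEightBytes          // was kDoubleWord (extrapolated)
    };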
@@ -1099,6 +1045,24 @@
   return result;
 }
 
+enum ScaleFactor {
+  TIMES_1 = 0,
+  TIMES_2 = 1,
+  TIMES_4 = 2,
+  TIMES_8 = 3,
+  TIMES_16 = 4,
+// We can't include vm/compiler/runtime_api.h, so just be explicit instead
+// of using (dart::)kWordSizeLog2.
+#if defined(TARGET_ARCH_IS_64_BIT)
+  // Used for Smi-boxed indices.
+  TIMES_HALF_WORD_SIZE = kInt64SizeLog2 - 1,
+  // Used for unboxed indices.
+  TIMES_WORD_SIZE = kInt64SizeLog2,
+#else
+#error "Unexpected word size"
+#endif
+};
+
 // The class Instr enables access to individual fields defined in the ARM
 // architecture instruction set encoding as described in figure A3-1.
 //
@@ -1114,6 +1078,8 @@
  public:
   enum { kInstrSize = 4, kInstrSizeLog2 = 2, kPCReadOffset = 8 };
 
+  enum class WideSize { k32Bits, k64Bits };
+
   static const int32_t kNopInstruction = HINT;  // hint #0 === nop.
 
   // Reserved brk and hlt instruction codes.
@@ -1157,10 +1123,9 @@
                               Register rd,
                               uint16_t imm,
                               int hw,
-                              OperandSize sz) {
+                              WideSize sz) {
     ASSERT((hw >= 0) && (hw <= 3));
-    ASSERT((sz == kDoubleWord) || (sz == kWord));
-    const int32_t size = (sz == kDoubleWord) ? B31 : 0;
+    const int32_t size = (sz == WideSize::k64Bits) ? B31 : 0;
     SetInstructionBits(op | size | (static_cast<int32_t>(rd) << kRdShift) |
                        (static_cast<int32_t>(hw) << kHWShift) |
                        (static_cast<int32_t>(imm) << kImm16Shift));
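Narrowing MoveWide's size parameter from OperandSize to a dedicated two-value enum class makes the deleted runtime check ASSERT((sz == kDoubleWord) || (sz == kWord)) unnecessary: invalid widths are simply unrepresentable. A self-contained sketch of that design move (kB31 mirrors the B31 = (1 << 31) constant defined earlier in this header):

    #include <cstdint>

    enum class WideSize { k32Bits, k64Bits };

    const uint32_t kB31 = 1u << 31;  // stand-in for B31

    // With only two enumerators, no ASSERT on sz is needed.
    uint32_t SizeBits(WideSize sz) {
      return (sz == WideSize::k64Bits) ? kB31 : 0u;
    }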
diff --git a/runtime/vm/constants_ia32.h b/runtime/vm/constants_ia32.h
index 94db1e2..a07e20f 100644
--- a/runtime/vm/constants_ia32.h
+++ b/runtime/vm/constants_ia32.h
@@ -205,7 +205,16 @@
   TIMES_4 = 2,
   TIMES_8 = 3,
   TIMES_16 = 4,
-  TIMES_HALF_WORD_SIZE = kWordSizeLog2 - 1
+// We can't include vm/compiler/runtime_api.h, so just be explicit instead
+// of using (dart::)kWordSizeLog2.
+#if defined(TARGET_ARCH_IS_32_BIT)
+  // Used for Smi-boxed indices.
+  TIMES_HALF_WORD_SIZE = kInt32SizeLog2 - 1,
+  // Used for unboxed indices.
+  TIMES_WORD_SIZE = kInt32SizeLog2,
+#else
+#error "Unexpected word size"
+#endif
 };
 
 class Instr {
diff --git a/runtime/vm/constants_x64.h b/runtime/vm/constants_x64.h
index 80e37ab..e154cb2 100644
--- a/runtime/vm/constants_x64.h
+++ b/runtime/vm/constants_x64.h
@@ -161,6 +161,9 @@
   // registers above, for it is also used internally as kNullReg in those stubs.
   static const Register kSubtypeTestCacheResultReg = R8;
 
+  // No registers need saving across SubtypeTestCacheStub calls.
+  static const intptr_t kSubtypeTestCacheStubCallerSavedRegisters = 0;
+
   static const intptr_t kAbiRegisters =
       (1 << kInstanceReg) | (1 << kDstTypeReg) |
       (1 << kInstantiatorTypeArgumentsReg) | (1 << kFunctionTypeArgumentsReg) |
@@ -281,7 +284,16 @@
   // https://software.intel.com/en-us/download/intel-64-and-ia-32-architectures-sdm-combined-volumes-1-2a-2b-2c-2d-3a-3b-3c-3d-and-4
   // 3.7.5 Specifying an Offset
   TIMES_16 = 4,
-  TIMES_HALF_WORD_SIZE = kWordSizeLog2 - 1
+// We can't include vm/compiler/runtime_api.h, so just be explicit instead
+// of using (dart::)kWordSizeLog2.
+#if defined(TARGET_ARCH_IS_64_BIT)
+  // Used for Smi-boxed indices.
+  TIMES_HALF_WORD_SIZE = kInt64SizeLog2 - 1,
+  // Used for unboxed indices.
+  TIMES_WORD_SIZE = kInt64SizeLog2,
+#else
+#error "Unexpected word size"
+#endif
 };
 
 #define R(reg) (1 << (reg))
diff --git a/runtime/vm/runtime_entry_arm.cc b/runtime/vm/runtime_entry_arm.cc
index 752ca63..b2cd9da 100644
--- a/runtime/vm/runtime_entry_arm.cc
+++ b/runtime/vm/runtime_entry_arm.cc
@@ -51,8 +51,7 @@
   if (runtime_entry->is_leaf()) {
     ASSERT(argument_count == runtime_entry->argument_count());
     __ LoadFromOffset(
-        kWord, TMP, THR,
-        compiler::target::Thread::OffsetFromThread(runtime_entry));
+        TMP, THR, compiler::target::Thread::OffsetFromThread(runtime_entry));
     __ str(TMP,
            compiler::Address(THR, compiler::target::Thread::vm_tag_offset()));
     __ blx(TMP);
@@ -65,8 +64,7 @@
     // Argument count is not checked here, but in the runtime entry for a more
     // informative error message.
     __ LoadFromOffset(
-        kWord, R9, THR,
-        compiler::target::Thread::OffsetFromThread(runtime_entry));
+        R9, THR, compiler::target::Thread::OffsetFromThread(runtime_entry));
     __ LoadImmediate(R4, argument_count);
     __ BranchLinkToRuntime();
   }
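Dropping the leading kWord argument works only if LoadFromOffset now defaults to a word-sized load. The declaration below is an assumption about the new assembler signature, sketched with stand-in types, not a quote of it:

    #include <cstdint>

    enum class Register { TMP, THR, R9 };       // stand-ins
    enum OperandSize { kFourBytes /* ... */ };  // assumed post-rename name

    // Hypothetical post-change shape: the operand size becomes a defaulted
    // trailing parameter, so the word-sized loads in this hunk name no
    // size at all.
    void LoadFromOffset(Register dst, Register base, int32_t offset,
                        OperandSize sz = kFourBytes);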
diff --git a/runtime/vm/stub_code_list.h b/runtime/vm/stub_code_list.h
index d127701..2725513 100644
--- a/runtime/vm/stub_code_list.h
+++ b/runtime/vm/stub_code_list.h
@@ -7,6 +7,17 @@
 
 namespace dart {
 
+#define VM_TYPE_TESTING_STUB_CODE_LIST(V)                                      \
+  V(DefaultTypeTest)                                                           \
+  V(DefaultNullableTypeTest)                                                   \
+  V(TopTypeTypeTest)                                                           \
+  V(UnreachableTypeTest)                                                       \
+  V(TypeParameterTypeTest)                                                     \
+  V(NullableTypeParameterTypeTest)                                             \
+  V(SlowTypeTest)                                                              \
+  V(LazySpecializeTypeTest)                                                    \
+  V(LazySpecializeNullableTypeTest)
+
 // List of stubs created in the VM isolate; these stubs are shared by the
 // different isolates running in this Dart process.
 #define VM_STUB_CODE_LIST(V)                                                   \
@@ -79,15 +90,7 @@
   V(Subtype2TestCache)                                                         \
   V(Subtype4TestCache)                                                         \
   V(Subtype6TestCache)                                                         \
-  V(DefaultTypeTest)                                                           \
-  V(DefaultNullableTypeTest)                                                   \
-  V(TopTypeTypeTest)                                                           \
-  V(UnreachableTypeTest)                                                       \
-  V(TypeParameterTypeTest)                                                     \
-  V(NullableTypeParameterTypeTest)                                             \
-  V(SlowTypeTest)                                                              \
-  V(LazySpecializeTypeTest)                                                    \
-  V(LazySpecializeNullableTypeTest)                                            \
+  VM_TYPE_TESTING_STUB_CODE_LIST(V)                                            \
   V(CallClosureNoSuchMethod)                                                   \
   V(FrameAwaitingMaterialization)                                              \
   V(AsynchronousGapMarker)                                                     \
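VM_TYPE_TESTING_STUB_CODE_LIST is a standard X-macro extraction: the type-testing stub names are defined once and spliced back into VM_STUB_CODE_LIST, so other sites can iterate over just the type-testing subset. A compilable sketch of how a consumer expands it, with an abridged list (DECLARE_STUB is hypothetical; the Generate##name##Stub shape matches the StubCodeCompiler methods deleted above):

    #define VM_TYPE_TESTING_STUB_CODE_LIST(V)                                  \
      V(DefaultTypeTest)                                                       \
      V(TopTypeTypeTest)

    class Assembler;

    // Expands to one declaration per listed name:
    //   void GenerateDefaultTypeTestStub(Assembler* assembler);
    //   void GenerateTopTypeTypeTestStub(Assembler* assembler);
    #define DECLARE_STUB(name) void Generate##name##Stub(Assembler* assembler);
    VM_TYPE_TESTING_STUB_CODE_LIST(DECLARE_STUB)
    #undef DECLARE_STUB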
diff --git a/tools/VERSION b/tools/VERSION
index 30e4294..8870174 100644
--- a/tools/VERSION
+++ b/tools/VERSION
@@ -27,5 +27,5 @@
 MAJOR 2
 MINOR 12
 PATCH 0
-PRERELEASE 53
+PRERELEASE 54
 PRERELEASE_PATCH 0
\ No newline at end of file