Version 2.17.0-35.0.dev

Merge commit '04ba20aa9847d40844446bfe21d37bb11a665d3a' into 'dev'
diff --git a/AUTHORS b/AUTHORS
index b3a3ac6..5879a77 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -35,3 +35,4 @@
 K. Alex Gann <k.alexgann@gmail.com>
 Kenneth Endfinger <kaendfinger@gmail.com>
 Cristian Almstrand <cristian.almstrand@gmail.com>
+Ryan Macnak <rmacnak@gmail.com>
diff --git a/benchmarks/FfiMemory/dart/FfiMemory.dart b/benchmarks/FfiMemory/dart/FfiMemory.dart
index 686d6bd..881eff2 100644
--- a/benchmarks/FfiMemory/dart/FfiMemory.dart
+++ b/benchmarks/FfiMemory/dart/FfiMemory.dart
@@ -33,6 +33,8 @@
   Abi.linuxArm64: Uint64(),
   Abi.linuxIA32: Uint32(),
   Abi.linuxX64: Uint64(),
+  Abi.linuxRiscv32: Uint32(),
+  Abi.linuxRiscv64: Uint64(),
   Abi.macosArm64: Uint64(),
   Abi.macosX64: Uint64(),
   Abi.windowsArm64: Uint64(),
diff --git a/benchmarks/FfiMemory/dart2/FfiMemory.dart b/benchmarks/FfiMemory/dart2/FfiMemory.dart
index a170aaa..86f7e1f 100644
--- a/benchmarks/FfiMemory/dart2/FfiMemory.dart
+++ b/benchmarks/FfiMemory/dart2/FfiMemory.dart
@@ -35,6 +35,8 @@
   Abi.linuxArm64: Uint64(),
   Abi.linuxIA32: Uint32(),
   Abi.linuxX64: Uint64(),
+  Abi.linuxRiscv32: Uint32(),
+  Abi.linuxRiscv64: Uint64(),
   Abi.macosArm64: Uint64(),
   Abi.macosX64: Uint64(),
   Abi.windowsArm64: Uint64(),
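
The two benchmark copies above extend the same per-ABI lookup table so the new RISC-V targets pick a pointer-width integer type: RV32 is a 32-bit ABI (Uint32), RV64 a 64-bit one (Uint64). A minimal sketch of the pattern, assuming a dart:ffi that already has the RISC-V `Abi` entries (as this SDK does); the map here stores word sizes in bytes instead of the benchmark's NativeType instances:

import 'dart:ffi';

// Per-ABI lookup, mirroring the shape of the benchmark's map.
const Map<Abi, int> wordSizeInBytes = {
  Abi.linuxIA32: 4,
  Abi.linuxX64: 8,
  Abi.linuxRiscv32: 4, // new 32-bit RISC-V ABI
  Abi.linuxRiscv64: 8, // new 64-bit RISC-V ABI
};

void main() {
  // Abi.current() reports the ABI the program is running on.
  print('word size: ${wordSizeInBytes[Abi.current()]} bytes');
}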
diff --git a/build/config/compiler/BUILD.gn b/build/config/compiler/BUILD.gn
index a4614c2..b38dc18 100644
--- a/build/config/compiler/BUILD.gn
+++ b/build/config/compiler/BUILD.gn
@@ -258,6 +258,12 @@
       } else if (current_cpu == "arm64") {
         cflags += [ "--target=aarch64-linux-gnu" ]
         ldflags += [ "--target=aarch64-linux-gnu" ]
+      } else if (current_cpu == "riscv32") {
+        cflags += [ "--target=riscv32-linux-gnu" ]
+        ldflags += [ "--target=riscv32-linux-gnu" ]
+      } else if (current_cpu == "riscv64") {
+        cflags += [ "--target=riscv64-linux-gnu" ]
+        ldflags += [ "--target=riscv64-linux-gnu" ]
       } else if (current_cpu == "x86") {
         cflags += [ "--target=i386-linux-gnu" ]
         ldflags += [ "--target=i386-linux-gnu" ]
@@ -554,7 +560,7 @@
   if (is_clang) {
     default_warning_flags += [
       "-Wno-tautological-constant-compare",
-      "-Wno-unused-but-set-variable", # icu
+      "-Wno-unused-but-set-variable",  # icu
     ]
   } else {
     default_warning_flags +=
@@ -700,6 +706,7 @@
   common_optimize_on_ldflags = [
     # Linker GC.
     "/OPT:REF",
+
     # Identical code folding to reduce size.
     # Warning: This changes C/C++ semantics of function pointer comparison.
     "/OPT:ICF",
diff --git a/build/toolchain/linux/BUILD.gn b/build/toolchain/linux/BUILD.gn
index 4692784..5f9bc32 100644
--- a/build/toolchain/linux/BUILD.gn
+++ b/build/toolchain/linux/BUILD.gn
@@ -156,3 +156,75 @@
   toolchain_os = "linux"
   is_clang = false
 }
+
+gcc_toolchain("riscv32") {
+  prefix = "riscv32-linux-gnu-"
+  if (toolchain_prefix != "") {
+    prefix = toolchain_prefix
+  }
+
+  cc = "${compiler_prefix}${prefix}gcc"
+  cxx = "${compiler_prefix}${prefix}g++"
+
+  ar = "${prefix}ar"
+  ld = cxx
+  readelf = "${prefix}readelf"
+  nm = "${prefix}nm"
+  strip = "${prefix}strip"
+
+  toolchain_cpu = "riscv32"
+  toolchain_os = "linux"
+  is_clang = false
+}
+
+gcc_toolchain("clang_riscv32") {
+  prefix = rebase_path("//buildtools/linux-x64/clang/bin", root_build_dir)
+  cc = "${compiler_prefix}${prefix}/clang"
+  cxx = "${compiler_prefix}${prefix}/clang++"
+
+  readelf = "readelf"
+  nm = "${prefix}/llvm-nm"
+  ar = "${prefix}/llvm-ar"
+  ld = cxx
+  llvm_objcopy = "${prefix}/llvm-objcopy"
+
+  toolchain_cpu = "riscv32"
+  toolchain_os = "linux"
+  is_clang = true
+}
+
+gcc_toolchain("riscv64") {
+  prefix = "riscv64-linux-gnu-"
+  if (toolchain_prefix != "") {
+    prefix = toolchain_prefix
+  }
+
+  cc = "${compiler_prefix}${prefix}gcc"
+  cxx = "${compiler_prefix}${prefix}g++"
+
+  ar = "${prefix}ar"
+  ld = cxx
+  readelf = "${prefix}readelf"
+  nm = "${prefix}nm"
+  strip = "${prefix}strip"
+
+  toolchain_cpu = "riscv64"
+  toolchain_os = "linux"
+  is_clang = false
+}
+
+gcc_toolchain("clang_riscv64") {
+  prefix = rebase_path("//buildtools/linux-x64/clang/bin", root_build_dir)
+  cc = "${compiler_prefix}${prefix}/clang"
+  cxx = "${compiler_prefix}${prefix}/clang++"
+
+  readelf = "readelf"
+  nm = "${prefix}/llvm-nm"
+  ar = "${prefix}/llvm-ar"
+  ld = cxx
+  llvm_objcopy = "${prefix}/llvm-objcopy"
+
+  toolchain_cpu = "riscv64"
+  toolchain_os = "linux"
+  is_clang = true
+}
diff --git a/pkg/_fe_analyzer_shared/pubspec.yaml b/pkg/_fe_analyzer_shared/pubspec.yaml
index 16c442b..cfc2267 100644
--- a/pkg/_fe_analyzer_shared/pubspec.yaml
+++ b/pkg/_fe_analyzer_shared/pubspec.yaml
@@ -1,5 +1,5 @@
 name: _fe_analyzer_shared
-version: 33.0.0
+version: 34.0.0
 description: Logic that is shared between the front_end and analyzer packages.
 homepage: https://github.com/dart-lang/sdk/tree/master/pkg/_fe_analyzer_shared
 
diff --git a/pkg/analysis_server/test/src/services/correction/fix/analysis_options/remove_setting_test.dart b/pkg/analysis_server/test/src/services/correction/fix/analysis_options/remove_setting_test.dart
index c378884..9276b71 100644
--- a/pkg/analysis_server/test/src/services/correction/fix/analysis_options/remove_setting_test.dart
+++ b/pkg/analysis_server/test/src/services/correction/fix/analysis_options/remove_setting_test.dart
@@ -18,13 +18,13 @@
     await assertHasFix('''
 analyzer:
   enable-experiment:
-    - non-nullable
+    - super-parameters
   language:
     enableSuperMixins: true
 ''', '''
 analyzer:
   enable-experiment:
-    - non-nullable
+    - super-parameters
 ''');
   }
 
@@ -33,11 +33,11 @@
 analyzer:
   enable-experiment:
     - not-an-experiment
-    - non-nullable
+    - super-parameters
 ''', '''
 analyzer:
   enable-experiment:
-    - non-nullable
+    - super-parameters
 ''');
   }
 
@@ -45,12 +45,12 @@
     await assertHasFix('''
 analyzer:
   enable-experiment:
-    - non-nullable
+    - super-parameters
     - not-an-experiment
 ''', '''
 analyzer:
   enable-experiment:
-    - non-nullable
+    - super-parameters
 ''');
   }
 
diff --git a/pkg/analyzer/CHANGELOG.md b/pkg/analyzer/CHANGELOG.md
index 4d6ca01..65409ce 100644
--- a/pkg/analyzer/CHANGELOG.md
+++ b/pkg/analyzer/CHANGELOG.md
@@ -1,4 +1,4 @@
-## 3.2.0-dev
+## 3.2.0
 * Deprecated `changes` getter in `File` and `Folder`, use `watch()` instead.
 
 ## 3.1.0
diff --git a/pkg/analyzer/lib/dart/ast/ast.dart b/pkg/analyzer/lib/dart/ast/ast.dart
index a0b7a9c..748df11 100644
--- a/pkg/analyzer/lib/dart/ast/ast.dart
+++ b/pkg/analyzer/lib/dart/ast/ast.dart
@@ -402,6 +402,8 @@
 
   R? visitConstructorReference(ConstructorReference node);
 
+  R? visitConstructorSelector(ConstructorSelector node);
+
   R? visitContinueStatement(ContinueStatement node);
 
   R? visitDeclaredIdentifier(DeclaredIdentifier node);
@@ -418,6 +420,8 @@
 
   R? visitEmptyStatement(EmptyStatement node);
 
+  R? visitEnumConstantArguments(EnumConstantArguments node);
+
   R? visitEnumConstantDeclaration(EnumConstantDeclaration node);
 
   R? visitEnumDeclaration(EnumDeclaration node);
@@ -1365,6 +1369,20 @@
   ConstructorElement? get staticElement;
 }
 
+/// The name of a constructor being invoked.
+///
+///    constructorSelector ::=
+///        '.' identifier
+///
+/// Clients may not extend, implement or mix-in this class.
+abstract class ConstructorSelector implements AstNode {
+  /// Return the constructor name.
+  SimpleIdentifier get name;
+
+  /// Return the period before the constructor name.
+  Token get period;
+}
+
 /// A continue statement.
 ///
 ///    continueStatement ::=
@@ -1563,10 +1581,41 @@
   Token get semicolon;
 }
 
+/// The arguments part of an enum constant.
+///
+///    enumConstantArguments ::=
+///        [TypeArgumentList]? [ConstructorSelector]? [ArgumentList]
+///
+/// Clients may not extend, implement or mix-in this class.
+abstract class EnumConstantArguments implements AstNode {
+  /// Return the explicit arguments (there are always implicit `index` and
+  /// `name` leading arguments) to the invoked constructor.
+  ArgumentList get argumentList;
+
+  /// Return the selector of the constructor that is invoked by this enum
+  /// constant, or `null` if the default constructor is invoked.
+  ConstructorSelector? get constructorSelector;
+
+  /// Return the type arguments applied to the enclosing enum declaration
+  /// when invoking the constructor, or `null` if no type arguments were
+  /// provided.
+  TypeArgumentList? get typeArguments;
+}
+
 /// The declaration of an enum constant.
 ///
 /// Clients may not extend, implement or mix-in this class.
 abstract class EnumConstantDeclaration implements Declaration {
+  /// Return the explicit arguments (there are always implicit `index` and
+  /// `name` leading arguments) to the invoked constructor, or `null` if this
+  /// constant does not provide any explicit arguments.
+  EnumConstantArguments? get arguments;
+
+  /// Return the constructor that is invoked by this enum constant, or `null`
+  /// if the AST structure has not been resolved, or if the constructor could
+  /// not be resolved.
+  ConstructorElement? get constructorElement;
+
   /// Return the name of the constant.
   SimpleIdentifier get name;
 }
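
These two node types model the new enhanced-enum constant syntax. A minimal sketch of the source shape they describe, assuming the enhanced-enums experiment is enabled: in the constant `v<int>.named(42)`, `<int>` is the TypeArgumentList, `.named` the ConstructorSelector, and `(42)` the ArgumentList.

// Sketch: the pieces of an enhanced-enum constant and the
// EnumConstantArguments fields they populate.
enum E<T> {
  v<int>.named(42); // typeArguments, constructorSelector, argumentList

  final T value;
  const E.named(this.value);
}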
diff --git a/pkg/analyzer/lib/dart/ast/visitor.dart b/pkg/analyzer/lib/dart/ast/visitor.dart
index a77a83e..309fcf1 100644
--- a/pkg/analyzer/lib/dart/ast/visitor.dart
+++ b/pkg/analyzer/lib/dart/ast/visitor.dart
@@ -230,6 +230,9 @@
       visitExpression(node);
 
   @override
+  R? visitConstructorSelector(ConstructorSelector node) => visitNode(node);
+
+  @override
   R? visitContinueStatement(ContinueStatement node) => visitStatement(node);
 
   R? visitDeclaration(Declaration node) => visitAnnotatedNode(node);
@@ -259,6 +262,9 @@
   R? visitEmptyStatement(EmptyStatement node) => visitStatement(node);
 
   @override
+  R? visitEnumConstantArguments(EnumConstantArguments node) => visitNode(node);
+
+  @override
   R? visitEnumConstantDeclaration(EnumConstantDeclaration node) =>
       visitDeclaration(node);
 
@@ -803,6 +809,12 @@
   }
 
   @override
+  R? visitConstructorSelector(ConstructorSelector node) {
+    node.visitChildren(this);
+    return null;
+  }
+
+  @override
   R? visitContinueStatement(ContinueStatement node) {
     node.visitChildren(this);
     return null;
@@ -851,6 +863,12 @@
   }
 
   @override
+  R? visitEnumConstantArguments(EnumConstantArguments node) {
+    node.visitChildren(this);
+    return null;
+  }
+
+  @override
   R? visitEnumConstantDeclaration(EnumConstantDeclaration node) {
     node.visitChildren(this);
     return null;
@@ -1511,6 +1529,9 @@
   R? visitConstructorReference(ConstructorReference node) => null;
 
   @override
+  R? visitConstructorSelector(ConstructorSelector node) => null;
+
+  @override
   R? visitContinueStatement(ContinueStatement node) => null;
 
   @override
@@ -1535,6 +1556,9 @@
   R? visitEmptyStatement(EmptyStatement node) => null;
 
   @override
+  R? visitEnumConstantArguments(EnumConstantArguments node) => null;
+
+  @override
   R? visitEnumConstantDeclaration(EnumConstantDeclaration node) => null;
 
   @override
@@ -1917,6 +1941,9 @@
   R? visitConstructorReference(ConstructorReference node) => _throw(node);
 
   @override
+  R? visitConstructorSelector(ConstructorSelector node) => _throw(node);
+
+  @override
   R? visitContinueStatement(ContinueStatement node) => _throw(node);
 
   @override
@@ -1941,6 +1968,9 @@
   R? visitEmptyStatement(EmptyStatement node) => _throw(node);
 
   @override
+  R? visitEnumConstantArguments(EnumConstantArguments node) => _throw(node);
+
+  @override
   R? visitEnumConstantDeclaration(EnumConstantDeclaration node) => _throw(node);
 
   @override
@@ -2466,6 +2496,14 @@
   }
 
   @override
+  T? visitConstructorSelector(ConstructorSelector node) {
+    stopwatch.start();
+    T? result = _baseVisitor.visitConstructorSelector(node);
+    stopwatch.stop();
+    return result;
+  }
+
+  @override
   T? visitContinueStatement(ContinueStatement node) {
     stopwatch.start();
     T? result = _baseVisitor.visitContinueStatement(node);
@@ -2530,6 +2568,14 @@
   }
 
   @override
+  T? visitEnumConstantArguments(EnumConstantArguments node) {
+    stopwatch.start();
+    T? result = _baseVisitor.visitEnumConstantArguments(node);
+    stopwatch.stop();
+    return result;
+  }
+
+  @override
   T? visitEnumConstantDeclaration(EnumConstantDeclaration node) {
     stopwatch.start();
     T? result = _baseVisitor.visitEnumConstantDeclaration(node);
@@ -3387,6 +3433,9 @@
   R? visitConstructorReference(ConstructorReference node) => visitNode(node);
 
   @override
+  R? visitConstructorSelector(ConstructorSelector node) => visitNode(node);
+
+  @override
   R? visitContinueStatement(ContinueStatement node) => visitNode(node);
 
   @override
@@ -3412,6 +3461,9 @@
   R? visitEmptyStatement(EmptyStatement node) => visitNode(node);
 
   @override
+  R? visitEnumConstantArguments(EnumConstantArguments node) => visitNode(node);
+
+  @override
   R? visitEnumConstantDeclaration(EnumConstantDeclaration node) =>
       visitNode(node);
 
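With the visitor plumbing above in place, client visitors can hook the new nodes like any other. A hypothetical example (the class and counter names are illustrative, not part of the SDK):

import 'package:analyzer/dart/ast/ast.dart';
import 'package:analyzer/dart/ast/visitor.dart';

// Counts enum constants that pass explicit constructor arguments.
class EnumArgumentCounter extends RecursiveAstVisitor<void> {
  int constantsWithArguments = 0;

  @override
  void visitEnumConstantArguments(EnumConstantArguments node) {
    constantsWithArguments++;
    super.visitEnumConstantArguments(node); // keep recursing
  }
}
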
diff --git a/pkg/analyzer/lib/src/dart/analysis/experiments.g.dart b/pkg/analyzer/lib/src/dart/analysis/experiments.g.dart
index 9cda58d..ae8a33d 100644
--- a/pkg/analyzer/lib/src/dart/analysis/experiments.g.dart
+++ b/pkg/analyzer/lib/src/dart/analysis/experiments.g.dart
@@ -374,13 +374,13 @@
   static const bool enhanced_enums = false;
 
   /// Expiration status of the experiment "extension-methods"
-  static const bool extension_methods = false;
+  static const bool extension_methods = true;
 
   /// Expiration status of the experiment "extension-types"
   static const bool extension_types = false;
 
   /// Expiration status of the experiment "generic-metadata"
-  static const bool generic_metadata = false;
+  static const bool generic_metadata = true;
 
   /// Expiration status of the experiment "macros"
   static const bool macros = false;
@@ -389,10 +389,10 @@
   static const bool named_arguments_anywhere = false;
 
   /// Expiration status of the experiment "non-nullable"
-  static const bool non_nullable = false;
+  static const bool non_nullable = true;
 
   /// Expiration status of the experiment "nonfunction-type-aliases"
-  static const bool nonfunction_type_aliases = false;
+  static const bool nonfunction_type_aliases = true;
 
   /// Expiration status of the experiment "set-literals"
   static const bool set_literals = true;
@@ -407,7 +407,7 @@
   static const bool test_experiment = false;
 
   /// Expiration status of the experiment "triple-shift"
-  static const bool triple_shift = false;
+  static const bool triple_shift = true;
 
   /// Expiration status of the experiment "value-class"
   static const bool value_class = false;
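
Flipping an expiration flag to `true` records that the experiment has shipped: the feature is on by default and its enable string is a no-op. A sketch of observing this through the analyzer's public API, assuming an analyzer version with these flags expired:

import 'package:analyzer/dart/analysis/features.dart';

void main() {
  final features = FeatureSet.latestLanguageVersion();
  // triple-shift shipped in Dart 2.14, so it is on by default.
  print(features.isEnabled(Feature.triple_shift)); // expected: true
}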
diff --git a/pkg/analyzer/lib/src/dart/ast/ast.dart b/pkg/analyzer/lib/src/dart/ast/ast.dart
index ca91eb4..5218135 100644
--- a/pkg/analyzer/lib/src/dart/ast/ast.dart
+++ b/pkg/analyzer/lib/src/dart/ast/ast.dart
@@ -2725,6 +2725,41 @@
   }
 }
 
+class ConstructorSelectorImpl extends AstNodeImpl
+    implements ConstructorSelector {
+  @override
+  final Token period;
+
+  @override
+  final SimpleIdentifierImpl name;
+
+  ConstructorSelectorImpl({
+    required this.period,
+    required this.name,
+  }) {
+    _becomeParentOf(name);
+  }
+
+  @override
+  Token get beginToken => period;
+
+  @override
+  Iterable<SyntacticEntity> get childEntities => ChildEntities()
+    ..add(period)
+    ..add(name);
+
+  @override
+  Token get endToken => name.token;
+
+  @override
+  E? accept<E>(AstVisitor<E> visitor) {
+    return visitor.visitConstructorSelector(this);
+  }
+
+  @override
+  void visitChildren(AstVisitor visitor) {}
+}
+
 /// A continue statement.
 ///
 ///    continueStatement ::=
@@ -3242,31 +3277,89 @@
   }
 }
 
+class EnumConstantArgumentsImpl extends AstNodeImpl
+    implements EnumConstantArguments {
+  @override
+  final TypeArgumentListImpl? typeArguments;
+
+  @override
+  final ConstructorSelectorImpl? constructorSelector;
+
+  @override
+  final ArgumentListImpl argumentList;
+
+  EnumConstantArgumentsImpl({
+    required this.typeArguments,
+    required this.constructorSelector,
+    required this.argumentList,
+  }) {
+    _becomeParentOf(typeArguments);
+    _becomeParentOf(constructorSelector);
+    _becomeParentOf(argumentList);
+  }
+
+  @override
+  Token get beginToken =>
+      (typeArguments ?? constructorSelector ?? argumentList).beginToken;
+
+  @override
+  Iterable<SyntacticEntity> get childEntities => ChildEntities()
+    ..add(typeArguments)
+    ..add(constructorSelector)
+    ..add(argumentList);
+
+  @override
+  Token get endToken => argumentList.endToken;
+
+  @override
+  E? accept<E>(AstVisitor<E> visitor) {
+    return visitor.visitEnumConstantArguments(this);
+  }
+
+  @override
+  void visitChildren(AstVisitor visitor) {
+    typeArguments?.accept(visitor);
+    constructorSelector?.accept(visitor);
+    argumentList.accept(visitor);
+  }
+}
+
 /// The declaration of an enum constant.
 class EnumConstantDeclarationImpl extends DeclarationImpl
     implements EnumConstantDeclaration {
   /// The name of the constant.
   SimpleIdentifierImpl _name;
 
+  @override
+  final EnumConstantArgumentsImpl? arguments;
+
+  @override
+  ConstructorElement? constructorElement;
+
   /// Initialize a newly created enum constant declaration. Either or both of
-  /// the [comment] and [metadata] can be `null` if the constant does not have
-  /// the corresponding attribute. (Technically, enum constants cannot have
-  /// metadata, but we allow it for consistency.)
-  EnumConstantDeclarationImpl(
-      CommentImpl? comment, List<Annotation>? metadata, this._name)
-      : super(comment, metadata) {
+  /// the [documentationComment] and [metadata] can be `null` if the constant
+  /// does not have the corresponding attribute.
+  EnumConstantDeclarationImpl({
+    required CommentImpl? documentationComment,
+    required List<Annotation>? metadata,
+    required SimpleIdentifierImpl name,
+    required this.arguments,
+  })  : _name = name,
+        super(documentationComment, metadata) {
     _becomeParentOf(_name);
+    _becomeParentOf(arguments);
   }
 
   @override
-  Iterable<SyntacticEntity> get childEntities =>
-      super._childEntities..add(_name);
+  Iterable<SyntacticEntity> get childEntities => super._childEntities
+    ..add(_name)
+    ..add(arguments);
 
   @override
   FieldElement get declaredElement => _name.staticElement as FieldElement;
 
   @override
-  Token get endToken => _name.endToken;
+  Token get endToken => (arguments ?? _name).endToken;
 
   @override
   Token get firstTokenAfterCommentAndMetadata => _name.beginToken;
@@ -3286,6 +3379,7 @@
   void visitChildren(AstVisitor visitor) {
     super.visitChildren(visitor);
     _name.accept(visitor);
+    arguments?.accept(visitor);
   }
 }
 
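Because `endToken` now ends at the argument list when one is present, source ranges and `toSource()` for an enum constant cover its arguments. A sketch using the public parser API; whether the arguments parse without errors depends on the enhanced-enums experiment, so diagnostics are not thrown here:

import 'package:analyzer/dart/analysis/utilities.dart';
import 'package:analyzer/dart/ast/ast.dart';

void main() {
  final unit = parseString(
    content: 'enum E { v(42); const E(int x); }',
    throwIfDiagnostics: false,
  ).unit;
  final enumDecl = unit.declarations.whereType<EnumDeclaration>().first;
  // The constant's source now includes its argument list.
  print(enumDecl.constants.first.toSource()); // expected: v(42)
}
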
diff --git a/pkg/analyzer/lib/src/dart/ast/ast_factory.dart b/pkg/analyzer/lib/src/dart/ast/ast_factory.dart
index 806d84e..37cfef3 100644
--- a/pkg/analyzer/lib/src/dart/ast/ast_factory.dart
+++ b/pkg/analyzer/lib/src/dart/ast/ast_factory.dart
@@ -385,7 +385,11 @@
   EnumConstantDeclarationImpl enumConstantDeclaration(Comment? comment,
           List<Annotation>? metadata, SimpleIdentifier name) =>
       EnumConstantDeclarationImpl(
-          comment as CommentImpl?, metadata, name as SimpleIdentifierImpl);
+        documentationComment: comment as CommentImpl?,
+        metadata: metadata,
+        name: name as SimpleIdentifierImpl,
+        arguments: null,
+      );
 
   @Deprecated('Use enumDeclaration2() instead')
   @override
diff --git a/pkg/analyzer/lib/src/dart/ast/to_source_visitor.dart b/pkg/analyzer/lib/src/dart/ast/to_source_visitor.dart
index 294a89f..d2c7f88 100644
--- a/pkg/analyzer/lib/src/dart/ast/to_source_visitor.dart
+++ b/pkg/analyzer/lib/src/dart/ast/to_source_visitor.dart
@@ -249,6 +249,12 @@
   }
 
   @override
+  void visitConstructorSelector(ConstructorSelector node) {
+    _visitToken(node.period);
+    _visitNode(node.name);
+  }
+
+  @override
   void visitContinueStatement(ContinueStatement node) {
     sink.write('continue');
     _visitNode(node.label, prefix: ' ');
@@ -306,9 +312,17 @@
   }
 
   @override
+  void visitEnumConstantArguments(EnumConstantArguments node) {
+    _visitNode(node.typeArguments);
+    _visitNode(node.constructorSelector);
+    _visitNode(node.argumentList);
+  }
+
+  @override
   void visitEnumConstantDeclaration(EnumConstantDeclaration node) {
     _visitNodeList(node.metadata, separator: ' ', suffix: ' ');
     _visitNode(node.name);
+    _visitNode(node.arguments);
   }
 
   @override
diff --git a/pkg/analyzer/lib/src/dart/ast/utilities.dart b/pkg/analyzer/lib/src/dart/ast/utilities.dart
index 4db6958..1aca07f 100644
--- a/pkg/analyzer/lib/src/dart/ast/utilities.dart
+++ b/pkg/analyzer/lib/src/dart/ast/utilities.dart
@@ -350,6 +350,13 @@
   }
 
   @override
+  bool visitConstructorSelector(ConstructorSelector node) {
+    var other = _other as ConstructorSelector;
+    return isEqualTokens(node.period, other.period) &&
+        isEqualNodes(node.name, other.name);
+  }
+
+  @override
   bool visitContinueStatement(ContinueStatement node) {
     ContinueStatement other = _other as ContinueStatement;
     return isEqualTokens(node.continueKeyword, other.continueKeyword) &&
@@ -415,6 +422,14 @@
   }
 
   @override
+  bool visitEnumConstantArguments(EnumConstantArguments node) {
+    var other = _other as EnumConstantArguments;
+    return isEqualNodes(node.typeArguments, other.typeArguments) &&
+        isEqualNodes(node.constructorSelector, other.constructorSelector) &&
+        isEqualNodes(node.argumentList, other.argumentList);
+  }
+
+  @override
   bool visitEnumConstantDeclaration(EnumConstantDeclaration node) {
     EnumConstantDeclaration other = _other as EnumConstantDeclaration;
     return isEqualNodes(
@@ -1913,6 +1928,11 @@
   }
 
   @override
+  bool visitConstructorSelector(ConstructorSelector node) {
+    throw UnimplementedError();
+  }
+
+  @override
   bool visitContinueStatement(covariant ContinueStatementImpl node) {
     if (identical(node.label, _oldNode)) {
       node.label = _newNode as SimpleIdentifier;
@@ -1981,6 +2001,11 @@
   bool visitEmptyStatement(EmptyStatement node) => visitNode(node);
 
   @override
+  bool visitEnumConstantArguments(EnumConstantArguments node) {
+    throw UnimplementedError();
+  }
+
+  @override
   bool visitEnumConstantDeclaration(
       covariant EnumConstantDeclarationImpl node) {
     if (identical(node.name, _oldNode)) {
diff --git a/pkg/analyzer/lib/src/fasta/ast_builder.dart b/pkg/analyzer/lib/src/fasta/ast_builder.dart
index 576a41f..db03646 100644
--- a/pkg/analyzer/lib/src/fasta/ast_builder.dart
+++ b/pkg/analyzer/lib/src/fasta/ast_builder.dart
@@ -64,6 +64,9 @@
         ClassDeclarationImpl,
         CompilationUnitImpl,
         ConstructorNameImpl,
+        EnumConstantArgumentsImpl,
+        ConstructorSelectorImpl,
+        EnumConstantDeclarationImpl,
         EnumDeclarationImpl,
         ExtensionDeclarationImpl,
         ImportDirectiveImpl,
@@ -2809,17 +2812,17 @@
   @override
   void handleEnumElement(Token beginToken) {
     debugEvent("EnumElement");
-    var arguments = pop() as MethodInvocationImpl?;
-    var constructorName = pop() as ConstructorNameImpl?;
+    var tmpArguments = pop() as MethodInvocationImpl?;
+    var tmpConstructor = pop() as ConstructorNameImpl?;
 
     if (!enableEnhancedEnums &&
-        (arguments != null ||
-            constructorName != null &&
-                (constructorName.type2.typeArguments != null ||
-                    constructorName.name != null))) {
-      Token token = arguments != null
-          ? arguments.argumentList.beginToken
-          : constructorName!.beginToken;
+        (tmpArguments != null ||
+            tmpConstructor != null &&
+                (tmpConstructor.type2.typeArguments != null ||
+                    tmpConstructor.name != null))) {
+      Token token = tmpArguments != null
+          ? tmpArguments.argumentList.beginToken
+          : tmpConstructor!.beginToken;
       var feature = ExperimentalFeatures.enhanced_enums;
       handleRecoverableError(
         templateExperimentNotEnabled.withArguments(
@@ -2830,6 +2833,37 @@
         token,
       );
     }
+
+    var constant = pop() as EnumConstantDeclarationImpl;
+
+    // Replace the constant to include arguments.
+    if (tmpArguments != null) {
+      TypeArgumentListImpl? typeArguments;
+      ConstructorSelectorImpl? constructorName;
+      if (tmpConstructor != null) {
+        typeArguments = tmpConstructor.type2.typeArguments;
+        var constructorNamePeriod = tmpConstructor.period;
+        var constructorNameId = tmpConstructor.name;
+        if (constructorNamePeriod != null && constructorNameId != null) {
+          constructorName = ConstructorSelectorImpl(
+            period: constructorNamePeriod,
+            name: constructorNameId,
+          );
+        }
+      }
+      constant = EnumConstantDeclarationImpl(
+        documentationComment: constant.documentationComment,
+        metadata: constant.metadata,
+        name: constant.name,
+        arguments: EnumConstantArgumentsImpl(
+          typeArguments: typeArguments,
+          constructorSelector: constructorName,
+          argumentList: tmpArguments.argumentList,
+        ),
+      );
+    }
+
+    push(constant);
   }
 
   @override
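
When enhanced enums are disabled, `handleEnumElement` still builds the full node but first reports a recoverable experiment-not-enabled diagnostic at the offending token. A hypothetical way to observe that diagnostic (not an SDK test):

import 'package:analyzer/dart/analysis/utilities.dart';

void main() {
  final result = parseString(
    content: 'enum E { v(1); const E(int x); }',
    throwIfDiagnostics: false,
  );
  // With the experiment off this prints the recoverable
  // "experiment not enabled" error; with it on, nothing.
  for (final error in result.errors) {
    print(error.message);
  }
}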
diff --git a/pkg/analyzer/pubspec.yaml b/pkg/analyzer/pubspec.yaml
index 547e31e..5994057 100644
--- a/pkg/analyzer/pubspec.yaml
+++ b/pkg/analyzer/pubspec.yaml
@@ -1,5 +1,5 @@
 name: analyzer
-version: 3.1.0
+version: 3.2.0
 description: This package provides a library that performs static analysis of Dart code.
 homepage: https://github.com/dart-lang/sdk/tree/main/pkg/analyzer
 
@@ -7,7 +7,7 @@
   sdk: '>=2.14.0 <3.0.0'
 
 dependencies:
-  _fe_analyzer_shared: ^33.0.0
+  _fe_analyzer_shared: ^34.0.0
   cli_util: ^0.3.0
   collection: ^1.15.0
   convert: ^3.0.0
diff --git a/pkg/analyzer/test/src/dart/ast/to_source_visitor_test.dart b/pkg/analyzer/test/src/dart/ast/to_source_visitor_test.dart
index 082ba9d..eac40aa 100644
--- a/pkg/analyzer/test/src/dart/ast/to_source_visitor_test.dart
+++ b/pkg/analyzer/test/src/dart/ast/to_source_visitor_test.dart
@@ -810,6 +810,30 @@
     _assertSource(";", AstTestFactory.emptyStatement());
   }
 
+  void test_visitEnumDeclaration_constant_arguments_named() {
+    var findNode = _parseStringToFindNode(r'''
+enum E {
+  v<double>.named(42)
+}
+''');
+    _assertSource(
+      'enum E {v<double>.named(42)}',
+      findNode.enumDeclaration('enum E'),
+    );
+  }
+
+  void test_visitEnumDeclaration_constant_arguments_unnamed() {
+    var findNode = _parseStringToFindNode(r'''
+enum E {
+  v<double>(42)
+}
+''');
+    _assertSource(
+      'enum E {v<double>(42)}',
+      findNode.enumDeclaration('enum E'),
+    );
+  }
+
   void test_visitEnumDeclaration_constants_multiple() {
     var findNode = _parseStringToFindNode(r'''
 enum E {one, two}
diff --git a/pkg/analyzer/test/src/dart/resolution/language_version_test.dart b/pkg/analyzer/test/src/dart/resolution/language_version_test.dart
index a9868b6..fa4ab1c 100644
--- a/pkg/analyzer/test/src/dart/resolution/language_version_test.dart
+++ b/pkg/analyzer/test/src/dart/resolution/language_version_test.dart
@@ -11,7 +11,6 @@
 main() {
   defineReflectiveSuite(() {
     defineReflectiveTests(NullSafetyExperimentGlobalTest);
-    defineReflectiveTests(NullSafetyUsingAllowedExperimentsTest);
     defineReflectiveTests(PackageConfigAndLanguageOverrideTest);
   });
 }
@@ -99,171 +98,6 @@
 }
 
 @reflectiveTest
-class NullSafetyUsingAllowedExperimentsTest extends _FeaturesTest {
-  test_jsonConfig_disable_bin() async {
-    _configureAllowedExperimentsTestNullSafety();
-
-    _configureTestWithJsonConfig('''
-{
-  "configVersion": 2,
-  "packages": [
-    {
-      "name": "test",
-      "rootUri": "../",
-      "packageUri": "lib/",
-      "languageVersion": "2.8"
-    }
-  ]
-}
-''');
-
-    var path = '$testPackageRootPath/bin/a.dart';
-
-    await resolveFileCode(path, r'''
-var x = 0;
-''');
-    assertErrorsInResult([]);
-    assertType(findElement.topVar('x').type, 'int*');
-
-    // Upgrade the language version to `2.10`, so enable Null Safety.
-    _changeFile(path);
-    await resolveFileCode(path, r'''
-// @dart = 2.10
-var x = 0;
-''');
-    assertType(findElement.topVar('x').type, 'int');
-  }
-
-  test_jsonConfig_disable_lib() async {
-    _configureAllowedExperimentsTestNullSafety();
-
-    _configureTestWithJsonConfig('''
-{
-  "configVersion": 2,
-  "packages": [
-    {
-      "name": "test",
-      "rootUri": "../",
-      "packageUri": "lib/",
-      "languageVersion": "2.8"
-    }
-  ]
-}
-''');
-
-    var path = testFilePath;
-
-    await resolveFileCode(path, '''
-var x = 0;
-''');
-    assertErrorsInResult([]);
-    assertType(findElement.topVar('x').type, 'int*');
-
-    // Upgrade the language version to `2.10`, so enable Null Safety.
-    _changeFile(path);
-    await assertNoErrorsInCode('''
-// @dart = 2.10
-var x = 0;
-''');
-    assertType(findElement.topVar('x').type, 'int');
-  }
-
-  test_jsonConfig_enable_bin() async {
-    _configureAllowedExperimentsTestNullSafety();
-
-    _configureTestWithJsonConfig('''
-{
-  "configVersion": 2,
-  "packages": [
-    {
-      "name": "test",
-      "rootUri": "../",
-      "packageUri": "lib/"
-    }
-  ]
-}
-''');
-
-    var path = '$testPackageRootPath/bin/a.dart';
-
-    await resolveFileCode(path, r'''
-var x = 0;
-''');
-    assertErrorsInList(result.errors, []);
-    assertType(findElement.topVar('x').type, 'int');
-
-    // Downgrade the version to `2.8`, so disable Null Safety.
-    _changeFile(path);
-    await resolveFileCode(path, r'''
-// @dart = 2.8
-var x = 0;
-''');
-    assertType(findElement.topVar('x').type, 'int*');
-  }
-
-  test_jsonConfig_enable_lib() async {
-    _configureAllowedExperimentsTestNullSafety();
-
-    _configureTestWithJsonConfig('''
-{
-  "configVersion": 2,
-  "packages": [
-    {
-      "name": "test",
-      "rootUri": "../",
-      "packageUri": "lib/"
-    }
-  ]
-}
-''');
-
-    var path = testFilePath;
-
-    await resolveFileCode(path, '''
-var x = 0;
-''');
-    assertErrorsInResult([]);
-    assertType(findElement.topVar('x').type, 'int');
-
-    // Downgrade the version to `2.8`, so disable Null Safety.
-    _changeFile(path);
-    await assertNoErrorsInCode('''
-// @dart = 2.8
-var x = 0;
-''');
-    assertType(findElement.topVar('x').type, 'int*');
-  }
-
-  void _configureAllowedExperimentsTestNullSafety() {
-    _newSdkExperimentsFile(r'''
-{
-  "version": 1,
-  "experimentSets": {
-    "nullSafety": ["non-nullable"]
-  },
-  "sdk": {
-    "default": {
-      "experimentSet": "nullSafety"
-    }
-  },
-  "packages": {
-    "test": {
-      "experimentSet": "nullSafety"
-    }
-  }
-}
-''');
-  }
-
-  void _newSdkExperimentsFile(String content) {
-    newFile(
-      '${sdkRoot.path}/lib/_internal/allowed_experiments.json',
-      content: content,
-    );
-  }
-}
-
-@reflectiveTest
 class PackageConfigAndLanguageOverrideTest extends _FeaturesTest {
   test_jsonConfigDisablesExtensions() async {
     _configureTestWithJsonConfig('''
@@ -313,15 +147,6 @@
 }
 
 class _FeaturesTest extends PubPackageResolutionTest {
-  /// Do necessary work to ensure that the file with the [path] is considered
-  /// changed for the purpose of following analysis.
-  ///
-  /// Currently we just dispose the whole analysis context collection, so when
-  /// we ask to analyze anything again, we will pick up the new file content.
-  void _changeFile(String path) {
-    disposeAnalysisContextCollection();
-  }
-
   void _configureTestWithJsonConfig(String content) {
     newFile(
       '$testPackageRootPath/.dart_tool/package_config.json',
diff --git a/pkg/analyzer/test/src/dart/resolution/library_element_test.dart b/pkg/analyzer/test/src/dart/resolution/library_element_test.dart
index b569f89..61f0885 100644
--- a/pkg/analyzer/test/src/dart/resolution/library_element_test.dart
+++ b/pkg/analyzer/test/src/dart/resolution/library_element_test.dart
@@ -62,34 +62,6 @@
     ]);
   }
 
-  test_language208_experimentNonNullable() async {
-    writeTestPackageConfig(
-      PackageConfigFileBuilder(),
-      languageVersion: '2.8',
-    );
-
-    writeTestPackageAnalysisOptionsFile(
-      AnalysisOptionsFileConfig(experiments: [
-        EnableString.non_nullable,
-      ]),
-    );
-
-    await resolveTestCode('');
-
-    _assertLanguageVersion(
-      package: Version.parse('2.8.0'),
-      override: null,
-    );
-
-    _assertFeatureSet([
-      Feature.constant_update_2018,
-      Feature.control_flow_collections,
-      Feature.extension_methods,
-      Feature.set_literals,
-      Feature.spread_collections,
-    ]);
-  }
-
   test_language208_override205() async {
     writeTestPackageConfig(
       PackageConfigFileBuilder(),
@@ -134,36 +106,6 @@
     ]);
   }
 
-  test_language209_experimentNonNullable_override210() async {
-    writeTestPackageConfig(
-      PackageConfigFileBuilder(),
-      languageVersion: '2.9',
-    );
-
-    writeTestPackageAnalysisOptionsFile(
-      AnalysisOptionsFileConfig(experiments: [
-        EnableString.non_nullable,
-      ]),
-    );
-
-    await resolveTestCode('// @dart = 2.10');
-
-    // Valid override, even if greater than the package language version.
-    _assertLanguageVersion(
-      package: Version.parse('2.9.0'),
-      override: Version.parse('2.10.0'),
-    );
-
-    _assertFeatureSet([
-      Feature.constant_update_2018,
-      Feature.control_flow_collections,
-      Feature.extension_methods,
-      Feature.non_nullable,
-      Feature.set_literals,
-      Feature.spread_collections,
-    ]);
-  }
-
   test_language209_override299() async {
     writeTestPackageConfig(
       PackageConfigFileBuilder(),
@@ -210,63 +152,6 @@
     ]);
   }
 
-  test_language210_experimentNonNullable() async {
-    writeTestPackageConfig(
-      PackageConfigFileBuilder(),
-      languageVersion: '2.10',
-    );
-
-    writeTestPackageAnalysisOptionsFile(
-      AnalysisOptionsFileConfig(experiments: [
-        EnableString.non_nullable,
-      ]),
-    );
-
-    await resolveTestCode('');
-
-    _assertLanguageVersion(
-      package: Version.parse('2.10.0'),
-      override: null,
-    );
-
-    _assertFeatureSet([
-      Feature.constant_update_2018,
-      Feature.control_flow_collections,
-      Feature.extension_methods,
-      Feature.non_nullable,
-      Feature.set_literals,
-      Feature.spread_collections,
-    ]);
-  }
-
-  test_language210_experimentNonNullable_override209() async {
-    writeTestPackageConfig(
-      PackageConfigFileBuilder(),
-      languageVersion: '2.10',
-    );
-
-    writeTestPackageAnalysisOptionsFile(
-      AnalysisOptionsFileConfig(experiments: [
-        EnableString.non_nullable,
-      ]),
-    );
-
-    await resolveTestCode('// @dart = 2.9');
-
-    _assertLanguageVersion(
-      package: Version.parse('2.10.0'),
-      override: Version.parse('2.9.0'),
-    );
-
-    _assertFeatureSet([
-      Feature.constant_update_2018,
-      Feature.control_flow_collections,
-      Feature.extension_methods,
-      Feature.set_literals,
-      Feature.spread_collections,
-    ]);
-  }
-
   void _assertFeatureSet(List<Feature> expected) {
     var featureSet = result.libraryElement.featureSet;
 
diff --git a/pkg/analyzer/tool/experiments/experiments_test.dart b/pkg/analyzer/tool/experiments/experiments_test.dart
index 575c084..dc8e2ac 100644
--- a/pkg/analyzer/tool/experiments/experiments_test.dart
+++ b/pkg/analyzer/tool/experiments/experiments_test.dart
@@ -10,7 +10,7 @@
 import 'generate.dart';
 
 /// Check that all targets have been code generated.  If they haven't tell the
-/// user to run generate_all.dart.
+/// user to run `generate.dart`.
 main() async {
   String script = Platform.script.toFilePath(windows: Platform.isWindows);
   List<String> components = split(script);
diff --git a/pkg/dartdev/test/experiments_test.dart b/pkg/dartdev/test/experiments_test.dart
index f9bec0e..5608ac7 100644
--- a/pkg/dartdev/test/experiments_test.dart
+++ b/pkg/dartdev/test/experiments_test.dart
@@ -11,7 +11,7 @@
       expect(experimentalFeatures, isNotEmpty);
       expect(
         experimentalFeatures.map((experiment) => experiment.enableString),
-        contains('non-nullable'),
+        contains('super-parameters'),
       );
     });
   });
diff --git a/pkg/front_end/testcases/general/ffi_sample.dart.weak.expect b/pkg/front_end/testcases/general/ffi_sample.dart.weak.expect
index 54231c0..8c513a4 100644
--- a/pkg/front_end/testcases/general/ffi_sample.dart.weak.expect
+++ b/pkg/front_end/testcases/general/ffi_sample.dart.weak.expect
@@ -40,7 +40,7 @@
 
 Constructor coverage from constants:
 org-dartlang-testcase:///ffi_sample.dart:
-- Double. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:142:9)
+- Double. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:144:9)
 - _NativeDouble. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:34:9)
 - NativeType. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:12:9)
 - Object. (from org-dartlang-sdk:///sdk/lib/core/object.dart:25:9)
diff --git a/pkg/front_end/testcases/general/ffi_sample.dart.weak.modular.expect b/pkg/front_end/testcases/general/ffi_sample.dart.weak.modular.expect
index 54231c0..8c513a4 100644
--- a/pkg/front_end/testcases/general/ffi_sample.dart.weak.modular.expect
+++ b/pkg/front_end/testcases/general/ffi_sample.dart.weak.modular.expect
@@ -40,7 +40,7 @@
 
 Constructor coverage from constants:
 org-dartlang-testcase:///ffi_sample.dart:
-- Double. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:142:9)
+- Double. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:144:9)
 - _NativeDouble. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:34:9)
 - NativeType. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:12:9)
 - Object. (from org-dartlang-sdk:///sdk/lib/core/object.dart:25:9)
diff --git a/pkg/front_end/testcases/general/ffi_sample.dart.weak.transformed.expect b/pkg/front_end/testcases/general/ffi_sample.dart.weak.transformed.expect
index dc06840..ae497d5a 100644
--- a/pkg/front_end/testcases/general/ffi_sample.dart.weak.transformed.expect
+++ b/pkg/front_end/testcases/general/ffi_sample.dart.weak.transformed.expect
@@ -59,22 +59,22 @@
   #C7 = core::pragma {name:#C1, options:#C6}
   #C8 = ffi::Double {}
   #C9 = 0
-  #C10 = <core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
+  #C10 = <core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
   #C11 = 8
-  #C12 = <core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
+  #C12 = <core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
   #C13 = 16
-  #C14 = <core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
+  #C14 = <core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
   #C15 = "vm:prefer-inline"
   #C16 = core::pragma {name:#C15, options:#C5}
   #C17 = 24
   #C18 = 20
-  #C19 = <core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17]
+  #C19 = <core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17, #C17, #C17]
 }
 
 
 Constructor coverage from constants:
 org-dartlang-testcase:///ffi_sample.dart:
-- Double. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:142:9)
+- Double. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:144:9)
 - _NativeDouble. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:34:9)
 - NativeType. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:12:9)
 - Object. (from org-dartlang-sdk:///sdk/lib/core/object.dart:25:9)
diff --git a/pkg/front_end/testcases/incremental/crash_05.yaml.world.1.expect b/pkg/front_end/testcases/incremental/crash_05.yaml.world.1.expect
index cd22c39..bcb7537 100644
--- a/pkg/front_end/testcases/incremental/crash_05.yaml.world.1.expect
+++ b/pkg/front_end/testcases/incremental/crash_05.yaml.world.1.expect
@@ -56,11 +56,11 @@
   #C6 = dart.core::pragma {name:#C1, options:#C5}
   #C7 = dart.ffi::Uint32 {}
   #C8 = 0
-  #C9 = <dart.core::int*>[#C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8]
+  #C9 = <dart.core::int*>[#C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8]
   #C10 = "vm:prefer-inline"
   #C11 = dart.core::pragma {name:#C10, options:#C4}
   #C12 = 4
-  #C13 = <dart.core::int*>[#C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12]
+  #C13 = <dart.core::int*>[#C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12]
   #C14 = TypeLiteralConstant(lib::Y)
   #C15 = <dart.core::Type>[#C14]
   #C16 = dart.ffi::_FfiStructLayout {fieldTypes:#C15, packing:#C4}
diff --git a/pkg/front_end/testcases/incremental/crash_05.yaml.world.2.expect b/pkg/front_end/testcases/incremental/crash_05.yaml.world.2.expect
index cd22c39..bcb7537 100644
--- a/pkg/front_end/testcases/incremental/crash_05.yaml.world.2.expect
+++ b/pkg/front_end/testcases/incremental/crash_05.yaml.world.2.expect
@@ -56,11 +56,11 @@
   #C6 = dart.core::pragma {name:#C1, options:#C5}
   #C7 = dart.ffi::Uint32 {}
   #C8 = 0
-  #C9 = <dart.core::int*>[#C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8]
+  #C9 = <dart.core::int*>[#C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8]
   #C10 = "vm:prefer-inline"
   #C11 = dart.core::pragma {name:#C10, options:#C4}
   #C12 = 4
-  #C13 = <dart.core::int*>[#C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12]
+  #C13 = <dart.core::int*>[#C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12]
   #C14 = TypeLiteralConstant(lib::Y)
   #C15 = <dart.core::Type>[#C14]
   #C16 = dart.ffi::_FfiStructLayout {fieldTypes:#C15, packing:#C4}
diff --git a/pkg/front_end/testcases/incremental/crash_06.yaml.world.1.expect b/pkg/front_end/testcases/incremental/crash_06.yaml.world.1.expect
index 2b217e2..a7835ef 100644
--- a/pkg/front_end/testcases/incremental/crash_06.yaml.world.1.expect
+++ b/pkg/front_end/testcases/incremental/crash_06.yaml.world.1.expect
@@ -62,7 +62,7 @@
   #C5 = dart.ffi::_FfiStructLayout {fieldTypes:#C3, packing:#C4}
   #C6 = dart.core::pragma {name:#C1, options:#C5}
   #C7 = 0
-  #C8 = <dart.core::int*>[#C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7]
+  #C8 = <dart.core::int*>[#C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7]
   #C9 = "vm:prefer-inline"
   #C10 = dart.core::pragma {name:#C9, options:#C4}
 }
diff --git a/pkg/front_end/testcases/incremental/crash_06.yaml.world.2.expect b/pkg/front_end/testcases/incremental/crash_06.yaml.world.2.expect
index 2b217e2..a7835ef 100644
--- a/pkg/front_end/testcases/incremental/crash_06.yaml.world.2.expect
+++ b/pkg/front_end/testcases/incremental/crash_06.yaml.world.2.expect
@@ -62,7 +62,7 @@
   #C5 = dart.ffi::_FfiStructLayout {fieldTypes:#C3, packing:#C4}
   #C6 = dart.core::pragma {name:#C1, options:#C5}
   #C7 = 0
-  #C8 = <dart.core::int*>[#C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7]
+  #C8 = <dart.core::int*>[#C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7]
   #C9 = "vm:prefer-inline"
   #C10 = dart.core::pragma {name:#C9, options:#C4}
 }
diff --git a/pkg/front_end/testcases/incremental/ffi_01.yaml.world.1.expect b/pkg/front_end/testcases/incremental/ffi_01.yaml.world.1.expect
index 72d79b8..e6e7693 100644
--- a/pkg/front_end/testcases/incremental/ffi_01.yaml.world.1.expect
+++ b/pkg/front_end/testcases/incremental/ffi_01.yaml.world.1.expect
@@ -62,14 +62,14 @@
   #C7 = dart.core::pragma {name:#C1, options:#C6}
   #C8 = dart.ffi::Double {}
   #C9 = 0
-  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
+  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
   #C11 = 8
-  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
+  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
   #C13 = 16
-  #C14 = <dart.core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
+  #C14 = <dart.core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
   #C15 = "vm:prefer-inline"
   #C16 = dart.core::pragma {name:#C15, options:#C5}
   #C17 = 24
   #C18 = 20
-  #C19 = <dart.core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17]
+  #C19 = <dart.core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17, #C17, #C17]
 }
diff --git a/pkg/front_end/testcases/incremental/ffi_01.yaml.world.2.expect b/pkg/front_end/testcases/incremental/ffi_01.yaml.world.2.expect
index 6d9d90c..7712ae2 100644
--- a/pkg/front_end/testcases/incremental/ffi_01.yaml.world.2.expect
+++ b/pkg/front_end/testcases/incremental/ffi_01.yaml.world.2.expect
@@ -66,14 +66,14 @@
   #C7 = dart.core::pragma {name:#C1, options:#C6}
   #C8 = dart.ffi::Double {}
   #C9 = 0
-  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
+  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
   #C11 = 8
-  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
+  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
   #C13 = 16
-  #C14 = <dart.core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
+  #C14 = <dart.core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
   #C15 = "vm:prefer-inline"
   #C16 = dart.core::pragma {name:#C15, options:#C5}
   #C17 = 24
   #C18 = 20
-  #C19 = <dart.core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17]
+  #C19 = <dart.core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17, #C17, #C17]
 }
diff --git a/pkg/front_end/testcases/incremental/ffi_02.yaml.world.1.expect b/pkg/front_end/testcases/incremental/ffi_02.yaml.world.1.expect
index eb79aaf..99eefd4 100644
--- a/pkg/front_end/testcases/incremental/ffi_02.yaml.world.1.expect
+++ b/pkg/front_end/testcases/incremental/ffi_02.yaml.world.1.expect
@@ -63,14 +63,14 @@
   #C7 = dart.core::pragma {name:#C1, options:#C6}
   #C8 = dart.ffi::Double {}
   #C9 = 0
-  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
+  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
   #C11 = 8
-  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
+  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
   #C13 = 16
-  #C14 = <dart.core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
+  #C14 = <dart.core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
   #C15 = "vm:prefer-inline"
   #C16 = dart.core::pragma {name:#C15, options:#C5}
   #C17 = 24
   #C18 = 20
-  #C19 = <dart.core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17]
+  #C19 = <dart.core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17, #C17, #C17]
 }
diff --git a/pkg/front_end/testcases/incremental/issue_46666.yaml.world.1.expect b/pkg/front_end/testcases/incremental/issue_46666.yaml.world.1.expect
index beb059f..a889a57 100644
--- a/pkg/front_end/testcases/incremental/issue_46666.yaml.world.1.expect
+++ b/pkg/front_end/testcases/incremental/issue_46666.yaml.world.1.expect
@@ -101,19 +101,19 @@
   #C6 = dart.ffi::_FfiStructLayout {fieldTypes:#C4, packing:#C5}
   #C7 = dart.core::pragma {name:#C1, options:#C6}
   #C8 = 0
-  #C9 = <dart.core::int*>[#C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8]
+  #C9 = <dart.core::int*>[#C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8]
   #C10 = 4
   #C11 = 8
-  #C12 = <dart.core::int*>[#C10, #C11, #C10, #C11, #C11, #C11, #C10, #C11, #C11, #C10, #C11, #C10, #C11, #C11, #C11, #C11, #C10, #C11]
+  #C12 = <dart.core::int*>[#C10, #C11, #C10, #C11, #C11, #C11, #C10, #C11, #C11, #C10, #C11, #C10, #C11, #C10, #C11, #C11, #C11, #C11, #C10, #C11]
   #C13 = 16
-  #C14 = <dart.core::int*>[#C11, #C13, #C11, #C13, #C13, #C13, #C11, #C13, #C13, #C11, #C13, #C11, #C13, #C13, #C13, #C13, #C11, #C13]
+  #C14 = <dart.core::int*>[#C11, #C13, #C11, #C13, #C13, #C13, #C11, #C13, #C13, #C11, #C13, #C11, #C13, #C11, #C13, #C13, #C13, #C13, #C11, #C13]
   #C15 = 12
   #C16 = 24
-  #C17 = <dart.core::int*>[#C15, #C16, #C15, #C16, #C16, #C16, #C15, #C16, #C16, #C15, #C16, #C15, #C16, #C16, #C16, #C16, #C15, #C16]
+  #C17 = <dart.core::int*>[#C15, #C16, #C15, #C16, #C16, #C16, #C15, #C16, #C16, #C15, #C16, #C15, #C16, #C15, #C16, #C16, #C16, #C16, #C15, #C16]
   #C18 = "vm:prefer-inline"
   #C19 = dart.core::pragma {name:#C18, options:#C5}
   #C20 = 48
-  #C21 = <dart.core::int*>[#C16, #C20, #C16, #C20, #C20, #C20, #C16, #C20, #C20, #C16, #C20, #C16, #C20, #C20, #C20, #C20, #C16, #C20]
+  #C21 = <dart.core::int*>[#C16, #C20, #C16, #C20, #C20, #C20, #C16, #C20, #C20, #C16, #C20, #C16, #C20, #C16, #C20, #C20, #C20, #C20, #C16, #C20]
   #C22 = <dart.core::Type>[#C2, #C2, #C2]
   #C23 = dart.ffi::_FfiStructLayout {fieldTypes:#C22, packing:#C5}
   #C24 = dart.core::pragma {name:#C1, options:#C23}
diff --git a/pkg/front_end/testcases/incremental/issue_46666.yaml.world.2.expect b/pkg/front_end/testcases/incremental/issue_46666.yaml.world.2.expect
index beb059f..a889a57 100644
--- a/pkg/front_end/testcases/incremental/issue_46666.yaml.world.2.expect
+++ b/pkg/front_end/testcases/incremental/issue_46666.yaml.world.2.expect
@@ -101,19 +101,19 @@
   #C6 = dart.ffi::_FfiStructLayout {fieldTypes:#C4, packing:#C5}
   #C7 = dart.core::pragma {name:#C1, options:#C6}
   #C8 = 0
-  #C9 = <dart.core::int*>[#C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8]
+  #C9 = <dart.core::int*>[#C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8]
   #C10 = 4
   #C11 = 8
-  #C12 = <dart.core::int*>[#C10, #C11, #C10, #C11, #C11, #C11, #C10, #C11, #C11, #C10, #C11, #C10, #C11, #C11, #C11, #C11, #C10, #C11]
+  #C12 = <dart.core::int*>[#C10, #C11, #C10, #C11, #C11, #C11, #C10, #C11, #C11, #C10, #C11, #C10, #C11, #C10, #C11, #C11, #C11, #C11, #C10, #C11]
   #C13 = 16
-  #C14 = <dart.core::int*>[#C11, #C13, #C11, #C13, #C13, #C13, #C11, #C13, #C13, #C11, #C13, #C11, #C13, #C13, #C13, #C13, #C11, #C13]
+  #C14 = <dart.core::int*>[#C11, #C13, #C11, #C13, #C13, #C13, #C11, #C13, #C13, #C11, #C13, #C11, #C13, #C11, #C13, #C13, #C13, #C13, #C11, #C13]
   #C15 = 12
   #C16 = 24
-  #C17 = <dart.core::int*>[#C15, #C16, #C15, #C16, #C16, #C16, #C15, #C16, #C16, #C15, #C16, #C15, #C16, #C16, #C16, #C16, #C15, #C16]
+  #C17 = <dart.core::int*>[#C15, #C16, #C15, #C16, #C16, #C16, #C15, #C16, #C16, #C15, #C16, #C15, #C16, #C15, #C16, #C16, #C16, #C16, #C15, #C16]
   #C18 = "vm:prefer-inline"
   #C19 = dart.core::pragma {name:#C18, options:#C5}
   #C20 = 48
-  #C21 = <dart.core::int*>[#C16, #C20, #C16, #C20, #C20, #C20, #C16, #C20, #C20, #C16, #C20, #C16, #C20, #C20, #C20, #C20, #C16, #C20]
+  #C21 = <dart.core::int*>[#C16, #C20, #C16, #C20, #C20, #C20, #C16, #C20, #C20, #C16, #C20, #C16, #C20, #C16, #C20, #C20, #C20, #C20, #C16, #C20]
   #C22 = <dart.core::Type>[#C2, #C2, #C2]
   #C23 = dart.ffi::_FfiStructLayout {fieldTypes:#C22, packing:#C5}
   #C24 = dart.core::pragma {name:#C1, options:#C23}
diff --git a/pkg/front_end/testcases/incremental/no_outline_change_35.yaml.world.1.expect b/pkg/front_end/testcases/incremental/no_outline_change_35.yaml.world.1.expect
index 72d79b8..e6e7693 100644
--- a/pkg/front_end/testcases/incremental/no_outline_change_35.yaml.world.1.expect
+++ b/pkg/front_end/testcases/incremental/no_outline_change_35.yaml.world.1.expect
@@ -62,14 +62,14 @@
   #C7 = dart.core::pragma {name:#C1, options:#C6}
   #C8 = dart.ffi::Double {}
   #C9 = 0
-  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
+  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
   #C11 = 8
-  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
+  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
   #C13 = 16
-  #C14 = <dart.core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
+  #C14 = <dart.core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
   #C15 = "vm:prefer-inline"
   #C16 = dart.core::pragma {name:#C15, options:#C5}
   #C17 = 24
   #C18 = 20
-  #C19 = <dart.core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17]
+  #C19 = <dart.core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17, #C17, #C17]
 }
diff --git a/pkg/front_end/testcases/incremental/no_outline_change_35.yaml.world.2.expect b/pkg/front_end/testcases/incremental/no_outline_change_35.yaml.world.2.expect
index d4ce18c..1134f3c 100644
--- a/pkg/front_end/testcases/incremental/no_outline_change_35.yaml.world.2.expect
+++ b/pkg/front_end/testcases/incremental/no_outline_change_35.yaml.world.2.expect
@@ -63,14 +63,14 @@
   #C7 = dart.core::pragma {name:#C1, options:#C6}
   #C8 = dart.ffi::Double {}
   #C9 = 0
-  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
+  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
   #C11 = 8
-  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
+  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
   #C13 = 16
-  #C14 = <dart.core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
+  #C14 = <dart.core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
   #C15 = "vm:prefer-inline"
   #C16 = dart.core::pragma {name:#C15, options:#C5}
   #C17 = 24
   #C18 = 20
-  #C19 = <dart.core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17]
+  #C19 = <dart.core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17, #C17, #C17]
 }
diff --git a/pkg/front_end/testcases/incremental/no_outline_change_35.yaml.world.3.expect b/pkg/front_end/testcases/incremental/no_outline_change_35.yaml.world.3.expect
index 4bfcd2e..e52f835 100644
--- a/pkg/front_end/testcases/incremental/no_outline_change_35.yaml.world.3.expect
+++ b/pkg/front_end/testcases/incremental/no_outline_change_35.yaml.world.3.expect
@@ -64,14 +64,14 @@
   #C7 = dart.core::pragma {name:#C1, options:#C6}
   #C8 = dart.ffi::Double {}
   #C9 = 0
-  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
+  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
   #C11 = 8
-  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
+  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
   #C13 = 16
-  #C14 = <dart.core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
+  #C14 = <dart.core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
   #C15 = "vm:prefer-inline"
   #C16 = dart.core::pragma {name:#C15, options:#C5}
   #C17 = 24
   #C18 = 20
-  #C19 = <dart.core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17]
+  #C19 = <dart.core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17, #C17, #C17]
 }
diff --git a/pkg/front_end/testcases/incremental/no_outline_change_48_ffi.yaml.world.1.expect b/pkg/front_end/testcases/incremental/no_outline_change_48_ffi.yaml.world.1.expect
index 3f3d73e..a86189c 100644
--- a/pkg/front_end/testcases/incremental/no_outline_change_48_ffi.yaml.world.1.expect
+++ b/pkg/front_end/testcases/incremental/no_outline_change_48_ffi.yaml.world.1.expect
@@ -82,26 +82,26 @@
   #C7 = dart.core::pragma {name:#C1, options:#C6}
   #C8 = dart.ffi::Uint8 {}
   #C9 = 0
-  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
+  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
   #C11 = 1
-  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
+  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
   #C13 = dart.ffi::Uint64 {}
   #C14 = 8
   #C15 = 4
-  #C16 = <dart.core::int*>[#C14, #C14, #C15, #C14, #C14, #C14, #C15, #C14, #C14, #C14, #C14, #C15, #C14, #C14, #C14, #C14, #C14, #C14]
+  #C16 = <dart.core::int*>[#C14, #C14, #C15, #C14, #C14, #C14, #C15, #C14, #C14, #C14, #C14, #C15, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14]
   #C17 = "vm:prefer-inline"
   #C18 = dart.core::pragma {name:#C17, options:#C5}
   #C19 = 16
   #C20 = 12
-  #C21 = <dart.core::int*>[#C19, #C19, #C20, #C19, #C19, #C19, #C20, #C19, #C19, #C19, #C19, #C20, #C19, #C19, #C19, #C19, #C19, #C19]
+  #C21 = <dart.core::int*>[#C19, #C19, #C20, #C19, #C19, #C19, #C20, #C19, #C19, #C19, #C19, #C20, #C19, #C19, #C19, #C19, #C19, #C19, #C19, #C19]
   #C22 = TypeLiteralConstant(lib::Y)
   #C23 = <dart.core::Type>[#C22, #C22, #C2]
   #C24 = dart.ffi::_FfiStructLayout {fieldTypes:#C23, packing:#C5}
   #C25 = dart.core::pragma {name:#C1, options:#C24}
   #C26 = 32
   #C27 = 24
-  #C28 = <dart.core::int*>[#C26, #C26, #C27, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C26, #C26]
+  #C28 = <dart.core::int*>[#C26, #C26, #C27, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C26, #C26, #C26, #C26]
   #C29 = 40
   #C30 = 28
-  #C31 = <dart.core::int*>[#C29, #C29, #C30, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C29, #C29]
+  #C31 = <dart.core::int*>[#C29, #C29, #C30, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C29, #C29, #C29, #C29]
 }
diff --git a/pkg/front_end/testcases/incremental/no_outline_change_48_ffi.yaml.world.2.expect b/pkg/front_end/testcases/incremental/no_outline_change_48_ffi.yaml.world.2.expect
index e241ee1..a9302fa 100644
--- a/pkg/front_end/testcases/incremental/no_outline_change_48_ffi.yaml.world.2.expect
+++ b/pkg/front_end/testcases/incremental/no_outline_change_48_ffi.yaml.world.2.expect
@@ -82,26 +82,26 @@
   #C7 = dart.core::pragma {name:#C1, options:#C6}
   #C8 = dart.ffi::Uint8 {}
   #C9 = 0
-  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
+  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
   #C11 = dart.ffi::Uint64 {}
   #C12 = 8
   #C13 = 4
-  #C14 = <dart.core::int*>[#C12, #C12, #C13, #C12, #C12, #C12, #C13, #C12, #C12, #C12, #C12, #C13, #C12, #C12, #C12, #C12, #C12, #C12]
+  #C14 = <dart.core::int*>[#C12, #C12, #C13, #C12, #C12, #C12, #C13, #C12, #C12, #C12, #C12, #C13, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12]
   #C15 = 16
   #C16 = 12
-  #C17 = <dart.core::int*>[#C15, #C15, #C16, #C15, #C15, #C15, #C16, #C15, #C15, #C15, #C15, #C16, #C15, #C15, #C15, #C15, #C15, #C15]
+  #C17 = <dart.core::int*>[#C15, #C15, #C16, #C15, #C15, #C15, #C16, #C15, #C15, #C15, #C15, #C16, #C15, #C15, #C15, #C15, #C15, #C15, #C15, #C15]
   #C18 = "vm:prefer-inline"
   #C19 = dart.core::pragma {name:#C18, options:#C5}
   #C20 = 24
-  #C21 = <dart.core::int*>[#C20, #C20, #C15, #C20, #C20, #C20, #C15, #C20, #C20, #C20, #C20, #C15, #C20, #C20, #C20, #C20, #C20, #C20]
+  #C21 = <dart.core::int*>[#C20, #C20, #C15, #C20, #C20, #C20, #C15, #C20, #C20, #C20, #C20, #C15, #C20, #C20, #C20, #C20, #C20, #C20, #C20, #C20]
   #C22 = TypeLiteralConstant(lib::Y)
   #C23 = <dart.core::Type>[#C22, #C22, #C2]
   #C24 = dart.ffi::_FfiStructLayout {fieldTypes:#C23, packing:#C5}
   #C25 = dart.core::pragma {name:#C1, options:#C24}
   #C26 = 48
   #C27 = 32
-  #C28 = <dart.core::int*>[#C26, #C26, #C27, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C26, #C26]
+  #C28 = <dart.core::int*>[#C26, #C26, #C27, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C26, #C26, #C26, #C26]
   #C29 = 56
   #C30 = 36
-  #C31 = <dart.core::int*>[#C29, #C29, #C30, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C29, #C29]
+  #C31 = <dart.core::int*>[#C29, #C29, #C30, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C29, #C29, #C29, #C29]
 }
diff --git a/pkg/front_end/testcases/incremental/no_outline_change_49_ffi.yaml.world.1.expect b/pkg/front_end/testcases/incremental/no_outline_change_49_ffi.yaml.world.1.expect
index 3f3d73e..a86189c 100644
--- a/pkg/front_end/testcases/incremental/no_outline_change_49_ffi.yaml.world.1.expect
+++ b/pkg/front_end/testcases/incremental/no_outline_change_49_ffi.yaml.world.1.expect
@@ -82,26 +82,26 @@
   #C7 = dart.core::pragma {name:#C1, options:#C6}
   #C8 = dart.ffi::Uint8 {}
   #C9 = 0
-  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
+  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
   #C11 = 1
-  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
+  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
   #C13 = dart.ffi::Uint64 {}
   #C14 = 8
   #C15 = 4
-  #C16 = <dart.core::int*>[#C14, #C14, #C15, #C14, #C14, #C14, #C15, #C14, #C14, #C14, #C14, #C15, #C14, #C14, #C14, #C14, #C14, #C14]
+  #C16 = <dart.core::int*>[#C14, #C14, #C15, #C14, #C14, #C14, #C15, #C14, #C14, #C14, #C14, #C15, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14]
   #C17 = "vm:prefer-inline"
   #C18 = dart.core::pragma {name:#C17, options:#C5}
   #C19 = 16
   #C20 = 12
-  #C21 = <dart.core::int*>[#C19, #C19, #C20, #C19, #C19, #C19, #C20, #C19, #C19, #C19, #C19, #C20, #C19, #C19, #C19, #C19, #C19, #C19]
+  #C21 = <dart.core::int*>[#C19, #C19, #C20, #C19, #C19, #C19, #C20, #C19, #C19, #C19, #C19, #C20, #C19, #C19, #C19, #C19, #C19, #C19, #C19, #C19]
   #C22 = TypeLiteralConstant(lib::Y)
   #C23 = <dart.core::Type>[#C22, #C22, #C2]
   #C24 = dart.ffi::_FfiStructLayout {fieldTypes:#C23, packing:#C5}
   #C25 = dart.core::pragma {name:#C1, options:#C24}
   #C26 = 32
   #C27 = 24
-  #C28 = <dart.core::int*>[#C26, #C26, #C27, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C26, #C26]
+  #C28 = <dart.core::int*>[#C26, #C26, #C27, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C26, #C26, #C26, #C26]
   #C29 = 40
   #C30 = 28
-  #C31 = <dart.core::int*>[#C29, #C29, #C30, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C29, #C29]
+  #C31 = <dart.core::int*>[#C29, #C29, #C30, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C29, #C29, #C29, #C29]
 }
diff --git a/pkg/front_end/testcases/incremental/no_outline_change_49_ffi.yaml.world.2.expect b/pkg/front_end/testcases/incremental/no_outline_change_49_ffi.yaml.world.2.expect
index e241ee1..a9302fa 100644
--- a/pkg/front_end/testcases/incremental/no_outline_change_49_ffi.yaml.world.2.expect
+++ b/pkg/front_end/testcases/incremental/no_outline_change_49_ffi.yaml.world.2.expect
@@ -82,26 +82,26 @@
   #C7 = dart.core::pragma {name:#C1, options:#C6}
   #C8 = dart.ffi::Uint8 {}
   #C9 = 0
-  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
+  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
   #C11 = dart.ffi::Uint64 {}
   #C12 = 8
   #C13 = 4
-  #C14 = <dart.core::int*>[#C12, #C12, #C13, #C12, #C12, #C12, #C13, #C12, #C12, #C12, #C12, #C13, #C12, #C12, #C12, #C12, #C12, #C12]
+  #C14 = <dart.core::int*>[#C12, #C12, #C13, #C12, #C12, #C12, #C13, #C12, #C12, #C12, #C12, #C13, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12]
   #C15 = 16
   #C16 = 12
-  #C17 = <dart.core::int*>[#C15, #C15, #C16, #C15, #C15, #C15, #C16, #C15, #C15, #C15, #C15, #C16, #C15, #C15, #C15, #C15, #C15, #C15]
+  #C17 = <dart.core::int*>[#C15, #C15, #C16, #C15, #C15, #C15, #C16, #C15, #C15, #C15, #C15, #C16, #C15, #C15, #C15, #C15, #C15, #C15, #C15, #C15]
   #C18 = "vm:prefer-inline"
   #C19 = dart.core::pragma {name:#C18, options:#C5}
   #C20 = 24
-  #C21 = <dart.core::int*>[#C20, #C20, #C15, #C20, #C20, #C20, #C15, #C20, #C20, #C20, #C20, #C15, #C20, #C20, #C20, #C20, #C20, #C20]
+  #C21 = <dart.core::int*>[#C20, #C20, #C15, #C20, #C20, #C20, #C15, #C20, #C20, #C20, #C20, #C15, #C20, #C20, #C20, #C20, #C20, #C20, #C20, #C20]
   #C22 = TypeLiteralConstant(lib::Y)
   #C23 = <dart.core::Type>[#C22, #C22, #C2]
   #C24 = dart.ffi::_FfiStructLayout {fieldTypes:#C23, packing:#C5}
   #C25 = dart.core::pragma {name:#C1, options:#C24}
   #C26 = 48
   #C27 = 32
-  #C28 = <dart.core::int*>[#C26, #C26, #C27, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C26, #C26]
+  #C28 = <dart.core::int*>[#C26, #C26, #C27, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C26, #C26, #C26, #C26]
   #C29 = 56
   #C30 = 36
-  #C31 = <dart.core::int*>[#C29, #C29, #C30, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C29, #C29]
+  #C31 = <dart.core::int*>[#C29, #C29, #C30, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C29, #C29, #C29, #C29]
 }
diff --git a/pkg/front_end/testcases/incremental/no_outline_change_50_ffi.yaml.world.1.expect b/pkg/front_end/testcases/incremental/no_outline_change_50_ffi.yaml.world.1.expect
index 3c26667..6c37a20d 100644
--- a/pkg/front_end/testcases/incremental/no_outline_change_50_ffi.yaml.world.1.expect
+++ b/pkg/front_end/testcases/incremental/no_outline_change_50_ffi.yaml.world.1.expect
@@ -234,26 +234,26 @@
   #C7 = dart.core::pragma {name:#C1, options:#C6}
   #C8 = dart.ffi::Uint8 {}
   #C9 = 0
-  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
+  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
   #C11 = 1
-  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
+  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
   #C13 = dart.ffi::Uint64 {}
   #C14 = 8
   #C15 = 4
-  #C16 = <dart.core::int*>[#C14, #C14, #C15, #C14, #C14, #C14, #C15, #C14, #C14, #C14, #C14, #C15, #C14, #C14, #C14, #C14, #C14, #C14]
+  #C16 = <dart.core::int*>[#C14, #C14, #C15, #C14, #C14, #C14, #C15, #C14, #C14, #C14, #C14, #C15, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14]
   #C17 = "vm:prefer-inline"
   #C18 = dart.core::pragma {name:#C17, options:#C5}
   #C19 = 16
   #C20 = 12
-  #C21 = <dart.core::int*>[#C19, #C19, #C20, #C19, #C19, #C19, #C20, #C19, #C19, #C19, #C19, #C20, #C19, #C19, #C19, #C19, #C19, #C19]
+  #C21 = <dart.core::int*>[#C19, #C19, #C20, #C19, #C19, #C19, #C20, #C19, #C19, #C19, #C19, #C20, #C19, #C19, #C19, #C19, #C19, #C19, #C19, #C19]
   #C22 = TypeLiteralConstant(lib::Y)
   #C23 = <dart.core::Type>[#C22, #C22, #C2]
   #C24 = dart.ffi::_FfiStructLayout {fieldTypes:#C23, packing:#C5}
   #C25 = dart.core::pragma {name:#C1, options:#C24}
   #C26 = 32
   #C27 = 24
-  #C28 = <dart.core::int*>[#C26, #C26, #C27, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C26, #C26]
+  #C28 = <dart.core::int*>[#C26, #C26, #C27, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C26, #C26, #C26, #C26]
   #C29 = 40
   #C30 = 28
-  #C31 = <dart.core::int*>[#C29, #C29, #C30, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C29, #C29]
+  #C31 = <dart.core::int*>[#C29, #C29, #C30, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C29, #C29, #C29, #C29]
 }
diff --git a/pkg/front_end/testcases/incremental/no_outline_change_50_ffi.yaml.world.2.expect b/pkg/front_end/testcases/incremental/no_outline_change_50_ffi.yaml.world.2.expect
index 7c86d77..e7c047a 100644
--- a/pkg/front_end/testcases/incremental/no_outline_change_50_ffi.yaml.world.2.expect
+++ b/pkg/front_end/testcases/incremental/no_outline_change_50_ffi.yaml.world.2.expect
@@ -234,26 +234,26 @@
   #C7 = dart.core::pragma {name:#C1, options:#C6}
   #C8 = dart.ffi::Uint8 {}
   #C9 = 0
-  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
+  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
   #C11 = dart.ffi::Uint64 {}
   #C12 = 8
   #C13 = 4
-  #C14 = <dart.core::int*>[#C12, #C12, #C13, #C12, #C12, #C12, #C13, #C12, #C12, #C12, #C12, #C13, #C12, #C12, #C12, #C12, #C12, #C12]
+  #C14 = <dart.core::int*>[#C12, #C12, #C13, #C12, #C12, #C12, #C13, #C12, #C12, #C12, #C12, #C13, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12]
   #C15 = 16
   #C16 = 12
-  #C17 = <dart.core::int*>[#C15, #C15, #C16, #C15, #C15, #C15, #C16, #C15, #C15, #C15, #C15, #C16, #C15, #C15, #C15, #C15, #C15, #C15]
+  #C17 = <dart.core::int*>[#C15, #C15, #C16, #C15, #C15, #C15, #C16, #C15, #C15, #C15, #C15, #C16, #C15, #C15, #C15, #C15, #C15, #C15, #C15, #C15]
   #C18 = "vm:prefer-inline"
   #C19 = dart.core::pragma {name:#C18, options:#C5}
   #C20 = 24
-  #C21 = <dart.core::int*>[#C20, #C20, #C15, #C20, #C20, #C20, #C15, #C20, #C20, #C20, #C20, #C15, #C20, #C20, #C20, #C20, #C20, #C20]
+  #C21 = <dart.core::int*>[#C20, #C20, #C15, #C20, #C20, #C20, #C15, #C20, #C20, #C20, #C20, #C15, #C20, #C20, #C20, #C20, #C20, #C20, #C20, #C20]
   #C22 = TypeLiteralConstant(lib::Y)
   #C23 = <dart.core::Type>[#C22, #C22, #C2]
   #C24 = dart.ffi::_FfiStructLayout {fieldTypes:#C23, packing:#C5}
   #C25 = dart.core::pragma {name:#C1, options:#C24}
   #C26 = 48
   #C27 = 32
-  #C28 = <dart.core::int*>[#C26, #C26, #C27, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C26, #C26]
+  #C28 = <dart.core::int*>[#C26, #C26, #C27, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C26, #C26, #C26, #C26]
   #C29 = 56
   #C30 = 36
-  #C31 = <dart.core::int*>[#C29, #C29, #C30, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C29, #C29]
+  #C31 = <dart.core::int*>[#C29, #C29, #C30, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C29, #C29, #C29, #C29]
 }
diff --git a/pkg/front_end/testcases/incremental/regress_46004.yaml.world.1.expect b/pkg/front_end/testcases/incremental/regress_46004.yaml.world.1.expect
index 1cb94e2..81904cb 100644
--- a/pkg/front_end/testcases/incremental/regress_46004.yaml.world.1.expect
+++ b/pkg/front_end/testcases/incremental/regress_46004.yaml.world.1.expect
@@ -55,12 +55,12 @@
   #C5 = dart.ffi::_FfiStructLayout {fieldTypes:#C3, packing:#C4}
   #C6 = dart.core::pragma {name:#C1, options:#C5}
   #C7 = 0
-  #C8 = <dart.core::int*>[#C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7]
+  #C8 = <dart.core::int*>[#C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7]
   #C9 = "vm:prefer-inline"
   #C10 = dart.core::pragma {name:#C9, options:#C4}
   #C11 = 4
   #C12 = 8
-  #C13 = <dart.core::int*>[#C11, #C12, #C11, #C12, #C12, #C12, #C11, #C12, #C12, #C11, #C12, #C11, #C12, #C12, #C12, #C12, #C11, #C12]
+  #C13 = <dart.core::int*>[#C11, #C12, #C11, #C12, #C12, #C12, #C11, #C12, #C12, #C11, #C12, #C11, #C12, #C11, #C12, #C12, #C12, #C12, #C11, #C12]
   #C14 = TypeLiteralConstant(lib::COMObject)
   #C15 = <dart.core::Type>[#C14]
   #C16 = dart.ffi::_FfiStructLayout {fieldTypes:#C15, packing:#C4}
diff --git a/pkg/front_end/testcases/incremental/regress_46004.yaml.world.2.expect b/pkg/front_end/testcases/incremental/regress_46004.yaml.world.2.expect
index 1cb94e2..81904cb 100644
--- a/pkg/front_end/testcases/incremental/regress_46004.yaml.world.2.expect
+++ b/pkg/front_end/testcases/incremental/regress_46004.yaml.world.2.expect
@@ -55,12 +55,12 @@
   #C5 = dart.ffi::_FfiStructLayout {fieldTypes:#C3, packing:#C4}
   #C6 = dart.core::pragma {name:#C1, options:#C5}
   #C7 = 0
-  #C8 = <dart.core::int*>[#C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7]
+  #C8 = <dart.core::int*>[#C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7]
   #C9 = "vm:prefer-inline"
   #C10 = dart.core::pragma {name:#C9, options:#C4}
   #C11 = 4
   #C12 = 8
-  #C13 = <dart.core::int*>[#C11, #C12, #C11, #C12, #C12, #C12, #C11, #C12, #C12, #C11, #C12, #C11, #C12, #C12, #C12, #C12, #C11, #C12]
+  #C13 = <dart.core::int*>[#C11, #C12, #C11, #C12, #C12, #C12, #C11, #C12, #C12, #C11, #C12, #C11, #C12, #C11, #C12, #C12, #C12, #C12, #C11, #C12]
   #C14 = TypeLiteralConstant(lib::COMObject)
   #C15 = <dart.core::Type>[#C14]
   #C16 = dart.ffi::_FfiStructLayout {fieldTypes:#C15, packing:#C4}
diff --git a/pkg/front_end/testcases/nnbd/ffi_sample.dart.strong.expect b/pkg/front_end/testcases/nnbd/ffi_sample.dart.strong.expect
index f21662b..35dda10 100644
--- a/pkg/front_end/testcases/nnbd/ffi_sample.dart.strong.expect
+++ b/pkg/front_end/testcases/nnbd/ffi_sample.dart.strong.expect
@@ -34,7 +34,7 @@
 
 Constructor coverage from constants:
 org-dartlang-testcase:///ffi_sample.dart:
-- Double. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:142:9)
+- Double. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:144:9)
 - _NativeDouble. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:34:9)
 - NativeType. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:12:9)
 - Object. (from org-dartlang-sdk:///sdk/lib/core/object.dart:25:9)
diff --git a/pkg/front_end/testcases/nnbd/ffi_sample.dart.strong.transformed.expect b/pkg/front_end/testcases/nnbd/ffi_sample.dart.strong.transformed.expect
index 5082fb7..02dfd2a 100644
--- a/pkg/front_end/testcases/nnbd/ffi_sample.dart.strong.transformed.expect
+++ b/pkg/front_end/testcases/nnbd/ffi_sample.dart.strong.transformed.expect
@@ -50,22 +50,22 @@
   #C7 = core::pragma {name:#C1, options:#C6}
   #C8 = ffi::Double {}
   #C9 = 0
-  #C10 = <core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
+  #C10 = <core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
   #C11 = 8
-  #C12 = <core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
+  #C12 = <core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
   #C13 = 16
-  #C14 = <core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
+  #C14 = <core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
   #C15 = "vm:prefer-inline"
   #C16 = core::pragma {name:#C15, options:#C5}
   #C17 = 24
   #C18 = 20
-  #C19 = <core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17]
+  #C19 = <core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17, #C17, #C17]
 }
 
 
 Constructor coverage from constants:
 org-dartlang-testcase:///ffi_sample.dart:
-- Double. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:142:9)
+- Double. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:144:9)
 - _NativeDouble. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:34:9)
 - NativeType. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:12:9)
 - Object. (from org-dartlang-sdk:///sdk/lib/core/object.dart:25:9)
diff --git a/pkg/front_end/testcases/nnbd/ffi_sample.dart.weak.expect b/pkg/front_end/testcases/nnbd/ffi_sample.dart.weak.expect
index f21662b..35dda10 100644
--- a/pkg/front_end/testcases/nnbd/ffi_sample.dart.weak.expect
+++ b/pkg/front_end/testcases/nnbd/ffi_sample.dart.weak.expect
@@ -34,7 +34,7 @@
 
 Constructor coverage from constants:
 org-dartlang-testcase:///ffi_sample.dart:
-- Double. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:142:9)
+- Double. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:144:9)
 - _NativeDouble. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:34:9)
 - NativeType. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:12:9)
 - Object. (from org-dartlang-sdk:///sdk/lib/core/object.dart:25:9)
diff --git a/pkg/front_end/testcases/nnbd/ffi_sample.dart.weak.modular.expect b/pkg/front_end/testcases/nnbd/ffi_sample.dart.weak.modular.expect
index f21662b..35dda10 100644
--- a/pkg/front_end/testcases/nnbd/ffi_sample.dart.weak.modular.expect
+++ b/pkg/front_end/testcases/nnbd/ffi_sample.dart.weak.modular.expect
@@ -34,7 +34,7 @@
 
 Constructor coverage from constants:
 org-dartlang-testcase:///ffi_sample.dart:
-- Double. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:142:9)
+- Double. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:144:9)
 - _NativeDouble. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:34:9)
 - NativeType. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:12:9)
 - Object. (from org-dartlang-sdk:///sdk/lib/core/object.dart:25:9)
diff --git a/pkg/front_end/testcases/nnbd/ffi_sample.dart.weak.transformed.expect b/pkg/front_end/testcases/nnbd/ffi_sample.dart.weak.transformed.expect
index 5082fb7..02dfd2a 100644
--- a/pkg/front_end/testcases/nnbd/ffi_sample.dart.weak.transformed.expect
+++ b/pkg/front_end/testcases/nnbd/ffi_sample.dart.weak.transformed.expect
@@ -50,22 +50,22 @@
   #C7 = core::pragma {name:#C1, options:#C6}
   #C8 = ffi::Double {}
   #C9 = 0
-  #C10 = <core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
+  #C10 = <core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
   #C11 = 8
-  #C12 = <core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
+  #C12 = <core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
   #C13 = 16
-  #C14 = <core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
+  #C14 = <core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
   #C15 = "vm:prefer-inline"
   #C16 = core::pragma {name:#C15, options:#C5}
   #C17 = 24
   #C18 = 20
-  #C19 = <core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17]
+  #C19 = <core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17, #C17, #C17]
 }
 
 
 Constructor coverage from constants:
 org-dartlang-testcase:///ffi_sample.dart:
-- Double. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:142:9)
+- Double. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:144:9)
 - _NativeDouble. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:34:9)
 - NativeType. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:12:9)
 - Object. (from org-dartlang-sdk:///sdk/lib/core/object.dart:25:9)
diff --git a/pkg/front_end/testcases/nnbd/ffi_struct_inline_array.dart.strong.transformed.expect b/pkg/front_end/testcases/nnbd/ffi_struct_inline_array.dart.strong.transformed.expect
index b41e171..525cfd9 100644
--- a/pkg/front_end/testcases/nnbd/ffi_struct_inline_array.dart.strong.transformed.expect
+++ b/pkg/front_end/testcases/nnbd/ffi_struct_inline_array.dart.strong.transformed.expect
@@ -42,8 +42,8 @@
   #C8 = core::pragma {name:#C1, options:#C7}
   #C9 = ffi::_ArraySize<ffi::NativeType> {dimension1:#C3, dimension2:#C6, dimension3:#C6, dimension4:#C6, dimension5:#C6, dimensions:#C6}
   #C10 = 0
-  #C11 = <core::int*>[#C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10]
-  #C12 = <core::int*>[#C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3]
+  #C11 = <core::int*>[#C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10]
+  #C12 = <core::int*>[#C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3]
   #C13 = <core::int*>[]
   #C14 = "vm:prefer-inline"
   #C15 = core::pragma {name:#C14, options:#C6}
diff --git a/pkg/front_end/testcases/nnbd/ffi_struct_inline_array.dart.weak.transformed.expect b/pkg/front_end/testcases/nnbd/ffi_struct_inline_array.dart.weak.transformed.expect
index c21a02f..b38ad34 100644
--- a/pkg/front_end/testcases/nnbd/ffi_struct_inline_array.dart.weak.transformed.expect
+++ b/pkg/front_end/testcases/nnbd/ffi_struct_inline_array.dart.weak.transformed.expect
@@ -42,8 +42,8 @@
   #C8 = core::pragma {name:#C1, options:#C7}
   #C9 = ffi::_ArraySize<ffi::NativeType*> {dimension1:#C3, dimension2:#C6, dimension3:#C6, dimension4:#C6, dimension5:#C6, dimensions:#C6}
   #C10 = 0
-  #C11 = <core::int*>[#C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10]
-  #C12 = <core::int*>[#C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3]
+  #C11 = <core::int*>[#C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10]
+  #C12 = <core::int*>[#C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3]
   #C13 = <core::int*>[]
   #C14 = "vm:prefer-inline"
   #C15 = core::pragma {name:#C14, options:#C6}
diff --git a/pkg/front_end/testcases/nnbd/ffi_struct_inline_array_multi_dimensional.dart.strong.transformed.expect b/pkg/front_end/testcases/nnbd/ffi_struct_inline_array_multi_dimensional.dart.strong.transformed.expect
index dec8714..ecf21c0 100644
--- a/pkg/front_end/testcases/nnbd/ffi_struct_inline_array_multi_dimensional.dart.strong.transformed.expect
+++ b/pkg/front_end/testcases/nnbd/ffi_struct_inline_array_multi_dimensional.dart.strong.transformed.expect
@@ -67,8 +67,8 @@
   #C9 = 2
   #C10 = ffi::_ArraySize<ffi::NativeType> {dimension1:#C9, dimension2:#C9, dimension3:#C9, dimension4:#C6, dimension5:#C6, dimensions:#C6}
   #C11 = 0
-  #C12 = <core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
-  #C13 = <core::int*>[#C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3]
+  #C12 = <core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
+  #C13 = <core::int*>[#C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3]
   #C14 = <core::int*>[#C9, #C9]
   #C15 = "vm:prefer-inline"
   #C16 = core::pragma {name:#C15, options:#C6}
diff --git a/pkg/front_end/testcases/nnbd/ffi_struct_inline_array_multi_dimensional.dart.weak.transformed.expect b/pkg/front_end/testcases/nnbd/ffi_struct_inline_array_multi_dimensional.dart.weak.transformed.expect
index e5881fb..a9ba39c 100644
--- a/pkg/front_end/testcases/nnbd/ffi_struct_inline_array_multi_dimensional.dart.weak.transformed.expect
+++ b/pkg/front_end/testcases/nnbd/ffi_struct_inline_array_multi_dimensional.dart.weak.transformed.expect
@@ -67,8 +67,8 @@
   #C9 = 2
   #C10 = ffi::_ArraySize<ffi::NativeType*> {dimension1:#C9, dimension2:#C9, dimension3:#C9, dimension4:#C6, dimension5:#C6, dimensions:#C6}
   #C11 = 0
-  #C12 = <core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
-  #C13 = <core::int*>[#C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3]
+  #C12 = <core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
+  #C13 = <core::int*>[#C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3]
   #C14 = <core::int*>[#C9, #C9]
   #C15 = "vm:prefer-inline"
   #C16 = core::pragma {name:#C15, options:#C6}
diff --git a/pkg/smith/lib/configuration.dart b/pkg/smith/lib/configuration.dart
index b1576bb7..bac7ac7 100644
--- a/pkg/smith/lib/configuration.dart
+++ b/pkg/smith/lib/configuration.dart
@@ -584,6 +584,10 @@
   static const simarmv6 = Architecture._('simarmv6');
   static const simarm64 = Architecture._('simarm64');
   static const simarm64c = Architecture._('simarm64c');
+  static const riscv32 = Architecture._('riscv32');
+  static const riscv64 = Architecture._('riscv64');
+  static const simriscv32 = Architecture._('simriscv32');
+  static const simriscv64 = Architecture._('simriscv64');
 
   static final List<String> names = _all.keys.toList();
 
@@ -600,6 +604,10 @@
     simarmv6,
     simarm64,
     simarm64c,
+    riscv32,
+    riscv64,
+    simriscv32,
+    simriscv64,
   ], key: (architecture) => (architecture as Architecture).name);
 
   static Architecture find(String name) {
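
Note: registering the four new constants in `_all` is what makes them resolvable by
name, e.g. as `-a` values for the test runner. A usage sketch, assuming `package:smith`
is on the package path:

    import 'package:smith/configuration.dart';

    void main() {
      // `find` resolves the name through `_all`; unknown names are rejected.
      final arch = Architecture.find('simriscv64');
      print(arch.name); // simriscv64
    }
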
diff --git a/pkg/test_runner/lib/src/compiler_configuration.dart b/pkg/test_runner/lib/src/compiler_configuration.dart
index 6185d8f..16ac923 100644
--- a/pkg/test_runner/lib/src/compiler_configuration.dart
+++ b/pkg/test_runner/lib/src/compiler_configuration.dart
@@ -99,6 +99,8 @@
         if (configuration.architecture == Architecture.simarm ||
             configuration.architecture == Architecture.simarm64 ||
             configuration.architecture == Architecture.simarm64c ||
+            configuration.architecture == Architecture.simriscv32 ||
+            configuration.architecture == Architecture.simriscv64 ||
             configuration.system == System.android) {
           return VMKernelCompilerConfiguration(configuration);
         }
@@ -705,6 +707,16 @@
 
   bool get _isIA32 => _configuration.architecture == Architecture.ia32;
 
+  bool get _isRiscv32 => _configuration.architecture == Architecture.riscv32;
+
+  bool get _isSimRiscv32 =>
+      _configuration.architecture == Architecture.simriscv32;
+
+  bool get _isRiscv64 => _configuration.architecture == Architecture.riscv64;
+
+  bool get _isSimRiscv64 =>
+      _configuration.architecture == Architecture.simriscv64;
+
   bool get _isAot => true;
 
   PrecompilerCompilerConfiguration(TestConfiguration configuration)
@@ -880,6 +892,10 @@
         cc = 'arm-linux-gnueabihf-gcc';
       } else if (_isSimArm64 || (_isArm64 && _configuration.useQemu)) {
         cc = 'aarch64-linux-gnu-gcc';
+      } else if (_isSimRiscv32 || (_isRiscv32 && _configuration.useQemu)) {
+        cc = 'riscv32-linux-gnu-gcc';
+      } else if (_isSimRiscv64 || (_isRiscv64 && _configuration.useQemu)) {
+        cc = 'riscv64-linux-gnu-gcc';
       } else {
         cc = 'gcc';
       }
@@ -911,6 +927,10 @@
       case Architecture.arm_x64:
       case Architecture.arm64:
       case Architecture.arm64c:
+      case Architecture.riscv32:
+      case Architecture.riscv64:
+      case Architecture.simriscv32:
+      case Architecture.simriscv64:
         ccFlags = null;
         break;
       default:
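
Note: for AOT tests the generated assembly must be assembled with a cross-compiler that
matches the simulated or QEMU-hosted target. A condensed, standalone restatement of the
selection above (hypothetical signature; the real code reads getters on the test
configuration):

    String pickCC({required String arch, required bool useQemu}) {
      if (arch == 'simriscv32' || (arch == 'riscv32' && useQemu)) {
        return 'riscv32-linux-gnu-gcc';
      }
      if (arch == 'simriscv64' || (arch == 'riscv64' && useQemu)) {
        return 'riscv64-linux-gnu-gcc';
      }
      return 'gcc'; // host-native fallback
    }
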
diff --git a/pkg/test_runner/lib/src/options.dart b/pkg/test_runner/lib/src/options.dart
index c028e33..cd2c23c 100644
--- a/pkg/test_runner/lib/src/options.dart
+++ b/pkg/test_runner/lib/src/options.dart
@@ -143,8 +143,8 @@
 Allowed values are:
 all
 ia32, x64
-arm, armv6, arm64,
-simarm, simarmv6, simarm64, arm_x64''',
+arm, arm64, simarm, simarm64, arm_x64
+riscv32, riscv64, simriscv32, simriscv64''',
         abbr: 'a',
         values: ['all', ...Architecture.names],
         defaultsTo: Architecture.x64.name,
diff --git a/pkg/test_runner/lib/src/runtime_configuration.dart b/pkg/test_runner/lib/src/runtime_configuration.dart
index 1810702..2341361 100644
--- a/pkg/test_runner/lib/src/runtime_configuration.dart
+++ b/pkg/test_runner/lib/src/runtime_configuration.dart
@@ -220,10 +220,18 @@
 
 class QemuConfig {
   static const all = <Architecture, QemuConfig>{
+    Architecture.ia32:
+        QemuConfig('qemu-i386', ['-L', '/usr/lib/i386-linux-gnu/']),
+    Architecture.x64:
+        QemuConfig('qemu-x86_64', ['-L', '/usr/lib/x86_64-linux-gnu/']),
     Architecture.arm:
         QemuConfig('qemu-arm', ['-L', '/usr/arm-linux-gnueabihf/']),
     Architecture.arm64:
         QemuConfig('qemu-aarch64', ['-L', '/usr/aarch64-linux-gnu/']),
+    Architecture.riscv32:
+        QemuConfig('qemu-riscv32', ['-L', '/usr/riscv32-linux-gnu/']),
+    Architecture.riscv64:
+        QemuConfig('qemu-riscv64', ['-L', '/usr/riscv64-linux-gnu/']),
   };
 
   final String executable;
@@ -253,6 +261,8 @@
       case Architecture.armv6:
       case Architecture.simarm64:
       case Architecture.simarm64c:
+      case Architecture.simriscv32:
+      case Architecture.simriscv64:
         multiplier *= 4;
         break;
     }
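
Note: each QemuConfig pairs a qemu-user binary with a `-L` sysroot so the foreign
architecture's dynamic linker and libraries resolve; simulated architectures also pick
up the 4x timeout multiplier below. A sketch of how an entry wraps a command line
(hypothetical helper and output path):

    const executable = 'qemu-riscv64';
    const arguments = ['-L', '/usr/riscv64-linux-gnu/'];

    List<String> wrap(List<String> command) =>
        [executable, ...arguments, ...command];

    void main() {
      print(wrap(['out/ReleaseRISCV64/dart', 'hello.dart']).join(' '));
    }
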
diff --git a/pkg/test_runner/lib/src/test_case.dart b/pkg/test_runner/lib/src/test_case.dart
index 995bf0a..2597e45 100644
--- a/pkg/test_runner/lib/src/test_case.dart
+++ b/pkg/test_runner/lib/src/test_case.dart
@@ -285,7 +285,9 @@
               executable = '/usr/bin/sample';
             } else if (io.Platform.isWindows) {
               var isX64 = command.executable.contains("X64") ||
-                  command.executable.contains("SIMARM64");
+                  command.executable.contains("SIMARM64") ||
+                  command.executable.contains("SIMARM64C") ||
+                  command.executable.contains("SIMRISCV64");
               if (configuration.windowsSdkPath != null) {
                 executable = configuration.windowsSdkPath +
                     "\\Debuggers\\${isX64 ? 'x64' : 'x86'}\\cdb.exe";
diff --git a/pkg/test_runner/lib/src/test_suite.dart b/pkg/test_runner/lib/src/test_suite.dart
index bae578b..5aeaf54 100644
--- a/pkg/test_runner/lib/src/test_suite.dart
+++ b/pkg/test_runner/lib/src/test_suite.dart
@@ -399,6 +399,8 @@
     "x64_linux",
     "x64_macos",
     "x64_win",
+    "riscv32_linux",
+    "riscv64_linux",
   ];
 
   FfiTestSuite(TestConfiguration configuration)
diff --git a/pkg/vm/lib/transformations/ffi/abi.dart b/pkg/vm/lib/transformations/ffi/abi.dart
index 04bc8e2..cc2d86b 100644
--- a/pkg/vm/lib/transformations/ffi/abi.dart
+++ b/pkg/vm/lib/transformations/ffi/abi.dart
@@ -12,6 +12,8 @@
   arm64,
   ia32,
   x64,
+  riscv32,
+  riscv64,
 }
 
 extension on _Architecture {
@@ -20,9 +22,11 @@
     switch (this) {
       case _Architecture.arm:
       case _Architecture.ia32:
+      case _Architecture.riscv32:
         return 4;
       case _Architecture.arm64:
       case _Architecture.x64:
+      case _Architecture.riscv64:
         return 8;
     }
   }
@@ -91,6 +95,12 @@
   /// The application binary interface for linux on the X64 architecture.
   static const linuxX64 = _linuxX64;
 
+  /// The application binary interface for linux on 32-bit RISC-V.
+  static const linuxRiscv32 = _linuxRiscv32;
+
+  /// The application binary interface for linux on 64-bit RISC-V.
+  static const linuxRiscv64 = _linuxRiscv64;
+
   /// The application binary interface for MacOS on the Arm64 architecture.
   static const macosArm64 = _macosArm64;
 
@@ -128,6 +138,8 @@
     linuxArm64,
     linuxIA32,
     linuxX64,
+    linuxRiscv32,
+    linuxRiscv64,
     macosArm64,
     macosX64,
     windowsArm64,
@@ -171,6 +183,8 @@
   static const _linuxArm64 = Abi._(_Architecture.arm64, _OS.linux);
   static const _linuxIA32 = Abi._(_Architecture.ia32, _OS.linux);
   static const _linuxX64 = Abi._(_Architecture.x64, _OS.linux);
+  static const _linuxRiscv32 = Abi._(_Architecture.riscv32, _OS.linux);
+  static const _linuxRiscv64 = Abi._(_Architecture.riscv64, _OS.linux);
   static const _macosArm64 = Abi._(_Architecture.arm64, _OS.macos);
   static const _macosX64 = Abi._(_Architecture.x64, _OS.macos);
   static const _windowsArm64 = Abi._(_Architecture.arm64, _OS.windows);
@@ -193,6 +207,8 @@
   Abi.linuxArm64: 'linuxArm64',
   Abi.linuxIA32: 'linuxIA32',
   Abi.linuxX64: 'linuxX64',
+  Abi.linuxRiscv32: 'linuxRiscv32',
+  Abi.linuxRiscv64: 'linuxRiscv64',
   Abi.macosArm64: 'macosArm64',
   Abi.macosX64: 'macosX64',
   Abi.windowsArm64: 'windowsArm64',
@@ -229,6 +245,7 @@
   Abi.iosX64: _wordSize64,
   Abi.linuxArm64: _wordSize64,
   Abi.linuxX64: _wordSize64,
+  Abi.linuxRiscv64: _wordSize64,
   Abi.macosArm64: _wordSize64,
   Abi.macosX64: _wordSize64,
   Abi.windowsArm64: _wordSize64,
@@ -240,6 +257,7 @@
   // _wordSize32Align64
   Abi.androidArm: _wordSize32Align64,
   Abi.linuxArm: _wordSize32Align64,
+  Abi.linuxRiscv32: _wordSize32Align64,
   Abi.windowsIA32: _wordSize32Align64,
 };
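
Note: linuxRiscv64 joins the plain 64-bit bucket, while linuxRiscv32 lands in
_wordSize32Align64 because the RV32 Linux psABI keeps 8-byte alignment for 8-byte
scalars (Int64, Double) despite the 4-byte word size, matching the ARM and Windows IA32
entries beside it. A sketch of the distinction (hypothetical enum; the real code keys
these maps by Abi):

    enum DataModel { wordSize32Align32, wordSize32Align64, wordSize64 }

    int int64Alignment(DataModel m) =>
        m == DataModel.wordSize32Align32 ? 4 : 8;

    void main() {
      print(int64Alignment(DataModel.wordSize32Align64)); // 8
    }
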
 
diff --git a/pkg/vm/testcases/transformations/ffi/abi_specific_int.dart b/pkg/vm/testcases/transformations/ffi/abi_specific_int.dart
index 50266bd..c3ff180 100644
--- a/pkg/vm/testcases/transformations/ffi/abi_specific_int.dart
+++ b/pkg/vm/testcases/transformations/ffi/abi_specific_int.dart
@@ -18,6 +18,8 @@
   Abi.linuxArm64: Uint32(),
   Abi.linuxIA32: Uint32(),
   Abi.linuxX64: Uint32(),
+  Abi.linuxRiscv32: Uint32(),
+  Abi.linuxRiscv64: Uint32(),
   Abi.macosArm64: Uint32(),
   Abi.macosX64: Uint32(),
   Abi.windowsArm64: Uint16(),
diff --git a/pkg/vm/testcases/transformations/ffi/abi_specific_int.dart.expect b/pkg/vm/testcases/transformations/ffi/abi_specific_int.dart.expect
index d8ba7e5..63b991b 100644
--- a/pkg/vm/testcases/transformations/ffi/abi_specific_int.dart.expect
+++ b/pkg/vm/testcases/transformations/ffi/abi_specific_int.dart.expect
@@ -7,17 +7,17 @@
 
 import "dart:ffi";
 
-@#C49
-@#C56
+@#C55
+@#C62
 class WChar extends ffi::AbiSpecificInteger /*hasConstConstructor*/  {
   const constructor •() → self::WChar
     : super ffi::AbiSpecificInteger::•()
     ;
-  @#C59
+  @#C65
   static get #sizeOf() → core::int*
-    return #C61.{core::List::[]}(ffi::_abi()){(core::int) → core::int*};
+    return #C67.{core::List::[]}(ffi::_abi()){(core::int) → core::int*};
 }
-@#C66
+@#C72
 class WCharStruct extends ffi::Struct {
   synthetic constructor •() → self::WCharStruct
     : super ffi::Struct::•()
@@ -25,23 +25,23 @@
   constructor #fromTypedDataBase(core::Object #typedDataBase) → self::WCharStruct
     : super ffi::Struct::_fromTypedDataBase(#typedDataBase)
     ;
-  @#C67
+  @#C73
   get a0() → core::int
-    return ffi::_loadAbiSpecificInt<self::WChar>(this.{ffi::_Compound::_typedDataBase}{core::Object}, #C68.{core::List::[]}(ffi::_abi()){(core::int) → core::int*});
-  @#C67
+    return ffi::_loadAbiSpecificInt<self::WChar>(this.{ffi::_Compound::_typedDataBase}{core::Object}, #C74.{core::List::[]}(ffi::_abi()){(core::int) → core::int*});
+  @#C73
   set a0(core::int #externalFieldValue) → void
-    return ffi::_storeAbiSpecificInt<self::WChar>(this.{ffi::_Compound::_typedDataBase}{core::Object}, #C68.{core::List::[]}(ffi::_abi()){(core::int) → core::int*}, #externalFieldValue);
-  @#C67
+    return ffi::_storeAbiSpecificInt<self::WChar>(this.{ffi::_Compound::_typedDataBase}{core::Object}, #C74.{core::List::[]}(ffi::_abi()){(core::int) → core::int*}, #externalFieldValue);
+  @#C73
   get a1() → core::int
-    return ffi::_loadAbiSpecificInt<self::WChar>(this.{ffi::_Compound::_typedDataBase}{core::Object}, #C61.{core::List::[]}(ffi::_abi()){(core::int) → core::int*});
-  @#C67
+    return ffi::_loadAbiSpecificInt<self::WChar>(this.{ffi::_Compound::_typedDataBase}{core::Object}, #C67.{core::List::[]}(ffi::_abi()){(core::int) → core::int*});
+  @#C73
   set a1(core::int #externalFieldValue) → void
-    return ffi::_storeAbiSpecificInt<self::WChar>(this.{ffi::_Compound::_typedDataBase}{core::Object}, #C61.{core::List::[]}(ffi::_abi()){(core::int) → core::int*}, #externalFieldValue);
-  @#C59
+    return ffi::_storeAbiSpecificInt<self::WChar>(this.{ffi::_Compound::_typedDataBase}{core::Object}, #C67.{core::List::[]}(ffi::_abi()){(core::int) → core::int*}, #externalFieldValue);
+  @#C65
   static get #sizeOf() → core::int*
-    return #C70.{core::List::[]}(ffi::_abi()){(core::int) → core::int*};
+    return #C76.{core::List::[]}(ffi::_abi()){(core::int) → core::int*};
 }
-@#C75
+@#C81
 class WCharArrayStruct extends ffi::Struct {
   synthetic constructor •() → self::WCharArrayStruct
     : super ffi::Struct::•()
@@ -49,31 +49,31 @@
   constructor #fromTypedDataBase(core::Object #typedDataBase) → self::WCharArrayStruct
     : super ffi::Struct::_fromTypedDataBase(#typedDataBase)
     ;
-  @#C76
+  @#C82
   get a0() → ffi::Array<self::WChar>
     return new ffi::Array::_<self::WChar>( block {
       core::Object #typedDataBase = this.{ffi::_Compound::_typedDataBase}{core::Object};
-      core::int #offset = #C68.{core::List::[]}(ffi::_abi()){(core::int) → core::int*};
-    } =>#typedDataBase is ffi::Pointer<dynamic> ?{core::Object} ffi::_fromAddress<self::WChar>(#typedDataBase.{ffi::Pointer::address}{core::int}.{core::num::+}(#offset){(core::num) → core::num}) : let typ::TypedData #typedData = _in::unsafeCast<typ::TypedData>(#typedDataBase) in #typedData.{typ::TypedData::buffer}{typ::ByteBuffer}.{typ::ByteBuffer::asUint8List}(#typedData.{typ::TypedData::offsetInBytes}{core::int}.{core::num::+}(#offset){(core::num) → core::num}, #C80.{core::List::[]}(ffi::_abi()){(core::int) → core::int*}){([core::int, core::int?]) → typ::Uint8List}, #C71, #C81);
-  @#C76
+      core::int #offset = #C74.{core::List::[]}(ffi::_abi()){(core::int) → core::int*};
+    } =>#typedDataBase is ffi::Pointer<dynamic> ?{core::Object} ffi::_fromAddress<self::WChar>(#typedDataBase.{ffi::Pointer::address}{core::int}.{core::num::+}(#offset){(core::num) → core::num}) : let typ::TypedData #typedData = _in::unsafeCast<typ::TypedData>(#typedDataBase) in #typedData.{typ::TypedData::buffer}{typ::ByteBuffer}.{typ::ByteBuffer::asUint8List}(#typedData.{typ::TypedData::offsetInBytes}{core::int}.{core::num::+}(#offset){(core::num) → core::num}, #C86.{core::List::[]}(ffi::_abi()){(core::int) → core::int*}){([core::int, core::int?]) → typ::Uint8List}, #C77, #C87);
+  @#C82
   set a0(ffi::Array<self::WChar> #externalFieldValue) → void
-    return ffi::_memCopy(this.{ffi::_Compound::_typedDataBase}{core::Object}, #C68.{core::List::[]}(ffi::_abi()){(core::int) → core::int*}, #externalFieldValue.{ffi::Array::_typedDataBase}{core::Object}, #C1, #C80.{core::List::[]}(ffi::_abi()){(core::int) → core::int*});
-  @#C59
+    return ffi::_memCopy(this.{ffi::_Compound::_typedDataBase}{core::Object}, #C74.{core::List::[]}(ffi::_abi()){(core::int) → core::int*}, #externalFieldValue.{ffi::Array::_typedDataBase}{core::Object}, #C1, #C86.{core::List::[]}(ffi::_abi()){(core::int) → core::int*});
+  @#C65
   static get #sizeOf() → core::int*
-    return #C80.{core::List::[]}(ffi::_abi()){(core::int) → core::int*};
+    return #C86.{core::List::[]}(ffi::_abi()){(core::int) → core::int*};
 }
 class _DummyAllocator extends core::Object implements ffi::Allocator /*hasConstConstructor*/  {
   const constructor •() → self::_DummyAllocator
     : super core::Object::•()
     ;
-  @#C82
-  method allocate<T extends ffi::NativeType>(core::int byteCount, {core::int? alignment = #C58}) → ffi::Pointer<self::_DummyAllocator::allocate::T> {
+  @#C88
+  method allocate<T extends ffi::NativeType>(core::int byteCount, {core::int? alignment = #C64}) → ffi::Pointer<self::_DummyAllocator::allocate::T> {
     return ffi::Pointer::fromAddress<self::_DummyAllocator::allocate::T>(0);
   }
-  @#C82
+  @#C88
   method free(ffi::Pointer<ffi::NativeType> pointer) → void {}
 }
-static const field self::_DummyAllocator noAlloc = #C83;
+static const field self::_DummyAllocator noAlloc = #C89;
 static method main() → void {
   self::testSizeOf();
   self::testStoreLoad();
@@ -86,29 +86,29 @@
   core::print(size);
 }
 static method testStoreLoad() → void {
-  final ffi::Pointer<self::WChar> p = #C83.{ffi::Allocator::allocate}<self::WChar>(self::WChar::#sizeOf){(core::int, {alignment: core::int?}) → ffi::Pointer<self::WChar>};
+  final ffi::Pointer<self::WChar> p = #C89.{ffi::Allocator::allocate}<self::WChar>(self::WChar::#sizeOf){(core::int, {alignment: core::int?}) → ffi::Pointer<self::WChar>};
   ffi::_storeAbiSpecificInt<self::WChar>(p, #C1, 10);
   core::print(ffi::_loadAbiSpecificInt<self::WChar>(p, #C1));
-  #C83.{self::_DummyAllocator::free}(p){(ffi::Pointer<ffi::NativeType>) → void};
+  #C89.{self::_DummyAllocator::free}(p){(ffi::Pointer<ffi::NativeType>) → void};
 }
 static method testStoreLoadIndexed() → void {
-  final ffi::Pointer<self::WChar> p = #C83.{ffi::Allocator::allocate}<self::WChar>(2.{core::num::*}(self::WChar::#sizeOf){(core::num) → core::num}){(core::int, {alignment: core::int?}) → ffi::Pointer<self::WChar>};
+  final ffi::Pointer<self::WChar> p = #C89.{ffi::Allocator::allocate}<self::WChar>(2.{core::num::*}(self::WChar::#sizeOf){(core::num) → core::num}){(core::int, {alignment: core::int?}) → ffi::Pointer<self::WChar>};
   ffi::_storeAbiSpecificIntAtIndex<self::WChar>(p, 0, 10);
   ffi::_storeAbiSpecificIntAtIndex<self::WChar>(p, 1, 3);
   core::print(ffi::_loadAbiSpecificIntAtIndex<self::WChar>(p, 0));
   core::print(ffi::_loadAbiSpecificIntAtIndex<self::WChar>(p, 1));
-  #C83.{self::_DummyAllocator::free}(p){(ffi::Pointer<ffi::NativeType>) → void};
+  #C89.{self::_DummyAllocator::free}(p){(ffi::Pointer<ffi::NativeType>) → void};
 }
 static method testStruct() → void {
-  final ffi::Pointer<self::WCharStruct> p = #C83.{ffi::Allocator::allocate}<self::WCharStruct>(self::WCharStruct::#sizeOf){(core::int, {alignment: core::int?}) → ffi::Pointer<self::WCharStruct>};
+  final ffi::Pointer<self::WCharStruct> p = #C89.{ffi::Allocator::allocate}<self::WCharStruct>(self::WCharStruct::#sizeOf){(core::int, {alignment: core::int?}) → ffi::Pointer<self::WCharStruct>};
   new self::WCharStruct::#fromTypedDataBase(p!).{self::WCharStruct::a0} = 1;
   core::print(new self::WCharStruct::#fromTypedDataBase(p!).{self::WCharStruct::a0}{core::int});
   new self::WCharStruct::#fromTypedDataBase(p!).{self::WCharStruct::a0} = 2;
   core::print(new self::WCharStruct::#fromTypedDataBase(p!).{self::WCharStruct::a0}{core::int});
-  #C83.{self::_DummyAllocator::free}(p){(ffi::Pointer<ffi::NativeType>) → void};
+  #C89.{self::_DummyAllocator::free}(p){(ffi::Pointer<ffi::NativeType>) → void};
 }
 static method testInlineArray() → void {
-  final ffi::Pointer<self::WCharArrayStruct> p = #C83.{ffi::Allocator::allocate}<self::WCharArrayStruct>(self::WCharArrayStruct::#sizeOf){(core::int, {alignment: core::int?}) → ffi::Pointer<self::WCharArrayStruct>};
+  final ffi::Pointer<self::WCharArrayStruct> p = #C89.{ffi::Allocator::allocate}<self::WCharArrayStruct>(self::WCharArrayStruct::#sizeOf){(core::int, {alignment: core::int?}) → ffi::Pointer<self::WCharArrayStruct>};
   final ffi::Array<self::WChar> array = new self::WCharArrayStruct::#fromTypedDataBase(p!).{self::WCharArrayStruct::a0}{ffi::Array<self::WChar>};
   for (core::int i = 0; i.{core::num::<}(100){(core::num) → core::bool}; i = i.{core::num::+}(1){(core::num) → core::int}) {
     ffi::_storeAbiSpecificIntAtIndex<self::WChar>(array.{ffi::Array::_typedDataBase}{core::Object}, i, i);
@@ -116,7 +116,7 @@
   for (core::int i = 0; i.{core::num::<}(100){(core::num) → core::bool}; i = i.{core::num::+}(1){(core::num) → core::int}) {
     core::print(ffi::_loadAbiSpecificIntAtIndex<self::WChar>(array.{ffi::Array::_typedDataBase}{core::Object}, i));
   }
-  #C83.{self::_DummyAllocator::free}(p){(ffi::Pointer<ffi::NativeType>) → void};
+  #C89.{self::_DummyAllocator::free}(p){(ffi::Pointer<ffi::NativeType>) → void};
 }
 constants  {
   #C1 = 0
@@ -155,51 +155,57 @@
   #C34 = ffi::Abi {_os:#C31, _architecture:#C14}
   #C35 = ffi::Abi {_os:#C31, _architecture:#C18}
   #C36 = 4
-  #C37 = "macos"
-  #C38 = ffi::_OS {index:#C36, _name:#C37}
-  #C39 = ffi::Abi {_os:#C38, _architecture:#C10}
-  #C40 = ffi::Abi {_os:#C38, _architecture:#C18}
-  #C41 = 5
-  #C42 = "windows"
-  #C43 = ffi::_OS {index:#C41, _name:#C42}
-  #C44 = ffi::Abi {_os:#C43, _architecture:#C10}
-  #C45 = ffi::Uint16 {}
-  #C46 = ffi::Abi {_os:#C43, _architecture:#C14}
-  #C47 = ffi::Abi {_os:#C43, _architecture:#C18}
-  #C48 = <ffi::Abi*, ffi::NativeType*>{#C6:#C7, #C11:#C7, #C15:#C7, #C19:#C7, #C22:#C23, #C24:#C7, #C27:#C7, #C28:#C7, #C29:#C7, #C32:#C7, #C33:#C7, #C34:#C7, #C35:#C7, #C39:#C7, #C40:#C7, #C44:#C45, #C46:#C45, #C47:#C45)
-  #C49 = ffi::AbiSpecificIntegerMapping {mapping:#C48}
-  #C50 = "vm:ffi:abi-specific-mapping"
-  #C51 = TypeLiteralConstant(ffi::Uint32)
-  #C52 = TypeLiteralConstant(ffi::Uint64)
-  #C53 = TypeLiteralConstant(ffi::Uint16)
-  #C54 = <core::Type?>[#C51, #C51, #C51, #C51, #C52, #C51, #C51, #C51, #C51, #C51, #C51, #C51, #C51, #C51, #C51, #C53, #C53, #C53]
-  #C55 = ffi::_FfiAbiSpecificMapping {nativeTypes:#C54}
-  #C56 = core::pragma {name:#C50, options:#C55}
-  #C57 = "vm:prefer-inline"
-  #C58 = null
-  #C59 = core::pragma {name:#C57, options:#C58}
-  #C60 = 8
-  #C61 = <core::int*>[#C36, #C36, #C36, #C36, #C60, #C36, #C36, #C36, #C36, #C36, #C36, #C36, #C36, #C36, #C36, #C12, #C12, #C12]
-  #C62 = "vm:ffi:struct-fields"
-  #C63 = TypeLiteralConstant(self::WChar)
-  #C64 = <core::Type>[#C63, #C63]
-  #C65 = ffi::_FfiStructLayout {fieldTypes:#C64, packing:#C58}
-  #C66 = core::pragma {name:#C62, options:#C65}
-  #C67 = self::WChar {}
-  #C68 = <core::int*>[#C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1]
-  #C69 = 16
-  #C70 = <core::int*>[#C60, #C60, #C60, #C60, #C69, #C60, #C60, #C60, #C60, #C60, #C60, #C60, #C60, #C60, #C60, #C36, #C36, #C36]
-  #C71 = 100
-  #C72 = ffi::_FfiInlineArray {elementType:#C63, length:#C71}
-  #C73 = <core::Type>[#C72]
-  #C74 = ffi::_FfiStructLayout {fieldTypes:#C73, packing:#C58}
-  #C75 = core::pragma {name:#C62, options:#C74}
-  #C76 = ffi::_ArraySize<ffi::NativeType*> {dimension1:#C71, dimension2:#C58, dimension3:#C58, dimension4:#C58, dimension5:#C58, dimensions:#C58}
-  #C77 = 400
-  #C78 = 800
-  #C79 = 200
-  #C80 = <core::int*>[#C77, #C77, #C77, #C77, #C78, #C77, #C77, #C77, #C77, #C77, #C77, #C77, #C77, #C77, #C77, #C79, #C79, #C79]
-  #C81 = <core::int*>[]
-  #C82 = core::_Override {}
-  #C83 = self::_DummyAllocator {}
+  #C37 = "riscv32"
+  #C38 = ffi::_Architecture {index:#C36, _name:#C37}
+  #C39 = ffi::Abi {_os:#C31, _architecture:#C38}
+  #C40 = 5
+  #C41 = "riscv64"
+  #C42 = ffi::_Architecture {index:#C40, _name:#C41}
+  #C43 = ffi::Abi {_os:#C31, _architecture:#C42}
+  #C44 = "macos"
+  #C45 = ffi::_OS {index:#C36, _name:#C44}
+  #C46 = ffi::Abi {_os:#C45, _architecture:#C10}
+  #C47 = ffi::Abi {_os:#C45, _architecture:#C18}
+  #C48 = "windows"
+  #C49 = ffi::_OS {index:#C40, _name:#C48}
+  #C50 = ffi::Abi {_os:#C49, _architecture:#C10}
+  #C51 = ffi::Uint16 {}
+  #C52 = ffi::Abi {_os:#C49, _architecture:#C14}
+  #C53 = ffi::Abi {_os:#C49, _architecture:#C18}
+  #C54 = <ffi::Abi*, ffi::NativeType*>{#C6:#C7, #C11:#C7, #C15:#C7, #C19:#C7, #C22:#C23, #C24:#C7, #C27:#C7, #C28:#C7, #C29:#C7, #C32:#C7, #C33:#C7, #C34:#C7, #C35:#C7, #C39:#C7, #C43:#C7, #C46:#C7, #C47:#C7, #C50:#C51, #C52:#C51, #C53:#C51)
+  #C55 = ffi::AbiSpecificIntegerMapping {mapping:#C54}
+  #C56 = "vm:ffi:abi-specific-mapping"
+  #C57 = TypeLiteralConstant(ffi::Uint32)
+  #C58 = TypeLiteralConstant(ffi::Uint64)
+  #C59 = TypeLiteralConstant(ffi::Uint16)
+  #C60 = <core::Type?>[#C57, #C57, #C57, #C57, #C58, #C57, #C57, #C57, #C57, #C57, #C57, #C57, #C57, #C57, #C57, #C57, #C57, #C59, #C59, #C59]
+  #C61 = ffi::_FfiAbiSpecificMapping {nativeTypes:#C60}
+  #C62 = core::pragma {name:#C56, options:#C61}
+  #C63 = "vm:prefer-inline"
+  #C64 = null
+  #C65 = core::pragma {name:#C63, options:#C64}
+  #C66 = 8
+  #C67 = <core::int*>[#C36, #C36, #C36, #C36, #C66, #C36, #C36, #C36, #C36, #C36, #C36, #C36, #C36, #C36, #C36, #C36, #C36, #C12, #C12, #C12]
+  #C68 = "vm:ffi:struct-fields"
+  #C69 = TypeLiteralConstant(self::WChar)
+  #C70 = <core::Type>[#C69, #C69]
+  #C71 = ffi::_FfiStructLayout {fieldTypes:#C70, packing:#C64}
+  #C72 = core::pragma {name:#C68, options:#C71}
+  #C73 = self::WChar {}
+  #C74 = <core::int*>[#C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1]
+  #C75 = 16
+  #C76 = <core::int*>[#C66, #C66, #C66, #C66, #C75, #C66, #C66, #C66, #C66, #C66, #C66, #C66, #C66, #C66, #C66, #C66, #C66, #C36, #C36, #C36]
+  #C77 = 100
+  #C78 = ffi::_FfiInlineArray {elementType:#C69, length:#C77}
+  #C79 = <core::Type>[#C78]
+  #C80 = ffi::_FfiStructLayout {fieldTypes:#C79, packing:#C64}
+  #C81 = core::pragma {name:#C68, options:#C80}
+  #C82 = ffi::_ArraySize<ffi::NativeType*> {dimension1:#C77, dimension2:#C64, dimension3:#C64, dimension4:#C64, dimension5:#C64, dimensions:#C64}
+  #C83 = 400
+  #C84 = 800
+  #C85 = 200
+  #C86 = <core::int*>[#C83, #C83, #C83, #C83, #C84, #C83, #C83, #C83, #C83, #C83, #C83, #C83, #C83, #C83, #C83, #C83, #C83, #C85, #C85, #C85]
+  #C87 = <core::int*>[]
+  #C88 = core::_Override {}
+  #C89 = self::_DummyAllocator {}
 }
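
Note: the wholesale renumbering in this expectation file (#C49 -> #C55, #C68 -> #C74,
#C83 -> #C89, and so on) is a uniform shift of six. The two new Linux RISC-V ABIs
contribute six fresh constants to the pool (an architecture name string, a
_Architecture instance, and an Abi instance for each; the integers 4 and 5 were already
interned), so every subsequent constant's index grows by six.
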
diff --git a/pkg/vm/testcases/transformations/ffi/abi_specific_int_incomplete.dart.expect b/pkg/vm/testcases/transformations/ffi/abi_specific_int_incomplete.dart.expect
index 2acfbf6..306f413 100644
--- a/pkg/vm/testcases/transformations/ffi/abi_specific_int_incomplete.dart.expect
+++ b/pkg/vm/testcases/transformations/ffi/abi_specific_int_incomplete.dart.expect
@@ -143,22 +143,22 @@
   #C22 = "vm:ffi:abi-specific-mapping"
   #C23 = null
   #C24 = TypeLiteralConstant(ffi::Uint32)
-  #C25 = <core::Type?>[#C23, #C23, #C23, #C23, #C23, #C23, #C23, #C23, #C23, #C24, #C24, #C24, #C24, #C23, #C23, #C23, #C23, #C23]
+  #C25 = <core::Type?>[#C23, #C23, #C23, #C23, #C23, #C23, #C23, #C23, #C23, #C24, #C24, #C24, #C24, #C23, #C23, #C23, #C23, #C23, #C23, #C23]
   #C26 = ffi::_FfiAbiSpecificMapping {nativeTypes:#C25}
   #C27 = core::pragma {name:#C22, options:#C26}
   #C28 = "vm:prefer-inline"
   #C29 = core::pragma {name:#C28, options:#C23}
   #C30 = 4
-  #C31 = <core::int*>[#C23, #C23, #C23, #C23, #C23, #C23, #C23, #C23, #C23, #C30, #C30, #C30, #C30, #C23, #C23, #C23, #C23, #C23]
+  #C31 = <core::int*>[#C23, #C23, #C23, #C23, #C23, #C23, #C23, #C23, #C23, #C30, #C30, #C30, #C30, #C23, #C23, #C23, #C23, #C23, #C23, #C23]
   #C32 = "vm:ffi:struct-fields"
   #C33 = TypeLiteralConstant(self::Incomplete)
   #C34 = <core::Type>[#C33, #C33]
   #C35 = ffi::_FfiStructLayout {fieldTypes:#C34, packing:#C23}
   #C36 = core::pragma {name:#C32, options:#C35}
   #C37 = self::Incomplete {}
-  #C38 = <core::int*>[#C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4]
+  #C38 = <core::int*>[#C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4]
   #C39 = 8
-  #C40 = <core::int*>[#C23, #C23, #C23, #C23, #C23, #C23, #C23, #C23, #C23, #C39, #C39, #C39, #C39, #C23, #C23, #C23, #C23, #C23]
+  #C40 = <core::int*>[#C23, #C23, #C23, #C23, #C23, #C23, #C23, #C23, #C23, #C39, #C39, #C39, #C39, #C23, #C23, #C23, #C23, #C23, #C23, #C23]
   #C41 = 100
   #C42 = ffi::_FfiInlineArray {elementType:#C33, length:#C41}
   #C43 = <core::Type>[#C42]
@@ -166,7 +166,7 @@
   #C45 = core::pragma {name:#C32, options:#C44}
   #C46 = ffi::_ArraySize<ffi::NativeType*> {dimension1:#C41, dimension2:#C23, dimension3:#C23, dimension4:#C23, dimension5:#C23, dimensions:#C23}
   #C47 = 400
-  #C48 = <core::int*>[#C23, #C23, #C23, #C23, #C23, #C23, #C23, #C23, #C23, #C47, #C47, #C47, #C47, #C23, #C23, #C23, #C23, #C23]
+  #C48 = <core::int*>[#C23, #C23, #C23, #C23, #C23, #C23, #C23, #C23, #C23, #C47, #C47, #C47, #C47, #C23, #C23, #C23, #C23, #C23, #C23, #C23]
   #C49 = <core::int*>[]
   #C50 = core::_Override {}
   #C51 = self::_DummyAllocator {}
diff --git a/pkg/vm/testcases/transformations/ffi/compound_copies.dart.expect b/pkg/vm/testcases/transformations/ffi/compound_copies.dart.expect
index 98ee713..b1fe1cf 100644
--- a/pkg/vm/testcases/transformations/ffi/compound_copies.dart.expect
+++ b/pkg/vm/testcases/transformations/ffi/compound_copies.dart.expect
@@ -73,13 +73,13 @@
   #C6 = core::pragma {name:#C1, options:#C5}
   #C7 = ffi::Int64 {}
   #C8 = 0
-  #C9 = <core::int*>[#C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8]
+  #C9 = <core::int*>[#C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8]
   #C10 = 8
-  #C11 = <core::int*>[#C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10]
+  #C11 = <core::int*>[#C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10]
   #C12 = "vm:prefer-inline"
   #C13 = core::pragma {name:#C12, options:#C4}
   #C14 = 16
-  #C15 = <core::int*>[#C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14]
+  #C15 = <core::int*>[#C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14]
   #C16 = TypeLiteralConstant(self::Coordinate)
   #C17 = <core::Type>[#C16, #C2]
   #C18 = ffi::_FfiStructLayout {fieldTypes:#C17, packing:#C4}
diff --git a/pkg/vm/testcases/transformations/type_flow/transformer/ffi_struct_constructors.dart.expect b/pkg/vm/testcases/transformations/type_flow/transformer/ffi_struct_constructors.dart.expect
index c2c6faa..5927788 100644
--- a/pkg/vm/testcases/transformations/type_flow/transformer/ffi_struct_constructors.dart.expect
+++ b/pkg/vm/testcases/transformations/type_flow/transformer/ffi_struct_constructors.dart.expect
@@ -128,10 +128,10 @@
   #C9 = ffi::_FfiStructLayout {fieldTypes:#C8, packing:#C4}
   #C10 = core::pragma {name:#C1, options:#C9}
   #C11 = 0
-  #C12 = <core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
+  #C12 = <core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
   #C13 = 4
   #C14 = 8
-  #C15 = <core::int*>[#C13, #C14, #C13, #C14, #C14, #C14, #C13, #C14, #C14, #C13, #C14, #C13, #C14, #C14, #C14, #C14, #C13, #C14]
+  #C15 = <core::int*>[#C13, #C14, #C13, #C14, #C14, #C14, #C13, #C14, #C14, #C13, #C14, #C13, #C14, #C13, #C14, #C14, #C14, #C14, #C13, #C14]
   #C16 = static-tearoff self::useStruct3
   #C17 = static-tearoff self::returnStruct7
   #C18 = 1
diff --git a/runtime/BUILD.gn b/runtime/BUILD.gn
index 8b3999a..a4c477a 100644
--- a/runtime/BUILD.gn
+++ b/runtime/BUILD.gn
@@ -112,6 +112,10 @@
     defines += [ "TARGET_ARCH_X64" ]
   } else if (dart_target_arch == "ia32" || dart_target_arch == "x86") {
     defines += [ "TARGET_ARCH_IA32" ]
+  } else if (dart_target_arch == "riscv32") {
+    defines += [ "TARGET_ARCH_RISCV32" ]
+  } else if (dart_target_arch == "riscv64") {
+    defines += [ "TARGET_ARCH_RISCV64" ]
   } else {
     print("Invalid dart_target_arch: $dart_target_arch")
     assert(false)
diff --git a/runtime/bin/elf_loader.cc b/runtime/bin/elf_loader.cc
index a5ff8c0..fb0e3a0 100644
--- a/runtime/bin/elf_loader.cc
+++ b/runtime/bin/elf_loader.cc
@@ -313,6 +313,8 @@
 #elif defined(TARGET_ARCH_ARM64)
   CHECK_ERROR(header_.machine == dart::elf::EM_AARCH64,
               "Architecture mismatch.");
+#elif defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+  CHECK_ERROR(header_.machine == dart::elf::EM_RISCV, "Architecture mismatch.");
 #else
 #error Unsupported architecture.
 #endif
diff --git a/runtime/bin/ffi_test/clobber_riscv32.S b/runtime/bin/ffi_test/clobber_riscv32.S
new file mode 100644
index 0000000..374c1d5
--- /dev/null
+++ b/runtime/bin/ffi_test/clobber_riscv32.S
@@ -0,0 +1,27 @@
+.text
+
+#if defined(__linux__) || defined(__FreeBSD__) /* HOST_OS_LINUX */
+.globl ClobberAndCall
+.type ClobberAndCall, @function
+ClobberAndCall:
+#else /* HOST_OS_MACOS */
+.globl _ClobberAndCall
+_ClobberAndCall:
+#endif
+
+li a0, 1
+li a1, 1
+li a2, 1
+li a3, 1
+li a4, 1
+li a5, 1
+li a6, 1
+li a7, 1
+li t0, 1
+li t1, 1
+li t2, 1
+li t3, 1
+li t4, 1
+li t5, 1
+li t6, 1
+ret
diff --git a/runtime/bin/ffi_test/clobber_riscv64.S b/runtime/bin/ffi_test/clobber_riscv64.S
new file mode 100644
index 0000000..374c1d5
--- /dev/null
+++ b/runtime/bin/ffi_test/clobber_riscv64.S
@@ -0,0 +1,27 @@
+.text
+
+#if defined(__linux__) || defined(__FreeBSD__) /* HOST_OS_LINUX */
+.globl ClobberAndCall
+.type ClobberAndCall, @function
+ClobberAndCall:
+#else /* HOST_OS_MACOS */
+.globl _ClobberAndCall
+_ClobberAndCall:
+#endif
+
+li a0, 1
+li a1, 1
+li a2, 1
+li a3, 1
+li a4, 1
+li a5, 1
+li a6, 1
+li a7, 1
+li t0, 1
+li t1, 1
+li t2, 1
+li t3, 1
+li t4, 1
+li t5, 1
+li t6, 1
+ret
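
Both clobber files are byte-for-byte identical: each loads the dummy value 1 into every RISC-V argument register (a0-a7) and temporary register (t0-t6) before returning, so the FFI tests can detect caller-saved state that incorrectly survives a native call. A hedged sketch of driving the helper from C++ follows; the signature is an assumption inferred from the assembly, not taken from the test harness.

    // Sketch only: the extern "C" symbol comes from clobber_riscv{32,64}.S.
    // Signature is assumed from the assembly (arguments ignored, plain ret).
    extern "C" void ClobberAndCall();

    void StressCallerSavedRegisters() {
      ClobberAndCall();  // overwrites a0-a7 and t0-t6 with 1, then returns
    }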
diff --git a/runtime/bin/ffi_unit_test/BUILD.gn b/runtime/bin/ffi_unit_test/BUILD.gn
index 99396af..bad85d0 100644
--- a/runtime/bin/ffi_unit_test/BUILD.gn
+++ b/runtime/bin/ffi_unit_test/BUILD.gn
@@ -49,6 +49,14 @@
   defines = [ "TARGET_ARCH_X64" ]
 }
 
+config("define_target_arch_riscv32") {
+  defines = [ "TARGET_ARCH_RISCV32" ]
+}
+
+config("define_target_arch_riscv64") {
+  defines = [ "TARGET_ARCH_RISCV64" ]
+}
+
 config("define_target_os_android") {
   defines = [ "DART_TARGET_OS_ANDROID" ]
 }
@@ -167,6 +175,20 @@
   ]
 }
 
+build_run_ffi_unit_tests("run_ffi_unit_tests_riscv32_linux") {
+  extra_configs = [
+    ":define_target_arch_riscv32",
+    ":define_target_os_linux",
+  ]
+}
+
+build_run_ffi_unit_tests("run_ffi_unit_tests_riscv64_linux") {
+  extra_configs = [
+    ":define_target_arch_riscv64",
+    ":define_target_os_linux",
+  ]
+}
+
 group("run_ffi_unit_tests") {
   deps = [
     ":run_ffi_unit_tests_arm64_android",
@@ -179,6 +201,8 @@
     ":run_ffi_unit_tests_ia32_android",  # Emulator, no other test coverage.
     ":run_ffi_unit_tests_ia32_linux",
     ":run_ffi_unit_tests_ia32_win",
+    ":run_ffi_unit_tests_riscv32_linux",
+    ":run_ffi_unit_tests_riscv64_linux",
     ":run_ffi_unit_tests_x64_ios",  # Simulator, no other test coverage.
     ":run_ffi_unit_tests_x64_linux",
     ":run_ffi_unit_tests_x64_macos",
diff --git a/runtime/bin/gen_snapshot.cc b/runtime/bin/gen_snapshot.cc
index 6a24139..b3c5cbb 100644
--- a/runtime/bin/gen_snapshot.cc
+++ b/runtime/bin/gen_snapshot.cc
@@ -957,3 +957,8 @@
 int main(int argc, char** argv) {
   return dart::bin::main(argc, argv);
 }
+
+// TODO(riscv): Why is this missing from libc?
+#if defined(__riscv)
+char __libc_single_threaded = 0;
+#endif
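
The `__libc_single_threaded` definitions here and in main.cc and run_vm_tests.cc below work around a link failure: glibc only exports that symbol from version 2.32 onward, while newer libstdc++ emits references to it, so older RISC-V cross sysroots fail to link. A minimal sketch of a tighter guard, assuming the symbol is only absent from pre-2.32 glibc:

    // Sketch, not part of the patch: define the fallback only when the C
    // library is known to predate glibc 2.32, which introduced the symbol.
    #include <features.h>  // defines __GLIBC__ / __GLIBC_PREREQ on glibc
    #if defined(__riscv) && defined(__GLIBC__)
    #if !__GLIBC_PREREQ(2, 32)
    extern "C" char __libc_single_threaded = 0;
    #endif
    #endif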
diff --git a/runtime/bin/main.cc b/runtime/bin/main.cc
index cf76d5f..2e80b0a 100644
--- a/runtime/bin/main.cc
+++ b/runtime/bin/main.cc
@@ -1400,3 +1400,8 @@
   dart::bin::main(argc, argv);
   UNREACHABLE();
 }
+
+// TODO(riscv): Why is this missing from libc?
+#if defined(__riscv)
+char __libc_single_threaded = 0;
+#endif
diff --git a/runtime/bin/platform.h b/runtime/bin/platform.h
index 68723c2..4b5bb50 100644
--- a/runtime/bin/platform.h
+++ b/runtime/bin/platform.h
@@ -47,6 +47,10 @@
     return "ia32";
 #elif defined(HOST_ARCH_X64)
     return "x64";
+#elif defined(HOST_ARCH_RISCV32)
+    return "riscv32";
+#elif defined(HOST_ARCH_RISCV64)
+    return "riscv64";
 #else
 #error Architecture detection failed.
 #endif
diff --git a/runtime/bin/run_vm_tests.cc b/runtime/bin/run_vm_tests.cc
index d560dab..58d16b6 100644
--- a/runtime/bin/run_vm_tests.cc
+++ b/runtime/bin/run_vm_tests.cc
@@ -380,6 +380,7 @@
       /*shutdown_isolate=*/nullptr,
       /*cleanup_isolate=*/nullptr,
       /*cleanup_group=*/CleanupIsolateGroup,
+      /*thread_start=*/nullptr,
       /*thread_exit=*/nullptr, dart::bin::DartUtils::OpenFile,
       dart::bin::DartUtils::ReadFile, dart::bin::DartUtils::WriteFile,
       dart::bin::DartUtils::CloseFile, /*entropy_source=*/nullptr,
@@ -426,3 +427,8 @@
 int main(int argc, const char** argv) {
   dart::bin::Platform::Exit(dart::Main(argc, argv));
 }
+
+// TODO(riscv): Why is this missing from libc?
+#if defined(__riscv)
+char __libc_single_threaded = 0;
+#endif
diff --git a/runtime/include/dart_api.h b/runtime/include/dart_api.h
index 61c56f6..360de52 100644
--- a/runtime/include/dart_api.h
+++ b/runtime/include/dart_api.h
@@ -757,6 +757,15 @@
 typedef void (*Dart_IsolateGroupCleanupCallback)(void* isolate_group_data);
 
 /**
+ * A thread start callback function.
+ * This callback, provided by the embedder, is called after a thread in the
+ * vm thread pool starts.
+ * This function could be used to adjust thread priority or attach native
+ * resources to the thread.
+ */
+typedef void (*Dart_ThreadStartCallback)(void);
+
+/**
  * A thread death callback function.
  * This callback, provided by the embedder, is called before a thread in the
  * vm thread pool exits.
@@ -840,7 +849,7 @@
  * The current version of the Dart_InitializeFlags. Should be incremented every
  * time Dart_InitializeFlags changes in a binary incompatible way.
  */
-#define DART_INITIALIZE_PARAMS_CURRENT_VERSION (0x00000005)
+#define DART_INITIALIZE_PARAMS_CURRENT_VERSION (0x00000006)
 
 /** Forward declaration */
 struct Dart_CodeObserver;
@@ -966,6 +975,7 @@
    */
   Dart_IsolateGroupCleanupCallback cleanup_group;
 
+  Dart_ThreadStartCallback thread_start;
   Dart_ThreadExitCallback thread_exit;
   Dart_FileOpenCallback file_open;
   Dart_FileReadCallback file_read;
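
A minimal embedder sketch wiring the new `thread_start` field into `Dart_InitializeParams`; `OnVmThreadStart` and its body are hypothetical, while the field names and version macro come from the header above.

    #include "dart_api.h"

    static void OnVmThreadStart() {
      // E.g. set a thread name or priority via platform-specific calls.
    }

    void InitVm() {
      Dart_InitializeParams params = {};
      params.version = DART_INITIALIZE_PARAMS_CURRENT_VERSION;  // 0x06 after this change
      params.thread_start = &OnVmThreadStart;  // new in this version
      params.thread_exit = nullptr;
      // ... snapshot buffers and the remaining callbacks as before ...
      char* error = Dart_Initialize(&params);
      if (error != nullptr) {
        // Handle and free(error).
      }
    }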
diff --git a/runtime/platform/elf.h b/runtime/platform/elf.h
index 8f58005..545e9e8 100644
--- a/runtime/platform/elf.h
+++ b/runtime/platform/elf.h
@@ -171,6 +171,7 @@
 static constexpr intptr_t EM_ARM = 40;
 static constexpr intptr_t EM_X86_64 = 62;
 static constexpr intptr_t EM_AARCH64 = 183;
+static constexpr intptr_t EM_RISCV = 243;
 
 static const intptr_t EV_CURRENT = 1;
 
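For context on how `EM_RISCV` is consumed: the elf_loader.cc change above accepts the single machine number 243 for both RV32 and RV64 snapshots, because the two share one `e_machine` value and differ only in ELF class. A standalone sketch of the same check, using ordinary <elf.h> types rather than VM code:

    #include <elf.h>

    bool IsRiscvElf(const Elf64_Ehdr& ehdr) {
      // e_machine sits at the same offset in Elf32/Elf64 headers; RV32 vs
      // RV64 must be told apart via e_ident[EI_CLASS], not e_machine.
      return ehdr.e_machine == 243;  // EM_RISCV
    }
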
diff --git a/runtime/platform/globals.h b/runtime/platform/globals.h
index 3891afc..28bf82c 100644
--- a/runtime/platform/globals.h
+++ b/runtime/platform/globals.h
@@ -206,6 +206,16 @@
 #elif defined(__aarch64__)
 #define HOST_ARCH_ARM64 1
 #define ARCH_IS_64_BIT 1
+#elif defined(__riscv)
+#if __SIZEOF_POINTER__ == 4
+#define HOST_ARCH_RISCV32 1
+#define ARCH_IS_32_BIT 1
+#elif __SIZEOF_POINTER__ == 8
+#define HOST_ARCH_RISCV64 1
+#define ARCH_IS_64_BIT 1
+#else
+#error Unknown XLEN
+#endif
 #else
 #error Architecture was not detected as supported by Dart.
 #endif
@@ -286,7 +296,8 @@
 #endif
 
 #if !defined(TARGET_ARCH_ARM) && !defined(TARGET_ARCH_X64) &&                  \
-    !defined(TARGET_ARCH_IA32) && !defined(TARGET_ARCH_ARM64)
+    !defined(TARGET_ARCH_IA32) && !defined(TARGET_ARCH_ARM64) &&               \
+    !defined(TARGET_ARCH_RISCV32) && !defined(TARGET_ARCH_RISCV64)
 // No target architecture specified pick the one matching the host architecture.
 #if defined(HOST_ARCH_ARM)
 #define TARGET_ARCH_ARM 1
@@ -296,14 +307,20 @@
 #define TARGET_ARCH_IA32 1
 #elif defined(HOST_ARCH_ARM64)
 #define TARGET_ARCH_ARM64 1
+#elif defined(HOST_ARCH_RISCV32)
+#define TARGET_ARCH_RISCV32 1
+#elif defined(HOST_ARCH_RISCV64)
+#define TARGET_ARCH_RISCV64 1
 #else
 #error Automatic target architecture detection failed.
 #endif
 #endif
 
-#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_ARM)
+#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_ARM) ||                   \
+    defined(TARGET_ARCH_RISCV32)
 #define TARGET_ARCH_IS_32_BIT 1
-#elif defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_ARM64)
+#elif defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_ARM64) ||                \
+    defined(TARGET_ARCH_RISCV64)
 #define TARGET_ARCH_IS_64_BIT 1
 #else
 #error Automatic target architecture detection failed.
@@ -315,11 +332,13 @@
 
 // Verify that host and target architectures match, we cannot
 // have a 64 bit Dart VM generating 32 bit code or vice-versa.
-#if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_ARM64)
+#if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_ARM64) ||                  \
+    defined(TARGET_ARCH_RISCV64)
 #if !defined(ARCH_IS_64_BIT) && !defined(FFI_UNIT_TESTS)
 #error Mismatched Host/Target architectures.
 #endif  // !defined(ARCH_IS_64_BIT) && !defined(FFI_UNIT_TESTS)
-#elif defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_ARM)
+#elif defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_ARM) ||                 \
+    defined(TARGET_ARCH_RISCV32)
 #if defined(HOST_ARCH_X64) && defined(TARGET_ARCH_ARM)
 // This is simarm_x64, which is the only case where host/target architecture
 // mismatch is allowed (unless we're running FFI unit tests).
@@ -345,12 +364,18 @@
 #define USING_SIMULATOR 1
 #endif
 #endif
-
 #elif defined(TARGET_ARCH_ARM64)
 #if !defined(HOST_ARCH_ARM64)
 #define USING_SIMULATOR 1
 #endif
-
+#elif defined(TARGET_ARCH_RISCV32)
+#if !defined(HOST_ARCH_RISCV32)
+#define USING_SIMULATOR 1
+#endif
+#elif defined(TARGET_ARCH_RISCV64)
+#if !defined(HOST_ARCH_RISCV64)
+#define USING_SIMULATOR 1
+#endif
 #else
 #error Unknown architecture.
 #endif
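
The net effect of the macro cascade: building a RISC-V target VM on a non-RISC-V host selects the simulator, exactly as for ARM. A compile-time illustration, assuming the macros come from vm/globals.h as patched above:

    #include "vm/globals.h"

    const char* ExecutionMode() {
    #if defined(TARGET_ARCH_RISCV64) && defined(USING_SIMULATOR)
      return "RV64 code interpreted by the simulator (e.g. on an x64 host)";
    #elif defined(TARGET_ARCH_RISCV64)
      return "RV64 code executed natively";
    #else
      return "non-RISC-V target";
    #endif
    }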
diff --git a/runtime/third_party/double-conversion/src/utils.h b/runtime/third_party/double-conversion/src/utils.h
index 51d5e61..c419a6c 100644
--- a/runtime/third_party/double-conversion/src/utils.h
+++ b/runtime/third_party/double-conversion/src/utils.h
@@ -67,16 +67,14 @@
 // the output of the division with the expected result. (Inlining must be
 // disabled.)
 // On Linux,x86 89255e-22 != Div_double(89255.0/1e22)
-#if defined(_M_X64) || defined(__x86_64__) || \
-    defined(__ARMEL__) || defined(__avr32__) || \
-    defined(__hppa__) || defined(__ia64__) || \
-    defined(__mips__) || \
-    defined(__powerpc__) || defined(__ppc__) || defined(__ppc64__) || \
-    defined(_POWER) || defined(_ARCH_PPC) || defined(_ARCH_PPC64) || \
-    defined(__sparc__) || defined(__sparc) || defined(__s390__) || \
-    defined(__SH4__) || defined(__alpha__) || \
-    defined(_MIPS_ARCH_MIPS32R2) || \
-    defined(__AARCH64EL__) || defined(__aarch64__)
+#if defined(_M_X64) || defined(__x86_64__) || defined(__ARMEL__) ||            \
+    defined(__avr32__) || defined(__hppa__) || defined(__ia64__) ||            \
+    defined(__mips__) || defined(__powerpc__) || defined(__ppc__) ||           \
+    defined(__ppc64__) || defined(_POWER) || defined(_ARCH_PPC) ||             \
+    defined(_ARCH_PPC64) || defined(__sparc__) || defined(__sparc) ||          \
+    defined(__s390__) || defined(__SH4__) || defined(__alpha__) ||             \
+    defined(_MIPS_ARCH_MIPS32R2) || defined(__AARCH64EL__) ||                  \
+    defined(__aarch64__) || defined(__riscv)
 #define DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS 1
 #elif defined(__mc68000__)
 #undef DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS
diff --git a/runtime/tools/dartfuzz/dartfuzz_test.dart b/runtime/tools/dartfuzz/dartfuzz_test.dart
index b6e32b7..6cf216e 100644
--- a/runtime/tools/dartfuzz/dartfuzz_test.dart
+++ b/runtime/tools/dartfuzz/dartfuzz_test.dart
@@ -112,12 +112,16 @@
     if (mode.endsWith('debug-arm32')) return 'DebugSIMARM';
     if (mode.endsWith('debug-arm64')) return 'DebugSIMARM64';
     if (mode.endsWith('debug-arm64c')) return 'DebugSIMARM64C';
+    if (mode.endsWith('debug-riscv32')) return 'DebugSIMRISCV32';
+    if (mode.endsWith('debug-riscv64')) return 'DebugSIMRISCV64';
     if (mode.endsWith('ia32')) return 'ReleaseIA32';
     if (mode.endsWith('x64')) return 'ReleaseX64';
     if (mode.endsWith('x64c')) return 'ReleaseX64C';
     if (mode.endsWith('arm32')) return 'ReleaseSIMARM';
     if (mode.endsWith('arm64')) return 'ReleaseSIMARM64';
     if (mode.endsWith('arm64c')) return 'ReleaseSIMARM64C';
+    if (mode.endsWith('riscv32')) return 'ReleaseSIMRISCV32';
+    if (mode.endsWith('riscv64')) return 'ReleaseSIMRISCV64';
     throw ('unknown tag in mode: $mode');
   }
 
@@ -333,7 +337,8 @@
       ((mode1.contains('arm32') && mode2.contains('arm32')) ||
           (mode1.contains('arm64') && mode2.contains('arm64')) ||
           (mode1.contains('x64') && mode2.contains('x64')) ||
-          (mode1.contains('ia32') && mode2.contains('ia32')));
+          (mode1.contains('riscv32') && mode2.contains('riscv32')) ||
+          (mode1.contains('riscv64') && mode2.contains('riscv64')));
 
   bool ffiCapable(String mode1, String mode2) =>
       mode1.startsWith('jit') &&
@@ -676,12 +681,16 @@
     'jit-debug-arm32',
     'jit-debug-arm64',
     'jit-debug-arm64c',
+    'jit-debug-riscv32',
+    'jit-debug-riscv64',
     'jit-ia32',
     'jit-x64',
     'jit-x64c',
     'jit-arm32',
     'jit-arm64',
     'jit-arm64c',
+    'jit-riscv32',
+    'jit-riscv64',
     'aot-debug-x64',
     'aot-debug-x64c',
     'aot-x64',
@@ -694,9 +703,13 @@
     'aot-debug-arm32',
     'aot-debug-arm64',
     'aot-debug-arm64c',
+    'aot-debug-riscv32',
+    'aot-debug-riscv64',
     'aot-arm32',
     'aot-arm64',
     'aot-arm64c',
+    'aot-riscv32',
+    'aot-riscv64',
     // Too many divergences (due to arithmetic):
     'js-x64',
   ];
diff --git a/runtime/tools/run_clang_tidy.dart b/runtime/tools/run_clang_tidy.dart
index aa32b3b..84b66e8 100644
--- a/runtime/tools/run_clang_tidy.dart
+++ b/runtime/tools/run_clang_tidy.dart
@@ -79,22 +79,26 @@
   'runtime/platform/utils_linux.h',
   'runtime/platform/utils_macos.h',
   'runtime/platform/utils_win.h',
-  'runtime/vm/compiler/assembler/assembler_arm64.h',
   'runtime/vm/compiler/assembler/assembler_arm.h',
+  'runtime/vm/compiler/assembler/assembler_arm64.h',
   'runtime/vm/compiler/assembler/assembler_ia32.h',
+  'runtime/vm/compiler/assembler/assembler_riscv.h',
   'runtime/vm/compiler/assembler/assembler_x64.h',
   'runtime/vm/compiler/runtime_offsets_extracted.h',
-  'runtime/vm/constants_arm64.h',
   'runtime/vm/constants_arm.h',
+  'runtime/vm/constants_arm64.h',
   'runtime/vm/constants_ia32.h',
+  'runtime/vm/constants_riscv.h',
   'runtime/vm/constants_x64.h',
-  'runtime/vm/cpu_arm64.h',
   'runtime/vm/cpu_arm.h',
+  'runtime/vm/cpu_arm64.h',
   'runtime/vm/cpu_ia32.h',
+  'runtime/vm/cpu_riscv.h',
   'runtime/vm/cpu_x64.h',
-  'runtime/vm/instructions_arm64.h',
   'runtime/vm/instructions_arm.h',
+  'runtime/vm/instructions_arm64.h',
   'runtime/vm/instructions_ia32.h',
+  'runtime/vm/instructions_riscv.h',
   'runtime/vm/instructions_x64.h',
   'runtime/vm/os_thread_android.h',
   'runtime/vm/os_thread_fuchsia.h',
@@ -104,9 +108,11 @@
   'runtime/vm/regexp_assembler_bytecode_inl.h',
   'runtime/vm/simulator_arm64.h',
   'runtime/vm/simulator_arm.h',
-  'runtime/vm/stack_frame_arm64.h',
+  'runtime/vm/simulator_riscv.h',
   'runtime/vm/stack_frame_arm.h',
+  'runtime/vm/stack_frame_arm64.h',
   'runtime/vm/stack_frame_ia32.h',
+  'runtime/vm/stack_frame_riscv.h',
   'runtime/vm/stack_frame_x64.h',
 
   // Only available in special builds
diff --git a/runtime/vm/code_patcher_riscv.cc b/runtime/vm/code_patcher_riscv.cc
new file mode 100644
index 0000000..415e645
--- /dev/null
+++ b/runtime/vm/code_patcher_riscv.cc
@@ -0,0 +1,196 @@
+// Copyright (c) 2021, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/globals.h"  // Needed here to get TARGET_ARCH_RISCV.
+#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+
+#include "vm/code_patcher.h"
+#include "vm/cpu.h"
+#include "vm/instructions.h"
+#include "vm/object.h"
+
+namespace dart {
+
+class PoolPointerCall : public ValueObject {
+ public:
+  PoolPointerCall(uword pc, const Code& code)
+      : end_(pc), object_pool_(ObjectPool::Handle(code.GetObjectPool())) {
+    ASSERT(*reinterpret_cast<uint16_t*>(end_ - 2) == 0x9082);  // jalr ra
+    uint32_t load_entry = *reinterpret_cast<uint32_t*>(end_ - 6);
+#if XLEN == 32
+    ASSERT((load_entry == 0x00362083) ||  // lw ra, entry(code)
+           (load_entry == 0x00b62083));   // lw ra, unchecked_entry(code)
+#elif XLEN == 64
+    ASSERT((load_entry == 0x00763083) ||  // ld ra, entry(code)
+           (load_entry == 0x01763083));   // ld ra, unchecked_entry(code)
+#endif
+    InstructionPattern::DecodeLoadWordFromPool(end_ - 6, &reg_, &index_);
+  }
+
+  intptr_t pp_index() const { return index_; }
+
+  CodePtr Target() const {
+    return static_cast<CodePtr>(object_pool_.ObjectAt(pp_index()));
+  }
+
+  void SetTarget(const Code& target) const {
+    object_pool_.SetObjectAt(pp_index(), target);
+    // No need to flush the instruction cache, since the code is not modified.
+  }
+
+ private:
+  uword end_;
+  const ObjectPool& object_pool_;
+  Register reg_;
+  intptr_t index_;
+  DISALLOW_IMPLICIT_CONSTRUCTORS(PoolPointerCall);
+};
+
+CodePtr CodePatcher::GetStaticCallTargetAt(uword return_address,
+                                           const Code& code) {
+  ASSERT(code.ContainsInstructionAt(return_address));
+  PoolPointerCall call(return_address, code);
+  return call.Target();
+}
+
+void CodePatcher::PatchStaticCallAt(uword return_address,
+                                    const Code& code,
+                                    const Code& new_target) {
+  PatchPoolPointerCallAt(return_address, code, new_target);
+}
+
+void CodePatcher::PatchPoolPointerCallAt(uword return_address,
+                                         const Code& code,
+                                         const Code& new_target) {
+  ASSERT(code.ContainsInstructionAt(return_address));
+  PoolPointerCall call(return_address, code);
+  call.SetTarget(new_target);
+}
+
+void CodePatcher::InsertDeoptimizationCallAt(uword start) {
+  UNREACHABLE();
+}
+
+CodePtr CodePatcher::GetInstanceCallAt(uword return_address,
+                                       const Code& caller_code,
+                                       Object* data) {
+  ASSERT(caller_code.ContainsInstructionAt(return_address));
+  ICCallPattern call(return_address, caller_code);
+  if (data != NULL) {
+    *data = call.Data();
+  }
+  return call.TargetCode();
+}
+
+void CodePatcher::PatchInstanceCallAt(uword return_address,
+                                      const Code& caller_code,
+                                      const Object& data,
+                                      const Code& target) {
+  auto thread = Thread::Current();
+  thread->isolate_group()->RunWithStoppedMutators([&]() {
+    PatchInstanceCallAtWithMutatorsStopped(thread, return_address, caller_code,
+                                           data, target);
+  });
+}
+
+void CodePatcher::PatchInstanceCallAtWithMutatorsStopped(
+    Thread* thread,
+    uword return_address,
+    const Code& caller_code,
+    const Object& data,
+    const Code& target) {
+  ASSERT(caller_code.ContainsInstructionAt(return_address));
+  ICCallPattern call(return_address, caller_code);
+  call.SetData(data);
+  call.SetTargetCode(target);
+}
+
+FunctionPtr CodePatcher::GetUnoptimizedStaticCallAt(uword return_address,
+                                                    const Code& code,
+                                                    ICData* ic_data_result) {
+  ASSERT(code.ContainsInstructionAt(return_address));
+  ICCallPattern static_call(return_address, code);
+  ICData& ic_data = ICData::Handle();
+  ic_data ^= static_call.Data();
+  if (ic_data_result != NULL) {
+    *ic_data_result = ic_data.ptr();
+  }
+  return ic_data.GetTargetAt(0);
+}
+
+void CodePatcher::PatchSwitchableCallAt(uword return_address,
+                                        const Code& caller_code,
+                                        const Object& data,
+                                        const Code& target) {
+  auto thread = Thread::Current();
+  // Ensure all threads are suspended as we update data and target pair.
+  thread->isolate_group()->RunWithStoppedMutators([&]() {
+    PatchSwitchableCallAtWithMutatorsStopped(thread, return_address,
+                                             caller_code, data, target);
+  });
+}
+
+void CodePatcher::PatchSwitchableCallAtWithMutatorsStopped(
+    Thread* thread,
+    uword return_address,
+    const Code& caller_code,
+    const Object& data,
+    const Code& target) {
+  if (FLAG_precompiled_mode) {
+    BareSwitchableCallPattern call(return_address);
+    call.SetData(data);
+    call.SetTarget(target);
+  } else {
+    SwitchableCallPattern call(return_address, caller_code);
+    call.SetData(data);
+    call.SetTarget(target);
+  }
+}
+
+uword CodePatcher::GetSwitchableCallTargetEntryAt(uword return_address,
+                                                  const Code& caller_code) {
+  if (FLAG_precompiled_mode) {
+    BareSwitchableCallPattern call(return_address);
+    return call.target_entry();
+  } else {
+    SwitchableCallPattern call(return_address, caller_code);
+    return call.target_entry();
+  }
+}
+
+ObjectPtr CodePatcher::GetSwitchableCallDataAt(uword return_address,
+                                               const Code& caller_code) {
+  if (FLAG_precompiled_mode) {
+    BareSwitchableCallPattern call(return_address);
+    return call.data();
+  } else {
+    SwitchableCallPattern call(return_address, caller_code);
+    return call.data();
+  }
+}
+
+void CodePatcher::PatchNativeCallAt(uword return_address,
+                                    const Code& caller_code,
+                                    NativeFunction target,
+                                    const Code& trampoline) {
+  Thread::Current()->isolate_group()->RunWithStoppedMutators([&]() {
+    ASSERT(caller_code.ContainsInstructionAt(return_address));
+    NativeCallPattern call(return_address, caller_code);
+    call.set_target(trampoline);
+    call.set_native_function(target);
+  });
+}
+
+CodePtr CodePatcher::GetNativeCallAt(uword return_address,
+                                     const Code& caller_code,
+                                     NativeFunction* target) {
+  ASSERT(caller_code.ContainsInstructionAt(return_address));
+  NativeCallPattern call(return_address, caller_code);
+  *target = call.native_function();
+  return call.target();
+}
+
+}  // namespace dart
+
+#endif  // defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
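
The patching strategy mirrors the other ports: a static call loads its target Code out of the object pool (the `lw`/`ld` of `ra` decoded in the PoolPointerCall constructor), so retargeting is a single store into the pool slot and, as the SetTarget comment notes, no instruction bytes change and no i-cache flush is needed. A conceptual sketch of that idea, detached from the VM types:

    #include <atomic>

    // Conceptual only: a call site that indirects through a pool slot can be
    // retargeted with one atomic pointer store; the code itself is immutable.
    struct PoolSlot {
      std::atomic<void (*)()> target;
    };

    void CallThroughPool(PoolSlot* slot) {
      slot->target.load(std::memory_order_acquire)();  // the jalr ra above
    }

    void Retarget(PoolSlot* slot, void (*new_target)()) {
      slot->target.store(new_target, std::memory_order_release);
    }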
diff --git a/runtime/vm/code_patcher_riscv_test.cc b/runtime/vm/code_patcher_riscv_test.cc
new file mode 100644
index 0000000..e6d4480
--- /dev/null
+++ b/runtime/vm/code_patcher_riscv_test.cc
@@ -0,0 +1,68 @@
+// Copyright (c) 2021, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/globals.h"
+#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+
+#include "vm/code_patcher.h"
+#include "vm/compiler/assembler/assembler.h"
+#include "vm/dart_entry.h"
+#include "vm/instructions.h"
+#include "vm/native_entry.h"
+#include "vm/native_entry_test.h"
+#include "vm/runtime_entry.h"
+#include "vm/stub_code.h"
+#include "vm/symbols.h"
+#include "vm/unit_test.h"
+
+namespace dart {
+
+#define __ assembler->
+
+ASSEMBLER_TEST_GENERATE(IcDataAccess, assembler) {
+  Thread* thread = Thread::Current();
+  const String& class_name = String::Handle(Symbols::New(thread, "ownerClass"));
+  const Script& script = Script::Handle();
+  const Class& owner_class = Class::Handle(Class::New(
+      Library::Handle(), class_name, script, TokenPosition::kNoSource));
+  const String& function_name =
+      String::Handle(Symbols::New(thread, "callerFunction"));
+  const FunctionType& signature = FunctionType::ZoneHandle(FunctionType::New());
+  const Function& function = Function::Handle(Function::New(
+      signature, function_name, UntaggedFunction::kRegularFunction, true, false,
+      false, false, false, owner_class, TokenPosition::kNoSource));
+
+  const String& target_name =
+      String::Handle(Symbols::New(thread, "targetFunction"));
+  const intptr_t kTypeArgsLen = 0;
+  const intptr_t kNumArgs = 1;
+  const Array& args_descriptor = Array::Handle(ArgumentsDescriptor::NewBoxed(
+      kTypeArgsLen, kNumArgs, Object::null_array()));
+  const ICData& ic_data = ICData::ZoneHandle(ICData::New(
+      function, target_name, args_descriptor, 15, 1, ICData::kInstance));
+  const Code& stub = StubCode::OneArgCheckInlineCache();
+
+  // Code is generated, but not executed. Just parsed with CodePatcher.
+  __ set_constant_pool_allowed(true);  // Uninitialized pp is OK.
+  __ LoadUniqueObject(IC_DATA_REG, ic_data);
+  __ LoadUniqueObject(CODE_REG, stub);
+  __ Call(compiler::FieldAddress(
+      CODE_REG, Code::entry_point_offset(Code::EntryKind::kMonomorphic)));
+  __ ret();
+}
+
+ASSEMBLER_TEST_RUN(IcDataAccess, test) {
+  uword end = test->payload_start() + test->code().Size();
+  uword return_address = end - CInstr::kInstrSize;
+  ICData& ic_data = ICData::Handle();
+  CodePatcher::GetInstanceCallAt(return_address, test->code(), &ic_data);
+  EXPECT_STREQ("targetFunction",
+               String::Handle(ic_data.target_name()).ToCString());
+  EXPECT_EQ(1, ic_data.NumArgsTested());
+  EXPECT_EQ(0, ic_data.NumberOfChecks());
+}
+
+}  // namespace dart
+
+#endif  // defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
diff --git a/runtime/vm/compiler/aot/aot_call_specializer.cc b/runtime/vm/compiler/aot/aot_call_specializer.cc
index a8e2f04..d4aa2e9 100644
--- a/runtime/vm/compiler/aot/aot_call_specializer.cc
+++ b/runtime/vm/compiler/aot/aot_call_specializer.cc
@@ -509,7 +509,7 @@
         if (replacement != nullptr) break;
         FALL_THROUGH;
       case Token::kTRUNCDIV:
-#if !defined(TARGET_ARCH_X64) && !defined(TARGET_ARCH_ARM64)
+#if !defined(TARGET_ARCH_IS_64_BIT)
         // TODO(ajcbik): 32-bit archs too?
         break;
 #else
diff --git a/runtime/vm/compiler/aot/precompiler.cc b/runtime/vm/compiler/aot/precompiler.cc
index 143546b..0a66815 100644
--- a/runtime/vm/compiler/aot/precompiler.cc
+++ b/runtime/vm/compiler/aot/precompiler.cc
@@ -3110,7 +3110,7 @@
   // true. use_far_branches is always false on ia32 and x64.
   bool done = false;
   // volatile because the variable may be clobbered by a longjmp.
-  volatile bool use_far_branches = false;
+  volatile intptr_t far_branch_level = 0;
   SpeculativeInliningPolicy speculative_policy(
       true, FLAG_max_speculative_inlining_attempts);
 
@@ -3198,7 +3198,7 @@
       // (See TryCommitToParent invocation below).
       compiler::ObjectPoolBuilder object_pool_builder(
           precompiler_->global_object_pool_builder());
-      compiler::Assembler assembler(&object_pool_builder, use_far_branches);
+      compiler::Assembler assembler(&object_pool_builder, far_branch_level);
 
       CodeStatistics* function_stats = NULL;
       if (FLAG_print_instruction_stats) {
@@ -3273,8 +3273,8 @@
         // Compilation failed due to an out of range branch offset in the
         // assembler. We try again (done = false) with far branches enabled.
         done = false;
-        ASSERT(!use_far_branches);
-        use_far_branches = true;
+        RELEASE_ASSERT(far_branch_level < 2);
+        far_branch_level++;
       } else if (error.ptr() == Object::speculative_inlining_error().ptr()) {
         // The return value of setjmp is the deopt id of the check instruction
         // that caused the bailout.
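
Replacing the boolean with a level matters on RISC-V because branch reach escalates in two steps: conditional branches span roughly ±4 KiB, `jal` spans ±1 MiB, and anything farther needs an `auipc`+`jalr` pair, so the assembler may bail out twice before the longest form is in use. A self-contained sketch of the resulting retry shape; `TryCompileWith` is a hypothetical stand-in for one assembler pass:

    #include <cassert>
    #include <cstdint>

    bool TryCompileWith(intptr_t far_branch_level);  // hypothetical helper

    void CompileWithRetry() {
      // Level 0: short branches; 1: branch over jal; 2: branch over auipc+jalr.
      intptr_t far_branch_level = 0;
      bool done = false;
      while (!done) {
        done = true;
        if (!TryCompileWith(far_branch_level)) {  // branch offset out of range
          done = false;
          assert(far_branch_level < 2);  // stands in for RELEASE_ASSERT
          far_branch_level++;
        }
      }
    }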
diff --git a/runtime/vm/compiler/asm_intrinsifier_arm.cc b/runtime/vm/compiler/asm_intrinsifier_arm.cc
index 3b8931c..77cb516 100644
--- a/runtime/vm/compiler/asm_intrinsifier_arm.cc
+++ b/runtime/vm/compiler/asm_intrinsifier_arm.cc
@@ -15,10 +15,12 @@
 namespace compiler {
 
 // When entering intrinsics code:
+// PP: Caller's ObjectPool in JIT / global ObjectPool in AOT
+// CODE_REG: Callee's Code in JIT / not passed in AOT
 // R4: Arguments descriptor
 // LR: Return address
-// The R4 register can be destroyed only if there is no slow-path, i.e.
-// if the intrinsified method always executes a return.
+// The R4 and CODE_REG registers can be destroyed only if there is no slow-path,
+// i.e. if the intrinsified method always executes a return.
 // The FP register should not be modified, because it is used by the profiler.
 // The PP and THR registers (see constants_arm.h) must be preserved.
 
diff --git a/runtime/vm/compiler/asm_intrinsifier_arm64.cc b/runtime/vm/compiler/asm_intrinsifier_arm64.cc
index 4056cbf..600e28c 100644
--- a/runtime/vm/compiler/asm_intrinsifier_arm64.cc
+++ b/runtime/vm/compiler/asm_intrinsifier_arm64.cc
@@ -15,10 +15,12 @@
 namespace compiler {
 
 // When entering intrinsics code:
+// PP: Caller's ObjectPool in JIT / global ObjectPool in AOT
+// CODE_REG: Callee's Code in JIT / not passed in AOT
 // R4: Arguments descriptor
 // LR: Return address
-// The R4 register can be destroyed only if there is no slow-path, i.e.
-// if the intrinsified method always executes a return.
+// The R4 and CODE_REG registers can be destroyed only if there is no slow-path,
+// i.e. if the intrinsified method always executes a return.
 // The FP register should not be modified, because it is used by the profiler.
 // The PP and THR registers (see constants_arm64.h) must be preserved.
 
diff --git a/runtime/vm/compiler/asm_intrinsifier_riscv.cc b/runtime/vm/compiler/asm_intrinsifier_riscv.cc
new file mode 100644
index 0000000..bd85776
--- /dev/null
+++ b/runtime/vm/compiler/asm_intrinsifier_riscv.cc
@@ -0,0 +1,725 @@
+// Copyright (c) 2021, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/globals.h"  // Needed here to get TARGET_ARCH_RISCV.
+#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+
+#define SHOULD_NOT_INCLUDE_RUNTIME
+
+#include "vm/class_id.h"
+#include "vm/compiler/asm_intrinsifier.h"
+#include "vm/compiler/assembler/assembler.h"
+
+namespace dart {
+namespace compiler {
+
+// When entering intrinsics code:
+// PP: Caller's ObjectPool in JIT / global ObjectPool in AOT
+// CODE_REG: Callee's Code in JIT / not passed in AOT
+// S4: Arguments descriptor
+// RA: Return address
+// The S4 and CODE_REG registers can be destroyed only if there is no slow-path,
+// i.e. if the intrinsified method always executes a return.
+// The FP register should not be modified, because it is used by the profiler.
+// The PP and THR registers (see constants_riscv.h) must be preserved.
+
+#define __ assembler->
+
+// Allocate a GrowableObjectArray using the backing array specified.
+// On stack: type argument (+1), data (+0).
+void AsmIntrinsifier::GrowableArray_Allocate(Assembler* assembler,
+                                             Label* normal_ir_body) {
+  // The newly allocated object is returned in A0.
+  const intptr_t kTypeArgumentsOffset = 1 * target::kWordSize;
+  const intptr_t kArrayOffset = 0 * target::kWordSize;
+
+  // Try allocating in new space.
+  const Class& cls = GrowableObjectArrayClass();
+  __ TryAllocate(cls, normal_ir_body, Assembler::kFarJump, A0, A1);
+
+  // Store backing array object in growable array object.
+  __ lx(A1, Address(SP, kArrayOffset));  // Data argument.
+  // A0 is new, no barrier needed.
+  __ StoreCompressedIntoObjectNoBarrier(
+      A0, FieldAddress(A0, target::GrowableObjectArray::data_offset()), A1);
+
+  // A0: new growable array object start as a tagged pointer.
+  // Store the type argument field in the growable array object.
+  __ lx(A1, Address(SP, kTypeArgumentsOffset));  // Type argument.
+  __ StoreCompressedIntoObjectNoBarrier(
+      A0,
+      FieldAddress(A0, target::GrowableObjectArray::type_arguments_offset()),
+      A1);
+
+  // Set the length field in the growable array object to 0.
+  __ StoreCompressedIntoObjectNoBarrier(
+      A0, FieldAddress(A0, target::GrowableObjectArray::length_offset()), ZR);
+  __ ret();  // Returns the newly allocated object in A0.
+
+  __ Bind(normal_ir_body);
+}
+
+// Loads the arguments from the stack into A0 and A1.
+// Tests if they are both Smis; jumps to not_smi if not.
+static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) {
+  __ lx(A0, Address(SP, +1 * target::kWordSize));
+  __ lx(A1, Address(SP, +0 * target::kWordSize));
+  __ or_(TMP, A0, A1);
+  __ BranchIfNotSmi(TMP, not_smi, Assembler::kNearJump);
+}
+
+void AsmIntrinsifier::Integer_shl(Assembler* assembler, Label* normal_ir_body) {
+  const Register left = A0;
+  const Register right = A1;
+  const Register result = A0;
+
+  TestBothArgumentsSmis(assembler, normal_ir_body);
+  __ CompareImmediate(right, target::ToRawSmi(target::kSmiBits),
+                      compiler::kObjectBytes);
+  __ BranchIf(CS, normal_ir_body, Assembler::kNearJump);
+
+  __ SmiUntag(right);
+  __ sll(TMP, left, right);
+  __ sra(TMP2, TMP, right);
+  __ bne(TMP2, left, normal_ir_body, Assembler::kNearJump);
+  __ mv(result, TMP);
+  __ ret();
+
+  __ Bind(normal_ir_body);
+}
+
+static void CompareIntegers(Assembler* assembler,
+                            Label* normal_ir_body,
+                            Condition true_condition) {
+  Label true_label;
+  TestBothArgumentsSmis(assembler, normal_ir_body);
+  __ CompareObjectRegisters(A0, A1);
+  __ BranchIf(true_condition, &true_label, Assembler::kNearJump);
+  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
+  __ ret();
+  __ Bind(&true_label);
+  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
+  __ ret();
+
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::Integer_lessThan(Assembler* assembler,
+                                       Label* normal_ir_body) {
+  CompareIntegers(assembler, normal_ir_body, LT);
+}
+
+void AsmIntrinsifier::Integer_greaterThan(Assembler* assembler,
+                                          Label* normal_ir_body) {
+  CompareIntegers(assembler, normal_ir_body, GT);
+}
+
+void AsmIntrinsifier::Integer_lessEqualThan(Assembler* assembler,
+                                            Label* normal_ir_body) {
+  CompareIntegers(assembler, normal_ir_body, LE);
+}
+
+void AsmIntrinsifier::Integer_greaterEqualThan(Assembler* assembler,
+                                               Label* normal_ir_body) {
+  CompareIntegers(assembler, normal_ir_body, GE);
+}
+
+// This is called for Smi and Mint receivers. The right argument
+// can be Smi, Mint or double.
+void AsmIntrinsifier::Integer_equalToInteger(Assembler* assembler,
+                                             Label* normal_ir_body) {
+  Label true_label, check_for_mint;
+  // For integer receiver '===' check first.
+  __ lx(A0, Address(SP, 1 * target::kWordSize));
+  __ lx(A1, Address(SP, 0 * target::kWordSize));
+  __ CompareObjectRegisters(A0, A1);
+  __ BranchIf(EQ, &true_label, Assembler::kNearJump);
+
+  __ or_(TMP, A0, A1);
+  __ BranchIfNotSmi(TMP, &check_for_mint, Assembler::kNearJump);
+  // If A0 or A1 is not a Smi, do Mint checks.
+
+  // Both arguments are Smis, so '===' is good enough.
+  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
+  __ ret();
+  __ Bind(&true_label);
+  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
+  __ ret();
+
+  // At least one of the arguments was not Smi.
+  Label receiver_not_smi;
+  __ Bind(&check_for_mint);
+
+  __ BranchIfNotSmi(A0, &receiver_not_smi,
+                    Assembler::kNearJump);  // Check receiver.
+
+  // Left (receiver) is Smi, return false if right is not Double.
+  // Note that an instance of Mint never contains a value that can be
+  // represented by Smi.
+
+  __ CompareClassId(A1, kDoubleCid, TMP);
+  __ BranchIf(EQ, normal_ir_body, Assembler::kNearJump);
+  __ LoadObject(A0,
+                CastHandle<Object>(FalseObject()));  // Smi == Mint -> false.
+  __ ret();
+
+  __ Bind(&receiver_not_smi);
+  // A0: receiver.
+
+  __ CompareClassId(A0, kMintCid, TMP);
+  __ BranchIf(NE, normal_ir_body, Assembler::kNearJump);
+  // Receiver is Mint, return false if right is Smi.
+  __ BranchIfNotSmi(A1, normal_ir_body, Assembler::kNearJump);
+  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
+  __ ret();
+  // TODO(srdjan): Implement Mint == Mint comparison.
+
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::Integer_equal(Assembler* assembler,
+                                    Label* normal_ir_body) {
+  Integer_equalToInteger(assembler, normal_ir_body);
+}
+
+void AsmIntrinsifier::Smi_bitLength(Assembler* assembler,
+                                    Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::Bigint_lsh(Assembler* assembler, Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::Bigint_rsh(Assembler* assembler, Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::Bigint_absAdd(Assembler* assembler,
+                                    Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::Bigint_absSub(Assembler* assembler,
+                                    Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::Bigint_mulAdd(Assembler* assembler,
+                                    Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::Bigint_sqrAdd(Assembler* assembler,
+                                    Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::Bigint_estimateQuotientDigit(Assembler* assembler,
+                                                   Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::Montgomery_mulMod(Assembler* assembler,
+                                        Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+// FA0: left
+// FA1: right
+static void PrepareDoubleOp(Assembler* assembler, Label* normal_ir_body) {
+  Label double_op;
+  __ lx(A0, Address(SP, 1 * target::kWordSize));  // Left
+  __ lx(A1, Address(SP, 0 * target::kWordSize));  // Right
+
+  __ fld(FA0, FieldAddress(A0, target::Double::value_offset()));
+
+  __ SmiUntag(TMP, A1);
+#if XLEN == 32
+  __ fcvtdw(FA1, TMP);
+#else
+  __ fcvtdl(FA1, TMP);
+#endif
+  __ BranchIfSmi(A1, &double_op, Assembler::kNearJump);
+  __ CompareClassId(A1, kDoubleCid, TMP);
+  __ BranchIf(NE, normal_ir_body, Assembler::kNearJump);
+  __ fld(FA1, FieldAddress(A1, target::Double::value_offset()));
+
+  __ Bind(&double_op);
+}
+
+void AsmIntrinsifier::Double_greaterThan(Assembler* assembler,
+                                         Label* normal_ir_body) {
+  Label true_label;
+  PrepareDoubleOp(assembler, normal_ir_body);
+  __ fltd(TMP, FA1, FA0);
+  __ bnez(TMP, &true_label, Assembler::kNearJump);
+  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
+  __ ret();
+  __ Bind(&true_label);
+  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
+  __ ret();
+
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::Double_greaterEqualThan(Assembler* assembler,
+                                              Label* normal_ir_body) {
+  Label true_label;
+  PrepareDoubleOp(assembler, normal_ir_body);
+  __ fled(TMP, FA1, FA0);
+  __ bnez(TMP, &true_label, Assembler::kNearJump);
+  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
+  __ ret();
+  __ Bind(&true_label);
+  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
+  __ ret();
+
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::Double_lessThan(Assembler* assembler,
+                                      Label* normal_ir_body) {
+  Label true_label;
+  PrepareDoubleOp(assembler, normal_ir_body);
+  __ fltd(TMP, FA0, FA1);
+  __ bnez(TMP, &true_label, Assembler::kNearJump);
+  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
+  __ ret();
+  __ Bind(&true_label);
+  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
+  __ ret();
+
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::Double_equal(Assembler* assembler,
+                                   Label* normal_ir_body) {
+  Label true_label;
+  PrepareDoubleOp(assembler, normal_ir_body);
+  __ feqd(TMP, FA0, FA1);
+  __ bnez(TMP, &true_label, Assembler::kNearJump);
+  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
+  __ ret();
+  __ Bind(&true_label);
+  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
+  __ ret();
+
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::Double_lessEqualThan(Assembler* assembler,
+                                           Label* normal_ir_body) {
+  Label true_label;
+  PrepareDoubleOp(assembler, normal_ir_body);
+  __ fled(TMP, FA0, FA1);
+  __ bnez(TMP, &true_label, Assembler::kNearJump);
+  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
+  __ ret();
+  __ Bind(&true_label);
+  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
+  __ ret();
+
+  __ Bind(normal_ir_body);
+}
+
+// Expects left argument to be double (receiver). Right argument is unknown.
+// Both arguments are on stack.
+static void DoubleArithmeticOperations(Assembler* assembler,
+                                       Label* normal_ir_body,
+                                       Token::Kind kind) {
+  PrepareDoubleOp(assembler, normal_ir_body);
+  switch (kind) {
+    case Token::kADD:
+      __ faddd(FA0, FA0, FA1);
+      break;
+    case Token::kSUB:
+      __ fsubd(FA0, FA0, FA1);
+      break;
+    case Token::kMUL:
+      __ fmuld(FA0, FA0, FA1);
+      break;
+    case Token::kDIV:
+      __ fdivd(FA0, FA0, FA1);
+      break;
+    default:
+      UNREACHABLE();
+  }
+  const Class& double_class = DoubleClass();
+  __ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump, A0, TMP);
+  __ StoreDFieldToOffset(FA0, A0, target::Double::value_offset());
+  __ ret();
+
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::Double_add(Assembler* assembler, Label* normal_ir_body) {
+  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kADD);
+}
+
+void AsmIntrinsifier::Double_mul(Assembler* assembler, Label* normal_ir_body) {
+  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kMUL);
+}
+
+void AsmIntrinsifier::Double_sub(Assembler* assembler, Label* normal_ir_body) {
+  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kSUB);
+}
+
+void AsmIntrinsifier::Double_div(Assembler* assembler, Label* normal_ir_body) {
+  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kDIV);
+}
+
+// Left is double, right is integer (Mint or Smi)
+void AsmIntrinsifier::Double_mulFromInteger(Assembler* assembler,
+                                            Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::DoubleFromInteger(Assembler* assembler,
+                                        Label* normal_ir_body) {
+  __ lx(A0, Address(SP, 0 * target::kWordSize));
+  __ BranchIfNotSmi(A0, normal_ir_body, Assembler::kNearJump);
+  // Is Smi.
+  __ SmiUntag(A0);
+#if XLEN == 32
+  __ fcvtdw(FA0, A0);
+#else
+  __ fcvtdl(FA0, A0);
+#endif
+  const Class& double_class = DoubleClass();
+  __ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump, A0, TMP);
+  __ StoreDFieldToOffset(FA0, A0, target::Double::value_offset());
+  __ ret();
+  __ Bind(normal_ir_body);
+}
+
+static void DoubleIsClass(Assembler* assembler, intx_t fclass) {
+  Label true_label;
+  __ lx(A0, Address(SP, 0 * target::kWordSize));
+  __ LoadDFieldFromOffset(FA0, A0, target::Double::value_offset());
+  __ fclassd(TMP, FA0);
+  __ andi(TMP, TMP, fclass);
+  __ bnez(TMP, &true_label, Assembler::kNearJump);
+  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
+  __ ret();
+  __ Bind(&true_label);
+  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
+  __ ret();
+}
+
+void AsmIntrinsifier::Double_getIsNaN(Assembler* assembler,
+                                      Label* normal_ir_body) {
+  DoubleIsClass(assembler, kFClassSignallingNan | kFClassQuietNan);
+}
+
+void AsmIntrinsifier::Double_getIsInfinite(Assembler* assembler,
+                                           Label* normal_ir_body) {
+  DoubleIsClass(assembler, kFClassNegInfinity | kFClassPosInfinity);
+}
+
+void AsmIntrinsifier::Double_getIsNegative(Assembler* assembler,
+                                           Label* normal_ir_body) {
+  DoubleIsClass(assembler, kFClassNegInfinity | kFClassNegNormal |
+                               kFClassNegSubnormal | kFClassNegZero);
+}
+
+void AsmIntrinsifier::Double_hashCode(Assembler* assembler,
+                                      Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+//    var state = ((_A * (_state[kSTATE_LO])) + _state[kSTATE_HI]) & _MASK_64;
+//    _state[kSTATE_LO] = state & _MASK_32;
+//    _state[kSTATE_HI] = state >> 32;
+void AsmIntrinsifier::Random_nextState(Assembler* assembler,
+                                       Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::ObjectEquals(Assembler* assembler,
+                                   Label* normal_ir_body) {
+  Label true_label;
+  __ lx(A0, Address(SP, 1 * target::kWordSize));
+  __ lx(A1, Address(SP, 0 * target::kWordSize));
+  __ beq(A0, A1, &true_label, Assembler::kNearJump);
+  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
+  __ ret();
+  __ Bind(&true_label);
+  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
+  __ ret();
+}
+
+// Return type quickly for simple types (not parameterized and not signature).
+void AsmIntrinsifier::ObjectRuntimeType(Assembler* assembler,
+                                        Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler,
+                                                Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::String_getHashCode(Assembler* assembler,
+                                         Label* normal_ir_body) {
+  __ lx(A0, Address(SP, 0 * target::kWordSize));
+#if XLEN == 32
+  // Smi field.
+  __ lw(A0, FieldAddress(A0, target::String::hash_offset()));
+#else
+  // uint32_t field in header.
+  __ lwu(A0, FieldAddress(A0, target::String::hash_offset()));
+  __ SmiTag(A0);
+#endif
+  __ beqz(A0, normal_ir_body, Assembler::kNearJump);
+  __ ret();
+
+  // Hash not yet computed.
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::Type_getHashCode(Assembler* assembler,
+                                       Label* normal_ir_body) {
+  __ lx(A0, Address(SP, 0 * target::kWordSize));
+  __ LoadCompressed(A0, FieldAddress(A0, target::Type::hash_offset()));
+  __ beqz(A0, normal_ir_body, Assembler::kNearJump);
+  __ ret();
+  // Hash not yet computed.
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::Type_equality(Assembler* assembler,
+                                    Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::FunctionType_getHashCode(Assembler* assembler,
+                                               Label* normal_ir_body) {
+  __ lx(A0, Address(SP, 0 * target::kWordSize));
+  __ LoadCompressed(A0, FieldAddress(A0, target::FunctionType::hash_offset()));
+  __ beqz(A0, normal_ir_body, Assembler::kNearJump);
+  __ ret();
+  // Hash not yet computed.
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::FunctionType_equality(Assembler* assembler,
+                                            Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+// Keep in sync with Instance::IdentityHashCode.
+// Note int and double never reach here because they override _identityHashCode.
+// Special cases are also not needed for null or bool because they were pre-set
+// during VM isolate finalization.
+void AsmIntrinsifier::Object_getHash(Assembler* assembler,
+                                     Label* normal_ir_body) {
+#if XLEN == 32
+  UNREACHABLE();
+#else
+  Label not_yet_computed;
+  __ lx(A0, Address(SP, 0 * target::kWordSize));  // Object.
+  __ lwu(A0, FieldAddress(
+                 A0, target::Object::tags_offset() +
+                         target::UntaggedObject::kHashTagPos / kBitsPerByte));
+  __ beqz(A0, &not_yet_computed);
+  __ SmiTag(A0);
+  __ ret();
+
+  __ Bind(&not_yet_computed);
+  __ LoadFromOffset(A1, THR, target::Thread::random_offset());
+  __ AndImmediate(T2, A1, 0xffffffff);  // state_lo
+  __ srli(T3, A1, 32);                  // state_hi
+  __ LoadImmediate(A1, 0xffffda61);     // A
+  __ mul(A1, A1, T2);
+  __ add(A1, A1, T3);  // new_state = (A * state_lo) + state_hi
+  __ StoreToOffset(A1, THR, target::Thread::random_offset());
+  __ AndImmediate(A1, A1, 0x3fffffff);
+  __ beqz(A1, &not_yet_computed);
+
+  __ lx(A0, Address(SP, 0 * target::kWordSize));  // Object
+  __ subi(A0, A0, kHeapObjectTag);
+  __ slli(T3, A1, target::UntaggedObject::kHashTagPos);
+
+  Label retry, already_set_in_t4;
+  __ Bind(&retry);
+  __ lr(T2, Address(A0, 0));
+  __ srli(T4, T2, target::UntaggedObject::kHashTagPos);
+  __ bnez(T4, &already_set_in_t4);
+  __ or_(T2, T2, T3);
+  __ sc(T4, T2, Address(A0, 0));
+  __ bnez(T4, &retry);
+  // Fall-through with A1 containing new hash value (untagged).
+  __ SmiTag(A0, A1);
+  __ ret();
+  __ Bind(&already_set_in_t4);
+  __ SmiTag(A0, T4);
+  __ ret();
+#endif
+}
+
+void GenerateSubstringMatchesSpecialization(Assembler* assembler,
+                                            intptr_t receiver_cid,
+                                            intptr_t other_cid,
+                                            Label* return_true,
+                                            Label* return_false) {
+  UNIMPLEMENTED();
+}
+
+// bool _substringMatches(int start, String other)
+// This intrinsic handles a OneByteString or TwoByteString receiver with a
+// OneByteString other.
+void AsmIntrinsifier::StringBaseSubstringMatches(Assembler* assembler,
+                                                 Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::StringBaseCharAt(Assembler* assembler,
+                                       Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::StringBaseIsEmpty(Assembler* assembler,
+                                        Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::OneByteString_getHashCode(Assembler* assembler,
+                                                Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+// Arg0: OneByteString (receiver).
+// Arg1: Start index as Smi.
+// Arg2: End index as Smi.
+// The indexes must be valid.
+void AsmIntrinsifier::OneByteString_substringUnchecked(Assembler* assembler,
+                                                       Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::WriteIntoOneByteString(Assembler* assembler,
+                                             Label* normal_ir_body) {
+  __ lx(A0, Address(SP, 2 * target::kWordSize));  // OneByteString.
+  __ lx(A1, Address(SP, 1 * target::kWordSize));  // Index.
+  __ lx(A2, Address(SP, 0 * target::kWordSize));  // Value.
+  __ SmiUntag(A1);
+  __ SmiUntag(A2);
+  __ add(A1, A1, A0);
+  __ sb(A2, FieldAddress(A1, target::OneByteString::data_offset()));
+  __ ret();
+}
+
+void AsmIntrinsifier::WriteIntoTwoByteString(Assembler* assembler,
+                                             Label* normal_ir_body) {
+  __ lx(A0, Address(SP, 2 * target::kWordSize));  // TwoByteString.
+  __ lx(A1, Address(SP, 1 * target::kWordSize));  // Index.
+  __ lx(A2, Address(SP, 0 * target::kWordSize));  // Value.
+  // Untag index and multiply by element size (2) -> no-op: the Smi tag bit
+  // already doubles the index into a byte offset, so only the value needs
+  // untagging.
+  __ SmiUntag(A2);
+  __ add(A1, A1, A0);
+  __ sh(A2, FieldAddress(A1, target::TwoByteString::data_offset()));
+  __ ret();
+}
+
+void AsmIntrinsifier::AllocateOneByteString(Assembler* assembler,
+                                            Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::AllocateTwoByteString(Assembler* assembler,
+                                            Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+// TODO(srdjan): Add combinations (one-byte/two-byte/external strings).
+static void StringEquality(Assembler* assembler,
+                           Label* normal_ir_body,
+                           intptr_t string_cid) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::OneByteString_equality(Assembler* assembler,
+                                             Label* normal_ir_body) {
+  StringEquality(assembler, normal_ir_body, kOneByteStringCid);
+}
+
+void AsmIntrinsifier::TwoByteString_equality(Assembler* assembler,
+                                             Label* normal_ir_body) {
+  StringEquality(assembler, normal_ir_body, kTwoByteStringCid);
+}
+
+void AsmIntrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler,
+                                                   Label* normal_ir_body,
+                                                   bool sticky) {
+  if (FLAG_interpret_irregexp) return;
+
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::UserTag_defaultTag(Assembler* assembler,
+                                         Label* normal_ir_body) {
+  __ LoadIsolate(A0);
+  __ lx(A0, Address(A0, target::Isolate::default_tag_offset()));
+  __ ret();
+}
+
+void AsmIntrinsifier::Profiler_getCurrentTag(Assembler* assembler,
+                                             Label* normal_ir_body) {
+  __ LoadIsolate(A0);
+  __ lx(A0, Address(A0, target::Isolate::current_tag_offset()));
+  __ ret();
+}
+
+void AsmIntrinsifier::Timeline_isDartStreamEnabled(Assembler* assembler,
+                                                   Label* normal_ir_body) {
+#if !defined(SUPPORT_TIMELINE)
+  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
+  __ ret();
+#else
+  Label true_label;
+  // Load TimelineStream*.
+  __ lx(A0, Address(THR, target::Thread::dart_stream_offset()));
+  // Load uintptr_t from TimelineStream*.
+  __ lx(A0, Address(A0, target::TimelineStream::enabled_offset()));
+  __ bnez(A0, &true_label, Assembler::kNearJump);
+  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
+  __ ret();
+  __ Bind(&true_label);
+  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
+  __ ret();
+#endif
+}
+
+#undef __
+
+}  // namespace compiler
+}  // namespace dart
+
+#endif  // defined(TARGET_ARCH_RISCV)
diff --git a/runtime/vm/compiler/asm_intrinsifier_x64.cc b/runtime/vm/compiler/asm_intrinsifier_x64.cc
index e2f7b24..656bce9 100644
--- a/runtime/vm/compiler/asm_intrinsifier_x64.cc
+++ b/runtime/vm/compiler/asm_intrinsifier_x64.cc
@@ -15,10 +15,12 @@
 namespace compiler {
 
 // When entering intrinsics code:
+// PP: Caller's ObjectPool in JIT / global ObjectPool in AOT
+// CODE_REG: Callee's Code in JIT / not passed in AOT
 // R10: Arguments descriptor
 // TOS: Return address
-// The R10 registers can be destroyed only if there is no slow-path, i.e.
-// if the intrinsified method always executes a return.
+// The R10 and CODE_REG registers can be destroyed only if there is no
+// slow-path, i.e. if the intrinsified method always executes a return.
 // The RBP register should not be modified, because it is used by the profiler.
 // The PP and THR registers (see constants_x64.h) must be preserved.
 
diff --git a/runtime/vm/compiler/assembler/assembler.h b/runtime/vm/compiler/assembler/assembler.h
index 336640a..cfed11a 100644
--- a/runtime/vm/compiler/assembler/assembler.h
+++ b/runtime/vm/compiler/assembler/assembler.h
@@ -25,6 +25,8 @@
 #include "vm/compiler/assembler/assembler_arm.h"
 #elif defined(TARGET_ARCH_ARM64)
 #include "vm/compiler/assembler/assembler_arm64.h"
+#elif defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+#include "vm/compiler/assembler/assembler_riscv.h"
 #else
 #error Unknown architecture.
 #endif
diff --git a/runtime/vm/compiler/assembler/assembler_arm.cc b/runtime/vm/compiler/assembler/assembler_arm.cc
index 3aeae48..139be88 100644
--- a/runtime/vm/compiler/assembler/assembler_arm.cc
+++ b/runtime/vm/compiler/assembler/assembler_arm.cc
@@ -30,9 +30,9 @@
 namespace compiler {
 
 Assembler::Assembler(ObjectPoolBuilder* object_pool_builder,
-                     bool use_far_branches)
+                     intptr_t far_branch_level)
     : AssemblerBase(object_pool_builder),
-      use_far_branches_(use_far_branches),
+      use_far_branches_(far_branch_level != 0),
       constant_pool_allowed_(false) {
   generate_invoke_write_barrier_wrapper_ = [&](Condition cond, Register reg) {
     Call(
diff --git a/runtime/vm/compiler/assembler/assembler_arm.h b/runtime/vm/compiler/assembler/assembler_arm.h
index c7168c3..78bb68e 100644
--- a/runtime/vm/compiler/assembler/assembler_arm.h
+++ b/runtime/vm/compiler/assembler/assembler_arm.h
@@ -354,7 +354,7 @@
 class Assembler : public AssemblerBase {
  public:
   explicit Assembler(ObjectPoolBuilder* object_pool_builder,
-                     bool use_far_branches = false);
+                     intptr_t far_branch_level = 0);
   ~Assembler() {}
 
   void PushRegister(Register r) { Push(r); }
diff --git a/runtime/vm/compiler/assembler/assembler_arm64.cc b/runtime/vm/compiler/assembler/assembler_arm64.cc
index 5869617..943457c 100644
--- a/runtime/vm/compiler/assembler/assembler_arm64.cc
+++ b/runtime/vm/compiler/assembler/assembler_arm64.cc
@@ -26,9 +26,9 @@
 namespace compiler {
 
 Assembler::Assembler(ObjectPoolBuilder* object_pool_builder,
-                     bool use_far_branches)
+                     intptr_t far_branch_level)
     : AssemblerBase(object_pool_builder),
-      use_far_branches_(use_far_branches),
+      use_far_branches_(far_branch_level != 0),
       constant_pool_allowed_(false) {
   generate_invoke_write_barrier_wrapper_ = [&](Register reg) {
     Call(Address(THR,
diff --git a/runtime/vm/compiler/assembler/assembler_arm64.h b/runtime/vm/compiler/assembler/assembler_arm64.h
index e404453..a1126ec 100644
--- a/runtime/vm/compiler/assembler/assembler_arm64.h
+++ b/runtime/vm/compiler/assembler/assembler_arm64.h
@@ -512,7 +512,7 @@
 class Assembler : public AssemblerBase {
  public:
   explicit Assembler(ObjectPoolBuilder* object_pool_builder,
-                     bool use_far_branches = false);
+                     intptr_t far_branch_level = 0);
   ~Assembler() {}
 
   void PushRegister(Register r) { Push(r); }
diff --git a/runtime/vm/compiler/assembler/assembler_base.h b/runtime/vm/compiler/assembler/assembler_base.h
index c3410b4..85259a3 100644
--- a/runtime/vm/compiler/assembler/assembler_base.h
+++ b/runtime/vm/compiler/assembler/assembler_base.h
@@ -20,7 +20,8 @@
 
 namespace dart {
 
-#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
+#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64) ||                  \
+    defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
 DECLARE_FLAG(bool, use_far_branches);
 #endif
 
@@ -214,6 +215,64 @@
 class Address;
 class FieldAddress;
 
+#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+class Label : public ZoneAllocated {
+ public:
+  Label() {}
+  ~Label() {
+    // Assert if label is being destroyed with unresolved branches pending.
+    ASSERT(!IsLinked());
+  }
+
+  intptr_t Position() const {
+    ASSERT(IsBound());
+    return position_;
+  }
+
+  bool IsBound() const { return position_ != -1; }
+  bool IsUnused() const { return !IsBound() && !IsLinked(); }
+  bool IsLinked() const {
+    return unresolved_cb_ != -1 || unresolved_cj_ != -1 ||
+           unresolved_b_ != -1 || unresolved_j_ != -1 || unresolved_far_ != -1;
+  }
+
+ private:
+  int32_t position_ = -1;
+  void BindTo(intptr_t position) {
+    ASSERT(!IsBound());
+    ASSERT(!IsLinked());
+    position_ = position;
+    ASSERT(IsBound());
+  }
+
+  // Linked lists of unresolved forward branches, threaded through the branch
+  // instructions. The offset encoded in each unresolved branch is the delta
+  // to the next instruction in the list, terminated by a delta of 0. Each
+  // branch class has a separate list because the offset range of each is
+  // different.
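+  //
+  // For example, after unresolved B-type branches at positions 8, 20 and 32,
+  // unresolved_b_ == 32; the branch at 32 encodes delta 12 (back to 20), the
+  // branch at 20 encodes delta 12 (back to 8), and the branch at 8 encodes
+  // delta 0, terminating the list. Bind() then walks 32 -> 20 -> 8, rewriting
+  // each delta with the real offset to the bound position.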
+#define DEFINE_BRANCH_CLASS(name)                                              \
+  int32_t unresolved_##name##_ = -1;                                           \
+  int32_t link_##name(int32_t position) {                                      \
+    ASSERT(position > unresolved_##name##_);                                   \
+    int32_t offset;                                                            \
+    if (unresolved_##name##_ == -1) {                                          \
+      offset = 0;                                                              \
+    } else {                                                                   \
+      offset = position - unresolved_##name##_;                                \
+      ASSERT(offset > 0);                                                      \
+    }                                                                          \
+    unresolved_##name##_ = position;                                           \
+    return offset;                                                             \
+  }
+  DEFINE_BRANCH_CLASS(cb);
+  DEFINE_BRANCH_CLASS(cj);
+  DEFINE_BRANCH_CLASS(b);
+  DEFINE_BRANCH_CLASS(j);
+  DEFINE_BRANCH_CLASS(far);
+
+  friend class MicroAssembler;
+  DISALLOW_COPY_AND_ASSIGN(Label);
+};
+#else
 class Label : public ZoneAllocated {
  public:
   Label() : position_(0), unresolved_(0) {
@@ -324,6 +383,7 @@
   friend class Assembler;
   DISALLOW_COPY_AND_ASSIGN(Label);
 };
+#endif
 
 // External labels keep a function pointer to allow them
 // to be called from code generated by the assembler.
diff --git a/runtime/vm/compiler/assembler/assembler_ia32.h b/runtime/vm/compiler/assembler/assembler_ia32.h
index d5b61ee..69207d5 100644
--- a/runtime/vm/compiler/assembler/assembler_ia32.h
+++ b/runtime/vm/compiler/assembler/assembler_ia32.h
@@ -229,12 +229,12 @@
 class Assembler : public AssemblerBase {
  public:
   explicit Assembler(ObjectPoolBuilder* object_pool_builder,
-                     bool use_far_branches = false)
+                     intptr_t far_branch_level = 0)
       : AssemblerBase(object_pool_builder),
         jit_cookie_(0),
         code_(NewZoneHandle(ThreadState::Current()->zone())) {
     // This mode is only needed and implemented for ARM.
-    ASSERT(!use_far_branches);
+    ASSERT(far_branch_level == 0);
   }
   ~Assembler() {}
 
diff --git a/runtime/vm/compiler/assembler/assembler_riscv.cc b/runtime/vm/compiler/assembler/assembler_riscv.cc
new file mode 100644
index 0000000..b86d5c7
--- /dev/null
+++ b/runtime/vm/compiler/assembler/assembler_riscv.cc
@@ -0,0 +1,4391 @@
+// Copyright (c) 2017, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/globals.h"  // NOLINT
+#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+
+#define SHOULD_NOT_INCLUDE_RUNTIME
+
+#include "vm/compiler/assembler/assembler.h"
+#include "vm/compiler/backend/locations.h"
+#include "vm/cpu.h"
+#include "vm/instructions.h"
+#include "vm/simulator.h"
+
+namespace dart {
+
+DECLARE_FLAG(bool, check_code_pointer);
+DECLARE_FLAG(bool, precompiled_mode);
+
+DEFINE_FLAG(int, far_branch_level, 0, "Always use far branches");
+
+namespace compiler {
+
+MicroAssembler::MicroAssembler(ObjectPoolBuilder* object_pool_builder,
+                               intptr_t far_branch_level,
+                               ExtensionSet extensions)
+    : AssemblerBase(object_pool_builder),
+      extensions_(extensions),
+      far_branch_level_(far_branch_level) {
+  ASSERT(far_branch_level >= 0);
+  ASSERT(far_branch_level <= 2);
+}
+
+MicroAssembler::~MicroAssembler() {}
+
+void MicroAssembler::Bind(Label* label) {
+  ASSERT(!label->IsBound());
+  intptr_t target_position = Position();
+  intptr_t branch_position;
+
+#define BIND(head, update)                                                     \
+  branch_position = label->head;                                               \
+  while (branch_position >= 0) {                                               \
+    ASSERT(Utils::IsAligned(branch_position, Supports(RV_C) ? 2 : 4));         \
+    intptr_t new_offset = target_position - branch_position;                   \
+    ASSERT(Utils::IsAligned(new_offset, Supports(RV_C) ? 2 : 4));              \
+    intptr_t old_offset = update(branch_position, new_offset);                 \
+    if (old_offset == 0) break;                                                \
+    branch_position -= old_offset;                                             \
+  }                                                                            \
+  label->head = -1
+
+  BIND(unresolved_cb_, UpdateCBOffset);
+  BIND(unresolved_cj_, UpdateCJOffset);
+  BIND(unresolved_b_, UpdateBOffset);
+  BIND(unresolved_j_, UpdateJOffset);
+  BIND(unresolved_far_, UpdateFarOffset);
+
+  label->BindTo(target_position);
+}
+
+intptr_t MicroAssembler::UpdateCBOffset(intptr_t branch_position,
+                                        intptr_t new_offset) {
+  CInstr instr(Read16(branch_position));
+  ASSERT((instr.opcode() == C_BEQZ) || (instr.opcode() == C_BNEZ));
+  intptr_t old_offset = instr.b_imm();
+  if (!IsCBImm(new_offset)) {
+    FATAL("Incorrect Assembler::kNearJump");
+  }
+  Write16(branch_position,
+          instr.opcode() | EncodeCRs1p(instr.rs1p()) | EncodeCBImm(new_offset));
+  return old_offset;
+}
+
+intptr_t MicroAssembler::UpdateCJOffset(intptr_t branch_position,
+                                        intptr_t new_offset) {
+  CInstr instr(Read16(branch_position));
+  ASSERT((instr.opcode() == C_J) || (instr.opcode() == C_JAL));
+  intptr_t old_offset = instr.j_imm();
+  if (!IsCJImm(new_offset)) {
+    FATAL("Incorrect Assembler::kNearJump");
+  }
+  Write16(branch_position, instr.opcode() | EncodeCJImm(new_offset));
+  return old_offset;
+}
+
+intptr_t MicroAssembler::UpdateBOffset(intptr_t branch_position,
+                                       intptr_t new_offset) {
+  Instr instr(Read32(branch_position));
+  ASSERT(instr.opcode() == BRANCH);
+  intptr_t old_offset = instr.btype_imm();
+  if (!IsBTypeImm(new_offset)) {
+    BailoutWithBranchOffsetError();
+  }
+  Write32(branch_position, EncodeRs2(instr.rs2()) | EncodeRs1(instr.rs1()) |
+                               EncodeFunct3(instr.funct3()) |
+                               EncodeOpcode(instr.opcode()) |
+                               EncodeBTypeImm(new_offset));
+  return old_offset;
+}
+
+intptr_t MicroAssembler::UpdateJOffset(intptr_t branch_position,
+                                       intptr_t new_offset) {
+  Instr instr(Read32(branch_position));
+  ASSERT(instr.opcode() == JAL);
+  intptr_t old_offset = instr.jtype_imm();
+  if (!IsJTypeImm(new_offset)) {
+    BailoutWithBranchOffsetError();
+  }
+  Write32(branch_position, EncodeRd(instr.rd()) | EncodeOpcode(instr.opcode()) |
+                               EncodeJTypeImm(new_offset));
+  return old_offset;
+}
+
+intptr_t MicroAssembler::UpdateFarOffset(intptr_t branch_position,
+                                         intptr_t new_offset) {
+  Instr auipc_instr(Read32(branch_position));
+  ASSERT(auipc_instr.opcode() == AUIPC);
+  ASSERT(auipc_instr.rd() == FAR_TMP);
+  Instr jr_instr(Read32(branch_position + 4));
+  ASSERT(jr_instr.opcode() == JALR);
+  ASSERT(jr_instr.rd() == ZR);
+  ASSERT(jr_instr.funct3() == F3_0);
+  ASSERT(jr_instr.rs1() == FAR_TMP);
+  intptr_t old_offset = auipc_instr.utype_imm() + jr_instr.itype_imm();
+  intx_t lo = new_offset << (XLEN - 12) >> (XLEN - 12);
+  intx_t hi = (new_offset - lo) << (XLEN - 32) >> (XLEN - 32);
+  if (!IsUTypeImm(hi)) {
+    FATAL("Jump/branch distance exceeds 2GB!");
+  }
+  Write32(branch_position,
+          EncodeUTypeImm(hi) | EncodeRd(FAR_TMP) | EncodeOpcode(AUIPC));
+  Write32(branch_position + 4, EncodeITypeImm(lo) | EncodeRs1(FAR_TMP) |
+                                   EncodeFunct3(F3_0) | EncodeRd(ZR) |
+                                   EncodeOpcode(JALR));
+  return old_offset;
+}
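+
+// For example, new_offset = 0x12345FFF splits into lo = sign-extended low 12
+// bits = -1 and hi = 0x12345FFF - (-1) = 0x12346000: AUIPC adds hi to PC,
+// JALR adds the sign-extended 12-bit lo, and hi + lo == new_offset exactly.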
+
+void MicroAssembler::lui(Register rd, intptr_t imm) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C) && (rd != ZR) && (rd != SP) && IsCUImm(imm)) {
+    c_lui(rd, imm);
+    return;
+  }
+  EmitUType(imm, rd, LUI);
+}
+
+void MicroAssembler::lui_fixed(Register rd, intptr_t imm) {
+  ASSERT(Supports(RV_I));
+  EmitUType(imm, rd, LUI);
+}
+
+void MicroAssembler::auipc(Register rd, intptr_t imm) {
+  ASSERT(Supports(RV_I));
+  EmitUType(imm, rd, AUIPC);
+}
+
+void MicroAssembler::jal(Register rd, Label* label, JumpDistance distance) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C) &&
+      ((distance == kNearJump) ||
+       (label->IsBound() && IsCJImm(label->Position() - Position())))) {
+    if (rd == ZR) {
+      c_j(label);
+      return;
+    }
+#if XLEN == 32
+    if (rd == RA) {
+      c_jal(label);
+      return;
+    }
+#endif  // XLEN == 32
+  }
+  EmitJump(rd, label, JAL, distance);
+}
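+
+// Note: kNearJump is the caller's promise that the target is within
+// compressed-jump/branch range; if the promise is broken, UpdateCJOffset or
+// UpdateCBOffset fails with "Incorrect Assembler::kNearJump" when the label
+// is bound.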
+
+void MicroAssembler::jalr(Register rd, Register rs1, intptr_t offset) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if (rs1 != ZR && offset == 0) {
+      if (rd == ZR) {
+        c_jr(rs1);
+        return;
+      } else if (rd == RA) {
+        c_jalr(rs1);
+        return;
+      }
+    }
+  }
+  EmitIType(offset, rs1, F3_0, rd, JALR);
+}
+
+void MicroAssembler::jalr_fixed(Register rd, Register rs1, intptr_t offset) {
+  ASSERT(Supports(RV_I));
+  EmitIType(offset, rs1, F3_0, rd, JALR);
+}
+
+void MicroAssembler::beq(Register rs1,
+                         Register rs2,
+                         Label* label,
+                         JumpDistance distance) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C) &&
+      ((distance == kNearJump) ||
+       (label->IsBound() && IsCBImm(label->Position() - Position())))) {
+    if ((rs1 == ZR) && IsCRs1p(rs2)) {
+      c_beqz(rs2, label);
+      return;
+    } else if ((rs2 == ZR) && IsCRs1p(rs1)) {
+      c_beqz(rs1, label);
+      return;
+    }
+  }
+  EmitBranch(rs1, rs2, label, BEQ, distance);
+}
+
+void MicroAssembler::bne(Register rs1,
+                         Register rs2,
+                         Label* label,
+                         JumpDistance distance) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C) &&
+      ((distance == kNearJump) ||
+       (label->IsBound() && IsCBImm(label->Position() - Position())))) {
+    if ((rs1 == ZR) && IsCRs1p(rs2)) {
+      c_bnez(rs2, label);
+      return;
+    } else if ((rs2 == ZR) && IsCRs1p(rs1)) {
+      c_bnez(rs1, label);
+      return;
+    }
+  }
+  EmitBranch(rs1, rs2, label, BNE, distance);
+}
+
+void MicroAssembler::blt(Register rs1,
+                         Register rs2,
+                         Label* label,
+                         JumpDistance distance) {
+  ASSERT(Supports(RV_I));
+  EmitBranch(rs1, rs2, label, BLT, distance);
+}
+
+void MicroAssembler::bge(Register rs1,
+                         Register rs2,
+                         Label* label,
+                         JumpDistance distance) {
+  ASSERT(Supports(RV_I));
+  EmitBranch(rs1, rs2, label, BGE, distance);
+}
+
+void MicroAssembler::bltu(Register rs1,
+                          Register rs2,
+                          Label* label,
+                          JumpDistance distance) {
+  ASSERT(Supports(RV_I));
+  EmitBranch(rs1, rs2, label, BLTU, distance);
+}
+
+void MicroAssembler::bgeu(Register rs1,
+                          Register rs2,
+                          Label* label,
+                          JumpDistance distance) {
+  ASSERT(Supports(RV_I));
+  EmitBranch(rs1, rs2, label, BGEU, distance);
+}
+
+void MicroAssembler::lb(Register rd, Address addr) {
+  ASSERT(Supports(RV_I));
+  EmitIType(addr.offset(), addr.base(), LB, rd, LOAD);
+}
+
+void MicroAssembler::lh(Register rd, Address addr) {
+  ASSERT(Supports(RV_I));
+  EmitIType(addr.offset(), addr.base(), LH, rd, LOAD);
+}
+
+void MicroAssembler::lw(Register rd, Address addr) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if ((rd != ZR) && (addr.base() == SP) && IsCSPLoad4Imm(addr.offset())) {
+      c_lwsp(rd, addr);
+      return;
+    }
+    if (IsCRdp(rd) && IsCRs1p(addr.base()) && IsCMem4Imm(addr.offset())) {
+      c_lw(rd, addr);
+      return;
+    }
+  }
+  EmitIType(addr.offset(), addr.base(), LW, rd, LOAD);
+}
+
+void MicroAssembler::lbu(Register rd, Address addr) {
+  ASSERT(Supports(RV_I));
+  EmitIType(addr.offset(), addr.base(), LBU, rd, LOAD);
+}
+
+void MicroAssembler::lhu(Register rd, Address addr) {
+  ASSERT(Supports(RV_I));
+  EmitIType(addr.offset(), addr.base(), LHU, rd, LOAD);
+}
+
+void MicroAssembler::sb(Register rs2, Address addr) {
+  ASSERT(Supports(RV_I));
+  EmitSType(addr.offset(), rs2, addr.base(), SB, STORE);
+}
+
+void MicroAssembler::sh(Register rs2, Address addr) {
+  ASSERT(Supports(RV_I));
+  EmitSType(addr.offset(), rs2, addr.base(), SH, STORE);
+}
+
+void MicroAssembler::sw(Register rs2, Address addr) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if ((addr.base() == SP) && IsCSPStore4Imm(addr.offset())) {
+      c_swsp(rs2, addr);
+      return;
+    }
+    if (IsCRs2p(rs2) && IsCRs1p(addr.base()) && IsCMem4Imm(addr.offset())) {
+      c_sw(rs2, addr);
+      return;
+    }
+  }
+  EmitSType(addr.offset(), rs2, addr.base(), SW, STORE);
+}
+
+void MicroAssembler::addi(Register rd, Register rs1, intptr_t imm) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if ((rd != ZR) && (rs1 == ZR) && IsCIImm(imm)) {
+      c_li(rd, imm);
+      return;
+    }
+    if ((rd == rs1) && IsCIImm(imm) && (imm != 0)) {
+      c_addi(rd, rs1, imm);
+      return;
+    }
+    if ((rd == SP) && (rs1 == SP) && IsCI16Imm(imm) && (imm != 0)) {
+      c_addi16sp(rd, rs1, imm);
+      return;
+    }
+    if (IsCRdp(rd) && (rs1 == SP) && IsCI4SPNImm(imm) && (imm != 0)) {
+      c_addi4spn(rd, rs1, imm);
+      return;
+    }
+    if (imm == 0) {
+      if ((rd == ZR) && (rs1 == ZR)) {
+        c_nop();
+        return;
+      }
+      if ((rd != ZR) && (rs1 != ZR)) {
+        c_mv(rd, rs1);
+        return;
+      }
+    }
+  }
+  EmitIType(imm, rs1, ADDI, rd, OPIMM);
+}
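+
+// Compression examples for the cases above (with RV_C): addi(A0, ZR, 5) ->
+// c.li; addi(A0, A0, 4) -> c.addi; addi(SP, SP, -32) -> c.addi16sp;
+// addi(A0, SP, 16) -> c.addi4spn (A0 is in the compressed register set);
+// addi(A0, A1, 0) -> c.mv; addi(ZR, ZR, 0) -> c.nop.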
+
+void MicroAssembler::slti(Register rd, Register rs1, intptr_t imm) {
+  ASSERT(Supports(RV_I));
+  EmitIType(imm, rs1, SLTI, rd, OPIMM);
+}
+
+void MicroAssembler::sltiu(Register rd, Register rs1, intptr_t imm) {
+  ASSERT(Supports(RV_I));
+  EmitIType(imm, rs1, SLTIU, rd, OPIMM);
+}
+
+void MicroAssembler::xori(Register rd, Register rs1, intptr_t imm) {
+  ASSERT(Supports(RV_I));
+  EmitIType(imm, rs1, XORI, rd, OPIMM);
+}
+
+void MicroAssembler::ori(Register rd, Register rs1, intptr_t imm) {
+  ASSERT(Supports(RV_I));
+  EmitIType(imm, rs1, ORI, rd, OPIMM);
+}
+
+void MicroAssembler::andi(Register rd, Register rs1, intptr_t imm) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if ((rd == rs1) && IsCRs1p(rs1) && IsCIImm(imm)) {
+      c_andi(rd, rs1, imm);
+      return;
+    }
+  }
+  EmitIType(imm, rs1, ANDI, rd, OPIMM);
+}
+
+void MicroAssembler::slli(Register rd, Register rs1, intptr_t shamt) {
+  ASSERT((shamt > 0) && (shamt < XLEN));
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if ((rd == rs1) && (shamt != 0) && IsCIImm(shamt)) {
+      c_slli(rd, rs1, shamt);
+      return;
+    }
+  }
+  EmitRType(F7_0, shamt, rs1, SLLI, rd, OPIMM);
+}
+
+void MicroAssembler::srli(Register rd, Register rs1, intptr_t shamt) {
+  ASSERT((shamt > 0) && (shamt < XLEN));
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if ((rd == rs1) && IsCRs1p(rs1) && (shamt != 0) && IsCIImm(shamt)) {
+      c_srli(rd, rs1, shamt);
+      return;
+    }
+  }
+  EmitRType(F7_0, shamt, rs1, SRI, rd, OPIMM);
+}
+
+void MicroAssembler::srai(Register rd, Register rs1, intptr_t shamt) {
+  ASSERT((shamt > 0) && (shamt < XLEN));
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if ((rd == rs1) && IsCRs1p(rs1) && (shamt != 0) && IsCIImm(shamt)) {
+      c_srai(rd, rs1, shamt);
+      return;
+    }
+  }
+  EmitRType(SRA, shamt, rs1, SRI, rd, OPIMM);
+}
+
+void MicroAssembler::add(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if (rd == rs1) {
+      c_add(rd, rs1, rs2);
+      return;
+    }
+    if (rd == rs2) {
+      c_add(rd, rs2, rs1);
+      return;
+    }
+  }
+  EmitRType(F7_0, rs2, rs1, ADD, rd, OP);
+}
+
+void MicroAssembler::sub(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if ((rd == rs1) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
+      c_sub(rd, rs1, rs2);
+      return;
+    }
+  }
+  EmitRType(SUB, rs2, rs1, ADD, rd, OP);
+}
+
+void MicroAssembler::sll(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_I));
+  EmitRType(F7_0, rs2, rs1, SLL, rd, OP);
+}
+
+void MicroAssembler::slt(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_I));
+  EmitRType(F7_0, rs2, rs1, SLT, rd, OP);
+}
+
+void MicroAssembler::sltu(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_I));
+  EmitRType(F7_0, rs2, rs1, SLTU, rd, OP);
+}
+
+void MicroAssembler::xor_(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if ((rd == rs1) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
+      c_xor(rd, rs1, rs2);
+      return;
+    }
+    if ((rd == rs2) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
+      c_xor(rd, rs2, rs1);
+      return;
+    }
+  }
+  EmitRType(F7_0, rs2, rs1, XOR, rd, OP);
+}
+
+void MicroAssembler::srl(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_I));
+  EmitRType(F7_0, rs2, rs1, SR, rd, OP);
+}
+
+void MicroAssembler::sra(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_I));
+  EmitRType(SRA, rs2, rs1, SR, rd, OP);
+}
+
+void MicroAssembler::or_(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if ((rd == rs1) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
+      c_or(rd, rs1, rs2);
+      return;
+    }
+    if ((rd == rs2) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
+      c_or(rd, rs2, rs1);
+      return;
+    }
+  }
+  EmitRType(F7_0, rs2, rs1, OR, rd, OP);
+}
+
+void MicroAssembler::and_(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if ((rd == rs1) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
+      c_and(rd, rs1, rs2);
+      return;
+    }
+    if ((rd == rs2) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
+      c_and(rd, rs2, rs1);
+      return;
+    }
+  }
+  EmitRType(F7_0, rs2, rs1, AND, rd, OP);
+}
+
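+// The FENCE immediate packs the predecessor effect set into bits 7:4 and the
+// successor set into bits 3:0, hence (predecessor << 4) | successor.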
+void MicroAssembler::fence(HartEffects predecessor, HartEffects successor) {
+  ASSERT((predecessor & kAll) == predecessor);
+  ASSERT((successor & kAll) == successor);
+  ASSERT(Supports(RV_I));
+  EmitIType((predecessor << 4) | successor, ZR, FENCE, ZR, MISCMEM);
+}
+
+void MicroAssembler::fencei() {
+  ASSERT(Supports(RV_I));
+  EmitIType(0, ZR, FENCEI, ZR, MISCMEM);
+}
+
+void MicroAssembler::ecall() {
+  ASSERT(Supports(RV_I));
+  EmitIType(ECALL, ZR, F3_0, ZR, SYSTEM);
+}
+
+void MicroAssembler::ebreak() {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    c_ebreak();
+    return;
+  }
+  EmitIType(EBREAK, ZR, F3_0, ZR, SYSTEM);
+}
+
+void MicroAssembler::SimulatorPrintObject(Register rs1) {
+  ASSERT(Supports(RV_I));
+  EmitIType(ECALL, rs1, F3_0, ZR, SYSTEM);
+}
+
+void MicroAssembler::csrrw(Register rd, uint32_t csr, Register rs1) {
+  ASSERT(Supports(RV_I));
+  EmitIType(csr, rs1, CSRRW, rd, SYSTEM);
+}
+
+void MicroAssembler::csrrs(Register rd, uint32_t csr, Register rs1) {
+  ASSERT(Supports(RV_I));
+  EmitIType(csr, rs1, CSRRS, rd, SYSTEM);
+}
+
+void MicroAssembler::csrrc(Register rd, uint32_t csr, Register rs1) {
+  ASSERT(Supports(RV_I));
+  EmitIType(csr, rs1, CSRRC, rd, SYSTEM);
+}
+
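+// In the immediate CSR forms below, the 5-bit immediate is encoded in the
+// rs1 field of the instruction, hence the Register(imm) casts.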
+void MicroAssembler::csrrwi(Register rd, uint32_t csr, uint32_t imm) {
+  ASSERT(Supports(RV_I));
+  EmitIType(csr, Register(imm), CSRRWI, rd, SYSTEM);
+}
+
+void MicroAssembler::csrrsi(Register rd, uint32_t csr, uint32_t imm) {
+  ASSERT(Supports(RV_I));
+  EmitIType(csr, Register(imm), CSRRSI, rd, SYSTEM);
+}
+
+void MicroAssembler::csrrci(Register rd, uint32_t csr, uint32_t imm) {
+  ASSERT(Supports(RV_I));
+  EmitIType(csr, Register(imm), CSRRCI, rd, SYSTEM);
+}
+
+void MicroAssembler::trap() {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    Emit16(0);  // Permanently reserved illegal instruction.
+  } else {
+    Emit32(0);  // Permanently reserved illegal instruction.
+  }
+}
+
+#if XLEN >= 64
+void MicroAssembler::lwu(Register rd, Address addr) {
+  ASSERT(Supports(RV_I));
+  EmitIType(addr.offset(), addr.base(), LWU, rd, LOAD);
+}
+
+void MicroAssembler::ld(Register rd, Address addr) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if ((rd != ZR) && (addr.base() == SP) && IsCSPLoad8Imm(addr.offset())) {
+      c_ldsp(rd, addr);
+      return;
+    }
+    if (IsCRdp(rd) && IsCRs1p(addr.base()) && IsCMem8Imm(addr.offset())) {
+      c_ld(rd, addr);
+      return;
+    }
+  }
+  EmitIType(addr.offset(), addr.base(), LD, rd, LOAD);
+}
+
+void MicroAssembler::sd(Register rs2, Address addr) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if ((addr.base() == SP) && IsCSPStore8Imm(addr.offset())) {
+      c_sdsp(rs2, addr);
+      return;
+    }
+    if (IsCRs2p(rs2) && IsCRs1p(addr.base()) && IsCMem8Imm(addr.offset())) {
+      c_sd(rs2, addr);
+      return;
+    }
+  }
+  EmitSType(addr.offset(), rs2, addr.base(), SD, STORE);
+}
+
+void MicroAssembler::addiw(Register rd, Register rs1, intptr_t imm) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if ((rd != ZR) && (rs1 == ZR) && IsCIImm(imm)) {
+      c_li(rd, imm);
+      return;
+    }
+    if ((rd == rs1) && (rd != ZR) && IsCIImm(imm)) {
+      c_addiw(rd, rs1, imm);
+      return;
+    }
+  }
+  EmitIType(imm, rs1, ADDI, rd, OPIMM32);
+}
+
+void MicroAssembler::slliw(Register rd, Register rs1, intptr_t shamt) {
+  ASSERT((shamt > 0) && (shamt < 32));
+  ASSERT(Supports(RV_I));
+  EmitRType(F7_0, shamt, rs1, SLLI, rd, OPIMM32);
+}
+
+void MicroAssembler::srliw(Register rd, Register rs1, intptr_t shamt) {
+  ASSERT((shamt > 0) && (shamt < 32));
+  ASSERT(Supports(RV_I));
+  EmitRType(F7_0, shamt, rs1, SRI, rd, OPIMM32);
+}
+
+void MicroAssembler::sraiw(Register rd, Register rs1, intptr_t shamt) {
+  ASSERT((shamt > 0) && (shamt < 32));
+  ASSERT(Supports(RV_I));
+  EmitRType(SRA, shamt, rs1, SRI, rd, OPIMM32);
+}
+
+void MicroAssembler::addw(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if ((rd == rs1) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
+      c_addw(rd, rs1, rs2);
+      return;
+    }
+    if ((rd == rs2) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
+      c_addw(rd, rs2, rs1);
+      return;
+    }
+  }
+  EmitRType(F7_0, rs2, rs1, ADD, rd, OP32);
+}
+
+void MicroAssembler::subw(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if ((rd == rs1) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
+      c_subw(rd, rs1, rs2);
+      return;
+    }
+  }
+  EmitRType(SUB, rs2, rs1, ADD, rd, OP32);
+}
+
+void MicroAssembler::sllw(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_I));
+  EmitRType(F7_0, rs2, rs1, SLL, rd, OP32);
+}
+
+void MicroAssembler::srlw(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_I));
+  EmitRType(F7_0, rs2, rs1, SR, rd, OP32);
+}
+
+void MicroAssembler::sraw(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_I));
+  EmitRType(SRA, rs2, rs1, SR, rd, OP32);
+}
+#endif  // XLEN >= 64
+
+void MicroAssembler::mul(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_M));
+  EmitRType(MULDIV, rs2, rs1, MUL, rd, OP);
+}
+
+void MicroAssembler::mulh(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_M));
+  EmitRType(MULDIV, rs2, rs1, MULH, rd, OP);
+}
+
+void MicroAssembler::mulhsu(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_M));
+  EmitRType(MULDIV, rs2, rs1, MULHSU, rd, OP);
+}
+
+void MicroAssembler::mulhu(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_M));
+  EmitRType(MULDIV, rs2, rs1, MULHU, rd, OP);
+}
+
+void MicroAssembler::div(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_M));
+  EmitRType(MULDIV, rs2, rs1, DIV, rd, OP);
+}
+
+void MicroAssembler::divu(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_M));
+  EmitRType(MULDIV, rs2, rs1, DIVU, rd, OP);
+}
+
+void MicroAssembler::rem(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_M));
+  EmitRType(MULDIV, rs2, rs1, REM, rd, OP);
+}
+
+void MicroAssembler::remu(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_M));
+  EmitRType(MULDIV, rs2, rs1, REMU, rd, OP);
+}
+
+#if XLEN >= 64
+void MicroAssembler::mulw(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_M));
+  EmitRType(MULDIV, rs2, rs1, MULW, rd, OP32);
+}
+
+void MicroAssembler::divw(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_M));
+  EmitRType(MULDIV, rs2, rs1, DIVW, rd, OP32);
+}
+
+void MicroAssembler::divuw(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_M));
+  EmitRType(MULDIV, rs2, rs1, DIVUW, rd, OP32);
+}
+
+void MicroAssembler::remw(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_M));
+  EmitRType(MULDIV, rs2, rs1, REMW, rd, OP32);
+}
+
+void MicroAssembler::remuw(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_M));
+  EmitRType(MULDIV, rs2, rs1, REMUW, rd, OP32);
+}
+#endif  // XLEN >= 64
+
+void MicroAssembler::lrw(Register rd, Address addr, std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(LR, order, ZR, addr.base(), WIDTH32, rd, AMO);
+}
+
+void MicroAssembler::scw(Register rd,
+                         Register rs2,
+                         Address addr,
+                         std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(SC, order, rs2, addr.base(), WIDTH32, rd, AMO);
+}
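+
+// A typical LR/SC retry loop built from the two instructions above, e.g. a
+// 32-bit atomic increment (sketch, not part of this patch):
+//
+//   Label retry;
+//   __ Bind(&retry);
+//   __ lrw(T2, Address(A0, 0), std::memory_order_acquire);
+//   __ addi(T3, T2, 1);
+//   __ scw(T4, T3, Address(A0, 0), std::memory_order_release);
+//   __ bnez(T4, &retry);  // SC writes 0 on success, non-zero on failure.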
+
+void MicroAssembler::amoswapw(Register rd,
+                              Register rs2,
+                              Address addr,
+                              std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOSWAP, order, rs2, addr.base(), WIDTH32, rd, AMO);
+}
+
+void MicroAssembler::amoaddw(Register rd,
+                             Register rs2,
+                             Address addr,
+                             std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOADD, order, rs2, addr.base(), WIDTH32, rd, AMO);
+}
+
+void MicroAssembler::amoxorw(Register rd,
+                             Register rs2,
+                             Address addr,
+                             std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOXOR, order, rs2, addr.base(), WIDTH32, rd, AMO);
+}
+
+void MicroAssembler::amoandw(Register rd,
+                             Register rs2,
+                             Address addr,
+                             std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOAND, order, rs2, addr.base(), WIDTH32, rd, AMO);
+}
+
+void MicroAssembler::amoorw(Register rd,
+                            Register rs2,
+                            Address addr,
+                            std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOOR, order, rs2, addr.base(), WIDTH32, rd, AMO);
+}
+
+void MicroAssembler::amominw(Register rd,
+                             Register rs2,
+                             Address addr,
+                             std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOMIN, order, rs2, addr.base(), WIDTH32, rd, AMO);
+}
+
+void MicroAssembler::amomaxw(Register rd,
+                             Register rs2,
+                             Address addr,
+                             std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOMAX, order, rs2, addr.base(), WIDTH32, rd, AMO);
+}
+
+void MicroAssembler::amominuw(Register rd,
+                              Register rs2,
+                              Address addr,
+                              std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOMINU, order, rs2, addr.base(), WIDTH32, rd, AMO);
+}
+
+void MicroAssembler::amomaxuw(Register rd,
+                              Register rs2,
+                              Address addr,
+                              std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOMAXU, order, rs2, addr.base(), WIDTH32, rd, AMO);
+}
+
+#if XLEN >= 64
+void MicroAssembler::lrd(Register rd, Address addr, std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(LR, order, ZR, addr.base(), WIDTH64, rd, AMO);
+}
+
+void MicroAssembler::scd(Register rd,
+                         Register rs2,
+                         Address addr,
+                         std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(SC, order, rs2, addr.base(), WIDTH64, rd, AMO);
+}
+
+void MicroAssembler::amoswapd(Register rd,
+                              Register rs2,
+                              Address addr,
+                              std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOSWAP, order, rs2, addr.base(), WIDTH64, rd, AMO);
+}
+
+void MicroAssembler::amoaddd(Register rd,
+                             Register rs2,
+                             Address addr,
+                             std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOADD, order, rs2, addr.base(), WIDTH64, rd, AMO);
+}
+
+void MicroAssembler::amoxord(Register rd,
+                             Register rs2,
+                             Address addr,
+                             std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOXOR, order, rs2, addr.base(), WIDTH64, rd, AMO);
+}
+
+void MicroAssembler::amoandd(Register rd,
+                             Register rs2,
+                             Address addr,
+                             std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOAND, order, rs2, addr.base(), WIDTH64, rd, AMO);
+}
+
+void MicroAssembler::amoord(Register rd,
+                            Register rs2,
+                            Address addr,
+                            std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOOR, order, rs2, addr.base(), WIDTH64, rd, AMO);
+}
+
+void MicroAssembler::amomind(Register rd,
+                             Register rs2,
+                             Address addr,
+                             std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOMIN, order, rs2, addr.base(), WIDTH64, rd, AMO);
+}
+
+void MicroAssembler::amomaxd(Register rd,
+                             Register rs2,
+                             Address addr,
+                             std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOMAX, order, rs2, addr.base(), WIDTH64, rd, AMO);
+}
+
+void MicroAssembler::amominud(Register rd,
+                              Register rs2,
+                              Address addr,
+                              std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOMINU, order, rs2, addr.base(), WIDTH64, rd, AMO);
+}
+
+void MicroAssembler::amomaxud(Register rd,
+                              Register rs2,
+                              Address addr,
+                              std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOMAXU, order, rs2, addr.base(), WIDTH64, rd, AMO);
+}
+#endif  // XLEN >= 64
+
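+// C.FLW and C.FSW exist only in RV32C; RV64C reuses their encodings for C.LD
+// and C.SD, so the compressed forms below are attempted only when XLEN == 32.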
+void MicroAssembler::flw(FRegister rd, Address addr) {
+  ASSERT(Supports(RV_F));
+#if XLEN == 32
+  if (Supports(RV_C)) {
+    if ((addr.base() == SP) && IsCSPLoad4Imm(addr.offset())) {
+      c_flwsp(rd, addr);
+      return;
+    }
+    if (IsCFRdp(rd) && IsCRs1p(addr.base()) && IsCMem4Imm(addr.offset())) {
+      c_flw(rd, addr);
+      return;
+    }
+  }
+#endif  // XLEN == 32
+  EmitIType(addr.offset(), addr.base(), S, rd, LOADFP);
+}
+
+void MicroAssembler::fsw(FRegister rs2, Address addr) {
+  ASSERT(Supports(RV_F));
+#if XLEN == 32
+  if (Supports(RV_C)) {
+    if ((addr.base() == SP) && IsCSPStore4Imm(addr.offset())) {
+      c_fswsp(rs2, addr);
+      return;
+    }
+    if (IsCFRs2p(rs2) && IsCRs1p(addr.base()) && IsCMem4Imm(addr.offset())) {
+      c_fsw(rs2, addr);
+      return;
+    }
+  }
+#endif  // XLEN == 32
+  EmitSType(addr.offset(), rs2, addr.base(), S, STOREFP);
+}
+
+void MicroAssembler::fmadds(FRegister rd,
+                            FRegister rs1,
+                            FRegister rs2,
+                            FRegister rs3,
+                            RoundingMode rounding) {
+  ASSERT(Supports(RV_F));
+  EmitR4Type(rs3, F2_S, rs2, rs1, rounding, rd, FMADD);
+}
+
+void MicroAssembler::fmsubs(FRegister rd,
+                            FRegister rs1,
+                            FRegister rs2,
+                            FRegister rs3,
+                            RoundingMode rounding) {
+  ASSERT(Supports(RV_F));
+  EmitR4Type(rs3, F2_S, rs2, rs1, rounding, rd, FMSUB);
+}
+
+void MicroAssembler::fnmsubs(FRegister rd,
+                             FRegister rs1,
+                             FRegister rs2,
+                             FRegister rs3,
+                             RoundingMode rounding) {
+  ASSERT(Supports(RV_F));
+  EmitR4Type(rs3, F2_S, rs2, rs1, rounding, rd, FNMSUB);
+}
+
+void MicroAssembler::fnmadds(FRegister rd,
+                             FRegister rs1,
+                             FRegister rs2,
+                             FRegister rs3,
+                             RoundingMode rounding) {
+  ASSERT(Supports(RV_F));
+  EmitR4Type(rs3, F2_S, rs2, rs1, rounding, rd, FNMADD);
+}
+
+void MicroAssembler::fadds(FRegister rd,
+                           FRegister rs1,
+                           FRegister rs2,
+                           RoundingMode rounding) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FADDS, rs2, rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fsubs(FRegister rd,
+                           FRegister rs1,
+                           FRegister rs2,
+                           RoundingMode rounding) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FSUBS, rs2, rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fmuls(FRegister rd,
+                           FRegister rs1,
+                           FRegister rs2,
+                           RoundingMode rounding) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FMULS, rs2, rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fdivs(FRegister rd,
+                           FRegister rs1,
+                           FRegister rs2,
+                           RoundingMode rounding) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FDIVS, rs2, rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fsqrts(FRegister rd,
+                            FRegister rs1,
+                            RoundingMode rounding) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FSQRTS, FRegister(0), rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fsgnjs(FRegister rd, FRegister rs1, FRegister rs2) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FSGNJS, rs2, rs1, J, rd, OPFP);
+}
+
+void MicroAssembler::fsgnjns(FRegister rd, FRegister rs1, FRegister rs2) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FSGNJS, rs2, rs1, JN, rd, OPFP);
+}
+
+void MicroAssembler::fsgnjxs(FRegister rd, FRegister rs1, FRegister rs2) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FSGNJS, rs2, rs1, JX, rd, OPFP);
+}
+
+void MicroAssembler::fmins(FRegister rd, FRegister rs1, FRegister rs2) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FMINMAXS, rs2, rs1, MIN, rd, OPFP);
+}
+
+void MicroAssembler::fmaxs(FRegister rd, FRegister rs1, FRegister rs2) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FMINMAXS, rs2, rs1, MAX, rd, OPFP);
+}
+
+void MicroAssembler::feqs(Register rd, FRegister rs1, FRegister rs2) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FCMPS, rs2, rs1, FEQ, rd, OPFP);
+}
+
+void MicroAssembler::flts(Register rd, FRegister rs1, FRegister rs2) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FCMPS, rs2, rs1, FLT, rd, OPFP);
+}
+
+void MicroAssembler::fles(Register rd, FRegister rs1, FRegister rs2) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FCMPS, rs2, rs1, FLE, rd, OPFP);
+}
+
+void MicroAssembler::fclasss(Register rd, FRegister rs1) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FCLASSS, FRegister(0), rs1, F3_1, rd, OPFP);
+}
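+
+// In the FCVT encodings below, the rs2 field selects the integer width
+// (W, WU, L, LU) rather than naming a source register, hence the
+// FRegister(W)-style casts.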
+
+void MicroAssembler::fcvtws(Register rd, FRegister rs1, RoundingMode rounding) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FCVTintS, FRegister(W), rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fcvtwus(Register rd,
+                             FRegister rs1,
+                             RoundingMode rounding) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FCVTintS, FRegister(WU), rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fcvtsw(FRegister rd, Register rs1, RoundingMode rounding) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FCVTSint, FRegister(W), rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fcvtswu(FRegister rd,
+                             Register rs1,
+                             RoundingMode rounding) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FCVTSint, FRegister(WU), rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fmvxw(Register rd, FRegister rs1) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FMVXW, FRegister(0), rs1, F3_0, rd, OPFP);
+}
+
+void MicroAssembler::fmvwx(FRegister rd, Register rs1) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FMVWX, FRegister(0), rs1, F3_0, rd, OPFP);
+}
+
+#if XLEN >= 64
+void MicroAssembler::fcvtls(Register rd, FRegister rs1, RoundingMode rounding) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FCVTintS, FRegister(L), rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fcvtlus(Register rd,
+                             FRegister rs1,
+                             RoundingMode rounding) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FCVTintS, FRegister(LU), rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fcvtsl(FRegister rd, Register rs1, RoundingMode rounding) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FCVTSint, FRegister(L), rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fcvtslu(FRegister rd,
+                             Register rs1,
+                             RoundingMode rounding) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FCVTSint, FRegister(LU), rs1, rounding, rd, OPFP);
+}
+#endif  // XLEN >= 64
+
+void MicroAssembler::fld(FRegister rd, Address addr) {
+  ASSERT(Supports(RV_D));
+  if (Supports(RV_C)) {
+    if ((addr.base() == SP) && IsCSPLoad8Imm(addr.offset())) {
+      c_fldsp(rd, addr);
+      return;
+    }
+    if (IsCFRdp(rd) && IsCRs1p(addr.base()) && IsCMem8Imm(addr.offset())) {
+      c_fld(rd, addr);
+      return;
+    }
+  }
+  EmitIType(addr.offset(), addr.base(), D, rd, LOADFP);
+}
+
+void MicroAssembler::fsd(FRegister rs2, Address addr) {
+  ASSERT(Supports(RV_D));
+  if (Supports(RV_C)) {
+    if ((addr.base() == SP) && IsCSPStore8Imm(addr.offset())) {
+      c_fsdsp(rs2, addr);
+      return;
+    }
+    if (IsCFRs2p(rs2) && IsCRs1p(addr.base()) && IsCMem8Imm(addr.offset())) {
+      c_fsd(rs2, addr);
+      return;
+    }
+  }
+  EmitSType(addr.offset(), rs2, addr.base(), D, STOREFP);
+}
+
+void MicroAssembler::fmaddd(FRegister rd,
+                            FRegister rs1,
+                            FRegister rs2,
+                            FRegister rs3,
+                            RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitR4Type(rs3, F2_D, rs2, rs1, rounding, rd, FMADD);
+}
+
+void MicroAssembler::fmsubd(FRegister rd,
+                            FRegister rs1,
+                            FRegister rs2,
+                            FRegister rs3,
+                            RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitR4Type(rs3, F2_D, rs2, rs1, rounding, rd, FMSUB);
+}
+
+void MicroAssembler::fnmsubd(FRegister rd,
+                             FRegister rs1,
+                             FRegister rs2,
+                             FRegister rs3,
+                             RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitR4Type(rs3, F2_D, rs2, rs1, rounding, rd, FNMSUB);
+}
+
+void MicroAssembler::fnmaddd(FRegister rd,
+                             FRegister rs1,
+                             FRegister rs2,
+                             FRegister rs3,
+                             RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitR4Type(rs3, F2_D, rs2, rs1, rounding, rd, FNMADD);
+}
+
+void MicroAssembler::faddd(FRegister rd,
+                           FRegister rs1,
+                           FRegister rs2,
+                           RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FADDD, rs2, rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fsubd(FRegister rd,
+                           FRegister rs1,
+                           FRegister rs2,
+                           RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FSUBD, rs2, rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fmuld(FRegister rd,
+                           FRegister rs1,
+                           FRegister rs2,
+                           RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FMULD, rs2, rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fdivd(FRegister rd,
+                           FRegister rs1,
+                           FRegister rs2,
+                           RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FDIVD, rs2, rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fsqrtd(FRegister rd,
+                            FRegister rs1,
+                            RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FSQRTD, FRegister(0), rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fsgnjd(FRegister rd, FRegister rs1, FRegister rs2) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FSGNJD, rs2, rs1, J, rd, OPFP);
+}
+
+void MicroAssembler::fsgnjnd(FRegister rd, FRegister rs1, FRegister rs2) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FSGNJD, rs2, rs1, JN, rd, OPFP);
+}
+
+void MicroAssembler::fsgnjxd(FRegister rd, FRegister rs1, FRegister rs2) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FSGNJD, rs2, rs1, JX, rd, OPFP);
+}
+
+void MicroAssembler::fmind(FRegister rd, FRegister rs1, FRegister rs2) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FMINMAXD, rs2, rs1, MIN, rd, OPFP);
+}
+
+void MicroAssembler::fmaxd(FRegister rd, FRegister rs1, FRegister rs2) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FMINMAXD, rs2, rs1, MAX, rd, OPFP);
+}
+
+void MicroAssembler::fcvtsd(FRegister rd,
+                            FRegister rs1,
+                            RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FCVTS, FRegister(1), rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fcvtds(FRegister rd, FRegister rs1) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FCVTD, FRegister(0), rs1, F3_0, rd, OPFP);
+}
+
+void MicroAssembler::feqd(Register rd, FRegister rs1, FRegister rs2) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FCMPD, rs2, rs1, FEQ, rd, OPFP);
+}
+
+void MicroAssembler::fltd(Register rd, FRegister rs1, FRegister rs2) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FCMPD, rs2, rs1, FLT, rd, OPFP);
+}
+
+void MicroAssembler::fled(Register rd, FRegister rs1, FRegister rs2) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FCMPD, rs2, rs1, FLE, rd, OPFP);
+}
+
+void MicroAssembler::fclassd(Register rd, FRegister rs1) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FCLASSD, FRegister(0), rs1, F3_1, rd, OPFP);
+}
+
+void MicroAssembler::fcvtwd(Register rd, FRegister rs1, RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FCVTintD, FRegister(W), rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fcvtwud(Register rd,
+                             FRegister rs1,
+                             RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FCVTintD, FRegister(WU), rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fcvtdw(FRegister rd, Register rs1, RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FCVTDint, FRegister(W), rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fcvtdwu(FRegister rd,
+                             Register rs1,
+                             RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FCVTDint, FRegister(WU), rs1, rounding, rd, OPFP);
+}
+
+#if XLEN >= 64
+void MicroAssembler::fcvtld(Register rd, FRegister rs1, RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FCVTintD, FRegister(L), rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fcvtlud(Register rd,
+                             FRegister rs1,
+                             RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FCVTintD, FRegister(LU), rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fmvxd(Register rd, FRegister rs1) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FMVXD, FRegister(0), rs1, F3_0, rd, OPFP);
+}
+
+void MicroAssembler::fcvtdl(FRegister rd, Register rs1, RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FCVTDint, FRegister(L), rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fcvtdlu(FRegister rd,
+                             Register rs1,
+                             RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FCVTDint, FRegister(LU), rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fmvdx(FRegister rd, Register rs1) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FMVDX, FRegister(0), rs1, F3_0, rd, OPFP);
+}
+#endif  // XLEN >= 64
+
+void MicroAssembler::c_lwsp(Register rd, Address addr) {
+  ASSERT(rd != ZR);
+  ASSERT(addr.base() == SP);
+  ASSERT(Supports(RV_C));
+  Emit16(C_LWSP | EncodeCRd(rd) | EncodeCSPLoad4Imm(addr.offset()));
+}
+
+#if XLEN == 32
+void MicroAssembler::c_flwsp(FRegister rd, Address addr) {
+  ASSERT(addr.base() == SP);
+  ASSERT(Supports(RV_C));
+  ASSERT(Supports(RV_F));
+  Emit16(C_FLWSP | EncodeCFRd(rd) | EncodeCSPLoad4Imm(addr.offset()));
+}
+#else
+void MicroAssembler::c_ldsp(Register rd, Address addr) {
+  ASSERT(rd != ZR);
+  ASSERT(addr.base() == SP);
+  ASSERT(Supports(RV_C));
+  Emit16(C_LDSP | EncodeCRd(rd) | EncodeCSPLoad8Imm(addr.offset()));
+}
+#endif
+
+void MicroAssembler::c_fldsp(FRegister rd, Address addr) {
+  ASSERT(addr.base() == SP);
+  ASSERT(Supports(RV_C));
+  ASSERT(Supports(RV_D));
+  Emit16(C_FLDSP | EncodeCFRd(rd) | EncodeCSPLoad8Imm(addr.offset()));
+}
+
+void MicroAssembler::c_swsp(Register rs2, Address addr) {
+  ASSERT(addr.base() == SP);
+  ASSERT(Supports(RV_C));
+  Emit16(C_SWSP | EncodeCRs2(rs2) | EncodeCSPStore4Imm(addr.offset()));
+}
+
+#if XLEN == 32
+void MicroAssembler::c_fswsp(FRegister rs2, Address addr) {
+  ASSERT(addr.base() == SP);
+  ASSERT(Supports(RV_C));
+  ASSERT(Supports(RV_F));
+  Emit16(C_FSWSP | EncodeCFRs2(rs2) | EncodeCSPStore4Imm(addr.offset()));
+}
+#else
+void MicroAssembler::c_sdsp(Register rs2, Address addr) {
+  ASSERT(addr.base() == SP);
+  ASSERT(Supports(RV_C));
+  Emit16(C_SDSP | EncodeCRs2(rs2) | EncodeCSPStore8Imm(addr.offset()));
+}
+#endif
+
+void MicroAssembler::c_fsdsp(FRegister rs2, Address addr) {
+  ASSERT(addr.base() == SP);
+  ASSERT(Supports(RV_C));
+  ASSERT(Supports(RV_D));
+  Emit16(C_FSDSP | EncodeCFRs2(rs2) | EncodeCSPStore8Imm(addr.offset()));
+}
+
+void MicroAssembler::c_lw(Register rd, Address addr) {
+  ASSERT(Supports(RV_C));
+  Emit16(C_LW | EncodeCRdp(rd) | EncodeCRs1p(addr.base()) |
+         EncodeCMem4Imm(addr.offset()));
+}
+
+void MicroAssembler::c_ld(Register rd, Address addr) {
+  ASSERT(Supports(RV_C));
+  Emit16(C_LD | EncodeCRdp(rd) | EncodeCRs1p(addr.base()) |
+         EncodeCMem8Imm(addr.offset()));
+}
+
+void MicroAssembler::c_flw(FRegister rd, Address addr) {
+  ASSERT(Supports(RV_C));
+  ASSERT(Supports(RV_F));
+  Emit16(C_FLW | EncodeCFRdp(rd) | EncodeCRs1p(addr.base()) |
+         EncodeCMem4Imm(addr.offset()));
+}
+
+void MicroAssembler::c_fld(FRegister rd, Address addr) {
+  ASSERT(Supports(RV_C));
+  ASSERT(Supports(RV_D));
+  Emit16(C_FLD | EncodeCFRdp(rd) | EncodeCRs1p(addr.base()) |
+         EncodeCMem8Imm(addr.offset()));
+}
+
+void MicroAssembler::c_sw(Register rs2, Address addr) {
+  ASSERT(Supports(RV_C));
+  Emit16(C_SW | EncodeCRs1p(addr.base()) | EncodeCRs2p(rs2) |
+         EncodeCMem4Imm(addr.offset()));
+}
+
+void MicroAssembler::c_sd(Register rs2, Address addr) {
+  ASSERT(Supports(RV_C));
+  Emit16(C_SD | EncodeCRs1p(addr.base()) | EncodeCRs2p(rs2) |
+         EncodeCMem8Imm(addr.offset()));
+}
+
+void MicroAssembler::c_fsw(FRegister rs2, Address addr) {
+  ASSERT(Supports(RV_C));
+  ASSERT(Supports(RV_F));
+  Emit16(C_FSW | EncodeCRs1p(addr.base()) | EncodeCFRs2p(rs2) |
+         EncodeCMem4Imm(addr.offset()));
+}
+
+void MicroAssembler::c_fsd(FRegister rs2, Address addr) {
+  ASSERT(Supports(RV_C));
+  ASSERT(Supports(RV_D));
+  Emit16(C_FSD | EncodeCRs1p(addr.base()) | EncodeCFRs2p(rs2) |
+         EncodeCMem8Imm(addr.offset()));
+}
+
+void MicroAssembler::c_j(Label* label) {
+  ASSERT(Supports(RV_C));
+  EmitCJump(label, C_J);
+}
+
+#if XLEN == 32
+void MicroAssembler::c_jal(Label* label) {
+  ASSERT(Supports(RV_C));
+  EmitCJump(label, C_JAL);
+}
+#endif  // XLEN == 32
+
+void MicroAssembler::c_jr(Register rs1) {
+  ASSERT(Supports(RV_C));
+  ASSERT(rs1 != ZR);
+  Emit16(C_JR | EncodeCRs1(rs1) | EncodeCRs2(ZR));
+}
+
+void MicroAssembler::c_jalr(Register rs1) {
+  ASSERT(Supports(RV_C));
+  Emit16(C_JALR | EncodeCRs1(rs1) | EncodeCRs2(ZR));
+}
+
+void MicroAssembler::c_beqz(Register rs1p, Label* label) {
+  ASSERT(Supports(RV_C));
+  EmitCBranch(rs1p, label, C_BEQZ);
+}
+
+void MicroAssembler::c_bnez(Register rs1p, Label* label) {
+  ASSERT(Supports(RV_C));
+  EmitCBranch(rs1p, label, C_BNEZ);
+}
+
+void MicroAssembler::c_li(Register rd, intptr_t imm) {
+  ASSERT(Supports(RV_C));
+  ASSERT(rd != ZR);
+  Emit16(C_LI | EncodeCRd(rd) | EncodeCIImm(imm));
+}
+
+void MicroAssembler::c_lui(Register rd, uintptr_t imm) {
+  ASSERT(Supports(RV_C));
+  ASSERT(rd != ZR);
+  ASSERT(rd != SP);
+  Emit16(C_LUI | EncodeCRd(rd) | EncodeCUImm(imm));
+}
+
+void MicroAssembler::c_addi(Register rd, Register rs1, intptr_t imm) {
+  ASSERT(Supports(RV_C));
+  ASSERT(imm != 0);
+  ASSERT(rd == rs1);
+  Emit16(C_ADDI | EncodeCRd(rd) | EncodeCIImm(imm));
+}
+
+#if XLEN >= 64
+void MicroAssembler::c_addiw(Register rd, Register rs1, intptr_t imm) {
+  ASSERT(Supports(RV_C));
+  ASSERT(rd == rs1);
+  Emit16(C_ADDIW | EncodeCRd(rd) | EncodeCIImm(imm));
+}
+#endif  // XLEN >= 64
+
+void MicroAssembler::c_addi16sp(Register rd, Register rs1, intptr_t imm) {
+  ASSERT(Supports(RV_C));
+  ASSERT(rd == rs1);
+  Emit16(C_ADDI16SP | EncodeCRd(rd) | EncodeCI16Imm(imm));
+}
+
+void MicroAssembler::c_addi4spn(Register rdp, Register rs1, intptr_t imm) {
+  ASSERT(Supports(RV_C));
+  ASSERT(rs1 == SP);
+  ASSERT(imm != 0);
+  Emit16(C_ADDI4SPN | EncodeCRdp(rdp) | EncodeCI4SPNImm(imm));
+}
+
+void MicroAssembler::c_slli(Register rd, Register rs1, intptr_t imm) {
+  ASSERT(Supports(RV_C));
+  ASSERT(rd == rs1);
+  ASSERT(imm != 0);
+  Emit16(C_SLLI | EncodeCRd(rd) | EncodeCIImm(imm));
+}
+
+void MicroAssembler::c_srli(Register rd, Register rs1, intptr_t imm) {
+  ASSERT(Supports(RV_C));
+  ASSERT(rd == rs1);
+  ASSERT(imm != 0);
+  Emit16(C_SRLI | EncodeCRs1p(rd) | EncodeCIImm(imm));
+}
+
+void MicroAssembler::c_srai(Register rd, Register rs1, intptr_t imm) {
+  ASSERT(Supports(RV_C));
+  ASSERT(rd == rs1);
+  ASSERT(imm != 0);
+  Emit16(C_SRAI | EncodeCRs1p(rd) | EncodeCIImm(imm));
+}
+
+void MicroAssembler::c_andi(Register rd, Register rs1, intptr_t imm) {
+  ASSERT(Supports(RV_C));
+  ASSERT(rd == rs1);
+  Emit16(C_ANDI | EncodeCRs1p(rd) | EncodeCIImm(imm));
+}
+
+void MicroAssembler::c_mv(Register rd, Register rs2) {
+  ASSERT(Supports(RV_C));
+  ASSERT(rd != ZR);
+  ASSERT(rs2 != ZR);
+  Emit16(C_MV | EncodeCRd(rd) | EncodeCRs2(rs2));
+}
+
+void MicroAssembler::c_add(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_C));
+  ASSERT(rd != ZR);
+  ASSERT(rd == rs1);
+  ASSERT(rs2 != ZR);
+  Emit16(C_ADD | EncodeCRd(rd) | EncodeCRs2(rs2));
+}
+
+void MicroAssembler::c_and(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_C));
+  ASSERT(rd == rs1);
+  Emit16(C_AND | EncodeCRs1p(rs1) | EncodeCRs2p(rs2));
+}
+
+void MicroAssembler::c_or(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_C));
+  Emit16(C_OR | EncodeCRs1p(rs1) | EncodeCRs2p(rs2));
+}
+
+void MicroAssembler::c_xor(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_C));
+  Emit16(C_XOR | EncodeCRs1p(rs1) | EncodeCRs2p(rs2));
+}
+
+void MicroAssembler::c_sub(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_C));
+  Emit16(C_SUB | EncodeCRs1p(rs1) | EncodeCRs2p(rs2));
+}
+
+#if XLEN >= 64
+void MicroAssembler::c_addw(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_C));
+  Emit16(C_ADDW | EncodeCRs1p(rs1) | EncodeCRs2p(rs2));
+}
+
+void MicroAssembler::c_subw(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_C));
+  Emit16(C_SUBW | EncodeCRs1p(rs1) | EncodeCRs2p(rs2));
+}
+#endif  // XLEN >= 64
+
+void MicroAssembler::c_nop() {
+  ASSERT(Supports(RV_C));
+  Emit16(C_NOP);
+}
+
+void MicroAssembler::c_ebreak() {
+  ASSERT(Supports(RV_C));
+  Emit16(C_EBREAK);
+}
+
+static Funct3 InvertFunct3(Funct3 func) {
+  switch (func) {
+    case BEQ:
+      return BNE;
+    case BNE:
+      return BEQ;
+    case BGE:
+      return BLT;
+    case BGEU:
+      return BLTU;
+    case BLT:
+      return BGE;
+    case BLTU:
+      return BGEU;
+    default:
+      UNREACHABLE();
+  }
+}
+
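+// Far branches are emitted as a condition-inverted near branch over an
+// unconditional jump: e.g. when `beq a0, a1, target` is out of B-type range
+// (+/-4KB), it becomes
+//   bne a0, a1, +8    // inverted condition skips the jump
+//   jal zr, target    // J-type range, +/-1MB
+// and when even that is too far, the jal is replaced by an auipc+jalr pair
+// with +/-2GB range.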
+void MicroAssembler::EmitBranch(Register rs1,
+                                Register rs2,
+                                Label* label,
+                                Funct3 func,
+                                JumpDistance distance) {
+  intptr_t offset;
+  if (label->IsBound()) {
+    // Backward branch: use near or far branch based on actual distance.
+    offset = label->Position() - Position();
+    if (IsBTypeImm(offset)) {
+      EmitBType(offset, rs2, rs1, func, BRANCH);
+      return;
+    }
+
+    if (IsJTypeImm(offset + 4)) {
+      intptr_t start = Position();
+      const intptr_t kFarBranchLength = 8;
+      EmitBType(kFarBranchLength, rs2, rs1, InvertFunct3(func), BRANCH);
+      offset = label->Position() - Position();
+      EmitJType(offset, ZR, JAL);
+      intptr_t end = Position();
+      ASSERT_EQUAL(end - start, kFarBranchLength);
+      return;
+    }
+
+    intptr_t start = Position();
+    const intptr_t kFarBranchLength = 12;
+    EmitBType(kFarBranchLength, rs2, rs1, InvertFunct3(func), BRANCH);
+    offset = label->Position() - Position();
+    intx_t lo = offset << (XLEN - 12) >> (XLEN - 12);
+    intx_t hi = (offset - lo) << (XLEN - 32) >> (XLEN - 32);
+    if (!IsUTypeImm(hi)) {
+      FATAL("Branch distance exceeds 2GB!");
+    }
+    EmitUType(hi, FAR_TMP, AUIPC);
+    EmitIType(lo, FAR_TMP, F3_0, ZR, JALR);
+    intptr_t end = Position();
+    ASSERT_EQUAL(end - start, kFarBranchLength);
+    return;
+  } else {
+    // Forward branch: speculatively use near branches and re-assemble with far
+    // branches if any need greater length.
+    if (distance == kNearJump) {
+      offset = label->link_b(Position());
+      if (!IsBTypeImm(offset)) {
+        FATAL("Incorrect Assembler::kNearJump");
+      }
+      EmitBType(offset, rs2, rs1, func, BRANCH);
+    } else if (far_branch_level() == 0) {
+      offset = label->link_b(Position());
+      if (!IsBTypeImm(offset)) {
+        // TODO(riscv): This isn't so much that this branch is out of range as
+        // that some previous jump to the same target would be out of B-type
+        // range... A possible alternative is to keep separate lists on Labels
+        // for pending B-type and J-type instructions.
+        BailoutWithBranchOffsetError();
+      }
+      EmitBType(offset, rs2, rs1, func, BRANCH);
+    } else if (far_branch_level() == 1) {
+      intptr_t start = Position();
+      const intptr_t kFarBranchLength = 8;
+      EmitBType(kFarBranchLength, rs2, rs1, InvertFunct3(func), BRANCH);
+      offset = label->link_j(Position());
+      EmitJType(offset, ZR, JAL);
+      intptr_t end = Position();
+      ASSERT_EQUAL(end - start, kFarBranchLength);
+    } else {
+      intptr_t start = Position();
+      const intptr_t kFarBranchLength = 12;
+      EmitBType(kFarBranchLength, rs2, rs1, InvertFunct3(func), BRANCH);
+      offset = label->link_far(Position());
+      intx_t lo = offset << (XLEN - 12) >> (XLEN - 12);
+      intx_t hi = (offset - lo) << (XLEN - 32) >> (XLEN - 32);
+      if (!IsUTypeImm(hi)) {
+        FATAL("Branch distance exceeds 2GB!");
+      }
+      EmitUType(hi, FAR_TMP, AUIPC);
+      EmitIType(lo, FAR_TMP, F3_0, ZR, JALR);
+      intptr_t end = Position();
+      ASSERT_EQUAL(end - start, kFarBranchLength);
+    }
+  }
+}
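+
+// The lo/hi split above (also used by EmitJump below) decomposes a 32-bit
+// pc-relative offset so that auipc (add hi to pc) followed by jalr (add the
+// sign-extended lo) lands exactly on the target. E.g. offset 0x12A48:
+// lo = 0xA48 - 0x1000 = -0x5B8 (bit 11 is set, so lo sign-extends negative)
+// and hi = 0x12A48 + 0x5B8 = 0x13000; then 0x13000 + (-0x5B8) = 0x12A48.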
+
+void MicroAssembler::EmitJump(Register rd,
+                              Label* label,
+                              Opcode op,
+                              JumpDistance distance) {
+  intptr_t offset;
+  if (label->IsBound()) {
+    // Backward jump: use near or far jump based on actual distance.
+    offset = label->Position() - Position();
+
+    if (IsJTypeImm(offset)) {
+      EmitJType(offset, rd, JAL);
+      return;
+    }
+    intx_t lo = offset << (XLEN - 12) >> (XLEN - 12);
+    intx_t hi = (offset - lo) << (XLEN - 32) >> (XLEN - 32);
+    if (!IsUTypeImm(hi)) {
+      FATAL("Jump distance exceeds 2GB!");
+    }
+    EmitUType(hi, FAR_TMP, AUIPC);
+    EmitIType(lo, FAR_TMP, F3_0, ZR, JALR);
+    return;
+  } else {
+    // Forward jump: speculatively use near jumps and re-assemble with far
+    // jumps if any need greater length.
+    if (distance == kNearJump) {
+      offset = label->link_j(Position());
+      if (!IsJTypeImm(offset)) {
+        FATAL("Incorrect Assembler::kNearJump");
+      }
+      EmitJType(offset, rd, JAL);
+    } else if (far_branch_level() < 2) {
+      offset = label->link_j(Position());
+      if (!IsJTypeImm(offset)) {
+        BailoutWithBranchOffsetError();
+      }
+      EmitJType(offset, rd, JAL);
+    } else {
+      offset = label->link_far(Position());
+      intx_t lo = offset << (XLEN - 12) >> (XLEN - 12);
+      intx_t hi = (offset - lo) << (XLEN - 32) >> (XLEN - 32);
+      if (!IsUTypeImm(hi)) {
+        FATAL("Jump distance exceeds 2GB!");
+      }
+      EmitUType(hi, FAR_TMP, AUIPC);
+      EmitIType(lo, FAR_TMP, F3_0, ZR, JALR);
+    }
+  }
+}
+
+void MicroAssembler::EmitCBranch(Register rs1p, Label* label, COpcode op) {
+  intptr_t offset;
+  if (label->IsBound()) {
+    offset = label->Position() - Position();
+  } else {
+    offset = label->link_cb(Position());
+  }
+  if (!IsCBImm(offset)) {
+    FATAL("Incorrect Assembler::kNearJump");
+  }
+  Emit16(op | EncodeCRs1p(rs1p) | EncodeCBImm(offset));
+}
+
+void MicroAssembler::EmitCJump(Label* label, COpcode op) {
+  intptr_t offset;
+  if (label->IsBound()) {
+    offset = label->Position() - Position();
+  } else {
+    offset = label->link_cj(Position());
+  }
+  if (!IsCJImm(offset)) {
+    FATAL("Incorrect Assembler::kNearJump");
+  }
+  Emit16(op | EncodeCJImm(offset));
+}
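+
+// Note the compressed forms reach much less far than their 32-bit
+// counterparts: CB-type (c.beqz/c.bnez) encodes a 9-bit signed offset
+// (+/-256B) and CJ-type (c.j/c.jal) a 12-bit signed offset (+/-2KB), which is
+// why an out-of-range offset here is a fatal kNearJump misuse rather than a
+// far-branch fallback.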
+
+void MicroAssembler::EmitRType(Funct5 funct5,
+                               std::memory_order order,
+                               Register rs2,
+                               Register rs1,
+                               Funct3 funct3,
+                               Register rd,
+                               Opcode opcode) {
+  intptr_t funct7 = funct5 << 2;
+  switch (order) {
+    case std::memory_order_acq_rel:
+      funct7 |= 0b11;
+      break;
+    case std::memory_order_acquire:
+      funct7 |= 0b10;
+      break;
+    case std::memory_order_release:
+      funct7 |= 0b01;
+      break;
+    case std::memory_order_relaxed:
+      funct7 |= 0b00;
+      break;
+    default:
+      FATAL("Invalid memory order");
+  }
+  EmitRType((Funct7)funct7, rs2, rs1, funct3, rd, opcode);
+}
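+
+// The two low bits of funct7 are the A-extension aq/rl ordering bits: aq
+// (bit 1) forbids reordering the AMO with later memory accesses, rl (bit 0)
+// with earlier ones. So std::memory_order_acquire maps to
+// funct7 = (funct5 << 2) | 0b10, and acq_rel sets both bits.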
+
+void MicroAssembler::EmitRType(Funct7 funct7,
+                               Register rs2,
+                               Register rs1,
+                               Funct3 funct3,
+                               Register rd,
+                               Opcode opcode) {
+  uint32_t e = 0;
+  e |= EncodeFunct7(funct7);
+  e |= EncodeRs2(rs2);
+  e |= EncodeRs1(rs1);
+  e |= EncodeFunct3(funct3);
+  e |= EncodeRd(rd);
+  e |= EncodeOpcode(opcode);
+  Emit32(e);
+}
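+
+// The EmitRType overloads all assemble the same 32-bit R-type layout and
+// differ only in which operands are integer vs. float registers and whether
+// funct3 carries a rounding mode:
+//   [31:25 funct7][24:20 rs2][19:15 rs1][14:12 funct3][11:7 rd][6:0 opcode]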
+
+void MicroAssembler::EmitRType(Funct7 funct7,
+                               FRegister rs2,
+                               FRegister rs1,
+                               Funct3 funct3,
+                               FRegister rd,
+                               Opcode opcode) {
+  uint32_t e = 0;
+  e |= EncodeFunct7(funct7);
+  e |= EncodeFRs2(rs2);
+  e |= EncodeFRs1(rs1);
+  e |= EncodeFunct3(funct3);
+  e |= EncodeFRd(rd);
+  e |= EncodeOpcode(opcode);
+  Emit32(e);
+}
+
+void MicroAssembler::EmitRType(Funct7 funct7,
+                               FRegister rs2,
+                               FRegister rs1,
+                               RoundingMode round,
+                               FRegister rd,
+                               Opcode opcode) {
+  uint32_t e = 0;
+  e |= EncodeFunct7(funct7);
+  e |= EncodeFRs2(rs2);
+  e |= EncodeFRs1(rs1);
+  e |= EncodeRoundingMode(round);
+  e |= EncodeFRd(rd);
+  e |= EncodeOpcode(opcode);
+  Emit32(e);
+}
+
+void MicroAssembler::EmitRType(Funct7 funct7,
+                               FRegister rs2,
+                               Register rs1,
+                               RoundingMode round,
+                               FRegister rd,
+                               Opcode opcode) {
+  uint32_t e = 0;
+  e |= EncodeFunct7(funct7);
+  e |= EncodeFRs2(rs2);
+  e |= EncodeRs1(rs1);
+  e |= EncodeRoundingMode(round);
+  e |= EncodeFRd(rd);
+  e |= EncodeOpcode(opcode);
+  Emit32(e);
+}
+
+void MicroAssembler::EmitRType(Funct7 funct7,
+                               FRegister rs2,
+                               Register rs1,
+                               Funct3 funct3,
+                               FRegister rd,
+                               Opcode opcode) {
+  uint32_t e = 0;
+  e |= EncodeFunct7(funct7);
+  e |= EncodeFRs2(rs2);
+  e |= EncodeRs1(rs1);
+  e |= EncodeFunct3(funct3);
+  e |= EncodeFRd(rd);
+  e |= EncodeOpcode(opcode);
+  Emit32(e);
+}
+
+void MicroAssembler::EmitRType(Funct7 funct7,
+                               FRegister rs2,
+                               FRegister rs1,
+                               Funct3 funct3,
+                               Register rd,
+                               Opcode opcode) {
+  uint32_t e = 0;
+  e |= EncodeFunct7(funct7);
+  e |= EncodeFRs2(rs2);
+  e |= EncodeFRs1(rs1);
+  e |= EncodeFunct3(funct3);
+  e |= EncodeRd(rd);
+  e |= EncodeOpcode(opcode);
+  Emit32(e);
+}
+
+void MicroAssembler::EmitRType(Funct7 funct7,
+                               FRegister rs2,
+                               FRegister rs1,
+                               RoundingMode round,
+                               Register rd,
+                               Opcode opcode) {
+  uint32_t e = 0;
+  e |= EncodeFunct7(funct7);
+  e |= EncodeFRs2(rs2);
+  e |= EncodeFRs1(rs1);
+  e |= EncodeRoundingMode(round);
+  e |= EncodeRd(rd);
+  e |= EncodeOpcode(opcode);
+  Emit32(e);
+}
+
+void MicroAssembler::EmitRType(Funct7 funct7,
+                               intptr_t shamt,
+                               Register rs1,
+                               Funct3 funct3,
+                               Register rd,
+                               Opcode opcode) {
+  uint32_t e = 0;
+  e |= EncodeFunct7(funct7);
+  e |= EncodeShamt(shamt);
+  e |= EncodeRs1(rs1);
+  e |= EncodeFunct3(funct3);
+  e |= EncodeRd(rd);
+  e |= EncodeOpcode(opcode);
+  Emit32(e);
+}
+
+void MicroAssembler::EmitR4Type(FRegister rs3,
+                                Funct2 funct2,
+                                FRegister rs2,
+                                FRegister rs1,
+                                RoundingMode round,
+                                FRegister rd,
+                                Opcode opcode) {
+  uint32_t e = 0;
+  e |= EncodeFRs3(rs3);
+  e |= EncodeFunct2(funct2);
+  e |= EncodeFRs2(rs2);
+  e |= EncodeFRs1(rs1);
+  e |= EncodeRoundingMode(round);
+  e |= EncodeFRd(rd);
+  e |= EncodeOpcode(opcode);
+  Emit32(e);
+}
+
+void MicroAssembler::EmitIType(intptr_t imm,
+                               Register rs1,
+                               Funct3 funct3,
+                               Register rd,
+                               Opcode opcode) {
+  uint32_t e = 0;
+  e |= EncodeITypeImm(imm);
+  e |= EncodeRs1(rs1);
+  e |= EncodeFunct3(funct3);
+  e |= EncodeRd(rd);
+  e |= EncodeOpcode(opcode);
+  Emit32(e);
+}
+
+void MicroAssembler::EmitIType(intptr_t imm,
+                               Register rs1,
+                               Funct3 funct3,
+                               FRegister rd,
+                               Opcode opcode) {
+  uint32_t e = 0;
+  e |= EncodeITypeImm(imm);
+  e |= EncodeRs1(rs1);
+  e |= EncodeFunct3(funct3);
+  e |= EncodeFRd(rd);
+  e |= EncodeOpcode(opcode);
+  Emit32(e);
+}
+
+void MicroAssembler::EmitSType(intptr_t imm,
+                               Register rs2,
+                               Register rs1,
+                               Funct3 funct3,
+                               Opcode opcode) {
+  uint32_t e = 0;
+  e |= EncodeSTypeImm(imm);
+  e |= EncodeRs2(rs2);
+  e |= EncodeRs1(rs1);
+  e |= EncodeFunct3(funct3);
+  e |= EncodeOpcode(opcode);
+  Emit32(e);
+}
+
+void MicroAssembler::EmitSType(intptr_t imm,
+                               FRegister rs2,
+                               Register rs1,
+                               Funct3 funct3,
+                               Opcode opcode) {
+  uint32_t e = 0;
+  e |= EncodeSTypeImm(imm);
+  e |= EncodeFRs2(rs2);
+  e |= EncodeRs1(rs1);
+  e |= EncodeFunct3(funct3);
+  e |= EncodeOpcode(opcode);
+  Emit32(e);
+}
+
+void MicroAssembler::EmitBType(intptr_t imm,
+                               Register rs2,
+                               Register rs1,
+                               Funct3 funct3,
+                               Opcode opcode) {
+  uint32_t e = 0;
+  e |= EncodeBTypeImm(imm);
+  e |= EncodeRs2(rs2);
+  e |= EncodeRs1(rs1);
+  e |= EncodeFunct3(funct3);
+  e |= EncodeOpcode(opcode);
+  Emit32(e);
+}
+
+void MicroAssembler::EmitUType(intptr_t imm, Register rd, Opcode opcode) {
+  uint32_t e = 0;
+  e |= EncodeUTypeImm(imm);
+  e |= EncodeRd(rd);
+  e |= EncodeOpcode(opcode);
+  Emit32(e);
+}
+
+void MicroAssembler::EmitJType(intptr_t imm, Register rd, Opcode opcode) {
+  uint32_t e = 0;
+  e |= EncodeJTypeImm(imm);
+  e |= EncodeRd(rd);
+  e |= EncodeOpcode(opcode);
+  Emit32(e);
+}
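+
+// U-type places imm[31:12] directly in the instruction's top 20 bits, while
+// J-type scrambles imm[20|10:1|11|19:12] into the same field; rd and opcode
+// sit in the standard positions in both, so only the immediate encoders
+// differ.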
+
+Assembler::Assembler(ObjectPoolBuilder* object_pool_builder,
+                     intptr_t far_branch_level)
+    : MicroAssembler(object_pool_builder,
+                     far_branch_level,
+                     FLAG_use_compressed_instructions ? RV_GC : RV_G),
+      constant_pool_allowed_(false) {
+  generate_invoke_write_barrier_wrapper_ = [&](Register reg) {
+    // Note this does not destroy RA.
+    lx(TMP,
+       Address(THR, target::Thread::write_barrier_wrappers_thread_offset(reg)));
+    jalr(TMP, TMP);
+  };
+  generate_invoke_array_write_barrier_ = [&]() {
+    Call(
+        Address(THR, target::Thread::array_write_barrier_entry_point_offset()));
+  };
+}
+
+void Assembler::PushRegister(Register r) {
+  ASSERT(r != SP);
+  subi(SP, SP, target::kWordSize);
+  sx(r, Address(SP, 0));
+}
+void Assembler::PopRegister(Register r) {
+  ASSERT(r != SP);
+  lx(r, Address(SP, 0));
+  addi(SP, SP, target::kWordSize);
+}
+
+void Assembler::PushRegisterPair(Register r0, Register r1) {
+  ASSERT(r0 != SP);
+  ASSERT(r1 != SP);
+  subi(SP, SP, 2 * target::kWordSize);
+  sx(r1, Address(SP, target::kWordSize));
+  sx(r0, Address(SP, 0));
+}
+
+void Assembler::PopRegisterPair(Register r0, Register r1) {
+  ASSERT(r0 != SP);
+  ASSERT(r1 != SP);
+  lx(r1, Address(SP, target::kWordSize));
+  lx(r0, Address(SP, 0));
+  addi(SP, SP, 2 * target::kWordSize);
+}
+
+void Assembler::PushRegisters(const RegisterSet& regs) {
+  // The order in which the registers are pushed must match the order
+  // in which the registers are encoded in the safepoint's stack map.
+
+  intptr_t size = (regs.CpuRegisterCount() * target::kWordSize) +
+                  (regs.FpuRegisterCount() * kFpuRegisterSize);
+  if (size == 0) {
+    return;  // Skip no-op SP update.
+  }
+
+  subi(SP, SP, size);
+  intptr_t offset = size;
+  for (intptr_t i = kNumberOfFpuRegisters - 1; i >= 0; i--) {
+    FRegister reg = static_cast<FRegister>(i);
+    if (regs.ContainsFpuRegister(reg)) {
+      offset -= kFpuRegisterSize;
+      fsd(reg, Address(SP, offset));
+    }
+  }
+  for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; i--) {
+    Register reg = static_cast<Register>(i);
+    if (regs.ContainsRegister(reg)) {
+      offset -= target::kWordSize;
+      sx(reg, Address(SP, offset));
+    }
+  }
+  ASSERT(offset == 0);
+}
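+
+// E.g. for regs = {a2, a3, fa4} on RV64 (assuming 8-byte FPU register
+// slots): size = 24, FPU registers land at the highest addresses, and the
+// frame is laid out as SP+0: a2, SP+8: a3, SP+16: fa4 -- ascending register
+// numbers at ascending addresses, matching the stack map encoding.
+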
+void Assembler::PopRegisters(const RegisterSet& regs) {
+  // The order in which the registers are pushed must match the order
+  // in which the registers are encoded in the safepoint's stack map.
+
+  intptr_t size = (regs.CpuRegisterCount() * target::kWordSize) +
+                  (regs.FpuRegisterCount() * kFpuRegisterSize);
+  if (size == 0) {
+    return;  // Skip no-op SP update.
+  }
+  intptr_t offset = 0;
+  for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
+    Register reg = static_cast<Register>(i);
+    if (regs.ContainsRegister(reg)) {
+      lx(reg, Address(SP, offset));
+      offset += target::kWordSize;
+    }
+  }
+  for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
+    FRegister reg = static_cast<FRegister>(i);
+    if (regs.ContainsFpuRegister(reg)) {
+      fld(reg, Address(SP, offset));
+      offset += kFpuRegisterSize;
+    }
+  }
+  ASSERT(offset == size);
+  addi(SP, SP, size);
+}
+
+void Assembler::PushNativeCalleeSavedRegisters() {
+  RegisterSet regs(kAbiPreservedCpuRegs, kAbiPreservedFpuRegs);
+  intptr_t size = (regs.CpuRegisterCount() * target::kWordSize) +
+                  (regs.FpuRegisterCount() * sizeof(double));
+  subi(SP, SP, size);
+  intptr_t offset = 0;
+  for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
+    FRegister reg = static_cast<FRegister>(i);
+    if (regs.ContainsFpuRegister(reg)) {
+      fsd(reg, Address(SP, offset));
+      offset += sizeof(double);
+    }
+  }
+  for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
+    Register reg = static_cast<Register>(i);
+    if (regs.ContainsRegister(reg)) {
+      sx(reg, Address(SP, offset));
+      offset += target::kWordSize;
+    }
+  }
+  ASSERT(offset == size);
+}
+
+void Assembler::PopNativeCalleeSavedRegisters() {
+  RegisterSet regs(kAbiPreservedCpuRegs, kAbiPreservedFpuRegs);
+  intptr_t size = (regs.CpuRegisterCount() * target::kWordSize) +
+                  (regs.FpuRegisterCount() * sizeof(double));
+  intptr_t offset = 0;
+  for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
+    FRegister reg = static_cast<FRegister>(i);
+    if (regs.ContainsFpuRegister(reg)) {
+      fld(reg, Address(SP, offset));
+      offset += sizeof(double);
+    }
+  }
+  for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
+    Register reg = static_cast<Register>(i);
+    if (regs.ContainsRegister(reg)) {
+      lx(reg, Address(SP, offset));
+      offset += target::kWordSize;
+    }
+  }
+  ASSERT(offset == size);
+  addi(SP, SP, size);
+}
+
+void Assembler::ExtendValue(Register rd, Register rn, OperandSize sz) {
+  switch (sz) {
+#if XLEN == 64
+    case kEightBytes:
+      if (rd == rn) return;  // No operation needed.
+      return mv(rd, rn);
+    case kUnsignedFourBytes:
+      return UNIMPLEMENTED();
+    case kFourBytes:
+      return sextw(rd, rn);
+#elif XLEN == 32
+    case kUnsignedFourBytes:
+    case kFourBytes:
+      if (rd == rn) return;  // No operation needed.
+      return mv(rd, rn);
+#endif
+    case kUnsignedTwoBytes:
+    case kTwoBytes:
+    case kUnsignedByte:
+    case kByte:
+    default:
+      UNIMPLEMENTED();
+      break;
+  }
+  UNIMPLEMENTED();
+}
+void Assembler::ExtendAndSmiTagValue(Register rd, Register rn, OperandSize sz) {
+  if (sz == kWordBytes) {
+    SmiTag(rd, rn);
+    return;
+  }
+
+  switch (sz) {
+#if XLEN == 64
+    case kUnsignedFourBytes:
+      slli(rd, rn, XLEN - kBitsPerInt32);
+      srli(rd, rd, XLEN - kBitsPerInt32 - kSmiTagShift);
+      return;
+    case kFourBytes:
+      slli(rd, rn, XLEN - kBitsPerInt32);
+      srai(rd, rd, XLEN - kBitsPerInt32 - kSmiTagShift);
+      return;
+#endif
+    case kUnsignedTwoBytes:
+      slli(rd, rn, XLEN - kBitsPerInt16);
+      srli(rd, rd, XLEN - kBitsPerInt16 - kSmiTagShift);
+      return;
+    case kTwoBytes:
+      slli(rd, rn, XLEN - kBitsPerInt16);
+      srai(rd, rd, XLEN - kBitsPerInt16 - kSmiTagShift);
+      return;
+    case kUnsignedByte:
+      slli(rd, rn, XLEN - kBitsPerInt8);
+      srli(rd, rd, XLEN - kBitsPerInt8 - kSmiTagShift);
+      return;
+    case kByte:
+      slli(rd, rn, XLEN - kBitsPerInt8);
+      srai(rd, rd, XLEN - kBitsPerInt8 - kSmiTagShift);
+      return;
+    default:
+      UNIMPLEMENTED();
+      break;
+  }
+}
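+
+// The shift pairs above fuse the extension with the Smi tag shift: e.g. for
+// kFourBytes on RV64, slli by 32 then srai by 32 - kSmiTagShift = 31 leaves
+// the value sign-extended from bit 31 and shifted left one, i.e. sext32(rn)
+// as a tagged Smi, in two instructions.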
+
+// Unconditional jump to a given address in memory. Clobbers TMP2.
+void Assembler::Jump(const Address& address) {
+  lx(TMP2, address);
+  jr(TMP2);
+}
+
+void Assembler::LoadField(Register dst, const FieldAddress& address) {
+  lx(dst, address);
+}
+
+#if defined(USING_THREAD_SANITIZER)
+void Assembler::TsanLoadAcquire(Register addr) {
+  UNIMPLEMENTED();
+}
+void Assembler::TsanStoreRelease(Register addr) {
+  UNIMPLEMENTED();
+}
+#endif
+
+void Assembler::LoadAcquire(Register dst, Register address, int32_t offset) {
+  ASSERT(dst != address);
+  LoadFromOffset(dst, address, offset);
+  fence(HartEffects::kRead, HartEffects::kMemory);
+
+#if defined(USING_THREAD_SANITIZER)
+  if (offset == 0) {
+    TsanLoadAcquire(address);
+  } else {
+    AddImmediate(TMP2, address, offset);
+    TsanLoadAcquire(TMP2);
+  }
+#endif
+}
+
+void Assembler::LoadAcquireCompressed(Register dst,
+                                      Register address,
+                                      int32_t offset) {
+  LoadAcquire(dst, address, offset);
+}
+
+void Assembler::StoreRelease(Register src, Register address, int32_t offset) {
+  fence(HartEffects::kMemory, HartEffects::kWrite);
+  StoreToOffset(src, address, offset);
+}
+
+void Assembler::StoreReleaseCompressed(Register src,
+                                       Register address,
+                                       int32_t offset) {
+  UNIMPLEMENTED();
+}
+
+void Assembler::CompareWithCompressedFieldFromOffset(Register value,
+                                                     Register base,
+                                                     int32_t offset) {
+  UNIMPLEMENTED();
+}
+
+void Assembler::CompareWithMemoryValue(Register value,
+                                       Address address,
+                                       OperandSize sz) {
+  UNIMPLEMENTED();
+}
+
+void Assembler::CompareFunctionTypeNullabilityWith(Register type,
+                                                   int8_t value) {
+  EnsureHasClassIdInDEBUG(kFunctionTypeCid, type, TMP);
+  lbu(TMP,
+      FieldAddress(type, compiler::target::FunctionType::nullability_offset()));
+  CompareImmediate(TMP, value);
+}
+void Assembler::CompareTypeNullabilityWith(Register type, int8_t value) {
+  EnsureHasClassIdInDEBUG(kTypeCid, type, TMP);
+  lbu(TMP, FieldAddress(type, compiler::target::Type::nullability_offset()));
+  CompareImmediate(TMP, value);
+}
+
+void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) {
+  if (frame_space != 0) {
+    addi(SP, SP, -frame_space);
+  }
+  const intptr_t kAbiStackAlignment = 16;  // For both 32 and 64 bit.
+  andi(SP, SP, ~(kAbiStackAlignment - 1));
+}
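+
+// E.g. ReserveAlignedFrameSpace(24) emits addi sp, sp, -24 followed by
+// andi sp, sp, -16: after the subtraction SP may sit at only an 8-byte
+// boundary, and clearing the low four bits rounds it down to the 16-byte
+// alignment the RISC-V calling convention requires.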
+
+// In debug mode, this generates code to check that:
+//   FP + kExitLinkSlotFromEntryFp == SP
+// or triggers a breakpoint otherwise.
+void Assembler::EmitEntryFrameVerification() {
+#if defined(DEBUG)
+  Label done;
+  ASSERT(!constant_pool_allowed());
+  LoadImmediate(TMP, target::frame_layout.exit_link_slot_from_entry_fp *
+                         target::kWordSize);
+  add(TMP, TMP, FPREG);
+  beq(TMP, SPREG, &done, kNearJump);
+
+  Breakpoint();
+
+  Bind(&done);
+#endif
+}
+
+void Assembler::CompareRegisters(Register rn, Register rm) {
+  ASSERT(deferred_compare_ == kNone);
+  deferred_compare_ = kCompareReg;
+  deferred_left_ = rn;
+  deferred_reg_ = rm;
+}
+void Assembler::CompareObjectRegisters(Register rn, Register rm) {
+  CompareRegisters(rn, rm);
+}
+void Assembler::TestRegisters(Register rn, Register rm) {
+  ASSERT(deferred_compare_ == kNone);
+  deferred_compare_ = kTestReg;
+  deferred_left_ = rn;
+  deferred_reg_ = rm;
+}
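+
+// RISC-V has no condition flags, so Compare*/Test* emit nothing themselves;
+// they only record their operands in the deferred_* fields, and the
+// following BranchIf/SetIf materializes a fused compare-and-branch or a
+// slt/seqz sequence. E.g.
+//   CompareImmediate(a0, 42);
+//   BranchIf(EQUAL, &label);
+// produces li TMP2, 42; beq a0, TMP2, label.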
+
+void Assembler::BranchIf(Condition condition,
+                         Label* label,
+                         JumpDistance distance) {
+  ASSERT(deferred_compare_ != kNone);
+
+  if (deferred_compare_ == kCompareImm || deferred_compare_ == kCompareReg) {
+    Register left = deferred_left_;
+    Register right;
+    if (deferred_compare_ == kCompareImm) {
+      if (deferred_imm_ == 0) {
+        right = ZR;
+      } else {
+        LoadImmediate(TMP2, deferred_imm_);
+        right = TMP2;
+      }
+    } else {
+      right = deferred_reg_;
+    }
+    switch (condition) {
+      case EQUAL:
+        beq(left, right, label, distance);
+        break;
+      case NOT_EQUAL:
+        bne(left, right, label, distance);
+        break;
+      case LESS:
+        blt(left, right, label, distance);
+        break;
+      case LESS_EQUAL:
+        ble(left, right, label, distance);
+        break;
+      case GREATER_EQUAL:
+        bge(left, right, label, distance);
+        break;
+      case GREATER:
+        bgt(left, right, label, distance);
+        break;
+      case UNSIGNED_LESS:
+        bltu(left, right, label, distance);
+        break;
+      case UNSIGNED_LESS_EQUAL:
+        bleu(left, right, label, distance);
+        break;
+      case UNSIGNED_GREATER_EQUAL:
+        bgeu(left, right, label, distance);
+        break;
+      case UNSIGNED_GREATER:
+        bgtu(left, right, label, distance);
+        break;
+      case OVERFLOW:
+      case NO_OVERFLOW:
+        FATAL("Use Add/Subtract/MultiplyBranchOverflow instead.");
+      default:
+        UNREACHABLE();
+    }
+  } else if (deferred_compare_ == kTestImm || deferred_compare_ == kTestReg) {
+    if (deferred_compare_ == kTestImm) {
+      AndImmediate(TMP2, deferred_left_, deferred_imm_);
+    } else {
+      and_(TMP2, deferred_left_, deferred_reg_);
+    }
+    switch (condition) {
+      case ZERO:
+        beqz(TMP2, label, distance);
+        break;
+      case NOT_ZERO:
+        bnez(TMP2, label, distance);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  } else {
+    UNREACHABLE();
+  }
+  deferred_compare_ = kNone;  // Consumed.
+}
+
+void Assembler::SetIf(Condition condition, Register rd) {
+  ASSERT(deferred_compare_ != kNone);
+
+  if (deferred_compare_ == kCompareImm) {
+    if (deferred_imm_ == 0) {
+      deferred_compare_ = kCompareReg;
+      deferred_reg_ = ZR;
+      SetIf(condition, rd);
+      return;
+    }
+    if (!IsITypeImm(deferred_imm_) || !IsITypeImm(deferred_imm_ + 1)) {
+      LoadImmediate(TMP2, deferred_imm_);
+      deferred_compare_ = kCompareReg;
+      deferred_reg_ = TMP2;
+      SetIf(condition, rd);
+      return;
+    }
+    Register left = deferred_left_;
+    intx_t right = deferred_imm_;
+    switch (condition) {
+      case EQUAL:
+        xori(rd, left, right);
+        seqz(rd, rd);
+        break;
+      case NOT_EQUAL:
+        xori(rd, left, right);
+        snez(rd, rd);
+        break;
+      case LESS:
+        slti(rd, left, right);
+        break;
+      case LESS_EQUAL:
+        slti(rd, left, right + 1);
+        break;
+      case GREATER_EQUAL:
+        slti(rd, left, right);
+        xori(rd, rd, 1);
+        break;
+      case GREATER:
+        slti(rd, left, right + 1);
+        xori(rd, rd, 1);
+        break;
+      case UNSIGNED_LESS:
+        sltiu(rd, left, right);
+        break;
+      case UNSIGNED_LESS_EQUAL:
+        sltiu(rd, left, right + 1);
+        break;
+      case UNSIGNED_GREATER_EQUAL:
+        sltiu(rd, left, right);
+        xori(rd, rd, 1);
+        break;
+      case UNSIGNED_GREATER:
+        sltiu(rd, left, right + 1);
+        xori(rd, rd, 1);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  } else if (deferred_compare_ == kCompareReg) {
+    Register left = deferred_left_;
+    Register right = deferred_reg_;
+    switch (condition) {
+      case EQUAL:
+        if (right == ZR) {
+          seqz(rd, left);
+        } else {
+          xor_(rd, left, right);
+          seqz(rd, rd);
+        }
+        break;
+      case NOT_EQUAL:
+        if (right == ZR) {
+          snez(rd, left);
+        } else {
+          xor_(rd, left, right);
+          snez(rd, rd);
+        }
+        break;
+      case LESS:
+        slt(rd, left, right);
+        break;
+      case LESS_EQUAL:
+        slt(rd, right, left);
+        xori(rd, rd, 1);
+        break;
+      case GREATER_EQUAL:
+        slt(rd, left, right);
+        xori(rd, rd, 1);
+        break;
+      case GREATER:
+        slt(rd, right, left);
+        break;
+      case UNSIGNED_LESS:
+        sltu(rd, left, right);
+        break;
+      case UNSIGNED_LESS_EQUAL:
+        sltu(rd, right, left);
+        xori(rd, rd, 1);
+        break;
+      case UNSIGNED_GREATER_EQUAL:
+        sltu(rd, left, right);
+        xori(rd, rd, 1);
+        break;
+      case UNSIGNED_GREATER:
+        sltu(rd, right, left);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  } else if (deferred_compare_ == kTestImm || deferred_compare_ == kTestReg) {
+    if (deferred_compare_ == kTestImm) {
+      AndImmediate(TMP2, deferred_left_, deferred_imm_);
+    } else {
+      and_(TMP2, deferred_left_, deferred_reg_);
+    }
+    switch (condition) {
+      case ZERO:
+        seqz(rd, TMP2);
+        break;
+      case NOT_ZERO:
+        snez(rd, TMP2);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  } else {
+    UNREACHABLE();
+  }
+
+  deferred_compare_ = kNone;  // Consumed.
+}
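+
+// The +1 adjustments above use x <= imm being equivalent to x < imm + 1 (and
+// the unsigned analogue); this is why the immediate path bails out to a
+// register compare unless both deferred_imm_ and deferred_imm_ + 1 fit the
+// 12-bit I-type range: LESS_EQUAL and GREATER need slti with imm + 1.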
+
+void Assembler::BranchIfZero(Register rn, Label* label, JumpDistance distance) {
+  beqz(rn, label, distance);
+}
+
+void Assembler::BranchIfNotSmi(Register reg,
+                               Label* label,
+                               JumpDistance distance) {
+  ASSERT(reg != TMP2);
+  andi(TMP2, reg, kSmiTagMask);
+  bnez(TMP2, label, distance);
+}
+void Assembler::BranchIfSmi(Register reg, Label* label, JumpDistance distance) {
+  ASSERT(reg != TMP2);
+  andi(TMP2, reg, kSmiTagMask);
+  beqz(TMP2, label, distance);
+}
+
+void Assembler::Jump(const Code& target,
+                     Register pp,
+                     ObjectPoolBuilderEntry::Patchability patchable) {
+  const intptr_t index =
+      object_pool_builder().FindObject(ToObject(target), patchable);
+  LoadWordFromPoolIndex(CODE_REG, index, pp);
+  Jump(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
+}
+
+void Assembler::JumpAndLink(const Code& target,
+                            ObjectPoolBuilderEntry::Patchability patchable,
+                            CodeEntryKind entry_kind) {
+  const intptr_t index =
+      object_pool_builder().FindObject(ToObject(target), patchable);
+  LoadWordFromPoolIndex(CODE_REG, index);
+  Call(FieldAddress(CODE_REG, target::Code::entry_point_offset(entry_kind)));
+}
+
+void Assembler::JumpAndLinkToRuntime() {
+  Call(Address(THR, target::Thread::call_to_runtime_entry_point_offset()));
+}
+
+void Assembler::JumpAndLinkWithEquivalence(const Code& target,
+                                           const Object& equivalence,
+                                           CodeEntryKind entry_kind) {
+  const intptr_t index =
+      object_pool_builder().FindObject(ToObject(target), equivalence);
+  LoadWordFromPoolIndex(CODE_REG, index);
+  Call(FieldAddress(CODE_REG, target::Code::entry_point_offset(entry_kind)));
+}
+
+void Assembler::Call(Address target) {
+  lx(RA, target);
+  jalr(RA);
+}
+
+void Assembler::AddImmediate(Register rd,
+                             Register rs1,
+                             intx_t imm,
+                             OperandSize sz) {
+  if (IsITypeImm(imm)) {
+    addi(rd, rs1, imm);
+  } else {
+    ASSERT(rs1 != TMP2);
+    LoadImmediate(TMP2, imm);
+    add(rd, rs1, TMP2);
+  }
+}
+void Assembler::AndImmediate(Register rd,
+                             Register rs1,
+                             intx_t imm,
+                             OperandSize sz) {
+  if (IsITypeImm(imm)) {
+    andi(rd, rs1, imm);
+  } else {
+    ASSERT(rs1 != TMP2);
+    LoadImmediate(TMP2, imm);
+    and_(rd, rs1, TMP2);
+  }
+}
+void Assembler::OrImmediate(Register rd,
+                            Register rs1,
+                            intx_t imm,
+                            OperandSize sz) {
+  if (IsITypeImm(imm)) {
+    ori(rd, rs1, imm);
+  } else {
+    ASSERT(rs1 != TMP2);
+    LoadImmediate(TMP2, imm);
+    or_(rd, rs1, TMP2);
+  }
+}
+void Assembler::XorImmediate(Register rd,
+                             Register rs1,
+                             intx_t imm,
+                             OperandSize sz) {
+  if (IsITypeImm(imm)) {
+    xori(rd, rs1, imm);
+  } else {
+    ASSERT(rs1 != TMP2);
+    LoadImmediate(TMP2, imm);
+    xor_(rd, rs1, TMP2);
+  }
+}
+
+void Assembler::TestImmediate(Register rn, intx_t imm, OperandSize sz) {
+  ASSERT(deferred_compare_ == kNone);
+  deferred_compare_ = kTestImm;
+  deferred_left_ = rn;
+  deferred_imm_ = imm;
+}
+void Assembler::CompareImmediate(Register rn, intx_t imm, OperandSize sz) {
+  ASSERT(deferred_compare_ == kNone);
+  deferred_compare_ = kCompareImm;
+  deferred_left_ = rn;
+  deferred_imm_ = imm;
+}
+
+void Assembler::LoadFromOffset(Register dest,
+                               const Address& address,
+                               OperandSize sz) {
+  LoadFromOffset(dest, address.base(), address.offset(), sz);
+}
+void Assembler::LoadFromOffset(Register dest,
+                               Register base,
+                               int32_t offset,
+                               OperandSize sz) {
+  ASSERT(base != TMP2);
+  if (!IsITypeImm(offset)) {
+    intx_t imm = offset;
+    intx_t lo = imm << (XLEN - 12) >> (XLEN - 12);
+    intx_t hi = (imm - lo) << (XLEN - 32) >> (XLEN - 32);
+    if (hi == 0) {
+      UNREACHABLE();
+    } else {
+      lui(TMP2, hi);
+      add(TMP2, TMP2, base);
+      base = TMP2;
+      offset = lo;
+    }
+  }
+  switch (sz) {
+#if XLEN == 64
+    case kEightBytes:
+      return ld(dest, Address(base, offset));
+    case kUnsignedFourBytes:
+      return lwu(dest, Address(base, offset));
+#elif XLEN == 32
+    case kUnsignedFourBytes:
+      return lw(dest, Address(base, offset));
+#endif
+    case kFourBytes:
+      return lw(dest, Address(base, offset));
+    case kUnsignedTwoBytes:
+      return lhu(dest, Address(base, offset));
+    case kTwoBytes:
+      return lh(dest, Address(base, offset));
+    case kUnsignedByte:
+      return lbu(dest, Address(base, offset));
+    case kByte:
+      return lb(dest, Address(base, offset));
+    default:
+      UNREACHABLE();
+  }
+}
+// For loading indexed payloads out of tagged objects like Arrays. If the
+// payload elements are word-sized, use TIMES_HALF_WORD_SIZE when the contents
+// of [index] is a Smi, and TIMES_WORD_SIZE when it is unboxed.
+void Assembler::LoadIndexedPayload(Register dest,
+                                   Register base,
+                                   int32_t payload_offset,
+                                   Register index,
+                                   ScaleFactor scale,
+                                   OperandSize sz) {
+  slli(TMP, index, scale);
+  add(TMP, TMP, base);
+  LoadFromOffset(dest, TMP, payload_offset - kHeapObjectTag, sz);
+}
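+
+// E.g. a word-sized element with a Smi index on RV64: the tagged index holds
+// i << 1, so TIMES_HALF_WORD_SIZE (a shift of 2) yields i << 3 = i * 8, the
+// byte offset of element i, before LoadFromOffset folds in the tagged base
+// and payload offset.
+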
+void Assembler::LoadIndexedCompressed(Register dest,
+                                      Register base,
+                                      int32_t offset,
+                                      Register index) {
+  LoadIndexedPayload(dest, base, offset, index, TIMES_WORD_SIZE, kObjectBytes);
+}
+
+void Assembler::LoadSFromOffset(FRegister dest, Register base, int32_t offset) {
+  ASSERT(base != TMP2);
+  if (!IsITypeImm(offset)) {
+    intx_t imm = offset;
+    intx_t lo = imm << (XLEN - 12) >> (XLEN - 12);
+    intx_t hi = (imm - lo) << (XLEN - 32) >> (XLEN - 32);
+    if (hi == 0) {
+      UNREACHABLE();
+    } else {
+      lui(TMP2, hi);
+      add(TMP2, TMP2, base);
+      base = TMP2;
+      offset = lo;
+    }
+  }
+  flw(dest, Address(base, offset));
+}
+
+void Assembler::LoadDFromOffset(FRegister dest, Register base, int32_t offset) {
+  ASSERT(base != TMP2);
+  if (!IsITypeImm(offset)) {
+    intx_t imm = offset;
+    intx_t lo = imm << (XLEN - 12) >> (XLEN - 12);
+    intx_t hi = (imm - lo) << (XLEN - 32) >> (XLEN - 32);
+    if (hi == 0) {
+      UNREACHABLE();
+    } else {
+      lui(TMP2, hi);
+      add(TMP2, TMP2, base);
+      base = TMP2;
+      offset = lo;
+    }
+  }
+  fld(dest, Address(base, offset));
+}
+
+void Assembler::LoadFromStack(Register dst, intptr_t depth) {
+  UNIMPLEMENTED();
+}
+void Assembler::StoreToStack(Register src, intptr_t depth) {
+  UNIMPLEMENTED();
+}
+void Assembler::CompareToStack(Register src, intptr_t depth) {
+  UNIMPLEMENTED();
+}
+
+void Assembler::StoreToOffset(Register src,
+                              const Address& address,
+                              OperandSize sz) {
+  StoreToOffset(src, address.base(), address.offset(), sz);
+}
+void Assembler::StoreToOffset(Register src,
+                              Register base,
+                              int32_t offset,
+                              OperandSize sz) {
+  ASSERT(base != TMP2);
+  if (!IsITypeImm(offset)) {
+    intx_t imm = offset;
+    intx_t lo = imm << (XLEN - 12) >> (XLEN - 12);
+    intx_t hi = (imm - lo) << (XLEN - 32) >> (XLEN - 32);
+    if (hi == 0) {
+      UNREACHABLE();
+    } else {
+      lui(TMP2, hi);
+      add(TMP2, TMP2, base);
+      base = TMP2;
+      offset = lo;
+    }
+  }
+  switch (sz) {
+#if XLEN == 64
+    case kEightBytes:
+      return sd(src, Address(base, offset));
+#endif
+    case kUnsignedFourBytes:
+    case kFourBytes:
+      return sw(src, Address(base, offset));
+    case kUnsignedTwoBytes:
+    case kTwoBytes:
+      return sh(src, Address(base, offset));
+    case kUnsignedByte:
+    case kByte:
+      return sb(src, Address(base, offset));
+    default:
+      UNREACHABLE();
+  }
+}
+
+void Assembler::StoreSToOffset(FRegister src, Register base, int32_t offset) {
+  ASSERT(base != TMP2);
+  if (!IsITypeImm(offset)) {
+    intx_t imm = offset;
+    intx_t lo = imm << (XLEN - 12) >> (XLEN - 12);
+    intx_t hi = (imm - lo) << (XLEN - 32) >> (XLEN - 32);
+    if (hi == 0) {
+      UNREACHABLE();
+    } else {
+      lui(TMP2, hi);
+      add(TMP2, TMP2, base);
+      base = TMP2;
+      offset = lo;
+    }
+  }
+  fsw(src, Address(base, offset));
+}
+
+void Assembler::StoreDToOffset(FRegister src, Register base, int32_t offset) {
+  ASSERT(base != TMP2);
+  if (!IsITypeImm(offset)) {
+    intx_t imm = offset;
+    intx_t lo = imm << (XLEN - 12) >> (XLEN - 12);
+    intx_t hi = (imm - lo) << (XLEN - 32) >> (XLEN - 32);
+    if (hi == 0) {
+      UNREACHABLE();
+    } else {
+      lui(TMP2, hi);
+      add(TMP2, TMP2, base);
+      base = TMP2;
+      offset = lo;
+    }
+  }
+  fsd(src, Address(base, offset));
+}
+
+void Assembler::LoadUnboxedDouble(FpuRegister dst,
+                                  Register base,
+                                  int32_t offset) {
+  fld(dst, Address(base, offset));
+}
+void Assembler::StoreUnboxedDouble(FpuRegister src,
+                                   Register base,
+                                   int32_t offset) {
+  fsd(src, Address(base, offset));
+}
+void Assembler::MoveUnboxedDouble(FpuRegister dst, FpuRegister src) {
+  fmvd(dst, src);
+}
+
+void Assembler::LoadCompressed(Register dest, const Address& slot) {
+  lx(dest, slot);
+}
+void Assembler::LoadCompressedFromOffset(Register dest,
+                                         Register base,
+                                         int32_t offset) {
+  lx(dest, Address(base, offset));
+}
+void Assembler::LoadCompressedSmi(Register dest, const Address& slot) {
+  lx(dest, slot);
+}
+void Assembler::LoadCompressedSmiFromOffset(Register dest,
+                                            Register base,
+                                            int32_t offset) {
+  lx(dest, Address(base, offset));
+}
+
+// Store into a heap object and apply the generational and incremental write
+// barriers. All stores into heap objects must pass through this function or,
+// if the value can be proven to be either a Smi or old-and-premarked, one of
+// the NoBarrier variants.
+// Preserves the object and value registers.
+void Assembler::StoreIntoObject(Register object,
+                                const Address& dest,
+                                Register value,
+                                CanBeSmi can_value_be_smi,
+                                MemoryOrder memory_order) {
+  // A release store here would need the fence sequence in StoreRelease, which
+  // takes a base register and offset rather than an arbitrary address.
+  ASSERT(memory_order == kRelaxedNonAtomic);
+  sx(value, dest);
+  StoreBarrier(object, value, can_value_be_smi);
+}
+void Assembler::StoreCompressedIntoObject(Register object,
+                                          const Address& dest,
+                                          Register value,
+                                          CanBeSmi can_value_be_smi,
+                                          MemoryOrder memory_order) {
+  StoreIntoObject(object, dest, value, can_value_be_smi, memory_order);
+}
+void Assembler::StoreBarrier(Register object,
+                             Register value,
+                             CanBeSmi can_value_be_smi) {
+  // x.slot = x. The barrier should have been removed at the IL level.
+  ASSERT(object != value);
+  ASSERT(object != RA);
+  ASSERT(value != RA);
+  ASSERT(object != TMP);
+  ASSERT(object != TMP2);
+  ASSERT(value != TMP);
+  ASSERT(value != TMP2);
+
+  // In parallel, test whether
+  //  - object is old and not remembered and value is new, or
+  //  - object is old and value is old and not marked and concurrent marking is
+  //    in progress
+  // If so, call the WriteBarrier stub, which will either add object to the
+  // store buffer (case 1) or add value to the marking stack (case 2).
+  // Compare UntaggedObject::StorePointer.
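+  // Mechanically, the test is branchless up front: the object's tag byte is
+  // shifted right by kBarrierOverlapShift so that (roughly) its old-space
+  // bits line up with the value's new-space/not-marked bits, letting one AND
+  // against the value's tags plus the WRITE_BARRIER_MASK register check both
+  // conditions at once.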
+  Label done;
+  if (can_value_be_smi == kValueCanBeSmi) {
+    BranchIfSmi(value, &done, kNearJump);
+  }
+  lbu(TMP, FieldAddress(object, target::Object::tags_offset()));
+  lbu(TMP2, FieldAddress(value, target::Object::tags_offset()));
+  srli(TMP, TMP, target::UntaggedObject::kBarrierOverlapShift);
+  and_(TMP, TMP, TMP2);
+  and_(TMP, TMP, WRITE_BARRIER_MASK);
+  beqz(TMP, &done, kNearJump);
+
+  Register objectForCall = object;
+  if (value != kWriteBarrierValueReg) {
+    // Unlikely. Only non-graph intrinsics.
+    // TODO(rmacnak): Shuffle registers in intrinsics.
+    if (object != kWriteBarrierValueReg) {
+      PushRegister(kWriteBarrierValueReg);
+    } else {
+      COMPILE_ASSERT(S2 != kWriteBarrierValueReg);
+      COMPILE_ASSERT(S3 != kWriteBarrierValueReg);
+      objectForCall = (value == S2) ? S3 : S2;
+      PushRegisterPair(kWriteBarrierValueReg, objectForCall);
+      mv(objectForCall, object);
+    }
+    mv(kWriteBarrierValueReg, value);
+  }
+
+  // Note this uses TMP as the link register, so RA remains preserved.
+  generate_invoke_write_barrier_wrapper_(objectForCall);
+
+  if (value != kWriteBarrierValueReg) {
+    if (object != kWriteBarrierValueReg) {
+      PopRegister(kWriteBarrierValueReg);
+    } else {
+      PopRegisterPair(kWriteBarrierValueReg, objectForCall);
+    }
+  }
+  Bind(&done);
+}
+void Assembler::StoreIntoArray(Register object,
+                               Register slot,
+                               Register value,
+                               CanBeSmi can_value_be_smi) {
+  sx(value, Address(slot, 0));
+  StoreIntoArrayBarrier(object, slot, value, can_value_be_smi);
+}
+void Assembler::StoreCompressedIntoArray(Register object,
+                                         Register slot,
+                                         Register value,
+                                         CanBeSmi can_value_be_smi) {
+  StoreIntoArray(object, slot, value, can_value_be_smi);
+}
+void Assembler::StoreIntoArrayBarrier(Register object,
+                                      Register slot,
+                                      Register value,
+                                      CanBeSmi can_value_be_smi) {
+  // TODO(riscv): Use RA2 to avoid spilling RA inline?
+  const bool spill_lr = true;
+  ASSERT(object != TMP);
+  ASSERT(object != TMP2);
+  ASSERT(value != TMP);
+  ASSERT(value != TMP2);
+  ASSERT(slot != TMP);
+  ASSERT(slot != TMP2);
+
+  // In parallel, test whether
+  //  - object is old and not remembered and value is new, or
+  //  - object is old and value is old and not marked and concurrent marking is
+  //    in progress
+  // If so, call the WriteBarrier stub, which will either add object to the
+  // store buffer (case 1) or add value to the marking stack (case 2).
+  // Compare UntaggedObject::StorePointer.
+  Label done;
+  if (can_value_be_smi == kValueCanBeSmi) {
+    BranchIfSmi(value, &done, kNearJump);
+  }
+  lbu(TMP, FieldAddress(object, target::Object::tags_offset()));
+  lbu(TMP2, FieldAddress(value, target::Object::tags_offset()));
+  srli(TMP, TMP, target::UntaggedObject::kBarrierOverlapShift);
+  and_(TMP, TMP, TMP2);
+  and_(TMP, TMP, WRITE_BARRIER_MASK);
+  beqz(TMP, &done, kNearJump);
+  if (spill_lr) {
+    PushRegister(RA);
+  }
+  if ((object != kWriteBarrierObjectReg) || (value != kWriteBarrierValueReg) ||
+      (slot != kWriteBarrierSlotReg)) {
+    // Spill and shuffle unimplemented. Currently StoreIntoArray is only used
+    // from StoreIndexInstr, which gets these exact registers from the register
+    // allocator.
+    UNIMPLEMENTED();
+  }
+  generate_invoke_array_write_barrier_();
+  if (spill_lr) {
+    PopRegister(RA);
+  }
+  Bind(&done);
+}
+
+void Assembler::StoreIntoObjectOffset(Register object,
+                                      int32_t offset,
+                                      Register value,
+                                      CanBeSmi can_value_be_smi,
+                                      MemoryOrder memory_order) {
+  if (memory_order == kRelease) {
+    StoreRelease(value, object, offset - kHeapObjectTag);
+  } else {
+    StoreToOffset(value, object, offset - kHeapObjectTag);
+  }
+  StoreBarrier(object, value, can_value_be_smi);
+}
+void Assembler::StoreCompressedIntoObjectOffset(Register object,
+                                                int32_t offset,
+                                                Register value,
+                                                CanBeSmi can_value_be_smi,
+                                                MemoryOrder memory_order) {
+  StoreIntoObjectOffset(object, offset, value, can_value_be_smi, memory_order);
+}
+void Assembler::StoreIntoObjectNoBarrier(Register object,
+                                         const Address& dest,
+                                         Register value,
+                                         MemoryOrder memory_order) {
+  ASSERT(memory_order == kRelaxedNonAtomic);
+  sx(value, dest);
+#if defined(DEBUG)
+  Label done;
+  beq(object, value, &done, kNearJump);
+  BranchIfSmi(value, &done, kNearJump);
+  lbu(TMP, FieldAddress(object, target::Object::tags_offset()));
+  lbu(TMP2, FieldAddress(value, target::Object::tags_offset()));
+  srli(TMP, TMP, target::UntaggedObject::kBarrierOverlapShift);
+  and_(TMP, TMP, TMP2);
+  and_(TMP, TMP, WRITE_BARRIER_MASK);
+  beqz(TMP, &done, kNearJump);
+  Stop("Store buffer update is required");
+  Bind(&done);
+#endif
+}
+void Assembler::StoreCompressedIntoObjectNoBarrier(Register object,
+                                                   const Address& dest,
+                                                   Register value,
+                                                   MemoryOrder memory_order) {
+  StoreIntoObjectNoBarrier(object, dest, value, memory_order);
+}
+void Assembler::StoreIntoObjectOffsetNoBarrier(Register object,
+                                               int32_t offset,
+                                               Register value,
+                                               MemoryOrder memory_order) {
+  if (memory_order == kRelease) {
+    StoreRelease(value, object, offset - kHeapObjectTag);
+  } else {
+    StoreToOffset(value, object, offset - kHeapObjectTag);
+  }
+#if defined(DEBUG)
+  Label done;
+  beq(object, value, &done, kNearJump);
+  BranchIfSmi(value, &done, kNearJump);
+  lbu(TMP, FieldAddress(object, target::Object::tags_offset()));
+  lbu(TMP2, FieldAddress(value, target::Object::tags_offset()));
+  srli(TMP, TMP, target::UntaggedObject::kBarrierOverlapShift);
+  and_(TMP, TMP, TMP2);
+  and_(TMP, TMP, WRITE_BARRIER_MASK);
+  beqz(TMP, &done, kNearJump);
+  Stop("Store buffer update is required");
+  Bind(&done);
+#endif
+}
+void Assembler::StoreCompressedIntoObjectOffsetNoBarrier(
+    Register object,
+    int32_t offset,
+    Register value,
+    MemoryOrder memory_order) {
+  StoreIntoObjectOffsetNoBarrier(object, offset, value, memory_order);
+}
+void Assembler::StoreIntoObjectNoBarrier(Register object,
+                                         const Address& dest,
+                                         const Object& value) {
+  ASSERT(IsOriginalObject(value));
+  ASSERT(IsNotTemporaryScopedHandle(value));
+  // No store buffer update.
+  if (IsSameObject(compiler::NullObject(), value)) {
+    sx(NULL_REG, dest);
+  } else if (target::IsSmi(value) && (target::ToRawSmi(value) == 0)) {
+    sx(ZR, dest);
+  } else {
+    LoadObject(TMP2, value);
+    sx(TMP2, dest);
+  }
+}
+void Assembler::StoreCompressedIntoObjectNoBarrier(Register object,
+                                                   const Address& dest,
+                                                   const Object& value,
+                                                   MemoryOrder memory_order) {
+  UNIMPLEMENTED();
+}
+void Assembler::StoreIntoObjectOffsetNoBarrier(Register object,
+                                               int32_t offset,
+                                               const Object& value,
+                                               MemoryOrder memory_order) {
+  if (memory_order == kRelease) {
+    Register value_reg = TMP2;
+    if (IsSameObject(compiler::NullObject(), value)) {
+      value_reg = NULL_REG;
+    } else if (target::IsSmi(value) && (target::ToRawSmi(value) == 0)) {
+      value_reg = ZR;
+    } else {
+      LoadObject(value_reg, value);
+    }
+    StoreIntoObjectOffsetNoBarrier(object, offset, value_reg, memory_order);
+  } else if (IsITypeImm(offset - kHeapObjectTag)) {
+    StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value);
+  } else {
+    AddImmediate(TMP, object, offset - kHeapObjectTag);
+    StoreIntoObjectNoBarrier(object, Address(TMP), value);
+  }
+}
+void Assembler::StoreCompressedIntoObjectOffsetNoBarrier(
+    Register object,
+    int32_t offset,
+    const Object& value,
+    MemoryOrder memory_order) {
+  UNIMPLEMENTED();
+}
+
+// Stores a non-tagged value into a heap object.
+void Assembler::StoreInternalPointer(Register object,
+                                     const Address& dest,
+                                     Register value) {
+  sx(value, dest);
+}
+
+// Object pool, loading from pool, etc.
+void Assembler::LoadPoolPointer(Register pp) {
+  CheckCodePointer();
+  lx(pp, FieldAddress(CODE_REG, target::Code::object_pool_offset()));
+
+  // When in the PP register, the pool pointer is untagged. When we
+  // push it on the stack with TagAndPushPP it is tagged again. PopAndUntagPP
+  // then untags when restoring from the stack. Keeping PP untagged lets a
+  // pool load be a single lx instruction for any entry whose byte offset
+  // fits in lx's 12-bit signed immediate.
+  subi(pp, pp, kHeapObjectTag);
+  set_constant_pool_allowed(pp == PP);
+}
+
+intptr_t Assembler::FindImmediate(int64_t imm) {
+  UNIMPLEMENTED();
+}
+bool Assembler::CanLoadFromObjectPool(const Object& object) const {
+  ASSERT(IsOriginalObject(object));
+  if (!constant_pool_allowed()) {
+    return false;
+  }
+
+  ASSERT(IsNotTemporaryScopedHandle(object));
+  ASSERT(IsInOldSpace(object));
+  return true;
+}
+void Assembler::LoadNativeEntry(
+    Register dst,
+    const ExternalLabel* label,
+    ObjectPoolBuilderEntry::Patchability patchable) {
+  const intptr_t index =
+      object_pool_builder().FindNativeFunction(label, patchable);
+  LoadWordFromPoolIndex(dst, index);
+}
+void Assembler::LoadIsolate(Register dst) {
+  lx(dst, Address(THR, target::Thread::isolate_offset()));
+}
+void Assembler::LoadIsolateGroup(Register dst) {
+  lx(dst, Address(THR, target::Thread::isolate_group_offset()));
+}
+
+void Assembler::LoadImmediate(Register reg, intx_t imm) {
+  intx_t lo = imm << (XLEN - 12) >> (XLEN - 12);
+  intx_t hi = (imm - lo) << (XLEN - 32) >> (XLEN - 32);
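+  // lo is the low 12 bits of imm, sign-extended; hi is the remainder, so
+  // hi + lo == imm for any 32-bit value. E.g. (illustrative) imm = 0x1fff
+  // gives lo = -1 and hi = 0x2000: lui materializes 0x2000 and addi(w)
+  // adds -1, reconstructing 0x1fff.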
+
+#if XLEN > 32
+  if (!Utils::IsInt(32, imm)) {
+    LoadImmediate(reg, (imm - lo) >> 12);
+    slli(reg, reg, 12);
+    if (lo != 0) {
+      addi(reg, reg, lo);
+    }
+    return;
+  }
+#endif
+
+  if (hi == 0) {
+    addi(reg, ZR, lo);
+  } else {
+    lui(reg, hi);
+    if (lo != 0) {
+#if XLEN == 32
+      addi(reg, reg, lo);
+#else
+      addiw(reg, reg, lo);
+#endif
+    }
+  }
+}
+
+void Assembler::LoadDImmediate(FRegister reg, double immd) {
+  int64_t imm = bit_cast<int64_t, double>(immd);
+  if (imm == 0) {
+#if XLEN >= 64
+    fmvdx(reg, ZR);  // bit_cast uint64_t -> double
+#else
+    fcvtdwu(reg, ZR);  // static_cast uint32_t -> double
+#endif
+  } else {
+    ASSERT(constant_pool_allowed());
+#if XLEN >= 64
+    intptr_t index = object_pool_builder().FindImmediate(imm);
+    intptr_t offset = target::ObjectPool::element_offset(index);
+#else
+    intptr_t lo_index =
+        object_pool_builder().AddImmediate(Utils::Low32Bits(imm));
+    intptr_t hi_index =
+        object_pool_builder().AddImmediate(Utils::High32Bits(imm));
+    ASSERT(lo_index + 1 == hi_index);
+    intptr_t offset = target::ObjectPool::element_offset(lo_index);
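+    // The two adjacent 32-bit pool entries hold the double's low and high
+    // words, so a single fld from the first entry's offset reads all 64
+    // bits (both the pool layout and RISC-V are little-endian).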
+#endif
+    LoadDFromOffset(reg, PP, offset);
+  }
+}
+
+// Load a word from the pool at the given index, using an encoding that
+// InstructionPattern::DecodeLoadWordFromPool can decode.
+//
+// Note: the function never clobbers TMP, TMP2 scratch registers.
+void Assembler::LoadWordFromPoolIndex(Register dst,
+                                      intptr_t index,
+                                      Register pp) {
+  ASSERT((pp != PP) || constant_pool_allowed());
+  ASSERT(dst != pp);
+  const uint32_t offset = target::ObjectPool::element_offset(index);
+  // PP is untagged.
+  intx_t imm = offset;
+  intx_t lo = imm << (XLEN - 12) >> (XLEN - 12);
+  intx_t hi = (imm - lo) << (XLEN - 32) >> (XLEN - 32);
+  if (hi == 0) {
+    lx(dst, Address(pp, lo));
+  } else {
+    lui(dst, hi);
+    add(dst, dst, pp);
+    lx(dst, Address(dst, lo));
+  }
+}
+
+void Assembler::CompareObject(Register reg, const Object& object) {
+  ASSERT(IsOriginalObject(object));
+  if (IsSameObject(compiler::NullObject(), object)) {
+    CompareObjectRegisters(reg, NULL_REG);
+  } else if (target::IsSmi(object)) {
+    CompareImmediate(reg, target::ToRawSmi(object));
+  } else {
+    LoadObject(TMP, object);
+    CompareObjectRegisters(reg, TMP);
+  }
+}
+
+void Assembler::ExtractClassIdFromTags(Register result, Register tags) {
+  ASSERT(target::UntaggedObject::kClassIdTagPos == 16);
+  ASSERT(target::UntaggedObject::kClassIdTagSize == 16);
+#if XLEN == 64
+  srliw(result, tags, target::UntaggedObject::kClassIdTagPos);
+#else
+  srli(result, tags, target::UntaggedObject::kClassIdTagPos);
+#endif
+}
+void Assembler::ExtractInstanceSizeFromTags(Register result, Register tags) {
+  ASSERT(target::UntaggedObject::kSizeTagPos == 8);
+  ASSERT(target::UntaggedObject::kSizeTagSize == 8);
+  srli(result, tags, target::UntaggedObject::kSizeTagPos);
+  andi(result, result, (1 << target::UntaggedObject::kSizeTagSize) - 1);
+  slli(result, result, target::ObjectAlignment::kObjectAlignmentLog2);
+}
+
+void Assembler::LoadClassId(Register result, Register object) {
+  ASSERT(target::UntaggedObject::kClassIdTagPos == 16);
+  ASSERT(target::UntaggedObject::kClassIdTagSize == 16);
+  const intptr_t class_id_offset =
+      target::Object::tags_offset() +
+      target::UntaggedObject::kClassIdTagPos / kBitsPerByte;
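+  // Loading the 16-bit class id with lhu at a byte offset into the tags
+  // word assumes a little-endian target, which RISC-V is.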
+  lhu(result, FieldAddress(object, class_id_offset));
+}
+void Assembler::LoadClassById(Register result, Register class_id) {
+  ASSERT(result != class_id);
+
+  const intptr_t table_offset =
+      target::IsolateGroup::cached_class_table_table_offset();
+
+  LoadIsolateGroup(result);
+  LoadFromOffset(result, result, table_offset);
+  slli(TMP, class_id, target::kWordSizeLog2);
+  add(result, result, TMP);
+  lx(result, Address(result, 0));
+}
+void Assembler::CompareClassId(Register object,
+                               intptr_t class_id,
+                               Register scratch) {
+  ASSERT(scratch != kNoRegister);
+  LoadClassId(scratch, object);
+  CompareImmediate(scratch, class_id);
+}
+// Note: input and output registers must be different.
+void Assembler::LoadClassIdMayBeSmi(Register result, Register object) {
+  ASSERT(result != object);
+  ASSERT(result != TMP2);
+  ASSERT(object != TMP2);
+  li(result, kSmiCid);
+  Label done;
+  BranchIfSmi(object, &done, kNearJump);
+  LoadClassId(result, object);
+  Bind(&done);
+}
+void Assembler::LoadTaggedClassIdMayBeSmi(Register result, Register object) {
+  LoadClassIdMayBeSmi(result, object);
+  SmiTag(result);
+}
+void Assembler::EnsureHasClassIdInDEBUG(intptr_t cid,
+                                        Register src,
+                                        Register scratch,
+                                        bool can_be_null) {
+#if defined(DEBUG)
+  Comment("Check that object in register has cid %" Pd "", cid);
+  Label matches;
+  LoadClassIdMayBeSmi(scratch, src);
+  CompareImmediate(scratch, cid);
+  BranchIf(EQUAL, &matches, Assembler::kNearJump);
+  if (can_be_null) {
+    CompareImmediate(scratch, kNullCid);
+    BranchIf(EQUAL, &matches, Assembler::kNearJump);
+  }
+  trap();
+  Bind(&matches);
+#endif
+}
+
+void Assembler::EnterFrame(intptr_t frame_size) {
+  // N.B. The ordering here is important. We must never write beyond SP or
+  // it can be clobbered by a signal handler.
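+  //
+  // Resulting layout (stack grows down):
+  //   new FP + kWordSize : saved RA
+  //   new FP + 0         : saved caller FP
+  //   new SP = new FP - frame_size, with locals in between.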
+  subi(SP, SP, frame_size + 2 * target::kWordSize);
+  sx(RA, Address(SP, frame_size + 1 * target::kWordSize));
+  sx(FP, Address(SP, frame_size + 0 * target::kWordSize));
+  addi(FP, SP, frame_size + 0 * target::kWordSize);
+}
+void Assembler::LeaveFrame() {
+  // N.B. The ordering here is important. We must never read beyond SP or
+  // it may have already been clobbered by a signal handler.
+  mv(SP, FP);
+  lx(FP, Address(SP, 0 * target::kWordSize));
+  lx(RA, Address(SP, 1 * target::kWordSize));
+  addi(SP, SP, 2 * target::kWordSize);
+}
+
+void Assembler::TransitionGeneratedToNative(Register destination,
+                                            Register new_exit_frame,
+                                            Register new_exit_through_ffi,
+                                            bool enter_safepoint) {
+  // Save exit frame information to enable stack walking.
+  sx(new_exit_frame,
+     Address(THR, target::Thread::top_exit_frame_info_offset()));
+
+  sx(new_exit_through_ffi,
+     Address(THR, target::Thread::exit_through_ffi_offset()));
+  Register tmp = new_exit_through_ffi;
+
+  // Mark that the thread is executing native code.
+  sx(destination, Address(THR, target::Thread::vm_tag_offset()));
+  li(tmp, target::Thread::native_execution_state());
+  sx(tmp, Address(THR, target::Thread::execution_state_offset()));
+
+  if (enter_safepoint) {
+    EnterFullSafepoint(tmp);
+  }
+}
+
+void Assembler::TransitionNativeToGenerated(Register state,
+                                            bool exit_safepoint) {
+  if (exit_safepoint) {
+    ExitFullSafepoint(state);
+  } else {
+#if defined(DEBUG)
+    // Ensure we've already left the safepoint.
+    ASSERT(target::Thread::full_safepoint_state_acquired() != 0);
+    li(state, target::Thread::full_safepoint_state_acquired());
+    lx(RA, Address(THR, target::Thread::safepoint_state_offset()));
+    and_(RA, RA, state);
+    Label ok;
+    beqz(RA, &ok, Assembler::kNearJump);
+    Breakpoint();
+    Bind(&ok);
+#endif
+  }
+
+  // Mark that the thread is executing Dart code.
+  li(state, target::Thread::vm_tag_dart_id());
+  sx(state, Address(THR, target::Thread::vm_tag_offset()));
+  li(state, target::Thread::generated_execution_state());
+  sx(state, Address(THR, target::Thread::execution_state_offset()));
+
+  // Reset exit frame information in Isolate's mutator thread structure.
+  sx(ZR, Address(THR, target::Thread::top_exit_frame_info_offset()));
+  sx(ZR, Address(THR, target::Thread::exit_through_ffi_offset()));
+}
+
+void Assembler::EnterFullSafepoint(Register state) {
+  // We generate the same number of instructions whether or not the slow-path is
+  // forced. This simplifies GenerateJitCallbackTrampolines.
+
+  Register addr = RA;
+  ASSERT(addr != state);
+
+  Label slow_path, done, retry;
+  if (FLAG_use_slow_path) {
+    j(&slow_path, Assembler::kNearJump);
+  }
+
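+  // The lr/sc pair below performs a compare-and-swap of the safepoint state
+  // from "unacquired" to "acquired"; sc writes 0 to its destination register
+  // on success, so beqz exits the retry loop.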
+  addi(addr, THR, target::Thread::safepoint_state_offset());
+  Bind(&retry);
+  lr(state, Address(addr, 0));
+  subi(state, state, target::Thread::full_safepoint_state_unacquired());
+  bnez(state, &slow_path, Assembler::kNearJump);
+
+  li(state, target::Thread::full_safepoint_state_acquired());
+  sc(state, state, Address(addr, 0));
+  beqz(state, &done, Assembler::kNearJump);  // 0 means sc was successful.
+
+  if (!FLAG_use_slow_path) {
+    j(&retry, Assembler::kNearJump);
+  }
+
+  Bind(&slow_path);
+  lx(addr, Address(THR, target::Thread::enter_safepoint_stub_offset()));
+  lx(addr, FieldAddress(addr, target::Code::entry_point_offset()));
+  jalr(addr);
+
+  Bind(&done);
+}
+
+void Assembler::ExitFullSafepoint(Register state) {
+  // We generate the same number of instructions whether or not the slow-path is
+  // forced, for consistency with EnterFullSafepoint.
+  Register addr = RA;
+  ASSERT(addr != state);
+
+  Label slow_path, done, retry;
+  if (FLAG_use_slow_path) {
+    j(&slow_path, Assembler::kNearJump);
+  }
+
+  addi(addr, THR, target::Thread::safepoint_state_offset());
+  Bind(&retry);
+  lr(state, Address(addr, 0));
+  subi(state, state, target::Thread::full_safepoint_state_acquired());
+  bnez(state, &slow_path, Assembler::kNearJump);
+
+  li(state, target::Thread::full_safepoint_state_unacquired());
+  sc(state, state, Address(addr, 0));
+  beqz(state, &done, Assembler::kNearJump);  // 0 means sc was successful.
+
+  if (!FLAG_use_slow_path) {
+    j(&retry, Assembler::kNearJump);
+  }
+
+  Bind(&slow_path);
+  lx(addr, Address(THR, target::Thread::exit_safepoint_stub_offset()));
+  lx(addr, FieldAddress(addr, target::Code::entry_point_offset()));
+  jalr(addr);
+
+  Bind(&done);
+}
+
+void Assembler::CheckCodePointer() {
+#ifdef DEBUG
+  if (!FLAG_check_code_pointer) {
+    return;
+  }
+  Comment("CheckCodePointer");
+  Label cid_ok, instructions_ok;
+  CompareClassId(CODE_REG, kCodeCid, TMP);
+  BranchIf(EQ, &cid_ok, kNearJump);
+  ebreak();
+  Bind(&cid_ok);
+
+  const intptr_t entry_offset =
+      CodeSize() + target::Instructions::HeaderSize() - kHeapObjectTag;
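+  // The auipc/addi below compute the tagged Instructions pointer from the
+  // current PC: PC - CodeSize() is the payload start; subtracting HeaderSize
+  // and adding the heap-object tag yields the pointer stored in Code.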
+  intx_t imm = -entry_offset;
+  intx_t lo = imm << (XLEN - 12) >> (XLEN - 12);
+  intx_t hi = (imm - lo) << (XLEN - 32) >> (XLEN - 32);
+  auipc(TMP, hi);
+  addi(TMP, TMP, lo);
+  lx(TMP2, FieldAddress(CODE_REG, target::Code::saved_instructions_offset()));
+  beq(TMP, TMP2, &instructions_ok, kNearJump);
+  ebreak();
+  Bind(&instructions_ok);
+#endif
+}
+
+void Assembler::RestoreCodePointer() {
+  lx(CODE_REG,
+     Address(FP, target::frame_layout.code_from_fp * target::kWordSize));
+  CheckCodePointer();
+}
+
+// Restores the registers that are pinned to cache frequently used values,
+// e.g. WRITE_BARRIER_MASK and NULL_REG.
+void Assembler::RestorePinnedRegisters() {
+  lx(WRITE_BARRIER_MASK,
+     Address(THR, target::Thread::write_barrier_mask_offset()));
+  lx(NULL_REG, Address(THR, target::Thread::object_null_offset()));
+}
+
+void Assembler::SetupGlobalPoolAndDispatchTable() {
+  ASSERT(FLAG_precompiled_mode);
+  lx(PP, Address(THR, target::Thread::global_object_pool_offset()));
+  subi(PP, PP, kHeapObjectTag);  // Pool in PP is untagged!
+  lx(DISPATCH_TABLE_REG,
+     Address(THR, target::Thread::dispatch_table_array_offset()));
+}
+
+void Assembler::EnterDartFrame(intptr_t frame_size, Register new_pp) {
+  ASSERT(!constant_pool_allowed());
+
+  if (!IsITypeImm(frame_size + 4 * target::kWordSize)) {
+    EnterDartFrame(0, new_pp);
+    AddImmediate(SP, SP, -frame_size);
+    return;
+  }
+
+  // N.B. The ordering here is important. We must never write beyond SP or
+  // it can be clobbered by a signal handler.
+  if (FLAG_precompiled_mode) {
+    subi(SP, SP, frame_size + 2 * target::kWordSize);
+    sx(RA, Address(SP, frame_size + 1 * target::kWordSize));
+    sx(FP, Address(SP, frame_size + 0 * target::kWordSize));
+    addi(FP, SP, frame_size + 0 * target::kWordSize);
+  } else {
+    subi(SP, SP, frame_size + 4 * target::kWordSize);
+    sx(RA, Address(SP, frame_size + 3 * target::kWordSize));
+    sx(FP, Address(SP, frame_size + 2 * target::kWordSize));
+    sx(CODE_REG, Address(SP, frame_size + 1 * target::kWordSize));
+    addi(PP, PP, kHeapObjectTag);
+    sx(PP, Address(SP, frame_size + 0 * target::kWordSize));
+    addi(FP, SP, frame_size + 2 * target::kWordSize);
+    if (new_pp == kNoRegister) {
+      LoadPoolPointer();
+    } else {
+      mv(PP, new_pp);
+    }
+  }
+  set_constant_pool_allowed(true);
+}
+
+// On entry to a function compiled for OSR, the caller's frame pointer, the
+// stack locals, and any copied parameters are already in place.  The frame
+// pointer is already set up.  The PC marker is not correct for the
+// optimized function and there may be extra space for spill slots to
+// allocate. We must also set up the pool pointer for the function.
+void Assembler::EnterOsrFrame(intptr_t extra_size, Register new_pp) {
+  ASSERT(!constant_pool_allowed());
+  Comment("EnterOsrFrame");
+  RestoreCodePointer();
+  LoadPoolPointer();
+
+  if (extra_size > 0) {
+    AddImmediate(SP, -extra_size);
+  }
+}
+
+void Assembler::LeaveDartFrame(RestorePP restore_pp) {
+  // N.B. The ordering here is important. We must never read beyond SP or
+  // it may have already been clobbered by a signal handler.
+  if (!FLAG_precompiled_mode) {
+    if (restore_pp == kRestoreCallerPP) {
+      lx(PP, Address(FP, target::frame_layout.saved_caller_pp_from_fp *
+                             target::kWordSize));
+      subi(PP, PP, kHeapObjectTag);
+    }
+  }
+  set_constant_pool_allowed(false);
+  mv(SP, FP);
+  lx(FP, Address(SP, 0 * target::kWordSize));
+  lx(RA, Address(SP, 1 * target::kWordSize));
+  addi(SP, SP, 2 * target::kWordSize);
+
+  // TODO(riscv): When we know the stack depth, we can avoid updating SP twice.
+}
+
+void Assembler::CallRuntime(const RuntimeEntry& entry,
+                            intptr_t argument_count) {
+  entry.Call(this, argument_count);
+}
+
+void Assembler::EnterCFrame(intptr_t frame_space) {
+  // N.B. The ordering here is important. We must never write beyond SP or
+  // it can be clobbered by a signal handler.
+  subi(SP, SP, frame_space + 2 * target::kWordSize);
+  sx(RA, Address(SP, frame_space + 1 * target::kWordSize));
+  sx(FP, Address(SP, frame_space + 0 * target::kWordSize));
+  addi(FP, SP, frame_space);
+}
+
+void Assembler::LeaveCFrame() {
+  // N.B. The ordering here is important. We must never read beyond SP or
+  // it may have already been clobbered by a signal handler.
+  mv(SP, FP);
+  lx(FP, Address(SP, 0 * target::kWordSize));
+  lx(RA, Address(SP, 1 * target::kWordSize));
+  addi(SP, SP, 2 * target::kWordSize);
+}
+
+// A0: Receiver
+// S5: ICData entry array
+// PP: Caller's PP (preserved)
+void Assembler::MonomorphicCheckedEntryJIT() {
+  has_monomorphic_entry_ = true;
+  const intptr_t saved_far_branch_level = far_branch_level();
+  set_far_branch_level(0);
+  const intptr_t start = CodeSize();
+
+  Label immediate, miss;
+  Bind(&miss);
+  lx(TMP, Address(THR, target::Thread::switchable_call_miss_entry_offset()));
+  jr(TMP);
+
+  Comment("MonomorphicCheckedEntry");
+  ASSERT_EQUAL(CodeSize() - start,
+               target::Instructions::kMonomorphicEntryOffsetJIT);
+
+  Register entries_reg = IC_DATA_REG;  // Contains ICData::entries().
+  const intptr_t cid_offset = target::Array::element_offset(0);
+  const intptr_t count_offset = target::Array::element_offset(1);
+  ASSERT(A1 != PP);
+  ASSERT(A1 != entries_reg);
+  ASSERT(A1 != CODE_REG);
+
+  lx(TMP, FieldAddress(entries_reg, cid_offset));
+  LoadTaggedClassIdMayBeSmi(A1, A0);
+  bne(TMP, A1, &miss, kNearJump);
+
+  lx(TMP, FieldAddress(entries_reg, count_offset));
+  addi(TMP, TMP, target::ToRawSmi(1));
+  sx(TMP, FieldAddress(entries_reg, count_offset));
+
+  li(ARGS_DESC_REG, 0);  // GC-safe for OptimizeInvokedFunction
+
+  // Fall through to unchecked entry.
+  ASSERT_EQUAL(CodeSize() - start,
+               target::Instructions::kPolymorphicEntryOffsetJIT);
+
+  set_far_branch_level(saved_far_branch_level);
+}
+
+// A0 receiver, S5 guarded cid as Smi.
+// Preserves S4 (ARGS_DESC_REG); not required today, but it may be later.
+// PP: Caller's PP (preserved)
+void Assembler::MonomorphicCheckedEntryAOT() {
+  has_monomorphic_entry_ = true;
+  intptr_t saved_far_branch_level = far_branch_level();
+  set_far_branch_level(0);
+
+  const intptr_t start = CodeSize();
+
+  Label immediate, miss;
+  Bind(&miss);
+  lx(TMP, Address(THR, target::Thread::switchable_call_miss_entry_offset()));
+  jr(TMP);
+
+  Comment("MonomorphicCheckedEntry");
+  ASSERT_EQUAL(CodeSize() - start,
+               target::Instructions::kMonomorphicEntryOffsetAOT);
+  LoadClassId(TMP, A0);
+  SmiTag(TMP);
+  bne(S5, TMP, &miss, kNearJump);
+
+  // Fall through to unchecked entry.
+  ASSERT_EQUAL(CodeSize() - start,
+               target::Instructions::kPolymorphicEntryOffsetAOT);
+
+  set_far_branch_level(saved_far_branch_level);
+}
+
+void Assembler::BranchOnMonomorphicCheckedEntryJIT(Label* label) {
+  has_monomorphic_entry_ = true;
+  while (CodeSize() < target::Instructions::kMonomorphicEntryOffsetJIT) {
+    ebreak();
+  }
+  j(label);
+  while (CodeSize() < target::Instructions::kPolymorphicEntryOffsetJIT) {
+    ebreak();
+  }
+}
+
+#ifndef PRODUCT
+void Assembler::MaybeTraceAllocation(intptr_t cid,
+                                     Register temp_reg,
+                                     Label* trace) {
+  ASSERT(cid > 0);
+
+  const intptr_t shared_table_offset =
+      target::IsolateGroup::shared_class_table_offset();
+  const intptr_t table_offset =
+      target::SharedClassTable::class_heap_stats_table_offset();
+  const intptr_t class_offset = target::ClassTable::ClassOffsetFor(cid);
+
+  LoadIsolateGroup(temp_reg);
+  lx(temp_reg, Address(temp_reg, shared_table_offset));
+  lx(temp_reg, Address(temp_reg, table_offset));
+  if (IsITypeImm(class_offset)) {
+    lbu(temp_reg, Address(temp_reg, class_offset));
+  } else {
+    AddImmediate(temp_reg, class_offset);
+    lbu(temp_reg, Address(temp_reg, 0));
+  }
+  bnez(temp_reg, trace);
+}
+#endif  // !PRODUCT
+
+void Assembler::TryAllocateObject(intptr_t cid,
+                                  intptr_t instance_size,
+                                  Label* failure,
+                                  JumpDistance distance,
+                                  Register instance_reg,
+                                  Register temp_reg) {
+  ASSERT(failure != NULL);
+  ASSERT(instance_size != 0);
+  ASSERT(instance_reg != temp_reg);
+  ASSERT(temp_reg != kNoRegister);
+  ASSERT(Utils::IsAligned(instance_size,
+                          target::ObjectAlignment::kObjectAlignment));
+  if (FLAG_inline_alloc &&
+      target::Heap::IsAllocatableInNewSpace(instance_size)) {
+    // If this allocation is traced, the program will jump to the failure
+    // path (i.e. the allocation stub), which will allocate the object and
+    // trace the allocation call site.
+    NOT_IN_PRODUCT(MaybeTraceAllocation(cid, temp_reg, failure));
+
+    lx(instance_reg, Address(THR, target::Thread::top_offset()));
+    lx(temp_reg, Address(THR, target::Thread::end_offset()));
+    // instance_reg: current top (next object start).
+    // temp_reg: heap end
+
+    // TODO(koda): Protect against unsigned overflow here.
+    AddImmediate(instance_reg, instance_size);
+    // instance_reg: potential top (next object start).
+    // Fail if the heap end is unsigned-less-or-equal to the new top.
+    bleu(temp_reg, instance_reg, failure, distance);
+
+    // Successfully allocated the object; now update top to point to the
+    // next object start and initialize the object's header tags.
+    sx(instance_reg, Address(THR, target::Thread::top_offset()));
+    // Move instance_reg back to the start of the object and tag it.
+    AddImmediate(instance_reg, -instance_size + kHeapObjectTag);
+
+    const uword tags = target::MakeTagWordForNewSpaceObject(cid, instance_size);
+    LoadImmediate(temp_reg, tags);
+    StoreToOffset(temp_reg,
+                  FieldAddress(instance_reg, target::Object::tags_offset()));
+  } else {
+    j(failure, distance);
+  }
+}
+
+void Assembler::TryAllocateArray(intptr_t cid,
+                                 intptr_t instance_size,
+                                 Label* failure,
+                                 Register instance,
+                                 Register end_address,
+                                 Register temp1,
+                                 Register temp2) {
+  if (FLAG_inline_alloc &&
+      target::Heap::IsAllocatableInNewSpace(instance_size)) {
+    // If this allocation is traced, the program will jump to the failure
+    // path (i.e. the allocation stub), which will allocate the object and
+    // trace the allocation call site.
+    NOT_IN_PRODUCT(MaybeTraceAllocation(cid, temp1, failure));
+    // Potential new object start.
+    lx(instance, Address(THR, target::Thread::top_offset()));
+    addi(end_address, instance, instance_size);
+    bltu(end_address, instance, failure);  // Fail on unsigned overflow.
+
+    // Check if the allocation fits into the remaining space.
+    // instance: potential new object start.
+    // end_address: potential next object start.
+    lx(temp2, Address(THR, target::Thread::end_offset()));
+    bgeu(end_address, temp2, failure);
+
+    // Successfully allocated the object(s), now update top to point to
+    // next object start and initialize the object.
+    sx(end_address, Address(THR, target::Thread::top_offset()));
+    addi(instance, instance, kHeapObjectTag);
+    NOT_IN_PRODUCT(LoadImmediate(temp2, instance_size));
+
+    // Initialize the tags.
+    // instance: new object start as a tagged pointer.
+    const uword tags = target::MakeTagWordForNewSpaceObject(cid, instance_size);
+    LoadImmediate(temp2, tags);
+    sx(temp2, FieldAddress(instance, target::Object::tags_offset()));
+  } else {
+    j(failure);
+  }
+}
+
+void Assembler::GenerateUnRelocatedPcRelativeCall(intptr_t offset_into_target) {
+  // JAL only has a +/- 1MB range. AUIPC+JALR has a +/- 2GB range.
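+  // The hi/lo split below mirrors LoadImmediate: auipc adds the upper 20
+  // bits to the current PC, and the jalr immediate supplies the
+  // sign-extended low 12 bits.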
+  intx_t imm = offset_into_target;
+  intx_t lo = imm << (XLEN - 12) >> (XLEN - 12);
+  intx_t hi = (imm - lo) << (XLEN - 32) >> (XLEN - 32);
+  auipc(RA, hi);
+  jalr_fixed(RA, RA, lo);
+}
+
+void Assembler::GenerateUnRelocatedPcRelativeTailCall(
+    intptr_t offset_into_target) {
+  // J only has a +/- 1MB range. AUIPC+JR has a +/- 2GB range.
+  intx_t imm = offset_into_target;
+  intx_t lo = imm << (XLEN - 12) >> (XLEN - 12);
+  intx_t hi = (imm - lo) << (XLEN - 32) >> (XLEN - 32);
+  auipc(TMP, hi);
+  jalr_fixed(ZR, TMP, lo);
+}
+
+static OperandSize OperandSizeFor(intptr_t cid) {
+  switch (cid) {
+    case kArrayCid:
+    case kImmutableArrayCid:
+    case kTypeArgumentsCid:
+      return kObjectBytes;
+    case kOneByteStringCid:
+    case kExternalOneByteStringCid:
+      return kByte;
+    case kTwoByteStringCid:
+    case kExternalTwoByteStringCid:
+      return kTwoBytes;
+    case kTypedDataInt8ArrayCid:
+      return kByte;
+    case kTypedDataUint8ArrayCid:
+    case kTypedDataUint8ClampedArrayCid:
+    case kExternalTypedDataUint8ArrayCid:
+    case kExternalTypedDataUint8ClampedArrayCid:
+      return kUnsignedByte;
+    case kTypedDataInt16ArrayCid:
+      return kTwoBytes;
+    case kTypedDataUint16ArrayCid:
+      return kUnsignedTwoBytes;
+    case kTypedDataInt32ArrayCid:
+      return kFourBytes;
+    case kTypedDataUint32ArrayCid:
+      return kUnsignedFourBytes;
+    case kTypedDataInt64ArrayCid:
+    case kTypedDataUint64ArrayCid:
+      return kDWord;
+    case kTypedDataFloat32ArrayCid:
+      return kSWord;
+    case kTypedDataFloat64ArrayCid:
+      return kDWord;
+    case kTypedDataFloat32x4ArrayCid:
+    case kTypedDataInt32x4ArrayCid:
+    case kTypedDataFloat64x2ArrayCid:
+      return kQWord;
+    case kTypedDataInt8ArrayViewCid:
+      UNREACHABLE();
+      return kByte;
+    default:
+      UNREACHABLE();
+      return kByte;
+  }
+}
+
+Address Assembler::ElementAddressForIntIndex(bool is_external,
+                                             intptr_t cid,
+                                             intptr_t index_scale,
+                                             Register array,
+                                             intptr_t index) const {
+  const int64_t offset = index * index_scale + HeapDataOffset(is_external, cid);
+  ASSERT(Utils::IsInt(32, offset));
+  return Address(array, static_cast<int32_t>(offset));
+}
+void Assembler::ComputeElementAddressForIntIndex(Register address,
+                                                 bool is_external,
+                                                 intptr_t cid,
+                                                 intptr_t index_scale,
+                                                 Register array,
+                                                 intptr_t index) {
+  const int64_t offset = index * index_scale + HeapDataOffset(is_external, cid);
+  AddImmediate(address, array, offset);
+}
+
+Address Assembler::ElementAddressForRegIndex(bool is_external,
+                                             intptr_t cid,
+                                             intptr_t index_scale,
+                                             bool index_unboxed,
+                                             Register array,
+                                             Register index,
+                                             Register temp) {
+  return ElementAddressForRegIndexWithSize(is_external, cid,
+                                           OperandSizeFor(cid), index_scale,
+                                           index_unboxed, array, index, temp);
+}
+
+Address Assembler::ElementAddressForRegIndexWithSize(bool is_external,
+                                                     intptr_t cid,
+                                                     OperandSize size,
+                                                     intptr_t index_scale,
+                                                     bool index_unboxed,
+                                                     Register array,
+                                                     Register index,
+                                                     Register temp) {
+  // Unless unboxed, the index is expected to be smi-tagged (i.e. LSL 1)
+  // for all arrays.
+  const intptr_t boxing_shift = index_unboxed ? 0 : -kSmiTagShift;
+  const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) + boxing_shift;
+  const int32_t offset = HeapDataOffset(is_external, cid);
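+  // E.g. (illustrative): a boxed Smi index into a Float64List has
+  // index_scale == 8 and boxing_shift == -1, so shift == 2 and the tagged
+  // index (2 * i) is shifted left by 2 to give the byte offset 8 * i.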
+  ASSERT(array != temp);
+  ASSERT(index != temp);
+  if (shift == 0) {
+    add(temp, array, index);
+  } else if (shift < 0) {
+    ASSERT(shift == -1);
+    srai(temp, index, 1);
+    add(temp, array, temp);
+  } else {
+    slli(temp, index, shift);
+    add(temp, array, temp);
+  }
+  return Address(temp, offset);
+}
+
+void Assembler::ComputeElementAddressForRegIndex(Register address,
+                                                 bool is_external,
+                                                 intptr_t cid,
+                                                 intptr_t index_scale,
+                                                 bool index_unboxed,
+                                                 Register array,
+                                                 Register index) {
+  // Unless unboxed, the index is expected to be smi-tagged (i.e. LSL 1)
+  // for all arrays.
+  const intptr_t boxing_shift = index_unboxed ? 0 : -kSmiTagShift;
+  const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) + boxing_shift;
+  const int32_t offset = HeapDataOffset(is_external, cid);
+  ASSERT(array != address);
+  ASSERT(index != address);
+  if (shift == 0) {
+    add(address, array, index);
+  } else if (shift < 0) {
+    ASSERT(shift == -1);
+    srai(address, index, 1);
+    add(address, array, address);
+  } else {
+    slli(address, index, shift);
+    add(address, array, address);
+  }
+  if (offset != 0) {
+    AddImmediate(address, address, offset);
+  }
+}
+
+void Assembler::LoadStaticFieldAddress(Register address,
+                                       Register field,
+                                       Register scratch) {
+  LoadCompressedSmiFieldFromOffset(
+      scratch, field, target::Field::host_offset_or_field_id_offset());
+  const intptr_t field_table_offset =
+      compiler::target::Thread::field_table_values_offset();
+  LoadMemoryValue(address, THR, static_cast<int32_t>(field_table_offset));
+  slli(scratch, scratch, target::kWordSizeLog2 - kSmiTagShift);
+  add(address, address, scratch);
+}
+
+void Assembler::LoadCompressedFieldAddressForRegOffset(
+    Register address,
+    Register instance,
+    Register offset_in_words_as_smi) {
+  slli(TMP, offset_in_words_as_smi,
+       target::kCompressedWordSizeLog2 - kSmiTagShift);
+  add(TMP, TMP, instance);
+  addi(address, TMP, -kHeapObjectTag);
+}
+
+void Assembler::LoadFieldAddressForRegOffset(Register address,
+                                             Register instance,
+                                             Register offset_in_words_as_smi) {
+  slli(TMP, offset_in_words_as_smi, target::kWordSizeLog2 - kSmiTagShift);
+  add(TMP, TMP, instance);
+  addi(address, TMP, -kHeapObjectTag);
+}
+
+// Note: the function never clobbers TMP, TMP2 scratch registers.
+void Assembler::LoadObjectHelper(Register dst,
+                                 const Object& object,
+                                 bool is_unique) {
+  ASSERT(IsOriginalObject(object));
+  // `is_unique == true` effectively means the object has to be patchable
+  // (even if the object is null).
+  if (!is_unique) {
+    if (IsSameObject(compiler::NullObject(), object)) {
+      mv(dst, NULL_REG);
+      return;
+    }
+    if (IsSameObject(CastHandle<Object>(compiler::TrueObject()), object)) {
+      addi(dst, NULL_REG, kTrueOffsetFromNull);
+      return;
+    }
+    if (IsSameObject(CastHandle<Object>(compiler::FalseObject()), object)) {
+      addi(dst, NULL_REG, kFalseOffsetFromNull);
+      return;
+    }
+    word offset = 0;
+    if (target::CanLoadFromThread(object, &offset)) {
+      lx(dst, Address(THR, offset));
+      return;
+    }
+    if (target::IsSmi(object)) {
+      intx_t raw_smi = target::ToRawSmi(object);
+      if (IsITypeImm(raw_smi)) {
+        li(dst, raw_smi);
+        return;
+      }
+      if (IsUTypeImm(raw_smi)) {
+        lui(dst, raw_smi);
+        return;
+      }
+    }
+  }
+  if (CanLoadFromObjectPool(object)) {
+    const intptr_t index =
+        is_unique ? object_pool_builder().AddObject(
+                        object, ObjectPoolBuilderEntry::kPatchable)
+                  : object_pool_builder().FindObject(
+                        object, ObjectPoolBuilderEntry::kNotPatchable);
+    LoadWordFromPoolIndex(dst, index);
+    return;
+  }
+  ASSERT(target::IsSmi(object));
+  LoadImmediate(dst, target::ToRawSmi(object));
+}
+
+// Note: the leaf call sequence uses some ABI callee-saved registers as
+// scratch, so they must be preserved manually.
+void Assembler::EnterCallRuntimeFrame(intptr_t frame_size, bool is_leaf) {
+  // N.B. The ordering here is important. We must never write beyond SP or
+  // it can be clobbered by a signal handler.
+  if (FLAG_precompiled_mode) {
+    subi(SP, SP, 2 * target::kWordSize + frame_size);
+    sx(RA, Address(SP, 1 * target::kWordSize + frame_size));
+    sx(FP, Address(SP, 0 * target::kWordSize + frame_size));
+    addi(FP, SP, 0 * target::kWordSize + frame_size);
+  } else {
+    subi(SP, SP, 4 * target::kWordSize + frame_size);
+    sx(RA, Address(SP, 3 * target::kWordSize + frame_size));
+    sx(FP, Address(SP, 2 * target::kWordSize + frame_size));
+    sx(CODE_REG, Address(SP, 1 * target::kWordSize + frame_size));
+    addi(PP, PP, kHeapObjectTag);
+    sx(PP, Address(SP, 0 * target::kWordSize + frame_size));
+    addi(FP, SP, 2 * target::kWordSize + frame_size);
+  }
+
+  const RegisterSet kVolatileRegisterSet(kAbiVolatileCpuRegs,
+                                         kAbiVolatileFpuRegs);
+  PushRegisters(kVolatileRegisterSet);
+
+  if (!is_leaf) {  // Leaf calling sequence aligns the stack itself.
+    ReserveAlignedFrameSpace(0);
+  }
+}
+
+void Assembler::LeaveCallRuntimeFrame(bool is_leaf) {
+  const RegisterSet kVolatileRegisterSet(kAbiVolatileCpuRegs,
+                                         kAbiVolatileFpuRegs);
+
+  const intptr_t kPushedRegistersSize =
+      kVolatileRegisterSet.CpuRegisterCount() * target::kWordSize +
+      kVolatileRegisterSet.FpuRegisterCount() * kFpuRegisterSize +
+      (target::frame_layout.dart_fixed_frame_size - 2) *
+          target::kWordSize;  // From EnterStubFrame (excluding PC / FP)
+
+  subi(SP, FP, kPushedRegistersSize);
+
+  PopRegisters(kVolatileRegisterSet);
+
+  LeaveStubFrame();
+}
+
+void Assembler::CallRuntimeScope::Call(intptr_t argument_count) {
+  assembler_->CallRuntime(entry_, argument_count);
+}
+
+Assembler::CallRuntimeScope::~CallRuntimeScope() {
+  if (preserve_registers_) {
+    assembler_->LeaveCallRuntimeFrame(entry_.is_leaf());
+    if (restore_code_reg_) {
+      assembler_->PopRegister(CODE_REG);
+    }
+  }
+}
+
+Assembler::CallRuntimeScope::CallRuntimeScope(Assembler* assembler,
+                                              const RuntimeEntry& entry,
+                                              intptr_t frame_size,
+                                              bool preserve_registers,
+                                              const Address* caller)
+    : assembler_(assembler),
+      entry_(entry),
+      preserve_registers_(preserve_registers),
+      restore_code_reg_(caller != nullptr) {
+  if (preserve_registers_) {
+    if (caller != nullptr) {
+      assembler_->PushRegister(CODE_REG);
+      assembler_->lx(CODE_REG, *caller);
+    }
+    assembler_->EnterCallRuntimeFrame(frame_size, entry.is_leaf());
+  }
+}
+
+void Assembler::AddImmediateBranchOverflow(Register rd,
+                                           Register rs1,
+                                           intx_t imm,
+                                           Label* overflow) {
+  ASSERT(rd != TMP2);
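+  // After rd = rs1 + imm, signed overflow occurred iff the result moved
+  // against imm's sign: rd < rs1 for imm > 0, or rd > rs1 for imm < 0. When
+  // rd aliases rs1, the original value is first saved in TMP2.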
+  if (rd == rs1) {
+    mv(TMP2, rs1);
+    AddImmediate(rd, rs1, imm);
+    if (imm > 0) {
+      blt(rd, TMP2, overflow);
+    } else if (imm < 0) {
+      bgt(rd, TMP2, overflow);
+    }
+  } else {
+    AddImmediate(rd, rs1, imm);
+    if (imm > 0) {
+      blt(rd, rs1, overflow);
+    } else if (imm < 0) {
+      bgt(rd, rs1, overflow);
+    }
+  }
+}
+void Assembler::SubtractImmediateBranchOverflow(Register rd,
+                                                Register rs1,
+                                                intx_t imm,
+                                                Label* overflow) {
+  // TODO(riscv): Incorrect for MIN_INTX_T!
+  AddImmediateBranchOverflow(rd, rs1, -imm, overflow);
+}
+void Assembler::MultiplyImmediateBranchOverflow(Register rd,
+                                                Register rs1,
+                                                intx_t imm,
+                                                Label* overflow) {
+  ASSERT(rd != TMP);
+  ASSERT(rd != TMP2);
+  ASSERT(rs1 != TMP);
+  ASSERT(rs1 != TMP2);
+
+  LoadImmediate(TMP2, imm);
+  // Macro-op fusion: when both products are needed, the recommended sequence
+  // is mulh first.
+  mulh(TMP, rs1, TMP2);
+  mul(rd, rs1, TMP2);
+  srai(TMP2, rd, XLEN - 1);
+  bne(TMP, TMP2, overflow);
+}
+void Assembler::AddBranchOverflow(Register rd,
+                                  Register rs1,
+                                  Register rs2,
+                                  Label* overflow) {
+  ASSERT(rd != TMP);
+  ASSERT(rd != TMP2);
+  ASSERT(rs1 != TMP);
+  ASSERT(rs1 != TMP2);
+  ASSERT(rs2 != TMP);
+  ASSERT(rs2 != TMP2);
+
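+  // Signed overflow occurred iff (rs2 < 0) != (rd < rs1): adding a
+  // non-negative value must not make the result smaller than rs1, and
+  // adding a negative value must. The cases below differ only in which
+  // inputs survive aliasing with rd.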
+  if ((rd == rs1) && (rd == rs2)) {
+    ASSERT(rs1 == rs2);
+    mv(TMP, rs1);
+    add(rd, rs1, rs2);   // rs1, rs2 destroyed
+    xor_(TMP, TMP, rd);  // TMP negative if sign changed
+    bltz(TMP, overflow);
+  } else if (rs1 == rs2) {
+    ASSERT(rd != rs1);
+    ASSERT(rd != rs2);
+    add(rd, rs1, rs2);
+    xor_(TMP, rd, rs1);  // TMP negative if sign changed
+    bltz(TMP, overflow);
+  } else if (rd == rs1) {
+    ASSERT(rs1 != rs2);
+    slti(TMP, rs1, 0);
+    add(rd, rs1, rs2);  // rs1 destroyed
+    slt(TMP2, rd, rs2);
+    bne(TMP, TMP2, overflow);
+  } else if (rd == rs2) {
+    ASSERT(rs1 != rs2);
+    slti(TMP, rs2, 0);
+    add(rd, rs1, rs2);  // rs2 destroyed
+    slt(TMP2, rd, rs1);
+    bne(TMP, TMP2, overflow);
+  } else {
+    add(rd, rs1, rs2);
+    slti(TMP, rs2, 0);
+    slt(TMP2, rd, rs1);
+    bne(TMP, TMP2, overflow);
+  }
+}
+
+void Assembler::SubtractBranchOverflow(Register rd,
+                                       Register rs1,
+                                       Register rs2,
+                                       Label* overflow) {
+  ASSERT(rd != TMP);
+  ASSERT(rd != TMP2);
+  ASSERT(rs1 != TMP);
+  ASSERT(rs1 != TMP2);
+  ASSERT(rs2 != TMP);
+  ASSERT(rs2 != TMP2);
+
+  if ((rd == rs1) && (rd == rs2)) {
+    ASSERT(rs1 == rs2);
+    mv(TMP, rs1);
+    sub(rd, rs1, rs2);   // rs1, rs2 destroyed
+    xor_(TMP, TMP, rd);  // TMP negative if sign changed
+    bltz(TMP, overflow);
+  } else if (rs1 == rs2) {
+    ASSERT(rd != rs1);
+    ASSERT(rd != rs2);
+    sub(rd, rs1, rs2);
+    xor_(TMP, rd, rs1);  // TMP negative if sign changed
+    bltz(TMP, overflow);
+  } else if (rd == rs1) {
+    ASSERT(rs1 != rs2);
+    slti(TMP, rs1, 0);
+    sub(rd, rs1, rs2);  // rs1 destroyed
+    slt(TMP2, rd, rs2);
+    bne(TMP, TMP2, overflow);
+  } else if (rd == rs2) {
+    ASSERT(rs1 != rs2);
+    slti(TMP, rs2, 0);
+    sub(rd, rs1, rs2);  // rs2 destroyed
+    slt(TMP2, rd, rs1);
+    bne(TMP, TMP2, overflow);
+  } else {
+    sub(rd, rs1, rs2);
+    slti(TMP, rs2, 0);
+    slt(TMP2, rs1, rd);
+    bne(TMP, TMP2, overflow);
+  }
+}
+
+void Assembler::MultiplyBranchOverflow(Register rd,
+                                       Register rs1,
+                                       Register rs2,
+                                       Label* overflow) {
+  ASSERT(rd != TMP);
+  ASSERT(rd != TMP2);
+  ASSERT(rs1 != TMP);
+  ASSERT(rs1 != TMP2);
+  ASSERT(rs2 != TMP);
+  ASSERT(rs2 != TMP2);
+
+  // Macro-op fusion: when both products are needed, the recommended sequence
+  // is mulh first.
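+  // The full product fits in XLEN bits iff the high half (mulh) equals the
+  // sign extension of the low half (the mul result shifted arithmetically
+  // right by XLEN - 1), hence the bne to the overflow label.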
+  mulh(TMP, rs1, rs2);
+  mul(rd, rs1, rs2);
+  srai(TMP2, rd, XLEN - 1);
+  bne(TMP, TMP2, overflow);
+}
+
+}  // namespace compiler
+
+}  // namespace dart
+
+#endif  // defined(TARGET_ARCH_RISCV)
diff --git a/runtime/vm/compiler/assembler/assembler_riscv.h b/runtime/vm/compiler/assembler/assembler_riscv.h
new file mode 100644
index 0000000..c96fa8b
--- /dev/null
+++ b/runtime/vm/compiler/assembler/assembler_riscv.h
@@ -0,0 +1,1457 @@
+// Copyright (c) 2017, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#ifndef RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_RISCV_H_
+#define RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_RISCV_H_
+
+#if defined(DART_PRECOMPILED_RUNTIME)
+#error "AOT runtime should not use compiler sources (including header files)"
+#endif  // defined(DART_PRECOMPILED_RUNTIME)
+
+#ifndef RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_H_
+#error Do not include assembler_riscv.h directly; use assembler.h instead.
+#endif
+
+#include <functional>
+
+#include "platform/assert.h"
+#include "platform/utils.h"
+#include "vm/class_id.h"
+#include "vm/compiler/assembler/assembler_base.h"
+#include "vm/constants.h"
+#include "vm/hash_map.h"
+#include "vm/simulator.h"
+
+namespace dart {
+
+// Forward declarations.
+class FlowGraphCompiler;
+class RuntimeEntry;
+class RegisterSet;
+
+namespace compiler {
+
+class Address {
+ public:
+  Address(Register base, intptr_t offset) : base_(base), offset_(offset) {}
+  explicit Address(Register base) : base_(base), offset_(0) {}
+
+  // Prevent implicit conversion of Register to intptr_t.
+  Address(Register base, Register index) = delete;
+
+  Register base() const { return base_; }
+  intptr_t offset() const { return offset_; }
+
+ private:
+  Register base_;
+  intptr_t offset_;
+};
+
+class FieldAddress : public Address {
+ public:
+  FieldAddress(Register base, intptr_t offset)
+      : Address(base, offset - kHeapObjectTag) {}
+
+  // Prevent implicit conversion of Register to intptr_t.
+  FieldAddress(Register base, Register index) = delete;
+};
+
+// All functions produce exactly one instruction.
+class MicroAssembler : public AssemblerBase {
+ public:
+  MicroAssembler(ObjectPoolBuilder* object_pool_builder,
+                 intptr_t far_branch_level,
+                 ExtensionSet extensions);
+  ~MicroAssembler();
+
+#if defined(TESTING)
+  void SetExtensions(ExtensionSet extensions) { extensions_ = extensions; }
+#endif
+  bool Supports(Extension extension) const {
+    return extensions_.Includes(extension);
+  }
+  bool Supports(ExtensionSet extensions) const {
+    return extensions_.IncludesAll(extensions);
+  }
+
+  intptr_t far_branch_level() const { return far_branch_level_; }
+  void set_far_branch_level(intptr_t level) { far_branch_level_ = level; }
+  void Bind(Label* label);
+
+  // ==== RV32I ====
+  void lui(Register rd, intptr_t imm);
+  void lui_fixed(Register rd, intptr_t imm);
+  void auipc(Register rd, intptr_t imm);
+
+  void jal(Register rd, Label* label, JumpDistance d = kFarJump);
+  void jal(Label* label, JumpDistance d = kFarJump) { jal(RA, label, d); }
+  void j(Label* label, JumpDistance d = kFarJump) { jal(ZR, label, d); }
+
+  void jalr(Register rd, Register rs1, intptr_t offset = 0);
+  void jalr_fixed(Register rd, Register rs1, intptr_t offset);
+  void jalr(Register rs1, intptr_t offset = 0) { jalr(RA, rs1, offset); }
+  void jr(Register rs1, intptr_t offset = 0) { jalr(ZR, rs1, offset); }
+  void ret() { jalr(ZR, RA, 0); }
+
+  void beq(Register rs1, Register rs2, Label* l, JumpDistance d = kFarJump);
+  void bne(Register rs1, Register rs2, Label* l, JumpDistance d = kFarJump);
+  void blt(Register rs1, Register rs2, Label* l, JumpDistance d = kFarJump);
+  void bge(Register rs1, Register rs2, Label* l, JumpDistance d = kFarJump);
+  void bgt(Register rs1, Register rs2, Label* l, JumpDistance d = kFarJump) {
+    blt(rs2, rs1, l, d);
+  }
+  void ble(Register rs1, Register rs2, Label* l, JumpDistance d = kFarJump) {
+    bge(rs2, rs1, l, d);
+  }
+  void bltu(Register rs1, Register rs2, Label* l, JumpDistance d = kFarJump);
+  void bgeu(Register rs1, Register rs2, Label* l, JumpDistance d = kFarJump);
+  void bgtu(Register rs1, Register rs2, Label* l, JumpDistance d = kFarJump) {
+    bltu(rs2, rs1, l, d);
+  }
+  void bleu(Register rs1, Register rs2, Label* l, JumpDistance d = kFarJump) {
+    bgeu(rs2, rs1, l, d);
+  }
+
+  void lb(Register rd, Address addr);
+  void lh(Register rd, Address addr);
+  void lw(Register rd, Address addr);
+  void lbu(Register rd, Address addr);
+  void lhu(Register rd, Address addr);
+
+  void sb(Register rs2, Address addr);
+  void sh(Register rs2, Address addr);
+  void sw(Register rs2, Address addr);
+
+  void addi(Register rd, Register rs1, intptr_t imm);
+  void subi(Register rd, Register rs1, intptr_t imm) { addi(rd, rs1, -imm); }
+  void slti(Register rd, Register rs1, intptr_t imm);
+  void sltiu(Register rd, Register rs1, intptr_t imm);
+  void xori(Register rd, Register rs1, intptr_t imm);
+  void ori(Register rd, Register rs1, intptr_t imm);
+  void andi(Register rd, Register rs1, intptr_t imm);
+  void slli(Register rd, Register rs1, intptr_t shamt);
+  void srli(Register rd, Register rs1, intptr_t shamt);
+  void srai(Register rd, Register rs1, intptr_t shamt);
+
+  void add(Register rd, Register rs1, Register rs2);
+  void sub(Register rd, Register rs1, Register rs2);
+  void sll(Register rd, Register rs1, Register rs2);
+  void slt(Register rd, Register rs1, Register rs2);
+  void sltu(Register rd, Register rs1, Register rs2);
+  void xor_(Register rd, Register rs1, Register rs2);
+  void srl(Register rd, Register rs1, Register rs2);
+  void sra(Register rd, Register rs1, Register rs2);
+  void or_(Register rd, Register rs1, Register rs2);
+  void and_(Register rd, Register rs1, Register rs2);
+
+  void fence(HartEffects predecessor, HartEffects successor);
+  void fence() { fence(kAll, kAll); }
+  void fencei();
+  void ecall();
+  void ebreak();  // Causes SIGTRAP(5).
+
+  void csrrw(Register rd, uint32_t csr, Register rs1);
+  void csrrs(Register rd, uint32_t csr, Register rs1);
+  void csrrc(Register rd, uint32_t csr, Register rs1);
+  void csrr(Register rd, uint32_t csr) { csrrs(rd, csr, ZR); }
+  void csrw(uint32_t csr, Register rs) { csrrw(ZR, csr, rs); }
+  void csrs(uint32_t csr, Register rs) { csrrs(ZR, csr, rs); }
+  void csrc(uint32_t csr, Register rs) { csrrc(ZR, csr, rs); }
+  void csrrwi(Register rd, uint32_t csr, uint32_t imm);
+  void csrrsi(Register rd, uint32_t csr, uint32_t imm);
+  void csrrci(Register rd, uint32_t csr, uint32_t imm);
+  void csrwi(uint32_t csr, uint32_t imm) { csrrwi(ZR, csr, imm); }
+  void csrsi(uint32_t csr, uint32_t imm) { csrrsi(ZR, csr, imm); }
+  void csrci(uint32_t csr, uint32_t imm) { csrrci(ZR, csr, imm); }
+
+  void trap();  // Permanently reserved illegal instruction; causes SIGILL(4).
+
+  void nop() { addi(ZR, ZR, 0); }
+  void li(Register rd, intptr_t imm) { addi(rd, ZR, imm); }
+  void mv(Register rd, Register rs) { addi(rd, rs, 0); }
+  void not_(Register rd, Register rs) { xori(rd, rs, -1); }
+  void neg(Register rd, Register rs) { sub(rd, ZR, rs); }
+
+  void snez(Register rd, Register rs) { sltu(rd, ZR, rs); }
+  void seqz(Register rd, Register rs) { sltiu(rd, rs, 1); }
+  void sltz(Register rd, Register rs) { slt(rd, rs, ZR); }
+  void sgtz(Register rd, Register rs) { slt(rd, ZR, rs); }
+
+  void beqz(Register rs, Label* label, JumpDistance distance = kFarJump) {
+    beq(rs, ZR, label, distance);
+  }
+  void bnez(Register rs, Label* label, JumpDistance distance = kFarJump) {
+    bne(rs, ZR, label, distance);
+  }
+  void blez(Register rs, Label* label, JumpDistance distance = kFarJump) {
+    bge(ZR, rs, label, distance);
+  }
+  void bgez(Register rs, Label* label, JumpDistance distance = kFarJump) {
+    bge(rs, ZR, label, distance);
+  }
+  void bltz(Register rs, Label* label, JumpDistance distance = kFarJump) {
+    blt(rs, ZR, label, distance);
+  }
+  void bgtz(Register rs, Label* label, JumpDistance distance = kFarJump) {
+    blt(ZR, rs, label, distance);
+  }
+
+  // ==== RV64I ====
+#if XLEN >= 64
+  void lwu(Register rd, Address addr);
+  void ld(Register rd, Address addr);
+
+  void sd(Register rs2, Address addr);
+
+  void addiw(Register rd, Register rs1, intptr_t imm);
+  void subiw(Register rd, Register rs1, intptr_t imm) { addiw(rd, rs1, -imm); }
+  void slliw(Register rd, Register rs1, intptr_t shamt);
+  void srliw(Register rd, Register rs1, intptr_t shamt);
+  void sraiw(Register rd, Register rs1, intptr_t shamt);
+
+  void addw(Register rd, Register rs1, Register rs2);
+  void subw(Register rd, Register rs1, Register rs2);
+  void sllw(Register rd, Register rs1, Register rs2);
+  void srlw(Register rd, Register rs1, Register rs2);
+  void sraw(Register rd, Register rs1, Register rs2);
+
+  void negw(Register rd, Register rs) { subw(rd, ZR, rs); }
+  void sextw(Register rd, Register rs) { addiw(rd, rs, 0); }
+#endif  // XLEN >= 64
+
+#if XLEN == 32
+  void lx(Register rd, Address addr) { lw(rd, addr); }
+  void sx(Register rs2, Address addr) { sw(rs2, addr); }
+#elif XLEN == 64
+  void lx(Register rd, Address addr) { ld(rd, addr); }
+  void sx(Register rs2, Address addr) { sd(rs2, addr); }
+#elif XLEN == 128
+  void lx(Register rd, Address addr) { lq(rd, addr); }
+  void sx(Register rs2, Address addr) { sq(rs2, addr); }
+#endif
+
+  // ==== RV32M ====
+  void mul(Register rd, Register rs1, Register rs2);
+  void mulh(Register rd, Register rs1, Register rs2);
+  void mulhsu(Register rd, Register rs1, Register rs2);
+  void mulhu(Register rd, Register rs1, Register rs2);
+  void div(Register rd, Register rs1, Register rs2);
+  void divu(Register rd, Register rs1, Register rs2);
+  void rem(Register rd, Register rs1, Register rs2);
+  void remu(Register rd, Register rs1, Register rs2);
+
+  // ==== RV64M ====
+#if XLEN >= 64
+  void mulw(Register rd, Register rs1, Register rs2);
+  void divw(Register rd, Register rs1, Register rs2);
+  void divuw(Register rd, Register rs1, Register rs2);
+  void remw(Register rd, Register rs1, Register rs2);
+  void remuw(Register rd, Register rs1, Register rs2);
+#endif  // XLEN >= 64
+
+  // ==== RV32A ====
+  void lrw(Register rd,
+           Address addr,
+           std::memory_order order = std::memory_order_relaxed);
+  void scw(Register rd,
+           Register rs2,
+           Address addr,
+           std::memory_order order = std::memory_order_relaxed);
+  void amoswapw(Register rd,
+                Register rs2,
+                Address addr,
+                std::memory_order order = std::memory_order_relaxed);
+  void amoaddw(Register rd,
+               Register rs2,
+               Address addr,
+               std::memory_order order = std::memory_order_relaxed);
+  void amoxorw(Register rd,
+               Register rs2,
+               Address addr,
+               std::memory_order order = std::memory_order_relaxed);
+  void amoandw(Register rd,
+               Register rs2,
+               Address addr,
+               std::memory_order order = std::memory_order_relaxed);
+  void amoorw(Register rd,
+              Register rs2,
+              Address addr,
+              std::memory_order order = std::memory_order_relaxed);
+  void amominw(Register rd,
+               Register rs2,
+               Address addr,
+               std::memory_order order = std::memory_order_relaxed);
+  void amomaxw(Register rd,
+               Register rs2,
+               Address addr,
+               std::memory_order order = std::memory_order_relaxed);
+  void amominuw(Register rd,
+                Register rs2,
+                Address addr,
+                std::memory_order order = std::memory_order_relaxed);
+  void amomaxuw(Register rd,
+                Register rs2,
+                Address addr,
+                std::memory_order order = std::memory_order_relaxed);
+
+  // ==== RV64A ====
+#if XLEN >= 64
+  void lrd(Register rd,
+           Address addr,
+           std::memory_order order = std::memory_order_relaxed);
+  void scd(Register rd,
+           Register rs2,
+           Address addr,
+           std::memory_order order = std::memory_order_relaxed);
+  void amoswapd(Register rd,
+                Register rs2,
+                Address addr,
+                std::memory_order order = std::memory_order_relaxed);
+  void amoaddd(Register rd,
+               Register rs2,
+               Address addr,
+               std::memory_order order = std::memory_order_relaxed);
+  void amoxord(Register rd,
+               Register rs2,
+               Address addr,
+               std::memory_order order = std::memory_order_relaxed);
+  void amoandd(Register rd,
+               Register rs2,
+               Address addr,
+               std::memory_order order = std::memory_order_relaxed);
+  void amoord(Register rd,
+              Register rs2,
+              Address addr,
+              std::memory_order order = std::memory_order_relaxed);
+  void amomind(Register rd,
+               Register rs2,
+               Address addr,
+               std::memory_order order = std::memory_order_relaxed);
+  void amomaxd(Register rd,
+               Register rs2,
+               Address addr,
+               std::memory_order order = std::memory_order_relaxed);
+  void amominud(Register rd,
+                Register rs2,
+                Address addr,
+                std::memory_order order = std::memory_order_relaxed);
+  void amomaxud(Register rd,
+                Register rs2,
+                Address addr,
+                std::memory_order order = std::memory_order_relaxed);
+#endif  // XLEN >= 64
+
+#if XLEN == 32
+  void lr(Register rd,
+          Address addr,
+          std::memory_order order = std::memory_order_relaxed) {
+    lrw(rd, addr, order);
+  }
+  void sc(Register rd,
+          Register rs2,
+          Address addr,
+          std::memory_order order = std::memory_order_relaxed) {
+    scw(rd, rs2, addr, order);
+  }
+#elif XLEN == 64
+  void lr(Register rd,
+          Address addr,
+          std::memory_order order = std::memory_order_relaxed) {
+    lrd(rd, addr, order);
+  }
+  void sc(Register rd,
+          Register rs2,
+          Address addr,
+          std::memory_order order = std::memory_order_relaxed) {
+    scd(rd, rs2, addr, order);
+  }
+#elif XLEN == 128
+  void lr(Register rd,
+          Address addr,
+          std::memory_order order = std::memory_order_relaxed) {
+    lrq(rd, addr, order);
+  }
+  void sc(Register rd,
+          Register rs2,
+          Address addr,
+          std::memory_order order = std::memory_order_relaxed) {
+    scq(rd, rs2, addr, order);
+  }
+#endif
+
+  // ==== RV32F ====
+  void flw(FRegister rd, Address addr);
+  void fsw(FRegister rs2, Address addr);
+  // rd := (rs1 * rs2) + rs3
+  void fmadds(FRegister rd,
+              FRegister rs1,
+              FRegister rs2,
+              FRegister rs3,
+              RoundingMode rounding = RNE);
+  // rd := (rs1 * rs2) - rs3
+  void fmsubs(FRegister rd,
+              FRegister rs1,
+              FRegister rs2,
+              FRegister rs3,
+              RoundingMode rounding = RNE);
+  // rd := -(rs1 * rs2) + rs3
+  void fnmsubs(FRegister rd,
+               FRegister rs1,
+               FRegister rs2,
+               FRegister rs3,
+               RoundingMode rounding = RNE);
+  // rd := -(rs1 * rs2) - rs3
+  void fnmadds(FRegister rd,
+               FRegister rs1,
+               FRegister rs2,
+               FRegister rs3,
+               RoundingMode rounding = RNE);
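+  // Note: per the RISC-V F extension, FNMADD negates the entire sum
+  // (-(rs1 * rs2) - rs3) while FNMSUB negates only the product term, as the
+  // comments above reflect.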
+  void fadds(FRegister rd,
+             FRegister rs1,
+             FRegister rs2,
+             RoundingMode rounding = RNE);
+  void fsubs(FRegister rd,
+             FRegister rs1,
+             FRegister rs2,
+             RoundingMode rounding = RNE);
+  void fmuls(FRegister rd,
+             FRegister rs1,
+             FRegister rs2,
+             RoundingMode rounding = RNE);
+  void fdivs(FRegister rd,
+             FRegister rs1,
+             FRegister rs2,
+             RoundingMode rounding = RNE);
+  void fsqrts(FRegister rd, FRegister rs1, RoundingMode rounding = RNE);
+  void fsgnjs(FRegister rd, FRegister rs1, FRegister rs2);
+  void fsgnjns(FRegister rd, FRegister rs1, FRegister rs2);
+  void fsgnjxs(FRegister rd, FRegister rs1, FRegister rs2);
+  void fmins(FRegister rd, FRegister rs1, FRegister rs2);
+  void fmaxs(FRegister rd, FRegister rs1, FRegister rs2);
+  void feqs(Register rd, FRegister rs1, FRegister rs2);
+  void flts(Register rd, FRegister rs1, FRegister rs2);
+  void fles(Register rd, FRegister rs1, FRegister rs2);
+  void fclasss(Register rd, FRegister rs1);
+  // int32_t <- float
+  void fcvtws(Register rd, FRegister rs1, RoundingMode rounding = RNE);
+  // uint32_t <- float
+  void fcvtwus(Register rd, FRegister rs1, RoundingMode rounding = RNE);
+  // float <- int32_t
+  void fcvtsw(FRegister rd, Register rs1, RoundingMode rounding = RNE);
+  // float <- uint32_t
+  void fcvtswu(FRegister rd, Register rs1, RoundingMode rounding = RNE);
+
+  void fmvs(FRegister rd, FRegister rs) { fsgnjs(rd, rs, rs); }
+  void fabss(FRegister rd, FRegister rs) { fsgnjxs(rd, rs, rs); }
+  void fnegs(FRegister rd, FRegister rs) { fsgnjns(rd, rs, rs); }
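+  // The pseudo-instructions above use the standard RISC-V sign-injection
+  // idiom with both source operands equal: fsgnjs copies the sign (a move),
+  // fsgnjxs XORs the sign bit with itself (clearing it, i.e. absolute value),
+  // and fsgnjns injects the negated sign (negation).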
+
+  // xlen <--bit_cast-- float
+  void fmvxw(Register rd, FRegister rs1);
+  // float <--bit_cast-- xlen
+  void fmvwx(FRegister rd, Register rs1);
+
+  // ==== RV64F ====
+#if XLEN >= 64
+  // int64_t <- float
+  void fcvtls(Register rd, FRegister rs1, RoundingMode rounding = RNE);
+  // uint64_t <- float
+  void fcvtlus(Register rd, FRegister rs1, RoundingMode rounding = RNE);
+  // float <- int64_t
+  void fcvtsl(FRegister rd, Register rs1, RoundingMode rounding = RNE);
+  // float <- uint64_t
+  void fcvtslu(FRegister rd, Register rs1, RoundingMode rounding = RNE);
+#endif  // XLEN >= 64
+
+  // ==== RV32D ====
+  void fld(FRegister rd, Address addr);
+  void fsd(FRegister rs2, Address addr);
+  // rd := (rs1 * rs2) + rs3
+  void fmaddd(FRegister rd,
+              FRegister rs1,
+              FRegister rs2,
+              FRegister rs3,
+              RoundingMode rounding = RNE);
+  // rd := (rs1 * rs2) - rs3
+  void fmsubd(FRegister rd,
+              FRegister rs1,
+              FRegister rs2,
+              FRegister rs3,
+              RoundingMode rounding = RNE);
+  // rd := -(rs1 * rs2) + rs3
+  void fnmsubd(FRegister rd,
+               FRegister rs1,
+               FRegister rs2,
+               FRegister rs3,
+               RoundingMode rounding = RNE);
+  // rd := -(rs1 * rs2) - rs3
+  void fnmaddd(FRegister rd,
+               FRegister rs1,
+               FRegister rs2,
+               FRegister rs3,
+               RoundingMode rounding = RNE);
+  void faddd(FRegister rd,
+             FRegister rs1,
+             FRegister rs2,
+             RoundingMode rounding = RNE);
+  void fsubd(FRegister rd,
+             FRegister rs1,
+             FRegister rs2,
+             RoundingMode rounding = RNE);
+  void fmuld(FRegister rd,
+             FRegister rs1,
+             FRegister rs2,
+             RoundingMode rounding = RNE);
+  void fdivd(FRegister rd,
+             FRegister rs1,
+             FRegister rs2,
+             RoundingMode rounding = RNE);
+  void fsqrtd(FRegister rd, FRegister rs1, RoundingMode rounding = RNE);
+  void fsgnjd(FRegister rd, FRegister rs1, FRegister rs2);
+  void fsgnjnd(FRegister rd, FRegister rs1, FRegister rs2);
+  void fsgnjxd(FRegister rd, FRegister rs1, FRegister rs2);
+  void fmind(FRegister rd, FRegister rs1, FRegister rs2);
+  void fmaxd(FRegister rd, FRegister rs1, FRegister rs2);
+  void fcvtsd(FRegister rd, FRegister rs1, RoundingMode rounding = RNE);
+  void fcvtds(FRegister rd, FRegister rs1);
+  void feqd(Register rd, FRegister rs1, FRegister rs2);
+  void fltd(Register rd, FRegister rs1, FRegister rs2);
+  void fled(Register rd, FRegister rs1, FRegister rs2);
+  void fclassd(Register rd, FRegister rs1);
+  // int32_t <- double
+  void fcvtwd(Register rd, FRegister rs1, RoundingMode rounding = RNE);
+  // uint32_t <- double
+  void fcvtwud(Register rd, FRegister rs1, RoundingMode rounding = RNE);
+  // double <- int32_t
+  void fcvtdw(FRegister rd, Register rs1, RoundingMode rounding = RNE);
+  // double <- uint32_t
+  void fcvtdwu(FRegister rd, Register rs1, RoundingMode rounding = RNE);
+
+  void fmvd(FRegister rd, FRegister rs) { fsgnjd(rd, rs, rs); }
+  void fabsd(FRegister rd, FRegister rs) { fsgnjxd(rd, rs, rs); }
+  void fnegd(FRegister rd, FRegister rs) { fsgnjnd(rd, rs, rs); }
+
+  // ==== RV64D ====
+#if XLEN >= 64
+  // int64_t <- double
+  void fcvtld(Register rd, FRegister rs1, RoundingMode rounding = RNE);
+  // uint64_t <- double
+  void fcvtlud(Register rd, FRegister rs1, RoundingMode rounding = RNE);
+  // xlen <--bit_cast-- double
+  void fmvxd(Register rd, FRegister rs1);
+  // double <- int64_t
+  void fcvtdl(FRegister rd, Register rs1, RoundingMode rounding = RNE);
+  // double <- uint64_t
+  void fcvtdlu(FRegister rd, Register rs1, RoundingMode rounding = RNE);
+  // double <--bit_cast-- xlen
+  void fmvdx(FRegister rd, Register rs1);
+#endif  // XLEN >= 64
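+
+  // Illustrative sketch: fmvxd/fmvdx move raw bit patterns with no
+  // conversion, so a double whose bits are already in an integer register
+  // can be materialized directly (illustrative registers, XLEN == 64
+  // assumed):
+  //
+  //   LoadImmediate(T0, bit_cast<int64_t>(3.0));  // the integer bits of 3.0
+  //   fmvdx(FA0, T0);                             // FA0 := 3.0, bits unchanged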
+
+  // ==== Dart Simulator Debugging ====
+  void SimulatorPrintObject(Register rs1);
+
+ private:
+  // ==== RV32/64C ====
+  void c_lwsp(Register rd, Address addr);
+#if XLEN == 32
+  void c_flwsp(FRegister rd, Address addr);
+#else
+  void c_ldsp(Register rd, Address addr);
+#endif
+  void c_fldsp(FRegister rd, Address addr);
+
+  void c_swsp(Register rs2, Address addr);
+#if XLEN == 32
+  void c_fswsp(FRegister rs2, Address addr);
+#else
+  void c_sdsp(Register rs2, Address addr);
+#endif
+  void c_fsdsp(FRegister rs2, Address addr);
+
+  void c_lw(Register rd, Address addr);
+  void c_ld(Register rd, Address addr);
+  void c_flw(FRegister rd, Address addr);
+  void c_fld(FRegister rd, Address addr);
+
+  void c_sw(Register rs2, Address addr);
+  void c_sd(Register rs2, Address addr);
+  void c_fsw(FRegister rs2, Address addr);
+  void c_fsd(FRegister rs2, Address addr);
+
+  void c_j(Label* label);
+#if XLEN == 32
+  void c_jal(Label* label);
+#endif
+  void c_jr(Register rs1);
+  void c_jalr(Register rs1);
+
+  void c_beqz(Register rs1p, Label* label);
+  void c_bnez(Register rs1p, Label* label);
+
+  void c_li(Register rd, intptr_t imm);
+  void c_lui(Register rd, uintptr_t imm);
+
+  void c_addi(Register rd, Register rs1, intptr_t imm);
+#if XLEN >= 64
+  void c_addiw(Register rd, Register rs1, intptr_t imm);
+#endif
+  void c_addi16sp(Register rd, Register rs1, intptr_t imm);
+  void c_addi4spn(Register rdp, Register rs1, intptr_t imm);
+
+  void c_slli(Register rd, Register rs1, intptr_t imm);
+  void c_srli(Register rd, Register rs1, intptr_t imm);
+  void c_srai(Register rd, Register rs1, intptr_t imm);
+  void c_andi(Register rd, Register rs1, intptr_t imm);
+
+  void c_mv(Register rd, Register rs2);
+
+  void c_add(Register rd, Register rs1, Register rs2);
+  void c_and(Register rd, Register rs1, Register rs2);
+  void c_or(Register rd, Register rs1, Register rs2);
+  void c_xor(Register rd, Register rs1, Register rs2);
+  void c_sub(Register rd, Register rs1, Register rs2);
+#if XLEN >= 64
+  void c_addw(Register rd, Register rs1, Register rs2);
+  void c_subw(Register rd, Register rs1, Register rs2);
+#endif
+
+  void c_nop();
+  void c_ebreak();
+
+ protected:
+  intptr_t UpdateCBOffset(intptr_t branch_position, intptr_t new_offset);
+  intptr_t UpdateCJOffset(intptr_t branch_position, intptr_t new_offset);
+  intptr_t UpdateBOffset(intptr_t branch_position, intptr_t new_offset);
+  intptr_t UpdateJOffset(intptr_t branch_position, intptr_t new_offset);
+  intptr_t UpdateFarOffset(intptr_t branch_position, intptr_t new_offset);
+
+  intptr_t Position() { return buffer_.Size(); }
+  void EmitBranch(Register rs1,
+                  Register rs2,
+                  Label* label,
+                  Funct3 func,
+                  JumpDistance distance);
+  void EmitJump(Register rd, Label* label, Opcode op, JumpDistance distance);
+  void EmitCBranch(Register rs1p, Label* label, COpcode op);
+  void EmitCJump(Label* label, COpcode op);
+
+  void EmitRType(Funct5 funct5,
+                 std::memory_order order,
+                 Register rs2,
+                 Register rs1,
+                 Funct3 funct3,
+                 Register rd,
+                 Opcode opcode);
+  void EmitRType(Funct7 funct7,
+                 Register rs2,
+                 Register rs1,
+                 Funct3 funct3,
+                 Register rd,
+                 Opcode opcode);
+  void EmitRType(Funct7 funct7,
+                 FRegister rs2,
+                 FRegister rs1,
+                 Funct3 funct3,
+                 FRegister rd,
+                 Opcode opcode);
+  void EmitRType(Funct7 funct7,
+                 FRegister rs2,
+                 FRegister rs1,
+                 RoundingMode round,
+                 FRegister rd,
+                 Opcode opcode);
+  void EmitRType(Funct7 funct7,
+                 FRegister rs2,
+                 Register rs1,
+                 RoundingMode round,
+                 FRegister rd,
+                 Opcode opcode);
+  void EmitRType(Funct7 funct7,
+                 FRegister rs2,
+                 Register rs1,
+                 Funct3 funct3,
+                 FRegister rd,
+                 Opcode opcode);
+  void EmitRType(Funct7 funct7,
+                 FRegister rs2,
+                 FRegister rs1,
+                 Funct3 funct3,
+                 Register rd,
+                 Opcode opcode);
+  void EmitRType(Funct7 funct7,
+                 FRegister rs2,
+                 FRegister rs1,
+                 RoundingMode round,
+                 Register rd,
+                 Opcode opcode);
+  void EmitRType(Funct7 funct7,
+                 intptr_t shamt,
+                 Register rs1,
+                 Funct3 funct3,
+                 Register rd,
+                 Opcode opcode);
+
+  void EmitR4Type(FRegister rs3,
+                  Funct2 funct2,
+                  FRegister rs2,
+                  FRegister rs1,
+                  RoundingMode round,
+                  FRegister rd,
+                  Opcode opcode);
+
+  void EmitIType(intptr_t imm,
+                 Register rs1,
+                 Funct3 funct3,
+                 Register rd,
+                 Opcode opcode);
+  void EmitIType(intptr_t imm,
+                 Register rs1,
+                 Funct3 funct3,
+                 FRegister rd,
+                 Opcode opcode);
+
+  void EmitSType(intptr_t imm,
+                 Register rs2,
+                 Register rs1,
+                 Funct3 funct3,
+                 Opcode opcode);
+  void EmitSType(intptr_t imm,
+                 FRegister rs2,
+                 Register rs1,
+                 Funct3 funct3,
+                 Opcode opcode);
+
+  void EmitBType(intptr_t imm,
+                 Register rs2,
+                 Register rs1,
+                 Funct3 funct3,
+                 Opcode opcode);
+
+  void EmitUType(intptr_t imm, Register rd, Opcode opcode);
+
+  void EmitJType(intptr_t imm, Register rd, Opcode opcode);
+
+  uint16_t Read16(intptr_t position) {
+    return buffer_.Load<uint16_t>(position);
+  }
+  void Write16(intptr_t position, uint16_t instruction) {
+    return buffer_.Store<uint16_t>(position, instruction);
+  }
+  void Emit16(uint16_t instruction) {
+    AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+    buffer_.Emit<uint16_t>(instruction);
+  }
+  uint32_t Read32(intptr_t position) {
+    return buffer_.Load<uint32_t>(position);
+  }
+  void Write32(intptr_t position, uint32_t instruction) {
+    return buffer_.Store<uint32_t>(position, instruction);
+  }
+
+ public:
+  void Emit32(uint32_t instruction) {
+    AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+    buffer_.Emit<uint32_t>(instruction);
+  }
+  void Emit64(uint64_t instruction) {
+    AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+    buffer_.Emit<uint64_t>(instruction);
+  }
+
+ protected:
+  ExtensionSet extensions_;
+  intptr_t far_branch_level_;
+};
+
+class Assembler : public MicroAssembler {
+ public:
+  explicit Assembler(ObjectPoolBuilder* object_pool_builder,
+                     intptr_t far_branch_level = 0);
+  ~Assembler() {}
+
+  void PushRegister(Register r);
+  void PopRegister(Register r);
+
+  void PushRegisterPair(Register r0, Register r1);
+  void PopRegisterPair(Register r0, Register r1);
+
+  void PushRegisters(const RegisterSet& registers);
+  void PopRegisters(const RegisterSet& registers);
+
+  // Push all registers which are callee-saved according to the RISC-V ABI.
+  void PushNativeCalleeSavedRegisters();
+
+  // Pop all registers which are callee-saved according to the RISC-V ABI.
+  void PopNativeCalleeSavedRegisters();
+
+  void ExtendValue(Register rd, Register rn, OperandSize sz) override;
+  void ExtendAndSmiTagValue(Register rd,
+                            Register rn,
+                            OperandSize sz = kWordBytes) override;
+
+  void Drop(intptr_t stack_elements) {
+    ASSERT(stack_elements >= 0);
+    if (stack_elements > 0) {
+      AddImmediate(SP, SP, stack_elements * target::kWordSize);
+    }
+  }
+
+  void Bind(Label* label) { MicroAssembler::Bind(label); }
+  // Unconditional jump to a given label.
+  void Jump(Label* label, JumpDistance distance = kFarJump) {
+    j(label, distance);
+  }
+  // Unconditional jump to a given address in memory. Clobbers TMP.
+  void Jump(const Address& address);
+
+  void LoadField(Register dst, const FieldAddress& address) override;
+  void LoadCompressedField(Register dst, const FieldAddress& address) override {
+    LoadCompressed(dst, address);
+  }
+  void LoadMemoryValue(Register dst, Register base, int32_t offset) {
+    LoadFromOffset(dst, base, offset, kWordBytes);
+  }
+  void StoreMemoryValue(Register src, Register base, int32_t offset) {
+    StoreToOffset(src, base, offset, kWordBytes);
+  }
+
+#if defined(USING_THREAD_SANITIZER)
+  void TsanLoadAcquire(Register addr);
+  void TsanStoreRelease(Register addr);
+#endif
+
+  void LoadAcquire(Register dst, Register address, int32_t offset = 0);
+
+  void LoadAcquireCompressed(Register dst,
+                             Register address,
+                             int32_t offset = 0);
+
+  void StoreRelease(Register src,
+                    Register address,
+                    int32_t offset = 0) override;
+
+  void StoreReleaseCompressed(Register src,
+                              Register address,
+                              int32_t offset = 0);
+
+  void CompareWithFieldValue(Register value, FieldAddress address) {
+    CompareWithMemoryValue(value, address);
+  }
+  void CompareWithCompressedFieldFromOffset(Register value,
+                                            Register base,
+                                            int32_t offset);
+
+  void CompareWithMemoryValue(Register value,
+                              Address address,
+                              OperandSize sz = kWordBytes);
+
+  void CompareFunctionTypeNullabilityWith(Register type, int8_t value) override;
+  void CompareTypeNullabilityWith(Register type, int8_t value) override;
+
+  // Debugging and bringup support.
+  void Breakpoint() override { trap(); }
+
+  void SetPrologueOffset() {
+    if (prologue_offset_ == -1) {
+      prologue_offset_ = CodeSize();
+    }
+  }
+
+  void ReserveAlignedFrameSpace(intptr_t frame_space);
+
+  // In debug mode, this generates code to check that:
+  //   FP + kExitLinkSlotFromEntryFp == SP
+  // and triggers a breakpoint otherwise.
+  void EmitEntryFrameVerification();
+
+  // Instruction pattern from entrypoint is used in Dart frame prologs
+  // to set up the frame and save a PC which can be used to figure out the
+  // RawInstruction object corresponding to the code running in the frame.
+  static const intptr_t kEntryPointToPcMarkerOffset = 0;
+  static intptr_t EntryPointToPcMarkerOffset() {
+    return kEntryPointToPcMarkerOffset;
+  }
+
+  // On some other platforms, we draw a distinction between safe and unsafe
+  // smis.
+  static bool IsSafe(const Object& object) { return true; }
+  static bool IsSafeSmi(const Object& object) { return target::IsSmi(object); }
+
+  void CompareRegisters(Register rn, Register rm);
+  void CompareObjectRegisters(Register rn, Register rm);
+  void TestRegisters(Register rn, Register rm);
+
+  // Branches to the given label if the condition holds.
+  void BranchIf(Condition condition,
+                Label* label,
+                JumpDistance distance = kFarJump);
+  void BranchIfZero(Register rn,
+                    Label* label,
+                    JumpDistance distance = kFarJump);
+  void SetIf(Condition condition, Register rd);
+
+  void SmiUntag(Register reg) { SmiUntag(reg, reg); }
+  void SmiUntag(Register dst, Register src) { srai(dst, src, kSmiTagSize); }
+  void SmiTag(Register reg) override { SmiTag(reg, reg); }
+  void SmiTag(Register dst, Register src) { slli(dst, src, kSmiTagSize); }
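+  // Smis use a low tag bit of 0, so tagging above is a plain left shift by
+  // kSmiTagSize, and untagging is an arithmetic right shift, which also
+  // sign-extends negative Smis correctly.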
+
+  void BranchIfNotSmi(Register reg,
+                      Label* label,
+                      JumpDistance distance = kFarJump);
+  void BranchIfSmi(Register reg,
+                   Label* label,
+                   JumpDistance distance = kFarJump);
+
+  void Jump(const Code& code,
+            Register pp,
+            ObjectPoolBuilderEntry::Patchability patchable =
+                ObjectPoolBuilderEntry::kNotPatchable);
+
+  void JumpAndLink(const Code& code,
+                   ObjectPoolBuilderEntry::Patchability patchable =
+                       ObjectPoolBuilderEntry::kNotPatchable,
+                   CodeEntryKind entry_kind = CodeEntryKind::kNormal);
+
+  void JumpAndLinkPatchable(const Code& code,
+                            CodeEntryKind entry_kind = CodeEntryKind::kNormal) {
+    JumpAndLink(code, ObjectPoolBuilderEntry::kPatchable, entry_kind);
+  }
+  void JumpAndLinkToRuntime();
+
+  // Emit a call that shares its object pool entries with other calls
+  // that have the same equivalence marker.
+  void JumpAndLinkWithEquivalence(
+      const Code& code,
+      const Object& equivalence,
+      CodeEntryKind entry_kind = CodeEntryKind::kNormal);
+
+  void Call(Address target);
+  void Call(const Code& code) { JumpAndLink(code); }
+
+  void CallCFunction(Address target) { Call(target); }
+
+  void AddImmediate(Register dest, intx_t imm) {
+    AddImmediate(dest, dest, imm);
+  }
+
+  // Macros accepting a pp Register argument may attempt to load values from
+  // the object pool when possible. Unless you are sure that the untagged
+  // object pool pointer is in another register, or that it is not available
+  // at all, PP should be passed for pp. `dest` can be TMP or TMP2; `rn`
+  // cannot be TMP2.
+  void AddImmediate(Register dest,
+                    Register rn,
+                    intx_t imm,
+                    OperandSize sz = kWordBytes);
+  void AndImmediate(Register rd,
+                    Register rn,
+                    intx_t imm,
+                    OperandSize sz = kWordBytes);
+  void OrImmediate(Register rd,
+                   Register rn,
+                   intx_t imm,
+                   OperandSize sz = kWordBytes);
+  void XorImmediate(Register rd,
+                    Register rn,
+                    intx_t imm,
+                    OperandSize sz = kWordBytes);
+  void TestImmediate(Register rn, intx_t imm, OperandSize sz = kWordBytes);
+  void CompareImmediate(Register rn, intx_t imm, OperandSize sz = kWordBytes);
+
+  void LoadFromOffset(Register dest,
+                      const Address& address,
+                      OperandSize sz = kWordBytes) override;
+  void LoadFromOffset(Register dest,
+                      Register base,
+                      int32_t offset,
+                      OperandSize sz = kWordBytes);
+  void LoadFieldFromOffset(Register dest,
+                           Register base,
+                           int32_t offset,
+                           OperandSize sz = kWordBytes) override {
+    LoadFromOffset(dest, base, offset - kHeapObjectTag, sz);
+  }
+  void LoadCompressedFieldFromOffset(Register dest,
+                                     Register base,
+                                     int32_t offset) override {
+    LoadCompressedFromOffset(dest, base, offset - kHeapObjectTag);
+  }
+  void LoadCompressedSmiFieldFromOffset(Register dest,
+                                        Register base,
+                                        int32_t offset) {
+    LoadCompressedSmiFromOffset(dest, base, offset - kHeapObjectTag);
+  }
+  // For loading indexed payloads out of tagged objects like Arrays. If the
+  // payload objects are word-sized, use TIMES_HALF_WORD_SIZE if the contents of
+  // [index] is a Smi, otherwise TIMES_WORD_SIZE if unboxed.
+  void LoadIndexedPayload(Register dest,
+                          Register base,
+                          int32_t payload_offset,
+                          Register index,
+                          ScaleFactor scale,
+                          OperandSize sz = kWordBytes);
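+  // A usage sketch (illustrative registers; payload_offset stands for the
+  // appropriate data offset): load word-sized element [T1] of the array in
+  // A0, where T1 holds a Smi index:
+  //
+  //   LoadIndexedPayload(T0, A0, payload_offset, T1, TIMES_HALF_WORD_SIZE);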
+  void LoadIndexedCompressed(Register dest,
+                             Register base,
+                             int32_t offset,
+                             Register index);
+  void LoadSFromOffset(FRegister dest, Register base, int32_t offset);
+  void LoadDFromOffset(FRegister dest, Register base, int32_t offset);
+  void LoadDFieldFromOffset(FRegister dest, Register base, int32_t offset) {
+    LoadDFromOffset(dest, base, offset - kHeapObjectTag);
+  }
+
+  void LoadFromStack(Register dst, intptr_t depth);
+  void StoreToStack(Register src, intptr_t depth);
+  void CompareToStack(Register src, intptr_t depth);
+
+  void StoreToOffset(Register src,
+                     const Address& address,
+                     OperandSize sz = kWordBytes) override;
+  void StoreToOffset(Register src,
+                     Register base,
+                     int32_t offset,
+                     OperandSize sz = kWordBytes);
+  void StoreFieldToOffset(Register src,
+                          Register base,
+                          int32_t offset,
+                          OperandSize sz = kWordBytes) {
+    StoreToOffset(src, base, offset - kHeapObjectTag, sz);
+  }
+  void StoreSToOffset(FRegister src, Register base, int32_t offset);
+  void StoreDToOffset(FRegister src, Register base, int32_t offset);
+  void StoreDFieldToOffset(FRegister src, Register base, int32_t offset) {
+    StoreDToOffset(src, base, offset - kHeapObjectTag);
+  }
+
+  void LoadUnboxedDouble(FpuRegister dst, Register base, int32_t offset);
+  void StoreUnboxedDouble(FpuRegister src, Register base, int32_t offset);
+  void MoveUnboxedDouble(FpuRegister dst, FpuRegister src);
+
+  void LoadCompressed(Register dest, const Address& slot);
+  void LoadCompressedFromOffset(Register dest, Register base, int32_t offset);
+  void LoadCompressedSmi(Register dest, const Address& slot);
+  void LoadCompressedSmiFromOffset(Register dest,
+                                   Register base,
+                                   int32_t offset);
+
+  // Store into a heap object and apply the generational and incremental write
+  // barriers. All stores into heap objects must pass through this function or,
+  // if the value can be proven either Smi or old-and-premarked, its NoBarrier
+  // variants.
+  // Preserves object and value registers.
+  void StoreIntoObject(Register object,
+                       const Address& dest,
+                       Register value,
+                       CanBeSmi can_value_be_smi = kValueCanBeSmi,
+                       MemoryOrder memory_order = kRelaxedNonAtomic) override;
+  void StoreCompressedIntoObject(
+      Register object,
+      const Address& dest,
+      Register value,
+      CanBeSmi can_value_be_smi = kValueCanBeSmi,
+      MemoryOrder memory_order = kRelaxedNonAtomic) override;
+  void StoreBarrier(Register object, Register value, CanBeSmi can_value_be_smi);
+  void StoreIntoArray(Register object,
+                      Register slot,
+                      Register value,
+                      CanBeSmi can_value_be_smi = kValueCanBeSmi);
+  void StoreCompressedIntoArray(Register object,
+                                Register slot,
+                                Register value,
+                                CanBeSmi can_value_be_smi = kValueCanBeSmi);
+  void StoreIntoArrayBarrier(Register object,
+                             Register slot,
+                             Register value,
+                             CanBeSmi can_value_be_smi);
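+
+  // A usage sketch, mirroring the StoreIntoObject test in
+  // assembler_riscv_test.cc: store a possibly-new value into a field of a
+  // possibly-old object and let the barrier filter decide:
+  //
+  //   __ StoreIntoObject(A1,
+  //                      FieldAddress(A1, GrowableObjectArray::data_offset()),
+  //                      A0);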
+
+  void StoreIntoObjectOffset(Register object,
+                             int32_t offset,
+                             Register value,
+                             CanBeSmi can_value_be_smi = kValueCanBeSmi,
+                             MemoryOrder memory_order = kRelaxedNonAtomic);
+  void StoreCompressedIntoObjectOffset(
+      Register object,
+      int32_t offset,
+      Register value,
+      CanBeSmi can_value_be_smi = kValueCanBeSmi,
+      MemoryOrder memory_order = kRelaxedNonAtomic);
+  void StoreIntoObjectNoBarrier(
+      Register object,
+      const Address& dest,
+      Register value,
+      MemoryOrder memory_order = kRelaxedNonAtomic) override;
+  void StoreCompressedIntoObjectNoBarrier(
+      Register object,
+      const Address& dest,
+      Register value,
+      MemoryOrder memory_order = kRelaxedNonAtomic) override;
+  void StoreIntoObjectOffsetNoBarrier(
+      Register object,
+      int32_t offset,
+      Register value,
+      MemoryOrder memory_order = kRelaxedNonAtomic);
+  void StoreCompressedIntoObjectOffsetNoBarrier(
+      Register object,
+      int32_t offset,
+      Register value,
+      MemoryOrder memory_order = kRelaxedNonAtomic);
+  void StoreIntoObjectNoBarrier(Register object,
+                                const Address& dest,
+                                const Object& value);
+  void StoreCompressedIntoObjectNoBarrier(
+      Register object,
+      const Address& dest,
+      const Object& value,
+      MemoryOrder memory_order = kRelaxedNonAtomic);
+  void StoreIntoObjectOffsetNoBarrier(
+      Register object,
+      int32_t offset,
+      const Object& value,
+      MemoryOrder memory_order = kRelaxedNonAtomic);
+  void StoreCompressedIntoObjectOffsetNoBarrier(
+      Register object,
+      int32_t offset,
+      const Object& value,
+      MemoryOrder memory_order = kRelaxedNonAtomic);
+
+  // Stores a non-tagged value into a heap object.
+  void StoreInternalPointer(Register object,
+                            const Address& dest,
+                            Register value);
+
+  // Object pool, loading from pool, etc.
+  void LoadPoolPointer(Register pp = PP);
+
+  bool constant_pool_allowed() const { return constant_pool_allowed_; }
+  void set_constant_pool_allowed(bool b) { constant_pool_allowed_ = b; }
+
+  intptr_t FindImmediate(int64_t imm);
+  bool CanLoadFromObjectPool(const Object& object) const;
+  void LoadNativeEntry(Register dst,
+                       const ExternalLabel* label,
+                       ObjectPoolBuilderEntry::Patchability patchable);
+  void LoadIsolate(Register dst);
+  void LoadIsolateGroup(Register dst);
+
+  // Note: the function never clobbers TMP, TMP2 scratch registers.
+  void LoadObject(Register dst, const Object& obj) {
+    LoadObjectHelper(dst, obj, false);
+  }
+  // Note: the function never clobbers TMP, TMP2 scratch registers.
+  void LoadUniqueObject(Register dst, const Object& obj) {
+    LoadObjectHelper(dst, obj, true);
+  }
+  // Note: the function never clobbers TMP, TMP2 scratch registers.
+  void LoadImmediate(Register reg, intx_t imm);
+
+  void LoadDImmediate(FRegister reg, double immd);
+
+  // Load word from pool from the given offset using encoding that
+  // InstructionPattern::DecodeLoadWordFromPool can decode.
+  //
+  // Note: the function never clobbers TMP, TMP2 scratch registers.
+  void LoadWordFromPoolIndex(Register dst, intptr_t index, Register pp = PP);
+
+  void PushObject(const Object& object) {
+    if (IsSameObject(compiler::NullObject(), object)) {
+      PushRegister(NULL_REG);
+    } else if (target::IsSmi(object) && (target::ToRawSmi(object) == 0)) {
+      PushRegister(ZR);
+    } else {
+      LoadObject(TMP, object);
+      PushRegister(TMP);
+    }
+  }
+  void PushImmediate(int64_t immediate) {
+    if (immediate == 0) {
+      PushRegister(ZR);
+    } else {
+      LoadImmediate(TMP, immediate);
+      PushRegister(TMP);
+    }
+  }
+  void CompareObject(Register reg, const Object& object);
+
+  void ExtractClassIdFromTags(Register result, Register tags);
+  void ExtractInstanceSizeFromTags(Register result, Register tags);
+
+  void LoadClassId(Register result, Register object);
+  void LoadClassById(Register result, Register class_id);
+  void CompareClassId(Register object,
+                      intptr_t class_id,
+                      Register scratch = kNoRegister);
+  // Note: input and output registers must be different.
+  void LoadClassIdMayBeSmi(Register result, Register object);
+  void LoadTaggedClassIdMayBeSmi(Register result, Register object);
+  void EnsureHasClassIdInDEBUG(intptr_t cid,
+                               Register src,
+                               Register scratch,
+                               bool can_be_null = false) override;
+
+  void EnterFrame(intptr_t frame_size);
+  void LeaveFrame();
+  void Ret() { ret(); }
+
+  // Emit code to transition between generated mode and native mode.
+  //
+  // These require and ensure that SP is aligned and require a scratch
+  // register (in addition to TMP/TMP2).
+
+  void TransitionGeneratedToNative(Register destination_address,
+                                   Register new_exit_frame,
+                                   Register new_exit_through_ffi,
+                                   bool enter_safepoint);
+  void TransitionNativeToGenerated(Register scratch, bool exit_safepoint);
+  void EnterFullSafepoint(Register scratch);
+  void ExitFullSafepoint(Register scratch);
+
+  void CheckCodePointer();
+  void RestoreCodePointer();
+
+  // Restores the values of the registers that are blocked to cache some
+  // values, e.g. BARRIER_MASK and NULL_REG.
+  void RestorePinnedRegisters();
+
+  void SetupGlobalPoolAndDispatchTable();
+
+  void EnterDartFrame(intptr_t frame_size, Register new_pp = kNoRegister);
+  void EnterOsrFrame(intptr_t extra_size, Register new_pp = kNoRegister);
+  void LeaveDartFrame(RestorePP restore_pp = kRestoreCallerPP);
+
+  void CallRuntime(const RuntimeEntry& entry, intptr_t argument_count);
+
+  // Helper method for performing runtime calls from callers where manual
+  // register preservation is required (e.g. outside IL instructions marked
+  // as calling).
+  class CallRuntimeScope : public ValueObject {
+   public:
+    CallRuntimeScope(Assembler* assembler,
+                     const RuntimeEntry& entry,
+                     intptr_t frame_size,
+                     bool preserve_registers = true)
+        : CallRuntimeScope(assembler,
+                           entry,
+                           frame_size,
+                           preserve_registers,
+                           /*caller=*/nullptr) {}
+
+    CallRuntimeScope(Assembler* assembler,
+                     const RuntimeEntry& entry,
+                     intptr_t frame_size,
+                     Address caller,
+                     bool preserve_registers = true)
+        : CallRuntimeScope(assembler,
+                           entry,
+                           frame_size,
+                           preserve_registers,
+                           &caller) {}
+
+    void Call(intptr_t argument_count);
+
+    ~CallRuntimeScope();
+
+   private:
+    CallRuntimeScope(Assembler* assembler,
+                     const RuntimeEntry& entry,
+                     intptr_t frame_size,
+                     bool preserve_registers,
+                     const Address* caller);
+
+    Assembler* const assembler_;
+    const RuntimeEntry& entry_;
+    const bool preserve_registers_;
+    const bool restore_code_reg_;
+  };
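+
+  // A usage sketch (kExampleRuntimeEntry is a hypothetical entry): preserve
+  // registers, call the runtime with two arguments, and restore on scope
+  // exit:
+  //
+  //   CallRuntimeScope scope(this, kExampleRuntimeEntry, /*frame_size=*/0);
+  //   scope.Call(/*argument_count=*/2);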
+
+  // Set up a stub frame so that the stack traversal code can easily identify
+  // a stub frame.
+  void EnterStubFrame() { EnterDartFrame(0); }
+  void LeaveStubFrame() { LeaveDartFrame(); }
+
+  // Set up a frame for calling a C function.
+  // Automatically save the pinned Dart registers that are not callee-saved
+  // in the native calling convention.
+  // Use together with CallCFunction.
+  void EnterCFrame(intptr_t frame_space);
+  void LeaveCFrame();
+
+  void MonomorphicCheckedEntryJIT();
+  void MonomorphicCheckedEntryAOT();
+  void BranchOnMonomorphicCheckedEntryJIT(Label* label);
+
+  // If allocation tracing for |cid| is enabled, will jump to |trace| label,
+  // which will allocate in the runtime where tracing occurs.
+  void MaybeTraceAllocation(intptr_t cid, Register temp_reg, Label* trace);
+
+  void TryAllocateObject(intptr_t cid,
+                         intptr_t instance_size,
+                         Label* failure,
+                         JumpDistance distance,
+                         Register instance_reg,
+                         Register temp_reg) override;
+
+  void TryAllocateArray(intptr_t cid,
+                        intptr_t instance_size,
+                        Label* failure,
+                        Register instance,
+                        Register end_address,
+                        Register temp1,
+                        Register temp2);
+
+  // This emits a PC-relative call of the form "bl <offset>".  The offset is
+  // not yet known and therefore needs relocation to the right place before
+  // the code can be used.
+  //
+  // The necessary information for the "linker" (i.e. the relocation
+  // information) is stored in [UntaggedCode::static_calls_target_table_]: an
+  // entry of the form
+  //
+  //   (Code::kPcRelativeCall & pc_offset, <target-code>, <target-function>)
+  //
+  // will be used during relocation to fix the offset.
+  //
+  // The provided [offset_into_target] will be added to calculate the final
+  // destination.  It can be used e.g. for calling into the middle of a
+  // function.
+  void GenerateUnRelocatedPcRelativeCall(intptr_t offset_into_target = 0);
+
+  // This emits a PC-relative tail call of the form "b <offset>".
+  //
+  // See also above for the pc-relative call.
+  void GenerateUnRelocatedPcRelativeTailCall(intptr_t offset_into_target = 0);
+
+  Address ElementAddressForIntIndex(bool is_external,
+                                    intptr_t cid,
+                                    intptr_t index_scale,
+                                    Register array,
+                                    intptr_t index) const;
+  void ComputeElementAddressForIntIndex(Register address,
+                                        bool is_external,
+                                        intptr_t cid,
+                                        intptr_t index_scale,
+                                        Register array,
+                                        intptr_t index);
+  Address ElementAddressForRegIndex(bool is_external,
+                                    intptr_t cid,
+                                    intptr_t index_scale,
+                                    bool index_unboxed,
+                                    Register array,
+                                    Register index,
+                                    Register temp);
+
+  // Special version of ElementAddressForRegIndex for the case when cid and
+  // operand size for the target load don't match (e.g. when loading a few
+  // elements of the array with one load).
+  Address ElementAddressForRegIndexWithSize(bool is_external,
+                                            intptr_t cid,
+                                            OperandSize size,
+                                            intptr_t index_scale,
+                                            bool index_unboxed,
+                                            Register array,
+                                            Register index,
+                                            Register temp);
+
+  void ComputeElementAddressForRegIndex(Register address,
+                                        bool is_external,
+                                        intptr_t cid,
+                                        intptr_t index_scale,
+                                        bool index_unboxed,
+                                        Register array,
+                                        Register index);
+
+  void LoadStaticFieldAddress(Register address,
+                              Register field,
+                              Register scratch);
+
+  void LoadCompressedFieldAddressForRegOffset(Register address,
+                                              Register instance,
+                                              Register offset_in_words_as_smi);
+
+  void LoadFieldAddressForRegOffset(Register address,
+                                    Register instance,
+                                    Register offset_in_words_as_smi);
+
+  // Returns object data offset for address calculation; for heap objects also
+  // accounts for the tag.
+  static int32_t HeapDataOffset(bool is_external, intptr_t cid) {
+    return is_external
+               ? 0
+               : (target::Instance::DataOffsetFor(cid) - kHeapObjectTag);
+  }
+
+  void AddImmediateBranchOverflow(Register rd,
+                                  Register rs1,
+                                  intx_t imm,
+                                  Label* overflow);
+  void SubtractImmediateBranchOverflow(Register rd,
+                                       Register rs1,
+                                       intx_t imm,
+                                       Label* overflow);
+  void MultiplyImmediateBranchOverflow(Register rd,
+                                       Register rs1,
+                                       intx_t imm,
+                                       Label* overflow);
+  void AddBranchOverflow(Register rd,
+                         Register rs1,
+                         Register rs2,
+                         Label* overflow);
+  void SubtractBranchOverflow(Register rd,
+                              Register rs1,
+                              Register rs2,
+                              Label* overflow);
+  void MultiplyBranchOverflow(Register rd,
+                              Register rs1,
+                              Register rs2,
+                              Label* overflow);
+
+ private:
+  bool constant_pool_allowed_;
+
+  enum DeferredCompareType {
+    kNone,
+    kCompareReg,
+    kCompareImm,
+    kTestReg,
+    kTestImm,
+  };
+  DeferredCompareType deferred_compare_ = kNone;
+  Register deferred_left_ = kNoRegister;
+  Register deferred_reg_ = kNoRegister;
+  intptr_t deferred_imm_ = 0;
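+  // The deferred-compare state above reflects that RISC-V has fused
+  // compare-and-branch instructions rather than condition flags: the
+  // Compare*/Test* methods only record their operands, and the following
+  // BranchIf/SetIf emits the actual comparison.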
+
+  // Note: the function never clobbers TMP, TMP2 scratch registers.
+  void LoadObjectHelper(Register dst, const Object& obj, bool is_unique);
+
+  enum BarrierFilterMode {
+    // Filter falls through into the barrier update code. Target label
+    // is a "after-store" label.
+    kJumpToNoUpdate,
+
+    // Filter falls through to the "after-store" code. Target label
+    // is the barrier-update code label.
+    kJumpToBarrier,
+  };
+
+  void StoreIntoObjectFilter(Register object,
+                             Register value,
+                             Label* label,
+                             CanBeSmi can_be_smi,
+                             BarrierFilterMode barrier_filter_mode);
+
+  // Note: the leaf call sequence uses some ABI callee-saved registers as
+  // scratch, so they should be manually preserved.
+  void EnterCallRuntimeFrame(intptr_t frame_size, bool is_leaf);
+  void LeaveCallRuntimeFrame(bool is_leaf);
+
+  friend class dart::FlowGraphCompiler;
+  std::function<void(Register reg)> generate_invoke_write_barrier_wrapper_;
+  std::function<void()> generate_invoke_array_write_barrier_;
+
+  DISALLOW_ALLOCATION();
+  DISALLOW_COPY_AND_ASSIGN(Assembler);
+};
+
+}  // namespace compiler
+}  // namespace dart
+
+#endif  // RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_RISCV_H_
diff --git a/runtime/vm/compiler/assembler/assembler_riscv_test.cc b/runtime/vm/compiler/assembler/assembler_riscv_test.cc
new file mode 100644
index 0000000..b596017
--- /dev/null
+++ b/runtime/vm/compiler/assembler/assembler_riscv_test.cc
@@ -0,0 +1,6513 @@
+// Copyright (c) 2017, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/globals.h"
+#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+
+#include "vm/compiler/assembler/assembler.h"
+#include "vm/cpu.h"
+#include "vm/os.h"
+#include "vm/unit_test.h"
+#include "vm/virtual_memory.h"
+
+namespace dart {
+namespace compiler {
+#define __ assembler->
+
+#if defined(PRODUCT)
+#define EXPECT_DISASSEMBLY(expected)
+#else
+#define EXPECT_DISASSEMBLY(expected)                                           \
+  EXPECT_STREQ(expected, test->RelativeDisassembly())
+#endif
+
+// Called from assembler_test.cc.
+// RA: return address.
+// A0: value.
+// A1: growable array.
+// A2: current thread.
+ASSEMBLER_TEST_GENERATE(StoreIntoObject, assembler) {
+  __ PushRegister(RA);
+  __ PushNativeCalleeSavedRegisters();
+
+  __ mv(THR, A2);
+  __ lx(WRITE_BARRIER_MASK, Address(THR, Thread::write_barrier_mask_offset()));
+
+  __ StoreIntoObject(A1, FieldAddress(A1, GrowableObjectArray::data_offset()),
+                     A0);
+
+  __ PopNativeCalleeSavedRegisters();
+  __ PopRegister(RA);
+  __ ret();
+}
+
+static intx_t Call(intx_t entry,
+                   intx_t arg0 = 0,
+                   intx_t arg1 = 0,
+                   intx_t arg2 = 0,
+                   intx_t arg3 = 0) {
+#if defined(USING_SIMULATOR)
+  return Simulator::Current()->Call(entry, arg0, arg1, arg2, arg3);
+#else
+  typedef intx_t (*F)(intx_t, intx_t, intx_t, intx_t);
+  return reinterpret_cast<F>(entry)(arg0, arg1, arg2, arg3);
+#endif
+}
+static float CallF(intx_t entry, intx_t arg0) {
+#if defined(USING_SIMULATOR)
+  return Simulator::Current()->CallF(entry, arg0);
+#else
+  typedef float (*F)(intx_t);
+  return reinterpret_cast<F>(entry)(arg0);
+#endif
+}
+static float CallF(intx_t entry, intx_t arg0, float arg1) {
+#if defined(USING_SIMULATOR)
+  return Simulator::Current()->CallF(entry, arg0, arg1);
+#else
+  typedef float (*F)(intx_t, float);
+  return reinterpret_cast<F>(entry)(arg0, arg1);
+#endif
+}
+static float CallF(intx_t entry, double arg0) {
+#if defined(USING_SIMULATOR)
+  return Simulator::Current()->CallF(entry, arg0);
+#else
+  typedef float (*F)(double);
+  return reinterpret_cast<F>(entry)(arg0);
+#endif
+}
+static float CallF(intx_t entry, float arg0) {
+#if defined(USING_SIMULATOR)
+  return Simulator::Current()->CallF(entry, arg0);
+#else
+  typedef float (*F)(float);
+  return reinterpret_cast<F>(entry)(arg0);
+#endif
+}
+static float CallF(intx_t entry, float arg0, float arg1) {
+#if defined(USING_SIMULATOR)
+  return Simulator::Current()->CallF(entry, arg0, arg1);
+#else
+  typedef float (*F)(float, float);
+  return reinterpret_cast<F>(entry)(arg0, arg1);
+#endif
+}
+static float CallF(intx_t entry, float arg0, float arg1, float arg2) {
+#if defined(USING_SIMULATOR)
+  return Simulator::Current()->CallF(entry, arg0, arg1, arg2);
+#else
+  typedef float (*F)(float, float, float);
+  return reinterpret_cast<F>(entry)(arg0, arg1, arg2);
+#endif
+}
+static intx_t CallI(intx_t entry, float arg0) {
+#if defined(USING_SIMULATOR)
+  return Simulator::Current()->CallI(entry, arg0);
+#else
+  typedef intx_t (*F)(float);
+  return reinterpret_cast<F>(entry)(arg0);
+#endif
+}
+static intx_t CallI(intx_t entry, float arg0, float arg1) {
+#if defined(USING_SIMULATOR)
+  return Simulator::Current()->CallI(entry, arg0, arg1);
+#else
+  typedef intx_t (*F)(float, float);
+  return reinterpret_cast<F>(entry)(arg0, arg1);
+#endif
+}
+static double CallD(intx_t entry, intx_t arg0) {
+#if defined(USING_SIMULATOR)
+  return Simulator::Current()->CallD(entry, arg0);
+#else
+  typedef double (*F)(intx_t);
+  return reinterpret_cast<F>(entry)(arg0);
+#endif
+}
+static double CallD(intx_t entry, intx_t arg0, double arg1) {
+#if defined(USING_SIMULATOR)
+  return Simulator::Current()->CallD(entry, arg0, arg1);
+#else
+  typedef double (*F)(intx_t, double);
+  return reinterpret_cast<F>(entry)(arg0, arg1);
+#endif
+}
+static double CallD(intx_t entry, float arg0) {
+#if defined(USING_SIMULATOR)
+  return Simulator::Current()->CallD(entry, arg0);
+#else
+  typedef double (*F)(float);
+  return reinterpret_cast<F>(entry)(arg0);
+#endif
+}
+static double CallD(intx_t entry, double arg0) {
+#if defined(USING_SIMULATOR)
+  return Simulator::Current()->CallD(entry, arg0);
+#else
+  typedef double (*F)(double);
+  return reinterpret_cast<F>(entry)(arg0);
+#endif
+}
+static double CallD(intx_t entry, double arg0, double arg1) {
+#if defined(USING_SIMULATOR)
+  return Simulator::Current()->CallD(entry, arg0, arg1);
+#else
+  typedef double (*F)(double, double);
+  return reinterpret_cast<F>(entry)(arg0, arg1);
+#endif
+}
+static double CallD(intx_t entry, double arg0, double arg1, double arg2) {
+#if defined(USING_SIMULATOR)
+  return Simulator::Current()->CallD(entry, arg0, arg1, arg2);
+#else
+  typedef double (*F)(double, double, double);
+  return reinterpret_cast<F>(entry)(arg0, arg1, arg2);
+#endif
+}
+static intx_t CallI(intx_t entry, double arg0) {
+#if defined(USING_SIMULATOR)
+  return Simulator::Current()->CallI(entry, arg0);
+#else
+  typedef intx_t (*F)(double);
+  return reinterpret_cast<F>(entry)(arg0);
+#endif
+}
+static intx_t CallI(intx_t entry, double arg0, double arg1) {
+#if defined(USING_SIMULATOR)
+  return Simulator::Current()->CallI(entry, arg0, arg1);
+#else
+  typedef intx_t (*F)(double, double);
+  return reinterpret_cast<F>(entry)(arg0, arg1);
+#endif
+}
+
+ASSEMBLER_TEST_GENERATE(LoadUpperImmediate, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  __ lui(A0, 42 << 16);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadUpperImmediate, test) {
+  EXPECT_DISASSEMBLY(
+      "002a0537 lui a0, 2752512\n"
+      "00008067 ret\n");
+  EXPECT_EQ(42 << 16, Call(test->entry()));
+}
+
+ASSEMBLER_TEST_GENERATE(AddUpperImmediatePC, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  __ auipc(A0, 0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AddUpperImmediatePC, test) {
+  EXPECT_DISASSEMBLY(
+      "00000517 auipc a0, 0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(test->entry(), static_cast<uintx_t>(Call(test->entry())));
+}
+
+ASSEMBLER_TEST_GENERATE(JumpAndLink, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  Label label1, label2;
+  __ jal(T4, &label1);  // Forward.
+  __ sub(A0, T0, T1);
+  __ ret();
+  __ trap();
+
+  __ Bind(&label2);
+  __ li(T1, 7);
+  __ jalr(ZR, T5);
+  __ trap();
+
+  __ Bind(&label1);
+  __ li(T0, 4);
+  __ jal(T5, &label2);  // Backward.
+  __ jalr(ZR, T4);
+  __ trap();
+}
+ASSEMBLER_TEST_RUN(JumpAndLink, test) {
+  EXPECT_DISASSEMBLY(
+      "01c00eef jal t4, +28\n"
+      "40628533 sub a0, t0, t1\n"
+      "00008067 ret\n"
+      "00000000 trap\n"
+      "00700313 li t1, 7\n"
+      "000f0067 jr t5\n"
+      "00000000 trap\n"
+      "00400293 li t0, 4\n"
+      "ff1fff6f jal t5, -16\n"
+      "000e8067 jr t4\n"
+      "00000000 trap\n");
+  EXPECT_EQ(-3, Call(test->entry()));
+}
+
+ASSEMBLER_TEST_GENERATE(Jump, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  Label label1, label2;
+  __ j(&label1);  // Forward.
+  __ trap();
+  __ Bind(&label2);
+  __ li(T1, 7);
+  __ sub(A0, T0, T1);
+  __ ret();
+  __ Bind(&label1);
+  __ li(T0, 4);
+  __ j(&label2);  // Backward.
+  __ trap();
+}
+ASSEMBLER_TEST_RUN(Jump, test) {
+  EXPECT_DISASSEMBLY(
+      "0140006f j +20\n"
+      "00000000 trap\n"
+      "00700313 li t1, 7\n"
+      "40628533 sub a0, t0, t1\n"
+      "00008067 ret\n"
+      "00400293 li t0, 4\n"
+      "ff1ff06f j -16\n"
+      "00000000 trap\n");
+  EXPECT_EQ(-3, Call(test->entry()));
+}
+
+ASSEMBLER_TEST_GENERATE(JumpAndLinkRegister, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  /* 00 */ __ jalr(T4, A1, 28);  // Forward.
+  /* 04 */ __ sub(A0, T0, T1);
+  /* 08 */ __ ret();
+  /* 12 */ __ trap();
+
+  /* 16 */ __ li(T1, 7);
+  /* 20 */ __ jalr(ZR, T5);
+  /* 24 */ __ trap();
+
+  /* 28 */ __ li(T0, 4);
+  /* 32 */ __ jalr(T5, A1, 16);  // Backward.
+  /* 36 */ __ jalr(ZR, T4);
+  /* 40 */ __ trap();
+}
+ASSEMBLER_TEST_RUN(JumpAndLinkRegister, test) {
+  EXPECT_DISASSEMBLY(
+      "01c58ee7 jalr t4, 28(a1)\n"
+      "40628533 sub a0, t0, t1\n"
+      "00008067 ret\n"
+      "00000000 trap\n"
+      "00700313 li t1, 7\n"
+      "000f0067 jr t5\n"
+      "00000000 trap\n"
+      "00400293 li t0, 4\n"
+      "01058f67 jalr t5, 16(a1)\n"
+      "000e8067 jr t4\n"
+      "00000000 trap\n");
+  EXPECT_EQ(-3, Call(test->entry(), 0, test->entry()));
+}
+
+ASSEMBLER_TEST_GENERATE(JumpRegister, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  /* 00 */ __ jr(A1, 20);  // Forward.
+  /* 04 */ __ trap();
+  /* 08 */ __ li(T1, 7);
+  /* 12 */ __ sub(A0, T0, T1);
+  /* 16 */ __ ret();
+  /* 20 */ __ li(T0, 4);
+  /* 24 */ __ jr(A1, 8);  // Backward.
+  /* 28 */ __ trap();
+}
+ASSEMBLER_TEST_RUN(JumpRegister, test) {
+  EXPECT_DISASSEMBLY(
+      "01458067 jr 20(a1)\n"
+      "00000000 trap\n"
+      "00700313 li t1, 7\n"
+      "40628533 sub a0, t0, t1\n"
+      "00008067 ret\n"
+      "00400293 li t0, 4\n"
+      "00858067 jr 8(a1)\n"
+      "00000000 trap\n");
+  EXPECT_EQ(-3, Call(test->entry(), 0, test->entry()));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchEqualForward, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  Label label;
+  __ beq(A0, A1, &label);
+  __ li(A0, 3);
+  __ ret();
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchEqualForward, test) {
+  EXPECT_DISASSEMBLY(
+      "00b50663 beq a0, a1, +12\n"
+      "00300513 li a0, 3\n"
+      "00008067 ret\n"
+      "00400513 li a0, 4\n"
+      "00008067 ret\n");
+  EXPECT_EQ(4, Call(test->entry(), 1, 1));
+  EXPECT_EQ(3, Call(test->entry(), 1, 0));
+  EXPECT_EQ(3, Call(test->entry(), 1, -1));
+  EXPECT_EQ(3, Call(test->entry(), 0, 1));
+  EXPECT_EQ(4, Call(test->entry(), 0, 0));
+  EXPECT_EQ(3, Call(test->entry(), 0, -1));
+  EXPECT_EQ(3, Call(test->entry(), -1, 1));
+  EXPECT_EQ(3, Call(test->entry(), -1, 0));
+  EXPECT_EQ(4, Call(test->entry(), -1, -1));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchEqualForwardFar, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  Label label;
+  __ beq(A0, A1, &label);
+  __ li(A0, 3);
+  __ ret();
+  for (intptr_t i = 0; i < (1 << 13); i++) {
+    __ ebreak();
+  }
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchEqualForwardFar, test) {
+  //  EXPECT_DISASSEMBLY(constant too big);
+  EXPECT_EQ(4, Call(test->entry(), 1, 1));
+  EXPECT_EQ(3, Call(test->entry(), 1, 0));
+  EXPECT_EQ(3, Call(test->entry(), 1, -1));
+  EXPECT_EQ(3, Call(test->entry(), 0, 1));
+  EXPECT_EQ(4, Call(test->entry(), 0, 0));
+  EXPECT_EQ(3, Call(test->entry(), 0, -1));
+  EXPECT_EQ(3, Call(test->entry(), -1, 1));
+  EXPECT_EQ(3, Call(test->entry(), -1, 0));
+  EXPECT_EQ(4, Call(test->entry(), -1, -1));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchNotEqualForward, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  Label label;
+  __ bne(A0, A1, &label);
+  __ li(A0, 3);
+  __ ret();
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchNotEqualForward, test) {
+  EXPECT_DISASSEMBLY(
+      "00b51663 bne a0, a1, +12\n"
+      "00300513 li a0, 3\n"
+      "00008067 ret\n"
+      "00400513 li a0, 4\n"
+      "00008067 ret\n");
+  EXPECT_EQ(3, Call(test->entry(), 1, 1));
+  EXPECT_EQ(4, Call(test->entry(), 1, 0));
+  EXPECT_EQ(4, Call(test->entry(), 1, -1));
+  EXPECT_EQ(4, Call(test->entry(), 0, 1));
+  EXPECT_EQ(3, Call(test->entry(), 0, 0));
+  EXPECT_EQ(4, Call(test->entry(), 0, -1));
+  EXPECT_EQ(4, Call(test->entry(), -1, 1));
+  EXPECT_EQ(4, Call(test->entry(), -1, 0));
+  EXPECT_EQ(3, Call(test->entry(), -1, -1));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchNotEqualForwardFar, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  Label label;
+  __ bne(A0, A1, &label);
+  __ li(A0, 3);
+  __ ret();
+  for (intptr_t i = 0; i < (1 << 13); i++) {
+    __ ebreak();
+  }
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchNotEqualForwardFar, test) {
+  //  EXPECT_DISASSEMBLY(constant too big);
+  EXPECT_EQ(3, Call(test->entry(), 1, 1));
+  EXPECT_EQ(4, Call(test->entry(), 1, 0));
+  EXPECT_EQ(4, Call(test->entry(), 1, -1));
+  EXPECT_EQ(4, Call(test->entry(), 0, 1));
+  EXPECT_EQ(3, Call(test->entry(), 0, 0));
+  EXPECT_EQ(4, Call(test->entry(), 0, -1));
+  EXPECT_EQ(4, Call(test->entry(), -1, 1));
+  EXPECT_EQ(4, Call(test->entry(), -1, 0));
+  EXPECT_EQ(3, Call(test->entry(), -1, -1));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchLessThanForward, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  Label label;
+  __ blt(A0, A1, &label);
+  __ li(A0, 3);
+  __ ret();
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchLessThanForward, test) {
+  EXPECT_DISASSEMBLY(
+      "00b54663 blt a0, a1, +12\n"
+      "00300513 li a0, 3\n"
+      "00008067 ret\n"
+      "00400513 li a0, 4\n"
+      "00008067 ret\n");
+  EXPECT_EQ(3, Call(test->entry(), 1, 1));
+  EXPECT_EQ(3, Call(test->entry(), 1, 0));
+  EXPECT_EQ(3, Call(test->entry(), 1, -1));
+  EXPECT_EQ(4, Call(test->entry(), 0, 1));
+  EXPECT_EQ(3, Call(test->entry(), 0, 0));
+  EXPECT_EQ(3, Call(test->entry(), 0, -1));
+  EXPECT_EQ(4, Call(test->entry(), -1, 1));
+  EXPECT_EQ(4, Call(test->entry(), -1, 0));
+  EXPECT_EQ(3, Call(test->entry(), -1, -1));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchLessThanForwardFar, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  Label label;
+  __ blt(A0, A1, &label);
+  __ li(A0, 3);
+  __ ret();
+  for (intptr_t i = 0; i < (1 << 13); i++) {
+    __ ebreak();
+  }
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchLessThanForwardFar, test) {
+  //  EXPECT_DISASSEMBLY(constant too big);
+  EXPECT_EQ(3, Call(test->entry(), 1, 1));
+  EXPECT_EQ(3, Call(test->entry(), 1, 0));
+  EXPECT_EQ(3, Call(test->entry(), 1, -1));
+  EXPECT_EQ(4, Call(test->entry(), 0, 1));
+  EXPECT_EQ(3, Call(test->entry(), 0, 0));
+  EXPECT_EQ(3, Call(test->entry(), 0, -1));
+  EXPECT_EQ(4, Call(test->entry(), -1, 1));
+  EXPECT_EQ(4, Call(test->entry(), -1, 0));
+  EXPECT_EQ(3, Call(test->entry(), -1, -1));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchLessOrEqualForward, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  Label label;
+  __ ble(A0, A1, &label);
+  __ li(A0, 3);
+  __ ret();
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchLessOrEqualForward, test) {
+  EXPECT_DISASSEMBLY(
+      "00a5d663 ble a0, a1, +12\n"
+      "00300513 li a0, 3\n"
+      "00008067 ret\n"
+      "00400513 li a0, 4\n"
+      "00008067 ret\n");
+  EXPECT_EQ(4, Call(test->entry(), 1, 1));
+  EXPECT_EQ(3, Call(test->entry(), 1, 0));
+  EXPECT_EQ(3, Call(test->entry(), 1, -1));
+  EXPECT_EQ(4, Call(test->entry(), 0, 1));
+  EXPECT_EQ(4, Call(test->entry(), 0, 0));
+  EXPECT_EQ(3, Call(test->entry(), 0, -1));
+  EXPECT_EQ(4, Call(test->entry(), -1, 1));
+  EXPECT_EQ(4, Call(test->entry(), -1, 0));
+  EXPECT_EQ(4, Call(test->entry(), -1, -1));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchLessOrEqualForwardFar, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  Label label;
+  __ ble(A0, A1, &label);
+  __ li(A0, 3);
+  __ ret();
+  for (intptr_t i = 0; i < (1 << 13); i++) {
+    __ ebreak();
+  }
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchLessOrEqualForwardFar, test) {
+  //  EXPECT_DISASSEMBLY(constant too big);
+  EXPECT_EQ(4, Call(test->entry(), 1, 1));
+  EXPECT_EQ(3, Call(test->entry(), 1, 0));
+  EXPECT_EQ(3, Call(test->entry(), 1, -1));
+  EXPECT_EQ(4, Call(test->entry(), 0, 1));
+  EXPECT_EQ(4, Call(test->entry(), 0, 0));
+  EXPECT_EQ(3, Call(test->entry(), 0, -1));
+  EXPECT_EQ(4, Call(test->entry(), -1, 1));
+  EXPECT_EQ(4, Call(test->entry(), -1, 0));
+  EXPECT_EQ(4, Call(test->entry(), -1, -1));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchGreaterThanForward, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  Label label;
+  __ bgt(A0, A1, &label);
+  __ li(A0, 3);
+  __ ret();
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchGreaterThanForward, test) {
+  EXPECT_DISASSEMBLY(
+      "00a5c663 blt a1, a0, +12\n"
+      "00300513 li a0, 3\n"
+      "00008067 ret\n"
+      "00400513 li a0, 4\n"
+      "00008067 ret\n");
+  EXPECT_EQ(3, Call(test->entry(), 1, 1));
+  EXPECT_EQ(4, Call(test->entry(), 1, 0));
+  EXPECT_EQ(4, Call(test->entry(), 1, -1));
+  EXPECT_EQ(3, Call(test->entry(), 0, 1));
+  EXPECT_EQ(3, Call(test->entry(), 0, 0));
+  EXPECT_EQ(4, Call(test->entry(), 0, -1));
+  EXPECT_EQ(3, Call(test->entry(), -1, 1));
+  EXPECT_EQ(3, Call(test->entry(), -1, 0));
+  EXPECT_EQ(3, Call(test->entry(), -1, -1));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchGreaterOrEqualForward, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  Label label;
+  __ bge(A0, A1, &label);
+  __ li(A0, 3);
+  __ ret();
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchGreaterOrEqualForward, test) {
+  EXPECT_DISASSEMBLY(
+      "00b55663 ble a1, a0, +12\n"
+      "00300513 li a0, 3\n"
+      "00008067 ret\n"
+      "00400513 li a0, 4\n"
+      "00008067 ret\n");
+  EXPECT_EQ(4, Call(test->entry(), 1, 1));
+  EXPECT_EQ(4, Call(test->entry(), 1, 0));
+  EXPECT_EQ(4, Call(test->entry(), 1, -1));
+  EXPECT_EQ(3, Call(test->entry(), 0, 1));
+  EXPECT_EQ(4, Call(test->entry(), 0, 0));
+  EXPECT_EQ(4, Call(test->entry(), 0, -1));
+  EXPECT_EQ(3, Call(test->entry(), -1, 1));
+  EXPECT_EQ(3, Call(test->entry(), -1, 0));
+  EXPECT_EQ(4, Call(test->entry(), -1, -1));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchLessThanUnsignedForward, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  Label label;
+  __ bltu(A0, A1, &label);
+  __ li(A0, 3);
+  __ ret();
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchLessThanUnsignedForward, test) {
+  EXPECT_DISASSEMBLY(
+      "00b56663 bltu a0, a1, +12\n"
+      "00300513 li a0, 3\n"
+      "00008067 ret\n"
+      "00400513 li a0, 4\n"
+      "00008067 ret\n");
+  EXPECT_EQ(3, Call(test->entry(), 1, 1));
+  EXPECT_EQ(3, Call(test->entry(), 1, 0));
+  EXPECT_EQ(4, Call(test->entry(), 1, -1));
+  EXPECT_EQ(4, Call(test->entry(), 0, 1));
+  EXPECT_EQ(3, Call(test->entry(), 0, 0));
+  EXPECT_EQ(4, Call(test->entry(), 0, -1));
+  EXPECT_EQ(3, Call(test->entry(), -1, 1));
+  EXPECT_EQ(3, Call(test->entry(), -1, 0));
+  EXPECT_EQ(3, Call(test->entry(), -1, -1));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchLessOrEqualUnsignedForward, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  Label label;
+  __ bleu(A0, A1, &label);
+  __ li(A0, 3);
+  __ ret();
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchLessOrEqualUnsignedForward, test) {
+  EXPECT_DISASSEMBLY(
+      "00a5f663 bleu a0, a1, +12\n"
+      "00300513 li a0, 3\n"
+      "00008067 ret\n"
+      "00400513 li a0, 4\n"
+      "00008067 ret\n");
+  EXPECT_EQ(4, Call(test->entry(), 1, 1));
+  EXPECT_EQ(3, Call(test->entry(), 1, 0));
+  EXPECT_EQ(4, Call(test->entry(), 1, -1));
+  EXPECT_EQ(4, Call(test->entry(), 0, 1));
+  EXPECT_EQ(4, Call(test->entry(), 0, 0));
+  EXPECT_EQ(4, Call(test->entry(), 0, -1));
+  EXPECT_EQ(3, Call(test->entry(), -1, 1));
+  EXPECT_EQ(3, Call(test->entry(), -1, 0));
+  EXPECT_EQ(4, Call(test->entry(), -1, -1));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchGreaterThanUnsignedForward, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  Label label;
+  __ bgtu(A0, A1, &label);
+  __ li(A0, 3);
+  __ ret();
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchGreaterThanUnsignedForward, test) {
+  EXPECT_DISASSEMBLY(
+      "00a5e663 bltu a1, a0, +12\n"
+      "00300513 li a0, 3\n"
+      "00008067 ret\n"
+      "00400513 li a0, 4\n"
+      "00008067 ret\n");
+  EXPECT_EQ(3, Call(test->entry(), 1, 1));
+  EXPECT_EQ(4, Call(test->entry(), 1, 0));
+  EXPECT_EQ(3, Call(test->entry(), 1, -1));
+  EXPECT_EQ(3, Call(test->entry(), 0, 1));
+  EXPECT_EQ(3, Call(test->entry(), 0, 0));
+  EXPECT_EQ(3, Call(test->entry(), 0, -1));
+  EXPECT_EQ(4, Call(test->entry(), -1, 1));
+  EXPECT_EQ(4, Call(test->entry(), -1, 0));
+  EXPECT_EQ(3, Call(test->entry(), -1, -1));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchGreaterOrEqualUnsignedForward, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  Label label;
+  __ bgeu(A0, A1, &label);
+  __ li(A0, 3);
+  __ ret();
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchGreaterOrEqualUnsignedForward, test) {
+  EXPECT_DISASSEMBLY(
+      "00b57663 bleu a1, a0, +12\n"
+      "00300513 li a0, 3\n"
+      "00008067 ret\n"
+      "00400513 li a0, 4\n"
+      "00008067 ret\n");
+  EXPECT_EQ(4, Call(test->entry(), 1, 1));
+  EXPECT_EQ(4, Call(test->entry(), 1, 0));
+  EXPECT_EQ(3, Call(test->entry(), 1, -1));
+  EXPECT_EQ(3, Call(test->entry(), 0, 1));
+  EXPECT_EQ(4, Call(test->entry(), 0, 0));
+  EXPECT_EQ(3, Call(test->entry(), 0, -1));
+  EXPECT_EQ(4, Call(test->entry(), -1, 1));
+  EXPECT_EQ(4, Call(test->entry(), -1, 0));
+  EXPECT_EQ(4, Call(test->entry(), -1, -1));
+}
+
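+// Added note: lb sign-extends the loaded byte, so 0xCD == -51, 0xEF == -17
+// and 0xAB == -85 in the expectations below.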
+ASSEMBLER_TEST_GENERATE(LoadByte_0, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lb(A0, Address(A0, 0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadByte_0, test) {
+  EXPECT_DISASSEMBLY(
+      "00050503 lb a0, 0(a0)\n"
+      "00008067 ret\n");
+
+  uint8_t* values = reinterpret_cast<uint8_t*>(malloc(3 * sizeof(uint8_t)));
+  values[0] = 0xAB;
+  values[1] = 0xCD;
+  values[2] = 0xEF;
+  EXPECT_EQ(-51, Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadByte_Pos, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lb(A0, Address(A0, 1));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadByte_Pos, test) {
+  EXPECT_DISASSEMBLY(
+      "00150503 lb a0, 1(a0)\n"
+      "00008067 ret\n");
+
+  uint8_t* values = reinterpret_cast<uint8_t*>(malloc(3 * sizeof(uint8_t)));
+  values[0] = 0xAB;
+  values[1] = 0xCD;
+  values[2] = 0xEF;
+
+  EXPECT_EQ(-17, Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadByte_Neg, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lb(A0, Address(A0, -1));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadByte_Neg, test) {
+  EXPECT_DISASSEMBLY(
+      "fff50503 lb a0, -1(a0)\n"
+      "00008067 ret\n");
+
+  uint8_t* values = reinterpret_cast<uint8_t*>(malloc(3 * sizeof(uint8_t)));
+  values[0] = 0xAB;
+  values[1] = 0xCD;
+  values[2] = 0xEF;
+
+  EXPECT_EQ(-85, Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadByteUnsigned_0, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lbu(A0, Address(A0, 0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadByteUnsigned_0, test) {
+  EXPECT_DISASSEMBLY(
+      "00054503 lbu a0, 0(a0)\n"
+      "00008067 ret\n");
+
+  uint8_t* values = reinterpret_cast<uint8_t*>(malloc(3 * sizeof(uint8_t)));
+  values[0] = 0xAB;
+  values[1] = 0xCD;
+  values[2] = 0xEF;
+
+  EXPECT_EQ(0xCD, Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadByteUnsigned_Pos, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lbu(A0, Address(A0, 1));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadByteUnsigned_Pos, test) {
+  EXPECT_DISASSEMBLY(
+      "00154503 lbu a0, 1(a0)\n"
+      "00008067 ret\n");
+
+  uint8_t* values = reinterpret_cast<uint8_t*>(malloc(3 * sizeof(uint8_t)));
+  values[0] = 0xAB;
+  values[1] = 0xCD;
+  values[2] = 0xEF;
+
+  EXPECT_EQ(0xEF, Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadByteUnsigned_Neg, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lbu(A0, Address(A0, -1));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadByteUnsigned_Neg, test) {
+  EXPECT_DISASSEMBLY(
+      "fff54503 lbu a0, -1(a0)\n"
+      "00008067 ret\n");
+
+  uint8_t* values = reinterpret_cast<uint8_t*>(malloc(3 * sizeof(uint8_t)));
+  values[0] = 0xAB;
+  values[1] = 0xCD;
+  values[2] = 0xEF;
+
+  EXPECT_EQ(0xAB, Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
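+// Added note: lh sign-extends the loaded halfword, so 0xCD02 == -13054,
+// 0xEF03 == -4349 and 0xAB01 == -21759 in the expectations below.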
+ASSEMBLER_TEST_GENERATE(LoadHalfword_0, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lh(A0, Address(A0, 0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadHalfword_0, test) {
+  EXPECT_DISASSEMBLY(
+      "00051503 lh a0, 0(a0)\n"
+      "00008067 ret\n");
+
+  uint16_t* values = reinterpret_cast<uint16_t*>(malloc(3 * sizeof(uint16_t)));
+  values[0] = 0xAB01;
+  values[1] = 0xCD02;
+  values[2] = 0xEF03;
+
+  EXPECT_EQ(-13054, Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadHalfword_Pos, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lh(A0, Address(A0, 2));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadHalfword_Pos, test) {
+  EXPECT_DISASSEMBLY(
+      "00251503 lh a0, 2(a0)\n"
+      "00008067 ret\n");
+
+  uint16_t* values = reinterpret_cast<uint16_t*>(malloc(3 * sizeof(uint16_t)));
+  values[0] = 0xAB01;
+  values[1] = 0xCD02;
+  values[2] = 0xEF03;
+
+  EXPECT_EQ(-4349, Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadHalfword_Neg, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lh(A0, Address(A0, -2));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadHalfword_Neg, test) {
+  EXPECT_DISASSEMBLY(
+      "ffe51503 lh a0, -2(a0)\n"
+      "00008067 ret\n");
+
+  uint16_t* values = reinterpret_cast<uint16_t*>(malloc(3 * sizeof(uint16_t)));
+  values[0] = 0xAB01;
+  values[1] = 0xCD02;
+  values[2] = 0xEF03;
+
+  EXPECT_EQ(-21759, Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadHalfwordUnsigned_0, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lhu(A0, Address(A0, 0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadHalfwordUnsigned_0, test) {
+  EXPECT_DISASSEMBLY(
+      "00055503 lhu a0, 0(a0)\n"
+      "00008067 ret\n");
+
+  uint16_t* values = reinterpret_cast<uint16_t*>(malloc(3 * sizeof(uint16_t)));
+  values[0] = 0xAB01;
+  values[1] = 0xCD02;
+  values[2] = 0xEF03;
+
+  EXPECT_EQ(0xCD02, Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadHalfwordUnsigned_Pos, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lhu(A0, Address(A0, 2));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadHalfwordUnsigned_Pos, test) {
+  EXPECT_DISASSEMBLY(
+      "00255503 lhu a0, 2(a0)\n"
+      "00008067 ret\n");
+
+  uint16_t* values = reinterpret_cast<uint16_t*>(malloc(3 * sizeof(uint16_t)));
+  values[0] = 0xAB01;
+  values[1] = 0xCD02;
+  values[2] = 0xEF03;
+
+  EXPECT_EQ(0xEF03, Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadHalfwordUnsigned_Neg, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lhu(A0, Address(A0, -2));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadHalfwordUnsigned_Neg, test) {
+  EXPECT_DISASSEMBLY(
+      "ffe55503 lhu a0, -2(a0)\n"
+      "00008067 ret\n");
+
+  uint16_t* values = reinterpret_cast<uint16_t*>(malloc(3 * sizeof(uint16_t)));
+  values[0] = 0xAB01;
+  values[1] = 0xCD02;
+  values[2] = 0xEF03;
+
+  EXPECT_EQ(0xAB01, Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
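+// Added note: lw sign-extends the loaded word (a no-op on RV32), so
+// 0xCD020405 == -855505915 in the expectation below.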
+ASSEMBLER_TEST_GENERATE(LoadWord_0, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lw(A0, Address(A0, 0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadWord_0, test) {
+  EXPECT_DISASSEMBLY(
+      "00052503 lw a0, 0(a0)\n"
+      "00008067 ret\n");
+
+  uint32_t* values = reinterpret_cast<uint32_t*>(malloc(3 * sizeof(uint32_t)));
+  values[0] = 0xAB010203;
+  values[1] = 0xCD020405;
+  values[2] = 0xEF030607;
+
+  EXPECT_EQ(-855505915,
+            Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadWord_Pos, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lw(A0, Address(A0, 4));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadWord_Pos, test) {
+  EXPECT_DISASSEMBLY(
+      "00452503 lw a0, 4(a0)\n"
+      "00008067 ret\n");
+
+  uint32_t* values = reinterpret_cast<uint32_t*>(malloc(3 * sizeof(uint32_t)));
+  values[0] = 0xAB010203;
+  values[1] = 0xCD020405;
+  values[2] = 0xEF030607;
+
+  EXPECT_EQ(-285014521,
+            Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadWord_Neg, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lw(A0, Address(A0, -4));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadWord_Neg, test) {
+  EXPECT_DISASSEMBLY(
+      "ffc52503 lw a0, -4(a0)\n"
+      "00008067 ret\n");
+
+  uint32_t* values = reinterpret_cast<uint32_t*>(malloc(3 * sizeof(uint32_t)));
+  values[0] = 0xAB010203;
+  values[1] = 0xCD020405;
+  values[2] = 0xEF030607;
+
+  EXPECT_EQ(-1425997309,
+            Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(StoreWord_0, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sw(A1, Address(A0, 0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(StoreWord_0, test) {
+  EXPECT_DISASSEMBLY(
+      "00b52023 sw a1, 0(a0)\n"
+      "00008067 ret\n");
+
+  uint32_t* values = reinterpret_cast<uint32_t*>(malloc(3 * sizeof(uint32_t)));
+  values[0] = 0;
+  values[1] = 0;
+  values[2] = 0;
+
+  Call(test->entry(), reinterpret_cast<intx_t>(&values[1]), 0xCD020405);
+  EXPECT_EQ(0u, values[0]);
+  EXPECT_EQ(0xCD020405, values[1]);
+  EXPECT_EQ(0u, values[2]);
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(StoreWord_Pos, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sw(A1, Address(A0, 4));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(StoreWord_Pos, test) {
+  EXPECT_DISASSEMBLY(
+      "00b52223 sw a1, 4(a0)\n"
+      "00008067 ret\n");
+
+  uint32_t* values = reinterpret_cast<uint32_t*>(malloc(3 * sizeof(uint32_t)));
+  values[0] = 0;
+  values[1] = 0;
+  values[2] = 0;
+
+  Call(test->entry(), reinterpret_cast<intx_t>(&values[1]), 0xEF030607);
+  EXPECT_EQ(0u, values[0]);
+  EXPECT_EQ(0u, values[1]);
+  EXPECT_EQ(0xEF030607, values[2]);
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(StoreWord_Neg, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sw(A1, Address(A0, -4));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(StoreWord_Neg, test) {
+  EXPECT_DISASSEMBLY(
+      "feb52e23 sw a1, -4(a0)\n"
+      "00008067 ret\n");
+
+  uint32_t* values = reinterpret_cast<uint32_t*>(malloc(3 * sizeof(uint32_t)));
+  values[0] = 0;
+  values[1] = 0;
+  values[2] = 0;
+
+  Call(test->entry(), reinterpret_cast<intx_t>(&values[1]), 0xAB010203);
+  EXPECT_EQ(0xAB010203, values[0]);
+  EXPECT_EQ(0u, values[1]);
+  EXPECT_EQ(0u, values[2]);
+  free(values);
+}
+
+#if XLEN >= 64
+ASSEMBLER_TEST_GENERATE(LoadWordUnsigned_0, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lwu(A0, Address(A0, 0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadWordUnsigned_0, test) {
+  EXPECT_DISASSEMBLY(
+      "00056503 lwu a0, 0(a0)\n"
+      "00008067 ret\n");
+
+  uint32_t* values = reinterpret_cast<uint32_t*>(malloc(3 * sizeof(uint32_t)));
+  values[0] = 0xAB010203;
+  values[1] = 0xCD020405;
+  values[2] = 0xEF030607;
+
+  EXPECT_EQ(0xCD020405,
+            Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadWordUnsigned_Pos, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lwu(A0, Address(A0, 4));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadWordUnsigned_Pos, test) {
+  EXPECT_DISASSEMBLY(
+      "00456503 lwu a0, 4(a0)\n"
+      "00008067 ret\n");
+
+  uint32_t* values = reinterpret_cast<uint32_t*>(malloc(3 * sizeof(uint32_t)));
+  values[0] = 0xAB010203;
+  values[1] = 0xCD020405;
+  values[2] = 0xEF030607;
+
+  EXPECT_EQ(0xEF030607,
+            Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadWordUnsigned_Neg, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lwu(A0, Address(A0, -4));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadWordUnsigned_Neg, test) {
+  EXPECT_DISASSEMBLY(
+      "ffc56503 lwu a0, -4(a0)\n"
+      "00008067 ret\n");
+
+  uint32_t* values = reinterpret_cast<uint32_t*>(malloc(3 * sizeof(uint32_t)));
+  values[0] = 0xAB010203;
+  values[1] = 0xCD020405;
+  values[2] = 0xEF030607;
+
+  EXPECT_EQ(0xAB010203,
+            Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadDoubleWord_0, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ ld(A0, Address(A0, 0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadDoubleWord_0, test) {
+  EXPECT_DISASSEMBLY(
+      "00053503 ld a0, 0(a0)\n"
+      "00008067 ret\n");
+
+  uint64_t* values = reinterpret_cast<uint64_t*>(malloc(3 * sizeof(uint64_t)));
+  values[0] = 0xAB01020304050607;
+  values[1] = 0xCD02040505060708;
+  values[2] = 0xEF03060708090A0B;
+
+  EXPECT_EQ(-3674369926375274744,
+            Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadDoubleWord_Pos, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ ld(A0, Address(A0, 8));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadDoubleWord_Pos, test) {
+  EXPECT_DISASSEMBLY(
+      "00853503 ld a0, 8(a0)\n"
+      "00008067 ret\n");
+
+  uint64_t* values = reinterpret_cast<uint64_t*>(malloc(3 * sizeof(uint64_t)));
+  values[0] = 0xAB01020304050607;
+  values[1] = 0xCD02040505060708;
+  values[2] = 0xEF03060708090A0B;
+
+  EXPECT_EQ(-1224128046445295093,
+            Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadDoubleWord_Neg, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ ld(A0, Address(A0, -8));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadDoubleWord_Neg, test) {
+  EXPECT_DISASSEMBLY(
+      "ff853503 ld a0, -8(a0)\n"
+      "00008067 ret\n");
+
+  uint64_t* values = reinterpret_cast<uint64_t*>(malloc(3 * sizeof(uint64_t)));
+  values[0] = 0xAB01020304050607;
+  values[1] = 0xCD02040505060708;
+  values[2] = 0xEF03060708090A0B;
+
+  EXPECT_EQ(-6124611806271568377,
+            Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(StoreDoubleWord_0, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sd(A1, Address(A0, 0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(StoreDoubleWord_0, test) {
+  EXPECT_DISASSEMBLY(
+      "00b53023 sd a1, 0(a0)\n"
+      "00008067 ret\n");
+
+  uint64_t* values = reinterpret_cast<uint64_t*>(malloc(3 * sizeof(uint64_t)));
+  values[0] = 0;
+  values[1] = 0;
+  values[2] = 0;
+
+  Call(test->entry(), reinterpret_cast<intx_t>(&values[1]), 0xCD02040505060708);
+  EXPECT_EQ(0u, values[0]);
+  EXPECT_EQ(0xCD02040505060708, values[1]);
+  EXPECT_EQ(0u, values[2]);
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(StoreDoubleWord_Pos, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sd(A1, Address(A0, 8));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(StoreDoubleWord_Pos, test) {
+  EXPECT_DISASSEMBLY(
+      "00b53423 sd a1, 8(a0)\n"
+      "00008067 ret\n");
+
+  uint64_t* values = reinterpret_cast<uint64_t*>(malloc(3 * sizeof(uint64_t)));
+  values[0] = 0;
+  values[1] = 0;
+  values[2] = 0;
+
+  Call(test->entry(), reinterpret_cast<intx_t>(&values[1]), 0xEF03060708090A0B);
+  EXPECT_EQ(0u, values[0]);
+  EXPECT_EQ(0u, values[1]);
+  EXPECT_EQ(0xEF03060708090A0B, values[2]);
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(StoreDoubleWord_Neg, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sd(A1, Address(A0, -8));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(StoreDoubleWord_Neg, test) {
+  EXPECT_DISASSEMBLY(
+      "feb53c23 sd a1, -8(a0)\n"
+      "00008067 ret\n");
+
+  uint64_t* values = reinterpret_cast<uint64_t*>(malloc(3 * sizeof(uint64_t)));
+  values[0] = 0;
+  values[1] = 0;
+  values[2] = 0;
+
+  Call(test->entry(), reinterpret_cast<intx_t>(&values[1]), 0xAB01020304050607);
+  EXPECT_EQ(0xAB01020304050607, values[0]);
+  EXPECT_EQ(0u, values[1]);
+  EXPECT_EQ(0u, values[2]);
+  free(values);
+}
+#endif  // XLEN >= 64
+
+ASSEMBLER_TEST_GENERATE(AddImmediate1, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ addi(A0, A0, 42);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AddImmediate1, test) {
+  EXPECT_DISASSEMBLY(
+      "02a50513 addi a0, a0, 42\n"
+      "00008067 ret\n");
+  EXPECT_EQ(42, Call(test->entry(), 0));
+  EXPECT_EQ(40, Call(test->entry(), -2));
+  EXPECT_EQ(0, Call(test->entry(), -42));
+}
+
+ASSEMBLER_TEST_GENERATE(AddImmediate2, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ addi(A0, A0, -42);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AddImmediate2, test) {
+  EXPECT_DISASSEMBLY(
+      "fd650513 addi a0, a0, -42\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-42, Call(test->entry(), 0));
+  EXPECT_EQ(-44, Call(test->entry(), -2));
+  EXPECT_EQ(38, Call(test->entry(), 80));
+}
+
+ASSEMBLER_TEST_GENERATE(SetLessThanImmediate1, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ slti(A0, A0, 7);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SetLessThanImmediate1, test) {
+  EXPECT_DISASSEMBLY(
+      "00752513 slti a0, a0, 7\n"
+      "00008067 ret\n");
+  EXPECT_EQ(1, Call(test->entry(), 6));
+  EXPECT_EQ(0, Call(test->entry(), 7));
+  EXPECT_EQ(0, Call(test->entry(), 8));
+  EXPECT_EQ(1, Call(test->entry(), -6));
+  EXPECT_EQ(1, Call(test->entry(), -7));
+  EXPECT_EQ(1, Call(test->entry(), -8));
+}
+
+ASSEMBLER_TEST_GENERATE(SetLessThanImmediate2, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ slti(A0, A0, -7);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SetLessThanImmediate2, test) {
+  EXPECT_DISASSEMBLY(
+      "ff952513 slti a0, a0, -7\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 6));
+  EXPECT_EQ(0, Call(test->entry(), 7));
+  EXPECT_EQ(0, Call(test->entry(), 8));
+  EXPECT_EQ(0, Call(test->entry(), -6));
+  EXPECT_EQ(0, Call(test->entry(), -7));
+  EXPECT_EQ(1, Call(test->entry(), -8));
+}
+
+ASSEMBLER_TEST_GENERATE(SetLessThanImmediateUnsigned1, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sltiu(A0, A0, 7);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SetLessThanImmediateUnsigned1, test) {
+  EXPECT_DISASSEMBLY(
+      "00753513 sltiu a0, a0, 7\n"
+      "00008067 ret\n");
+  EXPECT_EQ(1, Call(test->entry(), 6));
+  EXPECT_EQ(0, Call(test->entry(), 7));
+  EXPECT_EQ(0, Call(test->entry(), 8));
+  EXPECT_EQ(0, Call(test->entry(), -6));
+  EXPECT_EQ(0, Call(test->entry(), -7));
+  EXPECT_EQ(0, Call(test->entry(), -8));
+}
+
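+// Added note: the 12-bit immediate is sign-extended to XLEN bits and then
+// compared unsigned, so -7 is near UINTX_MAX: every small non-negative input
+// is below it, and among the negative inputs only -8 is smaller.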
+ASSEMBLER_TEST_GENERATE(SetLessThanImmediateUnsigned2, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sltiu(A0, A0, -7);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SetLessThanImmediateUnsigned2, test) {
+  EXPECT_DISASSEMBLY(
+      "ff953513 sltiu a0, a0, -7\n"
+      "00008067 ret\n");
+  EXPECT_EQ(1, Call(test->entry(), 6));
+  EXPECT_EQ(1, Call(test->entry(), 7));
+  EXPECT_EQ(1, Call(test->entry(), 8));
+  EXPECT_EQ(0, Call(test->entry(), -6));
+  EXPECT_EQ(0, Call(test->entry(), -7));
+  EXPECT_EQ(1, Call(test->entry(), -8));
+}
+
+ASSEMBLER_TEST_GENERATE(XorImmediate1, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ xori(A0, A0, 42);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(XorImmediate1, test) {
+  EXPECT_DISASSEMBLY(
+      "02a54513 xori a0, a0, 42\n"
+      "00008067 ret\n");
+  EXPECT_EQ(42, Call(test->entry(), 0));
+  EXPECT_EQ(43, Call(test->entry(), 1));
+  EXPECT_EQ(32, Call(test->entry(), 10));
+  EXPECT_EQ(-43, Call(test->entry(), -1));
+  EXPECT_EQ(-36, Call(test->entry(), -10));
+}
+
+ASSEMBLER_TEST_GENERATE(XorImmediate2, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ xori(A0, A0, -42);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(XorImmediate2, test) {
+  EXPECT_DISASSEMBLY(
+      "fd654513 xori a0, a0, -42\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-42, Call(test->entry(), 0));
+  EXPECT_EQ(-41, Call(test->entry(), 1));
+  EXPECT_EQ(-36, Call(test->entry(), 10));
+  EXPECT_EQ(41, Call(test->entry(), -1));
+  EXPECT_EQ(32, Call(test->entry(), -10));
+}
+
+ASSEMBLER_TEST_GENERATE(OrImmediate1, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ ori(A0, A0, -6);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(OrImmediate1, test) {
+  EXPECT_DISASSEMBLY(
+      "ffa56513 ori a0, a0, -6\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-6, Call(test->entry(), 0));
+  EXPECT_EQ(-5, Call(test->entry(), 1));
+  EXPECT_EQ(-5, Call(test->entry(), 11));
+  EXPECT_EQ(-1, Call(test->entry(), -1));
+  EXPECT_EQ(-1, Call(test->entry(), -11));
+}
+
+ASSEMBLER_TEST_GENERATE(OrImmediate2, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ ori(A0, A0, 6);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(OrImmediate2, test) {
+  EXPECT_DISASSEMBLY(
+      "00656513 ori a0, a0, 6\n"
+      "00008067 ret\n");
+  EXPECT_EQ(6, Call(test->entry(), 0));
+  EXPECT_EQ(7, Call(test->entry(), 1));
+  EXPECT_EQ(15, Call(test->entry(), 11));
+  EXPECT_EQ(-1, Call(test->entry(), -1));
+  EXPECT_EQ(-9, Call(test->entry(), -11));
+}
+
+ASSEMBLER_TEST_GENERATE(AndImmediate1, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ andi(A0, A0, -6);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AndImmediate1, test) {
+  EXPECT_DISASSEMBLY(
+      "ffa57513 andi a0, a0, -6\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(0, Call(test->entry(), 1));
+  EXPECT_EQ(10, Call(test->entry(), 11));
+  EXPECT_EQ(-6, Call(test->entry(), -1));
+  EXPECT_EQ(-16, Call(test->entry(), -11));
+}
+
+ASSEMBLER_TEST_GENERATE(AndImmediate2, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ andi(A0, A0, 6);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AndImmediate2, test) {
+  EXPECT_DISASSEMBLY(
+      "00657513 andi a0, a0, 6\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(0, Call(test->entry(), 1));
+  EXPECT_EQ(2, Call(test->entry(), 11));
+  EXPECT_EQ(6, Call(test->entry(), -1));
+  EXPECT_EQ(4, Call(test->entry(), -11));
+}
+
+ASSEMBLER_TEST_GENERATE(ShiftLeftLogicalImmediate, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ slli(A0, A0, 2);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ShiftLeftLogicalImmediate, test) {
+  EXPECT_DISASSEMBLY(
+      "00251513 slli a0, a0, 0x2\n"
+      "00008067 ret\n");
+  EXPECT_EQ(84, Call(test->entry(), 21));
+  EXPECT_EQ(4, Call(test->entry(), 1));
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(-4, Call(test->entry(), -1));
+  EXPECT_EQ(-84, Call(test->entry(), -21));
+}
+
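+// Added note: shifting left by XLEN - 1 moves bit 0 into the sign bit, so
+// odd inputs yield kMinIntX and even inputs yield 0.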
+ASSEMBLER_TEST_GENERATE(ShiftLeftLogicalImmediate2, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ slli(A0, A0, XLEN - 1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ShiftLeftLogicalImmediate2, test) {
+#if XLEN == 32
+  EXPECT_DISASSEMBLY(
+      "01f51513 slli a0, a0, 0x1f\n"
+      "00008067 ret\n");
+#elif XLEN == 64
+  EXPECT_DISASSEMBLY(
+      "03f51513 slli a0, a0, 0x3f\n"
+      "00008067 ret\n");
+#endif
+  EXPECT_EQ(0, Call(test->entry(), 2));
+  EXPECT_EQ(kMinIntX, Call(test->entry(), 1));
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(kMinIntX, Call(test->entry(), -1));
+  EXPECT_EQ(0, Call(test->entry(), -2));
+}
+
+ASSEMBLER_TEST_GENERATE(ShiftRightLogicalImmediate, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ srli(A0, A0, 2);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ShiftRightLogicalImmediate, test) {
+  EXPECT_DISASSEMBLY(
+      "00255513 srli a0, a0, 0x2\n"
+      "00008067 ret\n");
+  EXPECT_EQ(5, Call(test->entry(), 21));
+  EXPECT_EQ(0, Call(test->entry(), 1));
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(static_cast<intx_t>(static_cast<uintx_t>(-1) >> 2),
+            Call(test->entry(), -1));
+  EXPECT_EQ(static_cast<intx_t>(static_cast<uintx_t>(-21) >> 2),
+            Call(test->entry(), -21));
+}
+
+ASSEMBLER_TEST_GENERATE(ShiftRightLogicalImmediate2, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ srli(A0, A0, XLEN - 1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ShiftRightLogicalImmediate2, test) {
+#if XLEN == 32
+  EXPECT_DISASSEMBLY(
+      "01f55513 srli a0, a0, 0x1f\n"
+      "00008067 ret\n");
+#elif XLEN == 64
+  EXPECT_DISASSEMBLY(
+      "03f55513 srli a0, a0, 0x3f\n"
+      "00008067 ret\n");
+#endif
+  EXPECT_EQ(0, Call(test->entry(), 21));
+  EXPECT_EQ(0, Call(test->entry(), 1));
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(1, Call(test->entry(), -1));
+  EXPECT_EQ(1, Call(test->entry(), -21));
+}
+
+ASSEMBLER_TEST_GENERATE(ShiftRightArithmeticImmediate, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ srai(A0, A0, 2);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ShiftRightArithmeticImmediate, test) {
+  EXPECT_DISASSEMBLY(
+      "40255513 srai a0, a0, 0x2\n"
+      "00008067 ret\n");
+  EXPECT_EQ(5, Call(test->entry(), 21));
+  EXPECT_EQ(0, Call(test->entry(), 1));
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(-1, Call(test->entry(), -1));
+  EXPECT_EQ(-6, Call(test->entry(), -21));
+}
+
+ASSEMBLER_TEST_GENERATE(ShiftRightArithmeticImmediate2, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ srai(A0, A0, XLEN - 1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ShiftRightArithmeticImmediate2, test) {
+#if XLEN == 32
+  EXPECT_DISASSEMBLY(
+      "41f55513 srai a0, a0, 0x1f\n"  // CHECK
+      "00008067 ret\n");
+#elif XLEN == 64
+  EXPECT_DISASSEMBLY(
+      "43f55513 srai a0, a0, 0x3f\n"  // CHECK
+      "00008067 ret\n");
+#endif
+  EXPECT_EQ(0, Call(test->entry(), 21));
+  EXPECT_EQ(0, Call(test->entry(), 1));
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(-1, Call(test->entry(), -1));
+  EXPECT_EQ(-1, Call(test->entry(), -21));
+}
+
+ASSEMBLER_TEST_GENERATE(Add, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ add(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(Add, test) {
+  EXPECT_DISASSEMBLY(
+      "00b50533 add a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(24, Call(test->entry(), 7, 17));
+  EXPECT_EQ(-10, Call(test->entry(), 7, -17));
+  EXPECT_EQ(10, Call(test->entry(), -7, 17));
+  EXPECT_EQ(-24, Call(test->entry(), -7, -17));
+  EXPECT_EQ(24, Call(test->entry(), 17, 7));
+  EXPECT_EQ(10, Call(test->entry(), 17, -7));
+  EXPECT_EQ(-10, Call(test->entry(), -17, 7));
+  EXPECT_EQ(-24, Call(test->entry(), -17, -7));
+}
+
+ASSEMBLER_TEST_GENERATE(Subtract, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sub(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(Subtract, test) {
+  EXPECT_DISASSEMBLY(
+      "40b50533 sub a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-10, Call(test->entry(), 7, 17));
+  EXPECT_EQ(24, Call(test->entry(), 7, -17));
+  EXPECT_EQ(-24, Call(test->entry(), -7, 17));
+  EXPECT_EQ(10, Call(test->entry(), -7, -17));
+  EXPECT_EQ(10, Call(test->entry(), 17, 7));
+  EXPECT_EQ(24, Call(test->entry(), 17, -7));
+  EXPECT_EQ(-24, Call(test->entry(), -17, 7));
+  EXPECT_EQ(-10, Call(test->entry(), -17, -7));
+}
+
+ASSEMBLER_TEST_GENERATE(ShiftLeftLogical, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sll(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ShiftLeftLogical, test) {
+  EXPECT_DISASSEMBLY(
+      "00b51533 sll a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(2176, Call(test->entry(), 17, 7));
+  EXPECT_EQ(-2176, Call(test->entry(), -17, 7));
+  EXPECT_EQ(34, Call(test->entry(), 17, 1));
+  EXPECT_EQ(-34, Call(test->entry(), -17, 1));
+  EXPECT_EQ(17, Call(test->entry(), 17, 0));
+  EXPECT_EQ(-17, Call(test->entry(), -17, 0));
+}
+
+ASSEMBLER_TEST_GENERATE(SetLessThan, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ slt(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SetLessThan, test) {
+  EXPECT_DISASSEMBLY(
+      "00b52533 slt a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 7, 7));
+  EXPECT_EQ(0, Call(test->entry(), -7, -7));
+  EXPECT_EQ(1, Call(test->entry(), 7, 17));
+  EXPECT_EQ(0, Call(test->entry(), 7, -17));
+  EXPECT_EQ(1, Call(test->entry(), -7, 17));
+  EXPECT_EQ(0, Call(test->entry(), -7, -17));
+  EXPECT_EQ(0, Call(test->entry(), 17, 7));
+  EXPECT_EQ(0, Call(test->entry(), 17, -7));
+  EXPECT_EQ(1, Call(test->entry(), -17, 7));
+  EXPECT_EQ(1, Call(test->entry(), -17, -7));
+}
+
+ASSEMBLER_TEST_GENERATE(SetLessThanUnsigned, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sltu(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SetLessThanUnsigned, test) {
+  EXPECT_DISASSEMBLY(
+      "00b53533 sltu a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 7, 7));
+  EXPECT_EQ(0, Call(test->entry(), -7, -7));
+  EXPECT_EQ(1, Call(test->entry(), 7, 17));
+  EXPECT_EQ(1, Call(test->entry(), 7, -17));
+  EXPECT_EQ(0, Call(test->entry(), -7, 17));
+  EXPECT_EQ(0, Call(test->entry(), -7, -17));
+  EXPECT_EQ(0, Call(test->entry(), 17, 7));
+  EXPECT_EQ(1, Call(test->entry(), 17, -7));
+  EXPECT_EQ(0, Call(test->entry(), -17, 7));
+  EXPECT_EQ(1, Call(test->entry(), -17, -7));
+}
+
+ASSEMBLER_TEST_GENERATE(Xor, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ xor_(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(Xor, test) {
+  EXPECT_DISASSEMBLY(
+      "00b54533 xor a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(22, Call(test->entry(), 7, 17));
+  EXPECT_EQ(-24, Call(test->entry(), 7, -17));
+  EXPECT_EQ(-24, Call(test->entry(), -7, 17));
+  EXPECT_EQ(22, Call(test->entry(), -7, -17));
+  EXPECT_EQ(22, Call(test->entry(), 17, 7));
+  EXPECT_EQ(-24, Call(test->entry(), 17, -7));
+  EXPECT_EQ(-24, Call(test->entry(), -17, 7));
+  EXPECT_EQ(22, Call(test->entry(), -17, -7));
+}
+
+ASSEMBLER_TEST_GENERATE(ShiftRightLogical, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ srl(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ShiftRightLogical, test) {
+  EXPECT_DISASSEMBLY(
+      "00b55533 srl a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 17, 7));
+  EXPECT_EQ(static_cast<intx_t>(static_cast<uintx_t>(-17) >> 7),
+            Call(test->entry(), -17, 7));
+  EXPECT_EQ(8, Call(test->entry(), 17, 1));
+  EXPECT_EQ(static_cast<intx_t>(static_cast<uintx_t>(-17) >> 1),
+            Call(test->entry(), -17, 1));
+  EXPECT_EQ(17, Call(test->entry(), 17, 0));
+  EXPECT_EQ(-17, Call(test->entry(), -17, 0));
+}
+
+ASSEMBLER_TEST_GENERATE(ShiftRightArithmetic, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sra(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ShiftRightArithmetic, test) {
+  EXPECT_DISASSEMBLY(
+      "40b55533 sra a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 17, 7));
+  EXPECT_EQ(-1, Call(test->entry(), -17, 7));
+  EXPECT_EQ(8, Call(test->entry(), 17, 1));
+  EXPECT_EQ(-9, Call(test->entry(), -17, 1));
+  EXPECT_EQ(17, Call(test->entry(), 17, 0));
+  EXPECT_EQ(-17, Call(test->entry(), -17, 0));
+}
+
+ASSEMBLER_TEST_GENERATE(Or, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ or_(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(Or, test) {
+  EXPECT_DISASSEMBLY(
+      "00b56533 or a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(23, Call(test->entry(), 7, 17));
+  EXPECT_EQ(-17, Call(test->entry(), 7, -17));
+  EXPECT_EQ(-7, Call(test->entry(), -7, 17));
+  EXPECT_EQ(-1, Call(test->entry(), -7, -17));
+  EXPECT_EQ(23, Call(test->entry(), 17, 7));
+  EXPECT_EQ(-7, Call(test->entry(), 17, -7));
+  EXPECT_EQ(-17, Call(test->entry(), -17, 7));
+  EXPECT_EQ(-1, Call(test->entry(), -17, -7));
+}
+
+ASSEMBLER_TEST_GENERATE(And, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ and_(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(And, test) {
+  EXPECT_DISASSEMBLY(
+      "00b57533 and a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(1, Call(test->entry(), 7, 17));
+  EXPECT_EQ(7, Call(test->entry(), 7, -17));
+  EXPECT_EQ(17, Call(test->entry(), -7, 17));
+  EXPECT_EQ(-23, Call(test->entry(), -7, -17));
+  EXPECT_EQ(1, Call(test->entry(), 17, 7));
+  EXPECT_EQ(17, Call(test->entry(), 17, -7));
+  EXPECT_EQ(7, Call(test->entry(), -17, 7));
+  EXPECT_EQ(-23, Call(test->entry(), -17, -7));
+}
+
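+// Added note: fence() defaults to all predecessor/successor bits, so both it
+// and fence(kAll, kAll) encode as 0x0ff0000f and disassemble as plain "fence".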
+ASSEMBLER_TEST_GENERATE(Fence, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fence();
+  __ fence(kRead, kWrite);
+  __ fence(kInput, kOutput);
+  __ fence(kMemory, kMemory);
+  __ fence(kAll, kAll);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(Fence, test) {
+  EXPECT_DISASSEMBLY(
+      "0ff0000f fence\n"
+      "0210000f fence r,w\n"
+      "0840000f fence i,o\n"
+      "0330000f fence rw,rw\n"
+      "0ff0000f fence\n"
+      "00008067 ret\n");
+  Call(test->entry());
+}
+
+ASSEMBLER_TEST_GENERATE(InstructionFence, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fencei();
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(InstructionFence, test) {
+  EXPECT_DISASSEMBLY(
+      "0000100f fence.i\n"
+      "00008067 ret\n");
+  Call(test->entry());
+}
+
+ASSEMBLER_TEST_GENERATE(EnvironmentCall, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ ecall();
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(EnvironmentCall, test) {
+  EXPECT_DISASSEMBLY(
+      "00000073 ecall\n"
+      "00008067 ret\n");
+
+  // Not running: would trap.
+}
+
+ASSEMBLER_TEST_GENERATE(EnvironmentBreak, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ ebreak();
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(EnvironmentBreak, test) {
+  EXPECT_DISASSEMBLY(
+      "00100073 ebreak\n"
+      "00008067 ret\n");
+
+  // Not running: would trap.
+}
+
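+// Added note: in the expectations below the disassembler prints S1 by its VM
+// alias "thr" (the reserved thread register).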
+ASSEMBLER_TEST_GENERATE(ControlStatusRegisters, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ csrrw(T0, 0x123, S1);
+  __ csrrs(T1, 0x123, S2);
+  __ csrrc(T2, 0x123, S3);
+  __ csrr(T3, 0x123);
+  __ csrw(0x123, S4);
+  __ csrs(0x123, S5);
+  __ csrc(0x123, S6);
+  __ csrrwi(T1, 0x123, 1);
+  __ csrrsi(T2, 0x123, 2);
+  __ csrrci(T3, 0x123, 3);
+  __ csrwi(0x123, 4);
+  __ csrsi(0x123, 5);
+  __ csrci(0x123, 6);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ControlStatusRegisters, test) {
+  EXPECT_DISASSEMBLY(
+      "123492f3 csrrw t0, 0x123, thr\n"
+      "12392373 csrrs t1, 0x123, s2\n"
+      "1239b3f3 csrrc t2, 0x123, s3\n"
+      "12302e73 csrr t3, 0x123\n"
+      "123a1073 csrw 0x123, s4\n"
+      "123aa073 csrs 0x123, s5\n"
+      "123b3073 csrc 0x123, s6\n"
+      "1230d373 csrrwi t1, 0x123, 1\n"
+      "123163f3 csrrsi t2, 0x123, 2\n"
+      "1231fe73 csrrci t3, 0x123, 3\n"
+      "12325073 csrwi 0x123, 4\n"
+      "1232e073 csrsi 0x123, 5\n"
+      "12337073 csrci 0x123, 6\n"
+      "00008067 ret\n");
+
+  // Not running: would trap.
+}
+
+ASSEMBLER_TEST_GENERATE(Nop, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ nop();
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(Nop, test) {
+  EXPECT_DISASSEMBLY(
+      "00000013 nop\n"
+      "00008067 ret\n");
+  EXPECT_EQ(123, Call(test->entry(), 123));
+}
+
+ASSEMBLER_TEST_GENERATE(Move, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ mv(A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(Move, test) {
+  EXPECT_DISASSEMBLY(
+      "00058513 mv a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(36, Call(test->entry(), 42, 36));
+}
+
+ASSEMBLER_TEST_GENERATE(Not, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ not_(A0, A0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(Not, test) {
+  EXPECT_DISASSEMBLY(
+      "fff54513 not a0, a0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(~42, Call(test->entry(), 42));
+  EXPECT_EQ(~-42, Call(test->entry(), -42));
+}
+
+ASSEMBLER_TEST_GENERATE(Negate, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ neg(A0, A0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(Negate, test) {
+  EXPECT_DISASSEMBLY(
+      "40a00533 neg a0, a0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-42, Call(test->entry(), 42));
+  EXPECT_EQ(42, Call(test->entry(), -42));
+}
+
+ASSEMBLER_TEST_GENERATE(SetNotEqualToZero, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ snez(A0, A0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SetNotEqualToZero, test) {
+  EXPECT_DISASSEMBLY(
+      "00a03533 snez a0, a0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(1, Call(test->entry(), -42));
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(1, Call(test->entry(), 42));
+}
+
+ASSEMBLER_TEST_GENERATE(SetEqualToZero, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ seqz(A0, A0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SetEqualToZero, test) {
+  EXPECT_DISASSEMBLY(
+      "00153513 seqz a0, a0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), -42));
+  EXPECT_EQ(1, Call(test->entry(), 0));
+  EXPECT_EQ(0, Call(test->entry(), 42));
+}
+
+ASSEMBLER_TEST_GENERATE(SetLessThanZero, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sltz(A0, A0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SetLessThanZero, test) {
+  EXPECT_DISASSEMBLY(
+      "00052533 sltz a0, a0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(1, Call(test->entry(), -42));
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(0, Call(test->entry(), 42));
+}
+
+ASSEMBLER_TEST_GENERATE(SetGreaterThanZero, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sgtz(A0, A0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SetGreaterThanZero, test) {
+  EXPECT_DISASSEMBLY(
+      "00a02533 sgtz a0, a0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), -42));
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(1, Call(test->entry(), 42));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchEqualZero, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  Label label;
+  __ beqz(A0, &label);
+  __ li(A0, 3);
+  __ ret();
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchEqualZero, test) {
+  EXPECT_DISASSEMBLY(
+      "00050663 beqz a0, +12\n"
+      "00300513 li a0, 3\n"
+      "00008067 ret\n"
+      "00400513 li a0, 4\n"
+      "00008067 ret\n");
+  EXPECT_EQ(3, Call(test->entry(), -42));
+  EXPECT_EQ(4, Call(test->entry(), 0));
+  EXPECT_EQ(3, Call(test->entry(), 42));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchNotEqualZero, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  Label label;
+  __ bnez(A0, &label);
+  __ li(A0, 3);
+  __ ret();
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchNotEqualZero, test) {
+  EXPECT_DISASSEMBLY(
+      "00051663 bnez a0, +12\n"
+      "00300513 li a0, 3\n"
+      "00008067 ret\n"
+      "00400513 li a0, 4\n"
+      "00008067 ret\n");
+  EXPECT_EQ(4, Call(test->entry(), -42));
+  EXPECT_EQ(3, Call(test->entry(), 0));
+  EXPECT_EQ(4, Call(test->entry(), 42));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchLessOrEqualZero, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  Label label;
+  __ blez(A0, &label);
+  __ li(A0, 3);
+  __ ret();
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchLessOrEqualZero, test) {
+  EXPECT_DISASSEMBLY(
+      "00a05663 blez a0, +12\n"
+      "00300513 li a0, 3\n"
+      "00008067 ret\n"
+      "00400513 li a0, 4\n"
+      "00008067 ret\n");
+  EXPECT_EQ(4, Call(test->entry(), -42));
+  EXPECT_EQ(4, Call(test->entry(), 0));
+  EXPECT_EQ(3, Call(test->entry(), 42));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchGreaterOrEqualZero, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  Label label;
+  __ bgez(A0, &label);
+  __ li(A0, 3);
+  __ ret();
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchGreaterOrEqualZero, test) {
+  EXPECT_DISASSEMBLY(
+      "00055663 bgez a0, +12\n"
+      "00300513 li a0, 3\n"
+      "00008067 ret\n"
+      "00400513 li a0, 4\n"
+      "00008067 ret\n");
+  EXPECT_EQ(3, Call(test->entry(), -42));
+  EXPECT_EQ(4, Call(test->entry(), 0));
+  EXPECT_EQ(4, Call(test->entry(), 42));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchLessThanZero, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  Label label;
+  __ bltz(A0, &label);
+  __ li(A0, 3);
+  __ ret();
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchLessThanZero, test) {
+  EXPECT_DISASSEMBLY(
+      "00054663 bltz a0, +12\n"
+      "00300513 li a0, 3\n"
+      "00008067 ret\n"
+      "00400513 li a0, 4\n"
+      "00008067 ret\n");
+  EXPECT_EQ(4, Call(test->entry(), -42));
+  EXPECT_EQ(3, Call(test->entry(), 0));
+  EXPECT_EQ(3, Call(test->entry(), 42));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchGreaterThanZero, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  Label label;
+  __ bgtz(A0, &label);
+  __ li(A0, 3);
+  __ ret();
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchGreaterThanZero, test) {
+  EXPECT_DISASSEMBLY(
+      "00a04663 bgtz a0, +12\n"
+      "00300513 li a0, 3\n"
+      "00008067 ret\n"
+      "00400513 li a0, 4\n"
+      "00008067 ret\n");
+  EXPECT_EQ(3, Call(test->entry(), -42));
+  EXPECT_EQ(3, Call(test->entry(), 0));
+  EXPECT_EQ(4, Call(test->entry(), 42));
+}
+
+#if XLEN >= 64
+ASSEMBLER_TEST_GENERATE(AddImmediateWord1, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ addiw(A0, A0, 42);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AddImmediateWord1, test) {
+  EXPECT_DISASSEMBLY(
+      "02a5051b addiw a0, a0, 42\n"
+      "00008067 ret\n");
+  EXPECT_EQ(42, Call(test->entry(), 0));
+  EXPECT_EQ(40, Call(test->entry(), -2));
+  EXPECT_EQ(0, Call(test->entry(), -42));
+}
+
+ASSEMBLER_TEST_GENERATE(AddImmediateWord2, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ addiw(A0, A0, -42);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AddImmediateWord2, test) {
+  EXPECT_DISASSEMBLY(
+      "fd65051b addiw a0, a0, -42\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-42, Call(test->entry(), 0));
+  EXPECT_EQ(-44, Call(test->entry(), -2));
+  EXPECT_EQ(38, Call(test->entry(), 80));
+}
+
+ASSEMBLER_TEST_GENERATE(ShiftLeftLogicalImmediateWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ slliw(A0, A0, 2);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ShiftLeftLogicalImmediateWord, test) {
+  EXPECT_DISASSEMBLY(
+      "0025151b slliw a0, a0, 0x2\n"
+      "00008067 ret\n");
+  EXPECT_EQ(84, Call(test->entry(), 21));
+  EXPECT_EQ(4, Call(test->entry(), 1));
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(-4, Call(test->entry(), -1));
+  EXPECT_EQ(-84, Call(test->entry(), -21));
+}
+
+ASSEMBLER_TEST_GENERATE(ShiftRightLogicalImmediateWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ srliw(A0, A0, 2);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ShiftRightLogicalImmediateWord, test) {
+  EXPECT_DISASSEMBLY(
+      "0025551b srliw a0, a0, 0x2\n"
+      "00008067 ret\n");
+  EXPECT_EQ(5, Call(test->entry(), 21));
+  EXPECT_EQ(0, Call(test->entry(), 1));
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(sign_extend(static_cast<uint32_t>(-1) >> 2),
+            Call(test->entry(), -1));
+  EXPECT_EQ(sign_extend(static_cast<uint32_t>(-21) >> 2),
+            Call(test->entry(), -21));
+}
+
+ASSEMBLER_TEST_GENERATE(ShiftRightArithmeticImmediateWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sraiw(A0, A0, 2);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ShiftRightArithmeticImmediateWord, test) {
+  EXPECT_DISASSEMBLY(
+      "4025551b sraiw a0, a0, 0x2\n"
+      "00008067 ret\n");
+  EXPECT_EQ(5, Call(test->entry(), 21));
+  EXPECT_EQ(0, Call(test->entry(), 1));
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(-1, Call(test->entry(), -1));
+  EXPECT_EQ(-6, Call(test->entry(), -21));
+}
+
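+// Added note: the *w instructions operate on the low 32 bits and sign-extend
+// the 32-bit result, so 0x200000002 + 0x100000001 == 3.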
+ASSEMBLER_TEST_GENERATE(AddWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ addw(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AddWord, test) {
+  EXPECT_DISASSEMBLY(
+      "00b5053b addw a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(24, Call(test->entry(), 7, 17));
+  EXPECT_EQ(-10, Call(test->entry(), 7, -17));
+  EXPECT_EQ(10, Call(test->entry(), -7, 17));
+  EXPECT_EQ(-24, Call(test->entry(), -7, -17));
+  EXPECT_EQ(24, Call(test->entry(), 17, 7));
+  EXPECT_EQ(10, Call(test->entry(), 17, -7));
+  EXPECT_EQ(-10, Call(test->entry(), -17, 7));
+  EXPECT_EQ(-24, Call(test->entry(), -17, -7));
+  EXPECT_EQ(3, Call(test->entry(), 0x200000002, 0x100000001));
+}
+
+ASSEMBLER_TEST_GENERATE(SubtractWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ subw(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SubtractWord, test) {
+  EXPECT_DISASSEMBLY(
+      "40b5053b subw a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-10, Call(test->entry(), 7, 17));
+  EXPECT_EQ(24, Call(test->entry(), 7, -17));
+  EXPECT_EQ(-24, Call(test->entry(), -7, 17));
+  EXPECT_EQ(10, Call(test->entry(), -7, -17));
+  EXPECT_EQ(10, Call(test->entry(), 17, 7));
+  EXPECT_EQ(24, Call(test->entry(), 17, -7));
+  EXPECT_EQ(-24, Call(test->entry(), -17, 7));
+  EXPECT_EQ(-10, Call(test->entry(), -17, -7));
+  EXPECT_EQ(1, Call(test->entry(), 0x200000002, 0x100000001));
+}
+
+ASSEMBLER_TEST_GENERATE(ShiftLeftLogicalWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sllw(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ShiftLeftLogicalWord, test) {
+  EXPECT_DISASSEMBLY(
+      "00b5153b sllw a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(2176, Call(test->entry(), 17, 7));
+  EXPECT_EQ(-2176, Call(test->entry(), -17, 7));
+  EXPECT_EQ(34, Call(test->entry(), 17, 1));
+  EXPECT_EQ(-34, Call(test->entry(), -17, 1));
+  EXPECT_EQ(17, Call(test->entry(), 17, 0));
+  EXPECT_EQ(-17, Call(test->entry(), -17, 0));
+  EXPECT_EQ(0x10, Call(test->entry(), 0x10000001, 4));
+}
+
+ASSEMBLER_TEST_GENERATE(ShiftRightLogicalWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ srlw(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ShiftRightLogicalWord, test) {
+  EXPECT_DISASSEMBLY(
+      "00b5553b srlw a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 17, 7));
+  EXPECT_EQ(sign_extend(static_cast<uint32_t>(-17) >> 7),
+            Call(test->entry(), -17, 7));
+  EXPECT_EQ(8, Call(test->entry(), 17, 1));
+  EXPECT_EQ(sign_extend(static_cast<uint32_t>(-17) >> 1),
+            Call(test->entry(), -17, 1));
+  EXPECT_EQ(17, Call(test->entry(), 17, 0));
+  EXPECT_EQ(-17, Call(test->entry(), -17, 0));
+}
+
+ASSEMBLER_TEST_GENERATE(ShiftRightArithmeticWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sraw(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ShiftRightArithmeticWord, test) {
+  EXPECT_DISASSEMBLY(
+      "40b5553b sraw a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 17, 7));
+  EXPECT_EQ(-1, Call(test->entry(), -17, 7));
+  EXPECT_EQ(8, Call(test->entry(), 17, 1));
+  EXPECT_EQ(-9, Call(test->entry(), -17, 1));
+  EXPECT_EQ(17, Call(test->entry(), 17, 0));
+  EXPECT_EQ(-17, Call(test->entry(), -17, 0));
+}
+
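+// Added note: only the low 32 bits participate, so 0x10FFFFFFFF truncates to
+// -1 and -(-1) == 1.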
+ASSEMBLER_TEST_GENERATE(NegateWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ negw(A0, A0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(NegateWord, test) {
+  EXPECT_DISASSEMBLY(
+      "40a0053b negw a0, a0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(-42, Call(test->entry(), 42));
+  EXPECT_EQ(42, Call(test->entry(), -42));
+  EXPECT_EQ(1, Call(test->entry(), 0x10FFFFFFFF));
+}
+
+ASSEMBLER_TEST_GENERATE(SignExtendWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sextw(A0, A0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SignExtendWord, test) {
+  EXPECT_DISASSEMBLY(
+      "0005051b sext.w a0, a0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(42, Call(test->entry(), 42));
+  EXPECT_EQ(-42, Call(test->entry(), -42));
+  EXPECT_EQ(-1, Call(test->entry(), 0x10FFFFFFFF));
+}
+#endif  // XLEN >= 64
+
+ASSEMBLER_TEST_GENERATE(Multiply, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ mul(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(Multiply, test) {
+  EXPECT_DISASSEMBLY(
+      "02b50533 mul a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(68, Call(test->entry(), 4, 17));
+  EXPECT_EQ(-68, Call(test->entry(), -4, 17));
+  EXPECT_EQ(-68, Call(test->entry(), 4, -17));
+  EXPECT_EQ(68, Call(test->entry(), -4, -17));
+  EXPECT_EQ(68, Call(test->entry(), 17, 4));
+  EXPECT_EQ(-68, Call(test->entry(), -17, 4));
+  EXPECT_EQ(-68, Call(test->entry(), 17, -4));
+  EXPECT_EQ(68, Call(test->entry(), -17, -4));
+}
+
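+// Added note: mulh returns the upper XLEN bits of the full signed product;
+// for these small operands that is 0 when the product is positive and -1 when
+// it is negative.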
+ASSEMBLER_TEST_GENERATE(MultiplyHigh, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ mulh(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(MultiplyHigh, test) {
+  EXPECT_DISASSEMBLY(
+      "02b51533 mulh a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 4, 17));
+  EXPECT_EQ(-1, Call(test->entry(), -4, 17));
+  EXPECT_EQ(-1, Call(test->entry(), 4, -17));
+  EXPECT_EQ(0, Call(test->entry(), -4, -17));
+  EXPECT_EQ(0, Call(test->entry(), 17, 4));
+  EXPECT_EQ(-1, Call(test->entry(), -17, 4));
+  EXPECT_EQ(-1, Call(test->entry(), 17, -4));
+  EXPECT_EQ(0, Call(test->entry(), -17, -4));
+}
+
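+// Added note: mulhsu treats rs1 as signed and rs2 as unsigned, e.g.
+// 4 * (uintx_t)-17 is 4 * 2^XLEN - 68, whose upper half is 3.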
+ASSEMBLER_TEST_GENERATE(MultiplyHighSignedUnsigned, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ mulhsu(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(MultiplyHighSignedUnsigned, test) {
+  EXPECT_DISASSEMBLY(
+      "02b52533 mulhsu a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 4, 17));
+  EXPECT_EQ(-1, Call(test->entry(), -4, 17));
+  EXPECT_EQ(3, Call(test->entry(), 4, -17));
+  EXPECT_EQ(-4, Call(test->entry(), -4, -17));
+  EXPECT_EQ(0, Call(test->entry(), 17, 4));
+  EXPECT_EQ(-1, Call(test->entry(), -17, 4));
+  EXPECT_EQ(16, Call(test->entry(), 17, -4));
+  EXPECT_EQ(-17, Call(test->entry(), -17, -4));
+}
+
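+// Added note: mulhu treats both operands as unsigned, e.g. (uintx_t)-4 * 17
+// is 17 * 2^XLEN - 68, whose upper half is 16.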
+ASSEMBLER_TEST_GENERATE(MultiplyHighUnsigned, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ mulhu(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(MultiplyHighUnsigned, test) {
+  EXPECT_DISASSEMBLY(
+      "02b53533 mulhu a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 4, 17));
+  EXPECT_EQ(16, Call(test->entry(), -4, 17));
+  EXPECT_EQ(3, Call(test->entry(), 4, -17));
+  EXPECT_EQ(-21, Call(test->entry(), -4, -17));
+  EXPECT_EQ(0, Call(test->entry(), 17, 4));
+  EXPECT_EQ(3, Call(test->entry(), -17, 4));
+  EXPECT_EQ(16, Call(test->entry(), 17, -4));
+  EXPECT_EQ(-21, Call(test->entry(), -17, -4));
+}
+
+ASSEMBLER_TEST_GENERATE(Divide, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ div(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(Divide, test) {
+  EXPECT_DISASSEMBLY(
+      "02b54533 div a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 4, 17));
+  EXPECT_EQ(0, Call(test->entry(), -4, 17));
+  EXPECT_EQ(0, Call(test->entry(), 4, -17));
+  EXPECT_EQ(0, Call(test->entry(), -4, -17));
+  EXPECT_EQ(4, Call(test->entry(), 17, 4));
+  EXPECT_EQ(-4, Call(test->entry(), -17, 4));
+  EXPECT_EQ(-4, Call(test->entry(), 17, -4));
+  EXPECT_EQ(4, Call(test->entry(), -17, -4));
+}
+
+ASSEMBLER_TEST_GENERATE(DivideUnsigned, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ divu(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DivideUnsigned, test) {
+  EXPECT_DISASSEMBLY(
+      "02b55533 divu a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 4, 17));
+#if XLEN == 32
+  EXPECT_EQ(252645134, Call(test->entry(), -4, 17));
+#else
+  EXPECT_EQ(1085102592571150094, Call(test->entry(), -4, 17));
+#endif
+  EXPECT_EQ(0, Call(test->entry(), 4, -17));
+  EXPECT_EQ(1, Call(test->entry(), -4, -17));
+  EXPECT_EQ(4, Call(test->entry(), 17, 4));
+#if XLEN == 32
+  EXPECT_EQ(1073741819, Call(test->entry(), -17, 4));
+#else
+  EXPECT_EQ(4611686018427387899, Call(test->entry(), -17, 4));
+#endif
+  EXPECT_EQ(0, Call(test->entry(), 17, -4));
+  EXPECT_EQ(0, Call(test->entry(), -17, -4));
+}
+
+ASSEMBLER_TEST_GENERATE(Remainder, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ rem(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(Remainder, test) {
+  EXPECT_DISASSEMBLY(
+      "02b56533 rem a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(4, Call(test->entry(), 4, 17));
+  EXPECT_EQ(-4, Call(test->entry(), -4, 17));
+  EXPECT_EQ(4, Call(test->entry(), 4, -17));
+  EXPECT_EQ(-4, Call(test->entry(), -4, -17));
+  EXPECT_EQ(1, Call(test->entry(), 17, 4));
+  EXPECT_EQ(-1, Call(test->entry(), -17, 4));
+  EXPECT_EQ(1, Call(test->entry(), 17, -4));
+  EXPECT_EQ(-1, Call(test->entry(), -17, -4));
+}
+
+ASSEMBLER_TEST_GENERATE(RemainderUnsigned, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ remu(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(RemainderUnsigned, test) {
+  EXPECT_DISASSEMBLY(
+      "02b57533 remu a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(4, Call(test->entry(), 4, 17));
+  EXPECT_EQ(14, Call(test->entry(), -4, 17));
+  EXPECT_EQ(4, Call(test->entry(), 4, -17));
+  EXPECT_EQ(13, Call(test->entry(), -4, -17));
+  EXPECT_EQ(1, Call(test->entry(), 17, 4));
+  EXPECT_EQ(3, Call(test->entry(), -17, 4));
+  EXPECT_EQ(17, Call(test->entry(), 17, -4));
+  EXPECT_EQ(-17, Call(test->entry(), -17, -4));
+}
+
+#if XLEN >= 64
+ASSEMBLER_TEST_GENERATE(MultiplyWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ mulw(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(MultiplyWord, test) {
+  EXPECT_DISASSEMBLY(
+      "02b5053b mulw a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(68, Call(test->entry(), 4, 17));
+  EXPECT_EQ(-68, Call(test->entry(), -4, 17));
+  EXPECT_EQ(-68, Call(test->entry(), 4, -17));
+  EXPECT_EQ(68, Call(test->entry(), -4, -17));
+  EXPECT_EQ(68, Call(test->entry(), 17, 4));
+  EXPECT_EQ(-68, Call(test->entry(), -17, 4));
+  EXPECT_EQ(-68, Call(test->entry(), 17, -4));
+  EXPECT_EQ(68, Call(test->entry(), -17, -4));
+}
+
+ASSEMBLER_TEST_GENERATE(DivideWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ divw(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DivideWord, test) {
+  EXPECT_DISASSEMBLY(
+      "02b5453b divw a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 4, 17));
+  EXPECT_EQ(0, Call(test->entry(), -4, 17));
+  EXPECT_EQ(0, Call(test->entry(), 4, -17));
+  EXPECT_EQ(0, Call(test->entry(), -4, -17));
+  EXPECT_EQ(4, Call(test->entry(), 17, 4));
+  EXPECT_EQ(-4, Call(test->entry(), -17, 4));
+  EXPECT_EQ(-4, Call(test->entry(), 17, -4));
+  EXPECT_EQ(4, Call(test->entry(), -17, -4));
+}
+
+ASSEMBLER_TEST_GENERATE(DivideUnsignedWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ divuw(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DivideUnsignedWord, test) {
+  EXPECT_DISASSEMBLY(
+      "02b5553b divuw a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 4, 17));
+  EXPECT_EQ(252645134, Call(test->entry(), -4, 17));
+  EXPECT_EQ(0, Call(test->entry(), 4, -17));
+  EXPECT_EQ(1, Call(test->entry(), -4, -17));
+  EXPECT_EQ(4, Call(test->entry(), 17, 4));
+  EXPECT_EQ(1073741819, Call(test->entry(), -17, 4));
+  EXPECT_EQ(0, Call(test->entry(), 17, -4));
+  EXPECT_EQ(0, Call(test->entry(), -17, -4));
+}
+
+ASSEMBLER_TEST_GENERATE(RemainderWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ remw(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(RemainderWord, test) {
+  EXPECT_DISASSEMBLY(
+      "02b5653b remw a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(4, Call(test->entry(), 4, 17));
+  EXPECT_EQ(-4, Call(test->entry(), -4, 17));
+  EXPECT_EQ(4, Call(test->entry(), 4, -17));
+  EXPECT_EQ(-4, Call(test->entry(), -4, -17));
+  EXPECT_EQ(1, Call(test->entry(), 17, 4));
+  EXPECT_EQ(-1, Call(test->entry(), -17, 4));
+  EXPECT_EQ(1, Call(test->entry(), 17, -4));
+  EXPECT_EQ(-1, Call(test->entry(), -17, -4));
+}
+
+ASSEMBLER_TEST_GENERATE(RemainderUnsignedWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ remuw(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(RemainderUnsignedWord, test) {
+  EXPECT_DISASSEMBLY(
+      "02b5753b remuw a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(4, Call(test->entry(), 4, 17));
+  EXPECT_EQ(14, Call(test->entry(), -4, 17));
+  EXPECT_EQ(4, Call(test->entry(), 4, -17));
+  EXPECT_EQ(13, Call(test->entry(), -4, -17));
+  EXPECT_EQ(1, Call(test->entry(), 17, 4));
+  EXPECT_EQ(3, Call(test->entry(), -17, 4));
+  EXPECT_EQ(17, Call(test->entry(), 17, -4));
+  EXPECT_EQ(-17, Call(test->entry(), -17, -4));
+}
+#endif
+
+ASSEMBLER_TEST_GENERATE(LoadReserveStoreConditionalWord_Success, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lrw(T0, Address(A0));
+  __ addi(T0, T0, 1);
+  __ scw(A0, T0, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadReserveStoreConditionalWord_Success, test) {
+  EXPECT_DISASSEMBLY(
+      "100522af lr.w t0, (a0)\n"
+      "00128293 addi t0, t0, 1\n"
+      "1855252f sc.w a0, t0, (a0)\n"
+      "00008067 ret\n");
+
+  int32_t* value = reinterpret_cast<int32_t*>(malloc(sizeof(int32_t)));
+  *value = 0b1100;
+  EXPECT_EQ(0, Call(test->entry(), reinterpret_cast<intx_t>(value)));
+  EXPECT_EQ(0b1101, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadReserveStoreConditionalWord_Failure, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ li(T0, 42);
+  __ scw(A0, T0, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadReserveStoreConditionalWord_Failure, test) {
+  EXPECT_DISASSEMBLY(
+      "02a00293 li t0, 42\n"
+      "1855252f sc.w a0, t0, (a0)\n"
+      "00008067 ret\n");
+
+  int32_t* value = reinterpret_cast<int32_t*>(malloc(sizeof(int32_t)));
+  *value = 0b1100;
+  EXPECT(Call(test->entry(), reinterpret_cast<intx_t>(value)) != 0);
+  EXPECT_EQ(0b1100, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoSwapWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amoswapw(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoSwapWord, test) {
+  EXPECT_DISASSEMBLY(
+      "08b5252f amoswap.w a0, a1, (a0)\n"
+      "00008067 ret\n");
+
+  int32_t* value = reinterpret_cast<int32_t*>(malloc(sizeof(int32_t)));
+  *value = 0b1100;
+  EXPECT_EQ(0b1100,
+            Call(test->entry(), reinterpret_cast<intx_t>(value), 0b1010));
+  EXPECT_EQ(0b1010, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoAddWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amoaddw(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoAddWord, test) {
+  EXPECT_DISASSEMBLY(
+      "00b5252f amoadd.w a0, a1, (a0)\n"
+      "00008067 ret\n");
+
+  int32_t* value = reinterpret_cast<int32_t*>(malloc(sizeof(int32_t)));
+  *value = 42;
+  EXPECT_EQ(42, Call(test->entry(), reinterpret_cast<intx_t>(value), 10));
+  EXPECT_EQ(52, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoXorWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amoxorw(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoXorWord, test) {
+  EXPECT_DISASSEMBLY(
+      "20b5252f amoxor.w a0, a1, (a0)\n"
+      "00008067 ret\n");
+
+  int32_t* value = reinterpret_cast<int32_t*>(malloc(sizeof(int32_t)));
+  *value = 0b1100;
+  EXPECT_EQ(0b1100,
+            Call(test->entry(), reinterpret_cast<intx_t>(value), 0b1010));
+  EXPECT_EQ(0b0110, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoAndWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amoandw(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoAndWord, test) {
+  EXPECT_DISASSEMBLY(
+      "60b5252f amoand.w a0, a1, (a0)\n"
+      "00008067 ret\n");
+
+  int32_t* value = reinterpret_cast<int32_t*>(malloc(sizeof(int32_t)));
+  *value = 0b1100;
+  EXPECT_EQ(0b1100,
+            Call(test->entry(), reinterpret_cast<intx_t>(value), 0b1010));
+  EXPECT_EQ(0b1000, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoOrWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amoorw(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoOrWord, test) {
+  EXPECT_DISASSEMBLY(
+      "40b5252f amoor.w a0, a1, (a0)\n"
+      "00008067 ret\n");
+
+  int32_t* value = reinterpret_cast<int32_t*>(malloc(sizeof(int32_t)));
+  *value = 0b1100;
+  EXPECT_EQ(0b1100,
+            Call(test->entry(), reinterpret_cast<intx_t>(value), 0b1010));
+  EXPECT_EQ(0b1110, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoMinWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amominw(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoMinWord, test) {
+  EXPECT_DISASSEMBLY(
+      "80b5252f amomin.w a0, a1, (a0)\n"
+      "00008067 ret\n");
+
+  int32_t* value = reinterpret_cast<int32_t*>(malloc(sizeof(int32_t)));
+  *value = -7;
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -4));
+  EXPECT_EQ(-7, *value);
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -7));
+  EXPECT_EQ(-7, *value);
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -11));
+  EXPECT_EQ(-11, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoMaxWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amomaxw(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoMaxWord, test) {
+  EXPECT_DISASSEMBLY(
+      "a0b5252f amomax.w a0, a1, (a0)\n"
+      "00008067 ret\n");
+
+  int32_t* value = reinterpret_cast<int32_t*>(malloc(sizeof(int32_t)));
+  *value = -7;
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -11));
+  EXPECT_EQ(-7, *value);
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -7));
+  EXPECT_EQ(-7, *value);
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -4));
+  EXPECT_EQ(-4, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoMinUnsignedWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amominuw(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoMinUnsignedWord, test) {
+  EXPECT_DISASSEMBLY(
+      "c0b5252f amominu.w a0, a1, (a0)\n"
+      "00008067 ret\n");
+
+  int32_t* value = reinterpret_cast<int32_t*>(malloc(sizeof(int32_t)));
+  *value = -7;
+  EXPECT_EQ(sign_extend(static_cast<uint32_t>(-7)),
+            Call(test->entry(), reinterpret_cast<intx_t>(value), -4));
+  EXPECT_EQ(-7, *value);
+  EXPECT_EQ(sign_extend(static_cast<uint32_t>(-7)),
+            Call(test->entry(), reinterpret_cast<intx_t>(value), -7));
+  EXPECT_EQ(-7, *value);
+  EXPECT_EQ(sign_extend(static_cast<uint32_t>(-7)),
+            Call(test->entry(), reinterpret_cast<intx_t>(value), -11));
+  EXPECT_EQ(-11, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoMaxUnsignedWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amomaxuw(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoMaxUnsignedWord, test) {
+  EXPECT_DISASSEMBLY(
+      "e0b5252f amomaxu.w a0, a1, (a0)\n"
+      "00008067 ret\n");
+
+  int32_t* value = reinterpret_cast<int32_t*>(malloc(sizeof(int32_t)));
+  *value = -7;
+  EXPECT_EQ(sign_extend(static_cast<uint32_t>(-7)),
+            Call(test->entry(), reinterpret_cast<intx_t>(value), -11));
+  EXPECT_EQ(-7, *value);
+  EXPECT_EQ(sign_extend(static_cast<uint32_t>(-7)),
+            Call(test->entry(), reinterpret_cast<intx_t>(value), -7));
+  EXPECT_EQ(-7, *value);
+  EXPECT_EQ(sign_extend(static_cast<uint32_t>(-7)),
+            Call(test->entry(), reinterpret_cast<intx_t>(value), -4));
+  EXPECT_EQ(-4, *value);
+}
+
+#if XLEN >= 64
+ASSEMBLER_TEST_GENERATE(LoadReserveStoreConditionalDoubleWord_Success,
+                        assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lrd(T0, Address(A0));
+  __ addi(T0, T0, 1);
+  __ scd(A0, T0, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadReserveStoreConditionalDoubleWord_Success, test) {
+  EXPECT_DISASSEMBLY(
+      "100532af lr.d t0, (a0)\n"
+      "00128293 addi t0, t0, 1\n"
+      "1855352f sc.d a0, t0, (a0)\n"
+      "00008067 ret\n");
+
+  int64_t* value = reinterpret_cast<int64_t*>(malloc(sizeof(int64_t)));
+  *value = 0b1100;
+  EXPECT_EQ(0, Call(test->entry(), reinterpret_cast<intx_t>(value)));
+  EXPECT_EQ(0b1101, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadReserveStoreConditionalDoubleWord_Failure,
+                        assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ li(T0, 42);
+  __ scd(A0, T0, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadReserveStoreConditionalDoubleWord_Failure, test) {
+  EXPECT_DISASSEMBLY(
+      "02a00293 li t0, 42\n"
+      "1855352f sc.d a0, t0, (a0)\n"
+      "00008067 ret\n");
+
+  int64_t* value = reinterpret_cast<int64_t*>(malloc(sizeof(int64_t)));
+  *value = 0b1100;
+  EXPECT(Call(test->entry(), reinterpret_cast<intx_t>(value)) != 0);
+  EXPECT_EQ(0b1100, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoSwapDoubleWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amoswapd(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoSwapDoubleWord, test) {
+  EXPECT_DISASSEMBLY(
+      "08b5352f amoswap.d a0, a1, (a0)\n"
+      "00008067 ret\n");
+
+  int64_t* value = reinterpret_cast<int64_t*>(malloc(sizeof(int64_t)));
+  *value = 0b1100;
+  EXPECT_EQ(0b1100,
+            Call(test->entry(), reinterpret_cast<intx_t>(value), 0b1010));
+  EXPECT_EQ(0b1010, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoAddDoubleWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amoaddd(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoAddDoubleWord, test) {
+  EXPECT_DISASSEMBLY(
+      "00b5352f amoadd.d a0, a1, (a0)\n"
+      "00008067 ret\n");
+
+  int64_t* value = reinterpret_cast<int64_t*>(malloc(sizeof(int64_t)));
+  *value = 42;
+  EXPECT_EQ(42, Call(test->entry(), reinterpret_cast<intx_t>(value), 10));
+  EXPECT_EQ(52, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoXorDoubleWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amoxord(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoXorDoubleWord, test) {
+  EXPECT_DISASSEMBLY(
+      "20b5352f amoxor.d a0, a1, (a0)\n"
+      "00008067 ret\n");
+
+  int64_t* value = reinterpret_cast<int64_t*>(malloc(sizeof(int64_t)));
+  *value = 0b1100;
+  EXPECT_EQ(0b1100,
+            Call(test->entry(), reinterpret_cast<intx_t>(value), 0b1010));
+  EXPECT_EQ(0b0110, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoAndDoubleWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amoandd(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoAndDoubleWord, test) {
+  EXPECT_DISASSEMBLY(
+      "60b5352f amoand.d a0, a1, (a0)\n"
+      "00008067 ret\n");
+
+  int64_t* value = reinterpret_cast<int64_t*>(malloc(sizeof(int64_t)));
+  *value = 0b1100;
+  EXPECT_EQ(0b1100,
+            Call(test->entry(), reinterpret_cast<intx_t>(value), 0b1010));
+  EXPECT_EQ(0b1000, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoOrDoubleWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amoord(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoOrDoubleWord, test) {
+  EXPECT_DISASSEMBLY(
+      "40b5352f amoor.d a0, a1, (a0)\n"
+      "00008067 ret\n");
+
+  int64_t* value = reinterpret_cast<int64_t*>(malloc(sizeof(int64_t)));
+  *value = 0b1100;
+  EXPECT_EQ(0b1100,
+            Call(test->entry(), reinterpret_cast<intx_t>(value), 0b1010));
+  EXPECT_EQ(0b1110, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoMinDoubleWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amomind(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoMinDoubleWord, test) {
+  EXPECT_DISASSEMBLY(
+      "80b5352f amomin.d a0, a1, (a0)\n"
+      "00008067 ret\n");
+
+  int64_t* value = reinterpret_cast<int64_t*>(malloc(sizeof(int64_t)));
+  *value = -7;
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -4));
+  EXPECT_EQ(-7, *value);
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -7));
+  EXPECT_EQ(-7, *value);
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -11));
+  EXPECT_EQ(-11, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoMaxDoubleWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amomaxd(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoMaxDoubleWord, test) {
+  EXPECT_DISASSEMBLY(
+      "a0b5352f amomax.d a0, a1, (a0)\n"
+      "00008067 ret\n");
+
+  int64_t* value = reinterpret_cast<int64_t*>(malloc(sizeof(int64_t)));
+  *value = -7;
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -11));
+  EXPECT_EQ(-7, *value);
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -7));
+  EXPECT_EQ(-7, *value);
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -4));
+  EXPECT_EQ(-4, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoMinUnsignedDoubleWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amominud(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoMinUnsignedDoubleWord, test) {
+  EXPECT_DISASSEMBLY(
+      "c0b5352f amominu.d a0, a1, (a0)\n"
+      "00008067 ret\n");
+
+  int64_t* value = reinterpret_cast<int64_t*>(malloc(sizeof(int64_t)));
+  *value = -7;
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -4));
+  EXPECT_EQ(-7, *value);
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -7));
+  EXPECT_EQ(-7, *value);
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -11));
+  EXPECT_EQ(-11, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoMaxUnsignedDoubleWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amomaxud(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoMaxUnsignedDoubleWord, test) {
+  EXPECT_DISASSEMBLY(
+      "e0b5352f amomaxu.d a0, a1, (a0)\n"
+      "00008067 ret\n");
+
+  int64_t* value = reinterpret_cast<int64_t*>(malloc(sizeof(int64_t)));
+  *value = -7;
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -11));
+  EXPECT_EQ(-7, *value);
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -7));
+  EXPECT_EQ(-7, *value);
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -4));
+  EXPECT_EQ(-4, *value);
+}
+#endif
+
+ASSEMBLER_TEST_GENERATE(LoadSingleFloat, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ flw(FA0, Address(A0, 1 * sizeof(float)));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadSingleFloat, test) {
+  EXPECT_DISASSEMBLY(
+      "00452507 flw fa0, 4(a0)\n"
+      "00008067 ret\n");
+
+  float* data = reinterpret_cast<float*>(malloc(3 * sizeof(float)));
+  data[0] = 1.7f;
+  data[1] = 2.8f;
+  data[2] = 3.9f;
+  EXPECT_EQ(data[1], CallF(test->entry(), reinterpret_cast<intx_t>(data)));
+}
+
+ASSEMBLER_TEST_GENERATE(StoreSingleFloat, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fsw(FA0, Address(A0, 1 * sizeof(float)));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(StoreSingleFloat, test) {
+  EXPECT_DISASSEMBLY(
+      "00a52227 fsw fa0, 4(a0)\n"
+      "00008067 ret\n");
+
+  float* data = reinterpret_cast<float*>(malloc(3 * sizeof(float)));
+  data[0] = 1.7f;
+  data[1] = 2.8f;
+  data[2] = 3.9f;
+  CallF(test->entry(), reinterpret_cast<intx_t>(data), 4.2f);
+  EXPECT_EQ(4.2f, data[1]);
+}
+
+ASSEMBLER_TEST_GENERATE(SingleMultiplyAdd, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fmadds(FA0, FA0, FA1, FA2);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleMultiplyAdd, test) {
+  EXPECT_DISASSEMBLY(
+      "60b50543 fmadd.s fa0, fa0, fa1, fa2\n"
+      "00008067 ret\n");
+  EXPECT_EQ(22.0, CallF(test->entry(), 3.0, 5.0, 7.0));
+  EXPECT_EQ(-8.0, CallF(test->entry(), -3.0, 5.0, 7.0));
+  EXPECT_EQ(-8.0, CallF(test->entry(), 3.0, -5.0, 7.0));
+  EXPECT_EQ(8.0, CallF(test->entry(), 3.0, 5.0, -7.0));
+
+  EXPECT_EQ(26.0, CallF(test->entry(), 7.0, 3.0, 5.0));
+  EXPECT_EQ(-16.0, CallF(test->entry(), -7.0, 3.0, 5.0));
+  EXPECT_EQ(-16.0, CallF(test->entry(), 7.0, -3.0, 5.0));
+  EXPECT_EQ(16.0, CallF(test->entry(), 7.0, 3.0, -5.0));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleMultiplySubtract, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fmsubs(FA0, FA0, FA1, FA2);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleMultiplySubtract, test) {
+  EXPECT_DISASSEMBLY(
+      "60b50547 fmsub.s fa0, fa0, fa1, fa2\n"
+      "00008067 ret\n");
+  EXPECT_EQ(8.0, CallF(test->entry(), 3.0, 5.0, 7.0));
+  EXPECT_EQ(-22.0, CallF(test->entry(), -3.0, 5.0, 7.0));
+  EXPECT_EQ(-22.0, CallF(test->entry(), 3.0, -5.0, 7.0));
+  EXPECT_EQ(22.0, CallF(test->entry(), 3.0, 5.0, -7.0));
+
+  EXPECT_EQ(16.0, CallF(test->entry(), 7.0, 3.0, 5.0));
+  EXPECT_EQ(-26.0, CallF(test->entry(), -7.0, 3.0, 5.0));
+  EXPECT_EQ(-26.0, CallF(test->entry(), 7.0, -3.0, 5.0));
+  EXPECT_EQ(26.0, CallF(test->entry(), 7.0, 3.0, -5.0));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleNegateMultiplySubtract, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fnmsubs(FA0, FA0, FA1, FA2);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleNegateMultiplySubtract, test) {
+  EXPECT_DISASSEMBLY(
+      "60b5054b fnmsub.s fa0, fa0, fa1, fa2\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-8.0, CallF(test->entry(), 3.0, 5.0, 7.0));
+  EXPECT_EQ(22.0, CallF(test->entry(), -3.0, 5.0, 7.0));
+  EXPECT_EQ(22.0, CallF(test->entry(), 3.0, -5.0, 7.0));
+  EXPECT_EQ(-22.0, CallF(test->entry(), 3.0, 5.0, -7.0));
+
+  EXPECT_EQ(-16.0, CallF(test->entry(), 7.0, 3.0, 5.0));
+  EXPECT_EQ(26.0, CallF(test->entry(), -7.0, 3.0, 5.0));
+  EXPECT_EQ(26.0, CallF(test->entry(), 7.0, -3.0, 5.0));
+  EXPECT_EQ(-26.0, CallF(test->entry(), 7.0, 3.0, -5.0));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleNegateMultiplyAdd, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fnmadds(FA0, FA0, FA1, FA2);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleNegateMultiplyAdd, test) {
+  EXPECT_DISASSEMBLY(
+      "60b5054f fnmadd.s fa0, fa0, fa1, fa2\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-22.0, CallF(test->entry(), 3.0, 5.0, 7.0));
+  EXPECT_EQ(8.0, CallF(test->entry(), -3.0, 5.0, 7.0));
+  EXPECT_EQ(8.0, CallF(test->entry(), 3.0, -5.0, 7.0));
+  EXPECT_EQ(-8.0, CallF(test->entry(), 3.0, 5.0, -7.0));
+
+  EXPECT_EQ(-26.0, CallF(test->entry(), 7.0, 3.0, 5.0));
+  EXPECT_EQ(16.0, CallF(test->entry(), -7.0, 3.0, 5.0));
+  EXPECT_EQ(16.0, CallF(test->entry(), 7.0, -3.0, 5.0));
+  EXPECT_EQ(-16.0, CallF(test->entry(), 7.0, 3.0, -5.0));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleAdd, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fadds(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleAdd, test) {
+  EXPECT_DISASSEMBLY(
+      "00b50553 fadd.s fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(8.0f, CallF(test->entry(), 3.0f, 5.0f));
+  EXPECT_EQ(2.0f, CallF(test->entry(), -3.0f, 5.0f));
+  EXPECT_EQ(-2.0f, CallF(test->entry(), 3.0f, -5.0f));
+  EXPECT_EQ(-8.0f, CallF(test->entry(), -3.0f, -5.0f));
+
+  EXPECT_EQ(10.0f, CallF(test->entry(), 7.0f, 3.0f));
+  EXPECT_EQ(-4.0f, CallF(test->entry(), -7.0f, 3.0f));
+  EXPECT_EQ(4.0f, CallF(test->entry(), 7.0f, -3.0f));
+  EXPECT_EQ(-10.0f, CallF(test->entry(), -7.0f, -3.0f));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleSubtract, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fsubs(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleSubtract, test) {
+  EXPECT_DISASSEMBLY(
+      "08b50553 fsub.s fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-2.0f, CallF(test->entry(), 3.0f, 5.0f));
+  EXPECT_EQ(-8.0f, CallF(test->entry(), -3.0f, 5.0f));
+  EXPECT_EQ(8.0f, CallF(test->entry(), 3.0f, -5.0f));
+  EXPECT_EQ(2.0f, CallF(test->entry(), -3.0f, -5.0f));
+
+  EXPECT_EQ(4.0f, CallF(test->entry(), 7.0f, 3.0f));
+  EXPECT_EQ(-10.0f, CallF(test->entry(), -7.0f, 3.0f));
+  EXPECT_EQ(10.0f, CallF(test->entry(), 7.0f, -3.0f));
+  EXPECT_EQ(-4.0f, CallF(test->entry(), -7.0f, -3.0f));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleMultiply, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fmuls(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleMultiply, test) {
+  EXPECT_DISASSEMBLY(
+      "10b50553 fmul.s fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(15.0f, CallF(test->entry(), 3.0f, 5.0f));
+  EXPECT_EQ(-15.0f, CallF(test->entry(), -3.0f, 5.0f));
+  EXPECT_EQ(-15.0f, CallF(test->entry(), 3.0f, -5.0f));
+  EXPECT_EQ(15.0f, CallF(test->entry(), -3.0f, -5.0f));
+
+  EXPECT_EQ(21.0f, CallF(test->entry(), 7.0f, 3.0f));
+  EXPECT_EQ(-21.0f, CallF(test->entry(), -7.0f, 3.0f));
+  EXPECT_EQ(-21.0f, CallF(test->entry(), 7.0f, -3.0f));
+  EXPECT_EQ(21.0f, CallF(test->entry(), -7.0f, -3.0f));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleDivide, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fdivs(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleDivide, test) {
+  EXPECT_DISASSEMBLY(
+      "18b50553 fdiv.s fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(2.0f, CallF(test->entry(), 10.0f, 5.0f));
+  EXPECT_EQ(-2.0f, CallF(test->entry(), -10.0f, 5.0f));
+  EXPECT_EQ(-2.0f, CallF(test->entry(), 10.0f, -5.0f));
+  EXPECT_EQ(2.0f, CallF(test->entry(), -10.0f, -5.0f));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleSquareRoot, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fsqrts(FA0, FA0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleSquareRoot, test) {
+  EXPECT_DISASSEMBLY(
+      "58050553 fsqrt.s fa0, fa0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0.0f, CallF(test->entry(), 0.0f));
+  EXPECT_EQ(1.0f, CallF(test->entry(), 1.0f));
+  EXPECT_EQ(2.0f, CallF(test->entry(), 4.0f));
+  EXPECT_EQ(3.0f, CallF(test->entry(), 9.0f));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleSignInject, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fsgnjs(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleSignInject, test) {
+  EXPECT_DISASSEMBLY(
+      "20b50553 fsgnj.s fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(3.0f, CallF(test->entry(), 3.0f, 5.0f));
+  EXPECT_EQ(3.0f, CallF(test->entry(), -3.0f, 5.0f));
+  EXPECT_EQ(-3.0f, CallF(test->entry(), 3.0f, -5.0f));
+  EXPECT_EQ(-3.0f, CallF(test->entry(), -3.0f, -5.0f));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleNegatedSignInject, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fsgnjns(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleNegatedSignInject, test) {
+  EXPECT_DISASSEMBLY(
+      "20b51553 fsgnjn.s fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-3.0f, CallF(test->entry(), 3.0f, 5.0f));
+  EXPECT_EQ(-3.0f, CallF(test->entry(), -3.0f, 5.0f));
+  EXPECT_EQ(3.0f, CallF(test->entry(), 3.0f, -5.0f));
+  EXPECT_EQ(3.0f, CallF(test->entry(), -3.0f, -5.0f));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleXorSignInject, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fsgnjxs(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleXorSignInject, test) {
+  EXPECT_DISASSEMBLY(
+      "20b52553 fsgnjx.s fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(3.0f, CallF(test->entry(), 3.0f, 5.0f));
+  EXPECT_EQ(-3.0f, CallF(test->entry(), -3.0f, 5.0f));
+  EXPECT_EQ(-3.0f, CallF(test->entry(), 3.0f, -5.0f));
+  EXPECT_EQ(3.0f, CallF(test->entry(), -3.0f, -5.0f));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleMin, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fmins(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleMin, test) {
+  EXPECT_DISASSEMBLY(
+      "28b50553 fmin.s fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(1.0f, CallF(test->entry(), 3.0f, 1.0f));
+  EXPECT_EQ(3.0f, CallF(test->entry(), 3.0f, 3.0f));
+  EXPECT_EQ(3.0f, CallF(test->entry(), 3.0f, 5.0f));
+  EXPECT_EQ(-1.0f, CallF(test->entry(), 3.0f, -1.0f));
+  EXPECT_EQ(-3.0f, CallF(test->entry(), 3.0f, -3.0f));
+  EXPECT_EQ(-5.0f, CallF(test->entry(), 3.0f, -5.0f));
+  EXPECT_EQ(-3.0f, CallF(test->entry(), -3.0f, 1.0f));
+  EXPECT_EQ(-3.0f, CallF(test->entry(), -3.0f, 3.0f));
+  EXPECT_EQ(-3.0f, CallF(test->entry(), -3.0f, 5.0f));
+  EXPECT_EQ(-3.0f, CallF(test->entry(), -3.0f, -1.0f));
+  EXPECT_EQ(-3.0f, CallF(test->entry(), -3.0f, -3.0f));
+  EXPECT_EQ(-5.0f, CallF(test->entry(), -3.0f, -5.0f));
+
+  float qNAN = std::numeric_limits<float>::quiet_NaN();
+  EXPECT_EQ(3.0f, CallF(test->entry(), 3.0f, qNAN));
+  EXPECT_EQ(3.0f, CallF(test->entry(), qNAN, 3.0f));
+  EXPECT_EQ(-3.0f, CallF(test->entry(), -3.0f, qNAN));
+  EXPECT_EQ(-3.0f, CallF(test->entry(), qNAN, -3.0f));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleMax, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fmaxs(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleMax, test) {
+  EXPECT_DISASSEMBLY(
+      "28b51553 fmax.s fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(3.0f, CallF(test->entry(), 3.0f, 1.0f));
+  EXPECT_EQ(3.0f, CallF(test->entry(), 3.0f, 3.0f));
+  EXPECT_EQ(5.0f, CallF(test->entry(), 3.0f, 5.0f));
+  EXPECT_EQ(3.0f, CallF(test->entry(), 3.0f, -1.0f));
+  EXPECT_EQ(3.0f, CallF(test->entry(), 3.0f, -3.0f));
+  EXPECT_EQ(3.0f, CallF(test->entry(), 3.0f, -5.0f));
+  EXPECT_EQ(1.0f, CallF(test->entry(), -3.0f, 1.0f));
+  EXPECT_EQ(3.0f, CallF(test->entry(), -3.0f, 3.0f));
+  EXPECT_EQ(5.0f, CallF(test->entry(), -3.0f, 5.0f));
+  EXPECT_EQ(-1.0f, CallF(test->entry(), -3.0f, -1.0f));
+  EXPECT_EQ(-3.0f, CallF(test->entry(), -3.0f, -3.0f));
+  EXPECT_EQ(-3.0f, CallF(test->entry(), -3.0f, -5.0f));
+
+  float qNAN = std::numeric_limits<float>::quiet_NaN();
+  EXPECT_EQ(3.0f, CallF(test->entry(), 3.0f, qNAN));
+  EXPECT_EQ(3.0f, CallF(test->entry(), qNAN, 3.0f));
+  EXPECT_EQ(-3.0f, CallF(test->entry(), -3.0f, qNAN));
+  EXPECT_EQ(-3.0f, CallF(test->entry(), qNAN, -3.0f));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleEqual, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ feqs(A0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleEqual, test) {
+  EXPECT_DISASSEMBLY(
+      "a0b52553 feq.s a0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, CallI(test->entry(), 3.0f, 1.0f));
+  EXPECT_EQ(1, CallI(test->entry(), 3.0f, 3.0f));
+  EXPECT_EQ(0, CallI(test->entry(), 3.0f, 5.0f));
+  EXPECT_EQ(0, CallI(test->entry(), 3.0f, -1.0f));
+  EXPECT_EQ(0, CallI(test->entry(), 3.0f, -3.0f));
+  EXPECT_EQ(0, CallI(test->entry(), 3.0f, -5.0f));
+  EXPECT_EQ(0, CallI(test->entry(), -3.0f, 1.0f));
+  EXPECT_EQ(0, CallI(test->entry(), -3.0f, 3.0f));
+  EXPECT_EQ(0, CallI(test->entry(), -3.0f, 5.0f));
+  EXPECT_EQ(0, CallI(test->entry(), -3.0f, -1.0f));
+  EXPECT_EQ(1, CallI(test->entry(), -3.0f, -3.0f));
+  EXPECT_EQ(0, CallI(test->entry(), -3.0f, -5.0f));
+
+  float qNAN = std::numeric_limits<float>::quiet_NaN();
+  EXPECT_EQ(0, CallI(test->entry(), 3.0f, qNAN));
+  EXPECT_EQ(0, CallI(test->entry(), qNAN, 3.0f));
+  EXPECT_EQ(0, CallI(test->entry(), -3.0f, qNAN));
+  EXPECT_EQ(0, CallI(test->entry(), qNAN, -3.0f));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleLessThan, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ flts(A0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleLessThan, test) {
+  EXPECT_DISASSEMBLY(
+      "a0b51553 flt.s a0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, CallI(test->entry(), 3.0f, 1.0f));
+  EXPECT_EQ(0, CallI(test->entry(), 3.0f, 3.0f));
+  EXPECT_EQ(1, CallI(test->entry(), 3.0f, 5.0f));
+  EXPECT_EQ(0, CallI(test->entry(), 3.0f, -1.0f));
+  EXPECT_EQ(0, CallI(test->entry(), 3.0f, -3.0f));
+  EXPECT_EQ(0, CallI(test->entry(), 3.0f, -5.0f));
+  EXPECT_EQ(1, CallI(test->entry(), -3.0f, 1.0f));
+  EXPECT_EQ(1, CallI(test->entry(), -3.0f, 3.0f));
+  EXPECT_EQ(1, CallI(test->entry(), -3.0f, 5.0f));
+  EXPECT_EQ(1, CallI(test->entry(), -3.0f, -1.0f));
+  EXPECT_EQ(0, CallI(test->entry(), -3.0f, -3.0f));
+  EXPECT_EQ(0, CallI(test->entry(), -3.0f, -5.0f));
+
+  float qNAN = std::numeric_limits<float>::quiet_NaN();
+  EXPECT_EQ(0, CallI(test->entry(), 3.0f, qNAN));
+  EXPECT_EQ(0, CallI(test->entry(), qNAN, 3.0f));
+  EXPECT_EQ(0, CallI(test->entry(), -3.0f, qNAN));
+  EXPECT_EQ(0, CallI(test->entry(), qNAN, -3.0f));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleLessOrEqual, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fles(A0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleLessOrEqual, test) {
+  EXPECT_DISASSEMBLY(
+      "a0b50553 fle.s a0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, CallI(test->entry(), 3.0f, 1.0f));
+  EXPECT_EQ(1, CallI(test->entry(), 3.0f, 3.0f));
+  EXPECT_EQ(1, CallI(test->entry(), 3.0f, 5.0f));
+  EXPECT_EQ(0, CallI(test->entry(), 3.0f, -1.0f));
+  EXPECT_EQ(0, CallI(test->entry(), 3.0f, -3.0f));
+  EXPECT_EQ(0, CallI(test->entry(), 3.0f, -5.0f));
+  EXPECT_EQ(1, CallI(test->entry(), -3.0f, 1.0f));
+  EXPECT_EQ(1, CallI(test->entry(), -3.0f, 3.0f));
+  EXPECT_EQ(1, CallI(test->entry(), -3.0f, 5.0f));
+  EXPECT_EQ(1, CallI(test->entry(), -3.0f, -1.0f));
+  EXPECT_EQ(1, CallI(test->entry(), -3.0f, -3.0f));
+  EXPECT_EQ(0, CallI(test->entry(), -3.0f, -5.0f));
+
+  float qNAN = std::numeric_limits<float>::quiet_NaN();
+  EXPECT_EQ(0, CallI(test->entry(), 3.0f, qNAN));
+  EXPECT_EQ(0, CallI(test->entry(), qNAN, 3.0f));
+  EXPECT_EQ(0, CallI(test->entry(), -3.0f, qNAN));
+  EXPECT_EQ(0, CallI(test->entry(), qNAN, -3.0f));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleClassify, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fclasss(A0, FA0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleClassify, test) {
+  EXPECT_DISASSEMBLY(
+      "e0051553 fclass.s a0, fa0\n"
+      "00008067 ret\n");
+  // Neg infinity
+  EXPECT_EQ(1 << 0,
+            CallI(test->entry(), -std::numeric_limits<float>::infinity()));
+  // Neg normal
+  EXPECT_EQ(1 << 1, CallI(test->entry(), -1.0f));
+  // Neg subnormal
+  EXPECT_EQ(1 << 2,
+            CallI(test->entry(), -std::numeric_limits<float>::min() / 2.0f));
+  // Neg zero
+  EXPECT_EQ(1 << 3, CallI(test->entry(), -0.0f));
+  // Pos zero
+  EXPECT_EQ(1 << 4, CallI(test->entry(), 0.0f));
+  // Pos subnormal
+  EXPECT_EQ(1 << 5,
+            CallI(test->entry(), std::numeric_limits<float>::min() / 2.0f));
+  // Pos normal
+  EXPECT_EQ(1 << 6, CallI(test->entry(), 1.0f));
+  // Pos infinity
+  EXPECT_EQ(1 << 7,
+            CallI(test->entry(), std::numeric_limits<float>::infinity()));
+  // Signaling NaN
+  EXPECT_EQ(1 << 8,
+            CallI(test->entry(), std::numeric_limits<float>::signaling_NaN()));
+  // Quiet NaN
+  EXPECT_EQ(1 << 9,
+            CallI(test->entry(), std::numeric_limits<float>::quiet_NaN()));
+}
+
+ASSEMBLER_TEST_GENERATE(ConvertSingleToWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtws(A0, FA0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertSingleToWord, test) {
+  EXPECT_DISASSEMBLY(
+      "c0050553 fcvt.w.s a0, fa0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-42, CallI(test->entry(), static_cast<float>(-42)));
+  EXPECT_EQ(0, CallI(test->entry(), static_cast<float>(0)));
+  EXPECT_EQ(42, CallI(test->entry(), static_cast<float>(42)));
+  EXPECT_EQ(sign_extend(kMinInt32),
+            CallI(test->entry(), static_cast<float>(kMinInt32)));
+  EXPECT_EQ(sign_extend(kMaxInt32),
+            CallI(test->entry(), static_cast<float>(kMaxInt32)));
+  EXPECT_EQ(sign_extend(kMaxInt32),
+            CallI(test->entry(), static_cast<float>(kMaxUint32)));
+  EXPECT_EQ(sign_extend(kMinInt32),
+            CallI(test->entry(), static_cast<float>(kMinInt64)));
+  EXPECT_EQ(sign_extend(kMaxInt32),
+            CallI(test->entry(), static_cast<float>(kMaxInt64)));
+  EXPECT_EQ(sign_extend(kMaxInt32),
+            CallI(test->entry(), static_cast<float>(kMaxUint64)));
+  EXPECT_EQ(sign_extend(kMinInt32),
+            CallI(test->entry(), -std::numeric_limits<float>::infinity()));
+  EXPECT_EQ(sign_extend(kMaxInt32),
+            CallI(test->entry(), std::numeric_limits<float>::infinity()));
+  EXPECT_EQ(sign_extend(kMaxInt32),
+            CallI(test->entry(), std::numeric_limits<float>::signaling_NaN()));
+}
+
+ASSEMBLER_TEST_GENERATE(ConvertSingleToWord_RNE, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtws(A0, FA0, RNE);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertSingleToWord_RNE, test) {
+  EXPECT_DISASSEMBLY(
+      "c0050553 fcvt.w.s a0, fa0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-44, CallI(test->entry(), -43.6f));
+  EXPECT_EQ(-44, CallI(test->entry(), -43.5f));
+  EXPECT_EQ(-43, CallI(test->entry(), -43.4f));
+  EXPECT_EQ(-43, CallI(test->entry(), -43.0f));
+  EXPECT_EQ(-43, CallI(test->entry(), -42.6f));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.5f));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.4f));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.0f));
+  EXPECT_EQ(0, CallI(test->entry(), -0.0f));
+  EXPECT_EQ(0, CallI(test->entry(), +0.0f));
+  EXPECT_EQ(42, CallI(test->entry(), 42.0f));
+  EXPECT_EQ(42, CallI(test->entry(), 42.4f));
+  EXPECT_EQ(42, CallI(test->entry(), 42.5f));
+  EXPECT_EQ(43, CallI(test->entry(), 42.6f));
+  EXPECT_EQ(43, CallI(test->entry(), 43.0f));
+  EXPECT_EQ(43, CallI(test->entry(), 43.4f));
+  EXPECT_EQ(44, CallI(test->entry(), 43.5f));
+  EXPECT_EQ(44, CallI(test->entry(), 43.6f));
+}
+
+ASSEMBLER_TEST_GENERATE(ConvertSingleToWord_RTZ, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtws(A0, FA0, RTZ);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertSingleToWord_RTZ, test) {
+  EXPECT_DISASSEMBLY(
+      "c0051553 fcvt.w.s a0, fa0, rtz\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-43, CallI(test->entry(), -43.6f));
+  EXPECT_EQ(-43, CallI(test->entry(), -43.5f));
+  EXPECT_EQ(-43, CallI(test->entry(), -43.4f));
+  EXPECT_EQ(-43, CallI(test->entry(), -43.0f));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.6f));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.5f));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.4f));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.0f));
+  EXPECT_EQ(0, CallI(test->entry(), -0.0f));
+  EXPECT_EQ(0, CallI(test->entry(), +0.0f));
+  EXPECT_EQ(42, CallI(test->entry(), 42.0f));
+  EXPECT_EQ(42, CallI(test->entry(), 42.4f));
+  EXPECT_EQ(42, CallI(test->entry(), 42.5f));
+  EXPECT_EQ(42, CallI(test->entry(), 42.6f));
+  EXPECT_EQ(43, CallI(test->entry(), 43.0f));
+  EXPECT_EQ(43, CallI(test->entry(), 43.4f));
+  EXPECT_EQ(43, CallI(test->entry(), 43.5f));
+  EXPECT_EQ(43, CallI(test->entry(), 43.6f));
+}
+
+ASSEMBLER_TEST_GENERATE(ConvertSingleToWord_RDN, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtws(A0, FA0, RDN);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertSingleToWord_RDN, test) {
+  EXPECT_DISASSEMBLY(
+      "c0052553 fcvt.w.s a0, fa0, rdn\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-44, CallI(test->entry(), -43.6f));
+  EXPECT_EQ(-44, CallI(test->entry(), -43.5f));
+  EXPECT_EQ(-44, CallI(test->entry(), -43.4f));
+  EXPECT_EQ(-43, CallI(test->entry(), -43.0f));
+  EXPECT_EQ(-43, CallI(test->entry(), -42.6f));
+  EXPECT_EQ(-43, CallI(test->entry(), -42.5f));
+  EXPECT_EQ(-43, CallI(test->entry(), -42.4f));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.0f));
+  EXPECT_EQ(0, CallI(test->entry(), -0.0f));
+  EXPECT_EQ(0, CallI(test->entry(), +0.0f));
+  EXPECT_EQ(42, CallI(test->entry(), 42.0f));
+  EXPECT_EQ(42, CallI(test->entry(), 42.4f));
+  EXPECT_EQ(42, CallI(test->entry(), 42.5f));
+  EXPECT_EQ(42, CallI(test->entry(), 42.6f));
+  EXPECT_EQ(43, CallI(test->entry(), 43.0f));
+  EXPECT_EQ(43, CallI(test->entry(), 43.4f));
+  EXPECT_EQ(43, CallI(test->entry(), 43.5f));
+  EXPECT_EQ(43, CallI(test->entry(), 43.6f));
+}
+
+ASSEMBLER_TEST_GENERATE(ConvertSingleToWord_RUP, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtws(A0, FA0, RUP);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertSingleToWord_RUP, test) {
+  EXPECT_DISASSEMBLY(
+      "c0053553 fcvt.w.s a0, fa0, rup\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-43, CallI(test->entry(), -43.6f));
+  EXPECT_EQ(-43, CallI(test->entry(), -43.5f));
+  EXPECT_EQ(-43, CallI(test->entry(), -43.4f));
+  EXPECT_EQ(-43, CallI(test->entry(), -43.0f));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.6f));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.5f));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.4f));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.0f));
+  EXPECT_EQ(0, CallI(test->entry(), -0.0f));
+  EXPECT_EQ(0, CallI(test->entry(), +0.0f));
+  EXPECT_EQ(42, CallI(test->entry(), 42.0f));
+  EXPECT_EQ(43, CallI(test->entry(), 42.4f));
+  EXPECT_EQ(43, CallI(test->entry(), 42.5f));
+  EXPECT_EQ(43, CallI(test->entry(), 42.6f));
+  EXPECT_EQ(43, CallI(test->entry(), 43.0f));
+  EXPECT_EQ(44, CallI(test->entry(), 43.4f));
+  EXPECT_EQ(44, CallI(test->entry(), 43.5f));
+  EXPECT_EQ(44, CallI(test->entry(), 43.6f));
+}
+
+ASSEMBLER_TEST_GENERATE(ConvertSingleToWord_RMM, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtws(A0, FA0, RMM);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertSingleToWord_RMM, test) {
+  EXPECT_DISASSEMBLY(
+      "c0054553 fcvt.w.s a0, fa0, rmm\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-44, CallI(test->entry(), -43.6f));
+  EXPECT_EQ(-44, CallI(test->entry(), -43.5f));
+  EXPECT_EQ(-43, CallI(test->entry(), -43.4f));
+  EXPECT_EQ(-43, CallI(test->entry(), -43.0f));
+  EXPECT_EQ(-43, CallI(test->entry(), -42.6f));
+  EXPECT_EQ(-43, CallI(test->entry(), -42.5f));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.4f));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.0f));
+  EXPECT_EQ(0, CallI(test->entry(), -0.0f));
+  EXPECT_EQ(0, CallI(test->entry(), +0.0f));
+  EXPECT_EQ(42, CallI(test->entry(), 42.0f));
+  EXPECT_EQ(42, CallI(test->entry(), 42.4f));
+  EXPECT_EQ(43, CallI(test->entry(), 42.5f));
+  EXPECT_EQ(43, CallI(test->entry(), 42.6f));
+  EXPECT_EQ(43, CallI(test->entry(), 43.0f));
+  EXPECT_EQ(43, CallI(test->entry(), 43.4f));
+  EXPECT_EQ(44, CallI(test->entry(), 43.5f));
+  EXPECT_EQ(44, CallI(test->entry(), 43.6f));
+}
+
+ASSEMBLER_TEST_GENERATE(ConvertSingleToUnsignedWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtwus(A0, FA0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertSingleToUnsignedWord, test) {
+  EXPECT_DISASSEMBLY(
+      "c0150553 fcvt.wu.s a0, fa0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, CallI(test->entry(), static_cast<float>(-42)));
+  EXPECT_EQ(0, CallI(test->entry(), static_cast<float>(0)));
+  EXPECT_EQ(42, CallI(test->entry(), static_cast<float>(42)));
+  EXPECT_EQ(sign_extend(0),
+            CallI(test->entry(), static_cast<float>(kMinInt32)));
+  // A float cannot represent kMaxInt32 exactly; it rounds up to 2^31, which
+  // as an unsigned word is 0x80000000 and sign-extends to -2147483648.
+  EXPECT_EQ(-2147483648, CallI(test->entry(), static_cast<float>(kMaxInt32)));
+  EXPECT_EQ(sign_extend(kMaxUint32),
+            CallI(test->entry(), static_cast<float>(kMaxUint32)));
+  EXPECT_EQ(sign_extend(0),
+            CallI(test->entry(), static_cast<float>(kMinInt64)));
+  EXPECT_EQ(sign_extend(kMaxUint32),
+            CallI(test->entry(), static_cast<float>(kMaxInt64)));
+  EXPECT_EQ(sign_extend(kMaxUint32),
+            CallI(test->entry(), static_cast<float>(kMaxUint64)));
+  EXPECT_EQ(sign_extend(0),
+            CallI(test->entry(), -std::numeric_limits<float>::infinity()));
+  EXPECT_EQ(sign_extend(kMaxUint32),
+            CallI(test->entry(), std::numeric_limits<float>::infinity()));
+  EXPECT_EQ(sign_extend(kMaxUint32),
+            CallI(test->entry(), std::numeric_limits<float>::signaling_NaN()));
+}
+
+ASSEMBLER_TEST_GENERATE(ConvertWordToSingle, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtsw(FA0, A0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertWordToSingle, test) {
+  EXPECT_DISASSEMBLY(
+      "d0050553 fcvt.s.w fa0, a0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-42.0f, CallF(test->entry(), sign_extend(-42)));
+  EXPECT_EQ(0.0f, CallF(test->entry(), sign_extend(0)));
+  EXPECT_EQ(42.0f, CallF(test->entry(), sign_extend(42)));
+  EXPECT_EQ(static_cast<float>(kMinInt32),
+            CallF(test->entry(), sign_extend(kMinInt32)));
+  EXPECT_EQ(static_cast<float>(kMaxInt32),
+            CallF(test->entry(), sign_extend(kMaxInt32)));
+  EXPECT_EQ(-1.0f, CallF(test->entry(), sign_extend(kMaxUint32)));
+}
+
+ASSEMBLER_TEST_GENERATE(ConvertUnsignedWordToSingle, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtswu(FA0, A0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertUnsignedWordToSingle, test) {
+  EXPECT_DISASSEMBLY(
+      "d0150553 fcvt.s.wu fa0, a0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(
+      static_cast<float>(static_cast<uint32_t>(static_cast<int32_t>(-42))),
+      CallF(test->entry(), sign_extend(-42)));
+  EXPECT_EQ(0.0f, CallF(test->entry(), sign_extend(0)));
+  EXPECT_EQ(42.0f, CallF(test->entry(), sign_extend(42)));
+  EXPECT_EQ(static_cast<float>(static_cast<uint32_t>(kMinInt32)),
+            CallF(test->entry(), sign_extend(kMinInt32)));
+  EXPECT_EQ(static_cast<float>(kMaxInt32),
+            CallF(test->entry(), sign_extend(kMaxInt32)));
+  EXPECT_EQ(static_cast<float>(kMaxUint32),
+            CallF(test->entry(), sign_extend(kMaxUint32)));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleMove, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fmvs(FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleMove, test) {
+  EXPECT_DISASSEMBLY(
+      "20b58553 fmv.s fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(36.0f, CallF(test->entry(), 42.0f, 36.0f));
+  EXPECT_EQ(std::numeric_limits<float>::infinity(),
+            CallF(test->entry(), -std::numeric_limits<float>::infinity(),
+                  std::numeric_limits<float>::infinity()));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleAbsoluteValue, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fabss(FA0, FA0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleAbsoluteValue, test) {
+  EXPECT_DISASSEMBLY(
+      "20a52553 fabs.s fa0, fa0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0.0f, CallF(test->entry(), 0.0f));
+  EXPECT_EQ(0.0f, CallF(test->entry(), -0.0f));
+  EXPECT_EQ(42.0f, CallF(test->entry(), 42.0f));
+  EXPECT_EQ(42.0f, CallF(test->entry(), -42.0f));
+  EXPECT_EQ(std::numeric_limits<float>::infinity(),
+            CallF(test->entry(), std::numeric_limits<float>::infinity()));
+  EXPECT_EQ(std::numeric_limits<float>::infinity(),
+            CallF(test->entry(), -std::numeric_limits<float>::infinity()));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleNegate, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fnegs(FA0, FA0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleNegate, test) {
+  EXPECT_DISASSEMBLY(
+      "20a51553 fneg.s fa0, fa0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-0.0f, CallF(test->entry(), 0.0f));
+  EXPECT_EQ(0.0f, CallF(test->entry(), -0.0f));
+  EXPECT_EQ(-42.0f, CallF(test->entry(), 42.0f));
+  EXPECT_EQ(42.0f, CallF(test->entry(), -42.0f));
+  EXPECT_EQ(-std::numeric_limits<float>::infinity(),
+            CallF(test->entry(), std::numeric_limits<float>::infinity()));
+  EXPECT_EQ(std::numeric_limits<float>::infinity(),
+            CallF(test->entry(), -std::numeric_limits<float>::infinity()));
+}
+
+ASSEMBLER_TEST_GENERATE(BitCastSingleToInteger, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fmvxw(A0, FA0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BitCastSingleToInteger, test) {
+  EXPECT_DISASSEMBLY(
+      "e0050553 fmv.x.w a0, fa0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(bit_cast<int32_t>(0.0f), CallI(test->entry(), 0.0f));
+  EXPECT_EQ(bit_cast<int32_t>(-0.0f), CallI(test->entry(), -0.0f));
+  EXPECT_EQ(bit_cast<int32_t>(42.0f), CallI(test->entry(), 42.0f));
+  EXPECT_EQ(bit_cast<int32_t>(-42.0f), CallI(test->entry(), -42.0f));
+  EXPECT_EQ(bit_cast<int32_t>(std::numeric_limits<float>::quiet_NaN()),
+            CallI(test->entry(), std::numeric_limits<float>::quiet_NaN()));
+  EXPECT_EQ(bit_cast<int32_t>(std::numeric_limits<float>::signaling_NaN()),
+            CallI(test->entry(), std::numeric_limits<float>::signaling_NaN()));
+  EXPECT_EQ(bit_cast<int32_t>(std::numeric_limits<float>::infinity()),
+            CallI(test->entry(), std::numeric_limits<float>::infinity()));
+  EXPECT_EQ(bit_cast<int32_t>(-std::numeric_limits<float>::infinity()),
+            CallI(test->entry(), -std::numeric_limits<float>::infinity()));
+}
+
+ASSEMBLER_TEST_GENERATE(BitCastIntegerToSingle, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fmvwx(FA0, A0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BitCastIntegerToSingle, test) {
+  EXPECT_DISASSEMBLY(
+      "f0050553 fmv.w.x fa0, a0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0.0f, CallF(test->entry(), sign_extend(bit_cast<int32_t>(0.0f))));
+  EXPECT_EQ(-0.0f, CallF(test->entry(), sign_extend(bit_cast<int32_t>(-0.0f))));
+  EXPECT_EQ(42.0f, CallF(test->entry(), sign_extend(bit_cast<int32_t>(42.0f))));
+  EXPECT_EQ(-42.0f,
+            CallF(test->entry(), sign_extend(bit_cast<int32_t>(-42.0f))));
+  EXPECT_EQ(true, isnan(CallF(test->entry(),
+                              sign_extend(bit_cast<int32_t>(
+                                  std::numeric_limits<float>::quiet_NaN())))));
+  EXPECT_EQ(true,
+            isnan(CallF(test->entry(),
+                        sign_extend(bit_cast<int32_t>(
+                            std::numeric_limits<float>::signaling_NaN())))));
+  EXPECT_EQ(std::numeric_limits<float>::infinity(),
+            CallF(test->entry(), sign_extend(bit_cast<int32_t>(
+                                     std::numeric_limits<float>::infinity()))));
+  EXPECT_EQ(
+      -std::numeric_limits<float>::infinity(),
+      CallF(test->entry(), sign_extend(bit_cast<int32_t>(
+                               -std::numeric_limits<float>::infinity()))));
+}
+
+#if XLEN >= 64
+ASSEMBLER_TEST_GENERATE(ConvertSingleToDoubleWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtls(A0, FA0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertSingleToDoubleWord, test) {
+  EXPECT_DISASSEMBLY(
+      "c0250553 fcvt.l.s a0, fa0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-42, CallI(test->entry(), static_cast<float>(-42)));
+  EXPECT_EQ(0, CallI(test->entry(), static_cast<float>(0)));
+  EXPECT_EQ(42, CallI(test->entry(), static_cast<float>(42)));
+  EXPECT_EQ(static_cast<int64_t>(kMinInt32),
+            CallI(test->entry(), static_cast<float>(kMinInt32)));
+  // A float cannot represent kMaxInt32 or kMaxUint32 exactly; they round up
+  // to 2^31 and 2^32 respectively, hence the +1 in the expectations below.
+  EXPECT_EQ(static_cast<int64_t>(kMaxInt32) + 1,
+            CallI(test->entry(), static_cast<float>(kMaxInt32)));
+  EXPECT_EQ(static_cast<int64_t>(kMaxUint32) + 1,
+            CallI(test->entry(), static_cast<float>(kMaxUint32)));
+  EXPECT_EQ(kMinInt64, CallI(test->entry(), static_cast<float>(kMinInt64)));
+  EXPECT_EQ(kMaxInt64, CallI(test->entry(), static_cast<float>(kMaxInt64)));
+  EXPECT_EQ(kMaxInt64, CallI(test->entry(), static_cast<float>(kMaxUint64)));
+  EXPECT_EQ(kMinInt64,
+            CallI(test->entry(), -std::numeric_limits<float>::infinity()));
+  EXPECT_EQ(kMaxInt64,
+            CallI(test->entry(), std::numeric_limits<float>::infinity()));
+  EXPECT_EQ(kMaxInt64,
+            CallI(test->entry(), std::numeric_limits<float>::signaling_NaN()));
+}
+
+ASSEMBLER_TEST_GENERATE(ConvertSingleToUnsignedDoubleWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtlus(A0, FA0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertSingleToUnsignedDoubleWord, test) {
+  EXPECT_DISASSEMBLY(
+      "c0350553 fcvt.lu.s a0, fa0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, CallI(test->entry(), static_cast<float>(-42)));
+  EXPECT_EQ(0, CallI(test->entry(), static_cast<float>(0)));
+  EXPECT_EQ(42, CallI(test->entry(), static_cast<float>(42)));
+  EXPECT_EQ(static_cast<int64_t>(static_cast<uint64_t>(0)),
+            CallI(test->entry(), static_cast<float>(kMinInt32)));
+  EXPECT_EQ(static_cast<int64_t>(static_cast<uint64_t>(kMaxInt32) + 1),
+            CallI(test->entry(), static_cast<float>(kMaxInt32)));
+  EXPECT_EQ(static_cast<int64_t>(static_cast<uint64_t>(kMaxUint32) + 1),
+            CallI(test->entry(), static_cast<float>(kMaxUint32)));
+  EXPECT_EQ(static_cast<int64_t>(static_cast<uint64_t>(0)),
+            CallI(test->entry(), static_cast<float>(kMinInt64)));
+  EXPECT_EQ(static_cast<int64_t>(static_cast<uint64_t>(kMaxInt64) + 1),
+            CallI(test->entry(), static_cast<float>(kMaxInt64)));
+  EXPECT_EQ(static_cast<int64_t>(static_cast<uint64_t>(kMaxUint64)),
+            CallI(test->entry(), static_cast<float>(kMaxUint64)));
+  EXPECT_EQ(static_cast<int64_t>(static_cast<uint64_t>(0)),
+            CallI(test->entry(), -std::numeric_limits<float>::infinity()));
+  EXPECT_EQ(static_cast<int64_t>(static_cast<uint64_t>(kMaxUint64)),
+            CallI(test->entry(), std::numeric_limits<float>::infinity()));
+  EXPECT_EQ(static_cast<int64_t>(static_cast<uint64_t>(kMaxUint64)),
+            CallI(test->entry(), std::numeric_limits<float>::signaling_NaN()));
+}
+
+ASSEMBLER_TEST_GENERATE(ConvertDoubleWordToSingle, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtsl(FA0, A0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertDoubleWordToSingle, test) {
+  EXPECT_DISASSEMBLY(
+      "d0250553 fcvt.s.l fa0, a0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0.0f, CallF(test->entry(), sign_extend(0)));
+  EXPECT_EQ(42.0f, CallF(test->entry(), sign_extend(42)));
+  EXPECT_EQ(-42.0f, CallF(test->entry(), sign_extend(-42)));
+  EXPECT_EQ(static_cast<float>(kMinInt32),
+            CallF(test->entry(), sign_extend(kMinInt32)));
+  EXPECT_EQ(static_cast<float>(kMaxInt32),
+            CallF(test->entry(), sign_extend(kMaxInt32)));
+  EXPECT_EQ(static_cast<float>(sign_extend(kMaxUint32)),
+            CallF(test->entry(), sign_extend(kMaxUint32)));
+  EXPECT_EQ(static_cast<float>(kMinInt64),
+            CallF(test->entry(), sign_extend(kMinInt64)));
+  EXPECT_EQ(static_cast<float>(kMaxInt64),
+            CallF(test->entry(), sign_extend(kMaxInt64)));
+  EXPECT_EQ(static_cast<float>(sign_extend(kMaxUint64)),
+            CallF(test->entry(), sign_extend(kMaxUint64)));
+}
+
+ASSEMBLER_TEST_GENERATE(ConvertUnsignedDoubleWordToSingle, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtslu(FA0, A0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertUnsignedDoubleWordToSingle, test) {
+  EXPECT_DISASSEMBLY(
+      "d0350553 fcvt.s.lu fa0, a0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0.0f, CallF(test->entry(), sign_extend(0)));
+  EXPECT_EQ(42.0f, CallF(test->entry(), sign_extend(42)));
+  EXPECT_EQ(static_cast<float>(static_cast<uint64_t>(sign_extend(-42))),
+            CallF(test->entry(), sign_extend(-42)));
+  EXPECT_EQ(static_cast<float>(static_cast<uint64_t>(sign_extend(kMinInt32))),
+            CallF(test->entry(), sign_extend(kMinInt32)));
+  EXPECT_EQ(static_cast<float>(static_cast<uint64_t>(sign_extend(kMaxInt32))),
+            CallF(test->entry(), sign_extend(kMaxInt32)));
+  EXPECT_EQ(static_cast<float>(static_cast<uint64_t>(sign_extend(kMaxUint32))),
+            CallF(test->entry(), sign_extend(kMaxUint32)));
+  EXPECT_EQ(static_cast<float>(static_cast<uint64_t>(sign_extend(kMinInt64))),
+            CallF(test->entry(), sign_extend(kMinInt64)));
+  EXPECT_EQ(static_cast<float>(static_cast<uint64_t>(sign_extend(kMaxInt64))),
+            CallF(test->entry(), sign_extend(kMaxInt64)));
+  EXPECT_EQ(static_cast<float>(kMaxUint64),
+            CallF(test->entry(), sign_extend(kMaxUint64)));
+}
+#endif
+
+ASSEMBLER_TEST_GENERATE(LoadDoubleFloat, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fld(FA0, Address(A0, 1 * sizeof(double)));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadDoubleFloat, test) {
+  EXPECT_DISASSEMBLY(
+      "00853507 fld fa0, 8(a0)\n"
+      "00008067 ret\n");
+
+  double* data = reinterpret_cast<double*>(malloc(3 * sizeof(double)));
+  data[0] = 1.7;
+  data[1] = 2.8;
+  data[2] = 3.9;
+  EXPECT_EQ(data[1], CallD(test->entry(), reinterpret_cast<intx_t>(data)));
+  free(data);
+}
+
+ASSEMBLER_TEST_GENERATE(StoreDoubleFloat, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fsd(FA0, Address(A0, 1 * sizeof(double)));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(StoreDoubleFloat, test) {
+  EXPECT_DISASSEMBLY(
+      "00a53427 fsd fa0, 8(a0)\n"
+      "00008067 ret\n");
+
+  double* data = reinterpret_cast<double*>(malloc(3 * sizeof(double)));
+  data[0] = 1.7;
+  data[1] = 2.8;
+  data[2] = 3.9;
+  CallD(test->entry(), reinterpret_cast<intx_t>(data), 4.2);
+  EXPECT_EQ(4.2, data[1]);
+  free(data);
+}
+
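+// The fused multiply-add family computes (rs1 x rs2) +/- rs3 with a single
+// rounding step; fnmadd.d and fnmsub.d negate the result of the
+// corresponding fmadd.d/fmsub.d operation.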
+ASSEMBLER_TEST_GENERATE(DoubleMultiplyAdd, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fmaddd(FA0, FA0, FA1, FA2);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleMultiplyAdd, test) {
+  EXPECT_DISASSEMBLY(
+      "62b50543 fmadd.d fa0, fa0, fa1, fa2\n"
+      "00008067 ret\n");
+  EXPECT_EQ(22.0, CallD(test->entry(), 3.0, 5.0, 7.0));
+  EXPECT_EQ(-8.0, CallD(test->entry(), -3.0, 5.0, 7.0));
+  EXPECT_EQ(-8.0, CallD(test->entry(), 3.0, -5.0, 7.0));
+  EXPECT_EQ(8.0, CallD(test->entry(), 3.0, 5.0, -7.0));
+
+  EXPECT_EQ(26.0, CallD(test->entry(), 7.0, 3.0, 5.0));
+  EXPECT_EQ(-16.0, CallD(test->entry(), -7.0, 3.0, 5.0));
+  EXPECT_EQ(-16.0, CallD(test->entry(), 7.0, -3.0, 5.0));
+  EXPECT_EQ(16.0, CallD(test->entry(), 7.0, 3.0, -5.0));
+}
+
+ASSEMBLER_TEST_GENERATE(DoubleMultiplySubtract, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fmsubd(FA0, FA0, FA1, FA2);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleMultiplySubtract, test) {
+  EXPECT_DISASSEMBLY(
+      "62b50547 fmsub.d fa0, fa0, fa1, fa2\n"
+      "00008067 ret\n");
+  EXPECT_EQ(8.0, CallD(test->entry(), 3.0, 5.0, 7.0));
+  EXPECT_EQ(-22.0, CallD(test->entry(), -3.0, 5.0, 7.0));
+  EXPECT_EQ(-22.0, CallD(test->entry(), 3.0, -5.0, 7.0));
+  EXPECT_EQ(22.0, CallD(test->entry(), 3.0, 5.0, -7.0));
+
+  EXPECT_EQ(16.0, CallD(test->entry(), 7.0, 3.0, 5.0));
+  EXPECT_EQ(-26.0, CallD(test->entry(), -7.0, 3.0, 5.0));
+  EXPECT_EQ(-26.0, CallD(test->entry(), 7.0, -3.0, 5.0));
+  EXPECT_EQ(26.0, CallD(test->entry(), 7.0, 3.0, -5.0));
+}
+
+ASSEMBLER_TEST_GENERATE(DoubleNegateMultiplySubtract, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fnmsubd(FA0, FA0, FA1, FA2);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleNegateMultiplySubtract, test) {
+  EXPECT_DISASSEMBLY(
+      "62b5054b fnmsub.d fa0, fa0, fa1, fa2\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-8.0, CallD(test->entry(), 3.0, 5.0, 7.0));
+  EXPECT_EQ(22.0, CallD(test->entry(), -3.0, 5.0, 7.0));
+  EXPECT_EQ(22.0, CallD(test->entry(), 3.0, -5.0, 7.0));
+  EXPECT_EQ(-22.0, CallD(test->entry(), 3.0, 5.0, -7.0));
+
+  EXPECT_EQ(-16.0, CallD(test->entry(), 7.0, 3.0, 5.0));
+  EXPECT_EQ(26.0, CallD(test->entry(), -7.0, 3.0, 5.0));
+  EXPECT_EQ(26.0, CallD(test->entry(), 7.0, -3.0, 5.0));
+  EXPECT_EQ(-26.0, CallD(test->entry(), 7.0, 3.0, -5.0));
+}
+
+ASSEMBLER_TEST_GENERATE(DoubleNegateMultiplyAdd, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fnmaddd(FA0, FA0, FA1, FA2);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleNegateMultiplyAdd, test) {
+  EXPECT_DISASSEMBLY(
+      "62b5054f fnmadd.d fa0, fa0, fa1, fa2\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-22.0, CallD(test->entry(), 3.0, 5.0, 7.0));
+  EXPECT_EQ(8.0, CallD(test->entry(), -3.0, 5.0, 7.0));
+  EXPECT_EQ(8.0, CallD(test->entry(), 3.0, -5.0, 7.0));
+  EXPECT_EQ(-8.0, CallD(test->entry(), 3.0, 5.0, -7.0));
+
+  EXPECT_EQ(-26.0, CallD(test->entry(), 7.0, 3.0, 5.0));
+  EXPECT_EQ(16.0, CallD(test->entry(), -7.0, 3.0, 5.0));
+  EXPECT_EQ(16.0, CallD(test->entry(), 7.0, -3.0, 5.0));
+  EXPECT_EQ(-16.0, CallD(test->entry(), 7.0, 3.0, -5.0));
+}
+
+ASSEMBLER_TEST_GENERATE(DoubleAdd, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ faddd(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleAdd, test) {
+  EXPECT_DISASSEMBLY(
+      "02b50553 fadd.d fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(8.0, CallD(test->entry(), 3.0, 5.0));
+  EXPECT_EQ(2.0, CallD(test->entry(), -3.0, 5.0));
+  EXPECT_EQ(-2.0, CallD(test->entry(), 3.0, -5.0));
+  EXPECT_EQ(-8.0, CallD(test->entry(), -3.0, -5.0));
+
+  EXPECT_EQ(10.0, CallD(test->entry(), 7.0, 3.0));
+  EXPECT_EQ(-4.0, CallD(test->entry(), -7.0, 3.0));
+  EXPECT_EQ(4.0, CallD(test->entry(), 7.0, -3.0));
+  EXPECT_EQ(-10.0, CallD(test->entry(), -7.0, -3.0));
+}
+
+ASSEMBLER_TEST_GENERATE(DoubleSubtract, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fsubd(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleSubtract, test) {
+  EXPECT_DISASSEMBLY(
+      "0ab50553 fsub.d fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-2.0, CallD(test->entry(), 3.0, 5.0));
+  EXPECT_EQ(-8.0, CallD(test->entry(), -3.0, 5.0));
+  EXPECT_EQ(8.0, CallD(test->entry(), 3.0, -5.0));
+  EXPECT_EQ(2.0, CallD(test->entry(), -3.0, -5.0));
+
+  EXPECT_EQ(4.0, CallD(test->entry(), 7.0, 3.0));
+  EXPECT_EQ(-10.0, CallD(test->entry(), -7.0, 3.0));
+  EXPECT_EQ(10.0, CallD(test->entry(), 7.0, -3.0));
+  EXPECT_EQ(-4.0, CallD(test->entry(), -7.0, -3.0));
+}
+
+ASSEMBLER_TEST_GENERATE(DoubleMultiply, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fmuld(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleMultiply, test) {
+  EXPECT_DISASSEMBLY(
+      "12b50553 fmul.d fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(15.0, CallD(test->entry(), 3.0, 5.0));
+  EXPECT_EQ(-15.0, CallD(test->entry(), -3.0, 5.0));
+  EXPECT_EQ(-15.0, CallD(test->entry(), 3.0, -5.0));
+  EXPECT_EQ(15.0, CallD(test->entry(), -3.0, -5.0));
+
+  EXPECT_EQ(21.0, CallD(test->entry(), 7.0, 3.0));
+  EXPECT_EQ(-21.0, CallD(test->entry(), -7.0, 3.0));
+  EXPECT_EQ(-21.0, CallD(test->entry(), 7.0, -3.0));
+  EXPECT_EQ(21.0, CallD(test->entry(), -7.0, -3.0));
+}
+
+ASSEMBLER_TEST_GENERATE(DoubleDivide, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fdivd(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleDivide, test) {
+  EXPECT_DISASSEMBLY(
+      "1ab50553 fdiv.d fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(2.0, CallD(test->entry(), 10.0, 5.0));
+  EXPECT_EQ(-2.0, CallD(test->entry(), -10.0, 5.0));
+  EXPECT_EQ(-2.0, CallD(test->entry(), 10.0, -5.0));
+  EXPECT_EQ(2.0, CallD(test->entry(), -10.0, -5.0));
+}
+
+ASSEMBLER_TEST_GENERATE(DoubleSquareRoot, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fsqrtd(FA0, FA0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleSquareRoot, test) {
+  EXPECT_DISASSEMBLY(
+      "5a050553 fsqrt.d fa0, fa0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0.0, CallD(test->entry(), 0.0));
+  EXPECT_EQ(1.0, CallD(test->entry(), 1.0));
+  EXPECT_EQ(2.0, CallD(test->entry(), 4.0));
+  EXPECT_EQ(3.0, CallD(test->entry(), 9.0));
+}
+
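+// The sign-injection instructions copy the magnitude of rs1 and take the
+// sign from rs2 (fsgnj), its negation (fsgnjn), or the XOR of both operands'
+// signs (fsgnjx). With rs1 == rs2 they double as fmv, fneg, and fabs.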
+ASSEMBLER_TEST_GENERATE(DoubleSignInject, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fsgnjd(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleSignInject, test) {
+  EXPECT_DISASSEMBLY(
+      "22b50553 fsgnj.d fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(3.0, CallD(test->entry(), 3.0, 5.0));
+  EXPECT_EQ(3.0, CallD(test->entry(), -3.0, 5.0));
+  EXPECT_EQ(-3.0, CallD(test->entry(), 3.0, -5.0));
+  EXPECT_EQ(-3.0, CallD(test->entry(), -3.0, -5.0));
+}
+
+ASSEMBLER_TEST_GENERATE(DoubleNegatedSignInject, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fsgnjnd(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleNegatedSignInject, test) {
+  EXPECT_DISASSEMBLY(
+      "22b51553 fsgnjn.d fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-3.0, CallD(test->entry(), 3.0, 5.0));
+  EXPECT_EQ(-3.0, CallD(test->entry(), -3.0, 5.0));
+  EXPECT_EQ(3.0, CallD(test->entry(), 3.0, -5.0));
+  EXPECT_EQ(3.0, CallD(test->entry(), -3.0, -5.0));
+}
+
+ASSEMBLER_TEST_GENERATE(DoubleXorSignInject, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fsgnjxd(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleXorSignInject, test) {
+  EXPECT_DISASSEMBLY(
+      "22b52553 fsgnjx.d fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(3.0, CallD(test->entry(), 3.0, 5.0));
+  EXPECT_EQ(-3.0, CallD(test->entry(), -3.0, 5.0));
+  EXPECT_EQ(-3.0, CallD(test->entry(), 3.0, -5.0));
+  EXPECT_EQ(3.0, CallD(test->entry(), -3.0, -5.0));
+}
+
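+// fmin.d/fmax.d follow IEEE 754-2008 minNum/maxNum: if exactly one operand
+// is NaN, the other (non-NaN) operand is returned.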
+ASSEMBLER_TEST_GENERATE(DoubleMin, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fmind(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleMin, test) {
+  EXPECT_DISASSEMBLY(
+      "2ab50553 fmin.d fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(1.0, CallD(test->entry(), 3.0, 1.0));
+  EXPECT_EQ(3.0, CallD(test->entry(), 3.0, 3.0));
+  EXPECT_EQ(3.0, CallD(test->entry(), 3.0, 5.0));
+  EXPECT_EQ(-1.0, CallD(test->entry(), 3.0, -1.0));
+  EXPECT_EQ(-3.0, CallD(test->entry(), 3.0, -3.0));
+  EXPECT_EQ(-5.0, CallD(test->entry(), 3.0, -5.0));
+  EXPECT_EQ(-3.0, CallD(test->entry(), -3.0, 1.0));
+  EXPECT_EQ(-3.0, CallD(test->entry(), -3.0, 3.0));
+  EXPECT_EQ(-3.0, CallD(test->entry(), -3.0, 5.0));
+  EXPECT_EQ(-3.0, CallD(test->entry(), -3.0, -1.0));
+  EXPECT_EQ(-3.0, CallD(test->entry(), -3.0, -3.0));
+  EXPECT_EQ(-5.0, CallD(test->entry(), -3.0, -5.0));
+
+  double qNAN = std::numeric_limits<double>::quiet_NaN();
+  EXPECT_EQ(3.0, CallD(test->entry(), 3.0, qNAN));
+  EXPECT_EQ(3.0, CallD(test->entry(), qNAN, 3.0));
+  EXPECT_EQ(-3.0, CallD(test->entry(), -3.0, qNAN));
+  EXPECT_EQ(-3.0, CallD(test->entry(), qNAN, -3.0));
+}
+
+ASSEMBLER_TEST_GENERATE(DoubleMax, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fmaxd(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleMax, test) {
+  EXPECT_DISASSEMBLY(
+      "2ab51553 fmax.d fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(3.0, CallD(test->entry(), 3.0, 1.0));
+  EXPECT_EQ(3.0, CallD(test->entry(), 3.0, 3.0));
+  EXPECT_EQ(5.0, CallD(test->entry(), 3.0, 5.0));
+  EXPECT_EQ(3.0, CallD(test->entry(), 3.0, -1.0));
+  EXPECT_EQ(3.0, CallD(test->entry(), 3.0, -3.0));
+  EXPECT_EQ(3.0, CallD(test->entry(), 3.0, -5.0));
+  EXPECT_EQ(1.0, CallD(test->entry(), -3.0, 1.0));
+  EXPECT_EQ(3.0, CallD(test->entry(), -3.0, 3.0));
+  EXPECT_EQ(5.0, CallD(test->entry(), -3.0, 5.0));
+  EXPECT_EQ(-1.0, CallD(test->entry(), -3.0, -1.0));
+  EXPECT_EQ(-3.0, CallD(test->entry(), -3.0, -3.0));
+  EXPECT_EQ(-3.0, CallD(test->entry(), -3.0, -5.0));
+
+  double qNAN = std::numeric_limits<double>::quiet_NaN();
+  EXPECT_EQ(3.0, CallD(test->entry(), 3.0, qNAN));
+  EXPECT_EQ(3.0, CallD(test->entry(), qNAN, 3.0));
+  EXPECT_EQ(-3.0, CallD(test->entry(), -3.0, qNAN));
+  EXPECT_EQ(-3.0, CallD(test->entry(), qNAN, -3.0));
+}
+
+ASSEMBLER_TEST_GENERATE(DoubleToSingle, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtsd(FA0, FA0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleToSingle, test) {
+  EXPECT_DISASSEMBLY(
+      "40150553 fcvt.s.d fa0, fa0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0.0f, CallF(test->entry(), 0.0));
+  EXPECT_EQ(42.0f, CallF(test->entry(), 42.0));
+  EXPECT_EQ(-42.0f, CallF(test->entry(), -42.0));
+  EXPECT_EQ(true, isnan(CallF(test->entry(),
+                              std::numeric_limits<double>::quiet_NaN())));
+  EXPECT_EQ(true, isnan(CallF(test->entry(),
+                              std::numeric_limits<double>::signaling_NaN())));
+  EXPECT_EQ(std::numeric_limits<float>::infinity(),
+            CallF(test->entry(), std::numeric_limits<double>::infinity()));
+  EXPECT_EQ(-std::numeric_limits<float>::infinity(),
+            CallF(test->entry(), -std::numeric_limits<double>::infinity()));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleToDouble, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtds(FA0, FA0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleToDouble, test) {
+  EXPECT_DISASSEMBLY(
+      "42050553 fcvt.d.s fa0, fa0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0.0, CallD(test->entry(), 0.0f));
+  EXPECT_EQ(42.0, CallD(test->entry(), 42.0f));
+  EXPECT_EQ(-42.0, CallD(test->entry(), -42.0f));
+  EXPECT_EQ(true, isnan(CallD(test->entry(),
+                              std::numeric_limits<float>::quiet_NaN())));
+  EXPECT_EQ(true, isnan(CallD(test->entry(),
+                              std::numeric_limits<float>::signaling_NaN())));
+  EXPECT_EQ(std::numeric_limits<double>::infinity(),
+            CallD(test->entry(), std::numeric_limits<float>::infinity()));
+  EXPECT_EQ(-std::numeric_limits<double>::infinity(),
+            CallD(test->entry(), -std::numeric_limits<float>::infinity()));
+}
+
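+// With the D extension, a single-precision value held in a double-width FP
+// register must be NaN-boxed (upper 32 bits all ones); reading such a
+// register as a double therefore yields a NaN, which this test relies on.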
+ASSEMBLER_TEST_GENERATE(NaNBoxing, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(NaNBoxing, test) {
+  EXPECT_DISASSEMBLY("00008067 ret\n");
+  EXPECT_EQ(true, isnan(CallD(test->entry(), 42.0f)));
+}
+
+ASSEMBLER_TEST_GENERATE(DoubleEqual, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ feqd(A0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleEqual, test) {
+  EXPECT_DISASSEMBLY(
+      "a2b52553 feq.d a0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, CallI(test->entry(), 3.0, 1.0));
+  EXPECT_EQ(1, CallI(test->entry(), 3.0, 3.0));
+  EXPECT_EQ(0, CallI(test->entry(), 3.0, 5.0));
+  EXPECT_EQ(0, CallI(test->entry(), 3.0, -1.0));
+  EXPECT_EQ(0, CallI(test->entry(), 3.0, -3.0));
+  EXPECT_EQ(0, CallI(test->entry(), 3.0, -5.0));
+  EXPECT_EQ(0, CallI(test->entry(), -3.0, 1.0));
+  EXPECT_EQ(0, CallI(test->entry(), -3.0, 3.0));
+  EXPECT_EQ(0, CallI(test->entry(), -3.0, 5.0));
+  EXPECT_EQ(0, CallI(test->entry(), -3.0, -1.0));
+  EXPECT_EQ(1, CallI(test->entry(), -3.0, -3.0));
+  EXPECT_EQ(0, CallI(test->entry(), -3.0, -5.0));
+
+  double qNAN = std::numeric_limits<double>::quiet_NaN();
+  EXPECT_EQ(0, CallI(test->entry(), 3.0, qNAN));
+  EXPECT_EQ(0, CallI(test->entry(), qNAN, 3.0));
+  EXPECT_EQ(0, CallI(test->entry(), -3.0, qNAN));
+  EXPECT_EQ(0, CallI(test->entry(), qNAN, -3.0));
+}
+
+ASSEMBLER_TEST_GENERATE(DoubleLessThan, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fltd(A0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleLessThan, test) {
+  EXPECT_DISASSEMBLY(
+      "a2b51553 flt.d a0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, CallI(test->entry(), 3.0, 1.0));
+  EXPECT_EQ(0, CallI(test->entry(), 3.0, 3.0));
+  EXPECT_EQ(1, CallI(test->entry(), 3.0, 5.0));
+  EXPECT_EQ(0, CallI(test->entry(), 3.0, -1.0));
+  EXPECT_EQ(0, CallI(test->entry(), 3.0, -3.0));
+  EXPECT_EQ(0, CallI(test->entry(), 3.0, -5.0));
+  EXPECT_EQ(1, CallI(test->entry(), -3.0, 1.0));
+  EXPECT_EQ(1, CallI(test->entry(), -3.0, 3.0));
+  EXPECT_EQ(1, CallI(test->entry(), -3.0, 5.0));
+  EXPECT_EQ(1, CallI(test->entry(), -3.0, -1.0));
+  EXPECT_EQ(0, CallI(test->entry(), -3.0, -3.0));
+  EXPECT_EQ(0, CallI(test->entry(), -3.0, -5.0));
+
+  double qNAN = std::numeric_limits<double>::quiet_NaN();
+  EXPECT_EQ(0, CallI(test->entry(), 3.0, qNAN));
+  EXPECT_EQ(0, CallI(test->entry(), qNAN, 3.0));
+  EXPECT_EQ(0, CallI(test->entry(), -3.0, qNAN));
+  EXPECT_EQ(0, CallI(test->entry(), qNAN, -3.0));
+}
+
+ASSEMBLER_TEST_GENERATE(DoubleLessOrEqual, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fled(A0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleLessOrEqual, test) {
+  EXPECT_DISASSEMBLY(
+      "a2b50553 fle.d a0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, CallI(test->entry(), 3.0, 1.0));
+  EXPECT_EQ(1, CallI(test->entry(), 3.0, 3.0));
+  EXPECT_EQ(1, CallI(test->entry(), 3.0, 5.0));
+  EXPECT_EQ(0, CallI(test->entry(), 3.0, -1.0));
+  EXPECT_EQ(0, CallI(test->entry(), 3.0, -3.0));
+  EXPECT_EQ(0, CallI(test->entry(), 3.0, -5.0));
+  EXPECT_EQ(1, CallI(test->entry(), -3.0, 1.0));
+  EXPECT_EQ(1, CallI(test->entry(), -3.0, 3.0));
+  EXPECT_EQ(1, CallI(test->entry(), -3.0, 5.0));
+  EXPECT_EQ(1, CallI(test->entry(), -3.0, -1.0));
+  EXPECT_EQ(1, CallI(test->entry(), -3.0, -3.0));
+  EXPECT_EQ(0, CallI(test->entry(), -3.0, -5.0));
+
+  double qNAN = std::numeric_limits<double>::quiet_NaN();
+  EXPECT_EQ(0, CallI(test->entry(), 3.0, qNAN));
+  EXPECT_EQ(0, CallI(test->entry(), qNAN, 3.0));
+  EXPECT_EQ(0, CallI(test->entry(), -3.0, qNAN));
+  EXPECT_EQ(0, CallI(test->entry(), qNAN, -3.0));
+}
+
+ASSEMBLER_TEST_GENERATE(DoubleClassify, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fclassd(A0, FA0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleClassify, test) {
+  EXPECT_DISASSEMBLY(
+      "e2051553 fclass.d a0, fa0\n"
+      "00008067 ret\n");
+  // Neg infinity
+  EXPECT_EQ(1 << 0,
+            CallI(test->entry(), -std::numeric_limits<double>::infinity()));
+  // Neg normal
+  EXPECT_EQ(1 << 1, CallI(test->entry(), -1.0));
+  // Neg subnormal
+  EXPECT_EQ(1 << 2,
+            CallI(test->entry(), -std::numeric_limits<double>::min() / 2.0));
+  // Neg zero
+  EXPECT_EQ(1 << 3, CallI(test->entry(), -0.0));
+  // Pos zero
+  EXPECT_EQ(1 << 4, CallI(test->entry(), 0.0));
+  // Pos subnormal
+  EXPECT_EQ(1 << 5,
+            CallI(test->entry(), std::numeric_limits<double>::min() / 2.0));
+  // Pos normal
+  EXPECT_EQ(1 << 6, CallI(test->entry(), 1.0));
+  // Pos infinity
+  EXPECT_EQ(1 << 7,
+            CallI(test->entry(), std::numeric_limits<double>::infinity()));
+  // Signaling NaN
+  EXPECT_EQ(1 << 8,
+            CallI(test->entry(), std::numeric_limits<double>::signaling_NaN()));
+  // Quiet NaN
+  EXPECT_EQ(1 << 9,
+            CallI(test->entry(), std::numeric_limits<double>::quiet_NaN()));
+}
+
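+// fcvt.w.d saturates out-of-range inputs to kMinInt32/kMaxInt32 (NaN maps to
+// kMaxInt32); on RV64 the 32-bit result is sign-extended into the register.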
+ASSEMBLER_TEST_GENERATE(ConvertDoubleToWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtwd(A0, FA0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertDoubleToWord, test) {
+  EXPECT_DISASSEMBLY(
+      "c2050553 fcvt.w.d a0, fa0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-42, CallI(test->entry(), static_cast<double>(-42)));
+  EXPECT_EQ(0, CallI(test->entry(), static_cast<double>(0)));
+  EXPECT_EQ(42, CallI(test->entry(), static_cast<double>(42)));
+  EXPECT_EQ(sign_extend(kMinInt32),
+            CallI(test->entry(), static_cast<double>(kMinInt32)));
+  EXPECT_EQ(sign_extend(kMaxInt32),
+            CallI(test->entry(), static_cast<double>(kMaxInt32)));
+  EXPECT_EQ(sign_extend(kMaxInt32),
+            CallI(test->entry(), static_cast<double>(kMaxUint32)));
+  EXPECT_EQ(sign_extend(kMinInt32),
+            CallI(test->entry(), static_cast<double>(kMinInt64)));
+  EXPECT_EQ(sign_extend(kMaxInt32),
+            CallI(test->entry(), static_cast<double>(kMaxInt64)));
+  EXPECT_EQ(sign_extend(kMaxInt32),
+            CallI(test->entry(), static_cast<double>(kMaxUint64)));
+  EXPECT_EQ(sign_extend(kMinInt32),
+            CallI(test->entry(), -std::numeric_limits<double>::infinity()));
+  EXPECT_EQ(sign_extend(kMaxInt32),
+            CallI(test->entry(), std::numeric_limits<double>::infinity()));
+  EXPECT_EQ(sign_extend(kMaxInt32),
+            CallI(test->entry(), std::numeric_limits<double>::signaling_NaN()));
+}
+
+ASSEMBLER_TEST_GENERATE(ConvertDoubleToUnsignedWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtwud(A0, FA0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertDoubleToUnsignedWord, test) {
+  EXPECT_DISASSEMBLY(
+      "c2150553 fcvt.wu.d a0, fa0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, CallI(test->entry(), static_cast<double>(-42)));
+  EXPECT_EQ(0, CallI(test->entry(), static_cast<double>(0)));
+  EXPECT_EQ(42, CallI(test->entry(), static_cast<double>(42)));
+  EXPECT_EQ(sign_extend(0),
+            CallI(test->entry(), static_cast<double>(kMinInt32)));
+  EXPECT_EQ(sign_extend(kMaxInt32),
+            CallI(test->entry(), static_cast<double>(kMaxInt32)));
+  EXPECT_EQ(sign_extend(kMaxUint32),
+            CallI(test->entry(), static_cast<double>(kMaxUint32)));
+  EXPECT_EQ(sign_extend(0),
+            CallI(test->entry(), static_cast<double>(kMinInt64)));
+  EXPECT_EQ(sign_extend(kMaxUint32),
+            CallI(test->entry(), static_cast<double>(kMaxInt64)));
+  EXPECT_EQ(sign_extend(kMaxUint32),
+            CallI(test->entry(), static_cast<double>(kMaxUint64)));
+  EXPECT_EQ(sign_extend(0),
+            CallI(test->entry(), -std::numeric_limits<double>::infinity()));
+  EXPECT_EQ(sign_extend(kMaxUint32),
+            CallI(test->entry(), std::numeric_limits<double>::infinity()));
+  EXPECT_EQ(sign_extend(kMaxUint32),
+            CallI(test->entry(), std::numeric_limits<double>::signaling_NaN()));
+}
+
+ASSEMBLER_TEST_GENERATE(ConvertWordToDouble, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtdw(FA0, A0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertWordToDouble, test) {
+  EXPECT_DISASSEMBLY(
+      "d2050553 fcvt.d.w fa0, a0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-42.0, CallD(test->entry(), sign_extend(-42)));
+  EXPECT_EQ(0.0, CallD(test->entry(), sign_extend(0)));
+  EXPECT_EQ(42.0, CallD(test->entry(), sign_extend(42)));
+  EXPECT_EQ(static_cast<double>(kMinInt32),
+            CallD(test->entry(), sign_extend(kMinInt32)));
+  EXPECT_EQ(static_cast<double>(kMaxInt32),
+            CallD(test->entry(), sign_extend(kMaxInt32)));
+  EXPECT_EQ(-1.0, CallD(test->entry(), sign_extend(kMaxUint32)));
+}
+
+ASSEMBLER_TEST_GENERATE(ConvertUnsignedWordToDouble, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtdwu(FA0, A0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertUnsignedWordToDouble, test) {
+  EXPECT_DISASSEMBLY(
+      "d2150553 fcvt.d.wu fa0, a0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(
+      static_cast<double>(static_cast<uint32_t>(static_cast<int32_t>(-42))),
+      CallD(test->entry(), sign_extend(-42)));
+  EXPECT_EQ(0.0, CallD(test->entry(), sign_extend(0)));
+  EXPECT_EQ(42.0, CallD(test->entry(), sign_extend(42)));
+  EXPECT_EQ(static_cast<double>(static_cast<uint32_t>(kMinInt32)),
+            CallD(test->entry(), sign_extend(kMinInt32)));
+  EXPECT_EQ(static_cast<double>(kMaxInt32),
+            CallD(test->entry(), sign_extend(kMaxInt32)));
+  EXPECT_EQ(static_cast<double>(kMaxUint32),
+            CallD(test->entry(), sign_extend(kMaxUint32)));
+}
+
+ASSEMBLER_TEST_GENERATE(DoubleMove, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fmvd(FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleMove, test) {
+  EXPECT_DISASSEMBLY(
+      "22b58553 fmv.d fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(36.0, CallD(test->entry(), 42.0, 36.0));
+  EXPECT_EQ(std::numeric_limits<double>::infinity(),
+            CallD(test->entry(), -std::numeric_limits<double>::infinity(),
+                  std::numeric_limits<double>::infinity()));
+}
+
+ASSEMBLER_TEST_GENERATE(DoubleAbsoluteValue, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fabsd(FA0, FA0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleAbsoluteValue, test) {
+  EXPECT_DISASSEMBLY(
+      "22a52553 fabs.d fa0, fa0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0.0, CallD(test->entry(), 0.0));
+  EXPECT_EQ(0.0, CallD(test->entry(), -0.0));
+  EXPECT_EQ(42.0, CallD(test->entry(), 42.0));
+  EXPECT_EQ(42.0, CallD(test->entry(), -42.0));
+  EXPECT_EQ(std::numeric_limits<double>::infinity(),
+            CallD(test->entry(), std::numeric_limits<double>::infinity()));
+  EXPECT_EQ(std::numeric_limits<double>::infinity(),
+            CallD(test->entry(), -std::numeric_limits<double>::infinity()));
+}
+
+ASSEMBLER_TEST_GENERATE(DoubleNegate, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fnegd(FA0, FA0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleNegate, test) {
+  EXPECT_DISASSEMBLY(
+      "22a51553 fneg.d fa0, fa0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-0.0, CallD(test->entry(), 0.0));
+  EXPECT_EQ(0.0, CallD(test->entry(), -0.0));
+  EXPECT_EQ(-42.0, CallD(test->entry(), 42.0));
+  EXPECT_EQ(42.0, CallD(test->entry(), -42.0));
+  EXPECT_EQ(-std::numeric_limits<double>::infinity(),
+            CallD(test->entry(), std::numeric_limits<double>::infinity()));
+  EXPECT_EQ(std::numeric_limits<double>::infinity(),
+            CallD(test->entry(), -std::numeric_limits<double>::infinity()));
+}
+
+#if XLEN >= 64
+ASSEMBLER_TEST_GENERATE(ConvertDoubleToDoubleWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtld(A0, FA0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertDoubleToDoubleWord, test) {
+  EXPECT_DISASSEMBLY(
+      "c2250553 fcvt.l.d a0, fa0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-42, CallI(test->entry(), static_cast<double>(-42)));
+  EXPECT_EQ(0, CallI(test->entry(), static_cast<double>(0)));
+  EXPECT_EQ(42, CallI(test->entry(), static_cast<double>(42)));
+  EXPECT_EQ(static_cast<int64_t>(kMinInt32),
+            CallI(test->entry(), static_cast<double>(kMinInt32)));
+  EXPECT_EQ(static_cast<int64_t>(kMaxInt32),
+            CallI(test->entry(), static_cast<double>(kMaxInt32)));
+  EXPECT_EQ(static_cast<int64_t>(kMaxUint32),
+            CallI(test->entry(), static_cast<double>(kMaxUint32)));
+  EXPECT_EQ(kMinInt64, CallI(test->entry(), static_cast<double>(kMinInt64)));
+  EXPECT_EQ(kMaxInt64, CallI(test->entry(), static_cast<double>(kMaxInt64)));
+  EXPECT_EQ(kMaxInt64, CallI(test->entry(), static_cast<double>(kMaxUint64)));
+  EXPECT_EQ(kMinInt64,
+            CallI(test->entry(), -std::numeric_limits<double>::infinity()));
+  EXPECT_EQ(kMaxInt64,
+            CallI(test->entry(), std::numeric_limits<double>::infinity()));
+  EXPECT_EQ(kMaxInt64,
+            CallI(test->entry(), std::numeric_limits<double>::signaling_NaN()));
+}
+
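+// The following conversions exercise the explicit rounding modes: RNE rounds
+// to nearest with ties to even, RTZ toward zero, RDN toward -infinity, RUP
+// toward +infinity, and RMM to nearest with ties away from zero.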
+ASSEMBLER_TEST_GENERATE(ConvertDoubleToDoubleWord_RNE, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtld(A0, FA0, RNE);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertDoubleToDoubleWord_RNE, test) {
+  EXPECT_DISASSEMBLY(
+      "c2250553 fcvt.l.d a0, fa0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-44, CallI(test->entry(), -43.6));
+  EXPECT_EQ(-44, CallI(test->entry(), -43.5));
+  EXPECT_EQ(-43, CallI(test->entry(), -43.4));
+  EXPECT_EQ(-43, CallI(test->entry(), -43.0));
+  EXPECT_EQ(-43, CallI(test->entry(), -42.6));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.5));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.4));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.0));
+  EXPECT_EQ(0, CallI(test->entry(), -0.0));
+  EXPECT_EQ(0, CallI(test->entry(), +0.0));
+  EXPECT_EQ(42, CallI(test->entry(), 42.0));
+  EXPECT_EQ(42, CallI(test->entry(), 42.4));
+  EXPECT_EQ(42, CallI(test->entry(), 42.5));
+  EXPECT_EQ(43, CallI(test->entry(), 42.6));
+  EXPECT_EQ(43, CallI(test->entry(), 43.0));
+  EXPECT_EQ(43, CallI(test->entry(), 43.4));
+  EXPECT_EQ(44, CallI(test->entry(), 43.5));
+  EXPECT_EQ(44, CallI(test->entry(), 43.6));
+}
+
+ASSEMBLER_TEST_GENERATE(ConvertDoubleToDoubleWord_RTZ, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtld(A0, FA0, RTZ);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertDoubleToDoubleWord_RTZ, test) {
+  EXPECT_DISASSEMBLY(
+      "c2251553 fcvt.l.d a0, fa0, rtz\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-43, CallI(test->entry(), -43.6));
+  EXPECT_EQ(-43, CallI(test->entry(), -43.5));
+  EXPECT_EQ(-43, CallI(test->entry(), -43.4));
+  EXPECT_EQ(-43, CallI(test->entry(), -43.0));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.6));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.5));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.4));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.0));
+  EXPECT_EQ(0, CallI(test->entry(), -0.0));
+  EXPECT_EQ(0, CallI(test->entry(), +0.0));
+  EXPECT_EQ(42, CallI(test->entry(), 42.0));
+  EXPECT_EQ(42, CallI(test->entry(), 42.4));
+  EXPECT_EQ(42, CallI(test->entry(), 42.5));
+  EXPECT_EQ(42, CallI(test->entry(), 42.6));
+  EXPECT_EQ(43, CallI(test->entry(), 43.0));
+  EXPECT_EQ(43, CallI(test->entry(), 43.4));
+  EXPECT_EQ(43, CallI(test->entry(), 43.5));
+  EXPECT_EQ(43, CallI(test->entry(), 43.6));
+}
+
+ASSEMBLER_TEST_GENERATE(ConvertDoubleToDoubleWord_RDN, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtld(A0, FA0, RDN);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertDoubleToDoubleWord_RDN, test) {
+  EXPECT_DISASSEMBLY(
+      "c2252553 fcvt.l.d a0, fa0, rdn\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-44, CallI(test->entry(), -43.6));
+  EXPECT_EQ(-44, CallI(test->entry(), -43.5));
+  EXPECT_EQ(-44, CallI(test->entry(), -43.4));
+  EXPECT_EQ(-43, CallI(test->entry(), -43.0));
+  EXPECT_EQ(-43, CallI(test->entry(), -42.6));
+  EXPECT_EQ(-43, CallI(test->entry(), -42.5));
+  EXPECT_EQ(-43, CallI(test->entry(), -42.4));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.0));
+  EXPECT_EQ(0, CallI(test->entry(), -0.0));
+  EXPECT_EQ(0, CallI(test->entry(), +0.0));
+  EXPECT_EQ(42, CallI(test->entry(), 42.0));
+  EXPECT_EQ(42, CallI(test->entry(), 42.4));
+  EXPECT_EQ(42, CallI(test->entry(), 42.5));
+  EXPECT_EQ(42, CallI(test->entry(), 42.6));
+  EXPECT_EQ(43, CallI(test->entry(), 43.0));
+  EXPECT_EQ(43, CallI(test->entry(), 43.4));
+  EXPECT_EQ(43, CallI(test->entry(), 43.5));
+  EXPECT_EQ(43, CallI(test->entry(), 43.6));
+}
+
+ASSEMBLER_TEST_GENERATE(ConvertDoubleToDoubleWord_RUP, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtld(A0, FA0, RUP);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertDoubleToDoubleWord_RUP, test) {
+  EXPECT_DISASSEMBLY(
+      "c2253553 fcvt.l.d a0, fa0, rup\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-43, CallI(test->entry(), -43.6));
+  EXPECT_EQ(-43, CallI(test->entry(), -43.5));
+  EXPECT_EQ(-43, CallI(test->entry(), -43.4));
+  EXPECT_EQ(-43, CallI(test->entry(), -43.0));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.6));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.5));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.4));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.0));
+  EXPECT_EQ(0, CallI(test->entry(), -0.0));
+  EXPECT_EQ(0, CallI(test->entry(), +0.0));
+  EXPECT_EQ(42, CallI(test->entry(), 42.0));
+  EXPECT_EQ(43, CallI(test->entry(), 42.4));
+  EXPECT_EQ(43, CallI(test->entry(), 42.5));
+  EXPECT_EQ(43, CallI(test->entry(), 42.6));
+  EXPECT_EQ(43, CallI(test->entry(), 43.0));
+  EXPECT_EQ(44, CallI(test->entry(), 43.4));
+  EXPECT_EQ(44, CallI(test->entry(), 43.5));
+  EXPECT_EQ(44, CallI(test->entry(), 43.6));
+}
+
+ASSEMBLER_TEST_GENERATE(ConvertDoubleToDoubleWord_RMM, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtld(A0, FA0, RMM);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertDoubleToDoubleWord_RMM, test) {
+  EXPECT_DISASSEMBLY(
+      "c2254553 fcvt.l.d a0, fa0, rmm\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-44, CallI(test->entry(), -43.6));
+  EXPECT_EQ(-44, CallI(test->entry(), -43.5));
+  EXPECT_EQ(-43, CallI(test->entry(), -43.4));
+  EXPECT_EQ(-43, CallI(test->entry(), -43.0));
+  EXPECT_EQ(-43, CallI(test->entry(), -42.6));
+  EXPECT_EQ(-43, CallI(test->entry(), -42.5));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.4));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.0));
+  EXPECT_EQ(0, CallI(test->entry(), -0.0));
+  EXPECT_EQ(0, CallI(test->entry(), +0.0));
+  EXPECT_EQ(42, CallI(test->entry(), 42.0));
+  EXPECT_EQ(42, CallI(test->entry(), 42.4));
+  EXPECT_EQ(43, CallI(test->entry(), 42.5));
+  EXPECT_EQ(43, CallI(test->entry(), 42.6));
+  EXPECT_EQ(43, CallI(test->entry(), 43.0));
+  EXPECT_EQ(43, CallI(test->entry(), 43.4));
+  EXPECT_EQ(44, CallI(test->entry(), 43.5));
+  EXPECT_EQ(44, CallI(test->entry(), 43.6));
+}
+
+ASSEMBLER_TEST_GENERATE(ConvertDoubleToUnsignedDoubleWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtlud(A0, FA0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertDoubleToUnsignedDoubleWord, test) {
+  EXPECT_DISASSEMBLY(
+      "c2350553 fcvt.lu.d a0, fa0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, CallI(test->entry(), static_cast<double>(-42)));
+  EXPECT_EQ(0, CallI(test->entry(), static_cast<double>(0)));
+  EXPECT_EQ(42, CallI(test->entry(), static_cast<double>(42)));
+  EXPECT_EQ(static_cast<int64_t>(static_cast<uint64_t>(0)),
+            CallI(test->entry(), static_cast<double>(kMinInt32)));
+  EXPECT_EQ(static_cast<int64_t>(static_cast<uint64_t>(kMaxInt32)),
+            CallI(test->entry(), static_cast<double>(kMaxInt32)));
+  EXPECT_EQ(static_cast<int64_t>(static_cast<uint64_t>(kMaxUint32)),
+            CallI(test->entry(), static_cast<double>(kMaxUint32)));
+  EXPECT_EQ(static_cast<int64_t>(static_cast<uint64_t>(0)),
+            CallI(test->entry(), static_cast<double>(kMinInt64)));
+  EXPECT_EQ(static_cast<int64_t>(static_cast<uint64_t>(kMaxInt64) + 1),
+            CallI(test->entry(), static_cast<double>(kMaxInt64)));
+  EXPECT_EQ(static_cast<int64_t>(static_cast<uint64_t>(kMaxUint64)),
+            CallI(test->entry(), static_cast<double>(kMaxUint64)));
+  EXPECT_EQ(static_cast<int64_t>(static_cast<uint64_t>(0)),
+            CallI(test->entry(), -std::numeric_limits<double>::infinity()));
+  EXPECT_EQ(static_cast<int64_t>(static_cast<uint64_t>(kMaxUint64)),
+            CallI(test->entry(), std::numeric_limits<double>::infinity()));
+  EXPECT_EQ(static_cast<int64_t>(static_cast<uint64_t>(kMaxUint64)),
+            CallI(test->entry(), std::numeric_limits<double>::signaling_NaN()));
+}
+
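+// fmv.x.d moves the raw bit pattern between register files without any
+// conversion or NaN quieting, so even signaling-NaN payloads are preserved.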
+ASSEMBLER_TEST_GENERATE(BitCastDoubleToInteger, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fmvxd(A0, FA0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BitCastDoubleToInteger, test) {
+  EXPECT_DISASSEMBLY(
+      "e2050553 fmv.x.d a0, fa0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(bit_cast<int64_t>(0.0), CallI(test->entry(), 0.0));
+  EXPECT_EQ(bit_cast<int64_t>(-0.0), CallI(test->entry(), -0.0));
+  EXPECT_EQ(bit_cast<int64_t>(42.0), CallI(test->entry(), 42.0));
+  EXPECT_EQ(bit_cast<int64_t>(-42.0), CallI(test->entry(), -42.0));
+  EXPECT_EQ(bit_cast<int64_t>(std::numeric_limits<double>::quiet_NaN()),
+            CallI(test->entry(), std::numeric_limits<double>::quiet_NaN()));
+  EXPECT_EQ(bit_cast<int64_t>(std::numeric_limits<double>::signaling_NaN()),
+            CallI(test->entry(), std::numeric_limits<double>::signaling_NaN()));
+  EXPECT_EQ(bit_cast<int64_t>(std::numeric_limits<double>::infinity()),
+            CallI(test->entry(), std::numeric_limits<double>::infinity()));
+  EXPECT_EQ(bit_cast<int64_t>(-std::numeric_limits<double>::infinity()),
+            CallI(test->entry(), -std::numeric_limits<double>::infinity()));
+}
+
+ASSEMBLER_TEST_GENERATE(ConvertDoubleWordToDouble, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtdl(FA0, A0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertDoubleWordToDouble, test) {
+  EXPECT_DISASSEMBLY(
+      "d2250553 fcvt.d.l fa0, a0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0.0, CallD(test->entry(), sign_extend(0)));
+  EXPECT_EQ(42.0, CallD(test->entry(), sign_extend(42)));
+  EXPECT_EQ(-42.0, CallD(test->entry(), sign_extend(-42)));
+  EXPECT_EQ(static_cast<double>(kMinInt32),
+            CallD(test->entry(), sign_extend(kMinInt32)));
+  EXPECT_EQ(static_cast<double>(kMaxInt32),
+            CallD(test->entry(), sign_extend(kMaxInt32)));
+  EXPECT_EQ(static_cast<double>(sign_extend(kMaxUint32)),
+            CallD(test->entry(), sign_extend(kMaxUint32)));
+  EXPECT_EQ(static_cast<double>(kMinInt64),
+            CallD(test->entry(), sign_extend(kMinInt64)));
+  EXPECT_EQ(static_cast<double>(kMaxInt64),
+            CallD(test->entry(), sign_extend(kMaxInt64)));
+  EXPECT_EQ(static_cast<double>(sign_extend(kMaxUint64)),
+            CallD(test->entry(), sign_extend(kMaxUint64)));
+}
+
+ASSEMBLER_TEST_GENERATE(ConvertUnsignedDoubleWordToDouble, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtdlu(FA0, A0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertUnsignedDoubleWordToDouble, test) {
+  EXPECT_DISASSEMBLY(
+      "d2350553 fcvt.d.lu fa0, a0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0.0, CallD(test->entry(), sign_extend(0)));
+  EXPECT_EQ(42.0, CallD(test->entry(), sign_extend(42)));
+  EXPECT_EQ(static_cast<double>(static_cast<uint64_t>(sign_extend(-42))),
+            CallD(test->entry(), sign_extend(-42)));
+  EXPECT_EQ(static_cast<double>(static_cast<uint64_t>(sign_extend(kMinInt32))),
+            CallD(test->entry(), sign_extend(kMinInt32)));
+  EXPECT_EQ(static_cast<double>(static_cast<uint64_t>(sign_extend(kMaxInt32))),
+            CallD(test->entry(), sign_extend(kMaxInt32)));
+  EXPECT_EQ(static_cast<double>(static_cast<uint64_t>(sign_extend(kMaxUint32))),
+            CallD(test->entry(), sign_extend(kMaxUint32)));
+  EXPECT_EQ(static_cast<double>(static_cast<uint64_t>(sign_extend(kMinInt64))),
+            CallD(test->entry(), sign_extend(kMinInt64)));
+  EXPECT_EQ(static_cast<double>(static_cast<uint64_t>(sign_extend(kMaxInt64))),
+            CallD(test->entry(), sign_extend(kMaxInt64)));
+  EXPECT_EQ(static_cast<double>(kMaxUint64),
+            CallD(test->entry(), sign_extend(kMaxUint64)));
+}
+
+ASSEMBLER_TEST_GENERATE(BitCastIntegerToDouble, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fmvdx(FA0, A0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BitCastIntegerToDouble, test) {
+  EXPECT_DISASSEMBLY(
+      "f2050553 fmv.d.x fa0, a0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0.0, CallD(test->entry(), bit_cast<int64_t>(0.0)));
+  EXPECT_EQ(-0.0, CallD(test->entry(), bit_cast<int64_t>(-0.0)));
+  EXPECT_EQ(42.0, CallD(test->entry(), bit_cast<int64_t>(42.0)));
+  EXPECT_EQ(-42.0, CallD(test->entry(), bit_cast<int64_t>(-42.0)));
+  EXPECT_EQ(true, isnan(CallD(test->entry(),
+                              bit_cast<int64_t>(
+                                  std::numeric_limits<double>::quiet_NaN()))));
+  EXPECT_EQ(true,
+            isnan(CallD(test->entry(),
+                        bit_cast<int64_t>(
+                            std::numeric_limits<double>::signaling_NaN()))));
+  EXPECT_EQ(std::numeric_limits<double>::infinity(),
+            CallD(test->entry(),
+                  bit_cast<int64_t>(std::numeric_limits<double>::infinity())));
+  EXPECT_EQ(-std::numeric_limits<double>::infinity(),
+            CallD(test->entry(),
+                  bit_cast<int64_t>(-std::numeric_limits<double>::infinity())));
+}
+#endif
+
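+// A recursive Fibonacci (with fib(0) = fib(1) = 1) that builds a real stack
+// frame, exercising bound labels, nested calls through jal, and XLEN-sized
+// loads and stores via the sx/lx pseudo-ops.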
+ASSEMBLER_TEST_GENERATE(Fibonacci, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  Label fib, base, done;
+  __ Bind(&fib);
+  __ subi(SP, SP, sizeof(uintx_t) * 4);
+  __ sx(RA, Address(SP, 3 * sizeof(uintx_t)));
+  __ sx(A0, Address(SP, 2 * sizeof(uintx_t)));
+  __ subi(A0, A0, 1);
+  __ blez(A0, &base);
+
+  __ jal(&fib);
+  __ sx(A0, Address(SP, 1 * sizeof(uintx_t)));
+  __ lx(A0, Address(SP, 2 * sizeof(uintx_t)));
+  __ subi(A0, A0, 2);
+  __ jal(&fib);
+  __ lx(A1, Address(SP, 1 * sizeof(uintx_t)));
+  __ add(A0, A0, A1);
+  __ j(&done);
+
+  __ Bind(&base);
+  __ li(A0, 1);
+
+  __ Bind(&done);
+  __ lx(RA, Address(SP, 3 * sizeof(uintx_t)));
+  __ addi(SP, SP, sizeof(uintx_t) * 4);
+  __ ret();
+  __ trap();
+}
+ASSEMBLER_TEST_RUN(Fibonacci, test) {
+#if XLEN == 32
+  EXPECT_DISASSEMBLY(
+      "ff010113 addi sp, sp, -16\n"
+      "00112623 sw ra, 12(sp)\n"
+      "00a12423 sw a0, 8(sp)\n"
+      "fff50513 addi a0, a0, -1\n"
+      "02a05263 blez a0, +36\n"
+      "fedff0ef jal -20\n"
+      "00a12223 sw a0, 4(sp)\n"
+      "00812503 lw a0, 8(sp)\n"
+      "ffe50513 addi a0, a0, -2\n"
+      "fddff0ef jal -36\n"
+      "00412583 lw a1, 4(sp)\n"
+      "00b50533 add a0, a0, a1\n"
+      "0080006f j +8\n"
+      "00100513 li a0, 1\n"
+      "00c12083 lw ra, 12(sp)\n"
+      "01010113 addi sp, sp, 16\n"
+      "00008067 ret\n"
+      "00000000 trap\n");
+#elif XLEN == 64
+  EXPECT_DISASSEMBLY(
+      "fe010113 addi sp, sp, -32\n"
+      "00113c23 sd ra, 24(sp)\n"
+      "00a13823 sd a0, 16(sp)\n"
+      "fff50513 addi a0, a0, -1\n"
+      "02a05263 blez a0, +36\n"
+      "fedff0ef jal -20\n"
+      "00a13423 sd a0, 8(sp)\n"
+      "01013503 ld a0, 16(sp)\n"
+      "ffe50513 addi a0, a0, -2\n"
+      "fddff0ef jal -36\n"
+      "00813583 ld a1, 8(sp)\n"
+      "00b50533 add a0, a0, a1\n"
+      "0080006f j +8\n"
+      "00100513 li a0, 1\n"
+      "01813083 ld ra, 24(sp)\n"
+      "02010113 addi sp, sp, 32\n"
+      "00008067 ret\n"
+      "00000000 trap\n");
+#else
+#error Unimplemented
+#endif
+  EXPECT_EQ(1, Call(test->entry(), 0));
+  EXPECT_EQ(1, Call(test->entry(), 1));
+  EXPECT_EQ(2, Call(test->entry(), 2));
+  EXPECT_EQ(3, Call(test->entry(), 3));
+  EXPECT_EQ(5, Call(test->entry(), 4));
+  EXPECT_EQ(8, Call(test->entry(), 5));
+  EXPECT_EQ(13, Call(test->entry(), 6));
+}
+
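+// The C extension provides dedicated stack-pointer-relative load/store
+// encodings (c.lwsp/c.swsp and friends) with wider offset ranges than the
+// register forms; these tests check that SP-based addresses select them.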
+ASSEMBLER_TEST_GENERATE(CompressedLoadStoreWordSP_0, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+
+  __ subi(SP, SP, 256);
+  __ sw(A1, Address(SP, 0));
+  __ lw(A0, Address(SP, 0));
+  __ addi(SP, SP, 256);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedLoadStoreWordSP_0, test) {
+  EXPECT_DISASSEMBLY(
+      "    7111 addi sp, sp, -256\n"
+      "    c02e sw a1, 0(sp)\n"
+      "    4502 lw a0, 0(sp)\n"
+      "    6111 addi sp, sp, 256\n"
+      "    8082 ret\n");
+
+  EXPECT_EQ(sign_extend(0xAB010203), Call(test->entry(), 0, 0xAB010203));
+  EXPECT_EQ(sign_extend(0xCD020405), Call(test->entry(), 0, 0xCD020405));
+  EXPECT_EQ(sign_extend(0xEF030607), Call(test->entry(), 0, 0xEF030607));
+}
+ASSEMBLER_TEST_GENERATE(CompressedLoadStoreWordSP_Pos, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+
+  __ subi(SP, SP, 256);
+  __ sw(A1, Address(SP, 4));
+  __ lw(A0, Address(SP, 4));
+  __ addi(SP, SP, 256);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedLoadStoreWordSP_Pos, test) {
+  EXPECT_DISASSEMBLY(
+      "    7111 addi sp, sp, -256\n"
+      "    c22e sw a1, 4(sp)\n"
+      "    4512 lw a0, 4(sp)\n"
+      "    6111 addi sp, sp, 256\n"
+      "    8082 ret\n");
+
+  EXPECT_EQ(sign_extend(0xAB010203), Call(test->entry(), 0, 0xAB010203));
+  EXPECT_EQ(sign_extend(0xCD020405), Call(test->entry(), 0, 0xCD020405));
+  EXPECT_EQ(sign_extend(0xEF030607), Call(test->entry(), 0, 0xEF030607));
+}
+
+#if XLEN == 32
+ASSEMBLER_TEST_GENERATE(CompressedLoadStoreSingleFloatSP_0, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ subi(SP, SP, 256);
+  __ fsw(FA1, Address(SP, 0));
+  __ flw(FA0, Address(SP, 0));
+  __ addi(SP, SP, 256);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedLoadStoreSingleFloatSP_0, test) {
+  EXPECT_DISASSEMBLY(
+      "    7111 addi sp, sp, -256\n"
+      "    e02e fsw fa1, 0(sp)\n"
+      "    6502 flw fa0, 0(sp)\n"
+      "    6111 addi sp, sp, 256\n"
+      "    8082 ret\n");
+
+  EXPECT_EQ(1.7f, CallF(test->entry(), 0.0f, 1.7f));
+  EXPECT_EQ(2.8f, CallF(test->entry(), 0.0f, 2.8f));
+  EXPECT_EQ(3.9f, CallF(test->entry(), 0.0f, 3.9f));
+}
+
+ASSEMBLER_TEST_GENERATE(CompressedLoadStoreSingleFloatSP_Pos, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ subi(SP, SP, 256);
+  __ fsw(FA1, Address(SP, 4));
+  __ flw(FA0, Address(SP, 4));
+  __ addi(SP, SP, 256);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedLoadStoreSingleFloatSP_Pos, test) {
+  EXPECT_DISASSEMBLY(
+      "    7111 addi sp, sp, -256\n"
+      "    e22e fsw fa1, 4(sp)\n"
+      "    6512 flw fa0, 4(sp)\n"
+      "    6111 addi sp, sp, 256\n"
+      "    8082 ret\n");
+
+  EXPECT_EQ(1.7f, CallF(test->entry(), 0.0f, 1.7f));
+  EXPECT_EQ(2.8f, CallF(test->entry(), 0.0f, 2.8f));
+  EXPECT_EQ(3.9f, CallF(test->entry(), 0.0f, 3.9f));
+}
+#endif
+
+ASSEMBLER_TEST_GENERATE(CompressedLoadStoreDoubleFloatSP_0, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ subi(SP, SP, 256);
+  __ fsd(FA1, Address(SP, 0));
+  __ fld(FA0, Address(SP, 0));
+  __ addi(SP, SP, 256);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedLoadStoreDoubleFloatSP_0, test) {
+  EXPECT_DISASSEMBLY(
+      "    7111 addi sp, sp, -256\n"
+      "    a02e fsd fa1, 0(sp)\n"
+      "    2502 fld fa0, 0(sp)\n"
+      "    6111 addi sp, sp, 256\n"
+      "    8082 ret\n");
+
+  EXPECT_EQ(1.7, CallD(test->entry(), 0.0, 1.7));
+  EXPECT_EQ(2.8, CallD(test->entry(), 0.0, 2.8));
+  EXPECT_EQ(3.9, CallD(test->entry(), 0.0, 3.9));
+}
+ASSEMBLER_TEST_GENERATE(CompressedLoadStoreDoubleFloatSP_Pos, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ subi(SP, SP, 256);
+  __ fsd(FA1, Address(SP, 8));
+  __ fld(FA0, Address(SP, 8));
+  __ addi(SP, SP, 256);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedLoadStoreDoubleFloatSP_Pos, test) {
+  EXPECT_DISASSEMBLY(
+      "    7111 addi sp, sp, -256\n"
+      "    a42e fsd fa1, 8(sp)\n"
+      "    2522 fld fa0, 8(sp)\n"
+      "    6111 addi sp, sp, 256\n"
+      "    8082 ret\n");
+
+  EXPECT_EQ(1.7, CallD(test->entry(), 0.0, 1.7));
+  EXPECT_EQ(2.8, CallD(test->entry(), 0.0, 2.8));
+  EXPECT_EQ(3.9, CallD(test->entry(), 0.0, 3.9));
+}
+
+#if XLEN >= 64
+ASSEMBLER_TEST_GENERATE(CompressedLoadStoreDoubleWordSP_0, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ subi(SP, SP, 256);
+  __ sd(A1, Address(SP, 0));
+  __ ld(A0, Address(SP, 0));
+  __ addi(SP, SP, 256);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedLoadStoreDoubleWordSP_0, test) {
+  EXPECT_DISASSEMBLY(
+      "    7111 addi sp, sp, -256\n"
+      "    e02e sd a1, 0(sp)\n"
+      "    6502 ld a0, 0(sp)\n"
+      "    6111 addi sp, sp, 256\n"
+      "    8082 ret\n");
+
+  EXPECT_EQ((intx_t)0xAB01020304050607,
+            Call(test->entry(), 0, 0xAB01020304050607));
+  EXPECT_EQ((intx_t)0xCD02040505060708,
+            Call(test->entry(), 0, 0xCD02040505060708));
+  EXPECT_EQ((intx_t)0xEF03060708090A0B,
+            Call(test->entry(), 0, 0xEF03060708090A0B));
+}
+ASSEMBLER_TEST_GENERATE(CompressedLoadStoreDoubleWordSP_Pos, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ subi(SP, SP, 256);
+  __ sd(A1, Address(SP, 8));
+  __ ld(A0, Address(SP, 8));
+  __ addi(SP, SP, 256);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedLoadStoreDoubleWordSP_Pos, test) {
+  EXPECT_DISASSEMBLY(
+      "    7111 addi sp, sp, -256\n"
+      "    e42e sd a1, 8(sp)\n"
+      "    6522 ld a0, 8(sp)\n"
+      "    6111 addi sp, sp, 256\n"
+      "    8082 ret\n");
+
+  EXPECT_EQ((intx_t)0xAB01020304050607,
+            Call(test->entry(), 0, 0xAB01020304050607));
+  EXPECT_EQ((intx_t)0xCD02040505060708,
+            Call(test->entry(), 0, 0xCD02040505060708));
+  EXPECT_EQ((intx_t)0xEF03060708090A0B,
+            Call(test->entry(), 0, 0xEF03060708090A0B));
+}
+#endif
+
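+// The register-based compressed loads/stores (c.lw/c.sw) only reach
+// registers x8-x15 and a scaled 5-bit offset; a0/a1 and these small offsets
+// satisfy both constraints.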
+ASSEMBLER_TEST_GENERATE(CompressedLoadWord_0, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ lw(A0, Address(A0, 0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedLoadWord_0, test) {
+  EXPECT_DISASSEMBLY(
+      "    4108 lw a0, 0(a0)\n"
+      "    8082 ret\n");
+
+  uint32_t* values = reinterpret_cast<uint32_t*>(malloc(3 * sizeof(uint32_t)));
+  values[0] = 0xAB010203;
+  values[1] = 0xCD020405;
+  values[2] = 0xEF030607;
+
+  EXPECT_EQ(-855505915,
+            Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+ASSEMBLER_TEST_GENERATE(CompressedLoadWord_Pos, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ lw(A0, Address(A0, 4));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedLoadWord_Pos, test) {
+  EXPECT_DISASSEMBLY(
+      "    4148 lw a0, 4(a0)\n"
+      "    8082 ret\n");
+
+  uint32_t* values = reinterpret_cast<uint32_t*>(malloc(3 * sizeof(uint32_t)));
+  values[0] = 0xAB010203;
+  values[1] = 0xCD020405;
+  values[2] = 0xEF030607;
+
+  EXPECT_EQ(-285014521,
+            Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(CompressedStoreWord_0, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ sw(A1, Address(A0, 0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedStoreWord_0, test) {
+  EXPECT_DISASSEMBLY(
+      "    c10c sw a1, 0(a0)\n"
+      "    8082 ret\n");
+
+  uint32_t* values = reinterpret_cast<uint32_t*>(malloc(3 * sizeof(uint32_t)));
+  values[0] = 0;
+  values[1] = 0;
+  values[2] = 0;
+
+  Call(test->entry(), reinterpret_cast<intx_t>(&values[1]), 0xCD020405);
+  EXPECT_EQ(0u, values[0]);
+  EXPECT_EQ(0xCD020405, values[1]);
+  EXPECT_EQ(0u, values[2]);
+  free(values);
+}
+ASSEMBLER_TEST_GENERATE(CompressedStoreWord_Pos, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ sw(A1, Address(A0, 4));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedStoreWord_Pos, test) {
+  EXPECT_DISASSEMBLY(
+      "    c14c sw a1, 4(a0)\n"
+      "    8082 ret\n");
+
+  uint32_t* values = reinterpret_cast<uint32_t*>(malloc(3 * sizeof(uint32_t)));
+  values[0] = 0;
+  values[1] = 0;
+  values[2] = 0;
+
+  Call(test->entry(), reinterpret_cast<intx_t>(&values[1]), 0xEF030607);
+  EXPECT_EQ(0u, values[0]);
+  EXPECT_EQ(0u, values[1]);
+  EXPECT_EQ(0xEF030607, values[2]);
+  free(values);
+}
+
+#if XLEN == 32
+ASSEMBLER_TEST_GENERATE(CompressedLoadSingleFloat, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ flw(FA0, Address(A0, 1 * sizeof(float)));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedLoadSingleFloat, test) {
+  EXPECT_DISASSEMBLY(
+      "    6148 flw fa0, 4(a0)\n"
+      "    8082 ret\n");
+
+  float* data = reinterpret_cast<float*>(malloc(3 * sizeof(float)));
+  data[0] = 1.7f;
+  data[1] = 2.8f;
+  data[2] = 3.9f;
+  EXPECT_EQ(data[1], CallF(test->entry(), reinterpret_cast<intx_t>(data)));
+  free(data);
+}
+
+ASSEMBLER_TEST_GENERATE(CompressedStoreSingleFloat, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ fsw(FA0, Address(A0, 1 * sizeof(float)));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedStoreSingleFloat, test) {
+  EXPECT_DISASSEMBLY(
+      "    e148 fsw fa0, 4(a0)\n"
+      "    8082 ret\n");
+
+  float* data = reinterpret_cast<float*>(malloc(3 * sizeof(float)));
+  data[0] = 1.7f;
+  data[1] = 2.8f;
+  data[2] = 3.9f;
+  CallF(test->entry(), reinterpret_cast<intx_t>(data), 4.2f);
+  EXPECT_EQ(4.2f, data[1]);
+  free(data);
+}
+#endif
+
+#if XLEN >= 64
+ASSEMBLER_TEST_GENERATE(CompressedLoadDoubleWord_0, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ ld(A0, Address(A0, 0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedLoadDoubleWord_0, test) {
+  EXPECT_DISASSEMBLY(
+      "    6108 ld a0, 0(a0)\n"
+      "    8082 ret\n");
+
+  uint64_t* values = reinterpret_cast<uint64_t*>(malloc(3 * sizeof(uint64_t)));
+  values[0] = 0xAB01020304050607;
+  values[1] = 0xCD02040505060708;
+  values[2] = 0xEF03060708090A0B;
+
+  EXPECT_EQ(-3674369926375274744,
+            Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+ASSEMBLER_TEST_GENERATE(CompressedLoadDoubleWord_Pos, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ ld(A0, Address(A0, 8));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedLoadDoubleWord_Pos, test) {
+  EXPECT_DISASSEMBLY(
+      "    6508 ld a0, 8(a0)\n"
+      "    8082 ret\n");
+
+  uint64_t* values = reinterpret_cast<uint64_t*>(malloc(3 * sizeof(uint64_t)));
+  values[0] = 0xAB01020304050607;
+  values[1] = 0xCD02040505060708;
+  values[2] = 0xEF03060708090A0B;
+
+  EXPECT_EQ(-1224128046445295093,
+            Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(CompressedStoreDoubleWord_0, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ sd(A1, Address(A0, 0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedStoreDoubleWord_0, test) {
+  EXPECT_DISASSEMBLY(
+      "    e10c sd a1, 0(a0)\n"
+      "    8082 ret\n");
+
+  uint64_t* values = reinterpret_cast<uint64_t*>(malloc(3 * sizeof(uint64_t)));
+  values[0] = 0;
+  values[1] = 0;
+  values[2] = 0;
+
+  Call(test->entry(), reinterpret_cast<intx_t>(&values[1]), 0xCD02040505060708);
+  EXPECT_EQ(0u, values[0]);
+  EXPECT_EQ(0xCD02040505060708, values[1]);
+  EXPECT_EQ(0u, values[2]);
+  free(values);
+}
+ASSEMBLER_TEST_GENERATE(CompressedStoreDoubleWord_Pos, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ sd(A1, Address(A0, 8));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedStoreDoubleWord_Pos, test) {
+  EXPECT_DISASSEMBLY(
+      "    e50c sd a1, 8(a0)\n"
+      "    8082 ret\n");
+
+  uint64_t* values = reinterpret_cast<uint64_t*>(malloc(3 * sizeof(uint64_t)));
+  values[0] = 0;
+  values[1] = 0;
+  values[2] = 0;
+
+  Call(test->entry(), reinterpret_cast<intx_t>(&values[1]), 0xEF03060708090A0B);
+  EXPECT_EQ(0u, values[0]);
+  EXPECT_EQ(0u, values[1]);
+  EXPECT_EQ(0xEF03060708090A0B, values[2]);
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(CompressedLoadDoubleFloat, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ fld(FA0, Address(A0, 1 * sizeof(double)));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedLoadDoubleFloat, test) {
+  EXPECT_DISASSEMBLY(
+      "    2508 fld fa0, 8(a0)\n"
+      "    8082 ret\n");
+
+  double* data = reinterpret_cast<double*>(malloc(3 * sizeof(double)));
+  data[0] = 1.7;
+  data[1] = 2.8;
+  data[2] = 3.9;
+  EXPECT_EQ(data[1], CallD(test->entry(), reinterpret_cast<intx_t>(data)));
+  free(data);
+}
+
+ASSEMBLER_TEST_GENERATE(CompressedStoreDoubleFloat, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ fsd(FA0, Address(A0, 1 * sizeof(double)));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedStoreDoubleFloat, test) {
+  EXPECT_DISASSEMBLY(
+      "    a508 fsd fa0, 8(a0)\n"
+      "    8082 ret\n");
+
+  double* data = reinterpret_cast<double*>(malloc(3 * sizeof(double)));
+  data[0] = 1.7;
+  data[1] = 2.8;
+  data[2] = 3.9;
+  CallD(test->entry(), reinterpret_cast<intx_t>(data), 4.2);
+  EXPECT_EQ(4.2, data[1]);
+  free(data);
+}
+#endif
+
+#if XLEN == 32
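+// c.jal exists only on RV32; on RV64 the same encoding space is c.addiw,
+// hence this test is compiled for XLEN == 32 only.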
+ASSEMBLER_TEST_GENERATE(CompressedJumpAndLink, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+
+  Label label1, label2;
+  __ mv(T3, RA);
+  __ jal(&label1, Assembler::kNearJump);  // Forward.
+  __ sub(A0, T0, T1);
+  __ mv(RA, T3);
+  __ ret();
+  __ trap();
+
+  __ Bind(&label2);
+  __ mv(T5, RA);
+  __ li(T1, 7);
+  __ jr(T5);
+  __ trap();
+
+  __ Bind(&label1);
+  __ mv(T4, RA);
+  __ li(T0, 4);
+  __ jal(&label2, Assembler::kNearJump);  // Backward.
+  __ mv(RA, T4);
+  __ jr(T4);
+  __ trap();
+}
+ASSEMBLER_TEST_RUN(CompressedJumpAndLink, test) {
+  EXPECT_DISASSEMBLY(
+      "    8e06 mv t3, ra\n"
+      "    2811 jal +20\n"
+      "40628533 sub a0, t0, t1\n"
+      "    80f2 mv ra, t3\n"
+      "    8082 ret\n"
+      "    0000 trap\n"
+      "    8f06 mv t5, ra\n"
+      "    431d li t1, 7\n"
+      "    8f02 jr t5\n"
+      "    0000 trap\n"
+      "    8e86 mv t4, ra\n"
+      "    4291 li t0, 4\n"
+      "    3fd5 jal -12\n"
+      "    80f6 mv ra, t4\n"
+      "    8e82 jr t4\n"
+      "    0000 trap\n");
+  EXPECT_EQ(-3, Call(test->entry()));
+}
+#endif
+
+ASSEMBLER_TEST_GENERATE(CompressedJump, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  Label label1, label2;
+  __ j(&label1, Assembler::kNearJump);  // Forward.
+  __ trap();
+  __ Bind(&label2);
+  __ li(T1, 7);
+  __ sub(A0, T0, T1);
+  __ ret();
+  __ Bind(&label1);
+  __ li(T0, 4);
+  __ j(&label2, Assembler::kNearJump);  // Backward.
+  __ trap();
+}
+ASSEMBLER_TEST_RUN(CompressedJump, test) {
+  EXPECT_DISASSEMBLY(
+      "    a031 j +12\n"
+      "    0000 trap\n"
+      "    431d li t1, 7\n"
+      "40628533 sub a0, t0, t1\n"
+      "    8082 ret\n"
+      "    4291 li t0, 4\n"
+      "    bfdd j -10\n"
+      "    0000 trap\n");
+  EXPECT_EQ(-3, Call(test->entry()));
+}
+
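+// The generated code records its label offsets in these globals so the run
+// harness can pass absolute jump targets to the code as call arguments.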
+static int CompressedJumpAndLinkRegister_label1 = 0;
+static int CompressedJumpAndLinkRegister_label2 = 0;
+ASSEMBLER_TEST_GENERATE(CompressedJumpAndLinkRegister, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  Label label1, label2;
+  __ mv(T3, RA);
+  __ jalr(A1);  // Forward.
+  __ sub(A0, T0, T1);
+  __ jr(T3);
+  __ trap();
+
+  __ Bind(&label2);
+  __ mv(T5, RA);
+  __ li(T1, 7);
+  __ jr(T5);
+  __ trap();
+
+  __ Bind(&label1);
+  __ mv(T4, RA);
+  __ li(T0, 4);
+  __ jalr(A2);  // Backward.
+  __ jr(T4);
+  __ trap();
+
+  CompressedJumpAndLinkRegister_label1 = label1.Position();
+  CompressedJumpAndLinkRegister_label2 = label2.Position();
+}
+ASSEMBLER_TEST_RUN(CompressedJumpAndLinkRegister, test) {
+  EXPECT_DISASSEMBLY(
+      "    8e06 mv t3, ra\n"
+      "    9582 jalr a1\n"
+      "40628533 sub a0, t0, t1\n"
+      "    8e02 jr t3\n"
+      "    0000 trap\n"
+      "    8f06 mv t5, ra\n"
+      "    431d li t1, 7\n"
+      "    8f02 jr t5\n"
+      "    0000 trap\n"
+      "    8e86 mv t4, ra\n"
+      "    4291 li t0, 4\n"
+      "    9602 jalr a2\n"
+      "    8e82 jr t4\n"
+      "    0000 trap\n");
+  EXPECT_EQ(-3,
+            Call(test->entry(), 0,
+                 static_cast<intx_t>(test->entry() +
+                                     CompressedJumpAndLinkRegister_label1),
+                 static_cast<intx_t>(test->entry() +
+                                     CompressedJumpAndLinkRegister_label2)));
+}
+
+static int CompressedJumpRegister_label = 0;
+ASSEMBLER_TEST_GENERATE(CompressedJumpRegister, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  Label label;
+  __ jr(A1);
+  __ trap();
+  __ Bind(&label);
+  __ li(A0, 42);
+  __ ret();
+  CompressedJumpRegister_label = label.Position();
+}
+ASSEMBLER_TEST_RUN(CompressedJumpRegister, test) {
+  EXPECT_DISASSEMBLY(
+      "    8582 jr a1\n"
+      "    0000 trap\n"
+      "02a00513 li a0, 42\n"
+      "    8082 ret\n");
+  EXPECT_EQ(42, Call(test->entry(), 0,
+                     static_cast<intx_t>(test->entry() +
+                                         CompressedJumpRegister_label)));
+}
+
+ASSEMBLER_TEST_GENERATE(CompressedBranchEqualZero, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  Label label;
+  __ beqz(A0, &label, Assembler::kNearJump);
+  __ li(A0, 3);
+  __ ret();
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedBranchEqualZero, test) {
+  EXPECT_DISASSEMBLY(
+      "    c119 beqz a0, +6\n"
+      "    450d li a0, 3\n"
+      "    8082 ret\n"
+      "    4511 li a0, 4\n"
+      "    8082 ret\n");
+  EXPECT_EQ(3, Call(test->entry(), -42));
+  EXPECT_EQ(4, Call(test->entry(), 0));
+  EXPECT_EQ(3, Call(test->entry(), 42));
+}
+
+ASSEMBLER_TEST_GENERATE(CompressedBranchNotEqualZero, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  Label label;
+  __ bnez(A0, &label, Assembler::kNearJump);
+  __ li(A0, 3);
+  __ ret();
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedBranchNotEqualZero, test) {
+  EXPECT_DISASSEMBLY(
+      "    e119 bnez a0, +6\n"
+      "    450d li a0, 3\n"
+      "    8082 ret\n"
+      "    4511 li a0, 4\n"
+      "    8082 ret\n");
+  EXPECT_EQ(4, Call(test->entry(), -42));
+  EXPECT_EQ(3, Call(test->entry(), 0));
+  EXPECT_EQ(4, Call(test->entry(), 42));
+}
+
+ASSEMBLER_TEST_GENERATE(CompressedLoadImmediate, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ li(A0, -7);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedLoadImmediate, test) {
+  EXPECT_DISASSEMBLY(
+      "    5565 li a0, -7\n"
+      "    8082 ret\n");
+  EXPECT_EQ(-7, Call(test->entry()));
+}
+
+ASSEMBLER_TEST_GENERATE(CompressedLoadUpperImmediate, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ lui(A0, 7 << 12);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedLoadUpperImmediate, test) {
+  EXPECT_DISASSEMBLY(
+      "    651d lui a0, 28672\n"
+      "    8082 ret\n");
+  EXPECT_EQ(7 << 12, Call(test->entry()));
+}
+
+ASSEMBLER_TEST_GENERATE(CompressedAddImmediate, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ addi(A0, A0, 19);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedAddImmediate, test) {
+  EXPECT_DISASSEMBLY(
+      "    054d addi a0, a0, 19\n"
+      "    8082 ret\n");
+  EXPECT_EQ(42, Call(test->entry(), 23));
+}
+
+#if XLEN == 64
+ASSEMBLER_TEST_GENERATE(CompressedAddImmediateWord, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ addiw(A0, A0, 19);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedAddImmediateWord, test) {
+  EXPECT_DISASSEMBLY(
+      "    254d addiw a0, a0, 19\n"
+      "    8082 ret\n");
+  EXPECT_EQ(19, Call(test->entry(), 0xFFFFFFFF00000000));
+  EXPECT_EQ(-237, Call(test->entry(), 0x00000000FFFFFF00));
+}
+#endif
+
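+// The next two tests exercise the dedicated stack-pointer encodings:
+// c.addi16sp adjusts sp by a multiple of 16 in place, while c.addi4spn
+// materializes sp plus a zero-extended, 4-scaled immediate into a
+// compressed register.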
+ASSEMBLER_TEST_GENERATE(CompressedAddImmediateSP16, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ addi(SP, SP, -128);
+  __ addi(SP, SP, +128);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedAddImmediateSP16, test) {
+  EXPECT_DISASSEMBLY(
+      "    7119 addi sp, sp, -128\n"
+      "    6109 addi sp, sp, 128\n"
+      "    8082 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 0));
+}
+
+ASSEMBLER_TEST_GENERATE(CompressedAddImmediateSP4N, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ addi(A1, SP, 36);
+  __ sub(A0, A1, SP);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedAddImmediateSP4N, test) {
+  EXPECT_DISASSEMBLY(
+      "    104c addi a1, sp, 36\n"
+      "40258533 sub a0, a1, sp\n"
+      "    8082 ret\n");
+  EXPECT_EQ(36, Call(test->entry()));
+}
+
+ASSEMBLER_TEST_GENERATE(CompressedShiftLeftLogicalImmediate, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ slli(A0, A0, 3);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedShiftLeftLogicalImmediate, test) {
+  EXPECT_DISASSEMBLY(
+      "    050e slli a0, a0, 3\n"
+      "    8082 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(336, Call(test->entry(), 42));
+  EXPECT_EQ(15872, Call(test->entry(), 1984));
+  EXPECT_EQ(-336, Call(test->entry(), -42));
+  EXPECT_EQ(-15872, Call(test->entry(), -1984));
+}
+
+ASSEMBLER_TEST_GENERATE(CompressedShiftRightLogicalImmediate, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ srli(A0, A0, 3);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedShiftRightLogicalImmediate, test) {
+  EXPECT_DISASSEMBLY(
+      "    810d srli a0, a0, 3\n"
+      "    8082 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(5, Call(test->entry(), 42));
+  EXPECT_EQ(248, Call(test->entry(), 1984));
+  EXPECT_EQ(static_cast<intx_t>(static_cast<uintx_t>(-42) >> 3),
+            Call(test->entry(), -42));
+  EXPECT_EQ(static_cast<intx_t>(static_cast<uintx_t>(-1984) >> 3),
+            Call(test->entry(), -1984));
+}
+
+ASSEMBLER_TEST_GENERATE(CompressedShiftRightArithmeticImmediate, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ srai(A0, A0, 3);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedShiftRightArithmeticImmediate, test) {
+  EXPECT_DISASSEMBLY(
+      "    850d srai a0, a0, 3\n"
+      "    8082 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(5, Call(test->entry(), 42));
+  EXPECT_EQ(248, Call(test->entry(), 1984));
+  EXPECT_EQ(-6, Call(test->entry(), -42));
+  EXPECT_EQ(-248, Call(test->entry(), -1984));
+}
+
+ASSEMBLER_TEST_GENERATE(CompressedAndImmediate, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ andi(A0, A0, 6);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedAndImmediate, test) {
+  EXPECT_DISASSEMBLY(
+      "    8919 andi a0, a0, 6\n"
+      "    8082 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(2, Call(test->entry(), 43));
+  EXPECT_EQ(0, Call(test->entry(), 1984));
+  EXPECT_EQ(6, Call(test->entry(), -42));
+  EXPECT_EQ(0, Call(test->entry(), -1984));
+}
+
+ASSEMBLER_TEST_GENERATE(CompressedAndImmediate2, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ andi(A0, A0, -6);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedAndImmediate2, test) {
+  EXPECT_DISASSEMBLY(
+      "    9969 andi a0, a0, -6\n"
+      "    8082 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(42, Call(test->entry(), 43));
+  EXPECT_EQ(1984, Call(test->entry(), 1984));
+  EXPECT_EQ(-46, Call(test->entry(), -42));
+  EXPECT_EQ(-1984, Call(test->entry(), -1984));
+}
+
+ASSEMBLER_TEST_GENERATE(CompressedMove, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ mv(A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedMove, test) {
+  EXPECT_DISASSEMBLY(
+      "    852e mv a0, a1\n"
+      "    8082 ret\n");
+  EXPECT_EQ(42, Call(test->entry(), 0, 42));
+}
+
+ASSEMBLER_TEST_GENERATE(CompressedAdd, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ add(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedAdd, test) {
+  EXPECT_DISASSEMBLY(
+      "    952e add a0, a0, a1\n"
+      "    8082 ret\n");
+  EXPECT_EQ(24, Call(test->entry(), 7, 17));
+  EXPECT_EQ(-10, Call(test->entry(), 7, -17));
+  EXPECT_EQ(10, Call(test->entry(), -7, 17));
+  EXPECT_EQ(-24, Call(test->entry(), -7, -17));
+  EXPECT_EQ(24, Call(test->entry(), 17, 7));
+  EXPECT_EQ(10, Call(test->entry(), 17, -7));
+  EXPECT_EQ(-10, Call(test->entry(), -17, 7));
+  EXPECT_EQ(-24, Call(test->entry(), -17, -7));
+}
+
+ASSEMBLER_TEST_GENERATE(CompressedAnd, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ and_(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedAnd, test) {
+  EXPECT_DISASSEMBLY(
+      "    8d6d and a0, a0, a1\n"
+      "    8082 ret\n");
+  EXPECT_EQ(1, Call(test->entry(), 7, 17));
+  EXPECT_EQ(7, Call(test->entry(), 7, -17));
+  EXPECT_EQ(17, Call(test->entry(), -7, 17));
+  EXPECT_EQ(-23, Call(test->entry(), -7, -17));
+  EXPECT_EQ(1, Call(test->entry(), 17, 7));
+  EXPECT_EQ(17, Call(test->entry(), 17, -7));
+  EXPECT_EQ(7, Call(test->entry(), -17, 7));
+  EXPECT_EQ(-23, Call(test->entry(), -17, -7));
+}
+
+ASSEMBLER_TEST_GENERATE(CompressedOr, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ or_(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedOr, test) {
+  EXPECT_DISASSEMBLY(
+      "    8d4d or a0, a0, a1\n"
+      "    8082 ret\n");
+  EXPECT_EQ(23, Call(test->entry(), 7, 17));
+  EXPECT_EQ(-17, Call(test->entry(), 7, -17));
+  EXPECT_EQ(-7, Call(test->entry(), -7, 17));
+  EXPECT_EQ(-1, Call(test->entry(), -7, -17));
+  EXPECT_EQ(23, Call(test->entry(), 17, 7));
+  EXPECT_EQ(-7, Call(test->entry(), 17, -7));
+  EXPECT_EQ(-17, Call(test->entry(), -17, 7));
+  EXPECT_EQ(-1, Call(test->entry(), -17, -7));
+}
+
+ASSEMBLER_TEST_GENERATE(CompressedXor, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ xor_(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedXor, test) {
+  EXPECT_DISASSEMBLY(
+      "    8d2d xor a0, a0, a1\n"
+      "    8082 ret\n");
+  EXPECT_EQ(22, Call(test->entry(), 7, 17));
+  EXPECT_EQ(-24, Call(test->entry(), 7, -17));
+  EXPECT_EQ(-24, Call(test->entry(), -7, 17));
+  EXPECT_EQ(22, Call(test->entry(), -7, -17));
+  EXPECT_EQ(22, Call(test->entry(), 17, 7));
+  EXPECT_EQ(-24, Call(test->entry(), 17, -7));
+  EXPECT_EQ(-24, Call(test->entry(), -17, 7));
+  EXPECT_EQ(22, Call(test->entry(), -17, -7));
+}
+
+ASSEMBLER_TEST_GENERATE(CompressedSubtract, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ sub(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedSubtract, test) {
+  EXPECT_DISASSEMBLY(
+      "    8d0d sub a0, a0, a1\n"
+      "    8082 ret\n");
+  EXPECT_EQ(-10, Call(test->entry(), 7, 17));
+  EXPECT_EQ(24, Call(test->entry(), 7, -17));
+  EXPECT_EQ(-24, Call(test->entry(), -7, 17));
+  EXPECT_EQ(10, Call(test->entry(), -7, -17));
+  EXPECT_EQ(10, Call(test->entry(), 17, 7));
+  EXPECT_EQ(24, Call(test->entry(), 17, -7));
+  EXPECT_EQ(-24, Call(test->entry(), -17, 7));
+  EXPECT_EQ(-10, Call(test->entry(), -17, -7));
+}
+
+#if XLEN >= 64
+ASSEMBLER_TEST_GENERATE(CompressedAddWord, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ addw(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedAddWord, test) {
+  EXPECT_DISASSEMBLY(
+      "    9d2d addw a0, a0, a1\n"
+      "    8082 ret\n");
+  EXPECT_EQ(24, Call(test->entry(), 7, 17));
+  EXPECT_EQ(-10, Call(test->entry(), 7, -17));
+  EXPECT_EQ(10, Call(test->entry(), -7, 17));
+  EXPECT_EQ(-24, Call(test->entry(), -7, -17));
+  EXPECT_EQ(24, Call(test->entry(), 17, 7));
+  EXPECT_EQ(10, Call(test->entry(), 17, -7));
+  EXPECT_EQ(-10, Call(test->entry(), -17, 7));
+  EXPECT_EQ(-24, Call(test->entry(), -17, -7));
+  EXPECT_EQ(3, Call(test->entry(), 0x200000002, 0x100000001));
+}
+
+ASSEMBLER_TEST_GENERATE(CompressedSubtractWord, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ subw(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedSubtractWord, test) {
+  EXPECT_DISASSEMBLY(
+      "    9d0d subw a0, a0, a1\n"
+      "    8082 ret\n");
+  EXPECT_EQ(-10, Call(test->entry(), 7, 17));
+  EXPECT_EQ(24, Call(test->entry(), 7, -17));
+  EXPECT_EQ(-24, Call(test->entry(), -7, 17));
+  EXPECT_EQ(10, Call(test->entry(), -7, -17));
+  EXPECT_EQ(10, Call(test->entry(), 17, 7));
+  EXPECT_EQ(24, Call(test->entry(), 17, -7));
+  EXPECT_EQ(-24, Call(test->entry(), -17, 7));
+  EXPECT_EQ(-10, Call(test->entry(), -17, -7));
+  EXPECT_EQ(1, Call(test->entry(), 0x200000002, 0x100000001));
+}
+#endif
+
+ASSEMBLER_TEST_GENERATE(CompressedNop, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ nop();
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedNop, test) {
+  EXPECT_DISASSEMBLY(
+      "    0001 nop\n"
+      "    8082 ret\n");
+  EXPECT_EQ(123, Call(test->entry(), 123));
+}
+
+ASSEMBLER_TEST_GENERATE(CompressedEnvironmentBreak, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ ebreak();
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(CompressedEnvironmentBreak, test) {
+  EXPECT_DISASSEMBLY(
+      "    9002 ebreak\n"
+      "    8082 ret\n");
+
+  // Not running: would trap.
+}
+
+ASSEMBLER_TEST_GENERATE(LoadImmediate_MaxInt32, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ LoadImmediate(A0, kMaxInt32);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadImmediate_MaxInt32, test) {
+#if XLEN == 32
+  EXPECT_DISASSEMBLY(
+      "80000537 lui a0, -2147483648\n"
+      "    157d addi a0, a0, -1\n"
+      "    8082 ret\n");
+#elif XLEN == 64
+  EXPECT_DISASSEMBLY(
+      "80000537 lui a0, -2147483648\n"
+      "    357d addiw a0, a0, -1\n"
+      "    8082 ret\n");
+#endif
+  EXPECT_EQ(kMaxInt32, Call(test->entry()));
+}
+
+ASSEMBLER_TEST_GENERATE(LoadImmediate_MinInt32, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ LoadImmediate(A0, kMinInt32);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadImmediate_MinInt32, test) {
+  EXPECT_DISASSEMBLY(
+      "80000537 lui a0, -2147483648\n"
+      "    8082 ret\n");
+  EXPECT_EQ(kMinInt32, Call(test->entry()));
+}
+
+#if XLEN >= 64
+ASSEMBLER_TEST_GENERATE(LoadImmediate_MinInt64, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ LoadImmediate(A0, kMinInt64);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadImmediate_MinInt64, test) {
+  EXPECT_DISASSEMBLY(
+      "f8000537 lui a0, -134217728\n"
+      "    0532 slli a0, a0, 12\n"
+      "    0532 slli a0, a0, 12\n"
+      "    0532 slli a0, a0, 12\n"
+      "    8082 ret\n");
+  EXPECT_EQ(kMinInt64, Call(test->entry()));
+}
+
+ASSEMBLER_TEST_GENERATE(LoadImmediate_Large, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  __ LoadImmediate(A0, 0xABCDABCDABCDABCD);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadImmediate_Large, test) {
+  EXPECT_DISASSEMBLY(
+      "fabce537 lui a0, -88285184\n"
+      "abd5051b addiw a0, a0, -1347\n"
+      "    0532 slli a0, a0, 12\n"
+      "dac50513 addi a0, a0, -596\n"
+      "    0532 slli a0, a0, 12\n"
+      "cdb50513 addi a0, a0, -805\n"
+      "    0532 slli a0, a0, 12\n"
+      "bcd50513 addi a0, a0, -1075\n"
+      "    8082 ret\n");
+  EXPECT_EQ(static_cast<int64_t>(0xABCDABCDABCDABCD), Call(test->entry()));
+}
+#endif
+
+ASSEMBLER_TEST_GENERATE(AddImmediateBranchOverflow, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  Label overflow;
+
+  __ AddImmediateBranchOverflow(A0, A0, 2, &overflow);
+  __ ret();
+  __ Bind(&overflow);
+  __ li(A0, 0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AddImmediateBranchOverflow, test) {
+  EXPECT_DISASSEMBLY(
+      "    872a mv tmp2, a0\n"
+      "    0509 addi a0, a0, 2\n"
+      "00e54363 blt a0, tmp2, +6\n"
+      "    8082 ret\n"
+      "    4501 li a0, 0\n"
+      "    8082 ret\n");
+  EXPECT_EQ(kMaxIntX - 1, Call(test->entry(), kMaxIntX - 3));
+  EXPECT_EQ(kMaxIntX, Call(test->entry(), kMaxIntX - 2));
+  EXPECT_EQ(0, Call(test->entry(), kMaxIntX - 1));
+  EXPECT_EQ(0, Call(test->entry(), kMaxIntX));
+}
+
+ASSEMBLER_TEST_GENERATE(AddBranchOverflow_NonDestructive, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  Label overflow;
+
+  __ AddBranchOverflow(A0, A1, A2, &overflow);
+  __ ret();
+  __ Bind(&overflow);
+  __ li(A0, 0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AddBranchOverflow_NonDestructive, test) {
+  EXPECT_DISASSEMBLY(
+      "00c58533 add a0, a1, a2\n"
+      "00062693 slti tmp, a2, 0\n"
+      "00b52733 slt tmp2, a0, a1\n"
+      "00e69363 bne tmp, tmp2, +6\n"
+      "    8082 ret\n"
+      "    4501 li a0, 0\n"
+      "    8082 ret\n");
+  EXPECT_EQ(kMaxIntX - 1, Call(test->entry(), 42, kMaxIntX, -1));
+  EXPECT_EQ(kMaxIntX, Call(test->entry(), 42, kMaxIntX, 0));
+  EXPECT_EQ(0, Call(test->entry(), 42, kMaxIntX, 1));
+
+  EXPECT_EQ(0, Call(test->entry(), 42, kMinIntX, -1));
+  EXPECT_EQ(kMinIntX + 1, Call(test->entry(), 42, kMinIntX, 1));
+  EXPECT_EQ(kMinIntX, Call(test->entry(), 42, kMinIntX, 0));
+}
+ASSEMBLER_TEST_GENERATE(AddBranchOverflow_Destructive, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  Label overflow;
+
+  __ AddBranchOverflow(A0, A1, A0, &overflow);
+  __ ret();
+  __ Bind(&overflow);
+  __ li(A0, 0);
+  __ ret();
+}
+
+ASSEMBLER_TEST_RUN(AddBranchOverflow_Destructive, test) {
+  EXPECT_DISASSEMBLY(
+      "00052693 slti tmp, a0, 0\n"
+      "    952e add a0, a0, a1\n"
+      "00b52733 slt tmp2, a0, a1\n"
+      "00e69363 bne tmp, tmp2, +6\n"
+      "    8082 ret\n"
+      "    4501 li a0, 0\n"
+      "    8082 ret\n");
+  EXPECT_EQ(kMaxIntX - 1, Call(test->entry(), kMaxIntX, -1));
+  EXPECT_EQ(kMaxIntX, Call(test->entry(), kMaxIntX, 0));
+  EXPECT_EQ(0, Call(test->entry(), kMaxIntX, 1));
+
+  EXPECT_EQ(0, Call(test->entry(), kMinIntX, -1));
+  EXPECT_EQ(kMinIntX + 1, Call(test->entry(), kMinIntX, 1));
+  EXPECT_EQ(kMinIntX, Call(test->entry(), kMinIntX, 0));
+}
+
+ASSEMBLER_TEST_GENERATE(SubtractImmediateBranchOverflow, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  Label overflow;
+
+  __ SubtractImmediateBranchOverflow(A0, A0, 2, &overflow);
+  __ ret();
+  __ Bind(&overflow);
+  __ li(A0, 0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SubtractImmediateBranchOverflow, test) {
+  EXPECT_DISASSEMBLY(
+      "    872a mv tmp2, a0\n"
+      "    1579 addi a0, a0, -2\n"
+      "00a74363 blt tmp2, a0, +6\n"
+      "    8082 ret\n"
+      "    4501 li a0, 0\n"
+      "    8082 ret\n");
+  EXPECT_EQ(kMinIntX + 1, Call(test->entry(), kMinIntX + 3));
+  EXPECT_EQ(kMinIntX, Call(test->entry(), kMinIntX + 2));
+  EXPECT_EQ(0, Call(test->entry(), kMinIntX + 1));
+  EXPECT_EQ(0, Call(test->entry(), kMinIntX));
+}
+
+ASSEMBLER_TEST_GENERATE(SubtractBranchOverflow_NonDestructive, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+
+  Label overflow;
+  __ SubtractBranchOverflow(A0, A1, A2, &overflow);
+  __ ret();
+  __ Bind(&overflow);
+  __ li(A0, 0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SubtractBranchOverflow_NonDestructive, test) {
+  EXPECT_DISASSEMBLY(
+      "40c58533 sub a0, a1, a2\n"
+      "00062693 slti tmp, a2, 0\n"
+      "00a5a733 slt tmp2, a1, a0\n"
+      "00e69363 bne tmp, tmp2, +6\n"
+      "    8082 ret\n"
+      "    4501 li a0, 0\n"
+      "    8082 ret\n");
+  EXPECT_EQ(kMaxIntX - 1, Call(test->entry(), 42, kMaxIntX, 1));
+  EXPECT_EQ(kMaxIntX, Call(test->entry(), 42, kMaxIntX, 0));
+  EXPECT_EQ(0, Call(test->entry(), 42, kMaxIntX, -1));
+
+  EXPECT_EQ(0, Call(test->entry(), 42, kMinIntX, 1));
+  EXPECT_EQ(kMinIntX + 1, Call(test->entry(), 42, kMinIntX, -1));
+  EXPECT_EQ(kMinIntX, Call(test->entry(), 42, kMinIntX, 0));
+}
+
+ASSEMBLER_TEST_GENERATE(SubtractBranchOverflow_Destructive, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+
+  Label overflow;
+  __ SubtractBranchOverflow(A0, A0, A1, &overflow);
+  __ ret();
+  __ Bind(&overflow);
+  __ li(A0, 0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SubtractBranchOverflow_Destructive, test) {
+  EXPECT_DISASSEMBLY(
+      "00052693 slti tmp, a0, 0\n"
+      "    8d0d sub a0, a0, a1\n"
+      "00b52733 slt tmp2, a0, a1\n"
+      "00e69363 bne tmp, tmp2, +6\n"
+      "    8082 ret\n"
+      "    4501 li a0, 0\n"
+      "    8082 ret\n");
+  EXPECT_EQ(kMaxIntX - 1, Call(test->entry(), kMaxIntX, 1));
+  EXPECT_EQ(kMaxIntX, Call(test->entry(), kMaxIntX, 0));
+  EXPECT_EQ(0, Call(test->entry(), kMaxIntX, -1));
+
+  EXPECT_EQ(0, Call(test->entry(), kMinIntX, 1));
+  EXPECT_EQ(kMinIntX + 1, Call(test->entry(), kMinIntX, -1));
+  EXPECT_EQ(kMinIntX, Call(test->entry(), kMinIntX, 0));
+}
+
+ASSEMBLER_TEST_GENERATE(MultiplyImmediateBranchOverflow, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+  Label overflow;
+
+  __ MultiplyImmediateBranchOverflow(A0, A0, 2, &overflow);
+  __ ret();
+  __ Bind(&overflow);
+  __ li(A0, 0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(MultiplyImmediateBranchOverflow, test) {
+#if XLEN == 64
+  EXPECT_DISASSEMBLY(
+      "    4709 li tmp2, 2\n"
+      "02e516b3 mulh tmp, a0, tmp2\n"
+      "02e50533 mul a0, a0, tmp2\n"
+      "43f55713 srai tmp2, a0, 0x3f\n"
+      "00e69363 bne tmp, tmp2, +6\n"
+      "    8082 ret\n"
+      "    4501 li a0, 0\n"
+      "    8082 ret\n");
+#elif XLEN == 32
+  EXPECT_DISASSEMBLY(
+      "    4709 li tmp2, 2\n"
+      "02e516b3 mulh tmp, a0, tmp2\n"
+      "02e50533 mul a0, a0, tmp2\n"
+      "41f55713 srai tmp2, a0, 0x1f\n"
+      "00e69363 bne tmp, tmp2, +6\n"
+      "    8082 ret\n"
+      "    4501 li a0, 0\n"
+      "    8082 ret\n");
+#endif
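+  // mulh yields the high XLEN bits of the full product; overflow occurred
+  // iff they differ from the sign extension (srai by XLEN-1) of the low
+  // bits.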
+  EXPECT_EQ(0, Call(test->entry(), kMinIntX));
+  EXPECT_EQ(0, Call(test->entry(), kMaxIntX));
+  EXPECT_EQ(-2, Call(test->entry(), -1));
+  EXPECT_EQ(2, Call(test->entry(), 1));
+  EXPECT_EQ(kMinIntX, Call(test->entry(), kMinIntX / 2));
+  EXPECT_EQ(kMaxIntX - 1, Call(test->entry(), (kMaxIntX - 1) / 2));
+}
+
+ASSEMBLER_TEST_GENERATE(MultiplyBranchOverflow_NonDestructive, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+
+  Label overflow;
+  __ MultiplyBranchOverflow(A0, A1, A2, &overflow);
+  __ ret();
+  __ Bind(&overflow);
+  __ li(A0, 42);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(MultiplyBranchOverflow_NonDestructive, test) {
+#if XLEN == 64
+  EXPECT_DISASSEMBLY(
+      "02c596b3 mulh tmp, a1, a2\n"
+      "02c58533 mul a0, a1, a2\n"
+      "43f55713 srai tmp2, a0, 0x3f\n"
+      "00e69363 bne tmp, tmp2, +6\n"
+      "    8082 ret\n"
+      "02a00513 li a0, 42\n"
+      "    8082 ret\n");
+#elif XLEN == 32
+  EXPECT_DISASSEMBLY(
+      "02c596b3 mulh tmp, a1, a2\n"
+      "02c58533 mul a0, a1, a2\n"
+      "41f55713 srai tmp2, a0, 0x1f\n"
+      "00e69363 bne tmp, tmp2, +6\n"
+      "    8082 ret\n"
+      "02a00513 li a0, 42\n"
+      "    8082 ret\n");
+#endif
+  EXPECT_EQ(42, Call(test->entry(), 42, kMaxIntX, -2));
+  EXPECT_EQ(-kMaxIntX, Call(test->entry(), 42, kMaxIntX, -1));
+  EXPECT_EQ(0, Call(test->entry(), 42, kMaxIntX, 0));
+  EXPECT_EQ(kMaxIntX, Call(test->entry(), 42, kMaxIntX, 1));
+  EXPECT_EQ(42, Call(test->entry(), 42, kMaxIntX, 2));
+
+  EXPECT_EQ(42, Call(test->entry(), 42, kMinIntX, -2));
+  EXPECT_EQ(42, Call(test->entry(), 42, kMinIntX, -1));
+  EXPECT_EQ(0, Call(test->entry(), 42, kMinIntX, 0));
+  EXPECT_EQ(kMinIntX, Call(test->entry(), 42, kMinIntX, 1));
+  EXPECT_EQ(42, Call(test->entry(), 42, kMinIntX, 2));
+}
+
+ASSEMBLER_TEST_GENERATE(MultiplyBranchOverflow_Destructive, assembler) {
+  FLAG_use_compressed_instructions = true;
+  __ SetExtensions(RV_GC);
+
+  Label overflow;
+  __ MultiplyBranchOverflow(A0, A0, A1, &overflow);
+  __ ret();
+  __ Bind(&overflow);
+  __ li(A0, 42);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(MultiplyBranchOverflow_Destructive, test) {
+#if XLEN == 64
+  EXPECT_DISASSEMBLY(
+      "02b516b3 mulh tmp, a0, a1\n"
+      "02b50533 mul a0, a0, a1\n"
+      "43f55713 srai tmp2, a0, 0x3f\n"
+      "00e69363 bne tmp, tmp2, +6\n"
+      "    8082 ret\n"
+      "02a00513 li a0, 42\n"
+      "    8082 ret\n");
+#elif XLEN == 32
+  EXPECT_DISASSEMBLY(
+      "02b516b3 mulh tmp, a0, a1\n"
+      "02b50533 mul a0, a0, a1\n"
+      "41f55713 srai tmp2, a0, 0x1f\n"
+      "00e69363 bne tmp, tmp2, +6\n"
+      "    8082 ret\n"
+      "02a00513 li a0, 42\n"
+      "    8082 ret\n");
+#endif
+  EXPECT_EQ(42, Call(test->entry(), kMaxIntX, -2));
+  EXPECT_EQ(-kMaxIntX, Call(test->entry(), kMaxIntX, -1));
+  EXPECT_EQ(0, Call(test->entry(), kMaxIntX, 0));
+  EXPECT_EQ(kMaxIntX, Call(test->entry(), kMaxIntX, 1));
+  EXPECT_EQ(42, Call(test->entry(), kMaxIntX, 2));
+
+  EXPECT_EQ(42, Call(test->entry(), kMinIntX, -2));
+  EXPECT_EQ(42, Call(test->entry(), kMinIntX, -1));
+  EXPECT_EQ(0, Call(test->entry(), kMinIntX, 0));
+  EXPECT_EQ(kMinIntX, Call(test->entry(), kMinIntX, 1));
+  EXPECT_EQ(42, Call(test->entry(), kMinIntX, 2));
+}
+
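+// Round-trip check for each operand encoder: every value accepted by the
+// Is##name predicate must decode back to itself after encoding.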
+#define TEST_ENCODING(type, name)                                              \
+  VM_UNIT_TEST_CASE(Encoding##name) {                                          \
+    for (intptr_t v = -(1 << 21); v <= (1 << 21); v++) {                       \
+      type value = static_cast<type>(v);                                       \
+      if (!Is##name(value)) continue;                                          \
+      int32_t encoded = Encode##name(value);                                   \
+      type decoded = Decode##name(encoded);                                    \
+      EXPECT_EQ(value, decoded);                                               \
+    }                                                                          \
+  }
+
+TEST_ENCODING(Register, Rd)
+TEST_ENCODING(Register, Rs1)
+TEST_ENCODING(Register, Rs2)
+TEST_ENCODING(FRegister, FRd)
+TEST_ENCODING(FRegister, FRs1)
+TEST_ENCODING(FRegister, FRs2)
+TEST_ENCODING(FRegister, FRs3)
+TEST_ENCODING(Funct2, Funct2)
+TEST_ENCODING(Funct3, Funct3)
+TEST_ENCODING(Funct5, Funct5)
+TEST_ENCODING(Funct7, Funct7)
+TEST_ENCODING(Funct12, Funct12)
+TEST_ENCODING(RoundingMode, RoundingMode)
+TEST_ENCODING(intptr_t, BTypeImm)
+TEST_ENCODING(intptr_t, JTypeImm)
+TEST_ENCODING(intptr_t, ITypeImm)
+TEST_ENCODING(intptr_t, STypeImm)
+TEST_ENCODING(intptr_t, UTypeImm)
+
+TEST_ENCODING(Register, CRd)
+TEST_ENCODING(Register, CRs1)
+TEST_ENCODING(Register, CRs2)
+TEST_ENCODING(Register, CRdp)
+TEST_ENCODING(Register, CRs1p)
+TEST_ENCODING(Register, CRs2p)
+TEST_ENCODING(FRegister, CFRd)
+TEST_ENCODING(FRegister, CFRs1)
+TEST_ENCODING(FRegister, CFRs2)
+TEST_ENCODING(FRegister, CFRdp)
+TEST_ENCODING(FRegister, CFRs1p)
+TEST_ENCODING(FRegister, CFRs2p)
+TEST_ENCODING(intptr_t, CSPLoad4Imm)
+TEST_ENCODING(intptr_t, CSPLoad8Imm)
+TEST_ENCODING(intptr_t, CSPStore4Imm)
+TEST_ENCODING(intptr_t, CSPStore8Imm)
+TEST_ENCODING(intptr_t, CMem4Imm)
+TEST_ENCODING(intptr_t, CMem8Imm)
+TEST_ENCODING(intptr_t, CJImm)
+TEST_ENCODING(intptr_t, CBImm)
+TEST_ENCODING(intptr_t, CIImm)
+TEST_ENCODING(intptr_t, CUImm)
+TEST_ENCODING(intptr_t, CI16Imm)
+TEST_ENCODING(intptr_t, CI4SPNImm)
+
+#undef TEST_ENCODING
+
+}  // namespace compiler
+}  // namespace dart
+
+#endif  // defined(TARGET_ARCH_RISCV)
diff --git a/runtime/vm/compiler/assembler/assembler_x64.cc b/runtime/vm/compiler/assembler/assembler_x64.cc
index a72850f..4e36eff 100644
--- a/runtime/vm/compiler/assembler/assembler_x64.cc
+++ b/runtime/vm/compiler/assembler/assembler_x64.cc
@@ -20,10 +20,10 @@
 namespace compiler {
 
 Assembler::Assembler(ObjectPoolBuilder* object_pool_builder,
-                     bool use_far_branches)
+                     intptr_t far_branch_level)
     : AssemblerBase(object_pool_builder), constant_pool_allowed_(false) {
   // Far branching mode is only needed and implemented for ARM.
-  ASSERT(!use_far_branches);
+  ASSERT(far_branch_level == 0);
 
   generate_invoke_write_barrier_wrapper_ = [&](Register reg) {
     call(Address(THR,
diff --git a/runtime/vm/compiler/assembler/assembler_x64.h b/runtime/vm/compiler/assembler/assembler_x64.h
index dd5882c..52af843 100644
--- a/runtime/vm/compiler/assembler/assembler_x64.h
+++ b/runtime/vm/compiler/assembler/assembler_x64.h
@@ -295,7 +295,7 @@
 class Assembler : public AssemblerBase {
  public:
   explicit Assembler(ObjectPoolBuilder* object_pool_builder,
-                     bool use_far_branches = false);
+                     intptr_t far_branch_level = 0);
 
   ~Assembler() {}
 
diff --git a/runtime/vm/compiler/assembler/disassembler.cc b/runtime/vm/compiler/assembler/disassembler.cc
index 2777ccd..58b3430 100644
--- a/runtime/vm/compiler/assembler/disassembler.cc
+++ b/runtime/vm/compiler/assembler/disassembler.cc
@@ -114,7 +114,30 @@
   if (overflowed_) {
     return;
   }
-  intptr_t len = strlen(human_buffer);
+  intptr_t len;
+
+  // TODO(compiler): Update assembler tests for other architectures so there is
+  // coverage of encodings, not just mnemonics.
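+  // On RISC-V the instruction's hex encoding is emitted before the
+  // mnemonic, e.g. "    8082 ret" rather than just "ret".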
+#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+  len = strlen(hex_buffer);
+  if (remaining_ < len + 100) {
+    *buffer_++ = '.';
+    *buffer_++ = '.';
+    *buffer_++ = '.';
+    *buffer_++ = '\n';
+    *buffer_++ = '\0';
+    overflowed_ = true;
+    return;
+  }
+  memmove(buffer_, hex_buffer, len);
+  buffer_ += len;
+  remaining_ -= len;
+  *buffer_++ = ' ';
+  remaining_--;
+  *buffer_ = '\0';
+#endif
+
+  len = strlen(human_buffer);
   if (remaining_ < len + 100) {
     *buffer_++ = '.';
     *buffer_++ = '.';
diff --git a/runtime/vm/compiler/assembler/disassembler_riscv.cc b/runtime/vm/compiler/assembler/disassembler_riscv.cc
new file mode 100644
index 0000000..9a3ae1b
--- /dev/null
+++ b/runtime/vm/compiler/assembler/disassembler_riscv.cc
@@ -0,0 +1,1595 @@
+// Copyright (c) 2017, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/globals.h"  // Needed here to get TARGET_ARCH_RISCV.
+#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+
+#include "vm/compiler/assembler/disassembler.h"
+
+#include "platform/assert.h"
+#include "vm/instructions.h"
+
+namespace dart {
+
+#if !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER)
+
+// We deviate from objdump in two places:
+//  - branches display displacements instead of targets, so our output is
+//    position-independent for tests.
+//  - auipc/lui display the decoded value instead of the encoded value.
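+//
+// For example, a branch six bytes ahead disassembles as "beqz a0, +6" rather
+// than as an absolute target, and "lui a0, 28672" shows the shifted value
+// rather than the raw immediate field (7).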
+class RISCVDisassembler {
+ public:
+  explicit RISCVDisassembler(char* buffer,
+                             size_t buffer_size,
+                             ExtensionSet extensions)
+      : extensions_(extensions),
+        buffer_(buffer),
+        buffer_size_(buffer_size),
+        buffer_pos_(0) {}
+
+  bool Supports(Extension extension) const {
+    return extensions_.Includes(extension);
+  }
+  bool Supports(ExtensionSet extensions) const {
+    return extensions_.IncludesAll(extensions);
+  }
+
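+  // Instructions are read as 16-bit parcels; compressed (16-bit) encodings
+  // have low bits != 0b11 in the first parcel, which IsCInstruction tests.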
+  intptr_t Disassemble(uword pc) {
+    uint16_t parcel = *reinterpret_cast<uint16_t*>(pc);
+    if (Supports(RV_C) && IsCInstruction(parcel)) {
+      CInstr instr(parcel);
+      DisassembleInstruction(instr);
+      return instr.length();
+    } else {
+      uint32_t parcel = *reinterpret_cast<uint32_t*>(pc);
+      Instr instr(parcel);
+      DisassembleInstruction(instr);
+      return instr.length();
+    }
+  }
+
+ private:
+  void DisassembleInstruction(Instr instr);
+  void DisassembleInstruction(CInstr instr);
+  void DisassembleLUI(Instr instr);
+  void DisassembleAUIPC(Instr instr);
+  void DisassembleJAL(Instr instr);
+  void DisassembleJALR(Instr instr);
+  void DisassembleBRANCH(Instr instr);
+  void DisassembleLOAD(Instr instr);
+  void DisassembleSTORE(Instr instr);
+  void DisassembleOPIMM(Instr instr);
+  void DisassembleOPIMM32(Instr instr);
+  void DisassembleOP(Instr instr);
+  void DisassembleOP_0(Instr instr);
+  void DisassembleOP_SUB(Instr instr);
+  void DisassembleOP_MULDIV(Instr instr);
+  void DisassembleOP32(Instr instr);
+  void DisassembleOP32_0(Instr instr);
+  void DisassembleOP32_SUB(Instr instr);
+  void DisassembleOP32_MULDIV(Instr instr);
+  void DisassembleMISCMEM(Instr instr);
+  void DisassembleSYSTEM(Instr instr);
+  void DisassembleAMO(Instr instr);
+  void DisassembleAMO32(Instr instr);
+  void DisassembleAMO64(Instr instr);
+  void DisassembleLOADFP(Instr instr);
+  void DisassembleSTOREFP(Instr instr);
+  void DisassembleFMADD(Instr instr);
+  void DisassembleFMSUB(Instr instr);
+  void DisassembleFNMADD(Instr instr);
+  void DisassembleFNMSUB(Instr instr);
+  void DisassembleOPFP(Instr instr);
+
+  void UnknownInstruction(Instr instr);
+  void UnknownInstruction(CInstr instr);
+
+  void Print(const char* format, Instr instr, ExtensionSet extension);
+  void Print(const char* format, CInstr instr, ExtensionSet extension);
+  const char* PrintOption(const char* format, Instr instr);
+  const char* PrintOption(const char* format, CInstr instr);
+
+  void Printf(const char* format, ...) PRINTF_ATTRIBUTE(2, 3) {
+    va_list args;
+    va_start(args, format);
+    intptr_t len = Utils::VSNPrint(buffer_ + buffer_pos_,
+                                   buffer_size_ - buffer_pos_, format, args);
+    va_end(args);
+    buffer_pos_ += len;
+    buffer_[buffer_pos_] = '\0';
+  }
+
+  const ExtensionSet extensions_;
+  char* buffer_;        // Decode instructions into this buffer.
+  size_t buffer_size_;  // The size of the character buffer.
+  size_t buffer_pos_;   // Current character position in buffer.
+};
+
+void RISCVDisassembler::DisassembleInstruction(Instr instr) {
+  switch (instr.opcode()) {
+    case LUI:
+      DisassembleLUI(instr);
+      break;
+    case AUIPC:
+      DisassembleAUIPC(instr);
+      break;
+    case JAL:
+      DisassembleJAL(instr);
+      break;
+    case JALR:
+      DisassembleJALR(instr);
+      break;
+    case BRANCH:
+      DisassembleBRANCH(instr);
+      break;
+    case LOAD:
+      DisassembleLOAD(instr);
+      break;
+    case STORE:
+      DisassembleSTORE(instr);
+      break;
+    case OPIMM:
+      DisassembleOPIMM(instr);
+      break;
+    case OPIMM32:
+      DisassembleOPIMM32(instr);
+      break;
+    case OP:
+      DisassembleOP(instr);
+      break;
+    case OP32:
+      DisassembleOP32(instr);
+      break;
+    case MISCMEM:
+      DisassembleMISCMEM(instr);
+      break;
+    case SYSTEM:
+      DisassembleSYSTEM(instr);
+      break;
+    case AMO:
+      DisassembleAMO(instr);
+      break;
+    case LOADFP:
+      DisassembleLOADFP(instr);
+      break;
+    case STOREFP:
+      DisassembleSTOREFP(instr);
+      break;
+    case FMADD:
+      DisassembleFMADD(instr);
+      break;
+    case FMSUB:
+      DisassembleFMSUB(instr);
+      break;
+    case FNMADD:
+      DisassembleFNMADD(instr);
+      break;
+    case FNMSUB:
+      DisassembleFNMSUB(instr);
+      break;
+    case OPFP:
+      DisassembleOPFP(instr);
+      break;
+    default:
+      if ((instr.encoding() == 0) ||
+          (instr.encoding() == static_cast<uint32_t>(-1))) {
+        Print("trap", instr, RV_I);
+        break;
+      }
+      UnknownInstruction(instr);
+  }
+}
+
+void RISCVDisassembler::DisassembleInstruction(CInstr instr) {
+  switch (instr.opcode()) {
+    case C_LWSP:
+      Print("lw 'rd, 'spload4imm(sp)", instr, RV_C);
+      break;
+#if XLEN == 32
+    case C_FLWSP:
+      Print("flw 'frd, 'spload4imm(sp)", instr, RV_C | RV_F);
+      break;
+#else
+    case C_LDSP:
+      Print("ld 'rd, 'spload8imm(sp)", instr, RV_C);
+      break;
+#endif
+    case C_FLDSP:
+      Print("fld 'frd, 'spload8imm(sp)", instr, RV_C | RV_D);
+      break;
+    case C_SWSP:
+      Print("sw 'rs2, 'spstore4imm(sp)", instr, RV_C);
+      break;
+#if XLEN == 32
+    case C_FSWSP:
+      Print("fsw 'frs2, 'spstore4imm(sp)", instr, RV_C | RV_F);
+      break;
+#else
+    case C_SDSP:
+      Print("sd 'rs2, 'spstore8imm(sp)", instr, RV_C);
+      break;
+#endif
+    case C_FSDSP:
+      Print("fsd 'frs2, 'spstore8imm(sp)", instr, RV_C | RV_D);
+      break;
+    case C_LW:
+      Print("lw 'rdp, 'mem4imm('rs1p)", instr, RV_C);
+      break;
+#if XLEN == 32
+    case C_FLW:
+      Print("flw 'frdp, 'mem4imm('rs1p)", instr, RV_C | RV_F);
+      break;
+#else
+    case C_LD:
+      Print("ld 'rdp, 'mem8imm('rs1p)", instr, RV_C);
+      break;
+#endif
+    case C_FLD:
+      Print("fld 'frdp, 'mem8imm('rs1p)", instr, RV_C | RV_D);
+      break;
+    case C_SW:
+      Print("sw 'rs2p, 'mem4imm('rs1p)", instr, RV_C);
+      break;
+#if XLEN == 32
+    case C_FSW:
+      Print("fsw 'frs2p, 'mem4imm('rs1p)", instr, RV_C | RV_F);
+      break;
+#else
+    case C_SD:
+      Print("sd 'rs2p, 'mem8imm('rs1p)", instr, RV_C);
+      break;
+#endif
+    case C_FSD:
+      Print("fsd 'frs2p, 'mem8imm('rs1p)", instr, RV_C | RV_F);
+      break;
+    case C_J:
+      Print("j 'jimm", instr, RV_C);
+      break;
+#if XLEN == 32
+    case C_JAL:
+      Print("jal 'jimm", instr, RV_C);
+      break;
+#endif
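+    // c.jr/c.mv and c.jalr/c.ebreak/c.add share an opcode; the bit in
+    // C_JALR ^ C_JR selects between the two groups.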
+    case C_JR:
+      if (instr.encoding() & (C_JALR ^ C_JR)) {
+        if ((instr.rs1() == ZR) && (instr.rs2() == ZR)) {
+          Print("ebreak", instr, RV_C);
+        } else if (instr.rs2() == ZR) {
+          Print("jalr 'rs1", instr, RV_C);
+        } else {
+          Print("add 'rd, 'rs1, 'rs2", instr, RV_C);
+        }
+      } else {
+        if ((instr.rd() != ZR) && (instr.rs2() != ZR)) {
+          Print("mv 'rd, 'rs2", instr, RV_C);
+        } else if (instr.rs2() != ZR) {
+          UnknownInstruction(instr);
+        } else if (instr.rs1() == RA) {
+          Print("ret", instr, RV_C);
+        } else {
+          Print("jr 'rs1", instr, RV_C);
+        }
+      }
+      break;
+    case C_BEQZ:
+      Print("beqz 'rs1p, 'bimm", instr, RV_C);
+      break;
+    case C_BNEZ:
+      Print("bnez 'rs1p, 'bimm", instr, RV_C);
+      break;
+    case C_LI:
+      Print("li 'rd, 'iimm", instr, RV_C);
+      break;
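+    // C_LUI with rd == sp actually encodes c.addi16sp.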
+    case C_LUI:
+      if (instr.rd() == SP) {
+        Print("addi 'rd, 'rs1, 'i16imm", instr, RV_C);
+      } else {
+        Print("lui 'rd, 'uimm", instr, RV_C);
+      }
+      break;
+    case C_ADDI:
+      if ((instr.rd() == ZR) && (instr.rs1() == ZR) && (instr.i_imm() == 0)) {
+        Print("nop", instr, RV_C);
+      } else {
+        Print("addi 'rd, 'rs1, 'iimm", instr, RV_C);
+      }
+      break;
+#if XLEN >= 64
+    case C_ADDIW:
+      if (instr.i_imm() == 0) {
+        Print("sext.w 'rd, 'rs1", instr, RV_C);
+      } else {
+        Print("addiw 'rd, 'rs1, 'iimm", instr, RV_C);
+      }
+      break;
+#endif
+    case C_ADDI4SPN:
+      if (instr.i4spn_imm() == 0) {
+        UnknownInstruction(instr);
+      } else {
+        Print("addi 'rdp, sp, 'i4spnimm", instr, RV_C);
+      }
+      break;
+    case C_SLLI:
+      if (instr.i_imm() == 0) {
+        UnknownInstruction(instr);
+      } else {
+        Print("slli 'rd, 'rs1, 'iimm", instr, RV_C);
+      }
+      break;
+    case C_MISCALU:
+      switch (instr.encoding() & C_MISCALU_MASK) {
+        case C_SRLI:
+          if (instr.i_imm() == 0) {
+            UnknownInstruction(instr);
+          } else {
+            Print("srli 'rs1p, 'rs1p, 'iimm", instr, RV_C);
+          }
+          break;
+        case C_SRAI:
+          if (instr.i_imm() == 0) {
+            UnknownInstruction(instr);
+          } else {
+            Print("srai 'rs1p, 'rs1p, 'iimm", instr, RV_C);
+          }
+          break;
+        case C_ANDI:
+          Print("andi 'rs1p, 'rs1p, 'iimm", instr, RV_C);
+          break;
+        case C_RR:
+          switch (instr.encoding() & C_RR_MASK) {
+            case C_AND:
+              Print("and 'rs1p, 'rs1p, 'rs2p", instr, RV_C);
+              break;
+            case C_OR:
+              Print("or 'rs1p, 'rs1p, 'rs2p", instr, RV_C);
+              break;
+            case C_XOR:
+              Print("xor 'rs1p, 'rs1p, 'rs2p", instr, RV_C);
+              break;
+            case C_SUB:
+              Print("sub 'rs1p, 'rs1p, 'rs2p", instr, RV_C);
+              break;
+#if XLEN >= 64
+            case C_ADDW:
+              Print("addw 'rs1p, 'rs1p, 'rs2p", instr, RV_C);
+              break;
+            case C_SUBW:
+              Print("subw 'rs1p, 'rs1p, 'rs2p", instr, RV_C);
+              break;
+#endif
+            default:
+              UnknownInstruction(instr);
+          }
+          break;
+        default:
+          UnknownInstruction(instr);
+      }
+      break;
+    default:
+      if ((instr.encoding() == 0) ||
+          (instr.encoding() == static_cast<uint16_t>(-1))) {
+        Print("trap", instr, RV_C);
+        break;
+      }
+      UnknownInstruction(instr);
+  }
+}
+
+void RISCVDisassembler::DisassembleLUI(Instr instr) {
+  Print("lui 'rd, 'uimm", instr, RV_I);
+}
+
+void RISCVDisassembler::DisassembleAUIPC(Instr instr) {
+  Print("auipc 'rd, 'uimm", instr, RV_I);
+}
+
+void RISCVDisassembler::DisassembleJAL(Instr instr) {
+  if (instr.rd() == ZR) {
+    Print("j 'jimm", instr, RV_I);
+  } else if (instr.rd() == RA) {
+    Print("jal 'jimm", instr, RV_I);
+  } else {
+    Print("jal 'rd, 'jimm", instr, RV_I);
+  }
+}
+
+void RISCVDisassembler::DisassembleJALR(Instr instr) {
+  if (instr.rd() == ZR) {
+    if ((instr.rs1() == RA) && (instr.itype_imm() == 0)) {
+      Print("ret", instr, RV_I);
+    } else if (instr.itype_imm() == 0) {
+      Print("jr 'rs1", instr, RV_I);
+    } else {
+      Print("jr 'iimm('rs1)", instr, RV_I);
+    }
+  } else if (instr.rd() == RA) {
+    if (instr.itype_imm() == 0) {
+      Print("jalr 'rs1", instr, RV_I);
+    } else {
+      Print("jalr 'iimm('rs1)", instr, RV_I);
+    }
+  } else {
+    if (instr.itype_imm() == 0) {
+      Print("jalr 'rd, 'rs1", instr, RV_I);
+    } else {
+      Print("jalr 'rd, 'iimm('rs1)", instr, RV_I);
+    }
+  }
+}
+
+void RISCVDisassembler::DisassembleBRANCH(Instr instr) {
+  switch (instr.funct3()) {
+    case BEQ:
+      if (instr.rs2() == ZR) {
+        Print("beqz 'rs1, 'bimm", instr, RV_I);
+      } else {
+        Print("beq 'rs1, 'rs2, 'bimm", instr, RV_I);
+      }
+      break;
+    case BNE:
+      if (instr.rs2() == ZR) {
+        Print("bnez 'rs1, 'bimm", instr, RV_I);
+      } else {
+        Print("bne 'rs1, 'rs2, 'bimm", instr, RV_I);
+      }
+      break;
+    case BLT:
+      if (instr.rs2() == ZR) {
+        Print("bltz 'rs1, 'bimm", instr, RV_I);
+      } else if (instr.rs1() == ZR) {
+        Print("bgtz 'rs2, 'bimm", instr, RV_I);
+      } else {
+        Print("blt 'rs1, 'rs2, 'bimm", instr, RV_I);
+      }
+      break;
+    case BGE:
+      if (instr.rs2() == ZR) {
+        Print("bgez 'rs1, 'bimm", instr, RV_I);
+      } else if (instr.rs1() == ZR) {
+        Print("blez 'rs2, 'bimm", instr, RV_I);
+      } else {
+        Print("ble 'rs2, 'rs1, 'bimm", instr, RV_I);
+      }
+      break;
+    case BLTU:
+      Print("bltu 'rs1, 'rs2, 'bimm", instr, RV_I);
+      break;
+    case BGEU:
+      Print("bleu 'rs2, 'rs1, 'bimm", instr, RV_I);
+      break;
+    default:
+      UnknownInstruction(instr);
+  }
+}
+
+void RISCVDisassembler::DisassembleLOAD(Instr instr) {
+  switch (instr.funct3()) {
+    case LB:
+      Print("lb 'rd, 'iimm('rs1)", instr, RV_I);
+      break;
+    case LH:
+      Print("lh 'rd, 'iimm('rs1)", instr, RV_I);
+      break;
+    case LW:
+      Print("lw 'rd, 'iimm('rs1)", instr, RV_I);
+      break;
+    case LBU:
+      Print("lbu 'rd, 'iimm('rs1)", instr, RV_I);
+      break;
+    case LHU:
+      Print("lhu 'rd, 'iimm('rs1)", instr, RV_I);
+      break;
+#if XLEN >= 64
+    case LWU:
+      Print("lwu 'rd, 'iimm('rs1)", instr, RV_I);
+      break;
+    case LD:
+      Print("ld 'rd, 'iimm('rs1)", instr, RV_I);
+      break;
+#endif
+    default:
+      UnknownInstruction(instr);
+  }
+}
+
+void RISCVDisassembler::DisassembleLOADFP(Instr instr) {
+  switch (instr.funct3()) {
+    case S:
+      Print("flw 'frd, 'iimm('rs1)", instr, RV_F);
+      break;
+    case D:
+      Print("fld 'frd, 'iimm('rs1)", instr, RV_D);
+      break;
+    default:
+      UnknownInstruction(instr);
+  }
+}
+
+void RISCVDisassembler::DisassembleSTORE(Instr instr) {
+  switch (instr.funct3()) {
+    case SB:
+      Print("sb 'rs2, 'simm('rs1)", instr, RV_I);
+      break;
+    case SH:
+      Print("sh 'rs2, 'simm('rs1)", instr, RV_I);
+      break;
+    case SW:
+      Print("sw 'rs2, 'simm('rs1)", instr, RV_I);
+      break;
+#if XLEN >= 64
+    case SD:
+      Print("sd 'rs2, 'simm('rs1)", instr, RV_I);
+      break;
+#endif
+    default:
+      UnknownInstruction(instr);
+  }
+}
+
+void RISCVDisassembler::DisassembleSTOREFP(Instr instr) {
+  switch (instr.funct3()) {
+    case S:
+      Print("fsw 'frs2, 'simm('rs1)", instr, RV_F);
+      break;
+    case D:
+      Print("fsd 'frs2, 'simm('rs1)", instr, RV_D);
+      break;
+    default:
+      UnknownInstruction(instr);
+  }
+}
+
+void RISCVDisassembler::DisassembleOPIMM(Instr instr) {
+  switch (instr.funct3()) {
+    case ADDI:
+      if ((instr.rd() == ZR) && (instr.rs1() == ZR) &&
+          (instr.itype_imm() == 0)) {
+        Print("nop", instr, RV_I);  // The canonical nop.
+      } else if (instr.itype_imm() == 0) {
+        Print("mv 'rd, 'rs1", instr, RV_I);
+      } else if (instr.rs1() == ZR) {
+        Print("li 'rd, 'iimm", instr, RV_I);
+      } else {
+        Print("addi 'rd, 'rs1, 'iimm", instr, RV_I);
+      }
+      break;
+    case SLTI:
+      Print("slti 'rd, 'rs1, 'iimm", instr, RV_I);
+      break;
+    case SLTIU:
+      if (instr.itype_imm() == 1) {
+        Print("seqz 'rd, 'rs1", instr, RV_I);
+      } else {
+        Print("sltiu 'rd, 'rs1, 'iimm", instr, RV_I);
+      }
+      break;
+    case XORI:
+      if (instr.itype_imm() == -1) {
+        Print("not 'rd, 'rs1", instr, RV_I);
+      } else {
+        Print("xori 'rd, 'rs1, 'iimm", instr, RV_I);
+      }
+      break;
+    case ORI:
+      Print("ori 'rd, 'rs1, 'iimm", instr, RV_I);
+      break;
+    case ANDI:
+      Print("andi 'rd, 'rs1, 'iimm", instr, RV_I);
+      break;
+    case SLLI:
+      Print("slli 'rd, 'rs1, 'shamt", instr, RV_I);
+      break;
+    case SRI:
+      if ((instr.funct7() & 0b1111110) == SRA) {
+        Print("srai 'rd, 'rs1, 'shamt", instr, RV_I);
+      } else {
+        Print("srli 'rd, 'rs1, 'shamt", instr, RV_I);
+      }
+      break;
+    default:
+      UnknownInstruction(instr);
+  }
+}
+
+void RISCVDisassembler::DisassembleOPIMM32(Instr instr) {
+  switch (instr.funct3()) {
+#if XLEN >= 64
+    case ADDI:
+      if (instr.itype_imm() == 0) {
+        Print("sext.w 'rd, 'rs1", instr, RV_I);
+      } else {
+        Print("addiw 'rd, 'rs1, 'iimm", instr, RV_I);
+      }
+      break;
+    case SLLI:
+      Print("slliw 'rd, 'rs1, 'shamt", instr, RV_I);
+      break;
+    case SRI:
+      if (instr.funct7() == SRA) {
+        Print("sraiw 'rd, 'rs1, 'shamt", instr, RV_I);
+      } else {
+        Print("srliw 'rd, 'rs1, 'shamt", instr, RV_I);
+      }
+      break;
+#endif
+    default:
+      UnknownInstruction(instr);
+  }
+}
+
+void RISCVDisassembler::DisassembleOP(Instr instr) {
+  switch (instr.funct7()) {
+    case 0:
+      DisassembleOP_0(instr);
+      break;
+    case SUB:
+      DisassembleOP_SUB(instr);
+      break;
+    case MULDIV:
+      DisassembleOP_MULDIV(instr);
+      break;
+    default:
+      UnknownInstruction(instr);
+  }
+}
+
+void RISCVDisassembler::DisassembleOP_0(Instr instr) {
+  switch (instr.funct3()) {
+    case ADD:
+      Print("add 'rd, 'rs1, 'rs2", instr, RV_I);
+      break;
+    case SLL:
+      Print("sll 'rd, 'rs1, 'rs2", instr, RV_I);
+      break;
+    case SLT:
+      if (instr.rs2() == ZR) {
+        Print("sltz 'rd, 'rs1", instr, RV_I);
+      } else if (instr.rs1() == ZR) {
+        Print("sgtz 'rd, 'rs2", instr, RV_I);
+      } else {
+        Print("slt 'rd, 'rs1, 'rs2", instr, RV_I);
+      }
+      break;
+    case SLTU:
+      if (instr.rs1() == ZR) {
+        Print("snez 'rd, 'rs2", instr, RV_I);
+      } else {
+        Print("sltu 'rd, 'rs1, 'rs2", instr, RV_I);
+      }
+      break;
+    case XOR:
+      Print("xor 'rd, 'rs1, 'rs2", instr, RV_I);
+      break;
+    case SR:
+      Print("srl 'rd, 'rs1, 'rs2", instr, RV_I);
+      break;
+    case OR:
+      Print("or 'rd, 'rs1, 'rs2", instr, RV_I);
+      break;
+    case AND:
+      Print("and 'rd, 'rs1, 'rs2", instr, RV_I);
+      break;
+    default:
+      UnknownInstruction(instr);
+  }
+}
+
+void RISCVDisassembler::DisassembleOP_SUB(Instr instr) {
+  switch (instr.funct3()) {
+    case ADD:
+      if (instr.rs1() == ZR) {
+        Print("neg 'rd, 'rs2", instr, RV_I);
+      } else {
+        Print("sub 'rd, 'rs1, 'rs2", instr, RV_I);
+      }
+      break;
+    case SR:
+      Print("sra 'rd, 'rs1, 'rs2", instr, RV_I);
+      break;
+    default:
+      UnknownInstruction(instr);
+  }
+}
+
+void RISCVDisassembler::DisassembleOP_MULDIV(Instr instr) {
+  switch (instr.funct3()) {
+    case MUL:
+      Print("mul 'rd, 'rs1, 'rs2", instr, RV_M);
+      break;
+    case MULH:
+      Print("mulh 'rd, 'rs1, 'rs2", instr, RV_M);
+      break;
+    case MULHSU:
+      Print("mulhsu 'rd, 'rs1, 'rs2", instr, RV_M);
+      break;
+    case MULHU:
+      Print("mulhu 'rd, 'rs1, 'rs2", instr, RV_M);
+      break;
+    case DIV:
+      Print("div 'rd, 'rs1, 'rs2", instr, RV_M);
+      break;
+    case DIVU:
+      Print("divu 'rd, 'rs1, 'rs2", instr, RV_M);
+      break;
+    case REM:
+      Print("rem 'rd, 'rs1, 'rs2", instr, RV_M);
+      break;
+    case REMU:
+      Print("remu 'rd, 'rs1, 'rs2", instr, RV_M);
+      break;
+    default:
+      UnknownInstruction(instr);
+  }
+}
+
+void RISCVDisassembler::DisassembleOP32(Instr instr) {
+  switch (instr.funct7()) {
+    case 0:
+      DisassembleOP32_0(instr);
+      break;
+    case SUB:
+      DisassembleOP32_SUB(instr);
+      break;
+    case MULDIV:
+      DisassembleOP32_MULDIV(instr);
+      break;
+    default:
+      UnknownInstruction(instr);
+  }
+}
+
+void RISCVDisassembler::DisassembleOP32_0(Instr instr) {
+  switch (instr.funct3()) {
+#if XLEN >= 64
+    case ADD:
+      Print("addw 'rd, 'rs1, 'rs2", instr, RV_I);
+      break;
+    case SLL:
+      Print("sllw 'rd, 'rs1, 'rs2", instr, RV_I);
+      break;
+    case SR:
+      Print("srlw 'rd, 'rs1, 'rs2", instr, RV_I);
+      break;
+#endif
+    default:
+      UnknownInstruction(instr);
+  }
+}
+
+void RISCVDisassembler::DisassembleOP32_SUB(Instr instr) {
+  switch (instr.funct3()) {
+#if XLEN >= 64
+    case ADD:
+      if (instr.rs1() == ZR) {
+        Print("negw 'rd, 'rs2", instr, RV_I);
+      } else {
+        Print("subw 'rd, 'rs1, 'rs2", instr, RV_I);
+      }
+      break;
+    case SR:
+      Print("sraw 'rd, 'rs1, 'rs2", instr, RV_I);
+      break;
+#endif
+    default:
+      UnknownInstruction(instr);
+  }
+}
+
+void RISCVDisassembler::DisassembleOP32_MULDIV(Instr instr) {
+  switch (instr.funct3()) {
+#if XLEN >= 64
+    case MULW:
+      Print("mulw 'rd, 'rs1, 'rs2", instr, RV_M);
+      break;
+    case DIVW:
+      Print("divw 'rd, 'rs1, 'rs2", instr, RV_M);
+      break;
+    case DIVUW:
+      Print("divuw 'rd, 'rs1, 'rs2", instr, RV_M);
+      break;
+    case REMW:
+      Print("remw 'rd, 'rs1, 'rs2", instr, RV_M);
+      break;
+    case REMUW:
+      Print("remuw 'rd, 'rs1, 'rs2", instr, RV_M);
+      break;
+#endif
+    default:
+      UnknownInstruction(instr);
+  }
+}
+
+void RISCVDisassembler::DisassembleMISCMEM(Instr instr) {
+  switch (instr.funct3()) {
+    case FENCE:
+      Print("fence'predsucc", instr, RV_I);
+      break;
+    case FENCEI:
+      Print("fence.i", instr, RV_I);
+      break;
+    default:
+      UnknownInstruction(instr);
+  }
+}
+
+void RISCVDisassembler::DisassembleSYSTEM(Instr instr) {
+  switch (instr.funct3()) {
+    case 0:
+      switch (instr.funct12()) {
+        case ECALL:
+          if (instr.rs1() == ZR) {
+            Print("ecall", instr, RV_I);
+          } else {
+            Print("SimulatorPrintObject 'rs1", instr, RV_I);
+          }
+          break;
+        case EBREAK:
+          Print("ebreak", instr, RV_I);
+          break;
+        default:
+          UnknownInstruction(instr);
+      }
+      break;
+    case CSRRW:
+      if (instr.rd() == ZR) {
+        Print("csrw 'csr, 'rs1", instr, RV_I);
+      } else {
+        Print("csrrw 'rd, 'csr, 'rs1", instr, RV_I);
+      }
+      break;
+    case CSRRS:
+      if (instr.rs1() == ZR) {
+        Print("csrr 'rd, 'csr", instr, RV_I);
+      } else if (instr.rd() == ZR) {
+        Print("csrs 'csr, 'rs1", instr, RV_I);
+      } else {
+        Print("csrrs 'rd, 'csr, 'rs1", instr, RV_I);
+      }
+      break;
+    case CSRRC:
+      if (instr.rd() == ZR) {
+        Print("csrc 'csr, 'rs1", instr, RV_I);
+      } else {
+        Print("csrrc 'rd, 'csr, 'rs1", instr, RV_I);
+      }
+      break;
+    case CSRRWI:
+      if (instr.rd() == ZR) {
+        Print("csrwi 'csr, 'zimm", instr, RV_I);
+      } else {
+        Print("csrrwi 'rd, 'csr, 'zimm", instr, RV_I);
+      }
+      break;
+    case CSRRSI:
+      if (instr.rd() == ZR) {
+        Print("csrsi 'csr, 'zimm", instr, RV_I);
+      } else {
+        Print("csrrsi 'rd, 'csr, 'zimm", instr, RV_I);
+      }
+      break;
+    case CSRRCI:
+      if (instr.rd() == ZR) {
+        Print("csrci 'csr, 'zimm", instr, RV_I);
+      } else {
+        Print("csrrci 'rd, 'csr, 'zimm", instr, RV_I);
+      }
+      break;
+    default:
+      UnknownInstruction(instr);
+  }
+}
+
+void RISCVDisassembler::DisassembleAMO(Instr instr) {
+  switch (instr.funct3()) {
+    case WIDTH32:
+      DisassembleAMO32(instr);
+      break;
+    case WIDTH64:
+      DisassembleAMO64(instr);
+      break;
+    default:
+      UnknownInstruction(instr);
+  }
+}
+
+void RISCVDisassembler::DisassembleAMO32(Instr instr) {
+  switch (instr.funct5()) {
+    case LR:
+      Print("lr.w'order 'rd, ('rs1)", instr, RV_A);
+      break;
+    case SC:
+      Print("sc.w'order 'rd, 'rs2, ('rs1)", instr, RV_A);
+      break;
+    case AMOSWAP:
+      Print("amoswap.w'order 'rd, 'rs2, ('rs1)", instr, RV_A);
+      break;
+    case AMOADD:
+      Print("amoadd.w'order 'rd, 'rs2, ('rs1)", instr, RV_A);
+      break;
+    case AMOXOR:
+      Print("amoxor.w'order 'rd, 'rs2, ('rs1)", instr, RV_A);
+      break;
+    case AMOAND:
+      Print("amoand.w'order 'rd, 'rs2, ('rs1)", instr, RV_A);
+      break;
+    case AMOOR:
+      Print("amoor.w'order 'rd, 'rs2, ('rs1)", instr, RV_A);
+      break;
+    case AMOMIN:
+      Print("amomin.w'order 'rd, 'rs2, ('rs1)", instr, RV_A);
+      break;
+    case AMOMAX:
+      Print("amomax.w'order 'rd, 'rs2, ('rs1)", instr, RV_A);
+      break;
+    case AMOMINU:
+      Print("amominu.w'order 'rd, 'rs2, ('rs1)", instr, RV_A);
+      break;
+    case AMOMAXU:
+      Print("amomaxu.w'order 'rd, 'rs2, ('rs1)", instr, RV_A);
+      break;
+    default:
+      UnknownInstruction(instr);
+  }
+}
+
+void RISCVDisassembler::DisassembleAMO64(Instr instr) {
+  switch (instr.funct5()) {
+#if XLEN >= 64
+    case LR:
+      Print("lr.d'order 'rd, ('rs1)", instr, RV_A);
+      break;
+    case SC:
+      Print("sc.d'order 'rd, 'rs2, ('rs1)", instr, RV_A);
+      break;
+    case AMOSWAP:
+      Print("amoswap.d'order 'rd, 'rs2, ('rs1)", instr, RV_A);
+      break;
+    case AMOADD:
+      Print("amoadd.d'order 'rd, 'rs2, ('rs1)", instr, RV_A);
+      break;
+    case AMOXOR:
+      Print("amoxor.d'order 'rd, 'rs2, ('rs1)", instr, RV_A);
+      break;
+    case AMOAND:
+      Print("amoand.d'order 'rd, 'rs2, ('rs1)", instr, RV_A);
+      break;
+    case AMOOR:
+      Print("amoor.d'order 'rd, 'rs2, ('rs1)", instr, RV_A);
+      break;
+    case AMOMIN:
+      Print("amomin.d'order 'rd, 'rs2, ('rs1)", instr, RV_A);
+      break;
+    case AMOMAX:
+      Print("amomax.d'order 'rd, 'rs2, ('rs1)", instr, RV_A);
+      break;
+    case AMOMINU:
+      Print("amominu.d'order 'rd, 'rs2, ('rs1)", instr, RV_A);
+      break;
+    case AMOMAXU:
+      Print("amomaxu.d'order 'rd, 'rs2, ('rs1)", instr, RV_A);
+      break;
+#endif
+    default:
+      UnknownInstruction(instr);
+  }
+}
+
+void RISCVDisassembler::DisassembleFMADD(Instr instr) {
+  switch (instr.funct2()) {
+    case F2_S:
+      Print("fmadd.s 'frd, 'frs1, 'frs2, 'frs3'round", instr, RV_F);
+      break;
+    case F2_D:
+      Print("fmadd.d 'frd, 'frs1, 'frs2, 'frs3'round", instr, RV_D);
+      break;
+    default:
+      UnknownInstruction(instr);
+  }
+}
+
+void RISCVDisassembler::DisassembleFMSUB(Instr instr) {
+  switch (instr.funct2()) {
+    case F2_S:
+      Print("fmsub.s 'frd, 'frs1, 'frs2, 'frs3'round", instr, RV_F);
+      break;
+    case F2_D:
+      Print("fmsub.d 'frd, 'frs1, 'frs2, 'frs3'round", instr, RV_D);
+      break;
+    default:
+      UnknownInstruction(instr);
+  }
+}
+
+void RISCVDisassembler::DisassembleFNMADD(Instr instr) {
+  switch (instr.funct2()) {
+    case F2_S:
+      Print("fnmadd.s 'frd, 'frs1, 'frs2, 'frs3'round", instr, RV_F);
+      break;
+    case F2_D:
+      Print("fnmadd.d 'frd, 'frs1, 'frs2, 'frs3'round", instr, RV_D);
+      break;
+    default:
+      UnknownInstruction(instr);
+  }
+}
+
+void RISCVDisassembler::DisassembleFNMSUB(Instr instr) {
+  switch (instr.funct2()) {
+    case F2_S:
+      Print("fnmsub.s 'frd, 'frs1, 'frs2, 'frs3'round", instr, RV_F);
+      break;
+    case F2_D:
+      Print("fnmsub.d 'frd, 'frs1, 'frs2, 'frs3'round", instr, RV_D);
+      break;
+    default:
+      UnknownInstruction(instr);
+  }
+}
+
+void RISCVDisassembler::DisassembleOPFP(Instr instr) {
+  switch (instr.funct7()) {
+    case FADDS:
+      Print("fadd.s 'frd, 'frs1, 'frs2'round", instr, RV_F);
+      break;
+    case FSUBS:
+      Print("fsub.s 'frd, 'frs1, 'frs2'round", instr, RV_F);
+      break;
+    case FMULS:
+      Print("fmul.s 'frd, 'frs1, 'frs2'round", instr, RV_F);
+      break;
+    case FDIVS:
+      Print("fdiv.s 'frd, 'frs1, 'frs2'round", instr, RV_F);
+      break;
+    case FSQRTS:
+      Print("fsqrt.s 'frd, 'frs1'round", instr, RV_F);
+      break;
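+    // Sign-injection with identical source registers is the canonical
+    // encoding of the fmv/fneg/fabs pseudo-instructions, so print those
+    // forms when rs1 == rs2.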
+    case FSGNJS: {
+      switch (instr.funct3()) {
+        case J:
+          if (instr.frs1() == instr.frs2()) {
+            Print("fmv.s 'frd, 'frs1", instr, RV_F);
+          } else {
+            Print("fsgnj.s 'frd, 'frs1, 'frs2", instr, RV_F);
+          }
+          break;
+        case JN:
+          if (instr.frs1() == instr.frs2()) {
+            Print("fneg.s 'frd, 'frs1", instr, RV_F);
+          } else {
+            Print("fsgnjn.s 'frd, 'frs1, 'frs2", instr, RV_F);
+          }
+          break;
+        case JX:
+          if (instr.frs1() == instr.frs2()) {
+            Print("fabs.s 'frd, 'frs1", instr, RV_F);
+          } else {
+            Print("fsgnjx.s 'frd, 'frs1, 'frs2", instr, RV_F);
+          }
+          break;
+        default:
+          UnknownInstruction(instr);
+      }
+      break;
+    }
+    case FMINMAXS: {
+      switch (instr.funct3()) {
+        case MIN:
+          Print("fmin.s 'frd, 'frs1, 'frs2", instr, RV_F);
+          break;
+        case MAX:
+          Print("fmax.s 'frd, 'frs1, 'frs2", instr, RV_F);
+          break;
+        default:
+          UnknownInstruction(instr);
+      }
+      break;
+    }
+    case FCMPS: {
+      switch (instr.funct3()) {
+        case FEQ:
+          Print("feq.s 'rd, 'frs1, 'frs2", instr, RV_F);
+          break;
+        case FLT:
+          Print("flt.s 'rd, 'frs1, 'frs2", instr, RV_F);
+          break;
+        case FLE:
+          Print("fle.s 'rd, 'frs1, 'frs2", instr, RV_F);
+          break;
+        default:
+          UnknownInstruction(instr);
+      }
+      break;
+    }
+    case FCLASSS:  // = FMVXW
+      switch (instr.funct3()) {
+        case 1:
+          Print("fclass.s 'rd, 'frs1", instr, RV_F);
+          break;
+        case 0:
+          Print("fmv.x.w 'rd, 'frs1", instr, RV_F);
+          break;
+        default:
+          UnknownInstruction(instr);
+      }
+      break;
+    case FCVTintS:
+      switch (static_cast<FcvtRs2>(instr.rs2())) {
+        case W:
+          Print("fcvt.w.s 'rd, 'frs1'round", instr, RV_F);
+          break;
+        case WU:
+          Print("fcvt.wu.s 'rd, 'frs1'round", instr, RV_F);
+          break;
+#if XLEN >= 64
+        case L:
+          Print("fcvt.l.s 'rd, 'frs1'round", instr, RV_F);
+          break;
+        case LU:
+          Print("fcvt.lu.s 'rd, 'frs1'round", instr, RV_F);
+          break;
+#endif
+        default:
+          UnknownInstruction(instr);
+      }
+      break;
+    case FCVTSint:
+      switch (static_cast<FcvtRs2>(instr.rs2())) {
+        case W:
+          Print("fcvt.s.w 'frd, 'rs1", instr, RV_F);
+          break;
+        case WU:
+          Print("fcvt.s.wu 'frd, 'rs1", instr, RV_F);
+          break;
+#if XLEN >= 64
+        case L:
+          Print("fcvt.s.l 'frd, 'rs1", instr, RV_F);
+          break;
+        case LU:
+          Print("fcvt.s.lu 'frd, 'rs1", instr, RV_F);
+          break;
+#endif
+        default:
+          UnknownInstruction(instr);
+      }
+      break;
+    case FMVWX:
+      Print("fmv.w.x 'frd, 'rs1", instr, RV_F);
+      break;
+    case FADDD:
+      Print("fadd.d 'frd, 'frs1, 'frs2'round", instr, RV_D);
+      break;
+    case FSUBD:
+      Print("fsub.d 'frd, 'frs1, 'frs2'round", instr, RV_D);
+      break;
+    case FMULD:
+      Print("fmul.d 'frd, 'frs1, 'frs2'round", instr, RV_D);
+      break;
+    case FDIVD:
+      Print("fdiv.d 'frd, 'frs1, 'frs2'round", instr, RV_D);
+      break;
+    case FSQRTD:
+      Print("fsqrt.d 'frd, 'frs1'round", instr, RV_D);
+      break;
+    case FSGNJD: {
+      switch (instr.funct3()) {
+        case J:
+          if (instr.frs1() == instr.frs2()) {
+            Print("fmv.d 'frd, 'frs1", instr, RV_D);
+          } else {
+            Print("fsgnj.d 'frd, 'frs1, 'frs2", instr, RV_D);
+          }
+          break;
+        case JN:
+          if (instr.frs1() == instr.frs2()) {
+            Print("fneg.d 'frd, 'frs1", instr, RV_D);
+          } else {
+            Print("fsgnjn.d 'frd, 'frs1, 'frs2", instr, RV_D);
+          }
+          break;
+        case JX:
+          if (instr.frs1() == instr.frs2()) {
+            Print("fabs.d 'frd, 'frs1", instr, RV_D);
+          } else {
+            Print("fsgnjx.d 'frd, 'frs1, 'frs2", instr, RV_D);
+          }
+          break;
+        default:
+          UnknownInstruction(instr);
+      }
+      break;
+    }
+    case FMINMAXD: {
+      switch (instr.funct3()) {
+        case MIN:
+          Print("fmin.d 'frd, 'frs1, 'frs2", instr, RV_D);
+          break;
+        case MAX:
+          Print("fmax.d 'frd, 'frs1, 'frs2", instr, RV_D);
+          break;
+        default:
+          UnknownInstruction(instr);
+      }
+      break;
+    }
+    case FCVTS: {
+      switch (instr.rs2()) {
+        case 1:
+          Print("fcvt.s.d 'frd, 'frs1'round", instr, RV_D);
+          break;
+        default:
+          UnknownInstruction(instr);
+      }
+      break;
+    }
+    case FCVTD: {
+      switch (instr.rs2()) {
+        case 0:
+          Print("fcvt.d.s 'frd, 'frs1", instr, RV_D);
+          break;
+        default:
+          UnknownInstruction(instr);
+      }
+      break;
+    }
+    case FCMPD: {
+      switch (instr.funct3()) {
+        case FEQ:
+          Print("feq.d 'rd, 'frs1, 'frs2", instr, RV_D);
+          break;
+        case FLT:
+          Print("flt.d 'rd, 'frs1, 'frs2", instr, RV_D);
+          break;
+        case FLE:
+          Print("fle.d 'rd, 'frs1, 'frs2", instr, RV_D);
+          break;
+        default:
+          UnknownInstruction(instr);
+      }
+      break;
+    }
+    case FCLASSD:  // = FMVXD
+      switch (instr.funct3()) {
+        case 1:
+          Print("fclass.d 'rd, 'frs1", instr, RV_D);
+          break;
+#if XLEN >= 64
+        case 0:
+          Print("fmv.x.d 'rd, 'frs1", instr, RV_D);
+          break;
+#endif
+        default:
+          UnknownInstruction(instr);
+      }
+      break;
+    case FCVTintD:
+      switch (static_cast<FcvtRs2>(instr.rs2())) {
+        case W:
+          Print("fcvt.w.d 'rd, 'frs1'round", instr, RV_D);
+          break;
+        case WU:
+          Print("fcvt.wu.d 'rd, 'frs1'round", instr, RV_D);
+          break;
+#if XLEN >= 64
+        case L:
+          Print("fcvt.l.d 'rd, 'frs1'round", instr, RV_D);
+          break;
+        case LU:
+          Print("fcvt.lu.d 'rd, 'frs1'round", instr, RV_D);
+          break;
+#endif
+        default:
+          UnknownInstruction(instr);
+      }
+      break;
+    case FCVTDint:
+      switch (static_cast<FcvtRs2>(instr.rs2())) {
+        case W:
+          Print("fcvt.d.w 'frd, 'rs1", instr, RV_D);
+          break;
+        case WU:
+          Print("fcvt.d.wu 'frd, 'rs1", instr, RV_D);
+          break;
+#if XLEN >= 64
+        case L:
+          Print("fcvt.d.l 'frd, 'rs1", instr, RV_D);
+          break;
+        case LU:
+          Print("fcvt.d.lu 'frd, 'rs1", instr, RV_D);
+          break;
+#endif
+        default:
+          UnknownInstruction(instr);
+      }
+      break;
+#if XLEN >= 64
+    case FMVDX:
+      Print("fmv.d.x 'frd, 'rs1", instr, RV_D);
+      break;
+#endif
+    default:
+      UnknownInstruction(instr);
+  }
+}
+
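+// The all-zeros encoding is defined by RISC-V to be permanently illegal;
+// the VM uses it as a trap filler, so print it as "trap".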
+void RISCVDisassembler::UnknownInstruction(Instr instr) {
+  if (instr.encoding() == 0) {
+    Print("trap", instr, RV_I);
+  } else {
+    Print("unknown", instr, ExtensionSet::Empty());
+  }
+}
+
+void RISCVDisassembler::UnknownInstruction(CInstr instr) {
+  if (instr.encoding() == 0) {
+    Print("trap", instr, RV_I);
+  } else {
+    Print("unknown", instr, ExtensionSet::Empty());
+  }
+}
+
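+// Print expands the format string: a ' introduces a directive (e.g. 'rd,
+// 'iimm) that PrintOption prints and skips past; all other characters are
+// copied verbatim.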
+void RISCVDisassembler::Print(const char* format,
+                              Instr instr,
+                              ExtensionSet ex) {
+  // Printf("  %08x ", instr.encoding());
+
+  while (format[0] != '\0') {
+    if (format[0] == '\'') {
+      format = PrintOption(format + 1, instr);
+    } else {
+      Printf("%c", format[0]);
+      format++;
+    }
+  }
+
+  // Printf("\n");
+}
+
+void RISCVDisassembler::Print(const char* format,
+                              CInstr instr,
+                              ExtensionSet ex) {
+  // Printf("      %04x ", instr.encoding());
+
+  while (format[0] != '\0') {
+    if (format[0] == '\'') {
+      format = PrintOption(format + 1, instr);
+    } else {
+      Printf("%c", format[0]);
+      format++;
+    }
+  }
+
+  // Printf("\n");
+}
+
+#define STRING_STARTS_WITH(string, compare_string)                             \
+  (strncmp(string, compare_string, strlen(compare_string)) == 0)
+
+const char* RISCVDisassembler::PrintOption(const char* format, Instr instr) {
+  if (STRING_STARTS_WITH(format, "rd")) {
+    Printf("%s", cpu_reg_names[instr.rd()]);
+    return format + 2;
+  } else if (STRING_STARTS_WITH(format, "rs1")) {
+    Printf("%s", cpu_reg_names[instr.rs1()]);
+    return format + 3;
+  } else if (STRING_STARTS_WITH(format, "rs2")) {
+    Printf("%s", cpu_reg_names[instr.rs2()]);
+    return format + 3;
+  } else if (STRING_STARTS_WITH(format, "shamt")) {
+    Printf("0x%x", instr.shamt());
+    return format + 5;
+  } else if (STRING_STARTS_WITH(format, "jimm")) {
+    Printf("%+" Pd, static_cast<intptr_t>(instr.jtype_imm()));
+    return format + 4;
+  } else if (STRING_STARTS_WITH(format, "uimm")) {
+    // objdump instead displays (imm >> 12) as hex.
+    Printf("%" Pd, static_cast<intptr_t>(instr.utype_imm()));
+    return format + 4;
+  } else if (STRING_STARTS_WITH(format, "iimm")) {
+    Printf("%" Pd, static_cast<intptr_t>(instr.itype_imm()));
+    return format + 4;
+  } else if (STRING_STARTS_WITH(format, "simm")) {
+    Printf("%" Pd, static_cast<intptr_t>(instr.stype_imm()));
+    return format + 4;
+  } else if (STRING_STARTS_WITH(format, "bimm")) {
+    Printf("%+" Pd, static_cast<intptr_t>(instr.btype_imm()));
+    return format + 4;
+  } else if (STRING_STARTS_WITH(format, "zimm")) {
+    Printf("%" Pd, static_cast<intptr_t>(instr.zimm()));
+    return format + 4;
+  } else if (STRING_STARTS_WITH(format, "csr")) {
+    Printf("0x%" Px, static_cast<intptr_t>(instr.csr()));
+    return format + 3;
+  } else if (STRING_STARTS_WITH(format, "order")) {
+    switch (instr.memory_order()) {
+      case std::memory_order_relaxed:
+        break;
+      case std::memory_order_acquire:
+        Printf(".aq");
+        break;
+      case std::memory_order_release:
+        Printf(".rl");
+        break;
+      case std::memory_order_acq_rel:
+        Printf(".aqrl");
+        break;
+      default:
+        UNREACHABLE();
+    }
+    return format + 5;
+  } else if (STRING_STARTS_WITH(format, "round")) {
+    switch (instr.rounding()) {
+      case RNE:
+        // Printf(", rne");
+        break;
+      case RTZ:
+        Printf(", rtz");
+        break;
+      case RDN:
+        Printf(", rdn");
+        break;
+      case RUP:
+        Printf(", rup");
+        break;
+      case RMM:
+        Printf(", rmm");
+        break;
+      case DYN:
+        Printf(", dyn");
+        break;
+      default:
+        Printf("<invalid rounding mode>");
+    }
+    return format + 5;
+  } else if (STRING_STARTS_WITH(format, "predsucc")) {
+    HartEffects pred = static_cast<HartEffects>((instr.itype_imm() >> 4) & 0xF);
+    HartEffects succ = static_cast<HartEffects>((instr.itype_imm() >> 0) & 0xF);
+    if ((pred != HartEffects::kAll) || (succ != HartEffects::kAll)) {
+      Printf(" ");
+      if ((pred & HartEffects::kInput) != 0) Printf("i");
+      if ((pred & HartEffects::kOutput) != 0) Printf("o");
+      if ((pred & HartEffects::kRead) != 0) Printf("r");
+      if ((pred & HartEffects::kWrite) != 0) Printf("w");
+      Printf(",");
+      if ((succ & HartEffects::kInput) != 0) Printf("i");
+      if ((succ & HartEffects::kOutput) != 0) Printf("o");
+      if ((succ & HartEffects::kRead) != 0) Printf("r");
+      if ((succ & HartEffects::kWrite) != 0) Printf("w");
+    }
+    return format + 8;
+  } else if (STRING_STARTS_WITH(format, "frd")) {
+    Printf("%s", fpu_reg_names[instr.frd()]);
+    return format + 3;
+  } else if (STRING_STARTS_WITH(format, "frs1")) {
+    Printf("%s", fpu_reg_names[instr.frs1()]);
+    return format + 4;
+  } else if (STRING_STARTS_WITH(format, "frs2")) {
+    Printf("%s", fpu_reg_names[instr.frs2()]);
+    return format + 4;
+  } else if (STRING_STARTS_WITH(format, "frs3")) {
+    Printf("%s", fpu_reg_names[instr.frs3()]);
+    return format + 4;
+  }
+
+  FATAL1("Bad format %s\n", format);
+  return nullptr;
+}
+
+const char* RISCVDisassembler::PrintOption(const char* format, CInstr instr) {
+  if (STRING_STARTS_WITH(format, "rdp")) {
+    Printf("%s", cpu_reg_names[instr.rdp()]);
+    return format + 3;
+  } else if (STRING_STARTS_WITH(format, "rs1p")) {
+    Printf("%s", cpu_reg_names[instr.rs1p()]);
+    return format + 4;
+  } else if (STRING_STARTS_WITH(format, "rs2p")) {
+    Printf("%s", cpu_reg_names[instr.rs2p()]);
+    return format + 4;
+  } else if (STRING_STARTS_WITH(format, "rd")) {
+    Printf("%s", cpu_reg_names[instr.rd()]);
+    return format + 2;
+  } else if (STRING_STARTS_WITH(format, "rs1")) {
+    Printf("%s", cpu_reg_names[instr.rs1()]);
+    return format + 3;
+  } else if (STRING_STARTS_WITH(format, "rs2")) {
+    Printf("%s", cpu_reg_names[instr.rs2()]);
+    return format + 3;
+  } else if (STRING_STARTS_WITH(format, "frdp")) {
+    Printf("%s", fpu_reg_names[instr.frdp()]);
+    return format + 4;
+  } else if (STRING_STARTS_WITH(format, "frs1p")) {
+    Printf("%s", fpu_reg_names[instr.frs1p()]);
+    return format + 5;
+  } else if (STRING_STARTS_WITH(format, "frs2p")) {
+    Printf("%s", fpu_reg_names[instr.frs2p()]);
+    return format + 5;
+  } else if (STRING_STARTS_WITH(format, "frd")) {
+    Printf("%s", fpu_reg_names[instr.frd()]);
+    return format + 3;
+  } else if (STRING_STARTS_WITH(format, "frs1")) {
+    Printf("%s", fpu_reg_names[instr.frs1()]);
+    return format + 4;
+  } else if (STRING_STARTS_WITH(format, "frs2")) {
+    Printf("%s", fpu_reg_names[instr.frs2()]);
+    return format + 4;
+  } else if (STRING_STARTS_WITH(format, "spload4imm")) {
+    Printf("%" Pd, static_cast<intptr_t>(instr.spload4_imm()));
+    return format + 10;
+  } else if (STRING_STARTS_WITH(format, "spload8imm")) {
+    Printf("%" Pd, static_cast<intptr_t>(instr.spload8_imm()));
+    return format + 10;
+  } else if (STRING_STARTS_WITH(format, "spstore4imm")) {
+    Printf("%" Pd, static_cast<intptr_t>(instr.spstore4_imm()));
+    return format + 11;
+  } else if (STRING_STARTS_WITH(format, "spstore8imm")) {
+    Printf("%" Pd, static_cast<intptr_t>(instr.spstore8_imm()));
+    return format + 11;
+  } else if (STRING_STARTS_WITH(format, "mem4imm")) {
+    Printf("%" Pd, static_cast<intptr_t>(instr.mem4_imm()));
+    return format + 7;
+  } else if (STRING_STARTS_WITH(format, "mem8imm")) {
+    Printf("%" Pd, static_cast<intptr_t>(instr.mem8_imm()));
+    return format + 7;
+  } else if (STRING_STARTS_WITH(format, "jimm")) {
+    Printf("%+" Pd, static_cast<intptr_t>(instr.j_imm()));
+    return format + 4;
+  } else if (STRING_STARTS_WITH(format, "bimm")) {
+    Printf("%+" Pd, static_cast<intptr_t>(instr.b_imm()));
+    return format + 4;
+  } else if (STRING_STARTS_WITH(format, "iimm")) {
+    Printf("%" Pd, static_cast<intptr_t>(instr.i_imm()));
+    return format + 4;
+  } else if (STRING_STARTS_WITH(format, "uimm")) {
+    // objdump instead displays (imm >> 12) as hex.
+    Printf("%" Pd, static_cast<intptr_t>(instr.u_imm()));
+    return format + 4;
+  } else if (STRING_STARTS_WITH(format, "i16imm")) {
+    Printf("%" Pd, static_cast<intptr_t>(instr.i16_imm()));
+    return format + 6;
+  } else if (STRING_STARTS_WITH(format, "i4spnimm")) {
+    Printf("%" Pd, static_cast<intptr_t>(instr.i4spn_imm()));
+    return format + 8;
+  }
+
+  FATAL1("Bad format %s\n", format);
+  return nullptr;
+}
+
+void Disassembler::DecodeInstruction(char* hex_buffer,
+                                     intptr_t hex_size,
+                                     char* human_buffer,
+                                     intptr_t human_size,
+                                     int* out_instr_size,
+                                     const Code& code,
+                                     Object** object,
+                                     uword pc) {
+  RISCVDisassembler decoder(human_buffer, human_size,
+                            FLAG_use_compressed_instructions ? RV_GC : RV_G);
+  int instr_size = decoder.Disassemble(pc);
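+  // Compressed (C extension) instructions occupy 2 bytes; all other
+  // instructions occupy 4.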
+  if (instr_size == 2) {
+    Utils::SNPrint(hex_buffer, hex_size, "    %04x",
+                   *reinterpret_cast<uint16_t*>(pc));
+  } else if (instr_size == 4) {
+    Utils::SNPrint(hex_buffer, hex_size, "%08x",
+                   *reinterpret_cast<uint32_t*>(pc));
+  }
+  if (out_instr_size) {
+    *out_instr_size = instr_size;
+  }
+
+  *object = NULL;
+  if (!code.IsNull()) {
+    *object = &Object::Handle();
+    if (!DecodeLoadObjectFromPoolOrThread(pc, code, *object)) {
+      *object = NULL;
+    }
+  }
+}
+
+#endif  // !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER)
+
+}  // namespace dart
+
+#endif  // defined(TARGET_ARCH_RISCV)
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_riscv.cc b/runtime/vm/compiler/backend/flow_graph_compiler_riscv.cc
new file mode 100644
index 0000000..c31d864
--- /dev/null
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_riscv.cc
@@ -0,0 +1,1164 @@
+// Copyright (c) 2021, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/globals.h"  // Needed here to get TARGET_ARCH_RISCV.
+#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+
+#include "vm/compiler/backend/flow_graph_compiler.h"
+
+#include "vm/compiler/api/type_check_mode.h"
+#include "vm/compiler/backend/il_printer.h"
+#include "vm/compiler/backend/locations.h"
+#include "vm/compiler/jit/compiler.h"
+#include "vm/cpu.h"
+#include "vm/dart_entry.h"
+#include "vm/deopt_instructions.h"
+#include "vm/dispatch_table.h"
+#include "vm/instructions.h"
+#include "vm/object_store.h"
+#include "vm/parser.h"
+#include "vm/stack_frame.h"
+#include "vm/stub_code.h"
+#include "vm/symbols.h"
+
+namespace dart {
+
+DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
+DECLARE_FLAG(bool, enable_simd_inline);
+
+void FlowGraphCompiler::ArchSpecificInitialization() {
+  // Note: Unlike the other architectures, we are not using PC-relative calls
+  // in AOT to call the write barrier stubs. We are making use of TMP as an
+  // alternate link register to avoid spilling RA inline and don't want to
+  // introduce another relocation type.
+}
+
+FlowGraphCompiler::~FlowGraphCompiler() {
+  // BlockInfos are zone-allocated, so their destructors are not called.
+  // Verify the labels explicitly here.
+  for (int i = 0; i < block_info_.length(); ++i) {
+    ASSERT(!block_info_[i]->jump_label()->IsLinked());
+  }
+}
+
+bool FlowGraphCompiler::SupportsUnboxedDoubles() {
+  return true;
+}
+
+bool FlowGraphCompiler::SupportsUnboxedSimd128() {
+  // TODO(riscv): Dynamically test for the vector extension and otherwise
+  // allocate SIMD values to register-pairs or quads?
+  return false;
+}
+
+bool FlowGraphCompiler::CanConvertInt64ToDouble() {
+#if XLEN == 32
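+  // RV32 lacks fcvt.d.l/fcvt.d.lu, so a 64-bit integer cannot be converted
+  // to a double with a single instruction.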
+  return false;
+#else
+  return true;
+#endif
+}
+
+void FlowGraphCompiler::EnterIntrinsicMode() {
+  ASSERT(!intrinsic_mode());
+  intrinsic_mode_ = true;
+  ASSERT(!assembler()->constant_pool_allowed());
+}
+
+void FlowGraphCompiler::ExitIntrinsicMode() {
+  ASSERT(intrinsic_mode());
+  intrinsic_mode_ = false;
+}
+
+TypedDataPtr CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
+                                                DeoptInfoBuilder* builder,
+                                                const Array& deopt_table) {
+  if (deopt_env_ == NULL) {
+    ++builder->current_info_number_;
+    return TypedData::null();
+  }
+
+  intptr_t stack_height = compiler->StackSize();
+  AllocateIncomingParametersRecursive(deopt_env_, &stack_height);
+
+  intptr_t slot_ix = 0;
+  Environment* current = deopt_env_;
+
+  // Emit all kMaterializeObject instructions describing objects to be
+  // materialized during deoptimization as a prefix to the deoptimization
+  // info.
+  EmitMaterializations(deopt_env_, builder);
+
+  // The real frame starts here.
+  builder->MarkFrameStart();
+
+  Zone* zone = compiler->zone();
+
+  builder->AddPp(current->function(), slot_ix++);
+  builder->AddPcMarker(Function::ZoneHandle(zone), slot_ix++);
+  builder->AddCallerFp(slot_ix++);
+  builder->AddReturnAddress(current->function(), deopt_id(), slot_ix++);
+
+  // Emit all values that are needed for materialization as a part of the
+  // expression stack for the bottom-most frame. This guarantees that GC
+  // will be able to find them during materialization.
+  slot_ix = builder->EmitMaterializationArguments(slot_ix);
+
+  // For the innermost environment, set outgoing arguments and the locals.
+  for (intptr_t i = current->Length() - 1;
+       i >= current->fixed_parameter_count(); i--) {
+    builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
+  }
+
+  Environment* previous = current;
+  current = current->outer();
+  while (current != NULL) {
+    builder->AddPp(current->function(), slot_ix++);
+    builder->AddPcMarker(previous->function(), slot_ix++);
+    builder->AddCallerFp(slot_ix++);
+
+    // For any outer environment the deopt id is that of the call instruction
+    // which is recorded in the outer environment.
+    builder->AddReturnAddress(current->function(),
+                              DeoptId::ToDeoptAfter(current->GetDeoptId()),
+                              slot_ix++);
+
+    // The values of outgoing arguments can be changed from the inlined call so
+    // we must read them from the previous environment.
+    for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
+      builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i),
+                       slot_ix++);
+    }
+
+    // Set the locals; note that outgoing arguments are not in the environment.
+    for (intptr_t i = current->Length() - 1;
+         i >= current->fixed_parameter_count(); i--) {
+      builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
+    }
+
+    // Iterate on the outer environment.
+    previous = current;
+    current = current->outer();
+  }
+  // The previous pointer is now the outermost environment.
+  ASSERT(previous != NULL);
+
+  // Add slots for the outermost environment.
+  builder->AddCallerPp(slot_ix++);
+  builder->AddPcMarker(previous->function(), slot_ix++);
+  builder->AddCallerFp(slot_ix++);
+  builder->AddCallerPc(slot_ix++);
+
+  // For the outermost environment, set the incoming arguments.
+  for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
+    builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++);
+  }
+
+  return builder->CreateDeoptInfo(deopt_table);
+}
+
+void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
+                                             intptr_t stub_ix) {
+  // Calls do not need stubs; they share a deoptimization trampoline.
+  ASSERT(reason() != ICData::kDeoptAtCall);
+  compiler::Assembler* assembler = compiler->assembler();
+#define __ assembler->
+  __ Comment("%s", Name());
+  __ Bind(entry_label());
+  if (FLAG_trap_on_deoptimization) {
+    __ trap();
+  }
+
+  ASSERT(deopt_env() != NULL);
+  __ Call(compiler::Address(THR, Thread::deoptimize_entry_offset()));
+  set_pc_offset(assembler->CodeSize());
+#undef __
+}
+
+#define __ assembler->
+// Static methods of FlowGraphCompiler that take an assembler.
+
+void FlowGraphCompiler::GenerateIndirectTTSCall(compiler::Assembler* assembler,
+                                                Register reg_to_call,
+                                                intptr_t sub_type_cache_index) {
+  __ LoadField(
+      TTSInternalRegs::kScratchReg,
+      compiler::FieldAddress(
+          reg_to_call,
+          compiler::target::AbstractType::type_test_stub_entry_point_offset()));
+  __ LoadWordFromPoolIndex(TypeTestABI::kSubtypeTestCacheReg,
+                           sub_type_cache_index);
+  __ jalr(TTSInternalRegs::kScratchReg);
+}
+
+#undef __
+#define __ assembler()->
+// Instance methods of FlowGraphCompiler.
+
+// Fall through if bool_register contains null.
+void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
+                                           compiler::Label* is_true,
+                                           compiler::Label* is_false) {
+  compiler::Label fall_through;
+  __ beq(bool_register, NULL_REG, &fall_through,
+         compiler::Assembler::kNearJump);
+  BranchLabels labels = {is_true, is_false, &fall_through};
+  Condition true_condition =
+      EmitBoolTest(bool_register, labels, /*invert=*/false);
+  ASSERT(true_condition != kInvalidCondition);
+  __ BranchIf(true_condition, is_true);
+  __ j(is_false);
+  __ Bind(&fall_through);
+}
+
+void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
+  if (is_optimizing()) {
+    return;
+  }
+  Definition* defn = instr->AsDefinition();
+  if ((defn != NULL) && defn->HasTemp()) {
+    const Location value = defn->locs()->out(0);
+    if (value.IsRegister()) {
+      __ PushRegister(value.reg());
+    } else if (value.IsFpuRegister()) {
+      ASSERT(instr->representation() == kUnboxedDouble);
+      // At the instruction epilogue in unoptimized code, the only
+      // live register is the output register.
+      instr->locs()->live_registers()->Clear();
+      __ MoveUnboxedDouble(BoxDoubleStubABI::kValueReg, value.fpu_reg());
+      GenerateNonLazyDeoptableStubCall(
+          InstructionSource(),  // No token position.
+          StubCode::BoxDouble(), UntaggedPcDescriptors::kOther, instr->locs());
+      __ PushRegister(BoxDoubleStubABI::kResultReg);
+    } else {
+      UNREACHABLE();
+    }
+  }
+}
+
+void FlowGraphCompiler::GenerateMethodExtractorIntrinsic(
+    const Function& extracted_method,
+    intptr_t type_arguments_field_offset) {
+  // No frame has been setup here.
+  ASSERT(!__ constant_pool_allowed());
+  ASSERT(extracted_method.IsZoneHandle());
+
+  const Code& build_method_extractor =
+      Code::ZoneHandle(extracted_method.IsGeneric()
+                           ? isolate_group()
+                                 ->object_store()
+                                 ->build_generic_method_extractor_code()
+                           : isolate_group()
+                                 ->object_store()
+                                 ->build_nongeneric_method_extractor_code());
+
+  const intptr_t stub_index = __ object_pool_builder().AddObject(
+      build_method_extractor, ObjectPool::Patchability::kNotPatchable);
+  const intptr_t function_index = __ object_pool_builder().AddObject(
+      extracted_method, ObjectPool::Patchability::kNotPatchable);
+
+  // We use a custom pool register to preserve caller PP.
+  Register kPoolReg = A1;
+
+  // T1 = extracted function
+  // T4 = offset of type argument vector (or 0 if class is not generic)
+  intptr_t pp_offset = 0;
+  if (FLAG_precompiled_mode) {
+    // PP is not tagged on riscv.
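+    // LoadFieldFromOffset subtracts kHeapObjectTag from its offset, so add
+    // the tag back when indexing through the untagged PP.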
+    kPoolReg = PP;
+    pp_offset = kHeapObjectTag;
+  } else {
+    __ LoadFieldFromOffset(kPoolReg, CODE_REG, Code::object_pool_offset());
+  }
+  __ LoadImmediate(T4, type_arguments_field_offset);
+  __ LoadFieldFromOffset(
+      T1, kPoolReg, ObjectPool::element_offset(function_index) + pp_offset);
+  __ LoadFieldFromOffset(CODE_REG, kPoolReg,
+                         ObjectPool::element_offset(stub_index) + pp_offset);
+  __ LoadFieldFromOffset(TMP, CODE_REG,
+                         Code::entry_point_offset(Code::EntryKind::kUnchecked));
+  __ jr(TMP);
+}
+
+void FlowGraphCompiler::EmitFrameEntry() {
+  const Function& function = parsed_function().function();
+  if (CanOptimizeFunction() && function.IsOptimizable() &&
+      (!is_optimizing() || may_reoptimize())) {
+    __ Comment("Invocation Count Check");
+    const Register function_reg = A0;
+    const Register usage_reg = A1;
+    __ lx(function_reg, compiler::FieldAddress(CODE_REG, Code::owner_offset()));
+
+    __ LoadFieldFromOffset(usage_reg, function_reg,
+                           Function::usage_counter_offset(),
+                           compiler::kFourBytes);
+    // Reoptimization of an optimized function is triggered by counting in
+    // IC stubs, but not at the entry of the function.
+    if (!is_optimizing()) {
+      __ addi(usage_reg, usage_reg, 1);
+      __ StoreFieldToOffset(usage_reg, function_reg,
+                            Function::usage_counter_offset(),
+                            compiler::kFourBytes);
+    }
+    __ CompareImmediate(usage_reg, GetOptimizationThreshold());
+    compiler::Label dont_optimize;
+    __ BranchIf(LT, &dont_optimize, compiler::Assembler::kNearJump);
+    __ lx(TMP, compiler::Address(THR, Thread::optimize_entry_offset()));
+    __ jr(TMP);
+    __ Bind(&dont_optimize);
+  }
+
+  if (flow_graph().graph_entry()->NeedsFrame()) {
+    __ Comment("Enter frame");
+    if (flow_graph().IsCompiledForOsr()) {
+      const intptr_t extra_slots = ExtraStackSlotsOnOsrEntry();
+      ASSERT(extra_slots >= 0);
+      __ EnterOsrFrame(extra_slots * kWordSize);
+    } else {
+      ASSERT(StackSize() >= 0);
+      __ EnterDartFrame(StackSize() * kWordSize);
+    }
+  } else if (FLAG_precompiled_mode) {
+    assembler()->set_constant_pool_allowed(true);
+  }
+}
+
+const InstructionSource& PrologueSource() {
+  static InstructionSource prologue_source(TokenPosition::kDartCodePrologue,
+                                           /*inlining_id=*/0);
+  return prologue_source;
+}
+
+void FlowGraphCompiler::EmitPrologue() {
+  BeginCodeSourceRange(PrologueSource());
+
+  EmitFrameEntry();
+  ASSERT(assembler()->constant_pool_allowed());
+
+  // In unoptimized code, initialize (non-argument) stack allocated slots.
+  if (!is_optimizing()) {
+    const int num_locals = parsed_function().num_stack_locals();
+
+    intptr_t args_desc_slot = -1;
+    if (parsed_function().has_arg_desc_var()) {
+      args_desc_slot = compiler::target::frame_layout.FrameSlotForVariable(
+          parsed_function().arg_desc_var());
+    }
+
+    __ Comment("Initialize spill slots");
+    for (intptr_t i = 0; i < num_locals; ++i) {
+      const intptr_t slot_index =
+          compiler::target::frame_layout.FrameSlotForVariableIndex(-i);
+      Register value_reg =
+          slot_index == args_desc_slot ? ARGS_DESC_REG : NULL_REG;
+      __ StoreToOffset(value_reg, FP, slot_index * kWordSize);
+      // TODO(riscv): Using an SP-relative address instead of an FP-relative
+      // address would allow for compressed instructions.
+    }
+  }
+
+  EndCodeSourceRange(PrologueSource());
+}
+
+void FlowGraphCompiler::EmitCallToStub(const Code& stub) {
+  ASSERT(!stub.IsNull());
+  if (CanPcRelativeCall(stub)) {
+    __ GenerateUnRelocatedPcRelativeCall();
+    AddPcRelativeCallStubTarget(stub);
+  } else {
+    __ JumpAndLink(stub);
+    AddStubCallTarget(stub);
+  }
+}
+
+void FlowGraphCompiler::EmitTailCallToStub(const Code& stub) {
+  ASSERT(!stub.IsNull());
+  if (CanPcRelativeCall(stub)) {
+    __ LeaveDartFrame();
+    __ GenerateUnRelocatedPcRelativeTailCall();
+    AddPcRelativeTailCallStubTarget(stub);
+#if defined(DEBUG)
+    __ Breakpoint();
+#endif
+  } else {
+    __ LoadObject(CODE_REG, stub);
+    __ LeaveDartFrame();
+    __ lx(TMP, compiler::FieldAddress(
+                   CODE_REG, compiler::target::Code::entry_point_offset()));
+    __ jr(TMP);
+    AddStubCallTarget(stub);
+  }
+}
+
+void FlowGraphCompiler::GeneratePatchableCall(const InstructionSource& source,
+                                              const Code& stub,
+                                              UntaggedPcDescriptors::Kind kind,
+                                              LocationSummary* locs) {
+  __ JumpAndLinkPatchable(stub);
+  EmitCallsiteMetadata(source, DeoptId::kNone, kind, locs,
+                       pending_deoptimization_env_);
+}
+
+void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
+                                         const InstructionSource& source,
+                                         const Code& stub,
+                                         UntaggedPcDescriptors::Kind kind,
+                                         LocationSummary* locs,
+                                         Code::EntryKind entry_kind) {
+  ASSERT(CanCallDart());
+  __ JumpAndLinkPatchable(stub, entry_kind);
+  EmitCallsiteMetadata(source, deopt_id, kind, locs,
+                       pending_deoptimization_env_);
+}
+
+void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
+                                               const InstructionSource& source,
+                                               UntaggedPcDescriptors::Kind kind,
+                                               LocationSummary* locs,
+                                               const Function& target,
+                                               Code::EntryKind entry_kind) {
+  ASSERT(CanCallDart());
+  if (CanPcRelativeCall(target)) {
+    __ GenerateUnRelocatedPcRelativeCall();
+    AddPcRelativeCallTarget(target, entry_kind);
+    EmitCallsiteMetadata(source, deopt_id, kind, locs,
+                         pending_deoptimization_env_);
+  } else {
+    // Call sites to the same target can share object pool entries. These
+    // call sites are never patched for breakpoints: the function is deoptimized
+    // and the unoptimized code with IC calls for static calls is patched
+    // instead.
+    ASSERT(is_optimizing());
+    const auto& stub = StubCode::CallStaticFunction();
+    __ JumpAndLinkWithEquivalence(stub, target, entry_kind);
+    EmitCallsiteMetadata(source, deopt_id, kind, locs,
+                         pending_deoptimization_env_);
+    AddStaticCallTarget(target, entry_kind);
+  }
+}
+
+void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
+  // We do not check for overflow when incrementing the edge counter.  The
+  // function should normally be optimized long before the counter can
+  // overflow; and though we do not reset the counters when we optimize or
+  // deoptimize, there is a bound on the number of
+  // optimization/deoptimization cycles we will attempt.
+  ASSERT(!edge_counters_array_.IsNull());
+  ASSERT(assembler_->constant_pool_allowed());
+  __ Comment("Edge counter");
+  __ LoadObject(A0, edge_counters_array_);
+  __ LoadFieldFromOffset(TMP, A0, Array::element_offset(edge_id));
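+  // The counter is a Smi, so adding the raw (tagged) representation of 1
+  // increments it without untagging and retagging.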
+  __ addi(TMP, TMP, Smi::RawValue(1));
+  __ StoreFieldToOffset(TMP, A0, Array::element_offset(edge_id));
+}
+
+void FlowGraphCompiler::EmitOptimizedInstanceCall(
+    const Code& stub,
+    const ICData& ic_data,
+    intptr_t deopt_id,
+    const InstructionSource& source,
+    LocationSummary* locs,
+    Code::EntryKind entry_kind) {
+  ASSERT(CanCallDart());
+  ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
+  // Each ICData propagated from unoptimized to optimized code contains the
+  // function that corresponds to the Dart function of that IC call. Due
+  // to inlining in optimized code, that function may not correspond to the
+  // top-level function (parsed_function().function()), which could be
+  // reoptimized and whose usage counter needs to be incremented.
+  // Pass the function explicitly; it is used by the IC stub.
+
+  __ LoadObject(A6, parsed_function().function());
+  __ LoadFromOffset(A0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
+  __ LoadUniqueObject(IC_DATA_REG, ic_data);
+  GenerateDartCall(deopt_id, source, stub, UntaggedPcDescriptors::kIcCall, locs,
+                   entry_kind);
+  __ Drop(ic_data.SizeWithTypeArgs());
+}
+
+void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
+                                            const ICData& ic_data,
+                                            intptr_t deopt_id,
+                                            const InstructionSource& source,
+                                            LocationSummary* locs,
+                                            Code::EntryKind entry_kind) {
+  ASSERT(CanCallDart());
+  ASSERT(entry_kind == Code::EntryKind::kNormal ||
+         entry_kind == Code::EntryKind::kUnchecked);
+  ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
+  __ LoadFromOffset(A0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
+  __ LoadUniqueObject(IC_DATA_REG, ic_data);
+  __ LoadUniqueObject(CODE_REG, stub);
+  const intptr_t entry_point_offset =
+      entry_kind == Code::EntryKind::kNormal
+          ? Code::entry_point_offset(Code::EntryKind::kMonomorphic)
+          : Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked);
+  __ lx(RA, compiler::FieldAddress(CODE_REG, entry_point_offset));
+  __ jalr(RA);
+  EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kIcCall, locs,
+                       pending_deoptimization_env_);
+  __ Drop(ic_data.SizeWithTypeArgs());
+}
+
+void FlowGraphCompiler::EmitMegamorphicInstanceCall(
+    const String& name,
+    const Array& arguments_descriptor,
+    intptr_t deopt_id,
+    const InstructionSource& source,
+    LocationSummary* locs) {
+  ASSERT(CanCallDart());
+  ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
+  const ArgumentsDescriptor args_desc(arguments_descriptor);
+  const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
+      zone(),
+      MegamorphicCacheTable::Lookup(thread(), name, arguments_descriptor));
+
+  __ Comment("MegamorphicCall");
+  // Load receiver into A0.
+  __ LoadFromOffset(A0, SP,
+                    (args_desc.Count() - 1) * compiler::target::kWordSize);
+  // Use the same code pattern as an instance call so that it can be parsed
+  // by the code patcher.
+  if (FLAG_precompiled_mode) {
+    UNIMPLEMENTED();
+  } else {
+    __ LoadUniqueObject(IC_DATA_REG, cache);
+    __ LoadUniqueObject(CODE_REG, StubCode::MegamorphicCall());
+    __ Call(compiler::FieldAddress(
+        CODE_REG, Code::entry_point_offset(Code::EntryKind::kMonomorphic)));
+  }
+
+  RecordSafepoint(locs);
+  AddCurrentDescriptor(UntaggedPcDescriptors::kOther, DeoptId::kNone, source);
+  if (!FLAG_precompiled_mode) {
+    const intptr_t deopt_id_after = DeoptId::ToDeoptAfter(deopt_id);
+    if (is_optimizing()) {
+      AddDeoptIndexAtCall(deopt_id_after, pending_deoptimization_env_);
+    } else {
+      // Add deoptimization continuation point after the call and before the
+      // arguments are removed.
+      AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, deopt_id_after,
+                           source);
+    }
+  }
+  RecordCatchEntryMoves(pending_deoptimization_env_);
+  __ Drop(args_desc.SizeWithTypeArgs());
+}
+
+void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
+                                            intptr_t deopt_id,
+                                            const InstructionSource& source,
+                                            LocationSummary* locs,
+                                            Code::EntryKind entry_kind,
+                                            bool receiver_can_be_smi) {
+  ASSERT(CanCallDart());
+  ASSERT(ic_data.NumArgsTested() == 1);
+  const Code& initial_stub = StubCode::SwitchableCallMiss();
+  const char* switchable_call_mode = "smiable";
+  if (!receiver_can_be_smi) {
+    switchable_call_mode = "non-smi";
+    ic_data.set_receiver_cannot_be_smi(true);
+  }
+  const UnlinkedCall& data =
+      UnlinkedCall::ZoneHandle(zone(), ic_data.AsUnlinkedCall());
+
+  __ Comment("InstanceCallAOT (%s)", switchable_call_mode);
+  // Clear the arguments descriptor to keep the GC happy when it gets pushed
+  // onto the stack.
+  __ LoadImmediate(ARGS_DESC_REG, 0);
+  __ LoadFromOffset(A0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
+  if (FLAG_precompiled_mode) {
+    // The AOT runtime will replace the slot in the object pool with the
+    // entry point address; see app_snapshot.cc.
+    __ LoadUniqueObject(RA, initial_stub);
+  } else {
+    __ LoadUniqueObject(CODE_REG, initial_stub);
+    const intptr_t entry_point_offset =
+        entry_kind == Code::EntryKind::kNormal
+            ? compiler::target::Code::entry_point_offset(
+                  Code::EntryKind::kMonomorphic)
+            : compiler::target::Code::entry_point_offset(
+                  Code::EntryKind::kMonomorphicUnchecked);
+    __ lx(RA, compiler::FieldAddress(CODE_REG, entry_point_offset));
+  }
+  __ LoadUniqueObject(IC_DATA_REG, data);
+  __ jalr(RA);
+
+  EmitCallsiteMetadata(source, DeoptId::kNone, UntaggedPcDescriptors::kOther,
+                       locs, pending_deoptimization_env_);
+  __ Drop(ic_data.SizeWithTypeArgs());
+}
+
+void FlowGraphCompiler::EmitUnoptimizedStaticCall(
+    intptr_t size_with_type_args,
+    intptr_t deopt_id,
+    const InstructionSource& source,
+    LocationSummary* locs,
+    const ICData& ic_data,
+    Code::EntryKind entry_kind) {
+  ASSERT(CanCallDart());
+  const Code& stub =
+      StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
+  __ LoadObject(IC_DATA_REG, ic_data);
+  GenerateDartCall(deopt_id, source, stub,
+                   UntaggedPcDescriptors::kUnoptStaticCall, locs, entry_kind);
+  __ Drop(size_with_type_args);
+}
+
+void FlowGraphCompiler::EmitOptimizedStaticCall(
+    const Function& function,
+    const Array& arguments_descriptor,
+    intptr_t size_with_type_args,
+    intptr_t deopt_id,
+    const InstructionSource& source,
+    LocationSummary* locs,
+    Code::EntryKind entry_kind) {
+  ASSERT(CanCallDart());
+  ASSERT(!function.IsClosureFunction());
+  if (function.HasOptionalParameters() || function.IsGeneric()) {
+    __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
+  } else {
+    if (!FLAG_precompiled_mode) {
+      __ LoadImmediate(ARGS_DESC_REG, 0);  // GC safe smi zero because of stub.
+    }
+  }
+  // Do not use the code from the function, but let the code be patched so that
+  // we can record the outgoing edges to other code.
+  GenerateStaticDartCall(deopt_id, source, UntaggedPcDescriptors::kOther, locs,
+                         function, entry_kind);
+  __ Drop(size_with_type_args);
+}
+
+void FlowGraphCompiler::EmitDispatchTableCall(
+    int32_t selector_offset,
+    const Array& arguments_descriptor) {
+  const auto cid_reg = DispatchTableNullErrorABI::kClassIdReg;
+  ASSERT(CanCallDart());
+  ASSERT(cid_reg != ARGS_DESC_REG);
+  if (!arguments_descriptor.IsNull()) {
+    __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
+  }
+  const intptr_t offset = selector_offset - DispatchTable::OriginElement();
+  // We would like cid_reg to remain available on entry to the target
+  // function for checking purposes.
+  ASSERT(cid_reg != TMP);
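+  // Split the scaled offset so that hi + lo == imm, with lo the
+  // sign-extended low 12 bits (fitting the load's immediate) and hi a
+  // multiple of 0x1000 materialized with lui. E.g. an imm of 0x1801 splits
+  // into hi = 0x2000 and lo = -0x7ff.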
+  intx_t imm = offset << compiler::target::kWordSizeLog2;
+  intx_t lo = imm << (XLEN - 12) >> (XLEN - 12);
+  intx_t hi = (imm - lo) << (XLEN - 32) >> (XLEN - 32);
+  __ slli(TMP, cid_reg, compiler::target::kWordSizeLog2);
+  if (hi != 0) {
+    __ lui(TMP2, hi);
+    __ add(TMP, TMP, TMP2);
+  }
+  __ add(TMP, TMP, DISPATCH_TABLE_REG);
+  __ lx(TMP, compiler::Address(TMP, lo));
+  __ jalr(TMP);
+}
+
+Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
+    Register reg,
+    const Object& obj,
+    bool needs_number_check,
+    const InstructionSource& source,
+    intptr_t deopt_id) {
+  if (needs_number_check) {
+    ASSERT(!obj.IsMint() && !obj.IsDouble());
+    __ LoadObject(TMP, obj);
+    __ PushRegisterPair(TMP, reg);
+    if (is_optimizing()) {
+      __ JumpAndLinkPatchable(StubCode::OptimizedIdenticalWithNumberCheck());
+    } else {
+      __ JumpAndLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
+    }
+    AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id, source);
+    __ PopRegisterPair(ZR, reg);
+    // RISC-V has no condition flags, so the result is instead returned in
+    // TMP: zero if equal, non-zero if not equal.
+    ASSERT(reg != TMP);
+    __ CompareImmediate(TMP, 0);
+  } else {
+    __ CompareObject(reg, obj);
+  }
+  return EQ;
+}
+
+Condition FlowGraphCompiler::EmitEqualityRegRegCompare(
+    Register left,
+    Register right,
+    bool needs_number_check,
+    const InstructionSource& source,
+    intptr_t deopt_id) {
+  if (needs_number_check) {
+    __ PushRegisterPair(right, left);
+    if (is_optimizing()) {
+      __ JumpAndLinkPatchable(StubCode::OptimizedIdenticalWithNumberCheck());
+    } else {
+      __ JumpAndLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
+    }
+    AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id, source);
+    __ PopRegisterPair(right, left);
+    // RISC-V has no condition flags, so the result is instead returned in
+    // TMP: zero if equal, non-zero if not equal.
+    ASSERT(left != TMP);
+    ASSERT(right != TMP);
+    __ CompareImmediate(TMP, 0);
+  } else {
+    __ CompareObjectRegisters(left, right);
+  }
+  return EQ;
+}
+
+Condition FlowGraphCompiler::EmitBoolTest(Register value,
+                                          BranchLabels labels,
+                                          bool invert) {
+  __ Comment("BoolTest");
+  __ TestImmediate(value, compiler::target::ObjectAlignment::kBoolValueMask);
+  return invert ? NE : EQ;
+}
+
+// This function must be in sync with FlowGraphCompiler::RecordSafepoint and
+// FlowGraphCompiler::SlowPathEnvironmentFor.
+void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
+#if defined(DEBUG)
+  locs->CheckWritableInputs();
+  ClobberDeadTempRegisters(locs);
+#endif
+  // TODO(vegorov): consider saving only caller-save (volatile) registers.
+  __ PushRegisters(*locs->live_registers());
+}
+
+void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
+  __ PopRegisters(*locs->live_registers());
+}
+
+#if defined(DEBUG)
+void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
+  // Clobber temporaries that have not been manually preserved.
+  for (intptr_t i = 0; i < locs->temp_count(); ++i) {
+    Location tmp = locs->temp(i);
+    // TODO(zerny): clobber non-live temporary FPU registers.
+    if (tmp.IsRegister() &&
+        !locs->live_registers()->ContainsRegister(tmp.reg())) {
+      __ li(tmp.reg(), 0xf7);
+    }
+  }
+}
+#endif
+
+Register FlowGraphCompiler::EmitTestCidRegister() {
+  return A1;
+}
+
+void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
+    intptr_t count_without_type_args,
+    const Array& arguments_descriptor) {
+  __ Comment("EmitTestAndCall");
+  // Load receiver into A0.
+  __ LoadFromOffset(A0, SP, (count_without_type_args - 1) * kWordSize);
+  __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
+}
+
+void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label,
+                                                 bool if_smi) {
+  if (if_smi) {
+    __ BranchIfSmi(A0, label);
+  } else {
+    __ BranchIfNotSmi(A0, label);
+  }
+}
+
+void FlowGraphCompiler::EmitTestAndCallLoadCid(Register class_id_reg) {
+  ASSERT(class_id_reg != A0);
+  __ LoadClassId(class_id_reg, A0);
+}
+
+#undef __
+#define __ assembler->
+
+int FlowGraphCompiler::EmitTestAndCallCheckCid(compiler::Assembler* assembler,
+                                               compiler::Label* label,
+                                               Register class_id_reg,
+                                               const CidRangeValue& range,
+                                               int bias,
+                                               bool jump_on_miss) {
+  const intptr_t cid_start = range.cid_start;
+  if (range.IsSingleCid()) {
+    __ AddImmediate(class_id_reg, class_id_reg, bias - cid_start);
+    if (jump_on_miss) {
+      __ bnez(class_id_reg, label);
+    } else {
+      __ beqz(class_id_reg, label);
+    }
+    bias = cid_start;
+  } else {
+    __ AddImmediate(class_id_reg, class_id_reg, bias - cid_start);
+    bias = cid_start;
+    __ CompareImmediate(class_id_reg, range.Extent());
+    __ BranchIf(jump_on_miss ? UNSIGNED_GREATER : UNSIGNED_LESS_EQUAL, label);
+  }
+  return bias;
+}
+
+#undef __
+#define __ assembler()->
+
+void FlowGraphCompiler::EmitMove(Location destination,
+                                 Location source,
+                                 TemporaryRegisterAllocator* allocator) {
+  if (destination.Equals(source)) return;
+
+  if (source.IsRegister()) {
+    if (destination.IsRegister()) {
+      __ mv(destination.reg(), source.reg());
+    } else {
+      ASSERT(destination.IsStackSlot());
+      const intptr_t dest_offset = destination.ToStackSlotOffset();
+      __ StoreToOffset(source.reg(), destination.base_reg(), dest_offset);
+    }
+  } else if (source.IsStackSlot()) {
+    if (destination.IsRegister()) {
+      const intptr_t source_offset = source.ToStackSlotOffset();
+      __ LoadFromOffset(destination.reg(), source.base_reg(), source_offset);
+    } else if (destination.IsFpuRegister()) {
+      const intptr_t src_offset = source.ToStackSlotOffset();
+      FRegister dst = destination.fpu_reg();
+      __ fld(dst, compiler::Address(source.base_reg(), src_offset));
+    } else {
+      ASSERT(destination.IsStackSlot());
+      const intptr_t source_offset = source.ToStackSlotOffset();
+      const intptr_t dest_offset = destination.ToStackSlotOffset();
+      Register tmp = allocator->AllocateTemporary();
+      __ LoadFromOffset(tmp, source.base_reg(), source_offset);
+      __ StoreToOffset(tmp, destination.base_reg(), dest_offset);
+      allocator->ReleaseTemporary();
+    }
+  } else if (source.IsFpuRegister()) {
+    if (destination.IsFpuRegister()) {
+      __ fmvd(destination.fpu_reg(), source.fpu_reg());
+    } else {
+      if (destination.IsStackSlot() /*32-bit float*/ ||
+          destination.IsDoubleStackSlot()) {
+        const intptr_t dest_offset = destination.ToStackSlotOffset();
+        FRegister src = source.fpu_reg();
+        __ fsd(src, compiler::Address(destination.base_reg(), dest_offset));
+      } else {
+        ASSERT(destination.IsQuadStackSlot());
+        UNIMPLEMENTED();
+      }
+    }
+  } else if (source.IsDoubleStackSlot()) {
+    if (destination.IsFpuRegister()) {
+      const intptr_t source_offset = source.ToStackSlotOffset();
+      const FRegister dst = destination.fpu_reg();
+      __ fld(dst, compiler::Address(source.base_reg(), source_offset));
+    } else {
+      ASSERT(destination.IsDoubleStackSlot() ||
+             destination.IsStackSlot() /*32-bit float*/);
+      const intptr_t source_offset = source.ToStackSlotOffset();
+      const intptr_t dest_offset = destination.ToStackSlotOffset();
+      __ fld(FTMP, compiler::Address(source.base_reg(), source_offset));
+      __ fsd(FTMP, compiler::Address(destination.base_reg(), dest_offset));
+    }
+  } else if (source.IsQuadStackSlot()) {
+    UNIMPLEMENTED();
+  } else if (source.IsPairLocation()) {
+#if XLEN == 32
+    ASSERT(destination.IsPairLocation());
+    for (intptr_t i : {0, 1}) {
+      EmitMove(destination.Component(i), source.Component(i), allocator);
+    }
+#else
+    UNREACHABLE();
+#endif
+  } else {
+    ASSERT(source.IsConstant());
+    if (destination.IsStackSlot()) {
+      Register tmp = allocator->AllocateTemporary();
+      source.constant_instruction()->EmitMoveToLocation(this, destination, tmp,
+                                                        source.pair_index());
+      allocator->ReleaseTemporary();
+    } else {
+      source.constant_instruction()->EmitMoveToLocation(
+          this, destination, kNoRegister, source.pair_index());
+    }
+  }
+}
+
+static compiler::OperandSize BytesToOperandSize(intptr_t bytes) {
+  switch (bytes) {
+    case 8:
+      return compiler::OperandSize::kEightBytes;
+    case 4:
+      return compiler::OperandSize::kFourBytes;
+    case 2:
+      return compiler::OperandSize::kTwoBytes;
+    case 1:
+      return compiler::OperandSize::kByte;
+    default:
+      UNIMPLEMENTED();
+  }
+}
+
+void FlowGraphCompiler::EmitNativeMoveArchitecture(
+    const compiler::ffi::NativeLocation& destination,
+    const compiler::ffi::NativeLocation& source) {
+  const auto& src_type = source.payload_type();
+  const auto& dst_type = destination.payload_type();
+  ASSERT(src_type.IsFloat() == dst_type.IsFloat());
+  ASSERT(src_type.IsInt() == dst_type.IsInt());
+  ASSERT(src_type.IsSigned() == dst_type.IsSigned());
+  ASSERT(src_type.IsPrimitive());
+  ASSERT(dst_type.IsPrimitive());
+  const intptr_t src_size = src_type.SizeInBytes();
+  const intptr_t dst_size = dst_type.SizeInBytes();
+  const bool sign_or_zero_extend = dst_size > src_size;
+
+  if (source.IsRegisters()) {
+    const auto& src = source.AsRegisters();
+    ASSERT(src.num_regs() == 1);
+    const auto src_reg = src.reg_at(0);
+
+    if (destination.IsRegisters()) {
+      const auto& dst = destination.AsRegisters();
+      ASSERT(dst.num_regs() == 1);
+      const auto dst_reg = dst.reg_at(0);
+      if (!sign_or_zero_extend) {
+        // TODO(riscv): Unreachable? Calling convention always extends.
+        __ mv(dst_reg, src_reg);
+      } else {
+        switch (src_type.AsPrimitive().representation()) {
+          // Calling convention: scalars are extended according to the sign of
+          // their type to 32 bits, then sign-extended to XLEN bits.
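+          // For example, kInt8 on RV64: the slli/srai pair below moves bit 7
+          // to the MSB and then replicates it across the upper 56 bits,
+          // yielding the byte sign-extended to XLEN.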
+          case compiler::ffi::kInt8:
+            __ slli(dst_reg, src_reg, XLEN - 8);
+            __ srai(dst_reg, dst_reg, XLEN - 8);
+            return;
+          case compiler::ffi::kInt16:
+            __ slli(dst_reg, src_reg, XLEN - 16);
+            __ srai(dst_reg, dst_reg, XLEN - 16);
+            return;
+          case compiler::ffi::kUint8:
+            __ andi(dst_reg, src_reg, 0xFF);
+            return;
+          case compiler::ffi::kUint16:
+            __ slli(dst_reg, src_reg, 16);
+#if XLEN == 32
+            __ srli(dst_reg, dst_reg, 16);
+#else
+            __ srliw(dst_reg, dst_reg, 16);
+#endif
+            return;
+          default:
+            // Widening from 32 to 64 bits is covered in IL by Representation
+            // conversions.
+            UNIMPLEMENTED();
+        }
+      }
+
+    } else if (destination.IsFpuRegisters()) {
+      // FPU registers should only contain doubles and registers only ints.
+      UNIMPLEMENTED();
+
+    } else {
+      ASSERT(destination.IsStack());
+      const auto& dst = destination.AsStack();
+      ASSERT(!sign_or_zero_extend);
+      auto const op_size = BytesToOperandSize(dst_size);
+      __ StoreToOffset(src.reg_at(0), dst.base_register(),
+                       dst.offset_in_bytes(), op_size);
+    }
+  } else if (source.IsFpuRegisters()) {
+    const auto& src = source.AsFpuRegisters();
+    // We have not implemented conversions here, use IL convert instructions.
+    ASSERT(src_type.Equals(dst_type));
+
+    if (destination.IsRegisters()) {
+      // FPU registers should only contain doubles and registers only ints.
+      UNIMPLEMENTED();
+
+    } else if (destination.IsFpuRegisters()) {
+      const auto& dst = destination.AsFpuRegisters();
+      __ fmvd(dst.fpu_reg(), src.fpu_reg());
+
+    } else {
+      ASSERT(destination.IsStack());
+      ASSERT(src_type.IsFloat());
+      const auto& dst = destination.AsStack();
+      switch (dst_size) {
+        case 8:
+          __ StoreDToOffset(src.fpu_reg(), dst.base_register(),
+                            dst.offset_in_bytes());
+          return;
+        case 4:
+          __ StoreSToOffset(src.fpu_reg(), dst.base_register(),
+                            dst.offset_in_bytes());
+          return;
+        default:
+          UNREACHABLE();
+      }
+    }
+
+  } else {
+    ASSERT(source.IsStack());
+    const auto& src = source.AsStack();
+    if (destination.IsRegisters()) {
+      const auto& dst = destination.AsRegisters();
+      ASSERT(dst.num_regs() == 1);
+      const auto dst_reg = dst.reg_at(0);
+      ASSERT(!sign_or_zero_extend);
+      __ LoadFromOffset(dst_reg, src.base_register(), src.offset_in_bytes(),
+                        BytesToOperandSize(dst_size));
+    } else if (destination.IsFpuRegisters()) {
+      ASSERT(src_type.Equals(dst_type));
+      ASSERT(src_type.IsFloat());
+      const auto& dst = destination.AsFpuRegisters();
+      switch (src_size) {
+        case 8:
+          __ LoadDFromOffset(dst.fpu_reg(), src.base_register(),
+                             src.offset_in_bytes());
+          return;
+        case 4:
+          __ LoadSFromOffset(dst.fpu_reg(), src.base_register(),
+                             src.offset_in_bytes());
+          return;
+        default:
+          UNIMPLEMENTED();
+      }
+
+    } else {
+      ASSERT(destination.IsStack());
+      UNREACHABLE();
+    }
+  }
+}
+
+void FlowGraphCompiler::LoadBSSEntry(BSS::Relocation relocation,
+                                     Register dst,
+                                     Register tmp) {
+  UNIMPLEMENTED();
+}
+
+#undef __
+#define __ compiler_->assembler()->
+
+void ParallelMoveResolver::EmitSwap(int index) {
+  MoveOperands* move = moves_[index];
+  const Location source = move->src();
+  const Location destination = move->dest();
+
+  if (source.IsRegister() && destination.IsRegister()) {
+    ASSERT(source.reg() != TMP);
+    ASSERT(destination.reg() != TMP);
+    __ mv(TMP, source.reg());
+    __ mv(source.reg(), destination.reg());
+    __ mv(destination.reg(), TMP);
+  } else if (source.IsRegister() && destination.IsStackSlot()) {
+    Exchange(source.reg(), destination.base_reg(),
+             destination.ToStackSlotOffset());
+  } else if (source.IsStackSlot() && destination.IsRegister()) {
+    Exchange(destination.reg(), source.base_reg(), source.ToStackSlotOffset());
+  } else if (source.IsStackSlot() && destination.IsStackSlot()) {
+    Exchange(source.base_reg(), source.ToStackSlotOffset(),
+             destination.base_reg(), destination.ToStackSlotOffset());
+  } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
+    const FRegister dst = destination.fpu_reg();
+    const FRegister src = source.fpu_reg();
+    __ fmvd(FTMP, src);
+    __ fmvd(src, dst);
+    __ fmvd(dst, FTMP);
+  } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
+    UNIMPLEMENTED();
+  } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
+    const intptr_t source_offset = source.ToStackSlotOffset();
+    const intptr_t dest_offset = destination.ToStackSlotOffset();
+
+    ScratchFpuRegisterScope ensure_scratch(this, kNoFpuRegister);
+    FRegister scratch = ensure_scratch.reg();
+    __ LoadDFromOffset(FTMP, source.base_reg(), source_offset);
+    __ LoadDFromOffset(scratch, destination.base_reg(), dest_offset);
+    __ StoreDToOffset(FTMP, destination.base_reg(), dest_offset);
+    __ StoreDToOffset(scratch, source.base_reg(), source_offset);
+  } else if (source.IsQuadStackSlot() && destination.IsQuadStackSlot()) {
+    UNIMPLEMENTED();
+  } else {
+    UNREACHABLE();
+  }
+
+  // The swap of source and destination has executed a move from source to
+  // destination.
+  move->Eliminate();
+
+  // Any unperformed (including pending) move with a source of either
+  // this move's source or destination needs to have its source
+  // changed to reflect the state of affairs after the swap.
+  for (int i = 0; i < moves_.length(); ++i) {
+    const MoveOperands& other_move = *moves_[i];
+    if (other_move.Blocks(source)) {
+      moves_[i]->set_src(destination);
+    } else if (other_move.Blocks(destination)) {
+      moves_[i]->set_src(source);
+    }
+  }
+}
+
+void ParallelMoveResolver::MoveMemoryToMemory(const compiler::Address& dst,
+                                              const compiler::Address& src) {
+  UNREACHABLE();
+}
+
+// Do not call or implement this function. Instead, use the form below that
+// uses an offset from the frame pointer instead of an Address.
+void ParallelMoveResolver::Exchange(Register reg,
+                                    const compiler::Address& mem) {
+  UNREACHABLE();
+}
+
+// Do not call or implement this function. Instead, use the form below that
+// uses offsets from the frame pointer instead of Addresses.
+void ParallelMoveResolver::Exchange(const compiler::Address& mem1,
+                                    const compiler::Address& mem2) {
+  UNREACHABLE();
+}
+
+void ParallelMoveResolver::Exchange(Register reg,
+                                    Register base_reg,
+                                    intptr_t stack_offset) {
+  ScratchRegisterScope tmp(this, reg);
+  __ mv(tmp.reg(), reg);
+  __ LoadFromOffset(reg, base_reg, stack_offset);
+  __ StoreToOffset(tmp.reg(), base_reg, stack_offset);
+}
+
+void ParallelMoveResolver::Exchange(Register base_reg1,
+                                    intptr_t stack_offset1,
+                                    Register base_reg2,
+                                    intptr_t stack_offset2) {
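+  // The ScratchRegisterScope argument names a register to exclude from
+  // allocation, keeping the two temporaries distinct.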
+  ScratchRegisterScope tmp1(this, kNoRegister);
+  ScratchRegisterScope tmp2(this, tmp1.reg());
+  __ LoadFromOffset(tmp1.reg(), base_reg1, stack_offset1);
+  __ LoadFromOffset(tmp2.reg(), base_reg2, stack_offset2);
+  __ StoreToOffset(tmp1.reg(), base_reg2, stack_offset2);
+  __ StoreToOffset(tmp2.reg(), base_reg1, stack_offset1);
+}
+
+void ParallelMoveResolver::SpillScratch(Register reg) {
+  __ PushRegister(reg);
+}
+
+void ParallelMoveResolver::RestoreScratch(Register reg) {
+  __ PopRegister(reg);
+}
+
+void ParallelMoveResolver::SpillFpuScratch(FpuRegister reg) {
+  __ subi(SP, SP, sizeof(double));
+  __ fsd(reg, compiler::Address(SP, 0));
+}
+
+void ParallelMoveResolver::RestoreFpuScratch(FpuRegister reg) {
+  __ fld(reg, compiler::Address(SP, 0));
+  __ addi(SP, SP, sizeof(double));
+}
+
+#undef __
+
+}  // namespace dart
+
+#endif  // defined(TARGET_ARCH_RISCV)
diff --git a/runtime/vm/compiler/backend/il.cc b/runtime/vm/compiler/backend/il.cc
index 02daa82..df288f5 100644
--- a/runtime/vm/compiler/backend/il.cc
+++ b/runtime/vm/compiler/backend/il.cc
@@ -5676,7 +5676,7 @@
 
   if (is_smi.IsLinked()) {
     compiler::Label done;
-    __ Jump(&done);
+    __ Jump(&done, compiler::Assembler::kNearJump);
     __ Bind(&is_smi);
     EmitSmiConversion(compiler);
     __ Bind(&done);
@@ -6374,7 +6374,8 @@
   set_native_c_function(native_function);
 }
 
-#if !defined(TARGET_ARCH_ARM) && !defined(TARGET_ARCH_ARM64)
+#if !defined(TARGET_ARCH_ARM) && !defined(TARGET_ARCH_ARM64) &&                \
+    !defined(TARGET_ARCH_RISCV32) && !defined(TARGET_ARCH_RISCV64)
 
 LocationSummary* BitCastInstr::MakeLocationSummary(Zone* zone, bool opt) const {
   UNREACHABLE();
@@ -6384,7 +6385,8 @@
   UNREACHABLE();
 }
 
-#endif  // !defined(TARGET_ARCH_ARM) && !defined(TARGET_ARCH_ARM64)
+#endif  // !defined(TARGET_ARCH_ARM) && !defined(TARGET_ARCH_ARM64) &&         \
+        // !defined(TARGET_ARCH_RISCV32) && !defined(TARGET_ARCH_RISCV64)
 
 Representation FfiCallInstr::RequiredInputRepresentation(intptr_t idx) const {
   if (idx < TargetAddressIndex()) {
diff --git a/runtime/vm/compiler/backend/il.h b/runtime/vm/compiler/backend/il.h
index 719d0d9..58b4a50 100644
--- a/runtime/vm/compiler/backend/il.h
+++ b/runtime/vm/compiler/backend/il.h
@@ -1222,12 +1222,11 @@
   void Unsupported(FlowGraphCompiler* compiler);
 
   static bool SlowPathSharingSupported(bool is_optimizing) {
-#if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_ARM) ||                    \
-    defined(TARGET_ARCH_ARM64)
+#if defined(TARGET_ARCH_IA32)
+    return false;
+#else
     return FLAG_enable_slow_path_sharing && FLAG_precompiled_mode &&
            is_optimizing;
-#else
-    return false;
 #endif
   }
 
@@ -5318,6 +5317,8 @@
                        const Register temp0,
                        const Register temp1);
 
+  void EmitCall(FlowGraphCompiler* compiler, Register target);
+
   Zone* const zone_;
   const compiler::ffi::CallMarshaller& marshaller_;
 
@@ -7969,7 +7970,7 @@
   }
 
   static bool IsSupported(Token::Kind op_kind, Value* left, Value* right) {
-#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_ARM)
+#if defined(TARGET_ARCH_IS_32_BIT)
     switch (op_kind) {
       case Token::kADD:
       case Token::kSUB:
diff --git a/runtime/vm/compiler/backend/il_riscv.cc b/runtime/vm/compiler/backend/il_riscv.cc
new file mode 100644
index 0000000..b10e2d7
--- /dev/null
+++ b/runtime/vm/compiler/backend/il_riscv.cc
@@ -0,0 +1,7378 @@
+// Copyright (c) 2021, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/globals.h"  // Needed here to get TARGET_ARCH_RISCV.
+#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+
+#include "vm/compiler/backend/il.h"
+
+#include "vm/compiler/backend/flow_graph.h"
+#include "vm/compiler/backend/flow_graph_compiler.h"
+#include "vm/compiler/backend/locations.h"
+#include "vm/compiler/backend/locations_helpers.h"
+#include "vm/compiler/backend/range_analysis.h"
+#include "vm/compiler/ffi/native_calling_convention.h"
+#include "vm/compiler/jit/compiler.h"
+#include "vm/dart_entry.h"
+#include "vm/instructions.h"
+#include "vm/object_store.h"
+#include "vm/parser.h"
+#include "vm/simulator.h"
+#include "vm/stack_frame.h"
+#include "vm/stub_code.h"
+#include "vm/symbols.h"
+#include "vm/type_testing_stubs.h"
+
+#define __ (compiler->assembler())->
+#define Z (compiler->zone())
+
+namespace dart {
+
+// Generic summary for call instructions that have all arguments pushed
+// on the stack and return the result in a fixed register A0 (or FA0 if
+// the return type is double).
+LocationSummary* Instruction::MakeCallSummary(Zone* zone,
+                                              const Instruction* instr,
+                                              LocationSummary* locs) {
+  ASSERT(locs == nullptr || locs->always_calls());
+  LocationSummary* result =
+      ((locs == nullptr)
+           ? (new (zone) LocationSummary(zone, 0, 0, LocationSummary::kCall))
+           : locs);
+  const auto representation = instr->representation();
+  switch (representation) {
+    case kTagged:
+      result->set_out(
+          0, Location::RegisterLocation(CallingConventions::kReturnReg));
+      break;
+    case kUnboxedInt64:
+#if XLEN == 32
+      result->set_out(
+          0, Location::Pair(
+                 Location::RegisterLocation(CallingConventions::kReturnReg),
+                 Location::RegisterLocation(
+                     CallingConventions::kSecondReturnReg)));
+#else
+      result->set_out(
+          0, Location::RegisterLocation(CallingConventions::kReturnReg));
+#endif
+      break;
+    case kUnboxedDouble:
+      result->set_out(
+          0, Location::FpuRegisterLocation(CallingConventions::kReturnFpuReg));
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  return result;
+}
+
+LocationSummary* LoadIndexedUnsafeInstr::MakeLocationSummary(Zone* zone,
+                                                             bool opt) const {
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* locs = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+
+  locs->set_in(0, Location::RequiresRegister());
+  switch (representation()) {
+    case kTagged:
+      locs->set_out(0, Location::RequiresRegister());
+      break;
+    case kUnboxedInt64:
+#if XLEN == 32
+      locs->set_out(0, Location::Pair(Location::RequiresRegister(),
+                                      Location::RequiresRegister()));
+#else
+      locs->set_out(0, Location::RequiresRegister());
+#endif
+      break;
+    case kUnboxedDouble:
+      locs->set_out(0, Location::RequiresFpuRegister());
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  return locs;
+}
+
+void LoadIndexedUnsafeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  ASSERT(RequiredInputRepresentation(0) == kTagged);  // It is a Smi.
+  ASSERT(kSmiTag == 0);
+  ASSERT(kSmiTagSize == 1);
+
+  const Register index = locs()->in(0).reg();
+
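+  // index is a tagged Smi (value << kSmiTagSize), so a single slli by
+  // kWordSizeLog2 - kSmiTagSize both untags and scales it to a byte offset.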
+  switch (representation()) {
+    case kTagged: {
+      const auto out = locs()->out(0).reg();
+      __ slli(TMP, index, kWordSizeLog2 - kSmiTagSize);
+      __ add(TMP, TMP, base_reg());
+      __ LoadFromOffset(out, TMP, offset());
+      break;
+    }
+    case kUnboxedInt64: {
+#if XLEN == 32
+      const auto out_lo = locs()->out(0).AsPairLocation()->At(0).reg();
+      const auto out_hi = locs()->out(0).AsPairLocation()->At(1).reg();
+      __ slli(TMP, index, kWordSizeLog2 - kSmiTagSize);
+      __ add(TMP, TMP, base_reg());
+      __ LoadFromOffset(out_lo, TMP, offset());
+      __ LoadFromOffset(out_hi, TMP, offset() + compiler::target::kWordSize);
+#else
+      const auto out = locs()->out(0).reg();
+      __ slli(TMP, index, kWordSizeLog2 - kSmiTagSize);
+      __ add(TMP, TMP, base_reg());
+      __ LoadFromOffset(out, TMP, offset());
+#endif
+      break;
+    }
+    case kUnboxedDouble: {
+      const auto out = locs()->out(0).fpu_reg();
+      const intptr_t kDoubleSizeLog2 = 3;
+      __ slli(TMP, index, kDoubleSizeLog2 - kSmiTagSize);
+      __ add(TMP, TMP, base_reg());
+      __ LoadDFromOffset(out, TMP, offset());
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+DEFINE_BACKEND(StoreIndexedUnsafe,
+               (NoLocation, Register index, Register value)) {
+  ASSERT(instr->RequiredInputRepresentation(
+             StoreIndexedUnsafeInstr::kIndexPos) == kTagged);  // It is a Smi.
+  __ slli(TMP, index, compiler::target::kWordSizeLog2 - kSmiTagSize);
+  __ add(TMP, TMP, instr->base_reg());
+  __ sx(value, compiler::Address(TMP, instr->offset()));
+
+  ASSERT(kSmiTag == 0);
+}
+
+DEFINE_BACKEND(TailCall,
+               (NoLocation,
+                Fixed<Register, ARGS_DESC_REG>,
+                Temp<Register> temp)) {
+  compiler->EmitTailCallToStub(instr->code());
+
+  // Even though the TailCallInstr will be the last instruction in a basic
+  // block, the flow graph compiler will emit native code for other blocks after
+  // the one containing this instruction and needs to be able to use the pool.
+  // (The `LeaveDartFrame` above disables usages of the pool.)
+  __ set_constant_pool_allowed(true);
+}
+
+LocationSummary* MemoryCopyInstr::MakeLocationSummary(Zone* zone,
+                                                      bool opt) const {
+  const intptr_t kNumInputs = 5;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* locs = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  locs->set_in(kSrcPos, Location::WritableRegister());
+  locs->set_in(kDestPos, Location::WritableRegister());
+  locs->set_in(kSrcStartPos, Location::RequiresRegister());
+  locs->set_in(kDestStartPos, Location::RequiresRegister());
+  locs->set_in(kLengthPos, Location::WritableRegister());
+  return locs;
+}
+
+void MemoryCopyInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  const Register src_reg = locs()->in(kSrcPos).reg();
+  const Register dest_reg = locs()->in(kDestPos).reg();
+  const Register src_start_reg = locs()->in(kSrcStartPos).reg();
+  const Register dest_start_reg = locs()->in(kDestStartPos).reg();
+  const Register length_reg = locs()->in(kLengthPos).reg();
+
+  EmitComputeStartPointer(compiler, src_cid_, src_start(), src_reg,
+                          src_start_reg);
+  EmitComputeStartPointer(compiler, dest_cid_, dest_start(), dest_reg,
+                          dest_start_reg);
+
+  compiler::Label loop, done;
+
+  // Untag length and skip copy if length is zero.
+  __ SmiUntag(length_reg);
+  __ beqz(length_reg, &done);
+
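+  // length_reg now holds the element count; each iteration of the loop copies
+  // one element and advances both pointers by element_size_ bytes.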
+  __ Bind(&loop);
+  switch (element_size_) {
+    case 1:
+      __ lb(TMP, compiler::Address(src_reg));
+      __ addi(src_reg, src_reg, 1);
+      __ sb(TMP, compiler::Address(dest_reg));
+      __ addi(dest_reg, dest_reg, 1);
+      break;
+    case 2:
+      __ lh(TMP, compiler::Address(src_reg));
+      __ addi(src_reg, src_reg, 2);
+      __ sh(TMP, compiler::Address(dest_reg));
+      __ addi(dest_reg, dest_reg, 2);
+      break;
+    case 4:
+      __ lw(TMP, compiler::Address(src_reg));
+      __ addi(src_reg, src_reg, 4);
+      __ sw(TMP, compiler::Address(dest_reg));
+      __ addi(dest_reg, dest_reg, 4);
+      break;
+    case 8:
+#if XLEN == 32
+      __ lw(TMP, compiler::Address(src_reg, 0));
+      __ lw(TMP2, compiler::Address(src_reg, 4));
+      __ addi(src_reg, src_reg, 8);
+      __ sw(TMP, compiler::Address(dest_reg, 0));
+      __ sw(TMP2, compiler::Address(dest_reg, 4));
+      __ addi(dest_reg, dest_reg, 8);
+#else
+      __ ld(TMP, compiler::Address(src_reg));
+      __ addi(src_reg, src_reg, 8);
+      __ sd(TMP, compiler::Address(dest_reg));
+      __ addi(dest_reg, dest_reg, 8);
+#endif
+      break;
+    case 16:
+#if XLEN == 32
+      __ lw(TMP, compiler::Address(src_reg, 0));
+      __ lw(TMP2, compiler::Address(src_reg, 4));
+      __ sw(TMP, compiler::Address(dest_reg, 0));
+      __ sw(TMP2, compiler::Address(dest_reg, 4));
+      __ lw(TMP, compiler::Address(src_reg, 8));
+      __ lw(TMP2, compiler::Address(src_reg, 12));
+      __ addi(src_reg, src_reg, 16);
+      __ sw(TMP, compiler::Address(dest_reg, 8));
+      __ sw(TMP2, compiler::Address(dest_reg, 12));
+      __ addi(dest_reg, dest_reg, 16);
+#elif XLEN == 64
+      __ ld(TMP, compiler::Address(src_reg, 0));
+      __ ld(TMP2, compiler::Address(src_reg, 8));
+      __ addi(src_reg, src_reg, 16);
+      __ sd(TMP, compiler::Address(dest_reg, 0));
+      __ sd(TMP2, compiler::Address(dest_reg, 8));
+      __ addi(dest_reg, dest_reg, 16);
+#elif XLEN == 128
+      __ lq(TMP, compiler::Address(src_reg));
+      __ addi(src_reg, src_reg, 16);
+      __ sq(TMP, compiler::Address(dest_reg));
+      __ addi(dest_reg, dest_reg, 16);
+#endif
+      break;
+  }
+  __ subi(length_reg, length_reg, 1);
+  __ bnez(length_reg, &loop);
+  __ Bind(&done);
+}
+
+void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
+                                              classid_t array_cid,
+                                              Value* start,
+                                              Register array_reg,
+                                              Register start_reg) {
+  if (IsTypedDataBaseClassId(array_cid)) {
+    __ lx(array_reg,
+          compiler::FieldAddress(
+              array_reg, compiler::target::TypedDataBase::data_field_offset()));
+  } else {
+    switch (array_cid) {
+      case kOneByteStringCid:
+        __ addi(
+            array_reg, array_reg,
+            compiler::target::OneByteString::data_offset() - kHeapObjectTag);
+        break;
+      case kTwoByteStringCid:
+        __ addi(
+            array_reg, array_reg,
+            compiler::target::TwoByteString::data_offset() - kHeapObjectTag);
+        break;
+      case kExternalOneByteStringCid:
+        __ lx(array_reg,
+              compiler::FieldAddress(array_reg,
+                                     compiler::target::ExternalOneByteString::
+                                         external_data_offset()));
+        break;
+      case kExternalTwoByteStringCid:
+        __ lx(array_reg,
+              compiler::FieldAddress(array_reg,
+                                     compiler::target::ExternalTwoByteString::
+                                         external_data_offset()));
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
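+  // start_reg holds a tagged Smi (value << kSmiTagSize), so the scale shift
+  // is reduced by one; for 1-byte elements the net shift is -1 and the srai
+  // below simply untags the index.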
+  intptr_t shift = Utils::ShiftForPowerOfTwo(element_size_) - 1;
+  if (shift < 0) {
+    __ srai(TMP, start_reg, -shift);
+    __ add(array_reg, array_reg, TMP);
+  } else if (shift == 0) {
+    __ add(array_reg, array_reg, start_reg);
+  } else {
+    __ slli(TMP, start_reg, shift);
+    __ add(array_reg, array_reg, TMP);
+  }
+}
+
+LocationSummary* PushArgumentInstr::MakeLocationSummary(Zone* zone,
+                                                        bool opt) const {
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* locs = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  if (representation() == kUnboxedDouble) {
+    locs->set_in(0, Location::RequiresFpuRegister());
+  } else if (representation() == kUnboxedInt64) {
+#if XLEN == 32
+    locs->set_in(0, Location::Pair(Location::RequiresRegister(),
+                                   Location::RequiresRegister()));
+#else
+    locs->set_in(0, Location::RequiresRegister());
+#endif
+  } else {
+    locs->set_in(0, LocationAnyOrConstant(value()));
+  }
+  return locs;
+}
+
+void PushArgumentInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  // In SSA mode, we need an explicit push. Nothing to do in non-SSA mode
+  // where arguments are pushed by their definitions.
+  if (compiler->is_optimizing()) {
+    if (previous()->IsPushArgument()) {
+      // Already generated.
+      return;
+    }
+
+    // Count the arguments first so we can update SP once instead of using
+    // separate pushes.
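+    // The first pass below measures the total size; the second pass stores
+    // each argument at a descending offset from the adjusted SP.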
+    intptr_t size = 0;
+    for (PushArgumentInstr* push_arg = this; push_arg != nullptr;
+         push_arg = push_arg->next()->AsPushArgument()) {
+      const Location value = push_arg->locs()->in(0);
+      if (value.IsRegister()) {
+        size += compiler::target::kWordSize;
+#if XLEN == 32
+      } else if (value.IsPairLocation()) {
+        size += 2 * compiler::target::kWordSize;
+#endif
+      } else if (value.IsConstant()) {
+        size += compiler::target::kWordSize;
+      } else if (value.IsFpuRegister()) {
+        size += sizeof(double);
+      } else if (value.IsStackSlot()) {
+        size += compiler::target::kWordSize;
+      } else {
+        UNREACHABLE();
+      }
+    }
+    __ subi(SP, SP, size);
+
+    intptr_t offset = size;
+    for (PushArgumentInstr* push_arg = this; push_arg != nullptr;
+         push_arg = push_arg->next()->AsPushArgument()) {
+      const Location value = push_arg->locs()->in(0);
+      if (value.IsRegister()) {
+        offset -= compiler::target::kWordSize;
+        __ StoreToOffset(value.reg(), SP, offset);
+#if XLEN == 32
+      } else if (value.IsPairLocation()) {
+        offset -= compiler::target::kWordSize;
+        __ StoreToOffset(value.AsPairLocation()->At(1).reg(), SP, offset);
+        offset -= compiler::target::kWordSize;
+        __ StoreToOffset(value.AsPairLocation()->At(0).reg(), SP, offset);
+#endif
+      } else if (value.IsConstant()) {
+        const Object& constant = value.constant();
+        Register reg;
+        if (constant.IsNull()) {
+          reg = NULL_REG;
+        } else if (constant.IsSmi() && Smi::Cast(constant).Value() == 0) {
+          reg = ZR;
+        } else {
+          reg = TMP;
+          __ LoadObject(TMP, constant);
+        }
+        offset -= compiler::target::kWordSize;
+        __ StoreToOffset(reg, SP, offset);
+      } else if (value.IsFpuRegister()) {
+        offset -= sizeof(double);
+        __ StoreDToOffset(value.fpu_reg(), SP, offset);
+      } else if (value.IsStackSlot()) {
+        const intptr_t value_offset = value.ToStackSlotOffset();
+        __ LoadFromOffset(TMP, value.base_reg(), value_offset);
+        offset -= compiler::target::kWordSize;
+        __ StoreToOffset(TMP, SP, offset);
+      } else {
+        UNREACHABLE();
+      }
+    }
+    ASSERT(offset == 0);
+  }
+}
+
+LocationSummary* ReturnInstr::MakeLocationSummary(Zone* zone, bool opt) const {
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* locs = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  switch (representation()) {
+    case kTagged:
+      locs->set_in(0,
+                   Location::RegisterLocation(CallingConventions::kReturnReg));
+      break;
+    case kUnboxedInt64:
+#if XLEN == 32
+      locs->set_in(
+          0, Location::Pair(
+                 Location::RegisterLocation(CallingConventions::kReturnReg),
+                 Location::RegisterLocation(
+                     CallingConventions::kSecondReturnReg)));
+#else
+      locs->set_in(0,
+                   Location::RegisterLocation(CallingConventions::kReturnReg));
+#endif
+      break;
+    case kUnboxedDouble:
+      locs->set_in(
+          0, Location::FpuRegisterLocation(CallingConventions::kReturnFpuReg));
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  return locs;
+}
+
+// Attempt optimized compilation at return instruction instead of at the entry.
+// The entry needs to be patchable; no inlined objects are allowed in the area
+// that will be overwritten by the patch instructions: a branch macro sequence.
+void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  if (locs()->in(0).IsRegister()) {
+    const Register result = locs()->in(0).reg();
+    ASSERT(result == CallingConventions::kReturnReg);
+  } else if (locs()->in(0).IsPairLocation()) {
+    const Register result_lo = locs()->in(0).AsPairLocation()->At(0).reg();
+    const Register result_hi = locs()->in(0).AsPairLocation()->At(1).reg();
+    ASSERT(result_lo == CallingConventions::kReturnReg);
+    ASSERT(result_hi == CallingConventions::kSecondReturnReg);
+  } else {
+    ASSERT(locs()->in(0).IsFpuRegister());
+    const FpuRegister result = locs()->in(0).fpu_reg();
+    ASSERT(result == CallingConventions::kReturnFpuReg);
+  }
+
+  if (!compiler->flow_graph().graph_entry()->NeedsFrame()) {
+    __ ret();
+    return;
+  }
+
+#if defined(DEBUG)
+  compiler::Label stack_ok;
+  __ Comment("Stack Check");
+  const intptr_t fp_sp_dist =
+      (compiler::target::frame_layout.first_local_from_fp + 1 -
+       compiler->StackSize()) *
+      kWordSize;
+  ASSERT(fp_sp_dist <= 0);
+  __ sub(TMP, SP, FP);
+  __ CompareImmediate(TMP, fp_sp_dist);
+  __ BranchIf(EQ, &stack_ok, compiler::Assembler::kNearJump);
+  __ ebreak();
+  __ Bind(&stack_ok);
+#endif
+  ASSERT(__ constant_pool_allowed());
+  if (yield_index() != UntaggedPcDescriptors::kInvalidYieldIndex) {
+    compiler->EmitYieldPositionMetadata(source(), yield_index());
+  }
+  __ LeaveDartFrame();  // Disallows constant pool use.
+  __ ret();
+  // This ReturnInstr may be emitted out of order by the optimizer. The next
+  // block may be a target expecting a properly set constant pool pointer.
+  __ set_constant_pool_allowed(true);
+}
+
+// Detect the pattern where one value is zero and the other is a power of 2.
+static bool IsPowerOfTwoKind(intptr_t v1, intptr_t v2) {
+  return (Utils::IsPowerOfTwo(v1) && (v2 == 0)) ||
+         (Utils::IsPowerOfTwo(v2) && (v1 == 0));
+}
+
+LocationSummary* IfThenElseInstr::MakeLocationSummary(Zone* zone,
+                                                      bool opt) const {
+  comparison()->InitializeLocationSummary(zone, opt);
+  return comparison()->locs();
+}
+
+void IfThenElseInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  const Register result = locs()->out(0).reg();
+
+  Location left = locs()->in(0);
+  Location right = locs()->in(1);
+  ASSERT(!left.IsConstant() || !right.IsConstant());
+
+  // Emit comparison code. This must not overwrite the result register.
+  // IfThenElseInstr::Supports() should prevent EmitComparisonCode from using
+  // the labels or returning an invalid condition.
+  BranchLabels labels = {NULL, NULL, NULL};
+  Condition true_condition = comparison()->EmitComparisonCode(compiler, labels);
+  ASSERT(true_condition != kInvalidCondition);
+
+  const bool is_power_of_two_kind = IsPowerOfTwoKind(if_true_, if_false_);
+
+  intptr_t true_value = if_true_;
+  intptr_t false_value = if_false_;
+
+  if (is_power_of_two_kind) {
+    if (true_value == 0) {
+      // We need to have zero in result on true_condition.
+      true_condition = InvertCondition(true_condition);
+    }
+  } else {
+    if (true_value == 0) {
+      // Swap values so that false_value is zero.
+      intptr_t temp = true_value;
+      true_value = false_value;
+      false_value = temp;
+    } else {
+      true_condition = InvertCondition(true_condition);
+    }
+  }
+
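+  // Branch-free select: SetIf leaves 1 in result when true_condition holds
+  // and 0 otherwise. In the power-of-two case a single shift turns that bit
+  // into Smi(max(true_value, false_value)) or Smi(0). Otherwise result - 1
+  // is 0 or all ones, so masking the Smi delta and adding Smi(false_value)
+  // selects between the two constants.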
+  __ SetIf(true_condition, result);
+
+  if (is_power_of_two_kind) {
+    const intptr_t shift =
+        Utils::ShiftForPowerOfTwo(Utils::Maximum(true_value, false_value));
+    __ slli(result, result, shift + kSmiTagSize);
+  } else {
+    __ subi(result, result, 1);
+    const int64_t val = Smi::RawValue(true_value) - Smi::RawValue(false_value);
+    __ AndImmediate(result, result, val);
+    if (false_value != 0) {
+      __ AddImmediate(result, Smi::RawValue(false_value));
+    }
+  }
+}
+
+LocationSummary* ClosureCallInstr::MakeLocationSummary(Zone* zone,
+                                                       bool opt) const {
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
+  summary->set_in(0, Location::RegisterLocation(T0));  // Function.
+  return MakeCallSummary(zone, this, summary);
+}
+
+void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  // Load arguments descriptor in S4.
+  const intptr_t argument_count = ArgumentCount();  // Includes type args.
+  const Array& arguments_descriptor =
+      Array::ZoneHandle(Z, GetArgumentsDescriptor());
+  __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
+
+  ASSERT(locs()->in(0).reg() == T0);
+  if (FLAG_precompiled_mode) {
+    // T0: Closure with a cached entry point.
+    __ LoadFieldFromOffset(A1, T0,
+                           compiler::target::Closure::entry_point_offset());
+  } else {
+    // T0: Function.
+    __ LoadCompressedFieldFromOffset(CODE_REG, T0,
+                                     compiler::target::Function::code_offset());
+    // Closure functions only have one entry point.
+    __ LoadFieldFromOffset(A1, T0,
+                           compiler::target::Function::entry_point_offset());
+  }
+
+  // T0: Function (argument to lazy compile stub)
+  // S4: Arguments descriptor array.
+  // A1: instructions entry point.
+  if (!FLAG_precompiled_mode) {
+    // S5: Smi 0 (no IC data; the lazy-compile stub expects a GC-safe value).
+    __ LoadImmediate(IC_DATA_REG, 0);
+  }
+  __ jalr(A1);
+  compiler->EmitCallsiteMetadata(source(), deopt_id(),
+                                 UntaggedPcDescriptors::kOther, locs(), env());
+  __ Drop(argument_count);
+}
+
+LocationSummary* LoadLocalInstr::MakeLocationSummary(Zone* zone,
+                                                     bool opt) const {
+  return LocationSummary::Make(zone, 0, Location::RequiresRegister(),
+                               LocationSummary::kNoCall);
+}
+
+void LoadLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  const Register result = locs()->out(0).reg();
+  __ LoadFromOffset(result, FP,
+                    compiler::target::FrameOffsetInBytesForVariable(&local()));
+  // TODO(riscv): Using an SP-relative address instead of an FP-relative
+  // address would allow for compressed instructions.
+}
+
+LocationSummary* StoreLocalInstr::MakeLocationSummary(Zone* zone,
+                                                      bool opt) const {
+  return LocationSummary::Make(zone, 1, Location::SameAsFirstInput(),
+                               LocationSummary::kNoCall);
+}
+
+void StoreLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  const Register value = locs()->in(0).reg();
+  const Register result = locs()->out(0).reg();
+  ASSERT(result == value);  // Assert that register assignment is correct.
+  __ StoreToOffset(value, FP,
+                   compiler::target::FrameOffsetInBytesForVariable(&local()));
+  // TODO(riscv): Using an SP-relative address instead of an FP-relative
+  // address would allow for compressed instructions.
+}
+
+LocationSummary* ConstantInstr::MakeLocationSummary(Zone* zone,
+                                                    bool opt) const {
+  return LocationSummary::Make(zone, 0, Location::RequiresRegister(),
+                               LocationSummary::kNoCall);
+}
+
+void ConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  // The register allocator drops constant definitions that have no uses.
+  if (!locs()->out(0).IsInvalid()) {
+    const Register result = locs()->out(0).reg();
+    __ LoadObject(result, value());
+  }
+}
+
+void ConstantInstr::EmitMoveToLocation(FlowGraphCompiler* compiler,
+                                       const Location& destination,
+                                       Register tmp,
+                                       intptr_t pair_index) {
+  if (destination.IsRegister()) {
+    if (RepresentationUtils::IsUnboxedInteger(representation())) {
+      int64_t v;
+      const bool ok = compiler::HasIntegerValue(value_, &v);
+      RELEASE_ASSERT(ok);
+      if (value_.IsSmi() && RepresentationUtils::IsUnsigned(representation())) {
+        // If the value is negative, then the sign bit was preserved during
+        // Smi untagging, which means the resulting value may be unexpected.
+        ASSERT(v >= 0);
+      }
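+      // On 32-bit targets a 64-bit constant occupies a pair location;
+      // pair_index selects which half to materialize.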
+#if XLEN == 32
+      __ LoadImmediate(destination.reg(), pair_index == 0
+                                              ? Utils::Low32Bits(v)
+                                              : Utils::High32Bits(v));
+#else
+      ASSERT(pair_index == 0);  // No pair representation needed on 64-bit.
+      __ LoadImmediate(destination.reg(), v);
+#endif
+    } else {
+      ASSERT(representation() == kTagged);
+      __ LoadObject(destination.reg(), value_);
+    }
+  } else if (destination.IsFpuRegister()) {
+    const FRegister dst = destination.fpu_reg();
+    __ LoadDImmediate(dst, Double::Cast(value_).value());
+  } else if (destination.IsDoubleStackSlot()) {
+    __ LoadDImmediate(FTMP, Double::Cast(value_).value());
+    const intptr_t dest_offset = destination.ToStackSlotOffset();
+    __ StoreDToOffset(FTMP, destination.base_reg(), dest_offset);
+  } else {
+    ASSERT(destination.IsStackSlot());
+    ASSERT(tmp != kNoRegister);
+    const intptr_t dest_offset = destination.ToStackSlotOffset();
+    if (RepresentationUtils::IsUnboxedInteger(representation())) {
+      int64_t v;
+      const bool ok = compiler::HasIntegerValue(value_, &v);
+      RELEASE_ASSERT(ok);
+#if XLEN == 32
+      __ LoadImmediate(
+          tmp, pair_index == 0 ? Utils::Low32Bits(v) : Utils::High32Bits(v));
+#else
+      ASSERT(pair_index == 0);  // No pair representation needed on 64-bit.
+      __ LoadImmediate(tmp, v);
+#endif
+    } else {
+      ASSERT(representation() == kTagged);
+      if (value_.IsNull()) {
+        tmp = NULL_REG;
+      } else if (value_.IsSmi() && Smi::Cast(value_).Value() == 0) {
+        tmp = ZR;
+      } else {
+        __ LoadObject(tmp, value_);
+      }
+    }
+    __ StoreToOffset(tmp, destination.base_reg(), dest_offset);
+  }
+}
+
+LocationSummary* UnboxedConstantInstr::MakeLocationSummary(Zone* zone,
+                                                           bool opt) const {
+  const bool is_unboxed_int =
+      RepresentationUtils::IsUnboxedInteger(representation());
+  ASSERT(!is_unboxed_int || RepresentationUtils::ValueSize(representation()) <=
+                                compiler::target::kWordSize);
+  const intptr_t kNumInputs = 0;
+  const intptr_t kNumTemps = is_unboxed_int ? 0 : 1;
+  LocationSummary* locs = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  if (is_unboxed_int) {
+    locs->set_out(0, Location::RequiresRegister());
+  } else {
+    switch (representation()) {
+      case kUnboxedDouble:
+        locs->set_out(0, Location::RequiresFpuRegister());
+        locs->set_temp(0, Location::RequiresRegister());
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
+  return locs;
+}
+
+void UnboxedConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  if (!locs()->out(0).IsInvalid()) {
+    const Register scratch =
+        RepresentationUtils::IsUnboxedInteger(representation())
+            ? kNoRegister
+            : locs()->temp(0).reg();
+    EmitMoveToLocation(compiler, locs()->out(0), scratch);
+  }
+}
+
+LocationSummary* AssertAssignableInstr::MakeLocationSummary(Zone* zone,
+                                                            bool opt) const {
+  auto const dst_type_loc =
+      LocationFixedRegisterOrConstant(dst_type(), TypeTestABI::kDstTypeReg);
+
+  // We want to prevent spilling of the inputs (e.g. function/instantiator tav),
+  // since TTS preserves them. So we make this a `kNoCall` summary,
+  // even though most other registers can be modified by the stub. To tell the
+  // register allocator about it, we reserve all the other registers as
+  // temporary registers.
+  // TODO(http://dartbug.com/32788): Simplify this.
+
+  const intptr_t kNonChangeableInputRegs =
+      (1 << TypeTestABI::kInstanceReg) |
+      ((dst_type_loc.IsRegister() ? 1 : 0) << TypeTestABI::kDstTypeReg) |
+      (1 << TypeTestABI::kInstantiatorTypeArgumentsReg) |
+      (1 << TypeTestABI::kFunctionTypeArgumentsReg);
+
+  const intptr_t kNumInputs = 4;
+
+  // We invoke a stub that can potentially clobber any CPU register
+  // but can only clobber FPU registers on the slow path when
+  // entering runtime. The ABI does not guarantee that all FPU registers
+  // are preserved across such calls, so we block all of them except for
+  // FpuTMP.
+  const intptr_t kCpuRegistersToPreserve =
+      kDartAvailableCpuRegs & ~kNonChangeableInputRegs;
+  const intptr_t kFpuRegistersToPreserve =
+      Utils::NBitMask<intptr_t>(kNumberOfFpuRegisters) & ~(1l << FpuTMP);
+
+  const intptr_t kNumTemps = (Utils::CountOneBits32(kCpuRegistersToPreserve) +
+                              Utils::CountOneBits32(kFpuRegistersToPreserve));
+
+  LocationSummary* summary = new (zone) LocationSummary(
+      zone, kNumInputs, kNumTemps, LocationSummary::kCallCalleeSafe);
+  summary->set_in(kInstancePos,
+                  Location::RegisterLocation(TypeTestABI::kInstanceReg));
+  summary->set_in(kDstTypePos, dst_type_loc);
+  summary->set_in(
+      kInstantiatorTAVPos,
+      Location::RegisterLocation(TypeTestABI::kInstantiatorTypeArgumentsReg));
+  summary->set_in(kFunctionTAVPos, Location::RegisterLocation(
+                                       TypeTestABI::kFunctionTypeArgumentsReg));
+  summary->set_out(0, Location::SameAsFirstInput());
+
+  // Let's reserve all registers except for the input ones.
+  intptr_t next_temp = 0;
+  for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
+    const bool should_preserve = ((1 << i) & kCpuRegistersToPreserve) != 0;
+    if (should_preserve) {
+      summary->set_temp(next_temp++,
+                        Location::RegisterLocation(static_cast<Register>(i)));
+    }
+  }
+
+  for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
+    const bool should_preserve = ((1l << i) & kFpuRegistersToPreserve) != 0;
+    if (should_preserve) {
+      summary->set_temp(next_temp++, Location::FpuRegisterLocation(
+                                         static_cast<FpuRegister>(i)));
+    }
+  }
+
+  return summary;
+}
+
+void AssertBooleanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  ASSERT(locs()->always_calls());
+
+  auto object_store = compiler->isolate_group()->object_store();
+  const auto& assert_boolean_stub =
+      Code::ZoneHandle(compiler->zone(), object_store->assert_boolean_stub());
+
+  compiler::Label done;
+  __ andi(TMP, AssertBooleanABI::kObjectReg, 1 << kBoolVsNullBitPosition);
+  __ bnez(TMP, &done, compiler::Assembler::kNearJump);
+  compiler->GenerateStubCall(source(), assert_boolean_stub,
+                             /*kind=*/UntaggedPcDescriptors::kOther, locs(),
+                             deopt_id(), env());
+  __ Bind(&done);
+}
+
+static Condition TokenKindToIntCondition(Token::Kind kind) {
+  switch (kind) {
+    case Token::kEQ:
+      return EQ;
+    case Token::kNE:
+      return NE;
+    case Token::kLT:
+      return LT;
+    case Token::kGT:
+      return GT;
+    case Token::kLTE:
+      return LE;
+    case Token::kGTE:
+      return GE;
+    default:
+      UNREACHABLE();
+      return VS;
+  }
+}
+
+static Condition FlipCondition(Condition condition) {
+  switch (condition) {
+    case EQ:
+      return EQ;
+    case NE:
+      return NE;
+    case LT:
+      return GT;
+    case LE:
+      return GE;
+    case GT:
+      return LT;
+    case GE:
+      return LE;
+    case CC:
+      return HI;
+    case LS:
+      return CS;
+    case HI:
+      return CC;
+    case CS:
+      return LS;
+    default:
+      UNREACHABLE();
+      return EQ;
+  }
+}
+
+static void EmitBranchOnCondition(
+    FlowGraphCompiler* compiler,
+    Condition true_condition,
+    BranchLabels labels,
+    compiler::Assembler::JumpDistance jump_distance =
+        compiler::Assembler::kFarJump) {
+  if (labels.fall_through == labels.false_label) {
+    // If the next block is the false successor we will fall through to it.
+    __ BranchIf(true_condition, labels.true_label, jump_distance);
+  } else {
+    // If the next block is not the false successor we will branch to it.
+    Condition false_condition = InvertCondition(true_condition);
+    __ BranchIf(false_condition, labels.false_label, jump_distance);
+
+    // Fall through or jump to the true successor.
+    if (labels.fall_through != labels.true_label) {
+      __ j(labels.true_label, jump_distance);
+    }
+  }
+}
+
+static Condition EmitSmiComparisonOp(FlowGraphCompiler* compiler,
+                                     LocationSummary* locs,
+                                     Token::Kind kind,
+                                     BranchLabels labels) {
+  Location left = locs->in(0);
+  Location right = locs->in(1);
+  ASSERT(!left.IsConstant() || !right.IsConstant());
+
+  Condition true_condition = TokenKindToIntCondition(kind);
+  if (left.IsConstant() || right.IsConstant()) {
+    // Ensure constant is on the right.
+    if (left.IsConstant()) {
+      Location tmp = right;
+      right = left;
+      left = tmp;
+      true_condition = FlipCondition(true_condition);
+    }
+    __ CompareObject(left.reg(), right.constant());
+  } else {
+    __ CompareObjectRegisters(left.reg(), right.reg());
+  }
+  return true_condition;
+}
+
+#if XLEN == 32
+static Condition EmitUnboxedMintEqualityOp(FlowGraphCompiler* compiler,
+                                           LocationSummary* locs,
+                                           Token::Kind kind) {
+  ASSERT(Token::IsEqualityOperator(kind));
+  PairLocation* left_pair = locs->in(0).AsPairLocation();
+  Register left_lo = left_pair->At(0).reg();
+  Register left_hi = left_pair->At(1).reg();
+  PairLocation* right_pair = locs->in(1).AsPairLocation();
+  Register right_lo = right_pair->At(0).reg();
+  Register right_hi = right_pair->At(1).reg();
+
+  __ xor_(TMP, left_lo, right_lo);
+  __ xor_(TMP2, left_hi, right_hi);
+  __ or_(TMP, TMP, TMP2);
+  __ CompareImmediate(TMP, 0);
+  if (kind == Token::kEQ) {
+    return EQUAL;
+  } else if (kind == Token::kNE) {
+    return NOT_EQUAL;
+  }
+  UNREACHABLE();
+}
+
+static Condition EmitUnboxedMintComparisonOp(FlowGraphCompiler* compiler,
+                                             LocationSummary* locs,
+                                             Token::Kind kind,
+                                             BranchLabels labels) {
+  PairLocation* left_pair = locs->in(0).AsPairLocation();
+  Register left_lo = left_pair->At(0).reg();
+  Register left_hi = left_pair->At(1).reg();
+  PairLocation* right_pair = locs->in(1).AsPairLocation();
+  Register right_lo = right_pair->At(0).reg();
+  Register right_hi = right_pair->At(1).reg();
+
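+  // If the high words differ, the signed comparison of them decides; when
+  // they are equal, the low words decide and must be compared as unsigned.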
+  switch (kind) {
+    case Token::kEQ:
+      __ bne(left_lo, right_lo, labels.false_label);
+      __ CompareRegisters(left_hi, right_hi);
+      return EQUAL;
+    case Token::kNE:
+      __ bne(left_lo, right_lo, labels.true_label);
+      __ CompareRegisters(left_hi, right_hi);
+      return NOT_EQUAL;
+    case Token::kLT:
+      __ blt(left_hi, right_hi, labels.true_label);
+      __ bgt(left_hi, right_hi, labels.false_label);
+      __ CompareRegisters(left_lo, right_lo);
+      return UNSIGNED_LESS;
+    case Token::kGT:
+      __ bgt(left_hi, right_hi, labels.true_label);
+      __ blt(left_hi, right_hi, labels.false_label);
+      __ CompareRegisters(left_lo, right_lo);
+      return UNSIGNED_GREATER;
+    case Token::kLTE:
+      __ blt(left_hi, right_hi, labels.true_label);
+      __ bgt(left_hi, right_hi, labels.false_label);
+      __ CompareRegisters(left_lo, right_lo);
+      return UNSIGNED_LESS_EQUAL;
+    case Token::kGTE:
+      __ bgt(left_hi, right_hi, labels.true_label);
+      __ blt(left_hi, right_hi, labels.false_label);
+      __ CompareRegisters(left_lo, right_lo);
+      return UNSIGNED_GREATER_EQUAL;
+    default:
+      UNREACHABLE();
+  }
+}
+#else
+// Similar to ComparisonInstr::EmitComparisonCode, may either:
+//   - emit comparison code and return a valid condition in which case the
+//     caller is expected to emit a branch to the true label based on that
+//     condition (or a branch to the false label on the opposite condition).
+//   - emit comparison code with a branch directly to the labels and return
+//     kInvalidCondition.
+static Condition EmitInt64ComparisonOp(FlowGraphCompiler* compiler,
+                                       LocationSummary* locs,
+                                       Token::Kind kind,
+                                       BranchLabels labels) {
+  Location left = locs->in(0);
+  Location right = locs->in(1);
+  ASSERT(!left.IsConstant() || !right.IsConstant());
+
+  Condition true_condition = TokenKindToIntCondition(kind);
+  if (left.IsConstant() || right.IsConstant()) {
+    // Ensure constant is on the right.
+    ConstantInstr* constant = nullptr;
+    if (left.IsConstant()) {
+      constant = left.constant_instruction();
+      Location tmp = right;
+      right = left;
+      left = tmp;
+      true_condition = FlipCondition(true_condition);
+    } else {
+      constant = right.constant_instruction();
+    }
+
+    if (RepresentationUtils::IsUnboxedInteger(constant->representation())) {
+      int64_t value;
+      const bool ok = compiler::HasIntegerValue(constant->value(), &value);
+      RELEASE_ASSERT(ok);
+      __ CompareImmediate(left.reg(), value);
+    } else {
+      UNREACHABLE();
+    }
+  } else {
+    __ CompareRegisters(left.reg(), right.reg());
+  }
+  return true_condition;
+}
+#endif
+
+static Condition EmitNullAwareInt64ComparisonOp(FlowGraphCompiler* compiler,
+                                                LocationSummary* locs,
+                                                Token::Kind kind,
+                                                BranchLabels labels) {
+  ASSERT((kind == Token::kEQ) || (kind == Token::kNE));
+  const Register left = locs->in(0).reg();
+  const Register right = locs->in(1).reg();
+  const Condition true_condition = TokenKindToIntCondition(kind);
+  compiler::Label* equal_result =
+      (true_condition == EQ) ? labels.true_label : labels.false_label;
+  compiler::Label* not_equal_result =
+      (true_condition == EQ) ? labels.false_label : labels.true_label;
+
+  // Check if operands have the same value. If they don't, then they could
+  // be equal only if both of them are Mints with the same value.
+  __ CompareObjectRegisters(left, right);
+  __ BranchIf(EQ, equal_result);
+  __ and_(TMP, left, right);
+  __ BranchIfSmi(TMP, not_equal_result);
+  __ CompareClassId(left, kMintCid, TMP);
+  __ BranchIf(NE, not_equal_result);
+  __ CompareClassId(right, kMintCid, TMP);
+  __ BranchIf(NE, not_equal_result);
+#if XLEN == 32
+  __ LoadFieldFromOffset(TMP, left, compiler::target::Mint::value_offset());
+  __ LoadFieldFromOffset(TMP2, right, compiler::target::Mint::value_offset());
+  __ bne(TMP, TMP2, not_equal_result);
+  __ LoadFieldFromOffset(
+      TMP, left,
+      compiler::target::Mint::value_offset() + compiler::target::kWordSize);
+  __ LoadFieldFromOffset(
+      TMP2, right,
+      compiler::target::Mint::value_offset() + compiler::target::kWordSize);
+#else
+  __ LoadFieldFromOffset(TMP, left, Mint::value_offset());
+  __ LoadFieldFromOffset(TMP2, right, Mint::value_offset());
+#endif
+  __ CompareRegisters(TMP, TMP2);
+  return true_condition;
+}
+
+LocationSummary* EqualityCompareInstr::MakeLocationSummary(Zone* zone,
+                                                           bool opt) const {
+  const intptr_t kNumInputs = 2;
+  const intptr_t kNumTemps = 0;
+  if (is_null_aware()) {
+    LocationSummary* locs = new (zone)
+        LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+    locs->set_in(0, Location::RequiresRegister());
+    locs->set_in(1, Location::RequiresRegister());
+    locs->set_out(0, Location::RequiresRegister());
+    return locs;
+  }
+#if XLEN == 32
+  if (operation_cid() == kMintCid) {
+    LocationSummary* locs = new (zone)
+        LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+    locs->set_in(0, Location::Pair(Location::RequiresRegister(),
+                                   Location::RequiresRegister()));
+    locs->set_in(1, Location::Pair(Location::RequiresRegister(),
+                                   Location::RequiresRegister()));
+    locs->set_out(0, Location::RequiresRegister());
+    return locs;
+  }
+#endif
+  if (operation_cid() == kDoubleCid) {
+    LocationSummary* locs = new (zone)
+        LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+    locs->set_in(0, Location::RequiresFpuRegister());
+    locs->set_in(1, Location::RequiresFpuRegister());
+    locs->set_out(0, Location::RequiresRegister());
+    return locs;
+  }
+  if (operation_cid() == kSmiCid || operation_cid() == kMintCid) {
+    LocationSummary* locs = new (zone)
+        LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+    if (is_null_aware()) {
+      locs->set_in(0, Location::RequiresRegister());
+      locs->set_in(1, Location::RequiresRegister());
+    } else {
+      locs->set_in(0, LocationRegisterOrConstant(left()));
+      // Only one input can be a constant operand. The case of two constant
+      // operands should be handled by constant propagation.
+      // Only right can be a stack slot.
+      locs->set_in(1, locs->in(0).IsConstant()
+                          ? Location::RequiresRegister()
+                          : LocationRegisterOrConstant(right()));
+    }
+    locs->set_out(0, Location::RequiresRegister());
+    return locs;
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+static Condition EmitDoubleComparisonOp(FlowGraphCompiler* compiler,
+                                        LocationSummary* locs,
+                                        BranchLabels labels,
+                                        Token::Kind kind) {
+  const FRegister left = locs->in(0).fpu_reg();
+  const FRegister right = locs->in(1).fpu_reg();
+
+  // TODO(riscv): Check if this does what we want for comparisons involving NaN.
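+  // Note: feq.d/flt.d/fle.d write 0 when either operand is NaN, so with the
+  // encodings below every ordering (kEQ/kLT/kGT/kLTE/kGTE) evaluates to false
+  // for NaN operands, while kNE evaluates to true.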
+  switch (kind) {
+    case Token::kEQ:
+      __ feqd(TMP, left, right);
+      __ CompareImmediate(TMP, 0);
+      return NE;
+    case Token::kNE:
+      __ feqd(TMP, left, right);
+      __ CompareImmediate(TMP, 0);
+      return EQ;
+    case Token::kLT:
+      __ fltd(TMP, left, right);
+      __ CompareImmediate(TMP, 0);
+      return NE;
+    case Token::kGT:
+      __ fltd(TMP, right, left);
+      __ CompareImmediate(TMP, 0);
+      return NE;
+    case Token::kLTE:
+      __ fled(TMP, left, right);
+      __ CompareImmediate(TMP, 0);
+      return NE;
+    case Token::kGTE:
+      __ fled(TMP, right, left);
+      __ CompareImmediate(TMP, 0);
+      return NE;
+    default:
+      UNREACHABLE();
+  }
+}
+
+Condition EqualityCompareInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
+                                                   BranchLabels labels) {
+  if (is_null_aware()) {
+    ASSERT(operation_cid() == kMintCid);
+    return EmitNullAwareInt64ComparisonOp(compiler, locs(), kind(), labels);
+  }
+  if (operation_cid() == kSmiCid) {
+    return EmitSmiComparisonOp(compiler, locs(), kind(), labels);
+  } else if (operation_cid() == kMintCid) {
+#if XLEN == 32
+    return EmitUnboxedMintEqualityOp(compiler, locs(), kind());
+#else
+    return EmitInt64ComparisonOp(compiler, locs(), kind(), labels);
+#endif
+  } else {
+    ASSERT(operation_cid() == kDoubleCid);
+    return EmitDoubleComparisonOp(compiler, locs(), labels, kind());
+  }
+}
+
+LocationSummary* TestSmiInstr::MakeLocationSummary(Zone* zone, bool opt) const {
+  const intptr_t kNumInputs = 2;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* locs = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  locs->set_in(0, Location::RequiresRegister());
+  // Only one input can be a constant operand. The case of two constant
+  // operands should be handled by constant propagation.
+  locs->set_in(1, LocationRegisterOrConstant(right()));
+  return locs;
+}
+
+Condition TestSmiInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
+                                           BranchLabels labels) {
+  const Register left = locs()->in(0).reg();
+  Location right = locs()->in(1);
+  if (right.IsConstant()) {
+    ASSERT(right.constant().IsSmi());
+    const intx_t imm = static_cast<intx_t>(right.constant().ptr());
+    __ TestImmediate(left, imm);
+  } else {
+    __ TestRegisters(left, right.reg());
+  }
+  Condition true_condition = (kind() == Token::kNE) ? NE : EQ;
+  return true_condition;
+}
+
+LocationSummary* TestCidsInstr::MakeLocationSummary(Zone* zone,
+                                                    bool opt) const {
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 1;
+  LocationSummary* locs = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  locs->set_in(0, Location::RequiresRegister());
+  locs->set_temp(0, Location::RequiresRegister());
+  locs->set_out(0, Location::RequiresRegister());
+  return locs;
+}
+
+Condition TestCidsInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
+                                            BranchLabels labels) {
+  ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT));
+  const Register val_reg = locs()->in(0).reg();
+  const Register cid_reg = locs()->temp(0).reg();
+
+  compiler::Label* deopt =
+      CanDeoptimize()
+          ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptTestCids,
+                                   licm_hoisted_ ? ICData::kHoisted : 0)
+          : NULL;
+
+  const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0;
+  const ZoneGrowableArray<intptr_t>& data = cid_results();
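+  // cid_results() is a flat array of (cid, result) pairs; data[0] is always
+  // kSmiCid (asserted below), so the loop starts at the second pair.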
+  ASSERT(data[0] == kSmiCid);
+  bool result = data[1] == true_result;
+  __ BranchIfSmi(val_reg, result ? labels.true_label : labels.false_label);
+  __ LoadClassId(cid_reg, val_reg);
+
+  for (intptr_t i = 2; i < data.length(); i += 2) {
+    const intptr_t test_cid = data[i];
+    ASSERT(test_cid != kSmiCid);
+    result = data[i + 1] == true_result;
+    __ CompareImmediate(cid_reg, test_cid);
+    __ BranchIf(EQ, result ? labels.true_label : labels.false_label);
+  }
+  // No match found, deoptimize or default action.
+  if (deopt == NULL) {
+    // If the cid is not in the list, jump to the opposite label from the cids
+    // that are in the list. These must all be the same (see asserts in the
+    // constructor).
+    compiler::Label* target = result ? labels.false_label : labels.true_label;
+    if (target != labels.fall_through) {
+      __ j(target);
+    }
+  } else {
+    __ j(deopt);
+  }
+  // Dummy result; this method already emitted the jump, so there is no need
+  // for the caller to branch on a condition.
+  return kInvalidCondition;
+}
+
+LocationSummary* RelationalOpInstr::MakeLocationSummary(Zone* zone,
+                                                        bool opt) const {
+  const intptr_t kNumInputs = 2;
+  const intptr_t kNumTemps = 0;
+#if XLEN == 32
+  if (operation_cid() == kMintCid) {
+    LocationSummary* locs = new (zone)
+        LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+    locs->set_in(0, Location::Pair(Location::RequiresRegister(),
+                                   Location::RequiresRegister()));
+    locs->set_in(1, Location::Pair(Location::RequiresRegister(),
+                                   Location::RequiresRegister()));
+    locs->set_out(0, Location::RequiresRegister());
+    return locs;
+  }
+#endif
+  if (operation_cid() == kDoubleCid) {
+    LocationSummary* summary = new (zone)
+        LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+    summary->set_in(0, Location::RequiresFpuRegister());
+    summary->set_in(1, Location::RequiresFpuRegister());
+    summary->set_out(0, Location::RequiresRegister());
+    return summary;
+  }
+  if (operation_cid() == kSmiCid || operation_cid() == kMintCid) {
+    LocationSummary* summary = new (zone)
+        LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+    summary->set_in(0, LocationRegisterOrConstant(left()));
+    // Only one input can be a constant operand. The case of two constant
+    // operands should be handled by constant propagation.
+    summary->set_in(1, summary->in(0).IsConstant()
+                           ? Location::RequiresRegister()
+                           : LocationRegisterOrConstant(right()));
+    summary->set_out(0, Location::RequiresRegister());
+    return summary;
+  }
+
+  UNREACHABLE();
+  return NULL;
+}
+
+Condition RelationalOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
+                                                BranchLabels labels) {
+  if (operation_cid() == kSmiCid) {
+    return EmitSmiComparisonOp(compiler, locs(), kind(), labels);
+  } else if (operation_cid() == kMintCid) {
+#if XLEN == 32
+    return EmitUnboxedMintComparisonOp(compiler, locs(), kind(), labels);
+#else
+    return EmitInt64ComparisonOp(compiler, locs(), kind(), labels);
+#endif
+  } else {
+    ASSERT(operation_cid() == kDoubleCid);
+    return EmitDoubleComparisonOp(compiler, locs(), labels, kind());
+  }
+}
+
+void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  SetupNative();
+  const Register result = locs()->out(0).reg();
+
+  // All arguments are already @SP due to preceding PushArgument()s.
+  ASSERT(ArgumentCount() ==
+         function().NumParameters() + (function().IsGeneric() ? 1 : 0));
+
+  // Push the result placeholder initialized to NULL.
+  __ PushObject(Object::null_object());
+
+  // Pass a pointer to the first argument in R2.
+  __ AddImmediate(T2, SP, ArgumentCount() * kWordSize);
+
+  // Compute the effective address. When running under the simulator,
+  // this is a redirection address that forces the simulator to call
+  // into the runtime system.
+  uword entry;
+  const intptr_t argc_tag = NativeArguments::ComputeArgcTag(function());
+  const Code* stub;
+  if (link_lazily()) {
+    stub = &StubCode::CallBootstrapNative();
+    entry = NativeEntry::LinkNativeCallEntry();
+  } else {
+    entry = reinterpret_cast<uword>(native_c_function());
+    if (is_bootstrap_native()) {
+      stub = &StubCode::CallBootstrapNative();
+    } else if (is_auto_scope()) {
+      stub = &StubCode::CallAutoScopeNative();
+    } else {
+      stub = &StubCode::CallNoScopeNative();
+    }
+  }
+  __ LoadImmediate(T1, argc_tag);
+  compiler::ExternalLabel label(entry);
+  __ LoadNativeEntry(T5, &label,
+                     link_lazily() ? ObjectPool::Patchability::kPatchable
+                                   : ObjectPool::Patchability::kNotPatchable);
+  if (link_lazily()) {
+    compiler->GeneratePatchableCall(source(), *stub,
+                                    UntaggedPcDescriptors::kOther, locs());
+  } else {
+    // We can never lazy-deopt here because natives are never optimized.
+    ASSERT(!compiler->is_optimizing());
+    compiler->GenerateNonLazyDeoptableStubCall(
+        source(), *stub, UntaggedPcDescriptors::kOther, locs());
+  }
+  __ lx(result, compiler::Address(SP, 0));
+
+  __ Drop(ArgumentCount() + 1);  // Drop the arguments and result.
+}
+
+LocationSummary* FfiCallInstr::MakeLocationSummary(Zone* zone,
+                                                   bool is_optimizing) const {
+  LocationSummary* summary =
+      MakeLocationSummaryInternal(zone, is_optimizing, CALLEE_SAVED_TEMP2);
+  // A3/A4/A5 are blocked during Dart register allocation because they are
+  // assigned to TMP/TMP2/PP. This assignment is important for reducing code
+  // size. To work around this for FFI calls, the FFI argument definitions are
+  // allocated to other registers and moved to the correct register at the last
+  // moment (so there are no conflicting uses of TMP/TMP2/PP).
+  // FfiCallInstr itself sometimes also clobbers A2/CODE_REG.
+  // See also FfiCallInstr::EmitCall.
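+  // For example, an argument the native ABI assigns to A3 is produced in T3
+  // by the register allocator and is only moved into A3 in EmitCall,
+  // immediately before the jalr.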
+  for (intptr_t i = 0; i < summary->input_count(); i++) {
+    if (!summary->in(i).IsRegister()) continue;
+    if (summary->in(i).reg() == A2) {
+      summary->set_in(i, Location::RegisterLocation(T2));
+    } else if (summary->in(i).reg() == A3) {
+      summary->set_in(i, Location::RegisterLocation(T3));
+    } else if (summary->in(i).reg() == A4) {
+      summary->set_in(i, Location::RegisterLocation(T4));
+    } else if (summary->in(i).reg() == A5) {
+      summary->set_in(i, Location::RegisterLocation(T5));
+    }
+  }
+  return summary;
+}
+
+void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  // For regular calls, this holds the FP for rebasing the original locations
+  // during EmitParamMoves.
+  // For leaf calls, this holds the SP used to restore the pre-aligned SP after
+  // the call.
+  const Register saved_fp_or_sp = locs()->temp(0).reg();
+  RELEASE_ASSERT((CallingConventions::kCalleeSaveCpuRegisters &
+                  (1 << saved_fp_or_sp)) != 0);
+  const Register temp1 = locs()->temp(1).reg();
+  const Register temp2 = locs()->temp(2).reg();
+  const Register target = locs()->in(TargetAddressIndex()).reg();
+  ASSERT(temp1 != target);
+  ASSERT(temp2 != target);
+  ASSERT(temp1 != saved_fp_or_sp);
+  ASSERT(temp2 != saved_fp_or_sp);
+  ASSERT(saved_fp_or_sp != target);
+
+  // Ensure this is a callee-saved register and is preserved across the call.
+  ASSERT((CallingConventions::kCalleeSaveCpuRegisters &
+          (1 << saved_fp_or_sp)) != 0);
+  // Temps don't need to be preserved.
+
+  __ mv(saved_fp_or_sp, is_leaf_ ? SPREG : FPREG);
+
+  if (!is_leaf_) {
+    // We need to create a dummy "exit frame". It will share the same pool
+    // pointer but have a null code object.
+    __ LoadObject(CODE_REG, Object::null_object());
+    __ set_constant_pool_allowed(false);
+    __ EnterDartFrame(0, PP);
+  }
+
+  // Reserve space for the arguments that go on the stack (if any), then align.
+  __ ReserveAlignedFrameSpace(marshaller_.RequiredStackSpaceInBytes());
+
+  EmitParamMoves(compiler, is_leaf_ ? FPREG : saved_fp_or_sp, temp1);
+
+  if (compiler::Assembler::EmittingComments()) {
+    __ Comment(is_leaf_ ? "Leaf Call" : "Call");
+  }
+
+  if (is_leaf_) {
+#if !defined(PRODUCT)
+    // Set the thread object's top_exit_frame_info and VMTag to enable the
+    // profiler to determine that the thread is no longer executing Dart code.
+    __ StoreToOffset(FPREG, THR,
+                     compiler::target::Thread::top_exit_frame_info_offset());
+    __ StoreToOffset(target, THR, compiler::target::Thread::vm_tag_offset());
+#endif
+
+    EmitCall(compiler, target);
+
+#if !defined(PRODUCT)
+    __ LoadImmediate(temp1, compiler::target::Thread::vm_tag_dart_id());
+    __ StoreToOffset(temp1, THR, compiler::target::Thread::vm_tag_offset());
+    __ StoreToOffset(ZR, THR,
+                     compiler::target::Thread::top_exit_frame_info_offset());
+#endif
+  } else {
+    // We need to copy a dummy return address up into the dummy stack frame so
+    // the stack walker will know which safepoint to use.
+    //
+    // AUIPC loads relative to itself.
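+    // (auipc rd, imm sets rd to pc + (imm << 12), so with an immediate of
+    // zero, temp1 receives the address of the auipc instruction itself.)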
+    compiler->EmitCallsiteMetadata(source(), deopt_id(),
+                                   UntaggedPcDescriptors::Kind::kOther, locs(),
+                                   env());
+    __ auipc(temp1, 0);
+    __ StoreToOffset(temp1, FPREG, kSavedCallerPcSlotFromFp * kWordSize);
+
+    if (CanExecuteGeneratedCodeInSafepoint()) {
+      // Update information in the thread object and enter a safepoint.
+      __ LoadImmediate(temp1, compiler::target::Thread::exit_through_ffi());
+      __ TransitionGeneratedToNative(target, FPREG, temp1,
+                                     /*enter_safepoint=*/true);
+
+      EmitCall(compiler, target);
+
+      // Update information in the thread object and leave the safepoint.
+      __ TransitionNativeToGenerated(temp1, /*leave_safepoint=*/true);
+    } else {
+      // We cannot trust that this code will be executable within a safepoint.
+      // Therefore we delegate the responsibility of entering/exiting the
+      // safepoint to a stub in the VM isolate's heap, which will never
+      // lose execute permission.
+      __ lx(temp1,
+            compiler::Address(
+                THR, compiler::target::Thread::
+                         call_native_through_safepoint_entry_point_offset()));
+
+      // The stub calls the target address in T0 and clobbers the volatile
+      // registers.
+      ASSERT(target == T0);
+      EmitCall(compiler, temp1);
+    }
+
+    // Refresh pinned registers values (inc. write barrier mask and null
+    // object).
+    __ RestorePinnedRegisters();
+  }
+
+  EmitReturnMoves(compiler, temp1, temp2);
+
+  if (is_leaf_) {
+    // Restore the pre-aligned SP.
+    __ mv(SPREG, saved_fp_or_sp);
+  } else {
+    // Although PP is a callee-saved register, it may have been moved by the GC.
+    __ LeaveDartFrame(compiler::kRestoreCallerPP);
+
+    // Restore the global object pool after returning from runtime (old space is
+    // moving, so the GOP could have been relocated).
+    if (FLAG_precompiled_mode) {
+      __ SetupGlobalPoolAndDispatchTable();
+    }
+
+    __ set_constant_pool_allowed(true);
+  }
+}
+
+void FfiCallInstr::EmitCall(FlowGraphCompiler* compiler, Register target) {
+  // Marshall certain argument registers at the last possible moment.
+  // See FfiCallInstr::MakeLocationSummary for the details.
+  if (InputCount() > 2) __ mv(A2, T2);  // A2=CODE_REG
+  if (InputCount() > 3) __ mv(A3, T3);  // A3=TMP
+  if (InputCount() > 4) __ mv(A4, T4);  // A4=TMP2
+  if (InputCount() > 5) __ mv(A5, T5);  // A5=PP
+  __ jalr(target);
+}
+
+// Keep in sync with NativeEntryInstr::EmitNativeCode.
+void NativeReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  EmitReturnMoves(compiler);
+
+  __ LeaveDartFrame();
+
+  // The dummy return address is in RA; unlike on Intel, there is no need to
+  // pop it.
+
+  // These can be anything besides the return registers (A0, A1) and THR (S1).
+  const Register vm_tag_reg = T2;
+  const Register old_exit_frame_reg = T3;
+  const Register old_exit_through_ffi_reg = T4;
+  const Register tmp = T5;
+
+  __ PopRegisterPair(old_exit_frame_reg, old_exit_through_ffi_reg);
+
+  // Restore top_resource.
+  __ PopRegisterPair(tmp, vm_tag_reg);
+  __ StoreToOffset(tmp, THR, compiler::target::Thread::top_resource_offset());
+
+  // Reset the exit frame info to old_exit_frame_reg *before* entering the
+  // safepoint.
+  //
+  // If we were called by a trampoline, it will enter the safepoint on our
+  // behalf.
+  __ TransitionGeneratedToNative(
+      vm_tag_reg, old_exit_frame_reg, old_exit_through_ffi_reg,
+      /*enter_safepoint=*/!NativeCallbackTrampolines::Enabled());
+
+  __ PopNativeCalleeSavedRegisters();
+
+  // Leave the entry frame.
+  __ LeaveFrame();
+
+  // Leave the dummy frame holding the pushed arguments.
+  __ LeaveFrame();
+
+  __ Ret();
+
+  // For the following blocks.
+  __ set_constant_pool_allowed(true);
+}
+
+// Keep in sync with NativeReturnInstr::EmitNativeCode and ComputeInnerLRState.
+void NativeEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  // Constant pool cannot be used until we enter the actual Dart frame.
+  __ set_constant_pool_allowed(false);
+
+  __ Bind(compiler->GetJumpLabel(this));
+
+  // Create a dummy frame holding the pushed arguments. This simplifies
+  // NativeReturnInstr::EmitNativeCode.
+  __ EnterFrame(0);
+
+  // Save the argument registers, in reverse order.
+  SaveArguments(compiler);
+
+  // Enter the entry frame.
+  __ EnterFrame(0);
+
+  // Save a space for the code object.
+  __ PushImmediate(0);
+
+  __ PushNativeCalleeSavedRegisters();
+
+  // Load the thread object. If we were called by a trampoline, the thread is
+  // already loaded.
+  if (FLAG_precompiled_mode) {
+    compiler->LoadBSSEntry(BSS::Relocation::DRT_GetThreadForNativeCallback, A1,
+                           A0);
+  } else if (!NativeCallbackTrampolines::Enabled()) {
+    // In JIT mode, we can just paste the address of the runtime entry into the
+    // generated code directly. This is not a problem since we don't save
+    // callbacks into JIT snapshots.
+    __ LoadImmediate(
+        A1, reinterpret_cast<int64_t>(DLRT_GetThreadForNativeCallback));
+  }
+
+  if (!NativeCallbackTrampolines::Enabled()) {
+    // Create another frame to align the frame before continuing in "native"
+    // code.
+    __ EnterFrame(0);
+    __ ReserveAlignedFrameSpace(0);
+
+    __ LoadImmediate(A0, callback_id_);
+    __ jalr(A1);
+    __ mv(THR, A0);
+
+    __ LeaveFrame();
+  }
+
+#if defined(USING_SHADOW_CALL_STACK)
+#error Unimplemented
+#endif
+
+  // Refresh pinned registers values (inc. write barrier mask and null object).
+  __ RestorePinnedRegisters();
+
+  // Save the current VMTag on the stack.
+  __ LoadFromOffset(TMP, THR, compiler::target::Thread::vm_tag_offset());
+  // Save the top resource.
+  __ LoadFromOffset(A0, THR, compiler::target::Thread::top_resource_offset());
+  __ PushRegisterPair(A0, TMP);
+
+  __ StoreToOffset(ZR, THR, compiler::target::Thread::top_resource_offset());
+
+  __ LoadFromOffset(A0, THR,
+                    compiler::target::Thread::exit_through_ffi_offset());
+  __ PushRegister(A0);
+
+  // Save the top exit frame info. We don't set it to 0 yet:
+  // TransitionNativeToGenerated will handle that.
+  __ LoadFromOffset(A0, THR,
+                    compiler::target::Thread::top_exit_frame_info_offset());
+  __ PushRegister(A0);
+
+  // In debug mode, verify that we've pushed the top exit frame info at the
+  // correct offset from FP.
+  __ EmitEntryFrameVerification();
+
+  // Either DLRT_GetThreadForNativeCallback or the callback trampoline (caller)
+  // will leave the safepoint for us.
+  __ TransitionNativeToGenerated(A0, /*exit_safepoint=*/false);
+
+  // Now that the safepoint has ended, we can touch Dart objects without
+  // handles.
+
+  // Load the code object.
+  __ LoadFromOffset(A0, THR, compiler::target::Thread::callback_code_offset());
+  __ LoadCompressedFieldFromOffset(
+      A0, A0, compiler::target::GrowableObjectArray::data_offset());
+  __ LoadCompressedFieldFromOffset(
+      CODE_REG, A0,
+      compiler::target::Array::data_offset() +
+          callback_id_ * compiler::target::kCompressedWordSize);
+
+  // Put the code object in the reserved slot.
+  __ StoreToOffset(CODE_REG, FPREG,
+                   kPcMarkerSlotFromFp * compiler::target::kWordSize);
+  if (FLAG_precompiled_mode) {
+    __ SetupGlobalPoolAndDispatchTable();
+  } else {
+    // We now load the pool pointer (PP) with a GC-safe value as we are about
+    // to invoke Dart code. We don't need a real object pool here.
+    // Smi zero does not work because PP is assumed to be untagged.
+    __ LoadObject(PP, compiler::NullObject());
+  }
+
+  // Load a GC-safe value for the arguments descriptor (unused but tagged).
+  __ mv(ARGS_DESC_REG, ZR);
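+  // (Zero has a clear Smi tag bit, so it is a valid tagged value for the GC.)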
+
+  // Load a dummy return address which suggests that we are inside of
+  // InvokeDartCodeStub. This is how the stack walker detects an entry frame.
+  __ LoadFromOffset(RA, THR,
+                    compiler::target::Thread::invoke_dart_code_stub_offset());
+  __ LoadFieldFromOffset(RA, RA, compiler::target::Code::entry_point_offset());
+
+  FunctionEntryInstr::EmitNativeCode(compiler);
+}
+
+LocationSummary* OneByteStringFromCharCodeInstr::MakeLocationSummary(
+    Zone* zone,
+    bool opt) const {
+  const intptr_t kNumInputs = 1;
+  // TODO(fschneider): Allow immediate operands for the char code.
+  return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
+                               LocationSummary::kNoCall);
+}
+
+void OneByteStringFromCharCodeInstr::EmitNativeCode(
+    FlowGraphCompiler* compiler) {
+  ASSERT(compiler->is_optimizing());
+  const Register char_code = locs()->in(0).reg();
+  const Register result = locs()->out(0).reg();
+  __ lx(result,
+        compiler::Address(THR, Thread::predefined_symbols_address_offset()));
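+  // char_code holds a Smi (value << kSmiTagSize), so shifting left by
+  // kWordSizeLog2 - kSmiTagSize scales the untagged char code by kWordSize.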
+  __ slli(TMP, char_code, kWordSizeLog2 - kSmiTagSize);
+  __ add(result, result, TMP);
+  __ lx(result, compiler::Address(
+                    result, Symbols::kNullCharCodeSymbolOffset * kWordSize));
+}
+
+LocationSummary* StringToCharCodeInstr::MakeLocationSummary(Zone* zone,
+                                                            bool opt) const {
+  const intptr_t kNumInputs = 1;
+  return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
+                               LocationSummary::kNoCall);
+}
+
+void StringToCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  ASSERT(cid_ == kOneByteStringCid);
+  Register str = locs()->in(0).reg();
+  Register result = locs()->out(0).reg();
+  compiler::Label is_one, done;
+  __ LoadCompressedSmi(result,
+                       compiler::FieldAddress(str, String::length_offset()));
+  __ CompareImmediate(result, Smi::RawValue(1));
+  __ BranchIf(EQUAL, &is_one, compiler::Assembler::kNearJump);
+  __ li(result, Smi::RawValue(-1));
+  __ j(&done, compiler::Assembler::kNearJump);
+  __ Bind(&is_one);
+  __ lbu(result, compiler::FieldAddress(str, OneByteString::data_offset()));
+  __ SmiTag(result);
+  __ Bind(&done);
+}
+
+LocationSummary* Utf8ScanInstr::MakeLocationSummary(Zone* zone,
+                                                    bool opt) const {
+  const intptr_t kNumInputs = 5;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  summary->set_in(0, Location::Any());               // decoder
+  summary->set_in(1, Location::WritableRegister());  // bytes
+  summary->set_in(2, Location::WritableRegister());  // start
+  summary->set_in(3, Location::WritableRegister());  // end
+  summary->set_in(4, Location::WritableRegister());  // table
+  summary->set_out(0, Location::RequiresRegister());
+  return summary;
+}
+
+void Utf8ScanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  const Register bytes_reg = locs()->in(1).reg();
+  const Register start_reg = locs()->in(2).reg();
+  const Register end_reg = locs()->in(3).reg();
+  const Register table_reg = locs()->in(4).reg();
+  const Register size_reg = locs()->out(0).reg();
+
+  const Register bytes_ptr_reg = start_reg;
+  const Register bytes_end_reg = end_reg;
+  const Register flags_reg = bytes_reg;
+  const Register temp_reg = TMP;
+  const Register decoder_temp_reg = start_reg;
+  const Register flags_temp_reg = end_reg;
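+  // The aliases above deliberately reuse input registers whose original
+  // values are dead by the time the alias is written (e.g. decoder_temp_reg
+  // reuses start_reg only after the scan loop has finished with it).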
+
+  static const intptr_t kSizeMask = 0x03;
+  static const intptr_t kFlagsMask = 0x3C;
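+  // Each table entry packs a size contribution into its low two bits
+  // (kSizeMask) and decoder flags into bits 2..5 (kFlagsMask); the loop below
+  // ORs the flags together and accumulates the sizes into size_reg.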
+
+  compiler::Label loop, loop_in;
+
+  // Address of input bytes.
+  __ LoadFieldFromOffset(bytes_reg, bytes_reg,
+                         compiler::target::TypedDataBase::data_field_offset());
+
+  // Table.
+  __ AddImmediate(
+      table_reg, table_reg,
+      compiler::target::OneByteString::data_offset() - kHeapObjectTag);
+
+  // Pointers to start and end.
+  __ add(bytes_ptr_reg, bytes_reg, start_reg);
+  __ add(bytes_end_reg, bytes_reg, end_reg);
+
+  // Initialize size and flags.
+  __ li(size_reg, 0);
+  __ li(flags_reg, 0);
+
+  __ j(&loop_in, compiler::Assembler::kNearJump);
+  __ Bind(&loop);
+
+  // Read byte and increment pointer.
+  __ lbu(temp_reg, compiler::Address(bytes_ptr_reg, 0));
+  __ addi(bytes_ptr_reg, bytes_ptr_reg, 1);
+
+  // Update size and flags based on byte value.
+  __ add(temp_reg, table_reg, temp_reg);
+  __ lbu(temp_reg, compiler::Address(temp_reg));
+  __ or_(flags_reg, flags_reg, temp_reg);
+  __ andi(temp_reg, temp_reg, kSizeMask);
+  __ add(size_reg, size_reg, temp_reg);
+
+  // Stop if end is reached.
+  __ Bind(&loop_in);
+  __ bltu(bytes_ptr_reg, bytes_end_reg, &loop, compiler::Assembler::kNearJump);
+
+  // Write flags to field.
+  __ AndImmediate(flags_reg, flags_reg, kFlagsMask);
+  if (!IsScanFlagsUnboxed()) {
+    __ SmiTag(flags_reg);
+  }
+  Register decoder_reg;
+  const Location decoder_location = locs()->in(0);
+  if (decoder_location.IsStackSlot()) {
+    __ lx(decoder_temp_reg, LocationToStackSlotAddress(decoder_location));
+    decoder_reg = decoder_temp_reg;
+  } else {
+    decoder_reg = decoder_location.reg();
+  }
+  const auto scan_flags_field_offset = scan_flags_field_.offset_in_bytes();
+  if (scan_flags_field_.is_compressed() && !IsScanFlagsUnboxed()) {
+    UNIMPLEMENTED();
+  } else {
+    __ LoadFieldFromOffset(flags_temp_reg, decoder_reg,
+                           scan_flags_field_offset);
+    __ or_(flags_temp_reg, flags_temp_reg, flags_reg);
+    __ StoreFieldToOffset(flags_temp_reg, decoder_reg, scan_flags_field_offset);
+  }
+}
+
+LocationSummary* LoadUntaggedInstr::MakeLocationSummary(Zone* zone,
+                                                        bool opt) const {
+  const intptr_t kNumInputs = 1;
+  return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
+                               LocationSummary::kNoCall);
+}
+
+void LoadUntaggedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  const Register obj = locs()->in(0).reg();
+  const Register result = locs()->out(0).reg();
+  if (object()->definition()->representation() == kUntagged) {
+    __ LoadFromOffset(result, obj, offset());
+  } else {
+    ASSERT(object()->definition()->representation() == kTagged);
+    __ LoadFieldFromOffset(result, obj, offset());
+  }
+}
+
+static bool CanBeImmediateIndex(Value* value, intptr_t cid, bool is_external) {
+  ConstantInstr* constant = value->definition()->AsConstant();
+  if ((constant == NULL) || !constant->value().IsSmi()) {
+    return false;
+  }
+  const int64_t index = Smi::Cast(constant->value()).AsInt64Value();
+  const intptr_t scale = Instance::ElementSizeFor(cid);
+  const int64_t offset =
+      index * scale +
+      (is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag));
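+  // I-type (load) and S-type (store) immediates are both 12-bit signed on
+  // RISC-V, so an offset that fits one also fits the other (hence the ASSERT
+  // below).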
+  if (IsITypeImm(offset)) {
+    ASSERT(IsSTypeImm(offset));
+    return true;
+  }
+  return false;
+}
+
+LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone,
+                                                       bool opt) const {
+  const intptr_t kNumInputs = 2;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* locs = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  locs->set_in(0, Location::RequiresRegister());
+  if (CanBeImmediateIndex(index(), class_id(), IsExternal())) {
+    locs->set_in(1, Location::Constant(index()->definition()->AsConstant()));
+  } else {
+    locs->set_in(1, Location::RequiresRegister());
+  }
+  if ((representation() == kUnboxedDouble) ||
+      (representation() == kUnboxedFloat32x4) ||
+      (representation() == kUnboxedInt32x4) ||
+      (representation() == kUnboxedFloat64x2)) {
+    locs->set_out(0, Location::RequiresFpuRegister());
+#if XLEN == 32
+  } else if (representation() == kUnboxedInt64) {
+    ASSERT(class_id() == kTypedDataInt64ArrayCid ||
+           class_id() == kTypedDataUint64ArrayCid);
+    locs->set_out(0, Location::Pair(Location::RequiresRegister(),
+                                    Location::RequiresRegister()));
+#endif
+  } else {
+    locs->set_out(0, Location::RequiresRegister());
+  }
+  return locs;
+}
+
+void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  // The array register points to the backing store for external arrays.
+  const Register array = locs()->in(0).reg();
+  const Location index = locs()->in(1);
+
+  compiler::Address element_address(TMP);  // Bad address.
+  element_address = index.IsRegister()
+                        ? __ ElementAddressForRegIndex(
+                              IsExternal(), class_id(), index_scale(),
+                              index_unboxed_, array, index.reg(), TMP)
+                        : __ ElementAddressForIntIndex(
+                              IsExternal(), class_id(), index_scale(), array,
+                              Smi::Cast(index.constant()).Value());
+  if ((representation() == kUnboxedDouble) ||
+      (representation() == kUnboxedFloat32x4) ||
+      (representation() == kUnboxedInt32x4) ||
+      (representation() == kUnboxedFloat64x2)) {
+    const FRegister result = locs()->out(0).fpu_reg();
+    switch (class_id()) {
+      case kTypedDataFloat32ArrayCid:
+        // Load single precision float.
+        __ flw(result, element_address);
+        break;
+      case kTypedDataFloat64ArrayCid:
+        // Load double precision float.
+        __ fld(result, element_address);
+        break;
+      case kTypedDataFloat64x2ArrayCid:
+      case kTypedDataInt32x4ArrayCid:
+      case kTypedDataFloat32x4ArrayCid:
+        UNIMPLEMENTED();
+        break;
+      default:
+        UNREACHABLE();
+    }
+    return;
+  }
+
+  switch (class_id()) {
+    case kTypedDataInt32ArrayCid: {
+      ASSERT(representation() == kUnboxedInt32);
+      const Register result = locs()->out(0).reg();
+      __ lw(result, element_address);
+      break;
+    }
+    case kTypedDataUint32ArrayCid: {
+      ASSERT(representation() == kUnboxedUint32);
+      const Register result = locs()->out(0).reg();
+#if XLEN == 32
+      __ lw(result, element_address);
+#else
+      __ lwu(result, element_address);
+#endif
+      break;
+    }
+    case kTypedDataInt64ArrayCid:
+    case kTypedDataUint64ArrayCid: {
+      ASSERT(representation() == kUnboxedInt64);
+#if XLEN == 32
+      ASSERT(locs()->out(0).IsPairLocation());
+      PairLocation* result_pair = locs()->out(0).AsPairLocation();
+      const Register result_lo = result_pair->At(0).reg();
+      const Register result_hi = result_pair->At(1).reg();
+      __ lw(result_lo, element_address);
+      __ lw(result_hi, compiler::Address(element_address.base(),
+                                         element_address.offset() + 4));
+#else
+      const Register result = locs()->out(0).reg();
+      __ ld(result, element_address);
+#endif
+      break;
+    }
+    case kTypedDataInt8ArrayCid: {
+      ASSERT(representation() == kUnboxedIntPtr);
+      ASSERT(index_scale() == 1);
+      const Register result = locs()->out(0).reg();
+      __ lb(result, element_address);
+      break;
+    }
+    case kTypedDataUint8ArrayCid:
+    case kTypedDataUint8ClampedArrayCid:
+    case kExternalTypedDataUint8ArrayCid:
+    case kExternalTypedDataUint8ClampedArrayCid:
+    case kOneByteStringCid:
+    case kExternalOneByteStringCid: {
+      ASSERT(representation() == kUnboxedIntPtr);
+      ASSERT(index_scale() == 1);
+      const Register result = locs()->out(0).reg();
+      __ lbu(result, element_address);
+      break;
+    }
+    case kTypedDataInt16ArrayCid: {
+      ASSERT(representation() == kUnboxedIntPtr);
+      const Register result = locs()->out(0).reg();
+      __ lh(result, element_address);
+      break;
+    }
+    case kTypedDataUint16ArrayCid:
+    case kTwoByteStringCid:
+    case kExternalTwoByteStringCid: {
+      ASSERT(representation() == kUnboxedIntPtr);
+      const Register result = locs()->out(0).reg();
+      __ lhu(result, element_address);
+      break;
+    }
+    default: {
+      ASSERT(representation() == kTagged);
+      ASSERT((class_id() == kArrayCid) || (class_id() == kImmutableArrayCid) ||
+             (class_id() == kTypeArgumentsCid));
+      const Register result = locs()->out(0).reg();
+      __ lx(result, element_address);
+      break;
+    }
+  }
+}
+
+LocationSummary* LoadCodeUnitsInstr::MakeLocationSummary(Zone* zone,
+                                                         bool opt) const {
+  const intptr_t kNumInputs = 2;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  summary->set_in(0, Location::RequiresRegister());
+  summary->set_in(1, Location::RequiresRegister());
+#if XLEN == 32
+  if (representation() == kUnboxedInt64) {
+    summary->set_out(0, Location::Pair(Location::RequiresRegister(),
+                                       Location::RequiresRegister()));
+  } else {
+    ASSERT(representation() == kTagged);
+    summary->set_out(0, Location::RequiresRegister());
+  }
+#else
+  summary->set_out(0, Location::RequiresRegister());
+#endif
+  return summary;
+}
+
+void LoadCodeUnitsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  // The string register points to the backing store for external strings.
+  const Register str = locs()->in(0).reg();
+  const Location index = locs()->in(1);
+  compiler::OperandSize sz = compiler::kByte;
+
+#if XLEN == 32
+  if (representation() == kUnboxedInt64) {
+    ASSERT(compiler->is_optimizing());
+    ASSERT(locs()->out(0).IsPairLocation());
+    UNIMPLEMENTED();
+  }
+#endif
+
+  Register result = locs()->out(0).reg();
+  switch (class_id()) {
+    case kOneByteStringCid:
+    case kExternalOneByteStringCid:
+      switch (element_count()) {
+        case 1:
+          sz = compiler::kUnsignedByte;
+          break;
+        case 2:
+          sz = compiler::kUnsignedTwoBytes;
+          break;
+        case 4:
+          sz = compiler::kUnsignedFourBytes;
+          break;
+        default:
+          UNREACHABLE();
+      }
+      break;
+    case kTwoByteStringCid:
+    case kExternalTwoByteStringCid:
+      switch (element_count()) {
+        case 1:
+          sz = compiler::kUnsignedTwoBytes;
+          break;
+        case 2:
+          sz = compiler::kUnsignedFourBytes;
+          break;
+        default:
+          UNREACHABLE();
+      }
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  // Warning: element_address may use register TMP as base.
+  compiler::Address element_address = __ ElementAddressForRegIndexWithSize(
+      IsExternal(), class_id(), sz, index_scale(), /*index_unboxed=*/false, str,
+      index.reg(), TMP);
+  switch (sz) {
+    case compiler::kUnsignedByte:
+      __ lbu(result, element_address);
+      break;
+    case compiler::kUnsignedTwoBytes:
+      __ lhu(result, element_address);
+      break;
+    case compiler::kUnsignedFourBytes:
+#if XLEN == 32
+      __ lw(result, element_address);
+#else
+      __ lwu(result, element_address);
+#endif
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  ASSERT(can_pack_into_smi());
+  __ SmiTag(result);
+}
+
+LocationSummary* StoreIndexedInstr::MakeLocationSummary(Zone* zone,
+                                                        bool opt) const {
+  const intptr_t kNumInputs = 3;
+  const intptr_t kNumTemps = 1;
+  LocationSummary* locs = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  locs->set_in(0, Location::RequiresRegister());
+  if (CanBeImmediateIndex(index(), class_id(), IsExternal())) {
+    locs->set_in(1, Location::Constant(index()->definition()->AsConstant()));
+  } else {
+    locs->set_in(1, Location::RequiresRegister());
+  }
+  locs->set_temp(0, Location::RequiresRegister());
+
+  switch (class_id()) {
+    case kArrayCid:
+      locs->set_in(2, ShouldEmitStoreBarrier()
+                          ? Location::RegisterLocation(kWriteBarrierValueReg)
+                          : LocationRegisterOrConstant(value()));
+      if (ShouldEmitStoreBarrier()) {
+        locs->set_in(0, Location::RegisterLocation(kWriteBarrierObjectReg));
+        locs->set_temp(0, Location::RegisterLocation(kWriteBarrierSlotReg));
+      }
+      break;
+    case kExternalTypedDataUint8ArrayCid:
+    case kExternalTypedDataUint8ClampedArrayCid:
+    case kTypedDataInt8ArrayCid:
+    case kTypedDataUint8ArrayCid:
+    case kTypedDataUint8ClampedArrayCid:
+    case kOneByteStringCid:
+    case kTwoByteStringCid:
+    case kTypedDataInt16ArrayCid:
+    case kTypedDataUint16ArrayCid:
+    case kTypedDataInt32ArrayCid:
+    case kTypedDataUint32ArrayCid:
+      locs->set_in(2, Location::RequiresRegister());
+      break;
+    case kTypedDataInt64ArrayCid:
+    case kTypedDataUint64ArrayCid:
+#if XLEN == 32
+      locs->set_in(2, Location::Pair(Location::RequiresRegister(),
+                                     Location::RequiresRegister()));
+#else
+      locs->set_in(2, Location::RequiresRegister());
+#endif
+      break;
+    case kTypedDataFloat32ArrayCid:
+    case kTypedDataFloat64ArrayCid:  // TODO(srdjan): Support Float64 constants.
+      locs->set_in(2, Location::RequiresFpuRegister());
+      break;
+    case kTypedDataInt32x4ArrayCid:
+    case kTypedDataFloat32x4ArrayCid:
+    case kTypedDataFloat64x2ArrayCid:
+      locs->set_in(2, Location::RequiresFpuRegister());
+      break;
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+  return locs;
+}
+
+void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  // The array register points to the backing store for external arrays.
+  const Register array = locs()->in(0).reg();
+  const Location index = locs()->in(1);
+  const Register temp = locs()->temp(0).reg();
+  compiler::Address element_address(TMP);  // Bad address.
+
+  // Deal with a special case separately.
+  if (class_id() == kArrayCid && ShouldEmitStoreBarrier()) {
+    if (index.IsRegister()) {
+      __ ComputeElementAddressForRegIndex(temp, IsExternal(), class_id(),
+                                          index_scale(), index_unboxed_, array,
+                                          index.reg());
+    } else {
+      __ ComputeElementAddressForIntIndex(temp, IsExternal(), class_id(),
+                                          index_scale(), array,
+                                          Smi::Cast(index.constant()).Value());
+    }
+    const Register value = locs()->in(2).reg();
+    __ StoreIntoArray(array, temp, value, CanValueBeSmi());
+    return;
+  }
+
+  element_address = index.IsRegister()
+                        ? __ ElementAddressForRegIndex(
+                              IsExternal(), class_id(), index_scale(),
+                              index_unboxed_, array, index.reg(), temp)
+                        : __ ElementAddressForIntIndex(
+                              IsExternal(), class_id(), index_scale(), array,
+                              Smi::Cast(index.constant()).Value());
+
+  switch (class_id()) {
+    case kArrayCid:
+      ASSERT(!ShouldEmitStoreBarrier());  // Specially treated above.
+      if (locs()->in(2).IsConstant()) {
+        const Object& constant = locs()->in(2).constant();
+        __ StoreIntoObjectNoBarrier(array, element_address, constant);
+      } else {
+        const Register value = locs()->in(2).reg();
+        __ StoreIntoObjectNoBarrier(array, element_address, value);
+      }
+      break;
+    case kTypedDataInt8ArrayCid:
+    case kTypedDataUint8ArrayCid:
+    case kExternalTypedDataUint8ArrayCid:
+    case kOneByteStringCid: {
+      ASSERT(RequiredInputRepresentation(2) == kUnboxedIntPtr);
+      if (locs()->in(2).IsConstant()) {
+        const Smi& constant = Smi::Cast(locs()->in(2).constant());
+        __ LoadImmediate(TMP, static_cast<int8_t>(constant.Value()));
+        __ sb(TMP, element_address);
+      } else {
+        const Register value = locs()->in(2).reg();
+        __ sb(value, element_address);
+      }
+      break;
+    }
+    case kTypedDataUint8ClampedArrayCid:
+    case kExternalTypedDataUint8ClampedArrayCid: {
+      ASSERT(RequiredInputRepresentation(2) == kUnboxedIntPtr);
+      if (locs()->in(2).IsConstant()) {
+        const Smi& constant = Smi::Cast(locs()->in(2).constant());
+        intptr_t value = constant.Value();
+        // Clamp to 0x0 or 0xFF respectively.
+        if (value > 0xFF) {
+          value = 0xFF;
+        } else if (value < 0) {
+          value = 0;
+        }
+        __ LoadImmediate(TMP, static_cast<int8_t>(value));
+        __ sb(TMP, element_address);
+      } else {
+        const Register value = locs()->in(2).reg();
+
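+        // Clamp value into [0, 0xFF]: negative values store zero and values
+        // above 0xFF store 0xFF. Note that store_zero sets TMP to zero and
+        // then falls through into store_ff, which stores TMP.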
+        compiler::Label store_zero, store_ff, done;
+        __ blt(value, ZR, &store_zero, compiler::Assembler::kNearJump);
+
+        __ li(TMP, 0xFF);
+        __ bgt(value, TMP, &store_ff, compiler::Assembler::kNearJump);
+
+        __ sb(value, element_address);
+        __ j(&done, compiler::Assembler::kNearJump);
+
+        __ Bind(&store_zero);
+        __ mv(TMP, ZR);
+
+        __ Bind(&store_ff);
+        __ sb(TMP, element_address);
+
+        __ Bind(&done);
+      }
+      break;
+    }
+    case kTwoByteStringCid:
+    case kTypedDataInt16ArrayCid:
+    case kTypedDataUint16ArrayCid: {
+      ASSERT(RequiredInputRepresentation(2) == kUnboxedIntPtr);
+      const Register value = locs()->in(2).reg();
+      __ sh(value, element_address);
+      break;
+    }
+    case kTypedDataInt32ArrayCid:
+    case kTypedDataUint32ArrayCid: {
+      const Register value = locs()->in(2).reg();
+      __ sw(value, element_address);
+      break;
+    }
+    case kTypedDataInt64ArrayCid:
+    case kTypedDataUint64ArrayCid: {
+#if XLEN >= 64
+      const Register value = locs()->in(2).reg();
+      __ sd(value, element_address);
+#else
+      PairLocation* value_pair = locs()->in(2).AsPairLocation();
+      Register value_lo = value_pair->At(0).reg();
+      Register value_hi = value_pair->At(1).reg();
+      __ sw(value_lo, element_address);
+      __ sw(value_hi, compiler::Address(element_address.base(),
+                                        element_address.offset() + 4));
+#endif
+      break;
+    }
+    case kTypedDataFloat32ArrayCid: {
+      const FRegister value_reg = locs()->in(2).fpu_reg();
+      __ fsw(value_reg, element_address);
+      break;
+    }
+    case kTypedDataFloat64ArrayCid: {
+      const FRegister value_reg = locs()->in(2).fpu_reg();
+      __ fsd(value_reg, element_address);
+      break;
+    }
+    case kTypedDataFloat64x2ArrayCid:
+    case kTypedDataInt32x4ArrayCid:
+    case kTypedDataFloat32x4ArrayCid: {
+      UNIMPLEMENTED();
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
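+// Loads the class id of value_reg into value_cid_reg. For Smi values, jumps
+// to value_is_smi when provided; otherwise leaves kSmiCid in value_cid_reg.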
+static void LoadValueCid(FlowGraphCompiler* compiler,
+                         Register value_cid_reg,
+                         Register value_reg,
+                         compiler::Label* value_is_smi = NULL) {
+  compiler::Label done;
+  if (value_is_smi == NULL) {
+    __ LoadImmediate(value_cid_reg, kSmiCid);
+  }
+  __ BranchIfSmi(value_reg, value_is_smi == NULL ? &done : value_is_smi);
+  __ LoadClassId(value_cid_reg, value_reg);
+  __ Bind(&done);
+}
+
+DEFINE_UNIMPLEMENTED_INSTRUCTION(GuardFieldTypeInstr)
+DEFINE_UNIMPLEMENTED_INSTRUCTION(CheckConditionInstr)
+
+LocationSummary* GuardFieldClassInstr::MakeLocationSummary(Zone* zone,
+                                                           bool opt) const {
+  const intptr_t kNumInputs = 1;
+
+  const intptr_t value_cid = value()->Type()->ToCid();
+  const intptr_t field_cid = field().guarded_cid();
+
+  const bool emit_full_guard = !opt || (field_cid == kIllegalCid);
+
+  const bool needs_value_cid_temp_reg =
+      emit_full_guard || ((value_cid == kDynamicCid) && (field_cid != kSmiCid));
+
+  const bool needs_field_temp_reg = emit_full_guard;
+
+  intptr_t num_temps = 0;
+  if (needs_value_cid_temp_reg) {
+    num_temps++;
+  }
+  if (needs_field_temp_reg) {
+    num_temps++;
+  }
+
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, num_temps, LocationSummary::kNoCall);
+  summary->set_in(0, Location::RequiresRegister());
+
+  for (intptr_t i = 0; i < num_temps; i++) {
+    summary->set_temp(i, Location::RequiresRegister());
+  }
+
+  return summary;
+}
+
+void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 16);
+  ASSERT(sizeof(UntaggedField::guarded_cid_) == 2);
+  ASSERT(sizeof(UntaggedField::is_nullable_) == 2);
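+  // These sizes justify the halfword (lhu/sh) accesses to guarded_cid_ and
+  // is_nullable_ below.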
+
+  const intptr_t value_cid = value()->Type()->ToCid();
+  const intptr_t field_cid = field().guarded_cid();
+  const intptr_t nullability = field().is_nullable() ? kNullCid : kIllegalCid;
+
+  if (field_cid == kDynamicCid) {
+    return;  // Nothing to emit.
+  }
+
+  const bool emit_full_guard =
+      !compiler->is_optimizing() || (field_cid == kIllegalCid);
+
+  const bool needs_value_cid_temp_reg =
+      emit_full_guard || ((value_cid == kDynamicCid) && (field_cid != kSmiCid));
+
+  const bool needs_field_temp_reg = emit_full_guard;
+
+  const Register value_reg = locs()->in(0).reg();
+
+  const Register value_cid_reg =
+      needs_value_cid_temp_reg ? locs()->temp(0).reg() : kNoRegister;
+
+  const Register field_reg = needs_field_temp_reg
+                                 ? locs()->temp(locs()->temp_count() - 1).reg()
+                                 : kNoRegister;
+
+  compiler::Label ok, fail_label;
+
+  compiler::Label* deopt =
+      compiler->is_optimizing()
+          ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField)
+          : NULL;
+
+  compiler::Label* fail = (deopt != NULL) ? deopt : &fail_label;
+
+  if (emit_full_guard) {
+    __ LoadObject(field_reg, Field::ZoneHandle((field().Original())));
+
+    compiler::FieldAddress field_cid_operand(field_reg,
+                                             Field::guarded_cid_offset());
+    compiler::FieldAddress field_nullability_operand(
+        field_reg, Field::is_nullable_offset());
+
+    if (value_cid == kDynamicCid) {
+      LoadValueCid(compiler, value_cid_reg, value_reg);
+      compiler::Label skip_length_check;
+      __ lhu(TMP, field_cid_operand);
+      __ CompareRegisters(value_cid_reg, TMP);
+      __ BranchIf(EQ, &ok);
+      __ lhu(TMP, field_nullability_operand);
+      __ CompareRegisters(value_cid_reg, TMP);
+    } else if (value_cid == kNullCid) {
+      __ lhu(value_cid_reg, field_nullability_operand);
+      __ CompareImmediate(value_cid_reg, value_cid);
+    } else {
+      compiler::Label skip_length_check;
+      __ lhu(value_cid_reg, field_cid_operand);
+      __ CompareImmediate(value_cid_reg, value_cid);
+    }
+    __ BranchIf(EQ, &ok);
+
+    // Check if the tracked state of the guarded field can be initialized
+    // inline. If the field needs a length check, we fall through to the
+    // runtime, which is responsible for computing the offset of the length
+    // field based on the class id.
+    // The length guard will be emitted separately when needed via the
+    // GuardFieldLength instruction after GuardFieldClass.
+    if (!field().needs_length_check()) {
+      // An uninitialized field can be handled inline. Check if the
+      // field is still uninitialized.
+      __ lhu(TMP, field_cid_operand);
+      __ CompareImmediate(TMP, kIllegalCid);
+      __ BranchIf(NE, fail);
+
+      if (value_cid == kDynamicCid) {
+        __ sh(value_cid_reg, field_cid_operand);
+        __ sh(value_cid_reg, field_nullability_operand);
+      } else {
+        __ LoadImmediate(TMP, value_cid);
+        __ sh(TMP, field_cid_operand);
+        __ sh(TMP, field_nullability_operand);
+      }
+
+      __ j(&ok);
+    }
+
+    if (deopt == NULL) {
+      __ Bind(fail);
+
+      __ LoadFieldFromOffset(TMP, field_reg, Field::guarded_cid_offset(),
+                             compiler::kUnsignedTwoBytes);
+      __ CompareImmediate(TMP, kDynamicCid);
+      __ BranchIf(EQ, &ok);
+
+      __ PushRegisterPair(value_reg, field_reg);
+      ASSERT(!compiler->is_optimizing());  // No deopt info needed.
+      __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
+      __ Drop(2);  // Drop the field and the value.
+    } else {
+      __ j(fail);
+    }
+  } else {
+    ASSERT(compiler->is_optimizing());
+    ASSERT(deopt != NULL);
+
+    // Field guard class has been initialized and is known.
+    if (value_cid == kDynamicCid) {
+      // Value's class id is not known.
+      __ TestImmediate(value_reg, kSmiTagMask);
+
+      if (field_cid != kSmiCid) {
+        __ BranchIf(EQ, fail);
+        __ LoadClassId(value_cid_reg, value_reg);
+        __ CompareImmediate(value_cid_reg, field_cid);
+      }
+
+      if (field().is_nullable() && (field_cid != kNullCid)) {
+        __ BranchIf(EQ, &ok);
+        __ CompareObject(value_reg, Object::null_object());
+      }
+
+      __ BranchIf(NE, fail);
+    } else if (value_cid == field_cid) {
+      // This would normally be caught by Canonicalize, but RemoveRedefinitions
+      // may sometimes produce this situation after the last Canonicalize pass.
+    } else {
+      // Both value's and field's class id is known.
+      ASSERT(value_cid != nullability);
+      __ j(fail);
+    }
+  }
+  __ Bind(&ok);
+}
+
+LocationSummary* GuardFieldLengthInstr::MakeLocationSummary(Zone* zone,
+                                                            bool opt) const {
+  const intptr_t kNumInputs = 1;
+  if (!opt || (field().guarded_list_length() == Field::kUnknownFixedLength)) {
+    const intptr_t kNumTemps = 3;
+    LocationSummary* summary = new (zone)
+        LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+    summary->set_in(0, Location::RequiresRegister());
+    // We need temporaries for field object, length offset and expected length.
+    summary->set_temp(0, Location::RequiresRegister());
+    summary->set_temp(1, Location::RequiresRegister());
+    summary->set_temp(2, Location::RequiresRegister());
+    return summary;
+  } else {
+    LocationSummary* summary = new (zone)
+        LocationSummary(zone, kNumInputs, 0, LocationSummary::kNoCall);
+    summary->set_in(0, Location::RequiresRegister());
+    return summary;
+  }
+  UNREACHABLE();
+}
+
+void GuardFieldLengthInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  if (field().guarded_list_length() == Field::kNoFixedLength) {
+    return;  // Nothing to emit.
+  }
+
+  compiler::Label* deopt =
+      compiler->is_optimizing()
+          ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField)
+          : NULL;
+
+  const Register value_reg = locs()->in(0).reg();
+
+  if (!compiler->is_optimizing() ||
+      (field().guarded_list_length() == Field::kUnknownFixedLength)) {
+    const Register field_reg = locs()->temp(0).reg();
+    const Register offset_reg = locs()->temp(1).reg();
+    const Register length_reg = locs()->temp(2).reg();
+
+    compiler::Label ok;
+
+    __ LoadObject(field_reg, Field::ZoneHandle(field().Original()));
+
+    __ lb(offset_reg,
+          compiler::FieldAddress(
+              field_reg, Field::guarded_list_length_in_object_offset_offset()));
+    __ LoadCompressed(
+        length_reg,
+        compiler::FieldAddress(field_reg, Field::guarded_list_length_offset()));
+
+    __ bltz(offset_reg, &ok, compiler::Assembler::kNearJump);
+
+    // Load the length from the value. GuardFieldClass already verified that
+    // the value's class matches the guarded class id of the field.
+    // offset_reg contains the offset already corrected by -kHeapObjectTag,
+    // which is why we use Address instead of FieldAddress.
+    __ add(TMP, value_reg, offset_reg);
+    __ lx(TMP, compiler::Address(TMP, 0));
+    __ CompareObjectRegisters(length_reg, TMP);
+
+    if (deopt == NULL) {
+      __ BranchIf(EQ, &ok);
+
+      __ PushRegisterPair(value_reg, field_reg);
+      ASSERT(!compiler->is_optimizing());  // No deopt info needed.
+      __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
+      __ Drop(2);  // Drop the field and the value.
+    } else {
+      __ BranchIf(NE, deopt);
+    }
+
+    __ Bind(&ok);
+  } else {
+    ASSERT(compiler->is_optimizing());
+    ASSERT(field().guarded_list_length() >= 0);
+    ASSERT(field().guarded_list_length_in_object_offset() !=
+           Field::kUnknownLengthOffset);
+
+    __ lx(TMP, compiler::FieldAddress(
+                   value_reg, field().guarded_list_length_in_object_offset()));
+    __ CompareImmediate(TMP, Smi::RawValue(field().guarded_list_length()));
+    __ BranchIf(NE, deopt);
+  }
+}
+
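+// Loads the box stored in the field at 'offset' into box_reg. If the field
+// still contains null, allocates a fresh box via BoxAllocationSlowPath and
+// stores it back into the instance.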
+static void EnsureMutableBox(FlowGraphCompiler* compiler,
+                             StoreInstanceFieldInstr* instruction,
+                             Register box_reg,
+                             const Class& cls,
+                             Register instance_reg,
+                             intptr_t offset,
+                             Register temp) {
+  compiler::Label done;
+  __ LoadCompressedFieldFromOffset(box_reg, instance_reg, offset);
+  __ CompareObject(box_reg, Object::null_object());
+  __ BranchIf(NE, &done);
+  BoxAllocationSlowPath::Allocate(compiler, instruction, cls, box_reg, temp);
+  __ MoveRegister(temp, box_reg);
+  __ StoreCompressedIntoObjectOffset(instance_reg, offset, temp,
+                                     compiler::Assembler::kValueIsNotSmi);
+  __ Bind(&done);
+}
+
+LocationSummary* StoreInstanceFieldInstr::MakeLocationSummary(Zone* zone,
+                                                              bool opt) const {
+  const intptr_t kNumInputs = 2;
+  const intptr_t kNumTemps = (IsUnboxedDartFieldStore() && opt)
+                                 ? (FLAG_precompiled_mode ? 0 : 2)
+                                 : (IsPotentialUnboxedDartFieldStore() ? 2 : 0);
+  LocationSummary* summary = new (zone) LocationSummary(
+      zone, kNumInputs, kNumTemps,
+      (!FLAG_precompiled_mode &&
+       ((IsUnboxedDartFieldStore() && opt && is_initialization()) ||
+        IsPotentialUnboxedDartFieldStore()))
+          ? LocationSummary::kCallOnSlowPath
+          : LocationSummary::kNoCall);
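+  // kCallOnSlowPath is used when the store may need to allocate a mutable
+  // box in JIT mode (see the BoxAllocationSlowPath calls below).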
+
+  summary->set_in(kInstancePos, Location::RequiresRegister());
+  if (slot().representation() != kTagged) {
+    ASSERT(RepresentationUtils::IsUnboxedInteger(slot().representation()));
+    const size_t value_size =
+        RepresentationUtils::ValueSize(slot().representation());
+    if (value_size <= compiler::target::kWordSize) {
+      summary->set_in(kValuePos, Location::RequiresRegister());
+    } else {
+#if XLEN == 32
+      ASSERT(value_size <= 2 * compiler::target::kWordSize);
+      summary->set_in(kValuePos, Location::Pair(Location::RequiresRegister(),
+                                                Location::RequiresRegister()));
+#else
+      UNREACHABLE();
+#endif
+    }
+  } else if (IsUnboxedDartFieldStore() && opt) {
+    summary->set_in(kValuePos, Location::RequiresFpuRegister());
+    if (!FLAG_precompiled_mode) {
+      summary->set_temp(0, Location::RequiresRegister());
+      summary->set_temp(1, Location::RequiresRegister());
+    }
+  } else if (IsPotentialUnboxedDartFieldStore()) {
+    summary->set_in(kValuePos, ShouldEmitStoreBarrier()
+                                   ? Location::WritableRegister()
+                                   : Location::RequiresRegister());
+    summary->set_temp(0, Location::RequiresRegister());
+    summary->set_temp(1, Location::RequiresRegister());
+  } else {
+    summary->set_in(kValuePos,
+                    ShouldEmitStoreBarrier()
+                        ? Location::RegisterLocation(kWriteBarrierValueReg)
+                        : LocationRegisterOrConstant(value()));
+  }
+  return summary;
+}
+
+void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 16);
+  ASSERT(sizeof(UntaggedField::guarded_cid_) == 2);
+  ASSERT(sizeof(UntaggedField::is_nullable_) == 2);
+
+  compiler::Label skip_store;
+
+  const Register instance_reg = locs()->in(kInstancePos).reg();
+  const intptr_t offset_in_bytes = OffsetInBytes();
+  ASSERT(offset_in_bytes > 0);  // Field is finalized and points after header.
+
+  if (slot().representation() != kTagged) {
+    ASSERT(memory_order_ != compiler::AssemblerBase::kRelease);
+    auto const rep = slot().representation();
+    ASSERT(RepresentationUtils::IsUnboxedInteger(rep));
+    const size_t value_size = RepresentationUtils::ValueSize(rep);
+    __ Comment("NativeUnboxedStoreInstanceFieldInstr");
+    if (value_size <= compiler::target::kWordSize) {
+      const Register value = locs()->in(kValuePos).reg();
+      __ StoreFieldToOffset(value, instance_reg, offset_in_bytes,
+                            RepresentationUtils::OperandSize(rep));
+    } else {
+#if XLEN == 32
+      auto const in_pair = locs()->in(kValuePos).AsPairLocation();
+      const Register in_lo = in_pair->At(0).reg();
+      const Register in_hi = in_pair->At(1).reg();
+      const intptr_t offset_lo = OffsetInBytes() - kHeapObjectTag;
+      const intptr_t offset_hi = offset_lo + compiler::target::kWordSize;
+      __ StoreToOffset(in_lo, instance_reg, offset_lo);
+      __ StoreToOffset(in_hi, instance_reg, offset_hi);
+#else
+      UNREACHABLE();
+#endif
+    }
+    return;
+  }
+
+  if (IsUnboxedDartFieldStore() && compiler->is_optimizing()) {
+    ASSERT(memory_order_ != compiler::AssemblerBase::kRelease);
+    const FRegister value = locs()->in(kValuePos).fpu_reg();
+    const intptr_t cid = slot().field().UnboxedFieldCid();
+
+    if (FLAG_precompiled_mode) {
+      switch (cid) {
+        case kDoubleCid:
+          __ Comment("UnboxedDoubleStoreInstanceFieldInstr");
+          __ StoreDFieldToOffset(value, instance_reg, offset_in_bytes);
+          return;
+        case kFloat32x4Cid:
+          __ Comment("UnboxedFloat32x4StoreInstanceFieldInstr");
+          UNIMPLEMENTED();
+          return;
+        case kFloat64x2Cid:
+          __ Comment("UnboxedFloat64x2StoreInstanceFieldInstr");
+          UNIMPLEMENTED();
+          return;
+        default:
+          UNREACHABLE();
+      }
+    }
+
+    const Register temp = locs()->temp(0).reg();
+    const Register temp2 = locs()->temp(1).reg();
+
+    if (is_initialization()) {
+      const Class* cls = NULL;
+      switch (cid) {
+        case kDoubleCid:
+          cls = &compiler->double_class();
+          break;
+        case kFloat32x4Cid:
+          cls = &compiler->float32x4_class();
+          break;
+        case kFloat64x2Cid:
+          cls = &compiler->float64x2_class();
+          break;
+        default:
+          UNREACHABLE();
+      }
+
+      BoxAllocationSlowPath::Allocate(compiler, this, *cls, temp, temp2);
+      __ MoveRegister(temp2, temp);
+      __ StoreCompressedIntoObjectOffset(instance_reg, offset_in_bytes, temp2,
+                                         compiler::Assembler::kValueIsNotSmi);
+    } else {
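+      // Not an initializing store: load the existing box and overwrite its
+      // unboxed payload below, avoiding a new allocation.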
+      __ LoadCompressedFieldFromOffset(temp, instance_reg, offset_in_bytes);
+    }
+    switch (cid) {
+      case kDoubleCid:
+        __ Comment("UnboxedDoubleStoreInstanceFieldInstr");
+        __ StoreDFieldToOffset(value, temp, Double::value_offset());
+        break;
+      case kFloat32x4Cid:
+        __ Comment("UnboxedFloat32x4StoreInstanceFieldInstr");
+        UNIMPLEMENTED();
+        break;
+      case kFloat64x2Cid:
+        __ Comment("UnboxedFloat64x2StoreInstanceFieldInstr");
+        UNIMPLEMENTED();
+        break;
+      default:
+        UNREACHABLE();
+    }
+
+    return;
+  }
+
+  if (IsPotentialUnboxedDartFieldStore()) {
+    ASSERT(memory_order_ != compiler::AssemblerBase::kRelease);
+    const Register value_reg = locs()->in(kValuePos).reg();
+    const Register temp = locs()->temp(0).reg();
+    const Register temp2 = locs()->temp(1).reg();
+
+    if (ShouldEmitStoreBarrier()) {
+      // Value input is a writable register and should be manually preserved
+      // across allocation slow-path.
+      locs()->live_registers()->Add(locs()->in(kValuePos), kTagged);
+    }
+
+    compiler::Label store_pointer;
+    compiler::Label store_double;
+    compiler::Label store_float32x4;
+    compiler::Label store_float64x2;
+
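+    // Dispatch on the field's runtime guard state: nullable fields and
+    // non-unboxing-candidates take the tagged store; a double-guarded field
+    // stores into a mutable box.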
+    __ LoadObject(temp, Field::ZoneHandle(Z, slot().field().Original()));
+
+    __ LoadFieldFromOffset(temp2, temp, Field::is_nullable_offset(),
+                           compiler::kUnsignedTwoBytes);
+    __ CompareImmediate(temp2, kNullCid);
+    __ BranchIf(EQ, &store_pointer);
+
+    __ LoadFromOffset(temp2, temp, Field::kind_bits_offset() - kHeapObjectTag,
+                      compiler::kUnsignedByte);
+    __ TestImmediate(temp2, 1 << Field::kUnboxingCandidateBit);
+    __ BranchIf(EQ, &store_pointer);
+
+    __ LoadFieldFromOffset(temp2, temp, Field::guarded_cid_offset(),
+                           compiler::kUnsignedTwoBytes);
+    __ CompareImmediate(temp2, kDoubleCid);
+    __ BranchIf(EQ, &store_double);
+
+    // Fall through to the tagged pointer store.
+    __ j(&store_pointer);
+
+    if (!compiler->is_optimizing()) {
+      locs()->live_registers()->Add(locs()->in(kInstancePos));
+      locs()->live_registers()->Add(locs()->in(kValuePos));
+    }
+
+    {
+      __ Bind(&store_double);
+      EnsureMutableBox(compiler, this, temp, compiler->double_class(),
+                       instance_reg, offset_in_bytes, temp2);
+      __ LoadDFieldFromOffset(FTMP, value_reg, Double::value_offset());
+      __ StoreDFieldToOffset(FTMP, temp, Double::value_offset());
+      __ j(&skip_store);
+    }
+
+    __ Bind(&store_pointer);
+  }
+
+  const bool compressed = slot().is_compressed();
+  if (ShouldEmitStoreBarrier()) {
+    const Register value_reg = locs()->in(kValuePos).reg();
+    if (!compressed) {
+      __ StoreIntoObjectOffset(instance_reg, offset_in_bytes, value_reg,
+                               CanValueBeSmi(), memory_order_);
+    } else {
+      __ StoreCompressedIntoObjectOffset(instance_reg, offset_in_bytes,
+                                         value_reg, CanValueBeSmi(),
+                                         memory_order_);
+    }
+  } else {
+    if (locs()->in(kValuePos).IsConstant()) {
+      const auto& value = locs()->in(kValuePos).constant();
+      if (!compressed) {
+        __ StoreIntoObjectOffsetNoBarrier(instance_reg, offset_in_bytes, value,
+                                          memory_order_);
+      } else {
+        __ StoreCompressedIntoObjectOffsetNoBarrier(
+            instance_reg, offset_in_bytes, value, memory_order_);
+      }
+    } else {
+      const Register value_reg = locs()->in(kValuePos).reg();
+      if (!compressed) {
+        __ StoreIntoObjectOffsetNoBarrier(instance_reg, offset_in_bytes,
+                                          value_reg, memory_order_);
+      } else {
+        __ StoreCompressedIntoObjectOffsetNoBarrier(
+            instance_reg, offset_in_bytes, value_reg, memory_order_);
+      }
+    }
+  }
+  __ Bind(&skip_store);
+}
+
+LocationSummary* StoreStaticFieldInstr::MakeLocationSummary(Zone* zone,
+                                                            bool opt) const {
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* locs = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  locs->set_in(0, Location::RequiresRegister());
+  return locs;
+}
+
+void StoreStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  const Register value = locs()->in(0).reg();
+
+  compiler->used_static_fields().Add(&field());
+
+  __ LoadFromOffset(TMP, THR,
+                    compiler::target::Thread::field_table_values_offset());
+  // Note: static field ids won't be changed by hot-reload.
+  __ StoreToOffset(value, TMP, compiler::target::FieldTable::OffsetOf(field()));
+}
+
+LocationSummary* InstanceOfInstr::MakeLocationSummary(Zone* zone,
+                                                      bool opt) const {
+  const intptr_t kNumInputs = 3;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
+  summary->set_in(0, Location::RegisterLocation(TypeTestABI::kInstanceReg));
+  summary->set_in(1, Location::RegisterLocation(
+                         TypeTestABI::kInstantiatorTypeArgumentsReg));
+  summary->set_in(
+      2, Location::RegisterLocation(TypeTestABI::kFunctionTypeArgumentsReg));
+  summary->set_out(
+      0, Location::RegisterLocation(TypeTestABI::kInstanceOfResultReg));
+  return summary;
+}
+
+void InstanceOfInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  ASSERT(locs()->in(0).reg() == TypeTestABI::kInstanceReg);
+  ASSERT(locs()->in(1).reg() == TypeTestABI::kInstantiatorTypeArgumentsReg);
+  ASSERT(locs()->in(2).reg() == TypeTestABI::kFunctionTypeArgumentsReg);
+
+  compiler->GenerateInstanceOf(source(), deopt_id(), env(), type(), locs());
+  ASSERT(locs()->out(0).reg() == TypeTestABI::kInstanceOfResultReg);
+}
+
+LocationSummary* CreateArrayInstr::MakeLocationSummary(Zone* zone,
+                                                       bool opt) const {
+  const intptr_t kNumInputs = 2;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* locs = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
+  locs->set_in(kTypeArgumentsPos,
+               Location::RegisterLocation(AllocateArrayABI::kTypeArgumentsReg));
+  locs->set_in(kLengthPos,
+               Location::RegisterLocation(AllocateArrayABI::kLengthReg));
+  locs->set_out(0, Location::RegisterLocation(AllocateArrayABI::kResultReg));
+  return locs;
+}
+
+// Inlines array allocation for known constant values.
+static void InlineArrayAllocation(FlowGraphCompiler* compiler,
+                                  intptr_t num_elements,
+                                  compiler::Label* slow_path,
+                                  compiler::Label* done) {
+  const int kInlineArraySize = 12;  // Same as kInlineInstanceSize.
+  const intptr_t instance_size = Array::InstanceSize(num_elements);
+
+  __ TryAllocateArray(kArrayCid, instance_size, slow_path,
+                      AllocateArrayABI::kResultReg,  // instance
+                      T3,                            // end address
+                      T4, T5);
+  // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
+  // T3: new object end address.
+
+  // Store the type argument field.
+  __ StoreCompressedIntoObjectNoBarrier(
+      AllocateArrayABI::kResultReg,
+      compiler::FieldAddress(AllocateArrayABI::kResultReg,
+                             Array::type_arguments_offset()),
+      AllocateArrayABI::kTypeArgumentsReg);
+
+  // Set the length field.
+  __ StoreCompressedIntoObjectNoBarrier(
+      AllocateArrayABI::kResultReg,
+      compiler::FieldAddress(AllocateArrayABI::kResultReg,
+                             Array::length_offset()),
+      AllocateArrayABI::kLengthReg);
+
+  // Initialize all array elements to raw_null.
+  // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
+  // T3: new object end address.
+  // T5: iterator which initially points to the start of the variable
+  // data area to be initialized.
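+  // Arrays with fewer than kInlineArraySize slots are initialized with an
+  // unrolled sequence of stores; larger arrays use a loop that advances T5
+  // until it reaches the end address in T3.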
+  if (num_elements > 0) {
+    const intptr_t array_size = instance_size - sizeof(UntaggedArray);
+    __ AddImmediate(T5, AllocateArrayABI::kResultReg,
+                    sizeof(UntaggedArray) - kHeapObjectTag);
+    if (array_size < (kInlineArraySize * kCompressedWordSize)) {
+      intptr_t current_offset = 0;
+      while (current_offset < array_size) {
+        __ StoreCompressedIntoObjectNoBarrier(
+            AllocateArrayABI::kResultReg, compiler::Address(T5, current_offset),
+            NULL_REG);
+        current_offset += kCompressedWordSize;
+      }
+    } else {
+      compiler::Label end_loop, init_loop;
+      __ Bind(&init_loop);
+      __ CompareRegisters(T5, T3);
+      __ BranchIf(CS, &end_loop, compiler::Assembler::kNearJump);
+      __ StoreCompressedIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
+                                            compiler::Address(T5, 0), NULL_REG);
+      __ AddImmediate(T5, kCompressedWordSize);
+      __ j(&init_loop);
+      __ Bind(&end_loop);
+    }
+  }
+  __ j(done, compiler::Assembler::kNearJump);
+}
+
+void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  TypeUsageInfo* type_usage_info = compiler->thread()->type_usage_info();
+  if (type_usage_info != nullptr) {
+    const Class& list_class =
+        Class::Handle(compiler->isolate_group()->class_table()->At(kArrayCid));
+    RegisterTypeArgumentsUse(compiler->function(), type_usage_info, list_class,
+                             type_arguments()->definition());
+  }
+
+  compiler::Label slow_path, done;
+  if (!FLAG_use_slow_path && FLAG_inline_alloc) {
+    if (compiler->is_optimizing() && !FLAG_precompiled_mode &&
+        num_elements()->BindsToConstant() &&
+        num_elements()->BoundConstant().IsSmi()) {
+      const intptr_t length =
+          Smi::Cast(num_elements()->BoundConstant()).Value();
+      if (Array::IsValidLength(length)) {
+        InlineArrayAllocation(compiler, length, &slow_path, &done);
+      }
+    }
+  }
+
+  __ Bind(&slow_path);
+  auto object_store = compiler->isolate_group()->object_store();
+  const auto& allocate_array_stub =
+      Code::ZoneHandle(compiler->zone(), object_store->allocate_array_stub());
+  compiler->GenerateStubCall(source(), allocate_array_stub,
+                             UntaggedPcDescriptors::kOther, locs(), deopt_id(),
+                             env());
+  __ Bind(&done);
+}
+
+LocationSummary* LoadFieldInstr::MakeLocationSummary(Zone* zone,
+                                                     bool opt) const {
+  const intptr_t kNumInputs = 1;
+  LocationSummary* locs = nullptr;
+  if (slot().representation() != kTagged) {
+    ASSERT(!calls_initializer());
+    ASSERT(RepresentationUtils::IsUnboxedInteger(slot().representation()));
+    const size_t value_size =
+        RepresentationUtils::ValueSize(slot().representation());
+
+    const intptr_t kNumTemps = 0;
+    locs = new (zone)
+        LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+    locs->set_in(0, Location::RequiresRegister());
+    if (value_size <= compiler::target::kWordSize) {
+      locs->set_out(0, Location::RequiresRegister());
+    } else {
+#if XLEN == 32
+      ASSERT(value_size <= 2 * compiler::target::kWordSize);
+      locs->set_out(0, Location::Pair(Location::RequiresRegister(),
+                                      Location::RequiresRegister()));
+#else
+      UNREACHABLE();
+#endif
+    }
+
+  } else if (IsUnboxedDartFieldLoad() && opt) {
+    ASSERT(!calls_initializer());
+    ASSERT(!slot().field().is_non_nullable_integer());
+
+    const intptr_t kNumTemps = FLAG_precompiled_mode ? 0 : 1;
+    locs = new (zone)
+        LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+    locs->set_in(0, Location::RequiresRegister());
+    if (!FLAG_precompiled_mode) {
+      locs->set_temp(0, Location::RequiresRegister());
+    }
+    locs->set_out(0, Location::RequiresFpuRegister());
+
+  } else if (IsPotentialUnboxedDartFieldLoad()) {
+    ASSERT(!calls_initializer());
+    const intptr_t kNumTemps = 1;
+    locs = new (zone) LocationSummary(zone, kNumInputs, kNumTemps,
+                                      LocationSummary::kCallOnSlowPath);
+    locs->set_in(0, Location::RequiresRegister());
+    locs->set_temp(0, Location::RequiresRegister());
+    locs->set_out(0, Location::RequiresRegister());
+
+  } else if (calls_initializer()) {
+    if (throw_exception_on_initialization()) {
+      const bool using_shared_stub = UseSharedSlowPathStub(opt);
+      const intptr_t kNumTemps = using_shared_stub ? 1 : 0;
+      locs = new (zone) LocationSummary(
+          zone, kNumInputs, kNumTemps,
+          using_shared_stub ? LocationSummary::kCallOnSharedSlowPath
+                            : LocationSummary::kCallOnSlowPath);
+      if (using_shared_stub) {
+        locs->set_temp(0, Location::RegisterLocation(
+                              LateInitializationErrorABI::kFieldReg));
+      }
+      locs->set_in(0, Location::RequiresRegister());
+      locs->set_out(0, Location::RequiresRegister());
+    } else {
+      const intptr_t kNumTemps = 0;
+      locs = new (zone)
+          LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
+      locs->set_in(
+          0, Location::RegisterLocation(InitInstanceFieldABI::kInstanceReg));
+      locs->set_out(
+          0, Location::RegisterLocation(InitInstanceFieldABI::kResultReg));
+    }
+  } else {
+    const intptr_t kNumTemps = 0;
+    locs = new (zone)
+        LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+    locs->set_in(0, Location::RequiresRegister());
+    locs->set_out(0, Location::RequiresRegister());
+  }
+  return locs;
+}
+
+void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 16);
+  ASSERT(sizeof(UntaggedField::guarded_cid_) == 2);
+  ASSERT(sizeof(UntaggedField::is_nullable_) == 2);
+
+  const Register instance_reg = locs()->in(0).reg();
+  if (slot().representation() != kTagged) {
+    auto const rep = slot().representation();
+    const size_t value_size = RepresentationUtils::ValueSize(rep);
+    __ Comment("NativeUnboxedLoadFieldInstr");
+    if (value_size <= compiler::target::kWordSize) {
+      auto const result = locs()->out(0).reg();
+      __ LoadFieldFromOffset(result, instance_reg, OffsetInBytes(),
+                             RepresentationUtils::OperandSize(rep));
+    } else {
+#if XLEN == 32
+      auto const out_pair = locs()->out(0).AsPairLocation();
+      const Register out_lo = out_pair->At(0).reg();
+      const Register out_hi = out_pair->At(1).reg();
+      const intptr_t offset_lo = OffsetInBytes() - kHeapObjectTag;
+      const intptr_t offset_hi = offset_lo + compiler::target::kWordSize;
+      __ LoadFromOffset(out_lo, instance_reg, offset_lo);
+      __ LoadFromOffset(out_hi, instance_reg, offset_hi);
+#else
+      UNREACHABLE();
+#endif
+    }
+    return;
+  }
+
+  if (IsUnboxedDartFieldLoad() && compiler->is_optimizing()) {
+    const FRegister result = locs()->out(0).fpu_reg();
+    const intptr_t cid = slot().field().UnboxedFieldCid();
+
+    if (FLAG_precompiled_mode) {
+      switch (cid) {
+        case kDoubleCid:
+          __ Comment("UnboxedDoubleLoadFieldInstr");
+          __ LoadDFieldFromOffset(result, instance_reg, OffsetInBytes());
+          return;
+        case kFloat32x4Cid:
+          __ Comment("UnboxedFloat32x4LoadFieldInstr");
+          UNIMPLEMENTED();
+          return;
+        case kFloat64x2Cid:
+          __ Comment("UnboxedFloat64x2LoadFieldInstr");
+          UNIMPLEMENTED();
+          return;
+        default:
+          UNREACHABLE();
+      }
+    }
+
+    const Register temp = locs()->temp(0).reg();
+
+    __ LoadCompressedFieldFromOffset(temp, instance_reg, OffsetInBytes());
+    switch (cid) {
+      case kDoubleCid:
+        __ Comment("UnboxedDoubleLoadFieldInstr");
+        __ LoadDFieldFromOffset(result, temp, Double::value_offset());
+        break;
+      case kFloat32x4Cid:
+        UNIMPLEMENTED();
+        break;
+      case kFloat64x2Cid:
+        UNIMPLEMENTED();
+        break;
+      default:
+        UNREACHABLE();
+    }
+    return;
+  }
+
+  compiler::Label done;
+  const Register result_reg = locs()->out(0).reg();
+  if (IsPotentialUnboxedDartFieldLoad()) {
+    const Register temp = locs()->temp(0).reg();
+
+    compiler::Label load_pointer;
+    compiler::Label load_double;
+    compiler::Label load_float32x4;
+    compiler::Label load_float64x2;
+
+    __ LoadObject(result_reg, Field::ZoneHandle(slot().field().Original()));
+
+    compiler::FieldAddress field_cid_operand(result_reg,
+                                             Field::guarded_cid_offset());
+    compiler::FieldAddress field_nullability_operand(
+        result_reg, Field::is_nullable_offset());
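+    // A nullable field is always boxed; otherwise the guarded class id
+    // selects which unboxed representation must be reloaded and reboxed.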
+
+    __ lhu(temp, field_nullability_operand);
+    __ CompareImmediate(temp, kNullCid);
+    __ BranchIf(EQ, &load_pointer, compiler::Assembler::kNearJump);
+
+    __ lhu(temp, field_cid_operand);
+    __ CompareImmediate(temp, kDoubleCid);
+    __ BranchIf(EQ, &load_double, compiler::Assembler::kNearJump);
+
+    __ lhu(temp, field_cid_operand);
+    __ CompareImmediate(temp, kFloat32x4Cid);
+    __ BranchIf(EQ, &load_float32x4, compiler::Assembler::kNearJump);
+
+    __ lhu(temp, field_cid_operand);
+    __ CompareImmediate(temp, kFloat64x2Cid);
+    __ BranchIf(EQ, &load_float64x2, compiler::Assembler::kNearJump);
+
+    // Fall through to the tagged pointer load.
+    __ j(&load_pointer);
+
+    if (!compiler->is_optimizing()) {
+      locs()->live_registers()->Add(locs()->in(0));
+    }
+
+    {
+      __ Bind(&load_double);
+      BoxAllocationSlowPath::Allocate(compiler, this, compiler->double_class(),
+                                      result_reg, temp);
+      __ LoadCompressedFieldFromOffset(temp, instance_reg, OffsetInBytes());
+      __ LoadDFieldFromOffset(FTMP, temp, Double::value_offset());
+      __ StoreDFieldToOffset(FTMP, result_reg, Double::value_offset());
+      __ j(&done);
+    }
+
+    {
+      __ Bind(&load_float32x4);
+      __ ebreak();  // Unimplemented
+      __ j(&done);
+    }
+
+    {
+      __ Bind(&load_float64x2);
+      __ ebreak();  // Unimplemented
+      __ j(&done);
+    }
+
+    __ Bind(&load_pointer);
+  }
+
+  if (slot().is_compressed()) {
+    __ LoadCompressedFieldFromOffset(result_reg, instance_reg, OffsetInBytes());
+  } else {
+    __ LoadFieldFromOffset(result_reg, instance_reg, OffsetInBytes());
+  }
+
+  if (calls_initializer()) {
+    EmitNativeCodeForInitializerCall(compiler);
+  }
+
+  __ Bind(&done);
+}
+
+LocationSummary* AllocateUninitializedContextInstr::MakeLocationSummary(
+    Zone* zone,
+    bool opt) const {
+  ASSERT(opt);
+  const intptr_t kNumInputs = 0;
+  const intptr_t kNumTemps = 3;
+  LocationSummary* locs = new (zone) LocationSummary(
+      zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
+  locs->set_temp(0, Location::RegisterLocation(T1));
+  locs->set_temp(1, Location::RegisterLocation(T2));
+  locs->set_temp(2, Location::RegisterLocation(T3));
+  locs->set_out(0, Location::RegisterLocation(A0));
+  return locs;
+}
+
+class AllocateContextSlowPath
+    : public TemplateSlowPathCode<AllocateUninitializedContextInstr> {
+ public:
+  explicit AllocateContextSlowPath(
+      AllocateUninitializedContextInstr* instruction)
+      : TemplateSlowPathCode(instruction) {}
+
+  virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
+    __ Comment("AllocateContextSlowPath");
+    __ Bind(entry_label());
+
+    LocationSummary* locs = instruction()->locs();
+    locs->live_registers()->Remove(locs->out(0));
+
+    compiler->SaveLiveRegisters(locs);
+
+    auto slow_path_env = compiler->SlowPathEnvironmentFor(
+        instruction(), /*num_slow_path_args=*/0);
+    ASSERT(slow_path_env != nullptr);
+
+    auto object_store = compiler->isolate_group()->object_store();
+    const auto& allocate_context_stub = Code::ZoneHandle(
+        compiler->zone(), object_store->allocate_context_stub());
+
+    __ LoadImmediate(T1, instruction()->num_context_variables());
+    compiler->GenerateStubCall(instruction()->source(), allocate_context_stub,
+                               UntaggedPcDescriptors::kOther, locs,
+                               instruction()->deopt_id(), slow_path_env);
+    ASSERT(instruction()->locs()->out(0).reg() == A0);
+    compiler->RestoreLiveRegisters(instruction()->locs());
+    __ j(exit_label());
+  }
+};
+
+void AllocateUninitializedContextInstr::EmitNativeCode(
+    FlowGraphCompiler* compiler) {
+  Register temp0 = locs()->temp(0).reg();
+  Register temp1 = locs()->temp(1).reg();
+  Register temp2 = locs()->temp(2).reg();
+  Register result = locs()->out(0).reg();
+  // Try to allocate the object.
+  AllocateContextSlowPath* slow_path = new AllocateContextSlowPath(this);
+  compiler->AddSlowPathCode(slow_path);
+  intptr_t instance_size = Context::InstanceSize(num_context_variables());
+
+  if (!FLAG_use_slow_path && FLAG_inline_alloc) {
+    __ TryAllocateArray(kContextCid, instance_size, slow_path->entry_label(),
+                        result,  // instance
+                        temp0, temp1, temp2);
+
+    // Set up the number of context variables field (int32_t).
+    __ LoadImmediate(temp0, num_context_variables());
+    __ sw(temp0,
+          compiler::FieldAddress(result, Context::num_variables_offset()));
+  } else {
+    __ Jump(slow_path->entry_label());
+  }
+
+  __ Bind(slow_path->exit_label());
+}
+
+LocationSummary* AllocateContextInstr::MakeLocationSummary(Zone* zone,
+                                                           bool opt) const {
+  const intptr_t kNumInputs = 0;
+  const intptr_t kNumTemps = 1;
+  LocationSummary* locs = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
+  locs->set_temp(0, Location::RegisterLocation(T1));
+  locs->set_out(0, Location::RegisterLocation(A0));
+  return locs;
+}
+
+void AllocateContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  ASSERT(locs()->temp(0).reg() == T1);
+  ASSERT(locs()->out(0).reg() == A0);
+
+  auto object_store = compiler->isolate_group()->object_store();
+  const auto& allocate_context_stub =
+      Code::ZoneHandle(compiler->zone(), object_store->allocate_context_stub());
+  __ LoadImmediate(T1, num_context_variables());
+  compiler->GenerateStubCall(source(), allocate_context_stub,
+                             UntaggedPcDescriptors::kOther, locs(), deopt_id(),
+                             env());
+}
+
+LocationSummary* CloneContextInstr::MakeLocationSummary(Zone* zone,
+                                                        bool opt) const {
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* locs = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
+  locs->set_in(0, Location::RegisterLocation(T5));
+  locs->set_out(0, Location::RegisterLocation(A0));
+  return locs;
+}
+
+void CloneContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  ASSERT(locs()->in(0).reg() == T5);
+  ASSERT(locs()->out(0).reg() == A0);
+
+  auto object_store = compiler->isolate_group()->object_store();
+  const auto& clone_context_stub =
+      Code::ZoneHandle(compiler->zone(), object_store->clone_context_stub());
+  compiler->GenerateStubCall(source(), clone_context_stub,
+                             /*kind=*/UntaggedPcDescriptors::kOther, locs(),
+                             deopt_id(), env());
+}
+
+LocationSummary* CatchBlockEntryInstr::MakeLocationSummary(Zone* zone,
+                                                           bool opt) const {
+  UNREACHABLE();
+  return nullptr;
+}
+
+void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  __ Bind(compiler->GetJumpLabel(this));
+  compiler->AddExceptionHandler(
+      catch_try_index(), try_index(), compiler->assembler()->CodeSize(),
+      is_generated(), catch_handler_types_, needs_stacktrace());
+  if (!FLAG_precompiled_mode) {
+    // On lazy deoptimization we patch the optimized code here to enter the
+    // deoptimization stub.
+    const intptr_t deopt_id = DeoptId::ToDeoptAfter(GetDeoptId());
+    if (compiler->is_optimizing()) {
+      compiler->AddDeoptIndexAtCall(deopt_id, env());
+    } else {
+      compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, deopt_id,
+                                     InstructionSource());
+    }
+  }
+  if (HasParallelMove()) {
+    compiler->parallel_move_resolver()->EmitNativeCode(parallel_move());
+  }
+
+  // Restore SP from FP as we are coming from a throw and the code for
+  // popping arguments has not been run.
+  const intptr_t fp_sp_dist =
+      (compiler::target::frame_layout.first_local_from_fp + 1 -
+       compiler->StackSize()) *
+      kWordSize;
+  ASSERT(fp_sp_dist <= 0);
+  __ AddImmediate(SP, FP, fp_sp_dist);
+
+  if (!compiler->is_optimizing()) {
+    if (raw_exception_var_ != nullptr) {
+      __ StoreToOffset(
+          kExceptionObjectReg, FP,
+          compiler::target::FrameOffsetInBytesForVariable(raw_exception_var_));
+    }
+    if (raw_stacktrace_var_ != nullptr) {
+      __ StoreToOffset(
+          kStackTraceObjectReg, FP,
+          compiler::target::FrameOffsetInBytesForVariable(raw_stacktrace_var_));
+    }
+  }
+}
+
+LocationSummary* CheckStackOverflowInstr::MakeLocationSummary(Zone* zone,
+                                                              bool opt) const {
+  const intptr_t kNumInputs = 0;
+  const intptr_t kNumTemps = 1;
+  const bool using_shared_stub = UseSharedSlowPathStub(opt);
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps,
+                      using_shared_stub ? LocationSummary::kCallOnSharedSlowPath
+                                        : LocationSummary::kCallOnSlowPath);
+  summary->set_temp(0, Location::RequiresRegister());
+  return summary;
+}
+
+class CheckStackOverflowSlowPath
+    : public TemplateSlowPathCode<CheckStackOverflowInstr> {
+ public:
+  static constexpr intptr_t kNumSlowPathArgs = 0;
+
+  explicit CheckStackOverflowSlowPath(CheckStackOverflowInstr* instruction)
+      : TemplateSlowPathCode(instruction) {}
+
+  virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
+    auto locs = instruction()->locs();
+    if (compiler->isolate_group()->use_osr() && osr_entry_label()->IsLinked()) {
+      const Register value = locs->temp(0).reg();
+      __ Comment("CheckStackOverflowSlowPathOsr");
+      __ Bind(osr_entry_label());
+      __ li(value, Thread::kOsrRequest);
+      __ sx(value,
+            compiler::Address(THR, Thread::stack_overflow_flags_offset()));
+    }
+    __ Comment("CheckStackOverflowSlowPath");
+    __ Bind(entry_label());
+    const bool using_shared_stub = locs->call_on_shared_slow_path();
+    if (!using_shared_stub) {
+      compiler->SaveLiveRegisters(locs);
+    }
+    // pending_deoptimization_env_ is needed to generate a runtime call that
+    // may throw an exception.
+    ASSERT(compiler->pending_deoptimization_env_ == NULL);
+    Environment* env =
+        compiler->SlowPathEnvironmentFor(instruction(), kNumSlowPathArgs);
+    compiler->pending_deoptimization_env_ = env;
+
+    if (using_shared_stub) {
+      auto object_store = compiler->isolate_group()->object_store();
+      const bool live_fpu_regs = locs->live_registers()->FpuRegisterCount() > 0;
+      const auto& stub = Code::ZoneHandle(
+          compiler->zone(),
+          live_fpu_regs
+              ? object_store->stack_overflow_stub_with_fpu_regs_stub()
+              : object_store->stack_overflow_stub_without_fpu_regs_stub());
+
+      if (compiler->CanPcRelativeCall(stub)) {
+        __ GenerateUnRelocatedPcRelativeCall();
+        compiler->AddPcRelativeCallStubTarget(stub);
+      } else {
+        const uword entry_point_offset =
+            Thread::stack_overflow_shared_stub_entry_point_offset(
+                live_fpu_regs);
+        __ Call(compiler::Address(THR, entry_point_offset));
+      }
+      compiler->RecordSafepoint(locs, kNumSlowPathArgs);
+      compiler->RecordCatchEntryMoves(env);
+      compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOther,
+                                     instruction()->deopt_id(),
+                                     instruction()->source());
+    } else {
+      __ CallRuntime(kStackOverflowRuntimeEntry, kNumSlowPathArgs);
+      compiler->EmitCallsiteMetadata(
+          instruction()->source(), instruction()->deopt_id(),
+          UntaggedPcDescriptors::kOther, instruction()->locs(), env);
+    }
+
+    if (compiler->isolate_group()->use_osr() && !compiler->is_optimizing() &&
+        instruction()->in_loop()) {
+      // In unoptimized code, record loop stack checks as possible OSR entries.
+      compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOsrEntry,
+                                     instruction()->deopt_id(),
+                                     InstructionSource());
+    }
+    compiler->pending_deoptimization_env_ = NULL;
+    if (!using_shared_stub) {
+      compiler->RestoreLiveRegisters(locs);
+    }
+    __ j(exit_label());
+  }
+
+  compiler::Label* osr_entry_label() {
+    ASSERT(IsolateGroup::Current()->use_osr());
+    return &osr_entry_label_;
+  }
+
+ private:
+  compiler::Label osr_entry_label_;
+};
+
+void CheckStackOverflowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  CheckStackOverflowSlowPath* slow_path = new CheckStackOverflowSlowPath(this);
+  compiler->AddSlowPathCode(slow_path);
+
+  __ lx(TMP,
+        compiler::Address(THR, compiler::target::Thread::stack_limit_offset()));
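+  // Unsigned compare: enter the slow path when SP is at or below the limit.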
+  __ bleu(SP, TMP, slow_path->entry_label());
+  if (compiler->CanOSRFunction() && in_loop()) {
+    const Register function = locs()->temp(0).reg();
+    // In unoptimized code check the usage counter to trigger OSR at loop
+    // stack checks.  Use progressively higher thresholds for more deeply
+    // nested loops to attempt to hit outer loops with OSR when possible.
+    __ LoadObject(function, compiler->parsed_function().function());
+    const intptr_t threshold =
+        FLAG_optimization_counter_threshold * (loop_depth() + 1);
+    __ LoadFieldFromOffset(TMP, function, Function::usage_counter_offset(),
+                           compiler::kFourBytes);
+    __ addi(TMP, TMP, 1);
+    __ StoreFieldToOffset(TMP, function, Function::usage_counter_offset(),
+                          compiler::kFourBytes);
+    __ CompareImmediate(TMP, threshold);
+    __ BranchIf(GE, slow_path->osr_entry_label());
+  }
+  if (compiler->ForceSlowPathForStackOverflow()) {
+    __ j(slow_path->entry_label());
+  }
+  __ Bind(slow_path->exit_label());
+}
+
+static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
+                             BinarySmiOpInstr* shift_left) {
+  const LocationSummary& locs = *shift_left->locs();
+  const Register left = locs.in(0).reg();
+  const Register result = locs.out(0).reg();
+  compiler::Label* deopt =
+      shift_left->CanDeoptimize()
+          ? compiler->AddDeoptStub(shift_left->deopt_id(),
+                                   ICData::kDeoptBinarySmiOp)
+          : NULL;
+  if (locs.in(1).IsConstant()) {
+    const Object& constant = locs.in(1).constant();
+    ASSERT(constant.IsSmi());
+    // The immediate shift operation takes 6 bits (RV64) or 5 bits (RV32)
+    // for the count, so it is at most XLEN - 1.
+    const intptr_t kCountLimit = XLEN - 1;
+    const intptr_t value = Smi::Cast(constant).Value();
+    ASSERT((0 < value) && (value < kCountLimit));
+    __ slli(result, left, value);
+    if (shift_left->can_overflow()) {
+      ASSERT(result != left);
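+      // Overflow check: arithmetically shifting the result back must
+      // reproduce the original left operand.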
+      __ srai(TMP2, result, value);
+      __ bne(left, TMP2, deopt);  // Overflow.
+    }
+    return;
+  }
+
+  // Right (locs.in(1)) is not constant.
+  const Register right = locs.in(1).reg();
+  Range* right_range = shift_left->right_range();
+  if (shift_left->left()->BindsToConstant() && shift_left->can_overflow()) {
+    // TODO(srdjan): Implement code below for is_truncating().
+    // If left is constant, we know the maximal allowed size for right.
+    const Object& obj = shift_left->left()->BoundConstant();
+    if (obj.IsSmi()) {
+      const intptr_t left_int = Smi::Cast(obj).Value();
+      if (left_int == 0) {
+        __ bltz(right, deopt);
+        __ mv(result, ZR);
+        return;
+      }
+      const intptr_t max_right =
+          compiler::target::kSmiBits - Utils::HighestBit(left_int);
+      const bool right_needs_check =
+          !RangeUtils::IsWithin(right_range, 0, max_right - 1);
+      if (right_needs_check) {
+        __ CompareObject(right, Smi::ZoneHandle(Smi::New(max_right)));
+        __ BranchIf(CS, deopt);
+      }
+      __ SmiUntag(TMP, right);
+      __ sll(result, left, TMP);
+    }
+    return;
+  }
+
+  const bool right_needs_check =
+      !RangeUtils::IsWithin(right_range, 0, (Smi::kBits - 1));
+  if (!shift_left->can_overflow()) {
+    if (right_needs_check) {
+      if (!RangeUtils::IsPositive(right_range)) {
+        ASSERT(shift_left->CanDeoptimize());
+        __ bltz(right, deopt);
+      }
+
+      compiler::Label done, is_not_zero;
+      __ CompareObject(right, Smi::ZoneHandle(Smi::New(Smi::kBits)));
+      __ BranchIf(LESS, &is_not_zero, compiler::Assembler::kNearJump);
+      __ li(result, 0);
+      __ j(&done, compiler::Assembler::kNearJump);
+      __ Bind(&is_not_zero);
+      __ SmiUntag(TMP, right);
+      __ sll(result, left, TMP);
+      __ Bind(&done);
+    } else {
+      __ SmiUntag(TMP, right);
+      __ sll(result, left, TMP);
+    }
+  } else {
+    if (right_needs_check) {
+      ASSERT(shift_left->CanDeoptimize());
+      __ CompareObject(right, Smi::ZoneHandle(Smi::New(Smi::kBits)));
+      __ BranchIf(CS, deopt);
+    }
+    __ SmiUntag(TMP, right);
+    ASSERT(result != left);
+    __ sll(result, left, TMP);
+    __ sra(TMP, result, TMP);
+    __ bne(left, TMP, deopt);  // Overflow.
+  }
+}
+
+LocationSummary* BinarySmiOpInstr::MakeLocationSummary(Zone* zone,
+                                                       bool opt) const {
+  const intptr_t kNumInputs = 2;
+  const intptr_t kNumTemps =
+      ((op_kind() == Token::kUSHR) || (op_kind() == Token::kMUL)) ? 1 : 0;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  if (op_kind() == Token::kTRUNCDIV) {
+    summary->set_in(0, Location::RequiresRegister());
+    if (RightIsPowerOfTwoConstant()) {
+      ConstantInstr* right_constant = right()->definition()->AsConstant();
+      summary->set_in(1, Location::Constant(right_constant));
+    } else {
+      summary->set_in(1, Location::RequiresRegister());
+    }
+    summary->set_out(0, Location::RequiresRegister());
+    return summary;
+  }
+  if (op_kind() == Token::kMOD) {
+    summary->set_in(0, Location::RequiresRegister());
+    summary->set_in(1, Location::RequiresRegister());
+    summary->set_out(0, Location::RequiresRegister());
+    return summary;
+  }
+  summary->set_in(0, Location::RequiresRegister());
+  summary->set_in(1, LocationRegisterOrSmiConstant(right()));
+  if (kNumTemps == 1) {
+    summary->set_temp(0, Location::RequiresRegister());
+  }
+  // We make use of 3-operand instructions: unlike on Intel, the result
+  // register need not be identical to the first input register.
+  summary->set_out(0, Location::RequiresRegister());
+  return summary;
+}
+
+void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  if (op_kind() == Token::kSHL) {
+    EmitSmiShiftLeft(compiler, this);
+    return;
+  }
+
+  const Register left = locs()->in(0).reg();
+  const Register result = locs()->out(0).reg();
+  compiler::Label* deopt = NULL;
+  if (CanDeoptimize()) {
+    deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
+  }
+
+  if (locs()->in(1).IsConstant()) {
+    const Object& constant = locs()->in(1).constant();
+    ASSERT(constant.IsSmi());
+    const intx_t imm = static_cast<intx_t>(constant.ptr());
+    switch (op_kind()) {
+      case Token::kADD: {
+        if (deopt == NULL) {
+          __ AddImmediate(result, left, imm);
+        } else {
+          __ AddImmediateBranchOverflow(result, left, imm, deopt);
+        }
+        break;
+      }
+      case Token::kSUB: {
+        if (deopt == NULL) {
+          __ AddImmediate(result, left, -imm);
+        } else {
+          // Negating imm and using AddImmediateSetFlags would not detect the
+          // overflow when imm == kMinInt64.
+          __ SubtractImmediateBranchOverflow(result, left, imm, deopt);
+        }
+        break;
+      }
+      case Token::kMUL: {
+        // Keep left value tagged and untag right value.
+        const intptr_t value = Smi::Cast(constant).Value();
+        if (deopt == NULL) {
+          __ LoadImmediate(TMP, value);
+          __ mul(result, left, TMP);
+        } else {
+          __ MultiplyImmediateBranchOverflow(result, left, value, deopt);
+        }
+        break;
+      }
+      case Token::kTRUNCDIV: {
+        const intptr_t value = Smi::Cast(constant).Value();
+        ASSERT(value != kIntptrMin);
+        ASSERT(Utils::IsPowerOfTwo(Utils::Abs(value)));
+        const intptr_t shift_count =
+            Utils::ShiftForPowerOfTwo(Utils::Abs(value)) + kSmiTagSize;
+        ASSERT(kSmiTagSize == 1);
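+        // Bias negative values by (2^shift_count - 1) so the arithmetic
+        // shift rounds toward zero. E.g. (a sketch) dividing -7 by 2:
+        // left (tagged) = -14, shift_count = 2, bias = 3, and
+        // (-14 + 3) >> 2 = -3 == trunc(-7 / 2); the quotient is re-tagged
+        // below.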
+        __ srai(TMP, left, XLEN - 1);
+        ASSERT(shift_count > 1);  // 1, -1 case handled above.
+        const Register temp = TMP2;
+        __ srli(TMP, TMP, XLEN - shift_count);
+        __ add(temp, left, TMP);
+        ASSERT(shift_count > 0);
+        __ srai(result, temp, shift_count);
+        if (value < 0) {
+          __ neg(result, result);
+        }
+        __ SmiTag(result);
+        break;
+      }
+      case Token::kBIT_AND:
+        // No overflow check.
+        __ AndImmediate(result, left, imm);
+        break;
+      case Token::kBIT_OR:
+        // No overflow check.
+        __ OrImmediate(result, left, imm);
+        break;
+      case Token::kBIT_XOR:
+        // No overflow check.
+        __ XorImmediate(result, left, imm);
+        break;
+      case Token::kSHR: {
+        // The srai immediate encodes a shift count of at most XLEN - 1.
+        const intptr_t kCountLimit = XLEN - 1;
+        intptr_t value = Smi::Cast(constant).Value();
+        __ srai(result, left, Utils::Minimum(value + kSmiTagSize, kCountLimit));
+        __ SmiTag(result);
+        break;
+      }
+      case Token::kUSHR: {
+#if XLEN == 32
+        const intptr_t value = compiler::target::SmiValue(constant);
+        ASSERT((value > 0) && (value < 64));
+        COMPILE_ASSERT(compiler::target::kSmiBits < 32);
+        // 64-bit representation of left operand value:
+        //
+        //       ss...sssss  s  s  xxxxxxxxxxxxx
+        //       |        |  |  |  |           |
+        //       63      32  31 30 kSmiBits-1  0
+        //
+        // Where 's' is a sign bit.
+        //
+        // If left operand is negative (sign bit is set), then
+        // result will fit into Smi range if and only if
+        // the shift amount >= 64 - kSmiBits.
+        //
+        // If left operand is non-negative, the result always
+        // fits into Smi range.
+        //
+        if (value < (64 - compiler::target::kSmiBits)) {
+          if (deopt != nullptr) {
+            __ bltz(left, deopt);
+          } else {
+            // Operation cannot overflow only if left value is always
+            // non-negative.
+            ASSERT(!can_overflow());
+          }
+          // At this point left operand is non-negative, so unsigned shift
+          // can't overflow.
+          if (value >= compiler::target::kSmiBits) {
+            __ li(result, 0);
+          } else {
+            __ srli(result, left, value + kSmiTagSize);
+            __ SmiTag(result);
+          }
+        } else {
+          // Shift amount > 32, and the result is guaranteed to fit into Smi.
+          // Low (Smi) part of the left operand is shifted out.
+          // High part is filled with sign bits.
+          __ srai(result, left, 31);
+          __ srli(result, result, value - 32);
+          __ SmiTag(result);
+        }
+#else
+        // The srl operation masks the count to the low 6 bits, but
+        // unsigned shifts by >= kBitsPerInt64 are eliminated by
+        // BinaryIntegerOpInstr::Canonicalize.
+        const intptr_t kCountLimit = XLEN - 1;
+        intptr_t value = Smi::Cast(constant).Value();
+        ASSERT((value >= 0) && (value <= kCountLimit));
+        __ SmiUntag(TMP, left);
+        __ srli(TMP, TMP, value);
+        __ SmiTag(result, TMP);
+        if (deopt != nullptr) {
+          __ SmiUntag(TMP2, result);
+          __ bne(TMP, TMP2, deopt);
+        }
+#endif
+        break;
+      }
+      default:
+        UNREACHABLE();
+        break;
+    }
+    return;
+  }
+
+  const Register right = locs()->in(1).reg();
+  switch (op_kind()) {
+    case Token::kADD: {
+      if (deopt == NULL) {
+        __ add(result, left, right);
+      } else if (RangeUtils::IsPositive(right_range())) {
+        ASSERT(result != left);
+        __ add(result, left, right);
+        __ blt(result, left, deopt);
+      } else if (RangeUtils::IsNegative(right_range())) {
+        ASSERT(result != left);
+        __ add(result, left, right);
+        __ bgt(result, left, deopt);
+      } else {
+        __ AddBranchOverflow(result, left, right, deopt);
+      }
+      break;
+    }
+    case Token::kSUB: {
+      if (deopt == NULL) {
+        __ sub(result, left, right);
+      } else if (RangeUtils::IsPositive(right_range())) {
+        ASSERT(result != left);
+        __ sub(result, left, right);
+        __ bgt(result, left, deopt);
+      } else if (RangeUtils::IsNegative(right_range())) {
+        ASSERT(result != left);
+        __ sub(result, left, right);
+        __ blt(result, left, deopt);
+      } else {
+        __ SubtractBranchOverflow(result, left, right, deopt);
+      }
+      break;
+    }
+    case Token::kMUL: {
+      const Register temp = locs()->temp(0).reg();
+      __ SmiUntag(temp, left);
+      if (deopt == NULL) {
+        __ mul(result, temp, right);
+      } else {
+        __ MultiplyBranchOverflow(result, temp, right, deopt);
+      }
+      break;
+    }
+    case Token::kBIT_AND: {
+      // No overflow check.
+      __ and_(result, left, right);
+      break;
+    }
+    case Token::kBIT_OR: {
+      // No overflow check.
+      __ or_(result, left, right);
+      break;
+    }
+    case Token::kBIT_XOR: {
+      // No overflow check.
+      __ xor_(result, left, right);
+      break;
+    }
+    case Token::kTRUNCDIV: {
+      if (RangeUtils::CanBeZero(right_range())) {
+        // Handle divide by zero in runtime.
+        __ beqz(right, deopt);
+      }
+      __ SmiUntag(TMP, left);
+      __ SmiUntag(TMP2, right);
+      __ div(TMP, TMP, TMP2);
+      __ SmiTag(result, TMP);
+
+      if (RangeUtils::Overlaps(right_range(), -1, -1)) {
+        // Check the corner case of dividing the 'MIN_SMI' with -1, in which
+        // case we cannot tag the result.
+        __ SmiUntag(TMP2, result);
+        __ bne(TMP, TMP2, deopt);
+      }
+      break;
+    }
+    case Token::kMOD: {
+      if (RangeUtils::CanBeZero(right_range())) {
+        // Handle divide by zero in runtime.
+        __ beqz(right, deopt);
+      }
+      __ SmiUntag(TMP, left);
+      __ SmiUntag(TMP2, right);
+
+      __ rem(result, TMP, TMP2);
+
+      //  res = left % right;
+      //  if (res < 0) {
+      //    if (right < 0) {
+      //      res = res - right;
+      //    } else {
+      //      res = res + right;
+      //    }
+      //  }
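+      // E.g. -5 mod 3: rem yields -2; right > 0, so the result is adjusted
+      // to -2 + 3 == 1, keeping the result non-negative.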
+      compiler::Label done, adjust;
+      __ bgez(result, &done, compiler::Assembler::kNearJump);
+      // Result is negative, adjust it.
+      __ bgez(right, &adjust, compiler::Assembler::kNearJump);
+      __ sub(result, result, TMP2);
+      __ j(&done, compiler::Assembler::kNearJump);
+      __ Bind(&adjust);
+      __ add(result, result, TMP2);
+      __ Bind(&done);
+      __ SmiTag(result);
+      break;
+    }
+    case Token::kSHR: {
+      if (CanDeoptimize()) {
+        __ bltz(right, deopt);
+      }
+      __ SmiUntag(TMP, right);
+      // The sra operation masks the shift count to the low 6 bits (RV64) or
+      // 5 bits (RV32).
+      const intptr_t kCountLimit = XLEN - 1;
+      if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(), kCountLimit)) {
+        __ LoadImmediate(TMP2, kCountLimit);
+        compiler::Label shift_in_bounds;
+        __ ble(TMP, TMP2, &shift_in_bounds, compiler::Assembler::kNearJump);
+        __ mv(TMP, TMP2);
+        __ Bind(&shift_in_bounds);
+      }
+      __ SmiUntag(TMP2, left);
+      __ sra(result, TMP2, TMP);
+      __ SmiTag(result);
+      break;
+    }
+    case Token::kUSHR: {
+#if XLEN == 32
+      compiler::Label done;
+      __ SmiUntag(TMP, right);
+      // 64-bit representation of left operand value:
+      //
+      //       ss...sssss  s  s  xxxxxxxxxxxxx
+      //       |        |  |  |  |           |
+      //       63      32  31 30 kSmiBits-1  0
+      //
+      // Where 's' is a sign bit.
+      //
+      // If left operand is negative (sign bit is set), then
+      // result will fit into Smi range if and only if
+      // the shift amount >= 64 - kSmiBits.
+      //
+      // If left operand is non-negative, the result always
+      // fits into Smi range.
+      //
+      if (!RangeUtils::OnlyLessThanOrEqualTo(
+              right_range(), 64 - compiler::target::kSmiBits - 1)) {
+        if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(),
+                                               kBitsPerInt64 - 1)) {
+          ASSERT(result != left);
+          ASSERT(result != right);
+          __ li(result, 0);
+          __ CompareImmediate(TMP, kBitsPerInt64);
+          // If shift amount >= 64, then result is 0.
+          __ BranchIf(GE, &done);
+        }
+        __ CompareImmediate(TMP, 64 - compiler::target::kSmiBits);
+        // Shift amount >= 64 - kSmiBits > 32, but < 64.
+        // Result is guaranteed to fit into Smi range.
+        // Low (Smi) part of the left operand is shifted out.
+        // High part is filled with sign bits.
+        compiler::Label next;
+        __ BranchIf(LT, &next);
+        __ subi(TMP, TMP, 32);
+        __ srai(result, left, 31);
+        __ srl(result, result, TMP);
+        __ SmiTag(result);
+        __ j(&done);
+        __ Bind(&next);
+      }
+      // Shift amount < 64 - kSmiBits.
+      // If left is negative, then result will not fit into Smi range.
+      // Also deopt in case of negative shift amount.
+      if (deopt != nullptr) {
+        __ bltz(left, deopt);
+        __ bltz(right, deopt);
+      } else {
+        ASSERT(!can_overflow());
+      }
+      // At this point left operand is non-negative, so unsigned shift
+      // can't overflow.
+      if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(),
+                                             compiler::target::kSmiBits - 1)) {
+        ASSERT(result != left);
+        ASSERT(result != right);
+        __ li(result, 0);
+        __ CompareImmediate(TMP, compiler::target::kSmiBits);
+        // Left operand >= 0, shift amount >= kSmiBits. Result is 0.
+        __ BranchIf(GE, &done);
+      }
+      // Left operand >= 0, shift amount < kSmiBits < 32.
+      const Register temp = locs()->temp(0).reg();
+      __ SmiUntag(temp, left);
+      __ srl(result, temp, TMP);
+      __ SmiTag(result);
+      __ Bind(&done);
+#elif XLEN == 64
+      if (CanDeoptimize()) {
+        __ bltz(right, deopt);
+      }
+      __ SmiUntag(TMP, right);
+      // The srl operation masks the shift count to the low 6 bits.
+      const intptr_t kCountLimit = XLEN - 1;
+      COMPILE_ASSERT(kCountLimit + 1 == kBitsPerInt64);
+      compiler::Label done;
+      if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(), kCountLimit)) {
+        __ LoadImmediate(TMP2, kCountLimit);
+        compiler::Label shift_in_bounds;
+        __ ble(TMP, TMP2, &shift_in_bounds, compiler::Assembler::kNearJump);
+        __ mv(result, ZR);
+        __ j(&done, compiler::Assembler::kNearJump);
+        __ Bind(&shift_in_bounds);
+      }
+      __ SmiUntag(TMP2, left);
+      __ srl(TMP, TMP2, TMP);
+      __ SmiTag(result, TMP);
+      if (deopt != nullptr) {
+        __ SmiUntag(TMP2, result);
+        __ bne(TMP, TMP2, deopt);
+      }
+      __ Bind(&done);
+#else
+      UNIMPLEMENTED();
+#endif
+      break;
+    }
+    case Token::kDIV: {
+      // Dispatches to 'Double./'.
+      // TODO(srdjan): Implement as conversion to double and double division.
+      UNREACHABLE();
+      break;
+    }
+    case Token::kOR:
+    case Token::kAND: {
+      // Flow graph builder has dissected this operation to guarantee correct
+      // behavior (short-circuit evaluation).
+      UNREACHABLE();
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+LocationSummary* CheckEitherNonSmiInstr::MakeLocationSummary(Zone* zone,
+                                                             bool opt) const {
+  intptr_t left_cid = left()->Type()->ToCid();
+  intptr_t right_cid = right()->Type()->ToCid();
+  ASSERT((left_cid != kDoubleCid) && (right_cid != kDoubleCid));
+  const intptr_t kNumInputs = 2;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  summary->set_in(0, Location::RequiresRegister());
+  summary->set_in(1, Location::RequiresRegister());
+  return summary;
+}
+
+void CheckEitherNonSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  compiler::Label* deopt =
+      compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryDoubleOp,
+                             licm_hoisted_ ? ICData::kHoisted : 0);
+  intptr_t left_cid = left()->Type()->ToCid();
+  intptr_t right_cid = right()->Type()->ToCid();
+  const Register left = locs()->in(0).reg();
+  const Register right = locs()->in(1).reg();
+  if (this->left()->definition() == this->right()->definition()) {
+    __ BranchIfSmi(left, deopt);
+  } else if (left_cid == kSmiCid) {
+    __ BranchIfSmi(right, deopt);
+  } else if (right_cid == kSmiCid) {
+    __ BranchIfSmi(left, deopt);
+  } else {
+    __ or_(TMP, left, right);
+    __ BranchIfSmi(TMP, deopt);
+  }
+}
+
+LocationSummary* BoxInstr::MakeLocationSummary(Zone* zone, bool opt) const {
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone) LocationSummary(
+      zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
+  summary->set_in(0, Location::RequiresFpuRegister());
+  summary->set_out(0, Location::RequiresRegister());
+  return summary;
+}
+
+void BoxInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  const Register out_reg = locs()->out(0).reg();
+  const FRegister value = locs()->in(0).fpu_reg();
+
+  BoxAllocationSlowPath::Allocate(compiler, this,
+                                  compiler->BoxClassFor(from_representation()),
+                                  out_reg, TMP);
+
+  switch (from_representation()) {
+    case kUnboxedDouble:
+      __ StoreDFieldToOffset(value, out_reg, ValueOffset());
+      break;
+    case kUnboxedFloat:
+      __ fcvtds(FpuTMP, value);
+      __ StoreDFieldToOffset(FpuTMP, out_reg, ValueOffset());
+      break;
+    case kUnboxedFloat32x4:
+    case kUnboxedFloat64x2:
+    case kUnboxedInt32x4:
+      UNIMPLEMENTED();
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+LocationSummary* UnboxInstr::MakeLocationSummary(Zone* zone, bool opt) const {
+  ASSERT(!RepresentationUtils::IsUnsigned(representation()));
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 1;
+  const bool is_floating_point =
+      !RepresentationUtils::IsUnboxedInteger(representation());
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  summary->set_in(0, Location::RequiresRegister());
+  summary->set_temp(0, Location::RequiresRegister());
+
+  if (is_floating_point) {
+    summary->set_out(0, Location::RequiresFpuRegister());
+#if XLEN == 32
+  } else if (representation() == kUnboxedInt64) {
+    summary->set_out(0, Location::Pair(Location::RequiresRegister(),
+                                       Location::RequiresRegister()));
+#endif
+  } else {
+    summary->set_out(0, Location::RequiresRegister());
+  }
+  return summary;
+}
+
+void UnboxInstr::EmitLoadFromBox(FlowGraphCompiler* compiler) {
+  const Register box = locs()->in(0).reg();
+
+  switch (representation()) {
+    case kUnboxedInt64: {
+#if XLEN == 32
+      PairLocation* result = locs()->out(0).AsPairLocation();
+      ASSERT(result->At(0).reg() != box);
+      __ LoadFieldFromOffset(result->At(0).reg(), box, ValueOffset());
+      __ LoadFieldFromOffset(result->At(1).reg(), box,
+                             ValueOffset() + compiler::target::kWordSize);
+#elif XLEN == 64
+      const Register result = locs()->out(0).reg();
+      __ ld(result, compiler::FieldAddress(box, ValueOffset()));
+#endif
+      break;
+    }
+
+    case kUnboxedDouble: {
+      const FRegister result = locs()->out(0).fpu_reg();
+      __ LoadDFieldFromOffset(result, box, ValueOffset());
+      break;
+    }
+
+    case kUnboxedFloat: {
+      const FRegister result = locs()->out(0).fpu_reg();
+      __ LoadDFieldFromOffset(result, box, ValueOffset());
+      __ fcvtsd(result, result);
+      break;
+    }
+
+    case kUnboxedFloat32x4:
+    case kUnboxedFloat64x2:
+    case kUnboxedInt32x4: {
+      UNIMPLEMENTED();
+      break;
+    }
+
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+void UnboxInstr::EmitSmiConversion(FlowGraphCompiler* compiler) {
+  const Register box = locs()->in(0).reg();
+
+  switch (representation()) {
+#if XLEN == 32
+    case kUnboxedInt64: {
+      PairLocation* result = locs()->out(0).AsPairLocation();
+      __ SmiUntag(result->At(0).reg(), box);
+      __ srai(result->At(1).reg(), box, XLEN - 1);  // SignFill.
+      break;
+    }
+#elif XLEN == 64
+    case kUnboxedInt32:
+    case kUnboxedInt64: {
+      const Register result = locs()->out(0).reg();
+      __ SmiUntag(result, box);
+      break;
+    }
+#endif
+
+    case kUnboxedDouble: {
+      const FRegister result = locs()->out(0).fpu_reg();
+      __ SmiUntag(TMP, box);
+#if XLEN == 32
+      __ fcvtdw(result, TMP);
+#elif XLEN == 64
+      __ fcvtdl(result, TMP);
+#endif
+      break;
+    }
+
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+void UnboxInstr::EmitLoadInt32FromBoxOrSmi(FlowGraphCompiler* compiler) {
+  const Register value = locs()->in(0).reg();
+  const Register result = locs()->out(0).reg();
+  ASSERT(value != result);
+  compiler::Label done;
+  __ SmiUntag(result, value);
+  __ BranchIfSmi(value, &done, compiler::Assembler::kNearJump);
+  __ LoadFieldFromOffset(result, value, Mint::value_offset(),
+                         compiler::kFourBytes);
+  __ Bind(&done);
+}
+
+void UnboxInstr::EmitLoadInt64FromBoxOrSmi(FlowGraphCompiler* compiler) {
+#if XLEN == 32
+  const Register box = locs()->in(0).reg();
+  PairLocation* result = locs()->out(0).AsPairLocation();
+  ASSERT(result->At(0).reg() != box);
+  ASSERT(result->At(1).reg() != box);
+  compiler::Label done;
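+  // Speculatively untag as if a Smi; if the box is actually a Mint, the
+  // full two-word payload is reloaded below.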
+  __ srai(result->At(1).reg(), box, XLEN - 1);  // SignFill
+  __ SmiUntag(result->At(0).reg(), box);
+  __ BranchIfSmi(box, &done, compiler::Assembler::kNearJump);
+  EmitLoadFromBox(compiler);
+  __ Bind(&done);
+#else
+  const Register value = locs()->in(0).reg();
+  const Register result = locs()->out(0).reg();
+  ASSERT(value != result);
+  compiler::Label done;
+  __ SmiUntag(result, value);
+  __ BranchIfSmi(value, &done, compiler::Assembler::kNearJump);
+  __ LoadFieldFromOffset(result, value, Mint::value_offset());
+  __ Bind(&done);
+#endif
+}
+
+LocationSummary* BoxInteger32Instr::MakeLocationSummary(Zone* zone,
+                                                        bool opt) const {
+  ASSERT((from_representation() == kUnboxedInt32) ||
+         (from_representation() == kUnboxedUint32));
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 0;
+#if XLEN > 32
+  // ValueFitsSmi() may be overly conservative (i.e. false) because range
+  // analysis is only performed during optimized compilation.
+  const bool kMayAllocateMint = false;
+#else
+  const bool kMayAllocateMint = !ValueFitsSmi();
+#endif
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps,
+                      kMayAllocateMint ? LocationSummary::kCallOnSlowPath
+                                       : LocationSummary::kNoCall);
+  summary->set_in(0, Location::RequiresRegister());
+  summary->set_out(0, Location::RequiresRegister());
+  return summary;
+}
+
+void BoxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  Register value = locs()->in(0).reg();
+  Register out = locs()->out(0).reg();
+  ASSERT(value != out);
+
+#if XLEN > 32
+  ASSERT(compiler::target::kSmiBits >= 32);
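+  // Shift the 32-bit value into the upper bits, then shift back down with
+  // sign (int32) or zero (uint32) extension, leaving the Smi tag bits clear.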
+  __ slli(out, value, XLEN - 32);
+  if (from_representation() == kUnboxedInt32) {
+    __ srai(out, out, XLEN - 32 - kSmiTagShift);
+  } else {
+    ASSERT(from_representation() == kUnboxedUint32);
+    __ srli(out, out, XLEN - 32 - kSmiTagShift);
+  }
+#elif XLEN == 32
+  __ slli(out, value, 1);
+  if (ValueFitsSmi()) {
+    return;
+  }
+  compiler::Label done;
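+  // The int32 value fits in a Smi iff untagging recovers it; the uint32
+  // value fits iff its top two bits are clear (i.e. it is below 2^30).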
+  if (from_representation() == kUnboxedInt32) {
+    __ srai(TMP, out, 1);
+    __ beq(TMP, value, &done);
+  } else {
+    ASSERT(from_representation() == kUnboxedUint32);
+    __ srli(TMP, value, 30);
+    __ beqz(TMP, &done);
+  }
+
+  BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), out,
+                                  TMP);
+  __ StoreFieldToOffset(value, out, compiler::target::Mint::value_offset());
+  if (from_representation() == kUnboxedInt32) {
+    __ srai(TMP, value, 31);
+    __ StoreFieldToOffset(
+        TMP, out,
+        compiler::target::Mint::value_offset() + compiler::target::kWordSize);
+  } else {
+    ASSERT(from_representation() == kUnboxedUint32);
+    __ StoreFieldToOffset(
+        ZR, out,
+        compiler::target::Mint::value_offset() + compiler::target::kWordSize);
+  }
+  __ Bind(&done);
+#endif
+}
+
+LocationSummary* BoxInt64Instr::MakeLocationSummary(Zone* zone,
+                                                    bool opt) const {
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 0;
+  // Shared slow path is used in BoxInt64Instr::EmitNativeCode in
+  // FLAG_use_bare_instructions mode and only after VM isolate stubs were
+  // replaced with isolate-specific stubs.
+  auto object_store = IsolateGroup::Current()->object_store();
+  const bool stubs_in_vm_isolate =
+      object_store->allocate_mint_with_fpu_regs_stub()
+          ->untag()
+          ->InVMIsolateHeap() ||
+      object_store->allocate_mint_without_fpu_regs_stub()
+          ->untag()
+          ->InVMIsolateHeap();
+  const bool shared_slow_path_call =
+      SlowPathSharingSupported(opt) && !stubs_in_vm_isolate;
+  LocationSummary* summary = new (zone) LocationSummary(
+      zone, kNumInputs, kNumTemps,
+      ValueFitsSmi() ? LocationSummary::kNoCall
+      : shared_slow_path_call ? LocationSummary::kCallOnSharedSlowPath
+                              : LocationSummary::kCallOnSlowPath);
+#if XLEN == 32
+  summary->set_in(0, Location::Pair(Location::RequiresRegister(),
+                                    Location::RequiresRegister()));
+#else
+  summary->set_in(0, Location::RequiresRegister());
+#endif
+  if (ValueFitsSmi()) {
+    summary->set_out(0, Location::RequiresRegister());
+  } else if (shared_slow_path_call) {
+    summary->set_out(0,
+                     Location::RegisterLocation(AllocateMintABI::kResultReg));
+  } else {
+    summary->set_out(0, Location::RequiresRegister());
+  }
+  return summary;
+}
+
+void BoxInt64Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
+#if XLEN == 32
+  if (ValueFitsSmi()) {
+    PairLocation* value_pair = locs()->in(0).AsPairLocation();
+    Register value_lo = value_pair->At(0).reg();
+    Register out_reg = locs()->out(0).reg();
+    __ SmiTag(out_reg, value_lo);
+    return;
+  }
+
+  PairLocation* value_pair = locs()->in(0).AsPairLocation();
+  Register value_lo = value_pair->At(0).reg();
+  Register value_hi = value_pair->At(1).reg();
+  Register out_reg = locs()->out(0).reg();
+
+  compiler::Label overflow, done;
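+  // The value fits in a Smi iff untagging the tagged low word recovers it
+  // and the high word is the sign-fill of the tagged low word.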
+  __ SmiTag(out_reg, value_lo);
+  __ srai(TMP, out_reg, kSmiTagSize);
+  __ bne(value_lo, TMP, &overflow, compiler::Assembler::kNearJump);
+  __ srai(TMP, out_reg, XLEN - 1);  // SignFill
+  __ beq(value_hi, TMP, &done, compiler::Assembler::kNearJump);
+
+  __ Bind(&overflow);
+  if (compiler->intrinsic_mode()) {
+    __ TryAllocate(compiler->mint_class(),
+                   compiler->intrinsic_slow_path_label(),
+                   compiler::Assembler::kNearJump, out_reg, TMP);
+  } else if (locs()->call_on_shared_slow_path()) {
+    auto object_store = compiler->isolate_group()->object_store();
+    const bool live_fpu_regs = locs()->live_registers()->FpuRegisterCount() > 0;
+    const auto& stub = Code::ZoneHandle(
+        compiler->zone(),
+        live_fpu_regs ? object_store->allocate_mint_with_fpu_regs_stub()
+                      : object_store->allocate_mint_without_fpu_regs_stub());
+
+    ASSERT(!locs()->live_registers()->ContainsRegister(
+        AllocateMintABI::kResultReg));
+    auto extended_env = compiler->SlowPathEnvironmentFor(this, 0);
+    compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
+                               locs(), DeoptId::kNone, extended_env);
+  } else {
+    BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(),
+                                    out_reg, TMP);
+  }
+
+  __ StoreFieldToOffset(value_lo, out_reg,
+                        compiler::target::Mint::value_offset());
+  __ StoreFieldToOffset(
+      value_hi, out_reg,
+      compiler::target::Mint::value_offset() + compiler::target::kWordSize);
+  __ Bind(&done);
+#else
+  Register in = locs()->in(0).reg();
+  Register out = locs()->out(0).reg();
+  if (ValueFitsSmi()) {
+    __ SmiTag(out, in);
+    return;
+  }
+  ASSERT(kSmiTag == 0);
+  compiler::Label done;
+
+  ASSERT(out != in);
+  __ SmiTag(out, in);
+  __ SmiUntag(TMP, out);
+  __ beq(in, TMP, &done);  // No overflow.
+
+  if (compiler->intrinsic_mode()) {
+    __ TryAllocate(compiler->mint_class(),
+                   compiler->intrinsic_slow_path_label(),
+                   compiler::Assembler::kNearJump, out, TMP);
+  } else if (locs()->call_on_shared_slow_path()) {
+    auto object_store = compiler->isolate_group()->object_store();
+    const bool live_fpu_regs = locs()->live_registers()->FpuRegisterCount() > 0;
+    const auto& stub = Code::ZoneHandle(
+        compiler->zone(),
+        live_fpu_regs ? object_store->allocate_mint_with_fpu_regs_stub()
+                      : object_store->allocate_mint_without_fpu_regs_stub());
+
+    ASSERT(!locs()->live_registers()->ContainsRegister(
+        AllocateMintABI::kResultReg));
+    auto extended_env = compiler->SlowPathEnvironmentFor(this, 0);
+    compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
+                               locs(), DeoptId::kNone, extended_env);
+  } else {
+    BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), out,
+                                    TMP);
+  }
+
+  __ StoreToOffset(in, out, Mint::value_offset() - kHeapObjectTag);
+  __ Bind(&done);
+#endif
+}
+
+#if XLEN == 32
+static void LoadInt32FromMint(FlowGraphCompiler* compiler,
+                              Register mint,
+                              Register result,
+                              compiler::Label* deopt) {
+  __ LoadFieldFromOffset(result, mint, compiler::target::Mint::value_offset());
+  if (deopt != NULL) {
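+    // Deoptimize if the mint value does not fit in 32 bits, i.e. if the
+    // high word is not the sign extension of the low word.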
+    __ LoadFieldFromOffset(
+        TMP, mint,
+        compiler::target::Mint::value_offset() + compiler::target::kWordSize);
+    __ srai(TMP2, result, XLEN - 1);
+    __ bne(TMP, TMP2, deopt);
+  }
+}
+#endif
+
+LocationSummary* UnboxInteger32Instr::MakeLocationSummary(Zone* zone,
+                                                          bool opt) const {
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  summary->set_in(0, Location::RequiresRegister());
+  summary->set_out(0, Location::RequiresRegister());
+  return summary;
+}
+
+void UnboxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
+#if XLEN == 32
+  const intptr_t value_cid = value()->Type()->ToCid();
+  const Register value = locs()->in(0).reg();
+  const Register out = locs()->out(0).reg();
+  compiler::Label* deopt =
+      CanDeoptimize()
+          ? compiler->AddDeoptStub(GetDeoptId(), ICData::kDeoptUnboxInteger)
+          : NULL;
+  compiler::Label* out_of_range = !is_truncating() ? deopt : NULL;
+  ASSERT(value != out);
+
+  if (value_cid == kSmiCid) {
+    __ SmiUntag(out, value);
+  } else if (value_cid == kMintCid) {
+    LoadInt32FromMint(compiler, value, out, out_of_range);
+  } else if (!CanDeoptimize()) {
+    compiler::Label done;
+    __ SmiUntag(out, value);
+    __ BranchIfSmi(value, &done);
+    LoadInt32FromMint(compiler, value, out, NULL);
+    __ Bind(&done);
+  } else {
+    compiler::Label done;
+    __ SmiUntag(out, value);
+    __ BranchIfSmi(value, &done);
+    __ CompareClassId(value, kMintCid, TMP);
+    __ BranchIf(NE, deopt);
+    LoadInt32FromMint(compiler, value, out, out_of_range);
+    __ Bind(&done);
+  }
+#elif XLEN == 64
+  const intptr_t value_cid = value()->Type()->ToCid();
+  const Register out = locs()->out(0).reg();
+  const Register value = locs()->in(0).reg();
+  compiler::Label* deopt =
+      CanDeoptimize()
+          ? compiler->AddDeoptStub(GetDeoptId(), ICData::kDeoptUnboxInteger)
+          : NULL;
+
+  if (value_cid == kSmiCid) {
+    __ SmiUntag(out, value);
+  } else if (value_cid == kMintCid) {
+    __ LoadFieldFromOffset(out, value, Mint::value_offset());
+  } else if (!CanDeoptimize()) {
+    // Type information is not conclusive, but range analysis found
+    // the value to be in int64 range. Therefore it must be a smi
+    // or mint value.
+    ASSERT(is_truncating());
+    compiler::Label done;
+    __ SmiUntag(out, value);
+    __ BranchIfSmi(value, &done);
+    __ LoadFieldFromOffset(out, value, Mint::value_offset());
+    __ Bind(&done);
+  } else {
+    compiler::Label done;
+    __ SmiUntag(out, value);
+    __ BranchIfSmi(value, &done);
+    __ CompareClassId(value, kMintCid, TMP);
+    __ BranchIf(NE, deopt);
+    __ LoadFieldFromOffset(out, value, Mint::value_offset());
+    __ Bind(&done);
+  }
+
+  // TODO(vegorov): as implemented right now, truncating unboxing leaves
+  // "garbage" in the higher word.
+  if (!is_truncating() && (deopt != NULL)) {
+    ASSERT(representation() == kUnboxedInt32);
+    __ sextw(TMP, out);
+    __ bne(TMP, out, deopt);
+  }
+#endif
+}
+
+LocationSummary* BinaryDoubleOpInstr::MakeLocationSummary(Zone* zone,
+                                                          bool opt) const {
+  const intptr_t kNumInputs = 2;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  summary->set_in(0, Location::RequiresFpuRegister());
+  summary->set_in(1, Location::RequiresFpuRegister());
+  summary->set_out(0, Location::RequiresFpuRegister());
+  return summary;
+}
+
+void BinaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  const FRegister left = locs()->in(0).fpu_reg();
+  const FRegister right = locs()->in(1).fpu_reg();
+  const FRegister result = locs()->out(0).fpu_reg();
+  switch (op_kind()) {
+    case Token::kADD:
+      __ faddd(result, left, right);
+      break;
+    case Token::kSUB:
+      __ fsubd(result, left, right);
+      break;
+    case Token::kMUL:
+      __ fmuld(result, left, right);
+      break;
+    case Token::kDIV:
+      __ fdivd(result, left, right);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+LocationSummary* DoubleTestOpInstr::MakeLocationSummary(Zone* zone,
+                                                        bool opt) const {
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  summary->set_in(0, Location::RequiresFpuRegister());
+  summary->set_out(0, Location::RequiresRegister());
+  return summary;
+}
+
+Condition DoubleTestOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
+                                                BranchLabels labels) {
+  ASSERT(compiler->is_optimizing());
+  const FRegister value = locs()->in(0).fpu_reg();
+
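+  // fclass.d writes a one-hot mask classifying the value and raises no
+  // floating-point exceptions, so NaN and infinity can be tested with a
+  // plain integer mask.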
+  __ fclassd(TMP, value);
+  if (op_kind() == MethodRecognizer::kDouble_getIsNaN) {
+    __ TestImmediate(TMP, kFClassSignallingNan | kFClassQuietNan);
+  } else if (op_kind() == MethodRecognizer::kDouble_getIsInfinite) {
+    __ TestImmediate(TMP, kFClassNegInfinity | kFClassPosInfinity);
+  } else {
+    UNREACHABLE();
+  }
+  return kind() == Token::kEQ ? NOT_ZERO : ZERO;
+}
+
+// SIMD
+
+#define DEFINE_EMIT(Name, Args)                                                \
+  static void Emit##Name(FlowGraphCompiler* compiler, SimdOpInstr* instr,      \
+                         PP_APPLY(PP_UNPACK, Args))
+
+#define SIMD_OP_FLOAT_ARITH(V, Name, op)                                       \
+  V(Float32x4##Name, op##s)                                                    \
+  V(Float64x2##Name, op##d)
+
+#define SIMD_OP_SIMPLE_BINARY(V)                                               \
+  SIMD_OP_FLOAT_ARITH(V, Add, vadd)                                            \
+  SIMD_OP_FLOAT_ARITH(V, Sub, vsub)                                            \
+  SIMD_OP_FLOAT_ARITH(V, Mul, vmul)                                            \
+  SIMD_OP_FLOAT_ARITH(V, Div, vdiv)                                            \
+  SIMD_OP_FLOAT_ARITH(V, Min, vmin)                                            \
+  SIMD_OP_FLOAT_ARITH(V, Max, vmax)                                            \
+  V(Int32x4Add, vaddw)                                                         \
+  V(Int32x4Sub, vsubw)                                                         \
+  V(Int32x4BitAnd, vand)                                                       \
+  V(Int32x4BitOr, vorr)                                                        \
+  V(Int32x4BitXor, veor)                                                       \
+  V(Float32x4Equal, vceqs)                                                     \
+  V(Float32x4GreaterThan, vcgts)                                               \
+  V(Float32x4GreaterThanOrEqual, vcges)
+
+DEFINE_EMIT(SimdBinaryOp, (FRegister result, FRegister left, FRegister right)) {
+  UNIMPLEMENTED();
+}
+
+#define SIMD_OP_SIMPLE_UNARY(V)                                                \
+  SIMD_OP_FLOAT_ARITH(V, Sqrt, vsqrt)                                          \
+  SIMD_OP_FLOAT_ARITH(V, Negate, vneg)                                         \
+  SIMD_OP_FLOAT_ARITH(V, Abs, vabs)                                            \
+  V(Float32x4Reciprocal, VRecps)                                               \
+  V(Float32x4ReciprocalSqrt, VRSqrts)
+
+DEFINE_EMIT(SimdUnaryOp, (FRegister result, FRegister value)) {
+  UNIMPLEMENTED();
+}
+
+DEFINE_EMIT(Simd32x4GetSignMask,
+            (Register out, FRegister value, Temp<Register> temp)) {
+  UNIMPLEMENTED();
+}
+
+DEFINE_EMIT(
+    Float32x4FromDoubles,
+    (FRegister r, FRegister v0, FRegister v1, FRegister v2, FRegister v3)) {
+  UNIMPLEMENTED();
+}
+
+DEFINE_EMIT(
+    Float32x4Clamp,
+    (FRegister result, FRegister value, FRegister lower, FRegister upper)) {
+  UNIMPLEMENTED();
+}
+
+DEFINE_EMIT(Float32x4With,
+            (FRegister result, FRegister replacement, FRegister value)) {
+  UNIMPLEMENTED();
+}
+
+DEFINE_EMIT(Simd32x4ToSimd32x4, (SameAsFirstInput, FRegister value)) {
+  // TODO(dartbug.com/30949) these operations are essentially no-ops and
+  // should not generate any code. They should be removed from the graph
+  // before code generation.
+}
+
+DEFINE_EMIT(SimdZero, (FRegister v)) {
+  UNIMPLEMENTED();
+}
+
+DEFINE_EMIT(Float64x2GetSignMask, (Register out, FRegister value)) {
+  UNIMPLEMENTED();
+}
+
+DEFINE_EMIT(Float64x2With,
+            (SameAsFirstInput, FRegister left, FRegister right)) {
+  UNIMPLEMENTED();
+}
+
+DEFINE_EMIT(
+    Int32x4FromInts,
+    (FRegister result, Register v0, Register v1, Register v2, Register v3)) {
+  UNIMPLEMENTED();
+}
+
+DEFINE_EMIT(Int32x4FromBools,
+            (FRegister result,
+             Register v0,
+             Register v1,
+             Register v2,
+             Register v3,
+             Temp<Register> temp)) {
+  UNIMPLEMENTED();
+}
+
+DEFINE_EMIT(Int32x4GetFlag, (Register result, FRegister value)) {
+  UNIMPLEMENTED();
+}
+
+DEFINE_EMIT(Int32x4Select,
+            (FRegister out,
+             FRegister mask,
+             FRegister trueValue,
+             FRegister falseValue,
+             Temp<FRegister> temp)) {
+  UNIMPLEMENTED();
+}
+
+DEFINE_EMIT(Int32x4WithFlag,
+            (SameAsFirstInput, FRegister mask, Register flag)) {
+  UNIMPLEMENTED();
+}
+
+// Map SimdOpInstr::Kind-s to corresponding emit functions. Uses the following
+// format:
+//
+//     CASE(OpA) CASE(OpB) ____(Emitter) - Emitter is used to emit OpA and OpB.
+//     SIMPLE(OpA) - Emitter with name OpA is used to emit OpA.
+//
+#define SIMD_OP_VARIANTS(CASE, ____)                                           \
+  SIMD_OP_SIMPLE_BINARY(CASE)                                                  \
+  CASE(Float32x4ShuffleMix)                                                    \
+  CASE(Int32x4ShuffleMix)                                                      \
+  CASE(Float32x4NotEqual)                                                      \
+  CASE(Float32x4LessThan)                                                      \
+  CASE(Float32x4LessThanOrEqual)                                               \
+  CASE(Float32x4Scale)                                                         \
+  CASE(Float64x2FromDoubles)                                                   \
+  CASE(Float64x2Scale)                                                         \
+  ____(SimdBinaryOp)                                                           \
+  SIMD_OP_SIMPLE_UNARY(CASE)                                                   \
+  CASE(Float32x4ShuffleX)                                                      \
+  CASE(Float32x4ShuffleY)                                                      \
+  CASE(Float32x4ShuffleZ)                                                      \
+  CASE(Float32x4ShuffleW)                                                      \
+  CASE(Int32x4Shuffle)                                                         \
+  CASE(Float32x4Shuffle)                                                       \
+  CASE(Float32x4Splat)                                                         \
+  CASE(Float64x2GetX)                                                          \
+  CASE(Float64x2GetY)                                                          \
+  CASE(Float64x2Splat)                                                         \
+  CASE(Float64x2ToFloat32x4)                                                   \
+  CASE(Float32x4ToFloat64x2)                                                   \
+  ____(SimdUnaryOp)                                                            \
+  CASE(Float32x4GetSignMask)                                                   \
+  CASE(Int32x4GetSignMask)                                                     \
+  ____(Simd32x4GetSignMask)                                                    \
+  CASE(Float32x4FromDoubles)                                                   \
+  ____(Float32x4FromDoubles)                                                   \
+  CASE(Float32x4Zero)                                                          \
+  CASE(Float64x2Zero)                                                          \
+  ____(SimdZero)                                                               \
+  CASE(Float32x4Clamp)                                                         \
+  ____(Float32x4Clamp)                                                         \
+  CASE(Float32x4WithX)                                                         \
+  CASE(Float32x4WithY)                                                         \
+  CASE(Float32x4WithZ)                                                         \
+  CASE(Float32x4WithW)                                                         \
+  ____(Float32x4With)                                                          \
+  CASE(Float32x4ToInt32x4)                                                     \
+  CASE(Int32x4ToFloat32x4)                                                     \
+  ____(Simd32x4ToSimd32x4)                                                     \
+  CASE(Float64x2GetSignMask)                                                   \
+  ____(Float64x2GetSignMask)                                                   \
+  CASE(Float64x2WithX)                                                         \
+  CASE(Float64x2WithY)                                                         \
+  ____(Float64x2With)                                                          \
+  CASE(Int32x4FromInts)                                                        \
+  ____(Int32x4FromInts)                                                        \
+  CASE(Int32x4FromBools)                                                       \
+  ____(Int32x4FromBools)                                                       \
+  CASE(Int32x4GetFlagX)                                                        \
+  CASE(Int32x4GetFlagY)                                                        \
+  CASE(Int32x4GetFlagZ)                                                        \
+  CASE(Int32x4GetFlagW)                                                        \
+  ____(Int32x4GetFlag)                                                         \
+  CASE(Int32x4Select)                                                          \
+  ____(Int32x4Select)                                                          \
+  CASE(Int32x4WithFlagX)                                                       \
+  CASE(Int32x4WithFlagY)                                                       \
+  CASE(Int32x4WithFlagZ)                                                       \
+  CASE(Int32x4WithFlagW)                                                       \
+  ____(Int32x4WithFlag)
+
+LocationSummary* SimdOpInstr::MakeLocationSummary(Zone* zone, bool opt) const {
+  switch (kind()) {
+#define CASE(Name, ...) case k##Name:
+#define EMIT(Name)                                                             \
+  return MakeLocationSummaryFromEmitter(zone, this, &Emit##Name);
+    SIMD_OP_VARIANTS(CASE, EMIT)
+#undef CASE
+#undef EMIT
+    case kIllegalSimdOp:
+      UNREACHABLE();
+      break;
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+void SimdOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  switch (kind()) {
+#define CASE(Name, ...) case k##Name:
+#define EMIT(Name)                                                             \
+  InvokeEmitter(compiler, this, &Emit##Name);                                  \
+  break;
+    SIMD_OP_VARIANTS(CASE, EMIT)
+#undef CASE
+#undef EMIT
+    case kIllegalSimdOp:
+      UNREACHABLE();
+      break;
+  }
+}
+
+#undef DEFINE_EMIT
+
+LocationSummary* MathUnaryInstr::MakeLocationSummary(Zone* zone,
+                                                     bool opt) const {
+  ASSERT((kind() == MathUnaryInstr::kSqrt) ||
+         (kind() == MathUnaryInstr::kDoubleSquare));
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  summary->set_in(0, Location::RequiresFpuRegister());
+  summary->set_out(0, Location::RequiresFpuRegister());
+  return summary;
+}
+
+void MathUnaryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  if (kind() == MathUnaryInstr::kSqrt) {
+    const FRegister val = locs()->in(0).fpu_reg();
+    const FRegister result = locs()->out(0).fpu_reg();
+    __ fsqrtd(result, val);
+  } else if (kind() == MathUnaryInstr::kDoubleSquare) {
+    const FRegister val = locs()->in(0).fpu_reg();
+    const FRegister result = locs()->out(0).fpu_reg();
+    __ fmuld(result, val, val);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+LocationSummary* CaseInsensitiveCompareInstr::MakeLocationSummary(
+    Zone* zone,
+    bool opt) const {
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, InputCount(), kNumTemps, LocationSummary::kCall);
+  summary->set_in(0, Location::RegisterLocation(A0));
+  summary->set_in(1, Location::RegisterLocation(A1));
+  summary->set_in(2, Location::RegisterLocation(A2));
+  // Can't specify A3 because it is blocked in register allocation as TMP.
+  summary->set_in(3, Location::Any());
+  summary->set_out(0, Location::RegisterLocation(A0));
+  return summary;
+}
+
+void CaseInsensitiveCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  if (compiler->intrinsic_mode()) {
+    // Would also need to preserve CODE_REG and ARGS_DESC_REG.
+    UNIMPLEMENTED();
+  }
+
+  if (locs()->in(3).IsRegister()) {
+    __ mv(A3, locs()->in(3).reg());
+  } else if (locs()->in(3).IsStackSlot()) {
+    __ lx(A3, LocationToStackSlotAddress(locs()->in(3)));
+  } else {
+    UNIMPLEMENTED();
+  }
+  // PP is a C volatile register.
+  // SP will be aligned to the C stack alignment.
+  __ mv(CALLEE_SAVED_TEMP, PP);
+  __ mv(CALLEE_SAVED_TEMP2, SP);
+
+  // Call the function.
+  ASSERT(TargetFunction().is_leaf());  // No deopt info needed.
+  __ CallRuntime(TargetFunction(), TargetFunction().argument_count());
+
+  __ mv(PP, CALLEE_SAVED_TEMP);
+  __ mv(SP, CALLEE_SAVED_TEMP2);
+}
+
+LocationSummary* MathMinMaxInstr::MakeLocationSummary(Zone* zone,
+                                                      bool opt) const {
+  if (result_cid() == kDoubleCid) {
+    const intptr_t kNumInputs = 2;
+    const intptr_t kNumTemps = 0;
+    LocationSummary* summary = new (zone)
+        LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+    summary->set_in(0, Location::RequiresFpuRegister());
+    summary->set_in(1, Location::RequiresFpuRegister());
+    // Reuse the left register so that code can be made shorter.
+    summary->set_out(0, Location::SameAsFirstInput());
+    return summary;
+  }
+  ASSERT(result_cid() == kSmiCid);
+  const intptr_t kNumInputs = 2;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  summary->set_in(0, Location::RequiresRegister());
+  summary->set_in(1, Location::RequiresRegister());
+  // Reuse the left register so that code can be made shorter.
+  summary->set_out(0, Location::SameAsFirstInput());
+  return summary;
+}
+
+void MathMinMaxInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  ASSERT((op_kind() == MethodRecognizer::kMathMin) ||
+         (op_kind() == MethodRecognizer::kMathMax));
+  const bool is_min = (op_kind() == MethodRecognizer::kMathMin);
+  if (result_cid() == kDoubleCid) {
+    const FRegister left = locs()->in(0).fpu_reg();
+    const FRegister right = locs()->in(1).fpu_reg();
+    const FRegister result = locs()->out(0).fpu_reg();
+    if (is_min) {
+      __ fmind(result, left, right);
+    } else {
+      __ fmaxd(result, left, right);
+    }
+    return;
+  }
+
+  ASSERT(result_cid() == kSmiCid);
+  const Register left = locs()->in(0).reg();
+  const Register right = locs()->in(1).reg();
+  const Register result = locs()->out(0).reg();
+  compiler::Label choose_right, done;
+  if (is_min) {
+    __ bgt(left, right, &choose_right, compiler::Assembler::kNearJump);
+  } else {
+    __ blt(left, right, &choose_right, compiler::Assembler::kNearJump);
+  }
+  __ mv(result, left);
+  __ j(&done, compiler::Assembler::kNearJump);
+  __ Bind(&choose_right);
+  __ mv(result, right);
+  __ Bind(&done);
+}
+
+LocationSummary* UnarySmiOpInstr::MakeLocationSummary(Zone* zone,
+                                                      bool opt) const {
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  summary->set_in(0, Location::RequiresRegister());
+  // We make use of 3-operand instructions by not requiring the result
+  // register to be identical to the first input register, as it must be on
+  // Intel.
+  summary->set_out(0, Location::RequiresRegister());
+  return summary;
+}
+
+void UnarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  const Register value = locs()->in(0).reg();
+  const Register result = locs()->out(0).reg();
+  switch (op_kind()) {
+    case Token::kNEGATE: {
+      compiler::Label* deopt =
+          compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryOp);
+      __ neg(result, value);
+      ASSERT(result != value);
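+      // In two's complement, neg(x) == x only for zero and the minimal Smi.
+      // Negating the minimal Smi overflows; the zero case also takes the
+      // (rare) deoptimization path and is computed in unoptimized code.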
+      __ beq(result, value, deopt);  // Overflow.
+      break;
+    }
+    case Token::kBIT_NOT:
+      __ not_(result, value);
+      __ andi(result, result, ~kSmiTagMask);  // Remove inverted smi-tag.
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+LocationSummary* UnaryDoubleOpInstr::MakeLocationSummary(Zone* zone,
+                                                         bool opt) const {
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  summary->set_in(0, Location::RequiresFpuRegister());
+  summary->set_out(0, Location::RequiresFpuRegister());
+  return summary;
+}
+
+void UnaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  const FRegister result = locs()->out(0).fpu_reg();
+  const FRegister value = locs()->in(0).fpu_reg();
+  __ fnegd(result, value);
+}
+
+LocationSummary* Int32ToDoubleInstr::MakeLocationSummary(Zone* zone,
+                                                         bool opt) const {
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* result = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  result->set_in(0, Location::RequiresRegister());
+  result->set_out(0, Location::RequiresFpuRegister());
+  return result;
+}
+
+void Int32ToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  const Register value = locs()->in(0).reg();
+  const FRegister result = locs()->out(0).fpu_reg();
+  __ fcvtdw(result, value);
+}
+
+LocationSummary* SmiToDoubleInstr::MakeLocationSummary(Zone* zone,
+                                                       bool opt) const {
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* result = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  result->set_in(0, Location::RequiresRegister());
+  result->set_out(0, Location::RequiresFpuRegister());
+  return result;
+}
+
+void SmiToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  const Register value = locs()->in(0).reg();
+  const FRegister result = locs()->out(0).fpu_reg();
+  __ SmiUntag(TMP, value);
+#if XLEN == 32
+  __ fcvtdw(result, TMP);
+#else
+  __ fcvtdl(result, TMP);
+#endif
+}
+
+LocationSummary* Int64ToDoubleInstr::MakeLocationSummary(Zone* zone,
+                                                         bool opt) const {
+#if XLEN == 32
+  UNIMPLEMENTED();
+  return NULL;
+#else
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* result = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  result->set_in(0, Location::RequiresRegister());
+  result->set_out(0, Location::RequiresFpuRegister());
+  return result;
+#endif
+}
+
+void Int64ToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+#if XLEN == 32
+  UNIMPLEMENTED();
+#else
+  const Register value = locs()->in(0).reg();
+  const FRegister result = locs()->out(0).fpu_reg();
+  __ fcvtdl(result, value);
+#endif
+}
+
+LocationSummary* DoubleToIntegerInstr::MakeLocationSummary(Zone* zone,
+                                                           bool opt) const {
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* result = new (zone) LocationSummary(
+      zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
+  result->set_in(0, Location::RequiresFpuRegister());
+  result->set_out(0, Location::RequiresRegister());
+  return result;
+}
+
+void DoubleToIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  const Register result = locs()->out(0).reg();
+  const FRegister value_double = locs()->in(0).fpu_reg();
+
+  DoubleToIntegerSlowPath* slow_path =
+      new DoubleToIntegerSlowPath(this, value_double);
+  compiler->AddSlowPathCode(slow_path);
+
+  RoundingMode rounding;
+  switch (recognized_kind()) {
+    case MethodRecognizer::kDoubleToInteger:
+      rounding = RTZ;
+      break;
+    case MethodRecognizer::kDoubleFloorToInt:
+      rounding = RDN;
+      break;
+    case MethodRecognizer::kDoubleCeilToInt:
+      rounding = RUP;
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+#if XLEN == 32
+  __ fcvtwd(TMP, value_double, rounding);
+#else
+  __ fcvtld(TMP, value_double, rounding);
+#endif
+  // Underflow -> minint -> Smi tagging fails
+  // Overflow, NaN -> maxint -> Smi tagging fails
+
+  // Check for overflow and that it fits into Smi.
+  __ SmiTag(result, TMP);
+  __ SmiUntag(TMP2, result);
+  __ bne(TMP, TMP2, slow_path->entry_label());
+  __ Bind(slow_path->exit_label());
+}
+
+LocationSummary* DoubleToSmiInstr::MakeLocationSummary(Zone* zone,
+                                                       bool opt) const {
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* result = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  result->set_in(0, Location::RequiresFpuRegister());
+  result->set_out(0, Location::RequiresRegister());
+  return result;
+}
+
+void DoubleToSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  compiler::Label* deopt =
+      compiler->AddDeoptStub(deopt_id(), ICData::kDeoptDoubleToSmi);
+  const Register result = locs()->out(0).reg();
+  const FRegister value = locs()->in(0).fpu_reg();
+
+#if XLEN == 32
+  __ fcvtwd(TMP, value, RTZ);  // Round To Zero (truncation).
+#else
+  __ fcvtld(TMP, value, RTZ);  // Round To Zero (truncation).
+#endif
+  // Underflow -> minint -> Smi tagging fails
+  // Overflow, NaN -> maxint -> Smi tagging fails
+
+  // Check for overflow and that it fits into Smi.
+  __ SmiTag(result, TMP);
+  __ SmiUntag(TMP2, result);
+  __ bne(TMP, TMP2, deopt);
+}
+
+LocationSummary* DoubleToDoubleInstr::MakeLocationSummary(Zone* zone,
+                                                          bool opt) const {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+void DoubleToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  UNIMPLEMENTED();
+}
+
+LocationSummary* DoubleToFloatInstr::MakeLocationSummary(Zone* zone,
+                                                         bool opt) const {
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* result = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  result->set_in(0, Location::RequiresFpuRegister());
+  result->set_out(0, Location::RequiresFpuRegister());
+  return result;
+}
+
+void DoubleToFloatInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  const FRegister value = locs()->in(0).fpu_reg();
+  const FRegister result = locs()->out(0).fpu_reg();
+  __ fcvtsd(result, value);
+}
+
+LocationSummary* FloatToDoubleInstr::MakeLocationSummary(Zone* zone,
+                                                         bool opt) const {
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* result = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  result->set_in(0, Location::RequiresFpuRegister());
+  result->set_out(0, Location::RequiresFpuRegister());
+  return result;
+}
+
+void FloatToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  const FRegister value = locs()->in(0).fpu_reg();
+  const FRegister result = locs()->out(0).fpu_reg();
+  __ fcvtds(result, value);
+}
+
+LocationSummary* InvokeMathCFunctionInstr::MakeLocationSummary(Zone* zone,
+                                                               bool opt) const {
+  ASSERT((InputCount() == 1) || (InputCount() == 2));
+  const intptr_t kNumTemps = 0;
+  LocationSummary* result = new (zone)
+      LocationSummary(zone, InputCount(), kNumTemps, LocationSummary::kCall);
+  result->set_in(0, Location::FpuRegisterLocation(FA0));
+  if (InputCount() == 2) {
+    result->set_in(1, Location::FpuRegisterLocation(FA1));
+  }
+  result->set_out(0, Location::FpuRegisterLocation(FA0));
+  return result;
+}
+
+void InvokeMathCFunctionInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  if (compiler->intrinsic_mode()) {
+    // Would also need to preserve CODE_REG and ARGS_DESC_REG.
+    UNIMPLEMENTED();
+  }
+
+  // PP is a C volatile register.
+  // SP will be aligned to the C stack alignment.
+  __ mv(CALLEE_SAVED_TEMP, PP);
+  __ mv(CALLEE_SAVED_TEMP2, SP);
+
+  ASSERT(TargetFunction().is_leaf());  // No deopt info needed.
+  __ CallRuntime(TargetFunction(), InputCount());
+
+  __ mv(PP, CALLEE_SAVED_TEMP);
+  __ mv(SP, CALLEE_SAVED_TEMP2);
+
+  // TODO(riscv): Special case pow?
+}
+
+LocationSummary* ExtractNthOutputInstr::MakeLocationSummary(Zone* zone,
+                                                            bool opt) const {
+  // Only use this instruction in optimized code.
+  ASSERT(opt);
+  const intptr_t kNumInputs = 1;
+  LocationSummary* summary =
+      new (zone) LocationSummary(zone, kNumInputs, 0, LocationSummary::kNoCall);
+  if (representation() == kUnboxedDouble) {
+    if (index() == 0) {
+      summary->set_in(
+          0, Location::Pair(Location::RequiresFpuRegister(), Location::Any()));
+    } else {
+      ASSERT(index() == 1);
+      summary->set_in(
+          0, Location::Pair(Location::Any(), Location::RequiresFpuRegister()));
+    }
+    summary->set_out(0, Location::RequiresFpuRegister());
+  } else {
+    ASSERT(representation() == kTagged);
+    if (index() == 0) {
+      summary->set_in(
+          0, Location::Pair(Location::RequiresRegister(), Location::Any()));
+    } else {
+      ASSERT(index() == 1);
+      summary->set_in(
+          0, Location::Pair(Location::Any(), Location::RequiresRegister()));
+    }
+    summary->set_out(0, Location::RequiresRegister());
+  }
+  return summary;
+}
+
+void ExtractNthOutputInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  ASSERT(locs()->in(0).IsPairLocation());
+  PairLocation* pair = locs()->in(0).AsPairLocation();
+  Location in_loc = pair->At(index());
+  if (representation() == kUnboxedDouble) {
+    const FRegister out = locs()->out(0).fpu_reg();
+    const FRegister in = in_loc.fpu_reg();
+    __ fmvd(out, in);
+  } else {
+    ASSERT(representation() == kTagged);
+    const Register out = locs()->out(0).reg();
+    const Register in = in_loc.reg();
+    __ mv(out, in);
+  }
+}
+
+LocationSummary* TruncDivModInstr::MakeLocationSummary(Zone* zone,
+                                                       bool opt) const {
+  const intptr_t kNumInputs = 2;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  summary->set_in(0, Location::RequiresRegister());
+  summary->set_in(1, Location::RequiresRegister());
+  // Output is a pair of registers.
+  summary->set_out(0, Location::Pair(Location::RequiresRegister(),
+                                     Location::RequiresRegister()));
+  return summary;
+}
+
+void TruncDivModInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  ASSERT(CanDeoptimize());
+  compiler::Label* deopt =
+      compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
+  const Register left = locs()->in(0).reg();
+  const Register right = locs()->in(1).reg();
+  ASSERT(locs()->out(0).IsPairLocation());
+  const PairLocation* pair = locs()->out(0).AsPairLocation();
+  const Register result_div = pair->At(0).reg();
+  const Register result_mod = pair->At(1).reg();
+  if (RangeUtils::CanBeZero(divisor_range())) {
+    // Handle divide by zero in runtime.
+    __ beqz(right, deopt);
+  }
+
+  __ SmiUntag(TMP, left);
+  __ SmiUntag(TMP2, right);
+
+  // Macro-op fusion: DIV immediately before REM.
+  __ div(result_div, TMP, TMP2);
+  __ rem(result_mod, TMP, TMP2);
+
+  // Correct MOD result:
+  //  res = left % right;
+  //  if (res < 0) {
+  //    if (right < 0) {
+  //      res = res - right;
+  //    } else {
+  //      res = res + right;
+  //    }
+  //  }
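+  // For example, -7 % 2: rem yields -1 (truncated division); the divisor is
+  // positive, so the adjustment adds 2, producing 1 as Dart's % requires.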
+  compiler::Label done, adjust;
+  __ bgez(result_mod, &done, compiler::Assembler::kNearJump);
+  // Result is negative, adjust it.
+  if (RangeUtils::IsNegative(divisor_range())) {
+    __ sub(result_mod, result_mod, TMP2);
+  } else if (RangeUtils::IsPositive(divisor_range())) {
+    __ add(result_mod, result_mod, TMP2);
+  } else {
+    __ bgez(right, &adjust, compiler::Assembler::kNearJump);
+    __ sub(result_mod, result_mod, TMP2);
+    __ j(&done, compiler::Assembler::kNearJump);
+    __ Bind(&adjust);
+    __ add(result_mod, result_mod, TMP2);
+  }
+  __ Bind(&done);
+
+  if (RangeUtils::Overlaps(divisor_range(), -1, -1)) {
+    // Check the corner case of dividing MIN_SMI by -1, in which case we
+    // cannot tag the result.
+    __ mv(TMP, result_div);
+    __ SmiTag(result_div);
+    __ SmiTag(result_mod);
+    __ SmiUntag(TMP2, result_div);
+    __ bne(TMP, TMP2, deopt);
+  } else {
+    __ SmiTag(result_div);
+    __ SmiTag(result_mod);
+  }
+}
+
+LocationSummary* BranchInstr::MakeLocationSummary(Zone* zone, bool opt) const {
+  comparison()->InitializeLocationSummary(zone, opt);
+  // Branches don't produce a result.
+  comparison()->locs()->set_out(0, Location::NoLocation());
+  return comparison()->locs();
+}
+
+void BranchInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  comparison()->EmitBranchCode(compiler, this);
+}
+
+LocationSummary* CheckClassInstr::MakeLocationSummary(Zone* zone,
+                                                      bool opt) const {
+  const intptr_t kNumInputs = 1;
+  const bool need_mask_temp = IsBitTest();
+  const intptr_t kNumTemps = !IsNullCheck() ? (need_mask_temp ? 2 : 1) : 0;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  summary->set_in(0, Location::RequiresRegister());
+  if (!IsNullCheck()) {
+    summary->set_temp(0, Location::RequiresRegister());
+    if (need_mask_temp) {
+      summary->set_temp(1, Location::RequiresRegister());
+    }
+  }
+  return summary;
+}
+
+void CheckClassInstr::EmitNullCheck(FlowGraphCompiler* compiler,
+                                    compiler::Label* deopt) {
+  if (IsDeoptIfNull()) {
+    __ beq(locs()->in(0).reg(), NULL_REG, deopt);
+  } else if (IsDeoptIfNotNull()) {
+    __ bne(locs()->in(0).reg(), NULL_REG, deopt);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+void CheckClassInstr::EmitBitTest(FlowGraphCompiler* compiler,
+                                  intptr_t min,
+                                  intptr_t max,
+                                  intptr_t mask,
+                                  compiler::Label* deopt) {
+  Register biased_cid = locs()->temp(0).reg();
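+  // Deoptimize when the cid lies outside [min, max]; otherwise test the bit
+  // for (cid - min) in the mask of accepted cids and deoptimize when it is
+  // clear.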
+  __ AddImmediate(biased_cid, -min);
+  __ CompareImmediate(biased_cid, max - min);
+  __ BranchIf(HI, deopt);
+
+  Register bit_reg = locs()->temp(1).reg();
+  __ LoadImmediate(bit_reg, 1);
+  __ sll(bit_reg, bit_reg, biased_cid);
+  __ TestImmediate(bit_reg, mask);
+  __ BranchIf(EQ, deopt);
+}
+
+int CheckClassInstr::EmitCheckCid(FlowGraphCompiler* compiler,
+                                  int bias,
+                                  intptr_t cid_start,
+                                  intptr_t cid_end,
+                                  bool is_last,
+                                  compiler::Label* is_ok,
+                                  compiler::Label* deopt,
+                                  bool use_near_jump) {
+  Register biased_cid = locs()->temp(0).reg();
+  Condition no_match, match;
+  if (cid_start == cid_end) {
+    __ CompareImmediate(biased_cid, cid_start - bias);
+    no_match = NE;
+    match = EQ;
+  } else {
+    // For class ID ranges use a subtract followed by an unsigned
+    // comparison to check both ends of the ranges with one comparison.
+    __ AddImmediate(biased_cid, bias - cid_start);
+    bias = cid_start;
+    __ CompareImmediate(biased_cid, cid_end - cid_start);
+    no_match = HI;  // Unsigned higher.
+    match = LS;     // Unsigned lower or same.
+  }
+  if (is_last) {
+    __ BranchIf(no_match, deopt);
+  } else {
+    __ BranchIf(match, is_ok);
+  }
+  return bias;
+}
+
+LocationSummary* CheckClassIdInstr::MakeLocationSummary(Zone* zone,
+                                                        bool opt) const {
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  summary->set_in(0, cids_.IsSingleCid() ? Location::RequiresRegister()
+                                         : Location::WritableRegister());
+  return summary;
+}
+
+void CheckClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  Register value = locs()->in(0).reg();
+  compiler::Label* deopt =
+      compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckClass);
+  if (cids_.IsSingleCid()) {
+    __ CompareImmediate(value, Smi::RawValue(cids_.cid_start));
+    __ BranchIf(NE, deopt);
+  } else {
+    __ AddImmediate(value, -Smi::RawValue(cids_.cid_start));
+    __ CompareImmediate(value, Smi::RawValue(cids_.cid_end - cids_.cid_start));
+    __ BranchIf(HI, deopt);  // Unsigned higher.
+  }
+}
+
+LocationSummary* CheckSmiInstr::MakeLocationSummary(Zone* zone,
+                                                    bool opt) const {
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  summary->set_in(0, Location::RequiresRegister());
+  return summary;
+}
+
+void CheckSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  const Register value = locs()->in(0).reg();
+  compiler::Label* deopt = compiler->AddDeoptStub(
+      deopt_id(), ICData::kDeoptCheckSmi, licm_hoisted_ ? ICData::kHoisted : 0);
+  __ BranchIfNotSmi(value, deopt);
+}
+
+void CheckNullInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  ThrowErrorSlowPathCode* slow_path = new NullErrorSlowPath(this);
+  compiler->AddSlowPathCode(slow_path);
+
+  Register value_reg = locs()->in(0).reg();
+  // TODO(dartbug.com/30480): Consider passing `null` literal as an argument
+  // in order to be able to allocate it on register.
+  __ CompareObject(value_reg, Object::null_object());
+  __ BranchIf(EQUAL, slow_path->entry_label());
+}
+
+LocationSummary* CheckArrayBoundInstr::MakeLocationSummary(Zone* zone,
+                                                           bool opt) const {
+  const intptr_t kNumInputs = 2;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* locs = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  locs->set_in(kLengthPos, LocationRegisterOrSmiConstant(length()));
+  locs->set_in(kIndexPos, LocationRegisterOrSmiConstant(index()));
+  return locs;
+}
+
+void CheckArrayBoundInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  uint32_t flags = generalized_ ? ICData::kGeneralized : 0;
+  flags |= licm_hoisted_ ? ICData::kHoisted : 0;
+  compiler::Label* deopt =
+      compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckArrayBound, flags);
+
+  Location length_loc = locs()->in(kLengthPos);
+  Location index_loc = locs()->in(kIndexPos);
+
+  const intptr_t index_cid = index()->Type()->ToCid();
+  if (length_loc.IsConstant() && index_loc.IsConstant()) {
+    // TODO(srdjan): remove this code once failures are fixed.
+    if ((Smi::Cast(length_loc.constant()).Value() >
+         Smi::Cast(index_loc.constant()).Value()) &&
+        (Smi::Cast(index_loc.constant()).Value() >= 0)) {
+      // This CheckArrayBoundInstr should have been eliminated.
+      return;
+    }
+    ASSERT((Smi::Cast(length_loc.constant()).Value() <=
+            Smi::Cast(index_loc.constant()).Value()) ||
+           (Smi::Cast(index_loc.constant()).Value() < 0));
+    // Unconditionally deoptimize for constant bounds checks because they
+    // occur only when the index is out of bounds.
+    __ j(deopt);
+    return;
+  }
+
+  if (index_loc.IsConstant()) {
+    const Register length = length_loc.reg();
+    const Smi& index = Smi::Cast(index_loc.constant());
+    __ CompareObject(length, index);
+    __ BranchIf(LS, deopt);
+  } else if (length_loc.IsConstant()) {
+    const Smi& length = Smi::Cast(length_loc.constant());
+    const Register index = index_loc.reg();
+    if (index_cid != kSmiCid) {
+      __ BranchIfNotSmi(index, deopt);
+    }
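+    // When the length is the maximal Smi, every non-negative index is in
+    // bounds, so only the sign of the index needs to be checked.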
+    if (length.Value() == Smi::kMaxValue) {
+      __ bltz(index, deopt);
+    } else {
+      __ CompareObject(index, length);
+      __ BranchIf(CS, deopt);
+    }
+  } else {
+    const Register length = length_loc.reg();
+    const Register index = index_loc.reg();
+    if (index_cid != kSmiCid) {
+      __ BranchIfNotSmi(index, deopt);
+    }
+    __ CompareObjectRegisters(index, length);
+    __ BranchIf(CS, deopt);
+  }
+}
+
+class Int64DivideSlowPath : public ThrowErrorSlowPathCode {
+ public:
+  Int64DivideSlowPath(BinaryInt64OpInstr* instruction,
+                      Register divisor,
+                      Range* divisor_range,
+                      Register tmp,
+                      Register out)
+      : ThrowErrorSlowPathCode(instruction,
+                               kIntegerDivisionByZeroExceptionRuntimeEntry),
+        is_mod_(instruction->op_kind() == Token::kMOD),
+        divisor_(divisor),
+        divisor_range_(divisor_range),
+        tmp_(tmp),
+        out_(out),
+        adjust_sign_label_() {}
+
+  void EmitNativeCode(FlowGraphCompiler* compiler) override {
+    // Handle modulo/division by zero, if needed. Use superclass code.
+    if (has_divide_by_zero()) {
+      ThrowErrorSlowPathCode::EmitNativeCode(compiler);
+    } else {
+      __ Bind(entry_label());  // not used, but keeps destructor happy
+      if (compiler::Assembler::EmittingComments()) {
+        __ Comment("slow path %s operation (no throw)", name());
+      }
+    }
+    // Adjust modulo for negative sign, optimized for known ranges.
+    // if (divisor < 0)
+    //   out -= divisor;
+    // else
+    //   out += divisor;
+    if (has_adjust_sign()) {
+      __ Bind(adjust_sign_label());
+      if (RangeUtils::Overlaps(divisor_range_, -1, 1)) {
+        // General case.
+        compiler::Label adjust, done;
+        __ bgez(divisor_, &adjust, compiler::Assembler::kNearJump);
+        __ sub(out_, out_, divisor_);
+        __ j(&done, compiler::Assembler::kNearJump);
+        __ Bind(&adjust);
+        __ add(out_, out_, divisor_);
+        __ Bind(&done);
+      } else if (divisor_range_->IsPositive()) {
+        // Always positive.
+        __ add(out_, out_, divisor_);
+      } else {
+        // Always negative.
+        __ sub(out_, out_, divisor_);
+      }
+      __ j(exit_label());
+    }
+  }
+
+  const char* name() override { return "int64 divide"; }
+
+  bool has_divide_by_zero() { return RangeUtils::CanBeZero(divisor_range_); }
+
+  bool has_adjust_sign() { return is_mod_; }
+
+  bool is_needed() { return has_divide_by_zero() || has_adjust_sign(); }
+
+  compiler::Label* adjust_sign_label() {
+    ASSERT(has_adjust_sign());
+    return &adjust_sign_label_;
+  }
+
+ private:
+  bool is_mod_;
+  Register divisor_;
+  Range* divisor_range_;
+  Register tmp_;
+  Register out_;
+  compiler::Label adjust_sign_label_;
+};
+
+#if XLEN == 64
+static void EmitInt64ModTruncDiv(FlowGraphCompiler* compiler,
+                                 BinaryInt64OpInstr* instruction,
+                                 Token::Kind op_kind,
+                                 Register left,
+                                 Register right,
+                                 Register tmp,
+                                 Register out) {
+  ASSERT(op_kind == Token::kMOD || op_kind == Token::kTRUNCDIV);
+
+  // TODO(riscv): Is it worth copying the magic constant optimization from the
+  // other architectures?
+
+  // Prepare a slow path.
+  Range* right_range = instruction->right()->definition()->range();
+  Int64DivideSlowPath* slow_path =
+      new (Z) Int64DivideSlowPath(instruction, right, right_range, tmp, out);
+
+  // Handle modulo/division by zero exception on slow path.
+  if (slow_path->has_divide_by_zero()) {
+    __ beqz(right, slow_path->entry_label());
+  }
+
+  // Perform actual operation
+  //   out = left % right
+  // or
+  //   out = left / right.
+  if (op_kind == Token::kMOD) {
+    __ rem(out, left, right);
+    // For the % operator, the rem instruction does not
+    // quite do what we want. Adjust for sign on slow path.
+    __ bltz(out, slow_path->adjust_sign_label());
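+    // E.g. -7 % -2: rem yields -1; the slow path subtracts the negative
+    // divisor, producing the non-negative result 1 that Dart's % requires.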
+  } else {
+    __ div(out, left, right);
+  }
+
+  if (slow_path->is_needed()) {
+    __ Bind(slow_path->exit_label());
+    compiler->AddSlowPathCode(slow_path);
+  }
+}
+#endif
+
+LocationSummary* BinaryInt64OpInstr::MakeLocationSummary(Zone* zone,
+                                                         bool opt) const {
+#if XLEN == 32
+  // TODO(riscv): Allow constants for the RHS of bitwise operators if both
+  // hi and lo components are IType immediates.
+  const intptr_t kNumInputs = 2;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  summary->set_in(0, Location::Pair(Location::RequiresRegister(),
+                                    Location::RequiresRegister()));
+  summary->set_in(1, Location::Pair(Location::RequiresRegister(),
+                                    Location::RequiresRegister()));
+  summary->set_out(0, Location::Pair(Location::RequiresRegister(),
+                                     Location::RequiresRegister()));
+  return summary;
+#else
+  switch (op_kind()) {
+    case Token::kMOD:
+    case Token::kTRUNCDIV: {
+      const intptr_t kNumInputs = 2;
+      const intptr_t kNumTemps = (op_kind() == Token::kMOD) ? 1 : 0;
+      LocationSummary* summary = new (zone) LocationSummary(
+          zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
+      summary->set_in(0, Location::RequiresRegister());
+      summary->set_in(1, Location::RequiresRegister());
+      summary->set_out(0, Location::RequiresRegister());
+      if (kNumTemps == 1) {
+        summary->set_temp(0, Location::RequiresRegister());
+      }
+      return summary;
+    }
+    default: {
+      const intptr_t kNumInputs = 2;
+      const intptr_t kNumTemps = 0;
+      LocationSummary* summary = new (zone) LocationSummary(
+          zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+      summary->set_in(0, Location::RequiresRegister());
+      summary->set_in(1, LocationRegisterOrConstant(right()));
+      summary->set_out(0, Location::RequiresRegister());
+      return summary;
+    }
+  }
+#endif
+}
+
+void BinaryInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+#if XLEN == 32
+  PairLocation* left_pair = locs()->in(0).AsPairLocation();
+  Register left_lo = left_pair->At(0).reg();
+  Register left_hi = left_pair->At(1).reg();
+  PairLocation* right_pair = locs()->in(1).AsPairLocation();
+  Register right_lo = right_pair->At(0).reg();
+  Register right_hi = right_pair->At(1).reg();
+  PairLocation* out_pair = locs()->out(0).AsPairLocation();
+  Register out_lo = out_pair->At(0).reg();
+  Register out_hi = out_pair->At(1).reg();
+  ASSERT(!can_overflow());
+  ASSERT(!CanDeoptimize());
+
+  switch (op_kind()) {
+    case Token::kBIT_AND: {
+      __ and_(out_lo, left_lo, right_lo);
+      __ and_(out_hi, left_hi, right_hi);
+      break;
+    }
+    case Token::kBIT_OR: {
+      __ or_(out_lo, left_lo, right_lo);
+      __ or_(out_hi, left_hi, right_hi);
+      break;
+    }
+    case Token::kBIT_XOR: {
+      __ xor_(out_lo, left_lo, right_lo);
+      __ xor_(out_hi, left_hi, right_hi);
+      break;
+    }
+    case Token::kADD: {
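+      // 64-bit add from 32-bit halves: the low-word add wraps iff its
+      // unsigned result is below an operand, so sltu recovers the carry.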
+      __ add(out_hi, left_hi, right_hi);
+      __ add(out_lo, left_lo, right_lo);
+      __ sltu(TMP, out_lo, right_lo);  // Carry
+      __ add(out_hi, out_hi, TMP);
+      break;
+    }
+    case Token::kSUB: {
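+      // 64-bit subtract from 32-bit halves: a borrow out of the low word
+      // occurs iff left_lo < right_lo (unsigned).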
+      __ sltu(TMP, left_lo, right_lo);  // Borrow
+      __ sub(out_hi, left_hi, right_hi);
+      __ sub(out_hi, out_hi, TMP);
+      __ sub(out_lo, left_lo, right_lo);
+      break;
+    }
+    case Token::kMUL: {
+      // TODO(riscv): Fix ordering for macro-op fusion.
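+      // 64x64->64 multiply from 32-bit pieces:
+      //   out_lo = low32(left_lo * right_lo)
+      //   out_hi = left_hi * right_lo + left_lo * right_hi
+      //          + high32(left_lo * right_lo)
+      // The left_hi * right_hi term only affects bits >= 64 and is dropped.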
+      __ mul(out_lo, right_lo, left_hi);
+      __ mulhu(out_hi, right_lo, left_lo);
+      __ add(out_lo, out_lo, out_hi);
+      __ mul(out_hi, right_hi, left_lo);
+      __ add(out_hi, out_hi, out_lo);
+      __ mul(out_lo, right_lo, left_lo);
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+#else
+  ASSERT(!can_overflow());
+  ASSERT(!CanDeoptimize());
+
+  const Register left = locs()->in(0).reg();
+  const Location right = locs()->in(1);
+  const Register out = locs()->out(0).reg();
+
+  if (op_kind() == Token::kMOD || op_kind() == Token::kTRUNCDIV) {
+    Register tmp =
+        (op_kind() == Token::kMOD) ? locs()->temp(0).reg() : kNoRegister;
+    EmitInt64ModTruncDiv(compiler, this, op_kind(), left, right.reg(), tmp,
+                         out);
+    return;
+  } else if (op_kind() == Token::kMUL) {
+    Register r = TMP;
+    if (right.IsConstant()) {
+      int64_t value;
+      const bool ok = compiler::HasIntegerValue(right.constant(), &value);
+      RELEASE_ASSERT(ok);
+      __ LoadImmediate(r, value);
+    } else {
+      r = right.reg();
+    }
+    __ mul(out, left, r);
+    return;
+  }
+
+  if (right.IsConstant()) {
+    int64_t value;
+    const bool ok = compiler::HasIntegerValue(right.constant(), &value);
+    RELEASE_ASSERT(ok);
+    switch (op_kind()) {
+      case Token::kADD:
+        __ AddImmediate(out, left, value);
+        break;
+      case Token::kSUB:
+        __ AddImmediate(out, left, -value);
+        break;
+      case Token::kBIT_AND:
+        __ AndImmediate(out, left, value);
+        break;
+      case Token::kBIT_OR:
+        __ OrImmediate(out, left, value);
+        break;
+      case Token::kBIT_XOR:
+        __ XorImmediate(out, left, value);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  } else {
+    switch (op_kind()) {
+      case Token::kADD:
+        __ add(out, left, right.reg());
+        break;
+      case Token::kSUB:
+        __ sub(out, left, right.reg());
+        break;
+      case Token::kBIT_AND:
+        __ and_(out, left, right.reg());
+        break;
+      case Token::kBIT_OR:
+        __ or_(out, left, right.reg());
+        break;
+      case Token::kBIT_XOR:
+        __ xor_(out, left, right.reg());
+        break;
+      default:
+        UNREACHABLE();
+    }
+  }
+#endif
+}
+
+#if XLEN == 32
+static void EmitShiftInt64ByConstant(FlowGraphCompiler* compiler,
+                                     Token::Kind op_kind,
+                                     Register out_lo,
+                                     Register out_hi,
+                                     Register left_lo,
+                                     Register left_hi,
+                                     const Object& right) {
+  const int64_t shift = Integer::Cast(right).AsInt64Value();
+  ASSERT(shift >= 0);
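+  // A 64-bit shift on RV32 is composed from 32-bit shifts: for counts below
+  // 32 the vacated bits of one half are refilled from the other half; for
+  // counts of 32 or more the result collapses into a single shifted half
+  // plus sign- or zero-fill.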
+
+  switch (op_kind) {
+    case Token::kSHR: {
+      if (shift < 32) {
+        __ slli(out_lo, left_hi, 32 - shift);
+        __ srli(TMP, left_lo, shift);
+        __ or_(out_lo, out_lo, TMP);
+        __ srai(out_hi, left_hi, shift);
+      } else {
+        if (shift == 32) {
+          __ mv(out_lo, left_hi);
+        } else if (shift < 64) {
+          __ srai(out_lo, left_hi, shift - 32);
+        } else {
+          __ srai(out_lo, left_hi, 31);
+        }
+        __ srai(out_hi, left_hi, 31);
+      }
+      break;
+    }
+    case Token::kUSHR: {
+      ASSERT(shift < 64);
+      if (shift < 32) {
+        __ slli(out_lo, left_hi, 32 - shift);
+        __ srli(TMP, left_lo, shift);
+        __ or_(out_lo, out_lo, TMP);
+        __ srli(out_hi, left_hi, shift);
+      } else {
+        if (shift == 32) {
+          __ mv(out_lo, left_hi);
+        } else {
+          __ srli(out_lo, left_hi, shift - 32);
+        }
+        __ li(out_hi, 0);
+      }
+      break;
+    }
+    case Token::kSHL: {
+      ASSERT(shift >= 0);
+      ASSERT(shift < 64);
+      if (shift < 32) {
+        __ srli(out_hi, left_lo, 32 - shift);
+        __ slli(TMP, left_hi, shift);
+        __ or_(out_hi, out_hi, TMP);
+        __ slli(out_lo, left_lo, shift);
+      } else {
+        if (shift == 32) {
+          __ mv(out_hi, left_lo);
+        } else {
+          __ slli(out_hi, left_lo, shift - 32);
+        }
+        __ li(out_lo, 0);
+      }
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+#else
+static void EmitShiftInt64ByConstant(FlowGraphCompiler* compiler,
+                                     Token::Kind op_kind,
+                                     Register out,
+                                     Register left,
+                                     const Object& right) {
+  const int64_t shift = Integer::Cast(right).AsInt64Value();
+  ASSERT(shift >= 0);
+  switch (op_kind) {
+    case Token::kSHR: {
+      __ srai(out, left, Utils::Minimum<int64_t>(shift, XLEN - 1));
+      break;
+    }
+    case Token::kUSHR: {
+      ASSERT(shift < 64);
+      __ srli(out, left, shift);
+      break;
+    }
+    case Token::kSHL: {
+      ASSERT(shift < 64);
+      __ slli(out, left, shift);
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+#endif
+
+#if XLEN == 32
+static void EmitShiftInt64ByRegister(FlowGraphCompiler* compiler,
+                                     Token::Kind op_kind,
+                                     Register out_lo,
+                                     Register out_hi,
+                                     Register left_lo,
+                                     Register left_hi,
+                                     Register right) {
+  // TODO(riscv): Review.
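+  // Counts of 64 or more (and negative counts) are routed to the slow path
+  // or deopt by the caller, so each case only splits on count < 32 versus
+  // 32 <= count < 64.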
+  switch (op_kind) {
+    case Token::kSHR: {
+      compiler::Label big_shift, done;
+      __ li(TMP, 32);
+      __ bge(right, TMP, &big_shift, compiler::Assembler::kNearJump);
+
+      // 0 <= right < 32
+      __ srl(out_lo, left_lo, right);
+      __ sra(out_hi, left_hi, right);
+      __ beqz(right, &done, compiler::Assembler::kNearJump);
+      __ sub(TMP, TMP, right);
+      __ sll(TMP2, left_hi, TMP);
+      __ or_(out_lo, out_lo, TMP2);
+      __ j(&done);
+
+      // 32 <= right < 64
+      __ Bind(&big_shift);
+      __ sub(TMP, right, TMP);
+      __ sra(out_lo, left_hi, TMP);
+      __ srai(out_hi, left_hi, XLEN - 1);  // SignFill
+      __ Bind(&done);
+      break;
+    }
+    case Token::kUSHR: {
+      compiler::Label big_shift, done;
+      __ li(TMP, 32);
+      __ bge(right, TMP, &big_shift, compiler::Assembler::kNearJump);
+
+      // 0 <= right < 32
+      __ srl(out_lo, left_lo, right);
+      __ srl(out_hi, left_hi, right);
+      __ beqz(right, &done, compiler::Assembler::kNearJump);
+      __ sub(TMP, TMP, right);
+      __ sll(TMP2, left_hi, TMP);
+      __ or_(out_lo, out_lo, TMP2);
+      __ j(&done);
+
+      // 32 <= right < 64
+      __ Bind(&big_shift);
+      __ sub(TMP, right, TMP);
+      __ srl(out_lo, left_hi, TMP);
+      __ li(out_hi, 0);
+      __ Bind(&done);
+      break;
+    }
+    case Token::kSHL: {
+      compiler::Label big_shift, done;
+      __ li(TMP, 32);
+      __ bge(right, TMP, &big_shift, compiler::Assembler::kNearJump);
+
+      // 0 <= right < 32
+      __ sll(out_lo, left_lo, right);
+      __ sll(out_hi, left_hi, right);
+      __ beqz(right, &done, compiler::Assembler::kNearJump);
+      __ sub(TMP, TMP, right);
+      __ srl(TMP2, left_lo, TMP);
+      __ or_(out_hi, out_hi, TMP2);
+      __ j(&done);
+
+      // 32 <= right < 64
+      __ Bind(&big_shift);
+      __ sub(TMP, right, TMP);
+      __ sll(out_hi, left_lo, TMP);
+      __ li(out_lo, 0);
+      __ Bind(&done);
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+#else
+static void EmitShiftInt64ByRegister(FlowGraphCompiler* compiler,
+                                     Token::Kind op_kind,
+                                     Register out,
+                                     Register left,
+                                     Register right) {
+  switch (op_kind) {
+    case Token::kSHR: {
+      __ sra(out, left, right);
+      break;
+    }
+    case Token::kUSHR: {
+      __ srl(out, left, right);
+      break;
+    }
+    case Token::kSHL: {
+      __ sll(out, left, right);
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+#endif
+
+static void EmitShiftUint32ByConstant(FlowGraphCompiler* compiler,
+                                      Token::Kind op_kind,
+                                      Register out,
+                                      Register left,
+                                      const Object& right) {
+  const int64_t shift = Integer::Cast(right).AsInt64Value();
+  ASSERT(shift >= 0);
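+  // Counts of 32 or more flush a uint32 to zero. On RV64, the w-form shifts
+  // read the low 32 bits and sign-extend their 32-bit result.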
+  if (shift >= 32) {
+    __ li(out, 0);
+  } else {
+    switch (op_kind) {
+      case Token::kSHR:
+      case Token::kUSHR:
+#if XLEN == 32
+        __ srli(out, left, shift);
+#else
+        __ srliw(out, left, shift);
+#endif
+        break;
+      case Token::kSHL:
+#if XLEN == 32
+        __ slli(out, left, shift);
+#else
+        __ slliw(out, left, shift);
+#endif
+        break;
+      default:
+        UNREACHABLE();
+    }
+  }
+}
+
+static void EmitShiftUint32ByRegister(FlowGraphCompiler* compiler,
+                                      Token::Kind op_kind,
+                                      Register out,
+                                      Register left,
+                                      Register right) {
+  switch (op_kind) {
+    case Token::kSHR:
+    case Token::kUSHR:
+#if XLEN == 32
+      __ srl(out, left, right);
+#else
+      __ srlw(out, left, right);
+#endif
+      break;
+    case Token::kSHL:
+#if XLEN == 32
+      __ sll(out, left, right);
+#else
+      __ sllw(out, left, right);
+#endif
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+class ShiftInt64OpSlowPath : public ThrowErrorSlowPathCode {
+ public:
+  explicit ShiftInt64OpSlowPath(ShiftInt64OpInstr* instruction)
+      : ThrowErrorSlowPathCode(instruction,
+                               kArgumentErrorUnboxedInt64RuntimeEntry) {}
+
+  const char* name() override { return "int64 shift"; }
+
+  void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
+#if XLEN == 32
+    PairLocation* left_pair = instruction()->locs()->in(0).AsPairLocation();
+    Register left_hi = left_pair->At(1).reg();
+    PairLocation* right_pair = instruction()->locs()->in(1).AsPairLocation();
+    Register right_lo = right_pair->At(0).reg();
+    Register right_hi = right_pair->At(1).reg();
+    PairLocation* out_pair = instruction()->locs()->out(0).AsPairLocation();
+    Register out_lo = out_pair->At(0).reg();
+    Register out_hi = out_pair->At(1).reg();
+
+    compiler::Label throw_error;
+    __ bltz(right_hi, &throw_error);
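+    // Non-negative but out-of-range count (>= 64): kSHR saturates to the
+    // sign bit, kUSHR and kSHL produce zero. Negative counts throw.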
+
+    switch (instruction()->AsShiftInt64Op()->op_kind()) {
+      case Token::kSHR:
+        __ srai(out_hi, left_hi, compiler::target::kBitsPerWord - 1);
+        __ mv(out_lo, out_hi);
+        break;
+      case Token::kUSHR:
+      case Token::kSHL: {
+        __ li(out_lo, 0);
+        __ li(out_hi, 0);
+        break;
+      }
+      default:
+        UNREACHABLE();
+    }
+
+    __ j(exit_label());
+
+    __ Bind(&throw_error);
+
+    // Can't pass unboxed int64 value directly to runtime call, as all
+    // arguments are expected to be tagged (boxed).
+    // The unboxed int64 argument is passed through a dedicated slot in Thread.
+    // TODO(dartbug.com/33549): Clean this up when unboxed values
+    // can be passed as arguments.
+    __ StoreToOffset(
+        right_lo, THR,
+        compiler::target::Thread::unboxed_int64_runtime_arg_offset());
+    __ StoreToOffset(
+        right_hi, THR,
+        compiler::target::Thread::unboxed_int64_runtime_arg_offset() +
+            compiler::target::kWordSize);
+#else
+    const Register left = instruction()->locs()->in(0).reg();
+    const Register right = instruction()->locs()->in(1).reg();
+    const Register out = instruction()->locs()->out(0).reg();
+    ASSERT((out != left) && (out != right));
+
+    compiler::Label throw_error;
+    __ bltz(right, &throw_error);
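+    // Non-negative but out-of-range count (>= 64): kSHR saturates to the
+    // sign bit, kUSHR and kSHL produce zero. Negative counts throw.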
+
+    switch (instruction()->AsShiftInt64Op()->op_kind()) {
+      case Token::kSHR:
+        __ srai(out, left, XLEN - 1);
+        break;
+      case Token::kUSHR:
+      case Token::kSHL:
+        __ mv(out, ZR);
+        break;
+      default:
+        UNREACHABLE();
+    }
+    __ j(exit_label());
+
+    __ Bind(&throw_error);
+
+    // Can't pass unboxed int64 value directly to runtime call, as all
+    // arguments are expected to be tagged (boxed).
+    // The unboxed int64 argument is passed through a dedicated slot in Thread.
+    // TODO(dartbug.com/33549): Clean this up when unboxed values
+    // can be passed as arguments.
+    __ sx(right,
+          compiler::Address(THR, Thread::unboxed_int64_runtime_arg_offset()));
+#endif
+  }
+};
+
+LocationSummary* ShiftInt64OpInstr::MakeLocationSummary(Zone* zone,
+                                                        bool opt) const {
+  const intptr_t kNumInputs = 2;
+  const intptr_t kNumTemps = 0;
+#if XLEN == 32
+  LocationSummary* summary = new (zone) LocationSummary(
+      zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
+  summary->set_in(0, Location::Pair(Location::RequiresRegister(),
+                                    Location::RequiresRegister()));
+  if (RangeUtils::IsPositive(shift_range()) &&
+      right()->definition()->IsConstant()) {
+    ConstantInstr* constant = right()->definition()->AsConstant();
+    summary->set_in(1, Location::Constant(constant));
+  } else {
+    summary->set_in(1, Location::Pair(Location::RequiresRegister(),
+                                      Location::RequiresRegister()));
+  }
+  summary->set_out(0, Location::Pair(Location::RequiresRegister(),
+                                     Location::RequiresRegister()));
+#else
+  LocationSummary* summary = new (zone) LocationSummary(
+      zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
+  summary->set_in(0, Location::RequiresRegister());
+  summary->set_in(1, RangeUtils::IsPositive(shift_range())
+                         ? LocationRegisterOrConstant(right())
+                         : Location::RequiresRegister());
+  summary->set_out(0, Location::RequiresRegister());
+#endif
+  return summary;
+}
+
+void ShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+#if XLEN == 32
+  PairLocation* left_pair = locs()->in(0).AsPairLocation();
+  Register left_lo = left_pair->At(0).reg();
+  Register left_hi = left_pair->At(1).reg();
+  PairLocation* out_pair = locs()->out(0).AsPairLocation();
+  Register out_lo = out_pair->At(0).reg();
+  Register out_hi = out_pair->At(1).reg();
+  ASSERT(!can_overflow());
+
+  if (locs()->in(1).IsConstant()) {
+    EmitShiftInt64ByConstant(compiler, op_kind(), out_lo, out_hi, left_lo,
+                             left_hi, locs()->in(1).constant());
+  } else {
+    // Code for a variable shift amount (or constant that throws).
+    PairLocation* right_pair = locs()->in(1).AsPairLocation();
+    Register right_lo = right_pair->At(0).reg();
+    Register right_hi = right_pair->At(1).reg();
+
+    // Jump to a slow path if shift is larger than 63 or less than 0.
+    ShiftInt64OpSlowPath* slow_path = NULL;
+    if (!IsShiftCountInRange()) {
+      slow_path = new (Z) ShiftInt64OpSlowPath(this);
+      compiler->AddSlowPathCode(slow_path);
+      __ CompareImmediate(right_hi, 0);
+      __ BranchIf(NE, slow_path->entry_label());
+      __ CompareImmediate(right_lo, kShiftCountLimit);
+      __ BranchIf(HI, slow_path->entry_label());
+    }
+
+    EmitShiftInt64ByRegister(compiler, op_kind(), out_lo, out_hi, left_lo,
+                             left_hi, right_lo);
+
+    if (slow_path != NULL) {
+      __ Bind(slow_path->exit_label());
+    }
+  }
+#else
+  const Register left = locs()->in(0).reg();
+  const Register out = locs()->out(0).reg();
+  ASSERT(!can_overflow());
+
+  if (locs()->in(1).IsConstant()) {
+    EmitShiftInt64ByConstant(compiler, op_kind(), out, left,
+                             locs()->in(1).constant());
+  } else {
+    // Code for a variable shift amount (or constant that throws).
+    Register shift = locs()->in(1).reg();
+
+    // Jump to a slow path if shift is larger than 63 or less than 0.
+    ShiftInt64OpSlowPath* slow_path = NULL;
+    if (!IsShiftCountInRange()) {
+      slow_path = new (Z) ShiftInt64OpSlowPath(this);
+      compiler->AddSlowPathCode(slow_path);
+      __ CompareImmediate(shift, kShiftCountLimit);
+      __ BranchIf(HI, slow_path->entry_label());
+    }
+
+    EmitShiftInt64ByRegister(compiler, op_kind(), out, left, shift);
+
+    if (slow_path != NULL) {
+      __ Bind(slow_path->exit_label());
+    }
+  }
+#endif
+}
+
+LocationSummary* SpeculativeShiftInt64OpInstr::MakeLocationSummary(
+    Zone* zone,
+    bool opt) const {
+  const intptr_t kNumInputs = 2;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+#if XLEN == 32
+  summary->set_in(0, Location::Pair(Location::RequiresRegister(),
+                                    Location::RequiresRegister()));
+  summary->set_in(1, LocationWritableRegisterOrSmiConstant(right()));
+  summary->set_out(0, Location::Pair(Location::RequiresRegister(),
+                                     Location::RequiresRegister()));
+#else
+  summary->set_in(0, Location::RequiresRegister());
+  summary->set_in(1, LocationRegisterOrSmiConstant(right()));
+  summary->set_out(0, Location::RequiresRegister());
+#endif
+  return summary;
+}
+
+void SpeculativeShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+#if XLEN == 32
+  PairLocation* left_pair = locs()->in(0).AsPairLocation();
+  Register left_lo = left_pair->At(0).reg();
+  Register left_hi = left_pair->At(1).reg();
+  PairLocation* out_pair = locs()->out(0).AsPairLocation();
+  Register out_lo = out_pair->At(0).reg();
+  Register out_hi = out_pair->At(1).reg();
+  ASSERT(!can_overflow());
+
+  if (locs()->in(1).IsConstant()) {
+    EmitShiftInt64ByConstant(compiler, op_kind(), out_lo, out_hi, left_lo,
+                             left_hi, locs()->in(1).constant());
+  } else {
+    // Code for a variable shift amount.
+    Register shift = locs()->in(1).reg();
+    __ SmiUntag(shift);
+
+    // Deopt if shift is larger than 63 or less than 0 (or not a smi).
+    if (!IsShiftCountInRange()) {
+      ASSERT(CanDeoptimize());
+      compiler::Label* deopt =
+          compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
+
+      __ CompareImmediate(shift, kShiftCountLimit);
+      __ BranchIf(HI, deopt);
+    }
+
+    EmitShiftInt64ByRegister(compiler, op_kind(), out_lo, out_hi, left_lo,
+                             left_hi, shift);
+  }
+#else
+  const Register left = locs()->in(0).reg();
+  const Register out = locs()->out(0).reg();
+  ASSERT(!can_overflow());
+
+  if (locs()->in(1).IsConstant()) {
+    EmitShiftInt64ByConstant(compiler, op_kind(), out, left,
+                             locs()->in(1).constant());
+  } else {
+    // Code for a variable shift amount.
+    Register shift = locs()->in(1).reg();
+
+    // Untag shift count.
+    __ SmiUntag(TMP, shift);
+    shift = TMP;
+
+    // Deopt if shift is larger than 63 or less than 0 (or not a smi).
+    if (!IsShiftCountInRange()) {
+      ASSERT(CanDeoptimize());
+      compiler::Label* deopt =
+          compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
+
+      __ CompareImmediate(shift, kShiftCountLimit);
+      __ BranchIf(HI, deopt);
+    }
+
+    EmitShiftInt64ByRegister(compiler, op_kind(), out, left, shift);
+  }
+#endif
+}
+
+class ShiftUint32OpSlowPath : public ThrowErrorSlowPathCode {
+ public:
+  explicit ShiftUint32OpSlowPath(ShiftUint32OpInstr* instruction)
+      : ThrowErrorSlowPathCode(instruction,
+                               kArgumentErrorUnboxedInt64RuntimeEntry) {}
+
+  const char* name() override { return "uint32 shift"; }
+
+  void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
+#if XLEN == 32
+    PairLocation* right_pair = instruction()->locs()->in(1).AsPairLocation();
+    Register right_lo = right_pair->At(0).reg();
+    Register right_hi = right_pair->At(1).reg();
+    Register out = instruction()->locs()->out(0).reg();
+
+    compiler::Label throw_error;
+    __ bltz(right_hi, &throw_error, compiler::Assembler::kNearJump);
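+    // Non-negative but out-of-range count (>= 32): any uint32 shift
+    // flushes to zero. Negative counts throw.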
+    __ li(out, 0);
+    __ j(exit_label());
+
+    __ Bind(&throw_error);
+    // Can't pass unboxed int64 value directly to runtime call, as all
+    // arguments are expected to be tagged (boxed).
+    // The unboxed int64 argument is passed through a dedicated slot in Thread.
+    // TODO(dartbug.com/33549): Clean this up when unboxed values
+    // can be passed as arguments.
+    __ StoreToOffset(
+        right_lo, THR,
+        compiler::target::Thread::unboxed_int64_runtime_arg_offset());
+    __ StoreToOffset(
+        right_hi, THR,
+        compiler::target::Thread::unboxed_int64_runtime_arg_offset() +
+            compiler::target::kWordSize);
+#else
+    const Register right = instruction()->locs()->in(1).reg();
+
+    // Can't pass unboxed int64 value directly to runtime call, as all
+    // arguments are expected to be tagged (boxed).
+    // The unboxed int64 argument is passed through a dedicated slot in Thread.
+    // TODO(dartbug.com/33549): Clean this up when unboxed values
+    // can be passed as arguments.
+    __ sx(right,
+          compiler::Address(THR, Thread::unboxed_int64_runtime_arg_offset()));
+#endif
+  }
+};
+
+LocationSummary* ShiftUint32OpInstr::MakeLocationSummary(Zone* zone,
+                                                         bool opt) const {
+  const intptr_t kNumInputs = 2;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone) LocationSummary(
+      zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
+  summary->set_in(0, Location::RequiresRegister());
+  if (RangeUtils::IsPositive(shift_range()) &&
+      right()->definition()->IsConstant()) {
+    ConstantInstr* constant = right()->definition()->AsConstant();
+    summary->set_in(1, Location::Constant(constant));
+  } else {
+#if XLEN == 32
+    summary->set_in(1, Location::Pair(Location::RequiresRegister(),
+                                      Location::RequiresRegister()));
+#else
+    summary->set_in(1, Location::RequiresRegister());
+#endif
+  }
+  summary->set_out(0, Location::RequiresRegister());
+  return summary;
+}
+
+void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+#if XLEN == 32
+  Register left = locs()->in(0).reg();
+  Register out = locs()->out(0).reg();
+
+  ASSERT(left != out);
+
+  if (locs()->in(1).IsConstant()) {
+    EmitShiftUint32ByConstant(compiler, op_kind(), out, left,
+                              locs()->in(1).constant());
+  } else {
+    // Code for a variable shift amount (or constant that throws).
+    PairLocation* right_pair = locs()->in(1).AsPairLocation();
+    Register right_lo = right_pair->At(0).reg();
+    Register right_hi = right_pair->At(1).reg();
+
+    // Jump to a slow path if shift count is > 31 or negative.
+    ShiftUint32OpSlowPath* slow_path = NULL;
+    if (!IsShiftCountInRange(kUint32ShiftCountLimit)) {
+      slow_path = new (Z) ShiftUint32OpSlowPath(this);
+      compiler->AddSlowPathCode(slow_path);
+
+      __ CompareImmediate(right_hi, 0);
+      __ BranchIf(NE, slow_path->entry_label());
+      __ CompareImmediate(right_lo, kUint32ShiftCountLimit);
+      __ BranchIf(HI, slow_path->entry_label());
+    }
+
+    EmitShiftUint32ByRegister(compiler, op_kind(), out, left, right_lo);
+
+    if (slow_path != NULL) {
+      __ Bind(slow_path->exit_label());
+    }
+  }
+#else
+  Register left = locs()->in(0).reg();
+  Register out = locs()->out(0).reg();
+
+  if (locs()->in(1).IsConstant()) {
+    EmitShiftUint32ByConstant(compiler, op_kind(), out, left,
+                              locs()->in(1).constant());
+  } else {
+    // Code for a variable shift amount (or constant that throws).
+    const Register right = locs()->in(1).reg();
+    const bool shift_count_in_range =
+        IsShiftCountInRange(kUint32ShiftCountLimit);
+
+    // Jump to a slow path if shift count is negative.
+    if (!shift_count_in_range) {
+      ShiftUint32OpSlowPath* slow_path = new (Z) ShiftUint32OpSlowPath(this);
+      compiler->AddSlowPathCode(slow_path);
+
+      __ bltz(right, slow_path->entry_label());
+    }
+
+    EmitShiftUint32ByRegister(compiler, op_kind(), out, left, right);
+
+    if (!shift_count_in_range) {
+      // If shift value is > 31, return zero.
+      compiler::Label done;
+      __ CompareImmediate(right, 31);
+      __ BranchIf(LE, &done, compiler::Assembler::kNearJump);
+      __ li(out, 0);
+      __ Bind(&done);
+    }
+  }
+#endif
+}
+
+LocationSummary* SpeculativeShiftUint32OpInstr::MakeLocationSummary(
+    Zone* zone,
+    bool opt) const {
+  const intptr_t kNumInputs = 2;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  summary->set_in(0, Location::RequiresRegister());
+  summary->set_in(1, LocationRegisterOrSmiConstant(right()));
+  summary->set_out(0, Location::RequiresRegister());
+  return summary;
+}
+
+void SpeculativeShiftUint32OpInstr::EmitNativeCode(
+    FlowGraphCompiler* compiler) {
+  Register left = locs()->in(0).reg();
+  Register out = locs()->out(0).reg();
+
+  if (locs()->in(1).IsConstant()) {
+    EmitShiftUint32ByConstant(compiler, op_kind(), out, left,
+                              locs()->in(1).constant());
+  } else {
+    Register right = locs()->in(1).reg();
+    const bool shift_count_in_range =
+        IsShiftCountInRange(kUint32ShiftCountLimit);
+
+    __ SmiUntag(TMP, right);
+    right = TMP;
+
+    // Jump to a slow path if shift count is negative.
+    if (!shift_count_in_range) {
+      // Deoptimize if shift count is negative.
+      ASSERT(CanDeoptimize());
+      compiler::Label* deopt =
+          compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
+
+      __ bltz(right, deopt);
+    }
+
+    EmitShiftUint32ByRegister(compiler, op_kind(), out, left, right);
+
+    if (!shift_count_in_range) {
+      // If shift value is > 31, return zero.
+      compiler::Label done;
+      __ CompareImmediate(right, 31);
+      __ BranchIf(LE, &done, compiler::Assembler::kNearJump);
+      __ li(out, 0);
+      __ Bind(&done);
+    }
+  }
+}
+
+LocationSummary* UnaryInt64OpInstr::MakeLocationSummary(Zone* zone,
+                                                        bool opt) const {
+#if XLEN == 32
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  summary->set_in(0, Location::Pair(Location::RequiresRegister(),
+                                    Location::RequiresRegister()));
+  summary->set_out(0, Location::Pair(Location::RequiresRegister(),
+                                     Location::RequiresRegister()));
+  return summary;
+#else
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  summary->set_in(0, Location::RequiresRegister());
+  summary->set_out(0, Location::RequiresRegister());
+  return summary;
+#endif
+}
+
+void UnaryInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+#if XLEN == 32
+  PairLocation* left_pair = locs()->in(0).AsPairLocation();
+  Register left_lo = left_pair->At(0).reg();
+  Register left_hi = left_pair->At(1).reg();
+
+  PairLocation* out_pair = locs()->out(0).AsPairLocation();
+  Register out_lo = out_pair->At(0).reg();
+  Register out_hi = out_pair->At(1).reg();
+
+  switch (op_kind()) {
+    case Token::kBIT_NOT:
+      __ not_(out_lo, left_lo);
+      __ not_(out_hi, left_hi);
+      break;
+    case Token::kNEGATE:
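+      // 64-bit negate: negate both halves, then subtract the borrow out of
+      // the low word, which is 1 iff left_lo != 0.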
+      __ snez(TMP, left_lo);  // Borrow
+      __ neg(out_lo, left_lo);
+      __ neg(out_hi, left_hi);
+      __ sub(out_hi, out_hi, TMP);
+      break;
+    default:
+      UNREACHABLE();
+  }
+#else
+  const Register left = locs()->in(0).reg();
+  const Register out = locs()->out(0).reg();
+  switch (op_kind()) {
+    case Token::kBIT_NOT:
+      __ not_(out, left);
+      break;
+    case Token::kNEGATE:
+      __ neg(out, left);
+      break;
+    default:
+      UNREACHABLE();
+  }
+#endif
+}
+
+LocationSummary* BinaryUint32OpInstr::MakeLocationSummary(Zone* zone,
+                                                          bool opt) const {
+  const intptr_t kNumInputs = 2;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  summary->set_in(0, Location::RequiresRegister());
+  summary->set_in(1, Location::RequiresRegister());
+  summary->set_out(0, Location::RequiresRegister());
+  return summary;
+}
+
+void BinaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  Register left = locs()->in(0).reg();
+  Register right = locs()->in(1).reg();
+  Register out = locs()->out(0).reg();
+  switch (op_kind()) {
+    case Token::kBIT_AND:
+      __ and_(out, left, right);
+      break;
+    case Token::kBIT_OR:
+      __ or_(out, left, right);
+      break;
+    case Token::kBIT_XOR:
+      __ xor_(out, left, right);
+      break;
+    case Token::kADD:
+#if XLEN == 32
+      __ add(out, left, right);
+#elif XLEN > 32
+      __ addw(out, left, right);
+#endif
+      break;
+    case Token::kSUB:
+#if XLEN == 32
+      __ sub(out, left, right);
+#elif XLEN > 32
+      __ subw(out, left, right);
+#endif
+      break;
+    case Token::kMUL:
+      __ mul(out, left, right);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+LocationSummary* UnaryUint32OpInstr::MakeLocationSummary(Zone* zone,
+                                                         bool opt) const {
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  summary->set_in(0, Location::RequiresRegister());
+  summary->set_out(0, Location::RequiresRegister());
+  return summary;
+}
+
+void UnaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  Register left = locs()->in(0).reg();
+  Register out = locs()->out(0).reg();
+
+  ASSERT(op_kind() == Token::kBIT_NOT);
+  __ not_(out, left);
+}
+
+#if XLEN == 32
+static void EmitInt32ShiftLeft(FlowGraphCompiler* compiler,
+                               BinaryInt32OpInstr* shift_left) {
+  const LocationSummary& locs = *shift_left->locs();
+  const Register left = locs.in(0).reg();
+  const Register result = locs.out(0).reg();
+  compiler::Label* deopt =
+      shift_left->CanDeoptimize()
+          ? compiler->AddDeoptStub(shift_left->deopt_id(),
+                                   ICData::kDeoptBinarySmiOp)
+          : NULL;
+  ASSERT(locs.in(1).IsConstant());
+  const Object& constant = locs.in(1).constant();
+  ASSERT(compiler::target::IsSmi(constant));
+  // Immediate shift operation takes 5 bits for the count.
+  const intptr_t kCountLimit = 0x1F;
+  const intptr_t value = compiler::target::SmiValue(constant);
+  ASSERT((0 < value) && (value < kCountLimit));
+  __ slli(result, left, value);
+  if (shift_left->can_overflow()) {
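+    // The shift overflowed iff shifting back does not recover the input.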
+    __ srai(TMP, result, value);
+    __ bne(TMP, left, deopt);  // Overflow.
+  }
+}
+
+LocationSummary* BinaryInt32OpInstr::MakeLocationSummary(Zone* zone,
+                                                         bool opt) const {
+  const intptr_t kNumInputs = 2;
+  // Calculate number of temporaries.
+  intptr_t num_temps = 0;
+  if (((op_kind() == Token::kSHL) && can_overflow()) ||
+      (op_kind() == Token::kSHR) || (op_kind() == Token::kUSHR) ||
+      (op_kind() == Token::kMUL)) {
+    num_temps = 1;
+  }
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, num_temps, LocationSummary::kNoCall);
+  summary->set_in(0, Location::RequiresRegister());
+  summary->set_in(1, LocationRegisterOrSmiConstant(right()));
+  if (num_temps == 1) {
+    summary->set_temp(0, Location::RequiresRegister());
+  }
+  // We make use of 3-operand instructions by not requiring the result
+  // register to be identical to the first input register, as it must be
+  // on Intel.
+  summary->set_out(0, Location::RequiresRegister());
+  return summary;
+}
+
+void BinaryInt32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  if (op_kind() == Token::kSHL) {
+    EmitInt32ShiftLeft(compiler, this);
+    return;
+  }
+
+  const Register left = locs()->in(0).reg();
+  const Register result = locs()->out(0).reg();
+  compiler::Label* deopt = NULL;
+  if (CanDeoptimize()) {
+    deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
+  }
+
+  if (locs()->in(1).IsConstant()) {
+    const Object& constant = locs()->in(1).constant();
+    ASSERT(compiler::target::IsSmi(constant));
+    const intptr_t value = compiler::target::SmiValue(constant);
+    switch (op_kind()) {
+      case Token::kADD: {
+        if (deopt == NULL) {
+          __ AddImmediate(result, left, value);
+        } else {
+          __ AddImmediateBranchOverflow(result, left, value, deopt);
+        }
+        break;
+      }
+      case Token::kSUB: {
+        if (deopt == NULL) {
+          __ AddImmediate(result, left, -value);
+        } else {
+          // Negating value and using AddImmediateSetFlags would not detect the
+          // overflow when value == kMinInt32.
+          __ SubtractImmediateBranchOverflow(result, left, value, deopt);
+        }
+        break;
+      }
+      case Token::kMUL: {
+        const Register right = locs()->temp(0).reg();
+        __ LoadImmediate(right, value);
+        if (deopt == NULL) {
+          __ mul(result, left, right);
+        } else {
+          __ MultiplyBranchOverflow(result, left, right, deopt);
+        }
+        break;
+      }
+      case Token::kBIT_AND: {
+        // No overflow check.
+        __ AndImmediate(result, left, value);
+        break;
+      }
+      case Token::kBIT_OR: {
+        // No overflow check.
+        __ OrImmediate(result, left, value);
+        break;
+      }
+      case Token::kBIT_XOR: {
+        // No overflow check.
+        __ XorImmediate(result, left, value);
+        break;
+      }
+      case Token::kSHR: {
+        // The srai immediate holds a 5-bit count on RV32; clamping the
+        // count to 31 still yields the correct sign-filled result.
+        const intptr_t kCountLimit = 0x1F;
+        __ srai(result, left, Utils::Minimum(value, kCountLimit));
+        break;
+      }
+      case Token::kUSHR: {
+        UNIMPLEMENTED();
+        break;
+      }
+
+      default:
+        UNREACHABLE();
+        break;
+    }
+    return;
+  }
+
+  const Register right = locs()->in(1).reg();
+  switch (op_kind()) {
+    case Token::kADD: {
+      if (deopt == NULL) {
+        __ add(result, left, right);
+      } else {
+        __ AddBranchOverflow(result, left, right, deopt);
+      }
+      break;
+    }
+    case Token::kSUB: {
+      if (deopt == NULL) {
+        __ sub(result, left, right);
+      } else {
+        __ SubtractBranchOverflow(result, left, right, deopt);
+      }
+      break;
+    }
+    case Token::kMUL: {
+      if (deopt == NULL) {
+        __ mul(result, left, right);
+      } else {
+        __ MultiplyBranchOverflow(result, left, right, deopt);
+      }
+      break;
+    }
+    case Token::kBIT_AND: {
+      // No overflow check.
+      __ and_(result, left, right);
+      break;
+    }
+    case Token::kBIT_OR: {
+      // No overflow check.
+      __ or_(result, left, right);
+      break;
+    }
+    case Token::kBIT_XOR: {
+      // No overflow check.
+      __ xor_(result, left, right);
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+#else
+DEFINE_UNIMPLEMENTED_INSTRUCTION(BinaryInt32OpInstr)
+#endif
+
+LocationSummary* IntConverterInstr::MakeLocationSummary(Zone* zone,
+                                                        bool opt) const {
+#if XLEN == 32
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  if (from() == kUntagged || to() == kUntagged) {
+    ASSERT((from() == kUntagged && to() == kUnboxedInt32) ||
+           (from() == kUntagged && to() == kUnboxedUint32) ||
+           (from() == kUnboxedInt32 && to() == kUntagged) ||
+           (from() == kUnboxedUint32 && to() == kUntagged));
+    ASSERT(!CanDeoptimize());
+    summary->set_in(0, Location::RequiresRegister());
+    summary->set_out(0, Location::SameAsFirstInput());
+  } else if (from() == kUnboxedInt64) {
+    ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
+    summary->set_in(0, Location::Pair(Location::RequiresRegister(),
+                                      Location::RequiresRegister()));
+    summary->set_out(0, Location::RequiresRegister());
+  } else if (to() == kUnboxedInt64) {
+    ASSERT(from() == kUnboxedUint32 || from() == kUnboxedInt32);
+    summary->set_in(0, Location::RequiresRegister());
+    summary->set_out(0, Location::Pair(Location::RequiresRegister(),
+                                       Location::RequiresRegister()));
+  } else {
+    ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
+    ASSERT(from() == kUnboxedUint32 || from() == kUnboxedInt32);
+    summary->set_in(0, Location::RequiresRegister());
+    summary->set_out(0, Location::SameAsFirstInput());
+  }
+  return summary;
+#else
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  if (from() == kUntagged || to() == kUntagged) {
+    ASSERT((from() == kUntagged && to() == kUnboxedIntPtr) ||
+           (from() == kUnboxedIntPtr && to() == kUntagged));
+    ASSERT(!CanDeoptimize());
+  } else if (from() == kUnboxedInt64) {
+    ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
+  } else if (to() == kUnboxedInt64) {
+    ASSERT(from() == kUnboxedInt32 || from() == kUnboxedUint32);
+  } else {
+    ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
+    ASSERT(from() == kUnboxedUint32 || from() == kUnboxedInt32);
+  }
+  summary->set_in(0, Location::RequiresRegister());
+  if (CanDeoptimize()) {
+    summary->set_out(0, Location::RequiresRegister());
+  } else {
+    summary->set_out(0, Location::SameAsFirstInput());
+  }
+  return summary;
+#endif
+}
+
+void IntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+#if XLEN == 32
+  const bool is_nop_conversion =
+      (from() == kUntagged && to() == kUnboxedInt32) ||
+      (from() == kUntagged && to() == kUnboxedUint32) ||
+      (from() == kUnboxedInt32 && to() == kUntagged) ||
+      (from() == kUnboxedUint32 && to() == kUntagged);
+  if (is_nop_conversion) {
+    ASSERT(locs()->in(0).reg() == locs()->out(0).reg());
+    return;
+  }
+
+  if (from() == kUnboxedInt32 && to() == kUnboxedUint32) {
+    const Register out = locs()->out(0).reg();
+    // Representations are bitwise equivalent.
+    ASSERT(out == locs()->in(0).reg());
+  } else if (from() == kUnboxedUint32 && to() == kUnboxedInt32) {
+    const Register out = locs()->out(0).reg();
+    // Representations are bitwise equivalent.
+    ASSERT(out == locs()->in(0).reg());
+    if (CanDeoptimize()) {
+      compiler::Label* deopt =
+          compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger);
+      __ bltz(out, deopt);
+    }
+  } else if (from() == kUnboxedInt64) {
+    ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
+    PairLocation* in_pair = locs()->in(0).AsPairLocation();
+    Register in_lo = in_pair->At(0).reg();
+    Register in_hi = in_pair->At(1).reg();
+    Register out = locs()->out(0).reg();
+    // Copy low word.
+    __ mv(out, in_lo);
+    if (CanDeoptimize()) {
+      compiler::Label* deopt =
+          compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger);
+      ASSERT(to() == kUnboxedInt32);
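+      // The int64 fits in an int32 iff the high word equals the sign
+      // extension of the low word.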
+      __ srai(TMP, in_lo, XLEN - 1);
+      __ bne(in_hi, TMP, deopt);
+    }
+  } else if (from() == kUnboxedUint32 || from() == kUnboxedInt32) {
+    ASSERT(to() == kUnboxedInt64);
+    Register in = locs()->in(0).reg();
+    PairLocation* out_pair = locs()->out(0).AsPairLocation();
+    Register out_lo = out_pair->At(0).reg();
+    Register out_hi = out_pair->At(1).reg();
+    // Copy low word.
+    __ mv(out_lo, in);
+    if (from() == kUnboxedUint32) {
+      __ li(out_hi, 0);
+    } else {
+      ASSERT(from() == kUnboxedInt32);
+      __ srai(out_hi, in, XLEN - 1);
+    }
+  } else {
+    UNREACHABLE();
+  }
+#else
+  ASSERT(from() != to());  // We don't convert from a representation to itself.
+
+  const bool is_nop_conversion =
+      (from() == kUntagged && to() == kUnboxedIntPtr) ||
+      (from() == kUnboxedIntPtr && to() == kUntagged);
+  if (is_nop_conversion) {
+    ASSERT(locs()->in(0).reg() == locs()->out(0).reg());
+    return;
+  }
+
+  const Register value = locs()->in(0).reg();
+  const Register out = locs()->out(0).reg();
+  compiler::Label* deopt =
+      !CanDeoptimize()
+          ? NULL
+          : compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger);
+  if (from() == kUnboxedInt32 && to() == kUnboxedUint32) {
+    if (CanDeoptimize()) {
+      __ slli(TMP, value, 32);
+      __ bltz(TMP, deopt);  // If sign bit is set it won't fit in a uint32.
+    }
+    if (out != value) {
+      __ mv(out, value);  // For non-negative values the bits are the same.
+    }
+  } else if (from() == kUnboxedUint32 && to() == kUnboxedInt32) {
+    if (CanDeoptimize()) {
+      __ slli(TMP, value, 32);
+      __ bltz(TMP, deopt);  // If high bit is set it won't fit in an int32.
+    }
+    if (out != value) {
+      __ mv(out, value);  // Bits are unchanged for values below 2^31.
+    }
+  } else if (from() == kUnboxedInt64) {
+    if (to() == kUnboxedInt32) {
+      if (is_truncating() || out != value) {
+        __ sextw(out, value);  // Signed extension 64->32.
+      }
+    } else {
+      ASSERT(to() == kUnboxedUint32);
+      if (is_truncating() || out != value) {
+        // Unsigned extension 64->32.
+        // TODO(riscv): Might be a shorter way to do this.
+        __ slli(out, value, 32);
+        __ srli(out, out, 32);
+      }
+    }
+    if (CanDeoptimize()) {
+      ASSERT(to() == kUnboxedInt32);
+      __ CompareRegisters(out, value);
+      __ BranchIf(NE, deopt);  // Value cannot be held in Int32, deopt.
+    }
+  } else if (to() == kUnboxedInt64) {
+    if (from() == kUnboxedUint32) {
+      // TODO(riscv): Might be a shorter way to do this.
+      __ slli(out, value, 32);
+      __ srli(out, out, 32);
+    } else {
+      ASSERT(from() == kUnboxedInt32);
+      __ sextw(out, value);  // Signed extension 32->64.
+    }
+  } else {
+    UNREACHABLE();
+  }
+#endif
+}
+
+LocationSummary* BitCastInstr::MakeLocationSummary(Zone* zone, bool opt) const {
+  LocationSummary* summary =
+      new (zone) LocationSummary(zone, /*num_inputs=*/InputCount(),
+                                 /*num_temps=*/0, LocationSummary::kNoCall);
+  switch (from()) {
+    case kUnboxedInt32:
+      summary->set_in(0, Location::RequiresRegister());
+      break;
+    case kUnboxedInt64:
+#if XLEN == 32
+      summary->set_in(0, Location::Pair(Location::RequiresRegister(),
+                                        Location::RequiresRegister()));
+#else
+      summary->set_in(0, Location::RequiresRegister());
+#endif
+      break;
+    case kUnboxedFloat:
+    case kUnboxedDouble:
+      summary->set_in(0, Location::RequiresFpuRegister());
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  switch (to()) {
+    case kUnboxedInt32:
+      summary->set_out(0, Location::RequiresRegister());
+      break;
+    case kUnboxedInt64:
+#if XLEN == 32
+      summary->set_out(0, Location::Pair(Location::RequiresRegister(),
+                                         Location::RequiresRegister()));
+#else
+      summary->set_out(0, Location::RequiresRegister());
+#endif
+      break;
+    case kUnboxedFloat:
+    case kUnboxedDouble:
+      summary->set_out(0, Location::RequiresFpuRegister());
+      break;
+    default:
+      UNREACHABLE();
+  }
+  return summary;
+}
+
+void BitCastInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  UNIMPLEMENTED();
+}
+
+LocationSummary* StopInstr::MakeLocationSummary(Zone* zone, bool opt) const {
+  return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall);
+}
+
+void StopInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  __ Stop(message());
+}
+
+void GraphEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  BlockEntryInstr* entry = normal_entry();
+  if (entry != nullptr) {
+    if (!compiler->CanFallThroughTo(entry)) {
+      FATAL("Checked function entry must have no offset");
+    }
+  } else {
+    entry = osr_entry();
+    if (!compiler->CanFallThroughTo(entry)) {
+      __ j(compiler->GetJumpLabel(entry));
+    }
+  }
+}
+
+LocationSummary* GotoInstr::MakeLocationSummary(Zone* zone, bool opt) const {
+  return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall);
+}
+
+void GotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  if (!compiler->is_optimizing()) {
+    if (FLAG_reorder_basic_blocks) {
+      compiler->EmitEdgeCounter(block()->preorder_number());
+    }
+    // Add a deoptimization descriptor for deoptimizing instructions that
+    // may be inserted before this instruction.
+    compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, GetDeoptId(),
+                                   InstructionSource());
+  }
+  if (HasParallelMove()) {
+    compiler->parallel_move_resolver()->EmitNativeCode(parallel_move());
+  }
+
+  // We can fall through if the successor is the next block in the list.
+  // Otherwise, we need a jump.
+  if (!compiler->CanFallThroughTo(successor())) {
+    __ j(compiler->GetJumpLabel(successor()));
+  }
+}
+
+LocationSummary* IndirectGotoInstr::MakeLocationSummary(Zone* zone,
+                                                        bool opt) const {
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 2;
+
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+
+  summary->set_in(0, Location::RequiresRegister());
+  summary->set_temp(0, Location::RequiresRegister());
+  summary->set_temp(1, Location::RequiresRegister());
+
+  return summary;
+}
+
+void IndirectGotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  Register index_reg = locs()->in(0).reg();
+  Register target_address_reg = locs()->temp(0).reg();
+  Register offset_reg = locs()->temp(1).reg();
+
+  ASSERT(RequiredInputRepresentation(0) == kTagged);
+  __ LoadObject(offset_reg, offsets_);
+  const auto element_address = __ ElementAddressForRegIndex(
+      /*is_external=*/false, kTypedDataInt32ArrayCid,
+      /*index_scale=*/4,
+      /*index_unboxed=*/false, offset_reg, index_reg, TMP);
+  __ lw(offset_reg, element_address);
+
+  const intptr_t entry_offset = __ CodeSize();
+  intx_t imm = -entry_offset;
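+  // auipc materializes the current pc; adding -entry_offset rebases it to
+  // the start of this code, and offset_reg then supplies the table-relative
+  // target. Split imm into auipc's U-type immediate and jr's sign-extended
+  // 12-bit I-type immediate so that hi + lo == imm.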
+  intx_t lo = imm << (XLEN - 12) >> (XLEN - 12);
+  intx_t hi = (imm - lo) << (XLEN - 32) >> (XLEN - 32);
+  __ auipc(target_address_reg, hi);
+  __ add(target_address_reg, target_address_reg, offset_reg);
+  __ jr(target_address_reg, lo);
+}
+
+LocationSummary* StrictCompareInstr::MakeLocationSummary(Zone* zone,
+                                                         bool opt) const {
+  const intptr_t kNumInputs = 2;
+  const intptr_t kNumTemps = 0;
+  if (needs_number_check()) {
+    LocationSummary* locs = new (zone)
+        LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
+    locs->set_in(0, Location::RegisterLocation(A0));
+    locs->set_in(1, Location::RegisterLocation(A1));
+    locs->set_out(0, Location::RegisterLocation(A0));
+    return locs;
+  }
+  LocationSummary* locs = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  locs->set_in(0, LocationRegisterOrConstant(left()));
+  // Only one of the inputs can be a constant. Choose register if the first one
+  // is a constant.
+  locs->set_in(1, locs->in(0).IsConstant()
+                      ? Location::RequiresRegister()
+                      : LocationRegisterOrConstant(right()));
+  locs->set_out(0, Location::RequiresRegister());
+  return locs;
+}
+
+Condition StrictCompareInstr::EmitComparisonCodeRegConstant(
+    FlowGraphCompiler* compiler,
+    BranchLabels labels,
+    Register reg,
+    const Object& obj) {
+  return compiler->EmitEqualityRegConstCompare(reg, obj, needs_number_check(),
+                                               source(), deopt_id());
+}
+
+void ComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  compiler::Label is_true, is_false;
+  BranchLabels labels = {&is_true, &is_false, &is_false};
+  Condition true_condition = EmitComparisonCode(compiler, labels);
+
+  Register result = locs()->out(0).reg();
+  if (is_true.IsLinked() || is_false.IsLinked()) {
+    if (true_condition != kInvalidCondition) {
+      EmitBranchOnCondition(compiler, true_condition, labels);
+    }
+    compiler::Label done;
+    __ Bind(&is_false);
+    __ LoadObject(result, Bool::False());
+    __ j(&done, compiler::Assembler::kNearJump);
+    __ Bind(&is_true);
+    __ LoadObject(result, Bool::True());
+    __ Bind(&done);
+  } else {
+    // If EmitComparisonCode did not use the labels and just returned
+    // a condition, we can avoid the branch: materialize the condition as
+    // 0 or 1 with SetIf and scale it into the offset of Bool::True() or
+    // Bool::False() from the null object.
+    ASSERT(kTrueOffsetFromNull + (1 << kBoolValueBitPosition) ==
+           kFalseOffsetFromNull);
+    __ SetIf(InvertCondition(true_condition), result);
+    __ slli(result, result, kBoolValueBitPosition);
+    __ add(result, result, NULL_REG);
+    __ addi(result, result, kTrueOffsetFromNull);
+  }
+}
+
+void ComparisonInstr::EmitBranchCode(FlowGraphCompiler* compiler,
+                                     BranchInstr* branch) {
+  BranchLabels labels = compiler->CreateBranchLabels(branch);
+  Condition true_condition = EmitComparisonCode(compiler, labels);
+  if (true_condition != kInvalidCondition) {
+    EmitBranchOnCondition(compiler, true_condition, labels);
+  }
+}
+
+LocationSummary* BooleanNegateInstr::MakeLocationSummary(Zone* zone,
+                                                         bool opt) const {
+  return LocationSummary::Make(zone, 1, Location::RequiresRegister(),
+                               LocationSummary::kNoCall);
+}
+
+void BooleanNegateInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  const Register input = locs()->in(0).reg();
+  const Register result = locs()->out(0).reg();
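+  // Bool::True() and Bool::False() are allocated so that their tagged
+  // pointers differ only in the kBoolValueMask bit; flipping that bit
+  // negates the value.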
+  __ xori(result, input, compiler::target::ObjectAlignment::kBoolValueMask);
+}
+
+LocationSummary* AllocateObjectInstr::MakeLocationSummary(Zone* zone,
+                                                          bool opt) const {
+  const intptr_t kNumInputs = (type_arguments() != nullptr) ? 1 : 0;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* locs = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
+  if (type_arguments() != nullptr) {
+    locs->set_in(kTypeArgumentsPos, Location::RegisterLocation(
+                                        AllocateObjectABI::kTypeArgumentsReg));
+  }
+  locs->set_out(0, Location::RegisterLocation(AllocateObjectABI::kResultReg));
+  return locs;
+}
+
+void AllocateObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  if (type_arguments() != nullptr) {
+    TypeUsageInfo* type_usage_info = compiler->thread()->type_usage_info();
+    if (type_usage_info != nullptr) {
+      RegisterTypeArgumentsUse(compiler->function(), type_usage_info, cls_,
+                               type_arguments()->definition());
+    }
+  }
+  const Code& stub = Code::ZoneHandle(
+      compiler->zone(), StubCode::GetAllocationStubForClass(cls()));
+  compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
+                             locs(), deopt_id(), env());
+}
+
+void DebugStepCheckInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+#ifdef PRODUCT
+  UNREACHABLE();
+#else
+  ASSERT(!compiler->is_optimizing());
+  __ JumpAndLinkPatchable(StubCode::DebugStepCheck());
+  compiler->AddCurrentDescriptor(stub_kind_, deopt_id_, source());
+  compiler->RecordSafepoint(locs());
+#endif
+}
+
+}  // namespace dart
+
+#endif  // defined(TARGET_ARCH_RISCV)
diff --git a/runtime/vm/compiler/backend/locations.h b/runtime/vm/compiler/backend/locations.h
index 56c5604..b9988948 100644
--- a/runtime/vm/compiler/backend/locations.h
+++ b/runtime/vm/compiler/backend/locations.h
@@ -533,7 +533,7 @@
  public:
   SmallSet() : data_(0) {}
 
-  explicit SmallSet(intptr_t data) : data_(data) {}
+  explicit SmallSet(uintptr_t data) : data_(data) {}
 
   bool Contains(T value) const { return (data_ & ToMask(value)) != 0; }
 
@@ -545,15 +545,15 @@
 
   void Clear() { data_ = 0; }
 
-  intptr_t data() const { return data_; }
+  uintptr_t data() const { return data_; }
 
  private:
-  static intptr_t ToMask(T value) {
-    ASSERT(static_cast<intptr_t>(value) < (kWordSize * kBitsPerByte));
-    return 1 << static_cast<intptr_t>(value);
+  static uintptr_t ToMask(T value) {
+    ASSERT(static_cast<uintptr_t>(value) < (kWordSize * kBitsPerByte));
+    return static_cast<uintptr_t>(1) << static_cast<uintptr_t>(value);
   }
 
-  intptr_t data_;
+  uintptr_t data_;
 };
 
 class RegisterSet : public ValueObject {
@@ -564,8 +564,8 @@
     ASSERT(kNumberOfFpuRegisters <= (kWordSize * kBitsPerByte));
   }
 
-  explicit RegisterSet(intptr_t cpu_register_mask,
-                       intptr_t fpu_register_mask = 0)
+  explicit RegisterSet(uintptr_t cpu_register_mask,
+                       uintptr_t fpu_register_mask = 0)
       : RegisterSet() {
     AddTaggedRegisters(cpu_register_mask, fpu_register_mask);
   }
@@ -620,8 +620,8 @@
 #endif
   }
 
-  void AddTaggedRegisters(intptr_t cpu_register_mask,
-                          intptr_t fpu_register_mask) {
+  void AddTaggedRegisters(uintptr_t cpu_register_mask,
+                          uintptr_t fpu_register_mask) {
     for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
       if (Utils::TestBit(cpu_register_mask, i)) {
         const Register reg = static_cast<Register>(i);
@@ -694,12 +694,12 @@
   intptr_t FpuRegisterCount() const { return RegisterCount(fpu_registers()); }
 
   static intptr_t RegisterCount(intptr_t registers);
-  static bool Contains(intptr_t register_set, intptr_t reg) {
-    return (register_set & (1 << reg)) != 0;
+  static bool Contains(uintptr_t register_set, intptr_t reg) {
+    return (register_set & (static_cast<uintptr_t>(1) << reg)) != 0;
   }
 
-  intptr_t cpu_registers() const { return cpu_registers_.data(); }
-  intptr_t fpu_registers() const { return fpu_registers_.data(); }
+  uintptr_t cpu_registers() const { return cpu_registers_.data(); }
+  uintptr_t fpu_registers() const { return fpu_registers_.data(); }
 
   void Clear() {
     cpu_registers_.Clear();
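
The intptr_t to uintptr_t switch above matters once an architecture has a full 32 CPU registers, as RISC-V does: building the mask for register 31 via `1 << 31` shifts a 1 into the sign bit of a 32-bit int (undefined behavior before C++20), and on a 32-bit target an intptr_t cannot hold bit 31 as a positive value. A minimal sketch of the safe pattern the patch adopts:

    #include <cassert>
    #include <cstdint>

    // Promote the 1 to the unsigned word type before shifting, as the
    // patch does in SmallSet::ToMask and RegisterSet::Contains.
    static uintptr_t ToMask(int reg) {
      return static_cast<uintptr_t>(1) << reg;
    }

    int main() {
      uintptr_t set = 0;
      for (int reg = 0; reg < 32; ++reg) {
        set |= ToMask(reg);  // reg == 31 would overflow a signed 32-bit shift
      }
      assert((set & ToMask(31)) != 0);  // bit 31 is representable unsigned
      return 0;
    }
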
diff --git a/runtime/vm/compiler/backend/locations_helpers.h b/runtime/vm/compiler/backend/locations_helpers.h
index 6b62e40..fefd212 100644
--- a/runtime/vm/compiler/backend/locations_helpers.h
+++ b/runtime/vm/compiler/backend/locations_helpers.h
@@ -434,6 +434,10 @@
 #include "vm/compiler/backend/locations_helpers_arm.h"
 #elif defined(TARGET_ARCH_ARM64)
 
+#elif defined(TARGET_ARCH_RISCV32)
+
+#elif defined(TARGET_ARCH_RISCV64)
+
 #else
 #error Unknown architecture.
 #endif
diff --git a/runtime/vm/compiler/compiler_sources.gni b/runtime/vm/compiler/compiler_sources.gni
index 1005791..ace9914 100644
--- a/runtime/vm/compiler/compiler_sources.gni
+++ b/runtime/vm/compiler/compiler_sources.gni
@@ -18,6 +18,7 @@
   "asm_intrinsifier_arm.cc",
   "asm_intrinsifier_arm64.cc",
   "asm_intrinsifier_ia32.cc",
+  "asm_intrinsifier_riscv.cc",
   "asm_intrinsifier_x64.cc",
   "assembler/assembler.h",
   "assembler/assembler_arm.cc",
@@ -28,6 +29,8 @@
   "assembler/assembler_base.h",
   "assembler/assembler_ia32.cc",
   "assembler/assembler_ia32.h",
+  "assembler/assembler_riscv.cc",
+  "assembler/assembler_riscv.h",
   "assembler/assembler_x64.cc",
   "assembler/assembler_x64.h",
   "assembler/object_pool_builder.h",
@@ -52,6 +55,7 @@
   "backend/flow_graph_compiler_arm.cc",
   "backend/flow_graph_compiler_arm64.cc",
   "backend/flow_graph_compiler_ia32.cc",
+  "backend/flow_graph_compiler_riscv.cc",
   "backend/flow_graph_compiler_x64.cc",
   "backend/il.cc",
   "backend/il.h",
@@ -60,6 +64,7 @@
   "backend/il_ia32.cc",
   "backend/il_printer.cc",
   "backend/il_printer.h",
+  "backend/il_riscv.cc",
   "backend/il_x64.cc",
   "backend/inliner.cc",
   "backend/inliner.h",
@@ -141,6 +146,7 @@
   "stub_code_compiler_arm.cc",
   "stub_code_compiler_arm64.cc",
   "stub_code_compiler_ia32.cc",
+  "stub_code_compiler_riscv.cc",
   "stub_code_compiler_x64.cc",
   "write_barrier_elimination.cc",
   "write_barrier_elimination.h",
@@ -151,6 +157,7 @@
   "assembler/assembler_arm64_test.cc",
   "assembler/assembler_arm_test.cc",
   "assembler/assembler_ia32_test.cc",
+  "assembler/assembler_riscv_test.cc",
   "assembler/assembler_test.cc",
   "assembler/assembler_x64_test.cc",
   "assembler/disassembler_test.cc",
@@ -193,6 +200,7 @@
   "assembler/disassembler.h",
   "assembler/disassembler_arm.cc",
   "assembler/disassembler_arm64.cc",
+  "assembler/disassembler_riscv.cc",
   "assembler/disassembler_x86.cc",
 ]
 
diff --git a/runtime/vm/compiler/ffi/abi.cc b/runtime/vm/compiler/ffi/abi.cc
index c902a89..1652c720 100644
--- a/runtime/vm/compiler/ffi/abi.cc
+++ b/runtime/vm/compiler/ffi/abi.cc
@@ -22,7 +22,8 @@
   uint64_t i;
 };
 
-#if defined(HOST_ARCH_X64) || defined(HOST_ARCH_ARM64)
+#if defined(HOST_ARCH_X64) || defined(HOST_ARCH_ARM64) ||                      \
+    defined(HOST_ARCH_RISCV64)
 static_assert(offsetof(AbiAlignmentDouble, d) == 8,
               "FFI transformation alignment");
 static_assert(offsetof(AbiAlignmentUint64, i) == 8,
@@ -81,6 +82,12 @@
 #elif defined(TARGET_ARCH_ARM64)
 #define TARGET_ARCH_NAME Arm64
 #define TARGET_ARCH_NAME_LC arm64
+#elif defined(TARGET_ARCH_RISCV32)
+#define TARGET_ARCH_NAME Riscv32
+#define TARGET_ARCH_NAME_LC riscv32
+#elif defined(TARGET_ARCH_RISCV64)
+#define TARGET_ARCH_NAME Riscv64
+#define TARGET_ARCH_NAME_LC riscv64
 #else
 #error Unknown arch
 #endif
diff --git a/runtime/vm/compiler/ffi/abi.h b/runtime/vm/compiler/ffi/abi.h
index 81b76d9..b693ef8 100644
--- a/runtime/vm/compiler/ffi/abi.h
+++ b/runtime/vm/compiler/ffi/abi.h
@@ -33,6 +33,8 @@
   kLinuxArm64,
   kLinuxIA32,
   kLinuxX64,
+  kLinuxRiscv32,
+  kLinuxRiscv64,
   kMacOSArm64,
   kMacOSX64,
   kWindowsArm64,
@@ -47,9 +49,9 @@
 // - runtime/vm/compiler/frontend/kernel_to_il.cc
 static_assert(static_cast<int64_t>(Abi::kAndroidArm) == 0,
               "Enum value unexpected.");
-static_assert(static_cast<int64_t>(Abi::kWindowsX64) == 17,
+static_assert(static_cast<int64_t>(Abi::kWindowsX64) == 19,
               "Enum value unexpected.");
-static_assert(num_abis == 18, "Enum value unexpected.");
+static_assert(num_abis == 20, "Enum value unexpected.");
 
 // The target ABI. Defines sizes and alignment of native types.
 Abi TargetAbi();
diff --git a/runtime/vm/compiler/ffi/native_calling_convention.cc b/runtime/vm/compiler/ffi/native_calling_convention.cc
index 0d81047..e30a518 100644
--- a/runtime/vm/compiler/ffi/native_calling_convention.cc
+++ b/runtime/vm/compiler/ffi/native_calling_convention.cc
@@ -94,8 +94,22 @@
       if (CallingConventions::kArgumentIntRegXorFpuReg) {
         cpu_regs_used++;
       }
-      return *new (zone_) NativeFpuRegistersLocation(payload_type, payload_type,
-                                                     kind, reg_index);
+#if defined(TARGET_ARCH_ARM)
+      if (kind == kSingleFpuReg) {
+        return *new (zone_)
+            NativeFpuRegistersLocation(payload_type, payload_type, kind,
+                                       static_cast<SRegister>(reg_index));
+      }
+      if (kind == kDoubleFpuReg) {
+        return *new (zone_)
+            NativeFpuRegistersLocation(payload_type, payload_type, kind,
+                                       static_cast<DRegister>(reg_index));
+      }
+#endif
+      ASSERT(kind == kQuadFpuReg);
+      FpuRegister reg = CallingConventions::FpuArgumentRegisters[reg_index];
+      return *new (zone_)
+          NativeFpuRegistersLocation(payload_type, payload_type, reg);
     }
 
     BlockAllFpuRegisters();
@@ -252,24 +266,24 @@
         return AllocateStack(payload_type);
       }
     } else {
-      const intptr_t chunck_size = payload_type.AlignmentInBytesStack();
-      ASSERT(chunck_size == 4 || chunck_size == 8);
+      const intptr_t chunk_size = payload_type.AlignmentInBytesStack();
+      ASSERT(chunk_size == 4 || chunk_size == 8);
       const intptr_t size_rounded =
-          Utils::RoundUp(payload_type.SizeInBytes(), chunck_size);
-      const intptr_t num_chuncks = size_rounded / chunck_size;
+          Utils::RoundUp(payload_type.SizeInBytes(), chunk_size);
+      const intptr_t num_chunks = size_rounded / chunk_size;
-      const auto& chuck_type =
-          *new (zone_) NativePrimitiveType(chunck_size == 4 ? kInt32 : kInt64);
+      const auto& chunk_type =
+          *new (zone_) NativePrimitiveType(chunk_size == 4 ? kInt32 : kInt64);
 
       NativeLocations& multiple_locations =
-          *new (zone_) NativeLocations(zone_, num_chuncks);
-      for (int i = 0; i < num_chuncks; i++) {
+          *new (zone_) NativeLocations(zone_, num_chunks);
+      for (int i = 0; i < num_chunks; i++) {
-        const auto& allocated_chunk = &AllocateArgument(chuck_type);
+        const auto& allocated_chunk = &AllocateArgument(chunk_type);
         // The last chunk should not be 8 bytes, if the struct only has 4
         // remaining bytes to be allocated.
-        if (i == num_chuncks - 1 && chunck_size == 8 &&
+        if (i == num_chunks - 1 && chunk_size == 8 &&
             Utils::RoundUp(payload_type.SizeInBytes(), 4) % 8 == 4) {
-          const auto& small_chuck_type = *new (zone_) NativePrimitiveType(
-              chunck_size == 4 ? kInt32 : kInt64);
+          const auto& small_chunk_type =
+              *new (zone_) NativePrimitiveType(kInt32);
           multiple_locations.Add(&allocated_chunk->WithOtherNativeType(
-              zone_, small_chuck_type, small_chuck_type));
+              zone_, small_chunk_type, small_chunk_type));
         } else {
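
To make the chunking above concrete: a 12-byte struct with an 8-byte stack alignment is rounded up to 16 bytes and split into two chunks, but the final chunk only carries 4 real bytes, so it is narrowed back to a 4-byte (kInt32) chunk. A small sketch of the size arithmetic, independent of the allocator types:

    #include <cassert>

    int main() {
      const int size = 12, chunk_size = 8;
      const int size_rounded = ((size + chunk_size - 1) / chunk_size) * chunk_size;
      const int num_chunks = size_rounded / chunk_size;
      assert(size_rounded == 16 && num_chunks == 2);
      // Last-chunk narrowing condition from the code above: round the true
      // size up to 4 bytes and check whether 4 bytes of the last 8-byte
      // chunk would be padding.
      const bool narrow_last =
          (chunk_size == 8) && ((((size + 3) / 4) * 4) % 8 == 4);
      assert(narrow_last);  // 12 % 8 == 4, so the last chunk shrinks to 4 bytes
      return 0;
    }
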
@@ -324,13 +338,13 @@
       if (regs_available) {
         const intptr_t size_rounded =
             Utils::RoundUp(payload_type.SizeInBytes(), 8);
-        const intptr_t num_chuncks = size_rounded / 8;
-        const auto& chuck_type = *new (zone_) NativePrimitiveType(kInt64);
+        const intptr_t num_chunks = size_rounded / 8;
+        const auto& chunk_type = *new (zone_) NativePrimitiveType(kInt64);
 
         NativeLocations& multiple_locations =
-            *new (zone_) NativeLocations(zone_, num_chuncks);
-        for (int i = 0; i < num_chuncks; i++) {
-          const auto& allocated_chunk = &AllocateArgument(chuck_type);
+            *new (zone_) NativeLocations(zone_, num_chunks);
+        for (int i = 0; i < num_chunks; i++) {
+          const auto& allocated_chunk = &AllocateArgument(chunk_type);
           multiple_locations.Add(allocated_chunk);
         }
         return *new (zone_)
@@ -350,6 +364,36 @@
   }
 #endif  // defined(TARGET_ARCH_ARM64)
 
+#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+  // If total size is <= XLEN, passed like an XLEN scalar: use a register if
+  // available or pass by value on the stack.
+  // If total size is <= 2*XLEN, passed like two XLEN scalars: use registers
+  // if available or pass by value on the stack. If only one register is
+  // available, pass the low part by register and the high part on the stack.
+  // Otherwise, passed by reference.
+  const NativeLocation& AllocateCompound(
+      const NativeCompoundType& payload_type) {
+    const auto& pointer_type = *new (zone_) NativePrimitiveType(kFfiIntPtr);
+    const auto& compound_type = payload_type.AsCompound();
+    const intptr_t size = compound_type.SizeInBytes();
+    if (size <= target::kWordSize) {
+      return AllocateArgument(pointer_type);
+    } else if (size <= 2 * target::kWordSize) {
+      NativeLocations& multiple_locations =
+          *new (zone_) NativeLocations(zone_, 2);
+      multiple_locations.Add(&AllocateArgument(pointer_type));
+      multiple_locations.Add(&AllocateArgument(pointer_type));
+      return *new (zone_)
+          MultipleNativeLocations(compound_type, multiple_locations);
+    } else {
+      const auto& pointer_location = AllocateArgument(pointer_type);
+      return *new (zone_)
+          PointerToMemoryLocation(pointer_location, compound_type);
+    }
+  }
+#endif
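
A sketch of the size classification the comment above describes, with `xlen` standing in for target::kWordSize (4 on RV32, 8 on RV64); the enum and function names here are illustrative, not the allocator's:

    #include <cassert>
    #include <cstddef>

    enum Passing { kOneScalar, kTwoScalars, kByReference };

    Passing ClassifyCompound(size_t size_in_bytes, size_t xlen) {
      if (size_in_bytes <= xlen) return kOneScalar;       // one register/stack slot
      if (size_in_bytes <= 2 * xlen) return kTwoScalars;  // two registers/slots
      return kByReference;                                // caller passes a pointer
    }

    int main() {
      assert(ClassifyCompound(8, 8) == kOneScalar);     // RV64: fits in a0
      assert(ClassifyCompound(12, 8) == kTwoScalars);   // RV64: a0/a1 (or stack)
      assert(ClassifyCompound(24, 8) == kByReference);  // RV64: hidden pointer
      assert(ClassifyCompound(8, 4) == kTwoScalars);    // RV32: two 4-byte slots
      return 0;
    }
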
+
   static FpuRegisterKind FpuRegKind(const NativeType& payload_type) {
 #if defined(TARGET_ARCH_ARM)
     return FpuRegisterKindFromSize(payload_type.SizeInBytes());
@@ -666,6 +710,22 @@
 }
 #endif  // defined(TARGET_ARCH_ARM64)
 
+#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+static const NativeLocation& CompoundResultLocation(
+    Zone* zone,
+    const NativeCompoundType& payload_type) {
+  // First or first and second argument registers if it fits, otherwise a
+  // pointer to the result location is passed in.
+  ArgumentAllocator frame_state(zone);
+  const auto& location_as_argument = frame_state.AllocateArgument(payload_type);
+  if (!location_as_argument.IsStack() &&
+      !location_as_argument.IsPointerToMemory()) {
+    return location_as_argument;
+  }
+  return PointerToMemoryResultLocation(zone, payload_type);
+}
+#endif  // defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
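
As a usage-level illustration of the result rule above: per the RISC-V psABI, a 16-byte trivially copyable struct comes back in a0/a1 on RV64 rather than through a hidden result pointer, which is exactly the case CompoundResultLocation keeps in registers:

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    struct Pair {
      int64_t lo;
      int64_t hi;
    };  // 16 bytes == 2 * XLEN on RV64

    Pair MakePair() { return Pair{1, 2}; }  // returned in a0/a1 on RV64

    int main() {
      Pair p = MakePair();  // no sret pointer involved for this size
      printf("%" PRId64 " %" PRId64 "\n", p.lo, p.hi);
      return 0;
    }
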
+
 // Location for the result of a C signature function.
 static const NativeLocation& ResultLocation(Zone* zone,
                                             const NativeType& payload_type) {
diff --git a/runtime/vm/compiler/ffi/native_calling_convention_test.cc b/runtime/vm/compiler/ffi/native_calling_convention_test.cc
index 328c0ca..dc0d522 100644
--- a/runtime/vm/compiler/ffi/native_calling_convention_test.cc
+++ b/runtime/vm/compiler/ffi/native_calling_convention_test.cc
@@ -11,6 +11,9 @@
 namespace compiler {
 namespace ffi {
 
+// TODO(https://github.com/dart-lang/sdk/issues/48164)
+#if !defined(TARGET_ARCH_RISCV32) && !defined(TARGET_ARCH_RISCV64)
+
 const NativeCallingConvention& RunSignatureTest(
     dart::Zone* zone,
     const char* name,
@@ -625,6 +628,8 @@
   RunSignatureTest(Z, "struct12bytesFloatx6", arguments, int64_type);
 }
 
+#endif  // !defined(TARGET_ARCH_RISCV32) && !defined(TARGET_ARCH_RISCV64)
+
 }  // namespace ffi
 }  // namespace compiler
 }  // namespace dart
diff --git a/runtime/vm/compiler/ffi/native_location_test.cc b/runtime/vm/compiler/ffi/native_location_test.cc
index 8ba10d9..8eecc54 100644
--- a/runtime/vm/compiler/ffi/native_location_test.cc
+++ b/runtime/vm/compiler/ffi/native_location_test.cc
@@ -10,6 +10,9 @@
 namespace compiler {
 namespace ffi {
 
+// TODO(https://github.com/dart-lang/sdk/issues/48164)
+#if !defined(TARGET_ARCH_RISCV32) && !defined(TARGET_ARCH_RISCV64)
+
 UNIT_TEST_CASE_WITH_ZONE(NativeStackLocation) {
   const auto& native_type = *new (Z) NativePrimitiveType(kInt8);
 
@@ -35,6 +38,8 @@
   EXPECT_EQ(4, half_1.offset_in_bytes());
 }
 
+#endif  // !defined(TARGET_ARCH_RISCV32) && !defined(TARGET_ARCH_RISCV64)
+
 }  // namespace ffi
 }  // namespace compiler
 }  // namespace dart
diff --git a/runtime/vm/compiler/ffi/native_type_test.cc b/runtime/vm/compiler/ffi/native_type_test.cc
index d132b91..aa3b00c 100644
--- a/runtime/vm/compiler/ffi/native_type_test.cc
+++ b/runtime/vm/compiler/ffi/native_type_test.cc
@@ -12,6 +12,9 @@
 namespace compiler {
 namespace ffi {
 
+// TODO(https://github.com/dart-lang/sdk/issues/48164)
+#if !defined(TARGET_ARCH_RISCV32) && !defined(TARGET_ARCH_RISCV64)
+
 const NativeCompoundType& RunStructTest(dart::Zone* zone,
                                         const char* name,
                                         const NativeTypes& member_types,
@@ -339,6 +342,8 @@
   EXPECT(union_type.ContainsUnalignedMembers());
 }
 
+#endif  // !defined(TARGET_ARCH_RISCV32) && !defined(TARGET_ARCH_RISCV64)
+
 }  // namespace ffi
 }  // namespace compiler
 }  // namespace dart
diff --git a/runtime/vm/compiler/ffi/native_type_vm_test.cc b/runtime/vm/compiler/ffi/native_type_vm_test.cc
index 50a1e07..28e2b42 100644
--- a/runtime/vm/compiler/ffi/native_type_vm_test.cc
+++ b/runtime/vm/compiler/ffi/native_type_vm_test.cc
@@ -11,6 +11,9 @@
 namespace compiler {
 namespace ffi {
 
+// TODO(https://github.com/dart-lang/sdk/issues/48164)
+#if !defined(TARGET_ARCH_RISCV32) && !defined(TARGET_ARCH_RISCV64)
+
 ISOLATE_UNIT_TEST_CASE(Ffi_NativeType_Primitive_FromAbstractType) {
   Zone* Z = thread->zone();
 
@@ -86,6 +89,8 @@
             native_type.members()[1]->SizeInBytes());
 }
 
+#endif  // !defined(TARGET_ARCH_RISCV32) && !defined(TARGET_ARCH_RISCV64)
+
 }  // namespace ffi
 }  // namespace compiler
 }  // namespace dart
diff --git a/runtime/vm/compiler/ffi/unit_test.cc b/runtime/vm/compiler/ffi/unit_test.cc
index b58e59f..138fe36 100644
--- a/runtime/vm/compiler/ffi/unit_test.cc
+++ b/runtime/vm/compiler/ffi/unit_test.cc
@@ -18,6 +18,10 @@
 const char* kArch = "ia32";
 #elif defined(TARGET_ARCH_X64)
 const char* kArch = "x64";
+#elif defined(TARGET_ARCH_RISCV32)
+const char* kArch = "riscv32";
+#elif defined(TARGET_ARCH_RISCV64)
+const char* kArch = "riscv64";
 #endif
 
 #if defined(DART_TARGET_OS_ANDROID)
diff --git a/runtime/vm/compiler/frontend/kernel_to_il.cc b/runtime/vm/compiler/frontend/kernel_to_il.cc
index 44a85cb..08ff0cc 100644
--- a/runtime/vm/compiler/frontend/kernel_to_il.cc
+++ b/runtime/vm/compiler/frontend/kernel_to_il.cc
@@ -926,7 +926,8 @@
       if (!FlowGraphCompiler::SupportsUnboxedDoubles()) return false;
 #if defined(TARGET_ARCH_X64)
       return CompilerState::Current().is_aot() || FLAG_target_unknown_cpu;
-#elif defined(TARGET_ARCH_ARM64)
+#elif defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_RISCV32) ||            \
+    defined(TARGET_ARCH_RISCV64)
       return true;
 #else
       return false;
diff --git a/runtime/vm/compiler/jit/compiler.cc b/runtime/vm/compiler/jit/compiler.cc
index ff96abb..150980e 100644
--- a/runtime/vm/compiler/jit/compiler.cc
+++ b/runtime/vm/compiler/jit/compiler.cc
@@ -488,7 +488,7 @@
-  // true. use_far_branches is always false on ia32 and x64.
+  // true. far_branch_level is always 0 on ia32 and x64.
   volatile bool done = false;
   // volatile because the variable may be clobbered by a longjmp.
-  volatile bool use_far_branches = false;
+  volatile intptr_t far_branch_level = 0;
 
   // In the JIT case we allow speculative inlining and have no need for a
   // suppression, since we don't restart optimization.
@@ -572,7 +572,7 @@
       ASSERT(pass_state.inline_id_to_function.length() ==
              pass_state.caller_inline_id.length());
       compiler::ObjectPoolBuilder object_pool_builder;
-      compiler::Assembler assembler(&object_pool_builder, use_far_branches);
+      compiler::Assembler assembler(&object_pool_builder, far_branch_level);
       FlowGraphCompiler graph_compiler(
           &assembler, flow_graph, *parsed_function(), optimized(),
           &speculative_policy, pass_state.inline_id_to_function,
@@ -647,8 +647,8 @@
         // Compilation failed due to an out of range branch offset in the
         // assembler. We try again (done = false) with far branches enabled.
         done = false;
-        ASSERT(!use_far_branches);
-        use_far_branches = true;
+        RELEASE_ASSERT(far_branch_level < 2);
+        far_branch_level++;
       } else if (error.ptr() == Object::speculative_inlining_error().ptr()) {
         // Can only happen with precompilation.
         UNREACHABLE();
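
The generalization from a use_far_branches bool to a far_branch_level counter gives the JIT more than one retry: each out-of-range branch failure bumps the level and re-assembles, and the RELEASE_ASSERT caps the escalation at two levels. A schematic of that control flow (TryAssemble is a stand-in, not a VM function):

    #include <cstdio>

    // Pretend assembly only succeeds once branches are emitted at level 2,
    // e.g. a method whose branches need the longest-range sequence.
    static bool TryAssemble(int far_branch_level) {
      return far_branch_level >= 2;
    }

    int main() {
      int far_branch_level = 0;  // replaces the old two-state bool
      bool done = false;
      while (!done) {
        if (TryAssemble(far_branch_level)) {
          done = true;
        } else {
          if (far_branch_level >= 2) return 1;  // mirrors RELEASE_ASSERT(level < 2)
          far_branch_level++;  // retry with longer-range branch sequences
        }
      }
      puts("assembled");
      return 0;
    }
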
diff --git a/runtime/vm/compiler/jit/compiler.h b/runtime/vm/compiler/jit/compiler.h
index caa0903..36966c9 100644
--- a/runtime/vm/compiler/jit/compiler.h
+++ b/runtime/vm/compiler/jit/compiler.h
@@ -171,10 +171,18 @@
  public:
   explicit NoBackgroundCompilerScope(Thread* thread)
       : StackResource(thread), isolate_group_(thread->isolate_group()) {
+#if defined(DART_PRECOMPILED_RUNTIME)
+    UNREACHABLE();
+#else
     isolate_group_->background_compiler()->Disable();
+#endif
   }
   ~NoBackgroundCompilerScope() {
+#if defined(DART_PRECOMPILED_RUNTIME)
+    UNREACHABLE();
+#else
     isolate_group_->background_compiler()->Enable();
+#endif
   }
 
  private:
diff --git a/runtime/vm/compiler/offsets_extractor.cc b/runtime/vm/compiler/offsets_extractor.cc
index e1f2f1f..2cb48c9 100644
--- a/runtime/vm/compiler/offsets_extractor.cc
+++ b/runtime/vm/compiler/offsets_extractor.cc
@@ -25,6 +25,10 @@
 #define ARCH_DEF_CPU "defined(TARGET_ARCH_IA32)"
 #elif defined(TARGET_ARCH_ARM64)
 #define ARCH_DEF_CPU "defined(TARGET_ARCH_ARM64)"
+#elif defined(TARGET_ARCH_RISCV32)
+#define ARCH_DEF_CPU "defined(TARGET_ARCH_RISCV32)"
+#elif defined(TARGET_ARCH_RISCV64)
+#define ARCH_DEF_CPU "defined(TARGET_ARCH_RISCV64)"
 #else
 #error Unknown architecture
 #endif
diff --git a/runtime/vm/compiler/relocation_test.cc b/runtime/vm/compiler/relocation_test.cc
index f361bd8..4aec712 100644
--- a/runtime/vm/compiler/relocation_test.cc
+++ b/runtime/vm/compiler/relocation_test.cc
@@ -33,6 +33,10 @@
   static const intptr_t kOffsetOfCall = 4;
 #elif defined(TARGET_ARCH_ARM)
   static const intptr_t kOffsetOfCall = 4;
+#elif defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+  static const intptr_t kOffsetOfCall = 4;
 #else
   static const intptr_t kOffsetOfCall = 0;
 #endif
@@ -85,6 +89,8 @@
                                    compiler::Address::PairPreIndex)));
 #elif defined(TARGET_ARCH_ARM)
       SPILLS_RETURN_ADDRESS_FROM_LR_TO_REGISTER(__ PushList((1 << LR)));
+#elif defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+      __ PushRegister(RA);
 #endif
       __ GenerateUnRelocatedPcRelativeCall();
       AddPcRelativeCallTargetAt(__ CodeSize(), code, target);
@@ -95,6 +101,8 @@
                                    compiler::Address::PairPostIndex)));
 #elif defined(TARGET_ARCH_ARM)
       RESTORES_RETURN_ADDRESS_FROM_REGISTER_TO_LR(__ PopList((1 << LR)));
+#elif defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+      __ PopRegister(RA);
 #endif
       __ Ret();
     });
@@ -107,6 +115,8 @@
       __ LoadImmediate(RAX, 42);
 #elif defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
       __ LoadImmediate(R0, 42);
+#elif defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+      __ LoadImmediate(A0, 42);
 #endif
       __ Ret();
     });
@@ -129,6 +139,10 @@
       instructions ^= OldPage::ToExecutable(instructions.ptr());
       code.set_instructions(instructions);
     }
+    if (FLAG_disassemble) {
+      OS::PrintErr("Disassemble:\n");
+      code.Disassemble();
+    }
   }
 
   void AddPcRelativeCallTargetAt(intptr_t offset,
@@ -192,9 +206,9 @@
     typedef intptr_t (*Fun)() DART_UNUSED;
 #if defined(TARGET_ARCH_X64)
     EXPECT_EQ(42, reinterpret_cast<Fun>(entrypoint)());
-#elif defined(TARGET_ARCH_ARM)
+#elif defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_RISCV32)
     EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Fun, entrypoint));
-#elif defined(TARGET_ARCH_ARM64)
+#elif defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_RISCV64)
     EXPECT_EQ(42, EXECUTE_TEST_CODE_INT64(Fun, entrypoint));
 #endif
   }
@@ -275,8 +289,8 @@
   // instruction is emitted (not taking into account that the next instruction
   // might actually make some of those unresolved calls resolved).
   helper.CreateInstructions({
-      16,  // caller (call instruction @helper.kOffsetOfCall)
-      fmax - (16 - helper.kOffsetOfCall) - 8,  // 8 bytes less than maximum gap
+      20,  // caller (call instruction @helper.kOffsetOfCall)
+      fmax - (20 - helper.kOffsetOfCall) - 8,  // 8 bytes less than maximum gap
       8                                        // forward call target
   });
   helper.EmitPcRelativeCallFunction(0, 2);
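
The 16 to 20 bumps throughout these tests track the caller blob growing by one instruction (the RA push) on RISC-V. The filler is sized so the forward target stays just inside the maximum pc-relative distance; the arithmetic, with illustrative numbers:

    #include <cassert>

    int main() {
      const long fmax = 1 << 20;     // stand-in for FLAG_upper_pc_relative_call_distance
      const long caller_size = 20;   // was 16 before the extra RA push
      const long kOffsetOfCall = 4;  // call instruction's offset inside the caller
      const long filler = fmax - (caller_size - kOffsetOfCall) - 8;
      // Distance from the call instruction to the start of the target:
      const long distance = (caller_size - kOffsetOfCall) + filler;
      assert(distance == fmax - 8);  // 8 bytes inside the maximum gap
      return 0;
    }
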
@@ -301,8 +315,8 @@
   const intptr_t fmax = FLAG_upper_pc_relative_call_distance;
 
   helper.CreateInstructions({
-      16,  // caller (call instruction @helper.kOffsetOfCall)
-      fmax - (16 - helper.kOffsetOfCall) + 4,  // 4 bytes above maximum gap
+      20,  // caller (call instruction @helper.kOffsetOfCall)
+      fmax - (20 - helper.kOffsetOfCall) + 4,  // 4 bytes above maximum gap
       8                                        // forwards call target
   });
   helper.EmitPcRelativeCallFunction(0, 2);
@@ -333,7 +347,7 @@
   helper.CreateInstructions({
       8,                                // backwards call target
       bmax - 8 - helper.kOffsetOfCall,  // maximize out backwards call range
-      16  // caller (call instruction @helper.kOffsetOfCall)
+      20  // caller (call instruction @helper.kOffsetOfCall)
   });
   helper.EmitReturn42Function(0);
   helper.EmitPcRelativeCallFunction(2, 0);
@@ -360,8 +374,8 @@
   helper.CreateInstructions({
       8,                                    // backward call target
       bmax - 8 - helper.kOffsetOfCall + 4,  // 4 bytes exceeding backwards range
-      16,  // caller (call instruction @helper.kOffsetOfCall)
-      fmax - (16 - helper.kOffsetOfCall) -
+      20,  // caller (call instruction @helper.kOffsetOfCall)
+      fmax - (20 - helper.kOffsetOfCall) -
           4,  // 4 bytes less than forward range
       4,
       4,  // out-of-range, so trampoline has to be inserted before this
@@ -399,7 +413,7 @@
   helper.CreateInstructions({
       8,                                    // backwards call target
       bmax - 8 - helper.kOffsetOfCall + 4,  // 4 bytes exceeding backwards range
-      16,  // caller (call instruction @helper.kOffsetOfCall)
+      20,  // caller (call instruction @helper.kOffsetOfCall)
       4,
   });
   helper.EmitReturn42Function(0);
diff --git a/runtime/vm/compiler/runtime_offsets_extracted.h b/runtime/vm/compiler/runtime_offsets_extracted.h
index fccacc2..adeab04 100644
--- a/runtime/vm/compiler/runtime_offsets_extracted.h
+++ b/runtime/vm/compiler/runtime_offsets_extracted.h
@@ -3432,6 +3432,1146 @@
     WeakSerializationReference_InstanceSize = 16;
 #endif  // defined(TARGET_ARCH_ARM64) && defined(DART_COMPRESSED_POINTERS)
 
+#if defined(TARGET_ARCH_RISCV32) && !defined(DART_COMPRESSED_POINTERS)
+static constexpr dart::compiler::target::word Function_usage_counter_offset =
+    72;
+static constexpr dart::compiler::target::word
+    ICData_receivers_static_type_offset = 16;
+static constexpr dart::compiler::target::word Array_elements_start_offset = 12;
+static constexpr dart::compiler::target::word Array_element_size = 4;
+static constexpr dart::compiler::target::word ClassTable_elements_start_offset =
+    0;
+static constexpr dart::compiler::target::word ClassTable_element_size = 1;
+static constexpr dart::compiler::target::word Code_elements_start_offset = 96;
+static constexpr dart::compiler::target::word Code_element_size = 4;
+static constexpr dart::compiler::target::word Context_elements_start_offset =
+    12;
+static constexpr dart::compiler::target::word Context_element_size = 4;
+static constexpr dart::compiler::target::word
+    ContextScope_elements_start_offset = 12;
+static constexpr dart::compiler::target::word ContextScope_element_size = 32;
+static constexpr dart::compiler::target::word
+    ExceptionHandlers_elements_start_offset = 12;
+static constexpr dart::compiler::target::word ExceptionHandlers_element_size =
+    12;
+static constexpr dart::compiler::target::word ObjectPool_elements_start_offset =
+    8;
+static constexpr dart::compiler::target::word ObjectPool_element_size = 4;
+static constexpr dart::compiler::target::word
+    OneByteString_elements_start_offset = 12;
+static constexpr dart::compiler::target::word OneByteString_element_size = 1;
+static constexpr dart::compiler::target::word
+    TypeArguments_elements_start_offset = 20;
+static constexpr dart::compiler::target::word TypeArguments_element_size = 4;
+static constexpr dart::compiler::target::word
+    TwoByteString_elements_start_offset = 12;
+static constexpr dart::compiler::target::word TwoByteString_element_size = 2;
+static constexpr dart::compiler::target::word Array_kMaxElements = 268435455;
+static constexpr dart::compiler::target::word Array_kMaxNewSpaceElements =
+    65533;
+static constexpr dart::compiler::target::word Context_kMaxElements = 268435455;
+static constexpr dart::compiler::target::word
+    Instructions_kMonomorphicEntryOffsetJIT = 6;
+static constexpr dart::compiler::target::word
+    Instructions_kPolymorphicEntryOffsetJIT = 42;
+static constexpr dart::compiler::target::word
+    Instructions_kMonomorphicEntryOffsetAOT = 6;
+static constexpr dart::compiler::target::word
+    Instructions_kPolymorphicEntryOffsetAOT = 16;
+static constexpr dart::compiler::target::word
+    Instructions_kBarePayloadAlignment = 4;
+static constexpr dart::compiler::target::word
+    Instructions_kNonBarePayloadAlignment = 4;
+static constexpr dart::compiler::target::word OldPage_kBytesPerCardLog2 = 9;
+static constexpr dart::compiler::target::word
+    NativeEntry_kNumCallWrapperArguments = 2;
+static constexpr dart::compiler::target::word String_kMaxElements = 536870911;
+static constexpr dart::compiler::target::word
+    SubtypeTestCache_kFunctionTypeArguments = 5;
+static constexpr dart::compiler::target::word
+    SubtypeTestCache_kInstanceCidOrSignature = 1;
+static constexpr dart::compiler::target::word
+    SubtypeTestCache_kDestinationType = 2;
+static constexpr dart::compiler::target::word
+    SubtypeTestCache_kInstanceDelayedFunctionTypeArguments = 7;
+static constexpr dart::compiler::target::word
+    SubtypeTestCache_kInstanceParentFunctionTypeArguments = 6;
+static constexpr dart::compiler::target::word
+    SubtypeTestCache_kInstanceTypeArguments = 3;
+static constexpr dart::compiler::target::word
+    SubtypeTestCache_kInstantiatorTypeArguments = 4;
+static constexpr dart::compiler::target::word
+    SubtypeTestCache_kTestEntryLength = 8;
+static constexpr dart::compiler::target::word SubtypeTestCache_kTestResult = 0;
+static constexpr dart::compiler::target::word TypeArguments_kMaxElements =
+    268435455;
+static constexpr dart::compiler::target::word
+    AbstractType_type_test_stub_entry_point_offset = 4;
+static constexpr dart::compiler::target::word ArgumentsDescriptor_count_offset =
+    16;
+static constexpr dart::compiler::target::word ArgumentsDescriptor_size_offset =
+    20;
+static constexpr dart::compiler::target::word
+    ArgumentsDescriptor_first_named_entry_offset = 28;
+static constexpr dart::compiler::target::word
+    ArgumentsDescriptor_named_entry_size = 8;
+static constexpr dart::compiler::target::word ArgumentsDescriptor_name_offset =
+    0;
+static constexpr dart::compiler::target::word
+    ArgumentsDescriptor_position_offset = 4;
+static constexpr dart::compiler::target::word
+    ArgumentsDescriptor_positional_count_offset = 24;
+static constexpr dart::compiler::target::word
+    ArgumentsDescriptor_type_args_len_offset = 12;
+static constexpr dart::compiler::target::word Array_data_offset = 12;
+static constexpr dart::compiler::target::word Array_length_offset = 8;
+static constexpr dart::compiler::target::word Array_tags_offset = 0;
+static constexpr dart::compiler::target::word Array_type_arguments_offset = 4;
+static constexpr dart::compiler::target::word Class_declaration_type_offset =
+    52;
+static constexpr dart::compiler::target::word Class_num_type_arguments_offset =
+    88;
+static constexpr dart::compiler::target::word Class_super_type_offset = 44;
+static constexpr dart::compiler::target::word
+    Class_host_type_arguments_field_offset_in_words_offset = 100;
+static constexpr dart::compiler::target::word
+    SharedClassTable_class_heap_stats_table_offset = 0;
+static constexpr dart::compiler::target::word Closure_context_offset = 20;
+static constexpr dart::compiler::target::word
+    Closure_delayed_type_arguments_offset = 12;
+static constexpr dart::compiler::target::word Closure_function_offset = 16;
+static constexpr dart::compiler::target::word
+    Closure_function_type_arguments_offset = 8;
+static constexpr dart::compiler::target::word Closure_hash_offset = 24;
+static constexpr dart::compiler::target::word
+    Closure_instantiator_type_arguments_offset = 4;
+static constexpr dart::compiler::target::word
+    ClosureData_default_type_arguments_kind_offset = 16;
+static constexpr dart::compiler::target::word Code_object_pool_offset = 20;
+static constexpr dart::compiler::target::word Code_saved_instructions_offset =
+    24;
+static constexpr dart::compiler::target::word Code_owner_offset = 28;
+static constexpr dart::compiler::target::word Context_num_variables_offset = 4;
+static constexpr dart::compiler::target::word Context_parent_offset = 8;
+static constexpr dart::compiler::target::word Double_value_offset = 8;
+static constexpr dart::compiler::target::word
+    ExternalOneByteString_external_data_offset = 12;
+static constexpr dart::compiler::target::word
+    ExternalTwoByteString_external_data_offset = 12;
+static constexpr dart::compiler::target::word Float32x4_value_offset = 8;
+static constexpr dart::compiler::target::word Float64x2_value_offset = 8;
+static constexpr dart::compiler::target::word
+    Field_initializer_function_offset = 16;
+static constexpr dart::compiler::target::word
+    Field_host_offset_or_field_id_offset = 20;
+static constexpr dart::compiler::target::word Field_guarded_cid_offset = 44;
+static constexpr dart::compiler::target::word
+    Field_guarded_list_length_in_object_offset_offset = 52;
+static constexpr dart::compiler::target::word Field_guarded_list_length_offset =
+    24;
+static constexpr dart::compiler::target::word Field_is_nullable_offset = 46;
+static constexpr dart::compiler::target::word Field_kind_bits_offset = 54;
+static constexpr dart::compiler::target::word Function_code_offset = 32;
+static constexpr dart::compiler::target::word Function_data_offset = 24;
+static constexpr dart::compiler::target::word Function_entry_point_offset[] = {
+    4, 8};
+static constexpr dart::compiler::target::word Function_kind_tag_offset = 64;
+static constexpr dart::compiler::target::word Function_packed_fields_offset =
+    83;
+static constexpr dart::compiler::target::word Function_signature_offset = 20;
+static constexpr dart::compiler::target::word FutureOr_type_arguments_offset =
+    4;
+static constexpr dart::compiler::target::word GrowableObjectArray_data_offset =
+    12;
+static constexpr dart::compiler::target::word
+    GrowableObjectArray_length_offset = 8;
+static constexpr dart::compiler::target::word
+    GrowableObjectArray_type_arguments_offset = 4;
+static constexpr dart::compiler::target::word OldPage_card_table_offset = 20;
+static constexpr dart::compiler::target::word
+    CallSiteData_arguments_descriptor_offset = 8;
+static constexpr dart::compiler::target::word ICData_NumArgsTestedMask = 3;
+static constexpr dart::compiler::target::word ICData_NumArgsTestedShift = 0;
+static constexpr dart::compiler::target::word ICData_entries_offset = 12;
+static constexpr dart::compiler::target::word ICData_owner_offset = 20;
+static constexpr dart::compiler::target::word ICData_state_bits_offset = 28;
+static constexpr dart::compiler::target::word Int32x4_value_offset = 8;
+static constexpr dart::compiler::target::word Isolate_current_tag_offset = 24;
+static constexpr dart::compiler::target::word Isolate_default_tag_offset = 28;
+static constexpr dart::compiler::target::word Isolate_ic_miss_code_offset = 32;
+static constexpr dart::compiler::target::word IsolateGroup_object_store_offset =
+    20;
+static constexpr dart::compiler::target::word
+    IsolateGroup_shared_class_table_offset = 8;
+static constexpr dart::compiler::target::word
+    IsolateGroup_cached_class_table_table_offset = 16;
+static constexpr dart::compiler::target::word Isolate_single_step_offset = 40;
+static constexpr dart::compiler::target::word Isolate_user_tag_offset = 20;
+static constexpr dart::compiler::target::word LinkedHashBase_data_offset = 12;
+static constexpr dart::compiler::target::word
+    ImmutableLinkedHashBase_data_offset = 12;
+static constexpr dart::compiler::target::word
+    LinkedHashBase_deleted_keys_offset = 20;
+static constexpr dart::compiler::target::word LinkedHashBase_hash_mask_offset =
+    8;
+static constexpr dart::compiler::target::word LinkedHashBase_index_offset = 24;
+static constexpr dart::compiler::target::word
+    LinkedHashBase_type_arguments_offset = 4;
+static constexpr dart::compiler::target::word LinkedHashBase_used_data_offset =
+    16;
+static constexpr dart::compiler::target::word LocalHandle_ptr_offset = 0;
+static constexpr dart::compiler::target::word
+    MarkingStackBlock_pointers_offset = 8;
+static constexpr dart::compiler::target::word MarkingStackBlock_top_offset = 4;
+static constexpr dart::compiler::target::word MegamorphicCache_buckets_offset =
+    12;
+static constexpr dart::compiler::target::word MegamorphicCache_mask_offset = 16;
+static constexpr dart::compiler::target::word Mint_value_offset = 8;
+static constexpr dart::compiler::target::word NativeArguments_argc_tag_offset =
+    4;
+static constexpr dart::compiler::target::word NativeArguments_argv_offset = 8;
+static constexpr dart::compiler::target::word NativeArguments_retval_offset =
+    12;
+static constexpr dart::compiler::target::word NativeArguments_thread_offset = 0;
+static constexpr dart::compiler::target::word ObjectStore_double_type_offset =
+    160;
+static constexpr dart::compiler::target::word ObjectStore_int_type_offset = 116;
+static constexpr dart::compiler::target::word ObjectStore_string_type_offset =
+    180;
+static constexpr dart::compiler::target::word ObjectStore_type_type_offset =
+    104;
+static constexpr dart::compiler::target::word OneByteString_data_offset = 12;
+static constexpr dart::compiler::target::word PointerBase_data_field_offset = 4;
+static constexpr dart::compiler::target::word Pointer_type_arguments_offset = 8;
+static constexpr dart::compiler::target::word
+    SingleTargetCache_entry_point_offset = 8;
+static constexpr dart::compiler::target::word
+    SingleTargetCache_lower_limit_offset = 12;
+static constexpr dart::compiler::target::word SingleTargetCache_target_offset =
+    4;
+static constexpr dart::compiler::target::word
+    SingleTargetCache_upper_limit_offset = 14;
+static constexpr dart::compiler::target::word StoreBufferBlock_pointers_offset =
+    8;
+static constexpr dart::compiler::target::word StoreBufferBlock_top_offset = 4;
+static constexpr dart::compiler::target::word String_hash_offset = 8;
+static constexpr dart::compiler::target::word String_length_offset = 4;
+static constexpr dart::compiler::target::word SubtypeTestCache_cache_offset = 4;
+static constexpr dart::compiler::target::word
+    Thread_AllocateArray_entry_point_offset = 384;
+static constexpr dart::compiler::target::word Thread_active_exception_offset =
+    800;
+static constexpr dart::compiler::target::word Thread_active_stacktrace_offset =
+    804;
+static constexpr dart::compiler::target::word
+    Thread_array_write_barrier_code_offset = 128;
+static constexpr dart::compiler::target::word
+    Thread_array_write_barrier_entry_point_offset = 276;
+static constexpr dart::compiler::target::word
+    Thread_allocate_mint_with_fpu_regs_entry_point_offset = 284;
+static constexpr dart::compiler::target::word
+    Thread_allocate_mint_with_fpu_regs_stub_offset = 188;
+static constexpr dart::compiler::target::word
+    Thread_allocate_mint_without_fpu_regs_entry_point_offset = 288;
+static constexpr dart::compiler::target::word
+    Thread_allocate_mint_without_fpu_regs_stub_offset = 192;
+static constexpr dart::compiler::target::word
+    Thread_allocate_object_entry_point_offset = 292;
+static constexpr dart::compiler::target::word
+    Thread_allocate_object_stub_offset = 196;
+static constexpr dart::compiler::target::word
+    Thread_allocate_object_parameterized_entry_point_offset = 296;
+static constexpr dart::compiler::target::word
+    Thread_allocate_object_parameterized_stub_offset = 200;
+static constexpr dart::compiler::target::word
+    Thread_allocate_object_slow_entry_point_offset = 300;
+static constexpr dart::compiler::target::word
+    Thread_allocate_object_slow_stub_offset = 204;
+static constexpr dart::compiler::target::word Thread_api_top_scope_offset = 840;
+static constexpr dart::compiler::target::word
+    Thread_auto_scope_native_wrapper_entry_point_offset = 348;
+static constexpr dart::compiler::target::word Thread_bool_false_offset = 120;
+static constexpr dart::compiler::target::word Thread_bool_true_offset = 116;
+static constexpr dart::compiler::target::word
+    Thread_bootstrap_native_wrapper_entry_point_offset = 340;
+static constexpr dart::compiler::target::word
+    Thread_call_to_runtime_entry_point_offset = 280;
+static constexpr dart::compiler::target::word
+    Thread_call_to_runtime_stub_offset = 144;
+static constexpr dart::compiler::target::word Thread_dart_stream_offset = 864;
+static constexpr dart::compiler::target::word
+    Thread_dispatch_table_array_offset = 44;
+static constexpr dart::compiler::target::word
+    Thread_double_truncate_round_supported_offset = 844;
+static constexpr dart::compiler::target::word Thread_optimize_entry_offset =
+    320;
+static constexpr dart::compiler::target::word Thread_optimize_stub_offset = 232;
+static constexpr dart::compiler::target::word Thread_deoptimize_entry_offset =
+    324;
+static constexpr dart::compiler::target::word Thread_deoptimize_stub_offset =
+    236;
+static constexpr dart::compiler::target::word Thread_double_abs_address_offset =
+    364;
+static constexpr dart::compiler::target::word
+    Thread_double_negate_address_offset = 360;
+static constexpr dart::compiler::target::word Thread_end_offset = 52;
+static constexpr dart::compiler::target::word
+    Thread_enter_safepoint_stub_offset = 256;
+static constexpr dart::compiler::target::word Thread_execution_state_offset =
+    820;
+static constexpr dart::compiler::target::word
+    Thread_exit_safepoint_stub_offset = 260;
+static constexpr dart::compiler::target::word
+    Thread_exit_safepoint_ignore_unwind_in_progress_stub_offset = 264;
+static constexpr dart::compiler::target::word
+    Thread_call_native_through_safepoint_stub_offset = 268;
+static constexpr dart::compiler::target::word
+    Thread_call_native_through_safepoint_entry_point_offset = 328;
+static constexpr dart::compiler::target::word
+    Thread_fix_allocation_stub_code_offset = 136;
+static constexpr dart::compiler::target::word
+    Thread_fix_callers_target_code_offset = 132;
+static constexpr dart::compiler::target::word
+    Thread_float_absolute_address_offset = 376;
+static constexpr dart::compiler::target::word
+    Thread_float_negate_address_offset = 372;
+static constexpr dart::compiler::target::word Thread_float_not_address_offset =
+    368;
+static constexpr dart::compiler::target::word
+    Thread_float_zerow_address_offset = 380;
+static constexpr dart::compiler::target::word Thread_global_object_pool_offset =
+    808;
+static constexpr dart::compiler::target::word
+    Thread_invoke_dart_code_stub_offset = 140;
+static constexpr dart::compiler::target::word Thread_exit_through_ffi_offset =
+    836;
+static constexpr dart::compiler::target::word Thread_isolate_offset = 40;
+static constexpr dart::compiler::target::word Thread_isolate_group_offset = 868;
+static constexpr dart::compiler::target::word Thread_field_table_values_offset =
+    64;
+static constexpr dart::compiler::target::word
+    Thread_lazy_deopt_from_return_stub_offset = 240;
+static constexpr dart::compiler::target::word
+    Thread_lazy_deopt_from_throw_stub_offset = 244;
+static constexpr dart::compiler::target::word
+    Thread_lazy_specialize_type_test_stub_offset = 252;
+static constexpr dart::compiler::target::word
+    Thread_marking_stack_block_offset = 80;
+static constexpr dart::compiler::target::word
+    Thread_megamorphic_call_checked_entry_offset = 312;
+static constexpr dart::compiler::target::word
+    Thread_switchable_call_miss_entry_offset = 316;
+static constexpr dart::compiler::target::word
+    Thread_switchable_call_miss_stub_offset = 216;
+static constexpr dart::compiler::target::word
+    Thread_no_scope_native_wrapper_entry_point_offset = 344;
+static constexpr dart::compiler::target::word
+    Thread_late_initialization_error_shared_with_fpu_regs_stub_offset = 152;
+static constexpr dart::compiler::target::word
+    Thread_late_initialization_error_shared_without_fpu_regs_stub_offset = 148;
+static constexpr dart::compiler::target::word
+    Thread_null_error_shared_with_fpu_regs_stub_offset = 160;
+static constexpr dart::compiler::target::word
+    Thread_null_error_shared_without_fpu_regs_stub_offset = 156;
+static constexpr dart::compiler::target::word
+    Thread_null_arg_error_shared_with_fpu_regs_stub_offset = 168;
+static constexpr dart::compiler::target::word
+    Thread_null_arg_error_shared_without_fpu_regs_stub_offset = 164;
+static constexpr dart::compiler::target::word
+    Thread_null_cast_error_shared_with_fpu_regs_stub_offset = 176;
+static constexpr dart::compiler::target::word
+    Thread_null_cast_error_shared_without_fpu_regs_stub_offset = 172;
+static constexpr dart::compiler::target::word
+    Thread_range_error_shared_with_fpu_regs_stub_offset = 184;
+static constexpr dart::compiler::target::word
+    Thread_range_error_shared_without_fpu_regs_stub_offset = 180;
+static constexpr dart::compiler::target::word Thread_object_null_offset = 112;
+static constexpr dart::compiler::target::word
+    Thread_predefined_symbols_address_offset = 352;
+static constexpr dart::compiler::target::word Thread_resume_pc_offset = 812;
+static constexpr dart::compiler::target::word
+    Thread_saved_shadow_call_stack_offset = 816;
+static constexpr dart::compiler::target::word Thread_safepoint_state_offset =
+    824;
+static constexpr dart::compiler::target::word
+    Thread_slow_type_test_stub_offset = 248;
+static constexpr dart::compiler::target::word
+    Thread_slow_type_test_entry_point_offset = 336;
+static constexpr dart::compiler::target::word Thread_stack_limit_offset = 28;
+static constexpr dart::compiler::target::word Thread_saved_stack_limit_offset =
+    56;
+static constexpr dart::compiler::target::word
+    Thread_stack_overflow_flags_offset = 60;
+static constexpr dart::compiler::target::word
+    Thread_stack_overflow_shared_with_fpu_regs_entry_point_offset = 308;
+static constexpr dart::compiler::target::word
+    Thread_stack_overflow_shared_with_fpu_regs_stub_offset = 212;
+static constexpr dart::compiler::target::word
+    Thread_stack_overflow_shared_without_fpu_regs_entry_point_offset = 304;
+static constexpr dart::compiler::target::word
+    Thread_stack_overflow_shared_without_fpu_regs_stub_offset = 208;
+static constexpr dart::compiler::target::word Thread_store_buffer_block_offset =
+    76;
+static constexpr dart::compiler::target::word
+    Thread_top_exit_frame_info_offset = 72;
+static constexpr dart::compiler::target::word Thread_top_offset = 48;
+static constexpr dart::compiler::target::word Thread_top_resource_offset = 16;
+static constexpr dart::compiler::target::word
+    Thread_unboxed_int64_runtime_arg_offset = 96;
+static constexpr dart::compiler::target::word
+    Thread_unboxed_double_runtime_arg_offset = 104;
+static constexpr dart::compiler::target::word Thread_vm_tag_offset = 88;
+static constexpr dart::compiler::target::word Thread_write_barrier_code_offset =
+    124;
+static constexpr dart::compiler::target::word
+    Thread_write_barrier_entry_point_offset = 272;
+static constexpr dart::compiler::target::word Thread_write_barrier_mask_offset =
+    32;
+static constexpr dart::compiler::target::word Thread_heap_base_offset = 36;
+static constexpr dart::compiler::target::word Thread_callback_code_offset = 828;
+static constexpr dart::compiler::target::word
+    Thread_callback_stack_return_offset = 832;
+static constexpr dart::compiler::target::word Thread_random_offset = 848;
+static constexpr dart::compiler::target::word
+    Thread_jump_to_frame_entry_point_offset = 332;
+static constexpr dart::compiler::target::word Thread_tsan_utils_offset = 856;
+static constexpr dart::compiler::target::word TsanUtils_setjmp_function_offset =
+    0;
+static constexpr dart::compiler::target::word TsanUtils_setjmp_buffer_offset =
+    4;
+static constexpr dart::compiler::target::word TsanUtils_exception_pc_offset = 8;
+static constexpr dart::compiler::target::word TsanUtils_exception_sp_offset =
+    12;
+static constexpr dart::compiler::target::word TsanUtils_exception_fp_offset =
+    16;
+static constexpr dart::compiler::target::word TimelineStream_enabled_offset = 8;
+static constexpr dart::compiler::target::word TwoByteString_data_offset = 12;
+static constexpr dart::compiler::target::word Type_arguments_offset = 12;
+static constexpr dart::compiler::target::word Type_hash_offset = 16;
+static constexpr dart::compiler::target::word Type_type_class_id_offset = 20;
+static constexpr dart::compiler::target::word Type_type_state_offset = 22;
+static constexpr dart::compiler::target::word Type_nullability_offset = 23;
+static constexpr dart::compiler::target::word FunctionType_hash_offset = 28;
+static constexpr dart::compiler::target::word
+    FunctionType_named_parameter_names_offset = 24;
+static constexpr dart::compiler::target::word FunctionType_nullability_offset =
+    39;
+static constexpr dart::compiler::target::word
+    FunctionType_packed_parameter_counts_offset = 32;
+static constexpr dart::compiler::target::word
+    FunctionType_packed_type_parameter_counts_offset = 36;
+static constexpr dart::compiler::target::word
+    FunctionType_parameter_types_offset = 20;
+static constexpr dart::compiler::target::word
+    FunctionType_type_parameters_offset = 12;
+static constexpr dart::compiler::target::word
+    TypeParameter_parameterized_class_id_offset = 20;
+static constexpr dart::compiler::target::word TypeParameter_index_offset = 23;
+static constexpr dart::compiler::target::word TypeParameter_nullability_offset =
+    25;
+static constexpr dart::compiler::target::word
+    TypeArguments_instantiations_offset = 4;
+static constexpr dart::compiler::target::word TypeArguments_length_offset = 8;
+static constexpr dart::compiler::target::word TypeArguments_nullability_offset =
+    16;
+static constexpr dart::compiler::target::word TypeArguments_types_offset = 20;
+static constexpr dart::compiler::target::word TypeParameters_names_offset = 4;
+static constexpr dart::compiler::target::word TypeParameters_flags_offset = 8;
+static constexpr dart::compiler::target::word TypeParameters_bounds_offset = 12;
+static constexpr dart::compiler::target::word TypeParameters_defaults_offset =
+    16;
+static constexpr dart::compiler::target::word TypeParameter_bound_offset = 16;
+static constexpr dart::compiler::target::word TypeParameter_flags_offset = 24;
+static constexpr dart::compiler::target::word TypeRef_type_offset = 12;
+static constexpr dart::compiler::target::word TypedDataBase_length_offset = 8;
+static constexpr dart::compiler::target::word TypedDataView_data_offset = 12;
+static constexpr dart::compiler::target::word
+    TypedDataView_offset_in_bytes_offset = 16;
+static constexpr dart::compiler::target::word TypedData_data_offset = 12;
+static constexpr dart::compiler::target::word
+    UnhandledException_exception_offset = 4;
+static constexpr dart::compiler::target::word
+    UnhandledException_stacktrace_offset = 8;
+static constexpr dart::compiler::target::word UserTag_tag_offset = 8;
+static constexpr dart::compiler::target::word
+    MonomorphicSmiableCall_expected_cid_offset = 4;
+static constexpr dart::compiler::target::word
+    MonomorphicSmiableCall_entrypoint_offset = 8;
+static constexpr dart::compiler::target::word WeakProperty_key_offset = 4;
+static constexpr dart::compiler::target::word WeakProperty_value_offset = 8;
+static constexpr dart::compiler::target::word Code_entry_point_offset[] = {
+    4, 12, 8, 16};
+static constexpr dart::compiler::target::word
+    Thread_write_barrier_wrappers_thread_offset[] = {
+        -1,  -1,  -1, -1, -1, 728, 732, 736, -1,  -1,  740,
+        744, 748, -1, -1, -1, 752, 756, 760, 764, 768, 772,
+        776, 780, -1, -1, -1, -1,  784, 788, 792, 796};
+static constexpr dart::compiler::target::word AbstractType_InstanceSize = 12;
+static constexpr dart::compiler::target::word ApiError_InstanceSize = 8;
+static constexpr dart::compiler::target::word Array_header_size = 12;
+static constexpr dart::compiler::target::word Bool_InstanceSize = 8;
+static constexpr dart::compiler::target::word Capability_InstanceSize = 16;
+static constexpr dart::compiler::target::word Class_InstanceSize = 112;
+static constexpr dart::compiler::target::word Closure_InstanceSize = 28;
+static constexpr dart::compiler::target::word ClosureData_InstanceSize = 20;
+static constexpr dart::compiler::target::word CodeSourceMap_HeaderSize = 8;
+static constexpr dart::compiler::target::word
+    CompressedStackMaps_ObjectHeaderSize = 4;
+static constexpr dart::compiler::target::word
+    CompressedStackMaps_PayloadHeaderSize = 4;
+static constexpr dart::compiler::target::word Context_header_size = 12;
+static constexpr dart::compiler::target::word Double_InstanceSize = 16;
+static constexpr dart::compiler::target::word DynamicLibrary_InstanceSize = 8;
+static constexpr dart::compiler::target::word
+    ExternalOneByteString_InstanceSize = 20;
+static constexpr dart::compiler::target::word
+    ExternalTwoByteString_InstanceSize = 20;
+static constexpr dart::compiler::target::word ExternalTypedData_InstanceSize =
+    12;
+static constexpr dart::compiler::target::word FfiTrampolineData_InstanceSize =
+    28;
+static constexpr dart::compiler::target::word Field_InstanceSize = 60;
+static constexpr dart::compiler::target::word Float32x4_InstanceSize = 24;
+static constexpr dart::compiler::target::word Float64x2_InstanceSize = 24;
+static constexpr dart::compiler::target::word Function_InstanceSize = 88;
+static constexpr dart::compiler::target::word FunctionType_InstanceSize = 40;
+static constexpr dart::compiler::target::word FutureOr_InstanceSize = 8;
+static constexpr dart::compiler::target::word GrowableObjectArray_InstanceSize =
+    16;
+static constexpr dart::compiler::target::word ICData_InstanceSize = 32;
+static constexpr dart::compiler::target::word Instance_InstanceSize = 4;
+static constexpr dart::compiler::target::word Instructions_UnalignedHeaderSize =
+    8;
+static constexpr dart::compiler::target::word
+    InstructionsSection_UnalignedHeaderSize = 20;
+static constexpr dart::compiler::target::word InstructionsTable_InstanceSize =
+    24;
+static constexpr dart::compiler::target::word Int32x4_InstanceSize = 24;
+static constexpr dart::compiler::target::word Integer_InstanceSize = 4;
+static constexpr dart::compiler::target::word KernelProgramInfo_InstanceSize =
+    60;
+static constexpr dart::compiler::target::word LanguageError_InstanceSize = 28;
+static constexpr dart::compiler::target::word Library_InstanceSize = 88;
+static constexpr dart::compiler::target::word LibraryPrefix_InstanceSize = 20;
+static constexpr dart::compiler::target::word LinkedHashBase_InstanceSize = 28;
+static constexpr dart::compiler::target::word MegamorphicCache_InstanceSize =
+    24;
+static constexpr dart::compiler::target::word Mint_InstanceSize = 16;
+static constexpr dart::compiler::target::word MirrorReference_InstanceSize = 8;
+static constexpr dart::compiler::target::word
+    MonomorphicSmiableCall_InstanceSize = 12;
+static constexpr dart::compiler::target::word Namespace_InstanceSize = 20;
+static constexpr dart::compiler::target::word NativeArguments_StructSize = 16;
+static constexpr dart::compiler::target::word Number_InstanceSize = 4;
+static constexpr dart::compiler::target::word Object_InstanceSize = 4;
+static constexpr dart::compiler::target::word PatchClass_InstanceSize = 24;
+static constexpr dart::compiler::target::word PcDescriptors_HeaderSize = 8;
+static constexpr dart::compiler::target::word Pointer_InstanceSize = 12;
+static constexpr dart::compiler::target::word ReceivePort_InstanceSize = 20;
+static constexpr dart::compiler::target::word RegExp_InstanceSize = 60;
+static constexpr dart::compiler::target::word Script_InstanceSize = 48;
+static constexpr dart::compiler::target::word SendPort_InstanceSize = 24;
+static constexpr dart::compiler::target::word Sentinel_InstanceSize = 4;
+static constexpr dart::compiler::target::word SingleTargetCache_InstanceSize =
+    16;
+static constexpr dart::compiler::target::word StackTrace_InstanceSize = 20;
+static constexpr dart::compiler::target::word String_InstanceSize = 12;
+static constexpr dart::compiler::target::word SubtypeTestCache_InstanceSize = 8;
+static constexpr dart::compiler::target::word LoadingUnit_InstanceSize = 20;
+static constexpr dart::compiler::target::word
+    TransferableTypedData_InstanceSize = 4;
+static constexpr dart::compiler::target::word Type_InstanceSize = 24;
+static constexpr dart::compiler::target::word TypeParameter_InstanceSize = 28;
+static constexpr dart::compiler::target::word TypeParameters_InstanceSize = 20;
+static constexpr dart::compiler::target::word TypeRef_InstanceSize = 16;
+static constexpr dart::compiler::target::word TypedData_HeaderSize = 12;
+static constexpr dart::compiler::target::word TypedDataBase_InstanceSize = 12;
+static constexpr dart::compiler::target::word TypedDataView_InstanceSize = 20;
+static constexpr dart::compiler::target::word UnhandledException_InstanceSize =
+    12;
+static constexpr dart::compiler::target::word UnlinkedCall_InstanceSize = 16;
+static constexpr dart::compiler::target::word UnwindError_InstanceSize = 12;
+static constexpr dart::compiler::target::word UserTag_InstanceSize = 16;
+static constexpr dart::compiler::target::word WeakProperty_InstanceSize = 16;
+static constexpr dart::compiler::target::word
+    WeakSerializationReference_InstanceSize = 12;
+#endif  // defined(TARGET_ARCH_RISCV32) && !defined(DART_COMPRESSED_POINTERS)
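
These generated tables exist because a cross-compiler hosted on, say, x64 cannot apply offsetof() to target structs; field accesses are emitted through these precomputed words instead. A host-side model of the idea (the constant is taken from the RV32 table above; the helper name is made up):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    static const intptr_t kThreadVmTagOffset = 88;  // Thread_vm_tag_offset (RV32)

    // Read a 4-byte target word at base + offset, the way generated code
    // addresses Thread fields on a 32-bit target.
    static uint32_t LoadTargetWord(const uint8_t* base, intptr_t offset) {
      uint32_t value;
      std::memcpy(&value, base + offset, sizeof(value));
      return value;
    }

    int main() {
      uint8_t fake_thread[128] = {};
      const uint32_t tag = 0xdead;
      std::memcpy(fake_thread + kThreadVmTagOffset, &tag, sizeof(tag));
      assert(LoadTargetWord(fake_thread, kThreadVmTagOffset) == 0xdead);
      return 0;
    }
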
+
+#if defined(TARGET_ARCH_RISCV64) && !defined(DART_COMPRESSED_POINTERS)
+static constexpr dart::compiler::target::word Function_usage_counter_offset =
+    112;
+static constexpr dart::compiler::target::word
+    ICData_receivers_static_type_offset = 32;
+static constexpr dart::compiler::target::word Array_elements_start_offset = 24;
+static constexpr dart::compiler::target::word Array_element_size = 8;
+static constexpr dart::compiler::target::word ClassTable_elements_start_offset =
+    0;
+static constexpr dart::compiler::target::word ClassTable_element_size = 1;
+static constexpr dart::compiler::target::word Code_elements_start_offset = 176;
+static constexpr dart::compiler::target::word Code_element_size = 4;
+static constexpr dart::compiler::target::word Context_elements_start_offset =
+    24;
+static constexpr dart::compiler::target::word Context_element_size = 8;
+static constexpr dart::compiler::target::word
+    ContextScope_elements_start_offset = 16;
+static constexpr dart::compiler::target::word ContextScope_element_size = 64;
+static constexpr dart::compiler::target::word
+    ExceptionHandlers_elements_start_offset = 24;
+static constexpr dart::compiler::target::word ExceptionHandlers_element_size =
+    12;
+static constexpr dart::compiler::target::word ObjectPool_elements_start_offset =
+    16;
+static constexpr dart::compiler::target::word ObjectPool_element_size = 8;
+static constexpr dart::compiler::target::word
+    OneByteString_elements_start_offset = 16;
+static constexpr dart::compiler::target::word OneByteString_element_size = 1;
+static constexpr dart::compiler::target::word
+    TypeArguments_elements_start_offset = 40;
+static constexpr dart::compiler::target::word TypeArguments_element_size = 8;
+static constexpr dart::compiler::target::word
+    TwoByteString_elements_start_offset = 16;
+static constexpr dart::compiler::target::word TwoByteString_element_size = 2;
+static constexpr dart::compiler::target::word Array_kMaxElements =
+    576460752303423487;
+static constexpr dart::compiler::target::word Array_kMaxNewSpaceElements =
+    32765;
+static constexpr dart::compiler::target::word Context_kMaxElements =
+    576460752303423487;
+static constexpr dart::compiler::target::word
+    Instructions_kMonomorphicEntryOffsetJIT = 6;
+static constexpr dart::compiler::target::word
+    Instructions_kPolymorphicEntryOffsetJIT = 42;
+static constexpr dart::compiler::target::word
+    Instructions_kMonomorphicEntryOffsetAOT = 6;
+static constexpr dart::compiler::target::word
+    Instructions_kPolymorphicEntryOffsetAOT = 16;
+static constexpr dart::compiler::target::word
+    Instructions_kBarePayloadAlignment = 4;
+static constexpr dart::compiler::target::word
+    Instructions_kNonBarePayloadAlignment = 8;
+static constexpr dart::compiler::target::word OldPage_kBytesPerCardLog2 = 10;
+static constexpr dart::compiler::target::word
+    NativeEntry_kNumCallWrapperArguments = 2;
+static constexpr dart::compiler::target::word String_kMaxElements =
+    2305843009213693951;
+static constexpr dart::compiler::target::word
+    SubtypeTestCache_kFunctionTypeArguments = 5;
+static constexpr dart::compiler::target::word
+    SubtypeTestCache_kInstanceCidOrSignature = 1;
+static constexpr dart::compiler::target::word
+    SubtypeTestCache_kDestinationType = 2;
+static constexpr dart::compiler::target::word
+    SubtypeTestCache_kInstanceDelayedFunctionTypeArguments = 7;
+static constexpr dart::compiler::target::word
+    SubtypeTestCache_kInstanceParentFunctionTypeArguments = 6;
+static constexpr dart::compiler::target::word
+    SubtypeTestCache_kInstanceTypeArguments = 3;
+static constexpr dart::compiler::target::word
+    SubtypeTestCache_kInstantiatorTypeArguments = 4;
+static constexpr dart::compiler::target::word
+    SubtypeTestCache_kTestEntryLength = 8;
+static constexpr dart::compiler::target::word SubtypeTestCache_kTestResult = 0;
+static constexpr dart::compiler::target::word TypeArguments_kMaxElements =
+    576460752303423487;
+static constexpr dart::compiler::target::word
+    AbstractType_type_test_stub_entry_point_offset = 8;
+static constexpr dart::compiler::target::word ArgumentsDescriptor_count_offset =
+    32;
+static constexpr dart::compiler::target::word ArgumentsDescriptor_size_offset =
+    40;
+static constexpr dart::compiler::target::word
+    ArgumentsDescriptor_first_named_entry_offset = 56;
+static constexpr dart::compiler::target::word
+    ArgumentsDescriptor_named_entry_size = 16;
+static constexpr dart::compiler::target::word ArgumentsDescriptor_name_offset =
+    0;
+static constexpr dart::compiler::target::word
+    ArgumentsDescriptor_position_offset = 8;
+static constexpr dart::compiler::target::word
+    ArgumentsDescriptor_positional_count_offset = 48;
+static constexpr dart::compiler::target::word
+    ArgumentsDescriptor_type_args_len_offset = 24;
+static constexpr dart::compiler::target::word Array_data_offset = 24;
+static constexpr dart::compiler::target::word Array_length_offset = 16;
+static constexpr dart::compiler::target::word Array_tags_offset = 0;
+static constexpr dart::compiler::target::word Array_type_arguments_offset = 8;
+static constexpr dart::compiler::target::word Class_declaration_type_offset =
+    104;
+static constexpr dart::compiler::target::word Class_num_type_arguments_offset =
+    164;
+static constexpr dart::compiler::target::word Class_super_type_offset = 88;
+static constexpr dart::compiler::target::word
+    Class_host_type_arguments_field_offset_in_words_offset = 176;
+static constexpr dart::compiler::target::word
+    SharedClassTable_class_heap_stats_table_offset = 0;
+static constexpr dart::compiler::target::word Closure_context_offset = 40;
+static constexpr dart::compiler::target::word
+    Closure_delayed_type_arguments_offset = 24;
+static constexpr dart::compiler::target::word Closure_function_offset = 32;
+static constexpr dart::compiler::target::word
+    Closure_function_type_arguments_offset = 16;
+static constexpr dart::compiler::target::word Closure_hash_offset = 48;
+static constexpr dart::compiler::target::word
+    Closure_instantiator_type_arguments_offset = 8;
+static constexpr dart::compiler::target::word
+    ClosureData_default_type_arguments_kind_offset = 32;
+static constexpr dart::compiler::target::word Code_object_pool_offset = 40;
+static constexpr dart::compiler::target::word Code_saved_instructions_offset =
+    48;
+static constexpr dart::compiler::target::word Code_owner_offset = 56;
+static constexpr dart::compiler::target::word Context_num_variables_offset = 8;
+static constexpr dart::compiler::target::word Context_parent_offset = 16;
+static constexpr dart::compiler::target::word Double_value_offset = 8;
+static constexpr dart::compiler::target::word
+    ExternalOneByteString_external_data_offset = 16;
+static constexpr dart::compiler::target::word
+    ExternalTwoByteString_external_data_offset = 16;
+static constexpr dart::compiler::target::word Float32x4_value_offset = 8;
+static constexpr dart::compiler::target::word Float64x2_value_offset = 8;
+static constexpr dart::compiler::target::word
+    Field_initializer_function_offset = 32;
+static constexpr dart::compiler::target::word
+    Field_host_offset_or_field_id_offset = 40;
+static constexpr dart::compiler::target::word Field_guarded_cid_offset = 80;
+static constexpr dart::compiler::target::word
+    Field_guarded_list_length_in_object_offset_offset = 88;
+static constexpr dart::compiler::target::word Field_guarded_list_length_offset =
+    48;
+static constexpr dart::compiler::target::word Field_is_nullable_offset = 82;
+static constexpr dart::compiler::target::word Field_kind_bits_offset = 90;
+static constexpr dart::compiler::target::word Function_code_offset = 64;
+static constexpr dart::compiler::target::word Function_data_offset = 48;
+static constexpr dart::compiler::target::word Function_entry_point_offset[] = {
+    8, 16};
+static constexpr dart::compiler::target::word Function_kind_tag_offset = 104;
+static constexpr dart::compiler::target::word Function_packed_fields_offset =
+    123;
+static constexpr dart::compiler::target::word Function_signature_offset = 40;
+static constexpr dart::compiler::target::word FutureOr_type_arguments_offset =
+    8;
+static constexpr dart::compiler::target::word GrowableObjectArray_data_offset =
+    24;
+static constexpr dart::compiler::target::word
+    GrowableObjectArray_length_offset = 16;
+static constexpr dart::compiler::target::word
+    GrowableObjectArray_type_arguments_offset = 8;
+static constexpr dart::compiler::target::word OldPage_card_table_offset = 40;
+static constexpr dart::compiler::target::word
+    CallSiteData_arguments_descriptor_offset = 16;
+static constexpr dart::compiler::target::word ICData_NumArgsTestedMask = 3;
+static constexpr dart::compiler::target::word ICData_NumArgsTestedShift = 0;
+static constexpr dart::compiler::target::word ICData_entries_offset = 24;
+static constexpr dart::compiler::target::word ICData_owner_offset = 40;
+static constexpr dart::compiler::target::word ICData_state_bits_offset = 52;
+static constexpr dart::compiler::target::word Int32x4_value_offset = 8;
+static constexpr dart::compiler::target::word Isolate_current_tag_offset = 48;
+static constexpr dart::compiler::target::word Isolate_default_tag_offset = 56;
+static constexpr dart::compiler::target::word Isolate_ic_miss_code_offset = 64;
+static constexpr dart::compiler::target::word IsolateGroup_object_store_offset =
+    40;
+static constexpr dart::compiler::target::word
+    IsolateGroup_shared_class_table_offset = 16;
+static constexpr dart::compiler::target::word
+    IsolateGroup_cached_class_table_table_offset = 32;
+static constexpr dart::compiler::target::word Isolate_single_step_offset = 80;
+static constexpr dart::compiler::target::word Isolate_user_tag_offset = 40;
+static constexpr dart::compiler::target::word LinkedHashBase_data_offset = 24;
+static constexpr dart::compiler::target::word
+    ImmutableLinkedHashBase_data_offset = 24;
+static constexpr dart::compiler::target::word
+    LinkedHashBase_deleted_keys_offset = 40;
+static constexpr dart::compiler::target::word LinkedHashBase_hash_mask_offset =
+    16;
+static constexpr dart::compiler::target::word LinkedHashBase_index_offset = 48;
+static constexpr dart::compiler::target::word
+    LinkedHashBase_type_arguments_offset = 8;
+static constexpr dart::compiler::target::word LinkedHashBase_used_data_offset =
+    32;
+static constexpr dart::compiler::target::word LocalHandle_ptr_offset = 0;
+static constexpr dart::compiler::target::word
+    MarkingStackBlock_pointers_offset = 16;
+static constexpr dart::compiler::target::word MarkingStackBlock_top_offset = 8;
+static constexpr dart::compiler::target::word MegamorphicCache_buckets_offset =
+    24;
+static constexpr dart::compiler::target::word MegamorphicCache_mask_offset = 32;
+static constexpr dart::compiler::target::word Mint_value_offset = 8;
+static constexpr dart::compiler::target::word NativeArguments_argc_tag_offset =
+    8;
+static constexpr dart::compiler::target::word NativeArguments_argv_offset = 16;
+static constexpr dart::compiler::target::word NativeArguments_retval_offset =
+    24;
+static constexpr dart::compiler::target::word NativeArguments_thread_offset = 0;
+static constexpr dart::compiler::target::word ObjectStore_double_type_offset =
+    320;
+static constexpr dart::compiler::target::word ObjectStore_int_type_offset = 232;
+static constexpr dart::compiler::target::word ObjectStore_string_type_offset =
+    360;
+static constexpr dart::compiler::target::word ObjectStore_type_type_offset =
+    208;
+static constexpr dart::compiler::target::word OneByteString_data_offset = 16;
+static constexpr dart::compiler::target::word PointerBase_data_field_offset = 8;
+static constexpr dart::compiler::target::word Pointer_type_arguments_offset =
+    16;
+static constexpr dart::compiler::target::word
+    SingleTargetCache_entry_point_offset = 16;
+static constexpr dart::compiler::target::word
+    SingleTargetCache_lower_limit_offset = 24;
+static constexpr dart::compiler::target::word SingleTargetCache_target_offset =
+    8;
+static constexpr dart::compiler::target::word
+    SingleTargetCache_upper_limit_offset = 26;
+static constexpr dart::compiler::target::word StoreBufferBlock_pointers_offset =
+    16;
+static constexpr dart::compiler::target::word StoreBufferBlock_top_offset = 8;
+static constexpr dart::compiler::target::word String_hash_offset = 4;
+static constexpr dart::compiler::target::word String_length_offset = 8;
+static constexpr dart::compiler::target::word SubtypeTestCache_cache_offset = 8;
+static constexpr dart::compiler::target::word
+    Thread_AllocateArray_entry_point_offset = 744;
+static constexpr dart::compiler::target::word Thread_active_exception_offset =
+    1576;
+static constexpr dart::compiler::target::word Thread_active_stacktrace_offset =
+    1584;
+static constexpr dart::compiler::target::word
+    Thread_array_write_barrier_code_offset = 232;
+static constexpr dart::compiler::target::word
+    Thread_array_write_barrier_entry_point_offset = 528;
+static constexpr dart::compiler::target::word
+    Thread_allocate_mint_with_fpu_regs_entry_point_offset = 544;
+static constexpr dart::compiler::target::word
+    Thread_allocate_mint_with_fpu_regs_stub_offset = 352;
+static constexpr dart::compiler::target::word
+    Thread_allocate_mint_without_fpu_regs_entry_point_offset = 552;
+static constexpr dart::compiler::target::word
+    Thread_allocate_mint_without_fpu_regs_stub_offset = 360;
+static constexpr dart::compiler::target::word
+    Thread_allocate_object_entry_point_offset = 560;
+static constexpr dart::compiler::target::word
+    Thread_allocate_object_stub_offset = 368;
+static constexpr dart::compiler::target::word
+    Thread_allocate_object_parameterized_entry_point_offset = 568;
+static constexpr dart::compiler::target::word
+    Thread_allocate_object_parameterized_stub_offset = 376;
+static constexpr dart::compiler::target::word
+    Thread_allocate_object_slow_entry_point_offset = 576;
+static constexpr dart::compiler::target::word
+    Thread_allocate_object_slow_stub_offset = 384;
+static constexpr dart::compiler::target::word Thread_api_top_scope_offset =
+    1656;
+static constexpr dart::compiler::target::word
+    Thread_auto_scope_native_wrapper_entry_point_offset = 672;
+static constexpr dart::compiler::target::word Thread_bool_false_offset = 216;
+static constexpr dart::compiler::target::word Thread_bool_true_offset = 208;
+static constexpr dart::compiler::target::word
+    Thread_bootstrap_native_wrapper_entry_point_offset = 656;
+static constexpr dart::compiler::target::word
+    Thread_call_to_runtime_entry_point_offset = 536;
+static constexpr dart::compiler::target::word
+    Thread_call_to_runtime_stub_offset = 264;
+static constexpr dart::compiler::target::word Thread_dart_stream_offset = 1696;
+static constexpr dart::compiler::target::word
+    Thread_dispatch_table_array_offset = 88;
+static constexpr dart::compiler::target::word
+    Thread_double_truncate_round_supported_offset = 1664;
+static constexpr dart::compiler::target::word Thread_optimize_entry_offset =
+    616;
+static constexpr dart::compiler::target::word Thread_optimize_stub_offset = 440;
+static constexpr dart::compiler::target::word Thread_deoptimize_entry_offset =
+    624;
+static constexpr dart::compiler::target::word Thread_deoptimize_stub_offset =
+    448;
+static constexpr dart::compiler::target::word Thread_double_abs_address_offset =
+    704;
+static constexpr dart::compiler::target::word
+    Thread_double_negate_address_offset = 696;
+static constexpr dart::compiler::target::word Thread_end_offset = 104;
+static constexpr dart::compiler::target::word
+    Thread_enter_safepoint_stub_offset = 488;
+static constexpr dart::compiler::target::word Thread_execution_state_offset =
+    1616;
+static constexpr dart::compiler::target::word
+    Thread_exit_safepoint_stub_offset = 496;
+static constexpr dart::compiler::target::word
+    Thread_exit_safepoint_ignore_unwind_in_progress_stub_offset = 504;
+static constexpr dart::compiler::target::word
+    Thread_call_native_through_safepoint_stub_offset = 512;
+static constexpr dart::compiler::target::word
+    Thread_call_native_through_safepoint_entry_point_offset = 632;
+static constexpr dart::compiler::target::word
+    Thread_fix_allocation_stub_code_offset = 248;
+static constexpr dart::compiler::target::word
+    Thread_fix_callers_target_code_offset = 240;
+static constexpr dart::compiler::target::word
+    Thread_float_absolute_address_offset = 728;
+static constexpr dart::compiler::target::word
+    Thread_float_negate_address_offset = 720;
+static constexpr dart::compiler::target::word Thread_float_not_address_offset =
+    712;
+static constexpr dart::compiler::target::word
+    Thread_float_zerow_address_offset = 736;
+static constexpr dart::compiler::target::word Thread_global_object_pool_offset =
+    1592;
+static constexpr dart::compiler::target::word
+    Thread_invoke_dart_code_stub_offset = 256;
+static constexpr dart::compiler::target::word Thread_exit_through_ffi_offset =
+    1648;
+static constexpr dart::compiler::target::word Thread_isolate_offset = 80;
+static constexpr dart::compiler::target::word Thread_isolate_group_offset =
+    1704;
+static constexpr dart::compiler::target::word Thread_field_table_values_offset =
+    128;
+static constexpr dart::compiler::target::word
+    Thread_lazy_deopt_from_return_stub_offset = 456;
+static constexpr dart::compiler::target::word
+    Thread_lazy_deopt_from_throw_stub_offset = 464;
+static constexpr dart::compiler::target::word
+    Thread_lazy_specialize_type_test_stub_offset = 480;
+static constexpr dart::compiler::target::word
+    Thread_marking_stack_block_offset = 160;
+static constexpr dart::compiler::target::word
+    Thread_megamorphic_call_checked_entry_offset = 600;
+static constexpr dart::compiler::target::word
+    Thread_switchable_call_miss_entry_offset = 608;
+static constexpr dart::compiler::target::word
+    Thread_switchable_call_miss_stub_offset = 408;
+static constexpr dart::compiler::target::word
+    Thread_no_scope_native_wrapper_entry_point_offset = 664;
+static constexpr dart::compiler::target::word
+    Thread_late_initialization_error_shared_with_fpu_regs_stub_offset = 280;
+static constexpr dart::compiler::target::word
+    Thread_late_initialization_error_shared_without_fpu_regs_stub_offset = 272;
+static constexpr dart::compiler::target::word
+    Thread_null_error_shared_with_fpu_regs_stub_offset = 296;
+static constexpr dart::compiler::target::word
+    Thread_null_error_shared_without_fpu_regs_stub_offset = 288;
+static constexpr dart::compiler::target::word
+    Thread_null_arg_error_shared_with_fpu_regs_stub_offset = 312;
+static constexpr dart::compiler::target::word
+    Thread_null_arg_error_shared_without_fpu_regs_stub_offset = 304;
+static constexpr dart::compiler::target::word
+    Thread_null_cast_error_shared_with_fpu_regs_stub_offset = 328;
+static constexpr dart::compiler::target::word
+    Thread_null_cast_error_shared_without_fpu_regs_stub_offset = 320;
+static constexpr dart::compiler::target::word
+    Thread_range_error_shared_with_fpu_regs_stub_offset = 344;
+static constexpr dart::compiler::target::word
+    Thread_range_error_shared_without_fpu_regs_stub_offset = 336;
+static constexpr dart::compiler::target::word Thread_object_null_offset = 200;
+static constexpr dart::compiler::target::word
+    Thread_predefined_symbols_address_offset = 680;
+static constexpr dart::compiler::target::word Thread_resume_pc_offset = 1600;
+static constexpr dart::compiler::target::word
+    Thread_saved_shadow_call_stack_offset = 1608;
+static constexpr dart::compiler::target::word Thread_safepoint_state_offset =
+    1624;
+static constexpr dart::compiler::target::word
+    Thread_slow_type_test_stub_offset = 472;
+static constexpr dart::compiler::target::word
+    Thread_slow_type_test_entry_point_offset = 648;
+static constexpr dart::compiler::target::word Thread_stack_limit_offset = 56;
+static constexpr dart::compiler::target::word Thread_saved_stack_limit_offset =
+    112;
+static constexpr dart::compiler::target::word
+    Thread_stack_overflow_flags_offset = 120;
+static constexpr dart::compiler::target::word
+    Thread_stack_overflow_shared_with_fpu_regs_entry_point_offset = 592;
+static constexpr dart::compiler::target::word
+    Thread_stack_overflow_shared_with_fpu_regs_stub_offset = 400;
+static constexpr dart::compiler::target::word
+    Thread_stack_overflow_shared_without_fpu_regs_entry_point_offset = 584;
+static constexpr dart::compiler::target::word
+    Thread_stack_overflow_shared_without_fpu_regs_stub_offset = 392;
+static constexpr dart::compiler::target::word Thread_store_buffer_block_offset =
+    152;
+static constexpr dart::compiler::target::word
+    Thread_top_exit_frame_info_offset = 144;
+static constexpr dart::compiler::target::word Thread_top_offset = 96;
+static constexpr dart::compiler::target::word Thread_top_resource_offset = 32;
+static constexpr dart::compiler::target::word
+    Thread_unboxed_int64_runtime_arg_offset = 184;
+static constexpr dart::compiler::target::word
+    Thread_unboxed_double_runtime_arg_offset = 192;
+static constexpr dart::compiler::target::word Thread_vm_tag_offset = 176;
+static constexpr dart::compiler::target::word Thread_write_barrier_code_offset =
+    224;
+static constexpr dart::compiler::target::word
+    Thread_write_barrier_entry_point_offset = 520;
+static constexpr dart::compiler::target::word Thread_write_barrier_mask_offset =
+    64;
+static constexpr dart::compiler::target::word Thread_heap_base_offset = 72;
+static constexpr dart::compiler::target::word Thread_callback_code_offset =
+    1632;
+static constexpr dart::compiler::target::word
+    Thread_callback_stack_return_offset = 1640;
+static constexpr dart::compiler::target::word Thread_random_offset = 1672;
+static constexpr dart::compiler::target::word
+    Thread_jump_to_frame_entry_point_offset = 640;
+static constexpr dart::compiler::target::word Thread_tsan_utils_offset = 1680;
+static constexpr dart::compiler::target::word TsanUtils_setjmp_function_offset =
+    0;
+static constexpr dart::compiler::target::word TsanUtils_setjmp_buffer_offset =
+    8;
+static constexpr dart::compiler::target::word TsanUtils_exception_pc_offset =
+    16;
+static constexpr dart::compiler::target::word TsanUtils_exception_sp_offset =
+    24;
+static constexpr dart::compiler::target::word TsanUtils_exception_fp_offset =
+    32;
+static constexpr dart::compiler::target::word TimelineStream_enabled_offset =
+    16;
+static constexpr dart::compiler::target::word TwoByteString_data_offset = 16;
+static constexpr dart::compiler::target::word Type_arguments_offset = 24;
+static constexpr dart::compiler::target::word Type_hash_offset = 32;
+static constexpr dart::compiler::target::word Type_type_class_id_offset = 40;
+static constexpr dart::compiler::target::word Type_type_state_offset = 42;
+static constexpr dart::compiler::target::word Type_nullability_offset = 43;
+static constexpr dart::compiler::target::word FunctionType_hash_offset = 56;
+static constexpr dart::compiler::target::word
+    FunctionType_named_parameter_names_offset = 48;
+static constexpr dart::compiler::target::word FunctionType_nullability_offset =
+    71;
+static constexpr dart::compiler::target::word
+    FunctionType_packed_parameter_counts_offset = 64;
+static constexpr dart::compiler::target::word
+    FunctionType_packed_type_parameter_counts_offset = 68;
+static constexpr dart::compiler::target::word
+    FunctionType_parameter_types_offset = 40;
+static constexpr dart::compiler::target::word
+    FunctionType_type_parameters_offset = 24;
+static constexpr dart::compiler::target::word
+    TypeParameter_parameterized_class_id_offset = 40;
+static constexpr dart::compiler::target::word TypeParameter_index_offset = 43;
+static constexpr dart::compiler::target::word TypeParameter_nullability_offset =
+    45;
+static constexpr dart::compiler::target::word
+    TypeArguments_instantiations_offset = 8;
+static constexpr dart::compiler::target::word TypeArguments_length_offset = 16;
+static constexpr dart::compiler::target::word TypeArguments_nullability_offset =
+    32;
+static constexpr dart::compiler::target::word TypeArguments_types_offset = 40;
+static constexpr dart::compiler::target::word TypeParameters_names_offset = 8;
+static constexpr dart::compiler::target::word TypeParameters_flags_offset = 16;
+static constexpr dart::compiler::target::word TypeParameters_bounds_offset = 24;
+static constexpr dart::compiler::target::word TypeParameters_defaults_offset =
+    32;
+static constexpr dart::compiler::target::word TypeParameter_bound_offset = 32;
+static constexpr dart::compiler::target::word TypeParameter_flags_offset = 44;
+static constexpr dart::compiler::target::word TypeRef_type_offset = 24;
+static constexpr dart::compiler::target::word TypedDataBase_length_offset = 16;
+static constexpr dart::compiler::target::word TypedDataView_data_offset = 24;
+static constexpr dart::compiler::target::word
+    TypedDataView_offset_in_bytes_offset = 32;
+static constexpr dart::compiler::target::word TypedData_data_offset = 24;
+static constexpr dart::compiler::target::word
+    UnhandledException_exception_offset = 8;
+static constexpr dart::compiler::target::word
+    UnhandledException_stacktrace_offset = 16;
+static constexpr dart::compiler::target::word UserTag_tag_offset = 16;
+static constexpr dart::compiler::target::word
+    MonomorphicSmiableCall_expected_cid_offset = 8;
+static constexpr dart::compiler::target::word
+    MonomorphicSmiableCall_entrypoint_offset = 16;
+static constexpr dart::compiler::target::word WeakProperty_key_offset = 8;
+static constexpr dart::compiler::target::word WeakProperty_value_offset = 16;
+static constexpr dart::compiler::target::word Code_entry_point_offset[] = {
+    8, 24, 16, 32};
+static constexpr dart::compiler::target::word
+    Thread_write_barrier_wrappers_thread_offset[] = {
+        -1,   -1,   -1, -1, -1, 1432, 1440, 1448, -1,   -1,   1456,
+        1464, 1472, -1, -1, -1, 1480, 1488, 1496, 1504, 1512, 1520,
+        1528, 1536, -1, -1, -1, -1,   1544, 1552, 1560, 1568};
+static constexpr dart::compiler::target::word AbstractType_InstanceSize = 24;
+static constexpr dart::compiler::target::word ApiError_InstanceSize = 16;
+static constexpr dart::compiler::target::word Array_header_size = 24;
+static constexpr dart::compiler::target::word Bool_InstanceSize = 16;
+static constexpr dart::compiler::target::word Capability_InstanceSize = 16;
+static constexpr dart::compiler::target::word Class_InstanceSize = 192;
+static constexpr dart::compiler::target::word Closure_InstanceSize = 56;
+static constexpr dart::compiler::target::word ClosureData_InstanceSize = 40;
+static constexpr dart::compiler::target::word CodeSourceMap_HeaderSize = 16;
+static constexpr dart::compiler::target::word
+    CompressedStackMaps_ObjectHeaderSize = 8;
+static constexpr dart::compiler::target::word
+    CompressedStackMaps_PayloadHeaderSize = 4;
+static constexpr dart::compiler::target::word Context_header_size = 24;
+static constexpr dart::compiler::target::word Double_InstanceSize = 16;
+static constexpr dart::compiler::target::word DynamicLibrary_InstanceSize = 16;
+static constexpr dart::compiler::target::word
+    ExternalOneByteString_InstanceSize = 32;
+static constexpr dart::compiler::target::word
+    ExternalTwoByteString_InstanceSize = 32;
+static constexpr dart::compiler::target::word ExternalTypedData_InstanceSize =
+    24;
+static constexpr dart::compiler::target::word FfiTrampolineData_InstanceSize =
+    48;
+static constexpr dart::compiler::target::word Field_InstanceSize = 96;
+static constexpr dart::compiler::target::word Float32x4_InstanceSize = 24;
+static constexpr dart::compiler::target::word Float64x2_InstanceSize = 24;
+static constexpr dart::compiler::target::word Function_InstanceSize = 128;
+static constexpr dart::compiler::target::word FunctionType_InstanceSize = 72;
+static constexpr dart::compiler::target::word FutureOr_InstanceSize = 16;
+static constexpr dart::compiler::target::word GrowableObjectArray_InstanceSize =
+    32;
+static constexpr dart::compiler::target::word ICData_InstanceSize = 56;
+static constexpr dart::compiler::target::word Instance_InstanceSize = 8;
+static constexpr dart::compiler::target::word Instructions_UnalignedHeaderSize =
+    16;
+static constexpr dart::compiler::target::word
+    InstructionsSection_UnalignedHeaderSize = 40;
+static constexpr dart::compiler::target::word InstructionsTable_InstanceSize =
+    48;
+static constexpr dart::compiler::target::word Int32x4_InstanceSize = 24;
+static constexpr dart::compiler::target::word Integer_InstanceSize = 8;
+static constexpr dart::compiler::target::word KernelProgramInfo_InstanceSize =
+    120;
+static constexpr dart::compiler::target::word LanguageError_InstanceSize = 48;
+static constexpr dart::compiler::target::word Library_InstanceSize = 168;
+static constexpr dart::compiler::target::word LibraryPrefix_InstanceSize = 40;
+static constexpr dart::compiler::target::word LinkedHashBase_InstanceSize = 56;
+static constexpr dart::compiler::target::word MegamorphicCache_InstanceSize =
+    48;
+static constexpr dart::compiler::target::word Mint_InstanceSize = 16;
+static constexpr dart::compiler::target::word MirrorReference_InstanceSize = 16;
+static constexpr dart::compiler::target::word
+    MonomorphicSmiableCall_InstanceSize = 24;
+static constexpr dart::compiler::target::word Namespace_InstanceSize = 40;
+static constexpr dart::compiler::target::word NativeArguments_StructSize = 32;
+static constexpr dart::compiler::target::word Number_InstanceSize = 8;
+static constexpr dart::compiler::target::word Object_InstanceSize = 8;
+static constexpr dart::compiler::target::word PatchClass_InstanceSize = 48;
+static constexpr dart::compiler::target::word PcDescriptors_HeaderSize = 16;
+static constexpr dart::compiler::target::word Pointer_InstanceSize = 24;
+static constexpr dart::compiler::target::word ReceivePort_InstanceSize = 40;
+static constexpr dart::compiler::target::word RegExp_InstanceSize = 120;
+static constexpr dart::compiler::target::word Script_InstanceSize = 80;
+static constexpr dart::compiler::target::word SendPort_InstanceSize = 24;
+static constexpr dart::compiler::target::word Sentinel_InstanceSize = 8;
+static constexpr dart::compiler::target::word SingleTargetCache_InstanceSize =
+    32;
+static constexpr dart::compiler::target::word StackTrace_InstanceSize = 40;
+static constexpr dart::compiler::target::word String_InstanceSize = 16;
+static constexpr dart::compiler::target::word SubtypeTestCache_InstanceSize =
+    16;
+static constexpr dart::compiler::target::word LoadingUnit_InstanceSize = 32;
+static constexpr dart::compiler::target::word
+    TransferableTypedData_InstanceSize = 8;
+static constexpr dart::compiler::target::word Type_InstanceSize = 48;
+static constexpr dart::compiler::target::word TypeParameter_InstanceSize = 48;
+static constexpr dart::compiler::target::word TypeParameters_InstanceSize = 40;
+static constexpr dart::compiler::target::word TypeRef_InstanceSize = 32;
+static constexpr dart::compiler::target::word TypedData_HeaderSize = 24;
+static constexpr dart::compiler::target::word TypedDataBase_InstanceSize = 24;
+static constexpr dart::compiler::target::word TypedDataView_InstanceSize = 40;
+static constexpr dart::compiler::target::word UnhandledException_InstanceSize =
+    24;
+static constexpr dart::compiler::target::word UnlinkedCall_InstanceSize = 32;
+static constexpr dart::compiler::target::word UnwindError_InstanceSize = 24;
+static constexpr dart::compiler::target::word UserTag_InstanceSize = 32;
+static constexpr dart::compiler::target::word WeakProperty_InstanceSize = 32;
+static constexpr dart::compiler::target::word
+    WeakSerializationReference_InstanceSize = 24;
+#endif  // defined(TARGET_ARCH_RISCV64) && !defined(DART_COMPRESSED_POINTERS)
+
 #else  // !defined(PRODUCT)
 
 #if defined(TARGET_ARCH_ARM) && !defined(DART_COMPRESSED_POINTERS)
@@ -6813,6 +7953,1134 @@
     WeakSerializationReference_InstanceSize = 16;
 #endif  // defined(TARGET_ARCH_ARM64) && defined(DART_COMPRESSED_POINTERS)
 
+#if defined(TARGET_ARCH_RISCV32) && !defined(DART_COMPRESSED_POINTERS)
+static constexpr dart::compiler::target::word Function_usage_counter_offset =
+    72;
+static constexpr dart::compiler::target::word
+    ICData_receivers_static_type_offset = 16;
+static constexpr dart::compiler::target::word Array_elements_start_offset = 12;
+static constexpr dart::compiler::target::word Array_element_size = 4;
+static constexpr dart::compiler::target::word Code_elements_start_offset = 76;
+static constexpr dart::compiler::target::word Code_element_size = 4;
+static constexpr dart::compiler::target::word Context_elements_start_offset =
+    12;
+static constexpr dart::compiler::target::word Context_element_size = 4;
+static constexpr dart::compiler::target::word
+    ContextScope_elements_start_offset = 12;
+static constexpr dart::compiler::target::word ContextScope_element_size = 32;
+static constexpr dart::compiler::target::word
+    ExceptionHandlers_elements_start_offset = 12;
+static constexpr dart::compiler::target::word ExceptionHandlers_element_size =
+    12;
+static constexpr dart::compiler::target::word ObjectPool_elements_start_offset =
+    8;
+static constexpr dart::compiler::target::word ObjectPool_element_size = 4;
+static constexpr dart::compiler::target::word
+    OneByteString_elements_start_offset = 12;
+static constexpr dart::compiler::target::word OneByteString_element_size = 1;
+static constexpr dart::compiler::target::word
+    TypeArguments_elements_start_offset = 20;
+static constexpr dart::compiler::target::word TypeArguments_element_size = 4;
+static constexpr dart::compiler::target::word
+    TwoByteString_elements_start_offset = 12;
+static constexpr dart::compiler::target::word TwoByteString_element_size = 2;
+static constexpr dart::compiler::target::word Array_kMaxElements = 268435455;
+static constexpr dart::compiler::target::word Array_kMaxNewSpaceElements =
+    65533;
+static constexpr dart::compiler::target::word Context_kMaxElements = 268435455;
+static constexpr dart::compiler::target::word
+    Instructions_kMonomorphicEntryOffsetJIT = 6;
+static constexpr dart::compiler::target::word
+    Instructions_kPolymorphicEntryOffsetJIT = 42;
+static constexpr dart::compiler::target::word
+    Instructions_kMonomorphicEntryOffsetAOT = 6;
+static constexpr dart::compiler::target::word
+    Instructions_kPolymorphicEntryOffsetAOT = 16;
+static constexpr dart::compiler::target::word
+    Instructions_kBarePayloadAlignment = 4;
+static constexpr dart::compiler::target::word
+    Instructions_kNonBarePayloadAlignment = 4;
+static constexpr dart::compiler::target::word OldPage_kBytesPerCardLog2 = 9;
+static constexpr dart::compiler::target::word
+    NativeEntry_kNumCallWrapperArguments = 2;
+static constexpr dart::compiler::target::word String_kMaxElements = 536870911;
+static constexpr dart::compiler::target::word
+    SubtypeTestCache_kFunctionTypeArguments = 5;
+static constexpr dart::compiler::target::word
+    SubtypeTestCache_kInstanceCidOrSignature = 1;
+static constexpr dart::compiler::target::word
+    SubtypeTestCache_kDestinationType = 2;
+static constexpr dart::compiler::target::word
+    SubtypeTestCache_kInstanceDelayedFunctionTypeArguments = 7;
+static constexpr dart::compiler::target::word
+    SubtypeTestCache_kInstanceParentFunctionTypeArguments = 6;
+static constexpr dart::compiler::target::word
+    SubtypeTestCache_kInstanceTypeArguments = 3;
+static constexpr dart::compiler::target::word
+    SubtypeTestCache_kInstantiatorTypeArguments = 4;
+static constexpr dart::compiler::target::word
+    SubtypeTestCache_kTestEntryLength = 8;
+static constexpr dart::compiler::target::word SubtypeTestCache_kTestResult = 0;
+static constexpr dart::compiler::target::word TypeArguments_kMaxElements =
+    268435455;
+static constexpr dart::compiler::target::word
+    AbstractType_type_test_stub_entry_point_offset = 4;
+static constexpr dart::compiler::target::word ArgumentsDescriptor_count_offset =
+    16;
+static constexpr dart::compiler::target::word ArgumentsDescriptor_size_offset =
+    20;
+static constexpr dart::compiler::target::word
+    ArgumentsDescriptor_first_named_entry_offset = 28;
+static constexpr dart::compiler::target::word
+    ArgumentsDescriptor_named_entry_size = 8;
+static constexpr dart::compiler::target::word ArgumentsDescriptor_name_offset =
+    0;
+static constexpr dart::compiler::target::word
+    ArgumentsDescriptor_position_offset = 4;
+static constexpr dart::compiler::target::word
+    ArgumentsDescriptor_positional_count_offset = 24;
+static constexpr dart::compiler::target::word
+    ArgumentsDescriptor_type_args_len_offset = 12;
+static constexpr dart::compiler::target::word Array_data_offset = 12;
+static constexpr dart::compiler::target::word Array_length_offset = 8;
+static constexpr dart::compiler::target::word Array_tags_offset = 0;
+static constexpr dart::compiler::target::word Array_type_arguments_offset = 4;
+static constexpr dart::compiler::target::word Class_declaration_type_offset =
+    48;
+static constexpr dart::compiler::target::word Class_num_type_arguments_offset =
+    84;
+static constexpr dart::compiler::target::word Class_super_type_offset = 40;
+static constexpr dart::compiler::target::word
+    Class_host_type_arguments_field_offset_in_words_offset = 96;
+static constexpr dart::compiler::target::word Closure_context_offset = 20;
+static constexpr dart::compiler::target::word
+    Closure_delayed_type_arguments_offset = 12;
+static constexpr dart::compiler::target::word Closure_function_offset = 16;
+static constexpr dart::compiler::target::word
+    Closure_function_type_arguments_offset = 8;
+static constexpr dart::compiler::target::word Closure_hash_offset = 24;
+static constexpr dart::compiler::target::word
+    Closure_instantiator_type_arguments_offset = 4;
+static constexpr dart::compiler::target::word
+    ClosureData_default_type_arguments_kind_offset = 16;
+static constexpr dart::compiler::target::word Code_object_pool_offset = 20;
+static constexpr dart::compiler::target::word Code_saved_instructions_offset =
+    24;
+static constexpr dart::compiler::target::word Code_owner_offset = 28;
+static constexpr dart::compiler::target::word Context_num_variables_offset = 4;
+static constexpr dart::compiler::target::word Context_parent_offset = 8;
+static constexpr dart::compiler::target::word Double_value_offset = 8;
+static constexpr dart::compiler::target::word
+    ExternalOneByteString_external_data_offset = 12;
+static constexpr dart::compiler::target::word
+    ExternalTwoByteString_external_data_offset = 12;
+static constexpr dart::compiler::target::word Float32x4_value_offset = 8;
+static constexpr dart::compiler::target::word Float64x2_value_offset = 8;
+static constexpr dart::compiler::target::word
+    Field_initializer_function_offset = 16;
+static constexpr dart::compiler::target::word
+    Field_host_offset_or_field_id_offset = 20;
+static constexpr dart::compiler::target::word Field_guarded_cid_offset = 44;
+static constexpr dart::compiler::target::word
+    Field_guarded_list_length_in_object_offset_offset = 52;
+static constexpr dart::compiler::target::word Field_guarded_list_length_offset =
+    24;
+static constexpr dart::compiler::target::word Field_is_nullable_offset = 46;
+static constexpr dart::compiler::target::word Field_kind_bits_offset = 54;
+static constexpr dart::compiler::target::word Function_code_offset = 32;
+static constexpr dart::compiler::target::word Function_data_offset = 24;
+static constexpr dart::compiler::target::word Function_entry_point_offset[] = {
+    4, 8};
+static constexpr dart::compiler::target::word Function_kind_tag_offset = 64;
+static constexpr dart::compiler::target::word Function_packed_fields_offset =
+    83;
+static constexpr dart::compiler::target::word Function_signature_offset = 20;
+static constexpr dart::compiler::target::word FutureOr_type_arguments_offset =
+    4;
+static constexpr dart::compiler::target::word GrowableObjectArray_data_offset =
+    12;
+static constexpr dart::compiler::target::word
+    GrowableObjectArray_length_offset = 8;
+static constexpr dart::compiler::target::word
+    GrowableObjectArray_type_arguments_offset = 4;
+static constexpr dart::compiler::target::word OldPage_card_table_offset = 20;
+static constexpr dart::compiler::target::word
+    CallSiteData_arguments_descriptor_offset = 8;
+static constexpr dart::compiler::target::word ICData_NumArgsTestedMask = 3;
+static constexpr dart::compiler::target::word ICData_NumArgsTestedShift = 0;
+static constexpr dart::compiler::target::word ICData_entries_offset = 12;
+static constexpr dart::compiler::target::word ICData_owner_offset = 20;
+static constexpr dart::compiler::target::word ICData_state_bits_offset = 28;
+static constexpr dart::compiler::target::word Int32x4_value_offset = 8;
+static constexpr dart::compiler::target::word Isolate_current_tag_offset = 20;
+static constexpr dart::compiler::target::word Isolate_default_tag_offset = 24;
+static constexpr dart::compiler::target::word Isolate_ic_miss_code_offset = 28;
+static constexpr dart::compiler::target::word IsolateGroup_object_store_offset =
+    20;
+static constexpr dart::compiler::target::word
+    IsolateGroup_shared_class_table_offset = 8;
+static constexpr dart::compiler::target::word
+    IsolateGroup_cached_class_table_table_offset = 16;
+static constexpr dart::compiler::target::word Isolate_user_tag_offset = 16;
+static constexpr dart::compiler::target::word LinkedHashBase_data_offset = 12;
+static constexpr dart::compiler::target::word
+    ImmutableLinkedHashBase_data_offset = 12;
+static constexpr dart::compiler::target::word
+    LinkedHashBase_deleted_keys_offset = 20;
+static constexpr dart::compiler::target::word LinkedHashBase_hash_mask_offset =
+    8;
+static constexpr dart::compiler::target::word LinkedHashBase_index_offset = 24;
+static constexpr dart::compiler::target::word
+    LinkedHashBase_type_arguments_offset = 4;
+static constexpr dart::compiler::target::word LinkedHashBase_used_data_offset =
+    16;
+static constexpr dart::compiler::target::word LocalHandle_ptr_offset = 0;
+static constexpr dart::compiler::target::word
+    MarkingStackBlock_pointers_offset = 8;
+static constexpr dart::compiler::target::word MarkingStackBlock_top_offset = 4;
+static constexpr dart::compiler::target::word MegamorphicCache_buckets_offset =
+    12;
+static constexpr dart::compiler::target::word MegamorphicCache_mask_offset = 16;
+static constexpr dart::compiler::target::word Mint_value_offset = 8;
+static constexpr dart::compiler::target::word NativeArguments_argc_tag_offset =
+    4;
+static constexpr dart::compiler::target::word NativeArguments_argv_offset = 8;
+static constexpr dart::compiler::target::word NativeArguments_retval_offset =
+    12;
+static constexpr dart::compiler::target::word NativeArguments_thread_offset = 0;
+static constexpr dart::compiler::target::word ObjectStore_double_type_offset =
+    160;
+static constexpr dart::compiler::target::word ObjectStore_int_type_offset = 116;
+static constexpr dart::compiler::target::word ObjectStore_string_type_offset =
+    180;
+static constexpr dart::compiler::target::word ObjectStore_type_type_offset =
+    104;
+static constexpr dart::compiler::target::word OneByteString_data_offset = 12;
+static constexpr dart::compiler::target::word PointerBase_data_field_offset = 4;
+static constexpr dart::compiler::target::word Pointer_type_arguments_offset = 8;
+static constexpr dart::compiler::target::word
+    SingleTargetCache_entry_point_offset = 8;
+static constexpr dart::compiler::target::word
+    SingleTargetCache_lower_limit_offset = 12;
+static constexpr dart::compiler::target::word SingleTargetCache_target_offset =
+    4;
+static constexpr dart::compiler::target::word
+    SingleTargetCache_upper_limit_offset = 14;
+static constexpr dart::compiler::target::word StoreBufferBlock_pointers_offset =
+    8;
+static constexpr dart::compiler::target::word StoreBufferBlock_top_offset = 4;
+static constexpr dart::compiler::target::word String_hash_offset = 8;
+static constexpr dart::compiler::target::word String_length_offset = 4;
+static constexpr dart::compiler::target::word SubtypeTestCache_cache_offset = 4;
+static constexpr dart::compiler::target::word
+    Thread_AllocateArray_entry_point_offset = 384;
+static constexpr dart::compiler::target::word Thread_active_exception_offset =
+    800;
+static constexpr dart::compiler::target::word Thread_active_stacktrace_offset =
+    804;
+static constexpr dart::compiler::target::word
+    Thread_array_write_barrier_code_offset = 128;
+static constexpr dart::compiler::target::word
+    Thread_array_write_barrier_entry_point_offset = 276;
+static constexpr dart::compiler::target::word
+    Thread_allocate_mint_with_fpu_regs_entry_point_offset = 284;
+static constexpr dart::compiler::target::word
+    Thread_allocate_mint_with_fpu_regs_stub_offset = 188;
+static constexpr dart::compiler::target::word
+    Thread_allocate_mint_without_fpu_regs_entry_point_offset = 288;
+static constexpr dart::compiler::target::word
+    Thread_allocate_mint_without_fpu_regs_stub_offset = 192;
+static constexpr dart::compiler::target::word
+    Thread_allocate_object_entry_point_offset = 292;
+static constexpr dart::compiler::target::word
+    Thread_allocate_object_stub_offset = 196;
+static constexpr dart::compiler::target::word
+    Thread_allocate_object_parameterized_entry_point_offset = 296;
+static constexpr dart::compiler::target::word
+    Thread_allocate_object_parameterized_stub_offset = 200;
+static constexpr dart::compiler::target::word
+    Thread_allocate_object_slow_entry_point_offset = 300;
+static constexpr dart::compiler::target::word
+    Thread_allocate_object_slow_stub_offset = 204;
+static constexpr dart::compiler::target::word Thread_api_top_scope_offset = 840;
+static constexpr dart::compiler::target::word
+    Thread_auto_scope_native_wrapper_entry_point_offset = 348;
+static constexpr dart::compiler::target::word Thread_bool_false_offset = 120;
+static constexpr dart::compiler::target::word Thread_bool_true_offset = 116;
+static constexpr dart::compiler::target::word
+    Thread_bootstrap_native_wrapper_entry_point_offset = 340;
+static constexpr dart::compiler::target::word
+    Thread_call_to_runtime_entry_point_offset = 280;
+static constexpr dart::compiler::target::word
+    Thread_call_to_runtime_stub_offset = 144;
+static constexpr dart::compiler::target::word Thread_dart_stream_offset = 864;
+static constexpr dart::compiler::target::word
+    Thread_dispatch_table_array_offset = 44;
+static constexpr dart::compiler::target::word
+    Thread_double_truncate_round_supported_offset = 844;
+static constexpr dart::compiler::target::word Thread_optimize_entry_offset =
+    320;
+static constexpr dart::compiler::target::word Thread_optimize_stub_offset = 232;
+static constexpr dart::compiler::target::word Thread_deoptimize_entry_offset =
+    324;
+static constexpr dart::compiler::target::word Thread_deoptimize_stub_offset =
+    236;
+static constexpr dart::compiler::target::word Thread_double_abs_address_offset =
+    364;
+static constexpr dart::compiler::target::word
+    Thread_double_negate_address_offset = 360;
+static constexpr dart::compiler::target::word Thread_end_offset = 52;
+static constexpr dart::compiler::target::word
+    Thread_enter_safepoint_stub_offset = 256;
+static constexpr dart::compiler::target::word Thread_execution_state_offset =
+    820;
+static constexpr dart::compiler::target::word
+    Thread_exit_safepoint_stub_offset = 260;
+static constexpr dart::compiler::target::word
+    Thread_exit_safepoint_ignore_unwind_in_progress_stub_offset = 264;
+static constexpr dart::compiler::target::word
+    Thread_call_native_through_safepoint_stub_offset = 268;
+static constexpr dart::compiler::target::word
+    Thread_call_native_through_safepoint_entry_point_offset = 328;
+static constexpr dart::compiler::target::word
+    Thread_fix_allocation_stub_code_offset = 136;
+static constexpr dart::compiler::target::word
+    Thread_fix_callers_target_code_offset = 132;
+static constexpr dart::compiler::target::word
+    Thread_float_absolute_address_offset = 376;
+static constexpr dart::compiler::target::word
+    Thread_float_negate_address_offset = 372;
+static constexpr dart::compiler::target::word Thread_float_not_address_offset =
+    368;
+static constexpr dart::compiler::target::word
+    Thread_float_zerow_address_offset = 380;
+static constexpr dart::compiler::target::word Thread_global_object_pool_offset =
+    808;
+static constexpr dart::compiler::target::word
+    Thread_invoke_dart_code_stub_offset = 140;
+static constexpr dart::compiler::target::word Thread_exit_through_ffi_offset =
+    836;
+static constexpr dart::compiler::target::word Thread_isolate_offset = 40;
+static constexpr dart::compiler::target::word Thread_isolate_group_offset = 868;
+static constexpr dart::compiler::target::word Thread_field_table_values_offset =
+    64;
+static constexpr dart::compiler::target::word
+    Thread_lazy_deopt_from_return_stub_offset = 240;
+static constexpr dart::compiler::target::word
+    Thread_lazy_deopt_from_throw_stub_offset = 244;
+static constexpr dart::compiler::target::word
+    Thread_lazy_specialize_type_test_stub_offset = 252;
+static constexpr dart::compiler::target::word
+    Thread_marking_stack_block_offset = 80;
+static constexpr dart::compiler::target::word
+    Thread_megamorphic_call_checked_entry_offset = 312;
+static constexpr dart::compiler::target::word
+    Thread_switchable_call_miss_entry_offset = 316;
+static constexpr dart::compiler::target::word
+    Thread_switchable_call_miss_stub_offset = 216;
+static constexpr dart::compiler::target::word
+    Thread_no_scope_native_wrapper_entry_point_offset = 344;
+static constexpr dart::compiler::target::word
+    Thread_late_initialization_error_shared_with_fpu_regs_stub_offset = 152;
+static constexpr dart::compiler::target::word
+    Thread_late_initialization_error_shared_without_fpu_regs_stub_offset = 148;
+static constexpr dart::compiler::target::word
+    Thread_null_error_shared_with_fpu_regs_stub_offset = 160;
+static constexpr dart::compiler::target::word
+    Thread_null_error_shared_without_fpu_regs_stub_offset = 156;
+static constexpr dart::compiler::target::word
+    Thread_null_arg_error_shared_with_fpu_regs_stub_offset = 168;
+static constexpr dart::compiler::target::word
+    Thread_null_arg_error_shared_without_fpu_regs_stub_offset = 164;
+static constexpr dart::compiler::target::word
+    Thread_null_cast_error_shared_with_fpu_regs_stub_offset = 176;
+static constexpr dart::compiler::target::word
+    Thread_null_cast_error_shared_without_fpu_regs_stub_offset = 172;
+static constexpr dart::compiler::target::word
+    Thread_range_error_shared_with_fpu_regs_stub_offset = 184;
+static constexpr dart::compiler::target::word
+    Thread_range_error_shared_without_fpu_regs_stub_offset = 180;
+static constexpr dart::compiler::target::word Thread_object_null_offset = 112;
+static constexpr dart::compiler::target::word
+    Thread_predefined_symbols_address_offset = 352;
+static constexpr dart::compiler::target::word Thread_resume_pc_offset = 812;
+static constexpr dart::compiler::target::word
+    Thread_saved_shadow_call_stack_offset = 816;
+static constexpr dart::compiler::target::word Thread_safepoint_state_offset =
+    824;
+static constexpr dart::compiler::target::word
+    Thread_slow_type_test_stub_offset = 248;
+static constexpr dart::compiler::target::word
+    Thread_slow_type_test_entry_point_offset = 336;
+static constexpr dart::compiler::target::word Thread_stack_limit_offset = 28;
+static constexpr dart::compiler::target::word Thread_saved_stack_limit_offset =
+    56;
+static constexpr dart::compiler::target::word
+    Thread_stack_overflow_flags_offset = 60;
+static constexpr dart::compiler::target::word
+    Thread_stack_overflow_shared_with_fpu_regs_entry_point_offset = 308;
+static constexpr dart::compiler::target::word
+    Thread_stack_overflow_shared_with_fpu_regs_stub_offset = 212;
+static constexpr dart::compiler::target::word
+    Thread_stack_overflow_shared_without_fpu_regs_entry_point_offset = 304;
+static constexpr dart::compiler::target::word
+    Thread_stack_overflow_shared_without_fpu_regs_stub_offset = 208;
+static constexpr dart::compiler::target::word Thread_store_buffer_block_offset =
+    76;
+static constexpr dart::compiler::target::word
+    Thread_top_exit_frame_info_offset = 72;
+static constexpr dart::compiler::target::word Thread_top_offset = 48;
+static constexpr dart::compiler::target::word Thread_top_resource_offset = 16;
+static constexpr dart::compiler::target::word
+    Thread_unboxed_int64_runtime_arg_offset = 96;
+static constexpr dart::compiler::target::word
+    Thread_unboxed_double_runtime_arg_offset = 104;
+static constexpr dart::compiler::target::word Thread_vm_tag_offset = 88;
+static constexpr dart::compiler::target::word Thread_write_barrier_code_offset =
+    124;
+static constexpr dart::compiler::target::word
+    Thread_write_barrier_entry_point_offset = 272;
+static constexpr dart::compiler::target::word Thread_write_barrier_mask_offset =
+    32;
+static constexpr dart::compiler::target::word Thread_heap_base_offset = 36;
+static constexpr dart::compiler::target::word Thread_callback_code_offset = 828;
+static constexpr dart::compiler::target::word
+    Thread_callback_stack_return_offset = 832;
+static constexpr dart::compiler::target::word Thread_random_offset = 848;
+static constexpr dart::compiler::target::word
+    Thread_jump_to_frame_entry_point_offset = 332;
+static constexpr dart::compiler::target::word Thread_tsan_utils_offset = 856;
+static constexpr dart::compiler::target::word TsanUtils_setjmp_function_offset =
+    0;
+static constexpr dart::compiler::target::word TsanUtils_setjmp_buffer_offset =
+    4;
+static constexpr dart::compiler::target::word TsanUtils_exception_pc_offset = 8;
+static constexpr dart::compiler::target::word TsanUtils_exception_sp_offset =
+    12;
+static constexpr dart::compiler::target::word TsanUtils_exception_fp_offset =
+    16;
+static constexpr dart::compiler::target::word TimelineStream_enabled_offset = 8;
+static constexpr dart::compiler::target::word TwoByteString_data_offset = 12;
+static constexpr dart::compiler::target::word Type_arguments_offset = 12;
+static constexpr dart::compiler::target::word Type_hash_offset = 16;
+static constexpr dart::compiler::target::word Type_type_class_id_offset = 20;
+static constexpr dart::compiler::target::word Type_type_state_offset = 22;
+static constexpr dart::compiler::target::word Type_nullability_offset = 23;
+static constexpr dart::compiler::target::word FunctionType_hash_offset = 28;
+static constexpr dart::compiler::target::word
+    FunctionType_named_parameter_names_offset = 24;
+static constexpr dart::compiler::target::word FunctionType_nullability_offset =
+    39;
+static constexpr dart::compiler::target::word
+    FunctionType_packed_parameter_counts_offset = 32;
+static constexpr dart::compiler::target::word
+    FunctionType_packed_type_parameter_counts_offset = 36;
+static constexpr dart::compiler::target::word
+    FunctionType_parameter_types_offset = 20;
+static constexpr dart::compiler::target::word
+    FunctionType_type_parameters_offset = 12;
+static constexpr dart::compiler::target::word
+    TypeParameter_parameterized_class_id_offset = 20;
+static constexpr dart::compiler::target::word TypeParameter_index_offset = 23;
+static constexpr dart::compiler::target::word TypeParameter_nullability_offset =
+    25;
+static constexpr dart::compiler::target::word
+    TypeArguments_instantiations_offset = 4;
+static constexpr dart::compiler::target::word TypeArguments_length_offset = 8;
+static constexpr dart::compiler::target::word TypeArguments_nullability_offset =
+    16;
+static constexpr dart::compiler::target::word TypeArguments_types_offset = 20;
+static constexpr dart::compiler::target::word TypeParameters_names_offset = 4;
+static constexpr dart::compiler::target::word TypeParameters_flags_offset = 8;
+static constexpr dart::compiler::target::word TypeParameters_bounds_offset = 12;
+static constexpr dart::compiler::target::word TypeParameters_defaults_offset =
+    16;
+static constexpr dart::compiler::target::word TypeParameter_bound_offset = 16;
+static constexpr dart::compiler::target::word TypeParameter_flags_offset = 24;
+static constexpr dart::compiler::target::word TypeRef_type_offset = 12;
+static constexpr dart::compiler::target::word TypedDataBase_length_offset = 8;
+static constexpr dart::compiler::target::word TypedDataView_data_offset = 12;
+static constexpr dart::compiler::target::word
+    TypedDataView_offset_in_bytes_offset = 16;
+static constexpr dart::compiler::target::word TypedData_data_offset = 12;
+static constexpr dart::compiler::target::word
+    UnhandledException_exception_offset = 4;
+static constexpr dart::compiler::target::word
+    UnhandledException_stacktrace_offset = 8;
+static constexpr dart::compiler::target::word UserTag_tag_offset = 8;
+static constexpr dart::compiler::target::word
+    MonomorphicSmiableCall_expected_cid_offset = 4;
+static constexpr dart::compiler::target::word
+    MonomorphicSmiableCall_entrypoint_offset = 8;
+static constexpr dart::compiler::target::word WeakProperty_key_offset = 4;
+static constexpr dart::compiler::target::word WeakProperty_value_offset = 8;
+static constexpr dart::compiler::target::word Code_entry_point_offset[] = {
+    4, 12, 8, 16};
+static constexpr dart::compiler::target::word
+    Thread_write_barrier_wrappers_thread_offset[] = {
+        -1,  -1,  -1, -1, -1, 728, 732, 736, -1,  -1,  740,
+        744, 748, -1, -1, -1, 752, 756, 760, 764, 768, 772,
+        776, 780, -1, -1, -1, -1,  784, 788, 792, 796};
+static constexpr dart::compiler::target::word AbstractType_InstanceSize = 12;
+static constexpr dart::compiler::target::word ApiError_InstanceSize = 8;
+static constexpr dart::compiler::target::word Array_header_size = 12;
+static constexpr dart::compiler::target::word Bool_InstanceSize = 8;
+static constexpr dart::compiler::target::word Capability_InstanceSize = 16;
+static constexpr dart::compiler::target::word Class_InstanceSize = 108;
+static constexpr dart::compiler::target::word Closure_InstanceSize = 28;
+static constexpr dart::compiler::target::word ClosureData_InstanceSize = 20;
+static constexpr dart::compiler::target::word CodeSourceMap_HeaderSize = 8;
+static constexpr dart::compiler::target::word
+    CompressedStackMaps_ObjectHeaderSize = 4;
+static constexpr dart::compiler::target::word
+    CompressedStackMaps_PayloadHeaderSize = 4;
+static constexpr dart::compiler::target::word Context_header_size = 12;
+static constexpr dart::compiler::target::word Double_InstanceSize = 16;
+static constexpr dart::compiler::target::word DynamicLibrary_InstanceSize = 8;
+static constexpr dart::compiler::target::word
+    ExternalOneByteString_InstanceSize = 20;
+static constexpr dart::compiler::target::word
+    ExternalTwoByteString_InstanceSize = 20;
+static constexpr dart::compiler::target::word ExternalTypedData_InstanceSize =
+    12;
+static constexpr dart::compiler::target::word FfiTrampolineData_InstanceSize =
+    28;
+static constexpr dart::compiler::target::word Field_InstanceSize = 60;
+static constexpr dart::compiler::target::word Float32x4_InstanceSize = 24;
+static constexpr dart::compiler::target::word Float64x2_InstanceSize = 24;
+static constexpr dart::compiler::target::word Function_InstanceSize = 88;
+static constexpr dart::compiler::target::word FunctionType_InstanceSize = 40;
+static constexpr dart::compiler::target::word FutureOr_InstanceSize = 8;
+static constexpr dart::compiler::target::word GrowableObjectArray_InstanceSize =
+    16;
+static constexpr dart::compiler::target::word ICData_InstanceSize = 32;
+static constexpr dart::compiler::target::word Instance_InstanceSize = 4;
+static constexpr dart::compiler::target::word Instructions_UnalignedHeaderSize =
+    8;
+static constexpr dart::compiler::target::word
+    InstructionsSection_UnalignedHeaderSize = 20;
+static constexpr dart::compiler::target::word InstructionsTable_InstanceSize =
+    24;
+static constexpr dart::compiler::target::word Int32x4_InstanceSize = 24;
+static constexpr dart::compiler::target::word Integer_InstanceSize = 4;
+static constexpr dart::compiler::target::word KernelProgramInfo_InstanceSize =
+    60;
+static constexpr dart::compiler::target::word LanguageError_InstanceSize = 28;
+static constexpr dart::compiler::target::word Library_InstanceSize = 88;
+static constexpr dart::compiler::target::word LibraryPrefix_InstanceSize = 20;
+static constexpr dart::compiler::target::word LinkedHashBase_InstanceSize = 28;
+static constexpr dart::compiler::target::word MegamorphicCache_InstanceSize =
+    24;
+static constexpr dart::compiler::target::word Mint_InstanceSize = 16;
+static constexpr dart::compiler::target::word MirrorReference_InstanceSize = 8;
+static constexpr dart::compiler::target::word
+    MonomorphicSmiableCall_InstanceSize = 12;
+static constexpr dart::compiler::target::word Namespace_InstanceSize = 20;
+static constexpr dart::compiler::target::word NativeArguments_StructSize = 16;
+static constexpr dart::compiler::target::word Number_InstanceSize = 4;
+static constexpr dart::compiler::target::word Object_InstanceSize = 4;
+static constexpr dart::compiler::target::word PatchClass_InstanceSize = 24;
+static constexpr dart::compiler::target::word PcDescriptors_HeaderSize = 8;
+static constexpr dart::compiler::target::word Pointer_InstanceSize = 12;
+static constexpr dart::compiler::target::word ReceivePort_InstanceSize = 12;
+static constexpr dart::compiler::target::word RegExp_InstanceSize = 60;
+static constexpr dart::compiler::target::word Script_InstanceSize = 48;
+static constexpr dart::compiler::target::word SendPort_InstanceSize = 24;
+static constexpr dart::compiler::target::word Sentinel_InstanceSize = 4;
+static constexpr dart::compiler::target::word SingleTargetCache_InstanceSize =
+    16;
+static constexpr dart::compiler::target::word StackTrace_InstanceSize = 20;
+static constexpr dart::compiler::target::word String_InstanceSize = 12;
+static constexpr dart::compiler::target::word SubtypeTestCache_InstanceSize = 8;
+static constexpr dart::compiler::target::word LoadingUnit_InstanceSize = 20;
+static constexpr dart::compiler::target::word
+    TransferableTypedData_InstanceSize = 4;
+static constexpr dart::compiler::target::word Type_InstanceSize = 24;
+static constexpr dart::compiler::target::word TypeParameter_InstanceSize = 28;
+static constexpr dart::compiler::target::word TypeParameters_InstanceSize = 20;
+static constexpr dart::compiler::target::word TypeRef_InstanceSize = 16;
+static constexpr dart::compiler::target::word TypedData_HeaderSize = 12;
+static constexpr dart::compiler::target::word TypedDataBase_InstanceSize = 12;
+static constexpr dart::compiler::target::word TypedDataView_InstanceSize = 20;
+static constexpr dart::compiler::target::word UnhandledException_InstanceSize =
+    12;
+static constexpr dart::compiler::target::word UnlinkedCall_InstanceSize = 16;
+static constexpr dart::compiler::target::word UnwindError_InstanceSize = 12;
+static constexpr dart::compiler::target::word UserTag_InstanceSize = 16;
+static constexpr dart::compiler::target::word WeakProperty_InstanceSize = 16;
+static constexpr dart::compiler::target::word
+    WeakSerializationReference_InstanceSize = 12;
+#endif  // defined(TARGET_ARCH_RISCV32) && !defined(DART_COMPRESSED_POINTERS)
+
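The RISCV32 block above follows the same pattern as the file's other 32-bit targets: each constant is the byte offset (or instance size) the cross-compiler must assume for the target's object layout, independent of the host it runs on. As a purely illustrative sketch (not Dart SDK code; every name below is a hypothetical stand-in), this is the kind of consumer such a table serves:

// Illustrative sketch only -- not Dart SDK code. All names here are
// hypothetical stand-ins showing how a per-target offset table is
// consumed: the cross-compiler emits loads of the form
// "load dst, [base + K]" using the target's layout constant K, never
// the host's offsetof().
#include <cstdint>

namespace sketch {
using word = int32_t;  // stand-in for a 32-bit target word

// Hypothetical constant mirroring, e.g., Array_length_offset above.
static constexpr word kArrayLengthOffset = 8;

struct Assembler {
  // In a real backend this would emit a target load instruction; here
  // it only records that code generation is driven by the constant.
  void LoadFieldFromOffset(word offset) { last_offset = offset; }
  word last_offset = 0;
};

inline void EmitArrayLengthLoad(Assembler* assembler) {
  assembler->LoadFieldFromOffset(kArrayLengthOffset);
}
}  // namespace sketch
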
+#if defined(TARGET_ARCH_RISCV64) && !defined(DART_COMPRESSED_POINTERS)
+static constexpr dart::compiler::target::word Function_usage_counter_offset =
+    112;
+static constexpr dart::compiler::target::word
+    ICData_receivers_static_type_offset = 32;
+static constexpr dart::compiler::target::word Array_elements_start_offset = 24;
+static constexpr dart::compiler::target::word Array_element_size = 8;
+static constexpr dart::compiler::target::word Code_elements_start_offset = 144;
+static constexpr dart::compiler::target::word Code_element_size = 4;
+static constexpr dart::compiler::target::word Context_elements_start_offset =
+    24;
+static constexpr dart::compiler::target::word Context_element_size = 8;
+static constexpr dart::compiler::target::word
+    ContextScope_elements_start_offset = 16;
+static constexpr dart::compiler::target::word ContextScope_element_size = 64;
+static constexpr dart::compiler::target::word
+    ExceptionHandlers_elements_start_offset = 24;
+static constexpr dart::compiler::target::word ExceptionHandlers_element_size =
+    12;
+static constexpr dart::compiler::target::word ObjectPool_elements_start_offset =
+    16;
+static constexpr dart::compiler::target::word ObjectPool_element_size = 8;
+static constexpr dart::compiler::target::word
+    OneByteString_elements_start_offset = 16;
+static constexpr dart::compiler::target::word OneByteString_element_size = 1;
+static constexpr dart::compiler::target::word
+    TypeArguments_elements_start_offset = 40;
+static constexpr dart::compiler::target::word TypeArguments_element_size = 8;
+static constexpr dart::compiler::target::word
+    TwoByteString_elements_start_offset = 16;
+static constexpr dart::compiler::target::word TwoByteString_element_size = 2;
+static constexpr dart::compiler::target::word Array_kMaxElements =
+    576460752303423487;
+static constexpr dart::compiler::target::word Array_kMaxNewSpaceElements =
+    32765;
+static constexpr dart::compiler::target::word Context_kMaxElements =
+    576460752303423487;
+static constexpr dart::compiler::target::word
+    Instructions_kMonomorphicEntryOffsetJIT = 6;
+static constexpr dart::compiler::target::word
+    Instructions_kPolymorphicEntryOffsetJIT = 42;
+static constexpr dart::compiler::target::word
+    Instructions_kMonomorphicEntryOffsetAOT = 6;
+static constexpr dart::compiler::target::word
+    Instructions_kPolymorphicEntryOffsetAOT = 16;
+static constexpr dart::compiler::target::word
+    Instructions_kBarePayloadAlignment = 4;
+static constexpr dart::compiler::target::word
+    Instructions_kNonBarePayloadAlignment = 8;
+static constexpr dart::compiler::target::word OldPage_kBytesPerCardLog2 = 10;
+static constexpr dart::compiler::target::word
+    NativeEntry_kNumCallWrapperArguments = 2;
+static constexpr dart::compiler::target::word String_kMaxElements =
+    2305843009213693951;
+static constexpr dart::compiler::target::word
+    SubtypeTestCache_kFunctionTypeArguments = 5;
+static constexpr dart::compiler::target::word
+    SubtypeTestCache_kInstanceCidOrSignature = 1;
+static constexpr dart::compiler::target::word
+    SubtypeTestCache_kDestinationType = 2;
+static constexpr dart::compiler::target::word
+    SubtypeTestCache_kInstanceDelayedFunctionTypeArguments = 7;
+static constexpr dart::compiler::target::word
+    SubtypeTestCache_kInstanceParentFunctionTypeArguments = 6;
+static constexpr dart::compiler::target::word
+    SubtypeTestCache_kInstanceTypeArguments = 3;
+static constexpr dart::compiler::target::word
+    SubtypeTestCache_kInstantiatorTypeArguments = 4;
+static constexpr dart::compiler::target::word
+    SubtypeTestCache_kTestEntryLength = 8;
+static constexpr dart::compiler::target::word SubtypeTestCache_kTestResult = 0;
+static constexpr dart::compiler::target::word TypeArguments_kMaxElements =
+    576460752303423487;
+static constexpr dart::compiler::target::word
+    AbstractType_type_test_stub_entry_point_offset = 8;
+static constexpr dart::compiler::target::word ArgumentsDescriptor_count_offset =
+    32;
+static constexpr dart::compiler::target::word ArgumentsDescriptor_size_offset =
+    40;
+static constexpr dart::compiler::target::word
+    ArgumentsDescriptor_first_named_entry_offset = 56;
+static constexpr dart::compiler::target::word
+    ArgumentsDescriptor_named_entry_size = 16;
+static constexpr dart::compiler::target::word ArgumentsDescriptor_name_offset =
+    0;
+static constexpr dart::compiler::target::word
+    ArgumentsDescriptor_position_offset = 8;
+static constexpr dart::compiler::target::word
+    ArgumentsDescriptor_positional_count_offset = 48;
+static constexpr dart::compiler::target::word
+    ArgumentsDescriptor_type_args_len_offset = 24;
+static constexpr dart::compiler::target::word Array_data_offset = 24;
+static constexpr dart::compiler::target::word Array_length_offset = 16;
+static constexpr dart::compiler::target::word Array_tags_offset = 0;
+static constexpr dart::compiler::target::word Array_type_arguments_offset = 8;
+static constexpr dart::compiler::target::word Class_declaration_type_offset =
+    96;
+static constexpr dart::compiler::target::word Class_num_type_arguments_offset =
+    156;
+static constexpr dart::compiler::target::word Class_super_type_offset = 80;
+static constexpr dart::compiler::target::word
+    Class_host_type_arguments_field_offset_in_words_offset = 168;
+static constexpr dart::compiler::target::word Closure_context_offset = 40;
+static constexpr dart::compiler::target::word
+    Closure_delayed_type_arguments_offset = 24;
+static constexpr dart::compiler::target::word Closure_function_offset = 32;
+static constexpr dart::compiler::target::word
+    Closure_function_type_arguments_offset = 16;
+static constexpr dart::compiler::target::word Closure_hash_offset = 48;
+static constexpr dart::compiler::target::word
+    Closure_instantiator_type_arguments_offset = 8;
+static constexpr dart::compiler::target::word
+    ClosureData_default_type_arguments_kind_offset = 32;
+static constexpr dart::compiler::target::word Code_object_pool_offset = 40;
+static constexpr dart::compiler::target::word Code_saved_instructions_offset =
+    48;
+static constexpr dart::compiler::target::word Code_owner_offset = 56;
+static constexpr dart::compiler::target::word Context_num_variables_offset = 8;
+static constexpr dart::compiler::target::word Context_parent_offset = 16;
+static constexpr dart::compiler::target::word Double_value_offset = 8;
+static constexpr dart::compiler::target::word
+    ExternalOneByteString_external_data_offset = 16;
+static constexpr dart::compiler::target::word
+    ExternalTwoByteString_external_data_offset = 16;
+static constexpr dart::compiler::target::word Float32x4_value_offset = 8;
+static constexpr dart::compiler::target::word Float64x2_value_offset = 8;
+static constexpr dart::compiler::target::word
+    Field_initializer_function_offset = 32;
+static constexpr dart::compiler::target::word
+    Field_host_offset_or_field_id_offset = 40;
+static constexpr dart::compiler::target::word Field_guarded_cid_offset = 80;
+static constexpr dart::compiler::target::word
+    Field_guarded_list_length_in_object_offset_offset = 88;
+static constexpr dart::compiler::target::word Field_guarded_list_length_offset =
+    48;
+static constexpr dart::compiler::target::word Field_is_nullable_offset = 82;
+static constexpr dart::compiler::target::word Field_kind_bits_offset = 90;
+static constexpr dart::compiler::target::word Function_code_offset = 64;
+static constexpr dart::compiler::target::word Function_data_offset = 48;
+static constexpr dart::compiler::target::word Function_entry_point_offset[] = {
+    8, 16};
+static constexpr dart::compiler::target::word Function_kind_tag_offset = 104;
+static constexpr dart::compiler::target::word Function_packed_fields_offset =
+    123;
+static constexpr dart::compiler::target::word Function_signature_offset = 40;
+static constexpr dart::compiler::target::word FutureOr_type_arguments_offset =
+    8;
+static constexpr dart::compiler::target::word GrowableObjectArray_data_offset =
+    24;
+static constexpr dart::compiler::target::word
+    GrowableObjectArray_length_offset = 16;
+static constexpr dart::compiler::target::word
+    GrowableObjectArray_type_arguments_offset = 8;
+static constexpr dart::compiler::target::word OldPage_card_table_offset = 40;
+static constexpr dart::compiler::target::word
+    CallSiteData_arguments_descriptor_offset = 16;
+static constexpr dart::compiler::target::word ICData_NumArgsTestedMask = 3;
+static constexpr dart::compiler::target::word ICData_NumArgsTestedShift = 0;
+static constexpr dart::compiler::target::word ICData_entries_offset = 24;
+static constexpr dart::compiler::target::word ICData_owner_offset = 40;
+static constexpr dart::compiler::target::word ICData_state_bits_offset = 52;
+static constexpr dart::compiler::target::word Int32x4_value_offset = 8;
+static constexpr dart::compiler::target::word Isolate_current_tag_offset = 40;
+static constexpr dart::compiler::target::word Isolate_default_tag_offset = 48;
+static constexpr dart::compiler::target::word Isolate_ic_miss_code_offset = 56;
+static constexpr dart::compiler::target::word IsolateGroup_object_store_offset =
+    40;
+static constexpr dart::compiler::target::word
+    IsolateGroup_shared_class_table_offset = 16;
+static constexpr dart::compiler::target::word
+    IsolateGroup_cached_class_table_table_offset = 32;
+static constexpr dart::compiler::target::word Isolate_user_tag_offset = 32;
+static constexpr dart::compiler::target::word LinkedHashBase_data_offset = 24;
+static constexpr dart::compiler::target::word
+    ImmutableLinkedHashBase_data_offset = 24;
+static constexpr dart::compiler::target::word
+    LinkedHashBase_deleted_keys_offset = 40;
+static constexpr dart::compiler::target::word LinkedHashBase_hash_mask_offset =
+    16;
+static constexpr dart::compiler::target::word LinkedHashBase_index_offset = 48;
+static constexpr dart::compiler::target::word
+    LinkedHashBase_type_arguments_offset = 8;
+static constexpr dart::compiler::target::word LinkedHashBase_used_data_offset =
+    32;
+static constexpr dart::compiler::target::word LocalHandle_ptr_offset = 0;
+static constexpr dart::compiler::target::word
+    MarkingStackBlock_pointers_offset = 16;
+static constexpr dart::compiler::target::word MarkingStackBlock_top_offset = 8;
+static constexpr dart::compiler::target::word MegamorphicCache_buckets_offset =
+    24;
+static constexpr dart::compiler::target::word MegamorphicCache_mask_offset = 32;
+static constexpr dart::compiler::target::word Mint_value_offset = 8;
+static constexpr dart::compiler::target::word NativeArguments_argc_tag_offset =
+    8;
+static constexpr dart::compiler::target::word NativeArguments_argv_offset = 16;
+static constexpr dart::compiler::target::word NativeArguments_retval_offset =
+    24;
+static constexpr dart::compiler::target::word NativeArguments_thread_offset = 0;
+static constexpr dart::compiler::target::word ObjectStore_double_type_offset =
+    320;
+static constexpr dart::compiler::target::word ObjectStore_int_type_offset = 232;
+static constexpr dart::compiler::target::word ObjectStore_string_type_offset =
+    360;
+static constexpr dart::compiler::target::word ObjectStore_type_type_offset =
+    208;
+static constexpr dart::compiler::target::word OneByteString_data_offset = 16;
+static constexpr dart::compiler::target::word PointerBase_data_field_offset = 8;
+static constexpr dart::compiler::target::word Pointer_type_arguments_offset =
+    16;
+static constexpr dart::compiler::target::word
+    SingleTargetCache_entry_point_offset = 16;
+static constexpr dart::compiler::target::word
+    SingleTargetCache_lower_limit_offset = 24;
+static constexpr dart::compiler::target::word SingleTargetCache_target_offset =
+    8;
+static constexpr dart::compiler::target::word
+    SingleTargetCache_upper_limit_offset = 26;
+static constexpr dart::compiler::target::word StoreBufferBlock_pointers_offset =
+    16;
+static constexpr dart::compiler::target::word StoreBufferBlock_top_offset = 8;
+static constexpr dart::compiler::target::word String_hash_offset = 4;
+static constexpr dart::compiler::target::word String_length_offset = 8;
+static constexpr dart::compiler::target::word SubtypeTestCache_cache_offset = 8;
+static constexpr dart::compiler::target::word
+    Thread_AllocateArray_entry_point_offset = 744;
+static constexpr dart::compiler::target::word Thread_active_exception_offset =
+    1576;
+static constexpr dart::compiler::target::word Thread_active_stacktrace_offset =
+    1584;
+static constexpr dart::compiler::target::word
+    Thread_array_write_barrier_code_offset = 232;
+static constexpr dart::compiler::target::word
+    Thread_array_write_barrier_entry_point_offset = 528;
+static constexpr dart::compiler::target::word
+    Thread_allocate_mint_with_fpu_regs_entry_point_offset = 544;
+static constexpr dart::compiler::target::word
+    Thread_allocate_mint_with_fpu_regs_stub_offset = 352;
+static constexpr dart::compiler::target::word
+    Thread_allocate_mint_without_fpu_regs_entry_point_offset = 552;
+static constexpr dart::compiler::target::word
+    Thread_allocate_mint_without_fpu_regs_stub_offset = 360;
+static constexpr dart::compiler::target::word
+    Thread_allocate_object_entry_point_offset = 560;
+static constexpr dart::compiler::target::word
+    Thread_allocate_object_stub_offset = 368;
+static constexpr dart::compiler::target::word
+    Thread_allocate_object_parameterized_entry_point_offset = 568;
+static constexpr dart::compiler::target::word
+    Thread_allocate_object_parameterized_stub_offset = 376;
+static constexpr dart::compiler::target::word
+    Thread_allocate_object_slow_entry_point_offset = 576;
+static constexpr dart::compiler::target::word
+    Thread_allocate_object_slow_stub_offset = 384;
+static constexpr dart::compiler::target::word Thread_api_top_scope_offset =
+    1656;
+static constexpr dart::compiler::target::word
+    Thread_auto_scope_native_wrapper_entry_point_offset = 672;
+static constexpr dart::compiler::target::word Thread_bool_false_offset = 216;
+static constexpr dart::compiler::target::word Thread_bool_true_offset = 208;
+static constexpr dart::compiler::target::word
+    Thread_bootstrap_native_wrapper_entry_point_offset = 656;
+static constexpr dart::compiler::target::word
+    Thread_call_to_runtime_entry_point_offset = 536;
+static constexpr dart::compiler::target::word
+    Thread_call_to_runtime_stub_offset = 264;
+static constexpr dart::compiler::target::word Thread_dart_stream_offset = 1696;
+static constexpr dart::compiler::target::word
+    Thread_dispatch_table_array_offset = 88;
+static constexpr dart::compiler::target::word
+    Thread_double_truncate_round_supported_offset = 1664;
+static constexpr dart::compiler::target::word Thread_optimize_entry_offset =
+    616;
+static constexpr dart::compiler::target::word Thread_optimize_stub_offset = 440;
+static constexpr dart::compiler::target::word Thread_deoptimize_entry_offset =
+    624;
+static constexpr dart::compiler::target::word Thread_deoptimize_stub_offset =
+    448;
+static constexpr dart::compiler::target::word Thread_double_abs_address_offset =
+    704;
+static constexpr dart::compiler::target::word
+    Thread_double_negate_address_offset = 696;
+static constexpr dart::compiler::target::word Thread_end_offset = 104;
+static constexpr dart::compiler::target::word
+    Thread_enter_safepoint_stub_offset = 488;
+static constexpr dart::compiler::target::word Thread_execution_state_offset =
+    1616;
+static constexpr dart::compiler::target::word
+    Thread_exit_safepoint_stub_offset = 496;
+static constexpr dart::compiler::target::word
+    Thread_exit_safepoint_ignore_unwind_in_progress_stub_offset = 504;
+static constexpr dart::compiler::target::word
+    Thread_call_native_through_safepoint_stub_offset = 512;
+static constexpr dart::compiler::target::word
+    Thread_call_native_through_safepoint_entry_point_offset = 632;
+static constexpr dart::compiler::target::word
+    Thread_fix_allocation_stub_code_offset = 248;
+static constexpr dart::compiler::target::word
+    Thread_fix_callers_target_code_offset = 240;
+static constexpr dart::compiler::target::word
+    Thread_float_absolute_address_offset = 728;
+static constexpr dart::compiler::target::word
+    Thread_float_negate_address_offset = 720;
+static constexpr dart::compiler::target::word Thread_float_not_address_offset =
+    712;
+static constexpr dart::compiler::target::word
+    Thread_float_zerow_address_offset = 736;
+static constexpr dart::compiler::target::word Thread_global_object_pool_offset =
+    1592;
+static constexpr dart::compiler::target::word
+    Thread_invoke_dart_code_stub_offset = 256;
+static constexpr dart::compiler::target::word Thread_exit_through_ffi_offset =
+    1648;
+static constexpr dart::compiler::target::word Thread_isolate_offset = 80;
+static constexpr dart::compiler::target::word Thread_isolate_group_offset =
+    1704;
+static constexpr dart::compiler::target::word Thread_field_table_values_offset =
+    128;
+static constexpr dart::compiler::target::word
+    Thread_lazy_deopt_from_return_stub_offset = 456;
+static constexpr dart::compiler::target::word
+    Thread_lazy_deopt_from_throw_stub_offset = 464;
+static constexpr dart::compiler::target::word
+    Thread_lazy_specialize_type_test_stub_offset = 480;
+static constexpr dart::compiler::target::word
+    Thread_marking_stack_block_offset = 160;
+static constexpr dart::compiler::target::word
+    Thread_megamorphic_call_checked_entry_offset = 600;
+static constexpr dart::compiler::target::word
+    Thread_switchable_call_miss_entry_offset = 608;
+static constexpr dart::compiler::target::word
+    Thread_switchable_call_miss_stub_offset = 408;
+static constexpr dart::compiler::target::word
+    Thread_no_scope_native_wrapper_entry_point_offset = 664;
+static constexpr dart::compiler::target::word
+    Thread_late_initialization_error_shared_with_fpu_regs_stub_offset = 280;
+static constexpr dart::compiler::target::word
+    Thread_late_initialization_error_shared_without_fpu_regs_stub_offset = 272;
+static constexpr dart::compiler::target::word
+    Thread_null_error_shared_with_fpu_regs_stub_offset = 296;
+static constexpr dart::compiler::target::word
+    Thread_null_error_shared_without_fpu_regs_stub_offset = 288;
+static constexpr dart::compiler::target::word
+    Thread_null_arg_error_shared_with_fpu_regs_stub_offset = 312;
+static constexpr dart::compiler::target::word
+    Thread_null_arg_error_shared_without_fpu_regs_stub_offset = 304;
+static constexpr dart::compiler::target::word
+    Thread_null_cast_error_shared_with_fpu_regs_stub_offset = 328;
+static constexpr dart::compiler::target::word
+    Thread_null_cast_error_shared_without_fpu_regs_stub_offset = 320;
+static constexpr dart::compiler::target::word
+    Thread_range_error_shared_with_fpu_regs_stub_offset = 344;
+static constexpr dart::compiler::target::word
+    Thread_range_error_shared_without_fpu_regs_stub_offset = 336;
+static constexpr dart::compiler::target::word Thread_object_null_offset = 200;
+static constexpr dart::compiler::target::word
+    Thread_predefined_symbols_address_offset = 680;
+static constexpr dart::compiler::target::word Thread_resume_pc_offset = 1600;
+static constexpr dart::compiler::target::word
+    Thread_saved_shadow_call_stack_offset = 1608;
+static constexpr dart::compiler::target::word Thread_safepoint_state_offset =
+    1624;
+static constexpr dart::compiler::target::word
+    Thread_slow_type_test_stub_offset = 472;
+static constexpr dart::compiler::target::word
+    Thread_slow_type_test_entry_point_offset = 648;
+static constexpr dart::compiler::target::word Thread_stack_limit_offset = 56;
+static constexpr dart::compiler::target::word Thread_saved_stack_limit_offset =
+    112;
+static constexpr dart::compiler::target::word
+    Thread_stack_overflow_flags_offset = 120;
+static constexpr dart::compiler::target::word
+    Thread_stack_overflow_shared_with_fpu_regs_entry_point_offset = 592;
+static constexpr dart::compiler::target::word
+    Thread_stack_overflow_shared_with_fpu_regs_stub_offset = 400;
+static constexpr dart::compiler::target::word
+    Thread_stack_overflow_shared_without_fpu_regs_entry_point_offset = 584;
+static constexpr dart::compiler::target::word
+    Thread_stack_overflow_shared_without_fpu_regs_stub_offset = 392;
+static constexpr dart::compiler::target::word Thread_store_buffer_block_offset =
+    152;
+static constexpr dart::compiler::target::word
+    Thread_top_exit_frame_info_offset = 144;
+static constexpr dart::compiler::target::word Thread_top_offset = 96;
+static constexpr dart::compiler::target::word Thread_top_resource_offset = 32;
+static constexpr dart::compiler::target::word
+    Thread_unboxed_int64_runtime_arg_offset = 184;
+static constexpr dart::compiler::target::word
+    Thread_unboxed_double_runtime_arg_offset = 192;
+static constexpr dart::compiler::target::word Thread_vm_tag_offset = 176;
+static constexpr dart::compiler::target::word Thread_write_barrier_code_offset =
+    224;
+static constexpr dart::compiler::target::word
+    Thread_write_barrier_entry_point_offset = 520;
+static constexpr dart::compiler::target::word Thread_write_barrier_mask_offset =
+    64;
+static constexpr dart::compiler::target::word Thread_heap_base_offset = 72;
+static constexpr dart::compiler::target::word Thread_callback_code_offset =
+    1632;
+static constexpr dart::compiler::target::word
+    Thread_callback_stack_return_offset = 1640;
+static constexpr dart::compiler::target::word Thread_random_offset = 1672;
+static constexpr dart::compiler::target::word
+    Thread_jump_to_frame_entry_point_offset = 640;
+static constexpr dart::compiler::target::word Thread_tsan_utils_offset = 1680;
+static constexpr dart::compiler::target::word TsanUtils_setjmp_function_offset =
+    0;
+static constexpr dart::compiler::target::word TsanUtils_setjmp_buffer_offset =
+    8;
+static constexpr dart::compiler::target::word TsanUtils_exception_pc_offset =
+    16;
+static constexpr dart::compiler::target::word TsanUtils_exception_sp_offset =
+    24;
+static constexpr dart::compiler::target::word TsanUtils_exception_fp_offset =
+    32;
+static constexpr dart::compiler::target::word TimelineStream_enabled_offset =
+    16;
+static constexpr dart::compiler::target::word TwoByteString_data_offset = 16;
+static constexpr dart::compiler::target::word Type_arguments_offset = 24;
+static constexpr dart::compiler::target::word Type_hash_offset = 32;
+static constexpr dart::compiler::target::word Type_type_class_id_offset = 40;
+static constexpr dart::compiler::target::word Type_type_state_offset = 42;
+static constexpr dart::compiler::target::word Type_nullability_offset = 43;
+static constexpr dart::compiler::target::word FunctionType_hash_offset = 56;
+static constexpr dart::compiler::target::word
+    FunctionType_named_parameter_names_offset = 48;
+static constexpr dart::compiler::target::word FunctionType_nullability_offset =
+    71;
+static constexpr dart::compiler::target::word
+    FunctionType_packed_parameter_counts_offset = 64;
+static constexpr dart::compiler::target::word
+    FunctionType_packed_type_parameter_counts_offset = 68;
+static constexpr dart::compiler::target::word
+    FunctionType_parameter_types_offset = 40;
+static constexpr dart::compiler::target::word
+    FunctionType_type_parameters_offset = 24;
+static constexpr dart::compiler::target::word
+    TypeParameter_parameterized_class_id_offset = 40;
+static constexpr dart::compiler::target::word TypeParameter_index_offset = 43;
+static constexpr dart::compiler::target::word TypeParameter_nullability_offset =
+    45;
+static constexpr dart::compiler::target::word
+    TypeArguments_instantiations_offset = 8;
+static constexpr dart::compiler::target::word TypeArguments_length_offset = 16;
+static constexpr dart::compiler::target::word TypeArguments_nullability_offset =
+    32;
+static constexpr dart::compiler::target::word TypeArguments_types_offset = 40;
+static constexpr dart::compiler::target::word TypeParameters_names_offset = 8;
+static constexpr dart::compiler::target::word TypeParameters_flags_offset = 16;
+static constexpr dart::compiler::target::word TypeParameters_bounds_offset = 24;
+static constexpr dart::compiler::target::word TypeParameters_defaults_offset =
+    32;
+static constexpr dart::compiler::target::word TypeParameter_bound_offset = 32;
+static constexpr dart::compiler::target::word TypeParameter_flags_offset = 44;
+static constexpr dart::compiler::target::word TypeRef_type_offset = 24;
+static constexpr dart::compiler::target::word TypedDataBase_length_offset = 16;
+static constexpr dart::compiler::target::word TypedDataView_data_offset = 24;
+static constexpr dart::compiler::target::word
+    TypedDataView_offset_in_bytes_offset = 32;
+static constexpr dart::compiler::target::word TypedData_data_offset = 24;
+static constexpr dart::compiler::target::word
+    UnhandledException_exception_offset = 8;
+static constexpr dart::compiler::target::word
+    UnhandledException_stacktrace_offset = 16;
+static constexpr dart::compiler::target::word UserTag_tag_offset = 16;
+static constexpr dart::compiler::target::word
+    MonomorphicSmiableCall_expected_cid_offset = 8;
+static constexpr dart::compiler::target::word
+    MonomorphicSmiableCall_entrypoint_offset = 16;
+static constexpr dart::compiler::target::word WeakProperty_key_offset = 8;
+static constexpr dart::compiler::target::word WeakProperty_value_offset = 16;
+static constexpr dart::compiler::target::word Code_entry_point_offset[] = {
+    8, 24, 16, 32};
+static constexpr dart::compiler::target::word
+    Thread_write_barrier_wrappers_thread_offset[] = {
+        -1,   -1,   -1, -1, -1, 1432, 1440, 1448, -1,   -1,   1456,
+        1464, 1472, -1, -1, -1, 1480, 1488, 1496, 1504, 1512, 1520,
+        1528, 1536, -1, -1, -1, -1,   1544, 1552, 1560, 1568};
+static constexpr dart::compiler::target::word AbstractType_InstanceSize = 24;
+static constexpr dart::compiler::target::word ApiError_InstanceSize = 16;
+static constexpr dart::compiler::target::word Array_header_size = 24;
+static constexpr dart::compiler::target::word Bool_InstanceSize = 16;
+static constexpr dart::compiler::target::word Capability_InstanceSize = 16;
+static constexpr dart::compiler::target::word Class_InstanceSize = 184;
+static constexpr dart::compiler::target::word Closure_InstanceSize = 56;
+static constexpr dart::compiler::target::word ClosureData_InstanceSize = 40;
+static constexpr dart::compiler::target::word CodeSourceMap_HeaderSize = 16;
+static constexpr dart::compiler::target::word
+    CompressedStackMaps_ObjectHeaderSize = 8;
+static constexpr dart::compiler::target::word
+    CompressedStackMaps_PayloadHeaderSize = 4;
+static constexpr dart::compiler::target::word Context_header_size = 24;
+static constexpr dart::compiler::target::word Double_InstanceSize = 16;
+static constexpr dart::compiler::target::word DynamicLibrary_InstanceSize = 16;
+static constexpr dart::compiler::target::word
+    ExternalOneByteString_InstanceSize = 32;
+static constexpr dart::compiler::target::word
+    ExternalTwoByteString_InstanceSize = 32;
+static constexpr dart::compiler::target::word ExternalTypedData_InstanceSize =
+    24;
+static constexpr dart::compiler::target::word FfiTrampolineData_InstanceSize =
+    48;
+static constexpr dart::compiler::target::word Field_InstanceSize = 96;
+static constexpr dart::compiler::target::word Float32x4_InstanceSize = 24;
+static constexpr dart::compiler::target::word Float64x2_InstanceSize = 24;
+static constexpr dart::compiler::target::word Function_InstanceSize = 128;
+static constexpr dart::compiler::target::word FunctionType_InstanceSize = 72;
+static constexpr dart::compiler::target::word FutureOr_InstanceSize = 16;
+static constexpr dart::compiler::target::word GrowableObjectArray_InstanceSize =
+    32;
+static constexpr dart::compiler::target::word ICData_InstanceSize = 56;
+static constexpr dart::compiler::target::word Instance_InstanceSize = 8;
+static constexpr dart::compiler::target::word Instructions_UnalignedHeaderSize =
+    16;
+static constexpr dart::compiler::target::word
+    InstructionsSection_UnalignedHeaderSize = 40;
+static constexpr dart::compiler::target::word InstructionsTable_InstanceSize =
+    48;
+static constexpr dart::compiler::target::word Int32x4_InstanceSize = 24;
+static constexpr dart::compiler::target::word Integer_InstanceSize = 8;
+static constexpr dart::compiler::target::word KernelProgramInfo_InstanceSize =
+    120;
+static constexpr dart::compiler::target::word LanguageError_InstanceSize = 48;
+static constexpr dart::compiler::target::word Library_InstanceSize = 168;
+static constexpr dart::compiler::target::word LibraryPrefix_InstanceSize = 40;
+static constexpr dart::compiler::target::word LinkedHashBase_InstanceSize = 56;
+static constexpr dart::compiler::target::word MegamorphicCache_InstanceSize =
+    48;
+static constexpr dart::compiler::target::word Mint_InstanceSize = 16;
+static constexpr dart::compiler::target::word MirrorReference_InstanceSize = 16;
+static constexpr dart::compiler::target::word
+    MonomorphicSmiableCall_InstanceSize = 24;
+static constexpr dart::compiler::target::word Namespace_InstanceSize = 40;
+static constexpr dart::compiler::target::word NativeArguments_StructSize = 32;
+static constexpr dart::compiler::target::word Number_InstanceSize = 8;
+static constexpr dart::compiler::target::word Object_InstanceSize = 8;
+static constexpr dart::compiler::target::word PatchClass_InstanceSize = 48;
+static constexpr dart::compiler::target::word PcDescriptors_HeaderSize = 16;
+static constexpr dart::compiler::target::word Pointer_InstanceSize = 24;
+static constexpr dart::compiler::target::word ReceivePort_InstanceSize = 24;
+static constexpr dart::compiler::target::word RegExp_InstanceSize = 120;
+static constexpr dart::compiler::target::word Script_InstanceSize = 80;
+static constexpr dart::compiler::target::word SendPort_InstanceSize = 24;
+static constexpr dart::compiler::target::word Sentinel_InstanceSize = 8;
+static constexpr dart::compiler::target::word SingleTargetCache_InstanceSize =
+    32;
+static constexpr dart::compiler::target::word StackTrace_InstanceSize = 40;
+static constexpr dart::compiler::target::word String_InstanceSize = 16;
+static constexpr dart::compiler::target::word SubtypeTestCache_InstanceSize =
+    16;
+static constexpr dart::compiler::target::word LoadingUnit_InstanceSize = 32;
+static constexpr dart::compiler::target::word
+    TransferableTypedData_InstanceSize = 8;
+static constexpr dart::compiler::target::word Type_InstanceSize = 48;
+static constexpr dart::compiler::target::word TypeParameter_InstanceSize = 48;
+static constexpr dart::compiler::target::word TypeParameters_InstanceSize = 40;
+static constexpr dart::compiler::target::word TypeRef_InstanceSize = 32;
+static constexpr dart::compiler::target::word TypedData_HeaderSize = 24;
+static constexpr dart::compiler::target::word TypedDataBase_InstanceSize = 24;
+static constexpr dart::compiler::target::word TypedDataView_InstanceSize = 40;
+static constexpr dart::compiler::target::word UnhandledException_InstanceSize =
+    24;
+static constexpr dart::compiler::target::word UnlinkedCall_InstanceSize = 32;
+static constexpr dart::compiler::target::word UnwindError_InstanceSize = 24;
+static constexpr dart::compiler::target::word UserTag_InstanceSize = 32;
+static constexpr dart::compiler::target::word WeakProperty_InstanceSize = 32;
+static constexpr dart::compiler::target::word
+    WeakSerializationReference_InstanceSize = 24;
+#endif  // defined(TARGET_ARCH_RISCV64) && !defined(DART_COMPRESSED_POINTERS)
+
 #endif  // !defined(PRODUCT)
 
 #if !defined(PRODUCT)
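The next hunk adds the AOT_-prefixed variants of the same tables, i.e. the layouts assumed when compiling ahead-of-time rather than for the JIT runtime. When the host and target layouts happen to coincide, constants extracted this way can be cross-checked against the real field layout at compile time; a hypothetical standalone check (not Dart SDK code; the struct and constant are illustrative assumptions) might look like:

// Illustrative sketch only -- not Dart SDK code. The struct and the
// constant below are hypothetical stand-ins for a real Thread field
// and its entry in an extracted offset table.
#include <cstddef>

namespace sketch {
struct Thread {
  void* stack_limit;  // toy stand-in for the first Thread field
};

// Value that would come from an extracted table like the ones above.
constexpr std::size_t kThreadStackLimitOffset = 0;

static_assert(offsetof(Thread, stack_limit) == kThreadStackLimitOffset,
              "extracted offset must match the real field layout");
}  // namespace sketch
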
@@ -9991,6 +12259,1275 @@
     AOT_WeakSerializationReference_InstanceSize = 16;
 #endif  // defined(TARGET_ARCH_ARM64) && defined(DART_COMPRESSED_POINTERS)
 
+#if defined(TARGET_ARCH_RISCV32) && !defined(DART_COMPRESSED_POINTERS)
+static constexpr dart::compiler::target::word AOT_Closure_entry_point_offset =
+    28;
+static constexpr dart::compiler::target::word AOT_Array_elements_start_offset =
+    12;
+static constexpr dart::compiler::target::word AOT_Array_element_size = 4;
+static constexpr dart::compiler::target::word
+    AOT_ClassTable_elements_start_offset = 0;
+static constexpr dart::compiler::target::word AOT_ClassTable_element_size = 1;
+static constexpr dart::compiler::target::word AOT_Code_elements_start_offset =
+    88;
+static constexpr dart::compiler::target::word AOT_Code_element_size = 4;
+static constexpr dart::compiler::target::word
+    AOT_Context_elements_start_offset = 12;
+static constexpr dart::compiler::target::word AOT_Context_element_size = 4;
+static constexpr dart::compiler::target::word
+    AOT_ContextScope_elements_start_offset = 12;
+static constexpr dart::compiler::target::word AOT_ContextScope_element_size =
+    32;
+static constexpr dart::compiler::target::word
+    AOT_ExceptionHandlers_elements_start_offset = 12;
+static constexpr dart::compiler::target::word
+    AOT_ExceptionHandlers_element_size = 12;
+static constexpr dart::compiler::target::word
+    AOT_ObjectPool_elements_start_offset = 8;
+static constexpr dart::compiler::target::word AOT_ObjectPool_element_size = 4;
+static constexpr dart::compiler::target::word
+    AOT_OneByteString_elements_start_offset = 12;
+static constexpr dart::compiler::target::word AOT_OneByteString_element_size =
+    1;
+static constexpr dart::compiler::target::word
+    AOT_TypeArguments_elements_start_offset = 20;
+static constexpr dart::compiler::target::word AOT_TypeArguments_element_size =
+    4;
+static constexpr dart::compiler::target::word
+    AOT_TwoByteString_elements_start_offset = 12;
+static constexpr dart::compiler::target::word AOT_TwoByteString_element_size =
+    2;
+static constexpr dart::compiler::target::word AOT_Array_kMaxElements =
+    268435455;
+static constexpr dart::compiler::target::word AOT_Array_kMaxNewSpaceElements =
+    65533;
+static constexpr dart::compiler::target::word AOT_Context_kMaxElements =
+    268435455;
+static constexpr dart::compiler::target::word
+    AOT_Instructions_kMonomorphicEntryOffsetJIT = 6;
+static constexpr dart::compiler::target::word
+    AOT_Instructions_kPolymorphicEntryOffsetJIT = 42;
+static constexpr dart::compiler::target::word
+    AOT_Instructions_kMonomorphicEntryOffsetAOT = 6;
+static constexpr dart::compiler::target::word
+    AOT_Instructions_kPolymorphicEntryOffsetAOT = 16;
+static constexpr dart::compiler::target::word
+    AOT_Instructions_kBarePayloadAlignment = 4;
+static constexpr dart::compiler::target::word
+    AOT_Instructions_kNonBarePayloadAlignment = 4;
+static constexpr dart::compiler::target::word AOT_OldPage_kBytesPerCardLog2 = 9;
+static constexpr dart::compiler::target::word
+    AOT_NativeEntry_kNumCallWrapperArguments = 2;
+static constexpr dart::compiler::target::word AOT_String_kMaxElements =
+    536870911;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_kFunctionTypeArguments = 5;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_kInstanceCidOrSignature = 1;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_kDestinationType = 2;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_kInstanceDelayedFunctionTypeArguments = 7;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_kInstanceParentFunctionTypeArguments = 6;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_kInstanceTypeArguments = 3;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_kInstantiatorTypeArguments = 4;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_kTestEntryLength = 8;
+static constexpr dart::compiler::target::word AOT_SubtypeTestCache_kTestResult =
+    0;
+static constexpr dart::compiler::target::word AOT_TypeArguments_kMaxElements =
+    268435455;
+static constexpr dart::compiler::target::word
+    AOT_AbstractType_type_test_stub_entry_point_offset = 4;
+static constexpr dart::compiler::target::word
+    AOT_ArgumentsDescriptor_count_offset = 16;
+static constexpr dart::compiler::target::word
+    AOT_ArgumentsDescriptor_size_offset = 20;
+static constexpr dart::compiler::target::word
+    AOT_ArgumentsDescriptor_first_named_entry_offset = 28;
+static constexpr dart::compiler::target::word
+    AOT_ArgumentsDescriptor_named_entry_size = 8;
+static constexpr dart::compiler::target::word
+    AOT_ArgumentsDescriptor_name_offset = 0;
+static constexpr dart::compiler::target::word
+    AOT_ArgumentsDescriptor_position_offset = 4;
+static constexpr dart::compiler::target::word
+    AOT_ArgumentsDescriptor_positional_count_offset = 24;
+static constexpr dart::compiler::target::word
+    AOT_ArgumentsDescriptor_type_args_len_offset = 12;
+static constexpr dart::compiler::target::word AOT_Array_data_offset = 12;
+static constexpr dart::compiler::target::word AOT_Array_length_offset = 8;
+static constexpr dart::compiler::target::word AOT_Array_tags_offset = 0;
+static constexpr dart::compiler::target::word AOT_Array_type_arguments_offset =
+    4;
+static constexpr dart::compiler::target::word
+    AOT_Class_declaration_type_offset = 52;
+static constexpr dart::compiler::target::word
+    AOT_Class_num_type_arguments_offset = 72;
+static constexpr dart::compiler::target::word AOT_Class_super_type_offset = 44;
+static constexpr dart::compiler::target::word
+    AOT_Class_host_type_arguments_field_offset_in_words_offset = 84;
+static constexpr dart::compiler::target::word
+    AOT_SharedClassTable_class_heap_stats_table_offset = 0;
+static constexpr dart::compiler::target::word AOT_Closure_context_offset = 20;
+static constexpr dart::compiler::target::word
+    AOT_Closure_delayed_type_arguments_offset = 12;
+static constexpr dart::compiler::target::word AOT_Closure_function_offset = 16;
+static constexpr dart::compiler::target::word
+    AOT_Closure_function_type_arguments_offset = 8;
+static constexpr dart::compiler::target::word AOT_Closure_hash_offset = 24;
+static constexpr dart::compiler::target::word
+    AOT_Closure_instantiator_type_arguments_offset = 4;
+static constexpr dart::compiler::target::word
+    AOT_ClosureData_default_type_arguments_kind_offset = 16;
+static constexpr dart::compiler::target::word AOT_Code_object_pool_offset = 20;
+static constexpr dart::compiler::target::word
+    AOT_Code_saved_instructions_offset = 24;
+static constexpr dart::compiler::target::word AOT_Code_owner_offset = 28;
+static constexpr dart::compiler::target::word AOT_Context_num_variables_offset =
+    4;
+static constexpr dart::compiler::target::word AOT_Context_parent_offset = 8;
+static constexpr dart::compiler::target::word AOT_Double_value_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_ExternalOneByteString_external_data_offset = 12;
+static constexpr dart::compiler::target::word
+    AOT_ExternalTwoByteString_external_data_offset = 12;
+static constexpr dart::compiler::target::word AOT_Float32x4_value_offset = 8;
+static constexpr dart::compiler::target::word AOT_Float64x2_value_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_Field_initializer_function_offset = 16;
+static constexpr dart::compiler::target::word
+    AOT_Field_host_offset_or_field_id_offset = 20;
+static constexpr dart::compiler::target::word AOT_Field_guarded_cid_offset = 40;
+static constexpr dart::compiler::target::word
+    AOT_Field_guarded_list_length_in_object_offset_offset = 44;
+static constexpr dart::compiler::target::word
+    AOT_Field_guarded_list_length_offset = 24;
+static constexpr dart::compiler::target::word AOT_Field_is_nullable_offset = 42;
+static constexpr dart::compiler::target::word AOT_Field_kind_bits_offset = 46;
+static constexpr dart::compiler::target::word AOT_Function_code_offset = 32;
+static constexpr dart::compiler::target::word AOT_Function_data_offset = 24;
+static constexpr dart::compiler::target::word
+    AOT_Function_entry_point_offset[] = {4, 8};
+static constexpr dart::compiler::target::word AOT_Function_kind_tag_offset = 36;
+static constexpr dart::compiler::target::word
+    AOT_Function_packed_fields_offset = 40;
+static constexpr dart::compiler::target::word AOT_Function_signature_offset =
+    20;
+static constexpr dart::compiler::target::word
+    AOT_FutureOr_type_arguments_offset = 4;
+static constexpr dart::compiler::target::word
+    AOT_GrowableObjectArray_data_offset = 12;
+static constexpr dart::compiler::target::word
+    AOT_GrowableObjectArray_length_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_GrowableObjectArray_type_arguments_offset = 4;
+static constexpr dart::compiler::target::word AOT_OldPage_card_table_offset =
+    20;
+static constexpr dart::compiler::target::word
+    AOT_CallSiteData_arguments_descriptor_offset = 8;
+static constexpr dart::compiler::target::word AOT_ICData_NumArgsTestedMask = 3;
+static constexpr dart::compiler::target::word AOT_ICData_NumArgsTestedShift = 0;
+static constexpr dart::compiler::target::word AOT_ICData_entries_offset = 12;
+static constexpr dart::compiler::target::word AOT_ICData_owner_offset = 16;
+static constexpr dart::compiler::target::word AOT_ICData_state_bits_offset = 20;
+static constexpr dart::compiler::target::word AOT_Int32x4_value_offset = 8;
+static constexpr dart::compiler::target::word AOT_Isolate_current_tag_offset =
+    24;
+static constexpr dart::compiler::target::word AOT_Isolate_default_tag_offset =
+    28;
+static constexpr dart::compiler::target::word AOT_Isolate_ic_miss_code_offset =
+    32;
+static constexpr dart::compiler::target::word
+    AOT_IsolateGroup_object_store_offset = 20;
+static constexpr dart::compiler::target::word
+    AOT_IsolateGroup_shared_class_table_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_IsolateGroup_cached_class_table_table_offset = 16;
+static constexpr dart::compiler::target::word AOT_Isolate_single_step_offset =
+    40;
+static constexpr dart::compiler::target::word AOT_Isolate_user_tag_offset = 20;
+static constexpr dart::compiler::target::word AOT_LinkedHashBase_data_offset =
+    12;
+static constexpr dart::compiler::target::word
+    AOT_ImmutableLinkedHashBase_data_offset = 12;
+static constexpr dart::compiler::target::word
+    AOT_LinkedHashBase_deleted_keys_offset = 20;
+static constexpr dart::compiler::target::word
+    AOT_LinkedHashBase_hash_mask_offset = 8;
+static constexpr dart::compiler::target::word AOT_LinkedHashBase_index_offset =
+    24;
+static constexpr dart::compiler::target::word
+    AOT_LinkedHashBase_type_arguments_offset = 4;
+static constexpr dart::compiler::target::word
+    AOT_LinkedHashBase_used_data_offset = 16;
+static constexpr dart::compiler::target::word AOT_LocalHandle_ptr_offset = 0;
+static constexpr dart::compiler::target::word
+    AOT_MarkingStackBlock_pointers_offset = 8;
+static constexpr dart::compiler::target::word AOT_MarkingStackBlock_top_offset =
+    4;
+static constexpr dart::compiler::target::word
+    AOT_MegamorphicCache_buckets_offset = 12;
+static constexpr dart::compiler::target::word AOT_MegamorphicCache_mask_offset =
+    16;
+static constexpr dart::compiler::target::word AOT_Mint_value_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_NativeArguments_argc_tag_offset = 4;
+static constexpr dart::compiler::target::word AOT_NativeArguments_argv_offset =
+    8;
+static constexpr dart::compiler::target::word
+    AOT_NativeArguments_retval_offset = 12;
+static constexpr dart::compiler::target::word
+    AOT_NativeArguments_thread_offset = 0;
+static constexpr dart::compiler::target::word
+    AOT_ObjectStore_double_type_offset = 160;
+static constexpr dart::compiler::target::word AOT_ObjectStore_int_type_offset =
+    116;
+static constexpr dart::compiler::target::word
+    AOT_ObjectStore_string_type_offset = 180;
+static constexpr dart::compiler::target::word AOT_ObjectStore_type_type_offset =
+    104;
+static constexpr dart::compiler::target::word AOT_OneByteString_data_offset =
+    12;
+static constexpr dart::compiler::target::word
+    AOT_PointerBase_data_field_offset = 4;
+static constexpr dart::compiler::target::word
+    AOT_Pointer_type_arguments_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_SingleTargetCache_entry_point_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_SingleTargetCache_lower_limit_offset = 12;
+static constexpr dart::compiler::target::word
+    AOT_SingleTargetCache_target_offset = 4;
+static constexpr dart::compiler::target::word
+    AOT_SingleTargetCache_upper_limit_offset = 14;
+static constexpr dart::compiler::target::word
+    AOT_StoreBufferBlock_pointers_offset = 8;
+static constexpr dart::compiler::target::word AOT_StoreBufferBlock_top_offset =
+    4;
+static constexpr dart::compiler::target::word AOT_String_hash_offset = 8;
+static constexpr dart::compiler::target::word AOT_String_length_offset = 4;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_cache_offset = 4;
+static constexpr dart::compiler::target::word
+    AOT_Thread_AllocateArray_entry_point_offset = 384;
+static constexpr dart::compiler::target::word
+    AOT_Thread_active_exception_offset = 800;
+static constexpr dart::compiler::target::word
+    AOT_Thread_active_stacktrace_offset = 804;
+static constexpr dart::compiler::target::word
+    AOT_Thread_array_write_barrier_code_offset = 128;
+static constexpr dart::compiler::target::word
+    AOT_Thread_array_write_barrier_entry_point_offset = 276;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_mint_with_fpu_regs_entry_point_offset = 284;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_mint_with_fpu_regs_stub_offset = 188;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_mint_without_fpu_regs_entry_point_offset = 288;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_mint_without_fpu_regs_stub_offset = 192;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_object_entry_point_offset = 292;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_object_stub_offset = 196;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_object_parameterized_entry_point_offset = 296;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_object_parameterized_stub_offset = 200;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_object_slow_entry_point_offset = 300;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_object_slow_stub_offset = 204;
+static constexpr dart::compiler::target::word AOT_Thread_api_top_scope_offset =
+    840;
+static constexpr dart::compiler::target::word
+    AOT_Thread_auto_scope_native_wrapper_entry_point_offset = 348;
+static constexpr dart::compiler::target::word AOT_Thread_bool_false_offset =
+    120;
+static constexpr dart::compiler::target::word AOT_Thread_bool_true_offset = 116;
+static constexpr dart::compiler::target::word
+    AOT_Thread_bootstrap_native_wrapper_entry_point_offset = 340;
+static constexpr dart::compiler::target::word
+    AOT_Thread_call_to_runtime_entry_point_offset = 280;
+static constexpr dart::compiler::target::word
+    AOT_Thread_call_to_runtime_stub_offset = 144;
+static constexpr dart::compiler::target::word AOT_Thread_dart_stream_offset =
+    864;
+static constexpr dart::compiler::target::word
+    AOT_Thread_dispatch_table_array_offset = 44;
+static constexpr dart::compiler::target::word
+    AOT_Thread_double_truncate_round_supported_offset = 844;
+static constexpr dart::compiler::target::word AOT_Thread_optimize_entry_offset =
+    320;
+static constexpr dart::compiler::target::word AOT_Thread_optimize_stub_offset =
+    232;
+static constexpr dart::compiler::target::word
+    AOT_Thread_deoptimize_entry_offset = 324;
+static constexpr dart::compiler::target::word
+    AOT_Thread_deoptimize_stub_offset = 236;
+static constexpr dart::compiler::target::word
+    AOT_Thread_double_abs_address_offset = 364;
+static constexpr dart::compiler::target::word
+    AOT_Thread_double_negate_address_offset = 360;
+static constexpr dart::compiler::target::word AOT_Thread_end_offset = 52;
+static constexpr dart::compiler::target::word
+    AOT_Thread_enter_safepoint_stub_offset = 256;
+static constexpr dart::compiler::target::word
+    AOT_Thread_execution_state_offset = 820;
+static constexpr dart::compiler::target::word
+    AOT_Thread_exit_safepoint_stub_offset = 260;
+static constexpr dart::compiler::target::word
+    AOT_Thread_exit_safepoint_ignore_unwind_in_progress_stub_offset = 264;
+static constexpr dart::compiler::target::word
+    AOT_Thread_call_native_through_safepoint_stub_offset = 268;
+static constexpr dart::compiler::target::word
+    AOT_Thread_call_native_through_safepoint_entry_point_offset = 328;
+static constexpr dart::compiler::target::word
+    AOT_Thread_fix_allocation_stub_code_offset = 136;
+static constexpr dart::compiler::target::word
+    AOT_Thread_fix_callers_target_code_offset = 132;
+static constexpr dart::compiler::target::word
+    AOT_Thread_float_absolute_address_offset = 376;
+static constexpr dart::compiler::target::word
+    AOT_Thread_float_negate_address_offset = 372;
+static constexpr dart::compiler::target::word
+    AOT_Thread_float_not_address_offset = 368;
+static constexpr dart::compiler::target::word
+    AOT_Thread_float_zerow_address_offset = 380;
+static constexpr dart::compiler::target::word
+    AOT_Thread_global_object_pool_offset = 808;
+static constexpr dart::compiler::target::word
+    AOT_Thread_invoke_dart_code_stub_offset = 140;
+static constexpr dart::compiler::target::word
+    AOT_Thread_exit_through_ffi_offset = 836;
+static constexpr dart::compiler::target::word AOT_Thread_isolate_offset = 40;
+static constexpr dart::compiler::target::word AOT_Thread_isolate_group_offset =
+    868;
+static constexpr dart::compiler::target::word
+    AOT_Thread_field_table_values_offset = 64;
+static constexpr dart::compiler::target::word
+    AOT_Thread_lazy_deopt_from_return_stub_offset = 240;
+static constexpr dart::compiler::target::word
+    AOT_Thread_lazy_deopt_from_throw_stub_offset = 244;
+static constexpr dart::compiler::target::word
+    AOT_Thread_lazy_specialize_type_test_stub_offset = 252;
+static constexpr dart::compiler::target::word
+    AOT_Thread_marking_stack_block_offset = 80;
+static constexpr dart::compiler::target::word
+    AOT_Thread_megamorphic_call_checked_entry_offset = 312;
+static constexpr dart::compiler::target::word
+    AOT_Thread_switchable_call_miss_entry_offset = 316;
+static constexpr dart::compiler::target::word
+    AOT_Thread_switchable_call_miss_stub_offset = 216;
+static constexpr dart::compiler::target::word
+    AOT_Thread_no_scope_native_wrapper_entry_point_offset = 344;
+static constexpr dart::compiler::target::word
+    AOT_Thread_late_initialization_error_shared_with_fpu_regs_stub_offset = 152;
+static constexpr dart::compiler::target::word
+    AOT_Thread_late_initialization_error_shared_without_fpu_regs_stub_offset =
+        148;
+static constexpr dart::compiler::target::word
+    AOT_Thread_null_error_shared_with_fpu_regs_stub_offset = 160;
+static constexpr dart::compiler::target::word
+    AOT_Thread_null_error_shared_without_fpu_regs_stub_offset = 156;
+static constexpr dart::compiler::target::word
+    AOT_Thread_null_arg_error_shared_with_fpu_regs_stub_offset = 168;
+static constexpr dart::compiler::target::word
+    AOT_Thread_null_arg_error_shared_without_fpu_regs_stub_offset = 164;
+static constexpr dart::compiler::target::word
+    AOT_Thread_null_cast_error_shared_with_fpu_regs_stub_offset = 176;
+static constexpr dart::compiler::target::word
+    AOT_Thread_null_cast_error_shared_without_fpu_regs_stub_offset = 172;
+static constexpr dart::compiler::target::word
+    AOT_Thread_range_error_shared_with_fpu_regs_stub_offset = 184;
+static constexpr dart::compiler::target::word
+    AOT_Thread_range_error_shared_without_fpu_regs_stub_offset = 180;
+static constexpr dart::compiler::target::word AOT_Thread_object_null_offset =
+    112;
+static constexpr dart::compiler::target::word
+    AOT_Thread_predefined_symbols_address_offset = 352;
+static constexpr dart::compiler::target::word AOT_Thread_resume_pc_offset = 812;
+static constexpr dart::compiler::target::word
+    AOT_Thread_saved_shadow_call_stack_offset = 816;
+static constexpr dart::compiler::target::word
+    AOT_Thread_safepoint_state_offset = 824;
+static constexpr dart::compiler::target::word
+    AOT_Thread_slow_type_test_stub_offset = 248;
+static constexpr dart::compiler::target::word
+    AOT_Thread_slow_type_test_entry_point_offset = 336;
+static constexpr dart::compiler::target::word AOT_Thread_stack_limit_offset =
+    28;
+static constexpr dart::compiler::target::word
+    AOT_Thread_saved_stack_limit_offset = 56;
+static constexpr dart::compiler::target::word
+    AOT_Thread_stack_overflow_flags_offset = 60;
+static constexpr dart::compiler::target::word
+    AOT_Thread_stack_overflow_shared_with_fpu_regs_entry_point_offset = 308;
+static constexpr dart::compiler::target::word
+    AOT_Thread_stack_overflow_shared_with_fpu_regs_stub_offset = 212;
+static constexpr dart::compiler::target::word
+    AOT_Thread_stack_overflow_shared_without_fpu_regs_entry_point_offset = 304;
+static constexpr dart::compiler::target::word
+    AOT_Thread_stack_overflow_shared_without_fpu_regs_stub_offset = 208;
+static constexpr dart::compiler::target::word
+    AOT_Thread_store_buffer_block_offset = 76;
+static constexpr dart::compiler::target::word
+    AOT_Thread_top_exit_frame_info_offset = 72;
+static constexpr dart::compiler::target::word AOT_Thread_top_offset = 48;
+static constexpr dart::compiler::target::word AOT_Thread_top_resource_offset =
+    16;
+static constexpr dart::compiler::target::word
+    AOT_Thread_unboxed_int64_runtime_arg_offset = 96;
+static constexpr dart::compiler::target::word
+    AOT_Thread_unboxed_double_runtime_arg_offset = 104;
+static constexpr dart::compiler::target::word AOT_Thread_vm_tag_offset = 88;
+static constexpr dart::compiler::target::word
+    AOT_Thread_write_barrier_code_offset = 124;
+static constexpr dart::compiler::target::word
+    AOT_Thread_write_barrier_entry_point_offset = 272;
+static constexpr dart::compiler::target::word
+    AOT_Thread_write_barrier_mask_offset = 32;
+static constexpr dart::compiler::target::word AOT_Thread_heap_base_offset = 36;
+static constexpr dart::compiler::target::word AOT_Thread_callback_code_offset =
+    828;
+static constexpr dart::compiler::target::word
+    AOT_Thread_callback_stack_return_offset = 832;
+static constexpr dart::compiler::target::word AOT_Thread_random_offset = 848;
+static constexpr dart::compiler::target::word
+    AOT_Thread_jump_to_frame_entry_point_offset = 332;
+static constexpr dart::compiler::target::word AOT_Thread_tsan_utils_offset =
+    856;
+static constexpr dart::compiler::target::word
+    AOT_TsanUtils_setjmp_function_offset = 0;
+static constexpr dart::compiler::target::word
+    AOT_TsanUtils_setjmp_buffer_offset = 4;
+static constexpr dart::compiler::target::word
+    AOT_TsanUtils_exception_pc_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_TsanUtils_exception_sp_offset = 12;
+static constexpr dart::compiler::target::word
+    AOT_TsanUtils_exception_fp_offset = 16;
+static constexpr dart::compiler::target::word
+    AOT_TimelineStream_enabled_offset = 8;
+static constexpr dart::compiler::target::word AOT_TwoByteString_data_offset =
+    12;
+static constexpr dart::compiler::target::word AOT_Type_arguments_offset = 12;
+static constexpr dart::compiler::target::word AOT_Type_hash_offset = 16;
+static constexpr dart::compiler::target::word AOT_Type_type_class_id_offset =
+    20;
+static constexpr dart::compiler::target::word AOT_Type_type_state_offset = 22;
+static constexpr dart::compiler::target::word AOT_Type_nullability_offset = 23;
+static constexpr dart::compiler::target::word AOT_FunctionType_hash_offset = 28;
+static constexpr dart::compiler::target::word
+    AOT_FunctionType_named_parameter_names_offset = 24;
+static constexpr dart::compiler::target::word
+    AOT_FunctionType_nullability_offset = 39;
+static constexpr dart::compiler::target::word
+    AOT_FunctionType_packed_parameter_counts_offset = 32;
+static constexpr dart::compiler::target::word
+    AOT_FunctionType_packed_type_parameter_counts_offset = 36;
+static constexpr dart::compiler::target::word
+    AOT_FunctionType_parameter_types_offset = 20;
+static constexpr dart::compiler::target::word
+    AOT_FunctionType_type_parameters_offset = 12;
+static constexpr dart::compiler::target::word
+    AOT_TypeParameter_parameterized_class_id_offset = 20;
+static constexpr dart::compiler::target::word AOT_TypeParameter_index_offset =
+    23;
+static constexpr dart::compiler::target::word
+    AOT_TypeParameter_nullability_offset = 25;
+static constexpr dart::compiler::target::word
+    AOT_TypeArguments_instantiations_offset = 4;
+static constexpr dart::compiler::target::word AOT_TypeArguments_length_offset =
+    8;
+static constexpr dart::compiler::target::word
+    AOT_TypeArguments_nullability_offset = 16;
+static constexpr dart::compiler::target::word AOT_TypeArguments_types_offset =
+    20;
+static constexpr dart::compiler::target::word AOT_TypeParameters_names_offset =
+    4;
+static constexpr dart::compiler::target::word AOT_TypeParameters_flags_offset =
+    8;
+static constexpr dart::compiler::target::word AOT_TypeParameters_bounds_offset =
+    12;
+static constexpr dart::compiler::target::word
+    AOT_TypeParameters_defaults_offset = 16;
+static constexpr dart::compiler::target::word AOT_TypeParameter_bound_offset =
+    16;
+static constexpr dart::compiler::target::word AOT_TypeParameter_flags_offset =
+    24;
+static constexpr dart::compiler::target::word AOT_TypeRef_type_offset = 12;
+static constexpr dart::compiler::target::word AOT_TypedDataBase_length_offset =
+    8;
+static constexpr dart::compiler::target::word AOT_TypedDataView_data_offset =
+    12;
+static constexpr dart::compiler::target::word
+    AOT_TypedDataView_offset_in_bytes_offset = 16;
+static constexpr dart::compiler::target::word AOT_TypedData_data_offset = 12;
+static constexpr dart::compiler::target::word
+    AOT_UnhandledException_exception_offset = 4;
+static constexpr dart::compiler::target::word
+    AOT_UnhandledException_stacktrace_offset = 8;
+static constexpr dart::compiler::target::word AOT_UserTag_tag_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_MonomorphicSmiableCall_expected_cid_offset = 4;
+static constexpr dart::compiler::target::word
+    AOT_MonomorphicSmiableCall_entrypoint_offset = 8;
+static constexpr dart::compiler::target::word AOT_WeakProperty_key_offset = 4;
+static constexpr dart::compiler::target::word AOT_WeakProperty_value_offset = 8;
+static constexpr dart::compiler::target::word AOT_Code_entry_point_offset[] = {
+    4, 12, 8, 16};
+static constexpr dart::compiler::target::word
+    AOT_Thread_write_barrier_wrappers_thread_offset[] = {
+        -1,  -1,  -1, -1, -1, 728, 732, 736, -1,  -1,  740,
+        744, 748, -1, -1, -1, 752, 756, 760, 764, 768, 772,
+        776, 780, -1, -1, -1, -1,  784, 788, 792, 796};
+static constexpr dart::compiler::target::word AOT_AbstractType_InstanceSize =
+    12;
+static constexpr dart::compiler::target::word AOT_ApiError_InstanceSize = 8;
+static constexpr dart::compiler::target::word AOT_Array_header_size = 12;
+static constexpr dart::compiler::target::word AOT_Bool_InstanceSize = 8;
+static constexpr dart::compiler::target::word AOT_Capability_InstanceSize = 16;
+static constexpr dart::compiler::target::word AOT_Class_InstanceSize = 92;
+static constexpr dart::compiler::target::word AOT_Closure_InstanceSize = 32;
+static constexpr dart::compiler::target::word AOT_ClosureData_InstanceSize = 20;
+static constexpr dart::compiler::target::word AOT_CodeSourceMap_HeaderSize = 8;
+static constexpr dart::compiler::target::word
+    AOT_CompressedStackMaps_ObjectHeaderSize = 4;
+static constexpr dart::compiler::target::word
+    AOT_CompressedStackMaps_PayloadHeaderSize = 4;
+static constexpr dart::compiler::target::word AOT_Context_header_size = 12;
+static constexpr dart::compiler::target::word AOT_Double_InstanceSize = 16;
+static constexpr dart::compiler::target::word AOT_DynamicLibrary_InstanceSize =
+    8;
+static constexpr dart::compiler::target::word
+    AOT_ExternalOneByteString_InstanceSize = 20;
+static constexpr dart::compiler::target::word
+    AOT_ExternalTwoByteString_InstanceSize = 20;
+static constexpr dart::compiler::target::word
+    AOT_ExternalTypedData_InstanceSize = 12;
+static constexpr dart::compiler::target::word
+    AOT_FfiTrampolineData_InstanceSize = 28;
+static constexpr dart::compiler::target::word AOT_Field_InstanceSize = 48;
+static constexpr dart::compiler::target::word AOT_Float32x4_InstanceSize = 24;
+static constexpr dart::compiler::target::word AOT_Float64x2_InstanceSize = 24;
+static constexpr dart::compiler::target::word AOT_Function_InstanceSize = 44;
+static constexpr dart::compiler::target::word AOT_FunctionType_InstanceSize =
+    40;
+static constexpr dart::compiler::target::word AOT_FutureOr_InstanceSize = 8;
+static constexpr dart::compiler::target::word
+    AOT_GrowableObjectArray_InstanceSize = 16;
+static constexpr dart::compiler::target::word AOT_ICData_InstanceSize = 24;
+static constexpr dart::compiler::target::word AOT_Instance_InstanceSize = 4;
+static constexpr dart::compiler::target::word
+    AOT_Instructions_UnalignedHeaderSize = 8;
+static constexpr dart::compiler::target::word
+    AOT_InstructionsSection_UnalignedHeaderSize = 20;
+static constexpr dart::compiler::target::word
+    AOT_InstructionsTable_InstanceSize = 24;
+static constexpr dart::compiler::target::word AOT_Int32x4_InstanceSize = 24;
+static constexpr dart::compiler::target::word AOT_Integer_InstanceSize = 4;
+static constexpr dart::compiler::target::word
+    AOT_KernelProgramInfo_InstanceSize = 60;
+static constexpr dart::compiler::target::word AOT_LanguageError_InstanceSize =
+    28;
+static constexpr dart::compiler::target::word AOT_Library_InstanceSize = 84;
+static constexpr dart::compiler::target::word AOT_LibraryPrefix_InstanceSize =
+    20;
+static constexpr dart::compiler::target::word AOT_LinkedHashBase_InstanceSize =
+    28;
+static constexpr dart::compiler::target::word
+    AOT_MegamorphicCache_InstanceSize = 24;
+static constexpr dart::compiler::target::word AOT_Mint_InstanceSize = 16;
+static constexpr dart::compiler::target::word AOT_MirrorReference_InstanceSize =
+    8;
+static constexpr dart::compiler::target::word
+    AOT_MonomorphicSmiableCall_InstanceSize = 12;
+static constexpr dart::compiler::target::word AOT_Namespace_InstanceSize = 20;
+static constexpr dart::compiler::target::word AOT_NativeArguments_StructSize =
+    16;
+static constexpr dart::compiler::target::word AOT_Number_InstanceSize = 4;
+static constexpr dart::compiler::target::word AOT_Object_InstanceSize = 4;
+static constexpr dart::compiler::target::word AOT_PatchClass_InstanceSize = 20;
+static constexpr dart::compiler::target::word AOT_PcDescriptors_HeaderSize = 8;
+static constexpr dart::compiler::target::word AOT_Pointer_InstanceSize = 12;
+static constexpr dart::compiler::target::word AOT_ReceivePort_InstanceSize = 20;
+static constexpr dart::compiler::target::word AOT_RegExp_InstanceSize = 60;
+static constexpr dart::compiler::target::word AOT_Script_InstanceSize = 40;
+static constexpr dart::compiler::target::word AOT_SendPort_InstanceSize = 24;
+static constexpr dart::compiler::target::word AOT_Sentinel_InstanceSize = 4;
+static constexpr dart::compiler::target::word
+    AOT_SingleTargetCache_InstanceSize = 16;
+static constexpr dart::compiler::target::word AOT_StackTrace_InstanceSize = 20;
+static constexpr dart::compiler::target::word AOT_String_InstanceSize = 12;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_InstanceSize = 8;
+static constexpr dart::compiler::target::word AOT_LoadingUnit_InstanceSize = 20;
+static constexpr dart::compiler::target::word
+    AOT_TransferableTypedData_InstanceSize = 4;
+static constexpr dart::compiler::target::word AOT_Type_InstanceSize = 24;
+static constexpr dart::compiler::target::word AOT_TypeParameter_InstanceSize =
+    28;
+static constexpr dart::compiler::target::word AOT_TypeParameters_InstanceSize =
+    20;
+static constexpr dart::compiler::target::word AOT_TypeRef_InstanceSize = 16;
+static constexpr dart::compiler::target::word AOT_TypedData_HeaderSize = 12;
+static constexpr dart::compiler::target::word AOT_TypedDataBase_InstanceSize =
+    12;
+static constexpr dart::compiler::target::word AOT_TypedDataView_InstanceSize =
+    20;
+static constexpr dart::compiler::target::word
+    AOT_UnhandledException_InstanceSize = 12;
+static constexpr dart::compiler::target::word AOT_UnlinkedCall_InstanceSize =
+    16;
+static constexpr dart::compiler::target::word AOT_UnwindError_InstanceSize = 12;
+static constexpr dart::compiler::target::word AOT_UserTag_InstanceSize = 16;
+static constexpr dart::compiler::target::word AOT_WeakProperty_InstanceSize =
+    16;
+static constexpr dart::compiler::target::word
+    AOT_WeakSerializationReference_InstanceSize = 12;
+#endif  // defined(TARGET_ARCH_RISCV32) && !defined(DART_COMPRESSED_POINTERS)
+
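// ---------------------------------------------------------------------------
// Editorial sketch (not part of this commit). Most entries in these generated
// tables are single field offsets, but a few are small arrays: for example,
// AOT_Code_entry_point_offset[] above holds one offset per code entry kind,
// and AOT_Thread_write_barrier_wrappers_thread_offset[] appears to be indexed
// by CPU register number, with -1 marking registers that have no wrapper slot.
// The snippet below is a minimal, self-contained illustration of how such a
// per-kind table would be consumed. The CodeEntryKind names and the
// kind-to-index mapping are assumptions for illustration only; the real
// enumeration lives in unmodified parts of runtime/vm and is not shown in
// this diff. Only the numeric values {4, 12, 8, 16} are copied from the
// RISCV32 table above.

#include <cassert>
#include <cstdint>

namespace sketch {

using word = std::intptr_t;

// Hypothetical entry-kind enumeration (assumption, see note above).
enum class CodeEntryKind : int {
  kNormal = 0,
  kUnchecked = 1,
  kMonomorphic = 2,
  kMonomorphicUnchecked = 3,
};

// Values copied verbatim from AOT_Code_entry_point_offset[] for RISCV32.
constexpr word kCodeEntryPointOffsetRV32[] = {4, 12, 8, 16};

// A consumer would pick the offset for the requested entry kind and add it
// to a Code object's base address when emitting a call.
constexpr word CodeEntryPointOffset(CodeEntryKind kind) {
  return kCodeEntryPointOffsetRV32[static_cast<int>(kind)];
}

}  // namespace sketch

int main() {
  // Under the assumed mapping, the first entry (offset 4 on RV32) would be
  // the normal entry point.
  assert(sketch::CodeEntryPointOffset(sketch::CodeEntryKind::kNormal) == 4);
  return 0;
}
// ---------------------------------------------------------------------------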
+#if defined(TARGET_ARCH_RISCV64) && !defined(DART_COMPRESSED_POINTERS)
+static constexpr dart::compiler::target::word AOT_Closure_entry_point_offset =
+    56;
+static constexpr dart::compiler::target::word AOT_Array_elements_start_offset =
+    24;
+static constexpr dart::compiler::target::word AOT_Array_element_size = 8;
+static constexpr dart::compiler::target::word
+    AOT_ClassTable_elements_start_offset = 0;
+static constexpr dart::compiler::target::word AOT_ClassTable_element_size = 1;
+static constexpr dart::compiler::target::word AOT_Code_elements_start_offset =
+    152;
+static constexpr dart::compiler::target::word AOT_Code_element_size = 4;
+static constexpr dart::compiler::target::word
+    AOT_Context_elements_start_offset = 24;
+static constexpr dart::compiler::target::word AOT_Context_element_size = 8;
+static constexpr dart::compiler::target::word
+    AOT_ContextScope_elements_start_offset = 16;
+static constexpr dart::compiler::target::word AOT_ContextScope_element_size =
+    64;
+static constexpr dart::compiler::target::word
+    AOT_ExceptionHandlers_elements_start_offset = 24;
+static constexpr dart::compiler::target::word
+    AOT_ExceptionHandlers_element_size = 12;
+static constexpr dart::compiler::target::word
+    AOT_ObjectPool_elements_start_offset = 16;
+static constexpr dart::compiler::target::word AOT_ObjectPool_element_size = 8;
+static constexpr dart::compiler::target::word
+    AOT_OneByteString_elements_start_offset = 16;
+static constexpr dart::compiler::target::word AOT_OneByteString_element_size =
+    1;
+static constexpr dart::compiler::target::word
+    AOT_TypeArguments_elements_start_offset = 40;
+static constexpr dart::compiler::target::word AOT_TypeArguments_element_size =
+    8;
+static constexpr dart::compiler::target::word
+    AOT_TwoByteString_elements_start_offset = 16;
+static constexpr dart::compiler::target::word AOT_TwoByteString_element_size =
+    2;
+static constexpr dart::compiler::target::word AOT_Array_kMaxElements =
+    576460752303423487;
+static constexpr dart::compiler::target::word AOT_Array_kMaxNewSpaceElements =
+    32765;
+static constexpr dart::compiler::target::word AOT_Context_kMaxElements =
+    576460752303423487;
+static constexpr dart::compiler::target::word
+    AOT_Instructions_kMonomorphicEntryOffsetJIT = 6;
+static constexpr dart::compiler::target::word
+    AOT_Instructions_kPolymorphicEntryOffsetJIT = 42;
+static constexpr dart::compiler::target::word
+    AOT_Instructions_kMonomorphicEntryOffsetAOT = 6;
+static constexpr dart::compiler::target::word
+    AOT_Instructions_kPolymorphicEntryOffsetAOT = 16;
+static constexpr dart::compiler::target::word
+    AOT_Instructions_kBarePayloadAlignment = 4;
+static constexpr dart::compiler::target::word
+    AOT_Instructions_kNonBarePayloadAlignment = 8;
+static constexpr dart::compiler::target::word AOT_OldPage_kBytesPerCardLog2 =
+    10;
+static constexpr dart::compiler::target::word
+    AOT_NativeEntry_kNumCallWrapperArguments = 2;
+static constexpr dart::compiler::target::word AOT_String_kMaxElements =
+    2305843009213693951;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_kFunctionTypeArguments = 5;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_kInstanceCidOrSignature = 1;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_kDestinationType = 2;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_kInstanceDelayedFunctionTypeArguments = 7;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_kInstanceParentFunctionTypeArguments = 6;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_kInstanceTypeArguments = 3;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_kInstantiatorTypeArguments = 4;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_kTestEntryLength = 8;
+static constexpr dart::compiler::target::word AOT_SubtypeTestCache_kTestResult =
+    0;
+static constexpr dart::compiler::target::word AOT_TypeArguments_kMaxElements =
+    576460752303423487;
+static constexpr dart::compiler::target::word
+    AOT_AbstractType_type_test_stub_entry_point_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_ArgumentsDescriptor_count_offset = 32;
+static constexpr dart::compiler::target::word
+    AOT_ArgumentsDescriptor_size_offset = 40;
+static constexpr dart::compiler::target::word
+    AOT_ArgumentsDescriptor_first_named_entry_offset = 56;
+static constexpr dart::compiler::target::word
+    AOT_ArgumentsDescriptor_named_entry_size = 16;
+static constexpr dart::compiler::target::word
+    AOT_ArgumentsDescriptor_name_offset = 0;
+static constexpr dart::compiler::target::word
+    AOT_ArgumentsDescriptor_position_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_ArgumentsDescriptor_positional_count_offset = 48;
+static constexpr dart::compiler::target::word
+    AOT_ArgumentsDescriptor_type_args_len_offset = 24;
+static constexpr dart::compiler::target::word AOT_Array_data_offset = 24;
+static constexpr dart::compiler::target::word AOT_Array_length_offset = 16;
+static constexpr dart::compiler::target::word AOT_Array_tags_offset = 0;
+static constexpr dart::compiler::target::word AOT_Array_type_arguments_offset =
+    8;
+static constexpr dart::compiler::target::word
+    AOT_Class_declaration_type_offset = 104;
+static constexpr dart::compiler::target::word
+    AOT_Class_num_type_arguments_offset = 140;
+static constexpr dart::compiler::target::word AOT_Class_super_type_offset = 88;
+static constexpr dart::compiler::target::word
+    AOT_Class_host_type_arguments_field_offset_in_words_offset = 152;
+static constexpr dart::compiler::target::word
+    AOT_SharedClassTable_class_heap_stats_table_offset = 0;
+static constexpr dart::compiler::target::word AOT_Closure_context_offset = 40;
+static constexpr dart::compiler::target::word
+    AOT_Closure_delayed_type_arguments_offset = 24;
+static constexpr dart::compiler::target::word AOT_Closure_function_offset = 32;
+static constexpr dart::compiler::target::word
+    AOT_Closure_function_type_arguments_offset = 16;
+static constexpr dart::compiler::target::word AOT_Closure_hash_offset = 48;
+static constexpr dart::compiler::target::word
+    AOT_Closure_instantiator_type_arguments_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_ClosureData_default_type_arguments_kind_offset = 32;
+static constexpr dart::compiler::target::word AOT_Code_object_pool_offset = 40;
+static constexpr dart::compiler::target::word
+    AOT_Code_saved_instructions_offset = 48;
+static constexpr dart::compiler::target::word AOT_Code_owner_offset = 56;
+static constexpr dart::compiler::target::word AOT_Context_num_variables_offset =
+    8;
+static constexpr dart::compiler::target::word AOT_Context_parent_offset = 16;
+static constexpr dart::compiler::target::word AOT_Double_value_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_ExternalOneByteString_external_data_offset = 16;
+static constexpr dart::compiler::target::word
+    AOT_ExternalTwoByteString_external_data_offset = 16;
+static constexpr dart::compiler::target::word AOT_Float32x4_value_offset = 8;
+static constexpr dart::compiler::target::word AOT_Float64x2_value_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_Field_initializer_function_offset = 32;
+static constexpr dart::compiler::target::word
+    AOT_Field_host_offset_or_field_id_offset = 40;
+static constexpr dart::compiler::target::word AOT_Field_guarded_cid_offset = 72;
+static constexpr dart::compiler::target::word
+    AOT_Field_guarded_list_length_in_object_offset_offset = 76;
+static constexpr dart::compiler::target::word
+    AOT_Field_guarded_list_length_offset = 48;
+static constexpr dart::compiler::target::word AOT_Field_is_nullable_offset = 74;
+static constexpr dart::compiler::target::word AOT_Field_kind_bits_offset = 78;
+static constexpr dart::compiler::target::word AOT_Function_code_offset = 64;
+static constexpr dart::compiler::target::word AOT_Function_data_offset = 48;
+static constexpr dart::compiler::target::word
+    AOT_Function_entry_point_offset[] = {8, 16};
+static constexpr dart::compiler::target::word AOT_Function_kind_tag_offset = 72;
+static constexpr dart::compiler::target::word
+    AOT_Function_packed_fields_offset = 76;
+static constexpr dart::compiler::target::word AOT_Function_signature_offset =
+    40;
+static constexpr dart::compiler::target::word
+    AOT_FutureOr_type_arguments_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_GrowableObjectArray_data_offset = 24;
+static constexpr dart::compiler::target::word
+    AOT_GrowableObjectArray_length_offset = 16;
+static constexpr dart::compiler::target::word
+    AOT_GrowableObjectArray_type_arguments_offset = 8;
+static constexpr dart::compiler::target::word AOT_OldPage_card_table_offset =
+    40;
+static constexpr dart::compiler::target::word
+    AOT_CallSiteData_arguments_descriptor_offset = 16;
+static constexpr dart::compiler::target::word AOT_ICData_NumArgsTestedMask = 3;
+static constexpr dart::compiler::target::word AOT_ICData_NumArgsTestedShift = 0;
+static constexpr dart::compiler::target::word AOT_ICData_entries_offset = 24;
+static constexpr dart::compiler::target::word AOT_ICData_owner_offset = 32;
+static constexpr dart::compiler::target::word AOT_ICData_state_bits_offset = 40;
+static constexpr dart::compiler::target::word AOT_Int32x4_value_offset = 8;
+static constexpr dart::compiler::target::word AOT_Isolate_current_tag_offset =
+    48;
+static constexpr dart::compiler::target::word AOT_Isolate_default_tag_offset =
+    56;
+static constexpr dart::compiler::target::word AOT_Isolate_ic_miss_code_offset =
+    64;
+static constexpr dart::compiler::target::word
+    AOT_IsolateGroup_object_store_offset = 40;
+static constexpr dart::compiler::target::word
+    AOT_IsolateGroup_shared_class_table_offset = 16;
+static constexpr dart::compiler::target::word
+    AOT_IsolateGroup_cached_class_table_table_offset = 32;
+static constexpr dart::compiler::target::word AOT_Isolate_single_step_offset =
+    80;
+static constexpr dart::compiler::target::word AOT_Isolate_user_tag_offset = 40;
+static constexpr dart::compiler::target::word AOT_LinkedHashBase_data_offset =
+    24;
+static constexpr dart::compiler::target::word
+    AOT_ImmutableLinkedHashBase_data_offset = 24;
+static constexpr dart::compiler::target::word
+    AOT_LinkedHashBase_deleted_keys_offset = 40;
+static constexpr dart::compiler::target::word
+    AOT_LinkedHashBase_hash_mask_offset = 16;
+static constexpr dart::compiler::target::word AOT_LinkedHashBase_index_offset =
+    48;
+static constexpr dart::compiler::target::word
+    AOT_LinkedHashBase_type_arguments_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_LinkedHashBase_used_data_offset = 32;
+static constexpr dart::compiler::target::word AOT_LocalHandle_ptr_offset = 0;
+static constexpr dart::compiler::target::word
+    AOT_MarkingStackBlock_pointers_offset = 16;
+static constexpr dart::compiler::target::word AOT_MarkingStackBlock_top_offset =
+    8;
+static constexpr dart::compiler::target::word
+    AOT_MegamorphicCache_buckets_offset = 24;
+static constexpr dart::compiler::target::word AOT_MegamorphicCache_mask_offset =
+    32;
+static constexpr dart::compiler::target::word AOT_Mint_value_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_NativeArguments_argc_tag_offset = 8;
+static constexpr dart::compiler::target::word AOT_NativeArguments_argv_offset =
+    16;
+static constexpr dart::compiler::target::word
+    AOT_NativeArguments_retval_offset = 24;
+static constexpr dart::compiler::target::word
+    AOT_NativeArguments_thread_offset = 0;
+static constexpr dart::compiler::target::word
+    AOT_ObjectStore_double_type_offset = 320;
+static constexpr dart::compiler::target::word AOT_ObjectStore_int_type_offset =
+    232;
+static constexpr dart::compiler::target::word
+    AOT_ObjectStore_string_type_offset = 360;
+static constexpr dart::compiler::target::word AOT_ObjectStore_type_type_offset =
+    208;
+static constexpr dart::compiler::target::word AOT_OneByteString_data_offset =
+    16;
+static constexpr dart::compiler::target::word
+    AOT_PointerBase_data_field_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_Pointer_type_arguments_offset = 16;
+static constexpr dart::compiler::target::word
+    AOT_SingleTargetCache_entry_point_offset = 16;
+static constexpr dart::compiler::target::word
+    AOT_SingleTargetCache_lower_limit_offset = 24;
+static constexpr dart::compiler::target::word
+    AOT_SingleTargetCache_target_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_SingleTargetCache_upper_limit_offset = 26;
+static constexpr dart::compiler::target::word
+    AOT_StoreBufferBlock_pointers_offset = 16;
+static constexpr dart::compiler::target::word AOT_StoreBufferBlock_top_offset =
+    8;
+static constexpr dart::compiler::target::word AOT_String_hash_offset = 4;
+static constexpr dart::compiler::target::word AOT_String_length_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_cache_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_Thread_AllocateArray_entry_point_offset = 744;
+static constexpr dart::compiler::target::word
+    AOT_Thread_active_exception_offset = 1576;
+static constexpr dart::compiler::target::word
+    AOT_Thread_active_stacktrace_offset = 1584;
+static constexpr dart::compiler::target::word
+    AOT_Thread_array_write_barrier_code_offset = 232;
+static constexpr dart::compiler::target::word
+    AOT_Thread_array_write_barrier_entry_point_offset = 528;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_mint_with_fpu_regs_entry_point_offset = 544;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_mint_with_fpu_regs_stub_offset = 352;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_mint_without_fpu_regs_entry_point_offset = 552;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_mint_without_fpu_regs_stub_offset = 360;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_object_entry_point_offset = 560;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_object_stub_offset = 368;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_object_parameterized_entry_point_offset = 568;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_object_parameterized_stub_offset = 376;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_object_slow_entry_point_offset = 576;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_object_slow_stub_offset = 384;
+static constexpr dart::compiler::target::word AOT_Thread_api_top_scope_offset =
+    1656;
+static constexpr dart::compiler::target::word
+    AOT_Thread_auto_scope_native_wrapper_entry_point_offset = 672;
+static constexpr dart::compiler::target::word AOT_Thread_bool_false_offset =
+    216;
+static constexpr dart::compiler::target::word AOT_Thread_bool_true_offset = 208;
+static constexpr dart::compiler::target::word
+    AOT_Thread_bootstrap_native_wrapper_entry_point_offset = 656;
+static constexpr dart::compiler::target::word
+    AOT_Thread_call_to_runtime_entry_point_offset = 536;
+static constexpr dart::compiler::target::word
+    AOT_Thread_call_to_runtime_stub_offset = 264;
+static constexpr dart::compiler::target::word AOT_Thread_dart_stream_offset =
+    1696;
+static constexpr dart::compiler::target::word
+    AOT_Thread_dispatch_table_array_offset = 88;
+static constexpr dart::compiler::target::word
+    AOT_Thread_double_truncate_round_supported_offset = 1664;
+static constexpr dart::compiler::target::word AOT_Thread_optimize_entry_offset =
+    616;
+static constexpr dart::compiler::target::word AOT_Thread_optimize_stub_offset =
+    440;
+static constexpr dart::compiler::target::word
+    AOT_Thread_deoptimize_entry_offset = 624;
+static constexpr dart::compiler::target::word
+    AOT_Thread_deoptimize_stub_offset = 448;
+static constexpr dart::compiler::target::word
+    AOT_Thread_double_abs_address_offset = 704;
+static constexpr dart::compiler::target::word
+    AOT_Thread_double_negate_address_offset = 696;
+static constexpr dart::compiler::target::word AOT_Thread_end_offset = 104;
+static constexpr dart::compiler::target::word
+    AOT_Thread_enter_safepoint_stub_offset = 488;
+static constexpr dart::compiler::target::word
+    AOT_Thread_execution_state_offset = 1616;
+static constexpr dart::compiler::target::word
+    AOT_Thread_exit_safepoint_stub_offset = 496;
+static constexpr dart::compiler::target::word
+    AOT_Thread_exit_safepoint_ignore_unwind_in_progress_stub_offset = 504;
+static constexpr dart::compiler::target::word
+    AOT_Thread_call_native_through_safepoint_stub_offset = 512;
+static constexpr dart::compiler::target::word
+    AOT_Thread_call_native_through_safepoint_entry_point_offset = 632;
+static constexpr dart::compiler::target::word
+    AOT_Thread_fix_allocation_stub_code_offset = 248;
+static constexpr dart::compiler::target::word
+    AOT_Thread_fix_callers_target_code_offset = 240;
+static constexpr dart::compiler::target::word
+    AOT_Thread_float_absolute_address_offset = 728;
+static constexpr dart::compiler::target::word
+    AOT_Thread_float_negate_address_offset = 720;
+static constexpr dart::compiler::target::word
+    AOT_Thread_float_not_address_offset = 712;
+static constexpr dart::compiler::target::word
+    AOT_Thread_float_zerow_address_offset = 736;
+static constexpr dart::compiler::target::word
+    AOT_Thread_global_object_pool_offset = 1592;
+static constexpr dart::compiler::target::word
+    AOT_Thread_invoke_dart_code_stub_offset = 256;
+static constexpr dart::compiler::target::word
+    AOT_Thread_exit_through_ffi_offset = 1648;
+static constexpr dart::compiler::target::word AOT_Thread_isolate_offset = 80;
+static constexpr dart::compiler::target::word AOT_Thread_isolate_group_offset =
+    1704;
+static constexpr dart::compiler::target::word
+    AOT_Thread_field_table_values_offset = 128;
+static constexpr dart::compiler::target::word
+    AOT_Thread_lazy_deopt_from_return_stub_offset = 456;
+static constexpr dart::compiler::target::word
+    AOT_Thread_lazy_deopt_from_throw_stub_offset = 464;
+static constexpr dart::compiler::target::word
+    AOT_Thread_lazy_specialize_type_test_stub_offset = 480;
+static constexpr dart::compiler::target::word
+    AOT_Thread_marking_stack_block_offset = 160;
+static constexpr dart::compiler::target::word
+    AOT_Thread_megamorphic_call_checked_entry_offset = 600;
+static constexpr dart::compiler::target::word
+    AOT_Thread_switchable_call_miss_entry_offset = 608;
+static constexpr dart::compiler::target::word
+    AOT_Thread_switchable_call_miss_stub_offset = 408;
+static constexpr dart::compiler::target::word
+    AOT_Thread_no_scope_native_wrapper_entry_point_offset = 664;
+static constexpr dart::compiler::target::word
+    AOT_Thread_late_initialization_error_shared_with_fpu_regs_stub_offset = 280;
+static constexpr dart::compiler::target::word
+    AOT_Thread_late_initialization_error_shared_without_fpu_regs_stub_offset =
+        272;
+static constexpr dart::compiler::target::word
+    AOT_Thread_null_error_shared_with_fpu_regs_stub_offset = 296;
+static constexpr dart::compiler::target::word
+    AOT_Thread_null_error_shared_without_fpu_regs_stub_offset = 288;
+static constexpr dart::compiler::target::word
+    AOT_Thread_null_arg_error_shared_with_fpu_regs_stub_offset = 312;
+static constexpr dart::compiler::target::word
+    AOT_Thread_null_arg_error_shared_without_fpu_regs_stub_offset = 304;
+static constexpr dart::compiler::target::word
+    AOT_Thread_null_cast_error_shared_with_fpu_regs_stub_offset = 328;
+static constexpr dart::compiler::target::word
+    AOT_Thread_null_cast_error_shared_without_fpu_regs_stub_offset = 320;
+static constexpr dart::compiler::target::word
+    AOT_Thread_range_error_shared_with_fpu_regs_stub_offset = 344;
+static constexpr dart::compiler::target::word
+    AOT_Thread_range_error_shared_without_fpu_regs_stub_offset = 336;
+static constexpr dart::compiler::target::word AOT_Thread_object_null_offset =
+    200;
+static constexpr dart::compiler::target::word
+    AOT_Thread_predefined_symbols_address_offset = 680;
+static constexpr dart::compiler::target::word AOT_Thread_resume_pc_offset =
+    1600;
+static constexpr dart::compiler::target::word
+    AOT_Thread_saved_shadow_call_stack_offset = 1608;
+static constexpr dart::compiler::target::word
+    AOT_Thread_safepoint_state_offset = 1624;
+static constexpr dart::compiler::target::word
+    AOT_Thread_slow_type_test_stub_offset = 472;
+static constexpr dart::compiler::target::word
+    AOT_Thread_slow_type_test_entry_point_offset = 648;
+static constexpr dart::compiler::target::word AOT_Thread_stack_limit_offset =
+    56;
+static constexpr dart::compiler::target::word
+    AOT_Thread_saved_stack_limit_offset = 112;
+static constexpr dart::compiler::target::word
+    AOT_Thread_stack_overflow_flags_offset = 120;
+static constexpr dart::compiler::target::word
+    AOT_Thread_stack_overflow_shared_with_fpu_regs_entry_point_offset = 592;
+static constexpr dart::compiler::target::word
+    AOT_Thread_stack_overflow_shared_with_fpu_regs_stub_offset = 400;
+static constexpr dart::compiler::target::word
+    AOT_Thread_stack_overflow_shared_without_fpu_regs_entry_point_offset = 584;
+static constexpr dart::compiler::target::word
+    AOT_Thread_stack_overflow_shared_without_fpu_regs_stub_offset = 392;
+static constexpr dart::compiler::target::word
+    AOT_Thread_store_buffer_block_offset = 152;
+static constexpr dart::compiler::target::word
+    AOT_Thread_top_exit_frame_info_offset = 144;
+static constexpr dart::compiler::target::word AOT_Thread_top_offset = 96;
+static constexpr dart::compiler::target::word AOT_Thread_top_resource_offset =
+    32;
+static constexpr dart::compiler::target::word
+    AOT_Thread_unboxed_int64_runtime_arg_offset = 184;
+static constexpr dart::compiler::target::word
+    AOT_Thread_unboxed_double_runtime_arg_offset = 192;
+static constexpr dart::compiler::target::word AOT_Thread_vm_tag_offset = 176;
+static constexpr dart::compiler::target::word
+    AOT_Thread_write_barrier_code_offset = 224;
+static constexpr dart::compiler::target::word
+    AOT_Thread_write_barrier_entry_point_offset = 520;
+static constexpr dart::compiler::target::word
+    AOT_Thread_write_barrier_mask_offset = 64;
+static constexpr dart::compiler::target::word AOT_Thread_heap_base_offset = 72;
+static constexpr dart::compiler::target::word AOT_Thread_callback_code_offset =
+    1632;
+static constexpr dart::compiler::target::word
+    AOT_Thread_callback_stack_return_offset = 1640;
+static constexpr dart::compiler::target::word AOT_Thread_random_offset = 1672;
+static constexpr dart::compiler::target::word
+    AOT_Thread_jump_to_frame_entry_point_offset = 640;
+static constexpr dart::compiler::target::word AOT_Thread_tsan_utils_offset =
+    1680;
+static constexpr dart::compiler::target::word
+    AOT_TsanUtils_setjmp_function_offset = 0;
+static constexpr dart::compiler::target::word
+    AOT_TsanUtils_setjmp_buffer_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_TsanUtils_exception_pc_offset = 16;
+static constexpr dart::compiler::target::word
+    AOT_TsanUtils_exception_sp_offset = 24;
+static constexpr dart::compiler::target::word
+    AOT_TsanUtils_exception_fp_offset = 32;
+static constexpr dart::compiler::target::word
+    AOT_TimelineStream_enabled_offset = 16;
+static constexpr dart::compiler::target::word AOT_TwoByteString_data_offset =
+    16;
+static constexpr dart::compiler::target::word AOT_Type_arguments_offset = 24;
+static constexpr dart::compiler::target::word AOT_Type_hash_offset = 32;
+static constexpr dart::compiler::target::word AOT_Type_type_class_id_offset =
+    40;
+static constexpr dart::compiler::target::word AOT_Type_type_state_offset = 42;
+static constexpr dart::compiler::target::word AOT_Type_nullability_offset = 43;
+static constexpr dart::compiler::target::word AOT_FunctionType_hash_offset = 56;
+static constexpr dart::compiler::target::word
+    AOT_FunctionType_named_parameter_names_offset = 48;
+static constexpr dart::compiler::target::word
+    AOT_FunctionType_nullability_offset = 71;
+static constexpr dart::compiler::target::word
+    AOT_FunctionType_packed_parameter_counts_offset = 64;
+static constexpr dart::compiler::target::word
+    AOT_FunctionType_packed_type_parameter_counts_offset = 68;
+static constexpr dart::compiler::target::word
+    AOT_FunctionType_parameter_types_offset = 40;
+static constexpr dart::compiler::target::word
+    AOT_FunctionType_type_parameters_offset = 24;
+static constexpr dart::compiler::target::word
+    AOT_TypeParameter_parameterized_class_id_offset = 40;
+static constexpr dart::compiler::target::word AOT_TypeParameter_index_offset =
+    43;
+static constexpr dart::compiler::target::word
+    AOT_TypeParameter_nullability_offset = 45;
+static constexpr dart::compiler::target::word
+    AOT_TypeArguments_instantiations_offset = 8;
+static constexpr dart::compiler::target::word AOT_TypeArguments_length_offset =
+    16;
+static constexpr dart::compiler::target::word
+    AOT_TypeArguments_nullability_offset = 32;
+static constexpr dart::compiler::target::word AOT_TypeArguments_types_offset =
+    40;
+static constexpr dart::compiler::target::word AOT_TypeParameters_names_offset =
+    8;
+static constexpr dart::compiler::target::word AOT_TypeParameters_flags_offset =
+    16;
+static constexpr dart::compiler::target::word AOT_TypeParameters_bounds_offset =
+    24;
+static constexpr dart::compiler::target::word
+    AOT_TypeParameters_defaults_offset = 32;
+static constexpr dart::compiler::target::word AOT_TypeParameter_bound_offset =
+    32;
+static constexpr dart::compiler::target::word AOT_TypeParameter_flags_offset =
+    44;
+static constexpr dart::compiler::target::word AOT_TypeRef_type_offset = 24;
+static constexpr dart::compiler::target::word AOT_TypedDataBase_length_offset =
+    16;
+static constexpr dart::compiler::target::word AOT_TypedDataView_data_offset =
+    24;
+static constexpr dart::compiler::target::word
+    AOT_TypedDataView_offset_in_bytes_offset = 32;
+static constexpr dart::compiler::target::word AOT_TypedData_data_offset = 24;
+static constexpr dart::compiler::target::word
+    AOT_UnhandledException_exception_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_UnhandledException_stacktrace_offset = 16;
+static constexpr dart::compiler::target::word AOT_UserTag_tag_offset = 16;
+static constexpr dart::compiler::target::word
+    AOT_MonomorphicSmiableCall_expected_cid_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_MonomorphicSmiableCall_entrypoint_offset = 16;
+static constexpr dart::compiler::target::word AOT_WeakProperty_key_offset = 8;
+static constexpr dart::compiler::target::word AOT_WeakProperty_value_offset =
+    16;
+static constexpr dart::compiler::target::word AOT_Code_entry_point_offset[] = {
+    8, 24, 16, 32};
+static constexpr dart::compiler::target::word
+    AOT_Thread_write_barrier_wrappers_thread_offset[] = {
+        -1,   -1,   -1, -1, -1, 1432, 1440, 1448, -1,   -1,   1456,
+        1464, 1472, -1, -1, -1, 1480, 1488, 1496, 1504, 1512, 1520,
+        1528, 1536, -1, -1, -1, -1,   1544, 1552, 1560, 1568};
+static constexpr dart::compiler::target::word AOT_AbstractType_InstanceSize =
+    24;
+static constexpr dart::compiler::target::word AOT_ApiError_InstanceSize = 16;
+static constexpr dart::compiler::target::word AOT_Array_header_size = 24;
+static constexpr dart::compiler::target::word AOT_Bool_InstanceSize = 16;
+static constexpr dart::compiler::target::word AOT_Capability_InstanceSize = 16;
+static constexpr dart::compiler::target::word AOT_Class_InstanceSize = 160;
+static constexpr dart::compiler::target::word AOT_Closure_InstanceSize = 64;
+static constexpr dart::compiler::target::word AOT_ClosureData_InstanceSize = 40;
+static constexpr dart::compiler::target::word AOT_CodeSourceMap_HeaderSize = 16;
+static constexpr dart::compiler::target::word
+    AOT_CompressedStackMaps_ObjectHeaderSize = 8;
+static constexpr dart::compiler::target::word
+    AOT_CompressedStackMaps_PayloadHeaderSize = 4;
+static constexpr dart::compiler::target::word AOT_Context_header_size = 24;
+static constexpr dart::compiler::target::word AOT_Double_InstanceSize = 16;
+static constexpr dart::compiler::target::word AOT_DynamicLibrary_InstanceSize =
+    16;
+static constexpr dart::compiler::target::word
+    AOT_ExternalOneByteString_InstanceSize = 32;
+static constexpr dart::compiler::target::word
+    AOT_ExternalTwoByteString_InstanceSize = 32;
+static constexpr dart::compiler::target::word
+    AOT_ExternalTypedData_InstanceSize = 24;
+static constexpr dart::compiler::target::word
+    AOT_FfiTrampolineData_InstanceSize = 48;
+static constexpr dart::compiler::target::word AOT_Field_InstanceSize = 80;
+static constexpr dart::compiler::target::word AOT_Float32x4_InstanceSize = 24;
+static constexpr dart::compiler::target::word AOT_Float64x2_InstanceSize = 24;
+static constexpr dart::compiler::target::word AOT_Function_InstanceSize = 80;
+static constexpr dart::compiler::target::word AOT_FunctionType_InstanceSize =
+    72;
+static constexpr dart::compiler::target::word AOT_FutureOr_InstanceSize = 16;
+static constexpr dart::compiler::target::word
+    AOT_GrowableObjectArray_InstanceSize = 32;
+static constexpr dart::compiler::target::word AOT_ICData_InstanceSize = 48;
+static constexpr dart::compiler::target::word AOT_Instance_InstanceSize = 8;
+static constexpr dart::compiler::target::word
+    AOT_Instructions_UnalignedHeaderSize = 16;
+static constexpr dart::compiler::target::word
+    AOT_InstructionsSection_UnalignedHeaderSize = 40;
+static constexpr dart::compiler::target::word
+    AOT_InstructionsTable_InstanceSize = 48;
+static constexpr dart::compiler::target::word AOT_Int32x4_InstanceSize = 24;
+static constexpr dart::compiler::target::word AOT_Integer_InstanceSize = 8;
+static constexpr dart::compiler::target::word
+    AOT_KernelProgramInfo_InstanceSize = 120;
+static constexpr dart::compiler::target::word AOT_LanguageError_InstanceSize =
+    48;
+static constexpr dart::compiler::target::word AOT_Library_InstanceSize = 160;
+static constexpr dart::compiler::target::word AOT_LibraryPrefix_InstanceSize =
+    40;
+static constexpr dart::compiler::target::word AOT_LinkedHashBase_InstanceSize =
+    56;
+static constexpr dart::compiler::target::word
+    AOT_MegamorphicCache_InstanceSize = 48;
+static constexpr dart::compiler::target::word AOT_Mint_InstanceSize = 16;
+static constexpr dart::compiler::target::word AOT_MirrorReference_InstanceSize =
+    16;
+static constexpr dart::compiler::target::word
+    AOT_MonomorphicSmiableCall_InstanceSize = 24;
+static constexpr dart::compiler::target::word AOT_Namespace_InstanceSize = 40;
+static constexpr dart::compiler::target::word AOT_NativeArguments_StructSize =
+    32;
+static constexpr dart::compiler::target::word AOT_Number_InstanceSize = 8;
+static constexpr dart::compiler::target::word AOT_Object_InstanceSize = 8;
+static constexpr dart::compiler::target::word AOT_PatchClass_InstanceSize = 40;
+static constexpr dart::compiler::target::word AOT_PcDescriptors_HeaderSize = 16;
+static constexpr dart::compiler::target::word AOT_Pointer_InstanceSize = 24;
+static constexpr dart::compiler::target::word AOT_ReceivePort_InstanceSize = 40;
+static constexpr dart::compiler::target::word AOT_RegExp_InstanceSize = 120;
+static constexpr dart::compiler::target::word AOT_Script_InstanceSize = 72;
+static constexpr dart::compiler::target::word AOT_SendPort_InstanceSize = 24;
+static constexpr dart::compiler::target::word AOT_Sentinel_InstanceSize = 8;
+static constexpr dart::compiler::target::word
+    AOT_SingleTargetCache_InstanceSize = 32;
+static constexpr dart::compiler::target::word AOT_StackTrace_InstanceSize = 40;
+static constexpr dart::compiler::target::word AOT_String_InstanceSize = 16;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_InstanceSize = 16;
+static constexpr dart::compiler::target::word AOT_LoadingUnit_InstanceSize = 32;
+static constexpr dart::compiler::target::word
+    AOT_TransferableTypedData_InstanceSize = 8;
+static constexpr dart::compiler::target::word AOT_Type_InstanceSize = 48;
+static constexpr dart::compiler::target::word AOT_TypeParameter_InstanceSize =
+    48;
+static constexpr dart::compiler::target::word AOT_TypeParameters_InstanceSize =
+    40;
+static constexpr dart::compiler::target::word AOT_TypeRef_InstanceSize = 32;
+static constexpr dart::compiler::target::word AOT_TypedData_HeaderSize = 24;
+static constexpr dart::compiler::target::word AOT_TypedDataBase_InstanceSize =
+    24;
+static constexpr dart::compiler::target::word AOT_TypedDataView_InstanceSize =
+    40;
+static constexpr dart::compiler::target::word
+    AOT_UnhandledException_InstanceSize = 24;
+static constexpr dart::compiler::target::word AOT_UnlinkedCall_InstanceSize =
+    32;
+static constexpr dart::compiler::target::word AOT_UnwindError_InstanceSize = 24;
+static constexpr dart::compiler::target::word AOT_UserTag_InstanceSize = 32;
+static constexpr dart::compiler::target::word AOT_WeakProperty_InstanceSize =
+    32;
+static constexpr dart::compiler::target::word
+    AOT_WeakSerializationReference_InstanceSize = 24;
+#endif  // defined(TARGET_ARCH_RISCV64) && !defined(DART_COMPRESSED_POINTERS)
+
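// ---------------------------------------------------------------------------
// Editorial note (not part of this commit): the generated header carries one
// complete offset table per (architecture, pointer-compression, build-mode)
// combination. The #else branch that follows repeats the new RISC-V tables
// for PRODUCT builds, whose object layouts differ slightly because some
// debugging-only fields are compiled out — e.g. the non-PRODUCT tables above
// include AOT_Isolate_single_step_offset, while the PRODUCT RISCV32 table
// below has no such entry. A simplified sketch of the file's conditional
// skeleton:

#if !defined(PRODUCT)
#if defined(TARGET_ARCH_RISCV32) && !defined(DART_COMPRESSED_POINTERS)
// ... non-PRODUCT RISCV32 offsets and instance sizes ...
#endif
// ... other architecture / pointer-compression combinations ...
#else  // !defined(PRODUCT)
#if defined(TARGET_ARCH_RISCV32) && !defined(DART_COMPRESSED_POINTERS)
// ... PRODUCT RISCV32 offsets (fewer fields, shifted values) ...
#endif
#endif  // !defined(PRODUCT)
// ---------------------------------------------------------------------------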
 #else  // !defined(PRODUCT)
 
 #if defined(TARGET_ARCH_ARM) && !defined(DART_COMPRESSED_POINTERS)
@@ -13132,6 +16669,1261 @@
     AOT_WeakSerializationReference_InstanceSize = 16;
 #endif  // defined(TARGET_ARCH_ARM64) && defined(DART_COMPRESSED_POINTERS)
 
+#if defined(TARGET_ARCH_RISCV32) && !defined(DART_COMPRESSED_POINTERS)
+static constexpr dart::compiler::target::word AOT_Closure_entry_point_offset =
+    28;
+static constexpr dart::compiler::target::word AOT_Array_elements_start_offset =
+    12;
+static constexpr dart::compiler::target::word AOT_Array_element_size = 4;
+static constexpr dart::compiler::target::word AOT_Code_elements_start_offset =
+    64;
+static constexpr dart::compiler::target::word AOT_Code_element_size = 4;
+static constexpr dart::compiler::target::word
+    AOT_Context_elements_start_offset = 12;
+static constexpr dart::compiler::target::word AOT_Context_element_size = 4;
+static constexpr dart::compiler::target::word
+    AOT_ContextScope_elements_start_offset = 12;
+static constexpr dart::compiler::target::word AOT_ContextScope_element_size =
+    32;
+static constexpr dart::compiler::target::word
+    AOT_ExceptionHandlers_elements_start_offset = 12;
+static constexpr dart::compiler::target::word
+    AOT_ExceptionHandlers_element_size = 12;
+static constexpr dart::compiler::target::word
+    AOT_ObjectPool_elements_start_offset = 8;
+static constexpr dart::compiler::target::word AOT_ObjectPool_element_size = 4;
+static constexpr dart::compiler::target::word
+    AOT_OneByteString_elements_start_offset = 12;
+static constexpr dart::compiler::target::word AOT_OneByteString_element_size =
+    1;
+static constexpr dart::compiler::target::word
+    AOT_TypeArguments_elements_start_offset = 20;
+static constexpr dart::compiler::target::word AOT_TypeArguments_element_size =
+    4;
+static constexpr dart::compiler::target::word
+    AOT_TwoByteString_elements_start_offset = 12;
+static constexpr dart::compiler::target::word AOT_TwoByteString_element_size =
+    2;
+static constexpr dart::compiler::target::word AOT_Array_kMaxElements =
+    268435455;
+static constexpr dart::compiler::target::word AOT_Array_kMaxNewSpaceElements =
+    65533;
+static constexpr dart::compiler::target::word AOT_Context_kMaxElements =
+    268435455;
+static constexpr dart::compiler::target::word
+    AOT_Instructions_kMonomorphicEntryOffsetJIT = 6;
+static constexpr dart::compiler::target::word
+    AOT_Instructions_kPolymorphicEntryOffsetJIT = 42;
+static constexpr dart::compiler::target::word
+    AOT_Instructions_kMonomorphicEntryOffsetAOT = 6;
+static constexpr dart::compiler::target::word
+    AOT_Instructions_kPolymorphicEntryOffsetAOT = 16;
+static constexpr dart::compiler::target::word
+    AOT_Instructions_kBarePayloadAlignment = 4;
+static constexpr dart::compiler::target::word
+    AOT_Instructions_kNonBarePayloadAlignment = 4;
+static constexpr dart::compiler::target::word AOT_OldPage_kBytesPerCardLog2 = 9;
+static constexpr dart::compiler::target::word
+    AOT_NativeEntry_kNumCallWrapperArguments = 2;
+static constexpr dart::compiler::target::word AOT_String_kMaxElements =
+    536870911;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_kFunctionTypeArguments = 5;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_kInstanceCidOrSignature = 1;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_kDestinationType = 2;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_kInstanceDelayedFunctionTypeArguments = 7;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_kInstanceParentFunctionTypeArguments = 6;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_kInstanceTypeArguments = 3;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_kInstantiatorTypeArguments = 4;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_kTestEntryLength = 8;
+static constexpr dart::compiler::target::word AOT_SubtypeTestCache_kTestResult =
+    0;
+static constexpr dart::compiler::target::word AOT_TypeArguments_kMaxElements =
+    268435455;
+static constexpr dart::compiler::target::word
+    AOT_AbstractType_type_test_stub_entry_point_offset = 4;
+static constexpr dart::compiler::target::word
+    AOT_ArgumentsDescriptor_count_offset = 16;
+static constexpr dart::compiler::target::word
+    AOT_ArgumentsDescriptor_size_offset = 20;
+static constexpr dart::compiler::target::word
+    AOT_ArgumentsDescriptor_first_named_entry_offset = 28;
+static constexpr dart::compiler::target::word
+    AOT_ArgumentsDescriptor_named_entry_size = 8;
+static constexpr dart::compiler::target::word
+    AOT_ArgumentsDescriptor_name_offset = 0;
+static constexpr dart::compiler::target::word
+    AOT_ArgumentsDescriptor_position_offset = 4;
+static constexpr dart::compiler::target::word
+    AOT_ArgumentsDescriptor_positional_count_offset = 24;
+static constexpr dart::compiler::target::word
+    AOT_ArgumentsDescriptor_type_args_len_offset = 12;
+static constexpr dart::compiler::target::word AOT_Array_data_offset = 12;
+static constexpr dart::compiler::target::word AOT_Array_length_offset = 8;
+static constexpr dart::compiler::target::word AOT_Array_tags_offset = 0;
+static constexpr dart::compiler::target::word AOT_Array_type_arguments_offset =
+    4;
+static constexpr dart::compiler::target::word
+    AOT_Class_declaration_type_offset = 48;
+static constexpr dart::compiler::target::word
+    AOT_Class_num_type_arguments_offset = 60;
+static constexpr dart::compiler::target::word AOT_Class_super_type_offset = 40;
+static constexpr dart::compiler::target::word
+    AOT_Class_host_type_arguments_field_offset_in_words_offset = 72;
+static constexpr dart::compiler::target::word AOT_Closure_context_offset = 20;
+static constexpr dart::compiler::target::word
+    AOT_Closure_delayed_type_arguments_offset = 12;
+static constexpr dart::compiler::target::word AOT_Closure_function_offset = 16;
+static constexpr dart::compiler::target::word
+    AOT_Closure_function_type_arguments_offset = 8;
+static constexpr dart::compiler::target::word AOT_Closure_hash_offset = 24;
+static constexpr dart::compiler::target::word
+    AOT_Closure_instantiator_type_arguments_offset = 4;
+static constexpr dart::compiler::target::word
+    AOT_ClosureData_default_type_arguments_kind_offset = 16;
+static constexpr dart::compiler::target::word AOT_Code_object_pool_offset = 20;
+static constexpr dart::compiler::target::word
+    AOT_Code_saved_instructions_offset = 24;
+static constexpr dart::compiler::target::word AOT_Code_owner_offset = 28;
+static constexpr dart::compiler::target::word AOT_Context_num_variables_offset =
+    4;
+static constexpr dart::compiler::target::word AOT_Context_parent_offset = 8;
+static constexpr dart::compiler::target::word AOT_Double_value_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_ExternalOneByteString_external_data_offset = 12;
+static constexpr dart::compiler::target::word
+    AOT_ExternalTwoByteString_external_data_offset = 12;
+static constexpr dart::compiler::target::word AOT_Float32x4_value_offset = 8;
+static constexpr dart::compiler::target::word AOT_Float64x2_value_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_Field_initializer_function_offset = 16;
+static constexpr dart::compiler::target::word
+    AOT_Field_host_offset_or_field_id_offset = 20;
+static constexpr dart::compiler::target::word AOT_Field_guarded_cid_offset = 40;
+static constexpr dart::compiler::target::word
+    AOT_Field_guarded_list_length_in_object_offset_offset = 44;
+static constexpr dart::compiler::target::word
+    AOT_Field_guarded_list_length_offset = 24;
+static constexpr dart::compiler::target::word AOT_Field_is_nullable_offset = 42;
+static constexpr dart::compiler::target::word AOT_Field_kind_bits_offset = 46;
+static constexpr dart::compiler::target::word AOT_Function_code_offset = 32;
+static constexpr dart::compiler::target::word AOT_Function_data_offset = 24;
+static constexpr dart::compiler::target::word
+    AOT_Function_entry_point_offset[] = {4, 8};
+static constexpr dart::compiler::target::word AOT_Function_kind_tag_offset = 36;
+static constexpr dart::compiler::target::word
+    AOT_Function_packed_fields_offset = 40;
+static constexpr dart::compiler::target::word AOT_Function_signature_offset =
+    20;
+static constexpr dart::compiler::target::word
+    AOT_FutureOr_type_arguments_offset = 4;
+static constexpr dart::compiler::target::word
+    AOT_GrowableObjectArray_data_offset = 12;
+static constexpr dart::compiler::target::word
+    AOT_GrowableObjectArray_length_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_GrowableObjectArray_type_arguments_offset = 4;
+static constexpr dart::compiler::target::word AOT_OldPage_card_table_offset =
+    20;
+static constexpr dart::compiler::target::word
+    AOT_CallSiteData_arguments_descriptor_offset = 8;
+static constexpr dart::compiler::target::word AOT_ICData_NumArgsTestedMask = 3;
+static constexpr dart::compiler::target::word AOT_ICData_NumArgsTestedShift = 0;
+static constexpr dart::compiler::target::word AOT_ICData_entries_offset = 12;
+static constexpr dart::compiler::target::word AOT_ICData_owner_offset = 16;
+static constexpr dart::compiler::target::word AOT_ICData_state_bits_offset = 20;
+static constexpr dart::compiler::target::word AOT_Int32x4_value_offset = 8;
+static constexpr dart::compiler::target::word AOT_Isolate_current_tag_offset =
+    20;
+static constexpr dart::compiler::target::word AOT_Isolate_default_tag_offset =
+    24;
+static constexpr dart::compiler::target::word AOT_Isolate_ic_miss_code_offset =
+    28;
+static constexpr dart::compiler::target::word
+    AOT_IsolateGroup_object_store_offset = 20;
+static constexpr dart::compiler::target::word
+    AOT_IsolateGroup_shared_class_table_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_IsolateGroup_cached_class_table_table_offset = 16;
+static constexpr dart::compiler::target::word AOT_Isolate_user_tag_offset = 16;
+static constexpr dart::compiler::target::word AOT_LinkedHashBase_data_offset =
+    12;
+static constexpr dart::compiler::target::word
+    AOT_ImmutableLinkedHashBase_data_offset = 12;
+static constexpr dart::compiler::target::word
+    AOT_LinkedHashBase_deleted_keys_offset = 20;
+static constexpr dart::compiler::target::word
+    AOT_LinkedHashBase_hash_mask_offset = 8;
+static constexpr dart::compiler::target::word AOT_LinkedHashBase_index_offset =
+    24;
+static constexpr dart::compiler::target::word
+    AOT_LinkedHashBase_type_arguments_offset = 4;
+static constexpr dart::compiler::target::word
+    AOT_LinkedHashBase_used_data_offset = 16;
+static constexpr dart::compiler::target::word AOT_LocalHandle_ptr_offset = 0;
+static constexpr dart::compiler::target::word
+    AOT_MarkingStackBlock_pointers_offset = 8;
+static constexpr dart::compiler::target::word AOT_MarkingStackBlock_top_offset =
+    4;
+static constexpr dart::compiler::target::word
+    AOT_MegamorphicCache_buckets_offset = 12;
+static constexpr dart::compiler::target::word AOT_MegamorphicCache_mask_offset =
+    16;
+static constexpr dart::compiler::target::word AOT_Mint_value_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_NativeArguments_argc_tag_offset = 4;
+static constexpr dart::compiler::target::word AOT_NativeArguments_argv_offset =
+    8;
+static constexpr dart::compiler::target::word
+    AOT_NativeArguments_retval_offset = 12;
+static constexpr dart::compiler::target::word
+    AOT_NativeArguments_thread_offset = 0;
+static constexpr dart::compiler::target::word
+    AOT_ObjectStore_double_type_offset = 160;
+static constexpr dart::compiler::target::word AOT_ObjectStore_int_type_offset =
+    116;
+static constexpr dart::compiler::target::word
+    AOT_ObjectStore_string_type_offset = 180;
+static constexpr dart::compiler::target::word AOT_ObjectStore_type_type_offset =
+    104;
+static constexpr dart::compiler::target::word AOT_OneByteString_data_offset =
+    12;
+static constexpr dart::compiler::target::word
+    AOT_PointerBase_data_field_offset = 4;
+static constexpr dart::compiler::target::word
+    AOT_Pointer_type_arguments_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_SingleTargetCache_entry_point_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_SingleTargetCache_lower_limit_offset = 12;
+static constexpr dart::compiler::target::word
+    AOT_SingleTargetCache_target_offset = 4;
+static constexpr dart::compiler::target::word
+    AOT_SingleTargetCache_upper_limit_offset = 14;
+static constexpr dart::compiler::target::word
+    AOT_StoreBufferBlock_pointers_offset = 8;
+static constexpr dart::compiler::target::word AOT_StoreBufferBlock_top_offset =
+    4;
+static constexpr dart::compiler::target::word AOT_String_hash_offset = 8;
+static constexpr dart::compiler::target::word AOT_String_length_offset = 4;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_cache_offset = 4;
+static constexpr dart::compiler::target::word
+    AOT_Thread_AllocateArray_entry_point_offset = 384;
+static constexpr dart::compiler::target::word
+    AOT_Thread_active_exception_offset = 800;
+static constexpr dart::compiler::target::word
+    AOT_Thread_active_stacktrace_offset = 804;
+static constexpr dart::compiler::target::word
+    AOT_Thread_array_write_barrier_code_offset = 128;
+static constexpr dart::compiler::target::word
+    AOT_Thread_array_write_barrier_entry_point_offset = 276;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_mint_with_fpu_regs_entry_point_offset = 284;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_mint_with_fpu_regs_stub_offset = 188;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_mint_without_fpu_regs_entry_point_offset = 288;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_mint_without_fpu_regs_stub_offset = 192;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_object_entry_point_offset = 292;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_object_stub_offset = 196;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_object_parameterized_entry_point_offset = 296;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_object_parameterized_stub_offset = 200;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_object_slow_entry_point_offset = 300;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_object_slow_stub_offset = 204;
+static constexpr dart::compiler::target::word AOT_Thread_api_top_scope_offset =
+    840;
+static constexpr dart::compiler::target::word
+    AOT_Thread_auto_scope_native_wrapper_entry_point_offset = 348;
+static constexpr dart::compiler::target::word AOT_Thread_bool_false_offset =
+    120;
+static constexpr dart::compiler::target::word AOT_Thread_bool_true_offset = 116;
+static constexpr dart::compiler::target::word
+    AOT_Thread_bootstrap_native_wrapper_entry_point_offset = 340;
+static constexpr dart::compiler::target::word
+    AOT_Thread_call_to_runtime_entry_point_offset = 280;
+static constexpr dart::compiler::target::word
+    AOT_Thread_call_to_runtime_stub_offset = 144;
+static constexpr dart::compiler::target::word AOT_Thread_dart_stream_offset =
+    864;
+static constexpr dart::compiler::target::word
+    AOT_Thread_dispatch_table_array_offset = 44;
+static constexpr dart::compiler::target::word
+    AOT_Thread_double_truncate_round_supported_offset = 844;
+static constexpr dart::compiler::target::word AOT_Thread_optimize_entry_offset =
+    320;
+static constexpr dart::compiler::target::word AOT_Thread_optimize_stub_offset =
+    232;
+static constexpr dart::compiler::target::word
+    AOT_Thread_deoptimize_entry_offset = 324;
+static constexpr dart::compiler::target::word
+    AOT_Thread_deoptimize_stub_offset = 236;
+static constexpr dart::compiler::target::word
+    AOT_Thread_double_abs_address_offset = 364;
+static constexpr dart::compiler::target::word
+    AOT_Thread_double_negate_address_offset = 360;
+static constexpr dart::compiler::target::word AOT_Thread_end_offset = 52;
+static constexpr dart::compiler::target::word
+    AOT_Thread_enter_safepoint_stub_offset = 256;
+static constexpr dart::compiler::target::word
+    AOT_Thread_execution_state_offset = 820;
+static constexpr dart::compiler::target::word
+    AOT_Thread_exit_safepoint_stub_offset = 260;
+static constexpr dart::compiler::target::word
+    AOT_Thread_exit_safepoint_ignore_unwind_in_progress_stub_offset = 264;
+static constexpr dart::compiler::target::word
+    AOT_Thread_call_native_through_safepoint_stub_offset = 268;
+static constexpr dart::compiler::target::word
+    AOT_Thread_call_native_through_safepoint_entry_point_offset = 328;
+static constexpr dart::compiler::target::word
+    AOT_Thread_fix_allocation_stub_code_offset = 136;
+static constexpr dart::compiler::target::word
+    AOT_Thread_fix_callers_target_code_offset = 132;
+static constexpr dart::compiler::target::word
+    AOT_Thread_float_absolute_address_offset = 376;
+static constexpr dart::compiler::target::word
+    AOT_Thread_float_negate_address_offset = 372;
+static constexpr dart::compiler::target::word
+    AOT_Thread_float_not_address_offset = 368;
+static constexpr dart::compiler::target::word
+    AOT_Thread_float_zerow_address_offset = 380;
+static constexpr dart::compiler::target::word
+    AOT_Thread_global_object_pool_offset = 808;
+static constexpr dart::compiler::target::word
+    AOT_Thread_invoke_dart_code_stub_offset = 140;
+static constexpr dart::compiler::target::word
+    AOT_Thread_exit_through_ffi_offset = 836;
+static constexpr dart::compiler::target::word AOT_Thread_isolate_offset = 40;
+static constexpr dart::compiler::target::word AOT_Thread_isolate_group_offset =
+    868;
+static constexpr dart::compiler::target::word
+    AOT_Thread_field_table_values_offset = 64;
+static constexpr dart::compiler::target::word
+    AOT_Thread_lazy_deopt_from_return_stub_offset = 240;
+static constexpr dart::compiler::target::word
+    AOT_Thread_lazy_deopt_from_throw_stub_offset = 244;
+static constexpr dart::compiler::target::word
+    AOT_Thread_lazy_specialize_type_test_stub_offset = 252;
+static constexpr dart::compiler::target::word
+    AOT_Thread_marking_stack_block_offset = 80;
+static constexpr dart::compiler::target::word
+    AOT_Thread_megamorphic_call_checked_entry_offset = 312;
+static constexpr dart::compiler::target::word
+    AOT_Thread_switchable_call_miss_entry_offset = 316;
+static constexpr dart::compiler::target::word
+    AOT_Thread_switchable_call_miss_stub_offset = 216;
+static constexpr dart::compiler::target::word
+    AOT_Thread_no_scope_native_wrapper_entry_point_offset = 344;
+static constexpr dart::compiler::target::word
+    AOT_Thread_late_initialization_error_shared_with_fpu_regs_stub_offset = 152;
+static constexpr dart::compiler::target::word
+    AOT_Thread_late_initialization_error_shared_without_fpu_regs_stub_offset =
+        148;
+static constexpr dart::compiler::target::word
+    AOT_Thread_null_error_shared_with_fpu_regs_stub_offset = 160;
+static constexpr dart::compiler::target::word
+    AOT_Thread_null_error_shared_without_fpu_regs_stub_offset = 156;
+static constexpr dart::compiler::target::word
+    AOT_Thread_null_arg_error_shared_with_fpu_regs_stub_offset = 168;
+static constexpr dart::compiler::target::word
+    AOT_Thread_null_arg_error_shared_without_fpu_regs_stub_offset = 164;
+static constexpr dart::compiler::target::word
+    AOT_Thread_null_cast_error_shared_with_fpu_regs_stub_offset = 176;
+static constexpr dart::compiler::target::word
+    AOT_Thread_null_cast_error_shared_without_fpu_regs_stub_offset = 172;
+static constexpr dart::compiler::target::word
+    AOT_Thread_range_error_shared_with_fpu_regs_stub_offset = 184;
+static constexpr dart::compiler::target::word
+    AOT_Thread_range_error_shared_without_fpu_regs_stub_offset = 180;
+static constexpr dart::compiler::target::word AOT_Thread_object_null_offset =
+    112;
+static constexpr dart::compiler::target::word
+    AOT_Thread_predefined_symbols_address_offset = 352;
+static constexpr dart::compiler::target::word AOT_Thread_resume_pc_offset = 812;
+static constexpr dart::compiler::target::word
+    AOT_Thread_saved_shadow_call_stack_offset = 816;
+static constexpr dart::compiler::target::word
+    AOT_Thread_safepoint_state_offset = 824;
+static constexpr dart::compiler::target::word
+    AOT_Thread_slow_type_test_stub_offset = 248;
+static constexpr dart::compiler::target::word
+    AOT_Thread_slow_type_test_entry_point_offset = 336;
+static constexpr dart::compiler::target::word AOT_Thread_stack_limit_offset =
+    28;
+static constexpr dart::compiler::target::word
+    AOT_Thread_saved_stack_limit_offset = 56;
+static constexpr dart::compiler::target::word
+    AOT_Thread_stack_overflow_flags_offset = 60;
+static constexpr dart::compiler::target::word
+    AOT_Thread_stack_overflow_shared_with_fpu_regs_entry_point_offset = 308;
+static constexpr dart::compiler::target::word
+    AOT_Thread_stack_overflow_shared_with_fpu_regs_stub_offset = 212;
+static constexpr dart::compiler::target::word
+    AOT_Thread_stack_overflow_shared_without_fpu_regs_entry_point_offset = 304;
+static constexpr dart::compiler::target::word
+    AOT_Thread_stack_overflow_shared_without_fpu_regs_stub_offset = 208;
+static constexpr dart::compiler::target::word
+    AOT_Thread_store_buffer_block_offset = 76;
+static constexpr dart::compiler::target::word
+    AOT_Thread_top_exit_frame_info_offset = 72;
+static constexpr dart::compiler::target::word AOT_Thread_top_offset = 48;
+static constexpr dart::compiler::target::word AOT_Thread_top_resource_offset =
+    16;
+static constexpr dart::compiler::target::word
+    AOT_Thread_unboxed_int64_runtime_arg_offset = 96;
+static constexpr dart::compiler::target::word
+    AOT_Thread_unboxed_double_runtime_arg_offset = 104;
+static constexpr dart::compiler::target::word AOT_Thread_vm_tag_offset = 88;
+static constexpr dart::compiler::target::word
+    AOT_Thread_write_barrier_code_offset = 124;
+static constexpr dart::compiler::target::word
+    AOT_Thread_write_barrier_entry_point_offset = 272;
+static constexpr dart::compiler::target::word
+    AOT_Thread_write_barrier_mask_offset = 32;
+static constexpr dart::compiler::target::word AOT_Thread_heap_base_offset = 36;
+static constexpr dart::compiler::target::word AOT_Thread_callback_code_offset =
+    828;
+static constexpr dart::compiler::target::word
+    AOT_Thread_callback_stack_return_offset = 832;
+static constexpr dart::compiler::target::word AOT_Thread_random_offset = 848;
+static constexpr dart::compiler::target::word
+    AOT_Thread_jump_to_frame_entry_point_offset = 332;
+static constexpr dart::compiler::target::word AOT_Thread_tsan_utils_offset =
+    856;
+static constexpr dart::compiler::target::word
+    AOT_TsanUtils_setjmp_function_offset = 0;
+static constexpr dart::compiler::target::word
+    AOT_TsanUtils_setjmp_buffer_offset = 4;
+static constexpr dart::compiler::target::word
+    AOT_TsanUtils_exception_pc_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_TsanUtils_exception_sp_offset = 12;
+static constexpr dart::compiler::target::word
+    AOT_TsanUtils_exception_fp_offset = 16;
+static constexpr dart::compiler::target::word
+    AOT_TimelineStream_enabled_offset = 8;
+static constexpr dart::compiler::target::word AOT_TwoByteString_data_offset =
+    12;
+static constexpr dart::compiler::target::word AOT_Type_arguments_offset = 12;
+static constexpr dart::compiler::target::word AOT_Type_hash_offset = 16;
+static constexpr dart::compiler::target::word AOT_Type_type_class_id_offset =
+    20;
+static constexpr dart::compiler::target::word AOT_Type_type_state_offset = 22;
+static constexpr dart::compiler::target::word AOT_Type_nullability_offset = 23;
+static constexpr dart::compiler::target::word AOT_FunctionType_hash_offset = 28;
+static constexpr dart::compiler::target::word
+    AOT_FunctionType_named_parameter_names_offset = 24;
+static constexpr dart::compiler::target::word
+    AOT_FunctionType_nullability_offset = 39;
+static constexpr dart::compiler::target::word
+    AOT_FunctionType_packed_parameter_counts_offset = 32;
+static constexpr dart::compiler::target::word
+    AOT_FunctionType_packed_type_parameter_counts_offset = 36;
+static constexpr dart::compiler::target::word
+    AOT_FunctionType_parameter_types_offset = 20;
+static constexpr dart::compiler::target::word
+    AOT_FunctionType_type_parameters_offset = 12;
+static constexpr dart::compiler::target::word
+    AOT_TypeParameter_parameterized_class_id_offset = 20;
+static constexpr dart::compiler::target::word AOT_TypeParameter_index_offset =
+    23;
+static constexpr dart::compiler::target::word
+    AOT_TypeParameter_nullability_offset = 25;
+static constexpr dart::compiler::target::word
+    AOT_TypeArguments_instantiations_offset = 4;
+static constexpr dart::compiler::target::word AOT_TypeArguments_length_offset =
+    8;
+static constexpr dart::compiler::target::word
+    AOT_TypeArguments_nullability_offset = 16;
+static constexpr dart::compiler::target::word AOT_TypeArguments_types_offset =
+    20;
+static constexpr dart::compiler::target::word AOT_TypeParameters_names_offset =
+    4;
+static constexpr dart::compiler::target::word AOT_TypeParameters_flags_offset =
+    8;
+static constexpr dart::compiler::target::word AOT_TypeParameters_bounds_offset =
+    12;
+static constexpr dart::compiler::target::word
+    AOT_TypeParameters_defaults_offset = 16;
+static constexpr dart::compiler::target::word AOT_TypeParameter_bound_offset =
+    16;
+static constexpr dart::compiler::target::word AOT_TypeParameter_flags_offset =
+    24;
+static constexpr dart::compiler::target::word AOT_TypeRef_type_offset = 12;
+static constexpr dart::compiler::target::word AOT_TypedDataBase_length_offset =
+    8;
+static constexpr dart::compiler::target::word AOT_TypedDataView_data_offset =
+    12;
+static constexpr dart::compiler::target::word
+    AOT_TypedDataView_offset_in_bytes_offset = 16;
+static constexpr dart::compiler::target::word AOT_TypedData_data_offset = 12;
+static constexpr dart::compiler::target::word
+    AOT_UnhandledException_exception_offset = 4;
+static constexpr dart::compiler::target::word
+    AOT_UnhandledException_stacktrace_offset = 8;
+static constexpr dart::compiler::target::word AOT_UserTag_tag_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_MonomorphicSmiableCall_expected_cid_offset = 4;
+static constexpr dart::compiler::target::word
+    AOT_MonomorphicSmiableCall_entrypoint_offset = 8;
+static constexpr dart::compiler::target::word AOT_WeakProperty_key_offset = 4;
+static constexpr dart::compiler::target::word AOT_WeakProperty_value_offset = 8;
+static constexpr dart::compiler::target::word AOT_Code_entry_point_offset[] = {
+    4, 12, 8, 16};
+static constexpr dart::compiler::target::word
+    AOT_Thread_write_barrier_wrappers_thread_offset[] = {
+        -1,  -1,  -1, -1, -1, 728, 732, 736, -1,  -1,  740,
+        744, 748, -1, -1, -1, 752, 756, 760, 764, 768, 772,
+        776, 780, -1, -1, -1, -1,  784, 788, 792, 796};
+static constexpr dart::compiler::target::word AOT_AbstractType_InstanceSize =
+    12;
+static constexpr dart::compiler::target::word AOT_ApiError_InstanceSize = 8;
+static constexpr dart::compiler::target::word AOT_Array_header_size = 12;
+static constexpr dart::compiler::target::word AOT_Bool_InstanceSize = 8;
+static constexpr dart::compiler::target::word AOT_Capability_InstanceSize = 16;
+static constexpr dart::compiler::target::word AOT_Class_InstanceSize = 80;
+static constexpr dart::compiler::target::word AOT_Closure_InstanceSize = 32;
+static constexpr dart::compiler::target::word AOT_ClosureData_InstanceSize = 20;
+static constexpr dart::compiler::target::word AOT_CodeSourceMap_HeaderSize = 8;
+static constexpr dart::compiler::target::word
+    AOT_CompressedStackMaps_ObjectHeaderSize = 4;
+static constexpr dart::compiler::target::word
+    AOT_CompressedStackMaps_PayloadHeaderSize = 4;
+static constexpr dart::compiler::target::word AOT_Context_header_size = 12;
+static constexpr dart::compiler::target::word AOT_Double_InstanceSize = 16;
+static constexpr dart::compiler::target::word AOT_DynamicLibrary_InstanceSize =
+    8;
+static constexpr dart::compiler::target::word
+    AOT_ExternalOneByteString_InstanceSize = 20;
+static constexpr dart::compiler::target::word
+    AOT_ExternalTwoByteString_InstanceSize = 20;
+static constexpr dart::compiler::target::word
+    AOT_ExternalTypedData_InstanceSize = 12;
+static constexpr dart::compiler::target::word
+    AOT_FfiTrampolineData_InstanceSize = 28;
+static constexpr dart::compiler::target::word AOT_Field_InstanceSize = 48;
+static constexpr dart::compiler::target::word AOT_Float32x4_InstanceSize = 24;
+static constexpr dart::compiler::target::word AOT_Float64x2_InstanceSize = 24;
+static constexpr dart::compiler::target::word AOT_Function_InstanceSize = 44;
+static constexpr dart::compiler::target::word AOT_FunctionType_InstanceSize =
+    40;
+static constexpr dart::compiler::target::word AOT_FutureOr_InstanceSize = 8;
+static constexpr dart::compiler::target::word
+    AOT_GrowableObjectArray_InstanceSize = 16;
+static constexpr dart::compiler::target::word AOT_ICData_InstanceSize = 24;
+static constexpr dart::compiler::target::word AOT_Instance_InstanceSize = 4;
+static constexpr dart::compiler::target::word
+    AOT_Instructions_UnalignedHeaderSize = 8;
+static constexpr dart::compiler::target::word
+    AOT_InstructionsSection_UnalignedHeaderSize = 20;
+static constexpr dart::compiler::target::word
+    AOT_InstructionsTable_InstanceSize = 24;
+static constexpr dart::compiler::target::word AOT_Int32x4_InstanceSize = 24;
+static constexpr dart::compiler::target::word AOT_Integer_InstanceSize = 4;
+static constexpr dart::compiler::target::word
+    AOT_KernelProgramInfo_InstanceSize = 60;
+static constexpr dart::compiler::target::word AOT_LanguageError_InstanceSize =
+    28;
+static constexpr dart::compiler::target::word AOT_Library_InstanceSize = 84;
+static constexpr dart::compiler::target::word AOT_LibraryPrefix_InstanceSize =
+    20;
+static constexpr dart::compiler::target::word AOT_LinkedHashBase_InstanceSize =
+    28;
+static constexpr dart::compiler::target::word
+    AOT_MegamorphicCache_InstanceSize = 24;
+static constexpr dart::compiler::target::word AOT_Mint_InstanceSize = 16;
+static constexpr dart::compiler::target::word AOT_MirrorReference_InstanceSize =
+    8;
+static constexpr dart::compiler::target::word
+    AOT_MonomorphicSmiableCall_InstanceSize = 12;
+static constexpr dart::compiler::target::word AOT_Namespace_InstanceSize = 20;
+static constexpr dart::compiler::target::word AOT_NativeArguments_StructSize =
+    16;
+static constexpr dart::compiler::target::word AOT_Number_InstanceSize = 4;
+static constexpr dart::compiler::target::word AOT_Object_InstanceSize = 4;
+static constexpr dart::compiler::target::word AOT_PatchClass_InstanceSize = 20;
+static constexpr dart::compiler::target::word AOT_PcDescriptors_HeaderSize = 8;
+static constexpr dart::compiler::target::word AOT_Pointer_InstanceSize = 12;
+static constexpr dart::compiler::target::word AOT_ReceivePort_InstanceSize = 12;
+static constexpr dart::compiler::target::word AOT_RegExp_InstanceSize = 60;
+static constexpr dart::compiler::target::word AOT_Script_InstanceSize = 40;
+static constexpr dart::compiler::target::word AOT_SendPort_InstanceSize = 24;
+static constexpr dart::compiler::target::word AOT_Sentinel_InstanceSize = 4;
+static constexpr dart::compiler::target::word
+    AOT_SingleTargetCache_InstanceSize = 16;
+static constexpr dart::compiler::target::word AOT_StackTrace_InstanceSize = 20;
+static constexpr dart::compiler::target::word AOT_String_InstanceSize = 12;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_InstanceSize = 8;
+static constexpr dart::compiler::target::word AOT_LoadingUnit_InstanceSize = 20;
+static constexpr dart::compiler::target::word
+    AOT_TransferableTypedData_InstanceSize = 4;
+static constexpr dart::compiler::target::word AOT_Type_InstanceSize = 24;
+static constexpr dart::compiler::target::word AOT_TypeParameter_InstanceSize =
+    28;
+static constexpr dart::compiler::target::word AOT_TypeParameters_InstanceSize =
+    20;
+static constexpr dart::compiler::target::word AOT_TypeRef_InstanceSize = 16;
+static constexpr dart::compiler::target::word AOT_TypedData_HeaderSize = 12;
+static constexpr dart::compiler::target::word AOT_TypedDataBase_InstanceSize =
+    12;
+static constexpr dart::compiler::target::word AOT_TypedDataView_InstanceSize =
+    20;
+static constexpr dart::compiler::target::word
+    AOT_UnhandledException_InstanceSize = 12;
+static constexpr dart::compiler::target::word AOT_UnlinkedCall_InstanceSize =
+    16;
+static constexpr dart::compiler::target::word AOT_UnwindError_InstanceSize = 12;
+static constexpr dart::compiler::target::word AOT_UserTag_InstanceSize = 16;
+static constexpr dart::compiler::target::word AOT_WeakProperty_InstanceSize =
+    16;
+static constexpr dart::compiler::target::word
+    AOT_WeakSerializationReference_InstanceSize = 12;
+#endif  // defined(TARGET_ARCH_RISCV32) && !defined(DART_COMPRESSED_POINTERS)
+
+#if defined(TARGET_ARCH_RISCV64) && !defined(DART_COMPRESSED_POINTERS)
+static constexpr dart::compiler::target::word AOT_Closure_entry_point_offset =
+    56;
+static constexpr dart::compiler::target::word AOT_Array_elements_start_offset =
+    24;
+static constexpr dart::compiler::target::word AOT_Array_element_size = 8;
+static constexpr dart::compiler::target::word AOT_Code_elements_start_offset =
+    120;
+static constexpr dart::compiler::target::word AOT_Code_element_size = 4;
+static constexpr dart::compiler::target::word
+    AOT_Context_elements_start_offset = 24;
+static constexpr dart::compiler::target::word AOT_Context_element_size = 8;
+static constexpr dart::compiler::target::word
+    AOT_ContextScope_elements_start_offset = 16;
+static constexpr dart::compiler::target::word AOT_ContextScope_element_size =
+    64;
+static constexpr dart::compiler::target::word
+    AOT_ExceptionHandlers_elements_start_offset = 24;
+static constexpr dart::compiler::target::word
+    AOT_ExceptionHandlers_element_size = 12;
+static constexpr dart::compiler::target::word
+    AOT_ObjectPool_elements_start_offset = 16;
+static constexpr dart::compiler::target::word AOT_ObjectPool_element_size = 8;
+static constexpr dart::compiler::target::word
+    AOT_OneByteString_elements_start_offset = 16;
+static constexpr dart::compiler::target::word AOT_OneByteString_element_size =
+    1;
+static constexpr dart::compiler::target::word
+    AOT_TypeArguments_elements_start_offset = 40;
+static constexpr dart::compiler::target::word AOT_TypeArguments_element_size =
+    8;
+static constexpr dart::compiler::target::word
+    AOT_TwoByteString_elements_start_offset = 16;
+static constexpr dart::compiler::target::word AOT_TwoByteString_element_size =
+    2;
+static constexpr dart::compiler::target::word AOT_Array_kMaxElements =
+    576460752303423487;
+static constexpr dart::compiler::target::word AOT_Array_kMaxNewSpaceElements =
+    32765;
+static constexpr dart::compiler::target::word AOT_Context_kMaxElements =
+    576460752303423487;
+static constexpr dart::compiler::target::word
+    AOT_Instructions_kMonomorphicEntryOffsetJIT = 6;
+static constexpr dart::compiler::target::word
+    AOT_Instructions_kPolymorphicEntryOffsetJIT = 42;
+static constexpr dart::compiler::target::word
+    AOT_Instructions_kMonomorphicEntryOffsetAOT = 6;
+static constexpr dart::compiler::target::word
+    AOT_Instructions_kPolymorphicEntryOffsetAOT = 16;
+static constexpr dart::compiler::target::word
+    AOT_Instructions_kBarePayloadAlignment = 4;
+static constexpr dart::compiler::target::word
+    AOT_Instructions_kNonBarePayloadAlignment = 8;
+static constexpr dart::compiler::target::word AOT_OldPage_kBytesPerCardLog2 =
+    10;
+static constexpr dart::compiler::target::word
+    AOT_NativeEntry_kNumCallWrapperArguments = 2;
+static constexpr dart::compiler::target::word AOT_String_kMaxElements =
+    2305843009213693951;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_kFunctionTypeArguments = 5;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_kInstanceCidOrSignature = 1;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_kDestinationType = 2;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_kInstanceDelayedFunctionTypeArguments = 7;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_kInstanceParentFunctionTypeArguments = 6;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_kInstanceTypeArguments = 3;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_kInstantiatorTypeArguments = 4;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_kTestEntryLength = 8;
+static constexpr dart::compiler::target::word AOT_SubtypeTestCache_kTestResult =
+    0;
+static constexpr dart::compiler::target::word AOT_TypeArguments_kMaxElements =
+    576460752303423487;
+static constexpr dart::compiler::target::word
+    AOT_AbstractType_type_test_stub_entry_point_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_ArgumentsDescriptor_count_offset = 32;
+static constexpr dart::compiler::target::word
+    AOT_ArgumentsDescriptor_size_offset = 40;
+static constexpr dart::compiler::target::word
+    AOT_ArgumentsDescriptor_first_named_entry_offset = 56;
+static constexpr dart::compiler::target::word
+    AOT_ArgumentsDescriptor_named_entry_size = 16;
+static constexpr dart::compiler::target::word
+    AOT_ArgumentsDescriptor_name_offset = 0;
+static constexpr dart::compiler::target::word
+    AOT_ArgumentsDescriptor_position_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_ArgumentsDescriptor_positional_count_offset = 48;
+static constexpr dart::compiler::target::word
+    AOT_ArgumentsDescriptor_type_args_len_offset = 24;
+static constexpr dart::compiler::target::word AOT_Array_data_offset = 24;
+static constexpr dart::compiler::target::word AOT_Array_length_offset = 16;
+static constexpr dart::compiler::target::word AOT_Array_tags_offset = 0;
+static constexpr dart::compiler::target::word AOT_Array_type_arguments_offset =
+    8;
+static constexpr dart::compiler::target::word
+    AOT_Class_declaration_type_offset = 96;
+static constexpr dart::compiler::target::word
+    AOT_Class_num_type_arguments_offset = 116;
+static constexpr dart::compiler::target::word AOT_Class_super_type_offset = 80;
+static constexpr dart::compiler::target::word
+    AOT_Class_host_type_arguments_field_offset_in_words_offset = 128;
+static constexpr dart::compiler::target::word AOT_Closure_context_offset = 40;
+static constexpr dart::compiler::target::word
+    AOT_Closure_delayed_type_arguments_offset = 24;
+static constexpr dart::compiler::target::word AOT_Closure_function_offset = 32;
+static constexpr dart::compiler::target::word
+    AOT_Closure_function_type_arguments_offset = 16;
+static constexpr dart::compiler::target::word AOT_Closure_hash_offset = 48;
+static constexpr dart::compiler::target::word
+    AOT_Closure_instantiator_type_arguments_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_ClosureData_default_type_arguments_kind_offset = 32;
+static constexpr dart::compiler::target::word AOT_Code_object_pool_offset = 40;
+static constexpr dart::compiler::target::word
+    AOT_Code_saved_instructions_offset = 48;
+static constexpr dart::compiler::target::word AOT_Code_owner_offset = 56;
+static constexpr dart::compiler::target::word AOT_Context_num_variables_offset =
+    8;
+static constexpr dart::compiler::target::word AOT_Context_parent_offset = 16;
+static constexpr dart::compiler::target::word AOT_Double_value_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_ExternalOneByteString_external_data_offset = 16;
+static constexpr dart::compiler::target::word
+    AOT_ExternalTwoByteString_external_data_offset = 16;
+static constexpr dart::compiler::target::word AOT_Float32x4_value_offset = 8;
+static constexpr dart::compiler::target::word AOT_Float64x2_value_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_Field_initializer_function_offset = 32;
+static constexpr dart::compiler::target::word
+    AOT_Field_host_offset_or_field_id_offset = 40;
+static constexpr dart::compiler::target::word AOT_Field_guarded_cid_offset = 72;
+static constexpr dart::compiler::target::word
+    AOT_Field_guarded_list_length_in_object_offset_offset = 76;
+static constexpr dart::compiler::target::word
+    AOT_Field_guarded_list_length_offset = 48;
+static constexpr dart::compiler::target::word AOT_Field_is_nullable_offset = 74;
+static constexpr dart::compiler::target::word AOT_Field_kind_bits_offset = 78;
+static constexpr dart::compiler::target::word AOT_Function_code_offset = 64;
+static constexpr dart::compiler::target::word AOT_Function_data_offset = 48;
+static constexpr dart::compiler::target::word
+    AOT_Function_entry_point_offset[] = {8, 16};
+static constexpr dart::compiler::target::word AOT_Function_kind_tag_offset = 72;
+static constexpr dart::compiler::target::word
+    AOT_Function_packed_fields_offset = 76;
+static constexpr dart::compiler::target::word AOT_Function_signature_offset =
+    40;
+static constexpr dart::compiler::target::word
+    AOT_FutureOr_type_arguments_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_GrowableObjectArray_data_offset = 24;
+static constexpr dart::compiler::target::word
+    AOT_GrowableObjectArray_length_offset = 16;
+static constexpr dart::compiler::target::word
+    AOT_GrowableObjectArray_type_arguments_offset = 8;
+static constexpr dart::compiler::target::word AOT_OldPage_card_table_offset =
+    40;
+static constexpr dart::compiler::target::word
+    AOT_CallSiteData_arguments_descriptor_offset = 16;
+static constexpr dart::compiler::target::word AOT_ICData_NumArgsTestedMask = 3;
+static constexpr dart::compiler::target::word AOT_ICData_NumArgsTestedShift = 0;
+static constexpr dart::compiler::target::word AOT_ICData_entries_offset = 24;
+static constexpr dart::compiler::target::word AOT_ICData_owner_offset = 32;
+static constexpr dart::compiler::target::word AOT_ICData_state_bits_offset = 40;
+static constexpr dart::compiler::target::word AOT_Int32x4_value_offset = 8;
+static constexpr dart::compiler::target::word AOT_Isolate_current_tag_offset =
+    40;
+static constexpr dart::compiler::target::word AOT_Isolate_default_tag_offset =
+    48;
+static constexpr dart::compiler::target::word AOT_Isolate_ic_miss_code_offset =
+    56;
+static constexpr dart::compiler::target::word
+    AOT_IsolateGroup_object_store_offset = 40;
+static constexpr dart::compiler::target::word
+    AOT_IsolateGroup_shared_class_table_offset = 16;
+static constexpr dart::compiler::target::word
+    AOT_IsolateGroup_cached_class_table_table_offset = 32;
+static constexpr dart::compiler::target::word AOT_Isolate_user_tag_offset = 32;
+static constexpr dart::compiler::target::word AOT_LinkedHashBase_data_offset =
+    24;
+static constexpr dart::compiler::target::word
+    AOT_ImmutableLinkedHashBase_data_offset = 24;
+static constexpr dart::compiler::target::word
+    AOT_LinkedHashBase_deleted_keys_offset = 40;
+static constexpr dart::compiler::target::word
+    AOT_LinkedHashBase_hash_mask_offset = 16;
+static constexpr dart::compiler::target::word AOT_LinkedHashBase_index_offset =
+    48;
+static constexpr dart::compiler::target::word
+    AOT_LinkedHashBase_type_arguments_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_LinkedHashBase_used_data_offset = 32;
+static constexpr dart::compiler::target::word AOT_LocalHandle_ptr_offset = 0;
+static constexpr dart::compiler::target::word
+    AOT_MarkingStackBlock_pointers_offset = 16;
+static constexpr dart::compiler::target::word AOT_MarkingStackBlock_top_offset =
+    8;
+static constexpr dart::compiler::target::word
+    AOT_MegamorphicCache_buckets_offset = 24;
+static constexpr dart::compiler::target::word AOT_MegamorphicCache_mask_offset =
+    32;
+static constexpr dart::compiler::target::word AOT_Mint_value_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_NativeArguments_argc_tag_offset = 8;
+static constexpr dart::compiler::target::word AOT_NativeArguments_argv_offset =
+    16;
+static constexpr dart::compiler::target::word
+    AOT_NativeArguments_retval_offset = 24;
+static constexpr dart::compiler::target::word
+    AOT_NativeArguments_thread_offset = 0;
+static constexpr dart::compiler::target::word
+    AOT_ObjectStore_double_type_offset = 320;
+static constexpr dart::compiler::target::word AOT_ObjectStore_int_type_offset =
+    232;
+static constexpr dart::compiler::target::word
+    AOT_ObjectStore_string_type_offset = 360;
+static constexpr dart::compiler::target::word AOT_ObjectStore_type_type_offset =
+    208;
+static constexpr dart::compiler::target::word AOT_OneByteString_data_offset =
+    16;
+static constexpr dart::compiler::target::word
+    AOT_PointerBase_data_field_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_Pointer_type_arguments_offset = 16;
+static constexpr dart::compiler::target::word
+    AOT_SingleTargetCache_entry_point_offset = 16;
+static constexpr dart::compiler::target::word
+    AOT_SingleTargetCache_lower_limit_offset = 24;
+static constexpr dart::compiler::target::word
+    AOT_SingleTargetCache_target_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_SingleTargetCache_upper_limit_offset = 26;
+static constexpr dart::compiler::target::word
+    AOT_StoreBufferBlock_pointers_offset = 16;
+static constexpr dart::compiler::target::word AOT_StoreBufferBlock_top_offset =
+    8;
+static constexpr dart::compiler::target::word AOT_String_hash_offset = 4;
+static constexpr dart::compiler::target::word AOT_String_length_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_cache_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_Thread_AllocateArray_entry_point_offset = 744;
+static constexpr dart::compiler::target::word
+    AOT_Thread_active_exception_offset = 1576;
+static constexpr dart::compiler::target::word
+    AOT_Thread_active_stacktrace_offset = 1584;
+static constexpr dart::compiler::target::word
+    AOT_Thread_array_write_barrier_code_offset = 232;
+static constexpr dart::compiler::target::word
+    AOT_Thread_array_write_barrier_entry_point_offset = 528;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_mint_with_fpu_regs_entry_point_offset = 544;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_mint_with_fpu_regs_stub_offset = 352;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_mint_without_fpu_regs_entry_point_offset = 552;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_mint_without_fpu_regs_stub_offset = 360;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_object_entry_point_offset = 560;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_object_stub_offset = 368;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_object_parameterized_entry_point_offset = 568;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_object_parameterized_stub_offset = 376;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_object_slow_entry_point_offset = 576;
+static constexpr dart::compiler::target::word
+    AOT_Thread_allocate_object_slow_stub_offset = 384;
+static constexpr dart::compiler::target::word AOT_Thread_api_top_scope_offset =
+    1656;
+static constexpr dart::compiler::target::word
+    AOT_Thread_auto_scope_native_wrapper_entry_point_offset = 672;
+static constexpr dart::compiler::target::word AOT_Thread_bool_false_offset =
+    216;
+static constexpr dart::compiler::target::word AOT_Thread_bool_true_offset = 208;
+static constexpr dart::compiler::target::word
+    AOT_Thread_bootstrap_native_wrapper_entry_point_offset = 656;
+static constexpr dart::compiler::target::word
+    AOT_Thread_call_to_runtime_entry_point_offset = 536;
+static constexpr dart::compiler::target::word
+    AOT_Thread_call_to_runtime_stub_offset = 264;
+static constexpr dart::compiler::target::word AOT_Thread_dart_stream_offset =
+    1696;
+static constexpr dart::compiler::target::word
+    AOT_Thread_dispatch_table_array_offset = 88;
+static constexpr dart::compiler::target::word
+    AOT_Thread_double_truncate_round_supported_offset = 1664;
+static constexpr dart::compiler::target::word AOT_Thread_optimize_entry_offset =
+    616;
+static constexpr dart::compiler::target::word AOT_Thread_optimize_stub_offset =
+    440;
+static constexpr dart::compiler::target::word
+    AOT_Thread_deoptimize_entry_offset = 624;
+static constexpr dart::compiler::target::word
+    AOT_Thread_deoptimize_stub_offset = 448;
+static constexpr dart::compiler::target::word
+    AOT_Thread_double_abs_address_offset = 704;
+static constexpr dart::compiler::target::word
+    AOT_Thread_double_negate_address_offset = 696;
+static constexpr dart::compiler::target::word AOT_Thread_end_offset = 104;
+static constexpr dart::compiler::target::word
+    AOT_Thread_enter_safepoint_stub_offset = 488;
+static constexpr dart::compiler::target::word
+    AOT_Thread_execution_state_offset = 1616;
+static constexpr dart::compiler::target::word
+    AOT_Thread_exit_safepoint_stub_offset = 496;
+static constexpr dart::compiler::target::word
+    AOT_Thread_exit_safepoint_ignore_unwind_in_progress_stub_offset = 504;
+static constexpr dart::compiler::target::word
+    AOT_Thread_call_native_through_safepoint_stub_offset = 512;
+static constexpr dart::compiler::target::word
+    AOT_Thread_call_native_through_safepoint_entry_point_offset = 632;
+static constexpr dart::compiler::target::word
+    AOT_Thread_fix_allocation_stub_code_offset = 248;
+static constexpr dart::compiler::target::word
+    AOT_Thread_fix_callers_target_code_offset = 240;
+static constexpr dart::compiler::target::word
+    AOT_Thread_float_absolute_address_offset = 728;
+static constexpr dart::compiler::target::word
+    AOT_Thread_float_negate_address_offset = 720;
+static constexpr dart::compiler::target::word
+    AOT_Thread_float_not_address_offset = 712;
+static constexpr dart::compiler::target::word
+    AOT_Thread_float_zerow_address_offset = 736;
+static constexpr dart::compiler::target::word
+    AOT_Thread_global_object_pool_offset = 1592;
+static constexpr dart::compiler::target::word
+    AOT_Thread_invoke_dart_code_stub_offset = 256;
+static constexpr dart::compiler::target::word
+    AOT_Thread_exit_through_ffi_offset = 1648;
+static constexpr dart::compiler::target::word AOT_Thread_isolate_offset = 80;
+static constexpr dart::compiler::target::word AOT_Thread_isolate_group_offset =
+    1704;
+static constexpr dart::compiler::target::word
+    AOT_Thread_field_table_values_offset = 128;
+static constexpr dart::compiler::target::word
+    AOT_Thread_lazy_deopt_from_return_stub_offset = 456;
+static constexpr dart::compiler::target::word
+    AOT_Thread_lazy_deopt_from_throw_stub_offset = 464;
+static constexpr dart::compiler::target::word
+    AOT_Thread_lazy_specialize_type_test_stub_offset = 480;
+static constexpr dart::compiler::target::word
+    AOT_Thread_marking_stack_block_offset = 160;
+static constexpr dart::compiler::target::word
+    AOT_Thread_megamorphic_call_checked_entry_offset = 600;
+static constexpr dart::compiler::target::word
+    AOT_Thread_switchable_call_miss_entry_offset = 608;
+static constexpr dart::compiler::target::word
+    AOT_Thread_switchable_call_miss_stub_offset = 408;
+static constexpr dart::compiler::target::word
+    AOT_Thread_no_scope_native_wrapper_entry_point_offset = 664;
+static constexpr dart::compiler::target::word
+    AOT_Thread_late_initialization_error_shared_with_fpu_regs_stub_offset = 280;
+static constexpr dart::compiler::target::word
+    AOT_Thread_late_initialization_error_shared_without_fpu_regs_stub_offset =
+        272;
+static constexpr dart::compiler::target::word
+    AOT_Thread_null_error_shared_with_fpu_regs_stub_offset = 296;
+static constexpr dart::compiler::target::word
+    AOT_Thread_null_error_shared_without_fpu_regs_stub_offset = 288;
+static constexpr dart::compiler::target::word
+    AOT_Thread_null_arg_error_shared_with_fpu_regs_stub_offset = 312;
+static constexpr dart::compiler::target::word
+    AOT_Thread_null_arg_error_shared_without_fpu_regs_stub_offset = 304;
+static constexpr dart::compiler::target::word
+    AOT_Thread_null_cast_error_shared_with_fpu_regs_stub_offset = 328;
+static constexpr dart::compiler::target::word
+    AOT_Thread_null_cast_error_shared_without_fpu_regs_stub_offset = 320;
+static constexpr dart::compiler::target::word
+    AOT_Thread_range_error_shared_with_fpu_regs_stub_offset = 344;
+static constexpr dart::compiler::target::word
+    AOT_Thread_range_error_shared_without_fpu_regs_stub_offset = 336;
+static constexpr dart::compiler::target::word AOT_Thread_object_null_offset =
+    200;
+static constexpr dart::compiler::target::word
+    AOT_Thread_predefined_symbols_address_offset = 680;
+static constexpr dart::compiler::target::word AOT_Thread_resume_pc_offset =
+    1600;
+static constexpr dart::compiler::target::word
+    AOT_Thread_saved_shadow_call_stack_offset = 1608;
+static constexpr dart::compiler::target::word
+    AOT_Thread_safepoint_state_offset = 1624;
+static constexpr dart::compiler::target::word
+    AOT_Thread_slow_type_test_stub_offset = 472;
+static constexpr dart::compiler::target::word
+    AOT_Thread_slow_type_test_entry_point_offset = 648;
+static constexpr dart::compiler::target::word AOT_Thread_stack_limit_offset =
+    56;
+static constexpr dart::compiler::target::word
+    AOT_Thread_saved_stack_limit_offset = 112;
+static constexpr dart::compiler::target::word
+    AOT_Thread_stack_overflow_flags_offset = 120;
+static constexpr dart::compiler::target::word
+    AOT_Thread_stack_overflow_shared_with_fpu_regs_entry_point_offset = 592;
+static constexpr dart::compiler::target::word
+    AOT_Thread_stack_overflow_shared_with_fpu_regs_stub_offset = 400;
+static constexpr dart::compiler::target::word
+    AOT_Thread_stack_overflow_shared_without_fpu_regs_entry_point_offset = 584;
+static constexpr dart::compiler::target::word
+    AOT_Thread_stack_overflow_shared_without_fpu_regs_stub_offset = 392;
+static constexpr dart::compiler::target::word
+    AOT_Thread_store_buffer_block_offset = 152;
+static constexpr dart::compiler::target::word
+    AOT_Thread_top_exit_frame_info_offset = 144;
+static constexpr dart::compiler::target::word AOT_Thread_top_offset = 96;
+static constexpr dart::compiler::target::word AOT_Thread_top_resource_offset =
+    32;
+static constexpr dart::compiler::target::word
+    AOT_Thread_unboxed_int64_runtime_arg_offset = 184;
+static constexpr dart::compiler::target::word
+    AOT_Thread_unboxed_double_runtime_arg_offset = 192;
+static constexpr dart::compiler::target::word AOT_Thread_vm_tag_offset = 176;
+static constexpr dart::compiler::target::word
+    AOT_Thread_write_barrier_code_offset = 224;
+static constexpr dart::compiler::target::word
+    AOT_Thread_write_barrier_entry_point_offset = 520;
+static constexpr dart::compiler::target::word
+    AOT_Thread_write_barrier_mask_offset = 64;
+static constexpr dart::compiler::target::word AOT_Thread_heap_base_offset = 72;
+static constexpr dart::compiler::target::word AOT_Thread_callback_code_offset =
+    1632;
+static constexpr dart::compiler::target::word
+    AOT_Thread_callback_stack_return_offset = 1640;
+static constexpr dart::compiler::target::word AOT_Thread_random_offset = 1672;
+static constexpr dart::compiler::target::word
+    AOT_Thread_jump_to_frame_entry_point_offset = 640;
+static constexpr dart::compiler::target::word AOT_Thread_tsan_utils_offset =
+    1680;
+static constexpr dart::compiler::target::word
+    AOT_TsanUtils_setjmp_function_offset = 0;
+static constexpr dart::compiler::target::word
+    AOT_TsanUtils_setjmp_buffer_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_TsanUtils_exception_pc_offset = 16;
+static constexpr dart::compiler::target::word
+    AOT_TsanUtils_exception_sp_offset = 24;
+static constexpr dart::compiler::target::word
+    AOT_TsanUtils_exception_fp_offset = 32;
+static constexpr dart::compiler::target::word
+    AOT_TimelineStream_enabled_offset = 16;
+static constexpr dart::compiler::target::word AOT_TwoByteString_data_offset =
+    16;
+static constexpr dart::compiler::target::word AOT_Type_arguments_offset = 24;
+static constexpr dart::compiler::target::word AOT_Type_hash_offset = 32;
+static constexpr dart::compiler::target::word AOT_Type_type_class_id_offset =
+    40;
+static constexpr dart::compiler::target::word AOT_Type_type_state_offset = 42;
+static constexpr dart::compiler::target::word AOT_Type_nullability_offset = 43;
+static constexpr dart::compiler::target::word AOT_FunctionType_hash_offset = 56;
+static constexpr dart::compiler::target::word
+    AOT_FunctionType_named_parameter_names_offset = 48;
+static constexpr dart::compiler::target::word
+    AOT_FunctionType_nullability_offset = 71;
+static constexpr dart::compiler::target::word
+    AOT_FunctionType_packed_parameter_counts_offset = 64;
+static constexpr dart::compiler::target::word
+    AOT_FunctionType_packed_type_parameter_counts_offset = 68;
+static constexpr dart::compiler::target::word
+    AOT_FunctionType_parameter_types_offset = 40;
+static constexpr dart::compiler::target::word
+    AOT_FunctionType_type_parameters_offset = 24;
+static constexpr dart::compiler::target::word
+    AOT_TypeParameter_parameterized_class_id_offset = 40;
+static constexpr dart::compiler::target::word AOT_TypeParameter_index_offset =
+    43;
+static constexpr dart::compiler::target::word
+    AOT_TypeParameter_nullability_offset = 45;
+static constexpr dart::compiler::target::word
+    AOT_TypeArguments_instantiations_offset = 8;
+static constexpr dart::compiler::target::word AOT_TypeArguments_length_offset =
+    16;
+static constexpr dart::compiler::target::word
+    AOT_TypeArguments_nullability_offset = 32;
+static constexpr dart::compiler::target::word AOT_TypeArguments_types_offset =
+    40;
+static constexpr dart::compiler::target::word AOT_TypeParameters_names_offset =
+    8;
+static constexpr dart::compiler::target::word AOT_TypeParameters_flags_offset =
+    16;
+static constexpr dart::compiler::target::word AOT_TypeParameters_bounds_offset =
+    24;
+static constexpr dart::compiler::target::word
+    AOT_TypeParameters_defaults_offset = 32;
+static constexpr dart::compiler::target::word AOT_TypeParameter_bound_offset =
+    32;
+static constexpr dart::compiler::target::word AOT_TypeParameter_flags_offset =
+    44;
+static constexpr dart::compiler::target::word AOT_TypeRef_type_offset = 24;
+static constexpr dart::compiler::target::word AOT_TypedDataBase_length_offset =
+    16;
+static constexpr dart::compiler::target::word AOT_TypedDataView_data_offset =
+    24;
+static constexpr dart::compiler::target::word
+    AOT_TypedDataView_offset_in_bytes_offset = 32;
+static constexpr dart::compiler::target::word AOT_TypedData_data_offset = 24;
+static constexpr dart::compiler::target::word
+    AOT_UnhandledException_exception_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_UnhandledException_stacktrace_offset = 16;
+static constexpr dart::compiler::target::word AOT_UserTag_tag_offset = 16;
+static constexpr dart::compiler::target::word
+    AOT_MonomorphicSmiableCall_expected_cid_offset = 8;
+static constexpr dart::compiler::target::word
+    AOT_MonomorphicSmiableCall_entrypoint_offset = 16;
+static constexpr dart::compiler::target::word AOT_WeakProperty_key_offset = 8;
+static constexpr dart::compiler::target::word AOT_WeakProperty_value_offset =
+    16;
+static constexpr dart::compiler::target::word AOT_Code_entry_point_offset[] = {
+    8, 24, 16, 32};
+static constexpr dart::compiler::target::word
+    AOT_Thread_write_barrier_wrappers_thread_offset[] = {
+        -1,   -1,   -1, -1, -1, 1432, 1440, 1448, -1,   -1,   1456,
+        1464, 1472, -1, -1, -1, 1480, 1488, 1496, 1504, 1512, 1520,
+        1528, 1536, -1, -1, -1, -1,   1544, 1552, 1560, 1568};
+static constexpr dart::compiler::target::word AOT_AbstractType_InstanceSize =
+    24;
+static constexpr dart::compiler::target::word AOT_ApiError_InstanceSize = 16;
+static constexpr dart::compiler::target::word AOT_Array_header_size = 24;
+static constexpr dart::compiler::target::word AOT_Bool_InstanceSize = 16;
+static constexpr dart::compiler::target::word AOT_Capability_InstanceSize = 16;
+static constexpr dart::compiler::target::word AOT_Class_InstanceSize = 136;
+static constexpr dart::compiler::target::word AOT_Closure_InstanceSize = 64;
+static constexpr dart::compiler::target::word AOT_ClosureData_InstanceSize = 40;
+static constexpr dart::compiler::target::word AOT_CodeSourceMap_HeaderSize = 16;
+static constexpr dart::compiler::target::word
+    AOT_CompressedStackMaps_ObjectHeaderSize = 8;
+static constexpr dart::compiler::target::word
+    AOT_CompressedStackMaps_PayloadHeaderSize = 4;
+static constexpr dart::compiler::target::word AOT_Context_header_size = 24;
+static constexpr dart::compiler::target::word AOT_Double_InstanceSize = 16;
+static constexpr dart::compiler::target::word AOT_DynamicLibrary_InstanceSize =
+    16;
+static constexpr dart::compiler::target::word
+    AOT_ExternalOneByteString_InstanceSize = 32;
+static constexpr dart::compiler::target::word
+    AOT_ExternalTwoByteString_InstanceSize = 32;
+static constexpr dart::compiler::target::word
+    AOT_ExternalTypedData_InstanceSize = 24;
+static constexpr dart::compiler::target::word
+    AOT_FfiTrampolineData_InstanceSize = 48;
+static constexpr dart::compiler::target::word AOT_Field_InstanceSize = 80;
+static constexpr dart::compiler::target::word AOT_Float32x4_InstanceSize = 24;
+static constexpr dart::compiler::target::word AOT_Float64x2_InstanceSize = 24;
+static constexpr dart::compiler::target::word AOT_Function_InstanceSize = 80;
+static constexpr dart::compiler::target::word AOT_FunctionType_InstanceSize =
+    72;
+static constexpr dart::compiler::target::word AOT_FutureOr_InstanceSize = 16;
+static constexpr dart::compiler::target::word
+    AOT_GrowableObjectArray_InstanceSize = 32;
+static constexpr dart::compiler::target::word AOT_ICData_InstanceSize = 48;
+static constexpr dart::compiler::target::word AOT_Instance_InstanceSize = 8;
+static constexpr dart::compiler::target::word
+    AOT_Instructions_UnalignedHeaderSize = 16;
+static constexpr dart::compiler::target::word
+    AOT_InstructionsSection_UnalignedHeaderSize = 40;
+static constexpr dart::compiler::target::word
+    AOT_InstructionsTable_InstanceSize = 48;
+static constexpr dart::compiler::target::word AOT_Int32x4_InstanceSize = 24;
+static constexpr dart::compiler::target::word AOT_Integer_InstanceSize = 8;
+static constexpr dart::compiler::target::word
+    AOT_KernelProgramInfo_InstanceSize = 120;
+static constexpr dart::compiler::target::word AOT_LanguageError_InstanceSize =
+    48;
+static constexpr dart::compiler::target::word AOT_Library_InstanceSize = 160;
+static constexpr dart::compiler::target::word AOT_LibraryPrefix_InstanceSize =
+    40;
+static constexpr dart::compiler::target::word AOT_LinkedHashBase_InstanceSize =
+    56;
+static constexpr dart::compiler::target::word
+    AOT_MegamorphicCache_InstanceSize = 48;
+static constexpr dart::compiler::target::word AOT_Mint_InstanceSize = 16;
+static constexpr dart::compiler::target::word AOT_MirrorReference_InstanceSize =
+    16;
+static constexpr dart::compiler::target::word
+    AOT_MonomorphicSmiableCall_InstanceSize = 24;
+static constexpr dart::compiler::target::word AOT_Namespace_InstanceSize = 40;
+static constexpr dart::compiler::target::word AOT_NativeArguments_StructSize =
+    32;
+static constexpr dart::compiler::target::word AOT_Number_InstanceSize = 8;
+static constexpr dart::compiler::target::word AOT_Object_InstanceSize = 8;
+static constexpr dart::compiler::target::word AOT_PatchClass_InstanceSize = 40;
+static constexpr dart::compiler::target::word AOT_PcDescriptors_HeaderSize = 16;
+static constexpr dart::compiler::target::word AOT_Pointer_InstanceSize = 24;
+static constexpr dart::compiler::target::word AOT_ReceivePort_InstanceSize = 24;
+static constexpr dart::compiler::target::word AOT_RegExp_InstanceSize = 120;
+static constexpr dart::compiler::target::word AOT_Script_InstanceSize = 72;
+static constexpr dart::compiler::target::word AOT_SendPort_InstanceSize = 24;
+static constexpr dart::compiler::target::word AOT_Sentinel_InstanceSize = 8;
+static constexpr dart::compiler::target::word
+    AOT_SingleTargetCache_InstanceSize = 32;
+static constexpr dart::compiler::target::word AOT_StackTrace_InstanceSize = 40;
+static constexpr dart::compiler::target::word AOT_String_InstanceSize = 16;
+static constexpr dart::compiler::target::word
+    AOT_SubtypeTestCache_InstanceSize = 16;
+static constexpr dart::compiler::target::word AOT_LoadingUnit_InstanceSize = 32;
+static constexpr dart::compiler::target::word
+    AOT_TransferableTypedData_InstanceSize = 8;
+static constexpr dart::compiler::target::word AOT_Type_InstanceSize = 48;
+static constexpr dart::compiler::target::word AOT_TypeParameter_InstanceSize =
+    48;
+static constexpr dart::compiler::target::word AOT_TypeParameters_InstanceSize =
+    40;
+static constexpr dart::compiler::target::word AOT_TypeRef_InstanceSize = 32;
+static constexpr dart::compiler::target::word AOT_TypedData_HeaderSize = 24;
+static constexpr dart::compiler::target::word AOT_TypedDataBase_InstanceSize =
+    24;
+static constexpr dart::compiler::target::word AOT_TypedDataView_InstanceSize =
+    40;
+static constexpr dart::compiler::target::word
+    AOT_UnhandledException_InstanceSize = 24;
+static constexpr dart::compiler::target::word AOT_UnlinkedCall_InstanceSize =
+    32;
+static constexpr dart::compiler::target::word AOT_UnwindError_InstanceSize = 24;
+static constexpr dart::compiler::target::word AOT_UserTag_InstanceSize = 32;
+static constexpr dart::compiler::target::word AOT_WeakProperty_InstanceSize =
+    32;
+static constexpr dart::compiler::target::word
+    AOT_WeakSerializationReference_InstanceSize = 24;
+#endif  // defined(TARGET_ARCH_RISCV64) && !defined(DART_COMPRESSED_POINTERS)
+
 #endif  // !defined(PRODUCT)
 
 #endif  // RUNTIME_VM_COMPILER_RUNTIME_OFFSETS_EXTRACTED_H_
diff --git a/runtime/vm/compiler/runtime_offsets_list.h b/runtime/vm/compiler/runtime_offsets_list.h
index 1883b9d..bff550b 100644
--- a/runtime/vm/compiler/runtime_offsets_list.h
+++ b/runtime/vm/compiler/runtime_offsets_list.h
@@ -335,10 +335,10 @@
   RANGE(Code, entry_point_offset, CodeEntryKind, CodeEntryKind::kNormal,       \
         CodeEntryKind::kMonomorphicUnchecked,                                  \
         [](CodeEntryKind value) { return true; })                              \
-  ONLY_IN_ARM_ARM64_X64(RANGE(                                                 \
-      Thread, write_barrier_wrappers_thread_offset, Register, 0,               \
-      kNumberOfCpuRegisters - 1,                                               \
-      [](Register reg) { return (kDartAvailableCpuRegs & (1 << reg)) != 0; })) \
+  NOT_IN_IA32(RANGE(Thread, write_barrier_wrappers_thread_offset, Register, 0, \
+                    kNumberOfCpuRegisters - 1, [](Register reg) {              \
+                      return (kDartAvailableCpuRegs & (1 << reg)) != 0;        \
+                    }))                                                        \
                                                                                \
   SIZEOF(AbstractType, InstanceSize, UntaggedAbstractType)                     \
   SIZEOF(ApiError, InstanceSize, UntaggedApiError)                             \
diff --git a/runtime/vm/compiler/stub_code_compiler.h b/runtime/vm/compiler/stub_code_compiler.h
index fc28901..6c4b3ae 100644
--- a/runtime/vm/compiler/stub_code_compiler.h
+++ b/runtime/vm/compiler/stub_code_compiler.h
@@ -133,6 +133,16 @@
   static constexpr intptr_t kNativeCallbackSharedStubSize = 268;
 #endif
   static constexpr intptr_t kNativeCallbackTrampolineStackDelta = 2;
+#elif defined(TARGET_ARCH_RISCV32)
+  static constexpr intptr_t kNativeCallbackTrampolineSize = 8;
+  static constexpr intptr_t kNativeCallbackSharedStubSize = 192;
+  static constexpr intptr_t kNativeCallbackTrampolineStackDelta = 2;
+#elif defined(TARGET_ARCH_RISCV64)
+  static constexpr intptr_t kNativeCallbackTrampolineSize = 8;
+  static constexpr intptr_t kNativeCallbackSharedStubSize = 196;
+  static constexpr intptr_t kNativeCallbackTrampolineStackDelta = 2;
+#else
+#error What architecture?
 #endif
 
   static void GenerateJITCallbackTrampolines(Assembler* assembler,
diff --git a/runtime/vm/compiler/stub_code_compiler_riscv.cc b/runtime/vm/compiler/stub_code_compiler_riscv.cc
new file mode 100644
index 0000000..ec19d60
--- /dev/null
+++ b/runtime/vm/compiler/stub_code_compiler_riscv.cc
@@ -0,0 +1,3651 @@
+// Copyright (c) 2021, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/globals.h"
+
+// For `AllocateObjectInstr::WillAllocateNewOrRemembered`
+// For `GenericCheckBoundInstr::UseUnboxedRepresentation`
+#include "vm/compiler/backend/il.h"
+
+#define SHOULD_NOT_INCLUDE_RUNTIME
+
+#include "vm/compiler/stub_code_compiler.h"
+
+#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+
+#include "vm/class_id.h"
+#include "vm/code_entry_kind.h"
+#include "vm/compiler/api/type_check_mode.h"
+#include "vm/compiler/assembler/assembler.h"
+#include "vm/compiler/backend/locations.h"
+#include "vm/constants.h"
+#include "vm/instructions.h"
+#include "vm/static_type_exactness_state.h"
+#include "vm/tags.h"
+
+#define __ assembler->
+
+namespace dart {
+namespace compiler {
+
+// Ensures that [A0] is a new object; if it is not, it is added to the
+// remembered set via a leaf runtime call.
+//
+// WARNING: This might clobber all registers except for [A0], [THR] and [FP].
+// The caller should simply call LeaveStubFrame() and return.
+void StubCodeCompiler::EnsureIsNewOrRemembered(Assembler* assembler,
+                                               bool preserve_registers) {
+  // If the object is not remembered, we call a leaf runtime entry to add it
+  // to the remembered set.
+  Label done;
+  __ andi(TMP2, A0, 1 << target::ObjectAlignment::kNewObjectBitPosition);
+  __ bnez(TMP2, &done);
+
+  {
+    Assembler::CallRuntimeScope scope(
+        assembler, kEnsureRememberedAndMarkingDeferredRuntimeEntry,
+        /*frame_size=*/0, /*preserve_registers=*/preserve_registers);
+    __ mv(A1, THR);
+    scope.Call(/*argument_count=*/2);
+  }
+
+  __ Bind(&done);
+}
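+
+// For illustration only (not part of the stub): the bit test above amounts to
+// the following C++ sketch, where `obj` stands for the tagged pointer in A0
+// and `AddToRememberedSet` is a hypothetical name for the leaf runtime entry:
+//
+//   bool is_new = (obj & (uword{1} << kNewObjectBitPosition)) != 0;
+//   if (!is_new) {
+//     AddToRememberedSet(obj, thread);  // leaf call; no Dart frame needed
+//   }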
+
+// Input parameters:
+//   RA : return address.
+//   SP : address of last argument in argument array.
+//   SP + target::kWordSize*T4 - target::kWordSize : address of first argument
+//       in argument array.
+//   SP + target::kWordSize*T4 : address of return value.
+//   T5 : address of the runtime function to call.
+//   T4 : number of arguments to the call.
+void StubCodeCompiler::GenerateCallToRuntimeStub(Assembler* assembler) {
+  const intptr_t thread_offset = target::NativeArguments::thread_offset();
+  const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
+  const intptr_t argv_offset = target::NativeArguments::argv_offset();
+  const intptr_t retval_offset = target::NativeArguments::retval_offset();
+
+  __ Comment("CallToRuntimeStub");
+  __ lx(CODE_REG, Address(THR, target::Thread::call_to_runtime_stub_offset()));
+  __ SetPrologueOffset();
+  __ EnterStubFrame();
+
+  // Save exit frame information to enable stack walking as we are about
+  // to transition to Dart VM C++ code.
+  __ StoreToOffset(FP, THR, target::Thread::top_exit_frame_info_offset());
+
+  // Mark that the thread exited generated code through a runtime call.
+  __ LoadImmediate(TMP, target::Thread::exit_through_runtime_call());
+  __ StoreToOffset(TMP, THR, target::Thread::exit_through_ffi_offset());
+
+#if defined(DEBUG)
+  {
+    Label ok;
+    // Check that we are always entering from Dart code.
+    __ LoadFromOffset(TMP, THR, target::Thread::vm_tag_offset());
+    __ CompareImmediate(TMP, VMTag::kDartTagId);
+    __ BranchIf(EQ, &ok);
+    __ Stop("Not coming from Dart code.");
+    __ Bind(&ok);
+  }
+#endif
+
+  // Mark that the thread is executing VM code.
+  __ StoreToOffset(T5, THR, target::Thread::vm_tag_offset());
+
+  // Reserve space for arguments and align frame before entering C++ world.
+  // target::NativeArguments are passed in registers.
+  __ Comment("align stack");
+  // Reserve space for arguments.
+  ASSERT(target::NativeArguments::StructSize() == 4 * target::kWordSize);
+  __ ReserveAlignedFrameSpace(target::NativeArguments::StructSize());
+
+  // Pass target::NativeArguments structure by value and call runtime.
+  // The structure is assembled on the stack and its address is passed in A0.
+
+  ASSERT(thread_offset == 0 * target::kWordSize);
+  // There are no runtime calls to closures, so we do not need to set the tag
+  // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_.
+  ASSERT(argc_tag_offset == 1 * target::kWordSize);
+  ASSERT(argv_offset == 2 * target::kWordSize);
+  __ slli(T2, T4, target::kWordSizeLog2);
+  __ add(T2, FP, T2);  // Compute argv.
+  // Set argv in target::NativeArguments.
+  __ AddImmediate(T2,
+                  target::frame_layout.param_end_from_fp * target::kWordSize);
+
+  ASSERT(retval_offset == 3 * target::kWordSize);
+  __ AddImmediate(T3, T2, target::kWordSize);
+
+  __ StoreToOffset(THR, SP, thread_offset);
+  __ StoreToOffset(T4, SP, argc_tag_offset);
+  __ StoreToOffset(T2, SP, argv_offset);
+  __ StoreToOffset(T3, SP, retval_offset);
+  __ mv(A0, SP);  // Pass the pointer to the target::NativeArguments.
+
+  ASSERT(IsAbiPreservedRegister(THR));
+  __ jalr(T5);
+  __ Comment("CallToRuntimeStub return");
+
+  // Refresh pinned registers values (inc. write barrier mask and null object).
+  __ RestorePinnedRegisters();
+
+  // Mark that the thread is executing Dart code.
+  __ LoadImmediate(TMP, VMTag::kDartTagId);
+  __ StoreToOffset(TMP, THR, target::Thread::vm_tag_offset());
+
+  // Mark that the thread has not exited generated Dart code.
+  __ StoreToOffset(ZR, THR, target::Thread::exit_through_ffi_offset());
+
+  // Reset exit frame information in Isolate's mutator thread structure.
+  __ StoreToOffset(ZR, THR, target::Thread::top_exit_frame_info_offset());
+
+  // Restore the global object pool after returning from runtime (old space is
+  // moving, so the GOP could have been relocated).
+  if (FLAG_precompiled_mode) {
+    __ SetupGlobalPoolAndDispatchTable();
+  }
+
+  __ LeaveStubFrame();
+
+  // The following return can jump to a lazy-deopt stub, which assumes A0
+  // contains a return value and will save it in a GC-visible way.  We therefore
+  // have to ensure A0 does not contain any garbage value left from the C
+  // function we called (which has return type "void").
+  // (See GenerateDeoptimizationSequence::saved_result_slot_from_fp.)
+  __ LoadImmediate(A0, 0);
+  __ ret();
+}
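+
+// The ASSERTs above pin down the stack layout this stub materializes. As a
+// rough sketch (field names are illustrative; the VM's actual declaration of
+// NativeArguments lives elsewhere), the block whose address is passed in A0
+// looks like:
+//
+//   struct NativeArguments {   // offset in words
+//     Thread* thread;          // 0
+//     intptr_t argc_tag;       // 1
+//     ObjectPtr* argv;         // 2
+//     ObjectPtr* retval;       // 3
+//   };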
+
+void StubCodeCompiler::GenerateSharedStubGeneric(
+    Assembler* assembler,
+    bool save_fpu_registers,
+    intptr_t self_code_stub_offset_from_thread,
+    bool allow_return,
+    std::function<void()> perform_runtime_call) {
+  // We want the saved registers to appear like part of the caller's frame, so
+  // we push them before calling EnterStubFrame.
+  RegisterSet all_registers;
+  all_registers.AddAllNonReservedRegisters(save_fpu_registers);
+
+  // To make the stack map calculation architecture independent we do the same
+  // as on intel.
+  __ PushRegister(RA);
+  __ PushRegisters(all_registers);
+  __ lx(CODE_REG, Address(THR, self_code_stub_offset_from_thread));
+  __ EnterStubFrame();
+  perform_runtime_call();
+  if (!allow_return) {
+    __ Breakpoint();
+    return;
+  }
+  __ LeaveStubFrame();
+  __ PopRegisters(all_registers);
+  __ Drop(1);  // We use the RA restored via LeaveStubFrame.
+  __ ret();
+}
+
+void StubCodeCompiler::GenerateSharedStub(
+    Assembler* assembler,
+    bool save_fpu_registers,
+    const RuntimeEntry* target,
+    intptr_t self_code_stub_offset_from_thread,
+    bool allow_return,
+    bool store_runtime_result_in_result_register) {
+  ASSERT(!store_runtime_result_in_result_register || allow_return);
+  auto perform_runtime_call = [&]() {
+    if (store_runtime_result_in_result_register) {
+      __ PushRegister(NULL_REG);
+    }
+    __ CallRuntime(*target, /*argument_count=*/0);
+    if (store_runtime_result_in_result_register) {
+      __ PopRegister(A0);
+      __ sx(A0, Address(FP, target::kWordSize *
+                                StubCodeCompiler::WordOffsetFromFpToCpuRegister(
+                                    SharedSlowPathStubABI::kResultReg)));
+    }
+  };
+  GenerateSharedStubGeneric(assembler, save_fpu_registers,
+                            self_code_stub_offset_from_thread, allow_return,
+                            perform_runtime_call);
+}
+
+void StubCodeCompiler::GenerateEnterSafepointStub(Assembler* assembler) {
+  RegisterSet all_registers;
+  all_registers.AddAllGeneralRegisters();
+
+  __ EnterFrame(0);
+  __ PushRegisters(all_registers);
+
+  __ ReserveAlignedFrameSpace(0);
+
+  __ lx(TMP, Address(THR, kEnterSafepointRuntimeEntry.OffsetFromThread()));
+  __ jalr(TMP);
+
+  __ PopRegisters(all_registers);
+  __ LeaveFrame();
+  __ ret();
+}
+
+static void GenerateExitSafepointStubCommon(Assembler* assembler,
+                                            uword runtime_entry_offset) {
+  RegisterSet all_registers;
+  all_registers.AddAllGeneralRegisters();
+
+  __ EnterFrame(0);
+  __ PushRegisters(all_registers);
+
+  __ ReserveAlignedFrameSpace(0);
+
+  // Set the execution state to VM while waiting for the safepoint to end.
+  // This isn't strictly necessary but enables tests to check that we're not
+  // in native code anymore. See tests/ffi/function_gc_test.dart for example.
+  __ LoadImmediate(TMP, target::Thread::vm_execution_state());
+  __ sx(TMP, Address(THR, target::Thread::execution_state_offset()));
+
+  __ lx(TMP, Address(THR, runtime_entry_offset));
+  __ jalr(TMP);
+
+  __ PopRegisters(all_registers);
+  __ LeaveFrame();
+  __ ret();
+}
+
+void StubCodeCompiler::GenerateExitSafepointStub(Assembler* assembler) {
+  GenerateExitSafepointStubCommon(
+      assembler, kExitSafepointRuntimeEntry.OffsetFromThread());
+}
+
+void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub(
+    Assembler* assembler) {
+  GenerateExitSafepointStubCommon(
+      assembler,
+      kExitSafepointIgnoreUnwindInProgressRuntimeEntry.OffsetFromThread());
+}
+
+// Calls native code within a safepoint.
+//
+// On entry:
+//   T0: target to call
+//   Stack: set up for native call (SP), aligned
+//
+// On exit:
+//   S2: clobbered, although normally callee-saved
+//   Stack: preserved
+void StubCodeCompiler::GenerateCallNativeThroughSafepointStub(
+    Assembler* assembler) {
+  COMPILE_ASSERT(IsAbiPreservedRegister(S2));
+  __ mv(S2, RA);
+  __ LoadImmediate(T1, target::Thread::exit_through_ffi());
+  __ TransitionGeneratedToNative(T0, FPREG, T1 /*volatile*/,
+                                 /*enter_safepoint=*/true);
+
+#if defined(DEBUG)
+  // Check SP alignment.
+  __ andi(T2 /*volatile*/, SP, ~(OS::ActivationFrameAlignment() - 1));
+  Label done;
+  __ beq(T2, SP, &done);
+  __ Breakpoint();
+  __ Bind(&done);
+#endif
+
+  __ jalr(T0);
+
+  __ TransitionNativeToGenerated(T1, /*leave_safepoint=*/true);
+  __ jr(S2);
+}
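+
+// The DEBUG alignment check above is the usual mask trick: for an activation
+// frame alignment of, say, 16 bytes, SP & ~15 == SP holds exactly when SP is
+// 16-byte aligned. As a plain C++ sketch (illustrative only):
+//
+//   bool aligned =
+//       (sp & ~uintptr_t(OS::ActivationFrameAlignment() - 1)) == sp;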
+
+#if !defined(DART_PRECOMPILER)
+void StubCodeCompiler::GenerateJITCallbackTrampolines(
+    Assembler* assembler,
+    intptr_t next_callback_id) {
+#if defined(USING_SIMULATOR)
+  // TODO(37299): FFI is not supported in SIMRISCV32/64.
+  __ ebreak();
+#else
+  Label loaded_callback_id_hi;
+
+  // T1 is volatile and not used for passing any arguments.
+  COMPILE_ASSERT(!IsCalleeSavedRegister(T1) && !IsArgumentRegister(T1));
+  for (intptr_t i = 0;
+       i < NativeCallbackTrampolines::NumCallbackTrampolinesPerPage(); ++i) {
+    // We don't use LoadImmediate because we need the trampoline size to be
+    // fixed independently of the callback ID.
+    // lui has 20 bits of range.
+    __ lui_fixed(T1, (next_callback_id + i) << 12);
+    __ j(&loaded_callback_id_hi);
+  }
+
+  ASSERT(__ CodeSize() ==
+         kNativeCallbackTrampolineSize *
+             NativeCallbackTrampolines::NumCallbackTrampolinesPerPage());
+
+  __ Bind(&loaded_callback_id_hi);
+  __ srai(T1, T1, 12);
+
+  const intptr_t shared_stub_start = __ CodeSize();
+
+  // Save THR (callee-saved) and RA. Keeps stack aligned.
+  COMPILE_ASSERT(StubCodeCompiler::kNativeCallbackTrampolineStackDelta == 2);
+  __ PushRegisterPair(RA, THR);
+  COMPILE_ASSERT(!IsArgumentRegister(THR));
+
+  RegisterSet all_registers;
+  all_registers.AddAllArgumentRegisters();
+
+  // The call below might clobber T1 (volatile, holding callback_id).
+  all_registers.Add(Location::RegisterLocation(T1));
+
+  // Load the thread, verify the callback ID and exit the safepoint.
+  //
+  // We exit the safepoint inside DLRT_GetThreadForNativeCallbackTrampoline
+  // in order to save code size on this shared stub.
+  {
+    __ PushRegisters(all_registers);
+    __ EnterFrame(0);
+    __ ReserveAlignedFrameSpace(0);
+
+    // Since DLRT_GetThreadForNativeCallbackTrampoline can theoretically be
+    // loaded anywhere, we use the same trick as before to ensure a predictable
+    // instruction sequence.
+    Label call;
+    __ mv(A0, T1);
+
+    const intptr_t kPCRelativeLoadOffset = 12;
+    intptr_t start = __ CodeSize();
+    __ auipc(T1, 0);
+    __ lx(T1, Address(T1, kPCRelativeLoadOffset));
+    __ j(&call);
+
+    ASSERT_EQUAL(__ CodeSize() - start, kPCRelativeLoadOffset);
+#if XLEN == 32
+    __ Emit32(
+        reinterpret_cast<int32_t>(&DLRT_GetThreadForNativeCallbackTrampoline));
+#else
+    __ Emit64(
+        reinterpret_cast<int64_t>(&DLRT_GetThreadForNativeCallbackTrampoline));
+#endif
+
+    __ Bind(&call);
+    __ jalr(T1);
+    __ mv(THR, A0);
+
+    __ LeaveFrame();
+
+    __ PopRegisters(all_registers);
+  }
+
+  COMPILE_ASSERT(!IsCalleeSavedRegister(T2) && !IsArgumentRegister(T2));
+  COMPILE_ASSERT(!IsCalleeSavedRegister(T3) && !IsArgumentRegister(T3));
+
+  // Load the code object.
+  __ LoadFromOffset(T2, THR, compiler::target::Thread::callback_code_offset());
+  __ LoadCompressedFieldFromOffset(
+      T2, T2, compiler::target::GrowableObjectArray::data_offset());
+  __ LoadCompressed(
+      T2,
+      __ ElementAddressForRegIndex(
+          /*external=*/false,
+          /*array_cid=*/kArrayCid,
+          /*index_scale, smi-tagged=*/compiler::target::kCompressedWordSize * 2,
+          /*index_unboxed=*/false,
+          /*array=*/T2,
+          /*index=*/T1,
+          /*temp=*/T3));
+  __ LoadFieldFromOffset(T2, T2, compiler::target::Code::entry_point_offset());
+
+  // Clobbers all volatile registers, including the callback ID in T1.
+  __ jalr(T2);
+
+  // Clobbers TMP, TMP2 and T1 -- all volatile and not holding return values.
+  __ EnterFullSafepoint(/*scratch=*/T1);
+
+  __ PopRegisterPair(RA, THR);
+  __ ret();
+
+  ASSERT_EQUAL((__ CodeSize() - shared_stub_start),
+               kNativeCallbackSharedStubSize);
+  ASSERT(__ CodeSize() <= VirtualMemory::PageSize());
+
+#if defined(DEBUG)
+  while (__ CodeSize() < VirtualMemory::PageSize()) {
+    __ ebreak();
+  }
+#endif
+#endif
+}
+#endif  // !defined(DART_PRECOMPILER)
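+
+// Illustrative recap of the trampoline encoding above: every trampoline is a
+// fixed-size `lui`/jump pair, so the callback ID travels in bits 31:12 of T1
+// and the shared stub recovers it with an arithmetic right shift. In C++
+// terms (sketch only; small non-negative IDs assumed):
+//
+//   uint32_t hi = id << 12;                  // what lui_fixed puts in T1
+//   intptr_t recovered = int32_t(hi) >> 12;  // what `srai T1, T1, 12` yields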
+
+// T1: The extracted method.
+// T4: The type_arguments_field_offset (or 0)
+void StubCodeCompiler::GenerateBuildMethodExtractorStub(
+    Assembler* assembler,
+    const Code& closure_allocation_stub,
+    const Code& context_allocation_stub,
+    bool generic) {
+  const intptr_t kReceiverOffset = target::frame_layout.param_end_from_fp + 1;
+
+  __ EnterStubFrame();
+
+  // Build type_arguments vector (or null)
+  Label no_type_args;
+  __ lx(T3, Address(THR, target::Thread::object_null_offset()));
+  __ CompareImmediate(T4, 0);
+  __ BranchIf(EQ, &no_type_args);
+  __ lx(T0, Address(FP, kReceiverOffset * target::kWordSize));
+  __ add(TMP, T0, T4);
+  __ LoadCompressed(T3, Address(TMP, 0));
+  __ Bind(&no_type_args);
+
+  // Push type arguments & extracted method.
+  __ PushRegister(T3);
+  __ PushRegister(T1);
+
+  // Allocate context.
+  {
+    Label done, slow_path;
+    if (!FLAG_use_slow_path && FLAG_inline_alloc) {
+      __ TryAllocateArray(kContextCid, target::Context::InstanceSize(1),
+                          &slow_path,
+                          A0,  // instance
+                          T1,  // end address
+                          T2, T3);
+      __ StoreCompressedIntoObjectNoBarrier(
+          A0, FieldAddress(A0, target::Context::parent_offset()), NULL_REG);
+      __ LoadImmediate(T1, 1);
+      __ sw(T1, FieldAddress(A0, target::Context::num_variables_offset()));
+      __ j(&done, compiler::Assembler::kNearJump);
+    }
+
+    __ Bind(&slow_path);
+
+    __ LoadImmediate(/*num_vars=*/T1, 1);
+    __ LoadObject(CODE_REG, context_allocation_stub);
+    __ lx(RA, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
+    __ jalr(RA);
+
+    __ Bind(&done);
+  }
+
+  // Put context in right register for AllocateClosure call.
+  __ MoveRegister(AllocateClosureABI::kContextReg, A0);
+
+  // Store receiver in context
+  __ lx(AllocateClosureABI::kScratchReg,
+        Address(FP, target::kWordSize * kReceiverOffset));
+  __ StoreCompressedIntoObject(
+      AllocateClosureABI::kContextReg,
+      FieldAddress(AllocateClosureABI::kContextReg,
+                   target::Context::variable_offset(0)),
+      AllocateClosureABI::kScratchReg);
+
+  // Pop function before pushing context.
+  __ PopRegister(AllocateClosureABI::kFunctionReg);
+
+  // Allocate closure. After this point, we only use the registers in
+  // AllocateClosureABI.
+  __ LoadObject(CODE_REG, closure_allocation_stub);
+  __ lx(AllocateClosureABI::kScratchReg,
+        FieldAddress(CODE_REG, target::Code::entry_point_offset()));
+  __ jalr(AllocateClosureABI::kScratchReg);
+
+  // Populate closure object.
+  __ PopRegister(AllocateClosureABI::kScratchReg);  // Pop type arguments.
+  __ StoreCompressedIntoObjectNoBarrier(
+      AllocateClosureABI::kResultReg,
+      FieldAddress(AllocateClosureABI::kResultReg,
+                   target::Closure::instantiator_type_arguments_offset()),
+      AllocateClosureABI::kScratchReg);
+  // Keep delayed_type_arguments as null if non-generic (see Closure::New).
+  if (generic) {
+    __ LoadObject(AllocateClosureABI::kScratchReg, EmptyTypeArguments());
+    __ StoreCompressedIntoObjectNoBarrier(
+        AllocateClosureABI::kResultReg,
+        FieldAddress(AllocateClosureABI::kResultReg,
+                     target::Closure::delayed_type_arguments_offset()),
+        AllocateClosureABI::kScratchReg);
+  }
+
+  __ LeaveStubFrame();
+  // No-op if the two are the same.
+  __ MoveRegister(A0, AllocateClosureABI::kResultReg);
+  __ Ret();
+}
+
+void StubCodeCompiler::GenerateDispatchTableNullErrorStub(
+    Assembler* assembler) {
+  __ EnterStubFrame();
+  __ SmiTag(DispatchTableNullErrorABI::kClassIdReg);
+  __ PushRegister(DispatchTableNullErrorABI::kClassIdReg);
+  __ CallRuntime(kDispatchTableNullErrorRuntimeEntry, /*argument_count=*/1);
+  // The NullError runtime entry does not return.
+  __ Breakpoint();
+}
+
+void StubCodeCompiler::GenerateRangeError(Assembler* assembler,
+                                          bool with_fpu_regs) {
+  auto perform_runtime_call = [&]() {
+  // If the generated code has unboxed index/length we need to box them before
+  // calling the runtime entry.
+#if XLEN == 32
+    ASSERT(!GenericCheckBoundInstr::UseUnboxedRepresentation());
+#else
+    if (GenericCheckBoundInstr::UseUnboxedRepresentation()) {
+      Label length, smi_case;
+
+      // The user-controlled index might not fit into a Smi.
+      __ mv(TMP, RangeErrorABI::kIndexReg);
+      __ SmiTag(RangeErrorABI::kIndexReg, RangeErrorABI::kIndexReg);
+      __ SmiUntag(TMP2, RangeErrorABI::kIndexReg);
+      __ beq(TMP, TMP2, &length);  // No overflow.
+      {
+        // Allocate a mint, reload the two registers, and populate the mint.
+        __ PushRegister(NULL_REG);
+        __ CallRuntime(kAllocateMintRuntimeEntry, /*argument_count=*/0);
+        __ PopRegister(RangeErrorABI::kIndexReg);
+        __ lx(TMP,
+              Address(FP, target::kWordSize *
+                              StubCodeCompiler::WordOffsetFromFpToCpuRegister(
+                                  RangeErrorABI::kIndexReg)));
+        __ sx(TMP, FieldAddress(RangeErrorABI::kIndexReg,
+                                target::Mint::value_offset()));
+        __ lx(RangeErrorABI::kLengthReg,
+              Address(FP, target::kWordSize *
+                              StubCodeCompiler::WordOffsetFromFpToCpuRegister(
+                                  RangeErrorABI::kLengthReg)));
+      }
+
+      // Length is guaranteed to be in positive Smi range (it comes from a load
+      // of a VM-recognized array).
+      __ Bind(&length);
+      __ SmiTag(RangeErrorABI::kLengthReg);
+    }
+#endif  // XLEN != 32
+    __ PushRegister(RangeErrorABI::kLengthReg);
+    __ PushRegister(RangeErrorABI::kIndexReg);
+    __ CallRuntime(kRangeErrorRuntimeEntry, /*argument_count=*/2);
+    __ Breakpoint();
+  };
+
+  GenerateSharedStubGeneric(
+      assembler, /*save_fpu_registers=*/with_fpu_regs,
+      with_fpu_regs
+          ? target::Thread::range_error_shared_with_fpu_regs_stub_offset()
+          : target::Thread::range_error_shared_without_fpu_regs_stub_offset(),
+      /*allow_return=*/false, perform_runtime_call);
+}
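+
+// The overflow check in the XLEN == 64 branch above relies on the Smi-tag
+// round trip: tagging shifts the index left by one bit, so untagging
+// reproduces the original value exactly when it fit. As a sketch
+// (illustrative only):
+//
+//   intptr_t tagged = index << 1;                // SmiTag
+//   bool fits_in_smi = (tagged >> 1) == index;   // SmiUntag round-trips
+//
+// When the index does not fit, it is boxed into a freshly allocated Mint.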
+
+// Input parameters:
+//   RA : return address.
+//   SP : address of return value.
+//   T5 : address of the native function to call.
+//   T2 : address of first argument in argument array.
+//   T1 : argc_tag including number of arguments and function kind.
+static void GenerateCallNativeWithWrapperStub(Assembler* assembler,
+                                              Address wrapper) {
+  const intptr_t thread_offset = target::NativeArguments::thread_offset();
+  const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
+  const intptr_t argv_offset = target::NativeArguments::argv_offset();
+  const intptr_t retval_offset = target::NativeArguments::retval_offset();
+
+  __ EnterStubFrame();
+
+  // Save exit frame information to enable stack walking as we are about
+  // to transition to native code.
+  __ StoreToOffset(FP, THR, target::Thread::top_exit_frame_info_offset());
+
+  // Mark that the thread exited generated code through a runtime call.
+  __ LoadImmediate(TMP, target::Thread::exit_through_runtime_call());
+  __ StoreToOffset(TMP, THR, target::Thread::exit_through_ffi_offset());
+
+#if defined(DEBUG)
+  {
+    Label ok;
+    // Check that we are always entering from Dart code.
+    __ LoadFromOffset(TMP, THR, target::Thread::vm_tag_offset());
+    __ CompareImmediate(TMP, VMTag::kDartTagId);
+    __ BranchIf(EQ, &ok);
+    __ Stop("Not coming from Dart code.");
+    __ Bind(&ok);
+  }
+#endif
+
+  // Mark that the thread is executing native code.
+  __ StoreToOffset(T5, THR, target::Thread::vm_tag_offset());
+
+  // Reserve space for the native arguments structure passed on the stack (the
+  // outgoing pointer parameter to the native arguments structure is passed in
+  // A0) and align the frame before entering the C++ world.
+  __ ReserveAlignedFrameSpace(target::NativeArguments::StructSize());
+
+  // Initialize target::NativeArguments structure and call native function.
+  ASSERT(thread_offset == 0 * target::kWordSize);
+  // There are no native calls to closures, so we do not need to set the tag
+  // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_.
+  ASSERT(argc_tag_offset == 1 * target::kWordSize);
+  // Set argc in target::NativeArguments: T1 already contains argc.
+  ASSERT(argv_offset == 2 * target::kWordSize);
+  // Set argv in target::NativeArguments: T2 already contains argv.
+  // Set retval in NativeArgs.
+  ASSERT(retval_offset == 3 * target::kWordSize);
+  __ AddImmediate(T3, FP, 2 * target::kWordSize);
+
+  // Passing the structure by value as in runtime calls would require changing
+  // Dart API for native functions.
+  // For now, space is reserved on the stack and we pass a pointer to it.
+  __ StoreToOffset(THR, SP, thread_offset);
+  __ StoreToOffset(T1, SP, argc_tag_offset);
+  __ StoreToOffset(T2, SP, argv_offset);
+  __ StoreToOffset(T3, SP, retval_offset);
+  __ mv(A0, SP);  // Pass the pointer to the target::NativeArguments.
+  __ mv(A1, T5);  // Pass the function entrypoint to call.
+
+  // Call native function invocation wrapper or redirection via simulator.
+  ASSERT(IsAbiPreservedRegister(THR));
+  __ Call(wrapper);
+
+  // Refresh pinned registers values (inc. write barrier mask and null object).
+  __ RestorePinnedRegisters();
+
+  // Mark that the thread is executing Dart code.
+  __ LoadImmediate(TMP, VMTag::kDartTagId);
+  __ StoreToOffset(TMP, THR, target::Thread::vm_tag_offset());
+
+  // Mark that the thread has not exited generated Dart code.
+  __ StoreToOffset(ZR, THR, target::Thread::exit_through_ffi_offset());
+
+  // Reset exit frame information in Isolate's mutator thread structure.
+  __ StoreToOffset(ZR, THR, target::Thread::top_exit_frame_info_offset());
+
+  // Restore the global object pool after returning from runtime (old space is
+  // moving, so the GOP could have been relocated).
+  if (FLAG_precompiled_mode) {
+    __ SetupGlobalPoolAndDispatchTable();
+  }
+
+  __ LeaveStubFrame();
+  __ ret();
+}
+
+void StubCodeCompiler::GenerateCallNoScopeNativeStub(Assembler* assembler) {
+  GenerateCallNativeWithWrapperStub(
+      assembler,
+      Address(THR,
+              target::Thread::no_scope_native_wrapper_entry_point_offset()));
+}
+
+void StubCodeCompiler::GenerateCallAutoScopeNativeStub(Assembler* assembler) {
+  GenerateCallNativeWithWrapperStub(
+      assembler,
+      Address(THR,
+              target::Thread::auto_scope_native_wrapper_entry_point_offset()));
+}
+
+// Input parameters:
+//   RA : return address.
+//   SP : address of return value.
+//   T5 : address of the native function to call.
+//   T2 : address of first argument in argument array.
+//   T1 : argc_tag including number of arguments and function kind.
+void StubCodeCompiler::GenerateCallBootstrapNativeStub(Assembler* assembler) {
+  GenerateCallNativeWithWrapperStub(
+      assembler,
+      Address(THR,
+              target::Thread::bootstrap_native_wrapper_entry_point_offset()));
+}
+
+// Input parameters:
+//   S4: arguments descriptor array.
+void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
+  // Create a stub frame as we are pushing some objects on the stack before
+  // calling into the runtime.
+  __ EnterStubFrame();
+  __ subi(SP, SP, 2 * target::kWordSize);
+  __ sx(S4, Address(SP, 1 * target::kWordSize));  // Preserve args descriptor.
+  __ sx(ZR, Address(SP, 0 * target::kWordSize));  // Result slot.
+  __ CallRuntime(kPatchStaticCallRuntimeEntry, 0);
+  __ lx(CODE_REG, Address(SP, 0 * target::kWordSize));  // Result.
+  __ lx(S4, Address(SP, 1 * target::kWordSize));  // Restore args descriptor.
+  __ addi(SP, SP, 2 * target::kWordSize);
+  __ LeaveStubFrame();
+  // Jump to the dart function.
+  __ LoadFieldFromOffset(TMP, CODE_REG, target::Code::entry_point_offset());
+  __ jr(TMP);
+}
+
+// Called from a static call only when an invalid code has been entered
+// (invalid because its function was optimized or deoptimized).
+// S4: arguments descriptor array.
+void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
+  Label monomorphic;
+  __ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);
+
+  // Load code pointer to this stub from the thread:
+  // The one that is passed in, is not correct - it points to the code object
+  // that needs to be replaced.
+  __ lx(CODE_REG,
+        Address(THR, target::Thread::fix_callers_target_code_offset()));
+  // Create a stub frame as we are pushing some objects on the stack before
+  // calling into the runtime.
+  __ EnterStubFrame();
+  // Setup space on stack for return value and preserve arguments descriptor.
+  __ PushRegister(S4);
+  __ PushRegister(ZR);
+  __ CallRuntime(kFixCallersTargetRuntimeEntry, 0);
+  // Get Code object result and restore arguments descriptor array.
+  __ PopRegister(CODE_REG);
+  __ PopRegister(S4);
+  // Remove the stub frame.
+  __ LeaveStubFrame();
+  // Jump to the dart function.
+  __ LoadFieldFromOffset(TMP, CODE_REG, target::Code::entry_point_offset());
+  __ jr(TMP);
+
+  __ Bind(&monomorphic);
+  // Load code pointer to this stub from the thread:
+  // The one that is passed in, is not correct - it points to the code object
+  // that needs to be replaced.
+  __ lx(CODE_REG,
+        Address(THR, target::Thread::fix_callers_target_code_offset()));
+  // Create a stub frame as we are pushing some objects on the stack before
+  // calling into the runtime.
+  __ EnterStubFrame();
+  __ PushRegister(ZR);  // Result slot.
+  __ PushRegister(A0);  // Preserve receiver.
+  __ PushRegister(S5);  // Old cache value (also 2nd return value).
+  __ CallRuntime(kFixCallersTargetMonomorphicRuntimeEntry, 2);
+  __ PopRegister(S5);        // Get target cache object.
+  __ PopRegister(A0);        // Restore receiver.
+  __ PopRegister(CODE_REG);  // Get target Code object.
+  // Remove the stub frame.
+  __ LeaveStubFrame();
+  // Jump to the dart function.
+  __ LoadFieldFromOffset(
+      TMP, CODE_REG,
+      target::Code::entry_point_offset(CodeEntryKind::kMonomorphic));
+  __ jr(TMP);
+}
+
+// Called from object allocate instruction when the allocation stub has been
+// disabled.
+void StubCodeCompiler::GenerateFixAllocationStubTargetStub(
+    Assembler* assembler) {
+  // Load code pointer to this stub from the thread:
+  // The one that is passed in, is not correct - it points to the code object
+  // that needs to be replaced.
+  __ lx(CODE_REG,
+        Address(THR, target::Thread::fix_allocation_stub_code_offset()));
+  __ EnterStubFrame();
+  // Setup space on stack for return value.
+  __ PushRegister(ZR);
+  __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0);
+  // Get Code object result.
+  __ PopRegister(CODE_REG);
+  // Remove the stub frame.
+  __ LeaveStubFrame();
+  // Jump to the dart function.
+  __ LoadFieldFromOffset(TMP, CODE_REG, target::Code::entry_point_offset());
+  __ jr(TMP);
+}
+
+// Input parameters:
+//   T2: smi-tagged argument count, may be zero.
+//   FP[target::frame_layout.param_end_from_fp + 1]: last argument.
+static void PushArrayOfArguments(Assembler* assembler) {
+  COMPILE_ASSERT(AllocateArrayABI::kLengthReg == T2);
+  COMPILE_ASSERT(AllocateArrayABI::kTypeArgumentsReg == T1);
+
+  // Allocate array to store arguments of caller.
+  __ LoadObject(T1, NullObject());
+  // T1: null element type for raw Array.
+  // T2: smi-tagged argument count, may be zero.
+  __ JumpAndLink(StubCodeAllocateArray());
+  // A0: newly allocated array.
+  // T2: smi-tagged argument count, may be zero (was preserved by the stub).
+  __ PushRegister(A0);  // Array is in A0 and on top of stack.
+  __ SmiUntag(T2);
+  __ slli(T1, T2, target::kWordSizeLog2);
+  __ add(T1, T1, FP);
+  __ AddImmediate(T1,
+                  target::frame_layout.param_end_from_fp * target::kWordSize);
+  __ AddImmediate(T3, A0, target::Array::data_offset() - kHeapObjectTag);
+  // T1: address of first argument on stack.
+  // T3: address of first argument in array.
+
+  Label loop, loop_exit;
+  __ Bind(&loop);
+  __ beqz(T2, &loop_exit);
+  __ lx(T6, Address(T1, 0));
+  __ addi(T1, T1, -target::kWordSize);
+  __ StoreCompressedIntoObject(A0, Address(T3, 0), T6);
+  __ addi(T3, T3, target::kCompressedWordSize);
+  __ addi(T2, T2, -1);
+  __ j(&loop);
+  __ Bind(&loop_exit);
+}
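+
+// The copy loop above, rendered as C++ for clarity (names are illustrative):
+// it walks the caller's arguments downwards on the stack while filling the
+// array upwards, one slot per argument.
+//
+//   while (count > 0) {            // count is the untagged length in T2
+//     *array_slot = *stack_slot;   // StoreCompressedIntoObject (with barrier)
+//     stack_slot -= 1;             // T1 steps down by kWordSize
+//     array_slot += 1;             // T3 steps up by kCompressedWordSize
+//     count -= 1;
+//   }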
+
+// Used by eager and lazy deoptimization. Preserve result in A0 if necessary.
+// This stub translates an optimized frame into an unoptimized frame. The
+// optimized frame can contain values in registers and on the stack; the
+// unoptimized frame contains all values on the stack.
+// Deoptimization occurs in the following steps:
+// - Push all registers that can contain values.
+// - Call C routine to copy the stack and saved registers into temporary buffer.
+// - Adjust caller's frame to correct unoptimized frame size.
+// - Fill the unoptimized frame.
+// - Materialize objects that require allocation (e.g. Double instances).
+// GC can occur only after frame is fully rewritten.
+// Stack after EnterStubFrame() below:
+//   +------------------+
+//   | Saved PP         | <- PP
+//   +------------------+
+//   | PC marker        | <- TOS
+//   +------------------+
+//   | Saved FP         | <- FP of stub
+//   +------------------+
+//   | return-address   |  (deoptimization point)
+//   +------------------+
+//   | Saved CODE_REG   |
+//   +------------------+
+//   | ...              | <- SP of optimized frame
+//
+// Parts of the code cannot GC, part of the code can GC.
+static void GenerateDeoptimizationSequence(Assembler* assembler,
+                                           DeoptStubKind kind) {
+  // DeoptimizeCopyFrame expects a Dart frame, i.e. EnterDartFrame(0), but there
+  // is no need to set the correct PC marker or load PP, since they get patched.
+  __ EnterStubFrame();
+
+  // The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry
+  // and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls.
+  const intptr_t saved_result_slot_from_fp =
+      target::frame_layout.first_local_from_fp + 1 -
+      (kNumberOfCpuRegisters - A0);
+  const intptr_t saved_exception_slot_from_fp =
+      target::frame_layout.first_local_from_fp + 1 -
+      (kNumberOfCpuRegisters - A0);
+  const intptr_t saved_stacktrace_slot_from_fp =
+      target::frame_layout.first_local_from_fp + 1 -
+      (kNumberOfCpuRegisters - A1);
+  // Result in A0 is preserved as part of pushing all registers below.
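+  //
+  // Slot arithmetic sketch (illustrative): the registers are pushed in
+  // enumeration order directly below the first local, so register r lands at
+  // FP-relative slot first_local_from_fp + 1 - (kNumberOfCpuRegisters - r);
+  // substituting A0 (or A1) gives the three offsets computed above.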
+
+  // Push registers in their enumeration order: lowest register number at
+  // lowest address.
+  __ subi(SP, SP, kNumberOfCpuRegisters * target::kWordSize);
+  for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; i--) {
+    const Register r = static_cast<Register>(i);
+    if (r == CODE_REG) {
+      // Save the original value of CODE_REG pushed before invoking this stub
+      // instead of the value used to call this stub.
+      COMPILE_ASSERT(TMP > CODE_REG);  // TMP saved first
+      __ lx(TMP, Address(FP, 2 * target::kWordSize));
+      __ sx(TMP, Address(SP, i * target::kWordSize));
+    } else {
+      __ sx(r, Address(SP, i * target::kWordSize));
+    }
+  }
+
+  __ subi(SP, SP, kNumberOfFpuRegisters * kFpuRegisterSize);
+  for (intptr_t i = kNumberOfFpuRegisters - 1; i >= 0; i--) {
+    FRegister freg = static_cast<FRegister>(i);
+    __ fsd(freg, Address(SP, i * kFpuRegisterSize));
+  }
+
+  __ mv(A0, SP);  // Pass address of saved registers block.
+  bool is_lazy =
+      (kind == kLazyDeoptFromReturn) || (kind == kLazyDeoptFromThrow);
+  __ li(A1, is_lazy ? 1 : 0);
+  __ ReserveAlignedFrameSpace(0);
+  __ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 2);
+  // Result (A0) is stack-size (FP - SP) in bytes.
+
+  if (kind == kLazyDeoptFromReturn) {
+    // Restore result into T1 temporarily.
+    __ LoadFromOffset(T1, FP, saved_result_slot_from_fp * target::kWordSize);
+  } else if (kind == kLazyDeoptFromThrow) {
+    // Restore exception into T1 and stacktrace into T2 temporarily.
+    __ LoadFromOffset(T1, FP, saved_exception_slot_from_fp * target::kWordSize);
+    __ LoadFromOffset(T2, FP,
+                      saved_stacktrace_slot_from_fp * target::kWordSize);
+  }
+
+  // There is a Dart Frame on the stack. We must restore PP and leave frame.
+  __ RestoreCodePointer();
+  __ LeaveStubFrame();
+  __ sub(SP, FP, A0);
+
+  // DeoptimizeFillFrame expects a Dart frame, i.e. EnterDartFrame(0), but there
+  // is no need to set the correct PC marker or load PP, since they get patched.
+  __ EnterStubFrame();
+
+  if (kind == kLazyDeoptFromReturn) {
+    __ PushRegister(T1);  // Preserve result as first local.
+  } else if (kind == kLazyDeoptFromThrow) {
+    __ PushRegister(T1);  // Preserve exception as first local.
+    __ PushRegister(T2);  // Preserve stacktrace as second local.
+  }
+  __ ReserveAlignedFrameSpace(0);
+  __ mv(A0, FP);  // Pass last FP as parameter in A0.
+  __ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1);
+  if (kind == kLazyDeoptFromReturn) {
+    // Restore result into T1.
+    __ LoadFromOffset(
+        T1, FP, target::frame_layout.first_local_from_fp * target::kWordSize);
+  } else if (kind == kLazyDeoptFromThrow) {
+    // Restore exception into T1 and stacktrace into T2.
+    __ LoadFromOffset(
+        T1, FP, target::frame_layout.first_local_from_fp * target::kWordSize);
+    __ LoadFromOffset(
+        T2, FP,
+        (target::frame_layout.first_local_from_fp - 1) * target::kWordSize);
+  }
+  // Code above cannot cause GC.
+  // There is a Dart Frame on the stack. We must restore PP and leave frame.
+  __ RestoreCodePointer();
+  __ LeaveStubFrame();
+
+  // Frame is fully rewritten at this point and it is safe to perform a GC.
+  // Materialize any objects that were deferred by FillFrame because they
+  // require allocation.
+  // Enter stub frame with loading PP. The caller's PP is not materialized yet.
+  __ EnterStubFrame();
+  if (kind == kLazyDeoptFromReturn) {
+    __ PushRegister(T1);  // Preserve result, it will be GC-d here.
+  } else if (kind == kLazyDeoptFromThrow) {
+    __ PushRegister(T1);  // Preserve exception, it will be GC-d here.
+    __ PushRegister(T2);  // Preserve stacktrace, it will be GC-d here.
+  }
+
+  __ PushRegister(ZR);  // Space for the result.
+  __ CallRuntime(kDeoptimizeMaterializeRuntimeEntry, 0);
+  // Result tells stub how many bytes to remove from the expression stack
+  // of the bottom-most frame. They were used as materialization arguments.
+  __ PopRegister(T2);
+  __ SmiUntag(T2);
+  if (kind == kLazyDeoptFromReturn) {
+    __ PopRegister(A0);  // Restore result.
+  } else if (kind == kLazyDeoptFromThrow) {
+    __ PopRegister(A1);  // Restore stacktrace.
+    __ PopRegister(A0);  // Restore exception.
+  }
+  __ LeaveStubFrame();
+  // Remove materialization arguments.
+  __ add(SP, SP, T2);
+  // The caller is responsible for emitting the return instruction.
+}
+
+// A0: result, must be preserved
+void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub(
+    Assembler* assembler) {
+  // Push zap value instead of CODE_REG for lazy deopt.
+  __ LoadImmediate(TMP, kZapCodeReg);
+  __ PushRegister(TMP);
+  // Return address for "call" to deopt stub.
+  __ LoadImmediate(RA, kZapReturnAddress);
+  __ lx(CODE_REG,
+        Address(THR, target::Thread::lazy_deopt_from_return_stub_offset()));
+  GenerateDeoptimizationSequence(assembler, kLazyDeoptFromReturn);
+  __ ret();
+}
+
+// A0: exception, must be preserved
+// A1: stacktrace, must be preserved
+void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub(
+    Assembler* assembler) {
+  // Push zap value instead of CODE_REG for lazy deopt.
+  __ LoadImmediate(TMP, kZapCodeReg);
+  __ PushRegister(TMP);
+  // Return address for "call" to deopt stub.
+  __ LoadImmediate(RA, kZapReturnAddress);
+  __ lx(CODE_REG,
+        Address(THR, target::Thread::lazy_deopt_from_throw_stub_offset()));
+  GenerateDeoptimizationSequence(assembler, kLazyDeoptFromThrow);
+  __ ret();
+}
+
+void StubCodeCompiler::GenerateDeoptimizeStub(Assembler* assembler) {
+  __ PushRegister(CODE_REG);
+  __ lx(CODE_REG, Address(THR, target::Thread::deoptimize_stub_offset()));
+  GenerateDeoptimizationSequence(assembler, kEagerDeopt);
+  __ ret();
+}
+
+// S5: ICData/MegamorphicCache
+static void GenerateNoSuchMethodDispatcherBody(Assembler* assembler) {
+  __ EnterStubFrame();
+
+  __ lx(S4,
+        FieldAddress(S5, target::CallSiteData::arguments_descriptor_offset()));
+
+  // Load the receiver.
+  __ LoadCompressedSmiFieldFromOffset(
+      T2, S4, target::ArgumentsDescriptor::size_offset());
+  __ slli(TMP, T2, target::kWordSizeLog2 - 1);  // T2 is Smi.
+  __ add(TMP, TMP, FP);
+  __ LoadFromOffset(A0, TMP,
+                    target::frame_layout.param_end_from_fp * target::kWordSize);
+  __ PushRegister(ZR);  // Result slot.
+  __ PushRegister(A0);  // Receiver.
+  __ PushRegister(S5);  // ICData/MegamorphicCache.
+  __ PushRegister(S4);  // Arguments descriptor.
+
+  // Adjust arguments count.
+  __ LoadCompressedSmiFieldFromOffset(
+      T3, S4, target::ArgumentsDescriptor::type_args_len_offset());
+  Label args_count_ok;
+  __ beqz(T3, &args_count_ok, Assembler::kNearJump);
+  // Include the type arguments.
+  __ addi(T2, T2, target::ToRawSmi(1));
+  __ Bind(&args_count_ok);
+
+  // T2: Smi-tagged arguments array length.
+  PushArrayOfArguments(assembler);
+  const intptr_t kNumArgs = 4;
+  __ CallRuntime(kNoSuchMethodFromCallStubRuntimeEntry, kNumArgs);
+  __ Drop(4);
+  __ PopRegister(A0);  // Return value.
+  __ LeaveStubFrame();
+  __ ret();
+}
+
+static void GenerateDispatcherCode(Assembler* assembler,
+                                   Label* call_target_function) {
+  __ Comment("NoSuchMethodDispatch");
+  // When lazily generated invocation dispatchers are disabled, the
+  // miss-handler may return null.
+  __ bne(T0, NULL_REG, call_target_function);
+
+  GenerateNoSuchMethodDispatcherBody(assembler);
+}
+
+// Input:
+//   S4 - arguments descriptor
+//   S5 - icdata/megamorphic_cache
+void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub(
+    Assembler* assembler) {
+  GenerateNoSuchMethodDispatcherBody(assembler);
+}
+
+// Called for inline allocation of arrays.
+// Input registers (preserved):
+//   RA: return address.
+//   AllocateArrayABI::kLengthReg: array length as Smi.
+//   AllocateArrayABI::kTypeArgumentsReg: type arguments of array.
+// Output registers:
+//   AllocateArrayABI::kResultReg: newly allocated array.
+// Clobbered:
+//   T3, T4, T5
+void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
+  if (!FLAG_use_slow_path && FLAG_inline_alloc) {
+    Label slow_case;
+    // Compute the size to be allocated; it is based on the array length
+    // and is computed as:
+    // RoundedAllocationSize(
+    //     (array_length * kCompressedWordSize) + target::Array::header_size()).
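+    // For example (sketch; exact constants depend on the target): with 8-byte
+    // slots, a 24-byte array header, and 16-byte object alignment, a length-2
+    // array needs 2 * 8 + 24 = 40 bytes, which the rounding below takes to
+    //   (40 + 15) & ~15 == 48.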
+    // Check that length is a Smi.
+    __ BranchIfNotSmi(AllocateArrayABI::kLengthReg, &slow_case);
+
+    // Check length >= 0 && length <= kMaxNewSpaceElements
+    const intptr_t max_len =
+        target::ToRawSmi(target::Array::kMaxNewSpaceElements);
+    __ CompareImmediate(AllocateArrayABI::kLengthReg, max_len, kObjectBytes);
+    __ BranchIf(HI, &slow_case);
+
+    const intptr_t cid = kArrayCid;
+    NOT_IN_PRODUCT(__ MaybeTraceAllocation(kArrayCid, T4, &slow_case));
+
+    // Calculate and align allocation size.
+    // Load new object start and calculate next object start.
+    // AllocateArrayABI::kTypeArgumentsReg: type arguments of array.
+    // AllocateArrayABI::kLengthReg: array length as Smi.
+    __ lx(AllocateArrayABI::kResultReg,
+          Address(THR, target::Thread::top_offset()));
+    intptr_t fixed_size_plus_alignment_padding =
+        target::Array::header_size() +
+        target::ObjectAlignment::kObjectAlignment - 1;
+    // AllocateArrayABI::kLengthReg is Smi.
+    __ slli(T3, AllocateArrayABI::kLengthReg,
+            target::kWordSizeLog2 - kSmiTagSize);
+    __ AddImmediate(T3, fixed_size_plus_alignment_padding);
+    __ andi(T3, T3, ~(target::ObjectAlignment::kObjectAlignment - 1));
+    // AllocateArrayABI::kResultReg: potential new object start.
+    // T3: object size in bytes.
+    __ add(T4, AllocateArrayABI::kResultReg, T3);
+    // Branch if unsigned overflow.
+    __ bltu(T4, AllocateArrayABI::kResultReg, &slow_case);
+
+    // Check if the allocation fits into the remaining space.
+    // AllocateArrayABI::kResultReg: potential new object start.
+    // AllocateArrayABI::kTypeArgumentsReg: type arguments of array.
+    // AllocateArrayABI::kLengthReg: array length as Smi.
+    // T3: array size.
+    // T4: potential next object start.
+    __ LoadFromOffset(TMP, THR, target::Thread::end_offset());
+    __ bgeu(T4, TMP, &slow_case);  // Branch if unsigned higher or equal.
+
+    // Successfully allocated the object(s), now update top to point to
+    // next object start and initialize the object.
+    // AllocateArrayABI::kResultReg: potential new object start.
+    // T3: array size.
+    // T4: potential next object start.
+    __ sx(T4, Address(THR, target::Thread::top_offset()));
+    __ addi(AllocateArrayABI::kResultReg, AllocateArrayABI::kResultReg,
+            kHeapObjectTag);
+
+    // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
+    // AllocateArrayABI::kTypeArgumentsReg: type arguments of array.
+    // AllocateArrayABI::kLengthReg: array length as Smi.
+    // T3: array size.
+    // T4: new object end address.
+
+    // Store the type argument field.
+    __ StoreCompressedIntoObjectOffsetNoBarrier(
+        AllocateArrayABI::kResultReg, target::Array::type_arguments_offset(),
+        AllocateArrayABI::kTypeArgumentsReg);
+
+    // Set the length field.
+    __ StoreCompressedIntoObjectOffsetNoBarrier(AllocateArrayABI::kResultReg,
+                                                target::Array::length_offset(),
+                                                AllocateArrayABI::kLengthReg);
+
+    // Calculate the size tag.
+    // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
+    // AllocateArrayABI::kLengthReg: array length as Smi.
+    // T3: array size.
+    // T4: new object end address.
+    const intptr_t shift = target::UntaggedObject::kTagBitsSizeTagPos -
+                           target::ObjectAlignment::kObjectAlignmentLog2;
+    __ li(T5, 0);
+    __ CompareImmediate(T3, target::UntaggedObject::kSizeTagMaxSizeTag);
+    compiler::Label zero_tag;
+    __ BranchIf(UNSIGNED_GREATER, &zero_tag);
+    __ slli(T5, T3, shift);
+    __ Bind(&zero_tag);
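+    // Illustrative note on the shift above: since the size is already a
+    // multiple of the object alignment, T5 ends up holding
+    //   (size >> kObjectAlignmentLog2) << kTagBitsSizeTagPos,
+    // and a tag of zero (the zero_tag path) means the size is too large to
+    // encode and must be looked up from the class instead.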
+
+    // Get the class index and insert it into the tags.
+    const uword tags =
+        target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
+
+    __ OrImmediate(T5, T5, tags);
+    __ StoreFieldToOffset(T5, AllocateArrayABI::kResultReg,
+                          target::Array::tags_offset());
+
+    // Initialize all array elements to raw_null.
+    // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
+    // T4: new object end address.
+    // AllocateArrayABI::kLengthReg: array length as Smi.
+    __ AddImmediate(T3, AllocateArrayABI::kResultReg,
+                    target::Array::data_offset() - kHeapObjectTag);
+    // T3: iterator which initially points to the start of the variable
+    // data area to be initialized.
+    Label loop, done;
+    __ Bind(&loop);
+    // TODO(cshapiro): StoreIntoObjectNoBarrier
+    __ bgeu(T3, T4, &done);
+    __ sx(NULL_REG, Address(T3, 0));
+    __ sx(NULL_REG, Address(T3, target::kCompressedWordSize));
+    __ AddImmediate(T3, 2 * target::kCompressedWordSize);
+    __ j(&loop);  // Loop until T3 == T4.
+    __ Bind(&done);
+
+    // Done allocating and initializing the array.
+    // AllocateArrayABI::kResultReg: new object.
+    // AllocateArrayABI::kLengthReg: array length as Smi (preserved).
+    __ ret();
+
+    // Unable to allocate the array using the fast inline code, so just call
+    // into the runtime.
+    __ Bind(&slow_case);
+  }
+
+  // Create a stub frame as we are pushing some objects on the stack before
+  // calling into the runtime.
+  __ EnterStubFrame();
+  __ subi(SP, SP, 3 * target::kWordSize);
+  __ sx(ZR, Address(SP, 2 * target::kWordSize));  // Result slot.
+  __ sx(AllocateArrayABI::kLengthReg, Address(SP, 1 * target::kWordSize));
+  __ sx(AllocateArrayABI::kTypeArgumentsReg,
+        Address(SP, 0 * target::kWordSize));
+  __ CallRuntime(kAllocateArrayRuntimeEntry, 2);
+  __ lx(AllocateArrayABI::kTypeArgumentsReg,
+        Address(SP, 0 * target::kWordSize));
+  __ lx(AllocateArrayABI::kLengthReg, Address(SP, 1 * target::kWordSize));
+  __ lx(AllocateArrayABI::kResultReg, Address(SP, 2 * target::kWordSize));
+  __ addi(SP, SP, 3 * target::kWordSize);
+  __ LeaveStubFrame();
+
+  // Write-barrier elimination might be enabled for this array (depending on the
+  // array length). To be safe, we check whether the allocated object is in
+  // old space and, if so, call a leaf runtime entry to add it to the
+  // remembered set.
+  ASSERT(AllocateArrayABI::kResultReg == A0);
+  EnsureIsNewOrRemembered(assembler);
+
+  __ ret();
+}
+
+void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub(
+    Assembler* assembler) {
+  // For test purposes, call the allocation stub without attempting inline
+  // allocation.
+  if (!FLAG_use_slow_path && FLAG_inline_alloc) {
+    Label slow_case;
+    __ TryAllocate(compiler::MintClass(), &slow_case, Assembler::kNearJump,
+                   AllocateMintABI::kResultReg, AllocateMintABI::kTempReg);
+    __ ret();
+
+    __ Bind(&slow_case);
+  }
+  COMPILE_ASSERT(AllocateMintABI::kResultReg ==
+                 SharedSlowPathStubABI::kResultReg);
+  GenerateSharedStub(assembler, /*save_fpu_registers=*/true,
+                     &kAllocateMintRuntimeEntry,
+                     target::Thread::allocate_mint_with_fpu_regs_stub_offset(),
+                     /*allow_return=*/true,
+                     /*store_runtime_result_in_result_register=*/true);
+}
+
+void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub(
+    Assembler* assembler) {
+  // For test purposes, call the allocation stub without attempting inline
+  // allocation.
+  if (!FLAG_use_slow_path && FLAG_inline_alloc) {
+    Label slow_case;
+    __ TryAllocate(compiler::MintClass(), &slow_case, Assembler::kNearJump,
+                   AllocateMintABI::kResultReg, AllocateMintABI::kTempReg);
+    __ ret();
+
+    __ Bind(&slow_case);
+  }
+  COMPILE_ASSERT(AllocateMintABI::kResultReg ==
+                 SharedSlowPathStubABI::kResultReg);
+  GenerateSharedStub(
+      assembler, /*save_fpu_registers=*/false, &kAllocateMintRuntimeEntry,
+      target::Thread::allocate_mint_without_fpu_regs_stub_offset(),
+      /*allow_return=*/true,
+      /*store_runtime_result_in_result_register=*/true);
+}
+
+// Called when invoking Dart code from C++ (VM code).
+// Input parameters:
+//   RA : points to return address.
+//   A0 : target code or entry point (in bare instructions mode).
+//   A1 : arguments descriptor array.
+//   A2 : arguments array.
+//   A3 : current thread.
+// Beware!  TMP == A3
+void StubCodeCompiler::GenerateInvokeDartCodeStub(Assembler* assembler) {
+  __ Comment("InvokeDartCodeStub");
+
+  __ PushRegister(RA);  // Marker for the profiler.
+  __ EnterFrame(0);
+
+  // Push code object to PC marker slot.
+  __ lx(TMP2, Address(A3, target::Thread::invoke_dart_code_stub_offset()));
+  __ PushRegister(TMP2);
+
+#if defined(USING_SHADOW_CALL_STACK)
+#error Unimplemented
+#endif
+
+  // TODO(riscv): Consider using only volatile FPU registers in Dart code so we
+  // don't need to save the preserved FPU registers here.
+  __ PushNativeCalleeSavedRegisters();
+
+  // Set up THR, which caches the current thread in Dart code.
+  if (THR != A3) {
+    __ mv(THR, A3);
+  }
+
+  // Refresh pinned register values (incl. write barrier mask and null object).
+  __ RestorePinnedRegisters();
+
+  // Save the current VMTag on the stack.
+  __ LoadFromOffset(TMP, THR, target::Thread::vm_tag_offset());
+  __ PushRegister(TMP);
+
+  // Save top resource and top exit frame info, using TMP as a temporary
+  // register. StackFrameIterator reads the top exit frame info saved here.
+  __ LoadFromOffset(TMP, THR, target::Thread::top_resource_offset());
+  __ StoreToOffset(ZR, THR, target::Thread::top_resource_offset());
+  __ PushRegister(TMP);
+
+  __ LoadFromOffset(TMP, THR, target::Thread::exit_through_ffi_offset());
+  __ StoreToOffset(ZR, THR, target::Thread::exit_through_ffi_offset());
+  __ PushRegister(TMP);
+
+  __ LoadFromOffset(TMP, THR, target::Thread::top_exit_frame_info_offset());
+  __ StoreToOffset(ZR, THR, target::Thread::top_exit_frame_info_offset());
+  __ PushRegister(TMP);
+  // target::frame_layout.exit_link_slot_from_entry_fp must be kept in sync
+  // with the code below.
+#if XLEN == 32
+  ASSERT_EQUAL(target::frame_layout.exit_link_slot_from_entry_fp, -40);
+#elif XLEN == 64
+  ASSERT_EQUAL(target::frame_layout.exit_link_slot_from_entry_fp, -28);
+#endif
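+  // The slot index differs per XLEN because the callee-saved FPU registers
+  // pushed above occupy two words each on RV32 but only one on RV64.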
+
+  // Mark that the thread is executing Dart code. Do this after initializing the
+  // exit link for the profiler.
+  __ LoadImmediate(TMP, VMTag::kDartTagId);
+  __ StoreToOffset(TMP, THR, target::Thread::vm_tag_offset());
+
+  // Load arguments descriptor array, which is passed to Dart code.
+  __ LoadFromOffset(ARGS_DESC_REG, A1, VMHandles::kOffsetOfRawPtrInHandle);
+
+  // Load number of arguments into T5 and adjust count for type arguments.
+  __ LoadFieldFromOffset(T5, ARGS_DESC_REG,
+                         target::ArgumentsDescriptor::count_offset());
+  __ LoadFieldFromOffset(T3, ARGS_DESC_REG,
+                         target::ArgumentsDescriptor::type_args_len_offset());
+  __ SmiUntag(T5);
+  // Include the type arguments.
+  __ snez(T3, T3);  // T3 <- T3 == 0 ? 0 : 1
+  __ add(T5, T5, T3);
+
+  // Compute address of 'arguments array' data area into A2.
+  __ LoadFromOffset(A2, A2, VMHandles::kOffsetOfRawPtrInHandle);
+  __ AddImmediate(A2, target::Array::data_offset() - kHeapObjectTag);
+
+  // Set up arguments for the Dart call.
+  Label push_arguments;
+  Label done_push_arguments;
+  __ beqz(T5, &done_push_arguments);  // Check if there are arguments.
+  __ LoadImmediate(T2, 0);
+  __ Bind(&push_arguments);
+  __ lx(T3, Address(A2, 0));
+  __ PushRegister(T3);
+  __ addi(T2, T2, 1);
+  __ addi(A2, A2, target::kWordSize);
+  __ blt(T2, T5, &push_arguments, compiler::Assembler::kNearJump);
+  __ Bind(&done_push_arguments);
+
+  if (FLAG_precompiled_mode) {
+    __ SetupGlobalPoolAndDispatchTable();
+    __ mv(CODE_REG, ZR);  // GC-safe value into CODE_REG.
+  } else {
+    // We now load the pool pointer (PP) with a GC-safe value as we are about
+    // to invoke Dart code. We don't need a real object pool here.
+    __ li(PP, 1);  // PP is untagged, callee will tag and spill PP.
+    __ lx(CODE_REG, Address(A0, VMHandles::kOffsetOfRawPtrInHandle));
+    __ lx(A0, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
+  }
+
+  // Call the Dart code entrypoint.
+  __ jalr(A0);  // ARGS_DESC_REG is the arguments descriptor array.
+  __ Comment("InvokeDartCodeStub return");
+
+  // Get rid of arguments pushed on the stack.
+  __ addi(
+      SP, FP,
+      target::frame_layout.exit_link_slot_from_entry_fp * target::kWordSize);
+
+  // Restore the saved top exit frame info and top resource back into the
+  // Isolate structure, using TMP as a temporary register.
+  __ PopRegister(TMP);
+  __ StoreToOffset(TMP, THR, target::Thread::top_exit_frame_info_offset());
+  __ PopRegister(TMP);
+  __ StoreToOffset(TMP, THR, target::Thread::exit_through_ffi_offset());
+  __ PopRegister(TMP);
+  __ StoreToOffset(TMP, THR, target::Thread::top_resource_offset());
+
+  // Restore the current VMTag from the stack.
+  __ PopRegister(TMP);
+  __ StoreToOffset(TMP, THR, target::Thread::vm_tag_offset());
+
+  __ PopNativeCalleeSavedRegisters();
+
+  // Restore the frame pointer and C stack pointer and return.
+  __ LeaveFrame();
+  __ Drop(1);
+  __ ret();
+}
+
+// Helper to generate the space-allocation part of the context stubs.
+// It does not initialize the fields of the context.
+// Input:
+//   T1: number of context variables.
+// Output:
+//   A0: newly allocated Context object.
+// Clobbered:
+//   T2, T3, T4, TMP
+static void GenerateAllocateContextSpaceStub(Assembler* assembler,
+                                             Label* slow_case) {
+  // First compute the rounded instance size.
+  // T1: number of context variables.
+  intptr_t fixed_size_plus_alignment_padding =
+      target::Context::header_size() +
+      target::ObjectAlignment::kObjectAlignment - 1;
+  __ slli(T2, T1, kCompressedWordSizeLog2);
+  __ AddImmediate(T2, fixed_size_plus_alignment_padding);
+  __ andi(T2, T2, ~(target::ObjectAlignment::kObjectAlignment - 1));
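+  // T2 is now the rounded allocation size:
+  // RoundUp(header_size + T1 * kCompressedWordSize, kObjectAlignment).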
+
+  NOT_IN_PRODUCT(__ MaybeTraceAllocation(kContextCid, T4, slow_case));
+  // Now allocate the object.
+  // T1: number of context variables.
+  // T2: object size.
+  __ lx(A0, Address(THR, target::Thread::top_offset()));
+  __ add(T3, T2, A0);
+  // Check if the allocation fits into the remaining space.
+  // A0: potential new object.
+  // T1: number of context variables.
+  // T2: object size.
+  // T3: potential next object start.
+  __ lx(TMP, Address(THR, target::Thread::end_offset()));
+  __ CompareRegisters(T3, TMP);
+  __ BranchIf(CS, slow_case);  // Branch if unsigned higher or equal.
+
+  // Successfully allocated the object, now update top to point to
+  // next object start and initialize the object.
+  // A0: new object.
+  // T1: number of context variables.
+  // T2: object size.
+  // T3: next object start.
+  __ sx(T3, Address(THR, target::Thread::top_offset()));
+  __ addi(A0, A0, kHeapObjectTag);
+
+  // Calculate the size tag.
+  // A0: new object.
+  // T1: number of context variables.
+  // T2: object size.
+  const intptr_t shift = target::UntaggedObject::kTagBitsSizeTagPos -
+                         target::ObjectAlignment::kObjectAlignmentLog2;
+  __ li(T3, 0);
+  __ CompareImmediate(T2, target::UntaggedObject::kSizeTagMaxSizeTag);
+  // If the size fits in the size tag, shift T2 into place in T3; otherwise
+  // T3 stays zero.
+  compiler::Label zero_tag;
+  __ BranchIf(HI, &zero_tag);
+  __ slli(T3, T2, shift);
+  __ Bind(&zero_tag);
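+  // A zero size tag means the size cannot be encoded in the header; it is
+  // then recomputed from the object (for a Context, from its variable count).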
+
+  // Get the class index and insert it into the tags.
+  // T3: size and bit tags.
+  const uword tags =
+      target::MakeTagWordForNewSpaceObject(kContextCid, /*instance_size=*/0);
+
+  __ OrImmediate(T3, T3, tags);
+  __ StoreFieldToOffset(T3, A0, target::Object::tags_offset());
+
+  // Set up the number-of-context-variables field.
+  // A0: new object.
+  // T1: number of context variables as integer value (not object).
+  __ StoreFieldToOffset(T1, A0, target::Context::num_variables_offset(),
+                        kFourBytes);
+}
+
+// Called for inline allocation of contexts.
+// Input:
+//   T1: number of context variables.
+// Output:
+//   A0: newly allocated Context object.
+void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
+  if (!FLAG_use_slow_path && FLAG_inline_alloc) {
+    Label slow_case;
+
+    GenerateAllocateContextSpaceStub(assembler, &slow_case);
+
+    // Setup the parent field.
+    // A0: new object.
+    // T1: number of context variables.
+    __ StoreCompressedIntoObjectOffset(A0, target::Context::parent_offset(),
+                                       NULL_REG);
+
+    // Initialize the context variables.
+    // A0: new object.
+    // T1: number of context variables.
+    {
+      Label loop, done;
+      __ AddImmediate(T3, A0,
+                      target::Context::variable_offset(0) - kHeapObjectTag);
+      __ Bind(&loop);
+      __ subi(T1, T1, 1);
+      __ bltz(T1, &done);
+      __ sx(NULL_REG, Address(T3, 0));
+      __ addi(T3, T3, target::kCompressedWordSize);
+      __ j(&loop);
+      __ Bind(&done);
+    }
+
+    // Done allocating and initializing the context.
+    // A0: new object.
+    __ ret();
+
+    __ Bind(&slow_case);
+  }
+
+  // Create a stub frame as we are pushing some objects on the stack before
+  // calling into the runtime.
+  __ EnterStubFrame();
+  // Setup space on stack for return value.
+  __ SmiTag(T1);
+  __ PushObject(NullObject());
+  __ PushRegister(T1);
+  __ CallRuntime(kAllocateContextRuntimeEntry, 1);  // Allocate context.
+  __ Drop(1);          // Pop number of context variables argument.
+  __ PopRegister(A0);  // Pop the new context object.
+
+  // Write-barrier elimination might be enabled for this context (depending
+  // on its size). To be sure, check whether the allocated object is in old
+  // space and, if so, call a leaf runtime to add it to the remembered set.
+  EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
+
+  // A0: new object
+  // Restore the frame pointer.
+  __ LeaveStubFrame();
+  __ ret();
+}
+
+// Called to clone contexts.
+// Input:
+//   T5: context to clone.
+// Output:
+//   A0: newly allocated Context object.
+void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) {
+  if (!FLAG_use_slow_path && FLAG_inline_alloc) {
+    Label slow_case;
+
+    // Load the number of variables (int32) from the existing context.
+    __ lw(T1, FieldAddress(T5, target::Context::num_variables_offset()));
+
+    GenerateAllocateContextSpaceStub(assembler, &slow_case);
+
+    // Load parent in the existing context.
+    __ LoadCompressed(T3, FieldAddress(T5, target::Context::parent_offset()));
+    // Setup the parent field.
+    // A0: new context.
+    __ StoreCompressedIntoObjectNoBarrier(
+        A0, FieldAddress(A0, target::Context::parent_offset()), T3);
+
+    // Clone the context variables.
+    // A0: new context.
+    // T1: number of context variables.
+    {
+      Label loop, done;
+      // T3: Variable array address, new context.
+      __ AddImmediate(T3, A0,
+                      target::Context::variable_offset(0) - kHeapObjectTag);
+      // T4: Variable array address, old context.
+      __ AddImmediate(T4, T5,
+                      target::Context::variable_offset(0) - kHeapObjectTag);
+
+      __ Bind(&loop);
+      __ subi(T1, T1, 1);
+      __ bltz(T1, &done);
+      __ lx(T5, Address(T4, 0));
+      __ addi(T4, T4, target::kCompressedWordSize);
+      __ sx(T5, Address(T3, 0));
+      __ addi(T3, T3, target::kCompressedWordSize);
+      __ j(&loop);
+
+      __ Bind(&done);
+    }
+
+    // Done allocating and initializing the context.
+    // A0: new object.
+    __ ret();
+
+    __ Bind(&slow_case);
+  }
+
+  __ EnterStubFrame();
+
+  __ subi(SP, SP, 2 * target::kWordSize);
+  __ sx(NULL_REG, Address(SP, 1 * target::kWordSize));  // Result slot.
+  __ sx(T5, Address(SP, 0 * target::kWordSize));        // Context argument.
+  __ CallRuntime(kCloneContextRuntimeEntry, 1);
+  __ lx(A0, Address(SP, 1 * target::kWordSize));  // Context result.
+  __ addi(SP, SP, 2 * target::kWordSize);  // Pop result and argument.
+
+  // Write-barrier elimination might be enabled for this context (depending
+  // on its size). To be sure, check whether the allocated object is in old
+  // space and, if so, call a leaf runtime to add it to the remembered set.
+  EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
+
+  // A0: new object
+  __ LeaveStubFrame();
+  __ ret();
+}
+
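+// One wrapper is emitted per allocatable CPU register. Each wrapper spills
+// RA, TMP and kWriteBarrierObjectReg, moves the barriered object from its
+// register into kWriteBarrierObjectReg, and calls the common write barrier.
+// Every wrapper must be exactly kStoreBufferWrapperSize bytes so that callers
+// can locate a wrapper by register number alone (checked by the ASSERT_EQUAL
+// below).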
+void StubCodeCompiler::GenerateWriteBarrierWrappersStub(Assembler* assembler) {
+  for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
+    if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
+
+    Register reg = static_cast<Register>(i);
+    intptr_t start = __ CodeSize();
+    __ addi(SP, SP, -3 * target::kWordSize);
+    __ sx(RA, Address(SP, 2 * target::kWordSize));
+    __ sx(TMP, Address(SP, 1 * target::kWordSize));
+    __ sx(kWriteBarrierObjectReg, Address(SP, 0 * target::kWordSize));
+    __ mv(kWriteBarrierObjectReg, reg);
+    __ Call(Address(THR, target::Thread::write_barrier_entry_point_offset()));
+    __ lx(kWriteBarrierObjectReg, Address(SP, 0 * target::kWordSize));
+    __ lx(TMP, Address(SP, 1 * target::kWordSize));
+    __ lx(RA, Address(SP, 2 * target::kWordSize));
+    __ addi(SP, SP, 3 * target::kWordSize);
+    __ jr(TMP);  // Return.
+    intptr_t end = __ CodeSize();
+    ASSERT_EQUAL(end - start, kStoreBufferWrapperSize);
+  }
+}
+
+// Helper stub to implement Assembler::StoreIntoObject/Array.
+// Input parameters:
+//   A0: Object (old)
+//   A1: Value (old or new)
+//   A6: Slot
+// If A1 is new, add A0 to the store buffer. Otherwise A1 is old, mark A1
+// and add it to the mark list.
+COMPILE_ASSERT(kWriteBarrierObjectReg == A0);
+COMPILE_ASSERT(kWriteBarrierValueReg == A1);
+COMPILE_ASSERT(kWriteBarrierSlotReg == A6);
+static void GenerateWriteBarrierStubHelper(Assembler* assembler,
+                                           Address stub_code,
+                                           bool cards) {
+  Label add_to_mark_stack, remember_card, lost_race;
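+  // The new-object bit of the value's address distinguishes the paths: if A1
+  // is in new-space we fall through to the remembered-set path below,
+  // otherwise A1 is old and must be marked instead.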
+  __ andi(TMP2, A1, 1 << target::ObjectAlignment::kNewObjectBitPosition);
+  __ beqz(TMP2, &add_to_mark_stack);
+
+  if (cards) {
+    __ lbu(TMP2, FieldAddress(A0, target::Object::tags_offset()));
+    __ andi(TMP2, TMP2, 1 << target::UntaggedObject::kCardRememberedBit);
+    __ bnez(TMP2, &remember_card);
+  } else {
+#if defined(DEBUG)
+    Label ok;
+    __ lbu(TMP2, FieldAddress(A0, target::Object::tags_offset()));
+    __ andi(TMP2, TMP2, 1 << target::UntaggedObject::kCardRememberedBit);
+    __ beqz(TMP2, &ok, Assembler::kNearJump);
+    __ Stop("Wrong barrier!");
+    __ Bind(&ok);
+#endif
+  }
+
+  // Spill T2, T3, T4.
+  __ subi(SP, SP, 3 * target::kWordSize);
+  __ sx(T2, Address(SP, 2 * target::kWordSize));
+  __ sx(T3, Address(SP, 1 * target::kWordSize));
+  __ sx(T4, Address(SP, 0 * target::kWordSize));
+
+  // Atomically clear kOldAndNotRememberedBit.
+  // TODO(riscv): Use amoand instead of lr/sc.
+  ASSERT(target::Object::tags_offset() == 0);
+  __ subi(T3, A0, kHeapObjectTag);
+  // T3: Untagged address of header word (lr/sc do not support offsets).
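+  // lr acquires a reservation on the header word; sc stores T2 back only if
+  // the reservation still holds, writing zero to T4 on success and non-zero
+  // on failure, in which case we retry.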
+  Label retry;
+  __ Bind(&retry);
+  __ lr(T2, Address(T3, 0));
+  __ andi(TMP2, T2, 1 << target::UntaggedObject::kOldAndNotRememberedBit);
+  __ beqz(TMP2, &lost_race);
+  __ andi(T2, T2, ~(1 << target::UntaggedObject::kOldAndNotRememberedBit));
+  __ sc(T4, T2, Address(T3, 0));
+  __ bnez(T4, &retry);
+
+  // Load the StoreBuffer block out of the thread. Then load top_ out of the
+  // StoreBufferBlock and add the address to the pointers_.
+  __ LoadFromOffset(T4, THR, target::Thread::store_buffer_block_offset());
+  __ LoadFromOffset(T2, T4, target::StoreBufferBlock::top_offset(),
+                    kUnsignedFourBytes);
+  __ slli(T3, T2, target::kWordSizeLog2);
+  __ add(T3, T4, T3);
+  __ StoreToOffset(A0, T3, target::StoreBufferBlock::pointers_offset());
+
+  // Increment top_ and check for overflow.
+  // T2: top_.
+  // T4: StoreBufferBlock.
+  Label overflow;
+  __ addi(T2, T2, 1);
+  __ StoreToOffset(T2, T4, target::StoreBufferBlock::top_offset(),
+                   kUnsignedFourBytes);
+  __ CompareImmediate(T2, target::StoreBufferBlock::kSize);
+  // Restore values.
+  __ BranchIf(EQ, &overflow);
+
+  // Restore T2, T3, T4.
+  __ lx(T4, Address(SP, 0 * target::kWordSize));
+  __ lx(T3, Address(SP, 1 * target::kWordSize));
+  __ lx(T2, Address(SP, 2 * target::kWordSize));
+  __ addi(SP, SP, 3 * target::kWordSize);
+  __ ret();
+
+  // Handle overflow: Call the runtime leaf function.
+  __ Bind(&overflow);
+  // Restore T2, T3, T4.
+  __ lx(T4, Address(SP, 0 * target::kWordSize));
+  __ lx(T3, Address(SP, 1 * target::kWordSize));
+  __ lx(T2, Address(SP, 2 * target::kWordSize));
+  __ addi(SP, SP, 3 * target::kWordSize);
+  {
+    Assembler::CallRuntimeScope scope(assembler,
+                                      kStoreBufferBlockProcessRuntimeEntry,
+                                      /*frame_size=*/0, stub_code);
+    __ mv(A0, THR);
+    scope.Call(/*argument_count=*/1);
+  }
+  __ ret();
+
+  __ Bind(&add_to_mark_stack);
+  // Spill T2, T3, T4.
+  __ subi(SP, SP, 3 * target::kWordSize);
+  __ sx(T2, Address(SP, 2 * target::kWordSize));
+  __ sx(T3, Address(SP, 1 * target::kWordSize));
+  __ sx(T4, Address(SP, 0 * target::kWordSize));
+
+  // Atomically clear kOldAndNotMarkedBit.
+  // TODO(riscv): Use amoand instead of lr/sc.
+  Label marking_retry, marking_overflow;
+  ASSERT(target::Object::tags_offset() == 0);
+  __ subi(T3, A1, kHeapObjectTag);
+  // T3: Untagged address of header word (lr/sc do not support offsets).
+  __ Bind(&marking_retry);
+  __ lr(T2, Address(T3, 0));
+  __ andi(TMP2, T2, 1 << target::UntaggedObject::kOldAndNotMarkedBit);
+  __ beqz(TMP2, &lost_race);
+  __ andi(T2, T2, ~(1 << target::UntaggedObject::kOldAndNotMarkedBit));
+  __ sc(T4, T2, Address(T3, 0));
+  __ bnez(T4, &marking_retry);
+
+  __ LoadFromOffset(T4, THR, target::Thread::marking_stack_block_offset());
+  __ LoadFromOffset(T2, T4, target::MarkingStackBlock::top_offset(),
+                    kUnsignedFourBytes);
+  __ slli(T3, T2, target::kWordSizeLog2);
+  __ add(T3, T4, T3);
+  __ StoreToOffset(A1, T3, target::MarkingStackBlock::pointers_offset());
+  __ addi(T2, T2, 1);
+  __ StoreToOffset(T2, T4, target::MarkingStackBlock::top_offset(),
+                   kUnsignedFourBytes);
+  __ CompareImmediate(T2, target::MarkingStackBlock::kSize);
+  __ BranchIf(EQ, &marking_overflow);
+  // Restore T2, T3, T4.
+  __ lx(T4, Address(SP, 0 * target::kWordSize));
+  __ lx(T3, Address(SP, 1 * target::kWordSize));
+  __ lx(T2, Address(SP, 2 * target::kWordSize));
+  __ addi(SP, SP, 3 * target::kWordSize);
+  __ ret();
+
+  __ Bind(&marking_overflow);
+  // Restore T2, T3, T4.
+  __ lx(T4, Address(SP, 0 * target::kWordSize));
+  __ lx(T3, Address(SP, 1 * target::kWordSize));
+  __ lx(T2, Address(SP, 2 * target::kWordSize));
+  __ addi(SP, SP, 3 * target::kWordSize);
+  {
+    Assembler::CallRuntimeScope scope(assembler,
+                                      kMarkingStackBlockProcessRuntimeEntry,
+                                      /*frame_size=*/0, stub_code);
+    __ mv(A0, THR);
+    scope.Call(/*argument_count=*/1);
+  }
+  __ ret();
+
+  __ Bind(&lost_race);
+  // Restore T2, T3, T4.
+  __ lx(T4, Address(SP, 0 * target::kWordSize));
+  __ lx(T3, Address(SP, 1 * target::kWordSize));
+  __ lx(T2, Address(SP, 2 * target::kWordSize));
+  __ addi(SP, SP, 3 * target::kWordSize);
+  __ ret();
+
+  if (cards) {
+    Label remember_card_slow;
+
+    // Get card table.
+    __ Bind(&remember_card);
+    __ AndImmediate(TMP, A0, target::kOldPageMask);  // OldPage.
+    __ lx(TMP,
+          Address(TMP, target::OldPage::card_table_offset()));  // Card table.
+    __ beqz(TMP, &remember_card_slow);
+
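+    // The card index is the slot's offset within its OldPage shifted right by
+    // kBytesPerCardLog2; the card table is a byte array, so storing any
+    // non-zero byte dirties the card.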
+    // Dirty the card.
+    __ AndImmediate(TMP, A0, target::kOldPageMask);  // OldPage.
+    __ sub(A6, A6, TMP);                             // Offset in page.
+    __ lx(TMP,
+          Address(TMP, target::OldPage::card_table_offset()));  // Card table.
+    __ srli(A6, A6, target::OldPage::kBytesPerCardLog2);
+    __ add(TMP, TMP, A6);        // Card address.
+    __ sb(A0, Address(TMP, 0));  // Low byte of A0 is non-zero from object tag.
+    __ ret();
+
+    // Card table not yet allocated.
+    __ Bind(&remember_card_slow);
+    {
+      Assembler::CallRuntimeScope scope(assembler, kRememberCardRuntimeEntry,
+                                        /*frame_size=*/0, stub_code);
+      __ mv(A0, A0);  // Arg0 = Object
+      __ mv(A1, A6);  // Arg1 = Slot
+      scope.Call(/*argument_count=*/2);
+    }
+    __ ret();
+  }
+}
+
+void StubCodeCompiler::GenerateWriteBarrierStub(Assembler* assembler) {
+  GenerateWriteBarrierStubHelper(
+      assembler, Address(THR, target::Thread::write_barrier_code_offset()),
+      false);
+}
+
+void StubCodeCompiler::GenerateArrayWriteBarrierStub(Assembler* assembler) {
+  GenerateWriteBarrierStubHelper(
+      assembler,
+      Address(THR, target::Thread::array_write_barrier_code_offset()), true);
+}
+
+static void GenerateAllocateObjectHelper(Assembler* assembler,
+                                         bool is_cls_parameterized) {
+  const Register kTagsReg = T2;
+
+  {
+    Label slow_case;
+
+    const Register kNewTopReg = T3;
+
+    // Bump allocation.
+    {
+      const Register kInstanceSizeReg = T4;
+      const Register kEndReg = T5;
+
+      __ ExtractInstanceSizeFromTags(kInstanceSizeReg, kTagsReg);
+
+      // Load two words from Thread::top: top and end.
+      // AllocateObjectABI::kResultReg: potential next object start.
+      __ lx(AllocateObjectABI::kResultReg,
+            Address(THR, target::Thread::top_offset()));
+      __ lx(kEndReg, Address(THR, target::Thread::end_offset()));
+
+      __ add(kNewTopReg, AllocateObjectABI::kResultReg, kInstanceSizeReg);
+
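+      // Branch to the slow case unless the new top remains strictly below
+      // end, i.e. unless the object fits in the remaining new-space.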
+      __ CompareRegisters(kEndReg, kNewTopReg);
+      __ BranchIf(UNSIGNED_LESS_EQUAL, &slow_case);
+
+      // Successfully allocated the object, now update top to point to
+      // next object start and store the class in the class field of object.
+      __ sx(kNewTopReg, Address(THR, target::Thread::top_offset()));
+    }  // kInstanceSizeReg = T4, kEndReg = T5
+
+    // Tags.
+    __ sx(kTagsReg, Address(AllocateObjectABI::kResultReg,
+                            target::Object::tags_offset()));
+
+    // Initialize the remaining words of the object.
+    {
+      const Register kFieldReg = T4;
+
+      __ AddImmediate(kFieldReg, AllocateObjectABI::kResultReg,
+                      target::Instance::first_field_offset());
+      Label done, init_loop;
+      __ Bind(&init_loop);
+      __ CompareRegisters(kFieldReg, kNewTopReg);
+      __ BranchIf(UNSIGNED_GREATER_EQUAL, &done);
+      __ sx(NULL_REG, Address(kFieldReg, 0));
+      __ addi(kFieldReg, kFieldReg, target::kCompressedWordSize);
+      __ j(&init_loop);
+
+      __ Bind(&done);
+    }  // kFieldReg = T4
+
+    if (is_cls_parameterized) {
+      Label not_parameterized_case;
+
+      const Register kClsIdReg = T4;
+      const Register kTypeOffsetReg = T5;
+
+      __ ExtractClassIdFromTags(kClsIdReg, kTagsReg);
+
+      // Load class' type_arguments_field offset in words.
+      __ LoadClassById(kTypeOffsetReg, kClsIdReg);
+      __ lw(
+          kTypeOffsetReg,
+          FieldAddress(kTypeOffsetReg,
+                       target::Class::
+                           host_type_arguments_field_offset_in_words_offset()));
+
+      // Set the type arguments in the new object.
+      __ slli(kTypeOffsetReg, kTypeOffsetReg, target::kWordSizeLog2);
+      __ add(kTypeOffsetReg, kTypeOffsetReg, AllocateObjectABI::kResultReg);
+      __ sx(AllocateObjectABI::kTypeArgumentsReg, Address(kTypeOffsetReg, 0));
+
+      __ Bind(&not_parameterized_case);
+    }  // kClsIdReg = T4, kTypeOffsetReg = T5
+
+    __ AddImmediate(AllocateObjectABI::kResultReg,
+                    AllocateObjectABI::kResultReg, kHeapObjectTag);
+
+    __ ret();
+
+    __ Bind(&slow_case);
+  }  // kNewTopReg = T3
+
+  // Fall back on slow case:
+  if (!is_cls_parameterized) {
+    __ mv(AllocateObjectABI::kTypeArgumentsReg, NULL_REG);
+  }
+  // Tail call to generic allocation stub.
+  __ lx(
+      TMP,
+      Address(THR, target::Thread::allocate_object_slow_entry_point_offset()));
+  __ jr(TMP);
+}
+
+// Called for inline allocation of objects (any class).
+void StubCodeCompiler::GenerateAllocateObjectStub(Assembler* assembler) {
+  GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/false);
+}
+
+void StubCodeCompiler::GenerateAllocateObjectParameterizedStub(
+    Assembler* assembler) {
+  GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/true);
+}
+
+void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
+  const Register kTagsToClsIdReg = T2;
+
+  if (!FLAG_precompiled_mode) {
+    __ lx(CODE_REG,
+          Address(THR, target::Thread::call_to_runtime_stub_offset()));
+  }
+
+  // Create a stub frame as we are pushing some objects on the stack before
+  // calling into the runtime.
+  __ EnterStubFrame();
+
+  __ ExtractClassIdFromTags(kTagsToClsIdReg, kTagsToClsIdReg);
+  __ LoadClassById(A0, kTagsToClsIdReg);
+
+  __ subi(SP, SP, 3 * target::kWordSize);
+  __ sx(ZR, Address(SP, 2 * target::kWordSize));  // Result slot.
+  __ sx(A0, Address(SP, 1 * target::kWordSize));  // Arg0: Class object.
+  __ sx(AllocateObjectABI::kTypeArgumentsReg,
+        Address(SP, 0 * target::kWordSize));  // Arg1: Type args or null.
+  __ CallRuntime(kAllocateObjectRuntimeEntry, 2);
+  __ lx(AllocateObjectABI::kResultReg, Address(SP, 2 * target::kWordSize));
+  __ addi(SP, SP, 3 * target::kWordSize);
+
+  // Write-barrier elimination is enabled for [cls], so we must ensure that
+  // the object is in new-space or has its remembered bit set.
+  EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
+
+  __ LeaveStubFrame();
+
+  __ ret();
+}
+
+// Called for inline allocation of objects.
+void StubCodeCompiler::GenerateAllocationStubForClass(
+    Assembler* assembler,
+    UnresolvedPcRelativeCalls* unresolved_calls,
+    const Class& cls,
+    const Code& allocate_object,
+    const Code& allocat_object_parametrized) {
+  classid_t cls_id = target::Class::GetId(cls);
+  ASSERT(cls_id != kIllegalCid);
+
+  RELEASE_ASSERT(AllocateObjectInstr::WillAllocateNewOrRemembered(cls));
+
+  // The generated code is different if the class is parameterized.
+  const bool is_cls_parameterized = target::Class::NumTypeArguments(cls) > 0;
+  ASSERT(!is_cls_parameterized || target::Class::TypeArgumentsFieldOffset(
+                                      cls) != target::Class::kNoTypeArguments);
+
+  const intptr_t instance_size = target::Class::GetInstanceSize(cls);
+  ASSERT(instance_size > 0);
+  RELEASE_ASSERT(target::Heap::IsAllocatableInNewSpace(instance_size));
+
+  const uword tags =
+      target::MakeTagWordForNewSpaceObject(cls_id, instance_size);
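+  // The tag word pre-encodes both the instance size and the class id, so the
+  // shared allocation helpers can recover them via ExtractInstanceSizeFromTags
+  // and ExtractClassIdFromTags rather than taking extra parameters.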
+
+  // Note: Keep in sync with helper function.
+  const Register kTagsReg = T2;
+  ASSERT(kTagsReg != AllocateObjectABI::kTypeArgumentsReg);
+
+  __ LoadImmediate(kTagsReg, tags);
+
+  if (!FLAG_use_slow_path && FLAG_inline_alloc &&
+      !target::Class::TraceAllocation(cls) &&
+      target::SizeFitsInSizeTag(instance_size)) {
+    if (is_cls_parameterized) {
+      // TODO(41974): Assign all allocation stubs to the root loading unit?
+      if (false &&
+          !IsSameObject(NullObject(),
+                        CastHandle<Object>(allocat_object_parametrized))) {
+        __ GenerateUnRelocatedPcRelativeTailCall();
+        unresolved_calls->Add(new UnresolvedPcRelativeCall(
+            __ CodeSize(), allocat_object_parametrized, /*is_tail_call=*/true));
+      } else {
+        __ lx(TMP,
+              Address(THR,
+                      target::Thread::
+                          allocate_object_parameterized_entry_point_offset()));
+        __ jr(TMP);
+      }
+    } else {
+      // TODO(41974): Assign all allocation stubs to the root loading unit?
+      if (false &&
+          !IsSameObject(NullObject(), CastHandle<Object>(allocate_object))) {
+        __ GenerateUnRelocatedPcRelativeTailCall();
+        unresolved_calls->Add(new UnresolvedPcRelativeCall(
+            __ CodeSize(), allocate_object, /*is_tail_call=*/true));
+      } else {
+        __ lx(
+            TMP,
+            Address(THR, target::Thread::allocate_object_entry_point_offset()));
+        __ jr(TMP);
+      }
+    }
+  } else {
+    if (!is_cls_parameterized) {
+      __ LoadObject(AllocateObjectABI::kTypeArgumentsReg, NullObject());
+    }
+    __ lx(TMP,
+          Address(THR,
+                  target::Thread::allocate_object_slow_entry_point_offset()));
+    __ jr(TMP);
+  }
+}
+
+// Called to invoke the "dynamic noSuchMethod(Invocation invocation)" function
+// from the entry code of a Dart function after an error in the passed
+// argument names or count has been detected.
+// Input parameters:
+//  RA : return address.
+//  SP : address of last argument.
+//  S4: arguments descriptor array.
+void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub(
+    Assembler* assembler) {
+  __ EnterStubFrame();
+
+  // Load the receiver.
+  __ LoadCompressedSmiFieldFromOffset(
+      T2, S4, target::ArgumentsDescriptor::size_offset());
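+  // T2 is a Smi, i.e. the value shifted left by one, so shifting it left by
+  // kWordSizeLog2 - 1 both untags it and scales it to a byte offset.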
+  __ slli(TMP, T2, target::kWordSizeLog2 - 1);  // T2 is Smi
+  __ add(TMP, TMP, FP);
+  __ LoadFromOffset(A0, TMP,
+                    target::frame_layout.param_end_from_fp * target::kWordSize);
+
+  // Load the function.
+  __ LoadCompressedFieldFromOffset(TMP, A0, target::Closure::function_offset());
+
+  __ PushRegister(ZR);   // Result slot.
+  __ PushRegister(A0);   // Receiver.
+  __ PushRegister(TMP);  // Function.
+  __ PushRegister(S4);   // Arguments descriptor.
+
+  // Adjust arguments count.
+  __ LoadCompressedSmiFieldFromOffset(
+      T3, S4, target::ArgumentsDescriptor::type_args_len_offset());
+  Label args_count_ok;
+  __ beqz(T3, &args_count_ok, Assembler::kNearJump);
+  // Include the type arguments.
+  __ addi(T2, T2, target::ToRawSmi(1));
+  __ Bind(&args_count_ok);
+
+  // T2: Smi-tagged arguments array length.
+  PushArrayOfArguments(assembler);
+
+  const intptr_t kNumArgs = 4;
+  __ CallRuntime(kNoSuchMethodFromPrologueRuntimeEntry, kNumArgs);
+  // noSuchMethod on closures always throws an error, so it will never return.
+  __ ebreak();
+}
+
+//  A6: function object.
+//  S5: inline cache data object.
+// Cannot use function object from ICData as it may be the inlined
+// function and not the top-scope function.
+void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement(
+    Assembler* assembler) {
+  if (FLAG_precompiled_mode) {
+    __ Breakpoint();
+    return;
+  }
+  if (FLAG_trace_optimized_ic_calls) {
+    __ Stop("Unimplemented");
+  }
+  __ LoadFieldFromOffset(TMP, A6, target::Function::usage_counter_offset(),
+                         kFourBytes);
+  __ addi(TMP, TMP, 1);
+  __ StoreFieldToOffset(TMP, A6, target::Function::usage_counter_offset(),
+                        kFourBytes);
+}
+
+// Loads function into 'func_reg'.
+void StubCodeCompiler::GenerateUsageCounterIncrement(Assembler* assembler,
+                                                     Register func_reg) {
+  if (FLAG_precompiled_mode) {
+    __ trap();
+    return;
+  }
+  if (FLAG_optimization_counter_threshold >= 0) {
+    __ Comment("Increment function counter");
+    __ LoadFieldFromOffset(func_reg, IC_DATA_REG,
+                           target::ICData::owner_offset());
+    __ LoadFieldFromOffset(
+        A1, func_reg, target::Function::usage_counter_offset(), kFourBytes);
+    __ AddImmediate(A1, 1);
+    __ StoreFieldToOffset(A1, func_reg,
+                          target::Function::usage_counter_offset(), kFourBytes);
+  }
+}
+
+// Note: S5 must be preserved.
+// Attempt a quick Smi operation for known operations ('kind'). The ICData
+// must have been primed with a Smi/Smi check that will be used for counting
+// the invocations.
+static void EmitFastSmiOp(Assembler* assembler,
+                          Token::Kind kind,
+                          intptr_t num_args,
+                          Label* not_smi_or_overflow) {
+  __ Comment("Fast Smi op");
+  __ lx(A0, Address(SP, +1 * target::kWordSize));  // Left.
+  __ lx(A1, Address(SP, +0 * target::kWordSize));  // Right.
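+  // Smis have a zero tag bit, so OR-ing both operands and testing the result
+  // against kSmiTagMask rejects the pair with a single branch if either is
+  // not a Smi.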
+  __ or_(TMP2, A0, A1);
+  __ andi(TMP2, TMP2, kSmiTagMask);
+  __ bnez(TMP2, not_smi_or_overflow);
+  switch (kind) {
+    case Token::kADD: {
+      __ AddBranchOverflow(A0, A0, A1, not_smi_or_overflow);
+      break;
+    }
+    case Token::kLT: {
+      // TODO(riscv): Bit tricks with slt and NULL_REG.
+      Label load_true, done;
+      __ blt(A0, A1, &load_true, compiler::Assembler::kNearJump);
+      __ LoadObject(A0, CastHandle<Object>(FalseObject()));
+      __ j(&done, Assembler::kNearJump);
+      __ Bind(&load_true);
+      __ LoadObject(A0, CastHandle<Object>(TrueObject()));
+      __ Bind(&done);
+      break;
+    }
+    case Token::kEQ: {
+      // TODO(riscv): Bit tricks with slt and NULL_REG.
+      Label load_true, done;
+      __ beq(A0, A1, &load_true, Assembler::kNearJump);
+      __ LoadObject(A0, CastHandle<Object>(FalseObject()));
+      __ j(&done, Assembler::kNearJump);
+      __ Bind(&load_true);
+      __ LoadObject(A0, CastHandle<Object>(TrueObject()));
+      __ Bind(&done);
+      break;
+    }
+    default:
+      UNIMPLEMENTED();
+  }
+
+  // S5: IC data object (preserved).
+  __ LoadFieldFromOffset(A6, IC_DATA_REG, target::ICData::entries_offset());
+  // A6: ic_data_array with check entries: classes and target functions.
+  __ AddImmediate(A6, target::Array::data_offset() - kHeapObjectTag);
+// A6: points directly to the first ic data array element.
+#if defined(DEBUG)
+  // Check that first entry is for Smi/Smi.
+  Label error, ok;
+  const intptr_t imm_smi_cid = target::ToRawSmi(kSmiCid);
+  __ LoadCompressedSmiFromOffset(TMP, A6, 0);
+  __ CompareImmediate(TMP, imm_smi_cid);
+  __ BranchIf(NE, &error);
+  __ LoadCompressedSmiFromOffset(TMP, A6, target::kCompressedWordSize);
+  __ CompareImmediate(TMP, imm_smi_cid);
+  __ BranchIf(EQ, &ok);
+  __ Bind(&error);
+  __ Stop("Incorrect IC data");
+  __ Bind(&ok);
+#endif
+  if (FLAG_optimization_counter_threshold >= 0) {
+    const intptr_t count_offset =
+        target::ICData::CountIndexFor(num_args) * target::kCompressedWordSize;
+    // Update counter, ignore overflow.
+    __ LoadCompressedSmiFromOffset(A1, A6, count_offset);
+    __ addi(A1, A1, target::ToRawSmi(1));
+    __ StoreToOffset(A1, A6, count_offset);
+  }
+
+  __ ret();
+}
+
+// Saves the offset of the target entry-point (from the Function) into T6.
+//
+// Must be the first code generated, since any code before will be skipped in
+// the unchecked entry-point.
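+//
+// The checked entry records the offset of the default entry-point and jumps
+// over the unchecked marker; control arriving at the unchecked entry records
+// the unchecked entry-point offset instead. Either way, T6 ends up holding an
+// untagged byte offset into the Function object.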
+static void GenerateRecordEntryPoint(Assembler* assembler) {
+  Label done;
+  __ LoadImmediate(T6, target::Function::entry_point_offset() - kHeapObjectTag);
+  __ j(&done, Assembler::kNearJump);
+  __ BindUncheckedEntryPoint();
+  __ LoadImmediate(
+      T6, target::Function::entry_point_offset(CodeEntryKind::kUnchecked) -
+              kHeapObjectTag);
+  __ Bind(&done);
+}
+
+// Generate inline cache check for 'num_args'.
+//  A0: receiver (if instance call)
+//  S5: ICData
+//  RA: return address
+// Control flow:
+// - If receiver is null -> jump to IC miss.
+// - If receiver is Smi -> load Smi class.
+// - If receiver is not-Smi -> load receiver's class.
+// - Check if 'num_args' (including receiver) match any IC data group.
+// - Match found -> jump to target.
+// - Match not found -> jump to IC miss.
+void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
+    Assembler* assembler,
+    intptr_t num_args,
+    const RuntimeEntry& handle_ic_miss,
+    Token::Kind kind,
+    Optimized optimized,
+    CallType type,
+    Exactness exactness) {
+  const bool save_entry_point = kind == Token::kILLEGAL;
+  if (FLAG_precompiled_mode) {
+    __ Breakpoint();
+    return;
+  }
+
+  if (save_entry_point) {
+    GenerateRecordEntryPoint(assembler);
+    // T6: untagged entry point offset
+  }
+
+  if (optimized == kOptimized) {
+    GenerateOptimizedUsageCounterIncrement(assembler);
+  } else {
+    GenerateUsageCounterIncrement(assembler, /*scratch=*/T0);
+  }
+
+  ASSERT(exactness == kIgnoreExactness);  // Unimplemented.
+  ASSERT(num_args == 1 || num_args == 2);
+#if defined(DEBUG)
+  {
+    Label ok;
+    // Check that the IC data array has NumArgsTested() == num_args.
+    // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
+    __ LoadFromOffset(TMP, IC_DATA_REG,
+                      target::ICData::state_bits_offset() - kHeapObjectTag,
+                      kUnsignedFourBytes);
+    ASSERT(target::ICData::NumArgsTestedShift() == 0);  // No shift needed.
+    __ andi(TMP, TMP, target::ICData::NumArgsTestedMask());
+    __ CompareImmediate(TMP, num_args);
+    __ BranchIf(EQ, &ok, Assembler::kNearJump);
+    __ Stop("Incorrect stub for IC data");
+    __ Bind(&ok);
+  }
+#endif  // DEBUG
+
+#if !defined(PRODUCT)
+  Label stepping, done_stepping;
+  if (optimized == kUnoptimized) {
+    __ Comment("Check single stepping");
+    __ LoadIsolate(TMP);
+    __ LoadFromOffset(TMP, TMP, target::Isolate::single_step_offset(),
+                      kUnsignedByte);
+    __ bnez(TMP, &stepping);
+    __ Bind(&done_stepping);
+  }
+#endif
+
+  Label not_smi_or_overflow;
+  if (kind != Token::kILLEGAL) {
+    EmitFastSmiOp(assembler, kind, num_args, &not_smi_or_overflow);
+  }
+  __ Bind(&not_smi_or_overflow);
+
+  __ Comment("Extract ICData initial values and receiver cid");
+  // S5: IC data object (preserved).
+  __ LoadFieldFromOffset(A1, IC_DATA_REG, target::ICData::entries_offset());
+  // A1: ic_data_array with check entries: classes and target functions.
+  __ AddImmediate(A1, target::Array::data_offset() - kHeapObjectTag);
+  // A1: points directly to the first ic data array element.
+
+  if (type == kInstanceCall) {
+    __ LoadTaggedClassIdMayBeSmi(T1, A0);
+    __ LoadFieldFromOffset(ARGS_DESC_REG, IC_DATA_REG,
+                           target::CallSiteData::arguments_descriptor_offset());
+    if (num_args == 2) {
+      __ LoadCompressedSmiFieldFromOffset(
+          A7, ARGS_DESC_REG, target::ArgumentsDescriptor::count_offset());
+      __ slli(A7, A7, target::kWordSizeLog2 - kSmiTagSize);
+      __ add(A7, SP, A7);
+      __ lx(A6, Address(A7, -2 * target::kWordSize));
+      __ LoadTaggedClassIdMayBeSmi(T2, A6);
+    }
+  } else {
+    __ LoadFieldFromOffset(ARGS_DESC_REG, IC_DATA_REG,
+                           target::CallSiteData::arguments_descriptor_offset());
+    __ LoadCompressedSmiFieldFromOffset(
+        A7, ARGS_DESC_REG, target::ArgumentsDescriptor::count_offset());
+    __ slli(A7, A7, target::kWordSizeLog2 - kSmiTagSize);
+    __ add(A7, A7, SP);
+    __ lx(A6, Address(A7, -1 * target::kWordSize));
+    __ LoadTaggedClassIdMayBeSmi(T1, A6);
+    if (num_args == 2) {
+      __ lx(A6, Address(A7, -2 * target::kWordSize));
+      __ LoadTaggedClassIdMayBeSmi(T2, A6);
+    }
+  }
+  // T1: first argument class ID as Smi.
+  // T2: second argument class ID as Smi.
+  // S4: args descriptor
+
+  // We unroll the loop more for the generic variant, which is generated only
+  // once.
+  const bool optimize = kind == Token::kILLEGAL;
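+  // The entries array is terminated by a kIllegalCid sentinel, so each
+  // unrolled iteration compares class ids first and only tests for the
+  // sentinel when advancing to the next entry.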
+
+  // Loop that checks if there is an IC data match.
+  Label loop, found, miss;
+  __ Comment("ICData loop");
+
+  __ Bind(&loop);
+  for (int unroll = optimize ? 4 : 2; unroll >= 0; unroll--) {
+    Label update;
+
+    __ LoadCompressedSmiFromOffset(A7, A1, 0);
+    if (num_args == 1) {
+      __ beq(A7, T1, &found);  // Class id match?
+    } else {
+      __ bne(A7, T1, &update);  // Continue.
+      __ LoadCompressedSmiFromOffset(A7, A1, target::kCompressedWordSize);
+      __ beq(A7, T2, &found);  // Class id match?
+    }
+    __ Bind(&update);
+
+    const intptr_t entry_size = target::ICData::TestEntryLengthFor(
+                                    num_args, exactness == kCheckExactness) *
+                                target::kCompressedWordSize;
+    __ AddImmediate(A1, entry_size);  // Next entry.
+
+    __ CompareImmediate(A7, target::ToRawSmi(kIllegalCid));  // Done?
+    if (unroll == 0) {
+      __ BranchIf(NE, &loop);
+    } else {
+      __ BranchIf(EQ, &miss);
+    }
+  }
+
+  __ Bind(&miss);
+  __ Comment("IC miss");
+
+  // Compute address of arguments.
+  __ LoadCompressedSmiFieldFromOffset(
+      A7, ARGS_DESC_REG, target::ArgumentsDescriptor::count_offset());
+  __ slli(A7, A7, target::kWordSizeLog2 - kSmiTagSize);
+  __ add(A7, A7, SP);
+  __ subi(A7, A7, 1 * target::kWordSize);
+
+  // A7: address of receiver
+  // Create a stub frame as we are pushing some objects on the stack before
+  // calling into the runtime.
+  __ EnterStubFrame();
+  // Preserve IC data object and arguments descriptor array and
+  // setup space on stack for result (target code object).
+  __ PushRegister(ARGS_DESC_REG);  // Preserve arguments descriptor array.
+  __ PushRegister(IC_DATA_REG);    // Preserve IC Data.
+  if (save_entry_point) {
+    __ SmiTag(T6);
+    __ PushRegister(T6);
+  }
+  // Setup space on stack for the result (target code object).
+  __ PushRegister(ZR);
+  // Push call arguments.
+  for (intptr_t i = 0; i < num_args; i++) {
+    __ LoadFromOffset(TMP, A7, -target::kWordSize * i);
+    __ PushRegister(TMP);
+  }
+  // Pass IC data object.
+  __ PushRegister(IC_DATA_REG);
+  __ CallRuntime(handle_ic_miss, num_args + 1);
+  // Remove the call arguments pushed earlier, including the IC data object.
+  __ Drop(num_args + 1);
+  // Pop the returned function object into T0 and restore the arguments
+  // descriptor array and IC data array.
+  __ PopRegister(T0);
+  if (save_entry_point) {
+    __ PopRegister(T6);
+    __ SmiUntag(T6);
+  }
+  __ PopRegister(IC_DATA_REG);    // Restore IC Data.
+  __ PopRegister(ARGS_DESC_REG);  // Restore arguments descriptor array.
+  __ RestoreCodePointer();
+  __ LeaveStubFrame();
+  Label call_target_function;
+  if (!FLAG_lazy_dispatchers) {
+    GenerateDispatcherCode(assembler, &call_target_function);
+  } else {
+    __ j(&call_target_function);
+  }
+
+  __ Bind(&found);
+  __ Comment("Update caller's counter");
+  // A1: pointer to an IC data check group.
+  const intptr_t target_offset =
+      target::ICData::TargetIndexFor(num_args) * target::kCompressedWordSize;
+  const intptr_t count_offset =
+      target::ICData::CountIndexFor(num_args) * target::kCompressedWordSize;
+  __ LoadCompressedFromOffset(T0, A1, target_offset);
+
+  if (FLAG_optimization_counter_threshold >= 0) {
+    // Update counter, ignore overflow.
+    __ LoadCompressedSmiFromOffset(TMP, A1, count_offset);
+    __ addi(TMP, TMP, target::ToRawSmi(1));
+    __ StoreToOffset(TMP, A1, count_offset);
+  }
+
+  __ Comment("Call target");
+  __ Bind(&call_target_function);
+  // T0: target function.
+  __ LoadCompressedFieldFromOffset(CODE_REG, T0,
+                                   target::Function::code_offset());
+  if (save_entry_point) {
+    __ add(A7, T0, T6);
+    __ lx(A7, Address(A7, 0));
+  } else {
+    __ LoadFieldFromOffset(A7, T0, target::Function::entry_point_offset());
+  }
+  __ jr(A7);  // T0: Function, argument to lazy compile stub.
+
+#if !defined(PRODUCT)
+  if (optimized == kUnoptimized) {
+    __ Bind(&stepping);
+    __ EnterStubFrame();
+    if (type == kInstanceCall) {
+      __ PushRegister(A0);  // Preserve receiver.
+    }
+    if (save_entry_point) {
+      __ SmiTag(T6);
+      __ PushRegister(T6);
+    }
+    __ PushRegister(IC_DATA_REG);  // Preserve IC data.
+    __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
+    __ PopRegister(IC_DATA_REG);
+    if (save_entry_point) {
+      __ PopRegister(T6);
+      __ SmiUntag(T6);
+    }
+    if (type == kInstanceCall) {
+      __ PopRegister(A0);
+    }
+    __ RestoreCodePointer();
+    __ LeaveStubFrame();
+    __ j(&done_stepping);
+  }
+#endif
+}
+
+// A0: receiver
+// S5: ICData
+// RA: return address
+void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub(
+    Assembler* assembler) {
+  GenerateNArgsCheckInlineCacheStub(
+      assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
+      kUnoptimized, kInstanceCall, kIgnoreExactness);
+}
+
+// A0: receiver
+// S5: ICData
+// RA: return address
+void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub(
+    Assembler* assembler) {
+  __ Stop("Unimplemented");
+}
+
+// A0: receiver
+// S5: ICData
+// RA: return address
+void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub(
+    Assembler* assembler) {
+  GenerateNArgsCheckInlineCacheStub(
+      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
+      kUnoptimized, kInstanceCall, kIgnoreExactness);
+}
+
+// A0: receiver
+// S5: ICData
+// RA: return address
+void StubCodeCompiler::GenerateSmiAddInlineCacheStub(Assembler* assembler) {
+  GenerateNArgsCheckInlineCacheStub(
+      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD,
+      kUnoptimized, kInstanceCall, kIgnoreExactness);
+}
+
+// A0: receiver
+// S5: ICData
+// RA: return address
+void StubCodeCompiler::GenerateSmiLessInlineCacheStub(Assembler* assembler) {
+  GenerateNArgsCheckInlineCacheStub(
+      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT,
+      kUnoptimized, kInstanceCall, kIgnoreExactness);
+}
+
+// A0: receiver
+// S5: ICData
+// RA: return address
+void StubCodeCompiler::GenerateSmiEqualInlineCacheStub(Assembler* assembler) {
+  GenerateNArgsCheckInlineCacheStub(
+      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ,
+      kUnoptimized, kInstanceCall, kIgnoreExactness);
+}
+
+// A0: receiver
+// S5: ICData
+// A6: Function
+// RA: return address
+void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub(
+    Assembler* assembler) {
+  GenerateNArgsCheckInlineCacheStub(
+      assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
+      kOptimized, kInstanceCall, kIgnoreExactness);
+}
+
+// A0: receiver
+// S5: ICData
+// A6: Function
+// RA: return address
+void StubCodeCompiler::
+    GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub(
+        Assembler* assembler) {
+  __ Stop("Unimplemented");
+}
+
+// A0: receiver
+// S5: ICData
+// A6: Function
+// RA: return address
+void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub(
+    Assembler* assembler) {
+  GenerateNArgsCheckInlineCacheStub(
+      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
+      kOptimized, kInstanceCall, kIgnoreExactness);
+}
+
+// S5: ICData
+// RA: return address
+void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub(
+    Assembler* assembler) {
+  GenerateRecordEntryPoint(assembler);
+  GenerateUsageCounterIncrement(assembler, /* scratch */ T0);
+
+#if defined(DEBUG)
+  {
+    Label ok;
+    // Check that the IC data array has NumArgsTested() == 0.
+    // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
+    __ LoadFromOffset(TMP, IC_DATA_REG,
+                      target::ICData::state_bits_offset() - kHeapObjectTag,
+                      kUnsignedFourBytes);
+    ASSERT(target::ICData::NumArgsTestedShift() == 0);  // No shift needed.
+    __ andi(TMP, TMP, target::ICData::NumArgsTestedMask());
+    __ CompareImmediate(TMP, 0);
+    __ BranchIf(EQ, &ok);
+    __ Stop("Incorrect IC data for unoptimized static call");
+    __ Bind(&ok);
+  }
+#endif  // DEBUG
+
+  // Check single stepping.
+#if !defined(PRODUCT)
+  Label stepping, done_stepping;
+  __ LoadIsolate(TMP);
+  __ LoadFromOffset(TMP, TMP, target::Isolate::single_step_offset(),
+                    kUnsignedByte);
+  __ bnez(TMP, &stepping, Assembler::kNearJump);
+  __ Bind(&done_stepping);
+#endif
+
+  // S5: IC data object (preserved).
+  __ LoadFieldFromOffset(A0, IC_DATA_REG, target::ICData::entries_offset());
+  // A0: ic_data_array with entries: target functions and count.
+  __ AddImmediate(A0, target::Array::data_offset() - kHeapObjectTag);
+  // A0: points directly to the first ic data array element.
+  const intptr_t target_offset =
+      target::ICData::TargetIndexFor(0) * target::kCompressedWordSize;
+  const intptr_t count_offset =
+      target::ICData::CountIndexFor(0) * target::kCompressedWordSize;
+
+  if (FLAG_optimization_counter_threshold >= 0) {
+    // Increment count for this call, ignore overflow.
+    __ LoadCompressedSmiFromOffset(TMP, A0, count_offset);
+    __ addi(TMP, TMP, target::ToRawSmi(1));
+    __ StoreToOffset(TMP, A0, count_offset);
+  }
+
+  // Load the arguments descriptor into ARGS_DESC_REG (S4).
+  __ LoadFieldFromOffset(ARGS_DESC_REG, IC_DATA_REG,
+                         target::CallSiteData::arguments_descriptor_offset());
+
+  // Get function and call it, if possible.
+  __ LoadCompressedFromOffset(T0, A0, target_offset);
+  __ LoadCompressedFieldFromOffset(CODE_REG, T0,
+                                   target::Function::code_offset());
+  __ add(A0, T0, T6);
+  __ lx(TMP, Address(A0, 0));
+  __ jr(TMP);  // T0: Function, argument to lazy compile stub.
+
+#if !defined(PRODUCT)
+  __ Bind(&stepping);
+  __ EnterStubFrame();
+  __ PushRegister(IC_DATA_REG);  // Preserve IC data.
+  __ SmiTag(T6);
+  __ PushRegister(T6);
+  __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
+  __ PopRegister(T6);
+  __ SmiUntag(T6);
+  __ PopRegister(IC_DATA_REG);
+  __ RestoreCodePointer();
+  __ LeaveStubFrame();
+  __ j(&done_stepping);
+#endif
+}
+
+// S5: ICData
+// RA: return address
+void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub(
+    Assembler* assembler) {
+  GenerateUsageCounterIncrement(assembler, /* scratch */ T0);
+  GenerateNArgsCheckInlineCacheStub(
+      assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
+      kUnoptimized, kStaticCall, kIgnoreExactness);
+}
+
+// S5: ICData
+// RA: return address
+void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub(
+    Assembler* assembler) {
+  GenerateUsageCounterIncrement(assembler, /* scratch */ T0);
+  GenerateNArgsCheckInlineCacheStub(
+      assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
+      kUnoptimized, kStaticCall, kIgnoreExactness);
+}
+
+// Stub for compiling a function and jumping to the compiled code.
+// S4: Arguments descriptor.
+// T0: Function.
+void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
+  // Preserve arg desc.
+  __ EnterStubFrame();
+  __ PushRegister(ARGS_DESC_REG);  // Save arg. desc.
+  __ PushRegister(T0);             // Pass function.
+  __ CallRuntime(kCompileFunctionRuntimeEntry, 1);
+  __ PopRegister(T0);             // Restore argument.
+  __ PopRegister(ARGS_DESC_REG);  // Restore arg desc.
+  __ LeaveStubFrame();
+
+  __ LoadCompressedFieldFromOffset(CODE_REG, T0,
+                                   target::Function::code_offset());
+  __ LoadFieldFromOffset(TMP, T0, target::Function::entry_point_offset());
+  __ jr(TMP);
+}
+
+// A0: Receiver
+// S5: ICData
+void StubCodeCompiler::GenerateICCallBreakpointStub(Assembler* assembler) {
+#if defined(PRODUCT)
+  __ Stop("No debugging in PRODUCT mode");
+#else
+  __ EnterStubFrame();
+  __ subi(SP, SP, 3 * target::kWordSize);
+  __ sx(A0, Address(SP, 2 * target::kWordSize));  // Preserve receiver.
+  __ sx(S5, Address(SP, 1 * target::kWordSize));  // Preserve IC data.
+  __ sx(ZR, Address(SP, 0 * target::kWordSize));  // Space for result.
+  __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
+  __ lx(CODE_REG, Address(SP, 0 * target::kWordSize));  // Original stub.
+  __ lx(S5, Address(SP, 1 * target::kWordSize));        // Restore IC data.
+  __ lx(A0, Address(SP, 2 * target::kWordSize));        // Restore receiver.
+  __ LeaveStubFrame();
+  __ LoadFieldFromOffset(TMP, CODE_REG, target::Code::entry_point_offset());
+  __ jr(TMP);
+#endif
+}
+
+// S5: ICData
+void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub(
+    Assembler* assembler) {
+#if defined(PRODUCT)
+  __ Stop("No debugging in PRODUCT mode");
+#else
+  __ EnterStubFrame();
+  __ subi(SP, SP, 2 * target::kWordSize);
+  __ sx(S5, Address(SP, 1 * target::kWordSize));  // Preserve IC data.
+  __ sx(ZR, Address(SP, 0 * target::kWordSize));  // Space for result.
+  __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
+  __ lx(CODE_REG, Address(SP, 0 * target::kWordSize));  // Original stub.
+  __ lx(S5, Address(SP, 1 * target::kWordSize));        // Restore IC data.
+  __ LeaveStubFrame();
+  __ LoadFieldFromOffset(TMP, CODE_REG, target::Code::entry_point_offset());
+  __ jr(TMP);
+#endif  // defined(PRODUCT)
+}
+
+void StubCodeCompiler::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
+#if defined(PRODUCT)
+  __ Stop("No debugging in PRODUCT mode");
+#else
+  __ EnterStubFrame();
+  __ subi(SP, SP, 1 * target::kWordSize);
+  __ sx(ZR, Address(SP, 0 * target::kWordSize));  // Space for result.
+  __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
+  __ lx(CODE_REG, Address(SP, 0 * target::kWordSize));
+  __ LeaveStubFrame();
+  __ LoadFieldFromOffset(TMP, CODE_REG, target::Code::entry_point_offset());
+  __ jr(TMP);
+#endif  // defined(PRODUCT)
+}
+
+// Called only from unoptimized code. All relevant registers have been saved.
+void StubCodeCompiler::GenerateDebugStepCheckStub(Assembler* assembler) {
+#if defined(PRODUCT)
+  __ Stop("No debugging in PRODUCT mode");
+#else
+  // Check single stepping.
+  Label stepping, done_stepping;
+  __ LoadIsolate(A1);
+  __ LoadFromOffset(A1, A1, target::Isolate::single_step_offset(),
+                    kUnsignedByte);
+  __ bnez(A1, &stepping, compiler::Assembler::kNearJump);
+  __ Bind(&done_stepping);
+  __ ret();
+
+  __ Bind(&stepping);
+  __ EnterStubFrame();
+  __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
+  __ LeaveStubFrame();
+  __ j(&done_stepping);
+#endif  // defined(PRODUCT)
+}
+
+// Used to check class and type arguments. Arguments passed in registers:
+//
+// Inputs (mostly from TypeTestABI struct):
+//   - kSubtypeTestCacheReg: UntaggedSubtypeTestCache
+//   - kInstanceReg: instance to test against.
+//   - kDstTypeReg: destination type (for n>=3).
+//   - kInstantiatorTypeArgumentsReg: instantiator type arguments (for n >= 5).
+//   - kFunctionTypeArgumentsReg: function type arguments (for n >= 5).
+//   - RA: return address.
+//
+// All input registers are preserved except for kSubtypeTestCacheReg, which
+// should be saved by the caller if needed.
+//
+// Result in TypeTestABI::kSubtypeTestCacheResultReg: null -> not found,
+// otherwise the cached result (true or false).
+static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
+  ASSERT(n == 1 || n == 3 || n == 5 || n == 7);
+
+  // Until we have the result, we use the result register to store the null
+  // value for quick access. This has the side benefit of initializing the
+  // result to null, so it only needs to be changed if found.
+  const Register kNullReg = TypeTestABI::kSubtypeTestCacheResultReg;
+  __ LoadObject(kNullReg, NullObject());
+
+  const Register kCacheArrayReg = TypeTestABI::kSubtypeTestCacheReg;
+  const Register kScratchReg = TypeTestABI::kScratchReg;
+
+  // All of these must be distinct from TypeTestABI::kSubtypeTestCacheResultReg
+  // since it is used for kNullReg as well.
+
+  // Loop initialization (moved up here to avoid having all dependent loads
+  // after each other).
+
+  // We avoid a load-acquire barrier here by relying on the fact that all other
+  // loads from the array are data-dependent loads.
+  __ lx(kCacheArrayReg, FieldAddress(TypeTestABI::kSubtypeTestCacheReg,
+                                     target::SubtypeTestCache::cache_offset()));
+  __ AddImmediate(kCacheArrayReg,
+                  target::Array::data_offset() - kHeapObjectTag);
+
+  Label loop, not_closure;
+  if (n >= 5) {
+    __ LoadClassIdMayBeSmi(STCInternalRegs::kInstanceCidOrSignatureReg,
+                           TypeTestABI::kInstanceReg);
+  } else {
+    __ LoadClassId(STCInternalRegs::kInstanceCidOrSignatureReg,
+                   TypeTestABI::kInstanceReg);
+  }
+  __ CompareImmediate(STCInternalRegs::kInstanceCidOrSignatureReg, kClosureCid);
+  __ BranchIf(NE, &not_closure);
+
+  // Closure handling.
+  {
+    __ Comment("Closure");
+    __ LoadCompressed(STCInternalRegs::kInstanceCidOrSignatureReg,
+                      FieldAddress(TypeTestABI::kInstanceReg,
+                                   target::Closure::function_offset()));
+    __ LoadCompressed(STCInternalRegs::kInstanceCidOrSignatureReg,
+                      FieldAddress(STCInternalRegs::kInstanceCidOrSignatureReg,
+                                   target::Function::signature_offset()));
+    if (n >= 3) {
+      __ LoadCompressed(
+          STCInternalRegs::kInstanceInstantiatorTypeArgumentsReg,
+          FieldAddress(TypeTestABI::kInstanceReg,
+                       target::Closure::instantiator_type_arguments_offset()));
+      if (n >= 7) {
+        __ LoadCompressed(
+            STCInternalRegs::kInstanceParentFunctionTypeArgumentsReg,
+            FieldAddress(TypeTestABI::kInstanceReg,
+                         target::Closure::function_type_arguments_offset()));
+        __ LoadCompressed(
+            STCInternalRegs::kInstanceDelayedFunctionTypeArgumentsReg,
+            FieldAddress(TypeTestABI::kInstanceReg,
+                         target::Closure::delayed_type_arguments_offset()));
+      }
+    }
+    __ j(&loop);
+  }
+
+  // Non-Closure handling.
+  {
+    __ Comment("Non-Closure");
+    __ Bind(&not_closure);
+    if (n >= 3) {
+      Label has_no_type_arguments;
+      __ LoadClassById(kScratchReg,
+                       STCInternalRegs::kInstanceCidOrSignatureReg);
+      __ mv(STCInternalRegs::kInstanceInstantiatorTypeArgumentsReg, kNullReg);
+      __ LoadFieldFromOffset(
+          kScratchReg, kScratchReg,
+          target::Class::host_type_arguments_field_offset_in_words_offset(),
+          kFourBytes);
+      __ CompareImmediate(kScratchReg, target::Class::kNoTypeArguments);
+      __ BranchIf(EQ, &has_no_type_arguments);
+      __ slli(kScratchReg, kScratchReg, kCompressedWordSizeLog2);
+      __ add(kScratchReg, kScratchReg, TypeTestABI::kInstanceReg);
+      __ LoadCompressed(STCInternalRegs::kInstanceInstantiatorTypeArgumentsReg,
+                        FieldAddress(kScratchReg, 0));
+      __ Bind(&has_no_type_arguments);
+      __ Comment("No type arguments");
+
+      if (n >= 7) {
+        __ mv(STCInternalRegs::kInstanceParentFunctionTypeArgumentsReg,
+              kNullReg);
+        __ mv(STCInternalRegs::kInstanceDelayedFunctionTypeArgumentsReg,
+              kNullReg);
+      }
+    }
+    __ SmiTag(STCInternalRegs::kInstanceCidOrSignatureReg);
+  }
+
+  Label found, done, next_iteration;
+
+  // Loop header
+  __ Bind(&loop);
+  __ Comment("Loop");
+  __ LoadCompressed(
+      kScratchReg,
+      Address(kCacheArrayReg,
+              target::kCompressedWordSize *
+                  target::SubtypeTestCache::kInstanceCidOrSignature));
+  __ CompareObjectRegisters(kScratchReg, kNullReg);
+  __ BranchIf(EQ, &done);
+  __ CompareObjectRegisters(kScratchReg,
+                            STCInternalRegs::kInstanceCidOrSignatureReg);
+  if (n == 1) {
+    __ BranchIf(EQ, &found);
+  } else {
+    __ BranchIf(NE, &next_iteration);
+    __ LoadCompressed(kScratchReg,
+                      Address(kCacheArrayReg,
+                              target::kCompressedWordSize *
+                                  target::SubtypeTestCache::kDestinationType));
+    __ CompareRegisters(kScratchReg, TypeTestABI::kDstTypeReg);
+    __ BranchIf(NE, &next_iteration);
+    __ LoadCompressed(
+        kScratchReg,
+        Address(kCacheArrayReg,
+                target::kCompressedWordSize *
+                    target::SubtypeTestCache::kInstanceTypeArguments));
+    __ CompareRegisters(kScratchReg,
+                        STCInternalRegs::kInstanceInstantiatorTypeArgumentsReg);
+    if (n == 3) {
+      __ BranchIf(EQ, &found);
+    } else {
+      __ BranchIf(NE, &next_iteration);
+      __ LoadCompressed(
+          kScratchReg,
+          Address(kCacheArrayReg,
+                  target::kCompressedWordSize *
+                      target::SubtypeTestCache::kInstantiatorTypeArguments));
+      __ CompareRegisters(kScratchReg,
+                          TypeTestABI::kInstantiatorTypeArgumentsReg);
+      __ BranchIf(NE, &next_iteration);
+      __ LoadCompressed(
+          kScratchReg,
+          Address(kCacheArrayReg,
+                  target::kCompressedWordSize *
+                      target::SubtypeTestCache::kFunctionTypeArguments));
+      __ CompareRegisters(kScratchReg, TypeTestABI::kFunctionTypeArgumentsReg);
+      if (n == 5) {
+        __ BranchIf(EQ, &found);
+      } else {
+        ASSERT(n == 7);
+        __ BranchIf(NE, &next_iteration);
+
+        __ LoadCompressed(
+            kScratchReg, Address(kCacheArrayReg,
+                                 target::kCompressedWordSize *
+                                     target::SubtypeTestCache::
+                                         kInstanceParentFunctionTypeArguments));
+        __ CompareRegisters(
+            kScratchReg,
+            STCInternalRegs::kInstanceParentFunctionTypeArgumentsReg);
+        __ BranchIf(NE, &next_iteration);
+
+        __ LoadCompressed(
+            kScratchReg,
+            Address(kCacheArrayReg,
+                    target::kCompressedWordSize *
+                        target::SubtypeTestCache::
+                            kInstanceDelayedFunctionTypeArguments));
+        __ CompareRegisters(
+            kScratchReg,
+            STCInternalRegs::kInstanceDelayedFunctionTypeArgumentsReg);
+        __ BranchIf(EQ, &found);
+      }
+    }
+  }
+  __ Bind(&next_iteration);
+  __ Comment("Next iteration");
+  __ AddImmediate(
+      kCacheArrayReg,
+      target::kCompressedWordSize * target::SubtypeTestCache::kTestEntryLength);
+  __ j(&loop);
+
+  __ Bind(&found);
+  __ Comment("Found");
+  __ LoadCompressed(
+      TypeTestABI::kSubtypeTestCacheResultReg,
+      Address(kCacheArrayReg, target::kCompressedWordSize *
+                                  target::SubtypeTestCache::kTestResult));
+  __ Bind(&done);
+  __ Comment("Done");
+  __ ret();
+}
+
+// See comment on [GenerateSubtypeNTestCacheStub].
+void StubCodeCompiler::GenerateSubtype1TestCacheStub(Assembler* assembler) {
+  GenerateSubtypeNTestCacheStub(assembler, 1);
+}
+
+// See comment on [GenerateSubtypeNTestCacheStub].
+void StubCodeCompiler::GenerateSubtype3TestCacheStub(Assembler* assembler) {
+  GenerateSubtypeNTestCacheStub(assembler, 3);
+}
+
+// See comment on [GenerateSubtypeNTestCacheStub].
+void StubCodeCompiler::GenerateSubtype5TestCacheStub(Assembler* assembler) {
+  GenerateSubtypeNTestCacheStub(assembler, 5);
+}
+
+// See comment on [GenerateSubtypeNTestCacheStub].
+void StubCodeCompiler::GenerateSubtype7TestCacheStub(Assembler* assembler) {
+  GenerateSubtypeNTestCacheStub(assembler, 7);
+}
+
+void StubCodeCompiler::GenerateGetCStackPointerStub(Assembler* assembler) {
+  __ mv(A0, SP);
+  __ ret();
+}
+
+// Jump to a frame on the call stack.
+// RA: return address.
+// A0: program_counter.
+// A1: stack_pointer.
+// A2: frame_pointer.
+// A3: thread.
+// Does not return.
+//
+// Notice: We need to keep this in sync with `Simulator::JumpToFrame()`.
+void StubCodeCompiler::GenerateJumpToFrameStub(Assembler* assembler) {
+  ASSERT(kExceptionObjectReg == A0);
+  ASSERT(kStackTraceObjectReg == A1);
+  __ mv(CALLEE_SAVED_TEMP, A0);  // Program counter.
+  __ mv(SP, A1);                 // Stack pointer.
+  __ mv(FP, A2);                 // Frame pointer.
+  __ mv(THR, A3);
+#if defined(USING_SHADOW_CALL_STACK)
+#error Unimplemented
+#endif
+  Label exit_through_non_ffi;
+  // Check if we exited generated code through FFI. If so, do the transition.
+  __ LoadFromOffset(TMP, THR,
+                    compiler::target::Thread::exit_through_ffi_offset());
+  __ LoadImmediate(TMP2, target::Thread::exit_through_ffi());
+  __ bne(TMP, TMP2, &exit_through_non_ffi);
+  __ Stop("Unimplemented");
+  __ Bind(&exit_through_non_ffi);
+
+  // Refresh pinned register values (incl. write barrier mask and null object).
+  __ RestorePinnedRegisters();
+  // Set the tag.
+  __ LoadImmediate(TMP, VMTag::kDartTagId);
+  __ StoreToOffset(TMP, THR, target::Thread::vm_tag_offset());
+  // Clear top exit frame.
+  __ StoreToOffset(ZR, THR, target::Thread::top_exit_frame_info_offset());
+  // Restore the pool pointer.
+  __ RestoreCodePointer();
+  if (FLAG_precompiled_mode) {
+    __ SetupGlobalPoolAndDispatchTable();
+  } else {
+    __ LoadPoolPointer();
+  }
+  __ jr(CALLEE_SAVED_TEMP);  // Jump to continuation point.
+}
+
+// Run an exception handler.  Execution comes from JumpToFrame
+// stub or from the simulator.
+//
+// The arguments are stored in the Thread object.
+// Does not return.
+void StubCodeCompiler::GenerateRunExceptionHandlerStub(Assembler* assembler) {
+  // Exception object.
+  ASSERT(kExceptionObjectReg == A0);
+  __ LoadFromOffset(A0, THR, target::Thread::active_exception_offset());
+  __ StoreToOffset(NULL_REG, THR, target::Thread::active_exception_offset());
+
+  // StackTrace object.
+  ASSERT(kStackTraceObjectReg == A1);
+  __ LoadFromOffset(A1, THR, target::Thread::active_stacktrace_offset());
+  __ StoreToOffset(NULL_REG, THR, target::Thread::active_stacktrace_offset());
+
+  __ LoadFromOffset(RA, THR, target::Thread::resume_pc_offset());
+  __ ret();  // Jump to the exception handler code.
+}
+
+// Deoptimize a frame on the call stack before rewinding.
+// The arguments are stored in the Thread object.
+// No result.
+void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) {
+  // Push zap value instead of CODE_REG.
+  __ LoadImmediate(TMP, kZapCodeReg);
+  __ PushRegister(TMP);
+
+  // Load the deopt pc into RA.
+  __ LoadFromOffset(RA, THR, target::Thread::resume_pc_offset());
+  GenerateDeoptimizationSequence(assembler, kEagerDeopt);
+
+  // After we have deoptimized, jump to the correct frame.
+  __ EnterStubFrame();
+  __ CallRuntime(kRewindPostDeoptRuntimeEntry, 0);
+  __ LeaveStubFrame();
+  __ ebreak();
+}
+
+// Calls to the runtime to optimize the given function.
+// A0: function to be re-optimized.
+// S4: argument descriptor (preserved).
+void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) {
+  __ LoadFromOffset(CODE_REG, THR, target::Thread::optimize_stub_offset());
+  __ EnterStubFrame();
+
+  __ subi(SP, SP, 3 * target::kWordSize);
+  __ sx(S4, Address(SP, 2 * target::kWordSize));  // Preserve args descriptor.
+  __ sx(ZR, Address(SP, 1 * target::kWordSize));  // Result slot.
+  __ sx(A0, Address(SP, 0 * target::kWordSize));  // Function argument.
+  __ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1);
+  __ lx(T0, Address(SP, 1 * target::kWordSize));  // Function result.
+  __ lx(S4, Address(SP, 2 * target::kWordSize));  // Restore args descriptor.
+  __ addi(SP, SP, 3 * target::kWordSize);
+
+  __ LoadCompressedFieldFromOffset(CODE_REG, T0,
+                                   target::Function::code_offset());
+  __ LoadFieldFromOffset(A1, T0, target::Function::entry_point_offset());
+  __ LeaveStubFrame();
+  __ jr(A1);
+  __ ebreak();
+}
+
+// Does identical check (object references are equal or not equal) with special
+// checks for boxed numbers and returns with TMP = 0 iff left and right are
+// identical.
+static void GenerateIdenticalWithNumberCheckStub(Assembler* assembler,
+                                                 const Register left,
+                                                 const Register right) {
+  Label reference_compare, check_mint, done;
+  // If any of the arguments is Smi do reference compare.
+  // Note: A Mint cannot contain a value that would fit in a Smi.
+  __ BranchIfSmi(left, &reference_compare, Assembler::kNearJump);
+  __ BranchIfSmi(right, &reference_compare, Assembler::kNearJump);
+
+  // Value compare for two doubles.
+  __ CompareClassId(left, kDoubleCid, /*scratch*/ TMP);
+  __ BranchIf(NOT_EQUAL, &check_mint, Assembler::kNearJump);
+  __ CompareClassId(right, kDoubleCid, /*scratch*/ TMP);
+  __ BranchIf(NOT_EQUAL, &reference_compare, Assembler::kNearJump);
+
+  // Double values bitwise compare.
+#if XLEN == 32
+  __ lw(T0, FieldAddress(left, target::Double::value_offset()));
+  __ lw(T1, FieldAddress(right, target::Double::value_offset()));
+  __ xor_(TMP, T0, T1);
+  __ lw(T0, FieldAddress(left, target::Double::value_offset() + 4));
+  __ lw(T1, FieldAddress(right, target::Double::value_offset() + 4));
+  __ xor_(TMP2, T0, T1);
+  __ or_(TMP, TMP, TMP2);
+#else
+  __ ld(T0, FieldAddress(left, target::Double::value_offset()));
+  __ ld(T1, FieldAddress(right, target::Double::value_offset()));
+  __ xor_(TMP, T0, T1);
+#endif
+  __ j(&done, Assembler::kNearJump);
+
+  __ Bind(&check_mint);
+  __ CompareClassId(left, kMintCid, /*scratch*/ TMP);
+  __ BranchIf(NOT_EQUAL, &reference_compare, Assembler::kNearJump);
+  __ CompareClassId(right, kMintCid, /*scratch*/ TMP);
+  __ BranchIf(NOT_EQUAL, &reference_compare, Assembler::kNearJump);
+#if XLEN == 32
+  __ lw(T0, FieldAddress(left, target::Mint::value_offset()));
+  __ lw(T1, FieldAddress(right, target::Mint::value_offset()));
+  __ xor_(TMP, T0, T1);
+  __ lw(T0, FieldAddress(left, target::Mint::value_offset() + 4));
+  __ lw(T1, FieldAddress(right, target::Mint::value_offset() + 4));
+  __ xor_(TMP2, T0, T1);
+  __ or_(TMP, TMP, TMP2);
+#else
+  __ ld(T0, FieldAddress(left, target::Mint::value_offset()));
+  __ ld(T1, FieldAddress(right, target::Mint::value_offset()));
+  __ xor_(TMP, T0, T1);
+#endif
+  __ j(&done, Assembler::kNearJump);
+
+  __ Bind(&reference_compare);
+  __ xor_(TMP, left, right);
+  __ Bind(&done);
+}
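+// Note that the bitwise comparison gives identity semantics for boxed
+// numbers: identical(0.0, -0.0) is false, while two NaNs with the same bit
+// pattern are identical.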
+
+// Called only from unoptimized code. All relevant registers have been saved.
+// RA: return address.
+// SP + 4: left operand.
+// SP + 0: right operand.
+// Return TMP set to 0 if equal.
+void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub(
+    Assembler* assembler) {
+#if !defined(PRODUCT)
+  // Check single stepping.
+  Label stepping, done_stepping;
+  __ LoadIsolate(TMP);
+  __ LoadFromOffset(TMP, TMP, target::Isolate::single_step_offset(),
+                    kUnsignedByte);
+  __ bnez(TMP, &stepping);
+  __ Bind(&done_stepping);
+#endif
+
+  const Register left = A0;
+  const Register right = A1;
+  __ LoadFromOffset(left, SP, 1 * target::kWordSize);
+  __ LoadFromOffset(right, SP, 0 * target::kWordSize);
+  GenerateIdenticalWithNumberCheckStub(assembler, left, right);
+  __ ret();
+
+#if !defined(PRODUCT)
+  __ Bind(&stepping);
+  __ EnterStubFrame();
+  __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
+  __ RestoreCodePointer();
+  __ LeaveStubFrame();
+  __ j(&done_stepping);
+#endif
+}
+
+// Called from optimized code only.
+// RA: return address.
+// SP + 4: left operand.
+// SP + 0: right operand.
+// Return TMP set to 0 if equal.
+void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub(
+    Assembler* assembler) {
+  const Register left = A0;
+  const Register right = A1;
+  __ LoadFromOffset(left, SP, 1 * target::kWordSize);
+  __ LoadFromOffset(right, SP, 0 * target::kWordSize);
+  GenerateIdenticalWithNumberCheckStub(assembler, left, right);
+  __ ret();
+}
+
+// Called from megamorphic call sites.
+//  A0: receiver (passed to target)
+//  S5: MegamorphicCache (preserved)
+// Passed to target:
+//  A0: receiver
+//  CODE_REG: target Code
+//  S4: arguments descriptor
+//  S5: MegamorphicCache
+void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
+  // Jump if receiver is a smi.
+  Label smi_case;
+  __ BranchIfSmi(A0, &smi_case);
+
+  // Loads the cid of the object.
+  __ LoadClassId(T5, A0);
+
+  Label cid_loaded;
+  __ Bind(&cid_loaded);
+  __ lx(T2, FieldAddress(S5, target::MegamorphicCache::buckets_offset()));
+  __ lx(T1, FieldAddress(S5, target::MegamorphicCache::mask_offset()));
+  // T2: cache buckets array.
+  // T1: mask as a smi.
+
+  // Make the cid into a smi.
+  __ SmiTag(T5);
+  // T5: class ID of the receiver (smi).
+
+  // Compute the table index.
+  ASSERT(target::MegamorphicCache::kSpreadFactor == 7);
+  // Use shift-and-subtract to multiply by 7 == 8 - 1.
+  __ slli(T3, T5, 3);
+  __ sub(T3, T3, T5);
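+  // With T5 = Smi(cid), this computes T3 = Smi(cid) * 8 - Smi(cid)
+  // = Smi(cid * 7).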
+  // T3: probe.
+  Label loop;
+  __ Bind(&loop);
+  __ and_(T3, T3, T1);
+
+  const intptr_t base = target::Array::data_offset();
+  // T3 is Smi-tagged (i.e. already multiplied by 2), and each table entry is
+  // two compressed words, so scale by the compressed word size.
+  __ slli(TMP, T3, kCompressedWordSizeLog2);
+  __ add(TMP, TMP, T2);
+  __ LoadCompressedSmiFieldFromOffset(T4, TMP, base);
+  Label probe_failed;
+  __ CompareObjectRegisters(T4, T5);
+  __ BranchIf(NE, &probe_failed);
+
+  Label load_target;
+  __ Bind(&load_target);
+  // Call the target found in the cache.  For a class id match, this is a
+  // proper target for the given name and arguments descriptor.  If the
+  // illegal class id was found, the target is a cache miss handler that can
+  // be invoked as a normal Dart function.
+  __ LoadCompressed(T0, FieldAddress(TMP, base + target::kCompressedWordSize));
+  __ lx(A1, FieldAddress(T0, target::Function::entry_point_offset()));
+  __ lx(ARGS_DESC_REG,
+        FieldAddress(S5, target::CallSiteData::arguments_descriptor_offset()));
+  if (!FLAG_precompiled_mode) {
+    __ LoadCompressed(CODE_REG,
+                      FieldAddress(T0, target::Function::code_offset()));
+  }
+  __ jr(A1);  // T0: Function, argument to lazy compile stub.
+
+  // Probe failed, check if it is a miss.
+  __ Bind(&probe_failed);
+  ASSERT(kIllegalCid == 0);
+  Label miss;
+  __ beqz(T4, &miss);  // branch if miss.
+
+  // Try the next entry in the table.
+  __ AddImmediate(T3, target::ToRawSmi(1));
+  __ j(&loop);
+
+  // Load cid for the Smi case.
+  __ Bind(&smi_case);
+  __ LoadImmediate(T5, kSmiCid);
+  __ j(&cid_loaded);
+
+  __ Bind(&miss);
+  GenerateSwitchableCallMissStub(assembler);
+}
+
+// Input:
+//   A0 - receiver
+//   S5 - icdata
+void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
+  Label loop, found, miss;
+  __ lx(T1, FieldAddress(S5, target::ICData::entries_offset()));
+  __ lx(S4,
+        FieldAddress(S5, target::CallSiteData::arguments_descriptor_offset()));
+  __ AddImmediate(T1, target::Array::data_offset() - kHeapObjectTag);
+  // T1: first IC entry
+  __ LoadTaggedClassIdMayBeSmi(A1, A0);
+  // A1: receiver cid as Smi
+
+  __ Bind(&loop);
+  __ LoadCompressedSmi(T2, Address(T1, 0));
+  __ beq(A1, T2, &found);
+  __ CompareImmediate(T2, target::ToRawSmi(kIllegalCid));
+  __ BranchIf(EQ, &miss);
+
+  const intptr_t entry_length =
+      target::ICData::TestEntryLengthFor(1, /*tracking_exactness=*/false) *
+      target::kCompressedWordSize;
+  __ AddImmediate(T1, entry_length);  // Next entry.
+  __ j(&loop);
+
+  __ Bind(&found);
+  if (FLAG_precompiled_mode) {
+    const intptr_t entry_offset =
+        target::ICData::EntryPointIndexFor(1) * target::kCompressedWordSize;
+    __ LoadCompressed(A1, Address(T1, entry_offset));
+    __ lx(A1, FieldAddress(A1, target::Function::entry_point_offset()));
+  } else {
+    const intptr_t code_offset =
+        target::ICData::CodeIndexFor(1) * target::kCompressedWordSize;
+    __ LoadCompressed(CODE_REG, Address(T1, code_offset));
+    __ lx(A1, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
+  }
+  __ jr(A1);
+
+  __ Bind(&miss);
+  __ LoadIsolate(A1);
+  __ lx(CODE_REG, Address(A1, target::Isolate::ic_miss_code_offset()));
+  __ lx(A1, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
+  __ jr(A1);
+}
+
+// Implement the monomorphic entry check for call-sites where the receiver
+// might be a Smi.
+//
+//   A0: receiver
+//   S5: MonomorphicSmiableCall object
+//
+//   T1,T2: clobbered
+void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub(
+    Assembler* assembler) {
+  Label miss;
+  __ LoadClassIdMayBeSmi(T1, A0);
+
+  // Note: this stub is only used in AOT mode, hence the direct (bare) call.
+  __ LoadField(
+      T2,
+      FieldAddress(S5, target::MonomorphicSmiableCall::expected_cid_offset()));
+  __ LoadField(
+      TMP,
+      FieldAddress(S5, target::MonomorphicSmiableCall::entrypoint_offset()));
+  __ bne(T1, T2, &miss);
+  __ jr(TMP);
+
+  __ Bind(&miss);
+  __ lx(TMP, Address(THR, target::Thread::switchable_call_miss_entry_offset()));
+  __ jr(TMP);
+}
+
+// Called from switchable IC calls.
+//  A0: receiver
+void StubCodeCompiler::GenerateSwitchableCallMissStub(Assembler* assembler) {
+  __ lx(CODE_REG,
+        Address(THR, target::Thread::switchable_call_miss_stub_offset()));
+  __ EnterStubFrame();
+  __ PushRegister(A0);  // Preserve receiver.
+
+  __ PushRegister(ZR);  // Result slot.
+  __ PushRegister(ZR);  // Arg0: stub out.
+  __ PushRegister(A0);  // Arg1: Receiver
+  __ CallRuntime(kSwitchableCallMissRuntimeEntry, 2);
+  __ Drop(1);
+  __ PopRegister(CODE_REG);     // result = stub
+  __ PopRegister(IC_DATA_REG);  // result = IC
+
+  __ PopRegister(A0);  // Restore receiver.
+  __ LeaveStubFrame();
+
+  __ lx(TMP, FieldAddress(CODE_REG, target::Code::entry_point_offset(
+                                        CodeEntryKind::kNormal)));
+  __ jr(TMP);
+}
+
+// Called from switchable IC calls.
+//  A0: receiver
+//  S5: SingleTargetCache
+// Passed to target:
+//  CODE_REG: target Code object
+void StubCodeCompiler::GenerateSingleTargetCallStub(Assembler* assembler) {
+  Label miss;
+  __ LoadClassIdMayBeSmi(A1, A0);
+  __ lhu(T2, FieldAddress(S5, target::SingleTargetCache::lower_limit_offset()));
+  __ lhu(T3, FieldAddress(S5, target::SingleTargetCache::upper_limit_offset()));
+
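+  // Take the miss path unless lower_limit <= cid <= upper_limit.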
+  __ blt(A1, T2, &miss);
+  __ bgt(A1, T3, &miss);
+
+  __ lx(TMP, FieldAddress(S5, target::SingleTargetCache::entry_point_offset()));
+  __ lx(CODE_REG, FieldAddress(S5, target::SingleTargetCache::target_offset()));
+  __ jr(TMP);
+
+  __ Bind(&miss);
+  __ EnterStubFrame();
+  __ PushRegister(A0);  // Preserve receiver.
+
+  __ PushRegister(ZR);  // Result slot.
+  __ PushRegister(ZR);  // Arg0: Stub out.
+  __ PushRegister(A0);  // Arg1: Receiver
+  __ CallRuntime(kSwitchableCallMissRuntimeEntry, 2);
+  __ Drop(1);
+  __ PopRegister(CODE_REG);  // result = stub
+  __ PopRegister(S5);        // result = IC
+
+  __ PopRegister(A0);  // Restore receiver.
+  __ LeaveStubFrame();
+
+  __ lx(TMP, FieldAddress(CODE_REG, target::Code::entry_point_offset(
+                                        CodeEntryKind::kMonomorphic)));
+  __ jr(TMP);
+}
+
+// Instantiate type arguments from instantiator and function type args.
+// T1: uninstantiated type arguments.
+// T2: instantiator type arguments.
+// T3: function type arguments.
+// Returns instantiated type arguments in A0 (kResultTypeArgumentsReg).
+void StubCodeCompiler::GenerateInstantiateTypeArgumentsStub(
+    Assembler* assembler) {
+  // Lookup cache before calling runtime.
+  __ LoadCompressedFieldFromOffset(
+      A1, InstantiationABI::kUninstantiatedTypeArgumentsReg,
+      target::TypeArguments::instantiations_offset());
+  __ AddImmediate(A1, Array::data_offset() - kHeapObjectTag);
+  // The instantiations cache is initialized with Object::zero_array() and is
+  // therefore guaranteed to contain kNoInstantiator. No length check needed.
+  compiler::Label loop, next, found, call_runtime;
+  __ Bind(&loop);
+
+  // Use load-acquire to test for the sentinel: if we find a non-sentinel, it
+  // is safe to access the other entries; if we find the sentinel, go to the
+  // runtime.
+  __ LoadAcquireCompressed(
+      A6, A1,
+      TypeArguments::Instantiation::kInstantiatorTypeArgsIndex *
+          target::kCompressedWordSize);
+  __ CompareImmediate(A6, Smi::RawValue(TypeArguments::kNoInstantiator),
+                      kObjectBytes);
+  __ BranchIf(EQ, &call_runtime);
+
+  __ CompareRegisters(A6, InstantiationABI::kInstantiatorTypeArgumentsReg);
+  __ BranchIf(NE, &next);
+  __ LoadCompressedFromOffset(
+      A7, A1,
+      TypeArguments::Instantiation::kFunctionTypeArgsIndex *
+          target::kCompressedWordSize);
+  __ CompareRegisters(A7, InstantiationABI::kFunctionTypeArgumentsReg);
+  __ BranchIf(EQ, &found);
+  __ Bind(&next);
+  __ AddImmediate(A1, TypeArguments::Instantiation::kSizeInWords *
+                          target::kCompressedWordSize);
+  __ j(&loop);
+
+  // Instantiate non-null type arguments.
+  // A runtime call to instantiate the type arguments is required.
+  __ Bind(&call_runtime);
+  __ EnterStubFrame();
+  __ PushRegisterPair(InstantiationABI::kUninstantiatedTypeArgumentsReg,
+                      NULL_REG);
+  __ PushRegisterPair(InstantiationABI::kFunctionTypeArgumentsReg,
+                      InstantiationABI::kInstantiatorTypeArgumentsReg);
+  __ CallRuntime(kInstantiateTypeArgumentsRuntimeEntry, 3);
+  __ Drop(3);  // Drop 2 type vectors, and uninstantiated type.
+  __ PopRegister(InstantiationABI::kResultTypeArgumentsReg);
+  __ LeaveStubFrame();
+  __ Ret();
+
+  __ Bind(&found);
+  __ LoadCompressedFromOffset(
+      InstantiationABI::kResultTypeArgumentsReg, A1,
+      TypeArguments::Instantiation::kInstantiatedTypeArgsIndex *
+          target::kCompressedWordSize);
+  __ Ret();
+}
+
+void StubCodeCompiler::
+    GenerateInstantiateTypeArgumentsMayShareInstantiatorTAStub(
+        Assembler* assembler) {
+  // Return the instantiator type arguments if their nullability is compatible
+  // for sharing; otherwise proceed to the instantiation cache lookup.
+  compiler::Label cache_lookup;
+  __ LoadCompressedSmi(
+      A6, FieldAddress(InstantiationABI::kUninstantiatedTypeArgumentsReg,
+                       target::TypeArguments::nullability_offset()));
+  __ LoadCompressedSmi(
+      A7, FieldAddress(InstantiationABI::kInstantiatorTypeArgumentsReg,
+                       target::TypeArguments::nullability_offset()));
+  __ and_(A7, A7, A6);
+  __ CompareRegisters(A7, A6);
+  __ BranchIf(NE, &cache_lookup);
+  __ mv(InstantiationABI::kResultTypeArgumentsReg,
+        InstantiationABI::kInstantiatorTypeArgumentsReg);
+  __ Ret();
+
+  __ Bind(&cache_lookup);
+  GenerateInstantiateTypeArgumentsStub(assembler);
+}
+
+void StubCodeCompiler::GenerateInstantiateTypeArgumentsMayShareFunctionTAStub(
+    Assembler* assembler) {
+  // Return the function type arguments if their nullability is compatible
+  // for sharing; otherwise proceed to the instantiation cache lookup.
+  compiler::Label cache_lookup;
+  __ LoadCompressedSmi(
+      A6, FieldAddress(InstantiationABI::kUninstantiatedTypeArgumentsReg,
+                       target::TypeArguments::nullability_offset()));
+  __ LoadCompressedSmi(
+      A7, FieldAddress(InstantiationABI::kFunctionTypeArgumentsReg,
+                       target::TypeArguments::nullability_offset()));
+  __ and_(A7, A7, A6);
+  __ CompareRegisters(A7, A6);
+  __ BranchIf(NE, &cache_lookup);
+  __ mv(InstantiationABI::kResultTypeArgumentsReg,
+        InstantiationABI::kFunctionTypeArgumentsReg);
+  __ Ret();
+
+  __ Bind(&cache_lookup);
+  GenerateInstantiateTypeArgumentsStub(assembler);
+}
+
+static int GetScaleFactor(intptr_t size) {
+  switch (size) {
+    case 1:
+      return 0;
+    case 2:
+      return 1;
+    case 4:
+      return 2;
+    case 8:
+      return 3;
+    case 16:
+      return 4;
+  }
+  UNREACHABLE();
+  return -1;
+}
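+// For example, GetScaleFactor(8) == 3: an element count is converted to a
+// byte count by shifting left by 3.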
+
+void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(Assembler* assembler,
+                                                          intptr_t cid) {
+  const intptr_t element_size = TypedDataElementSizeInBytes(cid);
+  const intptr_t max_len = TypedDataMaxNewSpaceElements(cid);
+  const intptr_t scale_shift = GetScaleFactor(element_size);
+
+  COMPILE_ASSERT(AllocateTypedDataArrayABI::kLengthReg == T2);
+  COMPILE_ASSERT(AllocateTypedDataArrayABI::kResultReg == A0);
+
+  if (!FLAG_use_slow_path && FLAG_inline_alloc) {
+    Label call_runtime;
+    NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, T3, &call_runtime));
+    __ mv(T3, AllocateTypedDataArrayABI::kLengthReg);
+    /* Check that length is a positive Smi. */
+    /* T3: requested array length argument. */
+    __ BranchIfNotSmi(T3, &call_runtime);
+    __ SmiUntag(T3);
+    /* Check for length >= 0 && length <= max_len. */
+    /* T3: untagged array length. */
+    __ CompareImmediate(T3, max_len, kObjectBytes);
+    __ BranchIf(UNSIGNED_GREATER, &call_runtime);
+    if (scale_shift != 0) {
+      __ slli(T3, T3, scale_shift);
+    }
+    const intptr_t fixed_size_plus_alignment_padding =
+        target::TypedData::HeaderSize() +
+        target::ObjectAlignment::kObjectAlignment - 1;
+    __ AddImmediate(T3, fixed_size_plus_alignment_padding);
+    __ andi(T3, T3, ~(target::ObjectAlignment::kObjectAlignment - 1));
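+    // T3 is now RoundUp(header + payload, kObjectAlignment), via the usual
+    // (x + align - 1) & ~(align - 1) idiom.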
+    __ lx(A0, Address(THR, target::Thread::top_offset()));
+
+    /* T3: allocation size. */
+    __ add(T4, A0, T3);
+    __ bltu(T4, A0, &call_runtime); /* Fail on unsigned overflow. */
+
+    /* Check if the allocation fits into the remaining space. */
+    /* A0: potential new object start. */
+    /* T4: potential next object start. */
+    /* T3: allocation size. */
+    __ lx(TMP, Address(THR, target::Thread::end_offset()));
+    __ bgeu(T4, TMP, &call_runtime);
+
+    /* Successfully allocated the object(s), now update top to point to */
+    /* next object start and initialize the object. */
+    __ sx(T4, Address(THR, target::Thread::top_offset()));
+    __ AddImmediate(A0, kHeapObjectTag);
+    /* Initialize the tags. */
+    /* A0: new object start as a tagged pointer. */
+    /* T4: new object end address. */
+    /* T3: allocation size. */
+    {
+      __ li(T5, 0);
+      __ CompareImmediate(T3, target::UntaggedObject::kSizeTagMaxSizeTag);
+      compiler::Label zero_tags;
+      __ BranchIf(HI, &zero_tags);
+      __ slli(T5, T3,
+              target::UntaggedObject::kTagBitsSizeTagPos -
+                  target::ObjectAlignment::kObjectAlignmentLog2);
+      __ Bind(&zero_tags);
+
+      /* Get the class index and insert it into the tags. */
+      uword tags =
+          target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
+      __ OrImmediate(T5, T5, tags);
+      __ sx(T5, FieldAddress(A0, target::Object::tags_offset())); /* Tags. */
+    }
+    /* Set the length field. */
+    /* A0: new object start as a tagged pointer. */
+    /* T4: new object end address. */
+    __ mv(T3, AllocateTypedDataArrayABI::kLengthReg); /* Array length. */
+    __ StoreCompressedIntoObjectNoBarrier(
+        A0, FieldAddress(A0, target::TypedDataBase::length_offset()), T3);
+    /* Initialize all array elements to 0. */
+    /* A0: new object start as a tagged pointer. */
+    /* T4: new object end address. */
+    /* T3: iterator which initially points to the start of the variable */
+    /* data area to be initialized. */
+    __ AddImmediate(T3, A0, target::TypedData::HeaderSize() - 1);
+    __ StoreInternalPointer(
+        A0, FieldAddress(A0, target::TypedDataBase::data_field_offset()), T3);
+    Label init_loop, done;
+    __ Bind(&init_loop);
+    __ bgeu(T3, T4, &done);
+    __ sx(ZR, Address(T3, 0));
+    __ addi(T3, T3, target::kWordSize);
+    __ j(&init_loop);
+    __ Bind(&done);
+
+    __ Ret();
+
+    __ Bind(&call_runtime);
+  }
+
+  __ EnterStubFrame();
+  __ PushRegister(ZR);                                     // Result slot.
+  __ PushImmediate(target::ToRawSmi(cid));                 // Cid
+  __ PushRegister(AllocateTypedDataArrayABI::kLengthReg);  // Array length
+  __ CallRuntime(kAllocateTypedDataRuntimeEntry, 2);
+  __ Drop(2);  // Drop arguments.
+  __ PopRegister(AllocateTypedDataArrayABI::kResultReg);
+  __ LeaveStubFrame();
+  __ Ret();
+}
+
+}  // namespace compiler
+
+}  // namespace dart
+
+#endif  // defined(TARGET_ARCH_RISCV)
diff --git a/runtime/vm/constants.h b/runtime/vm/constants.h
index 00e3f9b..26514bf 100644
--- a/runtime/vm/constants.h
+++ b/runtime/vm/constants.h
@@ -13,6 +13,8 @@
 #include "vm/constants_arm.h"
 #elif defined(TARGET_ARCH_ARM64)
 #include "vm/constants_arm64.h"
+#elif defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+#include "vm/constants_riscv.h"
 #else
 #error Unknown architecture.
 #endif
@@ -24,13 +26,13 @@
 // We re-use registers from another ABI to avoid duplicating this ABI across 4
 // architectures.
 struct InstantiateTypeABI {
-  static const Register kTypeReg =
+  static constexpr Register kTypeReg =
       InstantiationABI::kUninstantiatedTypeArgumentsReg;
-  static const Register kInstantiatorTypeArgumentsReg =
+  static constexpr Register kInstantiatorTypeArgumentsReg =
       InstantiationABI::kInstantiatorTypeArgumentsReg;
-  static const Register kFunctionTypeArgumentsReg =
+  static constexpr Register kFunctionTypeArgumentsReg =
       InstantiationABI::kFunctionTypeArgumentsReg;
-  static const Register kResultTypeReg = InstantiationABI::kResultTypeReg;
+  static constexpr Register kResultTypeReg = InstantiationABI::kResultTypeReg;
 };
 
 class RegisterNames {
diff --git a/runtime/vm/constants_riscv.cc b/runtime/vm/constants_riscv.cc
new file mode 100644
index 0000000..6f694e9
--- /dev/null
+++ b/runtime/vm/constants_riscv.cc
@@ -0,0 +1,43 @@
+// Copyright (c) 2017, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "platform/globals.h"  // NOLINT
+
+#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+
+#include "vm/constants.h"  // NOLINT
+
+namespace dart {
+
+#if !defined(FFI_UNIT_TESTS)
+DEFINE_FLAG(bool,
+            use_compressed_instructions,
+            true,
+            "Use instructions from the C extension");
+#endif
+
+const char* const cpu_reg_names[kNumberOfCpuRegisters] = {
+    "zero", "ra", "sp",  "gp",   "tp",   "t0",   "t1", "t2", "fp", "thr", "a0",
+    "a1",   "a2", "tmp", "tmp2", "pp",   "a6",   "a7", "s2", "s3", "s4",  "s5",
+    "s6",   "s7", "s8",  "s9",   "null", "mask", "t3", "t4", "t5", "t6",
+};
+
+const char* const fpu_reg_names[kNumberOfFpuRegisters] = {
+    "ft0", "ft1", "ft2",  "ft3",  "ft4", "ft5", "ft6",  "ft7",
+    "fs0", "fs1", "fa0",  "fa1",  "fa2", "fa3", "fa4",  "fa5",
+    "fa6", "fa7", "fs2",  "fs3",  "fs4", "fs5", "fs6",  "fs7",
+    "fs8", "fs9", "fs10", "fs11", "ft8", "ft9", "ft10", "ft11",
+};
+
+const Register CallingConventions::ArgumentRegisters[] = {
+    A0, A1, A2, A3, A4, A5, A6, A7,
+};
+
+const FpuRegister CallingConventions::FpuArgumentRegisters[] = {
+    FA0, FA1, FA2, FA3, FA4, FA5, FA6, FA7,
+};
+
+}  // namespace dart
+
+#endif  // defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
diff --git a/runtime/vm/constants_riscv.h b/runtime/vm/constants_riscv.h
new file mode 100644
index 0000000..d04317b
--- /dev/null
+++ b/runtime/vm/constants_riscv.h
@@ -0,0 +1,1422 @@
+// Copyright (c) 2017, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#ifndef RUNTIME_VM_CONSTANTS_RISCV_H_
+#define RUNTIME_VM_CONSTANTS_RISCV_H_
+
+#ifndef RUNTIME_VM_CONSTANTS_H_
+#error Do not include constants_riscv.h directly; use constants.h instead.
+#endif
+
+#include <sstream>
+
+#include "platform/assert.h"
+#include "platform/globals.h"
+#include "platform/utils.h"
+
+#include "vm/constants_base.h"
+#include "vm/flags.h"
+
+namespace dart {
+
+DECLARE_FLAG(bool, use_compressed_instructions);
+
+#if defined(TARGET_ARCH_RISCV32)
+typedef uint32_t uintx_t;
+typedef int32_t intx_t;
+constexpr intx_t kMaxIntX = kMaxInt32;
+constexpr uintx_t kMaxUIntX = kMaxUint32;
+constexpr intx_t kMinIntX = kMinInt32;
+#define XLEN 32
+#elif defined(TARGET_ARCH_RISCV64)
+typedef uint64_t uintx_t;
+typedef int64_t intx_t;
+constexpr intx_t kMaxIntX = kMaxInt64;
+constexpr uintx_t kMaxUIntX = kMaxUint64;
+constexpr intx_t kMinIntX = kMinInt64;
+#define XLEN 64
+#else
+#error What XLEN?
+#endif
+
+enum Register {
+  // The correct name for this register is ZERO, but this conflicts with other
+  // globals.
+  ZR = 0,
+  RA = 1,
+  SP = 2,
+  GP = 3,
+  TP = 4,
+  T0 = 5,
+  T1 = 6,
+  T2 = 7,
+  FP = 8,
+  S1 = 9,  // THR
+  A0 = 10,
+  A1 = 11,
+  A2 = 12,  // CODE_REG
+  A3 = 13,  // TMP
+  A4 = 14,  // TMP2
+  A5 = 15,  // PP, untagged
+  A6 = 16,
+  A7 = 17,
+  S2 = 18,
+  S3 = 19,
+  S4 = 20,  // ARGS_DESC_REG
+  S5 = 21,  // IC_DATA_REG
+  S6 = 22,
+  S7 = 23,   // CALLEE_SAVED_TEMP2
+  S8 = 24,   // CALLEE_SAVED_TEMP / FAR_TMP
+  S9 = 25,   // DISPATCH_TABLE_REG
+  S10 = 26,  // NULL
+  S11 = 27,  // WRITE_BARRIER_MASK
+  T3 = 28,
+  T4 = 29,
+  T5 = 30,
+  T6 = 31,
+  kNumberOfCpuRegisters = 32,
+  kNoRegister = -1,
+
+  RA2 = T0,
+  S0 = FP,
+
+  // Note that some compressed instructions can only take registers x8-x15 for
+  // some of their operands, so to reduce code size we assign the most popular
+  // uses to these registers.
+
+  // If the base register of a load/store is not SP, both the base register and
+  // source/destination register must be in x8-x15 and the offset must be
+  //   aligned to make use of a compressed instruction. So either:
+  //   - PP, CODE_REG and IC_DATA_REG should all be assigned to x8-x15 and we
+  //     should hold PP untagged like on ARM64. This makes the loads in the call
+  //     sequence shorter, but adds extra PP tagging/untagging on entry and
+  //     return.
+  //   - PP should be assigned to a C-preserved register to avoid spilling it on
+  //     leaf runtime calls.
+};
+
+enum FRegister {
+  FT0 = 0,
+  FT1 = 1,
+  FT2 = 2,
+  FT3 = 3,
+  FT4 = 4,
+  FT5 = 5,
+  FT6 = 6,
+  FT7 = 7,
+  FS0 = 8,
+  FS1 = 9,
+  FA0 = 10,
+  FA1 = 11,
+  FA2 = 12,
+  FA3 = 13,
+  FA4 = 14,
+  FA5 = 15,
+  FA6 = 16,
+  FA7 = 17,
+  FS2 = 18,
+  FS3 = 19,
+  FS4 = 20,
+  FS5 = 21,
+  FS6 = 22,
+  FS7 = 23,
+  FS8 = 24,
+  FS9 = 25,
+  FS10 = 26,
+  FS11 = 27,
+  FT8 = 28,
+  FT9 = 29,
+  FT10 = 30,
+  FT11 = 31,
+  kNumberOfFpuRegisters = 32,
+  kNoFpuRegister = -1,
+};
+
+// Register alias for floating point scratch register.
+const FRegister FTMP = FT11;
+
+// Architecture independent aliases.
+typedef FRegister FpuRegister;
+const FpuRegister FpuTMP = FTMP;
+const int kFpuRegisterSize = 8;
+typedef double fpu_register_t;
+
+extern const char* const cpu_reg_names[kNumberOfCpuRegisters];
+extern const char* const fpu_reg_names[kNumberOfFpuRegisters];
+
+// Register aliases.
+constexpr Register TMP = A3;  // Used as scratch register by assembler.
+constexpr Register TMP2 = A4;
+constexpr Register FAR_TMP = S8;
+constexpr Register PP = A5;  // Caches object pool pointer in generated code.
+constexpr Register DISPATCH_TABLE_REG = S9;  // Dispatch table register.
+constexpr Register CODE_REG = A2;
+constexpr Register FPREG = FP;          // Frame pointer register.
+constexpr Register SPREG = SP;          // Stack pointer register.
+constexpr Register IC_DATA_REG = S5;    // ICData/MegamorphicCache register.
+constexpr Register ARGS_DESC_REG = S4;  // Arguments descriptor register.
+constexpr Register THR = S1;  // Caches current thread in generated code.
+constexpr Register CALLEE_SAVED_TEMP = S8;
+constexpr Register CALLEE_SAVED_TEMP2 = S7;
+constexpr Register WRITE_BARRIER_MASK = S11;
+constexpr Register NULL_REG = S10;  // Caches NullObject() value.
+
+// ABI for catch-clause entry point.
+constexpr Register kExceptionObjectReg = A0;
+constexpr Register kStackTraceObjectReg = A1;
+
+// ABI for write barrier stub.
+constexpr Register kWriteBarrierObjectReg = A0;
+constexpr Register kWriteBarrierValueReg = A1;
+constexpr Register kWriteBarrierSlotReg = A6;
+
+// Common ABI for shared slow path stubs.
+struct SharedSlowPathStubABI {
+  static constexpr Register kResultReg = A0;
+};
+
+// ABI for instantiation stubs.
+struct InstantiationABI {
+  static constexpr Register kUninstantiatedTypeArgumentsReg = T1;
+  static constexpr Register kInstantiatorTypeArgumentsReg = T2;
+  static constexpr Register kFunctionTypeArgumentsReg = T3;
+  static constexpr Register kResultTypeArgumentsReg = A0;
+  static constexpr Register kResultTypeReg = A0;
+};
+
+// Registers in addition to those listed in TypeTestABI used inside the
+// implementation of type testing stubs that are _not_ preserved.
+struct TTSInternalRegs {
+  static constexpr Register kInstanceTypeArgumentsReg = S2;
+  static constexpr Register kScratchReg = S3;
+  static constexpr Register kSubTypeArgumentReg = S4;
+  static constexpr Register kSuperTypeArgumentReg = S5;
+
+  // Must be pushed/popped whenever generic type arguments are being checked as
+  // they overlap with registers in TypeTestABI.
+  static constexpr intptr_t kSavedTypeArgumentRegisters = 0;
+
+  static const intptr_t kInternalRegisters =
+      ((1 << kInstanceTypeArgumentsReg) | (1 << kScratchReg) |
+       (1 << kSubTypeArgumentReg) | (1 << kSuperTypeArgumentReg)) &
+      ~kSavedTypeArgumentRegisters;
+};
+
+// Registers in addition to those listed in TypeTestABI used inside the
+// implementation of subtype test cache stubs that are _not_ preserved.
+struct STCInternalRegs {
+  static constexpr Register kInstanceCidOrSignatureReg = S2;
+  static constexpr Register kInstanceInstantiatorTypeArgumentsReg = S3;
+  static constexpr Register kInstanceParentFunctionTypeArgumentsReg = S4;
+  static constexpr Register kInstanceDelayedFunctionTypeArgumentsReg = S5;
+
+  static const intptr_t kInternalRegisters =
+      (1 << kInstanceCidOrSignatureReg) |
+      (1 << kInstanceInstantiatorTypeArgumentsReg) |
+      (1 << kInstanceParentFunctionTypeArgumentsReg) |
+      (1 << kInstanceDelayedFunctionTypeArgumentsReg);
+};
+
+// Calling convention when calling TypeTestingStub and SubtypeTestCacheStub.
+struct TypeTestABI {
+  static constexpr Register kInstanceReg = A0;
+  static constexpr Register kDstTypeReg = T1;
+  static constexpr Register kInstantiatorTypeArgumentsReg = T2;
+  static constexpr Register kFunctionTypeArgumentsReg = T3;
+  static constexpr Register kSubtypeTestCacheReg = T4;
+  static constexpr Register kScratchReg = T5;
+
+  // For calls to InstanceOfStub.
+  static constexpr Register kInstanceOfResultReg = kInstanceReg;
+  // For calls to SubtypeNTestCacheStub. Must not overlap with any other
+  // registers above, for it is also used internally as kNullReg in those stubs.
+  static constexpr Register kSubtypeTestCacheResultReg = T0;
+
+  // Registers that need saving across SubtypeTestCacheStub calls.
+  static const intptr_t kSubtypeTestCacheStubCallerSavedRegisters =
+      1 << kSubtypeTestCacheReg;
+
+  static const intptr_t kPreservedAbiRegisters =
+      (1 << kInstanceReg) | (1 << kDstTypeReg) |
+      (1 << kInstantiatorTypeArgumentsReg) | (1 << kFunctionTypeArgumentsReg);
+
+  static const intptr_t kNonPreservedAbiRegisters =
+      TTSInternalRegs::kInternalRegisters |
+      STCInternalRegs::kInternalRegisters | (1 << kSubtypeTestCacheReg) |
+      (1 << kScratchReg) | (1 << kSubtypeTestCacheResultReg) | (1 << CODE_REG);
+
+  static const intptr_t kAbiRegisters =
+      kPreservedAbiRegisters | kNonPreservedAbiRegisters;
+};
+
+// Calling convention when calling AssertSubtypeStub.
+struct AssertSubtypeABI {
+  static constexpr Register kSubTypeReg = T1;
+  static constexpr Register kSuperTypeReg = T2;
+  static constexpr Register kInstantiatorTypeArgumentsReg = T3;
+  static constexpr Register kFunctionTypeArgumentsReg = T4;
+  static constexpr Register kDstNameReg = T5;
+
+  static const intptr_t kAbiRegisters =
+      (1 << kSubTypeReg) | (1 << kSuperTypeReg) |
+      (1 << kInstantiatorTypeArgumentsReg) | (1 << kFunctionTypeArgumentsReg) |
+      (1 << kDstNameReg);
+
+  // No result register, as AssertSubtype is only run for side effect
+  // (throws if the subtype check fails).
+};
+
+// ABI for InitStaticFieldStub.
+struct InitStaticFieldABI {
+  static constexpr Register kFieldReg = T2;
+  static constexpr Register kResultReg = A0;
+};
+
+// Registers used inside the implementation of InitLateStaticFieldStub.
+struct InitLateStaticFieldInternalRegs {
+  static constexpr Register kFunctionReg = T0;
+  static constexpr Register kAddressReg = T3;
+  static constexpr Register kScratchReg = T4;
+};
+
+// ABI for InitInstanceFieldStub.
+struct InitInstanceFieldABI {
+  static constexpr Register kInstanceReg = T1;
+  static constexpr Register kFieldReg = T2;
+  static constexpr Register kResultReg = A0;
+};
+
+// Registers used inside the implementation of InitLateInstanceFieldStub.
+struct InitLateInstanceFieldInternalRegs {
+  static constexpr Register kFunctionReg =
+      T0;  // Must agree with the lazy compile stub.
+  static constexpr Register kAddressReg = T3;
+  static constexpr Register kScratchReg = T4;
+};
+
+// ABI for LateInitializationError stubs.
+struct LateInitializationErrorABI {
+  static constexpr Register kFieldReg = T2;
+};
+
+// ABI for ThrowStub.
+struct ThrowABI {
+  static constexpr Register kExceptionReg = A0;
+};
+
+// ABI for ReThrowStub.
+struct ReThrowABI {
+  static constexpr Register kExceptionReg = A0;
+  static constexpr Register kStackTraceReg = A1;
+};
+
+// ABI for AssertBooleanStub.
+struct AssertBooleanABI {
+  static constexpr Register kObjectReg = A0;
+};
+
+// ABI for RangeErrorStub.
+struct RangeErrorABI {
+  static constexpr Register kLengthReg = T1;
+  static constexpr Register kIndexReg = T2;
+};
+
+// ABI for AllocateObjectStub.
+struct AllocateObjectABI {
+  static constexpr Register kResultReg = A0;
+  static constexpr Register kTypeArgumentsReg = T1;
+};
+
+// ABI for AllocateClosureStub.
+struct AllocateClosureABI {
+  static constexpr Register kResultReg = AllocateObjectABI::kResultReg;
+  static constexpr Register kFunctionReg = T2;
+  static constexpr Register kContextReg = T3;
+  static constexpr Register kScratchReg = T4;
+};
+
+// ABI for AllocateMintShared*Stub.
+struct AllocateMintABI {
+  static constexpr Register kResultReg = AllocateObjectABI::kResultReg;
+  static constexpr Register kTempReg = T2;
+};
+
+// ABI for Allocate{Mint,Double,Float32x4,Float64x2}Stub.
+struct AllocateBoxABI {
+  static constexpr Register kResultReg = AllocateObjectABI::kResultReg;
+  static constexpr Register kTempReg = T2;
+};
+
+// ABI for AllocateArrayStub.
+struct AllocateArrayABI {
+  static constexpr Register kResultReg = AllocateObjectABI::kResultReg;
+  static constexpr Register kLengthReg = T2;
+  static constexpr Register kTypeArgumentsReg = T1;
+};
+
+// ABI for AllocateTypedDataArrayStub.
+struct AllocateTypedDataArrayABI {
+  static constexpr Register kResultReg = AllocateObjectABI::kResultReg;
+  static constexpr Register kLengthReg = T2;
+};
+
+// ABI for BoxDoubleStub.
+struct BoxDoubleStubABI {
+  static constexpr FpuRegister kValueReg = FA0;
+  static constexpr Register kTempReg = T1;
+  static constexpr Register kResultReg = A0;
+};
+
+// ABI for DoubleToIntegerStub.
+struct DoubleToIntegerStubABI {
+  static constexpr FpuRegister kInputReg = FA0;
+  static constexpr Register kRecognizedKindReg = T1;
+  static constexpr Register kResultReg = A0;
+};
+
+// ABI for DispatchTableNullErrorStub and consequently for all dispatch
+// table calls (though normal functions will not expect or use this
+// register). This ABI is added to distinguish memory corruption errors from
+// null errors.
+struct DispatchTableNullErrorABI {
+  static constexpr Register kClassIdReg = T1;
+};
+
+typedef uint32_t RegList;
+const RegList kAllCpuRegistersList = 0xFFFFFFFF;
+
+#define R(REG) (1 << REG)
+
+// C++ ABI call registers.
+
+constexpr RegList kAbiArgumentCpuRegs =
+    R(A0) | R(A1) | R(A2) | R(A3) | R(A4) | R(A5) | R(A6) | R(A7);
+constexpr RegList kAbiVolatileCpuRegs =
+    kAbiArgumentCpuRegs | R(T0) | R(T1) | R(T2) | R(T3) | R(T4) | R(T5) | R(T6);
+constexpr RegList kAbiPreservedCpuRegs = R(S1) | R(S2) | R(S3) | R(S4) | R(S5) |
+                                         R(S6) | R(S7) | R(S8) | R(S9) |
+                                         R(S10) | R(S11);
+constexpr int kAbiPreservedCpuRegCount = 11;
+constexpr intptr_t kReservedCpuRegisters =
+    R(ZR) | R(TP) | R(GP) | R(SP) | R(FP) | R(TMP) | R(TMP2) | R(PP) | R(THR) |
+    R(RA) | R(WRITE_BARRIER_MASK) | R(NULL_REG) | R(DISPATCH_TABLE_REG) |
+    R(FAR_TMP);
+constexpr intptr_t kNumberOfReservedCpuRegisters = 14;
+// CPU registers available to Dart allocator.
+constexpr RegList kDartAvailableCpuRegs =
+    kAllCpuRegistersList & ~kReservedCpuRegisters;
+constexpr int kNumberOfDartAvailableCpuRegs = 18;
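+// Sanity check: 32 registers minus the 14 reserved above leaves 18 registers
+// for the Dart allocator.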
+// Registers available to Dart that are not preserved by runtime calls.
+constexpr RegList kDartVolatileCpuRegs =
+    kDartAvailableCpuRegs & ~kAbiPreservedCpuRegs;
+
+constexpr RegList kAbiArgumentFpuRegs =
+    R(FA0) | R(FA1) | R(FA2) | R(FA3) | R(FA4) | R(FA5) | R(FA6) | R(FA7);
+constexpr RegList kAbiVolatileFpuRegs =
+    kAbiArgumentFpuRegs | R(FT0) | R(FT1) | R(FT2) | R(FT3) | R(FT4) | R(FT5) |
+    R(FT6) | R(FT7) | R(FT8) | R(FT9) | R(FT10) | R(FT11);
+constexpr RegList kAbiPreservedFpuRegs = R(FS0) | R(FS1) | R(FS2) | R(FS3) |
+                                         R(FS4) | R(FS5) | R(FS6) | R(FS7) |
+                                         R(FS8) | R(FS9) | R(FS10) | R(FS11);
+constexpr int kAbiPreservedFpuRegCount = 12;
+constexpr intptr_t kReservedFpuRegisters = 0;
+constexpr intptr_t kNumberOfReservedFpuRegisters = 0;
+
+// Two callee save scratch registers used by leaf runtime call sequence.
+constexpr Register kCallLeafRuntimeCalleeSaveScratch1 = CALLEE_SAVED_TEMP;
+constexpr Register kCallLeafRuntimeCalleeSaveScratch2 = CALLEE_SAVED_TEMP2;
+static_assert((R(kCallLeafRuntimeCalleeSaveScratch1) & kAbiPreservedCpuRegs) !=
+                  0,
+              "Need callee save scratch register for leaf runtime calls.");
+static_assert((R(kCallLeafRuntimeCalleeSaveScratch2) & kAbiPreservedCpuRegs) !=
+                  0,
+              "Need callee save scratch register for leaf runtime calls.");
+
+constexpr int kStoreBufferWrapperSize = 26;
+
+class CallingConventions {
+ public:
+  static const intptr_t kArgumentRegisters = kAbiArgumentCpuRegs;
+  static const Register ArgumentRegisters[];
+  static const intptr_t kNumArgRegs = 8;
+  static const Register kPointerToReturnStructRegisterCall = A0;
+  static const Register kPointerToReturnStructRegisterReturn = A0;
+
+  static const FpuRegister FpuArgumentRegisters[];
+  static const intptr_t kFpuArgumentRegisters =
+      R(FA0) | R(FA1) | R(FA2) | R(FA3) | R(FA4) | R(FA5) | R(FA6) | R(FA7);
+  static const intptr_t kNumFpuArgRegs = 8;
+
+  static const bool kArgumentIntRegXorFpuReg = false;
+
+  static constexpr intptr_t kCalleeSaveCpuRegisters = kAbiPreservedCpuRegs;
+
+  // Whether larger than wordsize arguments are aligned to even registers.
+  static constexpr AlignmentStrategy kArgumentRegisterAlignment =
+      kAlignedToWordSize;
+
+  // How stack arguments are aligned.
+  static constexpr AlignmentStrategy kArgumentStackAlignment =
+      kAlignedToValueSize;
+
+  // How fields in compounds are aligned.
+  static constexpr AlignmentStrategy kFieldAlignment = kAlignedToValueSize;
+
+  // Whether 1 or 2 byte-sized arguments or return values are passed extended
+  // to 4 bytes.
+  // TODO(ffi): Need to add kExtendedToWord.
+  static constexpr ExtensionStrategy kReturnRegisterExtension = kExtendedTo4;
+  static constexpr ExtensionStrategy kArgumentRegisterExtension = kExtendedTo4;
+  static constexpr ExtensionStrategy kArgumentStackExtension = kNotExtended;
+
+  static constexpr Register kReturnReg = A0;
+  static constexpr Register kSecondReturnReg = A1;
+  static constexpr FpuRegister kReturnFpuReg = FA0;
+
+  static constexpr Register kFfiAnyNonAbiRegister = S2;  // S0=FP, S1=THR
+  static constexpr Register kFirstNonArgumentRegister = T0;
+  static constexpr Register kSecondNonArgumentRegister = T1;
+  static constexpr Register kStackPointerRegister = SPREG;
+
+  COMPILE_ASSERT(
+      ((R(kFirstNonArgumentRegister) | R(kSecondNonArgumentRegister)) &
+       (kArgumentRegisters | R(kPointerToReturnStructRegisterCall))) == 0);
+};
+
+// TODO(riscv): Architecture-independent parts of the compiler should use
+// compare-and-branch instead of condition codes.
+enum Condition {
+  kNoCondition = -1,
+  EQ = 0,   // equal
+  NE = 1,   // not equal
+  CS = 2,   // carry set/unsigned higher or same
+  CC = 3,   // carry clear/unsigned lower
+  MI = 4,   // minus/negative
+  PL = 5,   // plus/positive or zero
+  VS = 6,   // overflow
+  VC = 7,   // no overflow
+  HI = 8,   // unsigned higher
+  LS = 9,   // unsigned lower or same
+  GE = 10,  // signed greater than or equal
+  LT = 11,  // signed less than
+  GT = 12,  // signed greater than
+  LE = 13,  // signed less than or equal
+  AL = 14,  // always (unconditional)
+  NV = 15,  // special condition (unused; carried over from ARM's encoding)
+  kNumberOfConditions = 16,
+
+  // Platform-independent variants declared for all platforms
+  EQUAL = EQ,
+  ZERO = EQUAL,
+  NOT_EQUAL = NE,
+  NOT_ZERO = NOT_EQUAL,
+  LESS = LT,
+  LESS_EQUAL = LE,
+  GREATER_EQUAL = GE,
+  GREATER = GT,
+  UNSIGNED_LESS = CC,
+  UNSIGNED_LESS_EQUAL = LS,
+  UNSIGNED_GREATER = HI,
+  UNSIGNED_GREATER_EQUAL = CS,
+  OVERFLOW = VS,
+  NO_OVERFLOW = VC,
+
+  kInvalidCondition = 16
+};
+
+static inline Condition InvertCondition(Condition c) {
+  COMPILE_ASSERT((EQ ^ NE) == 1);
+  COMPILE_ASSERT((CS ^ CC) == 1);
+  COMPILE_ASSERT((MI ^ PL) == 1);
+  COMPILE_ASSERT((VS ^ VC) == 1);
+  COMPILE_ASSERT((HI ^ LS) == 1);
+  COMPILE_ASSERT((GE ^ LT) == 1);
+  COMPILE_ASSERT((GT ^ LE) == 1);
+  COMPILE_ASSERT((AL ^ NV) == 1);
+  ASSERT(c != AL);
+  ASSERT(c != kInvalidCondition);
+  return static_cast<Condition>(c ^ 1);
+}
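+// For example, InvertCondition(EQ) == NE and InvertCondition(HI) == LS: each
+// condition and its inverse differ only in the low bit.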
+
+enum ScaleFactor {
+  TIMES_1 = 0,
+  TIMES_2 = 1,
+  TIMES_4 = 2,
+  TIMES_8 = 3,
+  TIMES_16 = 4,
+// We can't include vm/compiler/runtime_api.h, so just be explicit instead
+// of using (dart::)kWordSizeLog2.
+#if defined(TARGET_ARCH_IS_64_BIT)
+  // Used for Smi-boxed indices.
+  TIMES_HALF_WORD_SIZE = kInt64SizeLog2 - 1,
+  // Used for unboxed indices.
+  TIMES_WORD_SIZE = kInt64SizeLog2,
+#elif defined(TARGET_ARCH_IS_32_BIT)
+  // Used for Smi-boxed indices.
+  TIMES_HALF_WORD_SIZE = kInt32SizeLog2 - 1,
+  // Used for unboxed indices.
+  TIMES_WORD_SIZE = kInt32SizeLog2,
+#else
+#error "Unexpected word size"
+#endif
+#if !defined(DART_COMPRESSED_POINTERS)
+  TIMES_COMPRESSED_WORD_SIZE = TIMES_WORD_SIZE,
+#else
+  TIMES_COMPRESSED_WORD_SIZE = TIMES_HALF_WORD_SIZE,
+#endif
+};
+
+const uword kBreakInstructionFiller = 0;  // trap or c.trap
+
+inline int32_t SignExtend(int N, int32_t value) {
+  return (value << (32 - N)) >> (32 - N);
+}
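+// For example, SignExtend(12, 0xFFF) == -1 and SignExtend(12, 0x7FF) == 2047.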
+
+inline intx_t sign_extend(int32_t x) {
+  return static_cast<intx_t>(x);
+}
+inline intx_t sign_extend(int64_t x) {
+  return static_cast<intx_t>(x);
+}
+inline intx_t sign_extend(uint32_t x) {
+  return static_cast<intx_t>(static_cast<int32_t>(x));
+}
+inline intx_t sign_extend(uint64_t x) {
+  return static_cast<intx_t>(static_cast<int64_t>(x));
+}
+
+enum Opcode {
+  LUI = 0b0110111,
+  AUIPC = 0b0010111,
+  JAL = 0b1101111,
+  JALR = 0b1100111,
+  BRANCH = 0b1100011,
+  LOAD = 0b0000011,
+  STORE = 0b0100011,
+  OPIMM = 0b0010011,
+  OP = 0b0110011,
+  MISCMEM = 0b0001111,
+  SYSTEM = 0b1110011,
+  OP32 = 0b0111011,
+  OPIMM32 = 0b0011011,
+  AMO = 0b0101111,
+  LOADFP = 0b0000111,
+  STOREFP = 0b0100111,
+  FMADD = 0b1000011,
+  FMSUB = 0b1000111,
+  FNMSUB = 0b1001011,
+  FNMADD = 0b1001111,
+  OPFP = 0b1010011,
+};
+
+enum Funct12 {
+  ECALL = 0,
+  EBREAK = 1,
+};
+
+enum Funct3 {
+  F3_0 = 0,
+  F3_1 = 1,
+
+  BEQ = 0b000,
+  BNE = 0b001,
+  BLT = 0b100,
+  BGE = 0b101,
+  BLTU = 0b110,
+  BGEU = 0b111,
+
+  LB = 0b000,
+  LH = 0b001,
+  LW = 0b010,
+  LBU = 0b100,
+  LHU = 0b101,
+  LWU = 0b110,
+  LD = 0b011,
+
+  SB = 0b000,
+  SH = 0b001,
+  SW = 0b010,
+  SD = 0b011,
+
+  ADDI = 0b000,
+  SLLI = 0b001,
+  SLTI = 0b010,
+  SLTIU = 0b011,
+  XORI = 0b100,
+  SRI = 0b101,
+  ORI = 0b110,
+  ANDI = 0b111,
+
+  ADD = 0b000,
+  SLL = 0b001,
+  SLT = 0b010,
+  SLTU = 0b011,
+  XOR = 0b100,
+  SR = 0b101,
+  OR = 0b110,
+  AND = 0b111,
+
+  FENCE = 0b000,
+  FENCEI = 0b001,
+
+  CSRRW = 0b001,
+  CSRRS = 0b010,
+  CSRRC = 0b011,
+  CSRRWI = 0b101,
+  CSRRSI = 0b110,
+  CSRRCI = 0b111,
+
+  MUL = 0b000,
+  MULH = 0b001,
+  MULHSU = 0b010,
+  MULHU = 0b011,
+  DIV = 0b100,
+  DIVU = 0b101,
+  REM = 0b110,
+  REMU = 0b111,
+
+  MULW = 0b000,
+  DIVW = 0b100,
+  DIVUW = 0b101,
+  REMW = 0b110,
+  REMUW = 0b111,
+
+  WIDTH32 = 0b010,
+  WIDTH64 = 0b011,
+
+  S = 0b010,
+  D = 0b011,
+  J = 0b000,
+  JN = 0b001,
+  JX = 0b010,
+  MIN = 0b000,
+  MAX = 0b001,
+  FEQ = 0b010,
+  FLT = 0b001,
+  FLE = 0b000,
+};
+
+enum Funct7 {
+  F7_0 = 0,
+  SRA = 0b0100000,
+  SUB = 0b0100000,
+  MULDIV = 0b0000001,
+
+  FADDS = 0b0000000,
+  FSUBS = 0b0000100,
+  FMULS = 0b0001000,
+  FDIVS = 0b0001100,
+  FSQRTS = 0b0101100,
+  FSGNJS = 0b0010000,
+  FMINMAXS = 0b0010100,
+  FCMPS = 0b1010000,
+  FCLASSS = 0b1110000,
+  FCVTintS = 0b1100000,
+  FCVTSint = 0b1101000,
+  FMVXW = 0b1110000,
+  FMVWX = 0b1111000,
+
+  FADDD = 0b0000001,
+  FSUBD = 0b0000101,
+  FMULD = 0b0001001,
+  FDIVD = 0b0001101,
+  FSQRTD = 0b0101101,
+  FSGNJD = 0b0010001,
+  FMINMAXD = 0b0010101,
+  FCVTS = 0b0100000,
+  FCVTD = 0b0100001,
+  FCMPD = 0b1010001,
+  FCLASSD = 0b1110001,
+  FCVTintD = 0b1100001,
+  FCVTDint = 0b1101001,
+  FMVXD = 0b1110001,
+  FMVDX = 0b1111001,
+};
+
+enum Funct5 {
+  LR = 0b00010,
+  SC = 0b00011,
+  AMOSWAP = 0b00001,
+  AMOADD = 0b00000,
+  AMOXOR = 0b00100,
+  AMOAND = 0b01100,
+  AMOOR = 0b01000,
+  AMOMIN = 0b10000,
+  AMOMAX = 0b10100,
+  AMOMINU = 0b11000,
+  AMOMAXU = 0b11100,
+};
+
+enum Funct2 {
+  F2_S = 0b00,
+  F2_D = 0b01,
+};
+
+enum RoundingMode {
+  RNE = 0b000,  // Round to Nearest, ties to Even
+  RTZ = 0b001,  // Round toward Zero
+  RDN = 0b010,  // Round Down (toward negative infinity)
+  RUP = 0b011,  // Round Up (toward positive infinity)
+  RMM = 0b100,  // Round to nearest, ties to Max Magnitude
+  DYN = 0b111,  // Dynamic rounding mode
+};
+
+enum FcvtRs2 {
+  W = 0b00000,
+  WU = 0b00001,
+  L = 0b00010,
+  LU = 0b00011,
+};
+
+enum FClass {
+  kFClassNegInfinity = 1 << 0,
+  kFClassNegNormal = 1 << 1,
+  kFClassNegSubnormal = 1 << 2,
+  kFClassNegZero = 1 << 3,
+  kFClassPosZero = 1 << 4,
+  kFClassPosSubnormal = 1 << 5,
+  kFClassPosNormal = 1 << 6,
+  kFClassPosInfinity = 1 << 7,
+  kFClassSignallingNan = 1 << 8,
+  kFClassQuietNan = 1 << 9,
+};
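+
+// Illustrative note (not part of the original patch): fclass.s/fclass.d set
+// exactly one of these bits. For example, fclass.d(-0.0) yields
+// kFClassNegZero (1 << 3) and fclass.d of a quiet NaN yields kFClassQuietNan.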
+
+enum HartEffects {
+  kWrite = 1 << 0,
+  kRead = 1 << 1,
+  kOutput = 1 << 2,
+  kInput = 1 << 3,
+  kMemory = kWrite | kRead,
+  kIO = kOutput | kInput,
+  kAll = kMemory | kIO,
+};
+
+const intptr_t kReleaseShift = 25;
+const intptr_t kAcquireShift = 26;
+
+#define DEFINE_REG_ENCODING(type, name, shift)                                 \
+  inline uint32_t Is##name(type r) { return static_cast<uint32_t>(r) < 32; }   \
+  inline uint32_t Encode##name(type r) {                                       \
+    ASSERT(Is##name(r));                                                       \
+    return static_cast<uint32_t>(r) << shift;                                  \
+  }                                                                            \
+  inline type Decode##name(uint32_t encoding) {                                \
+    return type((encoding >> shift) & 31);                                     \
+  }
+
+DEFINE_REG_ENCODING(Register, Rd, 7)
+DEFINE_REG_ENCODING(Register, Rs1, 15)
+DEFINE_REG_ENCODING(Register, Rs2, 20)
+DEFINE_REG_ENCODING(FRegister, FRd, 7)
+DEFINE_REG_ENCODING(FRegister, FRs1, 15)
+DEFINE_REG_ENCODING(FRegister, FRs2, 20)
+DEFINE_REG_ENCODING(FRegister, FRs3, 27)
+#undef DEFINE_REG_ENCODING
+
+#define DEFINE_FUNCT_ENCODING(type, name, shift, mask)                         \
+  inline uint32_t Is##name(type f) { return (f & mask) == f; }                 \
+  inline uint32_t Encode##name(type f) {                                       \
+    ASSERT(Is##name(f));                                                       \
+    return f << shift;                                                         \
+  }                                                                            \
+  inline type Decode##name(uint32_t encoding) {                                \
+    return static_cast<type>((encoding >> shift) & mask);                      \
+  }
+
+DEFINE_FUNCT_ENCODING(Opcode, Opcode, 0, 0x7F)
+DEFINE_FUNCT_ENCODING(Funct2, Funct2, 25, 0x3)
+DEFINE_FUNCT_ENCODING(Funct3, Funct3, 12, 0x7)
+DEFINE_FUNCT_ENCODING(Funct5, Funct5, 27, 0x1F)
+DEFINE_FUNCT_ENCODING(Funct7, Funct7, 25, 0x7F)
+DEFINE_FUNCT_ENCODING(Funct12, Funct12, 20, 0xFFF)
+#if XLEN == 32
+DEFINE_FUNCT_ENCODING(uint32_t, Shamt, 20, 0x1F)
+#elif XLEN == 64
+DEFINE_FUNCT_ENCODING(uint32_t, Shamt, 20, 0x3F)
+#endif
+DEFINE_FUNCT_ENCODING(RoundingMode, RoundingMode, 12, 0x7)
+#undef DEFINE_FUNCT_ENCODING
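+
+// Illustrative sketch (not part of the original patch): an R-type
+// "add a0, a1, a2" assembles from these helpers as
+//   EncodeOpcode(OP) | EncodeRd(A0) | EncodeFunct3(ADD) |
+//       EncodeRs1(A1) | EncodeRs2(A2) | EncodeFunct7(F7_0)
+// which yields the 32-bit word 0x00C58533. A0/A1/A2 are the standard ABI
+// register names, assumed to be defined earlier in this header.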
+
+inline bool IsBTypeImm(intptr_t imm) {
+  return Utils::IsInt(12, imm) && Utils::IsAligned(imm, 2);
+}
+inline uint32_t EncodeBTypeImm(intptr_t imm) {
+  ASSERT(IsBTypeImm(imm));
+  uint32_t encoded = 0;
+  encoded |= ((imm >> 12) & 0x1) << 31;
+  encoded |= ((imm >> 5) & 0x3f) << 25;
+  encoded |= ((imm >> 1) & 0xf) << 8;
+  encoded |= ((imm >> 11) & 0x1) << 7;
+  return encoded;
+}
+inline intptr_t DecodeBTypeImm(uint32_t encoded) {
+  uint32_t imm = 0;
+  imm |= (((encoded >> 31) & 0x1) << 12);
+  imm |= (((encoded >> 25) & 0x3f) << 5);
+  imm |= (((encoded >> 8) & 0xf) << 1);
+  imm |= (((encoded >> 7) & 0x1) << 11);
+  return SignExtend(12, imm);
+}
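+
+// Illustrative round trip (not part of the original patch): a backward
+// branch offset of -84 bytes survives encode/decode:
+//   ASSERT(IsBTypeImm(-84));
+//   ASSERT(DecodeBTypeImm(EncodeBTypeImm(-84)) == -84);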
+
+inline bool IsJTypeImm(intptr_t imm) {
+  return Utils::IsInt(20, imm) && Utils::IsAligned(imm, 2);
+}
+inline uint32_t EncodeJTypeImm(intptr_t imm) {
+  ASSERT(IsJTypeImm(imm));
+  uint32_t encoded = 0;
+  encoded |= ((imm >> 20) & 0x1) << 31;
+  encoded |= ((imm >> 1) & 0x3ff) << 21;
+  encoded |= ((imm >> 11) & 0x1) << 20;
+  encoded |= ((imm >> 12) & 0xff) << 12;
+  return encoded;
+}
+inline intptr_t DecodeJTypeImm(uint32_t encoded) {
+  uint32_t imm = 0;
+  imm |= (((encoded >> 31) & 0x1) << 20);
+  imm |= (((encoded >> 21) & 0x3ff) << 1);
+  imm |= (((encoded >> 20) & 0x1) << 11);
+  imm |= (((encoded >> 12) & 0xff) << 12);
+  return SignExtend(20, imm);
+}
+
+inline bool IsITypeImm(intptr_t imm) {
+  return Utils::IsInt(12, imm);
+}
+inline uint32_t EncodeITypeImm(intptr_t imm) {
+  ASSERT(IsITypeImm(imm));
+  return imm << 20;
+}
+inline intptr_t DecodeITypeImm(uint32_t encoded) {
+  return SignExtend(12, encoded >> 20);
+}
+
+inline bool IsUTypeImm(intptr_t imm) {
+  return Utils::IsInt(32, imm) && Utils::IsAligned(imm, 1 << 12);
+}
+inline uint32_t EncodeUTypeImm(intptr_t imm) {
+  ASSERT(IsUTypeImm(imm));
+  return imm;
+}
+inline intptr_t DecodeUTypeImm(uint32_t encoded) {
+  return SignExtend(32, encoded & ~((1 << 12) - 1));
+}
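+
+// A common use of the U/I-type pair (illustrative sketch): materializing a
+// 32-bit constant as LUI + ADDI. Because the I-type immediate is
+// sign-extended, the high part rounds up when bit 11 of the constant is set:
+//   intptr_t imm = 0x12345678;
+//   intptr_t hi = (imm + 0x800) & ~0xFFF;  // IsUTypeImm(hi)
+//   intptr_t lo = imm - hi;                // IsITypeImm(lo)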
+
+inline bool IsSTypeImm(intptr_t imm) {
+  return Utils::IsInt(12, imm);
+}
+inline uint32_t EncodeSTypeImm(intptr_t imm) {
+  ASSERT(IsSTypeImm(imm));
+  uint32_t encoded = 0;
+  encoded |= ((imm >> 5) & 0x7f) << 25;
+  encoded |= ((imm >> 0) & 0x1f) << 7;
+  return encoded;
+}
+inline intptr_t DecodeSTypeImm(uint32_t encoded) {
+  uint32_t imm = 0;
+  imm |= (((encoded >> 25) & 0x7f) << 5);
+  imm |= (((encoded >> 7) & 0x1f) << 0);
+  return SignExtend(12, imm);
+}
+
+inline bool IsCInstruction(uint16_t parcel) {
+  return (parcel & 3) != 3;
+}
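+
+// For example, the parcel 0x8082 (c.jr ra, the standard return) has low bits
+// 0b10, so IsCInstruction(0x8082) is true; the first parcel of any 32-bit
+// instruction ends in 0b11.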
+
+class Instr {
+ public:
+  explicit Instr(uint32_t encoding) : encoding_(encoding) {}
+  uint32_t encoding() const { return encoding_; }
+
+  size_t length() const { return 4; }
+
+  Opcode opcode() const { return DecodeOpcode(encoding_); }
+
+  Register rd() const { return DecodeRd(encoding_); }
+  Register rs1() const { return DecodeRs1(encoding_); }
+  Register rs2() const { return DecodeRs2(encoding_); }
+
+  FRegister frd() const { return DecodeFRd(encoding_); }
+  FRegister frs1() const { return DecodeFRs1(encoding_); }
+  FRegister frs2() const { return DecodeFRs2(encoding_); }
+  FRegister frs3() const { return DecodeFRs3(encoding_); }
+
+  Funct2 funct2() const { return DecodeFunct2(encoding_); }
+  Funct3 funct3() const { return DecodeFunct3(encoding_); }
+  Funct5 funct5() const { return DecodeFunct5(encoding_); }
+  Funct7 funct7() const { return DecodeFunct7(encoding_); }
+  Funct12 funct12() const { return DecodeFunct12(encoding_); }
+
+  uint32_t shamt() const { return DecodeShamt(encoding_); }
+  RoundingMode rounding() const { return DecodeRoundingMode(encoding_); }
+
+  std::memory_order memory_order() const {
+    bool acquire = ((encoding_ >> kAcquireShift) & 1) != 0;
+    bool release = ((encoding_ >> kReleaseShift) & 1) != 0;
+    if (acquire && release) return std::memory_order_acq_rel;
+    if (acquire) return std::memory_order_acquire;
+    if (release) return std::memory_order_release;
+    return std::memory_order_relaxed;
+  }
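+
+  // For example, an amoadd.w.aqrl instruction has both bits set and decodes
+  // to std::memory_order_acq_rel; a plain amoadd.w decodes to relaxed.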
+
+  intx_t itype_imm() const { return DecodeITypeImm(encoding_); }
+  intx_t stype_imm() const { return DecodeSTypeImm(encoding_); }
+  intx_t btype_imm() const { return DecodeBTypeImm(encoding_); }
+  intx_t utype_imm() const { return DecodeUTypeImm(encoding_); }
+  intx_t jtype_imm() const { return DecodeJTypeImm(encoding_); }
+
+  uint32_t csr() const { return encoding_ >> 20; }
+  uint32_t zimm() const { return rs1(); }
+
+  static const uint32_t kBreakPointInstruction = 0;
+  static const uint32_t kInstrSize = 4;
+  static const uint32_t kSimulatorRedirectInstruction = ECALL << 20 | SYSTEM;
+
+ private:
+  const uint32_t encoding_;
+};
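+
+// Usage sketch (illustrative, mirroring the encoding example above):
+//   Instr instr(0x00C58533);  // add a0, a1, a2
+//   ASSERT(instr.opcode() == OP);
+//   ASSERT(instr.funct3() == ADD && instr.funct7() == F7_0);
+//   ASSERT(instr.rd() == A0 && instr.rs1() == A1 && instr.rs2() == A2);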
+
+#define DEFINE_REG_ENCODING(type, name, shift)                                 \
+  inline uint32_t Is##name(type r) { return static_cast<uint32_t>(r) < 32; }   \
+  inline uint32_t Encode##name(type r) {                                       \
+    ASSERT(Is##name(r));                                                       \
+    return static_cast<uint32_t>(r) << shift;                                  \
+  }                                                                            \
+  inline type Decode##name(uint32_t encoding) {                                \
+    return type((encoding >> shift) & 31);                                     \
+  }
+
+#define DEFINE_REG_PRIME_ENCODING(type, name, shift)                           \
+  inline uint32_t Is##name(type r) { return (r >= 8) && (r < 16); }            \
+  inline uint32_t Encode##name(type r) {                                       \
+    ASSERT(Is##name(r));                                                       \
+    return (static_cast<uint32_t>(r) & 7) << shift;                            \
+  }                                                                            \
+  inline type Decode##name(uint32_t encoding) {                                \
+    return type(((encoding >> shift) & 7) + 8);                                \
+  }
+
+DEFINE_REG_ENCODING(Register, CRd, 7)
+DEFINE_REG_ENCODING(Register, CRs1, 7)
+DEFINE_REG_ENCODING(Register, CRs2, 2)
+DEFINE_REG_ENCODING(FRegister, CFRd, 7)
+DEFINE_REG_ENCODING(FRegister, CFRs1, 7)
+DEFINE_REG_ENCODING(FRegister, CFRs2, 2)
+DEFINE_REG_PRIME_ENCODING(Register, CRdp, 2)
+DEFINE_REG_PRIME_ENCODING(Register, CRs1p, 7)
+DEFINE_REG_PRIME_ENCODING(Register, CRs2p, 2)
+DEFINE_REG_PRIME_ENCODING(FRegister, CFRdp, 2)
+DEFINE_REG_PRIME_ENCODING(FRegister, CFRs1p, 7)
+DEFINE_REG_PRIME_ENCODING(FRegister, CFRs2p, 2)
+#undef DEFINE_REG_ENCODING
+#undef DEFINE_REG_PRIME_ENCODING
+
+inline bool IsCSPLoad4Imm(intptr_t imm) {
+  return Utils::IsUint(8, imm) && Utils::IsAligned(imm, 4);
+}
+inline uint32_t EncodeCSPLoad4Imm(intptr_t imm) {
+  ASSERT(IsCSPLoad4Imm(imm));
+  uint32_t encoding = 0;
+  encoding |= ((imm >> 5) & 0x1) << 12;
+  encoding |= ((imm >> 2) & 0x7) << 4;
+  encoding |= ((imm >> 6) & 0x3) << 2;
+  return encoding;
+}
+inline intx_t DecodeCSPLoad4Imm(uint32_t encoding) {
+  uint32_t imm = 0;
+  imm |= ((encoding >> 12) & 0x1) << 5;
+  imm |= ((encoding >> 4) & 0x7) << 2;
+  imm |= ((encoding >> 2) & 0x3) << 6;
+  return imm;
+}
+
+inline bool IsCSPLoad8Imm(intptr_t imm) {
+  return Utils::IsUint(9, imm) && Utils::IsAligned(imm, 8);
+}
+inline uint32_t EncodeCSPLoad8Imm(intptr_t imm) {
+  ASSERT(IsCSPLoad8Imm(imm));
+  uint32_t encoding = 0;
+  encoding |= ((imm >> 5) & 0x1) << 12;
+  encoding |= ((imm >> 3) & 0x3) << 5;
+  encoding |= ((imm >> 6) & 0x7) << 2;
+  return encoding;
+}
+inline intx_t DecodeCSPLoad8Imm(uint32_t encoding) {
+  uint32_t imm = 0;
+  imm |= ((encoding >> 12) & 0x1) << 5;
+  imm |= ((encoding >> 5) & 0x3) << 3;
+  imm |= ((encoding >> 2) & 0x7) << 6;
+  return imm;
+}
+
+inline bool IsCSPStore4Imm(intptr_t imm) {
+  return Utils::IsUint(8, imm) && Utils::IsAligned(imm, 4);
+}
+inline uint32_t EncodeCSPStore4Imm(intptr_t imm) {
+  ASSERT(IsCSPStore4Imm(imm));
+  uint32_t encoding = 0;
+  encoding |= ((imm >> 2) & 0xF) << 9;
+  encoding |= ((imm >> 6) & 0x3) << 7;
+  return encoding;
+}
+inline intx_t DecodeCSPStore4Imm(uint32_t encoding) {
+  uint32_t imm = 0;
+  imm |= ((encoding >> 9) & 0xF) << 2;
+  imm |= ((encoding >> 7) & 0x3) << 6;
+  return imm;
+}
+
+inline bool IsCSPStore8Imm(intptr_t imm) {
+  return Utils::IsUint(9, imm) && Utils::IsAligned(imm, 8);
+}
+inline uint32_t EncodeCSPStore8Imm(intptr_t imm) {
+  ASSERT(IsCSPStore8Imm(imm));
+  uint32_t encoding = 0;
+  encoding |= ((imm >> 3) & 0x7) << 10;
+  encoding |= ((imm >> 6) & 0x7) << 7;
+  return encoding;
+}
+inline intx_t DecodeCSPStore8Imm(uint32_t encoding) {
+  uint32_t imm = 0;
+  imm |= ((encoding >> 10) & 0x7) << 3;
+  imm |= ((encoding >> 7) & 0x7) << 6;
+  return imm;
+}
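+
+// Illustrative round trip: the SP-relative immediates are zero-extended, so
+// only non-negative, suitably aligned offsets encode:
+//   ASSERT(IsCSPLoad8Imm(264) && !IsCSPLoad8Imm(-8));
+//   ASSERT(DecodeCSPLoad8Imm(EncodeCSPLoad8Imm(264)) == 264);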
+
+inline bool IsCMem4Imm(intptr_t imm) {
+  return Utils::IsUint(7, imm) && Utils::IsAligned(imm, 4);
+}
+inline uint32_t EncodeCMem4Imm(intptr_t imm) {
+  ASSERT(IsCMem4Imm(imm));
+  uint32_t encoding = 0;
+  encoding |= ((imm >> 3) & 0x7) << 10;
+  encoding |= ((imm >> 2) & 0x1) << 6;
+  encoding |= ((imm >> 6) & 0x1) << 5;
+  return encoding;
+}
+inline intx_t DecodeCMem4Imm(uint32_t encoding) {
+  uint32_t imm = 0;
+  imm |= ((encoding >> 10) & 0x7) << 3;
+  imm |= ((encoding >> 6) & 0x1) << 2;
+  imm |= ((encoding >> 5) & 0x1) << 6;
+  return imm;
+}
+
+inline bool IsCMem8Imm(intptr_t imm) {
+  return Utils::IsUint(8, imm) && Utils::IsAligned(imm, 8);
+}
+inline uint32_t EncodeCMem8Imm(intptr_t imm) {
+  ASSERT(IsCMem8Imm(imm));
+  uint32_t encoding = 0;
+  encoding |= ((imm >> 3) & 0x7) << 10;
+  encoding |= ((imm >> 6) & 0x3) << 5;
+  return encoding;
+}
+inline intx_t DecodeCMem8Imm(uint32_t encoding) {
+  uint32_t imm = 0;
+  imm |= ((encoding >> 10) & 0x7) << 3;
+  imm |= ((encoding >> 5) & 0x3) << 6;
+  return imm;
+}
+
+inline bool IsCJImm(intptr_t imm) {
+  return Utils::IsInt(11, imm) && Utils::IsAligned(imm, 2);
+}
+inline uint32_t EncodeCJImm(intptr_t imm) {
+  ASSERT(IsCJImm(imm));
+  uint32_t encoding = 0;
+  encoding |= ((imm >> 11) & 0x1) << 12;
+  encoding |= ((imm >> 4) & 0x1) << 11;
+  encoding |= ((imm >> 8) & 0x3) << 9;
+  encoding |= ((imm >> 10) & 0x1) << 8;
+  encoding |= ((imm >> 6) & 0x1) << 7;
+  encoding |= ((imm >> 7) & 0x1) << 6;
+  encoding |= ((imm >> 1) & 0x7) << 3;
+  encoding |= ((imm >> 5) & 0x1) << 2;
+  return encoding;
+}
+inline intx_t DecodeCJImm(uint32_t encoding) {
+  uint32_t imm = 0;
+  imm |= ((encoding >> 12) & 0x1) << 11;
+  imm |= ((encoding >> 11) & 0x1) << 4;
+  imm |= ((encoding >> 9) & 0x3) << 8;
+  imm |= ((encoding >> 8) & 0x1) << 10;
+  imm |= ((encoding >> 7) & 0x1) << 6;
+  imm |= ((encoding >> 6) & 0x1) << 7;
+  imm |= ((encoding >> 3) & 0x7) << 1;
+  imm |= ((encoding >> 2) & 0x1) << 5;
+  return SignExtend(11, imm);
+}
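+
+// Illustrative round trip through the heavily scrambled c.j offset field:
+//   ASSERT(DecodeCJImm(EncodeCJImm(-2)) == -2);  // encodes as 0x1FFC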
+
+inline bool IsCBImm(intptr_t imm) {
+  return Utils::IsInt(8, imm) && Utils::IsAligned(imm, 2);
+}
+inline uint32_t EncodeCBImm(intptr_t imm) {
+  ASSERT(IsCBImm(imm));
+  uint32_t encoding = 0;
+  encoding |= ((imm >> 8) & 0x1) << 12;
+  encoding |= ((imm >> 3) & 0x3) << 10;
+  encoding |= ((imm >> 6) & 0x3) << 5;
+  encoding |= ((imm >> 1) & 0x3) << 3;
+  encoding |= ((imm >> 5) & 0x1) << 2;
+  return encoding;
+}
+inline intx_t DecodeCBImm(uint32_t encoding) {
+  uint32_t imm = 0;
+  imm |= ((encoding >> 12) & 0x1) << 8;
+  imm |= ((encoding >> 10) & 0x3) << 3;
+  imm |= ((encoding >> 5) & 0x3) << 6;
+  imm |= ((encoding >> 3) & 0x3) << 1;
+  imm |= ((encoding >> 2) & 0x1) << 5;
+  return SignExtend(8, imm);
+}
+
+inline bool IsCIImm(intptr_t imm) {
+  return Utils::IsInt(6, imm) && Utils::IsAligned(imm, 1);
+}
+inline uint32_t EncodeCIImm(intptr_t imm) {
+  ASSERT(IsCIImm(imm));
+  uint32_t encoding = 0;
+  encoding |= ((imm >> 5) & 0x1) << 12;
+  encoding |= ((imm >> 0) & 0x1F) << 2;
+  return encoding;
+}
+inline intx_t DecodeCIImm(uint32_t encoding) {
+  uint32_t imm = 0;
+  imm |= ((encoding >> 12) & 0x1) << 5;
+  imm |= ((encoding >> 2) & 0x1F) << 0;
+  return SignExtend(6, imm);
+}
+
+inline bool IsCUImm(intptr_t imm) {
+  return Utils::IsInt(17, imm) && Utils::IsAligned(imm, 1 << 12);
+}
+inline uint32_t EncodeCUImm(intptr_t imm) {
+  ASSERT(IsCUImm(imm));
+  uint32_t encoding = 0;
+  encoding |= ((imm >> 17) & 0x1) << 12;
+  encoding |= ((imm >> 12) & 0x1F) << 2;
+  return encoding;
+}
+inline intx_t DecodeCUImm(uint32_t encoding) {
+  uint32_t imm = 0;
+  imm |= ((encoding >> 12) & 0x1) << 17;
+  imm |= ((encoding >> 2) & 0x1F) << 12;
+  return SignExtend(17, imm);
+}
+
+inline bool IsCI16Imm(intptr_t imm) {
+  return Utils::IsInt(10, imm) && Utils::IsAligned(imm, 16);
+}
+inline uint32_t EncodeCI16Imm(intptr_t imm) {
+  ASSERT(IsCI16Imm(imm));
+  uint32_t encoding = 0;
+  encoding |= ((imm >> 9) & 0x1) << 12;
+  encoding |= ((imm >> 4) & 0x1) << 6;
+  encoding |= ((imm >> 6) & 0x1) << 5;
+  encoding |= ((imm >> 7) & 0x3) << 3;
+  encoding |= ((imm >> 5) & 0x1) << 2;
+  return encoding;
+}
+inline intx_t DecodeCI16Imm(uint32_t encoding) {
+  uint32_t imm = 0;
+  imm |= ((encoding >> 12) & 0x1) << 9;
+  imm |= ((encoding >> 6) & 0x1) << 4;
+  imm |= ((encoding >> 5) & 0x1) << 6;
+  imm |= ((encoding >> 3) & 0x3) << 7;
+  imm |= ((encoding >> 2) & 0x1) << 5;
+  return SignExtend(10, imm);
+}
+
+inline bool IsCI4SPNImm(intptr_t imm) {
+  return Utils::IsUint(9, imm) && Utils::IsAligned(imm, 4);
+}
+inline uint32_t EncodeCI4SPNImm(intptr_t imm) {
+  ASSERT(IsCI4SPNImm(imm));
+  uint32_t encoding = 0;
+  encoding |= ((imm >> 4) & 0x3) << 11;
+  encoding |= ((imm >> 6) & 0xF) << 7;
+  encoding |= ((imm >> 2) & 0x1) << 6;
+  encoding |= ((imm >> 3) & 0x1) << 5;
+  return encoding;
+}
+inline intx_t DecodeCI4SPNImm(uint32_t encoding) {
+  uint32_t imm = 0;
+  imm |= ((encoding >> 11) & 0x3) << 4;
+  imm |= ((encoding >> 7) & 0xF) << 6;
+  imm |= ((encoding >> 6) & 0x1) << 2;
+  imm |= ((encoding >> 5) & 0x1) << 3;
+  return imm;
+}
+
+enum COpcode {
+  C_OP_MASK = 0b1110000000000011,
+
+  C_ADDI4SPN = 0b0000000000000000,
+  C_FLD = 0b0010000000000000,
+  C_LW = 0b0100000000000000,
+  C_FLW = 0b0110000000000000,
+  C_LD = 0b0110000000000000,
+  C_FSD = 0b1010000000000000,
+  C_SW = 0b1100000000000000,
+  C_FSW = 0b1110000000000000,
+  C_SD = 0b1110000000000000,
+
+  C_ADDI = 0b0000000000000001,
+  C_JAL = 0b0010000000000001,
+  C_ADDIW = 0b0010000000000001,
+  C_LI = 0b0100000000000001,
+  C_ADDI16SP = 0b0110000000000001,
+  C_LUI = 0b0110000000000001,
+
+  C_MISCALU = 0b1000000000000001,
+  C_MISCALU_MASK = 0b1110110000000011,
+  C_SRLI = 0b1000000000000001,
+  C_SRAI = 0b1000010000000001,
+  C_ANDI = 0b1000100000000001,
+  C_RR = 0b1000110000000001,
+  C_RR_MASK = 0b1111110001100011,
+  C_SUB = 0b1000110000000001,
+  C_XOR = 0b1000110000100001,
+  C_OR = 0b1000110001000001,
+  C_AND = 0b1000110001100001,
+  C_SUBW = 0b1001110000000001,
+  C_ADDW = 0b1001110000100001,
+
+  C_J = 0b1010000000000001,
+  C_BEQZ = 0b1100000000000001,
+  C_BNEZ = 0b1110000000000001,
+
+  C_SLLI = 0b0000000000000010,
+  C_FLDSP = 0b0010000000000010,
+  C_LWSP = 0b0100000000000010,
+  C_FLWSP = 0b0110000000000010,
+  C_LDSP = 0b0110000000000010,
+  C_JR = 0b1000000000000010,
+  C_MV = 0b1000000000000010,
+  C_JALR = 0b1001000000000010,
+  C_ADD = 0b1001000000000010,
+  C_FSDSP = 0b1010000000000010,
+  C_SWSP = 0b1100000000000010,
+  C_FSWSP = 0b1110000000000010,
+  C_SDSP = 0b1110000000000010,
+
+  C_NOP = 0b0000000000000001,
+  C_EBREAK = 0b1001000000000010,
+};
+
+class CInstr {
+ public:
+  explicit CInstr(uint16_t encoding) : encoding_(encoding) {}
+  uint16_t encoding() const { return encoding_; }
+
+  static const uint32_t kInstrSize = 2;
+  size_t length() const { return kInstrSize; }
+
+  COpcode opcode() const { return COpcode(encoding_ & C_OP_MASK); }
+
+  Register rd() const { return DecodeCRd(encoding_); }
+  Register rs1() const { return DecodeCRd(encoding_); }
+  Register rs2() const { return DecodeCRs2(encoding_); }
+  Register rdp() const { return DecodeCRdp(encoding_); }
+  Register rs1p() const { return DecodeCRs1p(encoding_); }
+  Register rs2p() const { return DecodeCRs2p(encoding_); }
+  FRegister frd() const { return DecodeCFRd(encoding_); }
+  FRegister frs1() const { return DecodeCFRd(encoding_); }
+  FRegister frs2() const { return DecodeCFRs2(encoding_); }
+  FRegister frdp() const { return DecodeCFRdp(encoding_); }
+  FRegister frs1p() const { return DecodeCFRs1p(encoding_); }
+  FRegister frs2p() const { return DecodeCFRs2p(encoding_); }
+
+  intx_t spload4_imm() { return DecodeCSPLoad4Imm(encoding_); }
+  intx_t spload8_imm() { return DecodeCSPLoad8Imm(encoding_); }
+  intx_t spstore4_imm() { return DecodeCSPStore4Imm(encoding_); }
+  intx_t spstore8_imm() { return DecodeCSPStore8Imm(encoding_); }
+  intx_t mem4_imm() { return DecodeCMem4Imm(encoding_); }
+  intx_t mem8_imm() { return DecodeCMem8Imm(encoding_); }
+  intx_t j_imm() { return DecodeCJImm(encoding_); }
+  intx_t b_imm() { return DecodeCBImm(encoding_); }
+  intx_t i_imm() { return DecodeCIImm(encoding_); }
+  intx_t u_imm() { return DecodeCUImm(encoding_); }
+  intx_t i16_imm() { return DecodeCI16Imm(encoding_); }
+  intx_t i4spn_imm() { return DecodeCI4SPNImm(encoding_); }
+
+ private:
+  const uint16_t encoding_;
+};
+
+#define DEFINE_TYPED_ENUM_SET(name, storage_t)                                 \
+  class name##Set;                                                             \
+  class name {                                                                 \
+   public:                                                                     \
+    constexpr explicit name(storage_t encoding) : encoding_(encoding) {}       \
+    constexpr storage_t encoding() const { return encoding_; }                 \
+    constexpr bool operator==(const name& other) const {                       \
+      return encoding_ == other.encoding_;                                     \
+    }                                                                          \
+    constexpr bool operator!=(const name& other) const {                       \
+      return encoding_ != other.encoding_;                                     \
+    }                                                                          \
+    inline constexpr name##Set operator|(const name& other) const;             \
+    inline constexpr name##Set operator|(const name##Set& other) const;        \
+                                                                               \
+   private:                                                                    \
+    const storage_t encoding_;                                                 \
+  };                                                                           \
+  inline std::ostream& operator<<(std::ostream& stream, const name& element) { \
+    return stream << #name << "(" << element.encoding() << ")";                \
+  }                                                                            \
+  class name##Set {                                                            \
+   public:                                                                     \
+    constexpr /* implicit */ name##Set(name element)                           \
+        : encoding_(1u << element.encoding()) {}                               \
+    constexpr explicit name##Set(storage_t encoding) : encoding_(encoding) {}  \
+    constexpr static name##Set Empty() { return name##Set(0); }                \
+    constexpr bool Includes(const name r) const {                              \
+      return (encoding_ & (1 << r.encoding())) != 0;                           \
+    }                                                                          \
+    constexpr bool IncludesAll(const name##Set other) const {                  \
+      return (encoding_ & other.encoding_) == other.encoding_;                 \
+    }                                                                          \
+    constexpr bool IsEmpty() const { return encoding_ == 0; }                  \
+    constexpr bool operator==(const name##Set& other) const {                  \
+      return encoding_ == other.encoding_;                                     \
+    }                                                                          \
+    constexpr bool operator!=(const name##Set& other) const {                  \
+      return encoding_ != other.encoding_;                                     \
+    }                                                                          \
+    constexpr name##Set operator|(const name& other) const {                   \
+      return name##Set(encoding_ | (1 << other.encoding()));                   \
+    }                                                                          \
+    constexpr name##Set operator|(const name##Set& other) const {              \
+      return name##Set(encoding_ | other.encoding_);                           \
+    }                                                                          \
+    constexpr name##Set operator&(const name##Set& other) const {              \
+      return name##Set(encoding_ & other.encoding_);                           \
+    }                                                                          \
+                                                                               \
+   private:                                                                    \
+    storage_t encoding_;                                                       \
+  };                                                                           \
+  constexpr name##Set name::operator|(const name& other) const {               \
+    return name##Set((1u << encoding_) | (1u << other.encoding_));             \
+  }                                                                            \
+  constexpr name##Set name::operator|(const name##Set& other) const {          \
+    return other | *this;                                                      \
+  }
+
+DEFINE_TYPED_ENUM_SET(Extension, uint32_t)
+static constexpr Extension RV_I(0);  // Integer base
+static constexpr Extension RV_M(1);  // Multiply/divide
+static constexpr Extension RV_A(2);  // Atomic
+static constexpr Extension RV_F(3);  // Single-precision floating point
+static constexpr Extension RV_D(4);  // Double-precision floating point
+static constexpr Extension RV_C(5);  // Compressed instructions
+static constexpr ExtensionSet RV_G = RV_I | RV_M | RV_A | RV_F | RV_D;
+static constexpr ExtensionSet RV_GC = RV_G | RV_C;
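+
+// Usage sketch (illustrative): the typed wrapper keeps extension sets from
+// mixing with plain integers while remaining constexpr:
+//   static_assert(RV_GC.IncludesAll(RV_G), "GC contains G");
+//   ASSERT(RV_G.Includes(RV_M) && !RV_G.Includes(RV_C));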
+
+#undef R
+
+}  // namespace dart
+
+#endif  // RUNTIME_VM_CONSTANTS_RISCV_H_
diff --git a/runtime/vm/cpu.h b/runtime/vm/cpu.h
index 2713cab..bc57030 100644
--- a/runtime/vm/cpu.h
+++ b/runtime/vm/cpu.h
@@ -30,6 +30,8 @@
 #include "vm/cpu_arm.h"
 #elif defined(TARGET_ARCH_ARM64)
 #include "vm/cpu_arm64.h"
+#elif defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+#include "vm/cpu_riscv.h"
 #else
 #error Unknown architecture.
 #endif
diff --git a/runtime/vm/cpu_riscv.cc b/runtime/vm/cpu_riscv.cc
new file mode 100644
index 0000000..9d61655
--- /dev/null
+++ b/runtime/vm/cpu_riscv.cc
@@ -0,0 +1,125 @@
+// Copyright (c) 2021, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/globals.h"
+#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+
+#include "vm/cpu.h"
+#include "vm/cpu_riscv.h"
+
+#include "vm/cpuinfo.h"
+#include "vm/simulator.h"
+
+#if !defined(USING_SIMULATOR)
+#if !defined(DART_HOST_OS_FUCHSIA)
+#include <sys/syscall.h>
+#else
+#include <zircon/syscalls.h>
+#endif
+#include <unistd.h>
+#endif
+
+#if defined(DART_HOST_OS_MACOS) || defined(DART_HOST_OS_IOS)
+#include <libkern/OSCacheControl.h>
+#endif
+
+namespace dart {
+
+void CPU::FlushICache(uword start, uword size) {
+#if defined(DART_PRECOMPILED_RUNTIME)
+  UNREACHABLE();
+#elif !defined(USING_SIMULATOR)
+  // Nothing to do when there are no instructions to flush.
+  if (size == 0) {
+    return;
+  }
+
+// ARM recommends using the gcc intrinsic __clear_cache on Linux and Android.
+//
+// https://community.arm.com/developer/ip-products/processors/b/processors-ip-blog/posts/caches-and-self-modifying-code
+//
+// On iOS we use sys_icache_invalidate from Darwin. See:
+//
+// https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man3/sys_icache_invalidate.3.html
+#if defined(DART_HOST_OS_MACOS) || defined(DART_HOST_OS_IOS)
+  sys_icache_invalidate(reinterpret_cast<void*>(start), size);
+#elif defined(DART_HOST_OS_ANDROID) || defined(DART_HOST_OS_LINUX)
+  char* beg = reinterpret_cast<char*>(start);
+  char* end = reinterpret_cast<char*>(start + size);
+  __builtin___clear_cache(beg, end);
+#elif defined(DART_HOST_OS_FUCHSIA)
+  zx_status_t result = zx_cache_flush(reinterpret_cast<const void*>(start),
+                                      size, ZX_CACHE_FLUSH_INSN);
+  ASSERT(result == ZX_OK);
+#else
+#error FlushICache not implemented for this OS
+#endif
+
+#endif
+}
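+
+// Typical call site (illustrative): after the JIT patches a call target,
+// something like
+//   CPU::FlushICache(patch_start, patch_size);
+// keeps the instruction stream coherent on cores with split I/D caches.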
+
+const char* CPU::Id() {
+  return
+#if defined(USING_SIMULATOR)
+      "sim"
+#endif  // defined(USING_SIMULATOR)
+#if defined(TARGET_ARCH_RISCV32)
+      "riscv32";
+#elif defined(TARGET_ARCH_RISCV64)
+      "riscv64";
+#else
+#error What XLEN?
+#endif
+}
+
+const char* HostCPUFeatures::hardware_ = NULL;
+#if defined(DEBUG)
+bool HostCPUFeatures::initialized_ = false;
+#endif
+
+#if !defined(USING_SIMULATOR)
+void HostCPUFeatures::Init() {
+  CpuInfo::Init();
+  hardware_ = CpuInfo::GetCpuModel();
+#if defined(DEBUG)
+  initialized_ = true;
+#endif
+}
+
+void HostCPUFeatures::Cleanup() {
+  DEBUG_ASSERT(initialized_);
+#if defined(DEBUG)
+  initialized_ = false;
+#endif
+  ASSERT(hardware_ != NULL);
+  free(const_cast<char*>(hardware_));
+  hardware_ = NULL;
+  CpuInfo::Cleanup();
+}
+
+#else  // !defined(USING_SIMULATOR)
+
+void HostCPUFeatures::Init() {
+  CpuInfo::Init();
+  hardware_ = CpuInfo::GetCpuModel();
+#if defined(DEBUG)
+  initialized_ = true;
+#endif
+}
+
+void HostCPUFeatures::Cleanup() {
+  DEBUG_ASSERT(initialized_);
+#if defined(DEBUG)
+  initialized_ = false;
+#endif
+  ASSERT(hardware_ != NULL);
+  free(const_cast<char*>(hardware_));
+  hardware_ = NULL;
+  CpuInfo::Cleanup();
+}
+#endif  // !defined(USING_SIMULATOR)
+
+}  // namespace dart
+
+#endif  // defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
diff --git a/runtime/vm/cpu_riscv.h b/runtime/vm/cpu_riscv.h
new file mode 100644
index 0000000..f891a12
--- /dev/null
+++ b/runtime/vm/cpu_riscv.h
@@ -0,0 +1,51 @@
+// Copyright (c) 2021, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#ifndef RUNTIME_VM_CPU_RISCV_H_
+#define RUNTIME_VM_CPU_RISCV_H_
+
+#if !defined(RUNTIME_VM_CPU_H_)
+#error Do not include cpu_riscv.h directly; use cpu.h instead.
+#endif
+
+#include "vm/allocation.h"
+#include "vm/simulator.h"
+
+namespace dart {
+
+// TargetCPUFeatures gives CPU features for the architecture that we are
+// generating code for. HostCPUFeatures gives the CPU features for the
+// architecture that we are actually running on. When the architectures
+// are the same, TargetCPUFeatures will query HostCPUFeatures. When they are
+// different (i.e. we are running in a simulator), HostCPUFeatures will
+// additionally mock the options needed for the target architecture so that
+// they may be altered for testing.
+
+class HostCPUFeatures : public AllStatic {
+ public:
+  static void Init();
+  static void Cleanup();
+  static const char* hardware() {
+    DEBUG_ASSERT(initialized_);
+    return hardware_;
+  }
+
+ private:
+  static const char* hardware_;
+#if defined(DEBUG)
+  static bool initialized_;
+#endif
+};
+
+class TargetCPUFeatures : public AllStatic {
+ public:
+  static void Init() { HostCPUFeatures::Init(); }
+  static void Cleanup() { HostCPUFeatures::Cleanup(); }
+  static const char* hardware() { return HostCPUFeatures::hardware(); }
+  static bool double_truncate_round_supported() { return false; }
+};
+
+}  // namespace dart
+
+#endif  // RUNTIME_VM_CPU_RISCV_H_
diff --git a/runtime/vm/cpu_test.cc b/runtime/vm/cpu_test.cc
index d902713..5e59d31 100644
--- a/runtime/vm/cpu_test.cc
+++ b/runtime/vm/cpu_test.cc
@@ -26,6 +26,18 @@
 #else   // defined(HOST_ARCH_ARM64)
   EXPECT_STREQ("simarm64", CPU::Id());
 #endif  // defined(HOST_ARCH_ARM64)
+#elif defined(TARGET_ARCH_RISCV32)
+#if defined(HOST_ARCH_RISCV32)
+  EXPECT_STREQ("riscv32", CPU::Id());
+#else   // defined(HOST_ARCH_RISCV32)
+  EXPECT_STREQ("simriscv32", CPU::Id());
+#endif  // defined(HOST_ARCH_RISCV32)
+#elif defined(TARGET_ARCH_RISCV64)
+#if defined(HOST_ARCH_RISCV64)
+  EXPECT_STREQ("riscv64", CPU::Id());
+#else   // defined(HOST_ARCH_RISCV64)
+  EXPECT_STREQ("simriscv64", CPU::Id());
+#endif  // defined(HOST_ARCH_RISCV64)
 #else
 #error Architecture was not detected as supported by Dart.
 #endif
diff --git a/runtime/vm/cpuinfo.h b/runtime/vm/cpuinfo.h
index bd900ff..4634057 100644
--- a/runtime/vm/cpuinfo.h
+++ b/runtime/vm/cpuinfo.h
@@ -29,6 +29,9 @@
   // Use system calls.
   kCpuInfoSystem,
 
+  // Don't query anything.
+  kCpuInfoNone,
+
   // Use whatever the default is for a particular OS:
   // Linux, Windows -> CpuId,
   // Android, MacOS -> System.
diff --git a/runtime/vm/cpuinfo_linux.cc b/runtime/vm/cpuinfo_linux.cc
index 08621ed..27cc16e 100644
--- a/runtime/vm/cpuinfo_linux.cc
+++ b/runtime/vm/cpuinfo_linux.cc
@@ -45,6 +45,10 @@
   fields_[kCpuInfoArchitecture] = "CPU architecture";
   method_ = kCpuInfoSystem;
   ProcCpuInfo::Init();
+#elif defined(HOST_ARCH_RISCV32) || defined(HOST_ARCH_RISCV64)
+  // We only rely on the base Linux configuration of IMAFDC, so we don't
+  // need dynamic feature detection.
+  method_ = kCpuInfoNone;
 #else
 #error Unrecognized target architecture
 #endif
@@ -53,9 +57,10 @@
 void CpuInfo::Cleanup() {
   if (method_ == kCpuInfoCpuId) {
     CpuId::Cleanup();
-  } else {
-    ASSERT(method_ == kCpuInfoSystem);
+  } else if (method_ == kCpuInfoSystem) {
     ProcCpuInfo::Cleanup();
+  } else {
+    ASSERT(method_ == kCpuInfoNone);
   }
 }
 
@@ -65,18 +70,20 @@
     bool contains = (strstr(field, search_string) != NULL);
     free(const_cast<char*>(field));
     return contains;
-  } else {
-    ASSERT(method_ == kCpuInfoSystem);
+  } else if (method_ == kCpuInfoSystem) {
     return ProcCpuInfo::FieldContains(FieldName(idx), search_string);
+  } else {
+    UNREACHABLE();
   }
 }
 
 const char* CpuInfo::ExtractField(CpuInfoIndices idx) {
   if (method_ == kCpuInfoCpuId) {
     return CpuId::field(idx);
-  } else {
-    ASSERT(method_ == kCpuInfoSystem);
+  } else if (method_ == kCpuInfoSystem) {
     return ProcCpuInfo::ExtractField(FieldName(idx));
+  } else {
+    UNREACHABLE();
   }
 }
 
@@ -86,9 +93,12 @@
            (strcmp(field, fields_[kCpuInfoModel]) == 0) ||
            (strcmp(field, fields_[kCpuInfoHardware]) == 0) ||
            (strcmp(field, fields_[kCpuInfoFeatures]) == 0);
-  } else {
-    ASSERT(method_ == kCpuInfoSystem);
+  } else if (method_ == kCpuInfoSystem) {
     return ProcCpuInfo::HasField(field);
+  } else if (method_ == kCpuInfoNone) {
+    return false;
+  } else {
+    UNREACHABLE();
   }
 }
 
diff --git a/runtime/vm/dart.cc b/runtime/vm/dart.cc
index 1f74527..a38e3c7 100644
--- a/runtime/vm/dart.cc
+++ b/runtime/vm/dart.cc
@@ -63,6 +63,7 @@
 DebugInfo* Dart::pprof_symbol_generator_ = NULL;
 ReadOnlyHandles* Dart::predefined_handles_ = NULL;
 Snapshot::Kind Dart::vm_snapshot_kind_ = Snapshot::kInvalid;
+Dart_ThreadStartCallback Dart::thread_start_callback_ = NULL;
 Dart_ThreadExitCallback Dart::thread_exit_callback_ = NULL;
 Dart_FileOpenCallback Dart::file_open_callback_ = NULL;
 Dart_FileReadCallback Dart::file_read_callback_ = NULL;
@@ -254,6 +255,7 @@
                      Dart_IsolateShutdownCallback shutdown,
                      Dart_IsolateCleanupCallback cleanup,
                      Dart_IsolateGroupCleanupCallback cleanup_group,
+                     Dart_ThreadStartCallback thread_start,
                      Dart_ThreadExitCallback thread_exit,
                      Dart_FileOpenCallback file_open,
                      Dart_FileReadCallback file_read,
@@ -296,6 +298,7 @@
 
   UntaggedFrame::Init();
 
+  set_thread_start_callback(thread_start);
   set_thread_exit_callback(thread_exit);
   SetFileCallbacks(file_open, file_read, file_write, file_close);
   set_entropy_source_callback(entropy_source);
@@ -532,6 +535,7 @@
                  Dart_IsolateShutdownCallback shutdown,
                  Dart_IsolateCleanupCallback cleanup,
                  Dart_IsolateGroupCleanupCallback cleanup_group,
+                 Dart_ThreadStartCallback thread_start,
                  Dart_ThreadExitCallback thread_exit,
                  Dart_FileOpenCallback file_open,
                  Dart_FileReadCallback file_read,
@@ -552,9 +556,9 @@
   char* retval =
       DartInit(vm_isolate_snapshot, instructions_snapshot, create_group,
                initialize_isolate, shutdown, cleanup, cleanup_group,
-               thread_exit, file_open, file_read, file_write, file_close,
-               entropy_source, get_service_assets, start_kernel_isolate,
-               observer, post_task, post_task_data);
+               thread_start, thread_exit, file_open, file_read, file_write,
+               file_close, entropy_source, get_service_assets,
+               start_kernel_isolate, observer, post_task, post_task_data);
   if (retval != NULL) {
     init_state_.ResetInitializing();
     return retval;
@@ -1191,7 +1195,10 @@
 #else
     buffer.AddString(" x64-sysv");
 #endif
-
+#elif defined(TARGET_ARCH_RISCV32)
+    buffer.AddString(" riscv32");
+#elif defined(TARGET_ARCH_RISCV64)
+    buffer.AddString(" riscv64");
 #else
 #error What architecture?
 #endif
diff --git a/runtime/vm/dart.h b/runtime/vm/dart.h
index a8c4a56..5a170c2 100644
--- a/runtime/vm/dart.h
+++ b/runtime/vm/dart.h
@@ -36,6 +36,7 @@
                     Dart_IsolateShutdownCallback shutdown,
                     Dart_IsolateCleanupCallback cleanup,
                     Dart_IsolateGroupCleanupCallback cleanup_group,
+                    Dart_ThreadStartCallback thread_start,
                     Dart_ThreadExitCallback thread_exit,
                     Dart_FileOpenCallback file_open,
                     Dart_FileReadCallback file_read,
@@ -119,6 +120,12 @@
                               Snapshot::Kind kind);
   static Snapshot::Kind vm_snapshot_kind() { return vm_snapshot_kind_; }
 
+  static Dart_ThreadStartCallback thread_start_callback() {
+    return thread_start_callback_;
+  }
+  static void set_thread_start_callback(Dart_ThreadStartCallback cback) {
+    thread_start_callback_ = cback;
+  }
   static Dart_ThreadExitCallback thread_exit_callback() {
     return thread_exit_callback_;
   }
@@ -176,6 +183,7 @@
                         Dart_IsolateShutdownCallback shutdown,
                         Dart_IsolateCleanupCallback cleanup,
                         Dart_IsolateGroupCleanupCallback cleanup_group,
+                        Dart_ThreadStartCallback thread_start,
                         Dart_ThreadExitCallback thread_exit,
                         Dart_FileOpenCallback file_open,
                         Dart_FileReadCallback file_read,
@@ -199,6 +207,7 @@
   static DebugInfo* pprof_symbol_generator_;
   static ReadOnlyHandles* predefined_handles_;
   static Snapshot::Kind vm_snapshot_kind_;
+  static Dart_ThreadStartCallback thread_start_callback_;
   static Dart_ThreadExitCallback thread_exit_callback_;
   static Dart_FileOpenCallback file_open_callback_;
   static Dart_FileReadCallback file_read_callback_;
diff --git a/runtime/vm/dart_api_impl.cc b/runtime/vm/dart_api_impl.cc
index 7106d5b..42021bd 100644
--- a/runtime/vm/dart_api_impl.cc
+++ b/runtime/vm/dart_api_impl.cc
@@ -1206,14 +1206,15 @@
         "Invalid Dart_InitializeParams version.");
   }
 
-  return Dart::Init(
-      params->vm_snapshot_data, params->vm_snapshot_instructions,
-      params->create_group, params->initialize_isolate,
-      params->shutdown_isolate, params->cleanup_isolate, params->cleanup_group,
-      params->thread_exit, params->file_open, params->file_read,
-      params->file_write, params->file_close, params->entropy_source,
-      params->get_service_assets, params->start_kernel_isolate,
-      params->code_observer, params->post_task, params->post_task_data);
+  return Dart::Init(params->vm_snapshot_data, params->vm_snapshot_instructions,
+                    params->create_group, params->initialize_isolate,
+                    params->shutdown_isolate, params->cleanup_isolate,
+                    params->cleanup_group, params->thread_start,
+                    params->thread_exit, params->file_open, params->file_read,
+                    params->file_write, params->file_close,
+                    params->entropy_source, params->get_service_assets,
+                    params->start_kernel_isolate, params->code_observer,
+                    params->post_task, params->post_task_data);
 }
 
 DART_EXPORT char* Dart_Cleanup() {
diff --git a/runtime/vm/debugger_riscv.cc b/runtime/vm/debugger_riscv.cc
new file mode 100644
index 0000000..37e3804
--- /dev/null
+++ b/runtime/vm/debugger_riscv.cc
@@ -0,0 +1,76 @@
+// Copyright (c) 2021, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/globals.h"
+#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+
+#include "vm/code_patcher.h"
+#include "vm/cpu.h"
+#include "vm/debugger.h"
+#include "vm/instructions.h"
+#include "vm/stub_code.h"
+
+namespace dart {
+
+#ifndef PRODUCT
+
+CodePtr CodeBreakpoint::OrigStubAddress() const {
+  return saved_value_;
+}
+
+void CodeBreakpoint::PatchCode() {
+  ASSERT(!IsEnabled());
+  const Code& code = Code::Handle(code_);
+  switch (breakpoint_kind_) {
+    case UntaggedPcDescriptors::kIcCall: {
+      Object& data = Object::Handle();
+      saved_value_ = CodePatcher::GetInstanceCallAt(pc_, code, &data);
+      CodePatcher::PatchInstanceCallAt(pc_, code, data,
+                                       StubCode::ICCallBreakpoint());
+      break;
+    }
+    case UntaggedPcDescriptors::kUnoptStaticCall: {
+      saved_value_ = CodePatcher::GetStaticCallTargetAt(pc_, code);
+      CodePatcher::PatchPoolPointerCallAt(
+          pc_, code, StubCode::UnoptStaticCallBreakpoint());
+      break;
+    }
+    case UntaggedPcDescriptors::kRuntimeCall: {
+      saved_value_ = CodePatcher::GetStaticCallTargetAt(pc_, code);
+      CodePatcher::PatchPoolPointerCallAt(pc_, code,
+                                          StubCode::RuntimeCallBreakpoint());
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
+void CodeBreakpoint::RestoreCode() {
+  ASSERT(IsEnabled());
+  const Code& code = Code::Handle(code_);
+  switch (breakpoint_kind_) {
+    case UntaggedPcDescriptors::kIcCall: {
+      Object& data = Object::Handle();
+      CodePatcher::GetInstanceCallAt(pc_, code, &data);
+      CodePatcher::PatchInstanceCallAt(pc_, code, data,
+                                       Code::Handle(saved_value_));
+      break;
+    }
+    case UntaggedPcDescriptors::kUnoptStaticCall:
+    case UntaggedPcDescriptors::kRuntimeCall: {
+      CodePatcher::PatchPoolPointerCallAt(pc_, code,
+                                          Code::Handle(saved_value_));
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
+#endif  // !PRODUCT
+
+}  // namespace dart
+
+#endif  // defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
diff --git a/runtime/vm/dispatch_table.cc b/runtime/vm/dispatch_table.cc
index 56e2449..7aa3eca 100644
--- a/runtime/vm/dispatch_table.cc
+++ b/runtime/vm/dispatch_table.cc
@@ -18,6 +18,12 @@
 #elif defined(TARGET_ARCH_ARM64)
   // Max consecutive sub immediate value
   return 4096;
+#elif defined(TARGET_ARCH_RISCV32)
+  // Max consecutive sub immediate value
+  return 2048 / 4;
+#elif defined(TARGET_ARCH_RISCV64)
+  // Max consecutive sub immediate value
+  return 2048 / 8;
 #else
   // No AOT on IA32
   UNREACHABLE();
@@ -35,6 +41,12 @@
 #elif defined(TARGET_ARCH_ARM64)
   // Origin + Max consecutive add immediate value
   return 8192;
+#elif defined(TARGET_ARCH_RISCV32)
+  // Origin + Max consecutive add immediate value
+  return 4096 / 4;
+#elif defined(TARGET_ARCH_RISCV64)
+  // Origin + Max consecutive add immediate value
+  return 4096 / 8;
 #else
   // No AOT on IA32
   UNREACHABLE();
diff --git a/runtime/vm/elf.cc b/runtime/vm/elf.cc
index 28e8224..77d6949 100644
--- a/runtime/vm/elf.cc
+++ b/runtime/vm/elf.cc
@@ -1908,6 +1908,8 @@
   stream->WriteHalf(elf::EM_ARM);
 #elif defined(TARGET_ARCH_ARM64)
   stream->WriteHalf(elf::EM_AARCH64);
+#elif defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+  stream->WriteHalf(elf::EM_RISCV);
 #else
   FATAL("Unknown ELF architecture");
 #endif
diff --git a/runtime/vm/globals.h b/runtime/vm/globals.h
index 46cd706..1a3d6c0 100644
--- a/runtime/vm/globals.h
+++ b/runtime/vm/globals.h
@@ -95,11 +95,10 @@
 #define ONLY_IN_PRECOMPILED(code)
 #endif  // defined(DART_PRECOMPILED_RUNTIME)
 
-#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64) ||                  \
-    defined(TARGET_ARCH_X64)
-#define ONLY_IN_ARM_ARM64_X64(code) code
+#if defined(TARGET_ARCH_IA32)
+#define NOT_IN_IA32(code)
 #else
-#define ONLY_IN_ARM_ARM64_X64(code)
+#define NOT_IN_IA32(code) code
 #endif
 
 #if defined(DART_PRECOMPILED_RUNTIME)
@@ -198,13 +197,15 @@
 #endif  // !defined(DART_HOST_OS_WINDOWS))
 
 #if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64) ||                  \
-    defined(TARGET_ARCH_X64)
+    defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_RISCV32) ||                \
+    defined(TARGET_ARCH_RISCV64)
 #define TARGET_USES_OBJECT_POOL 1
 #endif
 
 #if defined(DART_PRECOMPILER) &&                                               \
     (defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_ARM) ||                   \
-     defined(TARGET_ARCH_ARM64))
+     defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_RISCV32) ||             \
+     defined(TARGET_ARCH_RISCV64))
 #define DART_SUPPORT_PRECOMPILATION 1
 #endif
 
diff --git a/runtime/vm/heap/freelist_test.cc b/runtime/vm/heap/freelist_test.cc
index 5778886..6a0c19c 100644
--- a/runtime/vm/heap/freelist_test.cc
+++ b/runtime/vm/heap/freelist_test.cc
@@ -213,6 +213,8 @@
   const uint8_t ret[4] = {0x1e, 0xff, 0x2f, 0xe1};  // bx lr
 #elif defined(HOST_ARCH_ARM64)
   const uint8_t ret[4] = {0xc0, 0x03, 0x5f, 0xd6};  // ret
+#elif defined(HOST_ARCH_RISCV32) || defined(HOST_ARCH_RISCV64)
+  const uint8_t ret[2] = {0x82, 0x80};  // c.ret
 #else
 #error "Unknown architecture."
 #endif
diff --git a/runtime/vm/image_snapshot.cc b/runtime/vm/image_snapshot.cc
index 36c7d34..3661d07 100644
--- a/runtime/vm/image_snapshot.cc
+++ b/runtime/vm/image_snapshot.cc
@@ -884,7 +884,8 @@
     ASSERT_EQUAL(remaining, 4);
     bytes_written += WriteBytes(&kBreakInstructionFiller, remaining);
   }
-#elif defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_IA32)
+#elif defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_IA32) ||                 \
+    defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
   // The break instruction is a single byte, repeated to fill a word.
   bytes_written += WriteBytes(&kBreakInstructionFiller, remaining);
 #else
diff --git a/runtime/vm/instructions.h b/runtime/vm/instructions.h
index 76b929d..5dbe5d6 100644
--- a/runtime/vm/instructions.h
+++ b/runtime/vm/instructions.h
@@ -15,6 +15,8 @@
 #include "vm/instructions_arm.h"
 #elif defined(TARGET_ARCH_ARM64)
 #include "vm/instructions_arm64.h"
+#elif defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+#include "vm/instructions_riscv.h"
 #else
 #error Unknown architecture.
 #endif
diff --git a/runtime/vm/instructions_arm.cc b/runtime/vm/instructions_arm.cc
index 3a9f84d..ecacd45 100644
--- a/runtime/vm/instructions_arm.cc
+++ b/runtime/vm/instructions_arm.cc
@@ -87,30 +87,6 @@
 // Decodes a load sequence ending at 'end' (the last instruction of the load
 // sequence is the instruction before the one at end).  Returns a pointer to
 // the first instruction in the sequence.  Returns the register being loaded
-// and the loaded object in the output parameters 'reg' and 'obj'
-// respectively.
-uword InstructionPattern::DecodeLoadObject(uword end,
-                                           const ObjectPool& object_pool,
-                                           Register* reg,
-                                           Object* obj) {
-  uword start = 0;
-  Instr* instr = Instr::At(end - Instr::kInstrSize);
-  if ((instr->InstructionBits() & 0xfff00000) == 0xe5900000) {
-    // ldr reg, [reg, #+offset]
-    intptr_t index = 0;
-    start = DecodeLoadWordFromPool(end, reg, &index);
-    *obj = object_pool.ObjectAt(index);
-  } else {
-    intptr_t value = 0;
-    start = DecodeLoadWordImmediate(end, reg, &value);
-    *obj = static_cast<ObjectPtr>(value);
-  }
-  return start;
-}
-
-// Decodes a load sequence ending at 'end' (the last instruction of the load
-// sequence is the instruction before the one at end).  Returns a pointer to
-// the first instruction in the sequence.  Returns the register being loaded
 // and the loaded immediate value in the output parameters 'reg' and 'value'
 // respectively.
 uword InstructionPattern::DecodeLoadWordImmediate(uword end,
diff --git a/runtime/vm/instructions_arm.h b/runtime/vm/instructions_arm.h
index 5bfc9bd..96ccf6c 100644
--- a/runtime/vm/instructions_arm.h
+++ b/runtime/vm/instructions_arm.h
@@ -32,16 +32,6 @@
   // Decodes a load sequence ending at 'end' (the last instruction of the
   // load sequence is the instruction before the one at end).  Returns the
   // address of the first instruction in the sequence.  Returns the register
-  // being loaded and the loaded object in the output parameters 'reg' and
-  // 'obj' respectively.
-  static uword DecodeLoadObject(uword end,
-                                const ObjectPool& object_pool,
-                                Register* reg,
-                                Object* obj);
-
-  // Decodes a load sequence ending at 'end' (the last instruction of the
-  // load sequence is the instruction before the one at end).  Returns the
-  // address of the first instruction in the sequence.  Returns the register
   // being loaded and the loaded immediate value in the output parameters
   // 'reg' and 'value' respectively.
   static uword DecodeLoadWordImmediate(uword end,
diff --git a/runtime/vm/instructions_arm64.cc b/runtime/vm/instructions_arm64.cc
index 35d0250..0c64963 100644
--- a/runtime/vm/instructions_arm64.cc
+++ b/runtime/vm/instructions_arm64.cc
@@ -88,34 +88,6 @@
 // Decodes a load sequence ending at 'end' (the last instruction of the load
 // sequence is the instruction before the one at end).  Returns a pointer to
 // the first instruction in the sequence.  Returns the register being loaded
-// and the loaded object in the output parameters 'reg' and 'obj'
-// respectively.
-uword InstructionPattern::DecodeLoadObject(uword end,
-                                           const ObjectPool& object_pool,
-                                           Register* reg,
-                                           Object* obj) {
-  // 1. LoadWordFromPool
-  // or
-  // 2. LoadDecodableImmediate
-  uword start = 0;
-  Instr* instr = Instr::At(end - Instr::kInstrSize);
-  if (instr->IsLoadStoreRegOp()) {
-    // Case 1.
-    intptr_t index = 0;
-    start = DecodeLoadWordFromPool(end, reg, &index);
-    *obj = object_pool.ObjectAt(index);
-  } else {
-    // Case 2.
-    intptr_t value = 0;
-    start = DecodeLoadWordImmediate(end, reg, &value);
-    *obj = static_cast<ObjectPtr>(value);
-  }
-  return start;
-}
-
-// Decodes a load sequence ending at 'end' (the last instruction of the load
-// sequence is the instruction before the one at end).  Returns a pointer to
-// the first instruction in the sequence.  Returns the register being loaded
 // and the loaded immediate value in the output parameters 'reg' and 'value'
 // respectively.
 uword InstructionPattern::DecodeLoadWordImmediate(uword end,
@@ -344,6 +316,19 @@
   }
   // TODO(rmacnak): Loads with offsets beyond 12 bits.
 
+  if (instr->IsAddSubImmOp() && instr->SFField() &&
+      (instr->RnField() == NULL_REG)) {
+    uint32_t imm = (instr->Bit(22) == 1) ? (instr->Imm12Field() << 12)
+                                         : (instr->Imm12Field());
+    if (imm == kTrueOffsetFromNull) {
+      *obj = Object::bool_true().ptr();
+      return true;
+    } else if (imm == kFalseOffsetFromNull) {
+      *obj = Object::bool_false().ptr();
+      return true;
+    }
+  }
+
   return false;
 }
 
diff --git a/runtime/vm/instructions_arm64.h b/runtime/vm/instructions_arm64.h
index ecfecb6..b135c63 100644
--- a/runtime/vm/instructions_arm64.h
+++ b/runtime/vm/instructions_arm64.h
@@ -31,16 +31,6 @@
   // Decodes a load sequence ending at 'end' (the last instruction of the
   // load sequence is the instruction before the one at end).  Returns the
   // address of the first instruction in the sequence.  Returns the register
-  // being loaded and the loaded object in the output parameters 'reg' and
-  // 'obj' respectively.
-  static uword DecodeLoadObject(uword end,
-                                const ObjectPool& object_pool,
-                                Register* reg,
-                                Object* obj);
-
-  // Decodes a load sequence ending at 'end' (the last instruction of the
-  // load sequence is the instruction before the one at end).  Returns the
-  // address of the first instruction in the sequence.  Returns the register
   // being loaded and the loaded immediate value in the output parameters
   // 'reg' and 'value' respectively.
   static uword DecodeLoadWordImmediate(uword end,
diff --git a/runtime/vm/instructions_riscv.cc b/runtime/vm/instructions_riscv.cc
new file mode 100644
index 0000000..161db93
--- /dev/null
+++ b/runtime/vm/instructions_riscv.cc
@@ -0,0 +1,462 @@
+// Copyright (c) 2021, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/globals.h"  // Needed here to get TARGET_ARCH_RISCV*.
+#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+
+#include "vm/instructions.h"
+#include "vm/instructions_riscv.h"
+
+#include "vm/constants.h"
+#include "vm/cpu.h"
+#include "vm/object.h"
+#include "vm/object_store.h"
+#include "vm/reverse_pc_lookup_cache.h"
+
+namespace dart {
+
+CallPattern::CallPattern(uword pc, const Code& code)
+    : object_pool_(ObjectPool::Handle(code.GetObjectPool())),
+      target_code_pool_index_(-1) {
+  ASSERT(code.ContainsInstructionAt(pc));
+  //          [lui,add,]lx CODE_REG, ##(pp)
+  // xxxxxxxx lx ra, ##(CODE_REG)
+  //     xxxx jalr ra
+
+  // Last instruction: jalr ra.
+  ASSERT(*reinterpret_cast<uint16_t*>(pc - 2) == 0x9082);
+  Register reg;
+  InstructionPattern::DecodeLoadWordFromPool(pc - 6, &reg,
+                                             &target_code_pool_index_);
+  ASSERT(reg == CODE_REG);
+}
+
+ICCallPattern::ICCallPattern(uword pc, const Code& code)
+    : object_pool_(ObjectPool::Handle(code.GetObjectPool())),
+      target_pool_index_(-1),
+      data_pool_index_(-1) {
+  ASSERT(code.ContainsInstructionAt(pc));
+  //          [lui,add,]lx IC_DATA_REG, ##(pp)
+  //          [lui,add,]lx CODE_REG, ##(pp)
+  // xxxxxxxx lx ra, ##(CODE_REG)
+  //     xxxx jalr ra
+
+  // Last instruction: jalr ra.
+  ASSERT(*reinterpret_cast<uint16_t*>(pc - 2) == 0x9082);
+
+  Register reg;
+  uword data_load_end = InstructionPattern::DecodeLoadWordFromPool(
+      pc - 6, &reg, &target_pool_index_);
+  ASSERT(reg == CODE_REG);
+
+  InstructionPattern::DecodeLoadWordFromPool(data_load_end, &reg,
+                                             &data_pool_index_);
+  ASSERT(reg == IC_DATA_REG);
+}
+
+NativeCallPattern::NativeCallPattern(uword pc, const Code& code)
+    : object_pool_(ObjectPool::Handle(code.GetObjectPool())),
+      end_(pc),
+      native_function_pool_index_(-1),
+      target_code_pool_index_(-1) {
+  ASSERT(code.ContainsInstructionAt(pc));
+  //          [lui,add,]lx t5, ##(pp)
+  //          [lui,add,]lx CODE_REG, ##(pp)
+  // xxxxxxxx lx ra, ##(CODE_REG)
+  //     xxxx jalr ra
+
+  // Last instruction: jalr ra.
+  ASSERT(*reinterpret_cast<uint16_t*>(pc - 2) == 0x9082);
+
+  Register reg;
+  uword native_function_load_end = InstructionPattern::DecodeLoadWordFromPool(
+      pc - 6, &reg, &target_code_pool_index_);
+  ASSERT(reg == CODE_REG);
+  InstructionPattern::DecodeLoadWordFromPool(native_function_load_end, &reg,
+                                             &native_function_pool_index_);
+  ASSERT(reg == T5);
+}
+
+CodePtr NativeCallPattern::target() const {
+  return static_cast<CodePtr>(object_pool_.ObjectAt(target_code_pool_index_));
+}
+
+void NativeCallPattern::set_target(const Code& target) const {
+  object_pool_.SetObjectAt(target_code_pool_index_, target);
+  // No need to flush the instruction cache, since the code is not modified.
+}
+
+NativeFunction NativeCallPattern::native_function() const {
+  return reinterpret_cast<NativeFunction>(
+      object_pool_.RawValueAt(native_function_pool_index_));
+}
+
+void NativeCallPattern::set_native_function(NativeFunction func) const {
+  object_pool_.SetRawValueAt(native_function_pool_index_,
+                             reinterpret_cast<uword>(func));
+}
+
+// Decodes a load sequence ending at 'end' (the last instruction of the load
+// sequence is the instruction before the one at end).  Returns a pointer to
+// the first instruction in the sequence.  Returns the register being loaded
+// and the loaded immediate value in the output parameters 'reg' and 'value'
+// respectively.
+uword InstructionPattern::DecodeLoadWordImmediate(uword end,
+                                                  Register* reg,
+                                                  intptr_t* value) {
+  UNIMPLEMENTED();
+  return 0;
+}
+
+static bool DecodeLoadX(uword end,
+                        Register* dst,
+                        Register* base,
+                        intptr_t* offset,
+                        intptr_t* length) {
+  Instr instr(*reinterpret_cast<uint32_t*>(end - 4));
+#if XLEN == 32
+  if (instr.opcode() == LOAD && instr.funct3() == LW) {
+#elif XLEN == 64
+  if (instr.opcode() == LOAD && instr.funct3() == LD) {
+#endif
+    *dst = instr.rd();
+    *base = instr.rs1();
+    *offset = instr.itype_imm();
+    *length = 4;
+    return true;
+  }
+
+  CInstr cinstr(*reinterpret_cast<uint16_t*>(end - 2));
+#if XLEN == 32
+  if (cinstr.opcode() == C_LW) {
+#elif XLEN == 64
+  if (cinstr.opcode() == C_LD) {
+#endif
+    *dst = cinstr.rdp();
+    *base = cinstr.rs1p();
+#if XLEN == 32
+    *offset = cinstr.mem4_imm();
+#elif XLEN == 64
+    *offset = cinstr.mem8_imm();
+#endif
+    *length = 2;
+    return true;
+  }
+
+  return false;
+}
+
+static bool DecodeLUI(uword end,
+                      Register* dst,
+                      intptr_t* imm,
+                      intptr_t* length) {
+  Instr instr(*reinterpret_cast<uint32_t*>(end - 4));
+  if (instr.opcode() == LUI) {
+    *dst = instr.rd();
+    *imm = instr.utype_imm();
+    *length = 4;
+    return true;
+  }
+
+  CInstr cinstr(*reinterpret_cast<uint16_t*>(end - 2));
+  if (cinstr.opcode() == C_LUI) {
+    *dst = cinstr.rd();
+    *imm = cinstr.u_imm();
+    *length = 2;
+    return true;
+  }
+
+  return false;
+}
+
+// See comment in instructions_arm64.h
+uword InstructionPattern::DecodeLoadWordFromPool(uword end,
+                                                 Register* reg,
+                                                 intptr_t* index) {
+  // [c.]lx dst, offset(pp)
+  // or
+  // [c.]lui dst, hi
+  // c.add dst, dst, pp
+  // [c.]lx dst, lo(dst)
+
+  Register base;
+  intptr_t lo, length;
+  if (!DecodeLoadX(end, reg, &base, &lo, &length)) {
+    UNREACHABLE();
+  }
+
+  if (base == PP) {
+    // PP is untagged on RISC-V.
+    *index = ObjectPool::IndexFromOffset(lo - kHeapObjectTag);
+    return end - length;
+  }
+  ASSERT(base == *reg);
+  end -= length;
+
+  CInstr add_instr(*reinterpret_cast<uint16_t*>(end - 2));
+  // Reads as C_MV, not C_ADD: C_ADD's defining bits extend past the field
+  // that opcode() extracts.
+  ASSERT(add_instr.opcode() == C_MV);
+  ASSERT(add_instr.rd() == base);
+  ASSERT(add_instr.rs1() == base);
+  ASSERT(add_instr.rs2() == PP);
+  end -= 2;
+
+  Register dst;
+  intptr_t hi;
+  if (!DecodeLUI(end, &dst, &hi, &length)) {
+    UNREACHABLE();
+  }
+  ASSERT(dst == base);
+  // PP is untagged on RISC-V.
+  *index = ObjectPool::IndexFromOffset(hi + lo - kHeapObjectTag);
+  return end - length;
+}
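+
+// Worked example for the far-pool case above (illustrative values): for pool
+// offset 0x1834, the low 12 bits sign-extend to lo = -0x7cc, so the lui must
+// contribute hi = 0x1834 - (-0x7cc) = 0x2000; DecodeLUI returns that hi and
+// the pool index is recovered from hi + lo.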
+
+bool DecodeLoadObjectFromPoolOrThread(uword pc, const Code& code, Object* obj) {
+  ASSERT(code.ContainsInstructionAt(pc));
+  uint16_t parcel = *reinterpret_cast<uint16_t*>(pc);
+  if (IsCInstruction(parcel)) {
+    CInstr instr(parcel);
+#if XLEN == 32
+    if (instr.opcode() == C_LW) {
+      intptr_t offset = instr.mem4_imm();
+#elif XLEN == 64
+    if (instr.opcode() == C_LD) {
+      intptr_t offset = instr.mem8_imm();
+#endif
+      if (instr.rs1p() == PP) {
+        // PP is untagged on RISC-V.
+        ASSERT(Utils::IsAligned(offset, kWordSize));
+        intptr_t index = ObjectPool::IndexFromOffset(offset - kHeapObjectTag);
+        const ObjectPool& pool = ObjectPool::Handle(code.GetObjectPool());
+        if (!pool.IsNull() && (index < pool.Length()) &&
+            (pool.TypeAt(index) == ObjectPool::EntryType::kTaggedObject)) {
+          *obj = pool.ObjectAt(index);
+          return true;
+        }
+      } else if (instr.rs1p() == THR) {
+        return Thread::ObjectAtOffset(offset, obj);
+      }
+    }
+  } else {
+    Instr instr(*reinterpret_cast<uint32_t*>(pc));
+#if XLEN == 32
+    if (instr.opcode() == LOAD && instr.funct3() == LW) {
+#elif XLEN == 64
+    if (instr.opcode() == LOAD && instr.funct3() == LD) {
+#endif
+      intptr_t offset = instr.itype_imm();
+      if (instr.rs1() == PP) {
+        // PP is untagged on RISC-V.
+        ASSERT(Utils::IsAligned(offset, kWordSize));
+        intptr_t index = ObjectPool::IndexFromOffset(offset - kHeapObjectTag);
+        const ObjectPool& pool = ObjectPool::Handle(code.GetObjectPool());
+        if (!pool.IsNull() && (index < pool.Length()) &&
+            (pool.TypeAt(index) == ObjectPool::EntryType::kTaggedObject)) {
+          *obj = pool.ObjectAt(index);
+          return true;
+        }
+      } else if (instr.rs1() == THR) {
+        return Thread::ObjectAtOffset(offset, obj);
+      }
+    }
+    if ((instr.opcode() == OPIMM) && (instr.funct3() == ADDI) &&
+        (instr.rs1() == NULL_REG)) {
+      if (instr.itype_imm() == 0) {
+        *obj = Object::null();
+        return true;
+      }
+      if (instr.itype_imm() == kTrueOffsetFromNull) {
+        *obj = Object::bool_true().ptr();
+        return true;
+      }
+      if (instr.itype_imm() == kFalseOffsetFromNull) {
+        *obj = Object::bool_false().ptr();
+        return true;
+      }
+    }
+  }
+
+  // TODO(riscv): Loads with offsets beyond 12 bits.
+  return false;
+}
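+
+// A note on the OPIMM arm above: the VM lays out true and false at fixed
+// offsets from null, so `addi dst, NULL_REG, imm` materializes all three
+// constants without a pool load; those immediates are recognized here for
+// that reason.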
+
+// Encodes a load sequence ending at 'end'. Encodes a fixed length two
+// instruction load from the pool pointer in PP using the destination
+// register reg as a temporary for the base address.
+// Assumes that the location has already been validated for patching.
+void InstructionPattern::EncodeLoadWordFromPoolFixed(uword end,
+                                                     int32_t offset) {
+  UNIMPLEMENTED();
+}
+
+CodePtr CallPattern::TargetCode() const {
+  return static_cast<CodePtr>(object_pool_.ObjectAt(target_code_pool_index_));
+}
+
+void CallPattern::SetTargetCode(const Code& target) const {
+  object_pool_.SetObjectAt(target_code_pool_index_, target);
+  // No need to flush the instruction cache, since the code is not modified.
+}
+
+ObjectPtr ICCallPattern::Data() const {
+  return object_pool_.ObjectAt(data_pool_index_);
+}
+
+void ICCallPattern::SetData(const Object& data) const {
+  ASSERT(data.IsArray() || data.IsICData() || data.IsMegamorphicCache());
+  object_pool_.SetObjectAt(data_pool_index_, data);
+}
+
+CodePtr ICCallPattern::TargetCode() const {
+  return static_cast<CodePtr>(object_pool_.ObjectAt(target_pool_index_));
+}
+
+void ICCallPattern::SetTargetCode(const Code& target) const {
+  object_pool_.SetObjectAt(target_pool_index_, target);
+  // No need to flush the instruction cache, since the code is not modified.
+}
+
+SwitchableCallPatternBase::SwitchableCallPatternBase(
+    const ObjectPool& object_pool)
+    : object_pool_(object_pool), data_pool_index_(-1), target_pool_index_(-1) {}
+
+ObjectPtr SwitchableCallPatternBase::data() const {
+  return object_pool_.ObjectAt(data_pool_index_);
+}
+
+void SwitchableCallPatternBase::SetData(const Object& data) const {
+  ASSERT(!Object::Handle(object_pool_.ObjectAt(data_pool_index_)).IsCode());
+  object_pool_.SetObjectAt(data_pool_index_, data);
+}
+
+SwitchableCallPattern::SwitchableCallPattern(uword pc, const Code& code)
+    : SwitchableCallPatternBase(ObjectPool::Handle(code.GetObjectPool())) {
+  ASSERT(code.ContainsInstructionAt(pc));
+  UNIMPLEMENTED();
+}
+
+uword SwitchableCallPattern::target_entry() const {
+  return Code::Handle(Code::RawCast(object_pool_.ObjectAt(target_pool_index_)))
+      .MonomorphicEntryPoint();
+}
+
+void SwitchableCallPattern::SetTarget(const Code& target) const {
+  ASSERT(Object::Handle(object_pool_.ObjectAt(target_pool_index_)).IsCode());
+  object_pool_.SetObjectAt(target_pool_index_, target);
+}
+
+BareSwitchableCallPattern::BareSwitchableCallPattern(uword pc)
+    : SwitchableCallPatternBase(ObjectPool::Handle(
+          IsolateGroup::Current()->object_store()->global_object_pool())) {
+  //      [lui,add,]lx RA, ##(pp)
+  //      [lui,add,]lx IC_DATA_REG, ##(pp)
+  // xxxx jalr RA
+
+  // Last instruction: jalr ra.
+  ASSERT(*reinterpret_cast<uint16_t*>(pc - 2) == 0x9082);
+
+  Register reg;
+  uword target_load_end = InstructionPattern::DecodeLoadWordFromPool(
+      pc - 2, &reg, &data_pool_index_);
+  ASSERT_EQUAL(reg, IC_DATA_REG);
+
+  InstructionPattern::DecodeLoadWordFromPool(target_load_end, &reg,
+                                             &target_pool_index_);
+  ASSERT_EQUAL(reg, RA);
+}
+
+uword BareSwitchableCallPattern::target_entry() const {
+  return object_pool_.RawValueAt(target_pool_index_);
+}
+
+void BareSwitchableCallPattern::SetTarget(const Code& target) const {
+  ASSERT(object_pool_.TypeAt(target_pool_index_) ==
+         ObjectPool::EntryType::kImmediate);
+  object_pool_.SetRawValueAt(target_pool_index_,
+                             target.MonomorphicEntryPoint());
+}
+
+ReturnPattern::ReturnPattern(uword pc) : pc_(pc) {}
+
+bool ReturnPattern::IsValid() const {
+  return *reinterpret_cast<uint16_t*>(pc_) == 0x8082;
+}
+
+bool PcRelativeCallPattern::IsValid() const {
+  Instr auipc(*reinterpret_cast<uint32_t*>(pc_));
+  if (auipc.opcode() != AUIPC) return false;
+  Instr jalr(*reinterpret_cast<uint32_t*>(pc_ + 4));
+  if (jalr.opcode() != JALR) return false;
+  if (auipc.rd() != jalr.rs1()) return false;
+  if (jalr.rd() != RA) return false;
+  return true;
+}
+
+bool PcRelativeTailCallPattern::IsValid() const {
+  Instr auipc(*reinterpret_cast<uint32_t*>(pc_));
+  if (auipc.opcode() != AUIPC) return false;
+  Instr jr(*reinterpret_cast<uint32_t*>(pc_ + 4));
+  if (jr.opcode() != JALR) return false;
+  if (auipc.rd() != jr.rs1()) return false;
+  if (jr.rd() != ZR) return false;
+  return true;
+}
+
+void PcRelativeTrampolineJumpPattern::Initialize() {
+  UNREACHABLE();
+}
+
+int32_t PcRelativeTrampolineJumpPattern::distance() {
+  UNREACHABLE();
+  return 0;
+}
+
+void PcRelativeTrampolineJumpPattern::set_distance(int32_t distance) {
+  UNREACHABLE();
+}
+
+bool PcRelativeTrampolineJumpPattern::IsValid() const {
+  UNREACHABLE();
+  return false;
+}
+
+intptr_t TypeTestingStubCallPattern::GetSubtypeTestCachePoolIndex() {
+  // Calls to the type testing stubs look like:
+  //   lx s3, ...
+  //   lx Rn, idx(pp)
+  //   jalr s3
+  // where Rn = TypeTestABI::kSubtypeTestCacheReg.
+
+  // Ensure the caller of the type testing stub (whose return address is [pc_])
+  // branched via `jalr s3` or a pc-relative call.
+  if (*reinterpret_cast<uint16_t*>(pc_ - 2) == 0x9982) {  // jalr s3
+    // indirect call
+    //     xxxx c.jalr s3
+    Register reg;
+    intptr_t pool_index = -1;
+    InstructionPattern::DecodeLoadWordFromPool(pc_ - 2, &reg, &pool_index);
+    ASSERT_EQUAL(reg, TypeTestABI::kSubtypeTestCacheReg);
+    return pool_index;
+  } else {
+    ASSERT(FLAG_precompiled_mode);
+    // pc-relative call
+    // xxxxxxxx auipc ra, hi
+    // xxxxxxxx jalr ra, lo
+    Instr jalr(*reinterpret_cast<uint32_t*>(pc_ - 4));
+    ASSERT(jalr.opcode() == JALR);
+    Instr auipc(*reinterpret_cast<uint32_t*>(pc_ - 8));
+    ASSERT(auipc.opcode() == AUIPC);
+
+    Register reg;
+    intptr_t pool_index = -1;
+    InstructionPattern::DecodeLoadWordFromPool(pc_ - 8, &reg, &pool_index);
+    ASSERT_EQUAL(reg, TypeTestABI::kSubtypeTestCacheReg);
+    return pool_index;
+  }
+}
+
+}  // namespace dart
+
+#endif  // defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
diff --git a/runtime/vm/instructions_riscv.h b/runtime/vm/instructions_riscv.h
new file mode 100644
index 0000000..bdea5d0
--- /dev/null
+++ b/runtime/vm/instructions_riscv.h
@@ -0,0 +1,254 @@
+// Copyright (c) 2021, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+// Classes that describe assembly patterns as used by inline caches.
+
+#ifndef RUNTIME_VM_INSTRUCTIONS_RISCV_H_
+#define RUNTIME_VM_INSTRUCTIONS_RISCV_H_
+
+#ifndef RUNTIME_VM_INSTRUCTIONS_H_
+#error Do not include instructions_riscv.h directly; use instructions.h instead.
+#endif
+
+#include "vm/allocation.h"
+#include "vm/constants.h"
+#include "vm/native_function.h"
+#include "vm/tagged_pointer.h"
+
+#if !defined(DART_PRECOMPILED_RUNTIME)
+#include "vm/compiler/assembler/assembler.h"
+#endif  // !defined(DART_PRECOMPILED_RUNTIME)
+
+namespace dart {
+
+class Code;
+class ICData;
+class Object;
+class ObjectPool;
+
+class InstructionPattern : public AllStatic {
+ public:
+  // Decodes a load sequence ending at 'end' (the last instruction of the
+  // load sequence is the instruction before the one at end).  Returns the
+  // address of the first instruction in the sequence.  Returns the register
+  // being loaded and the loaded immediate value in the output parameters
+  // 'reg' and 'value' respectively.
+  static uword DecodeLoadWordImmediate(uword end,
+                                       Register* reg,
+                                       intptr_t* value);
+
+  // Decodes a load sequence ending at 'end' (the last instruction of the
+  // load sequence is the instruction before the one at end).  Returns the
+  // address of the first instruction in the sequence.  Returns the register
+  // being loaded and the index in the pool being read from in the output
+  // parameters 'reg' and 'index' respectively.
+  // IMPORTANT: When generating code that loads values from the pool on
+  // RISC-V, use the LoadWordFromPool macro instruction instead of emitting
+  // a direct load. The macro instruction takes care of pool offsets that
+  // can't be encoded as immediates.
+  static uword DecodeLoadWordFromPool(uword end,
+                                      Register* reg,
+                                      intptr_t* index);
+
+  // Encodes a load sequence ending at 'end'. Encodes a fixed length two
+  // instruction load from the pool pointer in PP using the destination
+  // register reg as a temporary for the base address.
+  static void EncodeLoadWordFromPoolFixed(uword end, int32_t offset);
+};
+
+class CallPattern : public ValueObject {
+ public:
+  CallPattern(uword pc, const Code& code);
+
+  CodePtr TargetCode() const;
+  void SetTargetCode(const Code& target) const;
+
+ private:
+  const ObjectPool& object_pool_;
+
+  intptr_t target_code_pool_index_;
+
+  DISALLOW_COPY_AND_ASSIGN(CallPattern);
+};
+
+class ICCallPattern : public ValueObject {
+ public:
+  ICCallPattern(uword pc, const Code& caller_code);
+
+  ObjectPtr Data() const;
+  void SetData(const Object& data) const;
+
+  CodePtr TargetCode() const;
+  void SetTargetCode(const Code& target) const;
+
+ private:
+  const ObjectPool& object_pool_;
+
+  intptr_t target_pool_index_;
+  intptr_t data_pool_index_;
+
+  DISALLOW_COPY_AND_ASSIGN(ICCallPattern);
+};
+
+class NativeCallPattern : public ValueObject {
+ public:
+  NativeCallPattern(uword pc, const Code& code);
+
+  CodePtr target() const;
+  void set_target(const Code& target) const;
+
+  NativeFunction native_function() const;
+  void set_native_function(NativeFunction target) const;
+
+ private:
+  const ObjectPool& object_pool_;
+
+  uword end_;
+  intptr_t native_function_pool_index_;
+  intptr_t target_code_pool_index_;
+
+  DISALLOW_COPY_AND_ASSIGN(NativeCallPattern);
+};
+
+// Instance call that can switch between a direct monomorphic call, an IC call,
+// and a megamorphic call.
+//   load guarded cid            load ICData             load MegamorphicCache
+//   load monomorphic target <-> load ICLookup stub  ->  load MMLookup stub
+//   call target.entry           call stub.entry         call stub.entry
+class SwitchableCallPatternBase : public ValueObject {
+ public:
+  explicit SwitchableCallPatternBase(const ObjectPool& object_pool);
+
+  ObjectPtr data() const;
+  void SetData(const Object& data) const;
+
+ protected:
+  const ObjectPool& object_pool_;
+  intptr_t data_pool_index_;
+  intptr_t target_pool_index_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(SwitchableCallPatternBase);
+};
+
+// See [SwitchableCallBase] for switchable calls in general.
+//
+// The target slot is always a [Code] object: Either the code of the
+// monomorphic function or a stub code.
+class SwitchableCallPattern : public SwitchableCallPatternBase {
+ public:
+  SwitchableCallPattern(uword pc, const Code& code);
+
+  uword target_entry() const;
+  void SetTarget(const Code& target) const;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(SwitchableCallPattern);
+};
+
+// See [SwitchableCallBase] for switchable calls in general.
+//
+// The target slot is always a direct entrypoint address: Either the entry point
+// of the monomorphic function or a stub entry point.
+class BareSwitchableCallPattern : public SwitchableCallPatternBase {
+ public:
+  explicit BareSwitchableCallPattern(uword pc);
+
+  uword target_entry() const;
+  void SetTarget(const Code& target) const;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(BareSwitchableCallPattern);
+};
+
+class ReturnPattern : public ValueObject {
+ public:
+  explicit ReturnPattern(uword pc);
+
+  // ret == c.jr ra: a single compressed (2-byte) instruction.
+  static const intptr_t kLengthInBytes = 2;
+
+  int pattern_length_in_bytes() const { return kLengthInBytes; }
+
+  bool IsValid() const;
+
+ private:
+  const uword pc_;
+};
+
+class PcRelativePatternBase : public ValueObject {
+ public:
+  static constexpr intptr_t kLengthInBytes = 8;
+  static constexpr intptr_t kLowerCallingRange =
+      static_cast<int32_t>(0x80000000);
+  static constexpr intptr_t kUpperCallingRange =
+      static_cast<int32_t>(0x7FFFFFFE);
+
+  explicit PcRelativePatternBase(uword pc) : pc_(pc) {}
+
+  int32_t distance() {
+    Instr auipc(*reinterpret_cast<uint32_t*>(pc_));
+    Instr jalr(*reinterpret_cast<uint32_t*>(pc_ + 4));
+    return auipc.utype_imm() + jalr.itype_imm();
+  }
+
+  void set_distance(int32_t distance) {
+    Instr auipc(*reinterpret_cast<uint32_t*>(pc_));
+    Instr jalr(*reinterpret_cast<uint32_t*>(pc_ + 4));
+    intx_t imm = distance;
+    intx_t lo = imm << (XLEN - 12) >> (XLEN - 12);
+    intx_t hi = (imm - lo) << (XLEN - 32) >> (XLEN - 32);
+    *reinterpret_cast<uint32_t*>(pc_) =
+        EncodeUTypeImm(hi) | EncodeRd(auipc.rd()) | EncodeOpcode(AUIPC);
+    *reinterpret_cast<uint32_t*>(pc_ + 4) =
+        EncodeITypeImm(lo) | EncodeRs1(jalr.rs1()) | EncodeFunct3(F3_0) |
+        EncodeRd(jalr.rd()) | EncodeOpcode(JALR);
+  }
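+
+  // The split above follows the standard RISC-V hi/lo decomposition: lo is
+  // the sign-extended low 12 bits of the distance and hi = distance - lo.
+  // For example (sketch), distance 0xfff gives lo = -1 and hi = 0x1000, so
+  // auipc adds 0x1000 and the jalr immediate takes back 1, netting 0xfff.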
+
+  bool IsValid() const;
+
+ protected:
+  uword pc_;
+};
+
+class PcRelativeCallPattern : public PcRelativePatternBase {
+ public:
+  explicit PcRelativeCallPattern(uword pc) : PcRelativePatternBase(pc) {}
+
+  bool IsValid() const;
+};
+
+class PcRelativeTailCallPattern : public PcRelativePatternBase {
+ public:
+  explicit PcRelativeTailCallPattern(uword pc) : PcRelativePatternBase(pc) {}
+
+  bool IsValid() const;
+};
+
+// RISC-V never uses trampolines since the range of the regular pc-relative call
+// is enough.
+class PcRelativeTrampolineJumpPattern : public ValueObject {
+ public:
+  static constexpr intptr_t kLengthInBytes = 8;
+  static constexpr intptr_t kLowerCallingRange =
+      -(DART_INT64_C(1) << 31) + kLengthInBytes;
+  static constexpr intptr_t kUpperCallingRange = (DART_INT64_C(1) << 31) - 1;
+
+  explicit PcRelativeTrampolineJumpPattern(uword pattern_start)
+      : pattern_start_(pattern_start) {
+    USE(pattern_start_);
+  }
+
+  void Initialize();
+
+  int32_t distance();
+  void set_distance(int32_t distance);
+  bool IsValid() const;
+
+ private:
+  uword pattern_start_;
+};
+
+}  // namespace dart
+
+#endif  // RUNTIME_VM_INSTRUCTIONS_RISCV_H_
diff --git a/runtime/vm/instructions_riscv_test.cc b/runtime/vm/instructions_riscv_test.cc
new file mode 100644
index 0000000..4f7ecb1
--- /dev/null
+++ b/runtime/vm/instructions_riscv_test.cc
@@ -0,0 +1,36 @@
+// Copyright (c) 2021, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/globals.h"
+#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+
+#include "vm/compiler/assembler/assembler.h"
+#include "vm/cpu.h"
+#include "vm/instructions.h"
+#include "vm/stub_code.h"
+#include "vm/unit_test.h"
+
+namespace dart {
+
+#define __ assembler->
+
+ASSEMBLER_TEST_GENERATE(Call, assembler) {
+  // Code is generated, but not executed. Just parsed with CallPattern.
+  __ set_constant_pool_allowed(true);  // Uninitialized pp is OK.
+  __ JumpAndLinkPatchable(StubCode::InvokeDartCode());
+  __ ret();
+}
+
+ASSEMBLER_TEST_RUN(Call, test) {
+  // The return address, which must be the address of an instruction contained
+  // in the code, points to the Ret instruction above, i.e. one instruction
+  // before the end of the code buffer.
+  uword end = test->payload_start() + test->code().Size();
+  CallPattern call(end - CInstr::kInstrSize, test->code());
+  EXPECT_EQ(StubCode::InvokeDartCode().ptr(), call.TargetCode());
+}
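+
+// Note: the pattern is parsed at `end - CInstr::kInstrSize` because the
+// trailing ret assembles to the 2-byte compressed c.jr ra rather than a
+// 4-byte instruction.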
+
+}  // namespace dart
+
+#endif  // defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
diff --git a/runtime/vm/malloc_hooks_unsupported.cc b/runtime/vm/malloc_hooks_unsupported.cc
index 5b666d1..11bc115 100644
--- a/runtime/vm/malloc_hooks_unsupported.cc
+++ b/runtime/vm/malloc_hooks_unsupported.cc
@@ -69,25 +69,7 @@
     return true;
   }
 #endif
-#if defined(DART_HOST_OS_LINUX) || defined(DART_HOST_OS_ANDROID)
-#if defined(DART_USE_MALLINFO2)
-  struct mallinfo2 info = mallinfo2();
-#else
-  struct mallinfo info = mallinfo();
-#endif  // defined(DART_USE_MALLINFO2)
-  *used = info.uordblks;
-  *capacity = *used + info.fordblks;
-  *implementation = "unknown";
-  return true;
-#elif defined(DART_HOST_OS_MACOS)
-  struct mstats stats = mstats();
-  *used = stats.bytes_used;
-  *capacity = stats.bytes_total;
-  *implementation = "macos";
-  return true;
-#else
   return false;
-#endif
 #else
   return false;
 #endif
diff --git a/runtime/vm/object.cc b/runtime/vm/object.cc
index 4ed246d..b2b35a6 100644
--- a/runtime/vm/object.cc
+++ b/runtime/vm/object.cc
@@ -14813,7 +14813,11 @@
   THR_Print("ObjectPool len:%" Pd " {\n", Length());
   for (intptr_t i = 0; i < Length(); i++) {
     intptr_t offset = OffsetFromIndex(i);
+#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+    THR_Print("  %" Pd "(pp) ", offset + kHeapObjectTag);
+#else
     THR_Print("  [pp+0x%" Px "] ", offset);
+#endif
     if (TypeAt(i) == EntryType::kTaggedObject) {
       const Object& obj = Object::Handle(ObjectAt(i));
       THR_Print("%s (obj)\n", obj.ToCString());
diff --git a/runtime/vm/object.h b/runtime/vm/object.h
index 097f562..6320a45 100644
--- a/runtime/vm/object.h
+++ b/runtime/vm/object.h
@@ -5423,6 +5423,16 @@
   static const intptr_t kPolymorphicEntryOffsetJIT = 48;
   static const intptr_t kMonomorphicEntryOffsetAOT = 8;
   static const intptr_t kPolymorphicEntryOffsetAOT = 20;
+#elif defined(TARGET_ARCH_RISCV32)
+  static const intptr_t kMonomorphicEntryOffsetJIT = 6;
+  static const intptr_t kPolymorphicEntryOffsetJIT = 42;
+  static const intptr_t kMonomorphicEntryOffsetAOT = 6;
+  static const intptr_t kPolymorphicEntryOffsetAOT = 16;
+#elif defined(TARGET_ARCH_RISCV64)
+  static const intptr_t kMonomorphicEntryOffsetJIT = 6;
+  static const intptr_t kPolymorphicEntryOffsetJIT = 42;
+  static const intptr_t kMonomorphicEntryOffsetAOT = 6;
+  static const intptr_t kPolymorphicEntryOffsetAOT = 16;
 #else
 #error Missing entry offsets for current architecture
 #endif
@@ -6692,7 +6702,7 @@
   // embedded objects in the instructions using pointer_offsets.
 
   static const intptr_t kBytesPerElement =
-      sizeof(reinterpret_cast<UntaggedCode*>(0)->data()[0]);
+      sizeof(reinterpret_cast<UntaggedCode*>(kOffsetOfPtr)->data()[0]);
   static const intptr_t kMaxElements = kSmiMax / kBytesPerElement;
 
   struct ArrayTraits {
diff --git a/runtime/vm/object_riscv_test.cc b/runtime/vm/object_riscv_test.cc
new file mode 100644
index 0000000..2b798c9
--- /dev/null
+++ b/runtime/vm/object_riscv_test.cc
@@ -0,0 +1,56 @@
+// Copyright (c) 2021, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "platform/assert.h"
+#include "vm/globals.h"
+#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+
+#include "vm/compiler/assembler/assembler.h"
+#include "vm/object.h"
+#include "vm/unit_test.h"
+
+namespace dart {
+
+#define __ assembler->
+
+// Generate a simple dart code sequence.
+// This is used to test Code and Instruction object creation.
+void GenerateIncrement(compiler::Assembler* assembler) {
+  __ EnterFrame(1 * kWordSize);
+  __ li(A0, 0);
+  __ PushRegister(A0);
+  __ addi(A0, A0, 1);
+  __ sx(A0, compiler::Address(SP));
+  __ lx(A1, compiler::Address(SP));
+  __ addi(A1, A1, 1);
+  __ PopRegister(A0);
+  __ mv(A0, A1);
+  __ LeaveFrame();
+  __ ret();
+}
+
+// Generate a dart code sequence that embeds a string object in it.
+// This is used to test Embedded String objects in the instructions.
+void GenerateEmbedStringInCode(compiler::Assembler* assembler,
+                               const char* str) {
+  const String& string_object =
+      String::ZoneHandle(String::New(str, Heap::kOld));
+  __ EnterStubFrame();
+  __ LoadObject(A0, string_object);
+  __ LeaveStubFrame();
+  __ ret();
+}
+
+// Generate a dart code sequence that embeds a smi object in it.
+// This is used to test Embedded Smi objects in the instructions.
+void GenerateEmbedSmiInCode(compiler::Assembler* assembler, intptr_t value) {
+  const Smi& smi_object = Smi::ZoneHandle(Smi::New(value));
+  const intx_t val = static_cast<intx_t>(smi_object.ptr());
+  __ LoadImmediate(A0, val);
+  __ ret();
+}
+
+}  // namespace dart
+
+#endif  // defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
diff --git a/runtime/vm/os_linux.cc b/runtime/vm/os_linux.cc
index 5d78dc1..aed7558 100644
--- a/runtime/vm/os_linux.cc
+++ b/runtime/vm/os_linux.cc
@@ -276,6 +276,7 @@
   static const uint32_t EM_X86_64 = 62;
   static const uint32_t EM_ARM = 40;
   static const uint32_t EM_AARCH64 = 183;
+  static const uint32_t EM_RISCV = 243;
 
   static uint32_t GetElfMachineArchitecture() {
 #if TARGET_ARCH_IA32
@@ -286,6 +287,8 @@
     return EM_ARM;
 #elif TARGET_ARCH_ARM64
     return EM_AARCH64;
+#elif TARGET_ARCH_RISCV32 || TARGET_ARCH_RISCV64
+    return EM_RISCV;
 #else
     UNREACHABLE();
     return 0;
@@ -501,7 +504,8 @@
 // into a architecture specific file e.g: os_ia32_linux.cc
 intptr_t OS::ActivationFrameAlignment() {
 #if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_X64) ||                   \
-    defined(TARGET_ARCH_ARM64)
+    defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_RISCV32) ||              \
+    defined(TARGET_ARCH_RISCV64)
   const int kMinimumAlignment = 16;
 #elif defined(TARGET_ARCH_ARM)
   const int kMinimumAlignment = 8;
diff --git a/runtime/vm/profiler.cc b/runtime/vm/profiler.cc
index 19c98fd..8ff785b 100644
--- a/runtime/vm/profiler.cc
+++ b/runtime/vm/profiler.cc
@@ -535,12 +535,8 @@
   }
   return false;
 }
-#elif defined(TARGET_ARCH_ARM)
-bool ReturnAddressLocator::LocateReturnAddress(uword* return_address) {
-  ASSERT(return_address != NULL);
-  return false;
-}
-#elif defined(TARGET_ARCH_ARM64)
+#elif defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64) ||                \
+    defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
 bool ReturnAddressLocator::LocateReturnAddress(uword* return_address) {
   ASSERT(return_address != NULL);
   return false;
diff --git a/runtime/vm/regexp.cc b/runtime/vm/regexp.cc
index a6ad932..1d2ea7a 100644
--- a/runtime/vm/regexp.cc
+++ b/runtime/vm/regexp.cc
@@ -2684,7 +2684,7 @@
   intptr_t preload_characters =
       Utils::Minimum(static_cast<intptr_t>(4), eats_at_least);
   if (compiler->one_byte()) {
-#if !defined(DART_COMPRESSED_POINTERS)
+#if !defined(DART_COMPRESSED_POINTERS) && !defined(TARGET_ARCH_RISCV32)
     if (preload_characters > 4) preload_characters = 4;
     // We can't preload 3 characters because there is no machine instruction
     // to do that.  We can't just load 4 because we could be reading
@@ -2696,7 +2696,7 @@
     if (preload_characters > 2) preload_characters = 2;
 #endif
   } else {
-#if !defined(DART_COMPRESSED_POINTERS)
+#if !defined(DART_COMPRESSED_POINTERS) && !defined(TARGET_ARCH_RISCV32)
     if (preload_characters > 2) preload_characters = 2;
 #else
     // Ensure LoadCodeUnitsInstr can always produce a Smi. See
diff --git a/runtime/vm/runtime_entry_arm.cc b/runtime/vm/runtime_entry_arm.cc
index 0870d45..e08ebbd 100644
--- a/runtime/vm/runtime_entry_arm.cc
+++ b/runtime/vm/runtime_entry_arm.cc
@@ -58,8 +58,11 @@
     __ LoadImmediate(TMP, VMTag::kDartTagId);
     __ str(TMP,
            compiler::Address(THR, compiler::target::Thread::vm_tag_offset()));
-    COMPILE_ASSERT(IsAbiPreservedRegister(THR));
-    COMPILE_ASSERT(IsAbiPreservedRegister(PP));
+    // These registers must be preserved by runtime functions, otherwise
+    // we'd need to restore them here.
+    COMPILE_ASSERT(IsCalleeSavedRegister(THR));
+    COMPILE_ASSERT(IsCalleeSavedRegister(PP));
+    COMPILE_ASSERT(IsCalleeSavedRegister(CODE_REG));
   } else {
     // Argument count is not checked here, but in the runtime entry for a more
     // informative error message.
diff --git a/runtime/vm/runtime_entry_arm64.cc b/runtime/vm/runtime_entry_arm64.cc
index 465c0fd9..05efb72 100644
--- a/runtime/vm/runtime_entry_arm64.cc
+++ b/runtime/vm/runtime_entry_arm64.cc
@@ -73,13 +73,14 @@
     __ str(TMP, compiler::Address(THR, Thread::vm_tag_offset()));
     __ mov(SP, kCallLeafRuntimeCalleeSaveScratch2);
     __ mov(CSP, kCallLeafRuntimeCalleeSaveScratch1);
-    COMPILE_ASSERT(IsAbiPreservedRegister(THR));
-    COMPILE_ASSERT(IsAbiPreservedRegister(PP));
-    COMPILE_ASSERT(IsAbiPreservedRegister(NULL_REG));
-    COMPILE_ASSERT(IsAbiPreservedRegister(HEAP_BITS));
-    COMPILE_ASSERT(IsAbiPreservedRegister(DISPATCH_TABLE_REG));
-    // These registers must be preserved by the runtime functions, otherwise
+    // These registers must be preserved by runtime functions, otherwise
     // we'd need to restore them here.
+    COMPILE_ASSERT(IsCalleeSavedRegister(THR));
+    COMPILE_ASSERT(IsCalleeSavedRegister(PP));
+    COMPILE_ASSERT(IsCalleeSavedRegister(CODE_REG));
+    COMPILE_ASSERT(IsCalleeSavedRegister(NULL_REG));
+    COMPILE_ASSERT(IsCalleeSavedRegister(HEAP_BITS));
+    COMPILE_ASSERT(IsCalleeSavedRegister(DISPATCH_TABLE_REG));
   } else {
     // Argument count is not checked here, but in the runtime entry for a more
     // informative error message.
diff --git a/runtime/vm/runtime_entry_ia32.cc b/runtime/vm/runtime_entry_ia32.cc
index 8bba816..bbf92c5 100644
--- a/runtime/vm/runtime_entry_ia32.cc
+++ b/runtime/vm/runtime_entry_ia32.cc
@@ -37,6 +37,9 @@
     __ call(EAX);
     __ movl(compiler::Assembler::VMTagAddress(),
             compiler::Immediate(VMTag::kDartTagId));
+    // These registers must be preserved by runtime functions, otherwise
+    // we'd need to restore them here.
+    COMPILE_ASSERT(IsCalleeSavedRegister(THR));
   } else {
     // Argument count is not checked here, but in the runtime entry for a more
     // informative error message.
diff --git a/runtime/vm/runtime_entry_riscv.cc b/runtime/vm/runtime_entry_riscv.cc
new file mode 100644
index 0000000..8dfab67
--- /dev/null
+++ b/runtime/vm/runtime_entry_riscv.cc
@@ -0,0 +1,82 @@
+// Copyright (c) 2021, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/globals.h"
+#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+
+#include "vm/runtime_entry.h"
+
+#include "vm/simulator.h"
+#include "vm/stub_code.h"
+
+#if !defined(DART_PRECOMPILED_RUNTIME)
+#include "vm/compiler/assembler/assembler.h"
+#endif  // !defined(DART_PRECOMPILED_RUNTIME)
+
+namespace dart {
+
+#define __ assembler->
+
+uword RuntimeEntry::GetEntryPoint() const {
+  // Compute the effective address. When running under the simulator,
+  // this is a redirection address that forces the simulator to call
+  // into the runtime system.
+  uword entry = reinterpret_cast<uword>(function());
+#if defined(USING_SIMULATOR)
+  // Redirection to leaf runtime calls supports a maximum of 4 arguments passed
+  // in registers (maximum 2 double arguments for leaf float runtime calls).
+  ASSERT(argument_count() >= 0);
+  ASSERT(!is_leaf() || (!is_float() && (argument_count() <= 4)) ||
+         (argument_count() <= 2));
+  Simulator::CallKind call_kind =
+      is_leaf() ? (is_float() ? Simulator::kLeafFloatRuntimeCall
+                              : Simulator::kLeafRuntimeCall)
+                : Simulator::kRuntimeCall;
+  entry =
+      Simulator::RedirectExternalReference(entry, call_kind, argument_count());
+#endif
+  return entry;
+}
+
+#if !defined(DART_PRECOMPILED_RUNTIME)
+// Generate code to call into the stub which will call the runtime
+// function. Input for the stub is as follows:
+//   SP : points to the arguments and return value array.
+//   T5 : address of the runtime function to call.
+//   T4 : number of arguments to the call.
+void RuntimeEntry::CallInternal(const RuntimeEntry* runtime_entry,
+                                compiler::Assembler* assembler,
+                                intptr_t argument_count) {
+  if (runtime_entry->is_leaf()) {
+    ASSERT(argument_count == runtime_entry->argument_count());
+    // The caller is responsible for either using CallRuntimeScope or manually
+    // saving PP (a C-ABI volatile register) and SP (altered by alignment).
+    COMPILE_ASSERT(!IsAbiPreservedRegister(PP));
+
+    __ lx(TMP2,
+          compiler::Address(THR, Thread::OffsetFromThread(runtime_entry)));
+    __ sx(TMP2, compiler::Address(THR, Thread::vm_tag_offset()));
+    __ ReserveAlignedFrameSpace(0);
+    __ jalr(TMP2);
+    __ LoadImmediate(TMP2, VMTag::kDartTagId);
+    __ sx(TMP2, compiler::Address(THR, Thread::vm_tag_offset()));
+    // These registers must be preserved by runtime functions, otherwise
+    // we'd need to restore them here.
+    COMPILE_ASSERT(IsCalleeSavedRegister(THR));
+    COMPILE_ASSERT(IsCalleeSavedRegister(NULL_REG));
+    COMPILE_ASSERT(IsCalleeSavedRegister(WRITE_BARRIER_MASK));
+    COMPILE_ASSERT(IsCalleeSavedRegister(DISPATCH_TABLE_REG));
+  } else {
+    // Argument count is not checked here, but in the runtime entry for a more
+    // informative error message.
+    __ lx(T5, compiler::Address(THR, Thread::OffsetFromThread(runtime_entry)));
+    __ li(T4, argument_count);
+    __ JumpAndLinkToRuntime();
+  }
+}
+#endif  // !defined(DART_PRECOMPILED_RUNTIME)
+
+}  // namespace dart
+
+#endif  // defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
diff --git a/runtime/vm/runtime_entry_x64.cc b/runtime/vm/runtime_entry_x64.cc
index 78d3ead..fe759e1 100644
--- a/runtime/vm/runtime_entry_x64.cc
+++ b/runtime/vm/runtime_entry_x64.cc
@@ -38,8 +38,11 @@
     __ CallCFunction(RAX);
     __ movq(compiler::Assembler::VMTagAddress(),
             compiler::Immediate(VMTag::kDartTagId));
-    ASSERT((CallingConventions::kCalleeSaveCpuRegisters & (1 << THR)) != 0);
-    ASSERT((CallingConventions::kCalleeSaveCpuRegisters & (1 << PP)) != 0);
+    // These registers must be preserved by runtime functions, otherwise
+    // we'd need to restore them here.
+    ASSERT(IsCalleeSavedRegister(THR));
+    ASSERT(IsCalleeSavedRegister(PP));
+    ASSERT(IsCalleeSavedRegister(CODE_REG));
   } else {
     // Argument count is not checked here, but in the runtime entry for a more
     // informative error message.
diff --git a/runtime/vm/signal_handler_linux.cc b/runtime/vm/signal_handler_linux.cc
index 1979da4..a0127e9 100644
--- a/runtime/vm/signal_handler_linux.cc
+++ b/runtime/vm/signal_handler_linux.cc
@@ -21,6 +21,10 @@
   pc = static_cast<uintptr_t>(mcontext.arm_pc);
 #elif defined(HOST_ARCH_ARM64)
   pc = static_cast<uintptr_t>(mcontext.pc);
+#elif defined(HOST_ARCH_RISCV32)
+  pc = static_cast<uintptr_t>(mcontext.__gregs[REG_PC]);
+#elif defined(HOST_ARCH_RISCV64)
+  pc = static_cast<uintptr_t>(mcontext.__gregs[REG_PC]);
 #else
 #error Unsupported architecture.
 #endif  // HOST_ARCH_...
@@ -45,6 +49,10 @@
   }
 #elif defined(HOST_ARCH_ARM64)
   fp = static_cast<uintptr_t>(mcontext.regs[29]);
+#elif defined(HOST_ARCH_RISCV32)
+  fp = static_cast<uintptr_t>(mcontext.__gregs[REG_S0]);
+#elif defined(HOST_ARCH_RISCV64)
+  fp = static_cast<uintptr_t>(mcontext.__gregs[REG_S0]);
 #else
 #error Unsupported architecture.
 #endif  // HOST_ARCH_...
@@ -63,6 +71,10 @@
   sp = static_cast<uintptr_t>(mcontext.arm_sp);
 #elif defined(HOST_ARCH_ARM64)
   sp = static_cast<uintptr_t>(mcontext.sp);
+#elif defined(HOST_ARCH_RISCV32)
+  sp = static_cast<uintptr_t>(mcontext.__gregs[REG_SP]);
+#elif defined(HOST_ARCH_RISCV64)
+  sp = static_cast<uintptr_t>(mcontext.__gregs[REG_SP]);
 #else
 #error Unsupported architecture.
 #endif  // HOST_ARCH_...
@@ -88,6 +100,10 @@
   lr = static_cast<uintptr_t>(mcontext.arm_lr);
 #elif defined(HOST_ARCH_ARM64)
   lr = static_cast<uintptr_t>(mcontext.regs[30]);
+#elif defined(HOST_ARCH_RISCV32)
+  lr = static_cast<uintptr_t>(mcontext.__gregs[REG_RA]);
+#elif defined(HOST_ARCH_RISCV64)
+  lr = static_cast<uintptr_t>(mcontext.__gregs[REG_RA]);
 #else
 #error Unsupported architecture.
 #endif  // HOST_ARCH_...
diff --git a/runtime/vm/simulator.h b/runtime/vm/simulator.h
index e08337c..7738d0c 100644
--- a/runtime/vm/simulator.h
+++ b/runtime/vm/simulator.h
@@ -14,6 +14,8 @@
 #include "vm/simulator_arm.h"
 #elif defined(TARGET_ARCH_ARM64)
 #include "vm/simulator_arm64.h"
+#elif defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+#include "vm/simulator_riscv.h"
 #else
 #error Unknown architecture.
 #endif  // defined(TARGET_ARCH_...)
diff --git a/runtime/vm/simulator_riscv.cc b/runtime/vm/simulator_riscv.cc
new file mode 100644
index 0000000..73cdb72
--- /dev/null
+++ b/runtime/vm/simulator_riscv.cc
@@ -0,0 +1,2617 @@
+// Copyright (c) 2017, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include <setjmp.h>  // NOLINT
+#include <stdlib.h>
+
+#include "vm/globals.h"
+#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+
+// Only build the simulator if not compiling for real RISC-V hardware.
+#if defined(USING_SIMULATOR)
+
+#include "vm/simulator.h"
+
+#include "vm/compiler/assembler/disassembler.h"
+#include "vm/constants.h"
+#include "vm/image_snapshot.h"
+#include "vm/native_arguments.h"
+#include "vm/os_thread.h"
+#include "vm/stack_frame.h"
+
+namespace dart {
+
+DEFINE_FLAG(uint64_t,
+            trace_sim_after,
+            ULLONG_MAX,
+            "Trace simulator execution after instruction count reached.");
+DEFINE_FLAG(uint64_t,
+            stop_sim_at,
+            ULLONG_MAX,
+            "Instruction address or instruction count to stop simulator at.");
+
+// SimulatorSetjmpBuffer are linked together, and the last created one
+// is referenced by the Simulator. When an exception is thrown, the exception
+// runtime looks at where to jump and finds the corresponding
+// SimulatorSetjmpBuffer based on the stack pointer of the exception handler.
+// The runtime then does a Longjmp on that buffer to return to the simulator.
+class SimulatorSetjmpBuffer {
+ public:
+  void Longjmp() {
+    // "This" is now the last setjmp buffer.
+    simulator_->set_last_setjmp_buffer(this);
+    longjmp(buffer_, 1);
+  }
+
+  explicit SimulatorSetjmpBuffer(Simulator* sim) {
+    simulator_ = sim;
+    link_ = sim->last_setjmp_buffer();
+    sim->set_last_setjmp_buffer(this);
+    sp_ = static_cast<uword>(sim->get_register(SP));
+  }
+
+  ~SimulatorSetjmpBuffer() {
+    ASSERT(simulator_->last_setjmp_buffer() == this);
+    simulator_->set_last_setjmp_buffer(link_);
+  }
+
+  SimulatorSetjmpBuffer* link() { return link_; }
+
+  uword sp() { return sp_; }
+
+ private:
+  uword sp_;
+  Simulator* simulator_;
+  SimulatorSetjmpBuffer* link_;
+  jmp_buf buffer_;
+
+  friend class Simulator;
+};
+
+// When the generated code calls an external reference we need to catch that in
+// the simulator.  The external reference will be a function compiled for the
+// host architecture.  We need to call that function instead of trying to
+// execute it with the simulator.  We do that by redirecting the external
+// reference to an ecall (environment call) instruction that is handled by
+// the simulator.  We store the original destination of the jump at a known
+// offset from the ecall instruction so the simulator knows what to call.
+class Redirection {
+ public:
+  uword address_of_ecall_instruction() {
+    return reinterpret_cast<uword>(&ecall_instruction_);
+  }
+
+  uword external_function() const { return external_function_; }
+
+  Simulator::CallKind call_kind() const { return call_kind_; }
+
+  int argument_count() const { return argument_count_; }
+
+  static Redirection* Get(uword external_function,
+                          Simulator::CallKind call_kind,
+                          int argument_count) {
+    MutexLocker ml(mutex_);
+
+    Redirection* old_head = list_.load(std::memory_order_relaxed);
+    for (Redirection* current = old_head; current != nullptr;
+         current = current->next_) {
+      if (current->external_function_ == external_function) return current;
+    }
+
+    Redirection* redirection =
+        new Redirection(external_function, call_kind, argument_count);
+    redirection->next_ = old_head;
+
+    // Use a memory fence to ensure all pending writes are written at the time
+    // of updating the list head, so the profiling thread always has a valid
+    // list to look at.
+    list_.store(redirection, std::memory_order_release);
+
+    return redirection;
+  }
+
+  static Redirection* FromECallInstruction(uintx_t ecall_instruction) {
+    char* addr_of_ecall = reinterpret_cast<char*>(ecall_instruction);
+    char* addr_of_redirection =
+        addr_of_ecall - OFFSET_OF(Redirection, ecall_instruction_);
+    return reinterpret_cast<Redirection*>(addr_of_redirection);
+  }
+
+  // Please note that this function is called by the signal handler of the
+  // profiling thread.  It can therefore run at any point in time and is not
+  // allowed to hold any locks - which is precisely the reason why the list is
+  // prepend-only and a memory fence is used when writing the list head [list_]!
+  static uword FunctionForRedirect(uword address_of_ecall) {
+    for (Redirection* current = list_.load(std::memory_order_acquire);
+         current != nullptr; current = current->next_) {
+      if (current->address_of_ecall_instruction() == address_of_ecall) {
+        return current->external_function_;
+      }
+    }
+    return 0;
+  }
+
+ private:
+  Redirection(uword external_function,
+              Simulator::CallKind call_kind,
+              int argument_count)
+      : external_function_(external_function),
+        call_kind_(call_kind),
+        argument_count_(argument_count),
+        ecall_instruction_(Instr::kSimulatorRedirectInstruction),
+        next_(NULL) {}
+
+  uword external_function_;
+  Simulator::CallKind call_kind_;
+  int argument_count_;
+  uint32_t ecall_instruction_;
+  Redirection* next_;
+  static std::atomic<Redirection*> list_;
+  static Mutex* mutex_;
+};
+
+std::atomic<Redirection*> Redirection::list_ = {nullptr};
+Mutex* Redirection::mutex_ = new Mutex();
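+
+// Roundtrip summary: generated code "calls" address_of_ecall_instruction(),
+// the simulator executes the embedded ecall, and FromECallInstruction()
+// subtracts OFFSET_OF(Redirection, ecall_instruction_) from that address to
+// recover the Redirection, and with it the host function and call kind.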
+
+uword Simulator::RedirectExternalReference(uword function,
+                                           CallKind call_kind,
+                                           int argument_count) {
+  Redirection* redirection =
+      Redirection::Get(function, call_kind, argument_count);
+  return redirection->address_of_ecall_instruction();
+}
+
+uword Simulator::FunctionForRedirect(uword redirect) {
+  return Redirection::FunctionForRedirect(redirect);
+}
+
+// Get the active Simulator for the current isolate.
+Simulator* Simulator::Current() {
+  Isolate* isolate = Isolate::Current();
+  Simulator* simulator = isolate->simulator();
+  if (simulator == NULL) {
+    NoSafepointScope no_safepoint;
+    simulator = new Simulator();
+    isolate->set_simulator(simulator);
+  }
+  return simulator;
+}
+
+void Simulator::Init() {}
+
+Simulator::Simulator()
+    : pc_(0),
+      instret_(0),
+      reserved_address_(0),
+      reserved_value_(0),
+      fcsr_(0),
+      random_(),
+      last_setjmp_buffer_(NULL) {
+  // Set up simulator support first. Some of this information is needed to
+  // set up the architecture state.
+  // We allocate the stack here, the size is computed as the sum of
+  // the size specified by the user and the buffer space needed for
+  // handling stack overflow exceptions. To be safe in potential
+  // stack underflows we also add some underflow buffer space.
+  stack_ =
+      new char[(OSThread::GetSpecifiedStackSize() +
+                OSThread::kStackSizeBufferMax + kSimulatorStackUnderflowSize)];
+  // Low address.
+  stack_limit_ = reinterpret_cast<uword>(stack_);
+  // Limit for StackOverflowError.
+  overflow_stack_limit_ = stack_limit_ + OSThread::kStackSizeBufferMax;
+  // High address.
+  stack_base_ = overflow_stack_limit_ + OSThread::GetSpecifiedStackSize();
+
+  // Set up the architecture state.
+  xregs_[0] = 0;
+  for (intptr_t i = 1; i < kNumberOfCpuRegisters; i++) {
+    xregs_[i] = random_.NextUInt64();
+  }
+  for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
+    // TODO(riscv): This generates values that are very wide when printed,
+    // making it hard to read register state. Maybe generate random values in
+    // the unit interval instead?
+    // fregs_[i] = bit_cast<double>(random_.NextUInt64());
+    fregs_[i] = bit_cast<double>(kNaNBox);
+  }
+
+  // The sp is initialized to point to the bottom (high address) of the
+  // allocated stack area.
+  set_xreg(SP, stack_base());
+  // The ra and pc are initialized to a known bad value that will cause an
+  // access violation if the simulator ever tries to execute it.
+  set_xreg(RA, kBadLR);
+  pc_ = kBadLR;
+}
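+
+// Note: fregs_ are seeded with bit_cast<double>(kNaNBox) rather than random
+// bits. Per the RISC-V F/D extensions, a single-precision value is only valid
+// in a 64-bit register when NaN-boxed (upper 32 bits all ones), and a fixed
+// boxed pattern keeps register dumps readable (assuming kNaNBox is that
+// canonical pattern).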
+
+Simulator::~Simulator() {
+  delete[] stack_;
+  Isolate* isolate = Isolate::Current();
+  if (isolate != NULL) {
+    isolate->set_simulator(NULL);
+  }
+}
+
+void Simulator::PrepareCall(PreservedRegisters* preserved) {
+#if defined(DEBUG)
+  for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
+    preserved->xregs[i] = xregs_[i];
+    if ((kAbiVolatileCpuRegs & (1 << i)) != 0) {
+      xregs_[i] = random_.NextUInt64();
+    }
+  }
+  for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
+    preserved->fregs[i] = fregs_[i];
+    if ((kAbiVolatileFpuRegs & (1 << i)) != 0) {
+      // TODO(riscv): This generates values that are very wide when printed,
+      // making it hard to read register state. Maybe generate random values in
+      // the unit interval instead?
+      // fregs_[i] = bit_cast<double>(random_.NextUInt64());
+      fregs_[i] = bit_cast<double>(kNaNBox);
+    }
+  }
+#endif
+}
+
+void Simulator::ClobberVolatileRegisters() {
+#if defined(DEBUG)
+  reserved_address_ = reserved_value_ = 0;  // Clear atomic reservation.
+  for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
+    if ((kAbiVolatileCpuRegs & (1 << i)) != 0) {
+      xregs_[i] = random_.NextUInt64();
+    }
+  }
+  for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
+    if ((kAbiVolatileFpuRegs & (1 << i)) != 0) {
+      // TODO(riscv): This generates values that are very wide when printed,
+      // making it hard to read register state. Maybe generate random values in
+      // the unit interval instead?
+      // fregs_[i] = bit_cast<double>(random_.NextUInt64());
+      fregs_[i] = bit_cast<double>(kNaNBox);
+    }
+  }
+#endif
+}
+
+void Simulator::SavePreservedRegisters(PreservedRegisters* preserved) {
+#if defined(DEBUG)
+  for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
+    preserved->xregs[i] = xregs_[i];
+  }
+  for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
+    preserved->fregs[i] = fregs_[i];
+  }
+#endif
+}
+
+void Simulator::CheckPreservedRegisters(PreservedRegisters* preserved) {
+#if defined(DEBUG)
+  if (preserved->xregs[SP] != xregs_[SP]) {
+    PrintRegisters();
+    PrintStack();
+    FATAL("Stack unbalanced");
+  }
+  const intptr_t kPreservedAtCall =
+      kAbiPreservedCpuRegs | (1 << TP) | (1 << GP) | (1 << SP) | (1 << FP);
+  for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
+    if ((kPreservedAtCall & (1 << i)) != 0) {
+      if (preserved->xregs[i] != xregs_[i]) {
+        FATAL("%s was not preserved\n", cpu_reg_names[i]);
+      }
+    }
+  }
+  for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
+    if ((kAbiVolatileFpuRegs & (1 << i)) == 0) {
+      if (bit_cast<uint64_t>(preserved->fregs[i]) !=
+          bit_cast<uint64_t>(fregs_[i])) {
+        FATAL("%s was not preserved\n", fpu_reg_names[i]);
+      }
+    }
+  }
+#endif
+}
+
+void Simulator::RunCall(intx_t entry, PreservedRegisters* preserved) {
+  pc_ = entry;
+  set_xreg(RA, kEndSimulatingPC);
+  Execute();
+  CheckPreservedRegisters(preserved);
+}
+
+int64_t Simulator::Call(intx_t entry,
+                        intx_t parameter0,
+                        intx_t parameter1,
+                        intx_t parameter2,
+                        intx_t parameter3,
+                        bool fp_return,
+                        bool fp_args) {
+  // Save the SP register before the call so we can restore it.
+  const intptr_t sp_before_call = get_xreg(SP);
+
+  // Set up parameters.
+  if (fp_args) {
+    set_fregd(FA0, parameter0);
+    set_fregd(FA1, parameter1);
+    set_fregd(FA2, parameter2);
+    set_fregd(FA3, parameter3);
+  } else {
+    set_xreg(A0, parameter0);
+    set_xreg(A1, parameter1);
+    set_xreg(A2, parameter2);
+    set_xreg(A3, parameter3);
+  }
+
+  // Make sure the activation frames are properly aligned.
+  intptr_t stack_pointer = sp_before_call;
+  if (OS::ActivationFrameAlignment() > 1) {
+    stack_pointer =
+        Utils::RoundDown(stack_pointer, OS::ActivationFrameAlignment());
+  }
+  set_xreg(SP, stack_pointer);
+
+  // Prepare to execute the code at entry.
+  pc_ = entry;
+  // Put down a marker for the end of simulation. The simulator will stop
+  // simulating when the PC reaches this value. By saving the "end simulation"
+  // value into RA, the simulation stops when returning to this call point.
+  set_xreg(RA, kEndSimulatingPC);
+
+  // Remember the values of callee-saved registers, and set them up with a
+  // known value so that we are able to check that they are preserved
+  // properly across Dart execution.
+  PreservedRegisters preserved;
+  SavePreservedRegisters(&preserved);
+
+  // Start the simulation.
+  Execute();
+
+  // Check that the callee-saved registers have been preserved,
+  // and restore them with the original value.
+  CheckPreservedRegisters(&preserved);
+
+  // Restore the SP register and return the result from A0 (or FA0).
+  set_xreg(SP, sp_before_call);
+  int64_t return_value;
+  if (fp_return) {
+    return_value = get_fregd(FA0);
+  } else {
+    return_value = get_xreg(A0);
+  }
+  return return_value;
+}
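+
+// Usage sketch (illustrative, not from this file): callers enter simulated
+// code with something like
+//   Simulator::Current()->Call(entry, a0, a1, a2, a3);
+// which simulates until the kEndSimulatingPC sentinel planted in RA is
+// reached, then hands back A0 (or FA0 when fp_return is set).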
+
+void Simulator::Execute() {
+  while (pc_ != kEndSimulatingPC) {
+    uint16_t parcel = *reinterpret_cast<uint16_t*>(pc_);
+    if (IsCInstruction(parcel)) {
+      CInstr instr(parcel);
+      if (IsTracingExecution()) {
+        Disassembler::Disassemble(pc_, pc_ + instr.length());
+      }
+      Interpret(instr);
+    } else {
+      Instr instr(*reinterpret_cast<uint32_t*>(pc_));
+      if (IsTracingExecution()) {
+        Disassembler::Disassemble(pc_, pc_ + instr.length());
+      }
+      Interpret(instr);
+    }
+    instret_++;
+  }
+}
+
+bool Simulator::IsTracingExecution() const {
+  return instret_ > FLAG_trace_sim_after;
+}
+
+void Simulator::JumpToFrame(uword pc, uword sp, uword fp, Thread* thread) {
+  // Walk over all setjmp buffers (simulated --> C++ transitions)
+  // and try to find the setjmp associated with the simulated stack pointer.
+  SimulatorSetjmpBuffer* buf = last_setjmp_buffer();
+  while (buf->link() != NULL && buf->link()->sp() <= sp) {
+    buf = buf->link();
+  }
+  ASSERT(buf != NULL);
+
+  // The C++ caller has not cleaned up the stack memory of C++ frames.
+  // Prepare for unwinding frames by destroying all the stack resources
+  // in the previous C++ frames.
+  StackResource::Unwind(thread);
+
+  // Keep the following code in sync with `StubCode::JumpToFrameStub()`.
+
+  // Unwind the C++ stack and continue simulation in the target frame.
+  pc_ = pc;
+  set_xreg(SP, static_cast<uintx_t>(sp));
+  set_xreg(FP, static_cast<uintx_t>(fp));
+  set_xreg(THR, reinterpret_cast<uintx_t>(thread));
+  // Set the tag.
+  thread->set_vm_tag(VMTag::kDartTagId);
+  // Clear top exit frame.
+  thread->set_top_exit_frame_info(0);
+  // Restore pool pointer.
+  uintx_t code =
+      *reinterpret_cast<uintx_t*>(fp + kPcMarkerSlotFromFp * kWordSize);
+  uintx_t pp = FLAG_precompiled_mode
+                   ? static_cast<uintx_t>(thread->global_object_pool())
+                   : *reinterpret_cast<uintx_t*>(
+                         code + Code::object_pool_offset() - kHeapObjectTag);
+  pp -= kHeapObjectTag;  // In the PP register, the pool pointer is untagged.
+  set_xreg(CODE_REG, code);
+  set_xreg(PP, pp);
+  set_xreg(WRITE_BARRIER_MASK, thread->write_barrier_mask());
+  set_xreg(NULL_REG, static_cast<uintx_t>(Object::null()));
+  if (FLAG_precompiled_mode) {
+    set_xreg(DISPATCH_TABLE_REG,
+             reinterpret_cast<uintx_t>(thread->dispatch_table_array()));
+  }
+
+  buf->Longjmp();
+}
+
+void Simulator::PrintRegisters() {
+  ASSERT(static_cast<intptr_t>(kNumberOfCpuRegisters) ==
+         static_cast<intptr_t>(kNumberOfFpuRegisters));
+  for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
+#if XLEN == 32
+    OS::Print("%4s: %8x %11d", cpu_reg_names[i], xregs_[i], xregs_[i]);
+#elif XLEN == 64
+    OS::Print("%4s: %16" Px64 " %20" Pd64, cpu_reg_names[i], xregs_[i],
+              xregs_[i]);
+#endif
+    OS::Print("  %4s: %lf\n", fpu_reg_names[i], fregs_[i]);
+  }
+#if XLEN == 32
+  OS::Print("  pc: %8x\n", pc_);
+#elif XLEN == 64
+  OS::Print("  pc: %16" Px64 "\n", pc_);
+#endif
+}
+
+void Simulator::PrintStack() {
+  StackFrameIterator frames(get_register(FP), get_register(SP), get_pc(),
+                            ValidationPolicy::kDontValidateFrames,
+                            Thread::Current(),
+                            StackFrameIterator::kNoCrossThreadIteration);
+  StackFrame* frame = frames.NextFrame();
+  while (frame != nullptr) {
+    OS::PrintErr("%s\n", frame->ToCString());
+    frame = frames.NextFrame();
+  }
+}
+
+void Simulator::Interpret(Instr instr) {
+  switch (instr.opcode()) {
+    case LUI:
+      InterpretLUI(instr);
+      break;
+    case AUIPC:
+      InterpretAUIPC(instr);
+      break;
+    case JAL:
+      InterpretJAL(instr);
+      break;
+    case JALR:
+      InterpretJALR(instr);
+      break;
+    case BRANCH:
+      InterpretBRANCH(instr);
+      break;
+    case LOAD:
+      InterpretLOAD(instr);
+      break;
+    case STORE:
+      InterpretSTORE(instr);
+      break;
+    case OPIMM:
+      InterpretOPIMM(instr);
+      break;
+    case OPIMM32:
+      InterpretOPIMM32(instr);
+      break;
+    case OP:
+      InterpretOP(instr);
+      break;
+    case OP32:
+      InterpretOP32(instr);
+      break;
+    case MISCMEM:
+      InterpretMISCMEM(instr);
+      break;
+    case SYSTEM:
+      InterpretSYSTEM(instr);
+      break;
+    case AMO:
+      InterpretAMO(instr);
+      break;
+    case LOADFP:
+      InterpretLOADFP(instr);
+      break;
+    case STOREFP:
+      InterpretSTOREFP(instr);
+      break;
+    case FMADD:
+      InterpretFMADD(instr);
+      break;
+    case FMSUB:
+      InterpretFMSUB(instr);
+      break;
+    case FNMADD:
+      InterpretFNMADD(instr);
+      break;
+    case FNMSUB:
+      InterpretFNMSUB(instr);
+      break;
+    case OPFP:
+      InterpretOPFP(instr);
+      break;
+    default:
+      IllegalInstruction(instr);
+  }
+}
+
+void Simulator::Interpret(CInstr instr) {
+  switch (instr.opcode()) {
+    case C_LWSP: {
+      uintx_t addr = get_xreg(SP) + instr.spload4_imm();
+      set_xreg(instr.rd(), MemoryRead<int32_t>(addr, SP));
+      break;
+    }
+#if XLEN == 32
+    case C_FLWSP: {
+      uintx_t addr = get_xreg(SP) + instr.spload4_imm();
+      set_fregs(instr.frd(), MemoryRead<float>(addr, SP));
+      break;
+    }
+#else
+    case C_LDSP: {
+      uintx_t addr = get_xreg(SP) + instr.spload8_imm();
+      set_xreg(instr.rd(), MemoryRead<int64_t>(addr, SP));
+      break;
+    }
+#endif
+    case C_FLDSP: {
+      uintx_t addr = get_xreg(SP) + instr.spload8_imm();
+      set_fregd(instr.frd(), MemoryRead<double>(addr, SP));
+      break;
+    }
+    case C_SWSP: {
+      uintx_t addr = get_xreg(SP) + instr.spstore4_imm();
+      MemoryWrite<uint32_t>(addr, get_xreg(instr.rs2()), SP);
+      break;
+    }
+#if XLEN == 32
+    case C_FSWSP: {
+      uintx_t addr = get_xreg(SP) + instr.spstore4_imm();
+      MemoryWrite<float>(addr, get_fregs(instr.frs2()), SP);
+      break;
+    }
+#else
+    case C_SDSP: {
+      uintx_t addr = get_xreg(SP) + instr.spstore8_imm();
+      MemoryWrite<uint64_t>(addr, get_xreg(instr.rs2()), SP);
+      break;
+    }
+#endif
+    case C_FSDSP: {
+      uintx_t addr = get_xreg(SP) + instr.spstore8_imm();
+      MemoryWrite<double>(addr, get_fregd(instr.frs2()), SP);
+      break;
+    }
+    case C_LW: {
+      uintx_t addr = get_xreg(instr.rs1p()) + instr.mem4_imm();
+      set_xreg(instr.rdp(), MemoryRead<int32_t>(addr, instr.rs1p()));
+      break;
+    }
+#if XLEN == 32
+    case C_FLW: {
+      uintx_t addr = get_xreg(instr.rs1p()) + instr.mem4_imm();
+      set_fregs(instr.frdp(), MemoryRead<float>(addr, instr.rs1p()));
+      break;
+    }
+#else
+    case C_LD: {
+      uintx_t addr = get_xreg(instr.rs1p()) + instr.mem8_imm();
+      set_xreg(instr.rdp(), MemoryRead<int64_t>(addr, instr.rs1p()));
+      break;
+    }
+#endif
+    case C_FLD: {
+      uintx_t addr = get_xreg(instr.rs1p()) + instr.mem8_imm();
+      set_fregd(instr.frdp(), MemoryRead<double>(addr, instr.rs1p()));
+      break;
+    }
+    case C_SW: {
+      uintx_t addr = get_xreg(instr.rs1p()) + instr.mem4_imm();
+      MemoryWrite<uint32_t>(addr, get_xreg(instr.rs2p()), instr.rs1p());
+      break;
+    }
+#if XLEN == 32
+    case C_FSW: {
+      uintx_t addr = get_xreg(instr.rs1p()) + instr.mem4_imm();
+      MemoryWrite<float>(addr, get_fregs(instr.frs2p()), instr.rs1p());
+      break;
+    }
+#else
+    case C_SD: {
+      uintx_t addr = get_xreg(instr.rs1p()) + instr.mem8_imm();
+      MemoryWrite<uint64_t>(addr, get_xreg(instr.rs2p()), instr.rs1p());
+      break;
+    }
+#endif
+    case C_FSD: {
+      uintx_t addr = get_xreg(instr.rs1p()) + instr.mem8_imm();
+      MemoryWrite<double>(addr, get_fregd(instr.frs2p()), instr.rs1p());
+      break;
+    }
+    case C_J: {
+      pc_ += sign_extend(static_cast<int32_t>(instr.j_imm()));
+      return;
+    }
+#if XLEN == 32
+    case C_JAL: {
+      set_xreg(RA, pc_ + instr.length());
+      pc_ += sign_extend(static_cast<int32_t>(instr.j_imm()));
+      return;
+    }
+#endif
+    case C_JR: {
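+      // One encoding covers C.JR, C.MV, C.EBREAK, C.JALR, and C.ADD: the
+      // bit distinguishing C_JALR from C_JR selects the group, and zero
+      // registers select the variant within it.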
+      if (instr.encoding() & (C_JALR ^ C_JR)) {
+        if ((instr.rs1() == ZR) && (instr.rs2() == ZR)) {
+          InterpretEBREAK(instr);
+        } else if (instr.rs2() == ZR) {
+          // JALR
+          uintx_t target = get_xreg(instr.rs1());
+          set_xreg(RA, pc_ + instr.length());
+          pc_ = target;
+          return;
+        } else {
+          // ADD
+          set_xreg(instr.rd(), get_xreg(instr.rs1()) + get_xreg(instr.rs2()));
+        }
+      } else {
+        if ((instr.rd() != ZR) && (instr.rs2() != ZR)) {
+          // MV
+          set_xreg(instr.rd(), get_xreg(instr.rs2()));
+        } else if (instr.rs2() != ZR) {
+          IllegalInstruction(instr);
+        } else {
+          // JR
+          pc_ = get_xreg(instr.rs1());
+          return;
+        }
+      }
+      break;
+    }
+    case C_BEQZ:
+      if (get_xreg(instr.rs1p()) == 0) {
+        pc_ += instr.b_imm();
+        return;
+      }
+      break;
+    case C_BNEZ:
+      if (get_xreg(instr.rs1p()) != 0) {
+        pc_ += instr.b_imm();
+        return;
+      }
+      break;
+    case C_LI:
+      if (instr.rd() == ZR) {
+        IllegalInstruction(instr);
+      } else {
+        set_xreg(instr.rd(), sign_extend(instr.i_imm()));
+      }
+      break;
+    case C_LUI:
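+      // When rd is SP, this encoding is C.ADDI16SP rather than C.LUI.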
+      if (instr.rd() == SP) {
+        if (instr.i16_imm() == 0) {
+          IllegalInstruction(instr);
+        } else {
+          set_xreg(instr.rd(),
+                   get_xreg(instr.rs1()) + sign_extend(instr.i16_imm()));
+        }
+      } else if ((instr.rd() == ZR) || (instr.u_imm() == 0)) {
+        IllegalInstruction(instr);
+      } else {
+        set_xreg(instr.rd(), sign_extend(instr.u_imm()));
+      }
+      break;
+    case C_ADDI:
+      set_xreg(instr.rd(), get_xreg(instr.rs1()) + instr.i_imm());
+      break;
+#if XLEN >= 64
+    case C_ADDIW: {
+      uint32_t a = get_xreg(instr.rs1());
+      uint32_t b = instr.i_imm();
+      set_xreg(instr.rd(), sign_extend(a + b));
+      break;
+    }
+#endif  // XLEN >= 64
+    case C_ADDI4SPN:
+      if (instr.i4spn_imm() == 0) {
+        IllegalInstruction(instr);
+      } else {
+        set_xreg(instr.rdp(), get_xreg(SP) + instr.i4spn_imm());
+      }
+      break;
+    case C_SLLI:
+      if (instr.i_imm() == 0) {
+        IllegalInstruction(instr);
+      } else {
+        set_xreg(instr.rd(), get_xreg(instr.rs1())
+                                 << (instr.i_imm() & (XLEN - 1)));
+      }
+      break;
+    case C_MISCALU:
+      // Note MISCALU has a different notion of rsd′ than other instructions,
+      // so use rs1′ instead.
+      switch (instr.encoding() & C_MISCALU_MASK) {
+        case C_SRLI:
+          if (instr.i_imm() == 0) {
+            IllegalInstruction(instr);
+          } else {
+            set_xreg(instr.rs1p(),
+                     get_xreg(instr.rs1p()) >> (instr.i_imm() & (XLEN - 1)));
+          }
+          break;
+        case C_SRAI:
+          if (instr.i_imm() == 0) {
+            IllegalInstruction(instr);
+          } else {
+            set_xreg(instr.rs1p(),
+                     static_cast<intx_t>(get_xreg(instr.rs1p())) >>
+                         (instr.i_imm() & (XLEN - 1)));
+          }
+          break;
+        case C_ANDI:
+          set_xreg(instr.rs1p(), get_xreg(instr.rs1p()) & instr.i_imm());
+          break;
+        case C_RR:
+          switch (instr.encoding() & C_RR_MASK) {
+            case C_AND:
+              set_xreg(instr.rs1p(),
+                       get_xreg(instr.rs1p()) & get_xreg(instr.rs2p()));
+              break;
+            case C_OR:
+              set_xreg(instr.rs1p(),
+                       get_xreg(instr.rs1p()) | get_xreg(instr.rs2p()));
+              break;
+            case C_XOR:
+              set_xreg(instr.rs1p(),
+                       get_xreg(instr.rs1p()) ^ get_xreg(instr.rs2p()));
+              break;
+            case C_SUB:
+              set_xreg(instr.rs1p(),
+                       get_xreg(instr.rs1p()) - get_xreg(instr.rs2p()));
+              break;
+            case C_ADDW: {
+              uint32_t a = get_xreg(instr.rs1p());
+              uint32_t b = get_xreg(instr.rs2p());
+              set_xreg(instr.rs1p(), sign_extend(a + b));
+              break;
+            }
+            case C_SUBW: {
+              uint32_t a = get_xreg(instr.rs1p());
+              uint32_t b = get_xreg(instr.rs2p());
+              set_xreg(instr.rs1p(), sign_extend(a - b));
+              break;
+            }
+            default:
+              IllegalInstruction(instr);
+          }
+          break;
+        default:
+          IllegalInstruction(instr);
+      }
+      break;
+    default:
+      IllegalInstruction(instr);
+  }
+  pc_ += instr.length();
+}
+
+void Simulator::InterpretLUI(Instr instr) {
+  set_xreg(instr.rd(), sign_extend(instr.utype_imm()));
+  pc_ += instr.length();
+}
+
+void Simulator::InterpretAUIPC(Instr instr) {
+  set_xreg(instr.rd(), pc_ + sign_extend(instr.utype_imm()));
+  pc_ += instr.length();
+}
+
+void Simulator::InterpretJAL(Instr instr) {
+  set_xreg(instr.rd(), pc_ + instr.length());
+  pc_ += sign_extend(instr.jtype_imm());
+}
+
+void Simulator::InterpretJALR(Instr instr) {
+  uintx_t base = get_xreg(instr.rs1());
+  uintx_t offset = static_cast<uintx_t>(instr.itype_imm());
+  set_xreg(instr.rd(), pc_ + instr.length());
+  pc_ = base + offset;
+}
+
+void Simulator::InterpretBRANCH(Instr instr) {
+  switch (instr.funct3()) {
+    case BEQ:
+      if (get_xreg(instr.rs1()) == get_xreg(instr.rs2())) {
+        pc_ += instr.btype_imm();
+      } else {
+        pc_ += instr.length();
+      }
+      break;
+    case BNE:
+      if (get_xreg(instr.rs1()) != get_xreg(instr.rs2())) {
+        pc_ += instr.btype_imm();
+      } else {
+        pc_ += instr.length();
+      }
+      break;
+    case BLT:
+      if (static_cast<intx_t>(get_xreg(instr.rs1())) <
+          static_cast<intx_t>(get_xreg(instr.rs2()))) {
+        pc_ += instr.btype_imm();
+      } else {
+        pc_ += instr.length();
+      }
+      break;
+    case BGE:
+      if (static_cast<intx_t>(get_xreg(instr.rs1())) >=
+          static_cast<intx_t>(get_xreg(instr.rs2()))) {
+        pc_ += instr.btype_imm();
+      } else {
+        pc_ += instr.length();
+      }
+      break;
+    case BLTU:
+      if (static_cast<uintx_t>(get_xreg(instr.rs1())) <
+          static_cast<uintx_t>(get_xreg(instr.rs2()))) {
+        pc_ += instr.btype_imm();
+      } else {
+        pc_ += instr.length();
+      }
+      break;
+    case BGEU:
+      if (static_cast<uintx_t>(get_xreg(instr.rs1())) >=
+          static_cast<uintx_t>(get_xreg(instr.rs2()))) {
+        pc_ += instr.btype_imm();
+      } else {
+        pc_ += instr.length();
+      }
+      break;
+    default:
+      IllegalInstruction(instr);
+  }
+}
+
+void Simulator::InterpretLOAD(Instr instr) {
+  uintx_t addr = get_xreg(instr.rs1()) + instr.itype_imm();
+  switch (instr.funct3()) {
+    case LB:
+      set_xreg(instr.rd(), MemoryRead<int8_t>(addr, instr.rs1()));
+      break;
+    case LH:
+      set_xreg(instr.rd(), MemoryRead<int16_t>(addr, instr.rs1()));
+      break;
+    case LW:
+      set_xreg(instr.rd(), MemoryRead<int32_t>(addr, instr.rs1()));
+      break;
+    case LBU:
+      set_xreg(instr.rd(), MemoryRead<uint8_t>(addr, instr.rs1()));
+      break;
+    case LHU:
+      set_xreg(instr.rd(), MemoryRead<uint16_t>(addr, instr.rs1()));
+      break;
+#if XLEN >= 64
+    case LWU:
+      set_xreg(instr.rd(), MemoryRead<uint32_t>(addr, instr.rs1()));
+      break;
+    case LD:
+      set_xreg(instr.rd(), MemoryRead<int64_t>(addr, instr.rs1()));
+      break;
+#endif  // XLEN >= 64
+    default:
+      IllegalInstruction(instr);
+  }
+  pc_ += instr.length();
+}
+
+void Simulator::InterpretLOADFP(Instr instr) {
+  uintx_t addr = get_xreg(instr.rs1()) + instr.itype_imm();
+  switch (instr.funct3()) {
+    case S:
+      set_fregs(instr.frd(), MemoryRead<float>(addr, instr.rs1()));
+      break;
+    case D:
+      set_fregd(instr.frd(), MemoryRead<double>(addr, instr.rs1()));
+      break;
+    default:
+      IllegalInstruction(instr);
+  }
+  pc_ += instr.length();
+}
+
+void Simulator::InterpretSTORE(Instr instr) {
+  uintx_t addr = get_xreg(instr.rs1()) + instr.stype_imm();
+  switch (instr.funct3()) {
+    case SB:
+      MemoryWrite<uint8_t>(addr, get_xreg(instr.rs2()), instr.rs1());
+      break;
+    case SH:
+      MemoryWrite<uint16_t>(addr, get_xreg(instr.rs2()), instr.rs1());
+      break;
+    case SW:
+      MemoryWrite<uint32_t>(addr, get_xreg(instr.rs2()), instr.rs1());
+      break;
+#if XLEN >= 64
+    case SD:
+      MemoryWrite<uint64_t>(addr, get_xreg(instr.rs2()), instr.rs1());
+      break;
+#endif  // XLEN >= 64
+    default:
+      IllegalInstruction(instr);
+  }
+  pc_ += instr.length();
+}
+
+void Simulator::InterpretSTOREFP(Instr instr) {
+  uintx_t addr = get_xreg(instr.rs1()) + instr.stype_imm();
+  switch (instr.funct3()) {
+    case S:
+      MemoryWrite<float>(addr, get_fregs(instr.frs2()), instr.rs1());
+      break;
+    case D:
+      MemoryWrite<double>(addr, get_fregd(instr.frs2()), instr.rs1());
+      break;
+    default:
+      IllegalInstruction(instr);
+  }
+  pc_ += instr.length();
+}
+
+void Simulator::InterpretOPIMM(Instr instr) {
+  switch (instr.funct3()) {
+    case ADDI:
+      set_xreg(instr.rd(), get_xreg(instr.rs1()) + instr.itype_imm());
+      break;
+    case SLTI: {
+      set_xreg(instr.rd(), static_cast<intx_t>(get_xreg(instr.rs1())) <
+                                   static_cast<intx_t>(instr.itype_imm())
+                               ? 1
+                               : 0);
+      break;
+    }
+    case SLTIU:
+      set_xreg(instr.rd(), static_cast<uintx_t>(get_xreg(instr.rs1())) <
+                                   static_cast<uintx_t>(instr.itype_imm())
+                               ? 1
+                               : 0);
+      break;
+    case XORI:
+      set_xreg(instr.rd(), get_xreg(instr.rs1()) ^ instr.itype_imm());
+      break;
+    case ORI:
+      set_xreg(instr.rd(), get_xreg(instr.rs1()) | instr.itype_imm());
+      break;
+    case ANDI:
+      set_xreg(instr.rd(), get_xreg(instr.rs1()) & instr.itype_imm());
+      break;
+    case SLLI:
+      set_xreg(instr.rd(), get_xreg(instr.rs1()) << instr.shamt());
+      break;
+    case SRI:
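+      // funct7 bit 0 carries shamt[5] on RV64, so mask it out before
+      // comparing against SRA.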
+      if ((instr.funct7() & 0b1111110) == SRA) {
+        set_xreg(instr.rd(),
+                 static_cast<intx_t>(get_xreg(instr.rs1())) >> instr.shamt());
+      } else {
+        set_xreg(instr.rd(),
+                 static_cast<uintx_t>(get_xreg(instr.rs1())) >> instr.shamt());
+      }
+      break;
+    default:
+      IllegalInstruction(instr);
+  }
+  pc_ += instr.length();
+}
+
+void Simulator::InterpretOPIMM32(Instr instr) {
+  switch (instr.funct3()) {
+    case ADDI: {
+      uint32_t a = get_xreg(instr.rs1());
+      uint32_t b = instr.itype_imm();
+      set_xreg(instr.rd(), sign_extend(a + b));
+      break;
+    }
+    case SLLI: {
+      uint32_t a = get_xreg(instr.rs1());
+      uint32_t b = instr.shamt();
+      set_xreg(instr.rd(), sign_extend(a << b));
+      break;
+    }
+    case SRI:
+      if (instr.funct7() == SRA) {
+        int32_t a = get_xreg(instr.rs1());
+        int32_t b = instr.shamt();
+        set_xreg(instr.rd(), sign_extend(a >> b));
+      } else {
+        uint32_t a = get_xreg(instr.rs1());
+        uint32_t b = instr.shamt();
+        set_xreg(instr.rd(), sign_extend(a >> b));
+      }
+      break;
+    default:
+      IllegalInstruction(instr);
+  }
+  pc_ += instr.length();
+}
+
+void Simulator::InterpretOP(Instr instr) {
+  switch (instr.funct7()) {
+    case 0:
+      InterpretOP_0(instr);
+      break;
+    case SUB:
+      InterpretOP_SUB(instr);
+      break;
+    case MULDIV:
+      InterpretOP_MULDIV(instr);
+      break;
+    default:
+      IllegalInstruction(instr);
+  }
+}
+
+void Simulator::InterpretOP_0(Instr instr) {
+  switch (instr.funct3()) {
+    case ADD:
+      set_xreg(instr.rd(), get_xreg(instr.rs1()) + get_xreg(instr.rs2()));
+      break;
+    case SLL: {
+      uintx_t shamt = get_xreg(instr.rs2()) & (XLEN - 1);
+      set_xreg(instr.rd(), get_xreg(instr.rs1()) << shamt);
+      break;
+    }
+    case SLT:
+      set_xreg(instr.rd(), static_cast<intx_t>(get_xreg(instr.rs1())) <
+                                   static_cast<intx_t>(get_xreg(instr.rs2()))
+                               ? 1
+                               : 0);
+      break;
+    case SLTU:
+      set_xreg(instr.rd(), static_cast<uintx_t>(get_xreg(instr.rs1())) <
+                                   static_cast<uintx_t>(get_xreg(instr.rs2()))
+                               ? 1
+                               : 0);
+      break;
+    case XOR:
+      set_xreg(instr.rd(), get_xreg(instr.rs1()) ^ get_xreg(instr.rs2()));
+      break;
+    case SR: {
+      uintx_t shamt = get_xreg(instr.rs2()) & (XLEN - 1);
+      set_xreg(instr.rd(),
+               static_cast<uintx_t>(get_xreg(instr.rs1())) >> shamt);
+      break;
+    }
+    case OR:
+      set_xreg(instr.rd(), get_xreg(instr.rs1()) | get_xreg(instr.rs2()));
+      break;
+    case AND:
+      set_xreg(instr.rd(), get_xreg(instr.rs1()) & get_xreg(instr.rs2()));
+      break;
+    default:
+      IllegalInstruction(instr);
+  }
+  pc_ += instr.length();
+}
+
+static intx_t mul(intx_t a, intx_t b) {
+  return a * b;
+}
+
+static intx_t mulh(intx_t a, intx_t b) {
+  const uintx_t kLoMask = (static_cast<uintx_t>(1) << (XLEN / 2)) - 1;
+  const uintx_t kHiShift = XLEN / 2;
+
+  uintx_t a_lo = a & kLoMask;
+  intx_t a_hi = a >> kHiShift;
+  uintx_t b_lo = b & kLoMask;
+  intx_t b_hi = b >> kHiShift;
+
+  uintx_t x = a_lo * b_lo;
+  intx_t y = a_hi * b_lo;
+  intx_t z = a_lo * b_hi;
+  intx_t w = a_hi * b_hi;
+
+  intx_t r0 = (x >> kHiShift) + y;
+  intx_t r1 = (r0 & kLoMask) + z;
+  return w + (r0 >> kHiShift) + (r1 >> kHiShift);
+}
+
+static uintx_t mulhu(uintx_t a, uintx_t b) {
+  const uintx_t kLoMask = (static_cast<uintx_t>(1) << (XLEN / 2)) - 1;
+  const uintx_t kHiShift = XLEN / 2;
+
+  uintx_t a_lo = a & kLoMask;
+  uintx_t a_hi = a >> kHiShift;
+  uintx_t b_lo = b & kLoMask;
+  uintx_t b_hi = b >> kHiShift;
+
+  uintx_t x = a_lo * b_lo;
+  uintx_t y = a_hi * b_lo;
+  uintx_t z = a_lo * b_hi;
+  uintx_t w = a_hi * b_hi;
+
+  uintx_t r0 = (x >> kHiShift) + y;
+  uintx_t r1 = (r0 & kLoMask) + z;
+  return w + (r0 >> kHiShift) + (r1 >> kHiShift);
+}
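+
+// A reference sketch (illustrative only, not used by the simulator): on a
+// 64-bit host with a 128-bit integer type, mulhu above agrees with a plain
+// widening multiply.
+#if defined(__SIZEOF_INT128__) && (XLEN == 64)
+static inline uintx_t mulhu_reference(uintx_t a, uintx_t b) {
+  // Widen both operands, multiply, and keep the high 64 bits.
+  return static_cast<uintx_t>((static_cast<unsigned __int128>(a) *
+                               static_cast<unsigned __int128>(b)) >>
+                              64);
+}
+#endif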
+
+static uintx_t mulhsu(intx_t a, uintx_t b) {
+  const uintx_t kLoMask = (static_cast<uintx_t>(1) << (XLEN / 2)) - 1;
+  const uintx_t kHiShift = XLEN / 2;
+
+  uintx_t a_lo = a & kLoMask;
+  intx_t a_hi = a >> kHiShift;
+  uintx_t b_lo = b & kLoMask;
+  uintx_t b_hi = b >> kHiShift;
+
+  uintx_t x = a_lo * b_lo;
+  intx_t y = a_hi * b_lo;
+  uintx_t z = a_lo * b_hi;
+  intx_t w = a_hi * b_hi;
+
+  intx_t r0 = (x >> kHiShift) + y;
+  uintx_t r1 = (r0 & kLoMask) + z;
+  return w + (r0 >> kHiShift) + (r1 >> kHiShift);
+}
+
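+// Per the RISC-V M extension, integer division never traps: dividing by
+// zero yields all ones (-1 signed, the maximum unsigned value) with a
+// remainder equal to the dividend, and the signed overflow case
+// kMinIntX / -1 yields kMinIntX with remainder 0, as implemented below.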
+static intx_t div(intx_t a, intx_t b) {
+  if (b == 0) {
+    return -1;
+  } else if (b == -1 && a == kMinIntX) {
+    return kMinIntX;
+  } else {
+    return a / b;
+  }
+}
+
+static uintx_t divu(uintx_t a, uintx_t b) {
+  if (b == 0) {
+    return kMaxUIntX;
+  } else {
+    return a / b;
+  }
+}
+
+static intx_t rem(intx_t a, intx_t b) {
+  if (b == 0) {
+    return a;
+  } else if (b == -1 && a == kMinIntX) {
+    return 0;
+  } else {
+    return a % b;
+  }
+}
+
+static uintx_t remu(uintx_t a, uintx_t b) {
+  if (b == 0) {
+    return a;
+  } else {
+    return a % b;
+  }
+}
+
+#if XLEN >= 64
+static int32_t mulw(int32_t a, int32_t b) {
+  return a * b;
+}
+
+static int32_t divw(int32_t a, int32_t b) {
+  if (b == 0) {
+    return -1;
+  } else if (b == -1 && a == kMinInt32) {
+    return kMinInt32;
+  } else {
+    return a / b;
+  }
+}
+
+static uint32_t divuw(uint32_t a, uint32_t b) {
+  if (b == 0) {
+    return kMaxUint32;
+  } else {
+    return a / b;
+  }
+}
+
+static int32_t remw(int32_t a, int32_t b) {
+  if (b == 0) {
+    return a;
+  } else if (b == -1 && a == kMinInt32) {
+    return 0;
+  } else {
+    return a % b;
+  }
+}
+
+static uint32_t remuw(uint32_t a, uint32_t b) {
+  if (b == 0) {
+    return a;
+  } else {
+    return a % b;
+  }
+}
+#endif  // XLEN >= 64
+
+void Simulator::InterpretOP_MULDIV(Instr instr) {
+  switch (instr.funct3()) {
+    case MUL:
+      set_xreg(instr.rd(), mul(get_xreg(instr.rs1()), get_xreg(instr.rs2())));
+      break;
+    case MULH:
+      set_xreg(instr.rd(), mulh(get_xreg(instr.rs1()), get_xreg(instr.rs2())));
+      break;
+    case MULHSU:
+      set_xreg(instr.rd(),
+               mulhsu(get_xreg(instr.rs1()), get_xreg(instr.rs2())));
+      break;
+    case MULHU:
+      set_xreg(instr.rd(), mulhu(get_xreg(instr.rs1()), get_xreg(instr.rs2())));
+      break;
+    case DIV:
+      set_xreg(instr.rd(), div(get_xreg(instr.rs1()), get_xreg(instr.rs2())));
+      break;
+    case DIVU:
+      set_xreg(instr.rd(), divu(get_xreg(instr.rs1()), get_xreg(instr.rs2())));
+      break;
+    case REM:
+      set_xreg(instr.rd(), rem(get_xreg(instr.rs1()), get_xreg(instr.rs2())));
+      break;
+    case REMU:
+      set_xreg(instr.rd(), remu(get_xreg(instr.rs1()), get_xreg(instr.rs2())));
+      break;
+    default:
+      IllegalInstruction(instr);
+  }
+  pc_ += instr.length();
+}
+
+void Simulator::InterpretOP_SUB(Instr instr) {
+  switch (instr.funct3()) {
+    case ADD:
+      set_xreg(instr.rd(), get_xreg(instr.rs1()) - get_xreg(instr.rs2()));
+      break;
+    case SR: {
+      uintx_t shamt = get_xreg(instr.rs2()) & (XLEN - 1);
+      set_xreg(instr.rd(), static_cast<intx_t>(get_xreg(instr.rs1())) >> shamt);
+      break;
+    }
+    default:
+      IllegalInstruction(instr);
+  }
+  pc_ += instr.length();
+}
+
+void Simulator::InterpretOP32(Instr instr) {
+  switch (instr.funct7()) {
+#if XLEN >= 64
+    case 0:
+      InterpretOP32_0(instr);
+      break;
+    case SUB:
+      InterpretOP32_SUB(instr);
+      break;
+    case MULDIV:
+      InterpretOP32_MULDIV(instr);
+      break;
+#endif  // XLEN >= 64
+    default:
+      IllegalInstruction(instr);
+  }
+}
+
+void Simulator::InterpretOP32_0(Instr instr) {
+  switch (instr.funct3()) {
+#if XLEN >= 64
+    case ADD: {
+      uint32_t a = get_xreg(instr.rs1());
+      uint32_t b = get_xreg(instr.rs2());
+      set_xreg(instr.rd(), sign_extend(a + b));
+      break;
+    }
+    case SLL: {
+      uint32_t a = get_xreg(instr.rs1());
+      uint32_t b = get_xreg(instr.rs2()) & (32 - 1);
+      set_xreg(instr.rd(), sign_extend(a << b));
+      break;
+    }
+    case SR: {
+      uint32_t b = get_xreg(instr.rs2()) & (32 - 1);
+      uint32_t a = get_xreg(instr.rs1());
+      set_xreg(instr.rd(), sign_extend(a >> b));
+      break;
+    }
+#endif  // XLEN >= 64
+    default:
+      IllegalInstruction(instr);
+  }
+  pc_ += instr.length();
+}
+
+void Simulator::InterpretOP32_SUB(Instr instr) {
+  switch (instr.funct3()) {
+#if XLEN >= 64
+    case ADD: {
+      uint32_t a = get_xreg(instr.rs1());
+      uint32_t b = get_xreg(instr.rs2());
+      set_xreg(instr.rd(), sign_extend(a - b));
+      break;
+    }
+    case SR: {
+      uint32_t b = get_xreg(instr.rs2()) & (32 - 1);
+      int32_t a = get_xreg(instr.rs1());
+      set_xreg(instr.rd(), sign_extend(a >> b));
+      break;
+    }
+#endif  // XLEN >= 64
+    default:
+      IllegalInstruction(instr);
+  }
+  pc_ += instr.length();
+}
+
+void Simulator::InterpretOP32_MULDIV(Instr instr) {
+  switch (instr.funct3()) {
+#if XLEN >= 64
+    case MULW:
+      set_xreg(instr.rd(),
+               sign_extend(mulw(get_xreg(instr.rs1()), get_xreg(instr.rs2()))));
+      break;
+    case DIVW:
+      set_xreg(instr.rd(),
+               sign_extend(divw(get_xreg(instr.rs1()), get_xreg(instr.rs2()))));
+      break;
+    case DIVUW:
+      set_xreg(instr.rd(), sign_extend(divuw(get_xreg(instr.rs1()),
+                                             get_xreg(instr.rs2()))));
+      break;
+    case REMW:
+      set_xreg(instr.rd(),
+               sign_extend(remw(get_xreg(instr.rs1()), get_xreg(instr.rs2()))));
+      break;
+    case REMUW:
+      set_xreg(instr.rd(), sign_extend(remuw(get_xreg(instr.rs1()),
+                                             get_xreg(instr.rs2()))));
+      break;
+#endif  // XLEN >= 64
+    default:
+      IllegalInstruction(instr);
+  }
+  pc_ += instr.length();
+}
+
+void Simulator::InterpretMISCMEM(Instr instr) {
+  switch (instr.funct3()) {
+    case FENCE:
+      std::atomic_thread_fence(std::memory_order_acq_rel);
+      break;
+    case FENCEI:
+      // Nothing to do: simulated instructions are data on the host.
+      break;
+    default:
+      IllegalInstruction(instr);
+  }
+  pc_ += instr.length();
+}
+
+void Simulator::InterpretSYSTEM(Instr instr) {
+  switch (instr.funct3()) {
+    case 0:
+      switch (instr.funct12()) {
+        case ECALL:
+          InterpretECALL(instr);
+          return;
+        case EBREAK:
+          InterpretEBREAK(instr);
+          return;
+        default:
+          IllegalInstruction(instr);
+      }
+      break;
+    case CSRRW: {
+      if (instr.rd() == ZR) {
+        // No read effect.
+        CSRWrite(instr.csr(), get_xreg(instr.rs1()));
+      } else {
+        intx_t result = CSRRead(instr.csr());
+        CSRWrite(instr.csr(), get_xreg(instr.rs1()));
+        set_xreg(instr.rd(), result);
+      }
+      break;
+    }
+    case CSRRS: {
+      intx_t result = CSRRead(instr.csr());
+      if (instr.rs1() == ZR) {
+        // No write effect.
+      } else {
+        CSRSet(instr.csr(), get_xreg(instr.rs1()));
+      }
+      set_xreg(instr.rd(), result);
+      break;
+    }
+    case CSRRC: {
+      intx_t result = CSRRead(instr.csr());
+      if (instr.rs1() == ZR) {
+        // No write effect.
+      } else {
+        CSRClear(instr.csr(), get_xreg(instr.rs1()));
+      }
+      set_xreg(instr.rd(), result);
+      break;
+    }
+    case CSRRWI: {
+      if (instr.rd() == ZR) {
+        // No read effect.
+        CSRWrite(instr.csr(), instr.zimm());
+      } else {
+        intx_t result = CSRRead(instr.csr());
+        CSRWrite(instr.csr(), instr.zimm());
+        set_xreg(instr.rd(), result);
+      }
+      break;
+    }
+    case CSRRSI: {
+      intx_t result = CSRRead(instr.csr());
+      if (instr.zimm() == 0) {
+        // No write effect.
+      } else {
+        CSRSet(instr.csr(), instr.zimm());
+      }
+      set_xreg(instr.rd(), result);
+      break;
+    }
+    case CSRRCI: {
+      intx_t result = CSRRead(instr.csr());
+      if (instr.zimm() == 0) {
+        // No write effect.
+      } else {
+        CSRClear(instr.csr(), instr.zimm());
+      }
+      set_xreg(instr.rd(), result);
+      break;
+    }
+    default:
+      IllegalInstruction(instr);
+  }
+  pc_ += instr.length();
+}
+
+// Calls into the Dart runtime are based on this interface.
+typedef void (*SimulatorRuntimeCall)(NativeArguments arguments);
+
+// Calls to leaf Dart runtime functions are based on this interface.
+typedef intx_t (*SimulatorLeafRuntimeCall)(intx_t r0,
+                                           intx_t r1,
+                                           intx_t r2,
+                                           intx_t r3,
+                                           intx_t r4,
+                                           intx_t r5,
+                                           intx_t r6,
+                                           intx_t r7);
+
+// [target] comes in several signatures that differ from
+// SimulatorLeafRuntimeCall. We can call them all through this one shim only
+// because the host calling conventions (e.g. X64) let a function be called
+// with extra arguments: the callee reads just its declared arguments, and
+// the extras do not unbalance the stack.
+NO_SANITIZE_UNDEFINED("function")
+static intx_t InvokeLeafRuntime(SimulatorLeafRuntimeCall target,
+                                intx_t r0,
+                                intx_t r1,
+                                intx_t r2,
+                                intx_t r3,
+                                intx_t r4,
+                                intx_t r5,
+                                intx_t r6,
+                                intx_t r7) {
+  return target(r0, r1, r2, r3, r4, r5, r6, r7);
+}
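+
+// For instance (a hypothetical target, not part of this patch): a leaf
+// function declared as `intx_t Add2(intx_t a, intx_t b)` may be cast to
+// SimulatorLeafRuntimeCall and invoked through InvokeLeafRuntime with all
+// eight register values; the callee reads only a and b and ignores the rest.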
+
+// Calls to leaf float Dart runtime functions are based on this interface.
+typedef double (*SimulatorLeafFloatRuntimeCall)(double d0,
+                                                double d1,
+                                                double d2,
+                                                double d3,
+                                                double d4,
+                                                double d5,
+                                                double d6,
+                                                double d7);
+
+// [target] comes in several signatures that differ from
+// SimulatorLeafFloatRuntimeCall. We can call them all through this one shim
+// only because the host calling conventions (e.g. X64) let a function be
+// called with extra arguments: the callee reads just its declared arguments,
+// and the extras do not unbalance the stack.
+NO_SANITIZE_UNDEFINED("function")
+static double InvokeFloatLeafRuntime(SimulatorLeafFloatRuntimeCall target,
+                                     double d0,
+                                     double d1,
+                                     double d2,
+                                     double d3,
+                                     double d4,
+                                     double d5,
+                                     double d6,
+                                     double d7) {
+  return target(d0, d1, d2, d3, d4, d5, d6, d7);
+}
+
+// Calls to native Dart functions are based on this interface.
+typedef void (*SimulatorNativeCallWrapper)(Dart_NativeArguments arguments,
+                                           Dart_NativeFunction target);
+
+void Simulator::InterpretECALL(Instr instr) {
+  if (instr.rs1() != ZR) {
+    // Fake instruction generated by Assembler::SimulatorPrintObject.
+    if (true || IsTracingExecution()) {
+      uintx_t raw = get_xreg(instr.rs1());
+      Object& obj = Object::Handle(static_cast<ObjectPtr>(raw));
+      THR_Print("%" Px ": %s = %s\n", pc_, cpu_reg_names[instr.rs1()],
+                obj.ToCString());
+      FLAG_trace_sim_after = 1;
+    }
+    pc_ += instr.length();
+    return;
+  }
+
+  // The C ABI stack alignment is 16 bytes on both RV32 and RV64.
+  if (!Utils::IsAligned(get_xreg(SP), 16)) {
+    PrintRegisters();
+    PrintStack();
+    FATAL("Stack misaligned at call to C function");
+  }
+
+  SimulatorSetjmpBuffer buffer(this);
+  if (!setjmp(buffer.buffer_)) {
+    uintx_t saved_ra = get_xreg(RA);
+    Redirection* redirection = Redirection::FromECallInstruction(pc_);
+    uword external = redirection->external_function();
+    if (IsTracingExecution()) {
+      THR_Print("Call to host function at 0x%" Pd "\n", external);
+    }
+
+    if (redirection->call_kind() == kRuntimeCall) {
+      NativeArguments* arguments =
+          reinterpret_cast<NativeArguments*>(get_register(A0));
+      SimulatorRuntimeCall target =
+          reinterpret_cast<SimulatorRuntimeCall>(external);
+      target(*arguments);
+      ClobberVolatileRegisters();
+    } else if (redirection->call_kind() == kLeafRuntimeCall) {
+      ASSERT((0 <= redirection->argument_count()) &&
+             (redirection->argument_count() <= 8));
+      SimulatorLeafRuntimeCall target =
+          reinterpret_cast<SimulatorLeafRuntimeCall>(external);
+      const intx_t r0 = get_register(A0);
+      const intx_t r1 = get_register(A1);
+      const intx_t r2 = get_register(A2);
+      const intx_t r3 = get_register(A3);
+      const intx_t r4 = get_register(A4);
+      const intx_t r5 = get_register(A5);
+      const intx_t r6 = get_register(A6);
+      const intx_t r7 = get_register(A7);
+      const intx_t res =
+          InvokeLeafRuntime(target, r0, r1, r2, r3, r4, r5, r6, r7);
+      ClobberVolatileRegisters();
+      set_xreg(A0, res);  // Set returned result from function.
+    } else if (redirection->call_kind() == kLeafFloatRuntimeCall) {
+      ASSERT((0 <= redirection->argument_count()) &&
+             (redirection->argument_count() <= 8));
+      SimulatorLeafFloatRuntimeCall target =
+          reinterpret_cast<SimulatorLeafFloatRuntimeCall>(external);
+      const double d0 = get_fregd(FA0);
+      const double d1 = get_fregd(FA1);
+      const double d2 = get_fregd(FA2);
+      const double d3 = get_fregd(FA3);
+      const double d4 = get_fregd(FA4);
+      const double d5 = get_fregd(FA5);
+      const double d6 = get_fregd(FA6);
+      const double d7 = get_fregd(FA7);
+      const double res =
+          InvokeFloatLeafRuntime(target, d0, d1, d2, d3, d4, d5, d6, d7);
+      ClobberVolatileRegisters();
+      set_fregd(FA0, res);
+    } else if (redirection->call_kind() == kNativeCallWrapper) {
+      SimulatorNativeCallWrapper wrapper =
+          reinterpret_cast<SimulatorNativeCallWrapper>(external);
+      Dart_NativeArguments arguments =
+          reinterpret_cast<Dart_NativeArguments>(get_register(A0));
+      Dart_NativeFunction target =
+          reinterpret_cast<Dart_NativeFunction>(get_register(A1));
+      wrapper(arguments, target);
+      ClobberVolatileRegisters();
+    } else {
+      UNREACHABLE();
+    }
+
+    // Return.
+    pc_ = saved_ra;
+  } else {
+    // Coming via long jump from a throw. Continue to exception handler.
+  }
+}
+
+void Simulator::InterpretAMO(Instr instr) {
+  switch (instr.funct3()) {
+    case WIDTH32:
+      InterpretAMO32(instr);
+      break;
+    case WIDTH64:
+      InterpretAMO64(instr);
+      break;
+    default:
+      IllegalInstruction(instr);
+  }
+}
+
+// Note: This implementation does not give full LR/SC semantics because it
+// suffers from the ABA problem.
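+//
+// A sketch of the hazard (a hypothetical interleaving, not simulator code):
+//   hart 0: lr.w t0, (a0)      # reserve (a0), observe value A
+//   hart 1: sw t1, (a0)        # store B, then store A back (ABA)
+//   hart 0: sc.w t2, t3, (a0)  # the compare-exchange below still sees A,
+//                              # so this SC succeeds; real hardware may
+//                              # legitimately fail it.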
+
+template <typename type>
+void Simulator::InterpretLR(Instr instr) {
+  uintx_t addr = get_xreg(instr.rs1());
+  if ((addr & (sizeof(type) - 1)) != 0) {
+    FATAL("Misaligned atomic memory operation");
+  }
+  std::atomic<type>* atomic = reinterpret_cast<std::atomic<type>*>(addr);
+  reserved_address_ = addr;
+  reserved_value_ = atomic->load(instr.memory_order());
+  set_xreg(instr.rd(), reserved_value_);
+}
+
+template <typename type>
+void Simulator::InterpretSC(Instr instr) {
+  uintx_t addr = get_xreg(instr.rs1());
+  if ((addr & (sizeof(type) - 1)) != 0) {
+    FATAL("Misaligned atomic memory operation");
+  }
+  std::atomic<type>* atomic = reinterpret_cast<std::atomic<type>*>(addr);
+  if (addr != reserved_address_) {
+    set_xreg(instr.rd(), 1);
+    return;
+  }
+  type expected = reserved_value_;
+  type desired = get_xreg(instr.rs2());
+  bool success =
+      atomic->compare_exchange_strong(expected, desired, instr.memory_order());
+  set_xreg(instr.rd(), success ? 0 : 1);
+}
+
+template <typename type>
+void Simulator::InterpretAMOSWAP(Instr instr) {
+  uintx_t addr = get_xreg(instr.rs1());
+  if ((addr & (sizeof(type) - 1)) != 0) {
+    FATAL("Misaligned atomic memory operation");
+  }
+  std::atomic<type>* atomic = reinterpret_cast<std::atomic<type>*>(addr);
+  type desired = get_xreg(instr.rs2());
+  type result = atomic->exchange(desired, instr.memory_order());
+  set_xreg(instr.rd(), sign_extend(result));
+}
+
+template <typename type>
+void Simulator::InterpretAMOADD(Instr instr) {
+  uintx_t addr = get_xreg(instr.rs1());
+  if ((addr & (sizeof(type) - 1)) != 0) {
+    FATAL("Misaligned atomic memory operation");
+  }
+  std::atomic<type>* atomic = reinterpret_cast<std::atomic<type>*>(addr);
+  type arg = get_xreg(instr.rs2());
+  type result = atomic->fetch_add(arg, instr.memory_order());
+  set_xreg(instr.rd(), sign_extend(result));
+}
+
+template <typename type>
+void Simulator::InterpretAMOXOR(Instr instr) {
+  uintx_t addr = get_xreg(instr.rs1());
+  if ((addr & (sizeof(type) - 1)) != 0) {
+    FATAL("Misaligned atomic memory operation");
+  }
+  std::atomic<type>* atomic = reinterpret_cast<std::atomic<type>*>(addr);
+  type arg = get_xreg(instr.rs2());
+  type result = atomic->fetch_xor(arg, instr.memory_order());
+  set_xreg(instr.rd(), sign_extend(result));
+}
+
+template <typename type>
+void Simulator::InterpretAMOAND(Instr instr) {
+  uintx_t addr = get_xreg(instr.rs1());
+  if ((addr & (sizeof(type) - 1)) != 0) {
+    FATAL("Misaligned atomic memory operation");
+  }
+  std::atomic<type>* atomic = reinterpret_cast<std::atomic<type>*>(addr);
+  type arg = get_xreg(instr.rs2());
+  type result = atomic->fetch_and(arg, instr.memory_order());
+  set_xreg(instr.rd(), sign_extend(result));
+}
+
+template <typename type>
+void Simulator::InterpretAMOOR(Instr instr) {
+  uintx_t addr = get_xreg(instr.rs1());
+  if ((addr & (sizeof(type) - 1)) != 0) {
+    FATAL("Misaligned atomic memory operation");
+  }
+  std::atomic<type>* atomic = reinterpret_cast<std::atomic<type>*>(addr);
+  type arg = get_xreg(instr.rs2());
+  type result = atomic->fetch_or(arg, instr.memory_order());
+  set_xreg(instr.rd(), sign_extend(result));
+}
+
+template <typename type>
+void Simulator::InterpretAMOMIN(Instr instr) {
+  uintx_t addr = get_xreg(instr.rs1());
+  if ((addr & (sizeof(type) - 1)) != 0) {
+    FATAL("Misaligned atomic memory operation");
+  }
+  std::atomic<type>* atomic = reinterpret_cast<std::atomic<type>*>(addr);
+  type expected = atomic->load(std::memory_order_relaxed);
+  type compare = get_xreg(instr.rs2());
+  type desired;
+  do {
+    desired = expected < compare ? expected : compare;
+  } while (
+      !atomic->compare_exchange_weak(expected, desired, instr.memory_order()));
+  set_xreg(instr.rd(), sign_extend(expected));
+}
+
+template <typename type>
+void Simulator::InterpretAMOMAX(Instr instr) {
+  uintx_t addr = get_xreg(instr.rs1());
+  if ((addr & (sizeof(type) - 1)) != 0) {
+    FATAL("Misaligned atomic memory operation");
+  }
+  std::atomic<type>* atomic = reinterpret_cast<std::atomic<type>*>(addr);
+  type expected = atomic->load(std::memory_order_relaxed);
+  type compare = get_xreg(instr.rs2());
+  type desired;
+  do {
+    desired = expected > compare ? expected : compare;
+  } while (
+      !atomic->compare_exchange_weak(expected, desired, instr.memory_order()));
+  set_xreg(instr.rd(), sign_extend(expected));
+}
+
+void Simulator::InterpretAMO32(Instr instr) {
+  switch (instr.funct5()) {
+    case LR:
+      InterpretLR<int32_t>(instr);
+      break;
+    case SC:
+      InterpretSC<int32_t>(instr);
+      break;
+    case AMOSWAP:
+      InterpretAMOSWAP<int32_t>(instr);
+      break;
+    case AMOADD:
+      InterpretAMOADD<int32_t>(instr);
+      break;
+    case AMOXOR:
+      InterpretAMOXOR<int32_t>(instr);
+      break;
+    case AMOAND:
+      InterpretAMOAND<int32_t>(instr);
+      break;
+    case AMOOR:
+      InterpretAMOOR<int32_t>(instr);
+      break;
+    case AMOMIN:
+      InterpretAMOMIN<int32_t>(instr);
+      break;
+    case AMOMAX:
+      InterpretAMOMAX<int32_t>(instr);
+      break;
+    case AMOMINU:
+      InterpretAMOMIN<uint32_t>(instr);
+      break;
+    case AMOMAXU:
+      InterpretAMOMAX<uint32_t>(instr);
+      break;
+    default:
+      IllegalInstruction(instr);
+  }
+  pc_ += instr.length();
+}
+
+void Simulator::InterpretAMO64(Instr instr) {
+  switch (instr.funct5()) {
+#if XLEN >= 64
+    case LR:
+      InterpretLR<int64_t>(instr);
+      break;
+    case SC:
+      InterpretSC<int64_t>(instr);
+      break;
+    case AMOSWAP:
+      InterpretAMOSWAP<int64_t>(instr);
+      break;
+    case AMOADD:
+      InterpretAMOADD<int64_t>(instr);
+      break;
+    case AMOXOR:
+      InterpretAMOXOR<int64_t>(instr);
+      break;
+    case AMOAND:
+      InterpretAMOAND<int64_t>(instr);
+      break;
+    case AMOOR:
+      InterpretAMOOR<int64_t>(instr);
+      break;
+    case AMOMIN:
+      InterpretAMOMIN<int64_t>(instr);
+      break;
+    case AMOMAX:
+      InterpretAMOMAX<int64_t>(instr);
+      break;
+    case AMOMINU:
+      InterpretAMOMIN<uint64_t>(instr);
+      break;
+    case AMOMAXU:
+      InterpretAMOMAX<uint64_t>(instr);
+      break;
+#endif  // XLEN >= 64
+    default:
+      IllegalInstruction(instr);
+  }
+  pc_ += instr.length();
+}
+
+void Simulator::InterpretFMADD(Instr instr) {
+  switch (instr.funct2()) {
+    case F2_S: {
+      float rs1 = get_fregs(instr.frs1());
+      float rs2 = get_fregs(instr.frs2());
+      float rs3 = get_fregs(instr.frs3());
+      set_fregs(instr.frd(), (rs1 * rs2) + rs3);
+      break;
+    }
+    case F2_D: {
+      double rs1 = get_fregd(instr.frs1());
+      double rs2 = get_fregd(instr.frs2());
+      double rs3 = get_fregd(instr.frs3());
+      set_fregd(instr.frd(), (rs1 * rs2) + rs3);
+      break;
+    }
+    default:
+      IllegalInstruction(instr);
+  }
+  pc_ += instr.length();
+}
+
+void Simulator::InterpretFMSUB(Instr instr) {
+  switch (instr.funct2()) {
+    case F2_S: {
+      float rs1 = get_fregs(instr.frs1());
+      float rs2 = get_fregs(instr.frs2());
+      float rs3 = get_fregs(instr.frs3());
+      set_fregs(instr.frd(), (rs1 * rs2) - rs3);
+      break;
+    }
+    case F2_D: {
+      double rs1 = get_fregd(instr.frs1());
+      double rs2 = get_fregd(instr.frs2());
+      double rs3 = get_fregd(instr.frs3());
+      set_fregd(instr.frd(), (rs1 * rs2) - rs3);
+      break;
+    }
+    default:
+      IllegalInstruction(instr);
+  }
+  pc_ += instr.length();
+}
+
+void Simulator::InterpretFNMSUB(Instr instr) {
+  switch (instr.funct2()) {
+    case F2_S: {
+      float rs1 = get_fregs(instr.frs1());
+      float rs2 = get_fregs(instr.frs2());
+      float rs3 = get_fregs(instr.frs3());
+      set_fregs(instr.frd(), -(rs1 * rs2) + rs3);
+      break;
+    }
+    case F2_D: {
+      double rs1 = get_fregd(instr.frs1());
+      double rs2 = get_fregd(instr.frs2());
+      double rs3 = get_fregd(instr.frs3());
+      set_fregd(instr.frd(), -(rs1 * rs2) + rs3);
+      break;
+    }
+    default:
+      IllegalInstruction(instr);
+  }
+  pc_ += instr.length();
+}
+
+void Simulator::InterpretFNMADD(Instr instr) {
+  switch (instr.funct2()) {
+    case F2_S: {
+      float rs1 = get_fregs(instr.frs1());
+      float rs2 = get_fregs(instr.frs2());
+      float rs3 = get_fregs(instr.frs3());
+      set_fregs(instr.frd(), -(rs1 * rs2) - rs3);
+      break;
+    }
+    case F2_D: {
+      double rs1 = get_fregd(instr.frs1());
+      double rs2 = get_fregd(instr.frs2());
+      double rs3 = get_fregd(instr.frs3());
+      set_fregd(instr.frd(), -(rs1 * rs2) - rs3);
+      break;
+    }
+    default:
+      IllegalInstruction(instr);
+  }
+  pc_ += instr.length();
+}
+
+static bool is_quiet(float x) {
+  // Warning: This (quiet bit = most significant significand bit) holds on
+  // Intel/ARM/RISC-V per IEEE 754-2008, but not everywhere; legacy MIPS
+  // used the opposite convention.
+  return (bit_cast<uint32_t>(x) & (static_cast<uint32_t>(1) << 22)) != 0;
+}
+
+static uintx_t fclass(float x) {
+  ASSERT(!is_quiet(std::numeric_limits<float>::signaling_NaN()));
+  ASSERT(is_quiet(std::numeric_limits<float>::quiet_NaN()));
+
+  switch (fpclassify(x)) {
+    case FP_INFINITE:
+      return signbit(x) ? kFClassNegInfinity : kFClassPosInfinity;
+    case FP_NAN:
+      return is_quiet(x) ? kFClassQuietNan : kFClassSignallingNan;
+    case FP_ZERO:
+      return signbit(x) ? kFClassNegZero : kFClassPosZero;
+    case FP_SUBNORMAL:
+      return signbit(x) ? kFClassNegSubnormal : kFClassPosSubnormal;
+    case FP_NORMAL:
+      return signbit(x) ? kFClassNegNormal : kFClassPosNormal;
+    default:
+      UNREACHABLE();
+      return 0;
+  }
+}
+
+static bool is_quiet(double x) {
+  // Warning: This (quiet bit = most significant significand bit) holds on
+  // Intel/ARM/RISC-V per IEEE 754-2008, but not everywhere; legacy MIPS
+  // used the opposite convention.
+  return (bit_cast<uint64_t>(x) & (static_cast<uint64_t>(1) << 51)) != 0;
+}
+
+static uintx_t fclass(double x) {
+  ASSERT(!is_quiet(std::numeric_limits<double>::signaling_NaN()));
+  ASSERT(is_quiet(std::numeric_limits<double>::quiet_NaN()));
+
+  switch (fpclassify(x)) {
+    case FP_INFINITE:
+      return signbit(x) ? kFClassNegInfinity : kFClassPosInfinity;
+    case FP_NAN:
+      return is_quiet(x) ? kFClassQuietNan : kFClassSignallingNan;
+    case FP_ZERO:
+      return signbit(x) ? kFClassNegZero : kFClassPosZero;
+    case FP_SUBNORMAL:
+      return signbit(x) ? kFClassNegSubnormal : kFClassPosSubnormal;
+    case FP_NORMAL:
+      return signbit(x) ? kFClassNegNormal : kFClassPosNormal;
+    default:
+      UNREACHABLE();
+      return 0;
+  }
+}
+
+static float roundevenf(float x) {
+  float rounded = roundf(x);
+  if (fabsf(x - rounded) == 0.5f) {  // Tie
+    if (fmodf(rounded, 2) != 0) {    // Not even
+      if (rounded > 0.0f) {
+        rounded -= 1.0f;
+      } else {
+        rounded += 1.0f;
+      }
+      ASSERT(fmodf(rounded, 2) == 0);
+    }
+  }
+  return rounded;
+}
+
+static double roundeven(double x) {
+  double rounded = round(x);
+  if (fabs(x - rounded) == 0.5) {  // Tie
+    if (fmod(rounded, 2) != 0) {   // Not even
+      if (rounded > 0.0) {
+        rounded -= 1.0;
+      } else {
+        rounded += 1.0;
+      }
+      ASSERT(fmod(rounded, 2) == 0);
+    }
+  }
+  return rounded;
+}
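+
+// For example, ties round to the even neighbor: roundeven(0.5) == 0.0,
+// roundeven(1.5) == 2.0, and roundeven(2.5) == 2.0.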
+
+static float Round(float x, RoundingMode rounding) {
+  switch (rounding) {
+    case RNE:  // Round to Nearest, ties to Even
+      return roundevenf(x);
+    case RTZ:  // Round towards Zero
+      return truncf(x);
+    case RDN:  // Round Down (toward negative infinity)
+      return floorf(x);
+    case RUP:  // Round Up (toward positive infinity)
+      return ceilf(x);
+    case RMM:  // Round to nearest, ties to Max Magnitude
+      return roundf(x);
+    case DYN:  // Dynamic rounding mode
+      UNIMPLEMENTED();
+    default:
+      FATAL("Invalid rounding mode");
+  }
+}
+
+static double Round(double x, RoundingMode rounding) {
+  switch (rounding) {
+    case RNE:  // Round to Nearest, ties to Even
+      return roundeven(x);
+    case RTZ:  // Round towards Zero
+      return trunc(x);
+    case RDN:  // Round Down (toward negative infinity)
+      return floor(x);
+    case RUP:  // Round Up (toward positive infinity)
+      return ceil(x);
+    case RMM:  // Round to nearest, ties to Max Magnitude
+      return round(x);
+    case DYN:  // Dynamic rounding mode
+      UNIMPLEMENTED();
+    default:
+      FATAL("Invalid rounding mode");
+  }
+}
+
+static int32_t fcvtws(float x, RoundingMode rounding) {
+  if (x < static_cast<float>(kMinInt32)) {
+    return kMinInt32;  // Negative infinity.
+  }
+  if (x < static_cast<float>(kMaxInt32)) {
+    return static_cast<int32_t>(Round(x, rounding));
+  }
+  return kMaxInt32;  // Positive infinity, NaN.
+}
+
+static uint32_t fcvtwus(float x, RoundingMode rounding) {
+  if (x < static_cast<float>(0)) {
+    return 0;  // Negative infinity.
+  }
+  if (x < static_cast<float>(kMaxUint32)) {
+    return static_cast<uint32_t>(Round(x, rounding));
+  }
+  return kMaxUint32;  // Positive infinity, NaN.
+}
+
+#if XLEN >= 64
+static int64_t fcvtls(float x, RoundingMode rounding) {
+  if (x < static_cast<float>(kMinInt64)) {
+    return kMinInt64;  // Negative infinity.
+  }
+  if (x < static_cast<float>(kMaxInt64)) {
+    return static_cast<int64_t>(Round(x, rounding));
+  }
+  return kMaxInt64;  // Positive infinity, NaN.
+}
+
+static uint64_t fcvtlus(float x, RoundingMode rounding) {
+  if (x < static_cast<float>(0.0)) {
+    return 0;  // Negative infinity.
+  }
+  if (x < static_cast<float>(kMaxUint64)) {
+    return static_cast<uint64_t>(Round(x, rounding));
+  }
+  return kMaxUint64;  // Positive infinity, NaN.
+}
+#endif  // XLEN >= 64
+
+static int32_t fcvtwd(double x, RoundingMode rounding) {
+  if (x < static_cast<double>(kMinInt32)) {
+    return kMinInt32;  // Negative infinity.
+  }
+  if (x < static_cast<double>(kMaxInt32)) {
+    return static_cast<int32_t>(Round(x, rounding));
+  }
+  return kMaxInt32;  // Positive infinity, NaN.
+}
+
+static uint32_t fcvtwud(double x, RoundingMode rounding) {
+  if (x < static_cast<double>(0)) {
+    return 0;  // Negative infinity.
+  }
+  if (x < static_cast<double>(kMaxUint32)) {
+    return static_cast<uint32_t>(Round(x, rounding));
+  }
+  return kMaxUint32;  // Positive infinity, NaN.
+}
+
+#if XLEN >= 64
+static int64_t fcvtld(double x, RoundingMode rounding) {
+  if (x < static_cast<double>(kMinInt64)) {
+    return kMinInt64;  // Negative infinity.
+  }
+  if (x < static_cast<double>(kMaxInt64)) {
+    return static_cast<int64_t>(Round(x, rounding));
+  }
+  return kMaxInt64;  // Positive infinity, NaN.
+}
+
+static uint64_t fcvtlud(double x, RoundingMode rounding) {
+  if (x < static_cast<double>(0.0)) {
+    return 0;  // Negative infinity.
+  }
+  if (x < static_cast<double>(kMaxUint64)) {
+    return static_cast<uint64_t>(Round(x, rounding));
+  }
+  return kMaxUint64;  // Positive infinity, NaN.
+}
+#endif  // XLEN >= 64
+
+void Simulator::InterpretOPFP(Instr instr) {
+  switch (instr.funct7()) {
+    case FADDS: {
+      float rs1 = get_fregs(instr.frs1());
+      float rs2 = get_fregs(instr.frs2());
+      set_fregs(instr.frd(), rs1 + rs2);
+      break;
+    }
+    case FSUBS: {
+      float rs1 = get_fregs(instr.frs1());
+      float rs2 = get_fregs(instr.frs2());
+      set_fregs(instr.frd(), rs1 - rs2);
+      break;
+    }
+    case FMULS: {
+      float rs1 = get_fregs(instr.frs1());
+      float rs2 = get_fregs(instr.frs2());
+      set_fregs(instr.frd(), rs1 * rs2);
+      break;
+    }
+    case FDIVS: {
+      float rs1 = get_fregs(instr.frs1());
+      float rs2 = get_fregs(instr.frs2());
+      set_fregs(instr.frd(), rs1 / rs2);
+      break;
+    }
+    case FSQRTS: {
+      float rs1 = get_fregs(instr.frs1());
+      set_fregs(instr.frd(), sqrtf(rs1));
+      break;
+    }
+    case FSGNJS: {
+      const uint32_t kSignMask = static_cast<uint32_t>(1) << 31;
+      uint32_t rs1 = bit_cast<uint32_t>(get_fregs(instr.frs1()));
+      uint32_t rs2 = bit_cast<uint32_t>(get_fregs(instr.frs2()));
+      uint32_t result;
+      switch (instr.funct3()) {
+        case J:
+          result = (rs1 & ~kSignMask) | (rs2 & kSignMask);
+          break;
+        case JN:
+          result = (rs1 & ~kSignMask) | (~rs2 & kSignMask);
+          break;
+        case JX:
+          result = (rs1 & ~kSignMask) | ((rs1 ^ rs2) & kSignMask);
+          break;
+        default:
+          IllegalInstruction(instr);
+      }
+      set_fregs(instr.frd(), bit_cast<float>(result));
+      break;
+    }
+    case FMINMAXS: {
+      float rs1 = get_fregs(instr.frs1());
+      float rs2 = get_fregs(instr.frs2());
+      switch (instr.funct3()) {
+        case MIN:
+          set_fregs(instr.frd(), fminf(rs1, rs2));
+          break;
+        case MAX:
+          set_fregs(instr.frd(), fmaxf(rs1, rs2));
+          break;
+        default:
+          IllegalInstruction(instr);
+      }
+      break;
+    }
+    case FCMPS: {
+      float rs1 = get_fregs(instr.frs1());
+      float rs2 = get_fregs(instr.frs2());
+      switch (instr.funct3()) {
+        case FEQ:
+          set_xreg(instr.rd(), rs1 == rs2 ? 1 : 0);
+          break;
+        case FLT:
+          set_xreg(instr.rd(), rs1 < rs2 ? 1 : 0);
+          break;
+        case FLE:
+          set_xreg(instr.rd(), rs1 <= rs2 ? 1 : 0);
+          break;
+        default:
+          IllegalInstruction(instr);
+      }
+      break;
+    }
+    case FCLASSS:  // = FMVXW
+      switch (instr.funct3()) {
+        case 1:
+          // fclass.s
+          set_xreg(instr.rd(), fclass(get_fregs(instr.frs1())));
+          break;
+        case 0:
+          // fmv.x.s
+          set_xreg(instr.rd(),
+                   sign_extend(bit_cast<int32_t>(get_fregs(instr.frs1()))));
+          break;
+        default:
+          IllegalInstruction(instr);
+      }
+      break;
+    case FCVTintS:
+      switch (static_cast<FcvtRs2>(instr.rs2())) {
+        case W:
+          set_xreg(instr.rd(), sign_extend(fcvtws(get_fregs(instr.frs1()),
+                                                  instr.rounding())));
+          break;
+        case WU:
+          set_xreg(instr.rd(), sign_extend(fcvtwus(get_fregs(instr.frs1()),
+                                                   instr.rounding())));
+          break;
+#if XLEN >= 64
+        case L:
+          set_xreg(instr.rd(), sign_extend(fcvtls(get_fregs(instr.frs1()),
+                                                  instr.rounding())));
+          break;
+        case LU:
+          set_xreg(instr.rd(), sign_extend(fcvtlus(get_fregs(instr.frs1()),
+                                                   instr.rounding())));
+          break;
+#endif  // XLEN >= 64
+        default:
+          IllegalInstruction(instr);
+      }
+      break;
+    case FCVTSint:
+      switch (static_cast<FcvtRs2>(instr.rs2())) {
+        case W:
+          set_fregs(
+              instr.frd(),
+              static_cast<float>(static_cast<int32_t>(get_xreg(instr.rs1()))));
+          break;
+        case WU:
+          set_fregs(
+              instr.frd(),
+              static_cast<float>(static_cast<uint32_t>(get_xreg(instr.rs1()))));
+          break;
+#if XLEN >= 64
+        case L:
+          set_fregs(
+              instr.frd(),
+              static_cast<float>(static_cast<int64_t>(get_xreg(instr.rs1()))));
+          break;
+        case LU:
+          set_fregs(
+              instr.frd(),
+              static_cast<float>(static_cast<uint64_t>(get_xreg(instr.rs1()))));
+          break;
+#endif  // XLEN >= 64
+        default:
+          IllegalInstruction(instr);
+      }
+      break;
+    case FMVWX:
+      set_fregs(instr.frd(),
+                bit_cast<float>(static_cast<int32_t>(get_xreg(instr.rs1()))));
+      break;
+    case FADDD: {
+      double rs1 = get_fregd(instr.frs1());
+      double rs2 = get_fregd(instr.frs2());
+      set_fregd(instr.frd(), rs1 + rs2);
+      break;
+    }
+    case FSUBD: {
+      double rs1 = get_fregd(instr.frs1());
+      double rs2 = get_fregd(instr.frs2());
+      set_fregd(instr.frd(), rs1 - rs2);
+      break;
+    }
+    case FMULD: {
+      double rs1 = get_fregd(instr.frs1());
+      double rs2 = get_fregd(instr.frs2());
+      set_fregd(instr.frd(), rs1 * rs2);
+      break;
+    }
+    case FDIVD: {
+      double rs1 = get_fregd(instr.frs1());
+      double rs2 = get_fregd(instr.frs2());
+      set_fregd(instr.frd(), rs1 / rs2);
+      break;
+    }
+    case FSQRTD: {
+      double rs1 = get_fregd(instr.frs1());
+      set_fregd(instr.frd(), sqrt(rs1));
+      break;
+    }
+    case FSGNJD: {
+      const uint64_t kSignMask = static_cast<uint64_t>(1) << 63;
+      uint64_t rs1 = bit_cast<uint64_t>(get_fregd(instr.frs1()));
+      uint64_t rs2 = bit_cast<uint64_t>(get_fregd(instr.frs2()));
+      uint64_t result;
+      switch (instr.funct3()) {
+        case J:
+          result = (rs1 & ~kSignMask) | (rs2 & kSignMask);
+          break;
+        case JN:
+          result = (rs1 & ~kSignMask) | (~rs2 & kSignMask);
+          break;
+        case JX:
+          result = (rs1 & ~kSignMask) | ((rs1 ^ rs2) & kSignMask);
+          break;
+        default:
+          IllegalInstruction(instr);
+      }
+      set_fregd(instr.frd(), bit_cast<double>(result));
+      break;
+    }
+    case FMINMAXD: {
+      double rs1 = get_fregd(instr.frs1());
+      double rs2 = get_fregd(instr.frs2());
+      switch (instr.funct3()) {
+        case MIN:
+          set_fregd(instr.frd(), fmin(rs1, rs2));
+          break;
+        case MAX:
+          set_fregd(instr.frd(), fmax(rs1, rs2));
+          break;
+        default:
+          IllegalInstruction(instr);
+      }
+      break;
+    }
+    case FCVTS: {
+      switch (static_cast<FcvtRs2>(instr.rs2())) {
+        case 1:  // rs2 selects the source format: fcvt.s.d.
+          set_fregs(instr.frd(), static_cast<float>(get_fregd(instr.frs1())));
+          break;
+        default:
+          IllegalInstruction(instr);
+      }
+      break;
+    }
+    case FCVTD: {
+      switch (static_cast<FcvtRs2>(instr.rs2())) {
+        case 0:  // rs2 selects the source format: fcvt.d.s.
+          set_fregd(instr.frd(), static_cast<double>(get_fregs(instr.frs1())));
+          break;
+        default:
+          IllegalInstruction(instr);
+      }
+      break;
+    }
+    case FCMPD: {
+      double rs1 = get_fregd(instr.frs1());
+      double rs2 = get_fregd(instr.frs2());
+      switch (instr.funct3()) {
+        case FEQ:
+          set_xreg(instr.rd(), rs1 == rs2 ? 1 : 0);
+          break;
+        case FLT:
+          set_xreg(instr.rd(), rs1 < rs2 ? 1 : 0);
+          break;
+        case FLE:
+          set_xreg(instr.rd(), rs1 <= rs2 ? 1 : 0);
+          break;
+        default:
+          IllegalInstruction(instr);
+      }
+      break;
+    }
+    case FCLASSD:  // = FMVXD
+      switch (instr.funct3()) {
+        case 1:
+          // fclass.d
+          set_xreg(instr.rd(), fclass(get_fregd(instr.frs1())));
+          break;
+#if XLEN >= 64
+        case 0:
+          // fmv.x.d
+          set_xreg(instr.rd(), bit_cast<int64_t>(get_fregd(instr.frs1())));
+          break;
+#endif  // XLEN >= 64
+        default:
+          IllegalInstruction(instr);
+      }
+      break;
+    case FCVTintD:
+      switch (static_cast<FcvtRs2>(instr.rs2())) {
+        case W:
+          set_xreg(instr.rd(), sign_extend(fcvtwd(get_fregd(instr.frs1()),
+                                                  instr.rounding())));
+          break;
+        case WU:
+          set_xreg(instr.rd(), sign_extend(fcvtwud(get_fregd(instr.frs1()),
+                                                   instr.rounding())));
+          break;
+#if XLEN >= 64
+        case L:
+          set_xreg(instr.rd(), sign_extend(fcvtld(get_fregd(instr.frs1()),
+                                                  instr.rounding())));
+          break;
+        case LU:
+          set_xreg(instr.rd(), sign_extend(fcvtlud(get_fregd(instr.frs1()),
+                                                   instr.rounding())));
+          break;
+#endif  // XLEN >= 64
+        default:
+          IllegalInstruction(instr);
+      }
+      break;
+    case FCVTDint:
+      switch (static_cast<FcvtRs2>(instr.rs2())) {
+        case W:
+          set_fregd(
+              instr.frd(),
+              static_cast<double>(static_cast<int32_t>(get_xreg(instr.rs1()))));
+          break;
+        case WU:
+          set_fregd(instr.frd(), static_cast<double>(static_cast<uint32_t>(
+                                     get_xreg(instr.rs1()))));
+          break;
+#if XLEN >= 64
+        case L:
+          set_fregd(
+              instr.frd(),
+              static_cast<double>(static_cast<int64_t>(get_xreg(instr.rs1()))));
+          break;
+        case LU:
+          set_fregd(instr.frd(), static_cast<double>(static_cast<uint64_t>(
+                                     get_xreg(instr.rs1()))));
+          break;
+#endif  // XLEN >= 64
+        default:
+          IllegalInstruction(instr);
+      }
+      break;
+#if XLEN >= 64
+    case FMVDX:
+      set_fregd(instr.frd(), bit_cast<double>(get_xreg(instr.rs1())));
+      break;
+#endif  // XLEN >= 64
+    default:
+      IllegalInstruction(instr);
+  }
+  pc_ += instr.length();
+}
+
+void Simulator::InterpretEBREAK(Instr instr) {
+  PrintRegisters();
+  PrintStack();
+  FATAL("Encounted EBREAK");
+}
+
+void Simulator::InterpretEBREAK(CInstr instr) {
+  PrintRegisters();
+  PrintStack();
+  FATAL("Encounted EBREAK");
+}
+
+void Simulator::IllegalInstruction(Instr instr) {
+  PrintRegisters();
+  PrintStack();
+  FATAL("Illegal instruction: 0x%08x", instr.encoding());
+}
+
+void Simulator::IllegalInstruction(CInstr instr) {
+  PrintRegisters();
+  PrintStack();
+  FATAL("Illegal instruction: 0x%04x", instr.encoding());
+}
+
+template <typename type>
+type Simulator::MemoryRead(uintx_t addr, Register base) {
+#if defined(DEBUG)
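+  // Debug-mode sanity checks: accesses based on SP or FP must stay within
+  // the live stack region [SP, stack_base]; any other access must avoid the
+  // first and last 16KB pages of the address space, which catches null-like
+  // and wrapped pointers.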
+  if ((base == SP) || (base == FP)) {
+    if ((addr + sizeof(type) > stack_base()) || (addr < get_xreg(SP))) {
+      PrintRegisters();
+      PrintStack();
+      FATAL("Out-of-bounds stack access");
+    }
+  } else {
+    const uintx_t kPageSize = 16 * KB;
+    if ((addr < kPageSize) || (addr + sizeof(type) >= ~kPageSize)) {
+      PrintRegisters();
+      PrintStack();
+      FATAL("Bad memory access");
+    }
+  }
+#endif
+  return *reinterpret_cast<type*>(addr);
+}
+
+template <typename type>
+void Simulator::MemoryWrite(uintx_t addr, type value, Register base) {
+#if defined(DEBUG)
+  if ((base == SP) || (base == FP)) {
+    if ((addr + sizeof(type) > stack_base()) || (addr < get_xreg(SP))) {
+      PrintRegisters();
+      PrintStack();
+      FATAL("Out-of-bounds stack access");
+    }
+  } else {
+    const uintx_t kPageSize = 16 * KB;
+    if ((addr < kPageSize) || (addr + sizeof(type) >= ~kPageSize)) {
+      PrintRegisters();
+      PrintStack();
+      FATAL("Bad memory access");
+    }
+  }
+#endif
+  *reinterpret_cast<type*>(addr) = value;
+}
+
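+// Standard user-level CSR numbers from the RISC-V specification: the
+// floating-point status CSRs (fflags, frm, fcsr) and the cycle/time/instret
+// counters, plus their high halves on RV32.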
+enum ControlStatusRegister {
+  fflags = 0x001,
+  frm = 0x002,
+  fcsr = 0x003,
+  cycle = 0xC00,
+  time = 0xC01,
+  instret = 0xC02,
+#if XLEN == 32
+  cycleh = 0xC80,
+  timeh = 0xC81,
+  instreth = 0xC82,
+#endif
+};
+
+intx_t Simulator::CSRRead(uint16_t csr) {
+  switch (csr) {
+    case fcsr:
+      return fcsr_;
+    case cycle:
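+      // Crude timing model: report one cycle per two retired instructions.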
+      return instret_ / 2;
+    case time:
+      return 0;
+    case instret:
+      return instret_;
+#if XLEN == 32
+    case cycleh:
+      return (instret_ / 2) >> 32;
+    case timeh:
+      return 0;
+    case instreth:
+      return instret_ >> 32;
+#endif
+    default:
+      FATAL("Unknown CSR: %d", csr);
+  }
+}
+
+void Simulator::CSRWrite(uint16_t csr, intx_t value) {
+  UNIMPLEMENTED();
+}
+
+void Simulator::CSRSet(uint16_t csr, intx_t mask) {
+  UNIMPLEMENTED();
+}
+
+void Simulator::CSRClear(uint16_t csr, intx_t mask) {
+  UNIMPLEMENTED();
+}
+
+}  // namespace dart
+
+#endif  // !defined(USING_SIMULATOR)
+
+#endif  // defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
diff --git a/runtime/vm/simulator_riscv.h b/runtime/vm/simulator_riscv.h
new file mode 100644
index 0000000..b63ed39
--- /dev/null
+++ b/runtime/vm/simulator_riscv.h
@@ -0,0 +1,343 @@
+// Copyright (c) 2017, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#ifndef RUNTIME_VM_SIMULATOR_RISCV_H_
+#define RUNTIME_VM_SIMULATOR_RISCV_H_
+
+#ifndef RUNTIME_VM_SIMULATOR_H_
+#error Do not include simulator_riscv.h directly; use simulator.h.
+#endif
+
+#include "vm/constants.h"
+#include "vm/random.h"
+
+namespace dart {
+
+class Isolate;
+class Mutex;
+class SimulatorSetjmpBuffer;
+class Thread;
+
+// TODO(riscv): Introduce random LR/SC failures.
+// TODO(riscv): Dynamic rounding mode and other FSCR state.
+class Simulator {
+ public:
+  static const uword kSimulatorStackUnderflowSize = 64;
+
+  Simulator();
+  ~Simulator();
+
+  static Simulator* Current();
+
+  intx_t CallX(intx_t function,
+               intx_t arg0 = 0,
+               intx_t arg1 = 0,
+               intx_t arg2 = 0,
+               intx_t arg3 = 0) {
+    PreservedRegisters preserved;
+    PrepareCall(&preserved);
+    set_xreg(A0, arg0);
+    set_xreg(A1, arg1);
+    set_xreg(A2, arg2);
+    set_xreg(A3, arg3);
+    RunCall(function, &preserved);
+    return get_xreg(A0);
+  }
+
+  intx_t CallI(intx_t function, double arg0, double arg1 = 0.0) {
+    PreservedRegisters preserved;
+    PrepareCall(&preserved);
+    set_fregd(FA0, arg0);
+    set_fregd(FA1, arg1);
+    RunCall(function, &preserved);
+    return get_xreg(A0);
+  }
+  intx_t CallI(intx_t function, float arg0, float arg1 = 0.0f) {
+    PreservedRegisters preserved;
+    PrepareCall(&preserved);
+    set_fregs(FA0, arg0);
+    set_fregs(FA1, arg1);
+    RunCall(function, &preserved);
+    return get_xreg(A0);
+  }
+
+  double CallD(intx_t function, intx_t arg0, intx_t arg1 = 0) {
+    PreservedRegisters preserved;
+    PrepareCall(&preserved);
+    set_xreg(A0, arg0);
+    set_xreg(A1, arg1);
+    RunCall(function, &preserved);
+    return get_fregd(FA0);
+  }
+  double CallD(intx_t function,
+               double arg0,
+               double arg1 = 0.0,
+               double arg2 = 0.0) {
+    PreservedRegisters preserved;
+    PrepareCall(&preserved);
+    set_fregd(FA0, arg0);
+    set_fregd(FA1, arg1);
+    set_fregd(FA2, arg2);
+    RunCall(function, &preserved);
+    return get_fregd(FA0);
+  }
+  double CallD(intx_t function, intx_t arg0, double arg1) {
+    PreservedRegisters preserved;
+    PrepareCall(&preserved);
+    set_xreg(A0, arg0);
+    set_fregd(FA0, arg1);
+    RunCall(function, &preserved);
+    return get_fregd(FA0);
+  }
+  double CallD(intx_t function, float arg0) {
+    PreservedRegisters preserved;
+    PrepareCall(&preserved);
+    set_fregs(FA0, arg0);
+    RunCall(function, &preserved);
+    return get_fregd(FA0);
+  }
+
+  float CallF(intx_t function, intx_t arg0, intx_t arg1 = 0) {
+    PreservedRegisters preserved;
+    PrepareCall(&preserved);
+    set_xreg(A0, arg0);
+    set_xreg(A1, arg1);
+    RunCall(function, &preserved);
+    return get_fregs(FA0);
+  }
+  float CallF(intx_t function,
+              float arg0,
+              float arg1 = 0.0f,
+              float arg2 = 0.0f) {
+    PreservedRegisters preserved;
+    PrepareCall(&preserved);
+    set_fregs(FA0, arg0);
+    set_fregs(FA1, arg1);
+    set_fregs(FA2, arg2);
+    RunCall(function, &preserved);
+    return get_fregs(FA0);
+  }
+  float CallF(intx_t function, intx_t arg0, float arg1) {
+    PreservedRegisters preserved;
+    PrepareCall(&preserved);
+    set_xreg(A0, arg0);
+    set_fregs(FA0, arg1);
+    RunCall(function, &preserved);
+    return get_fregs(FA0);
+  }
+  float CallF(intx_t function, double arg0) {
+    PreservedRegisters preserved;
+    PrepareCall(&preserved);
+    set_fregd(FA0, arg0);
+    RunCall(function, &preserved);
+    return get_fregs(FA0);
+  }
+
+  // Dart generally calls into generated code with 4 parameters. This is a
+  // convenience function, which sets up the simulator state and grabs the
+  // result on return. The parameters are placed in A0-A3; the return value
+  // is taken from A0.
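+  //
+  // A minimal usage sketch (the entry point and arguments are hypothetical;
+  // a real entry would come from compiled Code):
+  //
+  //   Simulator* sim = Simulator::Current();
+  //   int64_t result = sim->Call(entry, arg0, arg1, arg2, arg3);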
+  int64_t Call(intx_t entry,
+               intx_t parameter0,
+               intx_t parameter1,
+               intx_t parameter2,
+               intx_t parameter3,
+               bool fp_return = false,
+               bool fp_args = false);
+
+  // Runtime and native call support.
+  enum CallKind {
+    kRuntimeCall,
+    kLeafRuntimeCall,
+    kLeafFloatRuntimeCall,
+    kNativeCallWrapper
+  };
+  static uword RedirectExternalReference(uword function,
+                                         CallKind call_kind,
+                                         int argument_count);
+
+  static uword FunctionForRedirect(uword redirect);
+
+  void JumpToFrame(uword pc, uword sp, uword fp, Thread* thread);
+
+  uintx_t get_register(Register rs) const { return get_xreg(rs); }
+  uintx_t get_pc() const { return pc_; }
+  uintx_t get_sp() const { return get_xreg(SP); }
+  uintx_t get_fp() const { return get_xreg(FP); }
+  void PrintRegisters();
+  void PrintStack();
+
+  // High address.
+  uword stack_base() const { return stack_base_; }
+  // Limit for StackOverflowError.
+  uword overflow_stack_limit() const { return overflow_stack_limit_; }
+  // Low address.
+  uword stack_limit() const { return stack_limit_; }
+
+  // Accessor to the instruction counter.
+  uint64_t get_icount() const { return instret_; }
+
+  // Call on program start.
+  static void Init();
+
+ private:
+  struct PreservedRegisters {
+    uintx_t xregs[kNumberOfCpuRegisters];
+    double fregs[kNumberOfFpuRegisters];
+  };
+  void PrepareCall(PreservedRegisters* preserved);
+  void ClobberVolatileRegisters();
+  void SavePreservedRegisters(PreservedRegisters* preserved);
+  void CheckPreservedRegisters(PreservedRegisters* preserved);
+  void RunCall(intx_t function, PreservedRegisters* preserved);
+
+  void Interpret(Instr instr);
+  void Interpret(CInstr instr);
+  void InterpretLUI(Instr instr);
+  void InterpretAUIPC(Instr instr);
+  void InterpretJAL(Instr instr);
+  void InterpretJALR(Instr instr);
+  void InterpretBRANCH(Instr instr);
+  void InterpretLOAD(Instr instr);
+  void InterpretSTORE(Instr instr);
+  void InterpretOPIMM(Instr instr);
+  void InterpretOPIMM32(Instr instr);
+  void InterpretOP(Instr instr);
+  void InterpretOP_0(Instr instr);
+  void InterpretOP_SUB(Instr instr);
+  void InterpretOP_MULDIV(Instr instr);
+  void InterpretOP32(Instr instr);
+  void InterpretOP32_0(Instr instr);
+  void InterpretOP32_SUB(Instr instr);
+  void InterpretOP32_MULDIV(Instr instr);
+  void InterpretMISCMEM(Instr instr);
+  void InterpretSYSTEM(Instr instr);
+  void InterpretECALL(Instr instr);
+  void InterpretEBREAK(Instr instr);
+  void InterpretEBREAK(CInstr instr);
+  void InterpretAMO(Instr instr);
+  void InterpretAMO32(Instr instr);
+  void InterpretAMO64(Instr instr);
+  template <typename type>
+  void InterpretLR(Instr instr);
+  template <typename type>
+  void InterpretSC(Instr instr);
+  template <typename type>
+  void InterpretAMOSWAP(Instr instr);
+  template <typename type>
+  void InterpretAMOADD(Instr instr);
+  template <typename type>
+  void InterpretAMOXOR(Instr instr);
+  template <typename type>
+  void InterpretAMOAND(Instr instr);
+  template <typename type>
+  void InterpretAMOOR(Instr instr);
+  template <typename type>
+  void InterpretAMOMIN(Instr instr);
+  template <typename type>
+  void InterpretAMOMAX(Instr instr);
+  template <typename type>
+  void InterpretAMOMINU(Instr instr);
+  template <typename type>
+  void InterpretAMOMAXU(Instr instr);
+  void InterpretLOADFP(Instr instr);
+  void InterpretSTOREFP(Instr instr);
+  void InterpretFMADD(Instr instr);
+  void InterpretFMSUB(Instr instr);
+  void InterpretFNMADD(Instr instr);
+  void InterpretFNMSUB(Instr instr);
+  void InterpretOPFP(Instr instr);
+  DART_NORETURN void IllegalInstruction(Instr instr);
+  DART_NORETURN void IllegalInstruction(CInstr instr);
+
+  template <typename type>
+  type MemoryRead(uintx_t address, Register base);
+  template <typename type>
+  void MemoryWrite(uintx_t address, type value, Register base);
+
+  intx_t CSRRead(uint16_t csr);
+  void CSRWrite(uint16_t csr, intx_t value);
+  void CSRSet(uint16_t csr, intx_t mask);
+  void CSRClear(uint16_t csr, intx_t mask);
+
+  uintx_t get_xreg(Register rs) const { return xregs_[rs]; }
+  void set_xreg(Register rd, uintx_t value) {
+    if (rd != ZR) {
+      xregs_[rd] = value;
+    }
+  }
+
+  double get_fregd(FRegister rs) const { return fregs_[rs]; }
+  void set_fregd(FRegister rd, double value) { fregs_[rd] = value; }
+
+  static constexpr uint64_t kNaNBox = 0xFFFFFFFF00000000;
+
+  float get_fregs(FRegister rs) const {
+    uint64_t bits64 = bit_cast<uint64_t>(fregs_[rs]);
+    if ((bits64 & kNaNBox) != kNaNBox) {
+      // When the register doesn't hold a properly NaN-boxed single, the
+      // canonical NaN is used instead.
+      return bit_cast<float>(0x7fc00000);
+    }
+    uint32_t bits32 = static_cast<uint32_t>(bits64);
+    return bit_cast<float>(bits32);
+  }
+  void set_fregs(FRegister rd, float value) {
+    uint32_t bits32 = bit_cast<uint32_t>(value);
+    uint64_t bits64 = static_cast<uint64_t>(bits32);
+    bits64 |= kNaNBox;
+    fregs_[rd] = bit_cast<double>(bits64);
+  }
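+  // For example, set_fregs(FA0, 1.0f) stores 0xFFFFFFFF'3F800000, which
+  // get_fregs(FA0) reads back as 1.0f; after set_fregd(FA0, 1.0) the upper
+  // 32 bits are no longer all ones, so get_fregs(FA0) yields the canonical
+  // NaN, as RISC-V NaN-boxing requires.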
+
+  // Known bad pc value to ensure that the simulator does not execute
+  // without being properly set up.
+  static constexpr uword kBadLR = -1;
+  // A pc value used to signal the simulator to stop execution. Generally
+  // the return address register (RA) is set to this value on transition
+  // from native C code to simulated execution, so that the simulator can
+  // "return" to the native C code.
+  static constexpr uword kEndSimulatingPC = -2;
+
+  // I state.
+  uintx_t pc_;
+  uintx_t xregs_[kNumberOfCpuRegisters];
+  uint64_t instret_;  // "Instructions retired" - mandatory counter.
+
+  // A state.
+  uintx_t reserved_address_;
+  uintx_t reserved_value_;
+
+  // F/D state.
+  double fregs_[kNumberOfFpuRegisters];
+  uint32_t fcsr_;
+
+  // Simulator support.
+  char* stack_;
+  uword stack_limit_;
+  uword overflow_stack_limit_;
+  uword stack_base_;
+  Random random_;
+  SimulatorSetjmpBuffer* last_setjmp_buffer_;
+
+  static bool IsIllegalAddress(uword addr) { return addr < 64 * 1024; }
+
+  // Executes RISC-V instructions until the PC reaches kEndSimulatingPC.
+  void Execute();
+
+  // Returns true if tracing of executed instructions is enabled.
+  bool IsTracingExecution() const;
+
+  // Longjmp support for exceptions.
+  SimulatorSetjmpBuffer* last_setjmp_buffer() { return last_setjmp_buffer_; }
+  void set_last_setjmp_buffer(SimulatorSetjmpBuffer* buffer) {
+    last_setjmp_buffer_ = buffer;
+  }
+
+  friend class SimulatorSetjmpBuffer;
+  DISALLOW_COPY_AND_ASSIGN(Simulator);
+};
+
+}  // namespace dart
+
+#endif  // RUNTIME_VM_SIMULATOR_RISCV_H_
diff --git a/runtime/vm/stack_frame.h b/runtime/vm/stack_frame.h
index e23d93b..3574ec3 100644
--- a/runtime/vm/stack_frame.h
+++ b/runtime/vm/stack_frame.h
@@ -18,6 +18,8 @@
 #include "vm/stack_frame_arm.h"
 #elif defined(TARGET_ARCH_ARM64)
 #include "vm/stack_frame_arm64.h"
+#elif defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+#include "vm/stack_frame_riscv.h"
 #else
 #error Unknown architecture.
 #endif
diff --git a/runtime/vm/stack_frame_riscv.h b/runtime/vm/stack_frame_riscv.h
new file mode 100644
index 0000000..4bf58ab
--- /dev/null
+++ b/runtime/vm/stack_frame_riscv.h
@@ -0,0 +1,75 @@
+// Copyright (c) 2021, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#ifndef RUNTIME_VM_STACK_FRAME_RISCV_H_
+#define RUNTIME_VM_STACK_FRAME_RISCV_H_
+
+#if !defined(RUNTIME_VM_STACK_FRAME_H_)
+#error Do not include stack_frame_riscv.h directly; use stack_frame.h instead.
+#endif
+
+namespace dart {
+
+/* RISC-V Dart Frame Layout
+               |                    | <- TOS
+Callee frame   | ...                |
+               | saved PP           |
+               | code object        |
+               | saved FP           |    (FP of current frame)
+               | saved PC           |    (PC of current frame)
+               +--------------------+
+Current frame  | ...               T| <- SP of current frame
+               | first local       T|
+               | caller's PP       T|
+               | code object       T|    (current frame's code object)
+               | caller's FP        | <- FP of current frame
+               | caller's RA        |    (PC of caller frame)
+               +--------------------+
+Caller frame   | last parameter     | <- SP of caller frame
+               |  ...               |
+
+               T against a slot indicates it needs to be traversed during GC.
+*/
+
+static const int kDartFrameFixedSize = 4;  // PP, FP, RA, PC marker.
+static const int kSavedPcSlotFromSp = -1;
+
+static const int kFirstObjectSlotFromFp = -1;  // Used by GC to traverse stack.
+static const int kLastFixedObjectSlotFromFp = -2;
+
+static const int kFirstLocalSlotFromFp = -3;
+static const int kSavedCallerPpSlotFromFp = -2;
+static const int kPcMarkerSlotFromFp = -1;
+static const int kSavedCallerFpSlotFromFp = 0;
+static const int kSavedCallerPcSlotFromFp = 1;
+
+static const int kParamEndSlotFromFp = 1;  // One slot past last parameter.
+static const int kCallerSpSlotFromFp = 2;
+static const int kLastParamSlotFromEntrySp = 0;
+
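+// A sketch of how these constants are used, treating fp as a pointer to
+// word-sized slots:
+//
+//   uword* fp = ...;
+//   uword caller_fp = fp[kSavedCallerFpSlotFromFp];
+//   uword caller_pc = fp[kSavedCallerPcSlotFromFp];
+//   uword pc_marker = fp[kPcMarkerSlotFromFp];  // Current frame's code object.
+//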
+// Entry and exit frame layout.
+#if defined(TARGET_ARCH_RISCV64)
+static const int kExitLinkSlotFromEntryFp = -28;
+#elif defined(TARGET_ARCH_RISCV32)
+static const int kExitLinkSlotFromEntryFp = -40;
+#endif
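+// The exit link offsets above reflect the ABI-preserved registers that the
+// invocation stub saves below the entry frame's FP; the asserts pin down
+// the register counts they were computed from.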
+COMPILE_ASSERT(kAbiPreservedCpuRegCount == 11);
+COMPILE_ASSERT(kAbiPreservedFpuRegCount == 12);
+
+// For FFI native -> Dart callbacks, this is the number of stack slots between
+// arguments passed on stack and arguments saved in callback prologue.
+//
+// 2 = return address (1) + saved frame pointer (1).
+//
+// If NativeCallbackTrampolines::Enabled(), then
+// kNativeCallbackTrampolineStackDelta must be added as well.
+constexpr intptr_t kCallbackSlotsBeforeSavedArguments = 2;
+
+// For FFI calls passing in TypedData, we save it on the stack before entering
+// a Dart frame. This denotes how to get to the backed-up typed data.
+static const int kFfiCallerTypedDataSlotFromFp = kCallerSpSlotFromFp;
+
+}  // namespace dart
+
+#endif  // RUNTIME_VM_STACK_FRAME_RISCV_H_
diff --git a/runtime/vm/stub_code_riscv_test.cc b/runtime/vm/stub_code_riscv_test.cc
new file mode 100644
index 0000000..df605de
--- /dev/null
+++ b/runtime/vm/stub_code_riscv_test.cc
@@ -0,0 +1,118 @@
+// Copyright (c) 2021, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/globals.h"
+#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+
+#include "vm/dart_entry.h"
+#include "vm/isolate.h"
+#include "vm/native_entry.h"
+#include "vm/native_entry_test.h"
+#include "vm/object.h"
+#include "vm/runtime_entry.h"
+#include "vm/stub_code.h"
+#include "vm/symbols.h"
+#include "vm/unit_test.h"
+
+#define __ assembler->
+
+namespace dart {
+
+static Function* CreateFunction(const char* name) {
+  const String& class_name =
+      String::Handle(Symbols::New(Thread::Current(), "ownerClass"));
+  const Script& script = Script::Handle();
+  const Library& lib = Library::Handle(Library::New(class_name));
+  const Class& owner_class = Class::Handle(
+      Class::New(lib, class_name, script, TokenPosition::kNoSource));
+  const String& function_name =
+      String::ZoneHandle(Symbols::New(Thread::Current(), name));
+  const FunctionType& signature = FunctionType::ZoneHandle(FunctionType::New());
+  Function& function = Function::ZoneHandle(Function::New(
+      signature, function_name, UntaggedFunction::kRegularFunction, true, false,
+      false, false, false, owner_class, TokenPosition::kNoSource));
+  return &function;
+}
+
+// Test calls to stub code which calls into the runtime.
+static void GenerateCallToCallRuntimeStub(compiler::Assembler* assembler,
+                                          int length) {
+  const int argc = 2;
+  const Smi& smi_length = Smi::ZoneHandle(Smi::New(length));
+  __ EnterDartFrame(0);
+  __ PushObject(Object::null_object());  // Push Null obj for return value.
+  __ PushObject(smi_length);             // Push argument 1: length.
+  __ PushObject(Object::null_object());  // Push argument 2: type arguments.
+  ASSERT(kAllocateArrayRuntimeEntry.argument_count() == argc);
+  __ CallRuntime(kAllocateArrayRuntimeEntry, argc);
+  __ addi(SP, SP, argc * kWordSize);
+  __ PopRegister(A0);  // Pop return value from return slot.
+  __ LeaveDartFrame();
+  __ ret();
+}
+
+ISOLATE_UNIT_TEST_CASE(CallRuntimeStubCode) {
+  extern const Function& RegisterFakeFunction(const char* name,
+                                              const Code& code);
+  const int length = 10;
+  const char* kName = "Test_CallRuntimeStubCode";
+  compiler::ObjectPoolBuilder object_pool_builder;
+  compiler::Assembler assembler(&object_pool_builder);
+  GenerateCallToCallRuntimeStub(&assembler, length);
+  SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock());
+  const Code& code = Code::Handle(Code::FinalizeCodeAndNotify(
+      *CreateFunction("Test_CallRuntimeStubCode"), nullptr, &assembler,
+      Code::PoolAttachment::kAttachPool));
+  const Function& function = RegisterFakeFunction(kName, code);
+  Array& result = Array::Handle();
+  result ^= DartEntry::InvokeFunction(function, Object::empty_array());
+  EXPECT_EQ(length, result.Length());
+}
+
+// Test calls to stub code which calls into a leaf runtime entry.
+static void GenerateCallToCallLeafRuntimeStub(compiler::Assembler* assembler,
+                                              const char* str_value,
+                                              intptr_t lhs_index_value,
+                                              intptr_t rhs_index_value,
+                                              intptr_t length_value) {
+  const String& str = String::ZoneHandle(String::New(str_value, Heap::kOld));
+  const Smi& lhs_index = Smi::ZoneHandle(Smi::New(lhs_index_value));
+  const Smi& rhs_index = Smi::ZoneHandle(Smi::New(rhs_index_value));
+  const Smi& length = Smi::ZoneHandle(Smi::New(length_value));
+  __ EnterDartFrame(0);
+  __ ReserveAlignedFrameSpace(0);
+  __ LoadObject(A0, str);
+  __ LoadObject(A1, lhs_index);
+  __ LoadObject(A2, rhs_index);
+  __ LoadObject(A3, length);
+  __ CallRuntime(kCaseInsensitiveCompareUCS2RuntimeEntry, 4);
+  __ LeaveDartFrame();
+  __ ret();  // Return value is in A0.
+}
+
+ISOLATE_UNIT_TEST_CASE(CallLeafRuntimeStubCode) {
+  extern const Function& RegisterFakeFunction(const char* name,
+                                              const Code& code);
+  const char* str_value = "abAB";
+  intptr_t lhs_index_value = 0;
+  intptr_t rhs_index_value = 2;
+  intptr_t length_value = 2;
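+  // Case-insensitively compares "ab" (at index 0) with "AB" (at index 2);
+  // the stub is expected to return true.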
+  const char* kName = "Test_CallLeafRuntimeStubCode";
+  compiler::ObjectPoolBuilder object_pool_builder;
+  compiler::Assembler assembler(&object_pool_builder);
+  GenerateCallToCallLeafRuntimeStub(&assembler, str_value, lhs_index_value,
+                                    rhs_index_value, length_value);
+  SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock());
+  const Code& code = Code::Handle(Code::FinalizeCodeAndNotify(
+      *CreateFunction("Test_CallLeafRuntimeStubCode"), nullptr, &assembler,
+      Code::PoolAttachment::kAttachPool));
+  const Function& function = RegisterFakeFunction(kName, code);
+  Instance& result = Instance::Handle();
+  result ^= DartEntry::InvokeFunction(function, Object::empty_array());
+  EXPECT_EQ(Bool::True().ptr(), result.ptr());
+}
+
+}  // namespace dart
+
+#endif  // defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
diff --git a/runtime/vm/thread.cc b/runtime/vm/thread.cc
index 6efc403..584cb9c 100644
--- a/runtime/vm/thread.cc
+++ b/runtime/vm/thread.cc
@@ -118,8 +118,7 @@
   CACHED_CONSTANTS_LIST(DEFAULT_INIT)
 #undef DEFAULT_INIT
 
-#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64) ||                  \
-    defined(TARGET_ARCH_X64)
+#if !defined(TARGET_ARCH_IA32)
   for (intptr_t i = 0; i < kNumberOfDartAvailableCpuRegs; ++i) {
     write_barrier_wrappers_entry_points_[i] = 0;
   }
@@ -194,8 +193,7 @@
   CACHED_CONSTANTS_LIST(INIT_VALUE)
 #undef INIT_VALUE
 
-#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64) ||                  \
-    defined(TARGET_ARCH_X64)
+#if !defined(TARGET_ARCH_IA32)
   for (intptr_t i = 0; i < kNumberOfDartAvailableCpuRegs; ++i) {
     write_barrier_wrappers_entry_points_[i] =
         StubCode::WriteBarrierWrappers().EntryPoint() +
diff --git a/runtime/vm/thread.h b/runtime/vm/thread.h
index be48283..aa38703 100644
--- a/runtime/vm/thread.h
+++ b/runtime/vm/thread.h
@@ -659,8 +659,7 @@
   CACHED_CONSTANTS_LIST(DEFINE_OFFSET_METHOD)
 #undef DEFINE_OFFSET_METHOD
 
-#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64) ||                  \
-    defined(TARGET_ARCH_X64)
+#if !defined(TARGET_ARCH_IA32)
   static intptr_t write_barrier_wrappers_thread_offset(Register reg) {
     ASSERT((kDartAvailableCpuRegs & (1 << reg)) != 0);
     intptr_t index = 0;
@@ -1137,8 +1136,7 @@
   LEAF_RUNTIME_ENTRY_LIST(DECLARE_MEMBERS)
 #undef DECLARE_MEMBERS
 
-#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64) ||                  \
-    defined(TARGET_ARCH_X64)
+#if !defined(TARGET_ARCH_IA32)
   uword write_barrier_wrappers_entry_points_[kNumberOfDartAvailableCpuRegs];
 #endif
 
diff --git a/runtime/vm/thread_pool.cc b/runtime/vm/thread_pool.cc
index f450555..ac6adfc 100644
--- a/runtime/vm/thread_pool.cc
+++ b/runtime/vm/thread_pool.cc
@@ -317,6 +317,13 @@
 }
 
 void ThreadPool::Worker::Main(uword args) {
+  // Call the thread start hook here to notify the embedder that the
+  // thread pool thread has started.
+  Dart_ThreadStartCallback start_cb = Dart::thread_start_callback();
+  if (start_cb != nullptr) {
+    start_cb();
+  }
+
   OSThread* os_thread = OSThread::Current();
   ASSERT(os_thread != nullptr);
 
@@ -343,8 +350,9 @@
 
   // Call the thread exit hook here to notify the embedder that the
   // thread pool thread is exiting.
-  if (Dart::thread_exit_callback() != NULL) {
-    (*Dart::thread_exit_callback())();
+  Dart_ThreadExitCallback exit_cb = Dart::thread_exit_callback();
+  if (exit_cb != nullptr) {
+    exit_cb();
   }
 }
 
diff --git a/runtime/vm/type_testing_stubs.cc b/runtime/vm/type_testing_stubs.cc
index 785e536..c4dc3d3 100644
--- a/runtime/vm/type_testing_stubs.cc
+++ b/runtime/vm/type_testing_stubs.cc
@@ -206,28 +206,22 @@
 #if !defined(TARGET_ARCH_IA32)
 #if !defined(DART_PRECOMPILED_RUNTIME)
 
-#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
-#define ONLY_ON_ARM(...) __VA_ARGS__
-#else
-#define ONLY_ON_ARM(...)
-#endif
-
 static CodePtr RetryCompilationWithFarBranches(
     Thread* thread,
     std::function<CodePtr(compiler::Assembler&)> fun) {
-  volatile bool use_far_branches = false;
+  volatile intptr_t far_branch_level = 0;
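+  // The assembler can run out of branch range more than once as it moves to
+  // progressively longer branch sequences, so an integer level replaces the
+  // old use_far_branches bool.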
   while (true) {
     LongJumpScope jump;
     if (setjmp(*jump.Set()) == 0) {
       // To use the already-defined __ Macro !
-      compiler::Assembler assembler(nullptr ONLY_ON_ARM(, use_far_branches));
+      compiler::Assembler assembler(nullptr, far_branch_level);
       return fun(assembler);
     } else {
       // We bailed out or we encountered an error.
       const Error& error = Error::Handle(thread->StealStickyError());
       if (error.ptr() == Object::branch_offset_error().ptr()) {
-        ASSERT(!use_far_branches);
-        use_far_branches = true;
+        ASSERT(far_branch_level < 2);
+        far_branch_level++;
       } else if (error.ptr() == Object::out_of_memory_error().ptr()) {
         thread->set_sticky_error(error);
         return Code::null();
@@ -238,8 +232,6 @@
   }
 }
 
-#undef ONLY_ON_ARM
-
 CodePtr TypeTestingStubGenerator::BuildCodeForType(const Type& type) {
   auto thread = Thread::Current();
   auto zone = thread->zone();
diff --git a/runtime/vm/type_testing_stubs_test.cc b/runtime/vm/type_testing_stubs_test.cc
index 7f04842..3c68eb8 100644
--- a/runtime/vm/type_testing_stubs_test.cc
+++ b/runtime/vm/type_testing_stubs_test.cc
@@ -15,8 +15,7 @@
 #include "vm/type_testing_stubs.h"
 #include "vm/unit_test.h"
 
-#if defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_ARM) ||                  \
-    defined(TARGET_ARCH_X64)
+#if !defined(TARGET_ARCH_IA32)
 
 namespace dart {
 
@@ -2267,5 +2266,4 @@
 
 }  // namespace dart
 
-#endif  // defined(TARGET_ARCH_ARM64) ||  defined(TARGET_ARCH_ARM) ||          \
-        // defined(TARGET_ARCH_X64)
+#endif  // !defined(TARGET_ARCH_IA32)
diff --git a/runtime/vm/unit_test.h b/runtime/vm/unit_test.h
index bc86b9e..42102af 100644
--- a/runtime/vm/unit_test.h
+++ b/runtime/vm/unit_test.h
@@ -101,40 +101,37 @@
 #define ASSEMBLER_TEST_RUN_WITH_EXPECTATION(name, test, expectation)           \
   static void AssemblerTestRun##name(AssemblerTest* test);                     \
   ISOLATE_UNIT_TEST_CASE_WITH_EXPECTATION(name, expectation) {                 \
-    {                                                                          \
-      bool use_far_branches = false;                                           \
+    volatile intptr_t far_branch_level = 0;                                    \
+    while (true) {                                                             \
       LongJumpScope jump;                                                      \
       if (setjmp(*jump.Set()) == 0) {                                          \
         compiler::ObjectPoolBuilder object_pool_builder;                       \
-        compiler::Assembler assembler(&object_pool_builder, use_far_branches); \
+        compiler::Assembler assembler(&object_pool_builder, far_branch_level); \
         AssemblerTest test("" #name, &assembler);                              \
         AssemblerTestGenerate##name(test.assembler());                         \
         test.Assemble();                                                       \
         AssemblerTestRun##name(&test);                                         \
         return;                                                                \
+      } else {                                                                 \
+        const Error& error = Error::Handle(Thread::Current()->sticky_error()); \
+        if (error.ptr() == Object::branch_offset_error().ptr()) {              \
+          ASSERT(far_branch_level < 2);                                        \
+          far_branch_level++;                                                  \
+        } else {                                                               \
+          FATAL1("Unexpected error: %s\n", error.ToErrorCString());            \
+        }                                                                      \
       }                                                                        \
     }                                                                          \
-                                                                               \
-    const Error& error = Error::Handle(Thread::Current()->sticky_error());     \
-    if (error.ptr() == Object::branch_offset_error().ptr()) {                  \
-      bool use_far_branches = true;                                            \
-      compiler::ObjectPoolBuilder object_pool_builder;                         \
-      compiler::Assembler assembler(&object_pool_builder, use_far_branches);   \
-      AssemblerTest test("" #name, &assembler);                                \
-      AssemblerTestGenerate##name(test.assembler());                           \
-      test.Assemble();                                                         \
-      AssemblerTestRun##name(&test);                                           \
-    } else {                                                                   \
-      FATAL1("Unexpected error: %s\n", error.ToErrorCString());                \
-    }                                                                          \
   }                                                                            \
   static void AssemblerTestRun##name(AssemblerTest* test)
 
 #define ASSEMBLER_TEST_RUN(name, test)                                         \
   ASSEMBLER_TEST_RUN_WITH_EXPECTATION(name, test, "Pass")
 
-#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
-#if defined(HOST_ARCH_ARM) || defined(HOST_ARCH_ARM64)
+#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64) ||                  \
+    defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+#if defined(HOST_ARCH_ARM) || defined(HOST_ARCH_ARM64) ||                      \
+    defined(HOST_ARCH_RISCV32) || defined(HOST_ARCH_RISCV64)
-// Running on actual ARM hardware, execute code natively.
+// Running on actual ARM or RISC-V hardware, execute code natively.
 #define EXECUTE_TEST_CODE_INT32(name, entry) reinterpret_cast<name>(entry)()
 #define EXECUTE_TEST_CODE_INT64(name, entry) reinterpret_cast<name>(entry)()
diff --git a/runtime/vm/version_in.cc b/runtime/vm/version_in.cc
index 8596e4a..ef8274c 100644
--- a/runtime/vm/version_in.cc
+++ b/runtime/vm/version_in.cc
@@ -57,6 +57,10 @@
     "arm"
 #elif defined(TARGET_ARCH_ARM64)
     "arm64"
+#elif defined(TARGET_ARCH_RISCV32)
+    "riscv32"
+#elif defined(TARGET_ARCH_RISCV64)
+    "riscv64"
 #else
 #error Unknown arch
 #endif
diff --git a/runtime/vm/vm_sources.gni b/runtime/vm/vm_sources.gni
index f17cc64..059703c 100644
--- a/runtime/vm/vm_sources.gni
+++ b/runtime/vm/vm_sources.gni
@@ -44,6 +44,7 @@
   "code_patcher_arm.cc",
   "code_patcher_arm64.cc",
   "code_patcher_ia32.cc",
+  "code_patcher_riscv.cc",
   "code_patcher_x64.cc",
   "constants_arm.cc",
   "constants_arm.h",
@@ -52,12 +53,15 @@
   "constants_base.h",
   "constants_ia32.cc",
   "constants_ia32.h",
+  "constants_riscv.cc",
+  "constants_riscv.h",
   "constants_x64.cc",
   "constants_x64.h",
   "cpu.h",
   "cpu_arm.cc",
   "cpu_arm64.cc",
   "cpu_ia32.cc",
+  "cpu_riscv.cc",
   "cpu_x64.cc",
   "cpuid.cc",
   "cpuid.h",
@@ -82,6 +86,7 @@
   "debugger_arm.cc",
   "debugger_arm64.cc",
   "debugger_ia32.cc",
+  "debugger_riscv.cc",
   "debugger_x64.cc",
   "deferred_objects.cc",
   "deferred_objects.h",
@@ -128,6 +133,8 @@
   "instructions_arm64.h",
   "instructions_ia32.cc",
   "instructions_ia32.h",
+  "instructions_riscv.cc",
+  "instructions_riscv.h",
   "instructions_x64.cc",
   "instructions_x64.h",
   "intrusive_dlist.h",
@@ -266,6 +273,7 @@
   "runtime_entry_arm64.cc",
   "runtime_entry_ia32.cc",
   "runtime_entry_list.h",
+  "runtime_entry_riscv.cc",
   "runtime_entry_x64.cc",
   "scope_timer.h",
   "scopes.cc",
@@ -287,6 +295,8 @@
   "simulator_arm.h",
   "simulator_arm64.cc",
   "simulator_arm64.h",
+  "simulator_riscv.cc",
+  "simulator_riscv.h",
   "snapshot.cc",
   "snapshot.h",
   "source_report.cc",
@@ -381,6 +391,7 @@
   "code_patcher_arm64_test.cc",
   "code_patcher_arm_test.cc",
   "code_patcher_ia32_test.cc",
+  "code_patcher_riscv_test.cc",
   "code_patcher_x64_test.cc",
   "compiler_test.cc",
   "cpu_test.cc",
@@ -400,6 +411,7 @@
   "instructions_arm64_test.cc",
   "instructions_arm_test.cc",
   "instructions_ia32_test.cc",
+  "instructions_riscv_test.cc",
   "instructions_x64_test.cc",
   "intrusive_dlist_test.cc",
   "isolate_reload_test.cc",
@@ -420,6 +432,7 @@
   "object_graph_test.cc",
   "object_ia32_test.cc",
   "object_id_ring_test.cc",
+  "object_riscv_test.cc",
   "object_store_test.cc",
   "object_test.cc",
   "object_x64_test.cc",
@@ -433,11 +446,12 @@
   "snapshot_test.cc",
   "source_report_test.cc",
   "stack_frame_test.cc",
-  "stub_code_test.cc",
   "stub_code_arm64_test.cc",
   "stub_code_arm_test.cc",
   "stub_code_ia32_test.cc",
+  "stub_code_test.cc",
   "stub_code_x64_test.cc",
+  "stub_code_riscv_test.cc",
   "thread_barrier_test.cc",
   "thread_pool_test.cc",
   "thread_test.cc",
@@ -460,6 +474,8 @@
   "constants_base.h",
   "constants_ia32.cc",
   "constants_ia32.h",
+  "constants_riscv.cc",
+  "constants_riscv.h",
   "constants_x64.cc",
   "constants_x64.h",
 ]
diff --git a/sdk/lib/_internal/vm/lib/convert_patch.dart b/sdk/lib/_internal/vm/lib/convert_patch.dart
index 2885579..f0b84ad 100644
--- a/sdk/lib/_internal/vm/lib/convert_patch.dart
+++ b/sdk/lib/_internal/vm/lib/convert_patch.dart
@@ -1979,7 +1979,7 @@
         }
         byte = (byte << 6) | e;
       }
-      writeIntoOneByteString(result, j++, byte);
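+      // Malformed sequences can leave more than 8 significant bits in
+      // `byte`; mask so the one-byte string write stays in range. The
+      // malformed sequence itself still triggers an error later.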
+      writeIntoOneByteString(result, j++, byte & 0xFF);
     }
     // Output size must match, unless we are doing single conversion and are
     // inside an unfinished sequence (which will trigger an error later).
diff --git a/sdk/lib/_internal/vm/lib/ffi_patch.dart b/sdk/lib/_internal/vm/lib/ffi_patch.dart
index 7ecccf0..271af42 100644
--- a/sdk/lib/_internal/vm/lib/ffi_patch.dart
+++ b/sdk/lib/_internal/vm/lib/ffi_patch.dart
@@ -37,6 +37,8 @@
       8, // linuxArm64,
       4, // linuxIA32,
       8, // linuxX64,
+      4, // linuxRiscv32,
+      8, // linuxRiscv64,
       8, // macosArm64,
       8, // macosX64,
       8, // windowsArm64,
diff --git a/sdk/lib/ffi/abi.dart b/sdk/lib/ffi/abi.dart
index f5cba04..1efe4a5 100644
--- a/sdk/lib/ffi/abi.dart
+++ b/sdk/lib/ffi/abi.dart
@@ -57,6 +57,12 @@
   /// The application binary interface for linux on the X64 architecture.
   static const linuxX64 = _linuxX64;
 
+  /// The application binary interface for linux on 32-bit RISC-V.
+  static const linuxRiscv32 = _linuxRiscv32;
+
+  /// The application binary interface for linux on 64-bit RISC-V.
+  static const linuxRiscv64 = _linuxRiscv64;
+
   /// The application binary interface for MacOS on the Arm64 architecture.
   static const macosArm64 = _macosArm64;
 
@@ -94,6 +100,8 @@
     linuxArm64,
     linuxIA32,
     linuxX64,
+    linuxRiscv32,
+    linuxRiscv64,
     macosArm64,
     macosX64,
     windowsArm64,
@@ -134,6 +142,8 @@
   static const _linuxArm64 = Abi._(_Architecture.arm64, _OS.linux);
   static const _linuxIA32 = Abi._(_Architecture.ia32, _OS.linux);
   static const _linuxX64 = Abi._(_Architecture.x64, _OS.linux);
+  static const _linuxRiscv32 = Abi._(_Architecture.riscv32, _OS.linux);
+  static const _linuxRiscv64 = Abi._(_Architecture.riscv64, _OS.linux);
   static const _macosArm64 = Abi._(_Architecture.arm64, _OS.macos);
   static const _macosX64 = Abi._(_Architecture.x64, _OS.macos);
   static const _windowsArm64 = Abi._(_Architecture.arm64, _OS.windows);
@@ -147,6 +157,8 @@
   arm64,
   ia32,
   x64,
+  riscv32,
+  riscv64,
 }
 
 /// The operating systems the Dart VM runs on.
diff --git a/sdk/lib/ffi/abi_specific.dart b/sdk/lib/ffi/abi_specific.dart
index 560044a..d84f74e 100644
--- a/sdk/lib/ffi/abi_specific.dart
+++ b/sdk/lib/ffi/abi_specific.dart
@@ -30,6 +30,8 @@
 ///   Abi.linuxArm64: Uint64(),
 ///   Abi.linuxIA32: Uint32(),
 ///   Abi.linuxX64: Uint64(),
+///   Abi.linuxRiscv32: Uint32(),
+///   Abi.linuxRiscv64: Uint64(),
 ///   Abi.macosArm64: Uint64(),
 ///   Abi.macosX64: Uint64(),
 ///   Abi.windowsIA32: Uint32(),
diff --git a/sdk/lib/ffi/native_type.dart b/sdk/lib/ffi/native_type.dart
index 01a68475..dbbea7c 100644
--- a/sdk/lib/ffi/native_type.dart
+++ b/sdk/lib/ffi/native_type.dart
@@ -116,6 +116,8 @@
   Abi.linuxArm64: Int64(),
   Abi.linuxIA32: Int32(),
   Abi.linuxX64: Int64(),
+  Abi.linuxRiscv32: Int32(),
+  Abi.linuxRiscv64: Int64(),
   Abi.macosArm64: Int64(),
   Abi.macosX64: Int64(),
   Abi.windowsArm64: Int64(),
diff --git a/tests/ffi/abi_specific_ints.dart b/tests/ffi/abi_specific_ints.dart
index 94e05a4..c0cd3f0 100644
--- a/tests/ffi/abi_specific_ints.dart
+++ b/tests/ffi/abi_specific_ints.dart
@@ -39,6 +39,8 @@
   Abi.linuxArm64: Uint64(),
   Abi.linuxIA32: Uint32(),
   Abi.linuxX64: Uint64(),
+  Abi.linuxRiscv32: Uint32(),
+  Abi.linuxRiscv64: Uint64(),
   Abi.macosArm64: Uint64(),
   Abi.macosX64: Uint64(),
   Abi.windowsArm64: Uint64(),
@@ -67,6 +69,8 @@
   Abi.linuxArm64: Int64(),
   Abi.linuxIA32: Int32(),
   Abi.linuxX64: Int64(),
+  Abi.linuxRiscv32: Int32(),
+  Abi.linuxRiscv64: Int64(),
   Abi.macosArm64: Int64(),
   Abi.macosX64: Int64(),
   Abi.windowsArm64: Int32(),
@@ -95,6 +99,8 @@
   Abi.linuxArm64: Uint64(),
   Abi.linuxIA32: Uint32(),
   Abi.linuxX64: Uint64(),
+  Abi.linuxRiscv32: Uint32(),
+  Abi.linuxRiscv64: Uint64(),
   Abi.macosArm64: Uint64(),
   Abi.macosX64: Uint64(),
   Abi.windowsArm64: Uint32(),
@@ -126,6 +132,8 @@
   Abi.linuxArm64: Uint32(),
   Abi.linuxIA32: Uint32(),
   Abi.linuxX64: Uint32(),
+  Abi.linuxRiscv32: Uint32(),
+  Abi.linuxRiscv64: Uint32(),
   Abi.macosArm64: Uint32(),
   Abi.macosX64: Uint32(),
   Abi.windowsArm64: Uint16(),
diff --git a/tests/ffi_2/abi_specific_ints.dart b/tests/ffi_2/abi_specific_ints.dart
index 91c9875..c1d8302 100644
--- a/tests/ffi_2/abi_specific_ints.dart
+++ b/tests/ffi_2/abi_specific_ints.dart
@@ -26,6 +26,8 @@
   Abi.linuxArm64: Uint64(),
   Abi.linuxIA32: Uint32(),
   Abi.linuxX64: Uint64(),
+  Abi.linuxRiscv32: Uint32(),
+  Abi.linuxRiscv64: Uint64(),
   Abi.macosArm64: Uint64(),
   Abi.macosX64: Uint64(),
   Abi.windowsArm64: Uint64(),
@@ -54,6 +56,8 @@
   Abi.linuxArm64: Int64(),
   Abi.linuxIA32: Int32(),
   Abi.linuxX64: Int64(),
+  Abi.linuxRiscv32: Int32(),
+  Abi.linuxRiscv64: Int64(),
   Abi.macosArm64: Int64(),
   Abi.macosX64: Int64(),
   Abi.windowsArm64: Int32(),
@@ -82,6 +86,8 @@
   Abi.linuxArm64: Uint64(),
   Abi.linuxIA32: Uint32(),
   Abi.linuxX64: Uint64(),
+  Abi.linuxRiscv32: Uint32(),
+  Abi.linuxRiscv64: Uint64(),
   Abi.macosArm64: Uint64(),
   Abi.macosX64: Uint64(),
   Abi.windowsArm64: Uint32(),
@@ -113,6 +119,8 @@
   Abi.linuxArm64: Uint32(),
   Abi.linuxIA32: Uint32(),
   Abi.linuxX64: Uint32(),
+  Abi.linuxRiscv32: Uint32(),
+  Abi.linuxRiscv64: Uint32(),
   Abi.macosArm64: Uint32(),
   Abi.macosX64: Uint32(),
   Abi.windowsArm64: Uint16(),
diff --git a/tools/VERSION b/tools/VERSION
index 9b534e9..0107d24 100644
--- a/tools/VERSION
+++ b/tools/VERSION
@@ -27,5 +27,5 @@
 MAJOR 2
 MINOR 17
 PATCH 0
-PRERELEASE 34
+PRERELEASE 35
 PRERELEASE_PATCH 0
\ No newline at end of file
diff --git a/tools/bots/test_matrix.json b/tools/bots/test_matrix.json
index 1312a27..0400f5d 100644
--- a/tools/bots/test_matrix.json
+++ b/tools/bots/test_matrix.json
@@ -239,12 +239,16 @@
       "out/DebugSIMARM/",
       "out/DebugSIMARM64/",
       "out/DebugSIMARM64C/",
+      "out/DebugSIMRISCV32/",
+      "out/DebugSIMRISCV64/",
       "out/DebugX64/",
       "out/DebugX64C/",
       "out/ReleaseIA32/",
       "out/ReleaseSIMARM/",
       "out/ReleaseSIMARM64/",
       "out/ReleaseSIMARM64C/",
+      "out/ReleaseSIMRISCV32/",
+      "out/ReleaseSIMRISCV64/",
       "out/ReleaseX64/",
       "out/ReleaseX64C/",
       "third_party/pkg/",
@@ -482,61 +486,61 @@
         "builder-tag": "analyzer_use_fasta"
       }
     },
-    "dartk-asan-(linux|mac)-(debug|product|release)-(ia32|x64|simarm|simarm64)": {
+    "dartk-asan-(linux|mac)-(debug|product|release)-(ia32|x64|simarm|simarm64|simriscv32|simriscv64)": {
       "options": {
         "builder-tag": "asan",
         "timeout": 240
       }
     },
-    "dartk-lsan-(linux|mac)-(debug|product|release)-(ia32|x64|simarm|simarm64)": {
+    "dartk-lsan-(linux|mac)-(debug|product|release)-(ia32|x64|simarm|simarm64|simriscv32|simriscv64)": {
       "options": {
         "builder-tag": "lsan",
         "timeout": 240
       }
     },
-    "dartk-msan-linux-(debug|product|release)-(x64|simarm64)": {
+    "dartk-msan-linux-(debug|product|release)-(x64|simarm64|simriscv64)": {
       "options": {
         "builder-tag": "msan",
         "timeout": 240
       }
     },
-    "dartk-tsan-(linux|mac)-(debug|product|release)-(x64|simarm64)": {
+    "dartk-tsan-(linux|mac)-(debug|product|release)-(x64|simarm64|simriscv64)": {
       "options": {
         "builder-tag": "tsan",
         "timeout": 240
       }
     },
-    "dartk-ubsan-(linux|mac)-(debug|product|release)-(ia32|x64|simarm|simarm64)": {
+    "dartk-ubsan-(linux|mac)-(debug|product|release)-(ia32|x64|simarm|simarm64|simriscv32|simriscv64)": {
       "options": {
         "builder-tag": "ubsan",
         "timeout": 240
       }
     },
-    "dartkp-asan-(linux|mac)-(debug|product|release)-(x64|simarm|simarm64)": {
+    "dartkp-asan-(linux|mac)-(debug|product|release)-(x64|simarm|simarm64|simriscv32|simriscv64)": {
       "options": {
         "builder-tag": "asan",
         "timeout": 240
       }
     },
-    "dartkp-lsan-(linux|mac)-(debug|product|release)-(x64|simarm|simarm64)": {
+    "dartkp-lsan-(linux|mac)-(debug|product|release)-(x64|simarm|simarm64|simriscv32|simriscv64)": {
       "options": {
         "builder-tag": "lsan",
         "timeout": 240
       }
     },
-    "dartkp-msan-linux-(debug|product|release)-(x64|simarm64)": {
+    "dartkp-msan-linux-(debug|product|release)-(x64|simarm64|simriscv64)": {
       "options": {
         "builder-tag": "msan",
         "timeout": 240
       }
     },
-    "dartkp-tsan-(linux|mac)-(debug|product|release)-(x64|simarm64)": {
+    "dartkp-tsan-(linux|mac)-(debug|product|release)-(x64|simarm64|simriscv64)": {
       "options": {
         "builder-tag": "tsan",
         "timeout": 240
       }
     },
-    "dartkp-ubsan-(linux|mac)-(debug|product|release)-(x64|simarm|simarm64)": {
+    "dartkp-ubsan-(linux|mac)-(debug|product|release)-(x64|simarm|simarm64|simriscv32|simriscv64)": {
       "options": {
         "builder-tag": "ubsan",
         "timeout": 240
@@ -784,12 +788,12 @@
       }
     },
     "dartk-android-(debug|product|release)-(arm|arm64|arm64c)": {},
-    "dartkp-(linux|win|mac)-(debug|product|release)-(arm64|arm64c|simarm|simarm64|simarm64c)": {
+    "dartkp-(linux|win|mac)-(debug|product|release)-(arm64|arm64c|simarm|simarm64|simarm64c|simriscv32|simriscv64)": {
       "options": {
         "use-elf": true
       }
     },
-    "dartkp-dwarf-(linux|win|mac)-product-(arm64|arm64c|simarm|simarm64|simarm64c|x64|x64c)": {
+    "dartkp-dwarf-(linux|win|mac)-product-(arm64|arm64c|simarm|simarm64|simarm64c|x64|x64c|simriscv32|simriscv64)": {
       "options": {
         "builder-tag": "dwarf",
         "vm-options": [
@@ -858,7 +862,7 @@
         "builder-tag": "vm_nnbd"
       }
     },
-    "dartkp-weak-asserts-(linux|mac)-(debug|product|release)-(simarm|simarm64|simarm64c)": {
+    "dartkp-weak-asserts-(linux|mac)-(debug|product|release)-(simarm|simarm64|simarm64c|simriscv32|simriscv64)": {
       "options": {
         "enable-asserts": true,
         "use-elf": true,
@@ -872,13 +876,13 @@
         "builder-tag": "vm_nnbd"
       }
     },
-    "dartk-weak-asserts-(linux|mac|win)-(debug|product|release)-(arm64|ia32|simarm|simarm64|simarm64c|x64|x64c)": {
+    "dartk-weak-asserts-(linux|mac|win)-(debug|product|release)-(arm64|ia32|simarm|simarm64|simarm64c|x64|x64c|simriscv32|simriscv64)": {
       "options": {
         "enable-asserts": true,
         "builder-tag": "vm_nnbd"
       }
     },
-    "dartk-strong-(linux|mac|win)-(debug|product|release)-(arm64|ia32|simarm|simarm64|simarm64c|x64|x64c)": {
+    "dartk-strong-(linux|mac|win)-(debug|product|release)-(arm64|ia32|simarm|simarm64|simarm64c|simriscv32|simriscv64|x64|x64c)": {
       "options": {
         "builder-tag": "vm_nnbd"
       }
@@ -888,7 +892,7 @@
         "builder-tag": "vm_nnbd"
       }
     },
-    "dartkp-strong-(linux|mac)-(debug|product|release)-(simarm|simarm64|simarm64c)": {
+    "dartkp-strong-(linux|mac)-(debug|product|release)-(simarm|simarm64|simarm64c|simriscv32|simriscv64)": {
       "options": {
         "use-elf": true,
         "builder-tag": "vm_nnbd"
@@ -905,8 +909,8 @@
         "enable-asserts": true
       }
     },
-    "dartk-(linux|mac|win)-(debug|product|release)-(arm64|arm64c|simarm|simarm64|simarm64c)": {},
-    "dartk-optcounter-(linux|mac|win)-(debug|product|release)-(ia32|x64|x64c|simarm|simarm64|simarm64c)": {
+    "dartk-(linux|mac|win)-(debug|product|release)-(arm64|arm64c|simarm|simarm64|simarm64c|simriscv32|simriscv64)": {},
+    "dartk-optcounter-(linux|mac|win)-(debug|product|release)-(ia32|x64|x64c|simarm|simarm64|simarm64c|simriscv32|simriscv64)": {
       "options": {
         "builder-tag": "optimization_counter_threshold",
         "vm-options": [
@@ -925,12 +929,12 @@
         "hot-reload-rollback": true
       }
     },
-    "dartk-linux-(debug|product|release)-(arm|arm64|arm64c)-qemu": {
+    "dartk-linux-(debug|product|release)-(arm|arm64|arm64c|riscv32|riscv64)-qemu": {
       "options": {
         "use-qemu": true
       }
     },
-    "dartkp-linux-(debug|product|release)-(arm|arm64|arm64c)-qemu": {
+    "dartkp-linux-(debug|product|release)-(arm|arm64|arm64c|riscv32|riscv64)-qemu": {
       "options": {
         "use-qemu": true
       }
@@ -1297,6 +1301,47 @@
     },
     {
       "builders": [
+        "vm-precomp-ffi-qemu-linux-release-riscv64"
+      ],
+      "meta": {
+        "description": "This configuration is used for running vm unit tests and FFI tests on qemu and FFI unit tests."
+      },
+      "steps": [
+        {
+          "name": "build dart",
+          "script": "tools/build.py",
+          "arguments": [
+            "--use-qemu",
+            "dart_precompiled_runtime",
+            "runtime"
+          ]
+        },
+        {
+          "name": "build dart",
+          "script": "tools/build.py",
+          "arguments": [
+            "--arch=simriscv64",
+            "gen_snapshot"
+          ]
+        },
+        {
+          "name": "vm unit tests",
+          "arguments": [
+            "-ndartk-linux-${mode}-riscv64-qemu",
+            "vm/cc"
+          ]
+        },
+        {
+          "name": "ffi tests",
+          "arguments": [
+            "-ndartkp-linux-${mode}-riscv64-qemu",
+            "ffi_2"
+          ]
+        }
+      ]
+    },
+    {
+      "builders": [
         "vm-kernel-precomp-nnbd-mac-release-arm64"
       ],
       "meta": {
@@ -1465,7 +1510,8 @@
         "vm-kernel-precomp-linux-product-x64c",
         "vm-kernel-precomp-linux-release-simarm",
         "vm-kernel-precomp-linux-release-simarm64",
-        "vm-kernel-precomp-linux-release-simarm64c",
+        "vm-kernel-precomp-linux-release-simriscv32",
+        "vm-kernel-precomp-linux-release-simriscv64",
         "vm-kernel-precomp-mac-release-simarm64",
         "vm-kernel-precomp-mac-release-simarm64c",
         "vm-kernel-precomp-win-release-x64",
@@ -1499,6 +1545,8 @@
         "vm-kernel-precomp-linux-debug-x64c",
         "vm-kernel-precomp-linux-debug-simarm64",
         "vm-kernel-precomp-linux-debug-simarm64c",
+        "vm-kernel-precomp-linux-debug-simriscv32",
+        "vm-kernel-precomp-linux-debug-simriscv64",
         "vm-kernel-precomp-win-debug-x64c"
       ],
       "meta": {
@@ -1741,6 +1789,17 @@
             "--no-goma",
             "runtime"
           ]
+        },
+        {
+          "name": "build dart simriscv64",
+          "script": "tools/build.py",
+          "arguments": [
+            "--mode=all",
+            "--arch=simriscv64",
+            "--no-clang",
+            "--no-goma",
+            "runtime"
+          ]
         }
       ]
     },
@@ -1751,6 +1810,8 @@
         "vm-kernel-linux-release-simarm",
         "vm-kernel-linux-release-simarm64",
         "vm-kernel-linux-release-simarm64c",
+        "vm-kernel-linux-release-simriscv32",
+        "vm-kernel-linux-release-simriscv64",
         "vm-kernel-linux-release-ia32",
         "vm-kernel-win-debug-ia32",
         "vm-kernel-win-debug-x64",
@@ -1789,6 +1850,8 @@
         "vm-kernel-nnbd-linux-release-simarm",
         "vm-kernel-nnbd-linux-release-simarm64",
         "vm-kernel-nnbd-linux-release-simarm64c",
+        "vm-kernel-nnbd-linux-release-simriscv32",
+        "vm-kernel-nnbd-linux-release-simriscv64",
         "vm-kernel-nnbd-linux-release-x64",
         "vm-kernel-nnbd-linux-release-x64c",
         "vm-kernel-nnbd-mac-debug-arm64",
@@ -2325,7 +2388,9 @@
         "vm-kernel-optcounter-threshold-linux-release-x64c",
         "vm-kernel-optcounter-threshold-linux-release-simarm",
         "vm-kernel-optcounter-threshold-linux-release-simarm64",
-        "vm-kernel-optcounter-threshold-linux-release-simarm64c"
+        "vm-kernel-optcounter-threshold-linux-release-simarm64c",
+        "vm-kernel-optcounter-threshold-linux-release-simriscv32",
+        "vm-kernel-optcounter-threshold-linux-release-simriscv64"
       ],
       "meta": {
         "description": "This is the configuration for the kernel optcounter builders, under the vm-kernel group. They run the same tests as the ordinary VM kernel builders, but add extra options to the vm."
@@ -3838,7 +3903,7 @@
           "script": "tools/build.py",
           "arguments": [
             "--mode=debug,release",
-            "--arch=x64,x64c,simarm64,simarm64c",
+            "--arch=x64,x64c,simarm64,simarm64c,simriscv32,simriscv64",
             "runtime",
             "dart_precompiled_runtime"
           ]
diff --git a/tools/gn.py b/tools/gn.py
index fe4718d..b895393 100755
--- a/tools/gn.py
+++ b/tools/gn.py
@@ -100,19 +100,25 @@
     if m == 'armv7l' or m == 'armv6l':
         return 'arm'
 
-    if arch in ['ia32', 'arm', 'armv6', 'simarm', 'simarmv6', 'simarm_x64']:
+    if arch in [
+            'ia32', 'arm', 'armv6', 'simarm', 'simarm_x64', 'riscv32',
+            'simriscv32'
+    ]:
         return 'x86'
     if arch in [
-            'x64', 'arm64', 'simarm64', 'arm_x64', 'x64c', 'arm64c', 'simarm64c'
+            'x64', 'arm64', 'simarm64', 'arm_x64', 'x64c', 'arm64c',
+            'simarm64c', 'riscv64', 'simriscv64'
     ]:
         return 'x64'
 
 
 # The C compiler's target.
 def TargetCpuForArch(arch, target_os):
-    if arch in ['ia32', 'simarm', 'simarmv6']:
+    if arch in ['ia32', 'simarm', 'simriscv32']:
         return 'x86'
-    if arch in ['x64', 'simarm64', 'simarm_x64', 'x64c', 'simarm64c']:
+    if arch in [
+            'x64', 'simarm64', 'simarm_x64', 'simriscv64', 'x64c', 'simarm64c'
+    ]:
         return 'x64'
     if arch == 'arm_x64':
         return 'arm'
@@ -129,10 +135,12 @@
         return 'x64'
     if arch in ['arm', 'simarm', 'simarm_x64', 'arm_x64']:
         return 'arm'
-    if arch in ['armv6', 'simarmv6']:
-        return 'armv6'
     if arch in ['arm64', 'simarm64', 'arm64c', 'simarm64c']:
         return 'arm64'
+    if arch in ['riscv32', 'simriscv32']:
+        return 'riscv32'
+    if arch in ['riscv64', 'simriscv64']:
+        return 'riscv64'
     return arch
 
 
@@ -171,6 +179,9 @@
     # Our Debian Jesse sysroot has incorrect annotations on realloc.
     if gn_args['is_ubsan']:
         return False
+    # Our Debian Jesse sysroot doesn't support RISC-V.
+    if gn_args['target_cpu'] in ['riscv32', 'riscv64']:
+        return False
     # Otherwise use the sysroot.
     return True
 
@@ -213,6 +224,8 @@
     # Use tcmalloc only when targeting Linux and when not using ASAN.
     gn_args['dart_use_tcmalloc'] = ((gn_args['target_os'] == 'linux') and
                                     (gn_args['target_cpu'] != 'arm') and
+                                    (gn_args['target_cpu'] != 'riscv32') and
+                                    (gn_args['target_cpu'] != 'riscv64') and
                                     sanitizer == 'none')
 
     # Use mallinfo2 if specified on the command line
@@ -370,8 +383,14 @@
                     (os_name, HOST_OS))
                 return False
             if not arch in [
-                    'ia32', 'x64', 'arm', 'arm_x64', 'armv6', 'arm64', 'x64c',
-                    'arm64c'
+                    'ia32',
+                    'x64',
+                    'arm',
+                    'arm_x64',
+                    'armv6',
+                    'arm64',
+                    'x64c',
+                    'arm64c',
             ]:
                 print(
                     "Cross-compilation to %s is not supported for architecture %s."
diff --git a/tools/run_offsets_extractor.sh b/tools/run_offsets_extractor.sh
index 52b1376..b0ebd57 100755
--- a/tools/run_offsets_extractor.sh
+++ b/tools/run_offsets_extractor.sh
@@ -45,6 +45,8 @@
 run release simarm64 ReleaseSIMARM64
 run release x64c ReleaseX64C
 run release simarm64c ReleaseSIMARM64C
+run release simriscv32 ReleaseSIMRISCV32
+run release simriscv64 ReleaseSIMRISCV64
 echo "" >>"$TEMP_JIT"
 echo "" >>"$TEMP_AOT"
 echo "#else  // !defined(PRODUCT)" >>"$TEMP_JIT"
@@ -55,6 +57,8 @@
 run product simarm64 ProductSIMARM64
 run product x64c ProductX64C
 run product simarm64c ProductSIMARM64C
+run product simriscv32 ProductSIMRISCV32
+run product simriscv64 ProductSIMRISCV64
 echo "" >>"$TEMP_JIT"
 echo "" >>"$TEMP_AOT"
 echo "#endif  // !defined(PRODUCT)" >>"$TEMP_JIT"
diff --git a/tools/utils.py b/tools/utils.py
index 644fc49..cecdbc2 100644
--- a/tools/utils.py
+++ b/tools/utils.py
@@ -65,16 +65,18 @@
     'ia32': 'ia32',
     'x64': 'ia32',
     'arm': 'arm',
-    'armv6': 'arm',
     'arm64': 'arm',
     'arm_x64': 'arm',
     'simarm': 'ia32',
-    'simarmv6': 'ia32',
     'simarm64': 'ia32',
     'simarm_x64': 'ia32',
     'x64c': 'ia32',
     'arm64c': 'arm',
     'simarm64c': 'ia32',
+    'simriscv32': 'ia32',
+    'simriscv64': 'ia32',
+    'riscv32': 'riscv',
+    'riscv64': 'riscv',
 }
 
 BASE_DIR = os.path.abspath(os.path.join(os.curdir, '..'))