Version 2.17.0-35.0.dev

Merge commit '04ba20aa9847d40844446bfe21d37bb11a665d3a' into 'dev'
diff --git a/AUTHORS b/AUTHORS
index b3a3ac6..5879a77 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -35,3 +35,4 @@
 K. Alex Gann <k.alexgann@gmail.com>
 Kenneth Endfinger <kaendfinger@gmail.com>
 Cristian Almstrand <cristian.almstrand@gmail.com>
+Ryan Macnak <rmacnak@gmail.com>
diff --git a/benchmarks/FfiMemory/dart/FfiMemory.dart b/benchmarks/FfiMemory/dart/FfiMemory.dart
index 686d6bd..881eff2 100644
--- a/benchmarks/FfiMemory/dart/FfiMemory.dart
+++ b/benchmarks/FfiMemory/dart/FfiMemory.dart
@@ -33,6 +33,8 @@
   Abi.linuxArm64: Uint64(),
   Abi.linuxIA32: Uint32(),
   Abi.linuxX64: Uint64(),
+  Abi.linuxRiscv32: Uint32(),
+  Abi.linuxRiscv64: Uint64(),
   Abi.macosArm64: Uint64(),
   Abi.macosX64: Uint64(),
   Abi.windowsArm64: Uint64(),
diff --git a/benchmarks/FfiMemory/dart2/FfiMemory.dart b/benchmarks/FfiMemory/dart2/FfiMemory.dart
index a170aaa..86f7e1f 100644
--- a/benchmarks/FfiMemory/dart2/FfiMemory.dart
+++ b/benchmarks/FfiMemory/dart2/FfiMemory.dart
@@ -35,6 +35,8 @@
   Abi.linuxArm64: Uint64(),
   Abi.linuxIA32: Uint32(),
   Abi.linuxX64: Uint64(),
+  Abi.linuxRiscv32: Uint32(),
+  Abi.linuxRiscv64: Uint64(),
   Abi.macosArm64: Uint64(),
   Abi.macosX64: Uint64(),
   Abi.windowsArm64: Uint64(),
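
Both FfiMemory variants size a pointer-width load per dart:ffi Abi, and the new entries give RISC-V its 32- and 64-bit widths. A minimal standalone sketch of the same pattern, assuming dart:ffi's AbiSpecificIntegerMapping API (the ExampleWord name is hypothetical, and the mapping is deliberately abbreviated):

    import 'dart:ffi';

    // Hypothetical pointer-sized integer: 32-bit on linuxRiscv32,
    // 64-bit on linuxRiscv64 and linuxX64. An Abi omitted from the
    // mapping only fails when the type is used on that target, which
    // is why the benchmark tables enumerate every supported Abi.
    @AbiSpecificIntegerMapping({
      Abi.linuxRiscv32: Uint32(),
      Abi.linuxRiscv64: Uint64(),
      Abi.linuxX64: Uint64(),
    })
    class ExampleWord extends AbiSpecificInteger {
      const ExampleWord();
    }
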
diff --git a/build/config/compiler/BUILD.gn b/build/config/compiler/BUILD.gn
index a4614c2..b38dc18 100644
--- a/build/config/compiler/BUILD.gn
+++ b/build/config/compiler/BUILD.gn
@@ -258,6 +258,12 @@
       } else if (current_cpu == "arm64") {
         cflags += [ "--target=aarch64-linux-gnu" ]
         ldflags += [ "--target=aarch64-linux-gnu" ]
+      } else if (current_cpu == "riscv32") {
+        cflags += [ "--target=riscv32-linux-gnu" ]
+        ldflags += [ "--target=riscv32-linux-gnu" ]
+      } else if (current_cpu == "riscv64") {
+        cflags += [ "--target=riscv64-linux-gnu" ]
+        ldflags += [ "--target=riscv64-linux-gnu" ]
       } else if (current_cpu == "x86") {
         cflags += [ "--target=i386-linux-gnu" ]
         ldflags += [ "--target=i386-linux-gnu" ]
@@ -554,7 +560,7 @@
   if (is_clang) {
     default_warning_flags += [
       "-Wno-tautological-constant-compare",
-      "-Wno-unused-but-set-variable", # icu
+      "-Wno-unused-but-set-variable",  # icu
     ]
   } else {
     default_warning_flags +=
@@ -700,6 +706,7 @@
   common_optimize_on_ldflags = [
     # Linker GC.
     "/OPT:REF",
+
     # Identical code folding to reduce size.
     # Warning: This changes C/C++ semantics of function pointer comparison.
     "/OPT:ICF",
diff --git a/build/toolchain/linux/BUILD.gn b/build/toolchain/linux/BUILD.gn
index 4692784..5f9bc32 100644
--- a/build/toolchain/linux/BUILD.gn
+++ b/build/toolchain/linux/BUILD.gn
@@ -156,3 +156,75 @@
   toolchain_os = "linux"
   is_clang = false
 }
+
+gcc_toolchain("riscv32") {
+  prefix = "riscv32-linux-gnu-"
+  if (toolchain_prefix != "") {
+    prefix = toolchain_prefix
+  }
+
+  cc = "${compiler_prefix}${prefix}gcc"
+  cxx = "${compiler_prefix}${prefix}g++"
+
+  ar = "${prefix}ar"
+  ld = cxx
+  readelf = "${prefix}readelf"
+  nm = "${prefix}nm"
+  strip = "${prefix}strip"
+
+  toolchain_cpu = "riscv32"
+  toolchain_os = "linux"
+  is_clang = false
+}
+
+gcc_toolchain("clang_riscv32") {
+  prefix = rebase_path("//buildtools/linux-x64/clang/bin", root_build_dir)
+  cc = "${compiler_prefix}${prefix}/clang"
+  cxx = "${compiler_prefix}${prefix}/clang++"
+
+  readelf = "readelf"
+  nm = "${prefix}/llvm-nm"
+  ar = "${prefix}/llvm-ar"
+  ld = cxx
+  llvm_objcopy = "${prefix}/llvm-objcopy"
+
+  toolchain_cpu = "riscv32"
+  toolchain_os = "linux"
+  is_clang = true
+}
+
+gcc_toolchain("riscv64") {
+  prefix = "riscv64-linux-gnu-"
+  if (toolchain_prefix != "") {
+    prefix = toolchain_prefix
+  }
+
+  cc = "${compiler_prefix}${prefix}gcc"
+  cxx = "${compiler_prefix}${prefix}g++"
+
+  ar = "${prefix}ar"
+  ld = cxx
+  readelf = "${prefix}readelf"
+  nm = "${prefix}nm"
+  strip = "${prefix}strip"
+
+  toolchain_cpu = "riscv64"
+  toolchain_os = "linux"
+  is_clang = false
+}
+
+gcc_toolchain("clang_riscv64") {
+  prefix = rebase_path("//buildtools/linux-x64/clang/bin", root_build_dir)
+  cc = "${compiler_prefix}${prefix}/clang"
+  cxx = "${compiler_prefix}${prefix}/clang++"
+
+  readelf = "readelf"
+  nm = "${prefix}/llvm-nm"
+  ar = "${prefix}/llvm-ar"
+  ld = cxx
+  llvm_objcopy = "${prefix}/llvm-objcopy"
+
+  toolchain_cpu = "riscv64"
+  toolchain_os = "linux"
+  is_clang = true
+}
diff --git a/pkg/_fe_analyzer_shared/pubspec.yaml b/pkg/_fe_analyzer_shared/pubspec.yaml
index 16c442b..cfc2267 100644
--- a/pkg/_fe_analyzer_shared/pubspec.yaml
+++ b/pkg/_fe_analyzer_shared/pubspec.yaml
@@ -1,5 +1,5 @@
 name: _fe_analyzer_shared
-version: 33.0.0
+version: 34.0.0
 description: Logic that is shared between the front_end and analyzer packages.
 homepage: https://github.com/dart-lang/sdk/tree/master/pkg/_fe_analyzer_shared
 
diff --git a/pkg/analysis_server/test/src/services/correction/fix/analysis_options/remove_setting_test.dart b/pkg/analysis_server/test/src/services/correction/fix/analysis_options/remove_setting_test.dart
index c378884..9276b71 100644
--- a/pkg/analysis_server/test/src/services/correction/fix/analysis_options/remove_setting_test.dart
+++ b/pkg/analysis_server/test/src/services/correction/fix/analysis_options/remove_setting_test.dart
@@ -18,13 +18,13 @@
     await assertHasFix('''
 analyzer:
   enable-experiment:
-    - non-nullable
+    - super-parameters
   language:
     enableSuperMixins: true
 ''', '''
 analyzer:
   enable-experiment:
-    - non-nullable
+    - super-parameters
 ''');
   }
 
@@ -33,11 +33,11 @@
 analyzer:
   enable-experiment:
     - not-an-experiment
-    - non-nullable
+    - super-parameters
 ''', '''
 analyzer:
   enable-experiment:
-    - non-nullable
+    - super-parameters
 ''');
   }
 
@@ -45,12 +45,12 @@
     await assertHasFix('''
 analyzer:
   enable-experiment:
-    - non-nullable
+    - super-parameters
     - not-an-experiment
 ''', '''
 analyzer:
   enable-experiment:
-    - non-nullable
+    - super-parameters
 ''');
   }
 
diff --git a/pkg/analyzer/CHANGELOG.md b/pkg/analyzer/CHANGELOG.md
index 4d6ca01..65409ce 100644
--- a/pkg/analyzer/CHANGELOG.md
+++ b/pkg/analyzer/CHANGELOG.md
@@ -1,4 +1,4 @@
-## 3.2.0-dev
+## 3.2.0
 * Deprecated `changes` getter in `File` and `Folder`, use `watch()` instead.
 
 ## 3.1.0
diff --git a/pkg/analyzer/lib/dart/ast/ast.dart b/pkg/analyzer/lib/dart/ast/ast.dart
index a0b7a9c..748df11 100644
--- a/pkg/analyzer/lib/dart/ast/ast.dart
+++ b/pkg/analyzer/lib/dart/ast/ast.dart
@@ -402,6 +402,8 @@
 
   R? visitConstructorReference(ConstructorReference node);
 
+  R? visitConstructorSelector(ConstructorSelector node);
+
   R? visitContinueStatement(ContinueStatement node);
 
   R? visitDeclaredIdentifier(DeclaredIdentifier node);
@@ -418,6 +420,8 @@
 
   R? visitEmptyStatement(EmptyStatement node);
 
+  R? visitEnumConstantArguments(EnumConstantArguments node);
+
   R? visitEnumConstantDeclaration(EnumConstantDeclaration node);
 
   R? visitEnumDeclaration(EnumDeclaration node);
@@ -1365,6 +1369,20 @@
   ConstructorElement? get staticElement;
 }
 
+/// The name of a constructor being invoked.
+///
+///    constructorSelector ::=
+///        '.' identifier
+///
+/// Clients may not extend, implement or mix-in this class.
+abstract class ConstructorSelector implements AstNode {
+  /// Return the constructor name.
+  SimpleIdentifier get name;
+
+  /// Return the period before the constructor name.
+  Token get period;
+}
+
 /// A continue statement.
 ///
 ///    continueStatement ::=
@@ -1563,10 +1581,41 @@
   Token get semicolon;
 }
 
+/// The arguments part of an enum constant.
+///
+///    enumConstantArguments ::=
+///        [TypeArgumentList]? [ConstructorSelector]? [ArgumentList]
+///
+/// Clients may not extend, implement or mix-in this class.
+abstract class EnumConstantArguments implements AstNode {
+  /// Return the explicit arguments (there are always implicit `index` and
+  /// `name` leading arguments) to the invoked constructor.
+  ArgumentList get argumentList;
+
+  /// Return the selector of the constructor that is invoked by this enum
+  /// constant, or `null` if the default constructor is invoked.
+  ConstructorSelector? get constructorSelector;
+
+  /// Return the type arguments applied to the enclosing enum declaration
+  /// when invoking the constructor, or `null` if no type arguments were
+  /// provided.
+  TypeArgumentList? get typeArguments;
+}
+
 /// The declaration of an enum constant.
 ///
 /// Clients may not extend, implement or mix-in this class.
 abstract class EnumConstantDeclaration implements Declaration {
+  /// Return the explicit arguments (there are always implicit `index` and
+  /// `name` leading arguments) to the invoked constructor, or `null` if this
+  /// constant does not provide any explicit arguments.
+  EnumConstantArguments? get arguments;
+
+  /// Return the constructor that is invoked by this enum constant, or `null`
+  /// if the AST structure has not been resolved, or if the constructor could
+  /// not be resolved.
+  ConstructorElement? get constructorElement;
+
   /// Return the name of the constant.
   SimpleIdentifier get name;
 }
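
The new ConstructorSelector and EnumConstantArguments nodes surface the `<typeArguments>.name(arguments)` part of an enhanced enum constant. A hedged usage sketch against the public analyzer API (assumes an analyzer build containing this change; on SDKs where enhanced enums are still experimental, parsing reports a recoverable diagnostic, hence throwIfDiagnostics is disabled):

    import 'package:analyzer/dart/analysis/features.dart';
    import 'package:analyzer/dart/analysis/utilities.dart';
    import 'package:analyzer/dart/ast/ast.dart';

    void main() {
      final unit = parseString(
        content: 'enum E { v<int>.named(42) }',
        featureSet: FeatureSet.latestLanguageVersion(),
        throwIfDiagnostics: false,
      ).unit;
      final e = unit.declarations.whereType<EnumDeclaration>().first;
      final args = e.constants.first.arguments; // EnumConstantArguments?
      print(args?.typeArguments?.toSource()); // <int>
      print(args?.constructorSelector?.name.name); // named
      print(args?.argumentList.toSource()); // (42)
    }
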
diff --git a/pkg/analyzer/lib/dart/ast/visitor.dart b/pkg/analyzer/lib/dart/ast/visitor.dart
index a77a83e..309fcf1 100644
--- a/pkg/analyzer/lib/dart/ast/visitor.dart
+++ b/pkg/analyzer/lib/dart/ast/visitor.dart
@@ -230,6 +230,9 @@
       visitExpression(node);
 
   @override
+  R? visitConstructorSelector(ConstructorSelector node) => visitNode(node);
+
+  @override
   R? visitContinueStatement(ContinueStatement node) => visitStatement(node);
 
   R? visitDeclaration(Declaration node) => visitAnnotatedNode(node);
@@ -259,6 +262,9 @@
   R? visitEmptyStatement(EmptyStatement node) => visitStatement(node);
 
   @override
+  R? visitEnumConstantArguments(EnumConstantArguments node) => visitNode(node);
+
+  @override
   R? visitEnumConstantDeclaration(EnumConstantDeclaration node) =>
       visitDeclaration(node);
 
@@ -803,6 +809,12 @@
   }
 
   @override
+  R? visitConstructorSelector(ConstructorSelector node) {
+    node.visitChildren(this);
+    return null;
+  }
+
+  @override
   R? visitContinueStatement(ContinueStatement node) {
     node.visitChildren(this);
     return null;
@@ -851,6 +863,12 @@
   }
 
   @override
+  R? visitEnumConstantArguments(EnumConstantArguments node) {
+    node.visitChildren(this);
+    return null;
+  }
+
+  @override
   R? visitEnumConstantDeclaration(EnumConstantDeclaration node) {
     node.visitChildren(this);
     return null;
@@ -1511,6 +1529,9 @@
   R? visitConstructorReference(ConstructorReference node) => null;
 
   @override
+  R? visitConstructorSelector(ConstructorSelector node) => null;
+
+  @override
   R? visitContinueStatement(ContinueStatement node) => null;
 
   @override
@@ -1535,6 +1556,9 @@
   R? visitEmptyStatement(EmptyStatement node) => null;
 
   @override
+  R? visitEnumConstantArguments(EnumConstantArguments node) => null;
+
+  @override
   R? visitEnumConstantDeclaration(EnumConstantDeclaration node) => null;
 
   @override
@@ -1917,6 +1941,9 @@
   R? visitConstructorReference(ConstructorReference node) => _throw(node);
 
   @override
+  R? visitConstructorSelector(ConstructorSelector node) => _throw(node);
+
+  @override
   R? visitContinueStatement(ContinueStatement node) => _throw(node);
 
   @override
@@ -1941,6 +1968,9 @@
   R? visitEmptyStatement(EmptyStatement node) => _throw(node);
 
   @override
+  R? visitEnumConstantArguments(EnumConstantArguments node) => _throw(node);
+
+  @override
   R? visitEnumConstantDeclaration(EnumConstantDeclaration node) => _throw(node);
 
   @override
@@ -2466,6 +2496,14 @@
   }
 
   @override
+  T? visitConstructorSelector(ConstructorSelector node) {
+    stopwatch.start();
+    T? result = _baseVisitor.visitConstructorSelector(node);
+    stopwatch.stop();
+    return result;
+  }
+
+  @override
   T? visitContinueStatement(ContinueStatement node) {
     stopwatch.start();
     T? result = _baseVisitor.visitContinueStatement(node);
@@ -2530,6 +2568,14 @@
   }
 
   @override
+  T? visitEnumConstantArguments(EnumConstantArguments node) {
+    stopwatch.start();
+    T? result = _baseVisitor.visitEnumConstantArguments(node);
+    stopwatch.stop();
+    return result;
+  }
+
+  @override
   T? visitEnumConstantDeclaration(EnumConstantDeclaration node) {
     stopwatch.start();
     T? result = _baseVisitor.visitEnumConstantDeclaration(node);
@@ -3387,6 +3433,9 @@
   R? visitConstructorReference(ConstructorReference node) => visitNode(node);
 
   @override
+  R? visitConstructorSelector(ConstructorSelector node) => visitNode(node);
+
+  @override
   R? visitContinueStatement(ContinueStatement node) => visitNode(node);
 
   @override
@@ -3412,6 +3461,9 @@
   R? visitEmptyStatement(EmptyStatement node) => visitNode(node);
 
   @override
+  R? visitEnumConstantArguments(EnumConstantArguments node) => visitNode(node);
+
+  @override
   R? visitEnumConstantDeclaration(EnumConstantDeclaration node) =>
       visitNode(node);
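
Each visitor base class in this file (GeneralizingAstVisitor, RecursiveAstVisitor, SimpleAstVisitor, ThrowingAstVisitor, the timed delegating visitor, and UnifyingAstVisitor) gains the same pair of methods, so subclasses can override them uniformly. A minimal subclass sketch, assuming RecursiveAstVisitor from package:analyzer (the collector name is hypothetical):

    import 'package:analyzer/dart/ast/ast.dart';
    import 'package:analyzer/dart/ast/visitor.dart';

    // Collects the names of constructors explicitly selected by enum
    // constants, e.g. 'named' in `v<int>.named(42)`.
    class SelectorCollector extends RecursiveAstVisitor<void> {
      final selectors = <String>[];

      @override
      void visitConstructorSelector(ConstructorSelector node) {
        selectors.add(node.name.name);
        super.visitConstructorSelector(node);
      }
    }

Driving it is the usual pattern: `unit.accept(SelectorCollector())` after parsing a compilation unit.
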
 
diff --git a/pkg/analyzer/lib/src/dart/analysis/experiments.g.dart b/pkg/analyzer/lib/src/dart/analysis/experiments.g.dart
index 9cda58d..ae8a33d 100644
--- a/pkg/analyzer/lib/src/dart/analysis/experiments.g.dart
+++ b/pkg/analyzer/lib/src/dart/analysis/experiments.g.dart
@@ -374,13 +374,13 @@
   static const bool enhanced_enums = false;
 
   /// Expiration status of the experiment "extension-methods"
-  static const bool extension_methods = false;
+  static const bool extension_methods = true;
 
   /// Expiration status of the experiment "extension-types"
   static const bool extension_types = false;
 
   /// Expiration status of the experiment "generic-metadata"
-  static const bool generic_metadata = false;
+  static const bool generic_metadata = true;
 
   /// Expiration status of the experiment "macros"
   static const bool macros = false;
@@ -389,10 +389,10 @@
   static const bool named_arguments_anywhere = false;
 
   /// Expiration status of the experiment "non-nullable"
-  static const bool non_nullable = false;
+  static const bool non_nullable = true;
 
   /// Expiration status of the experiment "nonfunction-type-aliases"
-  static const bool nonfunction_type_aliases = false;
+  static const bool nonfunction_type_aliases = true;
 
   /// Expiration status of the experiment "set-literals"
   static const bool set_literals = true;
@@ -407,7 +407,7 @@
   static const bool test_experiment = false;
 
   /// Expiration status of the experiment "triple-shift"
-  static const bool triple_shift = false;
+  static const bool triple_shift = true;
 
   /// Expiration status of the experiment "value-class"
   static const bool value_class = false;
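
Flipping an expiration constant to true retires the flag: the feature's outcome is fixed, and user-supplied enable/disable flags for it no longer change behavior. An illustrative model of that rule, not analyzer code, with all names hypothetical:

    class Experiment {
      final String name;
      final bool enabledByDefault;
      final bool isExpired; // mirrors the generated booleans above
      const Experiment(this.name, this.enabledByDefault, this.isExpired);

      // Hypothetical resolution rule: expired experiments ignore
      // user flags; live ones honor them.
      bool isEnabled({bool? userFlag}) =>
          isExpired ? enabledByDefault : (userFlag ?? enabledByDefault);
    }

    void main() {
      const nonNullable = Experiment('non-nullable', true, true);
      print(nonNullable.isEnabled(userFlag: false)); // true: flag ignored
    }
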
diff --git a/pkg/analyzer/lib/src/dart/ast/ast.dart b/pkg/analyzer/lib/src/dart/ast/ast.dart
index ca91eb4..5218135 100644
--- a/pkg/analyzer/lib/src/dart/ast/ast.dart
+++ b/pkg/analyzer/lib/src/dart/ast/ast.dart
@@ -2725,6 +2725,41 @@
   }
 }
 
+class ConstructorSelectorImpl extends AstNodeImpl
+    implements ConstructorSelector {
+  @override
+  final Token period;
+
+  @override
+  final SimpleIdentifierImpl name;
+
+  ConstructorSelectorImpl({
+    required this.period,
+    required this.name,
+  }) {
+    _becomeParentOf(name);
+  }
+
+  @override
+  Token get beginToken => period;
+
+  @override
+  Iterable<SyntacticEntity> get childEntities => ChildEntities()
+    ..add(period)
+    ..add(name);
+
+  @override
+  Token get endToken => name.token;
+
+  @override
+  E? accept<E>(AstVisitor<E> visitor) {
+    return visitor.visitConstructorSelector(this);
+  }
+
+  @override
+  void visitChildren(AstVisitor visitor) {}
+}
+
 /// A continue statement.
 ///
 ///    continueStatement ::=
@@ -3242,31 +3277,89 @@
   }
 }
 
+class EnumConstantArgumentsImpl extends AstNodeImpl
+    implements EnumConstantArguments {
+  @override
+  final TypeArgumentListImpl? typeArguments;
+
+  @override
+  final ConstructorSelectorImpl? constructorSelector;
+
+  @override
+  final ArgumentListImpl argumentList;
+
+  EnumConstantArgumentsImpl({
+    required this.typeArguments,
+    required this.constructorSelector,
+    required this.argumentList,
+  }) {
+    _becomeParentOf(typeArguments);
+    _becomeParentOf(constructorSelector);
+    _becomeParentOf(argumentList);
+  }
+
+  @override
+  Token get beginToken =>
+      (typeArguments ?? constructorSelector ?? argumentList).beginToken;
+
+  @override
+  Iterable<SyntacticEntity> get childEntities => ChildEntities()
+    ..add(typeArguments)
+    ..add(constructorSelector)
+    ..add(argumentList);
+
+  @override
+  Token get endToken => argumentList.endToken;
+
+  @override
+  E? accept<E>(AstVisitor<E> visitor) {
+    return visitor.visitEnumConstantArguments(this);
+  }
+
+  @override
+  void visitChildren(AstVisitor visitor) {
+    typeArguments?.accept(visitor);
+    constructorSelector?.accept(visitor);
+    argumentList.accept(visitor);
+  }
+}
+
 /// The declaration of an enum constant.
 class EnumConstantDeclarationImpl extends DeclarationImpl
     implements EnumConstantDeclaration {
   /// The name of the constant.
   SimpleIdentifierImpl _name;
 
+  @override
+  final EnumConstantArgumentsImpl? arguments;
+
+  @override
+  ConstructorElement? constructorElement;
+
   /// Initialize a newly created enum constant declaration. Either or both of
-  /// the [comment] and [metadata] can be `null` if the constant does not have
-  /// the corresponding attribute. (Technically, enum constants cannot have
-  /// metadata, but we allow it for consistency.)
-  EnumConstantDeclarationImpl(
-      CommentImpl? comment, List<Annotation>? metadata, this._name)
-      : super(comment, metadata) {
+  /// the [documentationComment] and [metadata] can be `null` if the constant
+  /// does not have the corresponding attribute.
+  EnumConstantDeclarationImpl({
+    required CommentImpl? documentationComment,
+    required List<Annotation>? metadata,
+    required SimpleIdentifierImpl name,
+    required this.arguments,
+  })  : _name = name,
+        super(documentationComment, metadata) {
     _becomeParentOf(_name);
+    _becomeParentOf(arguments);
   }
 
   @override
-  Iterable<SyntacticEntity> get childEntities =>
-      super._childEntities..add(_name);
+  Iterable<SyntacticEntity> get childEntities => super._childEntities
+    ..add(_name)
+    ..add(arguments);
 
   @override
   FieldElement get declaredElement => _name.staticElement as FieldElement;
 
   @override
-  Token get endToken => _name.endToken;
+  Token get endToken => (arguments ?? _name).endToken;
 
   @override
   Token get firstTokenAfterCommentAndMetadata => _name.beginToken;
@@ -3286,6 +3379,7 @@
   void visitChildren(AstVisitor visitor) {
     super.visitChildren(visitor);
     _name.accept(visitor);
+    arguments?.accept(visitor);
   }
 }
 
diff --git a/pkg/analyzer/lib/src/dart/ast/ast_factory.dart b/pkg/analyzer/lib/src/dart/ast/ast_factory.dart
index 806d84e..37cfef3 100644
--- a/pkg/analyzer/lib/src/dart/ast/ast_factory.dart
+++ b/pkg/analyzer/lib/src/dart/ast/ast_factory.dart
@@ -385,7 +385,11 @@
   EnumConstantDeclarationImpl enumConstantDeclaration(Comment? comment,
           List<Annotation>? metadata, SimpleIdentifier name) =>
       EnumConstantDeclarationImpl(
-          comment as CommentImpl?, metadata, name as SimpleIdentifierImpl);
+        documentationComment: comment as CommentImpl?,
+        metadata: metadata,
+        name: name as SimpleIdentifierImpl,
+        arguments: null,
+      );
 
   @Deprecated('Use enumDeclaration2() instead')
   @override
diff --git a/pkg/analyzer/lib/src/dart/ast/to_source_visitor.dart b/pkg/analyzer/lib/src/dart/ast/to_source_visitor.dart
index 294a89f..d2c7f88 100644
--- a/pkg/analyzer/lib/src/dart/ast/to_source_visitor.dart
+++ b/pkg/analyzer/lib/src/dart/ast/to_source_visitor.dart
@@ -249,6 +249,12 @@
   }
 
   @override
+  void visitConstructorSelector(ConstructorSelector node) {
+    _visitToken(node.period);
+    _visitNode(node.name);
+  }
+
+  @override
   void visitContinueStatement(ContinueStatement node) {
     sink.write('continue');
     _visitNode(node.label, prefix: ' ');
@@ -306,9 +312,17 @@
   }
 
   @override
+  void visitEnumConstantArguments(EnumConstantArguments node) {
+    _visitNode(node.typeArguments);
+    _visitNode(node.constructorSelector);
+    _visitNode(node.argumentList);
+  }
+
+  @override
   void visitEnumConstantDeclaration(EnumConstantDeclaration node) {
     _visitNodeList(node.metadata, separator: ' ', suffix: ' ');
     _visitNode(node.name);
+    _visitNode(node.arguments);
   }
 
   @override
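
With these emit methods, AstNode.toSource() reproduces the new enum-constant syntax; the to_source_visitor tests further down assert exactly this output. A hedged round-trip sketch under the same parsing assumptions as the earlier sketch:

    import 'package:analyzer/dart/analysis/features.dart';
    import 'package:analyzer/dart/analysis/utilities.dart';
    import 'package:analyzer/dart/ast/ast.dart';

    void main() {
      final unit = parseString(
        content: 'enum E {\n  v<double>.named(42)\n}\n',
        featureSet: FeatureSet.latestLanguageVersion(),
        throwIfDiagnostics: false,
      ).unit;
      final e = unit.declarations.whereType<EnumDeclaration>().first;
      print(e.toSource()); // enum E {v<double>.named(42)}
    }
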
diff --git a/pkg/analyzer/lib/src/dart/ast/utilities.dart b/pkg/analyzer/lib/src/dart/ast/utilities.dart
index 4db6958..1aca07f 100644
--- a/pkg/analyzer/lib/src/dart/ast/utilities.dart
+++ b/pkg/analyzer/lib/src/dart/ast/utilities.dart
@@ -350,6 +350,13 @@
   }
 
   @override
+  bool visitConstructorSelector(ConstructorSelector node) {
+    var other = _other as ConstructorSelector;
+    return isEqualTokens(node.period, other.period) &&
+        isEqualNodes(node.name, other.name);
+  }
+
+  @override
   bool visitContinueStatement(ContinueStatement node) {
     ContinueStatement other = _other as ContinueStatement;
     return isEqualTokens(node.continueKeyword, other.continueKeyword) &&
@@ -415,6 +422,14 @@
   }
 
   @override
+  bool visitEnumConstantArguments(EnumConstantArguments node) {
+    var other = _other as EnumConstantArguments;
+    return isEqualNodes(node.typeArguments, other.typeArguments) &&
+        isEqualNodes(node.constructorSelector, other.constructorSelector) &&
+        isEqualNodes(node.argumentList, other.argumentList);
+  }
+
+  @override
   bool visitEnumConstantDeclaration(EnumConstantDeclaration node) {
     EnumConstantDeclaration other = _other as EnumConstantDeclaration;
     return isEqualNodes(
@@ -1913,6 +1928,11 @@
   }
 
   @override
+  bool visitConstructorSelector(ConstructorSelector node) {
+    throw UnimplementedError();
+  }
+
+  @override
   bool visitContinueStatement(covariant ContinueStatementImpl node) {
     if (identical(node.label, _oldNode)) {
       node.label = _newNode as SimpleIdentifier;
@@ -1981,6 +2001,11 @@
   bool visitEmptyStatement(EmptyStatement node) => visitNode(node);
 
   @override
+  bool visitEnumConstantArguments(EnumConstantArguments node) {
+    throw UnimplementedError();
+  }
+
+  @override
   bool visitEnumConstantDeclaration(
       covariant EnumConstantDeclarationImpl node) {
     if (identical(node.name, _oldNode)) {
diff --git a/pkg/analyzer/lib/src/fasta/ast_builder.dart b/pkg/analyzer/lib/src/fasta/ast_builder.dart
index 576a41f..db03646 100644
--- a/pkg/analyzer/lib/src/fasta/ast_builder.dart
+++ b/pkg/analyzer/lib/src/fasta/ast_builder.dart
@@ -64,6 +64,9 @@
         ClassDeclarationImpl,
         CompilationUnitImpl,
         ConstructorNameImpl,
+        EnumConstantArgumentsImpl,
+        ConstructorSelectorImpl,
+        EnumConstantDeclarationImpl,
         EnumDeclarationImpl,
         ExtensionDeclarationImpl,
         ImportDirectiveImpl,
@@ -2809,17 +2812,17 @@
   @override
   void handleEnumElement(Token beginToken) {
     debugEvent("EnumElement");
-    var arguments = pop() as MethodInvocationImpl?;
-    var constructorName = pop() as ConstructorNameImpl?;
+    var tmpArguments = pop() as MethodInvocationImpl?;
+    var tmpConstructor = pop() as ConstructorNameImpl?;
 
     if (!enableEnhancedEnums &&
-        (arguments != null ||
-            constructorName != null &&
-                (constructorName.type2.typeArguments != null ||
-                    constructorName.name != null))) {
-      Token token = arguments != null
-          ? arguments.argumentList.beginToken
-          : constructorName!.beginToken;
+        (tmpArguments != null ||
+            tmpConstructor != null &&
+                (tmpConstructor.type2.typeArguments != null ||
+                    tmpConstructor.name != null))) {
+      Token token = tmpArguments != null
+          ? tmpArguments.argumentList.beginToken
+          : tmpConstructor!.beginToken;
       var feature = ExperimentalFeatures.enhanced_enums;
       handleRecoverableError(
         templateExperimentNotEnabled.withArguments(
@@ -2830,6 +2833,37 @@
         token,
       );
     }
+
+    var constant = pop() as EnumConstantDeclarationImpl;
+
+    // Replace the constant to include arguments.
+    if (tmpArguments != null) {
+      TypeArgumentListImpl? typeArguments;
+      ConstructorSelectorImpl? constructorName;
+      if (tmpConstructor != null) {
+        typeArguments = tmpConstructor.type2.typeArguments;
+        var constructorNamePeriod = tmpConstructor.period;
+        var constructorNameId = tmpConstructor.name;
+        if (constructorNamePeriod != null && constructorNameId != null) {
+          constructorName = ConstructorSelectorImpl(
+            period: constructorNamePeriod,
+            name: constructorNameId,
+          );
+        }
+      }
+      constant = EnumConstantDeclarationImpl(
+        documentationComment: constant.documentationComment,
+        metadata: constant.metadata,
+        name: constant.name,
+        arguments: EnumConstantArgumentsImpl(
+          typeArguments: typeArguments,
+          constructorSelector: constructorName,
+          argumentList: tmpArguments.argumentList,
+        ),
+      );
+    }
+
+    push(constant);
   }
 
   @override
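
handleEnumElement only reports the experiment-not-enabled recovery error when enhanced enums are off, and it rebuilds the constant with an EnumConstantArguments node whenever explicit arguments were parsed. A sketch of observing that gate through parseString (the pinned language version is illustrative, chosen to predate enhanced enums; exact diagnostic codes vary by SDK):

    import 'package:analyzer/dart/analysis/features.dart';
    import 'package:analyzer/dart/analysis/utilities.dart';
    import 'package:pub_semver/pub_semver.dart';

    void main() {
      final result = parseString(
        content: 'enum E { v(1) }',
        featureSet: FeatureSet.fromEnableFlags2(
          sdkLanguageVersion: Version(2, 15, 0),
          flags: [],
        ),
        throwIfDiagnostics: false,
      );
      // The recovery path above surfaces here as an
      // "experiment not enabled" style diagnostic.
      print(result.errors);
    }
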
diff --git a/pkg/analyzer/pubspec.yaml b/pkg/analyzer/pubspec.yaml
index 547e31e..5994057 100644
--- a/pkg/analyzer/pubspec.yaml
+++ b/pkg/analyzer/pubspec.yaml
@@ -1,5 +1,5 @@
 name: analyzer
-version: 3.1.0
+version: 3.2.0
 description: This package provides a library that performs static analysis of Dart code.
 homepage: https://github.com/dart-lang/sdk/tree/main/pkg/analyzer
 
@@ -7,7 +7,7 @@
   sdk: '>=2.14.0 <3.0.0'
 
 dependencies:
-  _fe_analyzer_shared: ^33.0.0
+  _fe_analyzer_shared: ^34.0.0
   cli_util: ^0.3.0
   collection: ^1.15.0
   convert: ^3.0.0
diff --git a/pkg/analyzer/test/src/dart/ast/to_source_visitor_test.dart b/pkg/analyzer/test/src/dart/ast/to_source_visitor_test.dart
index 082ba9d..eac40aa 100644
--- a/pkg/analyzer/test/src/dart/ast/to_source_visitor_test.dart
+++ b/pkg/analyzer/test/src/dart/ast/to_source_visitor_test.dart
@@ -810,6 +810,30 @@
     _assertSource(";", AstTestFactory.emptyStatement());
   }
 
+  void test_visitEnumDeclaration_constant_arguments_named() {
+    var findNode = _parseStringToFindNode(r'''
+enum E {
+  v<double>.named(42)
+}
+''');
+    _assertSource(
+      'enum E {v<double>.named(42)}',
+      findNode.enumDeclaration('enum E'),
+    );
+  }
+
+  void test_visitEnumDeclaration_constant_arguments_unnamed() {
+    var findNode = _parseStringToFindNode(r'''
+enum E {
+  v<double>(42)
+}
+''');
+    _assertSource(
+      'enum E {v<double>(42)}',
+      findNode.enumDeclaration('enum E'),
+    );
+  }
+
   void test_visitEnumDeclaration_constants_multiple() {
     var findNode = _parseStringToFindNode(r'''
 enum E {one, two}
diff --git a/pkg/analyzer/test/src/dart/resolution/language_version_test.dart b/pkg/analyzer/test/src/dart/resolution/language_version_test.dart
index a9868b6..fa4ab1c 100644
--- a/pkg/analyzer/test/src/dart/resolution/language_version_test.dart
+++ b/pkg/analyzer/test/src/dart/resolution/language_version_test.dart
@@ -11,7 +11,6 @@
 main() {
   defineReflectiveSuite(() {
     defineReflectiveTests(NullSafetyExperimentGlobalTest);
-    defineReflectiveTests(NullSafetyUsingAllowedExperimentsTest);
     defineReflectiveTests(PackageConfigAndLanguageOverrideTest);
   });
 }
@@ -99,171 +98,6 @@
 }
 
 @reflectiveTest
-class NullSafetyUsingAllowedExperimentsTest extends _FeaturesTest {
-  test_jsonConfig_disable_bin() async {
-    _configureAllowedExperimentsTestNullSafety();
-
-    _configureTestWithJsonConfig('''
-{
-  "configVersion": 2,
-  "packages": [
-    {
-      "name": "test",
-      "rootUri": "../",
-      "packageUri": "lib/",
-      "languageVersion": "2.8"
-    }
-  ]
-}
-''');
-
-    var path = '$testPackageRootPath/bin/a.dart';
-
-    await resolveFileCode(path, r'''
-var x = 0;
-''');
-    assertErrorsInResult([]);
-    assertType(findElement.topVar('x').type, 'int*');
-
-    // Upgrade the language version to `2.10`, so enable Null Safety.
-    _changeFile(path);
-    await resolveFileCode(path, r'''
-// @dart = 2.10
-var x = 0;
-''');
-    assertType(findElement.topVar('x').type, 'int');
-  }
-
-  test_jsonConfig_disable_lib() async {
-    _configureAllowedExperimentsTestNullSafety();
-
-    _configureTestWithJsonConfig('''
-{
-  "configVersion": 2,
-  "packages": [
-    {
-      "name": "test",
-      "rootUri": "../",
-      "packageUri": "lib/",
-      "languageVersion": "2.8"
-    }
-  ]
-}
-''');
-
-    var path = testFilePath;
-
-    await resolveFileCode(path, '''
-var x = 0;
-''');
-    assertErrorsInResult([]);
-    assertType(findElement.topVar('x').type, 'int*');
-
-    // Upgrade the language version to `2.10`, so enable Null Safety.
-    _changeFile(path);
-    await assertNoErrorsInCode('''
-// @dart = 2.10
-var x = 0;
-''');
-    assertType(findElement.topVar('x').type, 'int');
-  }
-
-  test_jsonConfig_enable_bin() async {
-    _configureAllowedExperimentsTestNullSafety();
-
-    _configureTestWithJsonConfig('''
-{
-  "configVersion": 2,
-  "packages": [
-    {
-      "name": "test",
-      "rootUri": "../",
-      "packageUri": "lib/"
-    }
-  ]
-}
-''');
-
-    var path = '$testPackageRootPath/bin/a.dart';
-
-    await resolveFileCode(path, r'''
-var x = 0;
-''');
-    assertErrorsInList(result.errors, []);
-    assertType(findElement.topVar('x').type, 'int');
-
-    // Downgrade the version to `2.8`, so disable Null Safety.
-    _changeFile(path);
-    await resolveFileCode(path, r'''
-// @dart = 2.8
-var x = 0;
-''');
-    assertType(findElement.topVar('x').type, 'int*');
-  }
-
-  test_jsonConfig_enable_lib() async {
-    _configureAllowedExperimentsTestNullSafety();
-
-    _configureTestWithJsonConfig('''
-{
-  "configVersion": 2,
-  "packages": [
-    {
-      "name": "test",
-      "rootUri": "../",
-      "packageUri": "lib/"
-    }
-  ]
-}
-''');
-
-    var path = testFilePath;
-
-    await resolveFileCode(path, '''
-var x = 0;
-''');
-    assertErrorsInResult([]);
-    assertType(findElement.topVar('x').type, 'int');
-
-    // Downgrade the version to `2.8`, so disable Null Safety.
-    _changeFile(path);
-    await assertNoErrorsInCode('''
-// @dart = 2.8
-var x = 0;
-''');
-    assertType(findElement.topVar('x').type, 'int*');
-  }
-
-  void _configureAllowedExperimentsTestNullSafety() {
-    _newSdkExperimentsFile(r'''
-{
-  "version": 1,
-  "experimentSets": {
-    "nullSafety": ["non-nullable"]
-  },
-  "sdk": {
-    "default": {
-      "experimentSet": "nullSafety"
-    }
-  },
-  "packages": {
-    "test": {
-      "experimentSet": "nullSafety"
-    }
-  }
-}
-''');
-  }
-
-  void _newSdkExperimentsFile(String content) {
-    newFile(
-      '${sdkRoot.path}/lib/_internal/allowed_experiments.json',
-      content: content,
-    );
-  }
-}
-
-@reflectiveTest
 class PackageConfigAndLanguageOverrideTest extends _FeaturesTest {
   test_jsonConfigDisablesExtensions() async {
     _configureTestWithJsonConfig('''
@@ -313,15 +147,6 @@
 }
 
 class _FeaturesTest extends PubPackageResolutionTest {
-  /// Do necessary work to ensure that the file with the [path] is considered
-  /// changed for the purpose of following analysis.
-  ///
-  /// Currently we just dispose the whole analysis context collection, so when
-  /// we ask to analyze anything again, we will pick up the new file content.
-  void _changeFile(String path) {
-    disposeAnalysisContextCollection();
-  }
-
   void _configureTestWithJsonConfig(String content) {
     newFile(
       '$testPackageRootPath/.dart_tool/package_config.json',
diff --git a/pkg/analyzer/test/src/dart/resolution/library_element_test.dart b/pkg/analyzer/test/src/dart/resolution/library_element_test.dart
index b569f89..61f0885 100644
--- a/pkg/analyzer/test/src/dart/resolution/library_element_test.dart
+++ b/pkg/analyzer/test/src/dart/resolution/library_element_test.dart
@@ -62,34 +62,6 @@
     ]);
   }
 
-  test_language208_experimentNonNullable() async {
-    writeTestPackageConfig(
-      PackageConfigFileBuilder(),
-      languageVersion: '2.8',
-    );
-
-    writeTestPackageAnalysisOptionsFile(
-      AnalysisOptionsFileConfig(experiments: [
-        EnableString.non_nullable,
-      ]),
-    );
-
-    await resolveTestCode('');
-
-    _assertLanguageVersion(
-      package: Version.parse('2.8.0'),
-      override: null,
-    );
-
-    _assertFeatureSet([
-      Feature.constant_update_2018,
-      Feature.control_flow_collections,
-      Feature.extension_methods,
-      Feature.set_literals,
-      Feature.spread_collections,
-    ]);
-  }
-
   test_language208_override205() async {
     writeTestPackageConfig(
       PackageConfigFileBuilder(),
@@ -134,36 +106,6 @@
     ]);
   }
 
-  test_language209_experimentNonNullable_override210() async {
-    writeTestPackageConfig(
-      PackageConfigFileBuilder(),
-      languageVersion: '2.9',
-    );
-
-    writeTestPackageAnalysisOptionsFile(
-      AnalysisOptionsFileConfig(experiments: [
-        EnableString.non_nullable,
-      ]),
-    );
-
-    await resolveTestCode('// @dart = 2.10');
-
-    // Valid override, even if greater than the package language version.
-    _assertLanguageVersion(
-      package: Version.parse('2.9.0'),
-      override: Version.parse('2.10.0'),
-    );
-
-    _assertFeatureSet([
-      Feature.constant_update_2018,
-      Feature.control_flow_collections,
-      Feature.extension_methods,
-      Feature.non_nullable,
-      Feature.set_literals,
-      Feature.spread_collections,
-    ]);
-  }
-
   test_language209_override299() async {
     writeTestPackageConfig(
       PackageConfigFileBuilder(),
@@ -210,63 +152,6 @@
     ]);
   }
 
-  test_language210_experimentNonNullable() async {
-    writeTestPackageConfig(
-      PackageConfigFileBuilder(),
-      languageVersion: '2.10',
-    );
-
-    writeTestPackageAnalysisOptionsFile(
-      AnalysisOptionsFileConfig(experiments: [
-        EnableString.non_nullable,
-      ]),
-    );
-
-    await resolveTestCode('');
-
-    _assertLanguageVersion(
-      package: Version.parse('2.10.0'),
-      override: null,
-    );
-
-    _assertFeatureSet([
-      Feature.constant_update_2018,
-      Feature.control_flow_collections,
-      Feature.extension_methods,
-      Feature.non_nullable,
-      Feature.set_literals,
-      Feature.spread_collections,
-    ]);
-  }
-
-  test_language210_experimentNonNullable_override209() async {
-    writeTestPackageConfig(
-      PackageConfigFileBuilder(),
-      languageVersion: '2.10',
-    );
-
-    writeTestPackageAnalysisOptionsFile(
-      AnalysisOptionsFileConfig(experiments: [
-        EnableString.non_nullable,
-      ]),
-    );
-
-    await resolveTestCode('// @dart = 2.9');
-
-    _assertLanguageVersion(
-      package: Version.parse('2.10.0'),
-      override: Version.parse('2.9.0'),
-    );
-
-    _assertFeatureSet([
-      Feature.constant_update_2018,
-      Feature.control_flow_collections,
-      Feature.extension_methods,
-      Feature.set_literals,
-      Feature.spread_collections,
-    ]);
-  }
-
   void _assertFeatureSet(List<Feature> expected) {
     var featureSet = result.libraryElement.featureSet;
 
diff --git a/pkg/analyzer/tool/experiments/experiments_test.dart b/pkg/analyzer/tool/experiments/experiments_test.dart
index 575c084..dc8e2ac 100644
--- a/pkg/analyzer/tool/experiments/experiments_test.dart
+++ b/pkg/analyzer/tool/experiments/experiments_test.dart
@@ -10,7 +10,7 @@
 import 'generate.dart';
 
 /// Check that all targets have been code generated. If they haven't, tell the

-/// user to run generate_all.dart.
+/// user to run `generate.dart`.
 main() async {
   String script = Platform.script.toFilePath(windows: Platform.isWindows);
   List<String> components = split(script);
diff --git a/pkg/dartdev/test/experiments_test.dart b/pkg/dartdev/test/experiments_test.dart
index f9bec0e..5608ac7 100644
--- a/pkg/dartdev/test/experiments_test.dart
+++ b/pkg/dartdev/test/experiments_test.dart
@@ -11,7 +11,7 @@
       expect(experimentalFeatures, isNotEmpty);
       expect(
         experimentalFeatures.map((experiment) => experiment.enableString),
-        contains('non-nullable'),
+        contains('super-parameters'),
       );
     });
   });
diff --git a/pkg/front_end/testcases/general/ffi_sample.dart.weak.expect b/pkg/front_end/testcases/general/ffi_sample.dart.weak.expect
index 54231c0..8c513a4 100644
--- a/pkg/front_end/testcases/general/ffi_sample.dart.weak.expect
+++ b/pkg/front_end/testcases/general/ffi_sample.dart.weak.expect
@@ -40,7 +40,7 @@
 
 Constructor coverage from constants:
 org-dartlang-testcase:///ffi_sample.dart:
-- Double. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:142:9)
+- Double. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:144:9)
 - _NativeDouble. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:34:9)
 - NativeType. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:12:9)
 - Object. (from org-dartlang-sdk:///sdk/lib/core/object.dart:25:9)
diff --git a/pkg/front_end/testcases/general/ffi_sample.dart.weak.modular.expect b/pkg/front_end/testcases/general/ffi_sample.dart.weak.modular.expect
index 54231c0..8c513a4 100644
--- a/pkg/front_end/testcases/general/ffi_sample.dart.weak.modular.expect
+++ b/pkg/front_end/testcases/general/ffi_sample.dart.weak.modular.expect
@@ -40,7 +40,7 @@
 
 Constructor coverage from constants:
 org-dartlang-testcase:///ffi_sample.dart:
-- Double. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:142:9)
+- Double. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:144:9)
 - _NativeDouble. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:34:9)
 - NativeType. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:12:9)
 - Object. (from org-dartlang-sdk:///sdk/lib/core/object.dart:25:9)
diff --git a/pkg/front_end/testcases/general/ffi_sample.dart.weak.transformed.expect b/pkg/front_end/testcases/general/ffi_sample.dart.weak.transformed.expect
index dc06840..ae497d5 100644
--- a/pkg/front_end/testcases/general/ffi_sample.dart.weak.transformed.expect
+++ b/pkg/front_end/testcases/general/ffi_sample.dart.weak.transformed.expect
@@ -59,22 +59,22 @@
   #C7 = core::pragma {name:#C1, options:#C6}
   #C8 = ffi::Double {}
   #C9 = 0
-  #C10 = <core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
+  #C10 = <core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
   #C11 = 8
-  #C12 = <core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
+  #C12 = <core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
   #C13 = 16
-  #C14 = <core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
+  #C14 = <core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
   #C15 = "vm:prefer-inline"
   #C16 = core::pragma {name:#C15, options:#C5}
   #C17 = 24
   #C18 = 20
-  #C19 = <core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17]
+  #C19 = <core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17, #C17, #C17]
 }
 
 
 Constructor coverage from constants:
 org-dartlang-testcase:///ffi_sample.dart:
-- Double. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:142:9)
+- Double. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:144:9)
 - _NativeDouble. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:34:9)
 - NativeType. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:12:9)
 - Object. (from org-dartlang-sdk:///sdk/lib/core/object.dart:25:9)
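
The constant vectors in these expectation files grew from 18 to 20 entries: the FFI transform's per-ABI size and offset tables gained slots for linuxRiscv32 and linuxRiscv64. A quick way to see the new cardinality, assuming an SDK whose dart:ffi exposes `Abi.values` and includes the RISC-V ABIs:

    import 'dart:ffi';

    void main() {
      // 18 before this change, 20 once linuxRiscv32/linuxRiscv64 exist.
      print(Abi.values.length);
    }
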
diff --git a/pkg/front_end/testcases/incremental/crash_05.yaml.world.1.expect b/pkg/front_end/testcases/incremental/crash_05.yaml.world.1.expect
index cd22c39..bcb7537 100644
--- a/pkg/front_end/testcases/incremental/crash_05.yaml.world.1.expect
+++ b/pkg/front_end/testcases/incremental/crash_05.yaml.world.1.expect
@@ -56,11 +56,11 @@
   #C6 = dart.core::pragma {name:#C1, options:#C5}
   #C7 = dart.ffi::Uint32 {}
   #C8 = 0
-  #C9 = <dart.core::int*>[#C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8]
+  #C9 = <dart.core::int*>[#C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8]
   #C10 = "vm:prefer-inline"
   #C11 = dart.core::pragma {name:#C10, options:#C4}
   #C12 = 4
-  #C13 = <dart.core::int*>[#C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12]
+  #C13 = <dart.core::int*>[#C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12]
   #C14 = TypeLiteralConstant(lib::Y)
   #C15 = <dart.core::Type>[#C14]
   #C16 = dart.ffi::_FfiStructLayout {fieldTypes:#C15, packing:#C4}
diff --git a/pkg/front_end/testcases/incremental/crash_05.yaml.world.2.expect b/pkg/front_end/testcases/incremental/crash_05.yaml.world.2.expect
index cd22c39..bcb7537 100644
--- a/pkg/front_end/testcases/incremental/crash_05.yaml.world.2.expect
+++ b/pkg/front_end/testcases/incremental/crash_05.yaml.world.2.expect
@@ -56,11 +56,11 @@
   #C6 = dart.core::pragma {name:#C1, options:#C5}
   #C7 = dart.ffi::Uint32 {}
   #C8 = 0
-  #C9 = <dart.core::int*>[#C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8]
+  #C9 = <dart.core::int*>[#C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8]
   #C10 = "vm:prefer-inline"
   #C11 = dart.core::pragma {name:#C10, options:#C4}
   #C12 = 4
-  #C13 = <dart.core::int*>[#C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12]
+  #C13 = <dart.core::int*>[#C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12]
   #C14 = TypeLiteralConstant(lib::Y)
   #C15 = <dart.core::Type>[#C14]
   #C16 = dart.ffi::_FfiStructLayout {fieldTypes:#C15, packing:#C4}
diff --git a/pkg/front_end/testcases/incremental/crash_06.yaml.world.1.expect b/pkg/front_end/testcases/incremental/crash_06.yaml.world.1.expect
index 2b217e2..a7835ef 100644
--- a/pkg/front_end/testcases/incremental/crash_06.yaml.world.1.expect
+++ b/pkg/front_end/testcases/incremental/crash_06.yaml.world.1.expect
@@ -62,7 +62,7 @@
   #C5 = dart.ffi::_FfiStructLayout {fieldTypes:#C3, packing:#C4}
   #C6 = dart.core::pragma {name:#C1, options:#C5}
   #C7 = 0
-  #C8 = <dart.core::int*>[#C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7]
+  #C8 = <dart.core::int*>[#C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7]
   #C9 = "vm:prefer-inline"
   #C10 = dart.core::pragma {name:#C9, options:#C4}
 }
diff --git a/pkg/front_end/testcases/incremental/crash_06.yaml.world.2.expect b/pkg/front_end/testcases/incremental/crash_06.yaml.world.2.expect
index 2b217e2..a7835ef 100644
--- a/pkg/front_end/testcases/incremental/crash_06.yaml.world.2.expect
+++ b/pkg/front_end/testcases/incremental/crash_06.yaml.world.2.expect
@@ -62,7 +62,7 @@
   #C5 = dart.ffi::_FfiStructLayout {fieldTypes:#C3, packing:#C4}
   #C6 = dart.core::pragma {name:#C1, options:#C5}
   #C7 = 0
-  #C8 = <dart.core::int*>[#C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7]
+  #C8 = <dart.core::int*>[#C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7]
   #C9 = "vm:prefer-inline"
   #C10 = dart.core::pragma {name:#C9, options:#C4}
 }
diff --git a/pkg/front_end/testcases/incremental/ffi_01.yaml.world.1.expect b/pkg/front_end/testcases/incremental/ffi_01.yaml.world.1.expect
index 72d79b8..e6e7693 100644
--- a/pkg/front_end/testcases/incremental/ffi_01.yaml.world.1.expect
+++ b/pkg/front_end/testcases/incremental/ffi_01.yaml.world.1.expect
@@ -62,14 +62,14 @@
   #C7 = dart.core::pragma {name:#C1, options:#C6}
   #C8 = dart.ffi::Double {}
   #C9 = 0
-  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
+  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
   #C11 = 8
-  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
+  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
   #C13 = 16
-  #C14 = <dart.core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
+  #C14 = <dart.core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
   #C15 = "vm:prefer-inline"
   #C16 = dart.core::pragma {name:#C15, options:#C5}
   #C17 = 24
   #C18 = 20
-  #C19 = <dart.core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17]
+  #C19 = <dart.core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17, #C17, #C17]
 }
diff --git a/pkg/front_end/testcases/incremental/ffi_01.yaml.world.2.expect b/pkg/front_end/testcases/incremental/ffi_01.yaml.world.2.expect
index 6d9d90c..7712ae2 100644
--- a/pkg/front_end/testcases/incremental/ffi_01.yaml.world.2.expect
+++ b/pkg/front_end/testcases/incremental/ffi_01.yaml.world.2.expect
@@ -66,14 +66,14 @@
   #C7 = dart.core::pragma {name:#C1, options:#C6}
   #C8 = dart.ffi::Double {}
   #C9 = 0
-  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
+  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
   #C11 = 8
-  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
+  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
   #C13 = 16
-  #C14 = <dart.core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
+  #C14 = <dart.core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
   #C15 = "vm:prefer-inline"
   #C16 = dart.core::pragma {name:#C15, options:#C5}
   #C17 = 24
   #C18 = 20
-  #C19 = <dart.core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17]
+  #C19 = <dart.core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17, #C17, #C17]
 }
diff --git a/pkg/front_end/testcases/incremental/ffi_02.yaml.world.1.expect b/pkg/front_end/testcases/incremental/ffi_02.yaml.world.1.expect
index eb79aaf..99eefd4 100644
--- a/pkg/front_end/testcases/incremental/ffi_02.yaml.world.1.expect
+++ b/pkg/front_end/testcases/incremental/ffi_02.yaml.world.1.expect
@@ -63,14 +63,14 @@
   #C7 = dart.core::pragma {name:#C1, options:#C6}
   #C8 = dart.ffi::Double {}
   #C9 = 0
-  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
+  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
   #C11 = 8
-  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
+  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
   #C13 = 16
-  #C14 = <dart.core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
+  #C14 = <dart.core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
   #C15 = "vm:prefer-inline"
   #C16 = dart.core::pragma {name:#C15, options:#C5}
   #C17 = 24
   #C18 = 20
-  #C19 = <dart.core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17]
+  #C19 = <dart.core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17, #C17, #C17]
 }
diff --git a/pkg/front_end/testcases/incremental/issue_46666.yaml.world.1.expect b/pkg/front_end/testcases/incremental/issue_46666.yaml.world.1.expect
index beb059f..a889a57 100644
--- a/pkg/front_end/testcases/incremental/issue_46666.yaml.world.1.expect
+++ b/pkg/front_end/testcases/incremental/issue_46666.yaml.world.1.expect
@@ -101,19 +101,19 @@
   #C6 = dart.ffi::_FfiStructLayout {fieldTypes:#C4, packing:#C5}
   #C7 = dart.core::pragma {name:#C1, options:#C6}
   #C8 = 0
-  #C9 = <dart.core::int*>[#C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8]
+  #C9 = <dart.core::int*>[#C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8]
   #C10 = 4
   #C11 = 8
-  #C12 = <dart.core::int*>[#C10, #C11, #C10, #C11, #C11, #C11, #C10, #C11, #C11, #C10, #C11, #C10, #C11, #C11, #C11, #C11, #C10, #C11]
+  #C12 = <dart.core::int*>[#C10, #C11, #C10, #C11, #C11, #C11, #C10, #C11, #C11, #C10, #C11, #C10, #C11, #C10, #C11, #C11, #C11, #C11, #C10, #C11]
   #C13 = 16
-  #C14 = <dart.core::int*>[#C11, #C13, #C11, #C13, #C13, #C13, #C11, #C13, #C13, #C11, #C13, #C11, #C13, #C13, #C13, #C13, #C11, #C13]
+  #C14 = <dart.core::int*>[#C11, #C13, #C11, #C13, #C13, #C13, #C11, #C13, #C13, #C11, #C13, #C11, #C13, #C11, #C13, #C13, #C13, #C13, #C11, #C13]
   #C15 = 12
   #C16 = 24
-  #C17 = <dart.core::int*>[#C15, #C16, #C15, #C16, #C16, #C16, #C15, #C16, #C16, #C15, #C16, #C15, #C16, #C16, #C16, #C16, #C15, #C16]
+  #C17 = <dart.core::int*>[#C15, #C16, #C15, #C16, #C16, #C16, #C15, #C16, #C16, #C15, #C16, #C15, #C16, #C15, #C16, #C16, #C16, #C16, #C15, #C16]
   #C18 = "vm:prefer-inline"
   #C19 = dart.core::pragma {name:#C18, options:#C5}
   #C20 = 48
-  #C21 = <dart.core::int*>[#C16, #C20, #C16, #C20, #C20, #C20, #C16, #C20, #C20, #C16, #C20, #C16, #C20, #C20, #C20, #C20, #C16, #C20]
+  #C21 = <dart.core::int*>[#C16, #C20, #C16, #C20, #C20, #C20, #C16, #C20, #C20, #C16, #C20, #C16, #C20, #C16, #C20, #C20, #C20, #C20, #C16, #C20]
   #C22 = <dart.core::Type>[#C2, #C2, #C2]
   #C23 = dart.ffi::_FfiStructLayout {fieldTypes:#C22, packing:#C5}
   #C24 = dart.core::pragma {name:#C1, options:#C23}
diff --git a/pkg/front_end/testcases/incremental/issue_46666.yaml.world.2.expect b/pkg/front_end/testcases/incremental/issue_46666.yaml.world.2.expect
index beb059f..a889a57 100644
--- a/pkg/front_end/testcases/incremental/issue_46666.yaml.world.2.expect
+++ b/pkg/front_end/testcases/incremental/issue_46666.yaml.world.2.expect
@@ -101,19 +101,19 @@
   #C6 = dart.ffi::_FfiStructLayout {fieldTypes:#C4, packing:#C5}
   #C7 = dart.core::pragma {name:#C1, options:#C6}
   #C8 = 0
-  #C9 = <dart.core::int*>[#C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8]
+  #C9 = <dart.core::int*>[#C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8]
   #C10 = 4
   #C11 = 8
-  #C12 = <dart.core::int*>[#C10, #C11, #C10, #C11, #C11, #C11, #C10, #C11, #C11, #C10, #C11, #C10, #C11, #C11, #C11, #C11, #C10, #C11]
+  #C12 = <dart.core::int*>[#C10, #C11, #C10, #C11, #C11, #C11, #C10, #C11, #C11, #C10, #C11, #C10, #C11, #C10, #C11, #C11, #C11, #C11, #C10, #C11]
   #C13 = 16
-  #C14 = <dart.core::int*>[#C11, #C13, #C11, #C13, #C13, #C13, #C11, #C13, #C13, #C11, #C13, #C11, #C13, #C13, #C13, #C13, #C11, #C13]
+  #C14 = <dart.core::int*>[#C11, #C13, #C11, #C13, #C13, #C13, #C11, #C13, #C13, #C11, #C13, #C11, #C13, #C11, #C13, #C13, #C13, #C13, #C11, #C13]
   #C15 = 12
   #C16 = 24
-  #C17 = <dart.core::int*>[#C15, #C16, #C15, #C16, #C16, #C16, #C15, #C16, #C16, #C15, #C16, #C15, #C16, #C16, #C16, #C16, #C15, #C16]
+  #C17 = <dart.core::int*>[#C15, #C16, #C15, #C16, #C16, #C16, #C15, #C16, #C16, #C15, #C16, #C15, #C16, #C15, #C16, #C16, #C16, #C16, #C15, #C16]
   #C18 = "vm:prefer-inline"
   #C19 = dart.core::pragma {name:#C18, options:#C5}
   #C20 = 48
-  #C21 = <dart.core::int*>[#C16, #C20, #C16, #C20, #C20, #C20, #C16, #C20, #C20, #C16, #C20, #C16, #C20, #C20, #C20, #C20, #C16, #C20]
+  #C21 = <dart.core::int*>[#C16, #C20, #C16, #C20, #C20, #C20, #C16, #C20, #C20, #C16, #C20, #C16, #C20, #C16, #C20, #C20, #C20, #C20, #C16, #C20]
   #C22 = <dart.core::Type>[#C2, #C2, #C2]
   #C23 = dart.ffi::_FfiStructLayout {fieldTypes:#C22, packing:#C5}
   #C24 = dart.core::pragma {name:#C1, options:#C23}
diff --git a/pkg/front_end/testcases/incremental/no_outline_change_35.yaml.world.1.expect b/pkg/front_end/testcases/incremental/no_outline_change_35.yaml.world.1.expect
index 72d79b8..e6e7693 100644
--- a/pkg/front_end/testcases/incremental/no_outline_change_35.yaml.world.1.expect
+++ b/pkg/front_end/testcases/incremental/no_outline_change_35.yaml.world.1.expect
@@ -62,14 +62,14 @@
   #C7 = dart.core::pragma {name:#C1, options:#C6}
   #C8 = dart.ffi::Double {}
   #C9 = 0
-  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
+  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
   #C11 = 8
-  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
+  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
   #C13 = 16
-  #C14 = <dart.core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
+  #C14 = <dart.core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
   #C15 = "vm:prefer-inline"
   #C16 = dart.core::pragma {name:#C15, options:#C5}
   #C17 = 24
   #C18 = 20
-  #C19 = <dart.core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17]
+  #C19 = <dart.core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17, #C17, #C17]
 }
diff --git a/pkg/front_end/testcases/incremental/no_outline_change_35.yaml.world.2.expect b/pkg/front_end/testcases/incremental/no_outline_change_35.yaml.world.2.expect
index d4ce18c..1134f3c 100644
--- a/pkg/front_end/testcases/incremental/no_outline_change_35.yaml.world.2.expect
+++ b/pkg/front_end/testcases/incremental/no_outline_change_35.yaml.world.2.expect
@@ -63,14 +63,14 @@
   #C7 = dart.core::pragma {name:#C1, options:#C6}
   #C8 = dart.ffi::Double {}
   #C9 = 0
-  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
+  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
   #C11 = 8
-  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
+  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
   #C13 = 16
-  #C14 = <dart.core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
+  #C14 = <dart.core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
   #C15 = "vm:prefer-inline"
   #C16 = dart.core::pragma {name:#C15, options:#C5}
   #C17 = 24
   #C18 = 20
-  #C19 = <dart.core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17]
+  #C19 = <dart.core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17, #C17, #C17]
 }
diff --git a/pkg/front_end/testcases/incremental/no_outline_change_35.yaml.world.3.expect b/pkg/front_end/testcases/incremental/no_outline_change_35.yaml.world.3.expect
index 4bfcd2e..e52f835 100644
--- a/pkg/front_end/testcases/incremental/no_outline_change_35.yaml.world.3.expect
+++ b/pkg/front_end/testcases/incremental/no_outline_change_35.yaml.world.3.expect
@@ -64,14 +64,14 @@
   #C7 = dart.core::pragma {name:#C1, options:#C6}
   #C8 = dart.ffi::Double {}
   #C9 = 0
-  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
+  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
   #C11 = 8
-  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
+  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
   #C13 = 16
-  #C14 = <dart.core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
+  #C14 = <dart.core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
   #C15 = "vm:prefer-inline"
   #C16 = dart.core::pragma {name:#C15, options:#C5}
   #C17 = 24
   #C18 = 20
-  #C19 = <dart.core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17]
+  #C19 = <dart.core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17, #C17, #C17]
 }
diff --git a/pkg/front_end/testcases/incremental/no_outline_change_48_ffi.yaml.world.1.expect b/pkg/front_end/testcases/incremental/no_outline_change_48_ffi.yaml.world.1.expect
index 3f3d73e..a86189c 100644
--- a/pkg/front_end/testcases/incremental/no_outline_change_48_ffi.yaml.world.1.expect
+++ b/pkg/front_end/testcases/incremental/no_outline_change_48_ffi.yaml.world.1.expect
@@ -82,26 +82,26 @@
   #C7 = dart.core::pragma {name:#C1, options:#C6}
   #C8 = dart.ffi::Uint8 {}
   #C9 = 0
-  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
+  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
   #C11 = 1
-  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
+  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
   #C13 = dart.ffi::Uint64 {}
   #C14 = 8
   #C15 = 4
-  #C16 = <dart.core::int*>[#C14, #C14, #C15, #C14, #C14, #C14, #C15, #C14, #C14, #C14, #C14, #C15, #C14, #C14, #C14, #C14, #C14, #C14]
+  #C16 = <dart.core::int*>[#C14, #C14, #C15, #C14, #C14, #C14, #C15, #C14, #C14, #C14, #C14, #C15, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14]
   #C17 = "vm:prefer-inline"
   #C18 = dart.core::pragma {name:#C17, options:#C5}
   #C19 = 16
   #C20 = 12
-  #C21 = <dart.core::int*>[#C19, #C19, #C20, #C19, #C19, #C19, #C20, #C19, #C19, #C19, #C19, #C20, #C19, #C19, #C19, #C19, #C19, #C19]
+  #C21 = <dart.core::int*>[#C19, #C19, #C20, #C19, #C19, #C19, #C20, #C19, #C19, #C19, #C19, #C20, #C19, #C19, #C19, #C19, #C19, #C19, #C19, #C19]
   #C22 = TypeLiteralConstant(lib::Y)
   #C23 = <dart.core::Type>[#C22, #C22, #C2]
   #C24 = dart.ffi::_FfiStructLayout {fieldTypes:#C23, packing:#C5}
   #C25 = dart.core::pragma {name:#C1, options:#C24}
   #C26 = 32
   #C27 = 24
-  #C28 = <dart.core::int*>[#C26, #C26, #C27, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C26, #C26]
+  #C28 = <dart.core::int*>[#C26, #C26, #C27, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C26, #C26, #C26, #C26]
   #C29 = 40
   #C30 = 28
-  #C31 = <dart.core::int*>[#C29, #C29, #C30, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C29, #C29]
+  #C31 = <dart.core::int*>[#C29, #C29, #C30, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C29, #C29, #C29, #C29]
 }
diff --git a/pkg/front_end/testcases/incremental/no_outline_change_48_ffi.yaml.world.2.expect b/pkg/front_end/testcases/incremental/no_outline_change_48_ffi.yaml.world.2.expect
index e241ee1..a9302fa 100644
--- a/pkg/front_end/testcases/incremental/no_outline_change_48_ffi.yaml.world.2.expect
+++ b/pkg/front_end/testcases/incremental/no_outline_change_48_ffi.yaml.world.2.expect
@@ -82,26 +82,26 @@
   #C7 = dart.core::pragma {name:#C1, options:#C6}
   #C8 = dart.ffi::Uint8 {}
   #C9 = 0
-  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
+  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
   #C11 = dart.ffi::Uint64 {}
   #C12 = 8
   #C13 = 4
-  #C14 = <dart.core::int*>[#C12, #C12, #C13, #C12, #C12, #C12, #C13, #C12, #C12, #C12, #C12, #C13, #C12, #C12, #C12, #C12, #C12, #C12]
+  #C14 = <dart.core::int*>[#C12, #C12, #C13, #C12, #C12, #C12, #C13, #C12, #C12, #C12, #C12, #C13, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12]
   #C15 = 16
   #C16 = 12
-  #C17 = <dart.core::int*>[#C15, #C15, #C16, #C15, #C15, #C15, #C16, #C15, #C15, #C15, #C15, #C16, #C15, #C15, #C15, #C15, #C15, #C15]
+  #C17 = <dart.core::int*>[#C15, #C15, #C16, #C15, #C15, #C15, #C16, #C15, #C15, #C15, #C15, #C16, #C15, #C15, #C15, #C15, #C15, #C15, #C15, #C15]
   #C18 = "vm:prefer-inline"
   #C19 = dart.core::pragma {name:#C18, options:#C5}
   #C20 = 24
-  #C21 = <dart.core::int*>[#C20, #C20, #C15, #C20, #C20, #C20, #C15, #C20, #C20, #C20, #C20, #C15, #C20, #C20, #C20, #C20, #C20, #C20]
+  #C21 = <dart.core::int*>[#C20, #C20, #C15, #C20, #C20, #C20, #C15, #C20, #C20, #C20, #C20, #C15, #C20, #C20, #C20, #C20, #C20, #C20, #C20, #C20]
   #C22 = TypeLiteralConstant(lib::Y)
   #C23 = <dart.core::Type>[#C22, #C22, #C2]
   #C24 = dart.ffi::_FfiStructLayout {fieldTypes:#C23, packing:#C5}
   #C25 = dart.core::pragma {name:#C1, options:#C24}
   #C26 = 48
   #C27 = 32
-  #C28 = <dart.core::int*>[#C26, #C26, #C27, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C26, #C26]
+  #C28 = <dart.core::int*>[#C26, #C26, #C27, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C26, #C26, #C26, #C26]
   #C29 = 56
   #C30 = 36
-  #C31 = <dart.core::int*>[#C29, #C29, #C30, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C29, #C29]
+  #C31 = <dart.core::int*>[#C29, #C29, #C30, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C29, #C29, #C29, #C29]
 }
diff --git a/pkg/front_end/testcases/incremental/no_outline_change_49_ffi.yaml.world.1.expect b/pkg/front_end/testcases/incremental/no_outline_change_49_ffi.yaml.world.1.expect
index 3f3d73e..a86189c 100644
--- a/pkg/front_end/testcases/incremental/no_outline_change_49_ffi.yaml.world.1.expect
+++ b/pkg/front_end/testcases/incremental/no_outline_change_49_ffi.yaml.world.1.expect
@@ -82,26 +82,26 @@
   #C7 = dart.core::pragma {name:#C1, options:#C6}
   #C8 = dart.ffi::Uint8 {}
   #C9 = 0
-  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
+  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
   #C11 = 1
-  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
+  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
   #C13 = dart.ffi::Uint64 {}
   #C14 = 8
   #C15 = 4
-  #C16 = <dart.core::int*>[#C14, #C14, #C15, #C14, #C14, #C14, #C15, #C14, #C14, #C14, #C14, #C15, #C14, #C14, #C14, #C14, #C14, #C14]
+  #C16 = <dart.core::int*>[#C14, #C14, #C15, #C14, #C14, #C14, #C15, #C14, #C14, #C14, #C14, #C15, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14]
   #C17 = "vm:prefer-inline"
   #C18 = dart.core::pragma {name:#C17, options:#C5}
   #C19 = 16
   #C20 = 12
-  #C21 = <dart.core::int*>[#C19, #C19, #C20, #C19, #C19, #C19, #C20, #C19, #C19, #C19, #C19, #C20, #C19, #C19, #C19, #C19, #C19, #C19]
+  #C21 = <dart.core::int*>[#C19, #C19, #C20, #C19, #C19, #C19, #C20, #C19, #C19, #C19, #C19, #C20, #C19, #C19, #C19, #C19, #C19, #C19, #C19, #C19]
   #C22 = TypeLiteralConstant(lib::Y)
   #C23 = <dart.core::Type>[#C22, #C22, #C2]
   #C24 = dart.ffi::_FfiStructLayout {fieldTypes:#C23, packing:#C5}
   #C25 = dart.core::pragma {name:#C1, options:#C24}
   #C26 = 32
   #C27 = 24
-  #C28 = <dart.core::int*>[#C26, #C26, #C27, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C26, #C26]
+  #C28 = <dart.core::int*>[#C26, #C26, #C27, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C26, #C26, #C26, #C26]
   #C29 = 40
   #C30 = 28
-  #C31 = <dart.core::int*>[#C29, #C29, #C30, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C29, #C29]
+  #C31 = <dart.core::int*>[#C29, #C29, #C30, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C29, #C29, #C29, #C29]
 }
diff --git a/pkg/front_end/testcases/incremental/no_outline_change_49_ffi.yaml.world.2.expect b/pkg/front_end/testcases/incremental/no_outline_change_49_ffi.yaml.world.2.expect
index e241ee1..a9302fa 100644
--- a/pkg/front_end/testcases/incremental/no_outline_change_49_ffi.yaml.world.2.expect
+++ b/pkg/front_end/testcases/incremental/no_outline_change_49_ffi.yaml.world.2.expect
@@ -82,26 +82,26 @@
   #C7 = dart.core::pragma {name:#C1, options:#C6}
   #C8 = dart.ffi::Uint8 {}
   #C9 = 0
-  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
+  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
   #C11 = dart.ffi::Uint64 {}
   #C12 = 8
   #C13 = 4
-  #C14 = <dart.core::int*>[#C12, #C12, #C13, #C12, #C12, #C12, #C13, #C12, #C12, #C12, #C12, #C13, #C12, #C12, #C12, #C12, #C12, #C12]
+  #C14 = <dart.core::int*>[#C12, #C12, #C13, #C12, #C12, #C12, #C13, #C12, #C12, #C12, #C12, #C13, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12]
   #C15 = 16
   #C16 = 12
-  #C17 = <dart.core::int*>[#C15, #C15, #C16, #C15, #C15, #C15, #C16, #C15, #C15, #C15, #C15, #C16, #C15, #C15, #C15, #C15, #C15, #C15]
+  #C17 = <dart.core::int*>[#C15, #C15, #C16, #C15, #C15, #C15, #C16, #C15, #C15, #C15, #C15, #C16, #C15, #C15, #C15, #C15, #C15, #C15, #C15, #C15]
   #C18 = "vm:prefer-inline"
   #C19 = dart.core::pragma {name:#C18, options:#C5}
   #C20 = 24
-  #C21 = <dart.core::int*>[#C20, #C20, #C15, #C20, #C20, #C20, #C15, #C20, #C20, #C20, #C20, #C15, #C20, #C20, #C20, #C20, #C20, #C20]
+  #C21 = <dart.core::int*>[#C20, #C20, #C15, #C20, #C20, #C20, #C15, #C20, #C20, #C20, #C20, #C15, #C20, #C20, #C20, #C20, #C20, #C20, #C20, #C20]
   #C22 = TypeLiteralConstant(lib::Y)
   #C23 = <dart.core::Type>[#C22, #C22, #C2]
   #C24 = dart.ffi::_FfiStructLayout {fieldTypes:#C23, packing:#C5}
   #C25 = dart.core::pragma {name:#C1, options:#C24}
   #C26 = 48
   #C27 = 32
-  #C28 = <dart.core::int*>[#C26, #C26, #C27, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C26, #C26]
+  #C28 = <dart.core::int*>[#C26, #C26, #C27, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C26, #C26, #C26, #C26]
   #C29 = 56
   #C30 = 36
-  #C31 = <dart.core::int*>[#C29, #C29, #C30, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C29, #C29]
+  #C31 = <dart.core::int*>[#C29, #C29, #C30, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C29, #C29, #C29, #C29]
 }
diff --git a/pkg/front_end/testcases/incremental/no_outline_change_50_ffi.yaml.world.1.expect b/pkg/front_end/testcases/incremental/no_outline_change_50_ffi.yaml.world.1.expect
index 3c26667..6c37a20d 100644
--- a/pkg/front_end/testcases/incremental/no_outline_change_50_ffi.yaml.world.1.expect
+++ b/pkg/front_end/testcases/incremental/no_outline_change_50_ffi.yaml.world.1.expect
@@ -234,26 +234,26 @@
   #C7 = dart.core::pragma {name:#C1, options:#C6}
   #C8 = dart.ffi::Uint8 {}
   #C9 = 0
-  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
+  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
   #C11 = 1
-  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
+  #C12 = <dart.core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
   #C13 = dart.ffi::Uint64 {}
   #C14 = 8
   #C15 = 4
-  #C16 = <dart.core::int*>[#C14, #C14, #C15, #C14, #C14, #C14, #C15, #C14, #C14, #C14, #C14, #C15, #C14, #C14, #C14, #C14, #C14, #C14]
+  #C16 = <dart.core::int*>[#C14, #C14, #C15, #C14, #C14, #C14, #C15, #C14, #C14, #C14, #C14, #C15, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14]
   #C17 = "vm:prefer-inline"
   #C18 = dart.core::pragma {name:#C17, options:#C5}
   #C19 = 16
   #C20 = 12
-  #C21 = <dart.core::int*>[#C19, #C19, #C20, #C19, #C19, #C19, #C20, #C19, #C19, #C19, #C19, #C20, #C19, #C19, #C19, #C19, #C19, #C19]
+  #C21 = <dart.core::int*>[#C19, #C19, #C20, #C19, #C19, #C19, #C20, #C19, #C19, #C19, #C19, #C20, #C19, #C19, #C19, #C19, #C19, #C19, #C19, #C19]
   #C22 = TypeLiteralConstant(lib::Y)
   #C23 = <dart.core::Type>[#C22, #C22, #C2]
   #C24 = dart.ffi::_FfiStructLayout {fieldTypes:#C23, packing:#C5}
   #C25 = dart.core::pragma {name:#C1, options:#C24}
   #C26 = 32
   #C27 = 24
-  #C28 = <dart.core::int*>[#C26, #C26, #C27, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C26, #C26]
+  #C28 = <dart.core::int*>[#C26, #C26, #C27, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C26, #C26, #C26, #C26]
   #C29 = 40
   #C30 = 28
-  #C31 = <dart.core::int*>[#C29, #C29, #C30, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C29, #C29]
+  #C31 = <dart.core::int*>[#C29, #C29, #C30, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C29, #C29, #C29, #C29]
 }
diff --git a/pkg/front_end/testcases/incremental/no_outline_change_50_ffi.yaml.world.2.expect b/pkg/front_end/testcases/incremental/no_outline_change_50_ffi.yaml.world.2.expect
index 7c86d77..e7c047a 100644
--- a/pkg/front_end/testcases/incremental/no_outline_change_50_ffi.yaml.world.2.expect
+++ b/pkg/front_end/testcases/incremental/no_outline_change_50_ffi.yaml.world.2.expect
@@ -234,26 +234,26 @@
   #C7 = dart.core::pragma {name:#C1, options:#C6}
   #C8 = dart.ffi::Uint8 {}
   #C9 = 0
-  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
+  #C10 = <dart.core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
   #C11 = dart.ffi::Uint64 {}
   #C12 = 8
   #C13 = 4
-  #C14 = <dart.core::int*>[#C12, #C12, #C13, #C12, #C12, #C12, #C13, #C12, #C12, #C12, #C12, #C13, #C12, #C12, #C12, #C12, #C12, #C12]
+  #C14 = <dart.core::int*>[#C12, #C12, #C13, #C12, #C12, #C12, #C13, #C12, #C12, #C12, #C12, #C13, #C12, #C12, #C12, #C12, #C12, #C12, #C12, #C12]
   #C15 = 16
   #C16 = 12
-  #C17 = <dart.core::int*>[#C15, #C15, #C16, #C15, #C15, #C15, #C16, #C15, #C15, #C15, #C15, #C16, #C15, #C15, #C15, #C15, #C15, #C15]
+  #C17 = <dart.core::int*>[#C15, #C15, #C16, #C15, #C15, #C15, #C16, #C15, #C15, #C15, #C15, #C16, #C15, #C15, #C15, #C15, #C15, #C15, #C15, #C15]
   #C18 = "vm:prefer-inline"
   #C19 = dart.core::pragma {name:#C18, options:#C5}
   #C20 = 24
-  #C21 = <dart.core::int*>[#C20, #C20, #C15, #C20, #C20, #C20, #C15, #C20, #C20, #C20, #C20, #C15, #C20, #C20, #C20, #C20, #C20, #C20]
+  #C21 = <dart.core::int*>[#C20, #C20, #C15, #C20, #C20, #C20, #C15, #C20, #C20, #C20, #C20, #C15, #C20, #C20, #C20, #C20, #C20, #C20, #C20, #C20]
   #C22 = TypeLiteralConstant(lib::Y)
   #C23 = <dart.core::Type>[#C22, #C22, #C2]
   #C24 = dart.ffi::_FfiStructLayout {fieldTypes:#C23, packing:#C5}
   #C25 = dart.core::pragma {name:#C1, options:#C24}
   #C26 = 48
   #C27 = 32
-  #C28 = <dart.core::int*>[#C26, #C26, #C27, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C26, #C26]
+  #C28 = <dart.core::int*>[#C26, #C26, #C27, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C27, #C26, #C26, #C26, #C26, #C26, #C26, #C26, #C26]
   #C29 = 56
   #C30 = 36
-  #C31 = <dart.core::int*>[#C29, #C29, #C30, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C29, #C29]
+  #C31 = <dart.core::int*>[#C29, #C29, #C30, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C30, #C29, #C29, #C29, #C29, #C29, #C29, #C29, #C29]
 }
diff --git a/pkg/front_end/testcases/incremental/regress_46004.yaml.world.1.expect b/pkg/front_end/testcases/incremental/regress_46004.yaml.world.1.expect
index 1cb94e2..81904cb 100644
--- a/pkg/front_end/testcases/incremental/regress_46004.yaml.world.1.expect
+++ b/pkg/front_end/testcases/incremental/regress_46004.yaml.world.1.expect
@@ -55,12 +55,12 @@
   #C5 = dart.ffi::_FfiStructLayout {fieldTypes:#C3, packing:#C4}
   #C6 = dart.core::pragma {name:#C1, options:#C5}
   #C7 = 0
-  #C8 = <dart.core::int*>[#C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7]
+  #C8 = <dart.core::int*>[#C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7]
   #C9 = "vm:prefer-inline"
   #C10 = dart.core::pragma {name:#C9, options:#C4}
   #C11 = 4
   #C12 = 8
-  #C13 = <dart.core::int*>[#C11, #C12, #C11, #C12, #C12, #C12, #C11, #C12, #C12, #C11, #C12, #C11, #C12, #C12, #C12, #C12, #C11, #C12]
+  #C13 = <dart.core::int*>[#C11, #C12, #C11, #C12, #C12, #C12, #C11, #C12, #C12, #C11, #C12, #C11, #C12, #C11, #C12, #C12, #C12, #C12, #C11, #C12]
   #C14 = TypeLiteralConstant(lib::COMObject)
   #C15 = <dart.core::Type>[#C14]
   #C16 = dart.ffi::_FfiStructLayout {fieldTypes:#C15, packing:#C4}
diff --git a/pkg/front_end/testcases/incremental/regress_46004.yaml.world.2.expect b/pkg/front_end/testcases/incremental/regress_46004.yaml.world.2.expect
index 1cb94e2..81904cb 100644
--- a/pkg/front_end/testcases/incremental/regress_46004.yaml.world.2.expect
+++ b/pkg/front_end/testcases/incremental/regress_46004.yaml.world.2.expect
@@ -55,12 +55,12 @@
   #C5 = dart.ffi::_FfiStructLayout {fieldTypes:#C3, packing:#C4}
   #C6 = dart.core::pragma {name:#C1, options:#C5}
   #C7 = 0
-  #C8 = <dart.core::int*>[#C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7]
+  #C8 = <dart.core::int*>[#C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7, #C7]
   #C9 = "vm:prefer-inline"
   #C10 = dart.core::pragma {name:#C9, options:#C4}
   #C11 = 4
   #C12 = 8
-  #C13 = <dart.core::int*>[#C11, #C12, #C11, #C12, #C12, #C12, #C11, #C12, #C12, #C11, #C12, #C11, #C12, #C12, #C12, #C12, #C11, #C12]
+  #C13 = <dart.core::int*>[#C11, #C12, #C11, #C12, #C12, #C12, #C11, #C12, #C12, #C11, #C12, #C11, #C12, #C11, #C12, #C12, #C12, #C12, #C11, #C12]
   #C14 = TypeLiteralConstant(lib::COMObject)
   #C15 = <dart.core::Type>[#C14]
   #C16 = dart.ffi::_FfiStructLayout {fieldTypes:#C15, packing:#C4}
diff --git a/pkg/front_end/testcases/nnbd/ffi_sample.dart.strong.expect b/pkg/front_end/testcases/nnbd/ffi_sample.dart.strong.expect
index f21662b..35dda10 100644
--- a/pkg/front_end/testcases/nnbd/ffi_sample.dart.strong.expect
+++ b/pkg/front_end/testcases/nnbd/ffi_sample.dart.strong.expect
@@ -34,7 +34,7 @@
 
 Constructor coverage from constants:
 org-dartlang-testcase:///ffi_sample.dart:
-- Double. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:142:9)
+- Double. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:144:9)
 - _NativeDouble. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:34:9)
 - NativeType. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:12:9)
 - Object. (from org-dartlang-sdk:///sdk/lib/core/object.dart:25:9)
diff --git a/pkg/front_end/testcases/nnbd/ffi_sample.dart.strong.transformed.expect b/pkg/front_end/testcases/nnbd/ffi_sample.dart.strong.transformed.expect
index 5082fb7..02dfd2a 100644
--- a/pkg/front_end/testcases/nnbd/ffi_sample.dart.strong.transformed.expect
+++ b/pkg/front_end/testcases/nnbd/ffi_sample.dart.strong.transformed.expect
@@ -50,22 +50,22 @@
   #C7 = core::pragma {name:#C1, options:#C6}
   #C8 = ffi::Double {}
   #C9 = 0
-  #C10 = <core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
+  #C10 = <core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
   #C11 = 8
-  #C12 = <core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
+  #C12 = <core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
   #C13 = 16
-  #C14 = <core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
+  #C14 = <core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
   #C15 = "vm:prefer-inline"
   #C16 = core::pragma {name:#C15, options:#C5}
   #C17 = 24
   #C18 = 20
-  #C19 = <core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17]
+  #C19 = <core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17, #C17, #C17]
 }
 
 
 Constructor coverage from constants:
 org-dartlang-testcase:///ffi_sample.dart:
-- Double. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:142:9)
+- Double. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:144:9)
 - _NativeDouble. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:34:9)
 - NativeType. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:12:9)
 - Object. (from org-dartlang-sdk:///sdk/lib/core/object.dart:25:9)
diff --git a/pkg/front_end/testcases/nnbd/ffi_sample.dart.weak.expect b/pkg/front_end/testcases/nnbd/ffi_sample.dart.weak.expect
index f21662b..35dda10 100644
--- a/pkg/front_end/testcases/nnbd/ffi_sample.dart.weak.expect
+++ b/pkg/front_end/testcases/nnbd/ffi_sample.dart.weak.expect
@@ -34,7 +34,7 @@
 
 Constructor coverage from constants:
 org-dartlang-testcase:///ffi_sample.dart:
-- Double. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:142:9)
+- Double. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:144:9)
 - _NativeDouble. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:34:9)
 - NativeType. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:12:9)
 - Object. (from org-dartlang-sdk:///sdk/lib/core/object.dart:25:9)
diff --git a/pkg/front_end/testcases/nnbd/ffi_sample.dart.weak.modular.expect b/pkg/front_end/testcases/nnbd/ffi_sample.dart.weak.modular.expect
index f21662b..35dda10 100644
--- a/pkg/front_end/testcases/nnbd/ffi_sample.dart.weak.modular.expect
+++ b/pkg/front_end/testcases/nnbd/ffi_sample.dart.weak.modular.expect
@@ -34,7 +34,7 @@
 
 Constructor coverage from constants:
 org-dartlang-testcase:///ffi_sample.dart:
-- Double. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:142:9)
+- Double. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:144:9)
 - _NativeDouble. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:34:9)
 - NativeType. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:12:9)
 - Object. (from org-dartlang-sdk:///sdk/lib/core/object.dart:25:9)
diff --git a/pkg/front_end/testcases/nnbd/ffi_sample.dart.weak.transformed.expect b/pkg/front_end/testcases/nnbd/ffi_sample.dart.weak.transformed.expect
index 5082fb7..02dfd2a 100644
--- a/pkg/front_end/testcases/nnbd/ffi_sample.dart.weak.transformed.expect
+++ b/pkg/front_end/testcases/nnbd/ffi_sample.dart.weak.transformed.expect
@@ -50,22 +50,22 @@
   #C7 = core::pragma {name:#C1, options:#C6}
   #C8 = ffi::Double {}
   #C9 = 0
-  #C10 = <core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
+  #C10 = <core::int*>[#C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9, #C9]
   #C11 = 8
-  #C12 = <core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
+  #C12 = <core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
   #C13 = 16
-  #C14 = <core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
+  #C14 = <core::int*>[#C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13, #C13]
   #C15 = "vm:prefer-inline"
   #C16 = core::pragma {name:#C15, options:#C5}
   #C17 = 24
   #C18 = 20
-  #C19 = <core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17]
+  #C19 = <core::int*>[#C17, #C17, #C18, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C18, #C17, #C17, #C17, #C17, #C17, #C17, #C17, #C17]
 }
 
 
 Constructor coverage from constants:
 org-dartlang-testcase:///ffi_sample.dart:
-- Double. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:142:9)
+- Double. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:144:9)
 - _NativeDouble. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:34:9)
 - NativeType. (from org-dartlang-sdk:///sdk/lib/ffi/native_type.dart:12:9)
 - Object. (from org-dartlang-sdk:///sdk/lib/core/object.dart:25:9)
diff --git a/pkg/front_end/testcases/nnbd/ffi_struct_inline_array.dart.strong.transformed.expect b/pkg/front_end/testcases/nnbd/ffi_struct_inline_array.dart.strong.transformed.expect
index b41e171..525cfd9 100644
--- a/pkg/front_end/testcases/nnbd/ffi_struct_inline_array.dart.strong.transformed.expect
+++ b/pkg/front_end/testcases/nnbd/ffi_struct_inline_array.dart.strong.transformed.expect
@@ -42,8 +42,8 @@
   #C8 = core::pragma {name:#C1, options:#C7}
   #C9 = ffi::_ArraySize<ffi::NativeType> {dimension1:#C3, dimension2:#C6, dimension3:#C6, dimension4:#C6, dimension5:#C6, dimensions:#C6}
   #C10 = 0
-  #C11 = <core::int*>[#C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10]
-  #C12 = <core::int*>[#C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3]
+  #C11 = <core::int*>[#C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10]
+  #C12 = <core::int*>[#C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3]
   #C13 = <core::int*>[]
   #C14 = "vm:prefer-inline"
   #C15 = core::pragma {name:#C14, options:#C6}
diff --git a/pkg/front_end/testcases/nnbd/ffi_struct_inline_array.dart.weak.transformed.expect b/pkg/front_end/testcases/nnbd/ffi_struct_inline_array.dart.weak.transformed.expect
index c21a02f..b38ad34 100644
--- a/pkg/front_end/testcases/nnbd/ffi_struct_inline_array.dart.weak.transformed.expect
+++ b/pkg/front_end/testcases/nnbd/ffi_struct_inline_array.dart.weak.transformed.expect
@@ -42,8 +42,8 @@
   #C8 = core::pragma {name:#C1, options:#C7}
   #C9 = ffi::_ArraySize<ffi::NativeType*> {dimension1:#C3, dimension2:#C6, dimension3:#C6, dimension4:#C6, dimension5:#C6, dimensions:#C6}
   #C10 = 0
-  #C11 = <core::int*>[#C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10]
-  #C12 = <core::int*>[#C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3]
+  #C11 = <core::int*>[#C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10]
+  #C12 = <core::int*>[#C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3]
   #C13 = <core::int*>[]
   #C14 = "vm:prefer-inline"
   #C15 = core::pragma {name:#C14, options:#C6}
diff --git a/pkg/front_end/testcases/nnbd/ffi_struct_inline_array_multi_dimensional.dart.strong.transformed.expect b/pkg/front_end/testcases/nnbd/ffi_struct_inline_array_multi_dimensional.dart.strong.transformed.expect
index dec8714..ecf21c0 100644
--- a/pkg/front_end/testcases/nnbd/ffi_struct_inline_array_multi_dimensional.dart.strong.transformed.expect
+++ b/pkg/front_end/testcases/nnbd/ffi_struct_inline_array_multi_dimensional.dart.strong.transformed.expect
@@ -67,8 +67,8 @@
   #C9 = 2
   #C10 = ffi::_ArraySize<ffi::NativeType> {dimension1:#C9, dimension2:#C9, dimension3:#C9, dimension4:#C6, dimension5:#C6, dimensions:#C6}
   #C11 = 0
-  #C12 = <core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
-  #C13 = <core::int*>[#C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3]
+  #C12 = <core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
+  #C13 = <core::int*>[#C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3]
   #C14 = <core::int*>[#C9, #C9]
   #C15 = "vm:prefer-inline"
   #C16 = core::pragma {name:#C15, options:#C6}
diff --git a/pkg/front_end/testcases/nnbd/ffi_struct_inline_array_multi_dimensional.dart.weak.transformed.expect b/pkg/front_end/testcases/nnbd/ffi_struct_inline_array_multi_dimensional.dart.weak.transformed.expect
index e5881fb..a9ba39c 100644
--- a/pkg/front_end/testcases/nnbd/ffi_struct_inline_array_multi_dimensional.dart.weak.transformed.expect
+++ b/pkg/front_end/testcases/nnbd/ffi_struct_inline_array_multi_dimensional.dart.weak.transformed.expect
@@ -67,8 +67,8 @@
   #C9 = 2
   #C10 = ffi::_ArraySize<ffi::NativeType*> {dimension1:#C9, dimension2:#C9, dimension3:#C9, dimension4:#C6, dimension5:#C6, dimensions:#C6}
   #C11 = 0
-  #C12 = <core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
-  #C13 = <core::int*>[#C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3]
+  #C12 = <core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
+  #C13 = <core::int*>[#C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3, #C3]
   #C14 = <core::int*>[#C9, #C9]
   #C15 = "vm:prefer-inline"
   #C16 = core::pragma {name:#C15, options:#C6}
diff --git a/pkg/smith/lib/configuration.dart b/pkg/smith/lib/configuration.dart
index b1576bb7..bac7ac7 100644
--- a/pkg/smith/lib/configuration.dart
+++ b/pkg/smith/lib/configuration.dart
@@ -584,6 +584,10 @@
   static const simarmv6 = Architecture._('simarmv6');
   static const simarm64 = Architecture._('simarm64');
   static const simarm64c = Architecture._('simarm64c');
+  static const riscv32 = Architecture._('riscv32');
+  static const riscv64 = Architecture._('riscv64');
+  static const simriscv32 = Architecture._('simriscv32');
+  static const simriscv64 = Architecture._('simriscv64');
 
   static final List<String> names = _all.keys.toList();
 
@@ -600,6 +604,10 @@
     simarmv6,
     simarm64,
     simarm64c,
+    riscv32,
+    riscv64,
+    simriscv32,
+    simriscv64,
   ], key: (architecture) => (architecture as Architecture).name);
 
   static Architecture find(String name) {
diff --git a/pkg/test_runner/lib/src/compiler_configuration.dart b/pkg/test_runner/lib/src/compiler_configuration.dart
index 6185d8f..16ac923 100644
--- a/pkg/test_runner/lib/src/compiler_configuration.dart
+++ b/pkg/test_runner/lib/src/compiler_configuration.dart
@@ -99,6 +99,8 @@
         if (configuration.architecture == Architecture.simarm ||
             configuration.architecture == Architecture.simarm64 ||
             configuration.architecture == Architecture.simarm64c ||
+            configuration.architecture == Architecture.simriscv32 ||
+            configuration.architecture == Architecture.simriscv64 ||
             configuration.system == System.android) {
           return VMKernelCompilerConfiguration(configuration);
         }
@@ -705,6 +707,16 @@
 
   bool get _isIA32 => _configuration.architecture == Architecture.ia32;
 
+  bool get _isRiscv32 => _configuration.architecture == Architecture.riscv32;
+
+  bool get _isSimRiscv32 =>
+      _configuration.architecture == Architecture.simriscv32;
+
+  bool get _isRiscv64 => _configuration.architecture == Architecture.riscv64;
+
+  bool get _isSimRiscv64 =>
+      _configuration.architecture == Architecture.simriscv64;
+
   bool get _isAot => true;
 
   PrecompilerCompilerConfiguration(TestConfiguration configuration)
@@ -880,6 +892,10 @@
         cc = 'arm-linux-gnueabihf-gcc';
       } else if (_isSimArm64 || (_isArm64 && _configuration.useQemu)) {
         cc = 'aarch64-linux-gnu-gcc';
+      } else if (_isSimRiscv32 || (_isRiscv32 && _configuration.useQemu)) {
+        cc = 'riscv32-linux-gnu-gcc';
+      } else if (_isSimRiscv64 || (_isRiscv64 && _configuration.useQemu)) {
+        cc = 'riscv64-linux-gnu-gcc';
       } else {
         cc = 'gcc';
       }
@@ -911,6 +927,10 @@
       case Architecture.arm_x64:
       case Architecture.arm64:
       case Architecture.arm64c:
+      case Architecture.riscv32:
+      case Architecture.riscv64:
+      case Architecture.simriscv32:
+      case Architecture.simriscv64:
         ccFlags = null;
         break;
       default:
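
The compiler_configuration.dart hunks above route AOT assembly through a GNU cross-compiler whenever the target is simulated RISC-V, or real RISC-V run under QEMU, and exempt the new architectures from the default cc flags. A minimal sketch of that selection logic, using a hypothetical helper name rather than the test runner's actual API:

    String pickAssembler({required String arch, required bool useQemu}) {
      if (arch == 'simriscv32' || (arch == 'riscv32' && useQemu)) {
        return 'riscv32-linux-gnu-gcc';
      }
      if (arch == 'simriscv64' || (arch == 'riscv64' && useQemu)) {
        return 'riscv64-linux-gnu-gcc';
      }
      return 'gcc'; // native host build
    }

    void main() {
      // Prints riscv64-linux-gnu-gcc: simulators always cross-compile.
      print(pickAssembler(arch: 'simriscv64', useQemu: false));
    }
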
diff --git a/pkg/test_runner/lib/src/options.dart b/pkg/test_runner/lib/src/options.dart
index c028e33..cd2c23c 100644
--- a/pkg/test_runner/lib/src/options.dart
+++ b/pkg/test_runner/lib/src/options.dart
@@ -143,8 +143,8 @@
 Allowed values are:
 all
 ia32, x64
-arm, armv6, arm64,
-simarm, simarmv6, simarm64, arm_x64''',
+arm, arm64, simarm, simarm64, arm_x64
+riscv32, riscv64, simriscv32, simriscv64''',
         abbr: 'a',
         values: ['all', ...Architecture.names],
         defaultsTo: Architecture.x64.name,
diff --git a/pkg/test_runner/lib/src/runtime_configuration.dart b/pkg/test_runner/lib/src/runtime_configuration.dart
index 1810702..2341361 100644
--- a/pkg/test_runner/lib/src/runtime_configuration.dart
+++ b/pkg/test_runner/lib/src/runtime_configuration.dart
@@ -220,10 +220,18 @@
 
 class QemuConfig {
   static const all = <Architecture, QemuConfig>{
+    Architecture.ia32:
+        QemuConfig('qemu-i386', ['-L', '/usr/lib/i386-linux-gnu/']),
+    Architecture.x64:
+        QemuConfig('qemu-x86_64', ['-L', '/usr/lib/x86_64-linux-gnu/']),
     Architecture.arm:
         QemuConfig('qemu-arm', ['-L', '/usr/arm-linux-gnueabihf/']),
     Architecture.arm64:
         QemuConfig('qemu-aarch64', ['-L', '/usr/aarch64-linux-gnu/']),
+    Architecture.riscv32:
+        QemuConfig('qemu-riscv32', ['-L', '/usr/riscv32-linux-gnu/']),
+    Architecture.riscv64:
+        QemuConfig('qemu-riscv64', ['-L', '/usr/riscv64-linux-gnu/']),
   };
 
   final String executable;
@@ -253,6 +261,8 @@
       case Architecture.armv6:
       case Architecture.simarm64:
       case Architecture.simarm64c:
+      case Architecture.simriscv32:
+      case Architecture.simriscv64:
         multiplier *= 4;
         break;
     }
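
Each QemuConfig entry above pairs a qemu-user binary with a `-L` argument pointing at the matching cross sysroot, and simulated RISC-V runs get the same 4x timeout multiplier as the other simulators. A hedged sketch of how such an entry would prefix a test command; the helper name and the sample command are illustrative only:

    List<String> wrapWithQemu(
            String executable, List<String> qemuArgs, List<String> command) =>
        [executable, ...qemuArgs, ...command];

    void main() {
      final line = wrapWithQemu('qemu-riscv64',
          ['-L', '/usr/riscv64-linux-gnu/'], ['dart', 'hello.dart']);
      // qemu-riscv64 -L /usr/riscv64-linux-gnu/ dart hello.dart
      print(line.join(' '));
    }
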
diff --git a/pkg/test_runner/lib/src/test_case.dart b/pkg/test_runner/lib/src/test_case.dart
index 995bf0a..2597e45 100644
--- a/pkg/test_runner/lib/src/test_case.dart
+++ b/pkg/test_runner/lib/src/test_case.dart
@@ -285,7 +285,9 @@
               executable = '/usr/bin/sample';
             } else if (io.Platform.isWindows) {
               var isX64 = command.executable.contains("X64") ||
-                  command.executable.contains("SIMARM64");
+                  command.executable.contains("SIMARM64") ||
+                  command.executable.contains("SIMARM64C") ||
+                  command.executable.contains("SIMRISCV64");
               if (configuration.windowsSdkPath != null) {
                 executable = configuration.windowsSdkPath +
                     "\\Debuggers\\${isX64 ? 'x64' : 'x86'}\\cdb.exe";
diff --git a/pkg/test_runner/lib/src/test_suite.dart b/pkg/test_runner/lib/src/test_suite.dart
index bae578b..5aeaf54 100644
--- a/pkg/test_runner/lib/src/test_suite.dart
+++ b/pkg/test_runner/lib/src/test_suite.dart
@@ -399,6 +399,8 @@
     "x64_linux",
     "x64_macos",
     "x64_win",
+    "riscv32_linux",
+    "riscv64_linux",
   ];
 
   FfiTestSuite(TestConfiguration configuration)
diff --git a/pkg/vm/lib/transformations/ffi/abi.dart b/pkg/vm/lib/transformations/ffi/abi.dart
index 04bc8e2..cc2d86b 100644
--- a/pkg/vm/lib/transformations/ffi/abi.dart
+++ b/pkg/vm/lib/transformations/ffi/abi.dart
@@ -12,6 +12,8 @@
   arm64,
   ia32,
   x64,
+  riscv32,
+  riscv64,
 }
 
 extension on _Architecture {
@@ -20,9 +22,11 @@
     switch (this) {
       case _Architecture.arm:
       case _Architecture.ia32:
+      case _Architecture.riscv32:
         return 4;
       case _Architecture.arm64:
       case _Architecture.x64:
+      case _Architecture.riscv64:
         return 8;
     }
   }
@@ -91,6 +95,12 @@
   /// The application binary interface for linux on the X64 architecture.
   static const linuxX64 = _linuxX64;
 
+  /// The application binary interface for linux on 32-bit RISC-V.
+  static const linuxRiscv32 = _linuxRiscv32;
+
+  /// The application binary interface for linux on 64-bit RISC-V.
+  static const linuxRiscv64 = _linuxRiscv64;
+
   /// The application binary interface for MacOS on the Arm64 architecture.
   static const macosArm64 = _macosArm64;
 
@@ -128,6 +138,8 @@
     linuxArm64,
     linuxIA32,
     linuxX64,
+    linuxRiscv32,
+    linuxRiscv64,
     macosArm64,
     macosX64,
     windowsArm64,
@@ -171,6 +183,8 @@
   static const _linuxArm64 = Abi._(_Architecture.arm64, _OS.linux);
   static const _linuxIA32 = Abi._(_Architecture.ia32, _OS.linux);
   static const _linuxX64 = Abi._(_Architecture.x64, _OS.linux);
+  static const _linuxRiscv32 = Abi._(_Architecture.riscv32, _OS.linux);
+  static const _linuxRiscv64 = Abi._(_Architecture.riscv64, _OS.linux);
   static const _macosArm64 = Abi._(_Architecture.arm64, _OS.macos);
   static const _macosX64 = Abi._(_Architecture.x64, _OS.macos);
   static const _windowsArm64 = Abi._(_Architecture.arm64, _OS.windows);
@@ -193,6 +207,8 @@
   Abi.linuxArm64: 'linuxArm64',
   Abi.linuxIA32: 'linuxIA32',
   Abi.linuxX64: 'linuxX64',
+  Abi.linuxRiscv32: 'linuxRiscv32',
+  Abi.linuxRiscv64: 'linuxRiscv64',
   Abi.macosArm64: 'macosArm64',
   Abi.macosX64: 'macosX64',
   Abi.windowsArm64: 'windowsArm64',
@@ -229,6 +245,7 @@
   Abi.iosX64: _wordSize64,
   Abi.linuxArm64: _wordSize64,
   Abi.linuxX64: _wordSize64,
+  Abi.linuxRiscv64: _wordSize64,
   Abi.macosArm64: _wordSize64,
   Abi.macosX64: _wordSize64,
   Abi.windowsArm64: _wordSize64,
@@ -240,6 +257,7 @@
   // _wordSize32Align64
   Abi.androidArm: _wordSize32Align64,
   Abi.linuxArm: _wordSize32Align64,
+  Abi.linuxRiscv32: _wordSize32Align64,
   Abi.windowsIA32: _wordSize32Align64,
 };
 
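The abi.dart hunks above add riscv32 and riscv64 to the private architecture enum, give them 4-byte and 8-byte pointers respectively, expose the linuxRiscv32/linuxRiscv64 ABIs, and place linuxRiscv32 in the `_wordSize32Align64` bucket alongside androidArm, linuxArm, and windowsIA32, meaning 8-byte types stay 8-byte aligned even on the 32-bit ABI. A minimal sketch of the pointer-size rule, with stand-in names for the SDK's private extension:

    enum Arch { arm, arm64, ia32, x64, riscv32, riscv64 }

    int pointerSize(Arch architecture) {
      // Mirrors the exhaustive switch the diff extends: 32-bit
      // architectures report 4-byte pointers, 64-bit ones report 8.
      switch (architecture) {
        case Arch.arm:
        case Arch.ia32:
        case Arch.riscv32:
          return 4;
        case Arch.arm64:
        case Arch.x64:
        case Arch.riscv64:
          return 8;
      }
    }

    void main() {
      print(pointerSize(Arch.riscv32)); // 4
      print(pointerSize(Arch.riscv64)); // 8
    }
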
diff --git a/pkg/vm/testcases/transformations/ffi/abi_specific_int.dart b/pkg/vm/testcases/transformations/ffi/abi_specific_int.dart
index 50266bd..c3ff180 100644
--- a/pkg/vm/testcases/transformations/ffi/abi_specific_int.dart
+++ b/pkg/vm/testcases/transformations/ffi/abi_specific_int.dart
@@ -18,6 +18,8 @@
   Abi.linuxArm64: Uint32(),
   Abi.linuxIA32: Uint32(),
   Abi.linuxX64: Uint32(),
+  Abi.linuxRiscv32: Uint32(),
+  Abi.linuxRiscv64: Uint32(),
   Abi.macosArm64: Uint32(),
   Abi.macosX64: Uint32(),
   Abi.windowsArm64: Uint16(),
diff --git a/pkg/vm/testcases/transformations/ffi/abi_specific_int.dart.expect b/pkg/vm/testcases/transformations/ffi/abi_specific_int.dart.expect
index d8ba7e5..63b991b 100644
--- a/pkg/vm/testcases/transformations/ffi/abi_specific_int.dart.expect
+++ b/pkg/vm/testcases/transformations/ffi/abi_specific_int.dart.expect
@@ -7,17 +7,17 @@
 
 import "dart:ffi";
 
-@#C49
-@#C56
+@#C55
+@#C62
 class WChar extends ffi::AbiSpecificInteger /*hasConstConstructor*/  {
   const constructor •() → self::WChar
     : super ffi::AbiSpecificInteger::•()
     ;
-  @#C59
+  @#C65
   static get #sizeOf() → core::int*
-    return #C61.{core::List::[]}(ffi::_abi()){(core::int) → core::int*};
+    return #C67.{core::List::[]}(ffi::_abi()){(core::int) → core::int*};
 }
-@#C66
+@#C72
 class WCharStruct extends ffi::Struct {
   synthetic constructor •() → self::WCharStruct
     : super ffi::Struct::•()
@@ -25,23 +25,23 @@
   constructor #fromTypedDataBase(core::Object #typedDataBase) → self::WCharStruct
     : super ffi::Struct::_fromTypedDataBase(#typedDataBase)
     ;
-  @#C67
+  @#C73
   get a0() → core::int
-    return ffi::_loadAbiSpecificInt<self::WChar>(this.{ffi::_Compound::_typedDataBase}{core::Object}, #C68.{core::List::[]}(ffi::_abi()){(core::int) → core::int*});
-  @#C67
+    return ffi::_loadAbiSpecificInt<self::WChar>(this.{ffi::_Compound::_typedDataBase}{core::Object}, #C74.{core::List::[]}(ffi::_abi()){(core::int) → core::int*});
+  @#C73
   set a0(core::int #externalFieldValue) → void
-    return ffi::_storeAbiSpecificInt<self::WChar>(this.{ffi::_Compound::_typedDataBase}{core::Object}, #C68.{core::List::[]}(ffi::_abi()){(core::int) → core::int*}, #externalFieldValue);
-  @#C67
+    return ffi::_storeAbiSpecificInt<self::WChar>(this.{ffi::_Compound::_typedDataBase}{core::Object}, #C74.{core::List::[]}(ffi::_abi()){(core::int) → core::int*}, #externalFieldValue);
+  @#C73
   get a1() → core::int
-    return ffi::_loadAbiSpecificInt<self::WChar>(this.{ffi::_Compound::_typedDataBase}{core::Object}, #C61.{core::List::[]}(ffi::_abi()){(core::int) → core::int*});
-  @#C67
+    return ffi::_loadAbiSpecificInt<self::WChar>(this.{ffi::_Compound::_typedDataBase}{core::Object}, #C67.{core::List::[]}(ffi::_abi()){(core::int) → core::int*});
+  @#C73
   set a1(core::int #externalFieldValue) → void
-    return ffi::_storeAbiSpecificInt<self::WChar>(this.{ffi::_Compound::_typedDataBase}{core::Object}, #C61.{core::List::[]}(ffi::_abi()){(core::int) → core::int*}, #externalFieldValue);
-  @#C59
+    return ffi::_storeAbiSpecificInt<self::WChar>(this.{ffi::_Compound::_typedDataBase}{core::Object}, #C67.{core::List::[]}(ffi::_abi()){(core::int) → core::int*}, #externalFieldValue);
+  @#C65
   static get #sizeOf() → core::int*
-    return #C70.{core::List::[]}(ffi::_abi()){(core::int) → core::int*};
+    return #C76.{core::List::[]}(ffi::_abi()){(core::int) → core::int*};
 }
-@#C75
+@#C81
 class WCharArrayStruct extends ffi::Struct {
   synthetic constructor •() → self::WCharArrayStruct
     : super ffi::Struct::•()
@@ -49,31 +49,31 @@
   constructor #fromTypedDataBase(core::Object #typedDataBase) → self::WCharArrayStruct
     : super ffi::Struct::_fromTypedDataBase(#typedDataBase)
     ;
-  @#C76
+  @#C82
   get a0() → ffi::Array<self::WChar>
     return new ffi::Array::_<self::WChar>( block {
       core::Object #typedDataBase = this.{ffi::_Compound::_typedDataBase}{core::Object};
-      core::int #offset = #C68.{core::List::[]}(ffi::_abi()){(core::int) → core::int*};
-    } =>#typedDataBase is ffi::Pointer<dynamic> ?{core::Object} ffi::_fromAddress<self::WChar>(#typedDataBase.{ffi::Pointer::address}{core::int}.{core::num::+}(#offset){(core::num) → core::num}) : let typ::TypedData #typedData = _in::unsafeCast<typ::TypedData>(#typedDataBase) in #typedData.{typ::TypedData::buffer}{typ::ByteBuffer}.{typ::ByteBuffer::asUint8List}(#typedData.{typ::TypedData::offsetInBytes}{core::int}.{core::num::+}(#offset){(core::num) → core::num}, #C80.{core::List::[]}(ffi::_abi()){(core::int) → core::int*}){([core::int, core::int?]) → typ::Uint8List}, #C71, #C81);
-  @#C76
+      core::int #offset = #C74.{core::List::[]}(ffi::_abi()){(core::int) → core::int*};
+    } =>#typedDataBase is ffi::Pointer<dynamic> ?{core::Object} ffi::_fromAddress<self::WChar>(#typedDataBase.{ffi::Pointer::address}{core::int}.{core::num::+}(#offset){(core::num) → core::num}) : let typ::TypedData #typedData = _in::unsafeCast<typ::TypedData>(#typedDataBase) in #typedData.{typ::TypedData::buffer}{typ::ByteBuffer}.{typ::ByteBuffer::asUint8List}(#typedData.{typ::TypedData::offsetInBytes}{core::int}.{core::num::+}(#offset){(core::num) → core::num}, #C86.{core::List::[]}(ffi::_abi()){(core::int) → core::int*}){([core::int, core::int?]) → typ::Uint8List}, #C77, #C87);
+  @#C82
   set a0(ffi::Array<self::WChar> #externalFieldValue) → void
-    return ffi::_memCopy(this.{ffi::_Compound::_typedDataBase}{core::Object}, #C68.{core::List::[]}(ffi::_abi()){(core::int) → core::int*}, #externalFieldValue.{ffi::Array::_typedDataBase}{core::Object}, #C1, #C80.{core::List::[]}(ffi::_abi()){(core::int) → core::int*});
-  @#C59
+    return ffi::_memCopy(this.{ffi::_Compound::_typedDataBase}{core::Object}, #C74.{core::List::[]}(ffi::_abi()){(core::int) → core::int*}, #externalFieldValue.{ffi::Array::_typedDataBase}{core::Object}, #C1, #C86.{core::List::[]}(ffi::_abi()){(core::int) → core::int*});
+  @#C65
   static get #sizeOf() → core::int*
-    return #C80.{core::List::[]}(ffi::_abi()){(core::int) → core::int*};
+    return #C86.{core::List::[]}(ffi::_abi()){(core::int) → core::int*};
 }
 class _DummyAllocator extends core::Object implements ffi::Allocator /*hasConstConstructor*/  {
   const constructor •() → self::_DummyAllocator
     : super core::Object::•()
     ;
-  @#C82
-  method allocate<T extends ffi::NativeType>(core::int byteCount, {core::int? alignment = #C58}) → ffi::Pointer<self::_DummyAllocator::allocate::T> {
+  @#C88
+  method allocate<T extends ffi::NativeType>(core::int byteCount, {core::int? alignment = #C64}) → ffi::Pointer<self::_DummyAllocator::allocate::T> {
     return ffi::Pointer::fromAddress<self::_DummyAllocator::allocate::T>(0);
   }
-  @#C82
+  @#C88
   method free(ffi::Pointer<ffi::NativeType> pointer) → void {}
 }
-static const field self::_DummyAllocator noAlloc = #C83;
+static const field self::_DummyAllocator noAlloc = #C89;
 static method main() → void {
   self::testSizeOf();
   self::testStoreLoad();
@@ -86,29 +86,29 @@
   core::print(size);
 }
 static method testStoreLoad() → void {
-  final ffi::Pointer<self::WChar> p = #C83.{ffi::Allocator::allocate}<self::WChar>(self::WChar::#sizeOf){(core::int, {alignment: core::int?}) → ffi::Pointer<self::WChar>};
+  final ffi::Pointer<self::WChar> p = #C89.{ffi::Allocator::allocate}<self::WChar>(self::WChar::#sizeOf){(core::int, {alignment: core::int?}) → ffi::Pointer<self::WChar>};
   ffi::_storeAbiSpecificInt<self::WChar>(p, #C1, 10);
   core::print(ffi::_loadAbiSpecificInt<self::WChar>(p, #C1));
-  #C83.{self::_DummyAllocator::free}(p){(ffi::Pointer<ffi::NativeType>) → void};
+  #C89.{self::_DummyAllocator::free}(p){(ffi::Pointer<ffi::NativeType>) → void};
 }
 static method testStoreLoadIndexed() → void {
-  final ffi::Pointer<self::WChar> p = #C83.{ffi::Allocator::allocate}<self::WChar>(2.{core::num::*}(self::WChar::#sizeOf){(core::num) → core::num}){(core::int, {alignment: core::int?}) → ffi::Pointer<self::WChar>};
+  final ffi::Pointer<self::WChar> p = #C89.{ffi::Allocator::allocate}<self::WChar>(2.{core::num::*}(self::WChar::#sizeOf){(core::num) → core::num}){(core::int, {alignment: core::int?}) → ffi::Pointer<self::WChar>};
   ffi::_storeAbiSpecificIntAtIndex<self::WChar>(p, 0, 10);
   ffi::_storeAbiSpecificIntAtIndex<self::WChar>(p, 1, 3);
   core::print(ffi::_loadAbiSpecificIntAtIndex<self::WChar>(p, 0));
   core::print(ffi::_loadAbiSpecificIntAtIndex<self::WChar>(p, 1));
-  #C83.{self::_DummyAllocator::free}(p){(ffi::Pointer<ffi::NativeType>) → void};
+  #C89.{self::_DummyAllocator::free}(p){(ffi::Pointer<ffi::NativeType>) → void};
 }
 static method testStruct() → void {
-  final ffi::Pointer<self::WCharStruct> p = #C83.{ffi::Allocator::allocate}<self::WCharStruct>(self::WCharStruct::#sizeOf){(core::int, {alignment: core::int?}) → ffi::Pointer<self::WCharStruct>};
+  final ffi::Pointer<self::WCharStruct> p = #C89.{ffi::Allocator::allocate}<self::WCharStruct>(self::WCharStruct::#sizeOf){(core::int, {alignment: core::int?}) → ffi::Pointer<self::WCharStruct>};
   new self::WCharStruct::#fromTypedDataBase(p!).{self::WCharStruct::a0} = 1;
   core::print(new self::WCharStruct::#fromTypedDataBase(p!).{self::WCharStruct::a0}{core::int});
   new self::WCharStruct::#fromTypedDataBase(p!).{self::WCharStruct::a0} = 2;
   core::print(new self::WCharStruct::#fromTypedDataBase(p!).{self::WCharStruct::a0}{core::int});
-  #C83.{self::_DummyAllocator::free}(p){(ffi::Pointer<ffi::NativeType>) → void};
+  #C89.{self::_DummyAllocator::free}(p){(ffi::Pointer<ffi::NativeType>) → void};
 }
 static method testInlineArray() → void {
-  final ffi::Pointer<self::WCharArrayStruct> p = #C83.{ffi::Allocator::allocate}<self::WCharArrayStruct>(self::WCharArrayStruct::#sizeOf){(core::int, {alignment: core::int?}) → ffi::Pointer<self::WCharArrayStruct>};
+  final ffi::Pointer<self::WCharArrayStruct> p = #C89.{ffi::Allocator::allocate}<self::WCharArrayStruct>(self::WCharArrayStruct::#sizeOf){(core::int, {alignment: core::int?}) → ffi::Pointer<self::WCharArrayStruct>};
   final ffi::Array<self::WChar> array = new self::WCharArrayStruct::#fromTypedDataBase(p!).{self::WCharArrayStruct::a0}{ffi::Array<self::WChar>};
   for (core::int i = 0; i.{core::num::<}(100){(core::num) → core::bool}; i = i.{core::num::+}(1){(core::num) → core::int}) {
     ffi::_storeAbiSpecificIntAtIndex<self::WChar>(array.{ffi::Array::_typedDataBase}{core::Object}, i, i);
@@ -116,7 +116,7 @@
   for (core::int i = 0; i.{core::num::<}(100){(core::num) → core::bool}; i = i.{core::num::+}(1){(core::num) → core::int}) {
     core::print(ffi::_loadAbiSpecificIntAtIndex<self::WChar>(array.{ffi::Array::_typedDataBase}{core::Object}, i));
   }
-  #C83.{self::_DummyAllocator::free}(p){(ffi::Pointer<ffi::NativeType>) → void};
+  #C89.{self::_DummyAllocator::free}(p){(ffi::Pointer<ffi::NativeType>) → void};
 }
 constants  {
   #C1 = 0
@@ -155,51 +155,57 @@
   #C34 = ffi::Abi {_os:#C31, _architecture:#C14}
   #C35 = ffi::Abi {_os:#C31, _architecture:#C18}
   #C36 = 4
-  #C37 = "macos"
-  #C38 = ffi::_OS {index:#C36, _name:#C37}
-  #C39 = ffi::Abi {_os:#C38, _architecture:#C10}
-  #C40 = ffi::Abi {_os:#C38, _architecture:#C18}
-  #C41 = 5
-  #C42 = "windows"
-  #C43 = ffi::_OS {index:#C41, _name:#C42}
-  #C44 = ffi::Abi {_os:#C43, _architecture:#C10}
-  #C45 = ffi::Uint16 {}
-  #C46 = ffi::Abi {_os:#C43, _architecture:#C14}
-  #C47 = ffi::Abi {_os:#C43, _architecture:#C18}
-  #C48 = <ffi::Abi*, ffi::NativeType*>{#C6:#C7, #C11:#C7, #C15:#C7, #C19:#C7, #C22:#C23, #C24:#C7, #C27:#C7, #C28:#C7, #C29:#C7, #C32:#C7, #C33:#C7, #C34:#C7, #C35:#C7, #C39:#C7, #C40:#C7, #C44:#C45, #C46:#C45, #C47:#C45)
-  #C49 = ffi::AbiSpecificIntegerMapping {mapping:#C48}
-  #C50 = "vm:ffi:abi-specific-mapping"
-  #C51 = TypeLiteralConstant(ffi::Uint32)
-  #C52 = TypeLiteralConstant(ffi::Uint64)
-  #C53 = TypeLiteralConstant(ffi::Uint16)
-  #C54 = <core::Type?>[#C51, #C51, #C51, #C51, #C52, #C51, #C51, #C51, #C51, #C51, #C51, #C51, #C51, #C51, #C51, #C53, #C53, #C53]
-  #C55 = ffi::_FfiAbiSpecificMapping {nativeTypes:#C54}
-  #C56 = core::pragma {name:#C50, options:#C55}
-  #C57 = "vm:prefer-inline"
-  #C58 = null
-  #C59 = core::pragma {name:#C57, options:#C58}
-  #C60 = 8
-  #C61 = <core::int*>[#C36, #C36, #C36, #C36, #C60, #C36, #C36, #C36, #C36, #C36, #C36, #C36, #C36, #C36, #C36, #C12, #C12, #C12]
-  #C62 = "vm:ffi:struct-fields"
-  #C63 = TypeLiteralConstant(self::WChar)
-  #C64 = <core::Type>[#C63, #C63]
-  #C65 = ffi::_FfiStructLayout {fieldTypes:#C64, packing:#C58}
-  #C66 = core::pragma {name:#C62, options:#C65}
-  #C67 = self::WChar {}
-  #C68 = <core::int*>[#C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1]
-  #C69 = 16
-  #C70 = <core::int*>[#C60, #C60, #C60, #C60, #C69, #C60, #C60, #C60, #C60, #C60, #C60, #C60, #C60, #C60, #C60, #C36, #C36, #C36]
-  #C71 = 100
-  #C72 = ffi::_FfiInlineArray {elementType:#C63, length:#C71}
-  #C73 = <core::Type>[#C72]
-  #C74 = ffi::_FfiStructLayout {fieldTypes:#C73, packing:#C58}
-  #C75 = core::pragma {name:#C62, options:#C74}
-  #C76 = ffi::_ArraySize<ffi::NativeType*> {dimension1:#C71, dimension2:#C58, dimension3:#C58, dimension4:#C58, dimension5:#C58, dimensions:#C58}
-  #C77 = 400
-  #C78 = 800
-  #C79 = 200
-  #C80 = <core::int*>[#C77, #C77, #C77, #C77, #C78, #C77, #C77, #C77, #C77, #C77, #C77, #C77, #C77, #C77, #C77, #C79, #C79, #C79]
-  #C81 = <core::int*>[]
-  #C82 = core::_Override {}
-  #C83 = self::_DummyAllocator {}
+  #C37 = "riscv32"
+  #C38 = ffi::_Architecture {index:#C36, _name:#C37}
+  #C39 = ffi::Abi {_os:#C31, _architecture:#C38}
+  #C40 = 5
+  #C41 = "riscv64"
+  #C42 = ffi::_Architecture {index:#C40, _name:#C41}
+  #C43 = ffi::Abi {_os:#C31, _architecture:#C42}
+  #C44 = "macos"
+  #C45 = ffi::_OS {index:#C36, _name:#C44}
+  #C46 = ffi::Abi {_os:#C45, _architecture:#C10}
+  #C47 = ffi::Abi {_os:#C45, _architecture:#C18}
+  #C48 = "windows"
+  #C49 = ffi::_OS {index:#C40, _name:#C48}
+  #C50 = ffi::Abi {_os:#C49, _architecture:#C10}
+  #C51 = ffi::Uint16 {}
+  #C52 = ffi::Abi {_os:#C49, _architecture:#C14}
+  #C53 = ffi::Abi {_os:#C49, _architecture:#C18}
+  #C54 = <ffi::Abi*, ffi::NativeType*>{#C6:#C7, #C11:#C7, #C15:#C7, #C19:#C7, #C22:#C23, #C24:#C7, #C27:#C7, #C28:#C7, #C29:#C7, #C32:#C7, #C33:#C7, #C34:#C7, #C35:#C7, #C39:#C7, #C43:#C7, #C46:#C7, #C47:#C7, #C50:#C51, #C52:#C51, #C53:#C51)
+  #C55 = ffi::AbiSpecificIntegerMapping {mapping:#C54}
+  #C56 = "vm:ffi:abi-specific-mapping"
+  #C57 = TypeLiteralConstant(ffi::Uint32)
+  #C58 = TypeLiteralConstant(ffi::Uint64)
+  #C59 = TypeLiteralConstant(ffi::Uint16)
+  #C60 = <core::Type?>[#C57, #C57, #C57, #C57, #C58, #C57, #C57, #C57, #C57, #C57, #C57, #C57, #C57, #C57, #C57, #C57, #C57, #C59, #C59, #C59]
+  #C61 = ffi::_FfiAbiSpecificMapping {nativeTypes:#C60}
+  #C62 = core::pragma {name:#C56, options:#C61}
+  #C63 = "vm:prefer-inline"
+  #C64 = null
+  #C65 = core::pragma {name:#C63, options:#C64}
+  #C66 = 8
+  #C67 = <core::int*>[#C36, #C36, #C36, #C36, #C66, #C36, #C36, #C36, #C36, #C36, #C36, #C36, #C36, #C36, #C36, #C36, #C36, #C12, #C12, #C12]
+  #C68 = "vm:ffi:struct-fields"
+  #C69 = TypeLiteralConstant(self::WChar)
+  #C70 = <core::Type>[#C69, #C69]
+  #C71 = ffi::_FfiStructLayout {fieldTypes:#C70, packing:#C64}
+  #C72 = core::pragma {name:#C68, options:#C71}
+  #C73 = self::WChar {}
+  #C74 = <core::int*>[#C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1, #C1]
+  #C75 = 16
+  #C76 = <core::int*>[#C66, #C66, #C66, #C66, #C75, #C66, #C66, #C66, #C66, #C66, #C66, #C66, #C66, #C66, #C66, #C66, #C66, #C36, #C36, #C36]
+  #C77 = 100
+  #C78 = ffi::_FfiInlineArray {elementType:#C69, length:#C77}
+  #C79 = <core::Type>[#C78]
+  #C80 = ffi::_FfiStructLayout {fieldTypes:#C79, packing:#C64}
+  #C81 = core::pragma {name:#C68, options:#C80}
+  #C82 = ffi::_ArraySize<ffi::NativeType*> {dimension1:#C77, dimension2:#C64, dimension3:#C64, dimension4:#C64, dimension5:#C64, dimensions:#C64}
+  #C83 = 400
+  #C84 = 800
+  #C85 = 200
+  #C86 = <core::int*>[#C83, #C83, #C83, #C83, #C84, #C83, #C83, #C83, #C83, #C83, #C83, #C83, #C83, #C83, #C83, #C83, #C83, #C85, #C85, #C85]
+  #C87 = <core::int*>[]
+  #C88 = core::_Override {}
+  #C89 = self::_DummyAllocator {}
 }
diff --git a/pkg/vm/testcases/transformations/ffi/abi_specific_int_incomplete.dart.expect b/pkg/vm/testcases/transformations/ffi/abi_specific_int_incomplete.dart.expect
index 2acfbf6..306f413 100644
--- a/pkg/vm/testcases/transformations/ffi/abi_specific_int_incomplete.dart.expect
+++ b/pkg/vm/testcases/transformations/ffi/abi_specific_int_incomplete.dart.expect
@@ -143,22 +143,22 @@
   #C22 = "vm:ffi:abi-specific-mapping"
   #C23 = null
   #C24 = TypeLiteralConstant(ffi::Uint32)
-  #C25 = <core::Type?>[#C23, #C23, #C23, #C23, #C23, #C23, #C23, #C23, #C23, #C24, #C24, #C24, #C24, #C23, #C23, #C23, #C23, #C23]
+  #C25 = <core::Type?>[#C23, #C23, #C23, #C23, #C23, #C23, #C23, #C23, #C23, #C24, #C24, #C24, #C24, #C23, #C23, #C23, #C23, #C23, #C23, #C23]
   #C26 = ffi::_FfiAbiSpecificMapping {nativeTypes:#C25}
   #C27 = core::pragma {name:#C22, options:#C26}
   #C28 = "vm:prefer-inline"
   #C29 = core::pragma {name:#C28, options:#C23}
   #C30 = 4
-  #C31 = <core::int*>[#C23, #C23, #C23, #C23, #C23, #C23, #C23, #C23, #C23, #C30, #C30, #C30, #C30, #C23, #C23, #C23, #C23, #C23]
+  #C31 = <core::int*>[#C23, #C23, #C23, #C23, #C23, #C23, #C23, #C23, #C23, #C30, #C30, #C30, #C30, #C23, #C23, #C23, #C23, #C23, #C23, #C23]
   #C32 = "vm:ffi:struct-fields"
   #C33 = TypeLiteralConstant(self::Incomplete)
   #C34 = <core::Type>[#C33, #C33]
   #C35 = ffi::_FfiStructLayout {fieldTypes:#C34, packing:#C23}
   #C36 = core::pragma {name:#C32, options:#C35}
   #C37 = self::Incomplete {}
-  #C38 = <core::int*>[#C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4]
+  #C38 = <core::int*>[#C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4, #C4]
   #C39 = 8
-  #C40 = <core::int*>[#C23, #C23, #C23, #C23, #C23, #C23, #C23, #C23, #C23, #C39, #C39, #C39, #C39, #C23, #C23, #C23, #C23, #C23]
+  #C40 = <core::int*>[#C23, #C23, #C23, #C23, #C23, #C23, #C23, #C23, #C23, #C39, #C39, #C39, #C39, #C23, #C23, #C23, #C23, #C23, #C23, #C23]
   #C41 = 100
   #C42 = ffi::_FfiInlineArray {elementType:#C33, length:#C41}
   #C43 = <core::Type>[#C42]
@@ -166,7 +166,7 @@
   #C45 = core::pragma {name:#C32, options:#C44}
   #C46 = ffi::_ArraySize<ffi::NativeType*> {dimension1:#C41, dimension2:#C23, dimension3:#C23, dimension4:#C23, dimension5:#C23, dimensions:#C23}
   #C47 = 400
-  #C48 = <core::int*>[#C23, #C23, #C23, #C23, #C23, #C23, #C23, #C23, #C23, #C47, #C47, #C47, #C47, #C23, #C23, #C23, #C23, #C23]
+  #C48 = <core::int*>[#C23, #C23, #C23, #C23, #C23, #C23, #C23, #C23, #C23, #C47, #C47, #C47, #C47, #C23, #C23, #C23, #C23, #C23, #C23, #C23]
   #C49 = <core::int*>[]
   #C50 = core::_Override {}
   #C51 = self::_DummyAllocator {}
diff --git a/pkg/vm/testcases/transformations/ffi/compound_copies.dart.expect b/pkg/vm/testcases/transformations/ffi/compound_copies.dart.expect
index 98ee713..b1fe1cf 100644
--- a/pkg/vm/testcases/transformations/ffi/compound_copies.dart.expect
+++ b/pkg/vm/testcases/transformations/ffi/compound_copies.dart.expect
@@ -73,13 +73,13 @@
   #C6 = core::pragma {name:#C1, options:#C5}
   #C7 = ffi::Int64 {}
   #C8 = 0
-  #C9 = <core::int*>[#C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8]
+  #C9 = <core::int*>[#C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8, #C8]
   #C10 = 8
-  #C11 = <core::int*>[#C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10]
+  #C11 = <core::int*>[#C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10, #C10]
   #C12 = "vm:prefer-inline"
   #C13 = core::pragma {name:#C12, options:#C4}
   #C14 = 16
-  #C15 = <core::int*>[#C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14]
+  #C15 = <core::int*>[#C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14, #C14]
   #C16 = TypeLiteralConstant(self::Coordinate)
   #C17 = <core::Type>[#C16, #C2]
   #C18 = ffi::_FfiStructLayout {fieldTypes:#C17, packing:#C4}
diff --git a/pkg/vm/testcases/transformations/type_flow/transformer/ffi_struct_constructors.dart.expect b/pkg/vm/testcases/transformations/type_flow/transformer/ffi_struct_constructors.dart.expect
index c2c6faa..5927788 100644
--- a/pkg/vm/testcases/transformations/type_flow/transformer/ffi_struct_constructors.dart.expect
+++ b/pkg/vm/testcases/transformations/type_flow/transformer/ffi_struct_constructors.dart.expect
@@ -128,10 +128,10 @@
   #C9 = ffi::_FfiStructLayout {fieldTypes:#C8, packing:#C4}
   #C10 = core::pragma {name:#C1, options:#C9}
   #C11 = 0
-  #C12 = <core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
+  #C12 = <core::int*>[#C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11, #C11]
   #C13 = 4
   #C14 = 8
-  #C15 = <core::int*>[#C13, #C14, #C13, #C14, #C14, #C14, #C13, #C14, #C14, #C13, #C14, #C13, #C14, #C14, #C14, #C14, #C13, #C14]
+  #C15 = <core::int*>[#C13, #C14, #C13, #C14, #C14, #C14, #C13, #C14, #C14, #C13, #C14, #C13, #C14, #C13, #C14, #C14, #C14, #C14, #C13, #C14]
   #C16 = static-tearoff self::useStruct3
   #C17 = static-tearoff self::returnStruct7
   #C18 = 1
diff --git a/runtime/BUILD.gn b/runtime/BUILD.gn
index 8b3999a..a4c477a 100644
--- a/runtime/BUILD.gn
+++ b/runtime/BUILD.gn
@@ -112,6 +112,10 @@
     defines += [ "TARGET_ARCH_X64" ]
   } else if (dart_target_arch == "ia32" || dart_target_arch == "x86") {
     defines += [ "TARGET_ARCH_IA32" ]
+  } else if (dart_target_arch == "riscv32") {
+    defines += [ "TARGET_ARCH_RISCV32" ]
+  } else if (dart_target_arch == "riscv64") {
+    defines += [ "TARGET_ARCH_RISCV64" ]
   } else {
     print("Invalid dart_target_arch: $dart_target_arch")
     assert(false)
diff --git a/runtime/bin/elf_loader.cc b/runtime/bin/elf_loader.cc
index a5ff8c0..fb0e3a0 100644
--- a/runtime/bin/elf_loader.cc
+++ b/runtime/bin/elf_loader.cc
@@ -313,6 +313,8 @@
 #elif defined(TARGET_ARCH_ARM64)
   CHECK_ERROR(header_.machine == dart::elf::EM_AARCH64,
               "Architecture mismatch.");
+#elif defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+  CHECK_ERROR(header_.machine == dart::elf::EM_RISCV, "Architecture mismatch.");
 #else
 #error Unsupported architecture.
 #endif
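
Both RV32 and RV64 share a single ELF machine value, which is why one check
covers both target defines. A minimal sketch of the accepted shape
(illustrative only; kEmRiscv mirrors the dart::elf::EM_RISCV constant added
to runtime/platform/elf.h later in this change):

    #include <cstdint>

    constexpr uint16_t kEmRiscv = 243;  // dart::elf::EM_RISCV

    // True when an ELF header's e_machine matches a RISC-V build, RV32 or RV64.
    bool MachineMatchesRiscv(uint16_t e_machine) {
      return e_machine == kEmRiscv;
    }
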
diff --git a/runtime/bin/ffi_test/clobber_riscv32.S b/runtime/bin/ffi_test/clobber_riscv32.S
new file mode 100644
index 0000000..374c1d5
--- /dev/null
+++ b/runtime/bin/ffi_test/clobber_riscv32.S
@@ -0,0 +1,27 @@
+.text
+
+#if defined(__linux__) || defined(__FreeBSD__) /* HOST_OS_LINUX */
+.globl ClobberAndCall
+.type ClobberAndCall, @function
+ClobberAndCall:
+#else /* HOST_OS_MACOS */
+.globl _ClobberAndCall
+_ClobberAndCall:
+#endif
+
+li a0, 1
+li a1, 1
+li a2, 1
+li a3, 1
+li a4, 1
+li a5, 1
+li a6, 1
+li a7, 1
+li t0, 1
+li t1, 1
+li t2, 1
+li t3, 1
+li t4, 1
+li t5, 1
+li t6, 1
+ret
diff --git a/runtime/bin/ffi_test/clobber_riscv64.S b/runtime/bin/ffi_test/clobber_riscv64.S
new file mode 100644
index 0000000..374c1d5
--- /dev/null
+++ b/runtime/bin/ffi_test/clobber_riscv64.S
@@ -0,0 +1,27 @@
+.text
+
+#if defined(__linux__) || defined(__FreeBSD__) /* HOST_OS_LINUX */
+.globl ClobberAndCall
+.type ClobberAndCall, @function
+ClobberAndCall:
+#else /* HOST_OS_MACOS */
+.globl _ClobberAndCall
+_ClobberAndCall:
+#endif
+
+li a0, 1
+li a1, 1
+li a2, 1
+li a3, 1
+li a4, 1
+li a5, 1
+li a6, 1
+li a7, 1
+li t0, 1
+li t1, 1
+li t2, 1
+li t3, 1
+li t4, 1
+li t5, 1
+li t6, 1
+ret
diff --git a/runtime/bin/ffi_unit_test/BUILD.gn b/runtime/bin/ffi_unit_test/BUILD.gn
index 99396af..bad85d0 100644
--- a/runtime/bin/ffi_unit_test/BUILD.gn
+++ b/runtime/bin/ffi_unit_test/BUILD.gn
@@ -49,6 +49,14 @@
   defines = [ "TARGET_ARCH_X64" ]
 }
 
+config("define_target_arch_riscv32") {
+  defines = [ "TARGET_ARCH_RISCV32" ]
+}
+
+config("define_target_arch_riscv64") {
+  defines = [ "TARGET_ARCH_RISCV64" ]
+}
+
 config("define_target_os_android") {
   defines = [ "DART_TARGET_OS_ANDROID" ]
 }
@@ -167,6 +175,20 @@
   ]
 }
 
+build_run_ffi_unit_tests("run_ffi_unit_tests_riscv32_linux") {
+  extra_configs = [
+    ":define_target_arch_riscv32",
+    ":define_target_os_linux",
+  ]
+}
+
+build_run_ffi_unit_tests("run_ffi_unit_tests_riscv64_linux") {
+  extra_configs = [
+    ":define_target_arch_riscv64",
+    ":define_target_os_linux",
+  ]
+}
+
 group("run_ffi_unit_tests") {
   deps = [
     ":run_ffi_unit_tests_arm64_android",
@@ -179,6 +201,8 @@
     ":run_ffi_unit_tests_ia32_android",  # Emulator, no other test coverage.
     ":run_ffi_unit_tests_ia32_linux",
     ":run_ffi_unit_tests_ia32_win",
+    ":run_ffi_unit_tests_riscv32_linux",
+    ":run_ffi_unit_tests_riscv64_linux",
     ":run_ffi_unit_tests_x64_ios",  # Simulator, no other test coverage.
     ":run_ffi_unit_tests_x64_linux",
     ":run_ffi_unit_tests_x64_macos",
diff --git a/runtime/bin/gen_snapshot.cc b/runtime/bin/gen_snapshot.cc
index 6a24139..b3c5cbb 100644
--- a/runtime/bin/gen_snapshot.cc
+++ b/runtime/bin/gen_snapshot.cc
@@ -957,3 +957,8 @@
 int main(int argc, char** argv) {
   return dart::bin::main(argc, argv);
 }
+
+// TODO(riscv): Why is this missing from libc?
+#if defined(__riscv)
+char __libc_single_threaded = 0;
+#endif
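
The likely background, stated as an assumption rather than something this
patch confirms: glibc 2.32+ declares the symbol in <sys/single_threaded.h>
and libstdc++ from GCC 11 onward references it, so linking against a RISC-V
sysroot whose glibc predates 2.32 leaves the reference undefined; the
fallback definition above satisfies it. Roughly what newer glibc declares:

    // From <sys/single_threaded.h> in glibc >= 2.32 (paraphrased):
    extern char __libc_single_threaded;  // non-zero while single-threaded
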
diff --git a/runtime/bin/main.cc b/runtime/bin/main.cc
index cf76d5f..2e80b0a 100644
--- a/runtime/bin/main.cc
+++ b/runtime/bin/main.cc
@@ -1400,3 +1400,8 @@
   dart::bin::main(argc, argv);
   UNREACHABLE();
 }
+
+// TODO(riscv): Why is this missing from libc?
+#if defined(__riscv)
+char __libc_single_threaded = 0;
+#endif
diff --git a/runtime/bin/platform.h b/runtime/bin/platform.h
index 68723c2..4b5bb50 100644
--- a/runtime/bin/platform.h
+++ b/runtime/bin/platform.h
@@ -47,6 +47,10 @@
     return "ia32";
 #elif defined(HOST_ARCH_X64)
     return "x64";
+#elif defined(HOST_ARCH_RISCV32)
+    return "riscv32";
+#elif defined(HOST_ARCH_RISCV64)
+    return "riscv64";
 #else
 #error Architecture detection failed.
 #endif
diff --git a/runtime/bin/run_vm_tests.cc b/runtime/bin/run_vm_tests.cc
index d560dab..58d16b6 100644
--- a/runtime/bin/run_vm_tests.cc
+++ b/runtime/bin/run_vm_tests.cc
@@ -380,6 +380,7 @@
       /*shutdown_isolate=*/nullptr,
       /*cleanup_isolate=*/nullptr,
       /*cleanup_group=*/CleanupIsolateGroup,
+      /*thread_start=*/nullptr,
       /*thread_exit=*/nullptr, dart::bin::DartUtils::OpenFile,
       dart::bin::DartUtils::ReadFile, dart::bin::DartUtils::WriteFile,
       dart::bin::DartUtils::CloseFile, /*entropy_source=*/nullptr,
@@ -426,3 +427,8 @@
 int main(int argc, const char** argv) {
   dart::bin::Platform::Exit(dart::Main(argc, argv));
 }
+
+// TODO(riscv): Why is this missing from libc?
+#if defined(__riscv)
+char __libc_single_threaded = 0;
+#endif
diff --git a/runtime/include/dart_api.h b/runtime/include/dart_api.h
index 61c56f6..360de52 100644
--- a/runtime/include/dart_api.h
+++ b/runtime/include/dart_api.h
@@ -757,6 +757,15 @@
 typedef void (*Dart_IsolateGroupCleanupCallback)(void* isolate_group_data);
 
 /**
+ * A thread start callback function.
+ * This callback, provided by the embedder, is called after a thread in the
+ * vm thread pool starts.
+ * This function could be used to adjust thread priority or attach native
+ * resources to the thread.
+ */
+typedef void (*Dart_ThreadStartCallback)(void);
+
+/**
  * A thread death callback function.
  * This callback, provided by the embedder, is called before a thread in the
  * vm thread pool exits.
@@ -840,7 +849,7 @@
  * The current version of the Dart_InitializeFlags. Should be incremented every
  * time Dart_InitializeFlags changes in a binary incompatible way.
  */
-#define DART_INITIALIZE_PARAMS_CURRENT_VERSION (0x00000005)
+#define DART_INITIALIZE_PARAMS_CURRENT_VERSION (0x00000006)
 
 /** Forward declaration */
 struct Dart_CodeObserver;
@@ -966,6 +975,7 @@
    */
   Dart_IsolateGroupCleanupCallback cleanup_group;
 
+  Dart_ThreadStartCallback thread_start;
   Dart_ThreadExitCallback thread_exit;
   Dart_FileOpenCallback file_open;
   Dart_FileReadCallback file_read;
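
A minimal embedder sketch of wiring up the new callback; everything here
except the Dart API names themselves is hypothetical:

    #include "dart_api.h"

    static void OnVmThreadStart(void) {
      // E.g. adjust the thread's priority or attach native resources.
    }

    char* InitVmOrError(void) {
      Dart_InitializeParams params = {};
      params.version = DART_INITIALIZE_PARAMS_CURRENT_VERSION;  // now 0x6
      params.thread_start = OnVmThreadStart;  // new in params version 6
      // Snapshot buffers and the other callbacks are elided in this sketch.
      return Dart_Initialize(&params);  // NULL on success, else error text
    }
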
diff --git a/runtime/platform/elf.h b/runtime/platform/elf.h
index 8f58005..545e9e8 100644
--- a/runtime/platform/elf.h
+++ b/runtime/platform/elf.h
@@ -171,6 +171,7 @@
 static constexpr intptr_t EM_ARM = 40;
 static constexpr intptr_t EM_X86_64 = 62;
 static constexpr intptr_t EM_AARCH64 = 183;
+static constexpr intptr_t EM_RISCV = 243;
 
 static const intptr_t EV_CURRENT = 1;
 
diff --git a/runtime/platform/globals.h b/runtime/platform/globals.h
index 3891afc..28bf82c 100644
--- a/runtime/platform/globals.h
+++ b/runtime/platform/globals.h
@@ -206,6 +206,16 @@
 #elif defined(__aarch64__)
 #define HOST_ARCH_ARM64 1
 #define ARCH_IS_64_BIT 1
+#elif defined(__riscv)
+#if __SIZEOF_POINTER__ == 4
+#define HOST_ARCH_RISCV32 1
+#define ARCH_IS_32_BIT 1
+#elif __SIZEOF_POINTER__ == 8
+#define HOST_ARCH_RISCV64 1
+#define ARCH_IS_64_BIT 1
+#else
+#error Unknown XLEN
+#endif
 #else
 #error Architecture was not detected as supported by Dart.
 #endif
@@ -286,7 +296,8 @@
 #endif
 
 #if !defined(TARGET_ARCH_ARM) && !defined(TARGET_ARCH_X64) &&                  \
-    !defined(TARGET_ARCH_IA32) && !defined(TARGET_ARCH_ARM64)
+    !defined(TARGET_ARCH_IA32) && !defined(TARGET_ARCH_ARM64) &&               \
+    !defined(TARGET_ARCH_RISCV32) && !defined(TARGET_ARCH_RISCV64)
 // No target architecture was specified; pick the one matching the host architecture.
 #if defined(HOST_ARCH_ARM)
 #define TARGET_ARCH_ARM 1
@@ -296,14 +307,20 @@
 #define TARGET_ARCH_IA32 1
 #elif defined(HOST_ARCH_ARM64)
 #define TARGET_ARCH_ARM64 1
+#elif defined(HOST_ARCH_RISCV32)
+#define TARGET_ARCH_RISCV32 1
+#elif defined(HOST_ARCH_RISCV64)
+#define TARGET_ARCH_RISCV64 1
 #else
 #error Automatic target architecture detection failed.
 #endif
 #endif
 
-#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_ARM)
+#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_ARM) ||                   \
+    defined(TARGET_ARCH_RISCV32)
 #define TARGET_ARCH_IS_32_BIT 1
-#elif defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_ARM64)
+#elif defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_ARM64) ||                \
+    defined(TARGET_ARCH_RISCV64)
 #define TARGET_ARCH_IS_64_BIT 1
 #else
 #error Automatic target architecture detection failed.
@@ -315,11 +332,13 @@
 
 // Verify that host and target architectures match, we cannot
 // have a 64 bit Dart VM generating 32 bit code or vice-versa.
-#if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_ARM64)
+#if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_ARM64) ||                  \
+    defined(TARGET_ARCH_RISCV64)
 #if !defined(ARCH_IS_64_BIT) && !defined(FFI_UNIT_TESTS)
 #error Mismatched Host/Target architectures.
 #endif  // !defined(ARCH_IS_64_BIT) && !defined(FFI_UNIT_TESTS)
-#elif defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_ARM)
+#elif defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_ARM) ||                 \
+    defined(TARGET_ARCH_RISCV32)
 #if defined(HOST_ARCH_X64) && defined(TARGET_ARCH_ARM)
 // This is simarm_x64, the only case where a host/target architecture
 // mismatch is allowed, unless we're running FFI unit tests.
@@ -345,12 +364,18 @@
 #define USING_SIMULATOR 1
 #endif
 #endif
-
 #elif defined(TARGET_ARCH_ARM64)
 #if !defined(HOST_ARCH_ARM64)
 #define USING_SIMULATOR 1
 #endif
-
+#elif defined(TARGET_ARCH_RISCV32)
+#if !defined(HOST_ARCH_RISCV32)
+#define USING_SIMULATOR 1
+#endif
+#elif defined(TARGET_ARCH_RISCV64)
+#if !defined(HOST_ARCH_RISCV64)
+#define USING_SIMULATOR 1
+#endif
 #else
 #error Unknown architecture.
 #endif
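
The pointer-size probe above tracks the RISC-V XLEN, which the compilers
also expose directly. A one-line sanity check under that assumption (it
holds for the standard ILP32/LP64 RISC-V ABIs):

    #if defined(__riscv)
    // __riscv_xlen is predefined by GCC and Clang on RISC-V targets.
    static_assert(__SIZEOF_POINTER__ * 8 == __riscv_xlen,
                  "pointer width tracks XLEN on RISC-V");
    #endif
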
diff --git a/runtime/third_party/double-conversion/src/utils.h b/runtime/third_party/double-conversion/src/utils.h
index 51d5e61..c419a6c 100644
--- a/runtime/third_party/double-conversion/src/utils.h
+++ b/runtime/third_party/double-conversion/src/utils.h
@@ -67,16 +67,14 @@
 // the output of the division with the expected result. (Inlining must be
 // disabled.)
 // On Linux/x86, 89255e-22 != Div_double(89255.0/1e22)
-#if defined(_M_X64) || defined(__x86_64__) || \
-    defined(__ARMEL__) || defined(__avr32__) || \
-    defined(__hppa__) || defined(__ia64__) || \
-    defined(__mips__) || \
-    defined(__powerpc__) || defined(__ppc__) || defined(__ppc64__) || \
-    defined(_POWER) || defined(_ARCH_PPC) || defined(_ARCH_PPC64) || \
-    defined(__sparc__) || defined(__sparc) || defined(__s390__) || \
-    defined(__SH4__) || defined(__alpha__) || \
-    defined(_MIPS_ARCH_MIPS32R2) || \
-    defined(__AARCH64EL__) || defined(__aarch64__)
+#if defined(_M_X64) || defined(__x86_64__) || defined(__ARMEL__) ||            \
+    defined(__avr32__) || defined(__hppa__) || defined(__ia64__) ||            \
+    defined(__mips__) || defined(__powerpc__) || defined(__ppc__) ||           \
+    defined(__ppc64__) || defined(_POWER) || defined(_ARCH_PPC) ||             \
+    defined(_ARCH_PPC64) || defined(__sparc__) || defined(__sparc) ||          \
+    defined(__s390__) || defined(__SH4__) || defined(__alpha__) ||             \
+    defined(_MIPS_ARCH_MIPS32R2) || defined(__AARCH64EL__) ||                  \
+    defined(__aarch64__) || defined(__riscv)
 #define DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS 1
 #elif defined(__mc68000__)
 #undef DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS
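
The guard exists because x87-era targets keep intermediate quotients in
80-bit registers, so a runtime division can round differently from the
compile-time double constant. A sketch of the check the comment describes
(Div_double here is a hypothetical noinline wrapper, per the comment's
requirement that inlining be disabled):

    #include <cstdio>

    __attribute__((noinline)) static double Div_double(double x, double y) {
      return x / y;  // noinline keeps this out of constant folding
    }

    int main() {
      // Fails on 32-bit x86 without SSE2; holds on RISC-V and the other
      // targets listed above, which is why they set the macro.
      std::printf("%d\n", 89255e-22 == Div_double(89255.0, 1e22));
    }
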
diff --git a/runtime/tools/dartfuzz/dartfuzz_test.dart b/runtime/tools/dartfuzz/dartfuzz_test.dart
index b6e32b7..6cf216e 100644
--- a/runtime/tools/dartfuzz/dartfuzz_test.dart
+++ b/runtime/tools/dartfuzz/dartfuzz_test.dart
@@ -112,12 +112,16 @@
     if (mode.endsWith('debug-arm32')) return 'DebugSIMARM';
     if (mode.endsWith('debug-arm64')) return 'DebugSIMARM64';
     if (mode.endsWith('debug-arm64c')) return 'DebugSIMARM64C';
+    if (mode.endsWith('debug-riscv32')) return 'DebugSIMRISCV32';
+    if (mode.endsWith('debug-riscv64')) return 'DebugSIMRISCV64';
     if (mode.endsWith('ia32')) return 'ReleaseIA32';
     if (mode.endsWith('x64')) return 'ReleaseX64';
     if (mode.endsWith('x64c')) return 'ReleaseX64C';
     if (mode.endsWith('arm32')) return 'ReleaseSIMARM';
     if (mode.endsWith('arm64')) return 'ReleaseSIMARM64';
     if (mode.endsWith('arm64c')) return 'ReleaseSIMARM64C';
+    if (mode.endsWith('riscv32')) return 'ReleaseSIMRISCV32';
+    if (mode.endsWith('riscv64')) return 'ReleaseSIMRISCV64';
     throw ('unknown tag in mode: $mode');
   }
 
@@ -333,7 +337,8 @@
       ((mode1.contains('arm32') && mode2.contains('arm32')) ||
           (mode1.contains('arm64') && mode2.contains('arm64')) ||
           (mode1.contains('x64') && mode2.contains('x64')) ||
-          (mode1.contains('ia32') && mode2.contains('ia32')));
+          (mode1.contains('riscv32') && mode2.contains('riscv32')) ||
+          (mode1.contains('riscv64') && mode2.contains('riscv64')));
 
   bool ffiCapable(String mode1, String mode2) =>
       mode1.startsWith('jit') &&
@@ -676,12 +681,16 @@
     'jit-debug-arm32',
     'jit-debug-arm64',
     'jit-debug-arm64c',
+    'jit-debug-riscv32',
+    'jit-debug-riscv64',
     'jit-ia32',
     'jit-x64',
     'jit-x64c',
     'jit-arm32',
     'jit-arm64',
     'jit-arm64c',
+    'jit-riscv32',
+    'jit-riscv64',
     'aot-debug-x64',
     'aot-debug-x64c',
     'aot-x64',
@@ -694,9 +703,13 @@
     'aot-debug-arm32',
     'aot-debug-arm64',
     'aot-debug-arm64c',
+    'aot-debug-riscv32',
+    'aot-debug-riscv64',
     'aot-arm32',
     'aot-arm64',
     'aot-arm64c',
+    'aot-riscv32',
+    'aot-riscv64',
     // Too many divergences (due to arithmetic):
     'js-x64',
   ];
diff --git a/runtime/tools/run_clang_tidy.dart b/runtime/tools/run_clang_tidy.dart
index aa32b3b..84b66e8 100644
--- a/runtime/tools/run_clang_tidy.dart
+++ b/runtime/tools/run_clang_tidy.dart
@@ -79,22 +79,26 @@
   'runtime/platform/utils_linux.h',
   'runtime/platform/utils_macos.h',
   'runtime/platform/utils_win.h',
-  'runtime/vm/compiler/assembler/assembler_arm64.h',
   'runtime/vm/compiler/assembler/assembler_arm.h',
+  'runtime/vm/compiler/assembler/assembler_arm64.h',
   'runtime/vm/compiler/assembler/assembler_ia32.h',
+  'runtime/vm/compiler/assembler/assembler_riscv.h',
   'runtime/vm/compiler/assembler/assembler_x64.h',
   'runtime/vm/compiler/runtime_offsets_extracted.h',
-  'runtime/vm/constants_arm64.h',
   'runtime/vm/constants_arm.h',
+  'runtime/vm/constants_arm64.h',
   'runtime/vm/constants_ia32.h',
+  'runtime/vm/constants_riscv.h',
   'runtime/vm/constants_x64.h',
-  'runtime/vm/cpu_arm64.h',
   'runtime/vm/cpu_arm.h',
+  'runtime/vm/cpu_arm64.h',
   'runtime/vm/cpu_ia32.h',
+  'runtime/vm/cpu_riscv.h',
   'runtime/vm/cpu_x64.h',
-  'runtime/vm/instructions_arm64.h',
   'runtime/vm/instructions_arm.h',
+  'runtime/vm/instructions_arm64.h',
   'runtime/vm/instructions_ia32.h',
+  'runtime/vm/instructions_riscv.h',
   'runtime/vm/instructions_x64.h',
   'runtime/vm/os_thread_android.h',
   'runtime/vm/os_thread_fuchsia.h',
@@ -104,9 +108,11 @@
   'runtime/vm/regexp_assembler_bytecode_inl.h',
   'runtime/vm/simulator_arm64.h',
   'runtime/vm/simulator_arm.h',
-  'runtime/vm/stack_frame_arm64.h',
+  'runtime/vm/simulator_riscv.h',
   'runtime/vm/stack_frame_arm.h',
+  'runtime/vm/stack_frame_arm64.h',
   'runtime/vm/stack_frame_ia32.h',
+  'runtime/vm/stack_frame_riscv.h',
   'runtime/vm/stack_frame_x64.h',
 
   // Only available in special builds
diff --git a/runtime/vm/code_patcher_riscv.cc b/runtime/vm/code_patcher_riscv.cc
new file mode 100644
index 0000000..415e645
--- /dev/null
+++ b/runtime/vm/code_patcher_riscv.cc
@@ -0,0 +1,196 @@
+// Copyright (c) 2021, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/globals.h"  // Needed here to get TARGET_ARCH_RISCV.
+#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+
+#include "vm/code_patcher.h"
+#include "vm/cpu.h"
+#include "vm/instructions.h"
+#include "vm/object.h"
+
+namespace dart {
+
+class PoolPointerCall : public ValueObject {
+ public:
+  PoolPointerCall(uword pc, const Code& code)
+      : end_(pc), object_pool_(ObjectPool::Handle(code.GetObjectPool())) {
+    ASSERT(*reinterpret_cast<uint16_t*>(end_ - 2) == 0x9082);  // jalr ra
+    uint32_t load_entry = *reinterpret_cast<uint32_t*>(end_ - 6);
+#if XLEN == 32
+    ASSERT((load_entry == 0x00362083) ||  // lw ra, entry(code)
+           (load_entry == 0x00b62083));   // lw ra, unchecked_entry(code)
+#elif XLEN == 64
+    ASSERT((load_entry == 0x00763083) ||  // ld ra, entry(code)
+           (load_entry == 0x01763083));   // ld ra, unchecked_entry(code)
+#endif
+    InstructionPattern::DecodeLoadWordFromPool(end_ - 6, &reg_, &index_);
+  }
+
+  intptr_t pp_index() const { return index_; }
+
+  CodePtr Target() const {
+    return static_cast<CodePtr>(object_pool_.ObjectAt(pp_index()));
+  }
+
+  void SetTarget(const Code& target) const {
+    object_pool_.SetObjectAt(pp_index(), target);
+    // No need to flush the instruction cache, since the code is not modified.
+  }
+
+ private:
+  uword end_;
+  const ObjectPool& object_pool_;
+  Register reg_;
+  intptr_t index_;
+  DISALLOW_IMPLICIT_CONSTRUCTORS(PoolPointerCall);
+};
+
+CodePtr CodePatcher::GetStaticCallTargetAt(uword return_address,
+                                           const Code& code) {
+  ASSERT(code.ContainsInstructionAt(return_address));
+  PoolPointerCall call(return_address, code);
+  return call.Target();
+}
+
+void CodePatcher::PatchStaticCallAt(uword return_address,
+                                    const Code& code,
+                                    const Code& new_target) {
+  PatchPoolPointerCallAt(return_address, code, new_target);
+}
+
+void CodePatcher::PatchPoolPointerCallAt(uword return_address,
+                                         const Code& code,
+                                         const Code& new_target) {
+  ASSERT(code.ContainsInstructionAt(return_address));
+  PoolPointerCall call(return_address, code);
+  call.SetTarget(new_target);
+}
+
+void CodePatcher::InsertDeoptimizationCallAt(uword start) {
+  UNREACHABLE();
+}
+
+CodePtr CodePatcher::GetInstanceCallAt(uword return_address,
+                                       const Code& caller_code,
+                                       Object* data) {
+  ASSERT(caller_code.ContainsInstructionAt(return_address));
+  ICCallPattern call(return_address, caller_code);
+  if (data != NULL) {
+    *data = call.Data();
+  }
+  return call.TargetCode();
+}
+
+void CodePatcher::PatchInstanceCallAt(uword return_address,
+                                      const Code& caller_code,
+                                      const Object& data,
+                                      const Code& target) {
+  auto thread = Thread::Current();
+  thread->isolate_group()->RunWithStoppedMutators([&]() {
+    PatchInstanceCallAtWithMutatorsStopped(thread, return_address, caller_code,
+                                           data, target);
+  });
+}
+
+void CodePatcher::PatchInstanceCallAtWithMutatorsStopped(
+    Thread* thread,
+    uword return_address,
+    const Code& caller_code,
+    const Object& data,
+    const Code& target) {
+  ASSERT(caller_code.ContainsInstructionAt(return_address));
+  ICCallPattern call(return_address, caller_code);
+  call.SetData(data);
+  call.SetTargetCode(target);
+}
+
+FunctionPtr CodePatcher::GetUnoptimizedStaticCallAt(uword return_address,
+                                                    const Code& code,
+                                                    ICData* ic_data_result) {
+  ASSERT(code.ContainsInstructionAt(return_address));
+  ICCallPattern static_call(return_address, code);
+  ICData& ic_data = ICData::Handle();
+  ic_data ^= static_call.Data();
+  if (ic_data_result != NULL) {
+    *ic_data_result = ic_data.ptr();
+  }
+  return ic_data.GetTargetAt(0);
+}
+
+void CodePatcher::PatchSwitchableCallAt(uword return_address,
+                                        const Code& caller_code,
+                                        const Object& data,
+                                        const Code& target) {
+  auto thread = Thread::Current();
+  // Ensure all threads are suspended as we update data and target pair.
+  thread->isolate_group()->RunWithStoppedMutators([&]() {
+    PatchSwitchableCallAtWithMutatorsStopped(thread, return_address,
+                                             caller_code, data, target);
+  });
+}
+
+void CodePatcher::PatchSwitchableCallAtWithMutatorsStopped(
+    Thread* thread,
+    uword return_address,
+    const Code& caller_code,
+    const Object& data,
+    const Code& target) {
+  if (FLAG_precompiled_mode) {
+    BareSwitchableCallPattern call(return_address);
+    call.SetData(data);
+    call.SetTarget(target);
+  } else {
+    SwitchableCallPattern call(return_address, caller_code);
+    call.SetData(data);
+    call.SetTarget(target);
+  }
+}
+
+uword CodePatcher::GetSwitchableCallTargetEntryAt(uword return_address,
+                                                  const Code& caller_code) {
+  if (FLAG_precompiled_mode) {
+    BareSwitchableCallPattern call(return_address);
+    return call.target_entry();
+  } else {
+    SwitchableCallPattern call(return_address, caller_code);
+    return call.target_entry();
+  }
+}
+
+ObjectPtr CodePatcher::GetSwitchableCallDataAt(uword return_address,
+                                               const Code& caller_code) {
+  if (FLAG_precompiled_mode) {
+    BareSwitchableCallPattern call(return_address);
+    return call.data();
+  } else {
+    SwitchableCallPattern call(return_address, caller_code);
+    return call.data();
+  }
+}
+
+void CodePatcher::PatchNativeCallAt(uword return_address,
+                                    const Code& caller_code,
+                                    NativeFunction target,
+                                    const Code& trampoline) {
+  Thread::Current()->isolate_group()->RunWithStoppedMutators([&]() {
+    ASSERT(caller_code.ContainsInstructionAt(return_address));
+    NativeCallPattern call(return_address, caller_code);
+    call.set_target(trampoline);
+    call.set_native_function(target);
+  });
+}
+
+CodePtr CodePatcher::GetNativeCallAt(uword return_address,
+                                     const Code& caller_code,
+                                     NativeFunction* target) {
+  ASSERT(caller_code.ContainsInstructionAt(return_address));
+  NativeCallPattern call(return_address, caller_code);
+  *target = call.native_function();
+  return call.target();
+}
+
+}  // namespace dart
+
+#endif  // defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
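
Patching is indirect on RISC-V: the call sequence loads its target Code
through the object pool, so retargeting swaps a pool entry and never
rewrites instruction bytes (hence the "no need to flush the instruction
cache" comment above). Usage shape, assuming return_address sits just past
such a call inside code:

    // Read the current target, then redirect the call site (sketch).
    CodePtr old_target = CodePatcher::GetStaticCallTargetAt(return_address, code);
    CodePatcher::PatchStaticCallAt(return_address, code, new_target);
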
diff --git a/runtime/vm/code_patcher_riscv_test.cc b/runtime/vm/code_patcher_riscv_test.cc
new file mode 100644
index 0000000..e6d4480
--- /dev/null
+++ b/runtime/vm/code_patcher_riscv_test.cc
@@ -0,0 +1,68 @@
+// Copyright (c) 2021, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/globals.h"
+#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+
+#include "vm/code_patcher.h"
+#include "vm/compiler/assembler/assembler.h"
+#include "vm/dart_entry.h"
+#include "vm/instructions.h"
+#include "vm/native_entry.h"
+#include "vm/native_entry_test.h"
+#include "vm/runtime_entry.h"
+#include "vm/stub_code.h"
+#include "vm/symbols.h"
+#include "vm/unit_test.h"
+
+namespace dart {
+
+#define __ assembler->
+
+ASSEMBLER_TEST_GENERATE(IcDataAccess, assembler) {
+  Thread* thread = Thread::Current();
+  const String& class_name = String::Handle(Symbols::New(thread, "ownerClass"));
+  const Script& script = Script::Handle();
+  const Class& owner_class = Class::Handle(Class::New(
+      Library::Handle(), class_name, script, TokenPosition::kNoSource));
+  const String& function_name =
+      String::Handle(Symbols::New(thread, "callerFunction"));
+  const FunctionType& signature = FunctionType::ZoneHandle(FunctionType::New());
+  const Function& function = Function::Handle(Function::New(
+      signature, function_name, UntaggedFunction::kRegularFunction, true, false,
+      false, false, false, owner_class, TokenPosition::kNoSource));
+
+  const String& target_name =
+      String::Handle(Symbols::New(thread, "targetFunction"));
+  const intptr_t kTypeArgsLen = 0;
+  const intptr_t kNumArgs = 1;
+  const Array& args_descriptor = Array::Handle(ArgumentsDescriptor::NewBoxed(
+      kTypeArgsLen, kNumArgs, Object::null_array()));
+  const ICData& ic_data = ICData::ZoneHandle(ICData::New(
+      function, target_name, args_descriptor, 15, 1, ICData::kInstance));
+  const Code& stub = StubCode::OneArgCheckInlineCache();
+
+  // Code is generated, but not executed. Just parsed with CodePatcher.
+  __ set_constant_pool_allowed(true);  // Uninitialized pp is OK.
+  __ LoadUniqueObject(IC_DATA_REG, ic_data);
+  __ LoadUniqueObject(CODE_REG, stub);
+  __ Call(compiler::FieldAddress(
+      CODE_REG, Code::entry_point_offset(Code::EntryKind::kMonomorphic)));
+  __ ret();
+}
+
+ASSEMBLER_TEST_RUN(IcDataAccess, test) {
+  uword end = test->payload_start() + test->code().Size();
+  uword return_address = end - CInstr::kInstrSize;
+  ICData& ic_data = ICData::Handle();
+  CodePatcher::GetInstanceCallAt(return_address, test->code(), &ic_data);
+  EXPECT_STREQ("targetFunction",
+               String::Handle(ic_data.target_name()).ToCString());
+  EXPECT_EQ(1, ic_data.NumArgsTested());
+  EXPECT_EQ(0, ic_data.NumberOfChecks());
+}
+
+}  // namespace dart
+
+#endif  // defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
diff --git a/runtime/vm/compiler/aot/aot_call_specializer.cc b/runtime/vm/compiler/aot/aot_call_specializer.cc
index a8e2f04..d4aa2e9 100644
--- a/runtime/vm/compiler/aot/aot_call_specializer.cc
+++ b/runtime/vm/compiler/aot/aot_call_specializer.cc
@@ -509,7 +509,7 @@
         if (replacement != nullptr) break;
         FALL_THROUGH;
       case Token::kTRUNCDIV:
-#if !defined(TARGET_ARCH_X64) && !defined(TARGET_ARCH_ARM64)
+#if !defined(TARGET_ARCH_IS_64_BIT)
         // TODO(ajcbik): 32-bit archs too?
         break;
 #else
diff --git a/runtime/vm/compiler/aot/precompiler.cc b/runtime/vm/compiler/aot/precompiler.cc
index 143546b..0a66815 100644
--- a/runtime/vm/compiler/aot/precompiler.cc
+++ b/runtime/vm/compiler/aot/precompiler.cc
@@ -3110,7 +3110,7 @@
   // true. far_branch_level is always 0 on ia32 and x64.
   bool done = false;
   // volatile because the variable may be clobbered by a longjmp.
-  volatile bool use_far_branches = false;
+  volatile intptr_t far_branch_level = 0;
   SpeculativeInliningPolicy speculative_policy(
       true, FLAG_max_speculative_inlining_attempts);
 
@@ -3198,7 +3198,7 @@
       // (See TryCommitToParent invocation below).
       compiler::ObjectPoolBuilder object_pool_builder(
           precompiler_->global_object_pool_builder());
-      compiler::Assembler assembler(&object_pool_builder, use_far_branches);
+      compiler::Assembler assembler(&object_pool_builder, far_branch_level);
 
       CodeStatistics* function_stats = NULL;
       if (FLAG_print_instruction_stats) {
@@ -3273,8 +3273,8 @@
         // Compilation failed due to an out of range branch offset in the
         // assembler. We try again (done = false) with a higher far-branch level.
         done = false;
-        ASSERT(!use_far_branches);
-        use_far_branches = true;
+        RELEASE_ASSERT(far_branch_level < 2);
+        far_branch_level++;
       } else if (error.ptr() == Object::speculative_inlining_error().ptr()) {
         // The return value of setjmp is the deopt id of the check instruction
         // that caused the bailout.
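
The retry loop now widens branches in steps instead of flipping a single
boolean, presumably because RISC-V has more than one branch range to fall
back through (conditional branches reach +/-4 KiB, jal +/-1 MiB). Reduced to
a sketch, with BailedOutWithBranchOffsetError as a hypothetical stand-in for
the bailout path shown above:

    volatile intptr_t far_branch_level = 0;  // volatile: survives longjmp
    bool done = false;
    while (!done) {
      done = true;
      compiler::Assembler assembler(&object_pool_builder, far_branch_level);
      if (BailedOutWithBranchOffsetError()) {  // hypothetical helper
        done = false;
        RELEASE_ASSERT(far_branch_level < 2);
        far_branch_level++;
      }
    }
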
diff --git a/runtime/vm/compiler/asm_intrinsifier_arm.cc b/runtime/vm/compiler/asm_intrinsifier_arm.cc
index 3b8931c..77cb516 100644
--- a/runtime/vm/compiler/asm_intrinsifier_arm.cc
+++ b/runtime/vm/compiler/asm_intrinsifier_arm.cc
@@ -15,10 +15,12 @@
 namespace compiler {
 
 // When entering intrinsics code:
+// PP: Caller's ObjectPool in JIT / global ObjectPool in AOT
+// CODE_REG: Callee's Code in JIT / not passed in AOT
 // R4: Arguments descriptor
 // LR: Return address
-// The R4 register can be destroyed only if there is no slow-path, i.e.
-// if the intrinsified method always executes a return.
+// The R4 and CODE_REG registers can be destroyed only if there is no slow-path,
+// i.e. if the intrinsified method always executes a return.
 // The FP register should not be modified, because it is used by the profiler.
 // The PP and THR registers (see constants_arm.h) must be preserved.
 
diff --git a/runtime/vm/compiler/asm_intrinsifier_arm64.cc b/runtime/vm/compiler/asm_intrinsifier_arm64.cc
index 4056cbf..600e28c 100644
--- a/runtime/vm/compiler/asm_intrinsifier_arm64.cc
+++ b/runtime/vm/compiler/asm_intrinsifier_arm64.cc
@@ -15,10 +15,12 @@
 namespace compiler {
 
 // When entering intrinsics code:
+// PP: Caller's ObjectPool in JIT / global ObjectPool in AOT
+// CODE_REG: Callee's Code in JIT / not passed in AOT
 // R4: Arguments descriptor
 // LR: Return address
-// The R4 register can be destroyed only if there is no slow-path, i.e.
-// if the intrinsified method always executes a return.
+// The R4 and CODE_REG registers can be destroyed only if there is no slow-path,
+// i.e. if the intrinsified method always executes a return.
 // The FP register should not be modified, because it is used by the profiler.
 // The PP and THR registers (see constants_arm64.h) must be preserved.
 
diff --git a/runtime/vm/compiler/asm_intrinsifier_riscv.cc b/runtime/vm/compiler/asm_intrinsifier_riscv.cc
new file mode 100644
index 0000000..bd85776
--- /dev/null
+++ b/runtime/vm/compiler/asm_intrinsifier_riscv.cc
@@ -0,0 +1,725 @@
+// Copyright (c) 2021, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/globals.h"  // Needed here to get TARGET_ARCH_RISCV.
+#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+
+#define SHOULD_NOT_INCLUDE_RUNTIME
+
+#include "vm/class_id.h"
+#include "vm/compiler/asm_intrinsifier.h"
+#include "vm/compiler/assembler/assembler.h"
+
+namespace dart {
+namespace compiler {
+
+// When entering intrinsics code:
+// PP: Caller's ObjectPool in JIT / global ObjectPool in AOT
+// CODE_REG: Callee's Code in JIT / not passed in AOT
+// S4: Arguments descriptor
+// RA: Return address
+// The S4 and CODE_REG registers can be destroyed only if there is no slow-path,
+// i.e. if the intrinsified method always executes a return.
+// The FP register should not be modified, because it is used by the profiler.
+// The PP and THR registers (see constants_riscv.h) must be preserved.
+
+#define __ assembler->
+
+// Allocate a GrowableObjectArray using the specified backing array.
+// On stack: type argument (+1), data (+0).
+void AsmIntrinsifier::GrowableArray_Allocate(Assembler* assembler,
+                                             Label* normal_ir_body) {
+  // The newly allocated object is returned in A0.
+  const intptr_t kTypeArgumentsOffset = 1 * target::kWordSize;
+  const intptr_t kArrayOffset = 0 * target::kWordSize;
+
+  // Try allocating in new space.
+  const Class& cls = GrowableObjectArrayClass();
+  __ TryAllocate(cls, normal_ir_body, Assembler::kFarJump, A0, A1);
+
+  // Store backing array object in growable array object.
+  __ lx(A1, Address(SP, kArrayOffset));  // Data argument.
+  // A0 is new, no barrier needed.
+  __ StoreCompressedIntoObjectNoBarrier(
+      A0, FieldAddress(A0, target::GrowableObjectArray::data_offset()), A1);
+
+  // A0: new growable array object start as a tagged pointer.
+  // Store the type argument field in the growable array object.
+  __ lx(A1, Address(SP, kTypeArgumentsOffset));  // Type argument.
+  __ StoreCompressedIntoObjectNoBarrier(
+      A0,
+      FieldAddress(A0, target::GrowableObjectArray::type_arguments_offset()),
+      A1);
+
+  // Set the length field in the growable array object to 0.
+  __ StoreCompressedIntoObjectNoBarrier(
+      A0, FieldAddress(A0, target::GrowableObjectArray::length_offset()), ZR);
+  __ ret();  // Returns the newly allocated object in A0.
+
+  __ Bind(normal_ir_body);
+}
+
+// Loads the arguments from the stack into A0 and A1.
+// Tests whether they are Smis; jumps to not_smi if either is not.
+static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) {
+  __ lx(A0, Address(SP, +1 * target::kWordSize));
+  __ lx(A1, Address(SP, +0 * target::kWordSize));
+  __ or_(TMP, A0, A1);
+  __ BranchIfNotSmi(TMP, not_smi, Assembler::kNearJump);
+}
+
+void AsmIntrinsifier::Integer_shl(Assembler* assembler, Label* normal_ir_body) {
+  const Register left = A0;
+  const Register right = A1;
+  const Register result = A0;
+
+  TestBothArgumentsSmis(assembler, normal_ir_body);
+  __ CompareImmediate(right, target::ToRawSmi(target::kSmiBits),
+                      compiler::kObjectBytes);
+  __ BranchIf(CS, normal_ir_body, Assembler::kNearJump);
+
+  __ SmiUntag(right);
+  __ sll(TMP, left, right);
+  __ sra(TMP2, TMP, right);
+  __ bne(TMP2, left, normal_ir_body, Assembler::kNearJump);
+  __ mv(result, TMP);
+  __ ret();
+
+  __ Bind(normal_ir_body);
+}
+
+static void CompareIntegers(Assembler* assembler,
+                            Label* normal_ir_body,
+                            Condition true_condition) {
+  Label true_label;
+  TestBothArgumentsSmis(assembler, normal_ir_body);
+  __ CompareObjectRegisters(A0, A1);
+  __ BranchIf(true_condition, &true_label, Assembler::kNearJump);
+  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
+  __ ret();
+  __ Bind(&true_label);
+  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
+  __ ret();
+
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::Integer_lessThan(Assembler* assembler,
+                                       Label* normal_ir_body) {
+  CompareIntegers(assembler, normal_ir_body, LT);
+}
+
+void AsmIntrinsifier::Integer_greaterThan(Assembler* assembler,
+                                          Label* normal_ir_body) {
+  CompareIntegers(assembler, normal_ir_body, GT);
+}
+
+void AsmIntrinsifier::Integer_lessEqualThan(Assembler* assembler,
+                                            Label* normal_ir_body) {
+  CompareIntegers(assembler, normal_ir_body, LE);
+}
+
+void AsmIntrinsifier::Integer_greaterEqualThan(Assembler* assembler,
+                                               Label* normal_ir_body) {
+  CompareIntegers(assembler, normal_ir_body, GE);
+}
+
+// This is called for Smi and Mint receivers. The right argument
+// can be Smi, Mint or double.
+void AsmIntrinsifier::Integer_equalToInteger(Assembler* assembler,
+                                             Label* normal_ir_body) {
+  Label true_label, check_for_mint;
+  // For integer receiver '===' check first.
+  __ lx(A0, Address(SP, 1 * target::kWordSize));
+  __ lx(A1, Address(SP, 0 * target::kWordSize));
+  __ CompareObjectRegisters(A0, A1);
+  __ BranchIf(EQ, &true_label, Assembler::kNearJump);
+
+  __ or_(TMP, A0, A1);
+  __ BranchIfNotSmi(TMP, &check_for_mint, Assembler::kNearJump);
+  // If A0 or A1 is not a Smi, do Mint checks.
+
+  // Both arguments are smi, '===' is good enough.
+  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
+  __ ret();
+  __ Bind(&true_label);
+  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
+  __ ret();
+
+  // At least one of the arguments was not Smi.
+  Label receiver_not_smi;
+  __ Bind(&check_for_mint);
+
+  __ BranchIfNotSmi(A0, &receiver_not_smi,
+                    Assembler::kNearJump);  // Check receiver.
+
+  // Left (receiver) is Smi, return false if right is not Double.
+  // Note that an instance of Mint never contains a value that can be
+  // represented by Smi.
+
+  __ CompareClassId(A1, kDoubleCid, TMP);
+  __ BranchIf(EQ, normal_ir_body, Assembler::kNearJump);
+  __ LoadObject(A0,
+                CastHandle<Object>(FalseObject()));  // Smi == Mint -> false.
+  __ ret();
+
+  __ Bind(&receiver_not_smi);
+  // A0: receiver.
+
+  __ CompareClassId(A0, kMintCid, TMP);
+  __ BranchIf(NE, normal_ir_body, Assembler::kNearJump);
+  // Receiver is Mint, return false if right is Smi.
+  __ BranchIfNotSmi(A1, normal_ir_body, Assembler::kNearJump);
+  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
+  __ ret();
+  // TODO(srdjan): Implement Mint == Mint comparison.
+
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::Integer_equal(Assembler* assembler,
+                                    Label* normal_ir_body) {
+  Integer_equalToInteger(assembler, normal_ir_body);
+}
+
+void AsmIntrinsifier::Smi_bitLength(Assembler* assembler,
+                                    Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::Bigint_lsh(Assembler* assembler, Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::Bigint_rsh(Assembler* assembler, Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::Bigint_absAdd(Assembler* assembler,
+                                    Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::Bigint_absSub(Assembler* assembler,
+                                    Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::Bigint_mulAdd(Assembler* assembler,
+                                    Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::Bigint_sqrAdd(Assembler* assembler,
+                                    Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::Bigint_estimateQuotientDigit(Assembler* assembler,
+                                                   Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::Montgomery_mulMod(Assembler* assembler,
+                                        Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+// FA0: left
+// FA1: right
+static void PrepareDoubleOp(Assembler* assembler, Label* normal_ir_body) {
+  Label double_op;
+  __ lx(A0, Address(SP, 1 * target::kWordSize));  // Left
+  __ lx(A1, Address(SP, 0 * target::kWordSize));  // Right
+
+  __ fld(FA0, FieldAddress(A0, target::Double::value_offset()));
+
+  __ SmiUntag(TMP, A1);
+#if XLEN == 32
+  __ fcvtdw(FA1, TMP);
+#else
+  __ fcvtdl(FA1, TMP);
+#endif
+  __ BranchIfSmi(A1, &double_op, Assembler::kNearJump);
+  __ CompareClassId(A1, kDoubleCid, TMP);
+  __ BranchIf(NE, normal_ir_body, Assembler::kNearJump);
+  __ fld(FA1, FieldAddress(A1, target::Double::value_offset()));
+
+  __ Bind(&double_op);
+}
+
+void AsmIntrinsifier::Double_greaterThan(Assembler* assembler,
+                                         Label* normal_ir_body) {
+  Label true_label;
+  PrepareDoubleOp(assembler, normal_ir_body);
+  __ fltd(TMP, FA1, FA0);
+  __ bnez(TMP, &true_label, Assembler::kNearJump);
+  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
+  __ ret();
+  __ Bind(&true_label);
+  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
+  __ ret();
+
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::Double_greaterEqualThan(Assembler* assembler,
+                                              Label* normal_ir_body) {
+  Label true_label;
+  PrepareDoubleOp(assembler, normal_ir_body);
+  __ fled(TMP, FA1, FA0);
+  __ bnez(TMP, &true_label, Assembler::kNearJump);
+  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
+  __ ret();
+  __ Bind(&true_label);
+  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
+  __ ret();
+
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::Double_lessThan(Assembler* assembler,
+                                      Label* normal_ir_body) {
+  Label true_label;
+  PrepareDoubleOp(assembler, normal_ir_body);
+  __ fltd(TMP, FA0, FA1);
+  __ bnez(TMP, &true_label, Assembler::kNearJump);
+  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
+  __ ret();
+  __ Bind(&true_label);
+  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
+  __ ret();
+
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::Double_equal(Assembler* assembler,
+                                   Label* normal_ir_body) {
+  Label true_label;
+  PrepareDoubleOp(assembler, normal_ir_body);
+  __ feqd(TMP, FA0, FA1);
+  __ bnez(TMP, &true_label, Assembler::kNearJump);
+  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
+  __ ret();
+  __ Bind(&true_label);
+  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
+  __ ret();
+
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::Double_lessEqualThan(Assembler* assembler,
+                                           Label* normal_ir_body) {
+  Label true_label;
+  PrepareDoubleOp(assembler, normal_ir_body);
+  __ fled(TMP, FA0, FA1);
+  __ bnez(TMP, &true_label, Assembler::kNearJump);
+  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
+  __ ret();
+  __ Bind(&true_label);
+  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
+  __ ret();
+
+  __ Bind(normal_ir_body);
+}
+
+// Expects left argument to be double (receiver). Right argument is unknown.
+// Both arguments are on stack.
+static void DoubleArithmeticOperations(Assembler* assembler,
+                                       Label* normal_ir_body,
+                                       Token::Kind kind) {
+  PrepareDoubleOp(assembler, normal_ir_body);
+  switch (kind) {
+    case Token::kADD:
+      __ faddd(FA0, FA0, FA1);
+      break;
+    case Token::kSUB:
+      __ fsubd(FA0, FA0, FA1);
+      break;
+    case Token::kMUL:
+      __ fmuld(FA0, FA0, FA1);
+      break;
+    case Token::kDIV:
+      __ fdivd(FA0, FA0, FA1);
+      break;
+    default:
+      UNREACHABLE();
+  }
+  const Class& double_class = DoubleClass();
+  __ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump, A0, TMP);
+  __ StoreDFieldToOffset(FA0, A0, target::Double::value_offset());
+  __ ret();
+
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::Double_add(Assembler* assembler, Label* normal_ir_body) {
+  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kADD);
+}
+
+void AsmIntrinsifier::Double_mul(Assembler* assembler, Label* normal_ir_body) {
+  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kMUL);
+}
+
+void AsmIntrinsifier::Double_sub(Assembler* assembler, Label* normal_ir_body) {
+  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kSUB);
+}
+
+void AsmIntrinsifier::Double_div(Assembler* assembler, Label* normal_ir_body) {
+  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kDIV);
+}
+
+// Left is double, right is integer (Mint or Smi)
+void AsmIntrinsifier::Double_mulFromInteger(Assembler* assembler,
+                                            Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::DoubleFromInteger(Assembler* assembler,
+                                        Label* normal_ir_body) {
+  __ lx(A0, Address(SP, 0 * target::kWordSize));
+  __ BranchIfNotSmi(A0, normal_ir_body, Assembler::kNearJump);
+  // Is Smi.
+  __ SmiUntag(A0);
+#if XLEN == 32
+  __ fcvtdw(FA0, A0);
+#else
+  __ fcvtdl(FA0, A0);
+#endif
+  const Class& double_class = DoubleClass();
+  __ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump, A0, TMP);
+  __ StoreDFieldToOffset(FA0, A0, target::Double::value_offset());
+  __ ret();
+  __ Bind(normal_ir_body);
+}
+
+static void DoubleIsClass(Assembler* assembler, intx_t fclass) {
+  Label true_label;
+  __ lx(A0, Address(SP, 0 * target::kWordSize));
+  __ LoadDFieldFromOffset(FA0, A0, target::Double::value_offset());
+  __ fclassd(TMP, FA0);
+  __ andi(TMP, TMP, fclass);
+  __ bnez(TMP, &true_label, Assembler::kNearJump);
+  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
+  __ ret();
+  __ Bind(&true_label);
+  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
+  __ ret();
+}
+
+void AsmIntrinsifier::Double_getIsNaN(Assembler* assembler,
+                                      Label* normal_ir_body) {
+  DoubleIsClass(assembler, kFClassSignallingNan | kFClassQuietNan);
+}
+
+void AsmIntrinsifier::Double_getIsInfinite(Assembler* assembler,
+                                           Label* normal_ir_body) {
+  DoubleIsClass(assembler, kFClassNegInfinity | kFClassPosInfinity);
+}
+
+void AsmIntrinsifier::Double_getIsNegative(Assembler* assembler,
+                                           Label* normal_ir_body) {
+  DoubleIsClass(assembler, kFClassNegInfinity | kFClassNegNormal |
+                               kFClassNegSubnormal | kFClassNegZero);
+}
+
+void AsmIntrinsifier::Double_hashCode(Assembler* assembler,
+                                      Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+//    var state = ((_A * (_state[kSTATE_LO])) + _state[kSTATE_HI]) & _MASK_64;
+//    _state[kSTATE_LO] = state & _MASK_32;
+//    _state[kSTATE_HI] = state >> 32;
+void AsmIntrinsifier::Random_nextState(Assembler* assembler,
+                                       Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::ObjectEquals(Assembler* assembler,
+                                   Label* normal_ir_body) {
+  Label true_label;
+  __ lx(A0, Address(SP, 1 * target::kWordSize));
+  __ lx(A1, Address(SP, 0 * target::kWordSize));
+  __ beq(A0, A1, &true_label, Assembler::kNearJump);
+  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
+  __ ret();
+  __ Bind(&true_label);
+  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
+  __ ret();
+}
+
+// Return type quickly for simple types (not parameterized and not signature).
+void AsmIntrinsifier::ObjectRuntimeType(Assembler* assembler,
+                                        Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler,
+                                                Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::String_getHashCode(Assembler* assembler,
+                                         Label* normal_ir_body) {
+  __ lx(A0, Address(SP, 0 * target::kWordSize));
+#if XLEN == 32
+  // Smi field.
+  __ lw(A0, FieldAddress(A0, target::String::hash_offset()));
+#else
+  // uint32_t field in header.
+  __ lwu(A0, FieldAddress(A0, target::String::hash_offset()));
+  __ SmiTag(A0);
+#endif
+  __ beqz(A0, normal_ir_body, Assembler::kNearJump);
+  __ ret();
+
+  // Hash not yet computed.
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::Type_getHashCode(Assembler* assembler,
+                                       Label* normal_ir_body) {
+  __ lx(A0, Address(SP, 0 * target::kWordSize));
+  __ LoadCompressed(A0, FieldAddress(A0, target::Type::hash_offset()));
+  __ beqz(A0, normal_ir_body, Assembler::kNearJump);
+  __ ret();
+  // Hash not yet computed.
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::Type_equality(Assembler* assembler,
+                                    Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::FunctionType_getHashCode(Assembler* assembler,
+                                               Label* normal_ir_body) {
+  __ lx(A0, Address(SP, 0 * target::kWordSize));
+  __ LoadCompressed(A0, FieldAddress(A0, target::FunctionType::hash_offset()));
+  __ beqz(A0, normal_ir_body, Assembler::kNearJump);
+  __ ret();
+  // Hash not yet computed.
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::FunctionType_equality(Assembler* assembler,
+                                            Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+// Keep in sync with Instance::IdentityHashCode.
+// Note int and double never reach here because they override _identityHashCode.
+// Special cases are also not needed for null or bool because they were pre-set
+// during VM isolate finalization.
+void AsmIntrinsifier::Object_getHash(Assembler* assembler,
+                                     Label* normal_ir_body) {
+#if XLEN == 32
+  UNREACHABLE();
+#else
+  Label not_yet_computed;
+  __ lx(A0, Address(SP, 0 * target::kWordSize));  // Object.
+  __ lwu(A0, FieldAddress(
+                 A0, target::Object::tags_offset() +
+                         target::UntaggedObject::kHashTagPos / kBitsPerByte));
+  __ beqz(A0, &not_yet_computed);
+  __ SmiTag(A0);
+  __ ret();
+
+  __ Bind(&not_yet_computed);
+  __ LoadFromOffset(A1, THR, target::Thread::random_offset());
+  __ AndImmediate(T2, A1, 0xffffffff);  // state_lo
+  __ srli(T3, A1, 32);                  // state_hi
+  __ LoadImmediate(A1, 0xffffda61);     // A
+  __ mul(A1, A1, T2);
+  __ add(A1, A1, T3);  // new_state = (A * state_lo) + state_hi
+  __ StoreToOffset(A1, THR, target::Thread::random_offset());
+  __ AndImmediate(A1, A1, 0x3fffffff);
+  __ beqz(A1, &not_yet_computed);
+
+  __ lx(A0, Address(SP, 0 * target::kWordSize));  // Object
+  __ subi(A0, A0, kHeapObjectTag);
+  __ slli(T3, A1, target::UntaggedObject::kHashTagPos);
+
+  Label retry, already_set_in_t4;
+  __ Bind(&retry);
+  __ lr(T2, Address(A0, 0));
+  __ srli(T4, T2, target::UntaggedObject::kHashTagPos);
+  __ bnez(T4, &already_set_in_t4);
+  __ or_(T2, T2, T3);
+  __ sc(T4, T2, Address(A0, 0));
+  __ bnez(T4, &retry);
+  // Fall-through with A1 containing new hash value (untagged).
+  __ SmiTag(A0, A1);
+  __ ret();
+  __ Bind(&already_set_in_t4);
+  __ SmiTag(A0, T4);
+  __ ret();
+#endif
+}
+
+void GenerateSubstringMatchesSpecialization(Assembler* assembler,
+                                            intptr_t receiver_cid,
+                                            intptr_t other_cid,
+                                            Label* return_true,
+                                            Label* return_false) {
+  UNIMPLEMENTED();
+}
+
+// bool _substringMatches(int start, String other)
+// This intrinsic handles a OneByteString or TwoByteString receiver with a
+// OneByteString other.
+void AsmIntrinsifier::StringBaseSubstringMatches(Assembler* assembler,
+                                                 Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::StringBaseCharAt(Assembler* assembler,
+                                       Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::StringBaseIsEmpty(Assembler* assembler,
+                                        Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::OneByteString_getHashCode(Assembler* assembler,
+                                                Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+// Arg0: OneByteString (receiver).
+// Arg1: Start index as Smi.
+// Arg2: End index as Smi.
+// The indexes must be valid.
+void AsmIntrinsifier::OneByteString_substringUnchecked(Assembler* assembler,
+                                                       Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::WriteIntoOneByteString(Assembler* assembler,
+                                             Label* normal_ir_body) {
+  __ lx(A0, Address(SP, 2 * target::kWordSize));  // OneByteString.
+  __ lx(A1, Address(SP, 1 * target::kWordSize));  // Index.
+  __ lx(A2, Address(SP, 0 * target::kWordSize));  // Value.
+  __ SmiUntag(A1);
+  __ SmiUntag(A2);
+  __ add(A1, A1, A0);
+  __ sb(A2, FieldAddress(A1, target::OneByteString::data_offset()));
+  __ ret();
+}
+
+void AsmIntrinsifier::WriteIntoTwoByteString(Assembler* assembler,
+                                             Label* normal_ir_body) {
+  __ lx(A0, Address(SP, 2 * target::kWordSize));  // TwoByteString.
+  __ lx(A1, Address(SP, 1 * target::kWordSize));  // Index.
+  __ lx(A2, Address(SP, 0 * target::kWordSize));  // Value.
+  // Untag index and multiply by element size -> no-op.
+  __ SmiUntag(A2);
+  __ add(A1, A1, A0);
+  __ sh(A2, FieldAddress(A1, target::TwoByteString::data_offset()));
+  __ ret();
+}
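+
+// Editor's note (worked example): the "no-op" comment above holds because a
+// Smi stores integer i as (i << 1) and a TwoByteString element is 2 bytes
+// wide, so the tagged index already equals the byte offset:
+//   i = 3  ->  Smi encoding 0b110 = 6  ->  byte offset 3 * 2 = 6.
+// Hence only the value in A2 is untagged; the index in A1 is used as-is.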
+
+void AsmIntrinsifier::AllocateOneByteString(Assembler* assembler,
+                                            Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::AllocateTwoByteString(Assembler* assembler,
+                                            Label* normal_ir_body) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+// TODO(srdjan): Add combinations (one-byte/two-byte/external strings).
+static void StringEquality(Assembler* assembler,
+                           Label* normal_ir_body,
+                           intptr_t string_cid) {
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::OneByteString_equality(Assembler* assembler,
+                                             Label* normal_ir_body) {
+  StringEquality(assembler, normal_ir_body, kOneByteStringCid);
+}
+
+void AsmIntrinsifier::TwoByteString_equality(Assembler* assembler,
+                                             Label* normal_ir_body) {
+  StringEquality(assembler, normal_ir_body, kTwoByteStringCid);
+}
+
+void AsmIntrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler,
+                                                   Label* normal_ir_body,
+                                                   bool sticky) {
+  if (FLAG_interpret_irregexp) return;
+
+  // TODO(riscv)
+  __ Bind(normal_ir_body);
+}
+
+void AsmIntrinsifier::UserTag_defaultTag(Assembler* assembler,
+                                         Label* normal_ir_body) {
+  __ LoadIsolate(A0);
+  __ lx(A0, Address(A0, target::Isolate::default_tag_offset()));
+  __ ret();
+}
+
+void AsmIntrinsifier::Profiler_getCurrentTag(Assembler* assembler,
+                                             Label* normal_ir_body) {
+  __ LoadIsolate(A0);
+  __ lx(A0, Address(A0, target::Isolate::current_tag_offset()));
+  __ ret();
+}
+
+void AsmIntrinsifier::Timeline_isDartStreamEnabled(Assembler* assembler,
+                                                   Label* normal_ir_body) {
+#if !defined(SUPPORT_TIMELINE)
+  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
+  __ ret();
+#else
+  Label true_label;
+  // Load TimelineStream*.
+  __ lx(A0, Address(THR, target::Thread::dart_stream_offset()));
+  // Load uintptr_t from TimelineStream*.
+  __ lx(A0, Address(A0, target::TimelineStream::enabled_offset()));
+  __ bnez(A0, &true_label, Assembler::kNearJump);
+  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
+  __ ret();
+  __ Bind(&true_label);
+  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
+  __ ret();
+#endif
+}
+
+#undef __
+
+}  // namespace compiler
+}  // namespace dart
+
+#endif  // defined(TARGET_ARCH_RISCV)
diff --git a/runtime/vm/compiler/asm_intrinsifier_x64.cc b/runtime/vm/compiler/asm_intrinsifier_x64.cc
index e2f7b24..656bce9 100644
--- a/runtime/vm/compiler/asm_intrinsifier_x64.cc
+++ b/runtime/vm/compiler/asm_intrinsifier_x64.cc
@@ -15,10 +15,12 @@
 namespace compiler {
 
 // When entering intrinsics code:
+// PP: Caller's ObjectPool in JIT / global ObjectPool in AOT
+// CODE_REG: Callee's Code in JIT / not passed in AOT
 // R10: Arguments descriptor
 // TOS: Return address
-// The R10 registers can be destroyed only if there is no slow-path, i.e.
-// if the intrinsified method always executes a return.
+// The R10 and CODE_REG registers can be destroyed only if there is no
+// slow-path, i.e. if the intrinsified method always executes a return.
 // The RBP register should not be modified, because it is used by the profiler.
 // The PP and THR registers (see constants_x64.h) must be preserved.
 
diff --git a/runtime/vm/compiler/assembler/assembler.h b/runtime/vm/compiler/assembler/assembler.h
index 336640a..cfed11a 100644
--- a/runtime/vm/compiler/assembler/assembler.h
+++ b/runtime/vm/compiler/assembler/assembler.h
@@ -25,6 +25,8 @@
 #include "vm/compiler/assembler/assembler_arm.h"
 #elif defined(TARGET_ARCH_ARM64)
 #include "vm/compiler/assembler/assembler_arm64.h"
+#elif defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+#include "vm/compiler/assembler/assembler_riscv.h"
 #else
 #error Unknown architecture.
 #endif
diff --git a/runtime/vm/compiler/assembler/assembler_arm.cc b/runtime/vm/compiler/assembler/assembler_arm.cc
index 3aeae48..139be88 100644
--- a/runtime/vm/compiler/assembler/assembler_arm.cc
+++ b/runtime/vm/compiler/assembler/assembler_arm.cc
@@ -30,9 +30,9 @@
 namespace compiler {
 
 Assembler::Assembler(ObjectPoolBuilder* object_pool_builder,
-                     bool use_far_branches)
+                     intptr_t far_branch_level)
     : AssemblerBase(object_pool_builder),
-      use_far_branches_(use_far_branches),
+      use_far_branches_(far_branch_level != 0),
       constant_pool_allowed_(false) {
   generate_invoke_write_barrier_wrapper_ = [&](Condition cond, Register reg) {
     Call(
diff --git a/runtime/vm/compiler/assembler/assembler_arm.h b/runtime/vm/compiler/assembler/assembler_arm.h
index c7168c3..78bb68e 100644
--- a/runtime/vm/compiler/assembler/assembler_arm.h
+++ b/runtime/vm/compiler/assembler/assembler_arm.h
@@ -354,7 +354,7 @@
 class Assembler : public AssemblerBase {
  public:
   explicit Assembler(ObjectPoolBuilder* object_pool_builder,
-                     bool use_far_branches = false);
+                     intptr_t far_branch_level = 0);
   ~Assembler() {}
 
   void PushRegister(Register r) { Push(r); }
diff --git a/runtime/vm/compiler/assembler/assembler_arm64.cc b/runtime/vm/compiler/assembler/assembler_arm64.cc
index 5869617..943457c 100644
--- a/runtime/vm/compiler/assembler/assembler_arm64.cc
+++ b/runtime/vm/compiler/assembler/assembler_arm64.cc
@@ -26,9 +26,9 @@
 namespace compiler {
 
 Assembler::Assembler(ObjectPoolBuilder* object_pool_builder,
-                     bool use_far_branches)
+                     intptr_t far_branch_level)
     : AssemblerBase(object_pool_builder),
-      use_far_branches_(use_far_branches),
+      use_far_branches_(far_branch_level != 0),
       constant_pool_allowed_(false) {
   generate_invoke_write_barrier_wrapper_ = [&](Register reg) {
     Call(Address(THR,
diff --git a/runtime/vm/compiler/assembler/assembler_arm64.h b/runtime/vm/compiler/assembler/assembler_arm64.h
index e404453..a1126ec 100644
--- a/runtime/vm/compiler/assembler/assembler_arm64.h
+++ b/runtime/vm/compiler/assembler/assembler_arm64.h
@@ -512,7 +512,7 @@
 class Assembler : public AssemblerBase {
  public:
   explicit Assembler(ObjectPoolBuilder* object_pool_builder,
-                     bool use_far_branches = false);
+                     intptr_t far_branch_level = 0);
   ~Assembler() {}
 
   void PushRegister(Register r) { Push(r); }
diff --git a/runtime/vm/compiler/assembler/assembler_base.h b/runtime/vm/compiler/assembler/assembler_base.h
index c3410b4..85259a3 100644
--- a/runtime/vm/compiler/assembler/assembler_base.h
+++ b/runtime/vm/compiler/assembler/assembler_base.h
@@ -20,7 +20,8 @@
 
 namespace dart {
 
-#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
+#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64) ||                  \
+    defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
 DECLARE_FLAG(bool, use_far_branches);
 #endif
 
@@ -214,6 +215,64 @@
 class Address;
 class FieldAddress;
 
+#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+class Label : public ZoneAllocated {
+ public:
+  Label() {}
+  ~Label() {
+    // Assert if label is being destroyed with unresolved branches pending.
+    ASSERT(!IsLinked());
+  }
+
+  intptr_t Position() const {
+    ASSERT(IsBound());
+    return position_;
+  }
+
+  bool IsBound() const { return position_ != -1; }
+  bool IsUnused() const { return !IsBound() && !IsLinked(); }
+  bool IsLinked() const {
+    return unresolved_cb_ != -1 || unresolved_cj_ != -1 ||
+           unresolved_b_ != -1 || unresolved_j_ != -1 || unresolved_far_ != -1;
+  }
+
+ private:
+  int32_t position_ = -1;
+  void BindTo(intptr_t position) {
+    ASSERT(!IsBound());
+    ASSERT(!IsLinked());
+    position_ = position;
+    ASSERT(IsBound());
+  }
+
+  // Linked lists of unresolved forward branches, threaded through the branch
+  // instructions. The offset encoded in each unresolved branch is the delta
+  // to the next instruction in the list, terminated by a 0 delta. Each branch
+  // class has a separate list because the offset range of each is different.
+#define DEFINE_BRANCH_CLASS(name)                                              \
+  int32_t unresolved_##name##_ = -1;                                           \
+  int32_t link_##name(int32_t position) {                                      \
+    ASSERT(position > unresolved_##name##_);                                   \
+    int32_t offset;                                                            \
+    if (unresolved_##name##_ == -1) {                                          \
+      offset = 0;                                                              \
+    } else {                                                                   \
+      offset = position - unresolved_##name##_;                                \
+      ASSERT(offset > 0);                                                      \
+    }                                                                          \
+    unresolved_##name##_ = position;                                           \
+    return offset;                                                             \
+  }
+  DEFINE_BRANCH_CLASS(cb);
+  DEFINE_BRANCH_CLASS(cj);
+  DEFINE_BRANCH_CLASS(b);
+  DEFINE_BRANCH_CLASS(j);
+  DEFINE_BRANCH_CLASS(far);
+
+  friend class MicroAssembler;
+  DISALLOW_COPY_AND_ASSIGN(Label);
+};
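+
+// Editor's sketch (illustrative only, not part of this CL): the lists are
+// threaded through the instruction stream itself. Each unresolved branch
+// encodes the delta back to the previous unresolved branch of the same
+// class, and the Label holds only the position of the most recent one.
+// For B-type branches emitted at positions 8, 20 and 36:
+//   emit at  8: link_b(8)  returns  0  (terminator, stored in the branch)
+//   emit at 20: link_b(20) returns 12  (20 - 8)
+//   emit at 36: link_b(36) returns 16  (36 - 20)
+//   unresolved_b_ == 36; Bind() later walks 36 -> 20 -> 8, replacing each
+//   stored delta with the real branch offset (see MicroAssembler::Bind).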
+#else
 class Label : public ZoneAllocated {
  public:
   Label() : position_(0), unresolved_(0) {
@@ -324,6 +383,7 @@
   friend class Assembler;
   DISALLOW_COPY_AND_ASSIGN(Label);
 };
+#endif
 
 // External labels keep a function pointer to allow them
 // to be called from code generated by the assembler.
diff --git a/runtime/vm/compiler/assembler/assembler_ia32.h b/runtime/vm/compiler/assembler/assembler_ia32.h
index d5b61ee..69207d5 100644
--- a/runtime/vm/compiler/assembler/assembler_ia32.h
+++ b/runtime/vm/compiler/assembler/assembler_ia32.h
@@ -229,12 +229,12 @@
 class Assembler : public AssemblerBase {
  public:
   explicit Assembler(ObjectPoolBuilder* object_pool_builder,
-                     bool use_far_branches = false)
+                     intptr_t far_branch_level = 0)
       : AssemblerBase(object_pool_builder),
         jit_cookie_(0),
         code_(NewZoneHandle(ThreadState::Current()->zone())) {
     // This mode is only needed and implemented for ARM.
-    ASSERT(!use_far_branches);
+    ASSERT(far_branch_level == 0);
   }
   ~Assembler() {}
 
diff --git a/runtime/vm/compiler/assembler/assembler_riscv.cc b/runtime/vm/compiler/assembler/assembler_riscv.cc
new file mode 100644
index 0000000..b86d5c7
--- /dev/null
+++ b/runtime/vm/compiler/assembler/assembler_riscv.cc
@@ -0,0 +1,4391 @@
+// Copyright (c) 2017, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/globals.h"  // NOLINT
+#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+
+#define SHOULD_NOT_INCLUDE_RUNTIME
+
+#include "vm/compiler/assembler/assembler.h"
+#include "vm/compiler/backend/locations.h"
+#include "vm/cpu.h"
+#include "vm/instructions.h"
+#include "vm/simulator.h"
+
+namespace dart {
+
+DECLARE_FLAG(bool, check_code_pointer);
+DECLARE_FLAG(bool, precompiled_mode);
+
+DEFINE_FLAG(int, far_branch_level, 0, "Always use far branches (level 0-2)");
+
+namespace compiler {
+
+MicroAssembler::MicroAssembler(ObjectPoolBuilder* object_pool_builder,
+                               intptr_t far_branch_level,
+                               ExtensionSet extensions)
+    : AssemblerBase(object_pool_builder),
+      extensions_(extensions),
+      far_branch_level_(far_branch_level) {
+  ASSERT(far_branch_level >= 0);
+  ASSERT(far_branch_level <= 2);
+}
+
+MicroAssembler::~MicroAssembler() {}
+
+void MicroAssembler::Bind(Label* label) {
+  ASSERT(!label->IsBound());
+  intptr_t target_position = Position();
+  intptr_t branch_position;
+
+#define BIND(head, update)                                                     \
+  branch_position = label->head;                                               \
+  while (branch_position >= 0) {                                               \
+    ASSERT(Utils::IsAligned(branch_position, Supports(RV_C) ? 2 : 4));         \
+    intptr_t new_offset = target_position - branch_position;                   \
+    ASSERT(Utils::IsAligned(new_offset, Supports(RV_C) ? 2 : 4));              \
+    intptr_t old_offset = update(branch_position, new_offset);                 \
+    if (old_offset == 0) break;                                                \
+    branch_position -= old_offset;                                             \
+  }                                                                            \
+  label->head = -1
+
+  BIND(unresolved_cb_, UpdateCBOffset);
+  BIND(unresolved_cj_, UpdateCJOffset);
+  BIND(unresolved_b_, UpdateBOffset);
+  BIND(unresolved_j_, UpdateJOffset);
+  BIND(unresolved_far_, UpdateFarOffset);
+
+  label->BindTo(target_position);
+}
+
+intptr_t MicroAssembler::UpdateCBOffset(intptr_t branch_position,
+                                        intptr_t new_offset) {
+  CInstr instr(Read16(branch_position));
+  ASSERT((instr.opcode() == C_BEQZ) || (instr.opcode() == C_BNEZ));
+  intptr_t old_offset = instr.b_imm();
+  if (!IsCBImm(new_offset)) {
+    FATAL("Incorrect Assembler::kNearJump");
+  }
+  Write16(branch_position,
+          instr.opcode() | EncodeCRs1p(instr.rs1p()) | EncodeCBImm(new_offset));
+  return old_offset;
+}
+
+intptr_t MicroAssembler::UpdateCJOffset(intptr_t branch_position,
+                                        intptr_t new_offset) {
+  CInstr instr(Read16(branch_position));
+  ASSERT((instr.opcode() == C_J) || (instr.opcode() == C_JAL));
+  intptr_t old_offset = instr.j_imm();
+  if (!IsCJImm(new_offset)) {
+    FATAL("Incorrect Assembler::kNearJump");
+  }
+  Write16(branch_position, instr.opcode() | EncodeCJImm(new_offset));
+  return old_offset;
+}
+
+intptr_t MicroAssembler::UpdateBOffset(intptr_t branch_position,
+                                       intptr_t new_offset) {
+  Instr instr(Read32(branch_position));
+  ASSERT(instr.opcode() == BRANCH);
+  intptr_t old_offset = instr.btype_imm();
+  if (!IsBTypeImm(new_offset)) {
+    BailoutWithBranchOffsetError();
+  }
+  Write32(branch_position, EncodeRs2(instr.rs2()) | EncodeRs1(instr.rs1()) |
+                               EncodeFunct3(instr.funct3()) |
+                               EncodeOpcode(instr.opcode()) |
+                               EncodeBTypeImm(new_offset));
+  return old_offset;
+}
+
+intptr_t MicroAssembler::UpdateJOffset(intptr_t branch_position,
+                                       intptr_t new_offset) {
+  Instr instr(Read32(branch_position));
+  ASSERT(instr.opcode() == JAL);
+  intptr_t old_offset = instr.jtype_imm();
+  if (!IsJTypeImm(new_offset)) {
+    BailoutWithBranchOffsetError();
+  }
+  Write32(branch_position, EncodeRd(instr.rd()) | EncodeOpcode(instr.opcode()) |
+                               EncodeJTypeImm(new_offset));
+  return old_offset;
+}
+
+intptr_t MicroAssembler::UpdateFarOffset(intptr_t branch_position,
+                                         intptr_t new_offset) {
+  Instr auipc_instr(Read32(branch_position));
+  ASSERT(auipc_instr.opcode() == AUIPC);
+  ASSERT(auipc_instr.rd() == FAR_TMP);
+  Instr jr_instr(Read32(branch_position + 4));
+  ASSERT(jr_instr.opcode() == JALR);
+  ASSERT(jr_instr.rd() == ZR);
+  ASSERT(jr_instr.funct3() == F3_0);
+  ASSERT(jr_instr.rs1() == FAR_TMP);
+  intptr_t old_offset = auipc_instr.utype_imm() + jr_instr.itype_imm();
+  intx_t lo = new_offset << (XLEN - 12) >> (XLEN - 12);
+  intx_t hi = (new_offset - lo) << (XLEN - 32) >> (XLEN - 32);
+  if (!IsUTypeImm(hi)) {
+    FATAL("Jump/branch distance exceeds 2GB!");
+  }
+  Write32(branch_position,
+          EncodeUTypeImm(hi) | EncodeRd(FAR_TMP) | EncodeOpcode(AUIPC));
+  Write32(branch_position + 4, EncodeITypeImm(lo) | EncodeRs1(FAR_TMP) |
+                                   EncodeFunct3(F3_0) | EncodeRd(ZR) |
+                                   EncodeOpcode(JALR));
+  return old_offset;
+}
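+
+// Editor's note (worked example): the hi/lo split above lets auipc add the
+// upper part and jalr the sign-extended low 12 bits, with hi absorbing the
+// carry when lo goes negative:
+//   offset 0x00012345: lo = 0x345,            hi = 0x12000
+//   offset 0x0001f800: lo = 0x800 -> -0x800,  hi = 0x1f800 + 0x800 = 0x20000
+// In both cases hi + lo reconstructs the original offset, and hi must fit
+// the 20-bit U-type immediate (hence the 2GB limit).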
+
+void MicroAssembler::lui(Register rd, intptr_t imm) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C) && (rd != ZR) && (rd != SP) && IsCUImm(imm)) {
+    c_lui(rd, imm);
+    return;
+  }
+  EmitUType(imm, rd, LUI);
+}
+
+void MicroAssembler::lui_fixed(Register rd, intptr_t imm) {
+  ASSERT(Supports(RV_I));
+  EmitUType(imm, rd, LUI);
+}
+
+void MicroAssembler::auipc(Register rd, intptr_t imm) {
+  ASSERT(Supports(RV_I));
+  EmitUType(imm, rd, AUIPC);
+}
+
+void MicroAssembler::jal(Register rd, Label* label, JumpDistance distance) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C) &&
+      ((distance == kNearJump) ||
+       (label->IsBound() && IsCJImm(label->Position() - Position())))) {
+    if (rd == ZR) {
+      c_j(label);
+      return;
+    }
+#if XLEN == 32
+    if (rd == RA) {
+      c_jal(label);
+      return;
+    }
+#endif  // XLEN == 32
+  }
+  EmitJump(rd, label, JAL, distance);
+}
+
+void MicroAssembler::jalr(Register rd, Register rs1, intptr_t offset) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if (rs1 != ZR && offset == 0) {
+      if (rd == ZR) {
+        c_jr(rs1);
+        return;
+      } else if (rd == RA) {
+        c_jalr(rs1);
+        return;
+      }
+    }
+  }
+  EmitIType(offset, rs1, F3_0, rd, JALR);
+}
+
+void MicroAssembler::jalr_fixed(Register rd, Register rs1, intptr_t offset) {
+  ASSERT(Supports(RV_I));
+  EmitIType(offset, rs1, F3_0, rd, JALR);
+}
+
+void MicroAssembler::beq(Register rs1,
+                         Register rs2,
+                         Label* label,
+                         JumpDistance distance) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C) &&
+      ((distance == kNearJump) ||
+       (label->IsBound() && IsCBImm(label->Position() - Position())))) {
+    if ((rs1 == ZR) && IsCRs1p(rs2)) {
+      c_beqz(rs2, label);
+      return;
+    } else if ((rs2 == ZR) && IsCRs1p(rs1)) {
+      c_beqz(rs1, label);
+      return;
+    }
+  }
+  EmitBranch(rs1, rs2, label, BEQ, distance);
+}
+
+void MicroAssembler::bne(Register rs1,
+                         Register rs2,
+                         Label* label,
+                         JumpDistance distance) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C) &&
+      ((distance == kNearJump) ||
+       (label->IsBound() && IsCBImm(label->Position() - Position())))) {
+    if ((rs1 == ZR) && IsCRs1p(rs2)) {
+      c_bnez(rs2, label);
+      return;
+    } else if ((rs2 == ZR) && IsCRs1p(rs1)) {
+      c_bnez(rs1, label);
+      return;
+    }
+  }
+  EmitBranch(rs1, rs2, label, BNE, distance);
+}
+
+void MicroAssembler::blt(Register rs1,
+                         Register rs2,
+                         Label* label,
+                         JumpDistance distance) {
+  ASSERT(Supports(RV_I));
+  EmitBranch(rs1, rs2, label, BLT, distance);
+}
+
+void MicroAssembler::bge(Register rs1,
+                         Register rs2,
+                         Label* label,
+                         JumpDistance distance) {
+  ASSERT(Supports(RV_I));
+  EmitBranch(rs1, rs2, label, BGE, distance);
+}
+
+void MicroAssembler::bltu(Register rs1,
+                          Register rs2,
+                          Label* label,
+                          JumpDistance distance) {
+  ASSERT(Supports(RV_I));
+  EmitBranch(rs1, rs2, label, BLTU, distance);
+}
+
+void MicroAssembler::bgeu(Register rs1,
+                          Register rs2,
+                          Label* label,
+                          JumpDistance distance) {
+  ASSERT(Supports(RV_I));
+  EmitBranch(rs1, rs2, label, BGEU, distance);
+}
+
+void MicroAssembler::lb(Register rd, Address addr) {
+  ASSERT(Supports(RV_I));
+  EmitIType(addr.offset(), addr.base(), LB, rd, LOAD);
+}
+
+void MicroAssembler::lh(Register rd, Address addr) {
+  ASSERT(Supports(RV_I));
+  EmitIType(addr.offset(), addr.base(), LH, rd, LOAD);
+}
+
+void MicroAssembler::lw(Register rd, Address addr) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if ((rd != ZR) && (addr.base() == SP) && IsCSPLoad4Imm(addr.offset())) {
+      c_lwsp(rd, addr);
+      return;
+    }
+    if (IsCRdp(rd) && IsCRs1p(addr.base()) && IsCMem4Imm(addr.offset())) {
+      c_lw(rd, addr);
+      return;
+    }
+  }
+  EmitIType(addr.offset(), addr.base(), LW, rd, LOAD);
+}
+
+void MicroAssembler::lbu(Register rd, Address addr) {
+  ASSERT(Supports(RV_I));
+  EmitIType(addr.offset(), addr.base(), LBU, rd, LOAD);
+}
+
+void MicroAssembler::lhu(Register rd, Address addr) {
+  ASSERT(Supports(RV_I));
+  EmitIType(addr.offset(), addr.base(), LHU, rd, LOAD);
+}
+
+void MicroAssembler::sb(Register rs2, Address addr) {
+  ASSERT(Supports(RV_I));
+  EmitSType(addr.offset(), rs2, addr.base(), SB, STORE);
+}
+
+void MicroAssembler::sh(Register rs2, Address addr) {
+  ASSERT(Supports(RV_I));
+  EmitSType(addr.offset(), rs2, addr.base(), SH, STORE);
+}
+
+void MicroAssembler::sw(Register rs2, Address addr) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if ((addr.base() == SP) && IsCSPStore4Imm(addr.offset())) {
+      c_swsp(rs2, addr);
+      return;
+    }
+    if (IsCRs2p(rs2) && IsCRs1p(addr.base()) && IsCMem4Imm(addr.offset())) {
+      c_sw(rs2, addr);
+      return;
+    }
+  }
+  EmitSType(addr.offset(), rs2, addr.base(), SW, STORE);
+}
+
+void MicroAssembler::addi(Register rd, Register rs1, intptr_t imm) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if ((rd != ZR) && (rs1 == ZR) && IsCIImm(imm)) {
+      c_li(rd, imm);
+      return;
+    }
+    if ((rd == rs1) && IsCIImm(imm) && (imm != 0)) {
+      c_addi(rd, rs1, imm);
+      return;
+    }
+    if ((rd == SP) && (rs1 == SP) && IsCI16Imm(imm) && (imm != 0)) {
+      c_addi16sp(rd, rs1, imm);
+      return;
+    }
+    if (IsCRdp(rd) && (rs1 == SP) && IsCI4SPNImm(imm) && (imm != 0)) {
+      c_addi4spn(rd, rs1, imm);
+      return;
+    }
+    if (imm == 0) {
+      if ((rd == ZR) && (rs1 == ZR)) {
+        c_nop();
+        return;
+      }
+      if ((rd != ZR) && (rs1 != ZR)) {
+        c_mv(rd, rs1);
+        return;
+      }
+    }
+  }
+  EmitIType(imm, rs1, ADDI, rd, OPIMM);
+}
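+
+// Editor's note (illustrative mapping, not part of this CL): addi shows the
+// RVC selection pattern used throughout this file -- try each compressed
+// form in turn, then fall back to the 32-bit encoding:
+//   addi a0, zero, 5    ->  c.li a0, 5
+//   addi a0, a0, -1     ->  c.addi a0, -1
+//   addi sp, sp, -64    ->  c.addi16sp sp, -64
+//   addi a2, sp, 16     ->  c.addi4spn a2, sp, 16
+//   addi a0, a1, 0      ->  c.mv a0, a1
+//   addi zero, zero, 0  ->  c.nop
+// Anything that fits no compressed form emits the full I-type ADDI.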
+
+void MicroAssembler::slti(Register rd, Register rs1, intptr_t imm) {
+  ASSERT(Supports(RV_I));
+  EmitIType(imm, rs1, SLTI, rd, OPIMM);
+}
+
+void MicroAssembler::sltiu(Register rd, Register rs1, intptr_t imm) {
+  ASSERT(Supports(RV_I));
+  EmitIType(imm, rs1, SLTIU, rd, OPIMM);
+}
+
+void MicroAssembler::xori(Register rd, Register rs1, intptr_t imm) {
+  ASSERT(Supports(RV_I));
+  EmitIType(imm, rs1, XORI, rd, OPIMM);
+}
+
+void MicroAssembler::ori(Register rd, Register rs1, intptr_t imm) {
+  ASSERT(Supports(RV_I));
+  EmitIType(imm, rs1, ORI, rd, OPIMM);
+}
+
+void MicroAssembler::andi(Register rd, Register rs1, intptr_t imm) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if ((rd == rs1) && IsCRs1p(rs1) && IsCIImm(imm)) {
+      c_andi(rd, rs1, imm);
+      return;
+    }
+  }
+  EmitIType(imm, rs1, ANDI, rd, OPIMM);
+}
+
+void MicroAssembler::slli(Register rd, Register rs1, intptr_t shamt) {
+  ASSERT((shamt > 0) && (shamt < XLEN));
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if ((rd == rs1) && (shamt != 0) && IsCIImm(shamt)) {
+      c_slli(rd, rs1, shamt);
+      return;
+    }
+  }
+  EmitRType(F7_0, shamt, rs1, SLLI, rd, OPIMM);
+}
+
+void MicroAssembler::srli(Register rd, Register rs1, intptr_t shamt) {
+  ASSERT((shamt > 0) && (shamt < XLEN));
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if ((rd == rs1) && IsCRs1p(rs1) && (shamt != 0) && IsCIImm(shamt)) {
+      c_srli(rd, rs1, shamt);
+      return;
+    }
+  }
+  EmitRType(F7_0, shamt, rs1, SRI, rd, OPIMM);
+}
+
+void MicroAssembler::srai(Register rd, Register rs1, intptr_t shamt) {
+  ASSERT((shamt > 0) && (shamt < XLEN));
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if ((rd == rs1) && IsCRs1p(rs1) && (shamt != 0) && IsCIImm(shamt)) {
+      c_srai(rd, rs1, shamt);
+      return;
+    }
+  }
+  EmitRType(SRA, shamt, rs1, SRI, rd, OPIMM);
+}
+
+void MicroAssembler::add(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if (rd == rs1) {
+      c_add(rd, rs1, rs2);
+      return;
+    }
+    if (rd == rs2) {
+      c_add(rd, rs2, rs1);
+      return;
+    }
+  }
+  EmitRType(F7_0, rs2, rs1, ADD, rd, OP);
+}
+
+void MicroAssembler::sub(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if ((rd == rs1) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
+      c_sub(rd, rs1, rs2);
+      return;
+    }
+  }
+  EmitRType(SUB, rs2, rs1, ADD, rd, OP);
+}
+
+void MicroAssembler::sll(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_I));
+  EmitRType(F7_0, rs2, rs1, SLL, rd, OP);
+}
+
+void MicroAssembler::slt(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_I));
+  EmitRType(F7_0, rs2, rs1, SLT, rd, OP);
+}
+
+void MicroAssembler::sltu(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_I));
+  EmitRType(F7_0, rs2, rs1, SLTU, rd, OP);
+}
+
+void MicroAssembler::xor_(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if ((rd == rs1) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
+      c_xor(rd, rs1, rs2);
+      return;
+    }
+    if ((rd == rs2) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
+      c_xor(rd, rs2, rs1);
+      return;
+    }
+  }
+  EmitRType(F7_0, rs2, rs1, XOR, rd, OP);
+}
+
+void MicroAssembler::srl(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_I));
+  EmitRType(F7_0, rs2, rs1, SR, rd, OP);
+}
+
+void MicroAssembler::sra(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_I));
+  EmitRType(SRA, rs2, rs1, SR, rd, OP);
+}
+
+void MicroAssembler::or_(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if ((rd == rs1) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
+      c_or(rd, rs1, rs2);
+      return;
+    }
+    if ((rd == rs2) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
+      c_or(rd, rs2, rs1);
+      return;
+    }
+  }
+  EmitRType(F7_0, rs2, rs1, OR, rd, OP);
+}
+
+void MicroAssembler::and_(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if ((rd == rs1) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
+      c_and(rd, rs1, rs2);
+      return;
+    }
+    if ((rd == rs2) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
+      c_and(rd, rs2, rs1);
+      return;
+    }
+  }
+  EmitRType(F7_0, rs2, rs1, AND, rd, OP);
+}
+
+void MicroAssembler::fence(HartEffects predecessor, HartEffects successor) {
+  ASSERT((predecessor & kAll) == predecessor);
+  ASSERT((successor & kAll) == successor);
+  ASSERT(Supports(RV_I));
+  EmitIType((predecessor << 4) | successor, ZR, FENCE, ZR, MISCMEM);
+}
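+
+// Editor's note (worked example, assuming the spec's bit order for the
+// HartEffects mask: input=8, output=4, read=2, write=1): the predecessor
+// and successor sets are 4-bit masks packed into the I-type immediate, so
+//   fence(kAll, kAll)  ->  (0b1111 << 4) | 0b1111 = 0xff  // fence iorw, iorw
+// and a read-only predecessor with a read+write successor would encode as
+//   (0b0010 << 4) | 0b0011 = 0x23.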
+
+void MicroAssembler::fencei() {
+  ASSERT(Supports(RV_I));
+  EmitIType(0, ZR, FENCEI, ZR, MISCMEM);
+}
+
+void MicroAssembler::ecall() {
+  ASSERT(Supports(RV_I));
+  EmitIType(ECALL, ZR, F3_0, ZR, SYSTEM);
+}
+
+void MicroAssembler::ebreak() {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    c_ebreak();
+    return;
+  }
+  EmitIType(EBREAK, ZR, F3_0, ZR, SYSTEM);
+}
+
+void MicroAssembler::SimulatorPrintObject(Register rs1) {
+  ASSERT(Supports(RV_I));
+  EmitIType(ECALL, rs1, F3_0, ZR, SYSTEM);
+}
+
+void MicroAssembler::csrrw(Register rd, uint32_t csr, Register rs1) {
+  ASSERT(Supports(RV_I));
+  EmitIType(csr, rs1, CSRRW, rd, SYSTEM);
+}
+
+void MicroAssembler::csrrs(Register rd, uint32_t csr, Register rs1) {
+  ASSERT(Supports(RV_I));
+  EmitIType(csr, rs1, CSRRS, rd, SYSTEM);
+}
+
+void MicroAssembler::csrrc(Register rd, uint32_t csr, Register rs1) {
+  ASSERT(Supports(RV_I));
+  EmitIType(csr, rs1, CSRRC, rd, SYSTEM);
+}
+
+void MicroAssembler::csrrwi(Register rd, uint32_t csr, uint32_t imm) {
+  ASSERT(Supports(RV_I));
+  EmitIType(csr, Register(imm), CSRRWI, rd, SYSTEM);
+}
+
+void MicroAssembler::csrrsi(Register rd, uint32_t csr, uint32_t imm) {
+  ASSERT(Supports(RV_I));
+  EmitIType(csr, Register(imm), CSRRSI, rd, SYSTEM);
+}
+
+void MicroAssembler::csrrci(Register rd, uint32_t csr, uint32_t imm) {
+  ASSERT(Supports(RV_I));
+  EmitIType(csr, Register(imm), CSRRCI, rd, SYSTEM);
+}
+
+void MicroAssembler::trap() {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    Emit16(0);  // Permanently reserved illegal instruction.
+  } else {
+    Emit32(0);  // Permanently reserved illegal instruction.
+  }
+}
+
+#if XLEN >= 64
+void MicroAssembler::lwu(Register rd, Address addr) {
+  ASSERT(Supports(RV_I));
+  EmitIType(addr.offset(), addr.base(), LWU, rd, LOAD);
+}
+
+void MicroAssembler::ld(Register rd, Address addr) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if ((rd != ZR) && (addr.base() == SP) && IsCSPLoad8Imm(addr.offset())) {
+      c_ldsp(rd, addr);
+      return;
+    }
+    if (IsCRdp(rd) && IsCRs1p(addr.base()) && IsCMem8Imm(addr.offset())) {
+      c_ld(rd, addr);
+      return;
+    }
+  }
+  EmitIType(addr.offset(), addr.base(), LD, rd, LOAD);
+}
+
+void MicroAssembler::sd(Register rs2, Address addr) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if ((addr.base() == SP) && IsCSPStore8Imm(addr.offset())) {
+      c_sdsp(rs2, addr);
+      return;
+    }
+    if (IsCRs2p(rs2) && IsCRs1p(addr.base()) && IsCMem8Imm(addr.offset())) {
+      c_sd(rs2, addr);
+      return;
+    }
+  }
+  EmitSType(addr.offset(), rs2, addr.base(), SD, STORE);
+}
+
+void MicroAssembler::addiw(Register rd, Register rs1, intptr_t imm) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if ((rd != ZR) && (rs1 == ZR) && IsCIImm(imm)) {
+      c_li(rd, imm);
+      return;
+    }
+    if ((rd == rs1) && (rd != ZR) && IsCIImm(imm)) {
+      c_addiw(rd, rs1, imm);
+      return;
+    }
+  }
+  EmitIType(imm, rs1, ADDI, rd, OPIMM32);
+}
+
+void MicroAssembler::slliw(Register rd, Register rs1, intptr_t shamt) {
+  ASSERT((shamt > 0) && (shamt < 32));
+  ASSERT(Supports(RV_I));
+  EmitRType(F7_0, shamt, rs1, SLLI, rd, OPIMM32);
+}
+
+void MicroAssembler::srliw(Register rd, Register rs1, intptr_t shamt) {
+  ASSERT((shamt > 0) && (shamt < 32));
+  ASSERT(Supports(RV_I));
+  EmitRType(F7_0, shamt, rs1, SRI, rd, OPIMM32);
+}
+
+void MicroAssembler::sraiw(Register rd, Register rs1, intptr_t shamt) {
+  ASSERT((shamt > 0) && (shamt < 32));
+  ASSERT(Supports(RV_I));
+  EmitRType(SRA, shamt, rs1, SRI, rd, OPIMM32);
+}
+
+void MicroAssembler::addw(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if ((rd == rs1) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
+      c_addw(rd, rs1, rs2);
+      return;
+    }
+    if ((rd == rs2) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
+      c_addw(rd, rs2, rs1);
+      return;
+    }
+  }
+  EmitRType(F7_0, rs2, rs1, ADD, rd, OP32);
+}
+
+void MicroAssembler::subw(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_I));
+  if (Supports(RV_C)) {
+    if ((rd == rs1) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
+      c_subw(rd, rs1, rs2);
+      return;
+    }
+  }
+  EmitRType(SUB, rs2, rs1, ADD, rd, OP32);
+}
+
+void MicroAssembler::sllw(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_I));
+  EmitRType(F7_0, rs2, rs1, SLL, rd, OP32);
+}
+
+void MicroAssembler::srlw(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_I));
+  EmitRType(F7_0, rs2, rs1, SR, rd, OP32);
+}
+
+void MicroAssembler::sraw(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_I));
+  EmitRType(SRA, rs2, rs1, SR, rd, OP32);
+}
+#endif  // XLEN >= 64
+
+void MicroAssembler::mul(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_M));
+  EmitRType(MULDIV, rs2, rs1, MUL, rd, OP);
+}
+
+void MicroAssembler::mulh(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_M));
+  EmitRType(MULDIV, rs2, rs1, MULH, rd, OP);
+}
+
+void MicroAssembler::mulhsu(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_M));
+  EmitRType(MULDIV, rs2, rs1, MULHSU, rd, OP);
+}
+
+void MicroAssembler::mulhu(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_M));
+  EmitRType(MULDIV, rs2, rs1, MULHU, rd, OP);
+}
+
+void MicroAssembler::div(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_M));
+  EmitRType(MULDIV, rs2, rs1, DIV, rd, OP);
+}
+
+void MicroAssembler::divu(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_M));
+  EmitRType(MULDIV, rs2, rs1, DIVU, rd, OP);
+}
+
+void MicroAssembler::rem(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_M));
+  EmitRType(MULDIV, rs2, rs1, REM, rd, OP);
+}
+
+void MicroAssembler::remu(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_M));
+  EmitRType(MULDIV, rs2, rs1, REMU, rd, OP);
+}
+
+#if XLEN >= 64
+void MicroAssembler::mulw(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_M));
+  EmitRType(MULDIV, rs2, rs1, MULW, rd, OP32);
+}
+
+void MicroAssembler::divw(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_M));
+  EmitRType(MULDIV, rs2, rs1, DIVW, rd, OP32);
+}
+
+void MicroAssembler::divuw(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_M));
+  EmitRType(MULDIV, rs2, rs1, DIVUW, rd, OP32);
+}
+
+void MicroAssembler::remw(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_M));
+  EmitRType(MULDIV, rs2, rs1, REMW, rd, OP32);
+}
+
+void MicroAssembler::remuw(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_M));
+  EmitRType(MULDIV, rs2, rs1, REMUW, rd, OP32);
+}
+#endif  // XLEN >= 64
+
+void MicroAssembler::lrw(Register rd, Address addr, std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(LR, order, ZR, addr.base(), WIDTH32, rd, AMO);
+}
+
+void MicroAssembler::scw(Register rd,
+                         Register rs2,
+                         Address addr,
+                         std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(SC, order, rs2, addr.base(), WIDTH32, rd, AMO);
+}
+
+void MicroAssembler::amoswapw(Register rd,
+                              Register rs2,
+                              Address addr,
+                              std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOSWAP, order, rs2, addr.base(), WIDTH32, rd, AMO);
+}
+
+void MicroAssembler::amoaddw(Register rd,
+                             Register rs2,
+                             Address addr,
+                             std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOADD, order, rs2, addr.base(), WIDTH32, rd, AMO);
+}
+
+void MicroAssembler::amoxorw(Register rd,
+                             Register rs2,
+                             Address addr,
+                             std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOXOR, order, rs2, addr.base(), WIDTH32, rd, AMO);
+}
+
+void MicroAssembler::amoandw(Register rd,
+                             Register rs2,
+                             Address addr,
+                             std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOAND, order, rs2, addr.base(), WIDTH32, rd, AMO);
+}
+
+void MicroAssembler::amoorw(Register rd,
+                            Register rs2,
+                            Address addr,
+                            std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOOR, order, rs2, addr.base(), WIDTH32, rd, AMO);
+}
+
+void MicroAssembler::amominw(Register rd,
+                             Register rs2,
+                             Address addr,
+                             std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOMIN, order, rs2, addr.base(), WIDTH32, rd, AMO);
+}
+
+void MicroAssembler::amomaxw(Register rd,
+                             Register rs2,
+                             Address addr,
+                             std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOMAX, order, rs2, addr.base(), WIDTH32, rd, AMO);
+}
+
+void MicroAssembler::amominuw(Register rd,
+                              Register rs2,
+                              Address addr,
+                              std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOMINU, order, rs2, addr.base(), WIDTH32, rd, AMO);
+}
+
+void MicroAssembler::amomaxuw(Register rd,
+                              Register rs2,
+                              Address addr,
+                              std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOMAXU, order, rs2, addr.base(), WIDTH32, rd, AMO);
+}
+
+#if XLEN >= 64
+void MicroAssembler::lrd(Register rd, Address addr, std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(LR, order, ZR, addr.base(), WIDTH64, rd, AMO);
+}
+
+void MicroAssembler::scd(Register rd,
+                         Register rs2,
+                         Address addr,
+                         std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(SC, order, rs2, addr.base(), WIDTH64, rd, AMO);
+}
+
+void MicroAssembler::amoswapd(Register rd,
+                              Register rs2,
+                              Address addr,
+                              std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOSWAP, order, rs2, addr.base(), WIDTH64, rd, AMO);
+}
+
+void MicroAssembler::amoaddd(Register rd,
+                             Register rs2,
+                             Address addr,
+                             std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOADD, order, rs2, addr.base(), WIDTH64, rd, AMO);
+}
+
+void MicroAssembler::amoxord(Register rd,
+                             Register rs2,
+                             Address addr,
+                             std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOXOR, order, rs2, addr.base(), WIDTH64, rd, AMO);
+}
+
+void MicroAssembler::amoandd(Register rd,
+                             Register rs2,
+                             Address addr,
+                             std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOAND, order, rs2, addr.base(), WIDTH64, rd, AMO);
+}
+
+void MicroAssembler::amoord(Register rd,
+                            Register rs2,
+                            Address addr,
+                            std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOOR, order, rs2, addr.base(), WIDTH64, rd, AMO);
+}
+
+void MicroAssembler::amomind(Register rd,
+                             Register rs2,
+                             Address addr,
+                             std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOMIN, order, rs2, addr.base(), WIDTH64, rd, AMO);
+}
+
+void MicroAssembler::amomaxd(Register rd,
+                             Register rs2,
+                             Address addr,
+                             std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOMAX, order, rs2, addr.base(), WIDTH64, rd, AMO);
+}
+
+void MicroAssembler::amominud(Register rd,
+                              Register rs2,
+                              Address addr,
+                              std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOMINU, order, rs2, addr.base(), WIDTH64, rd, AMO);
+}
+
+void MicroAssembler::amomaxud(Register rd,
+                              Register rs2,
+                              Address addr,
+                              std::memory_order order) {
+  ASSERT(addr.offset() == 0);
+  ASSERT(Supports(RV_A));
+  EmitRType(AMOMAXU, order, rs2, addr.base(), WIDTH64, rd, AMO);
+}
+#endif  // XLEN >= 64
+
+void MicroAssembler::flw(FRegister rd, Address addr) {
+  ASSERT(Supports(RV_F));
+#if XLEN == 32
+  if (Supports(RV_C)) {
+    if ((addr.base() == SP) && IsCSPLoad4Imm(addr.offset())) {
+      c_flwsp(rd, addr);
+      return;
+    }
+    if (IsCFRdp(rd) && IsCRs1p(addr.base()) && IsCMem4Imm(addr.offset())) {
+      c_flw(rd, addr);
+      return;
+    }
+  }
+#endif  // XLEN == 32
+  EmitIType(addr.offset(), addr.base(), S, rd, LOADFP);
+}
+
+void MicroAssembler::fsw(FRegister rs2, Address addr) {
+  ASSERT(Supports(RV_F));
+#if XLEN == 32
+  if (Supports(RV_C)) {
+    if ((addr.base() == SP) && IsCSPStore4Imm(addr.offset())) {
+      c_fswsp(rs2, addr);
+      return;
+    }
+    if (IsCFRs2p(rs2) && IsCRs1p(addr.base()) && IsCMem4Imm(addr.offset())) {
+      c_fsw(rs2, addr);
+      return;
+    }
+  }
+#endif  // XLEN == 32
+  EmitSType(addr.offset(), rs2, addr.base(), S, STOREFP);
+}
+
+void MicroAssembler::fmadds(FRegister rd,
+                            FRegister rs1,
+                            FRegister rs2,
+                            FRegister rs3,
+                            RoundingMode rounding) {
+  ASSERT(Supports(RV_F));
+  EmitR4Type(rs3, F2_S, rs2, rs1, rounding, rd, FMADD);
+}
+
+void MicroAssembler::fmsubs(FRegister rd,
+                            FRegister rs1,
+                            FRegister rs2,
+                            FRegister rs3,
+                            RoundingMode rounding) {
+  ASSERT(Supports(RV_F));
+  EmitR4Type(rs3, F2_S, rs2, rs1, rounding, rd, FMSUB);
+}
+
+void MicroAssembler::fnmsubs(FRegister rd,
+                             FRegister rs1,
+                             FRegister rs2,
+                             FRegister rs3,
+                             RoundingMode rounding) {
+  ASSERT(Supports(RV_F));
+  EmitR4Type(rs3, F2_S, rs2, rs1, rounding, rd, FNMSUB);
+}
+
+void MicroAssembler::fnmadds(FRegister rd,
+                             FRegister rs1,
+                             FRegister rs2,
+                             FRegister rs3,
+                             RoundingMode rounding) {
+  ASSERT(Supports(RV_F));
+  EmitR4Type(rs3, F2_S, rs2, rs1, rounding, rd, FNMADD);
+}
+
+void MicroAssembler::fadds(FRegister rd,
+                           FRegister rs1,
+                           FRegister rs2,
+                           RoundingMode rounding) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FADDS, rs2, rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fsubs(FRegister rd,
+                           FRegister rs1,
+                           FRegister rs2,
+                           RoundingMode rounding) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FSUBS, rs2, rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fmuls(FRegister rd,
+                           FRegister rs1,
+                           FRegister rs2,
+                           RoundingMode rounding) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FMULS, rs2, rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fdivs(FRegister rd,
+                           FRegister rs1,
+                           FRegister rs2,
+                           RoundingMode rounding) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FDIVS, rs2, rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fsqrts(FRegister rd,
+                            FRegister rs1,
+                            RoundingMode rounding) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FSQRTS, FRegister(0), rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fsgnjs(FRegister rd, FRegister rs1, FRegister rs2) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FSGNJS, rs2, rs1, J, rd, OPFP);
+}
+
+void MicroAssembler::fsgnjns(FRegister rd, FRegister rs1, FRegister rs2) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FSGNJS, rs2, rs1, JN, rd, OPFP);
+}
+
+void MicroAssembler::fsgnjxs(FRegister rd, FRegister rs1, FRegister rs2) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FSGNJS, rs2, rs1, JX, rd, OPFP);
+}
+
+void MicroAssembler::fmins(FRegister rd, FRegister rs1, FRegister rs2) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FMINMAXS, rs2, rs1, MIN, rd, OPFP);
+}
+
+void MicroAssembler::fmaxs(FRegister rd, FRegister rs1, FRegister rs2) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FMINMAXS, rs2, rs1, MAX, rd, OPFP);
+}
+
+void MicroAssembler::feqs(Register rd, FRegister rs1, FRegister rs2) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FCMPS, rs2, rs1, FEQ, rd, OPFP);
+}
+
+void MicroAssembler::flts(Register rd, FRegister rs1, FRegister rs2) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FCMPS, rs2, rs1, FLT, rd, OPFP);
+}
+
+void MicroAssembler::fles(Register rd, FRegister rs1, FRegister rs2) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FCMPS, rs2, rs1, FLE, rd, OPFP);
+}
+
+void MicroAssembler::fclasss(Register rd, FRegister rs1) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FCLASSS, FRegister(0), rs1, F3_1, rd, OPFP);
+}
+
+void MicroAssembler::fcvtws(Register rd, FRegister rs1, RoundingMode rounding) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FCVTintS, FRegister(W), rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fcvtwus(Register rd,
+                             FRegister rs1,
+                             RoundingMode rounding) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FCVTintS, FRegister(WU), rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fcvtsw(FRegister rd, Register rs1, RoundingMode rounding) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FCVTSint, FRegister(W), rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fcvtswu(FRegister rd,
+                             Register rs1,
+                             RoundingMode rounding) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FCVTSint, FRegister(WU), rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fmvxw(Register rd, FRegister rs1) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FMVXW, FRegister(0), rs1, F3_0, rd, OPFP);
+}
+
+void MicroAssembler::fmvwx(FRegister rd, Register rs1) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FMVWX, FRegister(0), rs1, F3_0, rd, OPFP);
+}
+
+#if XLEN >= 64
+void MicroAssembler::fcvtls(Register rd, FRegister rs1, RoundingMode rounding) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FCVTintS, FRegister(L), rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fcvtlus(Register rd,
+                             FRegister rs1,
+                             RoundingMode rounding) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FCVTintS, FRegister(LU), rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fcvtsl(FRegister rd, Register rs1, RoundingMode rounding) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FCVTSint, FRegister(L), rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fcvtslu(FRegister rd,
+                             Register rs1,
+                             RoundingMode rounding) {
+  ASSERT(Supports(RV_F));
+  EmitRType(FCVTSint, FRegister(LU), rs1, rounding, rd, OPFP);
+}
+#endif  // XLEN >= 64
+
+void MicroAssembler::fld(FRegister rd, Address addr) {
+  ASSERT(Supports(RV_D));
+  if (Supports(RV_C)) {
+    if ((addr.base() == SP) && IsCSPLoad8Imm(addr.offset())) {
+      c_fldsp(rd, addr);
+      return;
+    }
+    if (IsCFRdp(rd) && IsCRs1p(addr.base()) && IsCMem8Imm(addr.offset())) {
+      c_fld(rd, addr);
+      return;
+    }
+  }
+  EmitIType(addr.offset(), addr.base(), D, rd, LOADFP);
+}
+
+void MicroAssembler::fsd(FRegister rs2, Address addr) {
+  ASSERT(Supports(RV_D));
+  if (Supports(RV_C)) {
+    if ((addr.base() == SP) && IsCSPStore8Imm(addr.offset())) {
+      c_fsdsp(rs2, addr);
+      return;
+    }
+    if (IsCFRs2p(rs2) && IsCRs1p(addr.base()) && IsCMem8Imm(addr.offset())) {
+      c_fsd(rs2, addr);
+      return;
+    }
+  }
+  EmitSType(addr.offset(), rs2, addr.base(), D, STOREFP);
+}
+
+void MicroAssembler::fmaddd(FRegister rd,
+                            FRegister rs1,
+                            FRegister rs2,
+                            FRegister rs3,
+                            RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitR4Type(rs3, F2_D, rs2, rs1, rounding, rd, FMADD);
+}
+
+void MicroAssembler::fmsubd(FRegister rd,
+                            FRegister rs1,
+                            FRegister rs2,
+                            FRegister rs3,
+                            RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitR4Type(rs3, F2_D, rs2, rs1, rounding, rd, FMSUB);
+}
+
+void MicroAssembler::fnmsubd(FRegister rd,
+                             FRegister rs1,
+                             FRegister rs2,
+                             FRegister rs3,
+                             RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitR4Type(rs3, F2_D, rs2, rs1, rounding, rd, FNMSUB);
+}
+
+void MicroAssembler::fnmaddd(FRegister rd,
+                             FRegister rs1,
+                             FRegister rs2,
+                             FRegister rs3,
+                             RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitR4Type(rs3, F2_D, rs2, rs1, rounding, rd, FNMADD);
+}
+
+void MicroAssembler::faddd(FRegister rd,
+                           FRegister rs1,
+                           FRegister rs2,
+                           RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FADDD, rs2, rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fsubd(FRegister rd,
+                           FRegister rs1,
+                           FRegister rs2,
+                           RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FSUBD, rs2, rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fmuld(FRegister rd,
+                           FRegister rs1,
+                           FRegister rs2,
+                           RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FMULD, rs2, rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fdivd(FRegister rd,
+                           FRegister rs1,
+                           FRegister rs2,
+                           RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FDIVD, rs2, rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fsqrtd(FRegister rd,
+                            FRegister rs1,
+                            RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FSQRTD, FRegister(0), rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fsgnjd(FRegister rd, FRegister rs1, FRegister rs2) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FSGNJD, rs2, rs1, J, rd, OPFP);
+}
+
+void MicroAssembler::fsgnjnd(FRegister rd, FRegister rs1, FRegister rs2) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FSGNJD, rs2, rs1, JN, rd, OPFP);
+}
+
+void MicroAssembler::fsgnjxd(FRegister rd, FRegister rs1, FRegister rs2) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FSGNJD, rs2, rs1, JX, rd, OPFP);
+}
+
+void MicroAssembler::fmind(FRegister rd, FRegister rs1, FRegister rs2) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FMINMAXD, rs2, rs1, MIN, rd, OPFP);
+}
+
+void MicroAssembler::fmaxd(FRegister rd, FRegister rs1, FRegister rs2) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FMINMAXD, rs2, rs1, MAX, rd, OPFP);
+}
+
+void MicroAssembler::fcvtsd(FRegister rd,
+                            FRegister rs1,
+                            RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FCVTS, FRegister(1), rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fcvtds(FRegister rd, FRegister rs1) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FCVTD, FRegister(0), rs1, F3_0, rd, OPFP);
+}
+
+void MicroAssembler::feqd(Register rd, FRegister rs1, FRegister rs2) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FCMPD, rs2, rs1, FEQ, rd, OPFP);
+}
+
+void MicroAssembler::fltd(Register rd, FRegister rs1, FRegister rs2) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FCMPD, rs2, rs1, FLT, rd, OPFP);
+}
+
+void MicroAssembler::fled(Register rd, FRegister rs1, FRegister rs2) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FCMPD, rs2, rs1, FLE, rd, OPFP);
+}
+
+void MicroAssembler::fclassd(Register rd, FRegister rs1) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FCLASSD, FRegister(0), rs1, F3_1, rd, OPFP);
+}
+
+void MicroAssembler::fcvtwd(Register rd, FRegister rs1, RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FCVTintD, FRegister(W), rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fcvtwud(Register rd,
+                             FRegister rs1,
+                             RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FCVTintD, FRegister(WU), rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fcvtdw(FRegister rd, Register rs1, RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FCVTDint, FRegister(W), rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fcvtdwu(FRegister rd,
+                             Register rs1,
+                             RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FCVTDint, FRegister(WU), rs1, rounding, rd, OPFP);
+}
+
+#if XLEN >= 64
+void MicroAssembler::fcvtld(Register rd, FRegister rs1, RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FCVTintD, FRegister(L), rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fcvtlud(Register rd,
+                             FRegister rs1,
+                             RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FCVTintD, FRegister(LU), rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fmvxd(Register rd, FRegister rs1) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FMVXD, FRegister(0), rs1, F3_0, rd, OPFP);
+}
+
+void MicroAssembler::fcvtdl(FRegister rd, Register rs1, RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FCVTDint, FRegister(L), rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fcvtdlu(FRegister rd,
+                             Register rs1,
+                             RoundingMode rounding) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FCVTDint, FRegister(LU), rs1, rounding, rd, OPFP);
+}
+
+void MicroAssembler::fmvdx(FRegister rd, Register rs1) {
+  ASSERT(Supports(RV_D));
+  EmitRType(FMVDX, FRegister(0), rs1, F3_0, rd, OPFP);
+}
+#endif  // XLEN >= 64
+
+void MicroAssembler::c_lwsp(Register rd, Address addr) {
+  ASSERT(rd != ZR);
+  ASSERT(addr.base() == SP);
+  ASSERT(Supports(RV_C));
+  Emit16(C_LWSP | EncodeCRd(rd) | EncodeCSPLoad4Imm(addr.offset()));
+}
+
+#if XLEN == 32
+void MicroAssembler::c_flwsp(FRegister rd, Address addr) {
+  ASSERT(addr.base() == SP);
+  ASSERT(Supports(RV_C));
+  ASSERT(Supports(RV_F));
+  Emit16(C_FLWSP | EncodeCFRd(rd) | EncodeCSPLoad4Imm(addr.offset()));
+}
+#else
+void MicroAssembler::c_ldsp(Register rd, Address addr) {
+  ASSERT(rd != ZR);
+  ASSERT(addr.base() == SP);
+  ASSERT(Supports(RV_C));
+  Emit16(C_LDSP | EncodeCRd(rd) | EncodeCSPLoad8Imm(addr.offset()));
+}
+#endif
+
+void MicroAssembler::c_fldsp(FRegister rd, Address addr) {
+  ASSERT(addr.base() == SP);
+  ASSERT(Supports(RV_C));
+  ASSERT(Supports(RV_D));
+  Emit16(C_FLDSP | EncodeCFRd(rd) | EncodeCSPLoad8Imm(addr.offset()));
+}
+
+void MicroAssembler::c_swsp(Register rs2, Address addr) {
+  ASSERT(addr.base() == SP);
+  ASSERT(Supports(RV_C));
+  Emit16(C_SWSP | EncodeCRs2(rs2) | EncodeCSPStore4Imm(addr.offset()));
+}
+
+#if XLEN == 32
+void MicroAssembler::c_fswsp(FRegister rs2, Address addr) {
+  ASSERT(addr.base() == SP);
+  ASSERT(Supports(RV_C));
+  ASSERT(Supports(RV_F));
+  Emit16(C_FSWSP | EncodeCFRs2(rs2) | EncodeCSPStore4Imm(addr.offset()));
+}
+#else
+void MicroAssembler::c_sdsp(Register rs2, Address addr) {
+  ASSERT(addr.base() == SP);
+  ASSERT(Supports(RV_C));
+  Emit16(C_SDSP | EncodeCRs2(rs2) | EncodeCSPStore8Imm(addr.offset()));
+}
+#endif
+
+void MicroAssembler::c_fsdsp(FRegister rs2, Address addr) {
+  ASSERT(addr.base() == SP);
+  ASSERT(Supports(RV_C));
+  ASSERT(Supports(RV_D));
+  Emit16(C_FSDSP | EncodeCFRs2(rs2) | EncodeCSPStore8Imm(addr.offset()));
+}
+
+void MicroAssembler::c_lw(Register rd, Address addr) {
+  ASSERT(Supports(RV_C));
+  Emit16(C_LW | EncodeCRdp(rd) | EncodeCRs1p(addr.base()) |
+         EncodeCMem4Imm(addr.offset()));
+}
+
+void MicroAssembler::c_ld(Register rd, Address addr) {
+  ASSERT(Supports(RV_C));
+  Emit16(C_LD | EncodeCRdp(rd) | EncodeCRs1p(addr.base()) |
+         EncodeCMem8Imm(addr.offset()));
+}
+
+void MicroAssembler::c_flw(FRegister rd, Address addr) {
+  ASSERT(Supports(RV_C));
+  ASSERT(Supports(RV_F));
+  Emit16(C_FLW | EncodeCFRdp(rd) | EncodeCRs1p(addr.base()) |
+         EncodeCMem4Imm(addr.offset()));
+}
+
+void MicroAssembler::c_fld(FRegister rd, Address addr) {
+  ASSERT(Supports(RV_C));
+  ASSERT(Supports(RV_D));
+  Emit16(C_FLD | EncodeCFRdp(rd) | EncodeCRs1p(addr.base()) |
+         EncodeCMem8Imm(addr.offset()));
+}
+
+void MicroAssembler::c_sw(Register rs2, Address addr) {
+  ASSERT(Supports(RV_C));
+  Emit16(C_SW | EncodeCRs1p(addr.base()) | EncodeCRs2p(rs2) |
+         EncodeCMem4Imm(addr.offset()));
+}
+
+void MicroAssembler::c_sd(Register rs2, Address addr) {
+  ASSERT(Supports(RV_C));
+  Emit16(C_SD | EncodeCRs1p(addr.base()) | EncodeCRs2p(rs2) |
+         EncodeCMem8Imm(addr.offset()));
+}
+
+void MicroAssembler::c_fsw(FRegister rs2, Address addr) {
+  ASSERT(Supports(RV_C));
+  ASSERT(Supports(RV_F));
+  Emit16(C_FSW | EncodeCRs1p(addr.base()) | EncodeCFRs2p(rs2) |
+         EncodeCMem4Imm(addr.offset()));
+}
+
+void MicroAssembler::c_fsd(FRegister rs2, Address addr) {
+  ASSERT(Supports(RV_C));
+  ASSERT(Supports(RV_D));
+  Emit16(C_FSD | EncodeCRs1p(addr.base()) | EncodeCFRs2p(rs2) |
+         EncodeCMem8Imm(addr.offset()));
+}
+
+void MicroAssembler::c_j(Label* label) {
+  ASSERT(Supports(RV_C));
+  EmitCJump(label, C_J);
+}
+
+#if XLEN == 32
+void MicroAssembler::c_jal(Label* label) {
+  ASSERT(Supports(RV_C));
+  EmitCJump(label, C_JAL);
+}
+#endif  // XLEN == 32
+
+void MicroAssembler::c_jr(Register rs1) {
+  ASSERT(Supports(RV_C));
+  ASSERT(rs1 != ZR);
+  Emit16(C_JR | EncodeCRs1(rs1) | EncodeCRs2(ZR));
+}
+
+void MicroAssembler::c_jalr(Register rs1) {
+  ASSERT(Supports(RV_C));
+  Emit16(C_JALR | EncodeCRs1(rs1) | EncodeCRs2(ZR));
+}
+
+void MicroAssembler::c_beqz(Register rs1p, Label* label) {
+  ASSERT(Supports(RV_C));
+  EmitCBranch(rs1p, label, C_BEQZ);
+}
+
+void MicroAssembler::c_bnez(Register rs1p, Label* label) {
+  ASSERT(Supports(RV_C));
+  EmitCBranch(rs1p, label, C_BNEZ);
+}
+
+void MicroAssembler::c_li(Register rd, intptr_t imm) {
+  ASSERT(Supports(RV_C));
+  ASSERT(rd != ZR);
+  Emit16(C_LI | EncodeCRd(rd) | EncodeCIImm(imm));
+}
+
+void MicroAssembler::c_lui(Register rd, uintptr_t imm) {
+  ASSERT(Supports(RV_C));
+  ASSERT(rd != ZR);
+  ASSERT(rd != SP);
+  Emit16(C_LUI | EncodeCRd(rd) | EncodeCUImm(imm));
+}
+
+void MicroAssembler::c_addi(Register rd, Register rs1, intptr_t imm) {
+  ASSERT(Supports(RV_C));
+  ASSERT(imm != 0);
+  ASSERT(rd == rs1);
+  Emit16(C_ADDI | EncodeCRd(rd) | EncodeCIImm(imm));
+}
+
+#if XLEN >= 64
+void MicroAssembler::c_addiw(Register rd, Register rs1, intptr_t imm) {
+  ASSERT(Supports(RV_C));
+  ASSERT(rd == rs1);
+  Emit16(C_ADDIW | EncodeCRd(rd) | EncodeCIImm(imm));
+}
+#endif  // XLEN >= 64
+
+void MicroAssembler::c_addi16sp(Register rd, Register rs1, intptr_t imm) {
+  ASSERT(Supports(RV_C));
+  ASSERT(rd == rs1);
+  Emit16(C_ADDI16SP | EncodeCRd(rd) | EncodeCI16Imm(imm));
+}
+
+void MicroAssembler::c_addi4spn(Register rdp, Register rs1, intptr_t imm) {
+  ASSERT(Supports(RV_C));
+  ASSERT(rs1 == SP);
+  ASSERT(imm != 0);
+  Emit16(C_ADDI4SPN | EncodeCRdp(rdp) | EncodeCI4SPNImm(imm));
+}
+
+void MicroAssembler::c_slli(Register rd, Register rs1, intptr_t imm) {
+  ASSERT(Supports(RV_C));
+  ASSERT(rd == rs1);
+  ASSERT(imm != 0);
+  Emit16(C_SLLI | EncodeCRd(rd) | EncodeCIImm(imm));
+}
+
+void MicroAssembler::c_srli(Register rd, Register rs1, intptr_t imm) {
+  ASSERT(Supports(RV_C));
+  ASSERT(rd == rs1);
+  ASSERT(imm != 0);
+  Emit16(C_SRLI | EncodeCRs1p(rd) | EncodeCIImm(imm));
+}
+
+void MicroAssembler::c_srai(Register rd, Register rs1, intptr_t imm) {
+  ASSERT(Supports(RV_C));
+  ASSERT(rd == rs1);
+  ASSERT(imm != 0);
+  Emit16(C_SRAI | EncodeCRs1p(rd) | EncodeCIImm(imm));
+}
+
+void MicroAssembler::c_andi(Register rd, Register rs1, intptr_t imm) {
+  ASSERT(Supports(RV_C));
+  ASSERT(rd == rs1);
+  Emit16(C_ANDI | EncodeCRs1p(rd) | EncodeCIImm(imm));
+}
+
+void MicroAssembler::c_mv(Register rd, Register rs2) {
+  ASSERT(Supports(RV_C));
+  ASSERT(rd != ZR);
+  ASSERT(rs2 != ZR);
+  Emit16(C_MV | EncodeCRd(rd) | EncodeCRs2(rs2));
+}
+
+void MicroAssembler::c_add(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_C));
+  ASSERT(rd != ZR);
+  ASSERT(rd == rs1);
+  ASSERT(rs2 != ZR);
+  Emit16(C_ADD | EncodeCRd(rd) | EncodeCRs2(rs2));
+}
+
+void MicroAssembler::c_and(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_C));
+  ASSERT(rd == rs1);
+  Emit16(C_AND | EncodeCRs1p(rs1) | EncodeCRs2p(rs2));
+}
+
+void MicroAssembler::c_or(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_C));
+  Emit16(C_OR | EncodeCRs1p(rs1) | EncodeCRs2p(rs2));
+}
+
+void MicroAssembler::c_xor(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_C));
+  Emit16(C_XOR | EncodeCRs1p(rs1) | EncodeCRs2p(rs2));
+}
+
+void MicroAssembler::c_sub(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_C));
+  Emit16(C_SUB | EncodeCRs1p(rs1) | EncodeCRs2p(rs2));
+}
+
+#if XLEN >= 64
+void MicroAssembler::c_addw(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_C));
+  Emit16(C_ADDW | EncodeCRs1p(rs1) | EncodeCRs2p(rs2));
+}
+
+void MicroAssembler::c_subw(Register rd, Register rs1, Register rs2) {
+  ASSERT(Supports(RV_C));
+  Emit16(C_SUBW | EncodeCRs1p(rs1) | EncodeCRs2p(rs2));
+}
+#endif  // XLEN >= 64
+
+void MicroAssembler::c_nop() {
+  ASSERT(Supports(RV_C));
+  Emit16(C_NOP);
+}
+
+void MicroAssembler::c_ebreak() {
+  ASSERT(Supports(RV_C));
+  Emit16(C_EBREAK);
+}
+
+static Funct3 InvertFunct3(Funct3 func) {
+  switch (func) {
+    case BEQ:
+      return BNE;
+    case BNE:
+      return BEQ;
+    case BGE:
+      return BLT;
+    case BGEU:
+      return BLTU;
+    case BLT:
+      return BGE;
+    case BLTU:
+      return BGEU;
+    default:
+      UNREACHABLE();
+  }
+}
+
+void MicroAssembler::EmitBranch(Register rs1,
+                                Register rs2,
+                                Label* label,
+                                Funct3 func,
+                                JumpDistance distance) {
+  intptr_t offset;
+  if (label->IsBound()) {
+    // Backward branch: use near or far branch based on actual distance.
+    offset = label->Position() - Position();
+    if (IsBTypeImm(offset)) {
+      EmitBType(offset, rs2, rs1, func, BRANCH);
+      return;
+    }
+
+    if (IsJTypeImm(offset + 4)) {
+      intptr_t start = Position();
+      const intptr_t kFarBranchLength = 8;
+      EmitBType(kFarBranchLength, rs2, rs1, InvertFunct3(func), BRANCH);
+      offset = label->Position() - Position();
+      EmitJType(offset, ZR, JAL);
+      intptr_t end = Position();
+      ASSERT_EQUAL(end - start, kFarBranchLength);
+      return;
+    }
+
+    intptr_t start = Position();
+    const intptr_t kFarBranchLength = 12;
+    EmitBType(kFarBranchLength, rs2, rs1, InvertFunct3(func), BRANCH);
+    offset = label->Position() - Position();
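+    // Split the offset for an AUIPC/JALR pair: lo is the sign-extended low
+    // 12 bits (added by JALR) and hi absorbs the carry, so offset == hi + lo.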
+    intx_t lo = offset << (XLEN - 12) >> (XLEN - 12);
+    intx_t hi = (offset - lo) << (XLEN - 32) >> (XLEN - 32);
+    if (!IsUTypeImm(hi)) {
+      FATAL("Branch distance exceeds 2GB!");
+    }
+    EmitUType(hi, FAR_TMP, AUIPC);
+    EmitIType(lo, FAR_TMP, F3_0, ZR, JALR);
+    intptr_t end = Position();
+    ASSERT_EQUAL(end - start, kFarBranchLength);
+    return;
+  } else {
+    // Forward branch: speculatively use near branches and re-assemble with far
+    // branches if any need greater length.
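+    // far_branch_level selects the speculation: 0 = B-type branch,
+    // 1 = inverted branch over a JAL, 2 = inverted branch over AUIPC+JALR.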
+    if (distance == kNearJump) {
+      offset = label->link_b(Position());
+      if (!IsBTypeImm(offset)) {
+        FATAL("Incorrect Assembler::kNearJump");
+      }
+      EmitBType(offset, rs2, rs1, func, BRANCH);
+    } else if (far_branch_level() == 0) {
+      offset = label->link_b(Position());
+      if (!IsBTypeImm(offset)) {
+        // TODO(riscv): This isn't so much because this branch is out of range
+        // as because some previous jump to the same target would be out of
+        // B-type range... A possible alternative is to keep separate lists on
+        // Labels for pending B-type and J-type instructions.
+        BailoutWithBranchOffsetError();
+      }
+      EmitBType(offset, rs2, rs1, func, BRANCH);
+    } else if (far_branch_level() == 1) {
+      intptr_t start = Position();
+      const intptr_t kFarBranchLength = 8;
+      EmitBType(kFarBranchLength, rs2, rs1, InvertFunct3(func), BRANCH);
+      offset = label->link_j(Position());
+      EmitJType(offset, ZR, JAL);
+      intptr_t end = Position();
+      ASSERT_EQUAL(end - start, kFarBranchLength);
+    } else {
+      intptr_t start = Position();
+      const intptr_t kFarBranchLength = 12;
+      EmitBType(kFarBranchLength, rs2, rs1, InvertFunct3(func), BRANCH);
+      offset = label->link_far(Position());
+      intx_t lo = offset << (XLEN - 12) >> (XLEN - 12);
+      intx_t hi = (offset - lo) << (XLEN - 32) >> (XLEN - 32);
+      if (!IsUTypeImm(hi)) {
+        FATAL("Branch distance exceeds 2GB!");
+      }
+      EmitUType(hi, FAR_TMP, AUIPC);
+      EmitIType(lo, FAR_TMP, F3_0, ZR, JALR);
+      intptr_t end = Position();
+      ASSERT_EQUAL(end - start, kFarBranchLength);
+    }
+  }
+}
+
+void MicroAssembler::EmitJump(Register rd,
+                              Label* label,
+                              Opcode op,
+                              JumpDistance distance) {
+  intptr_t offset;
+  if (label->IsBound()) {
+    // Backward jump: use near or far jump based on actual distance.
+    offset = label->Position() - Position();
+
+    if (IsJTypeImm(offset)) {
+      EmitJType(offset, rd, JAL);
+      return;
+    }
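+    // Out of JAL range: as in EmitBranch, split the offset into an AUIPC
+    // high part and a JALR low part, reaching +/-2GB.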
+    intx_t lo = offset << (XLEN - 12) >> (XLEN - 12);
+    intx_t hi = (offset - lo) << (XLEN - 32) >> (XLEN - 32);
+    if (!IsUTypeImm(hi)) {
+      FATAL("Jump distance exceeds 2GB!");
+    }
+    EmitUType(hi, FAR_TMP, AUIPC);
+    EmitIType(lo, FAR_TMP, F3_0, ZR, JALR);
+    return;
+  } else {
+    // Forward jump: speculatively use near jumps and re-assemble with far
+    // jumps if any need greater length.
+    if (distance == kNearJump) {
+      offset = label->link_j(Position());
+      if (!IsJTypeImm(offset)) {
+        FATAL("Incorrect Assembler::kNearJump");
+      }
+      EmitJType(offset, rd, JAL);
+    } else if (far_branch_level() < 2) {
+      offset = label->link_j(Position());
+      if (!IsJTypeImm(offset)) {
+        BailoutWithBranchOffsetError();
+      }
+      EmitJType(offset, rd, JAL);
+    } else {
+      offset = label->link_far(Position());
+      intx_t lo = offset << (XLEN - 12) >> (XLEN - 12);
+      intx_t hi = (offset - lo) << (XLEN - 32) >> (XLEN - 32);
+      if (!IsUTypeImm(hi)) {
+        FATAL("Jump distance exceeds 2GB!");
+      }
+      EmitUType(hi, FAR_TMP, AUIPC);
+      EmitIType(lo, FAR_TMP, F3_0, ZR, JALR);
+    }
+  }
+}
+
+void MicroAssembler::EmitCBranch(Register rs1p, Label* label, COpcode op) {
+  intptr_t offset;
+  if (label->IsBound()) {
+    offset = label->Position() - Position();
+  } else {
+    offset = label->link_cb(Position());
+  }
+  if (!IsCBImm(offset)) {
+    FATAL("Incorrect Assembler::kNearJump");
+  }
+  Emit16(op | EncodeCRs1p(rs1p) | EncodeCBImm(offset));
+}
+
+void MicroAssembler::EmitCJump(Label* label, COpcode op) {
+  intptr_t offset;
+  if (label->IsBound()) {
+    offset = label->Position() - Position();
+  } else {
+    offset = label->link_cj(Position());
+  }
+  if (!IsCJImm(offset)) {
+    FATAL("Incorrect Assembler::kNearJump");
+  }
+  Emit16(op | EncodeCJImm(offset));
+}
+
+void MicroAssembler::EmitRType(Funct5 funct5,
+                               std::memory_order order,
+                               Register rs2,
+                               Register rs1,
+                               Funct3 funct3,
+                               Register rd,
+                               Opcode opcode) {
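+  // The AMO acquire (aq) and release (rl) bits occupy the two low bits of
+  // funct7, below the 5-bit operation selector.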
+  intptr_t funct7 = funct5 << 2;
+  switch (order) {
+    case std::memory_order_acq_rel:
+      funct7 |= 0b11;
+      break;
+    case std::memory_order_acquire:
+      funct7 |= 0b10;
+      break;
+    case std::memory_order_release:
+      funct7 |= 0b01;
+      break;
+    case std::memory_order_relaxed:
+      funct7 |= 0b00;
+      break;
+    default:
+      FATAL("Invalid memory order");
+  }
+  EmitRType((Funct7)funct7, rs2, rs1, funct3, rd, opcode);
+}
+
+void MicroAssembler::EmitRType(Funct7 funct7,
+                               Register rs2,
+                               Register rs1,
+                               Funct3 funct3,
+                               Register rd,
+                               Opcode opcode) {
+  uint32_t e = 0;
+  e |= EncodeFunct7(funct7);
+  e |= EncodeRs2(rs2);
+  e |= EncodeRs1(rs1);
+  e |= EncodeFunct3(funct3);
+  e |= EncodeRd(rd);
+  e |= EncodeOpcode(opcode);
+  Emit32(e);
+}
+
+void MicroAssembler::EmitRType(Funct7 funct7,
+                               FRegister rs2,
+                               FRegister rs1,
+                               Funct3 funct3,
+                               FRegister rd,
+                               Opcode opcode) {
+  uint32_t e = 0;
+  e |= EncodeFunct7(funct7);
+  e |= EncodeFRs2(rs2);
+  e |= EncodeFRs1(rs1);
+  e |= EncodeFunct3(funct3);
+  e |= EncodeFRd(rd);
+  e |= EncodeOpcode(opcode);
+  Emit32(e);
+}
+
+void MicroAssembler::EmitRType(Funct7 funct7,
+                               FRegister rs2,
+                               FRegister rs1,
+                               RoundingMode round,
+                               FRegister rd,
+                               Opcode opcode) {
+  uint32_t e = 0;
+  e |= EncodeFunct7(funct7);
+  e |= EncodeFRs2(rs2);
+  e |= EncodeFRs1(rs1);
+  e |= EncodeRoundingMode(round);
+  e |= EncodeFRd(rd);
+  e |= EncodeOpcode(opcode);
+  Emit32(e);
+}
+
+void MicroAssembler::EmitRType(Funct7 funct7,
+                               FRegister rs2,
+                               Register rs1,
+                               RoundingMode round,
+                               FRegister rd,
+                               Opcode opcode) {
+  uint32_t e = 0;
+  e |= EncodeFunct7(funct7);
+  e |= EncodeFRs2(rs2);
+  e |= EncodeRs1(rs1);
+  e |= EncodeRoundingMode(round);
+  e |= EncodeFRd(rd);
+  e |= EncodeOpcode(opcode);
+  Emit32(e);
+}
+
+void MicroAssembler::EmitRType(Funct7 funct7,
+                               FRegister rs2,
+                               Register rs1,
+                               Funct3 funct3,
+                               FRegister rd,
+                               Opcode opcode) {
+  uint32_t e = 0;
+  e |= EncodeFunct7(funct7);
+  e |= EncodeFRs2(rs2);
+  e |= EncodeRs1(rs1);
+  e |= EncodeFunct3(funct3);
+  e |= EncodeFRd(rd);
+  e |= EncodeOpcode(opcode);
+  Emit32(e);
+}
+
+void MicroAssembler::EmitRType(Funct7 funct7,
+                               FRegister rs2,
+                               FRegister rs1,
+                               Funct3 funct3,
+                               Register rd,
+                               Opcode opcode) {
+  uint32_t e = 0;
+  e |= EncodeFunct7(funct7);
+  e |= EncodeFRs2(rs2);
+  e |= EncodeFRs1(rs1);
+  e |= EncodeFunct3(funct3);
+  e |= EncodeRd(rd);
+  e |= EncodeOpcode(opcode);
+  Emit32(e);
+}
+
+void MicroAssembler::EmitRType(Funct7 funct7,
+                               FRegister rs2,
+                               FRegister rs1,
+                               RoundingMode round,
+                               Register rd,
+                               Opcode opcode) {
+  uint32_t e = 0;
+  e |= EncodeFunct7(funct7);
+  e |= EncodeFRs2(rs2);
+  e |= EncodeFRs1(rs1);
+  e |= EncodeRoundingMode(round);
+  e |= EncodeRd(rd);
+  e |= EncodeOpcode(opcode);
+  Emit32(e);
+}
+
+void MicroAssembler::EmitRType(Funct7 funct7,
+                               intptr_t shamt,
+                               Register rs1,
+                               Funct3 funct3,
+                               Register rd,
+                               Opcode opcode) {
+  uint32_t e = 0;
+  e |= EncodeFunct7(funct7);
+  e |= EncodeShamt(shamt);
+  e |= EncodeRs1(rs1);
+  e |= EncodeFunct3(funct3);
+  e |= EncodeRd(rd);
+  e |= EncodeOpcode(opcode);
+  Emit32(e);
+}
+
+void MicroAssembler::EmitR4Type(FRegister rs3,
+                                Funct2 funct2,
+                                FRegister rs2,
+                                FRegister rs1,
+                                RoundingMode round,
+                                FRegister rd,
+                                Opcode opcode) {
+  uint32_t e = 0;
+  e |= EncodeFRs3(rs3);
+  e |= EncodeFunct2(funct2);
+  e |= EncodeFRs2(rs2);
+  e |= EncodeFRs1(rs1);
+  e |= EncodeRoundingMode(round);
+  e |= EncodeFRd(rd);
+  e |= EncodeOpcode(opcode);
+  Emit32(e);
+}
+
+void MicroAssembler::EmitIType(intptr_t imm,
+                               Register rs1,
+                               Funct3 funct3,
+                               Register rd,
+                               Opcode opcode) {
+  uint32_t e = 0;
+  e |= EncodeITypeImm(imm);
+  e |= EncodeRs1(rs1);
+  e |= EncodeFunct3(funct3);
+  e |= EncodeRd(rd);
+  e |= EncodeOpcode(opcode);
+  Emit32(e);
+}
+
+void MicroAssembler::EmitIType(intptr_t imm,
+                               Register rs1,
+                               Funct3 funct3,
+                               FRegister rd,
+                               Opcode opcode) {
+  uint32_t e = 0;
+  e |= EncodeITypeImm(imm);
+  e |= EncodeRs1(rs1);
+  e |= EncodeFunct3(funct3);
+  e |= EncodeFRd(rd);
+  e |= EncodeOpcode(opcode);
+  Emit32(e);
+}
+
+void MicroAssembler::EmitSType(intptr_t imm,
+                               Register rs2,
+                               Register rs1,
+                               Funct3 funct3,
+                               Opcode opcode) {
+  uint32_t e = 0;
+  e |= EncodeSTypeImm(imm);
+  e |= EncodeRs2(rs2);
+  e |= EncodeRs1(rs1);
+  e |= EncodeFunct3(funct3);
+  e |= EncodeOpcode(opcode);
+  Emit32(e);
+}
+
+void MicroAssembler::EmitSType(intptr_t imm,
+                               FRegister rs2,
+                               Register rs1,
+                               Funct3 funct3,
+                               Opcode opcode) {
+  uint32_t e = 0;
+  e |= EncodeSTypeImm(imm);
+  e |= EncodeFRs2(rs2);
+  e |= EncodeRs1(rs1);
+  e |= EncodeFunct3(funct3);
+  e |= EncodeOpcode(opcode);
+  Emit32(e);
+}
+
+void MicroAssembler::EmitBType(intptr_t imm,
+                               Register rs2,
+                               Register rs1,
+                               Funct3 funct3,
+                               Opcode opcode) {
+  uint32_t e = 0;
+  e |= EncodeBTypeImm(imm);
+  e |= EncodeRs2(rs2);
+  e |= EncodeRs1(rs1);
+  e |= EncodeFunct3(funct3);
+  e |= EncodeOpcode(opcode);
+  Emit32(e);
+}
+
+void MicroAssembler::EmitUType(intptr_t imm, Register rd, Opcode opcode) {
+  uint32_t e = 0;
+  e |= EncodeUTypeImm(imm);
+  e |= EncodeRd(rd);
+  e |= EncodeOpcode(opcode);
+  Emit32(e);
+}
+
+void MicroAssembler::EmitJType(intptr_t imm, Register rd, Opcode opcode) {
+  uint32_t e = 0;
+  e |= EncodeJTypeImm(imm);
+  e |= EncodeRd(rd);
+  e |= EncodeOpcode(opcode);
+  Emit32(e);
+}
+
+Assembler::Assembler(ObjectPoolBuilder* object_pool_builder,
+                     intptr_t far_branch_level)
+    : MicroAssembler(object_pool_builder,
+                     far_branch_level,
+                     FLAG_use_compressed_instructions ? RV_GC : RV_G),
+      constant_pool_allowed_(false) {
+  generate_invoke_write_barrier_wrapper_ = [&](Register reg) {
+    // Note this does not destroy RA.
+    lx(TMP,
+       Address(THR, target::Thread::write_barrier_wrappers_thread_offset(reg)));
+    jalr(TMP, TMP);
+  };
+  generate_invoke_array_write_barrier_ = [&]() {
+    Call(
+        Address(THR, target::Thread::array_write_barrier_entry_point_offset()));
+  };
+}
+
+void Assembler::PushRegister(Register r) {
+  ASSERT(r != SP);
+  subi(SP, SP, target::kWordSize);
+  sx(r, Address(SP, 0));
+}
+void Assembler::PopRegister(Register r) {
+  ASSERT(r != SP);
+  lx(r, Address(SP, 0));
+  addi(SP, SP, target::kWordSize);
+}
+
+void Assembler::PushRegisterPair(Register r0, Register r1) {
+  ASSERT(r0 != SP);
+  ASSERT(r1 != SP);
+  subi(SP, SP, 2 * target::kWordSize);
+  sx(r1, Address(SP, target::kWordSize));
+  sx(r0, Address(SP, 0));
+}
+
+void Assembler::PopRegisterPair(Register r0, Register r1) {
+  ASSERT(r0 != SP);
+  ASSERT(r1 != SP);
+  lx(r1, Address(SP, target::kWordSize));
+  lx(r0, Address(SP, 0));
+  addi(SP, SP, 2 * target::kWordSize);
+}
+
+void Assembler::PushRegisters(const RegisterSet& regs) {
+  // The order in which the registers are pushed must match the order
+  // in which the registers are encoded in the safepoint's stack map.
+
+  intptr_t size = (regs.CpuRegisterCount() * target::kWordSize) +
+                  (regs.FpuRegisterCount() * kFpuRegisterSize);
+  if (size == 0) {
+    return;  // Skip no-op SP update.
+  }
+
+  subi(SP, SP, size);
+  intptr_t offset = size;
+  for (intptr_t i = kNumberOfFpuRegisters - 1; i >= 0; i--) {
+    FRegister reg = static_cast<FRegister>(i);
+    if (regs.ContainsFpuRegister(reg)) {
+      offset -= kFpuRegisterSize;
+      fsd(reg, Address(SP, offset));
+    }
+  }
+  for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; i--) {
+    Register reg = static_cast<Register>(i);
+    if (regs.ContainsRegister(reg)) {
+      offset -= target::kWordSize;
+      sx(reg, Address(SP, offset));
+    }
+  }
+  ASSERT(offset == 0);
+}
+void Assembler::PopRegisters(const RegisterSet& regs) {
+  // The order in which the registers are pushed must match the order
+  // in which the registers are encoded in the safepoint's stack map.
+
+  intptr_t size = (regs.CpuRegisterCount() * target::kWordSize) +
+                  (regs.FpuRegisterCount() * kFpuRegisterSize);
+  if (size == 0) {
+    return;  // Skip no-op SP update.
+  }
+  intptr_t offset = 0;
+  for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
+    Register reg = static_cast<Register>(i);
+    if (regs.ContainsRegister(reg)) {
+      lx(reg, Address(SP, offset));
+      offset += target::kWordSize;
+    }
+  }
+  for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
+    FRegister reg = static_cast<FRegister>(i);
+    if (regs.ContainsFpuRegister(reg)) {
+      fld(reg, Address(SP, offset));
+      offset += kFpuRegisterSize;
+    }
+  }
+  ASSERT(offset == size);
+  addi(SP, SP, size);
+}
+
+void Assembler::PushNativeCalleeSavedRegisters() {
+  RegisterSet regs(kAbiPreservedCpuRegs, kAbiPreservedFpuRegs);
+  intptr_t size = (regs.CpuRegisterCount() * target::kWordSize) +
+                  (regs.FpuRegisterCount() * sizeof(double));
+  subi(SP, SP, size);
+  intptr_t offset = 0;
+  for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
+    FRegister reg = static_cast<FRegister>(i);
+    if (regs.ContainsFpuRegister(reg)) {
+      fsd(reg, Address(SP, offset));
+      offset += sizeof(double);
+    }
+  }
+  for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
+    Register reg = static_cast<Register>(i);
+    if (regs.ContainsRegister(reg)) {
+      sx(reg, Address(SP, offset));
+      offset += target::kWordSize;
+    }
+  }
+  ASSERT(offset == size);
+}
+
+void Assembler::PopNativeCalleeSavedRegisters() {
+  RegisterSet regs(kAbiPreservedCpuRegs, kAbiPreservedFpuRegs);
+  intptr_t size = (regs.CpuRegisterCount() * target::kWordSize) +
+                  (regs.FpuRegisterCount() * sizeof(double));
+  intptr_t offset = 0;
+  for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
+    FRegister reg = static_cast<FRegister>(i);
+    if (regs.ContainsFpuRegister(reg)) {
+      fld(reg, Address(SP, offset));
+      offset += sizeof(double);
+    }
+  }
+  for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
+    Register reg = static_cast<Register>(i);
+    if (regs.ContainsRegister(reg)) {
+      lx(reg, Address(SP, offset));
+      offset += target::kWordSize;
+    }
+  }
+  ASSERT(offset == size);
+  addi(SP, SP, size);
+}
+
+void Assembler::ExtendValue(Register rd, Register rn, OperandSize sz) {
+  switch (sz) {
+#if XLEN == 64
+    case kEightBytes:
+      if (rd == rn) return;  // No operation needed.
+      return mv(rd, rn);
+    case kUnsignedFourBytes:
+      return UNIMPLEMENTED();
+    case kFourBytes:
+      return sextw(rd, rn);
+#elif XLEN == 32
+    case kUnsignedFourBytes:
+    case kFourBytes:
+      if (rd == rn) return;  // No operation needed.
+      return mv(rd, rn);
+#endif
+    case kUnsignedTwoBytes:
+    case kTwoBytes:
+    case kUnsignedByte:
+    case kByte:
+    default:
+      UNIMPLEMENTED();
+      break;
+  }
+  UNIMPLEMENTED();
+}
+void Assembler::ExtendAndSmiTagValue(Register rd, Register rn, OperandSize sz) {
+  if (sz == kWordBytes) {
+    SmiTag(rd, rn);
+    return;
+  }
+
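+  // Extend-and-tag in two shifts: move the field to the top of the register,
+  // then shift back arithmetically (sign-extend) or logically (zero-extend),
+  // stopping kSmiTagShift bits early so the result is already Smi-tagged.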
+  switch (sz) {
+#if XLEN == 64
+    case kUnsignedFourBytes:
+      slli(rd, rn, XLEN - kBitsPerInt32);
+      srli(rd, rd, XLEN - kBitsPerInt32 - kSmiTagShift);
+      return;
+    case kFourBytes:
+      slli(rd, rn, XLEN - kBitsPerInt32);
+      srai(rd, rd, XLEN - kBitsPerInt32 - kSmiTagShift);
+      return;
+#endif
+    case kUnsignedTwoBytes:
+      slli(rd, rn, XLEN - kBitsPerInt16);
+      srli(rd, rd, XLEN - kBitsPerInt16 - kSmiTagShift);
+      return;
+    case kTwoBytes:
+      slli(rd, rn, XLEN - kBitsPerInt16);
+      srai(rd, rd, XLEN - kBitsPerInt16 - kSmiTagShift);
+      return;
+    case kUnsignedByte:
+      slli(rd, rn, XLEN - kBitsPerInt8);
+      srli(rd, rd, XLEN - kBitsPerInt8 - kSmiTagShift);
+      return;
+    case kByte:
+      slli(rd, rn, XLEN - kBitsPerInt8);
+      srai(rd, rd, XLEN - kBitsPerInt8 - kSmiTagShift);
+      return;
+    default:
+      UNIMPLEMENTED();
+      break;
+  }
+}
+
+// Unconditional jump to a given address in memory. Clobbers TMP2.
+void Assembler::Jump(const Address& address) {
+  lx(TMP2, address);
+  jr(TMP2);
+}
+
+void Assembler::LoadField(Register dst, const FieldAddress& address) {
+  lx(dst, address);
+}
+
+#if defined(USING_THREAD_SANITIZER)
+void Assembler::TsanLoadAcquire(Register addr) {
+  UNIMPLEMENTED();
+}
+void Assembler::TsanStoreRelease(Register addr) {
+  UNIMPLEMENTED();
+}
+#endif
+
+void Assembler::LoadAcquire(Register dst, Register address, int32_t offset) {
+  ASSERT(dst != address);
+  LoadFromOffset(dst, address, offset);
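+  // Acquire ordering: fence r,rw orders the load before all later accesses.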
+  fence(HartEffects::kRead, HartEffects::kMemory);
+
+#if defined(USING_THREAD_SANITIZER)
+  if (offset == 0) {
+    TsanLoadAcquire(address);
+  } else {
+    AddImmediate(TMP2, address, offset);
+    TsanLoadAcquire(TMP2);
+  }
+#endif
+}
+
+void Assembler::LoadAcquireCompressed(Register dst,
+                                      Register address,
+                                      int32_t offset) {
+  LoadAcquire(dst, address, offset);
+}
+
+void Assembler::StoreRelease(Register src, Register address, int32_t offset) {
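+  // Release ordering: fence rw,w orders all prior accesses before the store.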
+  fence(HartEffects::kMemory, HartEffects::kWrite);
+  StoreToOffset(src, address, offset);
+}
+
+void Assembler::StoreReleaseCompressed(Register src,
+                                       Register address,
+                                       int32_t offset) {
+  UNIMPLEMENTED();
+}
+
+void Assembler::CompareWithCompressedFieldFromOffset(Register value,
+                                                     Register base,
+                                                     int32_t offset) {
+  UNIMPLEMENTED();
+}
+
+void Assembler::CompareWithMemoryValue(Register value,
+                                       Address address,
+                                       OperandSize sz) {
+  UNIMPLEMENTED();
+}
+
+void Assembler::CompareFunctionTypeNullabilityWith(Register type,
+                                                   int8_t value) {
+  EnsureHasClassIdInDEBUG(kFunctionTypeCid, type, TMP);
+  lbu(TMP,
+      FieldAddress(type, compiler::target::FunctionType::nullability_offset()));
+  CompareImmediate(TMP, value);
+}
+void Assembler::CompareTypeNullabilityWith(Register type, int8_t value) {
+  EnsureHasClassIdInDEBUG(kTypeCid, type, TMP);
+  lbu(TMP, FieldAddress(type, compiler::target::Type::nullability_offset()));
+  CompareImmediate(TMP, value);
+}
+
+void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) {
+  if (frame_space != 0) {
+    addi(SP, SP, -frame_space);
+  }
+  const intptr_t kAbiStackAlignment = 16;  // For both 32 and 64 bit.
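+  // Round SP down to the ABI-required alignment by clearing its low bits.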
+  andi(SP, SP, ~(kAbiStackAlignment - 1));
+}
+
+// In debug mode, this generates code to check that:
+//   FP + kExitLinkSlotFromEntryFp == SP
+// or triggers breakpoint otherwise.
+void Assembler::EmitEntryFrameVerification() {
+#if defined(DEBUG)
+  Label done;
+  ASSERT(!constant_pool_allowed());
+  LoadImmediate(TMP, target::frame_layout.exit_link_slot_from_entry_fp *
+                         target::kWordSize);
+  add(TMP, TMP, FPREG);
+  beq(TMP, SPREG, &done, kNearJump);
+
+  Breakpoint();
+
+  Bind(&done);
+#endif
+}
+
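+// RISC-V has no condition flags: comparisons are merely recorded here and
+// only materialized as branch or set instructions by BranchIf/SetIf below.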
+void Assembler::CompareRegisters(Register rn, Register rm) {
+  ASSERT(deferred_compare_ == kNone);
+  deferred_compare_ = kCompareReg;
+  deferred_left_ = rn;
+  deferred_reg_ = rm;
+}
+void Assembler::CompareObjectRegisters(Register rn, Register rm) {
+  CompareRegisters(rn, rm);
+}
+void Assembler::TestRegisters(Register rn, Register rm) {
+  ASSERT(deferred_compare_ == kNone);
+  deferred_compare_ = kTestReg;
+  deferred_left_ = rn;
+  deferred_reg_ = rm;
+}
+
+void Assembler::BranchIf(Condition condition,
+                         Label* label,
+                         JumpDistance distance) {
+  ASSERT(deferred_compare_ != kNone);
+
+  if (deferred_compare_ == kCompareImm || deferred_compare_ == kCompareReg) {
+    Register left = deferred_left_;
+    Register right;
+    if (deferred_compare_ == kCompareImm) {
+      if (deferred_imm_ == 0) {
+        right = ZR;
+      } else {
+        LoadImmediate(TMP2, deferred_imm_);
+        right = TMP2;
+      }
+    } else {
+      right = deferred_reg_;
+    }
+    switch (condition) {
+      case EQUAL:
+        beq(left, right, label, distance);
+        break;
+      case NOT_EQUAL:
+        bne(left, right, label, distance);
+        break;
+      case LESS:
+        blt(left, right, label, distance);
+        break;
+      case LESS_EQUAL:
+        ble(left, right, label, distance);
+        break;
+      case GREATER_EQUAL:
+        bge(left, right, label, distance);
+        break;
+      case GREATER:
+        bgt(left, right, label, distance);
+        break;
+      case UNSIGNED_LESS:
+        bltu(left, right, label, distance);
+        break;
+      case UNSIGNED_LESS_EQUAL:
+        bleu(left, right, label, distance);
+        break;
+      case UNSIGNED_GREATER_EQUAL:
+        bgeu(left, right, label, distance);
+        break;
+      case UNSIGNED_GREATER:
+        bgtu(left, right, label, distance);
+        break;
+      case OVERFLOW:
+      case NO_OVERFLOW:
+        FATAL("Use Add/Subtract/MultiplyBranchOverflow instead.");
+      default:
+        UNREACHABLE();
+    }
+  } else if (deferred_compare_ == kTestImm || deferred_compare_ == kTestReg) {
+    if (deferred_compare_ == kTestImm) {
+      AndImmediate(TMP2, deferred_left_, deferred_imm_);
+    } else {
+      and_(TMP2, deferred_left_, deferred_reg_);
+    }
+    switch (condition) {
+      case ZERO:
+        beqz(TMP2, label, distance);
+        break;
+      case NOT_ZERO:
+        bnez(TMP2, label, distance);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  } else {
+    UNREACHABLE();
+  }
+  deferred_compare_ = kNone;  // Consumed.
+}
+
+void Assembler::SetIf(Condition condition, Register rd) {
+  ASSERT(deferred_compare_ != kNone);
+
+  if (deferred_compare_ == kCompareImm) {
+    if (deferred_imm_ == 0) {
+      deferred_compare_ = kCompareReg;
+      deferred_reg_ = ZR;
+      SetIf(condition, rd);
+      return;
+    }
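+    // Both imm and imm + 1 must fit the I-type immediate, because the
+    // LESS_EQUAL/GREATER cases and their unsigned variants below compare
+    // against imm + 1.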
+    if (!IsITypeImm(deferred_imm_) || !IsITypeImm(deferred_imm_ + 1)) {
+      LoadImmediate(TMP2, deferred_imm_);
+      deferred_compare_ = kCompareReg;
+      deferred_reg_ = TMP2;
+      SetIf(condition, rd);
+      return;
+    }
+    Register left = deferred_left_;
+    intx_t right = deferred_imm_;
+    switch (condition) {
+      case EQUAL:
+        xori(rd, left, right);
+        seqz(rd, rd);
+        break;
+      case NOT_EQUAL:
+        xori(rd, left, right);
+        snez(rd, rd);
+        break;
+      case LESS:
+        slti(rd, left, right);
+        break;
+      case LESS_EQUAL:
+        slti(rd, left, right + 1);
+        break;
+      case GREATER_EQUAL:
+        slti(rd, left, right);
+        xori(rd, rd, 1);
+        break;
+      case GREATER:
+        slti(rd, left, right + 1);
+        xori(rd, rd, 1);
+        break;
+      case UNSIGNED_LESS:
+        sltiu(rd, left, right);
+        break;
+      case UNSIGNED_LESS_EQUAL:
+        sltiu(rd, left, right + 1);
+        break;
+      case UNSIGNED_GREATER_EQUAL:
+        sltiu(rd, left, right);
+        xori(rd, rd, 1);
+        break;
+      case UNSIGNED_GREATER:
+        sltiu(rd, left, right + 1);
+        xori(rd, rd, 1);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  } else if (deferred_compare_ == kCompareReg) {
+    Register left = deferred_left_;
+    Register right = deferred_reg_;
+    switch (condition) {
+      case EQUAL:
+        if (right == ZR) {
+          seqz(rd, left);
+        } else {
+          xor_(rd, left, right);
+          seqz(rd, rd);
+        }
+        break;
+      case NOT_EQUAL:
+        if (right == ZR) {
+          snez(rd, left);
+        } else {
+          xor_(rd, left, right);
+          snez(rd, rd);
+        }
+        break;
+      case LESS:
+        slt(rd, left, right);
+        break;
+      case LESS_EQUAL:
+        slt(rd, right, left);
+        xori(rd, rd, 1);
+        break;
+      case GREATER_EQUAL:
+        slt(rd, left, right);
+        xori(rd, rd, 1);
+        break;
+      case GREATER:
+        slt(rd, right, left);
+        break;
+      case UNSIGNED_LESS:
+        sltu(rd, left, right);
+        break;
+      case UNSIGNED_LESS_EQUAL:
+        sltu(rd, right, left);
+        xori(rd, rd, 1);
+        break;
+      case UNSIGNED_GREATER_EQUAL:
+        sltu(rd, left, right);
+        xori(rd, rd, 1);
+        break;
+      case UNSIGNED_GREATER:
+        sltu(rd, right, left);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  } else if (deferred_compare_ == kTestImm || deferred_compare_ == kTestReg) {
+    if (deferred_compare_ == kTestImm) {
+      AndImmediate(TMP2, deferred_left_, deferred_imm_);
+    } else {
+      and_(TMP2, deferred_left_, deferred_reg_);
+    }
+    switch (condition) {
+      case ZERO:
+        seqz(rd, TMP2);
+        break;
+      case NOT_ZERO:
+        snez(rd, TMP2);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  } else {
+    UNREACHABLE();
+  }
+
+  deferred_compare_ = kNone;  // Consumed.
+}
+
+void Assembler::BranchIfZero(Register rn, Label* label, JumpDistance distance) {
+  beqz(rn, label, distance);
+}
+
+void Assembler::BranchIfNotSmi(Register reg,
+                               Label* label,
+                               JumpDistance distance) {
+  ASSERT(reg != TMP2);
+  andi(TMP2, reg, kSmiTagMask);
+  bnez(TMP2, label, distance);
+}
+void Assembler::BranchIfSmi(Register reg, Label* label, JumpDistance distance) {
+  ASSERT(reg != TMP2);
+  andi(TMP2, reg, kSmiTagMask);
+  beqz(TMP2, label, distance);
+}
+
+void Assembler::Jump(const Code& target,
+                     Register pp,
+                     ObjectPoolBuilderEntry::Patchability patchable) {
+  const intptr_t index =
+      object_pool_builder().FindObject(ToObject(target), patchable);
+  LoadWordFromPoolIndex(CODE_REG, index, pp);
+  Jump(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
+}
+
+void Assembler::JumpAndLink(const Code& target,
+                            ObjectPoolBuilderEntry::Patchability patchable,
+                            CodeEntryKind entry_kind) {
+  const intptr_t index =
+      object_pool_builder().FindObject(ToObject(target), patchable);
+  LoadWordFromPoolIndex(CODE_REG, index);
+  Call(FieldAddress(CODE_REG, target::Code::entry_point_offset(entry_kind)));
+}
+
+void Assembler::JumpAndLinkToRuntime() {
+  Call(Address(THR, target::Thread::call_to_runtime_entry_point_offset()));
+}
+
+void Assembler::JumpAndLinkWithEquivalence(const Code& target,
+                                           const Object& equivalence,
+                                           CodeEntryKind entry_kind) {
+  const intptr_t index =
+      object_pool_builder().FindObject(ToObject(target), equivalence);
+  LoadWordFromPoolIndex(CODE_REG, index);
+  Call(FieldAddress(CODE_REG, target::Code::entry_point_offset(entry_kind)));
+}
+
+void Assembler::Call(Address target) {
+  lx(RA, target);
+  jalr(RA);
+}
+
+void Assembler::AddImmediate(Register rd,
+                             Register rs1,
+                             intx_t imm,
+                             OperandSize sz) {
+  if (IsITypeImm(imm)) {
+    addi(rd, rs1, imm);
+  } else {
+    ASSERT(rs1 != TMP2);
+    LoadImmediate(TMP2, imm);
+    add(rd, rs1, TMP2);
+  }
+}
+void Assembler::AndImmediate(Register rd,
+                             Register rs1,
+                             intx_t imm,
+                             OperandSize sz) {
+  if (IsITypeImm(imm)) {
+    andi(rd, rs1, imm);
+  } else {
+    ASSERT(rs1 != TMP2);
+    LoadImmediate(TMP2, imm);
+    and_(rd, rs1, TMP2);
+  }
+}
+void Assembler::OrImmediate(Register rd,
+                            Register rs1,
+                            intx_t imm,
+                            OperandSize sz) {
+  if (IsITypeImm(imm)) {
+    ori(rd, rs1, imm);
+  } else {
+    ASSERT(rs1 != TMP2);
+    LoadImmediate(TMP2, imm);
+    or_(rd, rs1, TMP2);
+  }
+}
+void Assembler::XorImmediate(Register rd,
+                             Register rs1,
+                             intx_t imm,
+                             OperandSize sz) {
+  if (IsITypeImm(imm)) {
+    xori(rd, rs1, imm);
+  } else {
+    ASSERT(rs1 != TMP2);
+    LoadImmediate(TMP2, imm);
+    xor_(rd, rs1, TMP2);
+  }
+}
+
+void Assembler::TestImmediate(Register rn, intx_t imm, OperandSize sz) {
+  ASSERT(deferred_compare_ == kNone);
+  deferred_compare_ = kTestImm;
+  deferred_left_ = rn;
+  deferred_imm_ = imm;
+}
+void Assembler::CompareImmediate(Register rn, intx_t imm, OperandSize sz) {
+  ASSERT(deferred_compare_ == kNone);
+  deferred_compare_ = kCompareImm;
+  deferred_left_ = rn;
+  deferred_imm_ = imm;
+}
+
+void Assembler::LoadFromOffset(Register dest,
+                               const Address& address,
+                               OperandSize sz) {
+  LoadFromOffset(dest, address.base(), address.offset(), sz);
+}
+void Assembler::LoadFromOffset(Register dest,
+                               Register base,
+                               int32_t offset,
+                               OperandSize sz) {
+  ASSERT(base != TMP2);
+  if (!IsITypeImm(offset)) {
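+    // Offset does not fit the 12-bit I-type immediate: materialize the high
+    // part with lui, fold it into the base, and keep the sign-extended low
+    // 12 bits as the new offset.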
+    intx_t imm = offset;
+    intx_t lo = imm << (XLEN - 12) >> (XLEN - 12);
+    intx_t hi = (imm - lo) << (XLEN - 32) >> (XLEN - 32);
+    if (hi == 0) {
+      UNREACHABLE();
+    } else {
+      lui(TMP2, hi);
+      add(TMP2, TMP2, base);
+      base = TMP2;
+      offset = lo;
+    }
+  }
+  switch (sz) {
+#if XLEN == 64
+    case kEightBytes:
+      return ld(dest, Address(base, offset));
+    case kUnsignedFourBytes:
+      return lwu(dest, Address(base, offset));
+#elif XLEN == 32
+    case kUnsignedFourBytes:
+      return lw(dest, Address(base, offset));
+#endif
+    case kFourBytes:
+      return lw(dest, Address(base, offset));
+    case kUnsignedTwoBytes:
+      return lhu(dest, Address(base, offset));
+    case kTwoBytes:
+      return lh(dest, Address(base, offset));
+    case kUnsignedByte:
+      return lbu(dest, Address(base, offset));
+    case kByte:
+      return lb(dest, Address(base, offset));
+    default:
+      UNREACHABLE();
+  }
+}
+// For loading indexed payloads out of tagged objects like Arrays. If the
+// payload objects are word-sized, use TIMES_HALF_WORD_SIZE if the value in
+// [index] is a Smi, otherwise TIMES_WORD_SIZE if it is unboxed.
+void Assembler::LoadIndexedPayload(Register dest,
+                                   Register base,
+                                   int32_t payload_offset,
+                                   Register index,
+                                   ScaleFactor scale,
+                                   OperandSize sz) {
+  slli(TMP, index, scale);
+  add(TMP, TMP, base);
+  LoadFromOffset(dest, TMP, payload_offset - kHeapObjectTag, sz);
+}
+void Assembler::LoadIndexedCompressed(Register dest,
+                                      Register base,
+                                      int32_t offset,
+                                      Register index) {
+  LoadIndexedPayload(dest, base, offset, index, TIMES_WORD_SIZE, kObjectBytes);
+}
+
+void Assembler::LoadSFromOffset(FRegister dest, Register base, int32_t offset) {
+  ASSERT(base != TMP2);
+  if (!IsITypeImm(offset)) {
+    intx_t imm = offset;
+    intx_t lo = imm << (XLEN - 12) >> (XLEN - 12);
+    intx_t hi = (imm - lo) << (XLEN - 32) >> (XLEN - 32);
+    if (hi == 0) {
+      UNREACHABLE();
+    } else {
+      lui(TMP2, hi);
+      add(TMP2, TMP2, base);
+      base = TMP2;
+      offset = lo;
+    }
+  }
+  flw(dest, Address(base, offset));
+}
+
+void Assembler::LoadDFromOffset(FRegister dest, Register base, int32_t offset) {
+  ASSERT(base != TMP2);
+  if (!IsITypeImm(offset)) {
+    intx_t imm = offset;
+    intx_t lo = imm << (XLEN - 12) >> (XLEN - 12);
+    intx_t hi = (imm - lo) << (XLEN - 32) >> (XLEN - 32);
+    if (hi == 0) {
+      UNREACHABLE();
+    } else {
+      lui(TMP2, hi);
+      add(TMP2, TMP2, base);
+      base = TMP2;
+      offset = lo;
+    }
+  }
+  fld(dest, Address(base, offset));
+}
+
+void Assembler::LoadFromStack(Register dst, intptr_t depth) {
+  UNIMPLEMENTED();
+}
+void Assembler::StoreToStack(Register src, intptr_t depth) {
+  UNIMPLEMENTED();
+}
+void Assembler::CompareToStack(Register src, intptr_t depth) {
+  UNIMPLEMENTED();
+}
+
+void Assembler::StoreToOffset(Register src,
+                              const Address& address,
+                              OperandSize sz) {
+  StoreToOffset(src, address.base(), address.offset(), sz);
+}
+void Assembler::StoreToOffset(Register src,
+                              Register base,
+                              int32_t offset,
+                              OperandSize sz) {
+  ASSERT(base != TMP2);
+  if (!IsITypeImm(offset)) {
+    intx_t imm = offset;
+    intx_t lo = imm << (XLEN - 12) >> (XLEN - 12);
+    intx_t hi = (imm - lo) << (XLEN - 32) >> (XLEN - 32);
+    if (hi == 0) {
+      UNREACHABLE();
+    } else {
+      lui(TMP2, hi);
+      add(TMP2, TMP2, base);
+      base = TMP2;
+      offset = lo;
+    }
+  }
+  switch (sz) {
+#if XLEN == 64
+    case kEightBytes:
+      return sd(src, Address(base, offset));
+#endif
+    case kUnsignedFourBytes:
+    case kFourBytes:
+      return sw(src, Address(base, offset));
+    case kUnsignedTwoBytes:
+    case kTwoBytes:
+      return sh(src, Address(base, offset));
+    case kUnsignedByte:
+    case kByte:
+      return sb(src, Address(base, offset));
+    default:
+      UNREACHABLE();
+  }
+}
+
+void Assembler::StoreSToOffset(FRegister src, Register base, int32_t offset) {
+  ASSERT(base != TMP2);
+  if (!IsITypeImm(offset)) {
+    intx_t imm = offset;
+    intx_t lo = imm << (XLEN - 12) >> (XLEN - 12);
+    intx_t hi = (imm - lo) << (XLEN - 32) >> (XLEN - 32);
+    if (hi == 0) {
+      UNREACHABLE();
+    } else {
+      lui(TMP2, hi);
+      add(TMP2, TMP2, base);
+      base = TMP2;
+      offset = lo;
+    }
+  }
+  fsw(src, Address(base, offset));
+}
+
+void Assembler::StoreDToOffset(FRegister src, Register base, int32_t offset) {
+  ASSERT(base != TMP2);
+  if (!IsITypeImm(offset)) {
+    intx_t imm = offset;
+    intx_t lo = imm << (XLEN - 12) >> (XLEN - 12);
+    intx_t hi = (imm - lo) << (XLEN - 32) >> (XLEN - 32);
+    if (hi == 0) {
+      UNREACHABLE();
+    } else {
+      lui(TMP2, hi);
+      add(TMP2, TMP2, base);
+      base = TMP2;
+      offset = lo;
+    }
+  }
+  fsd(src, Address(base, offset));
+}
+
+void Assembler::LoadUnboxedDouble(FpuRegister dst,
+                                  Register base,
+                                  int32_t offset) {
+  fld(dst, Address(base, offset));
+}
+void Assembler::StoreUnboxedDouble(FpuRegister src,
+                                   Register base,
+                                   int32_t offset) {
+  fsd(src, Address(base, offset));
+}
+void Assembler::MoveUnboxedDouble(FpuRegister dst, FpuRegister src) {
+  fmvd(dst, src);
+}
+
+void Assembler::LoadCompressed(Register dest, const Address& slot) {
+  lx(dest, slot);
+}
+void Assembler::LoadCompressedFromOffset(Register dest,
+                                         Register base,
+                                         int32_t offset) {
+  lx(dest, Address(base, offset));
+}
+void Assembler::LoadCompressedSmi(Register dest, const Address& slot) {
+  lx(dest, slot);
+}
+void Assembler::LoadCompressedSmiFromOffset(Register dest,
+                                            Register base,
+                                            int32_t offset) {
+  lx(dest, Address(base, offset));
+}
+
+// Store into a heap object and apply the generational and incremental write
+// barriers. All stores into heap objects must pass through this function or,
+// if the value can be proven either Smi or old-and-premarked, its NoBarrier
+// variants.
+// Preserves object and value registers.
+void Assembler::StoreIntoObject(Register object,
+                                const Address& dest,
+                                Register value,
+                                CanBeSmi can_value_be_smi,
+                                MemoryOrder memory_order) {
+  // Only the relaxed case is handled here; a release store must go through
+  // StoreRelease, which emits the required fence.
+  ASSERT(memory_order == kRelaxedNonAtomic);
+  sx(value, dest);
+  StoreBarrier(object, value, can_value_be_smi);
+}
+void Assembler::StoreCompressedIntoObject(Register object,
+                                          const Address& dest,
+                                          Register value,
+                                          CanBeSmi can_value_be_smi,
+                                          MemoryOrder memory_order) {
+  StoreIntoObject(object, dest, value, can_value_be_smi, memory_order);
+}
+void Assembler::StoreBarrier(Register object,
+                             Register value,
+                             CanBeSmi can_value_be_smi) {
+  // Storing x into x.slot: the barrier should have been removed at the IL
+  // level.
+  ASSERT(object != value);
+  ASSERT(object != RA);
+  ASSERT(value != RA);
+  ASSERT(object != TMP);
+  ASSERT(object != TMP2);
+  ASSERT(value != TMP);
+  ASSERT(value != TMP2);
+
+  // In parallel, test whether
+  //  - object is old and not remembered and value is new, or
+  //  - object is old and value is old and not marked and concurrent marking is
+  //    in progress
+  // If so, call the WriteBarrier stub, which will either add object to the
+  // store buffer (case 1) or add value to the marking stack (case 2).
+  // Compare UntaggedObject::StorePointer.
+  Label done;
+  if (can_value_be_smi == kValueCanBeSmi) {
+    BranchIfSmi(value, &done, kNearJump);
+  }
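+  // TMP = (object tags >> kBarrierOverlapShift) & value tags & barrier mask;
+  // non-zero only when a store-buffer or marking-stack update is needed.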
+  lbu(TMP, FieldAddress(object, target::Object::tags_offset()));
+  lbu(TMP2, FieldAddress(value, target::Object::tags_offset()));
+  srli(TMP, TMP, target::UntaggedObject::kBarrierOverlapShift);
+  and_(TMP, TMP, TMP2);
+  and_(TMP, TMP, WRITE_BARRIER_MASK);
+  beqz(TMP, &done, kNearJump);
+
+  Register objectForCall = object;
+  if (value != kWriteBarrierValueReg) {
+    // Unlikely. Only non-graph intrinsics.
+    // TODO(rmacnak): Shuffle registers in intrinsics.
+    if (object != kWriteBarrierValueReg) {
+      PushRegister(kWriteBarrierValueReg);
+    } else {
+      COMPILE_ASSERT(S2 != kWriteBarrierValueReg);
+      COMPILE_ASSERT(S3 != kWriteBarrierValueReg);
+      objectForCall = (value == S2) ? S3 : S2;
+      PushRegisterPair(kWriteBarrierValueReg, objectForCall);
+      mv(objectForCall, object);
+    }
+    mv(kWriteBarrierValueReg, value);
+  }
+
+  // Note this uses TMP as the link register, so RA remains preserved.
+  generate_invoke_write_barrier_wrapper_(objectForCall);
+
+  if (value != kWriteBarrierValueReg) {
+    if (object != kWriteBarrierValueReg) {
+      PopRegister(kWriteBarrierValueReg);
+    } else {
+      PopRegisterPair(kWriteBarrierValueReg, objectForCall);
+    }
+  }
+  Bind(&done);
+}
+void Assembler::StoreIntoArray(Register object,
+                               Register slot,
+                               Register value,
+                               CanBeSmi can_value_be_smi) {
+  sx(value, Address(slot, 0));
+  StoreIntoArrayBarrier(object, slot, value, can_value_be_smi);
+}
+void Assembler::StoreCompressedIntoArray(Register object,
+                                         Register slot,
+                                         Register value,
+                                         CanBeSmi can_value_be_smi) {
+  StoreIntoArray(object, slot, value, can_value_be_smi);
+}
+void Assembler::StoreIntoArrayBarrier(Register object,
+                                      Register slot,
+                                      Register value,
+                                      CanBeSmi can_value_be_smi) {
+  // TODO(riscv): Use RA2 to avoid spilling RA inline?
+  const bool spill_lr = true;
+  ASSERT(object != TMP);
+  ASSERT(object != TMP2);
+  ASSERT(value != TMP);
+  ASSERT(value != TMP2);
+  ASSERT(slot != TMP);
+  ASSERT(slot != TMP2);
+
+  // In parallel, test whether
+  //  - object is old and not remembered and value is new, or
+  //  - object is old and value is old and not marked and concurrent marking is
+  //    in progress
+  // If so, call the WriteBarrier stub, which will either add object to the
+  // store buffer (case 1) or add value to the marking stack (case 2).
+  // Compare UntaggedObject::StorePointer.
+  Label done;
+  if (can_value_be_smi == kValueCanBeSmi) {
+    BranchIfSmi(value, &done, kNearJump);
+  }
+  lbu(TMP, FieldAddress(object, target::Object::tags_offset()));
+  lbu(TMP2, FieldAddress(value, target::Object::tags_offset()));
+  srli(TMP, TMP, target::UntaggedObject::kBarrierOverlapShift);
+  and_(TMP, TMP, TMP2);
+  and_(TMP, TMP, WRITE_BARRIER_MASK);
+  beqz(TMP, &done, kNearJump);
+  if (spill_lr) {
+    PushRegister(RA);
+  }
+  if ((object != kWriteBarrierObjectReg) || (value != kWriteBarrierValueReg) ||
+      (slot != kWriteBarrierSlotReg)) {
+    // Spill and shuffle unimplemented. Currently StoreIntoArray is only used
+    // from StoreIndexInstr, which gets these exact registers from the register
+    // allocator.
+    UNIMPLEMENTED();
+  }
+  generate_invoke_array_write_barrier_();
+  if (spill_lr) {
+    PopRegister(RA);
+  }
+  Bind(&done);
+}
+
+void Assembler::StoreIntoObjectOffset(Register object,
+                                      int32_t offset,
+                                      Register value,
+                                      CanBeSmi can_value_be_smi,
+                                      MemoryOrder memory_order) {
+  if (memory_order == kRelease) {
+    StoreRelease(value, object, offset - kHeapObjectTag);
+  } else {
+    StoreToOffset(value, object, offset - kHeapObjectTag);
+  }
+  StoreBarrier(object, value, can_value_be_smi);
+}
+void Assembler::StoreCompressedIntoObjectOffset(Register object,
+                                                int32_t offset,
+                                                Register value,
+                                                CanBeSmi can_value_be_smi,
+                                                MemoryOrder memory_order) {
+  StoreIntoObjectOffset(object, offset, value, can_value_be_smi, memory_order);
+}
+void Assembler::StoreIntoObjectNoBarrier(Register object,
+                                         const Address& dest,
+                                         Register value,
+                                         MemoryOrder memory_order) {
+  ASSERT(memory_order == kRelaxedNonAtomic);
+  sx(value, dest);
+#if defined(DEBUG)
+  Label done;
+  beq(object, value, &done, kNearJump);
+  BranchIfSmi(value, &done, kNearJump);
+  lbu(TMP, FieldAddress(object, target::Object::tags_offset()));
+  lbu(TMP2, FieldAddress(value, target::Object::tags_offset()));
+  srli(TMP, TMP, target::UntaggedObject::kBarrierOverlapShift);
+  and_(TMP, TMP, TMP2);
+  and_(TMP, TMP, WRITE_BARRIER_MASK);
+  beqz(TMP, &done, kNearJump);
+  Stop("Store buffer update is required");
+  Bind(&done);
+#endif
+}
+void Assembler::StoreCompressedIntoObjectNoBarrier(Register object,
+                                                   const Address& dest,
+                                                   Register value,
+                                                   MemoryOrder memory_order) {
+  StoreIntoObjectNoBarrier(object, dest, value, memory_order);
+}
+void Assembler::StoreIntoObjectOffsetNoBarrier(Register object,
+                                               int32_t offset,
+                                               Register value,
+                                               MemoryOrder memory_order) {
+  if (memory_order == kRelease) {
+    StoreRelease(value, object, offset - kHeapObjectTag);
+  } else {
+    StoreToOffset(value, object, offset - kHeapObjectTag);
+  }
+#if defined(DEBUG)
+  Label done;
+  beq(object, value, &done, kNearJump);
+  BranchIfSmi(value, &done, kNearJump);
+  lbu(TMP, FieldAddress(object, target::Object::tags_offset()));
+  lbu(TMP2, FieldAddress(value, target::Object::tags_offset()));
+  srli(TMP, TMP, target::UntaggedObject::kBarrierOverlapShift);
+  and_(TMP, TMP, TMP2);
+  and_(TMP, TMP, WRITE_BARRIER_MASK);
+  beqz(TMP, &done, kNearJump);
+  Stop("Store buffer update is required");
+  Bind(&done);
+#endif
+}
+void Assembler::StoreCompressedIntoObjectOffsetNoBarrier(
+    Register object,
+    int32_t offset,
+    Register value,
+    MemoryOrder memory_order) {
+  StoreIntoObjectOffsetNoBarrier(object, offset, value, memory_order);
+}
+void Assembler::StoreIntoObjectNoBarrier(Register object,
+                                         const Address& dest,
+                                         const Object& value) {
+  ASSERT(IsOriginalObject(value));
+  ASSERT(IsNotTemporaryScopedHandle(value));
+  // No store buffer update.
+  if (IsSameObject(compiler::NullObject(), value)) {
+    sx(NULL_REG, dest);
+  } else if (target::IsSmi(value) && (target::ToRawSmi(value) == 0)) {
+    sx(ZR, dest);
+  } else {
+    LoadObject(TMP2, value);
+    sx(TMP2, dest);
+  }
+}
+void Assembler::StoreCompressedIntoObjectNoBarrier(Register object,
+                                                   const Address& dest,
+                                                   const Object& value,
+                                                   MemoryOrder memory_order) {
+  UNIMPLEMENTED();
+}
+void Assembler::StoreIntoObjectOffsetNoBarrier(Register object,
+                                               int32_t offset,
+                                               const Object& value,
+                                               MemoryOrder memory_order) {
+  if (memory_order == kRelease) {
+    Register value_reg = TMP2;
+    if (IsSameObject(compiler::NullObject(), value)) {
+      value_reg = NULL_REG;
+    } else if (target::IsSmi(value) && (target::ToRawSmi(value) == 0)) {
+      value_reg = ZR;
+    } else {
+      LoadObject(value_reg, value);
+    }
+    StoreIntoObjectOffsetNoBarrier(object, offset, value_reg, memory_order);
+  } else if (IsITypeImm(offset - kHeapObjectTag)) {
+    StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value);
+  } else {
+    AddImmediate(TMP, object, offset - kHeapObjectTag);
+    StoreIntoObjectNoBarrier(object, Address(TMP), value);
+  }
+}
+void Assembler::StoreCompressedIntoObjectOffsetNoBarrier(
+    Register object,
+    int32_t offset,
+    const Object& value,
+    MemoryOrder memory_order) {
+  UNIMPLEMENTED();
+}
+
+// Stores a non-tagged value into a heap object.
+void Assembler::StoreInternalPointer(Register object,
+                                     const Address& dest,
+                                     Register value) {
+  sx(value, dest);
+}
+
+// Object pool, loading from pool, etc.
+void Assembler::LoadPoolPointer(Register pp) {
+  CheckCodePointer();
+  lx(pp, FieldAddress(CODE_REG, target::Code::object_pool_offset()));
+
+  // When in the PP register, the pool pointer is untagged. When we
+  // push it on the stack with TagAndPushPP it is tagged again. PopAndUntagPP
+  // then untags when restoring from the stack. Keeping PP untagged lets
+  // LoadWordFromPoolIndex address the pool with the plain element offset
+  // instead of folding the tag adjustment into every pool load.
+  subi(pp, pp, kHeapObjectTag);
+  set_constant_pool_allowed(pp == PP);
+}
+
+intptr_t Assembler::FindImmediate(int64_t imm) {
+  UNIMPLEMENTED();
+}
+bool Assembler::CanLoadFromObjectPool(const Object& object) const {
+  ASSERT(IsOriginalObject(object));
+  if (!constant_pool_allowed()) {
+    return false;
+  }
+
+  ASSERT(IsNotTemporaryScopedHandle(object));
+  ASSERT(IsInOldSpace(object));
+  return true;
+}
+void Assembler::LoadNativeEntry(
+    Register dst,
+    const ExternalLabel* label,
+    ObjectPoolBuilderEntry::Patchability patchable) {
+  const intptr_t index =
+      object_pool_builder().FindNativeFunction(label, patchable);
+  LoadWordFromPoolIndex(dst, index);
+}
+void Assembler::LoadIsolate(Register dst) {
+  lx(dst, Address(THR, target::Thread::isolate_offset()));
+}
+void Assembler::LoadIsolateGroup(Register dst) {
+  lx(dst, Address(THR, target::Thread::isolate_group_offset()));
+}
+
+void Assembler::LoadImmediate(Register reg, intx_t imm) {
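+  // Split imm into a sign-extended low 12 bits (addi's range) and a
+  // remainder with the low 12 bits clear (lui's range). E.g. for
+  // imm = 0x12345FFF: lo = -1 and hi = 0x12346000, and lui + addi(w)
+  // reconstructs 0x12346000 + (-1) = 0x12345FFF.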
+  intx_t lo = imm << (XLEN - 12) >> (XLEN - 12);
+  intx_t hi = (imm - lo) << (XLEN - 32) >> (XLEN - 32);
+
+#if XLEN > 32
+  if (!Utils::IsInt(32, imm)) {
+    LoadImmediate(reg, (imm - lo) >> 12);
+    slli(reg, reg, 12);
+    if (lo != 0) {
+      addi(reg, reg, lo);
+    }
+    return;
+  }
+#endif
+
+  if (hi == 0) {
+    addi(reg, ZR, lo);
+  } else {
+    lui(reg, hi);
+    if (lo != 0) {
+#if XLEN == 32
+      addi(reg, reg, lo);
+#else
+      addiw(reg, reg, lo);
+#endif
+    }
+  }
+}
+
+void Assembler::LoadDImmediate(FRegister reg, double immd) {
+  int64_t imm = bit_cast<int64_t, double>(immd);
+  if (imm == 0) {
+#if XLEN >= 64
+    fmvdx(reg, ZR);  // bit_cast uint64_t -> double
+#else
+    fcvtdwu(reg, ZR);  // static_cast uint32_t -> double
+#endif
+  } else {
+    ASSERT(constant_pool_allowed());
+#if XLEN >= 64
+    intptr_t index = object_pool_builder().FindImmediate(imm);
+    intptr_t offset = target::ObjectPool::element_offset(index);
+#else
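+    // On RV32, store the 64-bit pattern as two adjacent 32-bit pool entries
+    // and load it with a single double-precision load below.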
+    intptr_t lo_index =
+        object_pool_builder().AddImmediate(Utils::Low32Bits(imm));
+    intptr_t hi_index =
+        object_pool_builder().AddImmediate(Utils::High32Bits(imm));
+    ASSERT(lo_index + 1 == hi_index);
+    intptr_t offset = target::ObjectPool::element_offset(lo_index);
+#endif
+    LoadDFromOffset(reg, PP, offset);
+  }
+}
+
+// Loads a word from the pool at the given index, using an encoding that
+// InstructionPattern::DecodeLoadWordFromPool can decode.
+//
+// Note: the function never clobbers TMP, TMP2 scratch registers.
+void Assembler::LoadWordFromPoolIndex(Register dst,
+                                      intptr_t index,
+                                      Register pp) {
+  ASSERT((pp != PP) || constant_pool_allowed());
+  ASSERT(dst != pp);
+  const uint32_t offset = target::ObjectPool::element_offset(index);
+  // PP is untagged.
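+  // Offsets beyond the 12-bit I-type immediate are materialized with
+  // lui + add, the same hi/lo split used by LoadImmediate.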
+  intx_t imm = offset;
+  intx_t lo = imm << (XLEN - 12) >> (XLEN - 12);
+  intx_t hi = (imm - lo) << (XLEN - 32) >> (XLEN - 32);
+  if (hi == 0) {
+    lx(dst, Address(pp, lo));
+  } else {
+    lui(dst, hi);
+    add(dst, dst, pp);
+    lx(dst, Address(dst, lo));
+  }
+}
+
+void Assembler::CompareObject(Register reg, const Object& object) {
+  ASSERT(IsOriginalObject(object));
+  if (IsSameObject(compiler::NullObject(), object)) {
+    CompareObjectRegisters(reg, NULL_REG);
+  } else if (target::IsSmi(object)) {
+    CompareImmediate(reg, target::ToRawSmi(object));
+  } else {
+    LoadObject(TMP, object);
+    CompareObjectRegisters(reg, TMP);
+  }
+}
+
+void Assembler::ExtractClassIdFromTags(Register result, Register tags) {
+  ASSERT(target::UntaggedObject::kClassIdTagPos == 16);
+  ASSERT(target::UntaggedObject::kClassIdTagSize == 16);
+#if XLEN == 64
+  srliw(result, tags, target::UntaggedObject::kClassIdTagPos);
+#else
+  srli(result, tags, target::UntaggedObject::kClassIdTagPos);
+#endif
+}
+void Assembler::ExtractInstanceSizeFromTags(Register result, Register tags) {
+  ASSERT(target::UntaggedObject::kSizeTagPos == 8);
+  ASSERT(target::UntaggedObject::kSizeTagSize == 8);
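+  // The size tag records the instance size in allocation units, so scale it
+  // back to a byte count with the object alignment shift.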
+  srli(result, tags, target::UntaggedObject::kSizeTagPos);
+  andi(result, result, (1 << target::UntaggedObject::kSizeTagSize) - 1);
+  slli(result, result, target::ObjectAlignment::kObjectAlignmentLog2);
+}
+
+void Assembler::LoadClassId(Register result, Register object) {
+  ASSERT(target::UntaggedObject::kClassIdTagPos == 16);
+  ASSERT(target::UntaggedObject::kClassIdTagSize == 16);
+  const intptr_t class_id_offset =
+      target::Object::tags_offset() +
+      target::UntaggedObject::kClassIdTagPos / kBitsPerByte;
+  lhu(result, FieldAddress(object, class_id_offset));
+}
+void Assembler::LoadClassById(Register result, Register class_id) {
+  ASSERT(result != class_id);
+
+  const intptr_t table_offset =
+      target::IsolateGroup::cached_class_table_table_offset();
+
+  LoadIsolateGroup(result);
+  LoadFromOffset(result, result, table_offset);
+  slli(TMP, class_id, target::kWordSizeLog2);
+  add(result, result, TMP);
+  lx(result, Address(result, 0));
+}
+void Assembler::CompareClassId(Register object,
+                               intptr_t class_id,
+                               Register scratch) {
+  ASSERT(scratch != kNoRegister);
+  LoadClassId(scratch, object);
+  CompareImmediate(scratch, class_id);
+}
+// Note: input and output registers must be different.
+void Assembler::LoadClassIdMayBeSmi(Register result, Register object) {
+  ASSERT(result != object);
+  ASSERT(result != TMP2);
+  ASSERT(object != TMP2);
+  li(result, kSmiCid);
+  Label done;
+  BranchIfSmi(object, &done, kNearJump);
+  LoadClassId(result, object);
+  Bind(&done);
+}
+void Assembler::LoadTaggedClassIdMayBeSmi(Register result, Register object) {
+  LoadClassIdMayBeSmi(result, object);
+  SmiTag(result);
+}
+void Assembler::EnsureHasClassIdInDEBUG(intptr_t cid,
+                                        Register src,
+                                        Register scratch,
+                                        bool can_be_null) {
+#if defined(DEBUG)
+  Comment("Check that object in register has cid %" Pd "", cid);
+  Label matches;
+  LoadClassIdMayBeSmi(scratch, src);
+  CompareImmediate(scratch, cid);
+  BranchIf(EQUAL, &matches, Assembler::kNearJump);
+  if (can_be_null) {
+    CompareImmediate(scratch, kNullCid);
+    BranchIf(EQUAL, &matches, Assembler::kNearJump);
+  }
+  trap();
+  Bind(&matches);
+#endif
+}
+
+void Assembler::EnterFrame(intptr_t frame_size) {
+  // N.B. The ordering here is important. We must never write beyond SP or
+  // it can be clobbered by a signal handler.
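+  // Resulting layout (stack grows down):
+  //   FP + kWordSize : saved RA
+  //   FP             : saved FP
+  //   SP = FP - frame_size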
+  subi(SP, SP, frame_size + 2 * target::kWordSize);
+  sx(RA, Address(SP, frame_size + 1 * target::kWordSize));
+  sx(FP, Address(SP, frame_size + 0 * target::kWordSize));
+  addi(FP, SP, frame_size + 0 * target::kWordSize);
+}
+void Assembler::LeaveFrame() {
+  // N.B. The ordering here is important. We must never read beyond SP or
+  // it may have already been clobbered by a signal handler.
+  mv(SP, FP);
+  lx(FP, Address(SP, 0 * target::kWordSize));
+  lx(RA, Address(SP, 1 * target::kWordSize));
+  addi(SP, SP, 2 * target::kWordSize);
+}
+
+void Assembler::TransitionGeneratedToNative(Register destination,
+                                            Register new_exit_frame,
+                                            Register new_exit_through_ffi,
+                                            bool enter_safepoint) {
+  // Save exit frame information to enable stack walking.
+  sx(new_exit_frame,
+     Address(THR, target::Thread::top_exit_frame_info_offset()));
+
+  sx(new_exit_through_ffi,
+     Address(THR, target::Thread::exit_through_ffi_offset()));
+  Register tmp = new_exit_through_ffi;
+
+  // Mark that the thread is executing native code.
+  sx(destination, Address(THR, target::Thread::vm_tag_offset()));
+  li(tmp, target::Thread::native_execution_state());
+  sx(tmp, Address(THR, target::Thread::execution_state_offset()));
+
+  if (enter_safepoint) {
+    EnterFullSafepoint(tmp);
+  }
+}
+
+void Assembler::TransitionNativeToGenerated(Register state,
+                                            bool exit_safepoint) {
+  if (exit_safepoint) {
+    ExitFullSafepoint(state);
+  } else {
+#if defined(DEBUG)
+    // Ensure we've already left the safepoint.
+    ASSERT(target::Thread::full_safepoint_state_acquired() != 0);
+    li(state, target::Thread::full_safepoint_state_acquired());
+    lx(RA, Address(THR, target::Thread::safepoint_state_offset()));
+    and_(RA, RA, state);
+    Label ok;
+    beqz(RA, &ok, Assembler::kNearJump);
+    Breakpoint();
+    Bind(&ok);
+#endif
+  }
+
+  // Mark that the thread is executing Dart code.
+  li(state, target::Thread::vm_tag_dart_id());
+  sx(state, Address(THR, target::Thread::vm_tag_offset()));
+  li(state, target::Thread::generated_execution_state());
+  sx(state, Address(THR, target::Thread::execution_state_offset()));
+
+  // Reset exit frame information in Isolate's mutator thread structure.
+  sx(ZR, Address(THR, target::Thread::top_exit_frame_info_offset()));
+  sx(ZR, Address(THR, target::Thread::exit_through_ffi_offset()));
+}
+
+void Assembler::EnterFullSafepoint(Register state) {
+  // We generate the same number of instructions whether or not the slow-path is
+  // forced. This simplifies GenerateJitCallbackTrampolines.
+
+  Register addr = RA;
+  ASSERT(addr != state);
+
+  Label slow_path, done, retry;
+  if (FLAG_use_slow_path) {
+    j(&slow_path, Assembler::kNearJump);
+  }
+
+  addi(addr, THR, target::Thread::safepoint_state_offset());
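+  // LR/SC compare-and-swap: transition the safepoint word from the
+  // "unacquired" to the "acquired" state. If another value is observed,
+  // take the slow path; if the sc fails, retry.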
+  Bind(&retry);
+  lr(state, Address(addr, 0));
+  subi(state, state, target::Thread::full_safepoint_state_unacquired());
+  bnez(state, &slow_path, Assembler::kNearJump);
+
+  li(state, target::Thread::full_safepoint_state_acquired());
+  sc(state, state, Address(addr, 0));
+  beqz(state, &done, Assembler::kNearJump);  // 0 means sc was successful.
+
+  if (!FLAG_use_slow_path) {
+    j(&retry, Assembler::kNearJump);
+  }
+
+  Bind(&slow_path);
+  lx(addr, Address(THR, target::Thread::enter_safepoint_stub_offset()));
+  lx(addr, FieldAddress(addr, target::Code::entry_point_offset()));
+  jalr(addr);
+
+  Bind(&done);
+}
+
+void Assembler::ExitFullSafepoint(Register state) {
+  // We generate the same number of instructions whether or not the slow-path is
+  // forced, for consistency with EnterFullSafepoint.
+  Register addr = RA;
+  ASSERT(addr != state);
+
+  Label slow_path, done, retry;
+  if (FLAG_use_slow_path) {
+    j(&slow_path, Assembler::kNearJump);
+  }
+
+  addi(addr, THR, target::Thread::safepoint_state_offset());
+  Bind(&retry);
+  lr(state, Address(addr, 0));
+  subi(state, state, target::Thread::full_safepoint_state_acquired());
+  bnez(state, &slow_path, Assembler::kNearJump);
+
+  li(state, target::Thread::full_safepoint_state_unacquired());
+  sc(state, state, Address(addr, 0));
+  beqz(state, &done, Assembler::kNearJump);  // 0 means sc was successful.
+
+  if (!FLAG_use_slow_path) {
+    j(&retry, Assembler::kNearJump);
+  }
+
+  Bind(&slow_path);
+  lx(addr, Address(THR, target::Thread::exit_safepoint_stub_offset()));
+  lx(addr, FieldAddress(addr, target::Code::entry_point_offset()));
+  jalr(addr);
+
+  Bind(&done);
+}
+
+void Assembler::CheckCodePointer() {
+#ifdef DEBUG
+  if (!FLAG_check_code_pointer) {
+    return;
+  }
+  Comment("CheckCodePointer");
+  Label cid_ok, instructions_ok;
+  CompareClassId(CODE_REG, kCodeCid, TMP);
+  BranchIf(EQ, &cid_ok, kNearJump);
+  ebreak();
+  Bind(&cid_ok);
+
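+  // Materialize the expected tagged Instructions pointer from the current PC
+  // and compare it against the instructions recorded in CODE_REG.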
+  const intptr_t entry_offset =
+      CodeSize() + target::Instructions::HeaderSize() - kHeapObjectTag;
+  intx_t imm = -entry_offset;
+  intx_t lo = imm << (XLEN - 12) >> (XLEN - 12);
+  intx_t hi = (imm - lo) << (XLEN - 32) >> (XLEN - 32);
+  auipc(TMP, hi);
+  addi(TMP, TMP, lo);
+  lx(TMP2, FieldAddress(CODE_REG, target::Code::saved_instructions_offset()));
+  beq(TMP, TMP2, &instructions_ok, kNearJump);
+  ebreak();
+  Bind(&instructions_ok);
+#endif
+}
+
+void Assembler::RestoreCodePointer() {
+  lx(CODE_REG,
+     Address(FP, target::frame_layout.code_from_fp * target::kWordSize));
+  CheckCodePointer();
+}
+
+// Restores the registers that are reserved to cache frequently used values,
+// e.g. WRITE_BARRIER_MASK and NULL_REG.
+void Assembler::RestorePinnedRegisters() {
+  lx(WRITE_BARRIER_MASK,
+     Address(THR, target::Thread::write_barrier_mask_offset()));
+  lx(NULL_REG, Address(THR, target::Thread::object_null_offset()));
+}
+
+void Assembler::SetupGlobalPoolAndDispatchTable() {
+  ASSERT(FLAG_precompiled_mode);
+  lx(PP, Address(THR, target::Thread::global_object_pool_offset()));
+  subi(PP, PP, kHeapObjectTag);  // Pool in PP is untagged!
+  lx(DISPATCH_TABLE_REG,
+     Address(THR, target::Thread::dispatch_table_array_offset()));
+}
+
+void Assembler::EnterDartFrame(intptr_t frame_size, Register new_pp) {
+  ASSERT(!constant_pool_allowed());
+
+  if (!IsITypeImm(frame_size + 4 * target::kWordSize)) {
+    EnterDartFrame(0, new_pp);
+    AddImmediate(SP, SP, -frame_size);
+    return;
+  }
+
+  // N.B. The ordering here is important. We must never write beyond SP or
+  // it can be clobbered by a signal handler.
+  if (FLAG_precompiled_mode) {
+    subi(SP, SP, frame_size + 2 * target::kWordSize);
+    sx(RA, Address(SP, frame_size + 1 * target::kWordSize));
+    sx(FP, Address(SP, frame_size + 0 * target::kWordSize));
+    addi(FP, SP, frame_size + 0 * target::kWordSize);
+  } else {
+    subi(SP, SP, frame_size + 4 * target::kWordSize);
+    sx(RA, Address(SP, frame_size + 3 * target::kWordSize));
+    sx(FP, Address(SP, frame_size + 2 * target::kWordSize));
+    sx(CODE_REG, Address(SP, frame_size + 1 * target::kWordSize));
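+    // PP is untagged while in use; re-tag it so the stack slot holds a
+    // valid ObjectPool pointer for the GC and stack walker.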
+    addi(PP, PP, kHeapObjectTag);
+    sx(PP, Address(SP, frame_size + 0 * target::kWordSize));
+    addi(FP, SP, frame_size + 2 * target::kWordSize);
+    if (new_pp == kNoRegister) {
+      LoadPoolPointer();
+    } else {
+      mv(PP, new_pp);
+    }
+  }
+  set_constant_pool_allowed(true);
+}
+
+// On entry to a function compiled for OSR, the caller's frame pointer, the
+// stack locals, and any copied parameters are already in place.  The frame
+// pointer is already set up.  The PC marker is not correct for the
+// optimized function and there may be extra space for spill slots to
+// allocate. We must also set up the pool pointer for the function.
+void Assembler::EnterOsrFrame(intptr_t extra_size, Register new_pp) {
+  ASSERT(!constant_pool_allowed());
+  Comment("EnterOsrFrame");
+  RestoreCodePointer();
+  LoadPoolPointer();
+
+  if (extra_size > 0) {
+    AddImmediate(SP, -extra_size);
+  }
+}
+
+void Assembler::LeaveDartFrame(RestorePP restore_pp) {
+  // N.B. The ordering here is important. We must never read beyond SP or
+  // it may have already been clobbered by a signal handler.
+  if (!FLAG_precompiled_mode) {
+    if (restore_pp == kRestoreCallerPP) {
+      lx(PP, Address(FP, target::frame_layout.saved_caller_pp_from_fp *
+                             target::kWordSize));
+      subi(PP, PP, kHeapObjectTag);
+    }
+  }
+  set_constant_pool_allowed(false);
+  mv(SP, FP);
+  lx(FP, Address(SP, 0 * target::kWordSize));
+  lx(RA, Address(SP, 1 * target::kWordSize));
+  addi(SP, SP, 2 * target::kWordSize);
+
+  // TODO(riscv): When we know the stack depth, we can avoid updating SP twice.
+}
+
+void Assembler::CallRuntime(const RuntimeEntry& entry,
+                            intptr_t argument_count) {
+  entry.Call(this, argument_count);
+}
+
+void Assembler::EnterCFrame(intptr_t frame_space) {
+  // N.B. The ordering here is important. We must never write beyond SP or
+  // it can be clobbered by a signal handler.
+  subi(SP, SP, frame_space + 2 * target::kWordSize);
+  sx(RA, Address(SP, frame_space + 1 * target::kWordSize));
+  sx(FP, Address(SP, frame_space + 0 * target::kWordSize));
+  addi(FP, SP, frame_space);
+}
+
+void Assembler::LeaveCFrame() {
+  // N.B. The ordering here is important. We must never read beyond SP or
+  // it may have already been clobbered by a signal handler.
+  mv(SP, FP);
+  lx(FP, Address(SP, 0 * target::kWordSize));
+  lx(RA, Address(SP, 1 * target::kWordSize));
+  addi(SP, SP, 2 * target::kWordSize);
+}
+
+// A0: Receiver
+// S5: ICData entry array
+// PP: Caller's PP (preserved)
+void Assembler::MonomorphicCheckedEntryJIT() {
+  has_monomorphic_entry_ = true;
+  const intptr_t saved_far_branch_level = far_branch_level();
+  set_far_branch_level(0);
+  const intptr_t start = CodeSize();
+
+  Label immediate, miss;
+  Bind(&miss);
+  lx(TMP, Address(THR, target::Thread::switchable_call_miss_entry_offset()));
+  jr(TMP);
+
+  Comment("MonomorphicCheckedEntry");
+  ASSERT_EQUAL(CodeSize() - start,
+               target::Instructions::kMonomorphicEntryOffsetJIT);
+
+  Register entries_reg = IC_DATA_REG;  // Contains ICData::entries().
+  const intptr_t cid_offset = target::Array::element_offset(0);
+  const intptr_t count_offset = target::Array::element_offset(1);
+  ASSERT(A1 != PP);
+  ASSERT(A1 != entries_reg);
+  ASSERT(A1 != CODE_REG);
+
+  lx(TMP, FieldAddress(entries_reg, cid_offset));
+  LoadTaggedClassIdMayBeSmi(A1, A0);
+  bne(TMP, A1, &miss, kNearJump);
+
+  lx(TMP, FieldAddress(entries_reg, count_offset));
+  addi(TMP, TMP, target::ToRawSmi(1));
+  sx(TMP, FieldAddress(entries_reg, count_offset));
+
+  li(ARGS_DESC_REG, 0);  // GC-safe for OptimizeInvokedFunction
+
+  // Fall through to unchecked entry.
+  ASSERT_EQUAL(CodeSize() - start,
+               target::Instructions::kPolymorphicEntryOffsetJIT);
+
+  set_far_branch_level(saved_far_branch_level);
+}
+
+// A0: Receiver
+// S5: Guarded cid as Smi
+// Preserves S4 (ARGS_DESC_REG): not required today, but it may be later.
+// PP: Caller's PP (preserved)
+void Assembler::MonomorphicCheckedEntryAOT() {
+  has_monomorphic_entry_ = true;
+  intptr_t saved_far_branch_level = far_branch_level();
+  set_far_branch_level(0);
+
+  const intptr_t start = CodeSize();
+
+  Label immediate, miss;
+  Bind(&miss);
+  lx(TMP, Address(THR, target::Thread::switchable_call_miss_entry_offset()));
+  jr(TMP);
+
+  Comment("MonomorphicCheckedEntry");
+  ASSERT_EQUAL(CodeSize() - start,
+               target::Instructions::kMonomorphicEntryOffsetAOT);
+  LoadClassId(TMP, A0);
+  SmiTag(TMP);
+  bne(S5, TMP, &miss, kNearJump);
+
+  // Fall through to unchecked entry.
+  ASSERT_EQUAL(CodeSize() - start,
+               target::Instructions::kPolymorphicEntryOffsetAOT);
+
+  set_far_branch_level(saved_far_branch_level);
+}
+
+void Assembler::BranchOnMonomorphicCheckedEntryJIT(Label* label) {
+  has_monomorphic_entry_ = true;
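+  // Pad with ebreak so the monomorphic and polymorphic entry points fall at
+  // their fixed offsets from the start of the code.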
+  while (CodeSize() < target::Instructions::kMonomorphicEntryOffsetJIT) {
+    ebreak();
+  }
+  j(label);
+  while (CodeSize() < target::Instructions::kPolymorphicEntryOffsetJIT) {
+    ebreak();
+  }
+}
+
+#ifndef PRODUCT
+void Assembler::MaybeTraceAllocation(intptr_t cid,
+                                     Register temp_reg,
+                                     Label* trace) {
+  ASSERT(cid > 0);
+
+  const intptr_t shared_table_offset =
+      target::IsolateGroup::shared_class_table_offset();
+  const intptr_t table_offset =
+      target::SharedClassTable::class_heap_stats_table_offset();
+  const intptr_t class_offset = target::ClassTable::ClassOffsetFor(cid);
+
+  LoadIsolateGroup(temp_reg);
+  lx(temp_reg, Address(temp_reg, shared_table_offset));
+  lx(temp_reg, Address(temp_reg, table_offset));
+  if (IsITypeImm(class_offset)) {
+    lbu(temp_reg, Address(temp_reg, class_offset));
+  } else {
+    AddImmediate(temp_reg, class_offset);
+    lbu(temp_reg, Address(temp_reg, 0));
+  }
+  bnez(temp_reg, trace);
+}
+#endif  // !PRODUCT
+
+void Assembler::TryAllocateObject(intptr_t cid,
+                                  intptr_t instance_size,
+                                  Label* failure,
+                                  JumpDistance distance,
+                                  Register instance_reg,
+                                  Register temp_reg) {
+  ASSERT(failure != NULL);
+  ASSERT(instance_size != 0);
+  ASSERT(instance_reg != temp_reg);
+  ASSERT(temp_reg != kNoRegister);
+  ASSERT(Utils::IsAligned(instance_size,
+                          target::ObjectAlignment::kObjectAlignment));
+  if (FLAG_inline_alloc &&
+      target::Heap::IsAllocatableInNewSpace(instance_size)) {
+    // If this allocation is traced, the program will jump to the failure
+    // path (i.e. the allocation stub), which will allocate the object and
+    // trace the allocation call site.
+    NOT_IN_PRODUCT(MaybeTraceAllocation(cid, temp_reg, failure));
+
+    lx(instance_reg, Address(THR, target::Thread::top_offset()));
+    lx(temp_reg, Address(THR, target::Thread::end_offset()));
+    // instance_reg: current top (next object start).
+    // temp_reg: heap end
+
+    // TODO(koda): Protect against unsigned overflow here.
+    AddImmediate(instance_reg, instance_size);
+    // instance_reg: potential top (next object start).
+    // Fail if the heap end is unsigned-less-or-equal to the new top.
+    bleu(temp_reg, instance_reg, failure, distance);
+
+    // Successfully allocated the object. Now update top to point to the
+    // next object start and store the tags (including the class id) in the
+    // object's header.
+    sx(instance_reg, Address(THR, target::Thread::top_offset()));
+    // Move instance_reg back to the start of the object and tag it.
+    AddImmediate(instance_reg, -instance_size + kHeapObjectTag);
+
+    const uword tags = target::MakeTagWordForNewSpaceObject(cid, instance_size);
+    LoadImmediate(temp_reg, tags);
+    StoreToOffset(temp_reg,
+                  FieldAddress(instance_reg, target::Object::tags_offset()));
+  } else {
+    j(failure, distance);
+  }
+}
+
+void Assembler::TryAllocateArray(intptr_t cid,
+                                 intptr_t instance_size,
+                                 Label* failure,
+                                 Register instance,
+                                 Register end_address,
+                                 Register temp1,
+                                 Register temp2) {
+  if (FLAG_inline_alloc &&
+      target::Heap::IsAllocatableInNewSpace(instance_size)) {
+    // If this allocation is traced, the program will jump to the failure
+    // path (i.e. the allocation stub), which will allocate the object and
+    // trace the allocation call site.
+    NOT_IN_PRODUCT(MaybeTraceAllocation(cid, temp1, failure));
+    // Potential new object start.
+    lx(instance, Address(THR, target::Thread::top_offset()));
+    // instance_size can exceed addi's 12-bit immediate range.
+    AddImmediate(end_address, instance, instance_size);
+    bltu(end_address, instance, failure);  // Fail on unsigned overflow.
+
+    // Check if the allocation fits into the remaining space.
+    // instance: potential new object start.
+    // end_address: potential next object start.
+    lx(temp2, Address(THR, target::Thread::end_offset()));
+    bgeu(end_address, temp2, failure);
+
+    // Successfully allocated the object(s). Now update top to point to the
+    // next object start and initialize the object.
+    sx(end_address, Address(THR, target::Thread::top_offset()));
+    addi(instance, instance, kHeapObjectTag);
+    NOT_IN_PRODUCT(LoadImmediate(temp2, instance_size));
+
+    // Initialize the tags.
+    // instance: new object start as a tagged pointer.
+    const uword tags = target::MakeTagWordForNewSpaceObject(cid, instance_size);
+    LoadImmediate(temp2, tags);
+    sx(temp2, FieldAddress(instance, target::Object::tags_offset()));
+  } else {
+    j(failure);
+  }
+}
+
+void Assembler::GenerateUnRelocatedPcRelativeCall(intptr_t offset_into_target) {
+  // JAL only has a +/- 1MB range. AUIPC+JALR has a +/- 2GB range.
+  intx_t imm = offset_into_target;
+  intx_t lo = imm << (XLEN - 12) >> (XLEN - 12);
+  intx_t hi = (imm - lo) << (XLEN - 32) >> (XLEN - 32);
+  auipc(RA, hi);
+  jalr_fixed(RA, RA, lo);
+}
+
+void Assembler::GenerateUnRelocatedPcRelativeTailCall(
+    intptr_t offset_into_target) {
+  // J only has a +/- 1MB range. AUIPC+JR has a +/- 2GB range.
+  intx_t imm = offset_into_target;
+  intx_t lo = imm << (XLEN - 12) >> (XLEN - 12);
+  intx_t hi = (imm - lo) << (XLEN - 32) >> (XLEN - 32);
+  auipc(TMP, hi);
+  jalr_fixed(ZR, TMP, lo);
+}
+
+static OperandSize OperandSizeFor(intptr_t cid) {
+  switch (cid) {
+    case kArrayCid:
+    case kImmutableArrayCid:
+    case kTypeArgumentsCid:
+      return kObjectBytes;
+    case kOneByteStringCid:
+    case kExternalOneByteStringCid:
+      return kByte;
+    case kTwoByteStringCid:
+    case kExternalTwoByteStringCid:
+      return kTwoBytes;
+    case kTypedDataInt8ArrayCid:
+      return kByte;
+    case kTypedDataUint8ArrayCid:
+    case kTypedDataUint8ClampedArrayCid:
+    case kExternalTypedDataUint8ArrayCid:
+    case kExternalTypedDataUint8ClampedArrayCid:
+      return kUnsignedByte;
+    case kTypedDataInt16ArrayCid:
+      return kTwoBytes;
+    case kTypedDataUint16ArrayCid:
+      return kUnsignedTwoBytes;
+    case kTypedDataInt32ArrayCid:
+      return kFourBytes;
+    case kTypedDataUint32ArrayCid:
+      return kUnsignedFourBytes;
+    case kTypedDataInt64ArrayCid:
+    case kTypedDataUint64ArrayCid:
+      return kDWord;
+    case kTypedDataFloat32ArrayCid:
+      return kSWord;
+    case kTypedDataFloat64ArrayCid:
+      return kDWord;
+    case kTypedDataFloat32x4ArrayCid:
+    case kTypedDataInt32x4ArrayCid:
+    case kTypedDataFloat64x2ArrayCid:
+      return kQWord;
+    case kTypedDataInt8ArrayViewCid:
+      UNREACHABLE();
+      return kByte;
+    default:
+      UNREACHABLE();
+      return kByte;
+  }
+}
+
+Address Assembler::ElementAddressForIntIndex(bool is_external,
+                                             intptr_t cid,
+                                             intptr_t index_scale,
+                                             Register array,
+                                             intptr_t index) const {
+  const int64_t offset = index * index_scale + HeapDataOffset(is_external, cid);
+  ASSERT(Utils::IsInt(32, offset));
+  return Address(array, static_cast<int32_t>(offset));
+}
+void Assembler::ComputeElementAddressForIntIndex(Register address,
+                                                 bool is_external,
+                                                 intptr_t cid,
+                                                 intptr_t index_scale,
+                                                 Register array,
+                                                 intptr_t index) {
+  const int64_t offset = index * index_scale + HeapDataOffset(is_external, cid);
+  AddImmediate(address, array, offset);
+}
+
+Address Assembler::ElementAddressForRegIndex(bool is_external,
+                                             intptr_t cid,
+                                             intptr_t index_scale,
+                                             bool index_unboxed,
+                                             Register array,
+                                             Register index,
+                                             Register temp) {
+  return ElementAddressForRegIndexWithSize(is_external, cid,
+                                           OperandSizeFor(cid), index_scale,
+                                           index_unboxed, array, index, temp);
+}
+
+Address Assembler::ElementAddressForRegIndexWithSize(bool is_external,
+                                                     intptr_t cid,
+                                                     OperandSize size,
+                                                     intptr_t index_scale,
+                                                     bool index_unboxed,
+                                                     Register array,
+                                                     Register index,
+                                                     Register temp) {
+  // Unless unboxed, the index is expected to be smi-tagged (i.e. shifted
+  // left by 1) for all arrays, hence the compensating negative boxing shift.
+  const intptr_t boxing_shift = index_unboxed ? 0 : -kSmiTagShift;
+  const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) + boxing_shift;
+  const int32_t offset = HeapDataOffset(is_external, cid);
+  ASSERT(array != temp);
+  ASSERT(index != temp);
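+  // A net shift of -1 occurs only for one-byte elements with a boxed index;
+  // the arithmetic right shift untags the Smi index.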
+  if (shift == 0) {
+    add(temp, array, index);
+  } else if (shift < 0) {
+    ASSERT(shift == -1);
+    srai(temp, index, 1);
+    add(temp, array, temp);
+  } else {
+    slli(temp, index, shift);
+    add(temp, array, temp);
+  }
+  return Address(temp, offset);
+}
+
+void Assembler::ComputeElementAddressForRegIndex(Register address,
+                                                 bool is_external,
+                                                 intptr_t cid,
+                                                 intptr_t index_scale,
+                                                 bool index_unboxed,
+                                                 Register array,
+                                                 Register index) {
+  // Unless unboxed, the index is expected to be smi-tagged (i.e. shifted
+  // left by 1) for all arrays, hence the compensating negative boxing shift.
+  const intptr_t boxing_shift = index_unboxed ? 0 : -kSmiTagShift;
+  const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) + boxing_shift;
+  const int32_t offset = HeapDataOffset(is_external, cid);
+  ASSERT(array != address);
+  ASSERT(index != address);
+  if (shift == 0) {
+    add(address, array, index);
+  } else if (shift < 0) {
+    ASSERT(shift == -1);
+    srai(address, index, 1);
+    add(address, array, address);
+  } else {
+    slli(address, index, shift);
+    add(address, array, address);
+  }
+  if (offset != 0) {
+    AddImmediate(address, address, offset);
+  }
+}
+
+void Assembler::LoadStaticFieldAddress(Register address,
+                                       Register field,
+                                       Register scratch) {
+  LoadCompressedSmiFieldFromOffset(
+      scratch, field, target::Field::host_offset_or_field_id_offset());
+  const intptr_t field_table_offset =
+      compiler::target::Thread::field_table_values_offset();
+  LoadMemoryValue(address, THR, static_cast<int32_t>(field_table_offset));
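+  // The field id is a Smi; one shift both untags it and scales it to a byte
+  // offset into the field table.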
+  slli(scratch, scratch, target::kWordSizeLog2 - kSmiTagShift);
+  add(address, address, scratch);
+}
+
+void Assembler::LoadCompressedFieldAddressForRegOffset(
+    Register address,
+    Register instance,
+    Register offset_in_words_as_smi) {
+  slli(TMP, offset_in_words_as_smi,
+       target::kCompressedWordSizeLog2 - kSmiTagShift);
+  add(TMP, TMP, instance);
+  addi(address, TMP, -kHeapObjectTag);
+}
+
+void Assembler::LoadFieldAddressForRegOffset(Register address,
+                                             Register instance,
+                                             Register offset_in_words_as_smi) {
+  slli(TMP, offset_in_words_as_smi, target::kWordSizeLog2 - kSmiTagShift);
+  add(TMP, TMP, instance);
+  addi(address, TMP, -kHeapObjectTag);
+}
+
+// Note: the function never clobbers TMP, TMP2 scratch registers.
+void Assembler::LoadObjectHelper(Register dst,
+                                 const Object& object,
+                                 bool is_unique) {
+  ASSERT(IsOriginalObject(object));
+  // `is_unique == true` effectively means the object has to be patchable
+  // (even if the object is null).
+  if (!is_unique) {
+    if (IsSameObject(compiler::NullObject(), object)) {
+      mv(dst, NULL_REG);
+      return;
+    }
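+    // true and false live at fixed offsets from null, so they can be
+    // materialized from NULL_REG with a single addi.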
+    if (IsSameObject(CastHandle<Object>(compiler::TrueObject()), object)) {
+      addi(dst, NULL_REG, kTrueOffsetFromNull);
+      return;
+    }
+    if (IsSameObject(CastHandle<Object>(compiler::FalseObject()), object)) {
+      addi(dst, NULL_REG, kFalseOffsetFromNull);
+      return;
+    }
+    word offset = 0;
+    if (target::CanLoadFromThread(object, &offset)) {
+      lx(dst, Address(THR, offset));
+      return;
+    }
+    if (target::IsSmi(object)) {
+      intx_t raw_smi = target::ToRawSmi(object);
+      if (IsITypeImm(raw_smi)) {
+        li(dst, raw_smi);
+        return;
+      }
+      if (IsUTypeImm(raw_smi)) {
+        lui(dst, raw_smi);
+        return;
+      }
+    }
+  }
+  if (CanLoadFromObjectPool(object)) {
+    const intptr_t index =
+        is_unique ? object_pool_builder().AddObject(
+                        object, ObjectPoolBuilderEntry::kPatchable)
+                  : object_pool_builder().FindObject(
+                        object, ObjectPoolBuilderEntry::kNotPatchable);
+    LoadWordFromPoolIndex(dst, index);
+    return;
+  }
+  ASSERT(target::IsSmi(object));
+  LoadImmediate(dst, target::ToRawSmi(object));
+}
+
+// Note: the leaf call sequence uses some ABI callee-saved registers as
+// scratch, so they must be preserved manually.
+void Assembler::EnterCallRuntimeFrame(intptr_t frame_size, bool is_leaf) {
+  // N.B. The ordering here is important. We must never write beyond SP or
+  // it can be clobbered by a signal handler.
+  if (FLAG_precompiled_mode) {
+    subi(SP, SP, 2 * target::kWordSize + frame_size);
+    sx(RA, Address(SP, 1 * target::kWordSize + frame_size));
+    sx(FP, Address(SP, 0 * target::kWordSize + frame_size));
+    addi(FP, SP, 0 * target::kWordSize + frame_size);
+  } else {
+    subi(SP, SP, 4 * target::kWordSize + frame_size);
+    sx(RA, Address(SP, 3 * target::kWordSize + frame_size));
+    sx(FP, Address(SP, 2 * target::kWordSize + frame_size));
+    sx(CODE_REG, Address(SP, 1 * target::kWordSize + frame_size));
+    addi(PP, PP, kHeapObjectTag);
+    sx(PP, Address(SP, 0 * target::kWordSize + frame_size));
+    addi(FP, SP, 2 * target::kWordSize + frame_size);
+  }
+
+  const RegisterSet kVolatileRegisterSet(kAbiVolatileCpuRegs,
+                                         kAbiVolatileFpuRegs);
+  PushRegisters(kVolatileRegisterSet);
+
+  if (!is_leaf) {  // Leaf calling sequence aligns the stack itself.
+    ReserveAlignedFrameSpace(0);
+  }
+}
+
+void Assembler::LeaveCallRuntimeFrame(bool is_leaf) {
+  const RegisterSet kVolatileRegisterSet(kAbiVolatileCpuRegs,
+                                         kAbiVolatileFpuRegs);
+
+  const intptr_t kPushedRegistersSize =
+      kVolatileRegisterSet.CpuRegisterCount() * target::kWordSize +
+      kVolatileRegisterSet.FpuRegisterCount() * kFpuRegisterSize +
+      (target::frame_layout.dart_fixed_frame_size - 2) *
+          target::kWordSize;  // From EnterStubFrame (excluding PC / FP)
+
+  subi(SP, FP, kPushedRegistersSize);
+
+  PopRegisters(kVolatileRegisterSet);
+
+  LeaveStubFrame();
+}
+
+void Assembler::CallRuntimeScope::Call(intptr_t argument_count) {
+  assembler_->CallRuntime(entry_, argument_count);
+}
+
+Assembler::CallRuntimeScope::~CallRuntimeScope() {
+  if (preserve_registers_) {
+    assembler_->LeaveCallRuntimeFrame(entry_.is_leaf());
+    if (restore_code_reg_) {
+      assembler_->PopRegister(CODE_REG);
+    }
+  }
+}
+
+Assembler::CallRuntimeScope::CallRuntimeScope(Assembler* assembler,
+                                              const RuntimeEntry& entry,
+                                              intptr_t frame_size,
+                                              bool preserve_registers,
+                                              const Address* caller)
+    : assembler_(assembler),
+      entry_(entry),
+      preserve_registers_(preserve_registers),
+      restore_code_reg_(caller != nullptr) {
+  if (preserve_registers_) {
+    if (caller != nullptr) {
+      assembler_->PushRegister(CODE_REG);
+      assembler_->lx(CODE_REG, *caller);
+    }
+    assembler_->EnterCallRuntimeFrame(frame_size, entry.is_leaf());
+  }
+}
+
+void Assembler::AddImmediateBranchOverflow(Register rd,
+                                           Register rs1,
+                                           intx_t imm,
+                                           Label* overflow) {
+  ASSERT(rd != TMP2);
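+  // Without condition flags, detect signed overflow from the result:
+  // adding a positive immediate overflows iff the result is less than the
+  // original operand; adding a negative immediate iff it is greater.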
+  if (rd == rs1) {
+    mv(TMP2, rs1);
+    AddImmediate(rd, rs1, imm);
+    if (imm > 0) {
+      blt(rd, TMP2, overflow);
+    } else if (imm < 0) {
+      bgt(rd, TMP2, overflow);
+    }
+  } else {
+    AddImmediate(rd, rs1, imm);
+    if (imm > 0) {
+      blt(rd, rs1, overflow);
+    } else if (imm < 0) {
+      bgt(rd, rs1, overflow);
+    }
+  }
+}
+void Assembler::SubtractImmediateBranchOverflow(Register rd,
+                                                Register rs1,
+                                                intx_t imm,
+                                                Label* overflow) {
+  // TODO(riscv): Incorrect for MIN_INTX_T!
+  AddImmediateBranchOverflow(rd, rs1, -imm, overflow);
+}
+void Assembler::MultiplyImmediateBranchOverflow(Register rd,
+                                                Register rs1,
+                                                intx_t imm,
+                                                Label* overflow) {
+  ASSERT(rd != TMP);
+  ASSERT(rd != TMP2);
+  ASSERT(rs1 != TMP);
+  ASSERT(rs1 != TMP2);
+
+  LoadImmediate(TMP2, imm);
+  // Macro-op fusion: when both products are needed, the recommended sequence
+  // is mulh first.
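+  // The signed product fits in one register iff the high half (mulh) equals
+  // the sign-extension of the low half (mul).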
+  mulh(TMP, rs1, TMP2);
+  mul(rd, rs1, TMP2);
+  srai(TMP2, rd, XLEN - 1);
+  bne(TMP, TMP2, overflow);
+}
+void Assembler::AddBranchOverflow(Register rd,
+                                  Register rs1,
+                                  Register rs2,
+                                  Label* overflow) {
+  ASSERT(rd != TMP);
+  ASSERT(rd != TMP2);
+  ASSERT(rs1 != TMP);
+  ASSERT(rs1 != TMP2);
+  ASSERT(rs2 != TMP);
+  ASSERT(rs2 != TMP2);
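+  // In the general case, rd = rs1 + rs2 overflows iff (rs2 < 0) != (rd < rs1);
+  // the aliased cases capture a sign or value in TMP before it is destroyed.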
+
+  if ((rd == rs1) && (rd == rs2)) {
+    ASSERT(rs1 == rs2);
+    mv(TMP, rs1);
+    add(rd, rs1, rs2);   // rs1, rs2 destroyed
+    xor_(TMP, TMP, rd);  // TMP negative if sign changed
+    bltz(TMP, overflow);
+  } else if (rs1 == rs2) {
+    ASSERT(rd != rs1);
+    ASSERT(rd != rs2);
+    add(rd, rs1, rs2);
+    xor_(TMP, rd, rs1);  // TMP negative if sign changed
+    bltz(TMP, overflow);
+  } else if (rd == rs1) {
+    ASSERT(rs1 != rs2);
+    slti(TMP, rs1, 0);
+    add(rd, rs1, rs2);  // rs1 destroyed
+    slt(TMP2, rd, rs2);
+    bne(TMP, TMP2, overflow);
+  } else if (rd == rs2) {
+    ASSERT(rs1 != rs2);
+    slti(TMP, rs2, 0);
+    add(rd, rs1, rs2);  // rs2 destroyed
+    slt(TMP2, rd, rs1);
+    bne(TMP, TMP2, overflow);
+  } else {
+    add(rd, rs1, rs2);
+    slti(TMP, rs2, 0);
+    slt(TMP2, rd, rs1);
+    bne(TMP, TMP2, overflow);
+  }
+}
+
+void Assembler::SubtractBranchOverflow(Register rd,
+                                       Register rs1,
+                                       Register rs2,
+                                       Label* overflow) {
+  ASSERT(rd != TMP);
+  ASSERT(rd != TMP2);
+  ASSERT(rs1 != TMP);
+  ASSERT(rs1 != TMP2);
+  ASSERT(rs2 != TMP);
+  ASSERT(rs2 != TMP2);
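+  // In the general case, rd = rs1 - rs2 overflows iff (rs2 < 0) != (rs1 < rd):
+  // subtracting a negative value must increase rs1; subtracting a
+  // non-negative one must not.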
+
+  if ((rd == rs1) && (rd == rs2)) {
+    ASSERT(rs1 == rs2);
+    mv(TMP, rs1);
+    sub(rd, rs1, rs2);   // rs1, rs2 destroyed
+    xor_(TMP, TMP, rd);  // TMP negative if sign changed
+    bltz(TMP, overflow);
+  } else if (rs1 == rs2) {
+    ASSERT(rd != rs1);
+    ASSERT(rd != rs2);
+    sub(rd, rs1, rs2);
+    xor_(TMP, rd, rs1);  // TMP negative if sign changed
+    bltz(TMP, overflow);
+  } else if (rd == rs1) {
+    ASSERT(rs1 != rs2);
+    slti(TMP, rs1, 0);
+    sub(rd, rs1, rs2);  // rs1 destroyed
+    slt(TMP2, rd, rs2);
+    bne(TMP, TMP2, overflow);
+  } else if (rd == rs2) {
+    ASSERT(rs1 != rs2);
+    slti(TMP, rs2, 0);
+    sub(rd, rs1, rs2);  // rs2 destroyed
+    slt(TMP2, rd, rs1);
+    bne(TMP, TMP2, overflow);
+  } else {
+    sub(rd, rs1, rs2);
+    slti(TMP, rs2, 0);
+    slt(TMP2, rs1, rd);
+    bne(TMP, TMP2, overflow);
+  }
+}
+
+void Assembler::MultiplyBranchOverflow(Register rd,
+                                       Register rs1,
+                                       Register rs2,
+                                       Label* overflow) {
+  ASSERT(rd != TMP);
+  ASSERT(rd != TMP2);
+  ASSERT(rs1 != TMP);
+  ASSERT(rs1 != TMP2);
+  ASSERT(rs2 != TMP);
+  ASSERT(rs2 != TMP2);
+
+  // Macro-op fusion: when both products are needed, the recommended sequence
+  // is mulh first.
+  mulh(TMP, rs1, rs2);
+  mul(rd, rs1, rs2);
+  srai(TMP2, rd, XLEN - 1);
+  bne(TMP, TMP2, overflow);
+}
+
+}  // namespace compiler
+
+}  // namespace dart
+
+#endif  // defined(TARGET_ARCH_RISCV)
diff --git a/runtime/vm/compiler/assembler/assembler_riscv.h b/runtime/vm/compiler/assembler/assembler_riscv.h
new file mode 100644
index 0000000..c96fa8b
--- /dev/null
+++ b/runtime/vm/compiler/assembler/assembler_riscv.h
@@ -0,0 +1,1457 @@
+// Copyright (c) 2017, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#ifndef RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_RISCV_H_
+#define RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_RISCV_H_
+
+#if defined(DART_PRECOMPILED_RUNTIME)
+#error "AOT runtime should not use compiler sources (including header files)"
+#endif  // defined(DART_PRECOMPILED_RUNTIME)
+
+#ifndef RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_H_
+#error Do not include assembler_riscv.h directly; use assembler.h instead.
+#endif
+
+#include <functional>
+
+#include "platform/assert.h"
+#include "platform/utils.h"
+#include "vm/class_id.h"
+#include "vm/compiler/assembler/assembler_base.h"
+#include "vm/constants.h"
+#include "vm/hash_map.h"
+#include "vm/simulator.h"
+
+namespace dart {
+
+// Forward declarations.
+class FlowGraphCompiler;
+class RuntimeEntry;
+class RegisterSet;
+
+namespace compiler {
+
+class Address {
+ public:
+  Address(Register base, intptr_t offset) : base_(base), offset_(offset) {}
+  explicit Address(Register base) : base_(base), offset_(0) {}
+
+  // Prevent implicit conversion of Register to intptr_t.
+  Address(Register base, Register index) = delete;
+
+  Register base() const { return base_; }
+  intptr_t offset() const { return offset_; }
+
+ private:
+  Register base_;
+  intptr_t offset_;
+};
+
+class FieldAddress : public Address {
+ public:
+  FieldAddress(Register base, intptr_t offset)
+      : Address(base, offset - kHeapObjectTag) {}
+
+  // Prevent implicit conversion of Register to intptr_t.
+  FieldAddress(Register base, Register index) = delete;
+};
+
+// All functions produce exactly one instruction.
+class MicroAssembler : public AssemblerBase {
+ public:
+  MicroAssembler(ObjectPoolBuilder* object_pool_builder,
+                 intptr_t far_branch_level,
+                 ExtensionSet extensions);
+  ~MicroAssembler();
+
+#if defined(TESTING)
+  void SetExtensions(ExtensionSet extensions) { extensions_ = extensions; }
+#endif
+  bool Supports(Extension extension) const {
+    return extensions_.Includes(extension);
+  }
+  bool Supports(ExtensionSet extensions) const {
+    return extensions_.IncludesAll(extensions);
+  }
+
+  intptr_t far_branch_level() const { return far_branch_level_; }
+  void set_far_branch_level(intptr_t level) { far_branch_level_ = level; }
+  void Bind(Label* label);
+
+  // ==== RV32I ====
+  void lui(Register rd, intptr_t imm);
+  void lui_fixed(Register rd, intptr_t imm);
+  void auipc(Register rd, intptr_t imm);
+
+  void jal(Register rd, Label* label, JumpDistance d = kFarJump);
+  void jal(Label* label, JumpDistance d = kFarJump) { jal(RA, label, d); }
+  void j(Label* label, JumpDistance d = kFarJump) { jal(ZR, label, d); }
+
+  void jalr(Register rd, Register rs1, intptr_t offset = 0);
+  void jalr_fixed(Register rd, Register rs1, intptr_t offset);
+  void jalr(Register rs1, intptr_t offset = 0) { jalr(RA, rs1, offset); }
+  void jr(Register rs1, intptr_t offset = 0) { jalr(ZR, rs1, offset); }
+  void ret() { jalr(ZR, RA, 0); }
+
+  void beq(Register rs1, Register rs2, Label* l, JumpDistance d = kFarJump);
+  void bne(Register rs1, Register rs2, Label* l, JumpDistance d = kFarJump);
+  void blt(Register rs1, Register rs2, Label* l, JumpDistance d = kFarJump);
+  void bge(Register rs1, Register rs2, Label* l, JumpDistance d = kFarJump);
+  void bgt(Register rs1, Register rs2, Label* l, JumpDistance d = kFarJump) {
+    blt(rs2, rs1, l, d);
+  }
+  void ble(Register rs1, Register rs2, Label* l, JumpDistance d = kFarJump) {
+    bge(rs2, rs1, l, d);
+  }
+  void bltu(Register rs1, Register rs2, Label* l, JumpDistance d = kFarJump);
+  void bgeu(Register rs1, Register rs2, Label* l, JumpDistance d = kFarJump);
+  void bgtu(Register rs1, Register rs2, Label* l, JumpDistance d = kFarJump) {
+    bltu(rs2, rs1, l, d);
+  }
+  void bleu(Register rs1, Register rs2, Label* l, JumpDistance d = kFarJump) {
+    bgeu(rs2, rs1, l, d);
+  }
+
+  void lb(Register rd, Address addr);
+  void lh(Register rd, Address addr);
+  void lw(Register rd, Address addr);
+  void lbu(Register rd, Address addr);
+  void lhu(Register rd, Address addr);
+
+  void sb(Register rs2, Address addr);
+  void sh(Register rs2, Address addr);
+  void sw(Register rs2, Address addr);
+
+  void addi(Register rd, Register rs1, intptr_t imm);
+  void subi(Register rd, Register rs1, intptr_t imm) { addi(rd, rs1, -imm); }
+  void slti(Register rd, Register rs1, intptr_t imm);
+  void sltiu(Register rd, Register rs1, intptr_t imm);
+  void xori(Register rd, Register rs1, intptr_t imm);
+  void ori(Register rd, Register rs1, intptr_t imm);
+  void andi(Register rd, Register rs1, intptr_t imm);
+  void slli(Register rd, Register rs1, intptr_t shamt);
+  void srli(Register rd, Register rs1, intptr_t shamt);
+  void srai(Register rd, Register rs1, intptr_t shamt);
+
+  void add(Register rd, Register rs1, Register rs2);
+  void sub(Register rd, Register rs1, Register rs2);
+  void sll(Register rd, Register rs1, Register rs2);
+  void slt(Register rd, Register rs1, Register rs2);
+  void sltu(Register rd, Register rs1, Register rs2);
+  void xor_(Register rd, Register rs1, Register rs2);
+  void srl(Register rd, Register rs1, Register rs2);
+  void sra(Register rd, Register rs1, Register rs2);
+  void or_(Register rd, Register rs1, Register rs2);
+  void and_(Register rd, Register rs1, Register rs2);
+
+  void fence(HartEffects predecessor, HartEffects successor);
+  void fence() { fence(kAll, kAll); }
+  void fencei();
+  void ecall();
+  void ebreak();  // Causes SIGTRAP(5).
+
+  void csrrw(Register rd, uint32_t csr, Register rs1);
+  void csrrs(Register rd, uint32_t csr, Register rs1);
+  void csrrc(Register rd, uint32_t csr, Register rs1);
+  void csrr(Register rd, uint32_t csr) { csrrs(rd, csr, ZR); }
+  void csrw(uint32_t csr, Register rs) { csrrw(ZR, csr, rs); }
+  void csrs(uint32_t csr, Register rs) { csrrs(ZR, csr, rs); }
+  void csrc(uint32_t csr, Register rs) { csrrc(ZR, csr, rs); }
+  void csrrwi(Register rd, uint32_t csr, uint32_t imm);
+  void csrrsi(Register rd, uint32_t csr, uint32_t imm);
+  void csrrci(Register rd, uint32_t csr, uint32_t imm);
+  void csrwi(uint32_t csr, uint32_t imm) { csrrwi(ZR, csr, imm); }
+  void csrsi(uint32_t csr, uint32_t imm) { csrrsi(ZR, csr, imm); }
+  void csrci(uint32_t csr, uint32_t imm) { csrrci(ZR, csr, imm); }
+
+  void trap();  // Permanently reserved illegal instruction; causes SIGILL(4).
+
+  void nop() { addi(ZR, ZR, 0); }
+  void li(Register rd, intptr_t imm) { addi(rd, ZR, imm); }
+  void mv(Register rd, Register rs) { addi(rd, rs, 0); }
+  void not_(Register rd, Register rs) { xori(rd, rs, -1); }
+  void neg(Register rd, Register rs) { sub(rd, ZR, rs); }
+
+  void snez(Register rd, Register rs) { sltu(rd, ZR, rs); }
+  void seqz(Register rd, Register rs) { sltiu(rd, rs, 1); }
+  void sltz(Register rd, Register rs) { slt(rd, rs, ZR); }
+  void sgtz(Register rd, Register rs) { slt(rd, ZR, rs); }
+
+  void beqz(Register rs, Label* label, JumpDistance distance = kFarJump) {
+    beq(rs, ZR, label, distance);
+  }
+  void bnez(Register rs, Label* label, JumpDistance distance = kFarJump) {
+    bne(rs, ZR, label, distance);
+  }
+  void blez(Register rs, Label* label, JumpDistance distance = kFarJump) {
+    bge(ZR, rs, label, distance);
+  }
+  void bgez(Register rs, Label* label, JumpDistance distance = kFarJump) {
+    bge(rs, ZR, label, distance);
+  }
+  void bltz(Register rs, Label* label, JumpDistance distance = kFarJump) {
+    blt(rs, ZR, label, distance);
+  }
+  void bgtz(Register rs, Label* label, JumpDistance distance = kFarJump) {
+    blt(ZR, rs, label, distance);
+  }
+
+  // ==== RV64I ====
+#if XLEN >= 64
+  void lwu(Register rd, Address addr);
+  void ld(Register rd, Address addr);
+
+  void sd(Register rs2, Address addr);
+
+  void addiw(Register rd, Register rs1, intptr_t imm);
+  void subiw(Register rd, Register rs1, intptr_t imm) { addiw(rd, rs1, -imm); }
+  void slliw(Register rd, Register rs1, intptr_t shamt);
+  void srliw(Register rd, Register rs1, intptr_t shamt);
+  void sraiw(Register rd, Register rs1, intptr_t shamt);
+
+  void addw(Register rd, Register rs1, Register rs2);
+  void subw(Register rd, Register rs1, Register rs2);
+  void sllw(Register rd, Register rs1, Register rs2);
+  void srlw(Register rd, Register rs1, Register rs2);
+  void sraw(Register rd, Register rs1, Register rs2);
+
+  void negw(Register rd, Register rs) { subw(rd, ZR, rs); }
+  void sextw(Register rd, Register rs) { addiw(rd, rs, 0); }
+#endif  // XLEN >= 64
+
+#if XLEN == 32
+  void lx(Register rd, Address addr) { lw(rd, addr); }
+  void sx(Register rs2, Address addr) { sw(rs2, addr); }
+#elif XLEN == 64
+  void lx(Register rd, Address addr) { ld(rd, addr); }
+  void sx(Register rs2, Address addr) { sd(rs2, addr); }
+#elif XLEN == 128
+  void lx(Register rd, Address addr) { lq(rd, addr); }
+  void sx(Register rs2, Address addr) { sq(rs2, addr); }
+#endif
+
+  // ==== RV32M ====
+  void mul(Register rd, Register rs1, Register rs2);
+  void mulh(Register rd, Register rs1, Register rs2);
+  void mulhsu(Register rd, Register rs1, Register rs2);
+  void mulhu(Register rd, Register rs1, Register rs2);
+  void div(Register rd, Register rs1, Register rs2);
+  void divu(Register rd, Register rs1, Register rs2);
+  void rem(Register rd, Register rs1, Register rs2);
+  void remu(Register rd, Register rs1, Register rs2);
+
+  // ==== RV64M ====
+#if XLEN >= 64
+  void mulw(Register rd, Register rs1, Register rs2);
+  void divw(Register rd, Register rs1, Register rs2);
+  void divuw(Register rd, Register rs1, Register rs2);
+  void remw(Register rd, Register rs1, Register rs2);
+  void remuw(Register rd, Register rs1, Register rs2);
+#endif  // XLEN >= 64
+
+  // ==== RV32A ====
+  void lrw(Register rd,
+           Address addr,
+           std::memory_order order = std::memory_order_relaxed);
+  void scw(Register rd,
+           Register rs2,
+           Address addr,
+           std::memory_order order = std::memory_order_relaxed);
+  void amoswapw(Register rd,
+                Register rs2,
+                Address addr,
+                std::memory_order order = std::memory_order_relaxed);
+  void amoaddw(Register rd,
+               Register rs2,
+               Address addr,
+               std::memory_order order = std::memory_order_relaxed);
+  void amoxorw(Register rd,
+               Register rs2,
+               Address addr,
+               std::memory_order order = std::memory_order_relaxed);
+  void amoandw(Register rd,
+               Register rs2,
+               Address addr,
+               std::memory_order order = std::memory_order_relaxed);
+  void amoorw(Register rd,
+              Register rs2,
+              Address addr,
+              std::memory_order order = std::memory_order_relaxed);
+  void amominw(Register rd,
+               Register rs2,
+               Address addr,
+               std::memory_order order = std::memory_order_relaxed);
+  void amomaxw(Register rd,
+               Register rs2,
+               Address addr,
+               std::memory_order order = std::memory_order_relaxed);
+  void amominuw(Register rd,
+                Register rs2,
+                Address addr,
+                std::memory_order order = std::memory_order_relaxed);
+  void amomaxuw(Register rd,
+                Register rs2,
+                Address addr,
+                std::memory_order order = std::memory_order_relaxed);
+
+  // ==== RV64A ====
+#if XLEN >= 64
+  void lrd(Register rd,
+           Address addr,
+           std::memory_order order = std::memory_order_relaxed);
+  void scd(Register rd,
+           Register rs2,
+           Address addr,
+           std::memory_order order = std::memory_order_relaxed);
+  void amoswapd(Register rd,
+                Register rs2,
+                Address addr,
+                std::memory_order order = std::memory_order_relaxed);
+  void amoaddd(Register rd,
+               Register rs2,
+               Address addr,
+               std::memory_order order = std::memory_order_relaxed);
+  void amoxord(Register rd,
+               Register rs2,
+               Address addr,
+               std::memory_order order = std::memory_order_relaxed);
+  void amoandd(Register rd,
+               Register rs2,
+               Address addr,
+               std::memory_order order = std::memory_order_relaxed);
+  void amoord(Register rd,
+              Register rs2,
+              Address addr,
+              std::memory_order order = std::memory_order_relaxed);
+  void amomind(Register rd,
+               Register rs2,
+               Address addr,
+               std::memory_order order = std::memory_order_relaxed);
+  void amomaxd(Register rd,
+               Register rs2,
+               Address addr,
+               std::memory_order order = std::memory_order_relaxed);
+  void amominud(Register rd,
+                Register rs2,
+                Address addr,
+                std::memory_order order = std::memory_order_relaxed);
+  void amomaxud(Register rd,
+                Register rs2,
+                Address addr,
+                std::memory_order order = std::memory_order_relaxed);
+#endif  // XLEN >= 64
+
+#if XLEN == 32
+  void lr(Register rd,
+          Address addr,
+          std::memory_order order = std::memory_order_relaxed) {
+    lrw(rd, addr, order);
+  }
+  void sc(Register rd,
+          Register rs2,
+          Address addr,
+          std::memory_order order = std::memory_order_relaxed) {
+    scw(rd, rs2, addr, order);
+  }
+#elif XLEN == 64
+  void lr(Register rd,
+          Address addr,
+          std::memory_order order = std::memory_order_relaxed) {
+    lrd(rd, addr, order);
+  }
+  void sc(Register rd,
+          Register rs2,
+          Address addr,
+          std::memory_order order = std::memory_order_relaxed) {
+    scd(rd, rs2, addr, order);
+  }
+#elif XLEN == 128
+  void lr(Register rd,
+          Address addr,
+          std::memory_order order = std::memory_order_relaxed) {
+    lrq(rd, addr, order);
+  }
+  void sc(Register rd,
+          Register rs2,
+          Address addr,
+          std::memory_order order = std::memory_order_relaxed) {
+    scq(rd, rs2, addr, order);
+  }
+#endif
+
+  // ==== RV32F ====
+  void flw(FRegister rd, Address addr);
+  void fsw(FRegister rs2, Address addr);
+  // rd := (rs1 * rs2) + rs3
+  void fmadds(FRegister rd,
+              FRegister rs1,
+              FRegister rs2,
+              FRegister rs3,
+              RoundingMode rounding = RNE);
+  // rd := (rs1 * rs2) - rs3
+  void fmsubs(FRegister rd,
+              FRegister rs1,
+              FRegister rs2,
+              FRegister rs3,
+              RoundingMode rounding = RNE);
+  // rd := -(rs1 * rs2) + rs3
+  void fnmsubs(FRegister rd,
+               FRegister rs1,
+               FRegister rs2,
+               FRegister rs3,
+               RoundingMode rounding = RNE);
+  // rd := -(rs1 * rs2) - rs3
+  void fnmadds(FRegister rd,
+               FRegister rs1,
+               FRegister rs2,
+               FRegister rs3,
+               RoundingMode rounding = RNE);
+  void fadds(FRegister rd,
+             FRegister rs1,
+             FRegister rs2,
+             RoundingMode rounding = RNE);
+  void fsubs(FRegister rd,
+             FRegister rs1,
+             FRegister rs2,
+             RoundingMode rounding = RNE);
+  void fmuls(FRegister rd,
+             FRegister rs1,
+             FRegister rs2,
+             RoundingMode rounding = RNE);
+  void fdivs(FRegister rd,
+             FRegister rs1,
+             FRegister rs2,
+             RoundingMode rounding = RNE);
+  void fsqrts(FRegister rd, FRegister rs1, RoundingMode rounding = RNE);
+  void fsgnjs(FRegister rd, FRegister rs1, FRegister rs2);
+  void fsgnjns(FRegister rd, FRegister rs1, FRegister rs2);
+  void fsgnjxs(FRegister rd, FRegister rs1, FRegister rs2);
+  void fmins(FRegister rd, FRegister rs1, FRegister rs2);
+  void fmaxs(FRegister rd, FRegister rs1, FRegister rs2);
+  void feqs(Register rd, FRegister rs1, FRegister rs2);
+  void flts(Register rd, FRegister rs1, FRegister rs2);
+  void fles(Register rd, FRegister rs1, FRegister rs2);
+  void fclasss(Register rd, FRegister rs1);
+  // int32_t <- float
+  void fcvtws(Register rd, FRegister rs1, RoundingMode rounding = RNE);
+  // uint32_t <- float
+  void fcvtwus(Register rd, FRegister rs1, RoundingMode rounding = RNE);
+  // float <- int32_t
+  void fcvtsw(FRegister rd, Register rs1, RoundingMode rounding = RNE);
+  // float <- uint32_t
+  void fcvtswu(FRegister rd, Register rs1, RoundingMode rounding = RNE);
+
+  void fmvs(FRegister rd, FRegister rs) { fsgnjs(rd, rs, rs); }
+  void fabss(FRegister rd, FRegister rs) { fsgnjxs(rd, rs, rs); }
+  void fnegs(FRegister rd, FRegister rs) { fsgnjns(rd, rs, rs); }
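+  // The aliases above follow from sign-injection with both source operands
+  // equal: fsgnj copies the sign (a move), fsgnjx XORs the sign with itself
+  // (absolute value), and fsgnjn injects the inverted sign (negation).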
+
+  // xlen <--bit_cast-- float
+  void fmvxw(Register rd, FRegister rs1);
+  // float <--bit_cast-- xlen
+  void fmvwx(FRegister rd, Register rs1);
+
+  // ==== RV64F ====
+#if XLEN >= 64
+  // int64_t <- float
+  void fcvtls(Register rd, FRegister rs1, RoundingMode rounding = RNE);
+  // uint64_t <- float
+  void fcvtlus(Register rd, FRegister rs1, RoundingMode rounding = RNE);
+  // float <- int64_t
+  void fcvtsl(FRegister rd, Register rs1, RoundingMode rounding = RNE);
+  // float <- uint64_t
+  void fcvtslu(FRegister rd, Register rs1, RoundingMode rounding = RNE);
+#endif  // XLEN >= 64
+
+  // ==== RV32D ====
+  void fld(FRegister rd, Address addr);
+  void fsd(FRegister rs2, Address addr);
+  // rd := (rs1 * rs2) + rs3
+  void fmaddd(FRegister rd,
+              FRegister rs1,
+              FRegister rs2,
+              FRegister rs3,
+              RoundingMode rounding = RNE);
+  // rd := (rs1 * rs2) - rs3
+  void fmsubd(FRegister rd,
+              FRegister rs1,
+              FRegister rs2,
+              FRegister rs3,
+              RoundingMode rounding = RNE);
+  // rd := -(rs1 * rs2) + rs3
+  void fnmsubd(FRegister rd,
+               FRegister rs1,
+               FRegister rs2,
+               FRegister rs3,
+               RoundingMode rounding = RNE);
+  // rd := -(rs1 * rs2) - rs3
+  void fnmaddd(FRegister rd,
+               FRegister rs1,
+               FRegister rs2,
+               FRegister rs3,
+               RoundingMode rounding = RNE);
+  void faddd(FRegister rd,
+             FRegister rs1,
+             FRegister rs2,
+             RoundingMode rounding = RNE);
+  void fsubd(FRegister rd,
+             FRegister rs1,
+             FRegister rs2,
+             RoundingMode rounding = RNE);
+  void fmuld(FRegister rd,
+             FRegister rs1,
+             FRegister rs2,
+             RoundingMode rounding = RNE);
+  void fdivd(FRegister rd,
+             FRegister rs1,
+             FRegister rs2,
+             RoundingMode rounding = RNE);
+  void fsqrtd(FRegister rd, FRegister rs1, RoundingMode rounding = RNE);
+  void fsgnjd(FRegister rd, FRegister rs1, FRegister rs2);
+  void fsgnjnd(FRegister rd, FRegister rs1, FRegister rs2);
+  void fsgnjxd(FRegister rd, FRegister rs1, FRegister rs2);
+  void fmind(FRegister rd, FRegister rs1, FRegister rs2);
+  void fmaxd(FRegister rd, FRegister rs1, FRegister rs2);
+  void fcvtsd(FRegister rd, FRegister rs1, RoundingMode rounding = RNE);
+  void fcvtds(FRegister rd, FRegister rs1);
+  void feqd(Register rd, FRegister rs1, FRegister rs2);
+  void fltd(Register rd, FRegister rs1, FRegister rs2);
+  void fled(Register rd, FRegister rs1, FRegister rs2);
+  void fclassd(Register rd, FRegister rs1);
+  // int32_t <- double
+  void fcvtwd(Register rd, FRegister rs1, RoundingMode rounding = RNE);
+  // uint32_t <- double
+  void fcvtwud(Register rd, FRegister rs1, RoundingMode rounding = RNE);
+  // double <- int32_t
+  void fcvtdw(FRegister rd, Register rs1, RoundingMode rounding = RNE);
+  // double <- uint32_t
+  void fcvtdwu(FRegister rd, Register rs1, RoundingMode rounding = RNE);
+
+  void fmvd(FRegister rd, FRegister rs) { fsgnjd(rd, rs, rs); }
+  void fabsd(FRegister rd, FRegister rs) { fsgnjxd(rd, rs, rs); }
+  void fnegd(FRegister rd, FRegister rs) { fsgnjnd(rd, rs, rs); }
+
+  // ==== RV64D ====
+#if XLEN >= 64
+  // int64_t <- double
+  void fcvtld(Register rd, FRegister rs1, RoundingMode rounding = RNE);
+  // uint64_t <- double
+  void fcvtlud(Register rd, FRegister rs1, RoundingMode rounding = RNE);
+  // xlen <--bit_cast-- double
+  void fmvxd(Register rd, FRegister rs1);
+  // double <- int64_t
+  void fcvtdl(FRegister rd, Register rs1, RoundingMode rounding = RNE);
+  // double <- uint64_t
+  void fcvtdlu(FRegister rd, Register rs1, RoundingMode rounding = RNE);
+  // double <--bit_cast-- xlen
+  void fmvdx(FRegister rd, Register rs1);
+#endif  // XLEN >= 64
+
+  // ==== Dart Simulator Debugging ====
+  void SimulatorPrintObject(Register rs1);
+
+ private:
+  // ==== RV32/64C ====
+  void c_lwsp(Register rd, Address addr);
+#if XLEN == 32
+  void c_flwsp(FRegister rd, Address addr);
+#else
+  void c_ldsp(Register rd, Address addr);
+#endif
+  void c_fldsp(FRegister rd, Address addr);
+
+  void c_swsp(Register rs2, Address addr);
+#if XLEN == 32
+  void c_fswsp(FRegister rs2, Address addr);
+#else
+  void c_sdsp(Register rs2, Address addr);
+#endif
+  void c_fsdsp(FRegister rs2, Address addr);
+
+  void c_lw(Register rd, Address addr);
+  void c_ld(Register rd, Address addr);
+  void c_flw(FRegister rd, Address addr);
+  void c_fld(FRegister rd, Address addr);
+
+  void c_sw(Register rs2, Address addr);
+  void c_sd(Register rs2, Address addr);
+  void c_fsw(FRegister rs2, Address addr);
+  void c_fsd(FRegister rs2, Address addr);
+
+  void c_j(Label* label);
+#if XLEN == 32
+  void c_jal(Label* label);
+#endif
+  void c_jr(Register rs1);
+  void c_jalr(Register rs1);
+
+  void c_beqz(Register rs1p, Label* label);
+  void c_bnez(Register rs1p, Label* label);
+
+  void c_li(Register rd, intptr_t imm);
+  void c_lui(Register rd, uintptr_t imm);
+
+  void c_addi(Register rd, Register rs1, intptr_t imm);
+#if XLEN >= 64
+  void c_addiw(Register rd, Register rs1, intptr_t imm);
+#endif
+  void c_addi16sp(Register rd, Register rs1, intptr_t imm);
+  void c_addi4spn(Register rdp, Register rs1, intptr_t imm);
+
+  void c_slli(Register rd, Register rs1, intptr_t imm);
+  void c_srli(Register rd, Register rs1, intptr_t imm);
+  void c_srai(Register rd, Register rs1, intptr_t imm);
+  void c_andi(Register rd, Register rs1, intptr_t imm);
+
+  void c_mv(Register rd, Register rs2);
+
+  void c_add(Register rd, Register rs1, Register rs2);
+  void c_and(Register rd, Register rs1, Register rs2);
+  void c_or(Register rd, Register rs1, Register rs2);
+  void c_xor(Register rd, Register rs1, Register rs2);
+  void c_sub(Register rd, Register rs1, Register rs2);
+#if XLEN >= 64
+  void c_addw(Register rd, Register rs1, Register rs2);
+  void c_subw(Register rd, Register rs1, Register rs2);
+#endif
+
+  void c_nop();
+  void c_ebreak();
+
+ protected:
+  intptr_t UpdateCBOffset(intptr_t branch_position, intptr_t new_offset);
+  intptr_t UpdateCJOffset(intptr_t branch_position, intptr_t new_offset);
+  intptr_t UpdateBOffset(intptr_t branch_position, intptr_t new_offset);
+  intptr_t UpdateJOffset(intptr_t branch_position, intptr_t new_offset);
+  intptr_t UpdateFarOffset(intptr_t branch_position, intptr_t new_offset);
+
+  intptr_t Position() { return buffer_.Size(); }
+  void EmitBranch(Register rs1,
+                  Register rs2,
+                  Label* label,
+                  Funct3 func,
+                  JumpDistance distance);
+  void EmitJump(Register rd, Label* label, Opcode op, JumpDistance distance);
+  void EmitCBranch(Register rs1p, Label* label, COpcode op);
+  void EmitCJump(Label* label, COpcode op);
+
+  void EmitRType(Funct5 funct5,
+                 std::memory_order order,
+                 Register rs2,
+                 Register rs1,
+                 Funct3 funct3,
+                 Register rd,
+                 Opcode opcode);
+  void EmitRType(Funct7 funct7,
+                 Register rs2,
+                 Register rs1,
+                 Funct3 funct3,
+                 Register rd,
+                 Opcode opcode);
+  void EmitRType(Funct7 funct7,
+                 FRegister rs2,
+                 FRegister rs1,
+                 Funct3 funct3,
+                 FRegister rd,
+                 Opcode opcode);
+  void EmitRType(Funct7 funct7,
+                 FRegister rs2,
+                 FRegister rs1,
+                 RoundingMode round,
+                 FRegister rd,
+                 Opcode opcode);
+  void EmitRType(Funct7 funct7,
+                 FRegister rs2,
+                 Register rs1,
+                 RoundingMode round,
+                 FRegister rd,
+                 Opcode opcode);
+  void EmitRType(Funct7 funct7,
+                 FRegister rs2,
+                 Register rs1,
+                 Funct3 funct3,
+                 FRegister rd,
+                 Opcode opcode);
+  void EmitRType(Funct7 funct7,
+                 FRegister rs2,
+                 FRegister rs1,
+                 Funct3 funct3,
+                 Register rd,
+                 Opcode opcode);
+  void EmitRType(Funct7 funct7,
+                 FRegister rs2,
+                 FRegister rs1,
+                 RoundingMode round,
+                 Register rd,
+                 Opcode opcode);
+  void EmitRType(Funct7 funct7,
+                 intptr_t shamt,
+                 Register rs1,
+                 Funct3 funct3,
+                 Register rd,
+                 Opcode opcode);
+
+  void EmitR4Type(FRegister rs3,
+                  Funct2 funct2,
+                  FRegister rs2,
+                  FRegister rs1,
+                  RoundingMode round,
+                  FRegister rd,
+                  Opcode opcode);
+
+  void EmitIType(intptr_t imm,
+                 Register rs1,
+                 Funct3 funct3,
+                 Register rd,
+                 Opcode opcode);
+  void EmitIType(intptr_t imm,
+                 Register rs1,
+                 Funct3 funct3,
+                 FRegister rd,
+                 Opcode opcode);
+
+  void EmitSType(intptr_t imm,
+                 Register rs2,
+                 Register rs1,
+                 Funct3 funct3,
+                 Opcode opcode);
+  void EmitSType(intptr_t imm,
+                 FRegister rs2,
+                 Register rs1,
+                 Funct3 funct3,
+                 Opcode opcode);
+
+  void EmitBType(intptr_t imm,
+                 Register rs2,
+                 Register rs1,
+                 Funct3 funct3,
+                 Opcode opcode);
+
+  void EmitUType(intptr_t imm, Register rd, Opcode opcode);
+
+  void EmitJType(intptr_t imm, Register rd, Opcode opcode);
+
+  uint16_t Read16(intptr_t position) {
+    return buffer_.Load<uint16_t>(position);
+  }
+  void Write16(intptr_t position, uint16_t instruction) {
+    return buffer_.Store<uint16_t>(position, instruction);
+  }
+  void Emit16(uint16_t instruction) {
+    AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+    buffer_.Emit<uint16_t>(instruction);
+  }
+  uint32_t Read32(intptr_t position) {
+    return buffer_.Load<uint32_t>(position);
+  }
+  void Write32(intptr_t position, uint32_t instruction) {
+    return buffer_.Store<uint32_t>(position, instruction);
+  }
+
+ public:
+  void Emit32(uint32_t instruction) {
+    AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+    buffer_.Emit<uint32_t>(instruction);
+  }
+  void Emit64(uint64_t instruction) {
+    AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+    buffer_.Emit<uint64_t>(instruction);
+  }
+
+ protected:
+  ExtensionSet extensions_;
+  intptr_t far_branch_level_;
+};
+
+class Assembler : public MicroAssembler {
+ public:
+  explicit Assembler(ObjectPoolBuilder* object_pool_builder,
+                     intptr_t far_branch_level = 0);
+  ~Assembler() {}
+
+  void PushRegister(Register r);
+  void PopRegister(Register r);
+
+  void PushRegisterPair(Register r0, Register r1);
+  void PopRegisterPair(Register r0, Register r1);
+
+  void PushRegisters(const RegisterSet& registers);
+  void PopRegisters(const RegisterSet& registers);
+
+  // Push all registers which are callee-saved according to the RISC-V ABI.
+  void PushNativeCalleeSavedRegisters();
+
+  // Pop all registers which are callee-saved according to the RISC-V ABI.
+  void PopNativeCalleeSavedRegisters();
+
+  void ExtendValue(Register rd, Register rn, OperandSize sz) override;
+  void ExtendAndSmiTagValue(Register rd,
+                            Register rn,
+                            OperandSize sz = kWordBytes) override;
+
+  void Drop(intptr_t stack_elements) {
+    ASSERT(stack_elements >= 0);
+    if (stack_elements > 0) {
+      AddImmediate(SP, SP, stack_elements * target::kWordSize);
+    }
+  }
+
+  void Bind(Label* label) { MicroAssembler::Bind(label); }
+  // Unconditional jump to a given label.
+  void Jump(Label* label, JumpDistance distance = kFarJump) {
+    j(label, distance);
+  }
+  // Unconditional jump to a given address in memory. Clobbers TMP.
+  void Jump(const Address& address);
+
+  void LoadField(Register dst, const FieldAddress& address) override;
+  void LoadCompressedField(Register dst, const FieldAddress& address) override {
+    LoadCompressed(dst, address);
+  }
+  void LoadMemoryValue(Register dst, Register base, int32_t offset) {
+    LoadFromOffset(dst, base, offset, kWordBytes);
+  }
+  void StoreMemoryValue(Register src, Register base, int32_t offset) {
+    StoreToOffset(src, base, offset, kWordBytes);
+  }
+
+#if defined(USING_THREAD_SANITIZER)
+  void TsanLoadAcquire(Register addr);
+  void TsanStoreRelease(Register addr);
+#endif
+
+  void LoadAcquire(Register dst, Register address, int32_t offset = 0);
+
+  void LoadAcquireCompressed(Register dst,
+                             Register address,
+                             int32_t offset = 0);
+
+  void StoreRelease(Register src,
+                    Register address,
+                    int32_t offset = 0) override;
+
+  void StoreReleaseCompressed(Register src,
+                              Register address,
+                              int32_t offset = 0);
+
+  void CompareWithFieldValue(Register value, FieldAddress address) {
+    CompareWithMemoryValue(value, address);
+  }
+  void CompareWithCompressedFieldFromOffset(Register value,
+                                            Register base,
+                                            int32_t offset);
+
+  void CompareWithMemoryValue(Register value,
+                              Address address,
+                              OperandSize sz = kWordBytes);
+
+  void CompareFunctionTypeNullabilityWith(Register type, int8_t value) override;
+  void CompareTypeNullabilityWith(Register type, int8_t value) override;
+
+  // Debugging and bringup support.
+  void Breakpoint() override { trap(); }
+
+  void SetPrologueOffset() {
+    if (prologue_offset_ == -1) {
+      prologue_offset_ = CodeSize();
+    }
+  }
+
+  void ReserveAlignedFrameSpace(intptr_t frame_space);
+
+  // In debug mode, this generates code to check that:
+  //   FP + kExitLinkSlotFromEntryFp == SP
+  // and triggers a breakpoint otherwise.
+  void EmitEntryFrameVerification();
+
+  // The instruction pattern from the entry point is used in Dart frame
+  // prologues to set up the frame and save a PC which can be used to figure
+  // out the RawInstruction object corresponding to the code running in the
+  // frame.
+  static const intptr_t kEntryPointToPcMarkerOffset = 0;
+  static intptr_t EntryPointToPcMarkerOffset() {
+    return kEntryPointToPcMarkerOffset;
+  }
+
+  // On some other platforms, we draw a distinction between safe and unsafe
+  // smis.
+  static bool IsSafe(const Object& object) { return true; }
+  static bool IsSafeSmi(const Object& object) { return target::IsSmi(object); }
+
+  void CompareRegisters(Register rn, Register rm);
+  void CompareObjectRegisters(Register rn, Register rm);
+  void TestRegisters(Register rn, Register rm);
+
+  // Branches to the given label if the condition holds.
+  void BranchIf(Condition condition,
+                Label* label,
+                JumpDistance distance = kFarJump);
+  void BranchIfZero(Register rn,
+                    Label* label,
+                    JumpDistance distance = kFarJump);
+  void SetIf(Condition condition, Register rd);
+
+  void SmiUntag(Register reg) { SmiUntag(reg, reg); }
+  void SmiUntag(Register dst, Register src) { srai(dst, src, kSmiTagSize); }
+  void SmiTag(Register reg) override { SmiTag(reg, reg); }
+  void SmiTag(Register dst, Register src) { slli(dst, src, kSmiTagSize); }
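+  // E.g. with kSmiTagSize == 1, SmiTag maps 3 to 6 (3 << 1) and SmiUntag
+  // maps 6 back to 3 via an arithmetic shift that preserves the sign of
+  // negative Smis.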
+
+  void BranchIfNotSmi(Register reg,
+                      Label* label,
+                      JumpDistance distance = kFarJump);
+  void BranchIfSmi(Register reg,
+                   Label* label,
+                   JumpDistance distance = kFarJump);
+
+  void Jump(const Code& code,
+            Register pp,
+            ObjectPoolBuilderEntry::Patchability patchable =
+                ObjectPoolBuilderEntry::kNotPatchable);
+
+  void JumpAndLink(const Code& code,
+                   ObjectPoolBuilderEntry::Patchability patchable =
+                       ObjectPoolBuilderEntry::kNotPatchable,
+                   CodeEntryKind entry_kind = CodeEntryKind::kNormal);
+
+  void JumpAndLinkPatchable(const Code& code,
+                            CodeEntryKind entry_kind = CodeEntryKind::kNormal) {
+    JumpAndLink(code, ObjectPoolBuilderEntry::kPatchable, entry_kind);
+  }
+  void JumpAndLinkToRuntime();
+
+  // Emit a call that shares its object pool entries with other calls
+  // that have the same equivalence marker.
+  void JumpAndLinkWithEquivalence(
+      const Code& code,
+      const Object& equivalence,
+      CodeEntryKind entry_kind = CodeEntryKind::kNormal);
+
+  void Call(Address target);
+  void Call(const Code& code) { JumpAndLink(code); }
+
+  void CallCFunction(Address target) { Call(target); }
+
+  void AddImmediate(Register dest, intx_t imm) {
+    AddImmediate(dest, dest, imm);
+  }
+
+  // Macros accepting a pp Register argument may attempt to load values from
+  // the object pool when possible. Unless you are sure that the untagged
+  // object pool pointer is in another register, or that it is not available
+  // at all, PP should be passed for pp. `dest` may be TMP or TMP2; `rn` may
+  // not be.
+  void AddImmediate(Register dest,
+                    Register rn,
+                    intx_t imm,
+                    OperandSize sz = kWordBytes);
+  void AndImmediate(Register rd,
+                    Register rn,
+                    intx_t imm,
+                    OperandSize sz = kWordBytes);
+  void OrImmediate(Register rd,
+                   Register rn,
+                   intx_t imm,
+                   OperandSize sz = kWordBytes);
+  void XorImmediate(Register rd,
+                    Register rn,
+                    intx_t imm,
+                    OperandSize sz = kWordBytes);
+  void TestImmediate(Register rn, intx_t imm, OperandSize sz = kWordBytes);
+  void CompareImmediate(Register rn, intx_t imm, OperandSize sz = kWordBytes);
+
+  void LoadFromOffset(Register dest,
+                      const Address& address,
+                      OperandSize sz = kWordBytes) override;
+  void LoadFromOffset(Register dest,
+                      Register base,
+                      int32_t offset,
+                      OperandSize sz = kWordBytes);
+  void LoadFieldFromOffset(Register dest,
+                           Register base,
+                           int32_t offset,
+                           OperandSize sz = kWordBytes) override {
+    LoadFromOffset(dest, base, offset - kHeapObjectTag, sz);
+  }
+  void LoadCompressedFieldFromOffset(Register dest,
+                                     Register base,
+                                     int32_t offset) override {
+    LoadCompressedFromOffset(dest, base, offset - kHeapObjectTag);
+  }
+  void LoadCompressedSmiFieldFromOffset(Register dest,
+                                        Register base,
+                                        int32_t offset) {
+    LoadCompressedSmiFromOffset(dest, base, offset - kHeapObjectTag);
+  }
+  // For loading indexed payloads out of tagged objects like Arrays. If the
+  // payload objects are word-sized, use TIMES_HALF_WORD_SIZE if [index]
+  // contains a Smi, otherwise TIMES_WORD_SIZE if it is unboxed.
+  void LoadIndexedPayload(Register dest,
+                          Register base,
+                          int32_t payload_offset,
+                          Register index,
+                          ScaleFactor scale,
+                          OperandSize sz = kWordBytes);
+  void LoadIndexedCompressed(Register dest,
+                             Register base,
+                             int32_t offset,
+                             Register index);
+  void LoadSFromOffset(FRegister dest, Register base, int32_t offset);
+  void LoadDFromOffset(FRegister dest, Register base, int32_t offset);
+  void LoadDFieldFromOffset(FRegister dest, Register base, int32_t offset) {
+    LoadDFromOffset(dest, base, offset - kHeapObjectTag);
+  }
+
+  void LoadFromStack(Register dst, intptr_t depth);
+  void StoreToStack(Register src, intptr_t depth);
+  void CompareToStack(Register src, intptr_t depth);
+
+  void StoreToOffset(Register src,
+                     const Address& address,
+                     OperandSize sz = kWordBytes) override;
+  void StoreToOffset(Register src,
+                     Register base,
+                     int32_t offset,
+                     OperandSize sz = kWordBytes);
+  void StoreFieldToOffset(Register src,
+                          Register base,
+                          int32_t offset,
+                          OperandSize sz = kWordBytes) {
+    StoreToOffset(src, base, offset - kHeapObjectTag, sz);
+  }
+  void StoreSToOffset(FRegister src, Register base, int32_t offset);
+  void StoreDToOffset(FRegister src, Register base, int32_t offset);
+  void StoreDFieldToOffset(FRegister src, Register base, int32_t offset) {
+    StoreDToOffset(src, base, offset - kHeapObjectTag);
+  }
+
+  void LoadUnboxedDouble(FpuRegister dst, Register base, int32_t offset);
+  void StoreUnboxedDouble(FpuRegister src, Register base, int32_t offset);
+  void MoveUnboxedDouble(FpuRegister dst, FpuRegister src);
+
+  void LoadCompressed(Register dest, const Address& slot);
+  void LoadCompressedFromOffset(Register dest, Register base, int32_t offset);
+  void LoadCompressedSmi(Register dest, const Address& slot);
+  void LoadCompressedSmiFromOffset(Register dest,
+                                   Register base,
+                                   int32_t offset);
+
+  // Store into a heap object and apply the generational and incremental write
+  // barriers. All stores into heap objects must pass through this function or,
+  // if the value can be proven either Smi or old-and-premarked, its NoBarrier
+  // variants.
+  // Preserves object and value registers.
+  void StoreIntoObject(Register object,
+                       const Address& dest,
+                       Register value,
+                       CanBeSmi can_value_be_smi = kValueCanBeSmi,
+                       MemoryOrder memory_order = kRelaxedNonAtomic) override;
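+  // For example (cf. the StoreIntoObject test in assembler_riscv_test.cc):
+  //   StoreIntoObject(A1,
+  //                   FieldAddress(A1, GrowableObjectArray::data_offset()),
+  //                   A0);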
+  void StoreCompressedIntoObject(
+      Register object,
+      const Address& dest,
+      Register value,
+      CanBeSmi can_value_be_smi = kValueCanBeSmi,
+      MemoryOrder memory_order = kRelaxedNonAtomic) override;
+  void StoreBarrier(Register object, Register value, CanBeSmi can_value_be_smi);
+  void StoreIntoArray(Register object,
+                      Register slot,
+                      Register value,
+                      CanBeSmi can_value_be_smi = kValueCanBeSmi);
+  void StoreCompressedIntoArray(Register object,
+                                Register slot,
+                                Register value,
+                                CanBeSmi can_value_be_smi = kValueCanBeSmi);
+  void StoreIntoArrayBarrier(Register object,
+                             Register slot,
+                             Register value,
+                             CanBeSmi can_value_be_smi);
+
+  void StoreIntoObjectOffset(Register object,
+                             int32_t offset,
+                             Register value,
+                             CanBeSmi can_value_be_smi = kValueCanBeSmi,
+                             MemoryOrder memory_order = kRelaxedNonAtomic);
+  void StoreCompressedIntoObjectOffset(
+      Register object,
+      int32_t offset,
+      Register value,
+      CanBeSmi can_value_be_smi = kValueCanBeSmi,
+      MemoryOrder memory_order = kRelaxedNonAtomic);
+  void StoreIntoObjectNoBarrier(
+      Register object,
+      const Address& dest,
+      Register value,
+      MemoryOrder memory_order = kRelaxedNonAtomic) override;
+  void StoreCompressedIntoObjectNoBarrier(
+      Register object,
+      const Address& dest,
+      Register value,
+      MemoryOrder memory_order = kRelaxedNonAtomic) override;
+  void StoreIntoObjectOffsetNoBarrier(
+      Register object,
+      int32_t offset,
+      Register value,
+      MemoryOrder memory_order = kRelaxedNonAtomic);
+  void StoreCompressedIntoObjectOffsetNoBarrier(
+      Register object,
+      int32_t offset,
+      Register value,
+      MemoryOrder memory_order = kRelaxedNonAtomic);
+  void StoreIntoObjectNoBarrier(Register object,
+                                const Address& dest,
+                                const Object& value);
+  void StoreCompressedIntoObjectNoBarrier(
+      Register object,
+      const Address& dest,
+      const Object& value,
+      MemoryOrder memory_order = kRelaxedNonAtomic);
+  void StoreIntoObjectOffsetNoBarrier(
+      Register object,
+      int32_t offset,
+      const Object& value,
+      MemoryOrder memory_order = kRelaxedNonAtomic);
+  void StoreCompressedIntoObjectOffsetNoBarrier(
+      Register object,
+      int32_t offset,
+      const Object& value,
+      MemoryOrder memory_order = kRelaxedNonAtomic);
+
+  // Stores a non-tagged value into a heap object.
+  void StoreInternalPointer(Register object,
+                            const Address& dest,
+                            Register value);
+
+  // Object pool, loading from pool, etc.
+  void LoadPoolPointer(Register pp = PP);
+
+  bool constant_pool_allowed() const { return constant_pool_allowed_; }
+  void set_constant_pool_allowed(bool b) { constant_pool_allowed_ = b; }
+
+  intptr_t FindImmediate(int64_t imm);
+  bool CanLoadFromObjectPool(const Object& object) const;
+  void LoadNativeEntry(Register dst,
+                       const ExternalLabel* label,
+                       ObjectPoolBuilderEntry::Patchability patchable);
+  void LoadIsolate(Register dst);
+  void LoadIsolateGroup(Register dst);
+
+  // Note: the function never clobbers TMP, TMP2 scratch registers.
+  void LoadObject(Register dst, const Object& obj) {
+    LoadObjectHelper(dst, obj, false);
+  }
+  // Note: the function never clobbers TMP, TMP2 scratch registers.
+  void LoadUniqueObject(Register dst, const Object& obj) {
+    LoadObjectHelper(dst, obj, true);
+  }
+  // Note: the function never clobbers TMP, TMP2 scratch registers.
+  void LoadImmediate(Register reg, intx_t imm);
+
+  void LoadDImmediate(FRegister reg, double immd);
+
+  // Load a word from the pool at the given index, using an encoding that
+  // InstructionPattern::DecodeLoadWordFromPool can decode.
+  //
+  // Note: the function never clobbers TMP, TMP2 scratch registers.
+  void LoadWordFromPoolIndex(Register dst, intptr_t index, Register pp = PP);
+
+  void PushObject(const Object& object) {
+    if (IsSameObject(compiler::NullObject(), object)) {
+      PushRegister(NULL_REG);
+    } else if (target::IsSmi(object) && (target::ToRawSmi(object) == 0)) {
+      PushRegister(ZR);
+    } else {
+      LoadObject(TMP, object);
+      PushRegister(TMP);
+    }
+  }
+  void PushImmediate(int64_t immediate) {
+    if (immediate == 0) {
+      PushRegister(ZR);
+    } else {
+      LoadImmediate(TMP, immediate);
+      PushRegister(TMP);
+    }
+  }
+  void CompareObject(Register reg, const Object& object);
+
+  void ExtractClassIdFromTags(Register result, Register tags);
+  void ExtractInstanceSizeFromTags(Register result, Register tags);
+
+  void LoadClassId(Register result, Register object);
+  void LoadClassById(Register result, Register class_id);
+  void CompareClassId(Register object,
+                      intptr_t class_id,
+                      Register scratch = kNoRegister);
+  // Note: input and output registers must be different.
+  void LoadClassIdMayBeSmi(Register result, Register object);
+  void LoadTaggedClassIdMayBeSmi(Register result, Register object);
+  void EnsureHasClassIdInDEBUG(intptr_t cid,
+                               Register src,
+                               Register scratch,
+                               bool can_be_null = false) override;
+
+  void EnterFrame(intptr_t frame_size);
+  void LeaveFrame();
+  void Ret() { ret(); }
+
+  // Emit code to transition between generated mode and native mode.
+  //
+  // These require and ensure that SP is aligned and require a scratch
+  // register (in addition to TMP/TMP2).
+
+  void TransitionGeneratedToNative(Register destination_address,
+                                   Register new_exit_frame,
+                                   Register new_exit_through_ffi,
+                                   bool enter_safepoint);
+  void TransitionNativeToGenerated(Register scratch, bool exit_safepoint);
+  void EnterFullSafepoint(Register scratch);
+  void ExitFullSafepoint(Register scratch);
+
+  void CheckCodePointer();
+  void RestoreCodePointer();
+
+  // Restores the values of the registers that are reserved for caching
+  // certain values, e.g. WRITE_BARRIER_MASK and NULL_REG.
+  void RestorePinnedRegisters();
+
+  void SetupGlobalPoolAndDispatchTable();
+
+  void EnterDartFrame(intptr_t frame_size, Register new_pp = kNoRegister);
+  void EnterOsrFrame(intptr_t extra_size, Register new_pp = kNoRegister);
+  void LeaveDartFrame(RestorePP restore_pp = kRestoreCallerPP);
+
+  void CallRuntime(const RuntimeEntry& entry, intptr_t argument_count);
+
+  // Helper class for performing runtime calls from callers where manual
+  // register preservation is required (e.g. outside IL instructions marked
+  // as calling).
+  class CallRuntimeScope : public ValueObject {
+   public:
+    CallRuntimeScope(Assembler* assembler,
+                     const RuntimeEntry& entry,
+                     intptr_t frame_size,
+                     bool preserve_registers = true)
+        : CallRuntimeScope(assembler,
+                           entry,
+                           frame_size,
+                           preserve_registers,
+                           /*caller=*/nullptr) {}
+
+    CallRuntimeScope(Assembler* assembler,
+                     const RuntimeEntry& entry,
+                     intptr_t frame_size,
+                     Address caller,
+                     bool preserve_registers = true)
+        : CallRuntimeScope(assembler,
+                           entry,
+                           frame_size,
+                           preserve_registers,
+                           &caller) {}
+
+    void Call(intptr_t argument_count);
+
+    ~CallRuntimeScope();
+
+   private:
+    CallRuntimeScope(Assembler* assembler,
+                     const RuntimeEntry& entry,
+                     intptr_t frame_size,
+                     bool preserve_registers,
+                     const Address* caller);
+
+    Assembler* const assembler_;
+    const RuntimeEntry& entry_;
+    const bool preserve_registers_;
+    const bool restore_code_reg_;
+  };
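+
+  // A hypothetical usage sketch (the entry name and frame size here are
+  // illustrative only, not from this patch):
+  //   CallRuntimeScope scope(assembler, kExampleRuntimeEntry, frame_size);
+  //   scope.Call(/*argument_count=*/2);
+  // The destructor restores the frame and any preserved registers.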
+
+  // Set up a stub frame so that the stack traversal code can easily
+  // identify it as such.
+  void EnterStubFrame() { EnterDartFrame(0); }
+  void LeaveStubFrame() { LeaveDartFrame(); }
+
+  // Set up a frame for calling a C function.
+  // Automatically saves the Dart pinned registers that are not callee-saved
+  // in the native calling convention.
+  // Use together with CallCFunction.
+  void EnterCFrame(intptr_t frame_space);
+  void LeaveCFrame();
+
+  void MonomorphicCheckedEntryJIT();
+  void MonomorphicCheckedEntryAOT();
+  void BranchOnMonomorphicCheckedEntryJIT(Label* label);
+
+  // If allocation tracing for |cid| is enabled, jumps to the |trace| label,
+  // so that the allocation is performed in the runtime, where tracing occurs.
+  void MaybeTraceAllocation(intptr_t cid, Register temp_reg, Label* trace);
+
+  void TryAllocateObject(intptr_t cid,
+                         intptr_t instance_size,
+                         Label* failure,
+                         JumpDistance distance,
+                         Register instance_reg,
+                         Register temp_reg) override;
+
+  void TryAllocateArray(intptr_t cid,
+                        intptr_t instance_size,
+                        Label* failure,
+                        Register instance,
+                        Register end_address,
+                        Register temp1,
+                        Register temp2);
+
+  // This emits a PC-relative call of the form "bl <offset>". The offset is
+  // not yet known and therefore needs relocation to the right place before
+  // the code can be used.
+  //
+  // The necessary information for the "linker" (i.e. the relocation
+  // information) is stored in [UntaggedCode::static_calls_target_table_]: an
+  // entry of the form
+  //
+  //   (Code::kPcRelativeCall & pc_offset, <target-code>, <target-function>)
+  //
+  // will be used during relocation to fix the offset.
+  //
+  // The provided [offset_into_target] will be added to calculate the final
+  // destination.  It can be used e.g. for calling into the middle of a
+  // function.
+  void GenerateUnRelocatedPcRelativeCall(intptr_t offset_into_target = 0);
+
+  // This emits a PC-relative tail call of the form "b <offset>".
+  //
+  // See also above for the pc-relative call.
+  void GenerateUnRelocatedPcRelativeTailCall(intptr_t offset_into_target = 0);
+
+  Address ElementAddressForIntIndex(bool is_external,
+                                    intptr_t cid,
+                                    intptr_t index_scale,
+                                    Register array,
+                                    intptr_t index) const;
+  void ComputeElementAddressForIntIndex(Register address,
+                                        bool is_external,
+                                        intptr_t cid,
+                                        intptr_t index_scale,
+                                        Register array,
+                                        intptr_t index);
+  Address ElementAddressForRegIndex(bool is_external,
+                                    intptr_t cid,
+                                    intptr_t index_scale,
+                                    bool index_unboxed,
+                                    Register array,
+                                    Register index,
+                                    Register temp);
+
+  // Special version of ElementAddressForRegIndex for the case when cid and
+  // operand size for the target load don't match (e.g. when loading a few
+  // elements of the array with one load).
+  Address ElementAddressForRegIndexWithSize(bool is_external,
+                                            intptr_t cid,
+                                            OperandSize size,
+                                            intptr_t index_scale,
+                                            bool index_unboxed,
+                                            Register array,
+                                            Register index,
+                                            Register temp);
+
+  void ComputeElementAddressForRegIndex(Register address,
+                                        bool is_external,
+                                        intptr_t cid,
+                                        intptr_t index_scale,
+                                        bool index_unboxed,
+                                        Register array,
+                                        Register index);
+
+  void LoadStaticFieldAddress(Register address,
+                              Register field,
+                              Register scratch);
+
+  void LoadCompressedFieldAddressForRegOffset(Register address,
+                                              Register instance,
+                                              Register offset_in_words_as_smi);
+
+  void LoadFieldAddressForRegOffset(Register address,
+                                    Register instance,
+                                    Register offset_in_words_as_smi);
+
+  // Returns object data offset for address calculation; for heap objects also
+  // accounts for the tag.
+  static int32_t HeapDataOffset(bool is_external, intptr_t cid) {
+    return is_external
+               ? 0
+               : (target::Instance::DataOffsetFor(cid) - kHeapObjectTag);
+  }
+
+  void AddImmediateBranchOverflow(Register rd,
+                                  Register rs1,
+                                  intx_t imm,
+                                  Label* overflow);
+  void SubtractImmediateBranchOverflow(Register rd,
+                                       Register rs1,
+                                       intx_t imm,
+                                       Label* overflow);
+  void MultiplyImmediateBranchOverflow(Register rd,
+                                       Register rs1,
+                                       intx_t imm,
+                                       Label* overflow);
+  void AddBranchOverflow(Register rd,
+                         Register rs1,
+                         Register rs2,
+                         Label* overflow);
+  void SubtractBranchOverflow(Register rd,
+                              Register rs1,
+                              Register rs2,
+                              Label* overflow);
+  void MultiplyBranchOverflow(Register rd,
+                              Register rs1,
+                              Register rs2,
+                              Label* overflow);
+
+ private:
+  bool constant_pool_allowed_;
+
+  enum DeferredCompareType {
+    kNone,
+    kCompareReg,
+    kCompareImm,
+    kTestReg,
+    kTestImm,
+  };
+  DeferredCompareType deferred_compare_ = kNone;
+  Register deferred_left_ = kNoRegister;
+  Register deferred_reg_ = kNoRegister;
+  intptr_t deferred_imm_ = 0;
+
+  // Note: the function never clobbers TMP, TMP2 scratch registers.
+  void LoadObjectHelper(Register dst, const Object& obj, bool is_unique);
+
+  enum BarrierFilterMode {
+    // Filter falls through into the barrier update code. Target label
+    // is an "after-store" label.
+    kJumpToNoUpdate,
+
+    // Filter falls through to the "after-store" code. Target label
+    // is the barrier update code label.
+    kJumpToBarrier,
+  };
+
+  void StoreIntoObjectFilter(Register object,
+                             Register value,
+                             Label* label,
+                             CanBeSmi can_be_smi,
+                             BarrierFilterMode barrier_filter_mode);
+
+  // Note: the leaf call sequence uses some ABI callee-saved registers as
+  // scratch, so they should be manually preserved.
+  void EnterCallRuntimeFrame(intptr_t frame_size, bool is_leaf);
+  void LeaveCallRuntimeFrame(bool is_leaf);
+
+  friend class dart::FlowGraphCompiler;
+  std::function<void(Register reg)> generate_invoke_write_barrier_wrapper_;
+  std::function<void()> generate_invoke_array_write_barrier_;
+
+  DISALLOW_ALLOCATION();
+  DISALLOW_COPY_AND_ASSIGN(Assembler);
+};
+
+}  // namespace compiler
+}  // namespace dart
+
+#endif  // RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_RISCV_H_
diff --git a/runtime/vm/compiler/assembler/assembler_riscv_test.cc b/runtime/vm/compiler/assembler/assembler_riscv_test.cc
new file mode 100644
index 0000000..b596017
--- /dev/null
+++ b/runtime/vm/compiler/assembler/assembler_riscv_test.cc
@@ -0,0 +1,6513 @@
+// Copyright (c) 2017, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/globals.h"
+#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
+
+#include "vm/compiler/assembler/assembler.h"
+#include "vm/cpu.h"
+#include "vm/os.h"
+#include "vm/unit_test.h"
+#include "vm/virtual_memory.h"
+
+namespace dart {
+namespace compiler {
+#define __ assembler->
+
+#if defined(PRODUCT)
+#define EXPECT_DISASSEMBLY(expected)
+#else
+#define EXPECT_DISASSEMBLY(expected)                                           \
+  EXPECT_STREQ(expected, test->RelativeDisassembly())
+#endif
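+
+// In PRODUCT builds the disassembler is not compiled in, so
+// EXPECT_DISASSEMBLY expands to nothing and only the execution checks in the
+// tests below are run.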
+
+// Called from assembler_test.cc.
+// RA: return address.
+// A0: value.
+// A1: growable array.
+// A2: current thread.
+ASSEMBLER_TEST_GENERATE(StoreIntoObject, assembler) {
+  __ PushRegister(RA);
+  __ PushNativeCalleeSavedRegisters();
+
+  __ mv(THR, A2);
+  __ lx(WRITE_BARRIER_MASK, Address(THR, Thread::write_barrier_mask_offset()));
+
+  __ StoreIntoObject(A1, FieldAddress(A1, GrowableObjectArray::data_offset()),
+                     A0);
+
+  __ PopNativeCalleeSavedRegisters();
+  __ PopRegister(RA);
+  __ ret();
+}
+
+static intx_t Call(intx_t entry,
+                   intx_t arg0 = 0,
+                   intx_t arg1 = 0,
+                   intx_t arg2 = 0,
+                   intx_t arg3 = 0) {
+#if defined(USING_SIMULATOR)
+  return Simulator::Current()->Call(entry, arg0, arg1, arg2, arg3);
+#else
+  typedef intx_t (*F)(intx_t, intx_t, intx_t, intx_t);
+  return reinterpret_cast<F>(entry)(arg0, arg1, arg2, arg3);
+#endif
+}
+static float CallF(intx_t entry, intx_t arg0) {
+#if defined(USING_SIMULATOR)
+  return Simulator::Current()->CallF(entry, arg0);
+#else
+  typedef float (*F)(intx_t);
+  return reinterpret_cast<F>(entry)(arg0);
+#endif
+}
+static float CallF(intx_t entry, intx_t arg0, float arg1) {
+#if defined(USING_SIMULATOR)
+  return Simulator::Current()->CallF(entry, arg0, arg1);
+#else
+  typedef float (*F)(intx_t, float);
+  return reinterpret_cast<F>(entry)(arg0, arg1);
+#endif
+}
+static float CallF(intx_t entry, double arg0) {
+#if defined(USING_SIMULATOR)
+  return Simulator::Current()->CallF(entry, arg0);
+#else
+  typedef float (*F)(double);
+  return reinterpret_cast<F>(entry)(arg0);
+#endif
+}
+static float CallF(intx_t entry, float arg0) {
+#if defined(USING_SIMULATOR)
+  return Simulator::Current()->CallF(entry, arg0);
+#else
+  typedef float (*F)(float);
+  return reinterpret_cast<F>(entry)(arg0);
+#endif
+}
+static float CallF(intx_t entry, float arg0, float arg1) {
+#if defined(USING_SIMULATOR)
+  return Simulator::Current()->CallF(entry, arg0, arg1);
+#else
+  typedef float (*F)(float, float);
+  return reinterpret_cast<F>(entry)(arg0, arg1);
+#endif
+}
+static float CallF(intx_t entry, float arg0, float arg1, float arg2) {
+#if defined(USING_SIMULATOR)
+  return Simulator::Current()->CallF(entry, arg0, arg1, arg2);
+#else
+  typedef float (*F)(float, float, float);
+  return reinterpret_cast<F>(entry)(arg0, arg1, arg2);
+#endif
+}
+static intx_t CallI(intx_t entry, float arg0) {
+#if defined(USING_SIMULATOR)
+  return Simulator::Current()->CallI(entry, arg0);
+#else
+  typedef intx_t (*F)(float);
+  return reinterpret_cast<F>(entry)(arg0);
+#endif
+}
+static intx_t CallI(intx_t entry, float arg0, float arg1) {
+#if defined(USING_SIMULATOR)
+  return Simulator::Current()->CallI(entry, arg0, arg1);
+#else
+  typedef intx_t (*F)(float, float);
+  return reinterpret_cast<F>(entry)(arg0, arg1);
+#endif
+}
+static double CallD(intx_t entry, intx_t arg0) {
+#if defined(USING_SIMULATOR)
+  return Simulator::Current()->CallD(entry, arg0);
+#else
+  typedef double (*F)(intx_t);
+  return reinterpret_cast<F>(entry)(arg0);
+#endif
+}
+static double CallD(intx_t entry, intx_t arg0, double arg1) {
+#if defined(USING_SIMULATOR)
+  return Simulator::Current()->CallD(entry, arg0, arg1);
+#else
+  typedef double (*F)(intx_t, double);
+  return reinterpret_cast<F>(entry)(arg0, arg1);
+#endif
+}
+static double CallD(intx_t entry, float arg0) {
+#if defined(USING_SIMULATOR)
+  return Simulator::Current()->CallD(entry, arg0);
+#else
+  typedef double (*F)(float);
+  return reinterpret_cast<F>(entry)(arg0);
+#endif
+}
+static double CallD(intx_t entry, double arg0) {
+#if defined(USING_SIMULATOR)
+  return Simulator::Current()->CallD(entry, arg0);
+#else
+  typedef double (*F)(double);
+  return reinterpret_cast<F>(entry)(arg0);
+#endif
+}
+static double CallD(intx_t entry, double arg0, double arg1) {
+#if defined(USING_SIMULATOR)
+  return Simulator::Current()->CallD(entry, arg0, arg1);
+#else
+  typedef double (*F)(double, double);
+  return reinterpret_cast<F>(entry)(arg0, arg1);
+#endif
+}
+static double CallD(intx_t entry, double arg0, double arg1, double arg2) {
+#if defined(USING_SIMULATOR)
+  return Simulator::Current()->CallD(entry, arg0, arg1, arg2);
+#else
+  typedef double (*F)(double, double, double);
+  return reinterpret_cast<F>(entry)(arg0, arg1, arg2);
+#endif
+}
+static intx_t CallI(intx_t entry, double arg0) {
+#if defined(USING_SIMULATOR)
+  return Simulator::Current()->CallI(entry, arg0);
+#else
+  typedef intx_t (*F)(double);
+  return reinterpret_cast<F>(entry)(arg0);
+#endif
+}
+static intx_t CallI(intx_t entry, double arg0, double arg1) {
+#if defined(USING_SIMULATOR)
+  return Simulator::Current()->CallI(entry, arg0, arg1);
+#else
+  typedef intx_t (*F)(double, double);
+  return reinterpret_cast<F>(entry)(arg0, arg1);
+#endif
+}
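+
+// Each overload above either dispatches to the simulator (when the tests run
+// on a non-RISC-V host) or casts the entry point to a matching C function
+// type and calls the generated code directly, so the tests below can invoke
+// their stubs uniformly regardless of host.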
+
+ASSEMBLER_TEST_GENERATE(LoadUpperImmediate, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  __ lui(A0, 42 << 16);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadUpperImmediate, test) {
+  EXPECT_DISASSEMBLY(
+      "002a0537 lui a0, 2752512\n"
+      "00008067 ret\n");
+  EXPECT_EQ(42 << 16, Call(test->entry()));
+}
+
+ASSEMBLER_TEST_GENERATE(AddUpperImmediatePC, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  __ auipc(A0, 0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AddUpperImmediatePC, test) {
+  EXPECT_DISASSEMBLY(
+      "00000517 auipc a0, 0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(test->entry(), static_cast<uintx_t>(Call(test->entry())));
+}
+
+ASSEMBLER_TEST_GENERATE(JumpAndLink, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  Label label1, label2;
+  __ jal(T4, &label1);  // Forward.
+  __ sub(A0, T0, T1);
+  __ ret();
+  __ trap();
+
+  __ Bind(&label2);
+  __ li(T1, 7);
+  __ jalr(ZR, T5);
+  __ trap();
+
+  __ Bind(&label1);
+  __ li(T0, 4);
+  __ jal(T5, &label2);  // Backward.
+  __ jalr(ZR, T4);
+  __ trap();
+}
+ASSEMBLER_TEST_RUN(JumpAndLink, test) {
+  EXPECT_DISASSEMBLY(
+      "01c00eef jal t4, +28\n"
+      "40628533 sub a0, t0, t1\n"
+      "00008067 ret\n"
+      "00000000 trap\n"
+      "00700313 li t1, 7\n"
+      "000f0067 jr t5\n"
+      "00000000 trap\n"
+      "00400293 li t0, 4\n"
+      "ff1fff6f jal t5, -16\n"
+      "000e8067 jr t4\n"
+      "00000000 trap\n");
+  EXPECT_EQ(-3, Call(test->entry()));
+}
+
+ASSEMBLER_TEST_GENERATE(Jump, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  Label label1, label2;
+  __ j(&label1);  // Forward.
+  __ trap();
+  __ Bind(&label2);
+  __ li(T1, 7);
+  __ sub(A0, T0, T1);
+  __ ret();
+  __ Bind(&label1);
+  __ li(T0, 4);
+  __ j(&label2);  // Backward.
+  __ trap();
+}
+ASSEMBLER_TEST_RUN(Jump, test) {
+  EXPECT_DISASSEMBLY(
+      "0140006f j +20\n"
+      "00000000 trap\n"
+      "00700313 li t1, 7\n"
+      "40628533 sub a0, t0, t1\n"
+      "00008067 ret\n"
+      "00400293 li t0, 4\n"
+      "ff1ff06f j -16\n"
+      "00000000 trap\n");
+  EXPECT_EQ(-3, Call(test->entry()));
+}
+
+ASSEMBLER_TEST_GENERATE(JumpAndLinkRegister, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  /* 00 */ __ jalr(T4, A1, 28);  // Forward.
+  /* 04 */ __ sub(A0, T0, T1);
+  /* 08 */ __ ret();
+  /* 12 */ __ trap();
+
+  /* 16 */ __ li(T1, 7);
+  /* 20 */ __ jalr(ZR, T5);
+  /* 24 */ __ trap();
+
+  /* 28 */ __ li(T0, 4);
+  /* 32 */ __ jalr(T5, A1, 16);  // Backward.
+  /* 36 */ __ jalr(ZR, T4);
+  /* 40 */ __ trap();
+}
+ASSEMBLER_TEST_RUN(JumpAndLinkRegister, test) {
+  EXPECT_DISASSEMBLY(
+      "01c58ee7 jalr t4, 28(a1)\n"
+      "40628533 sub a0, t0, t1\n"
+      "00008067 ret\n"
+      "00000000 trap\n"
+      "00700313 li t1, 7\n"
+      "000f0067 jr t5\n"
+      "00000000 trap\n"
+      "00400293 li t0, 4\n"
+      "01058f67 jalr t5, 16(a1)\n"
+      "000e8067 jr t4\n"
+      "00000000 trap\n");
+  EXPECT_EQ(-3, Call(test->entry(), 0, test->entry()));
+}
+
+ASSEMBLER_TEST_GENERATE(JumpRegister, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  /* 00 */ __ jr(A1, 20);  // Forward.
+  /* 04 */ __ trap();
+  /* 08 */ __ li(T1, 7);
+  /* 12 */ __ sub(A0, T0, T1);
+  /* 16 */ __ ret();
+  /* 20 */ __ li(T0, 4);
+  /* 24 */ __ jr(A1, 8);  // Backward.
+  /* 28 */ __ trap();
+}
+ASSEMBLER_TEST_RUN(JumpRegister, test) {
+  EXPECT_DISASSEMBLY(
+      "01458067 jr 20(a1)\n"
+      "00000000 trap\n"
+      "00700313 li t1, 7\n"
+      "40628533 sub a0, t0, t1\n"
+      "00008067 ret\n"
+      "00400293 li t0, 4\n"
+      "00858067 jr 8(a1)\n"
+      "00000000 trap\n");
+  EXPECT_EQ(-3, Call(test->entry(), 0, test->entry()));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchEqualForward, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  Label label;
+  __ beq(A0, A1, &label);
+  __ li(A0, 3);
+  __ ret();
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchEqualForward, test) {
+  EXPECT_DISASSEMBLY(
+      "00b50663 beq a0, a1, +12\n"
+      "00300513 li a0, 3\n"
+      "00008067 ret\n"
+      "00400513 li a0, 4\n"
+      "00008067 ret\n");
+  EXPECT_EQ(4, Call(test->entry(), 1, 1));
+  EXPECT_EQ(3, Call(test->entry(), 1, 0));
+  EXPECT_EQ(3, Call(test->entry(), 1, -1));
+  EXPECT_EQ(3, Call(test->entry(), 0, 1));
+  EXPECT_EQ(4, Call(test->entry(), 0, 0));
+  EXPECT_EQ(3, Call(test->entry(), 0, -1));
+  EXPECT_EQ(3, Call(test->entry(), -1, 1));
+  EXPECT_EQ(3, Call(test->entry(), -1, 0));
+  EXPECT_EQ(4, Call(test->entry(), -1, -1));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchEqualForwardFar, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  Label label;
+  __ beq(A0, A1, &label);
+  __ li(A0, 3);
+  __ ret();
+  for (intptr_t i = 0; i < (1 << 13); i++) {
+    __ ebreak();
+  }
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchEqualForwardFar, test) {
+  //  EXPECT_DISASSEMBLY(constant too big);
+  EXPECT_EQ(4, Call(test->entry(), 1, 1));
+  EXPECT_EQ(3, Call(test->entry(), 1, 0));
+  EXPECT_EQ(3, Call(test->entry(), 1, -1));
+  EXPECT_EQ(3, Call(test->entry(), 0, 1));
+  EXPECT_EQ(4, Call(test->entry(), 0, 0));
+  EXPECT_EQ(3, Call(test->entry(), 0, -1));
+  EXPECT_EQ(3, Call(test->entry(), -1, 1));
+  EXPECT_EQ(3, Call(test->entry(), -1, 0));
+  EXPECT_EQ(4, Call(test->entry(), -1, -1));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchNotEqualForward, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  Label label;
+  __ bne(A0, A1, &label);
+  __ li(A0, 3);
+  __ ret();
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchNotEqualForward, test) {
+  EXPECT_DISASSEMBLY(
+      "00b51663 bne a0, a1, +12\n"
+      "00300513 li a0, 3\n"
+      "00008067 ret\n"
+      "00400513 li a0, 4\n"
+      "00008067 ret\n");
+  EXPECT_EQ(3, Call(test->entry(), 1, 1));
+  EXPECT_EQ(4, Call(test->entry(), 1, 0));
+  EXPECT_EQ(4, Call(test->entry(), 1, -1));
+  EXPECT_EQ(4, Call(test->entry(), 0, 1));
+  EXPECT_EQ(3, Call(test->entry(), 0, 0));
+  EXPECT_EQ(4, Call(test->entry(), 0, -1));
+  EXPECT_EQ(4, Call(test->entry(), -1, 1));
+  EXPECT_EQ(4, Call(test->entry(), -1, 0));
+  EXPECT_EQ(3, Call(test->entry(), -1, -1));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchNotEqualForwardFar, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  Label label;
+  __ bne(A0, A1, &label);
+  __ li(A0, 3);
+  __ ret();
+  for (intptr_t i = 0; i < (1 << 13); i++) {
+    __ ebreak();
+  }
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchNotEqualForwardFar, test) {
+  //  EXPECT_DISASSEMBLY(constant too big);
+  EXPECT_EQ(3, Call(test->entry(), 1, 1));
+  EXPECT_EQ(4, Call(test->entry(), 1, 0));
+  EXPECT_EQ(4, Call(test->entry(), 1, -1));
+  EXPECT_EQ(4, Call(test->entry(), 0, 1));
+  EXPECT_EQ(3, Call(test->entry(), 0, 0));
+  EXPECT_EQ(4, Call(test->entry(), 0, -1));
+  EXPECT_EQ(4, Call(test->entry(), -1, 1));
+  EXPECT_EQ(4, Call(test->entry(), -1, 0));
+  EXPECT_EQ(3, Call(test->entry(), -1, -1));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchLessThanForward, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  Label label;
+  __ blt(A0, A1, &label);
+  __ li(A0, 3);
+  __ ret();
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchLessThanForward, test) {
+  EXPECT_DISASSEMBLY(
+      "00b54663 blt a0, a1, +12\n"
+      "00300513 li a0, 3\n"
+      "00008067 ret\n"
+      "00400513 li a0, 4\n"
+      "00008067 ret\n");
+  EXPECT_EQ(3, Call(test->entry(), 1, 1));
+  EXPECT_EQ(3, Call(test->entry(), 1, 0));
+  EXPECT_EQ(3, Call(test->entry(), 1, -1));
+  EXPECT_EQ(4, Call(test->entry(), 0, 1));
+  EXPECT_EQ(3, Call(test->entry(), 0, 0));
+  EXPECT_EQ(3, Call(test->entry(), 0, -1));
+  EXPECT_EQ(4, Call(test->entry(), -1, 1));
+  EXPECT_EQ(4, Call(test->entry(), -1, 0));
+  EXPECT_EQ(3, Call(test->entry(), -1, -1));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchLessThanForwardFar, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  Label label;
+  __ blt(A0, A1, &label);
+  __ li(A0, 3);
+  __ ret();
+  for (intptr_t i = 0; i < (1 << 13); i++) {
+    __ ebreak();
+  }
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchLessThanForwardFar, test) {
+  //  EXPECT_DISASSEMBLY(constant too big);
+  EXPECT_EQ(3, Call(test->entry(), 1, 1));
+  EXPECT_EQ(3, Call(test->entry(), 1, 0));
+  EXPECT_EQ(3, Call(test->entry(), 1, -1));
+  EXPECT_EQ(4, Call(test->entry(), 0, 1));
+  EXPECT_EQ(3, Call(test->entry(), 0, 0));
+  EXPECT_EQ(3, Call(test->entry(), 0, -1));
+  EXPECT_EQ(4, Call(test->entry(), -1, 1));
+  EXPECT_EQ(4, Call(test->entry(), -1, 0));
+  EXPECT_EQ(3, Call(test->entry(), -1, -1));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchLessOrEqualForward, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  Label label;
+  __ ble(A0, A1, &label);
+  __ li(A0, 3);
+  __ ret();
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchLessOrEqualForward, test) {
+  EXPECT_DISASSEMBLY(
+      "00a5d663 ble a0, a1, +12\n"
+      "00300513 li a0, 3\n"
+      "00008067 ret\n"
+      "00400513 li a0, 4\n"
+      "00008067 ret\n");
+  EXPECT_EQ(4, Call(test->entry(), 1, 1));
+  EXPECT_EQ(3, Call(test->entry(), 1, 0));
+  EXPECT_EQ(3, Call(test->entry(), 1, -1));
+  EXPECT_EQ(4, Call(test->entry(), 0, 1));
+  EXPECT_EQ(4, Call(test->entry(), 0, 0));
+  EXPECT_EQ(3, Call(test->entry(), 0, -1));
+  EXPECT_EQ(4, Call(test->entry(), -1, 1));
+  EXPECT_EQ(4, Call(test->entry(), -1, 0));
+  EXPECT_EQ(4, Call(test->entry(), -1, -1));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchLessOrEqualForwardFar, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  Label label;
+  __ ble(A0, A1, &label);
+  __ li(A0, 3);
+  __ ret();
+  for (intptr_t i = 0; i < (1 << 13); i++) {
+    __ ebreak();
+  }
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchLessOrEqualForwardFar, test) {
+  //  EXPECT_DISASSEMBLY(constant too big);
+  EXPECT_EQ(4, Call(test->entry(), 1, 1));
+  EXPECT_EQ(3, Call(test->entry(), 1, 0));
+  EXPECT_EQ(3, Call(test->entry(), 1, -1));
+  EXPECT_EQ(4, Call(test->entry(), 0, 1));
+  EXPECT_EQ(4, Call(test->entry(), 0, 0));
+  EXPECT_EQ(3, Call(test->entry(), 0, -1));
+  EXPECT_EQ(4, Call(test->entry(), -1, 1));
+  EXPECT_EQ(4, Call(test->entry(), -1, 0));
+  EXPECT_EQ(4, Call(test->entry(), -1, -1));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchGreaterThanForward, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  Label label;
+  __ bgt(A0, A1, &label);
+  __ li(A0, 3);
+  __ ret();
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchGreaterThanForward, test) {
+  EXPECT_DISASSEMBLY(
+      "00a5c663 blt a1, a0, +12\n"
+      "00300513 li a0, 3\n"
+      "00008067 ret\n"
+      "00400513 li a0, 4\n"
+      "00008067 ret\n");
+  EXPECT_EQ(3, Call(test->entry(), 1, 1));
+  EXPECT_EQ(4, Call(test->entry(), 1, 0));
+  EXPECT_EQ(4, Call(test->entry(), 1, -1));
+  EXPECT_EQ(3, Call(test->entry(), 0, 1));
+  EXPECT_EQ(3, Call(test->entry(), 0, 0));
+  EXPECT_EQ(4, Call(test->entry(), 0, -1));
+  EXPECT_EQ(3, Call(test->entry(), -1, 1));
+  EXPECT_EQ(3, Call(test->entry(), -1, 0));
+  EXPECT_EQ(3, Call(test->entry(), -1, -1));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchGreaterOrEqualForward, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  Label label;
+  __ bge(A0, A1, &label);
+  __ li(A0, 3);
+  __ ret();
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchGreaterOrEqualForward, test) {
+  EXPECT_DISASSEMBLY(
+      "00b55663 ble a1, a0, +12\n"
+      "00300513 li a0, 3\n"
+      "00008067 ret\n"
+      "00400513 li a0, 4\n"
+      "00008067 ret\n");
+  EXPECT_EQ(4, Call(test->entry(), 1, 1));
+  EXPECT_EQ(4, Call(test->entry(), 1, 0));
+  EXPECT_EQ(4, Call(test->entry(), 1, -1));
+  EXPECT_EQ(3, Call(test->entry(), 0, 1));
+  EXPECT_EQ(4, Call(test->entry(), 0, 0));
+  EXPECT_EQ(4, Call(test->entry(), 0, -1));
+  EXPECT_EQ(3, Call(test->entry(), -1, 1));
+  EXPECT_EQ(3, Call(test->entry(), -1, 0));
+  EXPECT_EQ(4, Call(test->entry(), -1, -1));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchLessThanUnsignedForward, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  Label label;
+  __ bltu(A0, A1, &label);
+  __ li(A0, 3);
+  __ ret();
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchLessThanUnsignedForward, test) {
+  EXPECT_DISASSEMBLY(
+      "00b56663 bltu a0, a1, +12\n"
+      "00300513 li a0, 3\n"
+      "00008067 ret\n"
+      "00400513 li a0, 4\n"
+      "00008067 ret\n");
+  EXPECT_EQ(3, Call(test->entry(), 1, 1));
+  EXPECT_EQ(3, Call(test->entry(), 1, 0));
+  EXPECT_EQ(4, Call(test->entry(), 1, -1));
+  EXPECT_EQ(4, Call(test->entry(), 0, 1));
+  EXPECT_EQ(3, Call(test->entry(), 0, 0));
+  EXPECT_EQ(4, Call(test->entry(), 0, -1));
+  EXPECT_EQ(3, Call(test->entry(), -1, 1));
+  EXPECT_EQ(3, Call(test->entry(), -1, 0));
+  EXPECT_EQ(3, Call(test->entry(), -1, -1));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchLessOrEqualUnsignedForward, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  Label label;
+  __ bleu(A0, A1, &label);
+  __ li(A0, 3);
+  __ ret();
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchLessOrEqualUnsignedForward, test) {
+  EXPECT_DISASSEMBLY(
+      "00a5f663 bleu a0, a1, +12\n"
+      "00300513 li a0, 3\n"
+      "00008067 ret\n"
+      "00400513 li a0, 4\n"
+      "00008067 ret\n");
+  EXPECT_EQ(4, Call(test->entry(), 1, 1));
+  EXPECT_EQ(3, Call(test->entry(), 1, 0));
+  EXPECT_EQ(4, Call(test->entry(), 1, -1));
+  EXPECT_EQ(4, Call(test->entry(), 0, 1));
+  EXPECT_EQ(4, Call(test->entry(), 0, 0));
+  EXPECT_EQ(4, Call(test->entry(), 0, -1));
+  EXPECT_EQ(3, Call(test->entry(), -1, 1));
+  EXPECT_EQ(3, Call(test->entry(), -1, 0));
+  EXPECT_EQ(4, Call(test->entry(), -1, -1));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchGreaterThanUnsignedForward, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  Label label;
+  __ bgtu(A0, A1, &label);
+  __ li(A0, 3);
+  __ ret();
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchGreaterThanUnsignedForward, test) {
+  EXPECT_DISASSEMBLY(
+      "00a5e663 bltu a1, a0, +12\n"
+      "00300513 li a0, 3\n"
+      "00008067 ret\n"
+      "00400513 li a0, 4\n"
+      "00008067 ret\n");
+  EXPECT_EQ(3, Call(test->entry(), 1, 1));
+  EXPECT_EQ(4, Call(test->entry(), 1, 0));
+  EXPECT_EQ(3, Call(test->entry(), 1, -1));
+  EXPECT_EQ(3, Call(test->entry(), 0, 1));
+  EXPECT_EQ(3, Call(test->entry(), 0, 0));
+  EXPECT_EQ(3, Call(test->entry(), 0, -1));
+  EXPECT_EQ(4, Call(test->entry(), -1, 1));
+  EXPECT_EQ(4, Call(test->entry(), -1, 0));
+  EXPECT_EQ(3, Call(test->entry(), -1, -1));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchGreaterOrEqualUnsignedForward, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+
+  Label label;
+  __ bgeu(A0, A1, &label);
+  __ li(A0, 3);
+  __ ret();
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchGreaterOrEqualUnsignedForward, test) {
+  EXPECT_DISASSEMBLY(
+      "00b57663 bleu a1, a0, +12\n"
+      "00300513 li a0, 3\n"
+      "00008067 ret\n"
+      "00400513 li a0, 4\n"
+      "00008067 ret\n");
+  EXPECT_EQ(4, Call(test->entry(), 1, 1));
+  EXPECT_EQ(4, Call(test->entry(), 1, 0));
+  EXPECT_EQ(3, Call(test->entry(), 1, -1));
+  EXPECT_EQ(3, Call(test->entry(), 0, 1));
+  EXPECT_EQ(4, Call(test->entry(), 0, 0));
+  EXPECT_EQ(3, Call(test->entry(), 0, -1));
+  EXPECT_EQ(4, Call(test->entry(), -1, 1));
+  EXPECT_EQ(4, Call(test->entry(), -1, 0));
+  EXPECT_EQ(4, Call(test->entry(), -1, -1));
+}
+
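+// The load/store tests allocate a three-element buffer and address its middle
+// element, exercising zero, positive, and negative 12-bit immediate offsets.
+// lb/lh/lw sign-extend the loaded value (a 0xCD byte reads back as -51);
+// the lbu/lhu/lwu variants zero-extend instead.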
+ASSEMBLER_TEST_GENERATE(LoadByte_0, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lb(A0, Address(A0, 0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadByte_0, test) {
+  EXPECT_DISASSEMBLY(
+      "00050503 lb a0, 0(a0)\n"
+      "00008067 ret\n");
+
+  uint8_t* values = reinterpret_cast<uint8_t*>(malloc(3 * sizeof(uint8_t)));
+  values[0] = 0xAB;
+  values[1] = 0xCD;
+  values[2] = 0xEF;
+  EXPECT_EQ(-51, Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadByte_Pos, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lb(A0, Address(A0, 1));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadByte_Pos, test) {
+  EXPECT_DISASSEMBLY(
+      "00150503 lb a0, 1(a0)\n"
+      "00008067 ret\n");
+
+  uint8_t* values = reinterpret_cast<uint8_t*>(malloc(3 * sizeof(uint8_t)));
+  values[0] = 0xAB;
+  values[1] = 0xCD;
+  values[2] = 0xEF;
+
+  EXPECT_EQ(-17, Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadByte_Neg, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lb(A0, Address(A0, -1));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadByte_Neg, test) {
+  EXPECT_DISASSEMBLY(
+      "fff50503 lb a0, -1(a0)\n"
+      "00008067 ret\n");
+
+  uint8_t* values = reinterpret_cast<uint8_t*>(malloc(3 * sizeof(uint8_t)));
+  values[0] = 0xAB;
+  values[1] = 0xCD;
+  values[2] = 0xEF;
+
+  EXPECT_EQ(-85, Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadByteUnsigned_0, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lbu(A0, Address(A0, 0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadByteUnsigned_0, test) {
+  EXPECT_DISASSEMBLY(
+      "00054503 lbu a0, 0(a0)\n"
+      "00008067 ret\n");
+
+  uint8_t* values = reinterpret_cast<uint8_t*>(malloc(3 * sizeof(uint8_t)));
+  values[0] = 0xAB;
+  values[1] = 0xCD;
+  values[2] = 0xEF;
+
+  EXPECT_EQ(0xCD, Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadByteUnsigned_Pos, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lbu(A0, Address(A0, 1));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadByteUnsigned_Pos, test) {
+  EXPECT_DISASSEMBLY(
+      "00154503 lbu a0, 1(a0)\n"
+      "00008067 ret\n");
+
+  uint8_t* values = reinterpret_cast<uint8_t*>(malloc(3 * sizeof(uint8_t)));
+  values[0] = 0xAB;
+  values[1] = 0xCD;
+  values[2] = 0xEF;
+
+  EXPECT_EQ(0xEF, Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadByteUnsigned_Neg, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lbu(A0, Address(A0, -1));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadByteUnsigned_Neg, test) {
+  EXPECT_DISASSEMBLY(
+      "fff54503 lbu a0, -1(a0)\n"
+      "00008067 ret\n");
+
+  uint8_t* values = reinterpret_cast<uint8_t*>(malloc(3 * sizeof(uint8_t)));
+  values[0] = 0xAB;
+  values[1] = 0xCD;
+  values[2] = 0xEF;
+
+  EXPECT_EQ(0xAB, Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadHalfword_0, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lh(A0, Address(A0, 0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadHalfword_0, test) {
+  EXPECT_DISASSEMBLY(
+      "00051503 lh a0, 0(a0)\n"
+      "00008067 ret\n");
+
+  uint16_t* values = reinterpret_cast<uint16_t*>(malloc(3 * sizeof(uint16_t)));
+  values[0] = 0xAB01;
+  values[1] = 0xCD02;
+  values[2] = 0xEF03;
+
+  EXPECT_EQ(-13054, Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadHalfword_Pos, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lh(A0, Address(A0, 2));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadHalfword_Pos, test) {
+  EXPECT_DISASSEMBLY(
+      "00251503 lh a0, 2(a0)\n"
+      "00008067 ret\n");
+
+  uint16_t* values = reinterpret_cast<uint16_t*>(malloc(3 * sizeof(uint16_t)));
+  values[0] = 0xAB01;
+  values[1] = 0xCD02;
+  values[2] = 0xEF03;
+
+  EXPECT_EQ(-4349, Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadHalfword_Neg, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lh(A0, Address(A0, -2));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadHalfword_Neg, test) {
+  EXPECT_DISASSEMBLY(
+      "ffe51503 lh a0, -2(a0)\n"
+      "00008067 ret\n");
+
+  uint16_t* values = reinterpret_cast<uint16_t*>(malloc(3 * sizeof(uint16_t)));
+  values[0] = 0xAB01;
+  values[1] = 0xCD02;
+  values[2] = 0xEF03;
+
+  EXPECT_EQ(-21759, Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadHalfwordUnsigned_0, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lhu(A0, Address(A0, 0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadHalfwordUnsigned_0, test) {
+  EXPECT_DISASSEMBLY(
+      "00055503 lhu a0, 0(a0)\n"
+      "00008067 ret\n");
+
+  uint16_t* values = reinterpret_cast<uint16_t*>(malloc(3 * sizeof(uint16_t)));
+  values[0] = 0xAB01;
+  values[1] = 0xCD02;
+  values[2] = 0xEF03;
+
+  EXPECT_EQ(0xCD02, Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadHalfwordUnsigned_Pos, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lhu(A0, Address(A0, 2));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadHalfwordUnsigned_Pos, test) {
+  EXPECT_DISASSEMBLY(
+      "00255503 lhu a0, 2(a0)\n"
+      "00008067 ret\n");
+
+  uint16_t* values = reinterpret_cast<uint16_t*>(malloc(3 * sizeof(uint16_t)));
+  values[0] = 0xAB01;
+  values[1] = 0xCD02;
+  values[2] = 0xEF03;
+
+  EXPECT_EQ(0xEF03, Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadHalfwordUnsigned_Neg, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lhu(A0, Address(A0, -2));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadHalfwordUnsigned_Neg, test) {
+  EXPECT_DISASSEMBLY(
+      "ffe55503 lhu a0, -2(a0)\n"
+      "00008067 ret\n");
+
+  uint16_t* values = reinterpret_cast<uint16_t*>(malloc(3 * sizeof(uint16_t)));
+  values[0] = 0xAB01;
+  values[1] = 0xCD02;
+  values[2] = 0xEF03;
+
+  EXPECT_EQ(0xAB01, Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadWord_0, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lw(A0, Address(A0, 0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadWord_0, test) {
+  EXPECT_DISASSEMBLY(
+      "00052503 lw a0, 0(a0)\n"
+      "00008067 ret\n");
+
+  uint32_t* values = reinterpret_cast<uint32_t*>(malloc(3 * sizeof(uint32_t)));
+  values[0] = 0xAB010203;
+  values[1] = 0xCD020405;
+  values[2] = 0xEF030607;
+
+  EXPECT_EQ(-855505915,
+            Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadWord_Pos, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lw(A0, Address(A0, 4));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadWord_Pos, test) {
+  EXPECT_DISASSEMBLY(
+      "00452503 lw a0, 4(a0)\n"
+      "00008067 ret\n");
+
+  uint32_t* values = reinterpret_cast<uint32_t*>(malloc(3 * sizeof(uint32_t)));
+  values[0] = 0xAB010203;
+  values[1] = 0xCD020405;
+  values[2] = 0xEF030607;
+
+  EXPECT_EQ(-285014521,
+            Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadWord_Neg, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lw(A0, Address(A0, -4));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadWord_Neg, test) {
+  EXPECT_DISASSEMBLY(
+      "ffc52503 lw a0, -4(a0)\n"
+      "00008067 ret\n");
+
+  uint32_t* values = reinterpret_cast<uint32_t*>(malloc(3 * sizeof(uint32_t)));
+  values[0] = 0xAB010203;
+  values[1] = 0xCD020405;
+  values[2] = 0xEF030607;
+
+  EXPECT_EQ(-1425997309,
+            Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(StoreWord_0, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sw(A1, Address(A0, 0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(StoreWord_0, test) {
+  EXPECT_DISASSEMBLY(
+      "00b52023 sw a1, 0(a0)\n"
+      "00008067 ret\n");
+
+  uint32_t* values = reinterpret_cast<uint32_t*>(malloc(3 * sizeof(uint32_t)));
+  values[0] = 0;
+  values[1] = 0;
+  values[2] = 0;
+
+  Call(test->entry(), reinterpret_cast<intx_t>(&values[1]), 0xCD020405);
+  EXPECT_EQ(0u, values[0]);
+  EXPECT_EQ(0xCD020405, values[1]);
+  EXPECT_EQ(0u, values[2]);
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(StoreWord_Pos, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sw(A1, Address(A0, 4));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(StoreWord_Pos, test) {
+  EXPECT_DISASSEMBLY(
+      "00b52223 sw a1, 4(a0)\n"
+      "00008067 ret\n");
+
+  uint32_t* values = reinterpret_cast<uint32_t*>(malloc(3 * sizeof(uint32_t)));
+  values[0] = 0;
+  values[1] = 0;
+  values[2] = 0;
+
+  Call(test->entry(), reinterpret_cast<intx_t>(&values[1]), 0xEF030607);
+  EXPECT_EQ(0u, values[0]);
+  EXPECT_EQ(0u, values[1]);
+  EXPECT_EQ(0xEF030607, values[2]);
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(StoreWord_Neg, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sw(A1, Address(A0, -4));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(StoreWord_Neg, test) {
+  EXPECT_DISASSEMBLY(
+      "feb52e23 sw a1, -4(a0)\n"
+      "00008067 ret\n");
+
+  uint32_t* values = reinterpret_cast<uint32_t*>(malloc(3 * sizeof(uint32_t)));
+  values[0] = 0;
+  values[1] = 0;
+  values[2] = 0;
+
+  Call(test->entry(), reinterpret_cast<intx_t>(&values[1]), 0xAB010203);
+  EXPECT_EQ(0xAB010203, values[0]);
+  EXPECT_EQ(0u, values[1]);
+  EXPECT_EQ(0u, values[2]);
+  free(values);
+}
+
+#if XLEN >= 64
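+// lwu, ld, and sd exist only on RV64: lwu zero-extends a 32-bit load where
+// lw sign-extends, and ld/sd move full 64-bit doublewords.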
+ASSEMBLER_TEST_GENERATE(LoadWordUnsigned_0, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lwu(A0, Address(A0, 0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadWordUnsigned_0, test) {
+  EXPECT_DISASSEMBLY(
+      "00056503 lwu a0, 0(a0)\n"
+      "00008067 ret\n");
+
+  uint32_t* values = reinterpret_cast<uint32_t*>(malloc(3 * sizeof(uint32_t)));
+  values[0] = 0xAB010203;
+  values[1] = 0xCD020405;
+  values[2] = 0xEF030607;
+
+  EXPECT_EQ(0xCD020405,
+            Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadWordUnsigned_Pos, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lwu(A0, Address(A0, 4));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadWordUnsigned_Pos, test) {
+  EXPECT_DISASSEMBLY(
+      "00456503 lwu a0, 4(a0)\n"
+      "00008067 ret\n");
+
+  uint32_t* values = reinterpret_cast<uint32_t*>(malloc(3 * sizeof(uint32_t)));
+  values[0] = 0xAB010203;
+  values[1] = 0xCD020405;
+  values[2] = 0xEF030607;
+
+  EXPECT_EQ(0xEF030607,
+            Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadWordUnsigned_Neg, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lwu(A0, Address(A0, -4));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadWordUnsigned_Neg, test) {
+  EXPECT_DISASSEMBLY(
+      "ffc56503 lwu a0, -4(a0)\n"
+      "00008067 ret\n");
+
+  uint32_t* values = reinterpret_cast<uint32_t*>(malloc(3 * sizeof(uint32_t)));
+  values[0] = 0xAB010203;
+  values[1] = 0xCD020405;
+  values[2] = 0xEF030607;
+
+  EXPECT_EQ(0xAB010203,
+            Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadDoubleWord_0, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ ld(A0, Address(A0, 0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadDoubleWord_0, test) {
+  EXPECT_DISASSEMBLY(
+      "00053503 ld a0, 0(a0)\n"
+      "00008067 ret\n");
+
+  uint64_t* values = reinterpret_cast<uint64_t*>(malloc(3 * sizeof(uint64_t)));
+  values[0] = 0xAB01020304050607;
+  values[1] = 0xCD02040505060708;
+  values[2] = 0xEF03060708090A0B;
+
+  EXPECT_EQ(-3674369926375274744,
+            Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadDoubleWord_Pos, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ ld(A0, Address(A0, 8));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadDoubleWord_Pos, test) {
+  EXPECT_DISASSEMBLY(
+      "00853503 ld a0, 8(a0)\n"
+      "00008067 ret\n");
+
+  uint64_t* values = reinterpret_cast<uint64_t*>(malloc(3 * sizeof(uint64_t)));
+  values[0] = 0xAB01020304050607;
+  values[1] = 0xCD02040505060708;
+  values[2] = 0xEF03060708090A0B;
+
+  EXPECT_EQ(-1224128046445295093,
+            Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadDoubleWord_Neg, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ ld(A0, Address(A0, -8));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadDoubleWord_Neg, test) {
+  EXPECT_DISASSEMBLY(
+      "ff853503 ld a0, -8(a0)\n"
+      "00008067 ret\n");
+
+  uint64_t* values = reinterpret_cast<uint64_t*>(malloc(3 * sizeof(uint64_t)));
+  values[0] = 0xAB01020304050607;
+  values[1] = 0xCD02040505060708;
+  values[2] = 0xEF03060708090A0B;
+
+  EXPECT_EQ(-6124611806271568377,
+            Call(test->entry(), reinterpret_cast<intx_t>(&values[1])));
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(StoreDoubleWord_0, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sd(A1, Address(A0, 0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(StoreDoubleWord_0, test) {
+  EXPECT_DISASSEMBLY(
+      "00b53023 sd a1, 0(a0)\n"
+      "00008067 ret\n");
+
+  uint64_t* values = reinterpret_cast<uint64_t*>(malloc(3 * sizeof(uint64_t)));
+  values[0] = 0;
+  values[1] = 0;
+  values[2] = 0;
+
+  Call(test->entry(), reinterpret_cast<intx_t>(&values[1]), 0xCD02040505060708);
+  EXPECT_EQ(0u, values[0]);
+  EXPECT_EQ(0xCD02040505060708, values[1]);
+  EXPECT_EQ(0u, values[2]);
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(StoreDoubleWord_Pos, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sd(A1, Address(A0, 8));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(StoreDoubleWord_Pos, test) {
+  EXPECT_DISASSEMBLY(
+      "00b53423 sd a1, 8(a0)\n"
+      "00008067 ret\n");
+
+  uint64_t* values = reinterpret_cast<uint64_t*>(malloc(3 * sizeof(uint64_t)));
+  values[0] = 0;
+  values[1] = 0;
+  values[2] = 0;
+
+  Call(test->entry(), reinterpret_cast<intx_t>(&values[1]), 0xEF03060708090A0B);
+  EXPECT_EQ(0u, values[0]);
+  EXPECT_EQ(0u, values[1]);
+  EXPECT_EQ(0xEF03060708090A0B, values[2]);
+  free(values);
+}
+
+ASSEMBLER_TEST_GENERATE(StoreDoubleWord_Neg, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sd(A1, Address(A0, -8));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(StoreDoubleWord_Neg, test) {
+  EXPECT_DISASSEMBLY(
+      "feb53c23 sd a1, -8(a0)\n"
+      "00008067 ret\n");
+
+  uint64_t* values = reinterpret_cast<uint64_t*>(malloc(3 * sizeof(uint64_t)));
+  values[0] = 0;
+  values[1] = 0;
+  values[2] = 0;
+
+  Call(test->entry(), reinterpret_cast<intx_t>(&values[1]), 0xAB01020304050607);
+  EXPECT_EQ(0xAB01020304050607, values[0]);
+  EXPECT_EQ(0u, values[1]);
+  EXPECT_EQ(0u, values[2]);
+  free(values);
+}
+#endif  // XLEN >= 64
+
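+// OP-IMM instructions carry a 12-bit signed immediate, so valid operands lie
+// in [-2048, 2047]; larger constants need li/lui sequences instead.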
+ASSEMBLER_TEST_GENERATE(AddImmediate1, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ addi(A0, A0, 42);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AddImmediate1, test) {
+  EXPECT_DISASSEMBLY(
+      "02a50513 addi a0, a0, 42\n"
+      "00008067 ret\n");
+  EXPECT_EQ(42, Call(test->entry(), 0));
+  EXPECT_EQ(40, Call(test->entry(), -2));
+  EXPECT_EQ(0, Call(test->entry(), -42));
+}
+
+ASSEMBLER_TEST_GENERATE(AddImmediate2, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ addi(A0, A0, -42);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AddImmediate2, test) {
+  EXPECT_DISASSEMBLY(
+      "fd650513 addi a0, a0, -42\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-42, Call(test->entry(), 0));
+  EXPECT_EQ(-44, Call(test->entry(), -2));
+  EXPECT_EQ(38, Call(test->entry(), 80));
+}
+
+ASSEMBLER_TEST_GENERATE(SetLessThanImmediate1, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ slti(A0, A0, 7);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SetLessThanImmediate1, test) {
+  EXPECT_DISASSEMBLY(
+      "00752513 slti a0, a0, 7\n"
+      "00008067 ret\n");
+  EXPECT_EQ(1, Call(test->entry(), 6));
+  EXPECT_EQ(0, Call(test->entry(), 7));
+  EXPECT_EQ(0, Call(test->entry(), 8));
+  EXPECT_EQ(1, Call(test->entry(), -6));
+  EXPECT_EQ(1, Call(test->entry(), -7));
+  EXPECT_EQ(1, Call(test->entry(), -8));
+}
+
+ASSEMBLER_TEST_GENERATE(SetLessThanImmediate2, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ slti(A0, A0, -7);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SetLessThanImmediate2, test) {
+  EXPECT_DISASSEMBLY(
+      "ff952513 slti a0, a0, -7\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 6));
+  EXPECT_EQ(0, Call(test->entry(), 7));
+  EXPECT_EQ(0, Call(test->entry(), 8));
+  EXPECT_EQ(0, Call(test->entry(), -6));
+  EXPECT_EQ(0, Call(test->entry(), -7));
+  EXPECT_EQ(1, Call(test->entry(), -8));
+}
+
+ASSEMBLER_TEST_GENERATE(SetLessThanImmediateUnsigned1, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sltiu(A0, A0, 7);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SetLessThanImmediateUnsigned1, test) {
+  EXPECT_DISASSEMBLY(
+      "00753513 sltiu a0, a0, 7\n"
+      "00008067 ret\n");
+  EXPECT_EQ(1, Call(test->entry(), 6));
+  EXPECT_EQ(0, Call(test->entry(), 7));
+  EXPECT_EQ(0, Call(test->entry(), 8));
+  EXPECT_EQ(0, Call(test->entry(), -6));
+  EXPECT_EQ(0, Call(test->entry(), -7));
+  EXPECT_EQ(0, Call(test->entry(), -8));
+}
+
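+// Note: sltiu sign-extends its immediate before the unsigned comparison, so
+// "sltiu a0, a0, -7" compares against the very large value 2^XLEN - 7, which
+// is why the small positive inputs below satisfy it.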
+ASSEMBLER_TEST_GENERATE(SetLessThanImmediateUnsigned2, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sltiu(A0, A0, -7);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SetLessThanImmediateUnsigned2, test) {
+  EXPECT_DISASSEMBLY(
+      "ff953513 sltiu a0, a0, -7\n"
+      "00008067 ret\n");
+  EXPECT_EQ(1, Call(test->entry(), 6));
+  EXPECT_EQ(1, Call(test->entry(), 7));
+  EXPECT_EQ(1, Call(test->entry(), 8));
+  EXPECT_EQ(0, Call(test->entry(), -6));
+  EXPECT_EQ(0, Call(test->entry(), -7));
+  EXPECT_EQ(1, Call(test->entry(), -8));
+}
+
+ASSEMBLER_TEST_GENERATE(XorImmediate1, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ xori(A0, A0, 42);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(XorImmediate1, test) {
+  EXPECT_DISASSEMBLY(
+      "02a54513 xori a0, a0, 42\n"
+      "00008067 ret\n");
+  EXPECT_EQ(42, Call(test->entry(), 0));
+  EXPECT_EQ(43, Call(test->entry(), 1));
+  EXPECT_EQ(32, Call(test->entry(), 10));
+  EXPECT_EQ(-43, Call(test->entry(), -1));
+  EXPECT_EQ(-36, Call(test->entry(), -10));
+}
+
+ASSEMBLER_TEST_GENERATE(XorImmediate2, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ xori(A0, A0, -42);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(XorImmediate2, test) {
+  EXPECT_DISASSEMBLY(
+      "fd654513 xori a0, a0, -42\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-42, Call(test->entry(), 0));
+  EXPECT_EQ(-41, Call(test->entry(), 1));
+  EXPECT_EQ(-36, Call(test->entry(), 10));
+  EXPECT_EQ(41, Call(test->entry(), -1));
+  EXPECT_EQ(32, Call(test->entry(), -10));
+}
+
+ASSEMBLER_TEST_GENERATE(OrImmediate1, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ ori(A0, A0, -6);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(OrImmediate1, test) {
+  EXPECT_DISASSEMBLY(
+      "ffa56513 ori a0, a0, -6\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-6, Call(test->entry(), 0));
+  EXPECT_EQ(-5, Call(test->entry(), 1));
+  EXPECT_EQ(-5, Call(test->entry(), 11));
+  EXPECT_EQ(-1, Call(test->entry(), -1));
+  EXPECT_EQ(-1, Call(test->entry(), -11));
+}
+
+ASSEMBLER_TEST_GENERATE(OrImmediate2, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ ori(A0, A0, 6);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(OrImmediate2, test) {
+  EXPECT_DISASSEMBLY(
+      "00656513 ori a0, a0, 6\n"
+      "00008067 ret\n");
+  EXPECT_EQ(6, Call(test->entry(), 0));
+  EXPECT_EQ(7, Call(test->entry(), 1));
+  EXPECT_EQ(15, Call(test->entry(), 11));
+  EXPECT_EQ(-1, Call(test->entry(), -1));
+  EXPECT_EQ(-9, Call(test->entry(), -11));
+}
+
+ASSEMBLER_TEST_GENERATE(AndImmediate1, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ andi(A0, A0, -6);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AndImmediate1, test) {
+  EXPECT_DISASSEMBLY(
+      "ffa57513 andi a0, a0, -6\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(0, Call(test->entry(), 1));
+  EXPECT_EQ(10, Call(test->entry(), 11));
+  EXPECT_EQ(-6, Call(test->entry(), -1));
+  EXPECT_EQ(-16, Call(test->entry(), -11));
+}
+
+ASSEMBLER_TEST_GENERATE(AndImmediate2, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ andi(A0, A0, 6);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AndImmediate2, test) {
+  EXPECT_DISASSEMBLY(
+      "00657513 andi a0, a0, 6\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(0, Call(test->entry(), 1));
+  EXPECT_EQ(2, Call(test->entry(), 11));
+  EXPECT_EQ(6, Call(test->entry(), -1));
+  EXPECT_EQ(4, Call(test->entry(), -11));
+}
+
+ASSEMBLER_TEST_GENERATE(ShiftLeftLogicalImmediate, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ slli(A0, A0, 2);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ShiftLeftLogicalImmediate, test) {
+  EXPECT_DISASSEMBLY(
+      "00251513 slli a0, a0, 0x2\n"
+      "00008067 ret\n");
+  EXPECT_EQ(84, Call(test->entry(), 21));
+  EXPECT_EQ(4, Call(test->entry(), 1));
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(-4, Call(test->entry(), -1));
+  EXPECT_EQ(-84, Call(test->entry(), -21));
+}
+
+ASSEMBLER_TEST_GENERATE(ShiftLeftLogicalImmediate2, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ slli(A0, A0, XLEN - 1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ShiftLeftLogicalImmediate2, test) {
+#if XLEN == 32
+  EXPECT_DISASSEMBLY(
+      "01f51513 slli a0, a0, 0x1f\n"
+      "00008067 ret\n");
+#elif XLEN == 64
+  EXPECT_DISASSEMBLY(
+      "03f51513 slli a0, a0, 0x3f\n"
+      "00008067 ret\n");
+#endif
+  EXPECT_EQ(0, Call(test->entry(), 2));
+  EXPECT_EQ(kMinIntX, Call(test->entry(), 1));
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(kMinIntX, Call(test->entry(), -1));
+  EXPECT_EQ(0, Call(test->entry(), -2));
+}
+
+ASSEMBLER_TEST_GENERATE(ShiftRightLogicalImmediate, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ srli(A0, A0, 2);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ShiftRightLogicalImmediate, test) {
+  EXPECT_DISASSEMBLY(
+      "00255513 srli a0, a0, 0x2\n"
+      "00008067 ret\n");
+  EXPECT_EQ(5, Call(test->entry(), 21));
+  EXPECT_EQ(0, Call(test->entry(), 1));
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(static_cast<intx_t>(static_cast<uintx_t>(-1) >> 2),
+            Call(test->entry(), -1));
+  EXPECT_EQ(static_cast<intx_t>(static_cast<uintx_t>(-21) >> 2),
+            Call(test->entry(), -21));
+}
+
+ASSEMBLER_TEST_GENERATE(ShiftRightLogicalImmediate2, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ srli(A0, A0, XLEN - 1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ShiftRightLogicalImmediate2, test) {
+#if XLEN == 32
+  EXPECT_DISASSEMBLY(
+      "01f55513 srli a0, a0, 0x1f\n"
+      "00008067 ret\n");
+#elif XLEN == 64
+  EXPECT_DISASSEMBLY(
+      "03f55513 srli a0, a0, 0x3f\n"
+      "00008067 ret\n");
+#endif
+  EXPECT_EQ(0, Call(test->entry(), 21));
+  EXPECT_EQ(0, Call(test->entry(), 1));
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(1, Call(test->entry(), -1));
+  EXPECT_EQ(1, Call(test->entry(), -21));
+}
+
+ASSEMBLER_TEST_GENERATE(ShiftRightArithmeticImmediate, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ srai(A0, A0, 2);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ShiftRightArithmeticImmediate, test) {
+  EXPECT_DISASSEMBLY(
+      "40255513 srai a0, a0, 0x2\n"
+      "00008067 ret\n");
+  EXPECT_EQ(5, Call(test->entry(), 21));
+  EXPECT_EQ(0, Call(test->entry(), 1));
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(-1, Call(test->entry(), -1));
+  EXPECT_EQ(-6, Call(test->entry(), -21));
+}
+
+ASSEMBLER_TEST_GENERATE(ShiftRightArithmeticImmediate2, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ srai(A0, A0, XLEN - 1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ShiftRightArithmeticImmediate2, test) {
+#if XLEN == 32
+  EXPECT_DISASSEMBLY(
+      "41f55513 srai a0, a0, 0x1f\n"  // CHECK
+      "00008067 ret\n");
+#elif XLEN == 64
+  EXPECT_DISASSEMBLY(
+      "43f55513 srai a0, a0, 0x3f\n"  // CHECK
+      "00008067 ret\n");
+#endif
+  EXPECT_EQ(0, Call(test->entry(), 21));
+  EXPECT_EQ(0, Call(test->entry(), 1));
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(-1, Call(test->entry(), -1));
+  EXPECT_EQ(-1, Call(test->entry(), -21));
+}
+
+ASSEMBLER_TEST_GENERATE(Add, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ add(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(Add, test) {
+  EXPECT_DISASSEMBLY(
+      "00b50533 add a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(24, Call(test->entry(), 7, 17));
+  EXPECT_EQ(-10, Call(test->entry(), 7, -17));
+  EXPECT_EQ(10, Call(test->entry(), -7, 17));
+  EXPECT_EQ(-24, Call(test->entry(), -7, -17));
+  EXPECT_EQ(24, Call(test->entry(), 17, 7));
+  EXPECT_EQ(10, Call(test->entry(), 17, -7));
+  EXPECT_EQ(-10, Call(test->entry(), -17, 7));
+  EXPECT_EQ(-24, Call(test->entry(), -17, -7));
+}
+
+ASSEMBLER_TEST_GENERATE(Subtract, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sub(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(Subtract, test) {
+  EXPECT_DISASSEMBLY(
+      "40b50533 sub a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-10, Call(test->entry(), 7, 17));
+  EXPECT_EQ(24, Call(test->entry(), 7, -17));
+  EXPECT_EQ(-24, Call(test->entry(), -7, 17));
+  EXPECT_EQ(10, Call(test->entry(), -7, -17));
+  EXPECT_EQ(10, Call(test->entry(), 17, 7));
+  EXPECT_EQ(24, Call(test->entry(), 17, -7));
+  EXPECT_EQ(-24, Call(test->entry(), -17, 7));
+  EXPECT_EQ(-10, Call(test->entry(), -17, -7));
+}
+
+ASSEMBLER_TEST_GENERATE(ShiftLeftLogical, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sll(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ShiftLeftLogical, test) {
+  EXPECT_DISASSEMBLY(
+      "00b51533 sll a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(2176, Call(test->entry(), 17, 7));
+  EXPECT_EQ(-2176, Call(test->entry(), -17, 7));
+  EXPECT_EQ(34, Call(test->entry(), 17, 1));
+  EXPECT_EQ(-34, Call(test->entry(), -17, 1));
+  EXPECT_EQ(17, Call(test->entry(), 17, 0));
+  EXPECT_EQ(-17, Call(test->entry(), -17, 0));
+}
+
+ASSEMBLER_TEST_GENERATE(SetLessThan, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ slt(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SetLessThan, test) {
+  EXPECT_DISASSEMBLY(
+      "00b52533 slt a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 7, 7));
+  EXPECT_EQ(0, Call(test->entry(), -7, -7));
+  EXPECT_EQ(1, Call(test->entry(), 7, 17));
+  EXPECT_EQ(0, Call(test->entry(), 7, -17));
+  EXPECT_EQ(1, Call(test->entry(), -7, 17));
+  EXPECT_EQ(0, Call(test->entry(), -7, -17));
+  EXPECT_EQ(0, Call(test->entry(), 17, 7));
+  EXPECT_EQ(0, Call(test->entry(), 17, -7));
+  EXPECT_EQ(1, Call(test->entry(), -17, 7));
+  EXPECT_EQ(1, Call(test->entry(), -17, -7));
+}
+
+ASSEMBLER_TEST_GENERATE(SetLessThanUnsigned, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sltu(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SetLessThanUnsigned, test) {
+  EXPECT_DISASSEMBLY(
+      "00b53533 sltu a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 7, 7));
+  EXPECT_EQ(0, Call(test->entry(), -7, -7));
+  EXPECT_EQ(1, Call(test->entry(), 7, 17));
+  EXPECT_EQ(1, Call(test->entry(), 7, -17));
+  EXPECT_EQ(0, Call(test->entry(), -7, 17));
+  EXPECT_EQ(0, Call(test->entry(), -7, -17));
+  EXPECT_EQ(0, Call(test->entry(), 17, 7));
+  EXPECT_EQ(1, Call(test->entry(), 17, -7));
+  EXPECT_EQ(0, Call(test->entry(), -17, 7));
+  EXPECT_EQ(1, Call(test->entry(), -17, -7));
+}
+
+ASSEMBLER_TEST_GENERATE(Xor, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ xor_(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(Xor, test) {
+  EXPECT_DISASSEMBLY(
+      "00b54533 xor a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(22, Call(test->entry(), 7, 17));
+  EXPECT_EQ(-24, Call(test->entry(), 7, -17));
+  EXPECT_EQ(-24, Call(test->entry(), -7, 17));
+  EXPECT_EQ(22, Call(test->entry(), -7, -17));
+  EXPECT_EQ(22, Call(test->entry(), 17, 7));
+  EXPECT_EQ(-24, Call(test->entry(), 17, -7));
+  EXPECT_EQ(-24, Call(test->entry(), -17, 7));
+  EXPECT_EQ(22, Call(test->entry(), -17, -7));
+}
+
+ASSEMBLER_TEST_GENERATE(ShiftRightLogical, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ srl(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ShiftRightLogical, test) {
+  EXPECT_DISASSEMBLY(
+      "00b55533 srl a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 17, 7));
+  EXPECT_EQ(static_cast<intx_t>(static_cast<uintx_t>(-17) >> 7),
+            Call(test->entry(), -17, 7));
+  EXPECT_EQ(8, Call(test->entry(), 17, 1));
+  EXPECT_EQ(static_cast<intx_t>(static_cast<uintx_t>(-17) >> 1),
+            Call(test->entry(), -17, 1));
+  EXPECT_EQ(17, Call(test->entry(), 17, 0));
+  EXPECT_EQ(-17, Call(test->entry(), -17, 0));
+}
+
+ASSEMBLER_TEST_GENERATE(ShiftRightArithmetic, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sra(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ShiftRightArithmetic, test) {
+  EXPECT_DISASSEMBLY(
+      "40b55533 sra a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 17, 7));
+  EXPECT_EQ(-1, Call(test->entry(), -17, 7));
+  EXPECT_EQ(8, Call(test->entry(), 17, 1));
+  EXPECT_EQ(-9, Call(test->entry(), -17, 1));
+  EXPECT_EQ(17, Call(test->entry(), 17, 0));
+  EXPECT_EQ(-17, Call(test->entry(), -17, 0));
+}
+
+ASSEMBLER_TEST_GENERATE(Or, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ or_(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(Or, test) {
+  EXPECT_DISASSEMBLY(
+      "00b56533 or a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(23, Call(test->entry(), 7, 17));
+  EXPECT_EQ(-17, Call(test->entry(), 7, -17));
+  EXPECT_EQ(-7, Call(test->entry(), -7, 17));
+  EXPECT_EQ(-1, Call(test->entry(), -7, -17));
+  EXPECT_EQ(23, Call(test->entry(), 17, 7));
+  EXPECT_EQ(-7, Call(test->entry(), 17, -7));
+  EXPECT_EQ(-17, Call(test->entry(), -17, 7));
+  EXPECT_EQ(-1, Call(test->entry(), -17, -7));
+}
+
+ASSEMBLER_TEST_GENERATE(And, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ and_(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(And, test) {
+  EXPECT_DISASSEMBLY(
+      "00b57533 and a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(1, Call(test->entry(), 7, 17));
+  EXPECT_EQ(7, Call(test->entry(), 7, -17));
+  EXPECT_EQ(17, Call(test->entry(), -7, 17));
+  EXPECT_EQ(-23, Call(test->entry(), -7, -17));
+  EXPECT_EQ(1, Call(test->entry(), 17, 7));
+  EXPECT_EQ(17, Call(test->entry(), 17, -7));
+  EXPECT_EQ(7, Call(test->entry(), -17, 7));
+  EXPECT_EQ(-23, Call(test->entry(), -17, -7));
+}
+
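+// fence predecessor/successor sets combine the i (input), o (output),
+// r (read), and w (write) bits; a plain fence is fence iorw,iorw, which is
+// why the no-argument form and fence(kAll, kAll) disassemble identically.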
+ASSEMBLER_TEST_GENERATE(Fence, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fence();
+  __ fence(kRead, kWrite);
+  __ fence(kInput, kOutput);
+  __ fence(kMemory, kMemory);
+  __ fence(kAll, kAll);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(Fence, test) {
+  EXPECT_DISASSEMBLY(
+      "0ff0000f fence\n"
+      "0210000f fence r,w\n"
+      "0840000f fence i,o\n"
+      "0330000f fence rw,rw\n"
+      "0ff0000f fence\n"
+      "00008067 ret\n");
+  Call(test->entry());
+}
+
+ASSEMBLER_TEST_GENERATE(InstructionFence, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fencei();
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(InstructionFence, test) {
+  EXPECT_DISASSEMBLY(
+      "0000100f fence.i\n"
+      "00008067 ret\n");
+  Call(test->entry());
+}
+
+ASSEMBLER_TEST_GENERATE(EnvironmentCall, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ ecall();
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(EnvironmentCall, test) {
+  EXPECT_DISASSEMBLY(
+      "00000073 ecall\n"
+      "00008067 ret\n");
+
+  // Not running: would trap.
+}
+
+ASSEMBLER_TEST_GENERATE(EnvironmentBreak, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ ebreak();
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(EnvironmentBreak, test) {
+  EXPECT_DISASSEMBLY(
+      "00100073 ebreak\n"
+      "00008067 ret\n");
+
+  // Not running: would trap.
+}
+
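+// csrr/csrw/csrs/csrc are pseudo-instructions over csrrw/csrrs/csrrc with
+// x0 (or a zero immediate) as the unused operand. Note the disassembler
+// prints S1 under its VM alias "thr" in the expected output below.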
+ASSEMBLER_TEST_GENERATE(ControlStatusRegisters, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ csrrw(T0, 0x123, S1);
+  __ csrrs(T1, 0x123, S2);
+  __ csrrc(T2, 0x123, S3);
+  __ csrr(T3, 0x123);
+  __ csrw(0x123, S4);
+  __ csrs(0x123, S5);
+  __ csrc(0x123, S6);
+  __ csrrwi(T1, 0x123, 1);
+  __ csrrsi(T2, 0x123, 2);
+  __ csrrci(T3, 0x123, 3);
+  __ csrwi(0x123, 4);
+  __ csrsi(0x123, 5);
+  __ csrci(0x123, 6);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ControlStatusRegisters, test) {
+  EXPECT_DISASSEMBLY(
+      "123492f3 csrrw t0, 0x123, thr\n"
+      "12392373 csrrs t1, 0x123, s2\n"
+      "1239b3f3 csrrc t2, 0x123, s3\n"
+      "12302e73 csrr t3, 0x123\n"
+      "123a1073 csrw 0x123, s4\n"
+      "123aa073 csrs 0x123, s5\n"
+      "123b3073 csrc 0x123, s6\n"
+      "1230d373 csrrwi t1, 0x123, 1\n"
+      "123163f3 csrrsi t2, 0x123, 2\n"
+      "1231fe73 csrrci t3, 0x123, 3\n"
+      "12325073 csrwi 0x123, 4\n"
+      "1232e073 csrsi 0x123, 5\n"
+      "12337073 csrci 0x123, 6\n"
+      "00008067 ret\n");
+
+  // Not running: would trap.
+}
+
+ASSEMBLER_TEST_GENERATE(Nop, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ nop();
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(Nop, test) {
+  EXPECT_DISASSEMBLY(
+      "00000013 nop\n"
+      "00008067 ret\n");
+  EXPECT_EQ(123, Call(test->entry(), 123));
+}
+
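+// mv, not, neg, and the set-against-zero ops are likewise pseudo-instructions
+// (addi rd, rs, 0; xori rd, rs, -1; sub rd, zero, rs; and sltu/sltiu/slt
+// forms against zero), as the encodings in the expected disassembly show.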
+ASSEMBLER_TEST_GENERATE(Move, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ mv(A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(Move, test) {
+  EXPECT_DISASSEMBLY(
+      "00058513 mv a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(36, Call(test->entry(), 42, 36));
+}
+
+ASSEMBLER_TEST_GENERATE(Not, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ not_(A0, A0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(Not, test) {
+  EXPECT_DISASSEMBLY(
+      "fff54513 not a0, a0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(~42, Call(test->entry(), 42));
+  EXPECT_EQ(~-42, Call(test->entry(), -42));
+}
+
+ASSEMBLER_TEST_GENERATE(Negate, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ neg(A0, A0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(Negate, test) {
+  EXPECT_DISASSEMBLY(
+      "40a00533 neg a0, a0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-42, Call(test->entry(), 42));
+  EXPECT_EQ(42, Call(test->entry(), -42));
+}
+
+ASSEMBLER_TEST_GENERATE(SetNotEqualToZero, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ snez(A0, A0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SetNotEqualToZero, test) {
+  EXPECT_DISASSEMBLY(
+      "00a03533 snez a0, a0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(1, Call(test->entry(), -42));
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(1, Call(test->entry(), 42));
+}
+
+ASSEMBLER_TEST_GENERATE(SetEqualToZero, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ seqz(A0, A0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SetEqualToZero, test) {
+  EXPECT_DISASSEMBLY(
+      "00153513 seqz a0, a0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), -42));
+  EXPECT_EQ(1, Call(test->entry(), 0));
+  EXPECT_EQ(0, Call(test->entry(), 42));
+}
+
+ASSEMBLER_TEST_GENERATE(SetLessThanZero, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sltz(A0, A0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SetLessThanZero, test) {
+  EXPECT_DISASSEMBLY(
+      "00052533 sltz a0, a0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(1, Call(test->entry(), -42));
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(0, Call(test->entry(), 42));
+}
+
+ASSEMBLER_TEST_GENERATE(SetGreaterThanZero, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sgtz(A0, A0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SetGreaterThanZero, test) {
+  EXPECT_DISASSEMBLY(
+      "00a02533 sgtz a0, a0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), -42));
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(1, Call(test->entry(), 42));
+}
+
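+// The compare-with-zero branches are pseudo-instructions using x0 as one
+// operand: beqz/bnez map onto beq/bne, and blez/bgez/bltz/bgtz onto bge/blt
+// with the operands ordered to get the right sense.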
+ASSEMBLER_TEST_GENERATE(BranchEqualZero, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  Label label;
+  __ beqz(A0, &label);
+  __ li(A0, 3);
+  __ ret();
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchEqualZero, test) {
+  EXPECT_DISASSEMBLY(
+      "00050663 beqz a0, +12\n"
+      "00300513 li a0, 3\n"
+      "00008067 ret\n"
+      "00400513 li a0, 4\n"
+      "00008067 ret\n");
+  EXPECT_EQ(3, Call(test->entry(), -42));
+  EXPECT_EQ(4, Call(test->entry(), 0));
+  EXPECT_EQ(3, Call(test->entry(), 42));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchNotEqualZero, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  Label label;
+  __ bnez(A0, &label);
+  __ li(A0, 3);
+  __ ret();
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchNotEqualZero, test) {
+  EXPECT_DISASSEMBLY(
+      "00051663 bnez a0, +12\n"
+      "00300513 li a0, 3\n"
+      "00008067 ret\n"
+      "00400513 li a0, 4\n"
+      "00008067 ret\n");
+  EXPECT_EQ(4, Call(test->entry(), -42));
+  EXPECT_EQ(3, Call(test->entry(), 0));
+  EXPECT_EQ(4, Call(test->entry(), 42));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchLessOrEqualZero, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  Label label;
+  __ blez(A0, &label);
+  __ li(A0, 3);
+  __ ret();
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchLessOrEqualZero, test) {
+  EXPECT_DISASSEMBLY(
+      "00a05663 blez a0, +12\n"
+      "00300513 li a0, 3\n"
+      "00008067 ret\n"
+      "00400513 li a0, 4\n"
+      "00008067 ret\n");
+  EXPECT_EQ(4, Call(test->entry(), -42));
+  EXPECT_EQ(4, Call(test->entry(), 0));
+  EXPECT_EQ(3, Call(test->entry(), 42));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchGreaterOrEqualZero, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  Label label;
+  __ bgez(A0, &label);
+  __ li(A0, 3);
+  __ ret();
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchGreaterOrEqualZero, test) {
+  EXPECT_DISASSEMBLY(
+      "00055663 bgez a0, +12\n"
+      "00300513 li a0, 3\n"
+      "00008067 ret\n"
+      "00400513 li a0, 4\n"
+      "00008067 ret\n");
+  EXPECT_EQ(3, Call(test->entry(), -42));
+  EXPECT_EQ(4, Call(test->entry(), 0));
+  EXPECT_EQ(4, Call(test->entry(), 42));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchLessThanZero, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  Label label;
+  __ bltz(A0, &label);
+  __ li(A0, 3);
+  __ ret();
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchLessThanZero, test) {
+  EXPECT_DISASSEMBLY(
+      "00054663 bltz a0, +12\n"
+      "00300513 li a0, 3\n"
+      "00008067 ret\n"
+      "00400513 li a0, 4\n"
+      "00008067 ret\n");
+  EXPECT_EQ(4, Call(test->entry(), -42));
+  EXPECT_EQ(3, Call(test->entry(), 0));
+  EXPECT_EQ(3, Call(test->entry(), 42));
+}
+
+ASSEMBLER_TEST_GENERATE(BranchGreaterThanZero, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  Label label;
+  __ bgtz(A0, &label);
+  __ li(A0, 3);
+  __ ret();
+  __ Bind(&label);
+  __ li(A0, 4);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BranchGreaterThanZero, test) {
+  EXPECT_DISASSEMBLY(
+      "00a04663 bgtz a0, +12\n"
+      "00300513 li a0, 3\n"
+      "00008067 ret\n"
+      "00400513 li a0, 4\n"
+      "00008067 ret\n");
+  EXPECT_EQ(3, Call(test->entry(), -42));
+  EXPECT_EQ(3, Call(test->entry(), 0));
+  EXPECT_EQ(4, Call(test->entry(), 42));
+}
+
+#if XLEN >= 64
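+// The *W instructions (addiw, slliw, addw, ...) are RV64-only: they operate
+// on the low 32 bits of their operands and sign-extend the 32-bit result,
+// as the 0x200000002 + 0x100000001 == 3 case under AddW below demonstrates.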
+ASSEMBLER_TEST_GENERATE(AddImmediateWord1, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ addiw(A0, A0, 42);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AddImmediateWord1, test) {
+  EXPECT_DISASSEMBLY(
+      "02a5051b addiw a0, a0, 42\n"
+      "00008067 ret\n");
+  EXPECT_EQ(42, Call(test->entry(), 0));
+  EXPECT_EQ(40, Call(test->entry(), -2));
+  EXPECT_EQ(0, Call(test->entry(), -42));
+}
+
+ASSEMBLER_TEST_GENERATE(AddImmediateWord2, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ addiw(A0, A0, -42);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AddImmediateWord2, test) {
+  EXPECT_DISASSEMBLY(
+      "fd65051b addiw a0, a0, -42\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-42, Call(test->entry(), 0));
+  EXPECT_EQ(-44, Call(test->entry(), -2));
+  EXPECT_EQ(38, Call(test->entry(), 80));
+}
+
+ASSEMBLER_TEST_GENERATE(ShiftLeftLogicalImmediateWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ slliw(A0, A0, 2);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ShiftLeftLogicalImmediateWord, test) {
+  EXPECT_DISASSEMBLY(
+      "0025151b slliw a0, a0, 0x2\n"
+      "00008067 ret\n");
+  EXPECT_EQ(84, Call(test->entry(), 21));
+  EXPECT_EQ(4, Call(test->entry(), 1));
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(-4, Call(test->entry(), -1));
+  EXPECT_EQ(-84, Call(test->entry(), -21));
+}
+
+ASSEMBLER_TEST_GENERATE(ShiftRightLogicalImmediateWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ srliw(A0, A0, 2);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ShiftRightLogicalImmediateWord, test) {
+  EXPECT_DISASSEMBLY(
+      "0025551b srliw a0, a0, 0x2\n"
+      "00008067 ret\n");
+  EXPECT_EQ(5, Call(test->entry(), 21));
+  EXPECT_EQ(0, Call(test->entry(), 1));
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(sign_extend(static_cast<uint32_t>(-1) >> 2),
+            Call(test->entry(), -1));
+  EXPECT_EQ(sign_extend(static_cast<uint32_t>(-21) >> 2),
+            Call(test->entry(), -21));
+}
+
+ASSEMBLER_TEST_GENERATE(ShiftRightArithmeticImmediateWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sraiw(A0, A0, 2);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ShiftRightArithmeticImmediateWord, test) {
+  EXPECT_DISASSEMBLY(
+      "4025551b sraiw a0, a0, 0x2\n"
+      "00008067 ret\n");
+  EXPECT_EQ(5, Call(test->entry(), 21));
+  EXPECT_EQ(0, Call(test->entry(), 1));
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(-1, Call(test->entry(), -1));
+  EXPECT_EQ(-6, Call(test->entry(), -21));
+}
+
+ASSEMBLER_TEST_GENERATE(AddWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ addw(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AddWord, test) {
+  EXPECT_DISASSEMBLY(
+      "00b5053b addw a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(24, Call(test->entry(), 7, 17));
+  EXPECT_EQ(-10, Call(test->entry(), 7, -17));
+  EXPECT_EQ(10, Call(test->entry(), -7, 17));
+  EXPECT_EQ(-24, Call(test->entry(), -7, -17));
+  EXPECT_EQ(24, Call(test->entry(), 17, 7));
+  EXPECT_EQ(10, Call(test->entry(), 17, -7));
+  EXPECT_EQ(-10, Call(test->entry(), -17, 7));
+  EXPECT_EQ(-24, Call(test->entry(), -17, -7));
+  EXPECT_EQ(3, Call(test->entry(), 0x200000002, 0x100000001));
+}
+
+ASSEMBLER_TEST_GENERATE(SubtractWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ subw(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SubtractWord, test) {
+  EXPECT_DISASSEMBLY(
+      "40b5053b subw a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-10, Call(test->entry(), 7, 17));
+  EXPECT_EQ(24, Call(test->entry(), 7, -17));
+  EXPECT_EQ(-24, Call(test->entry(), -7, 17));
+  EXPECT_EQ(10, Call(test->entry(), -7, -17));
+  EXPECT_EQ(10, Call(test->entry(), 17, 7));
+  EXPECT_EQ(24, Call(test->entry(), 17, -7));
+  EXPECT_EQ(-24, Call(test->entry(), -17, 7));
+  EXPECT_EQ(-10, Call(test->entry(), -17, -7));
+  EXPECT_EQ(1, Call(test->entry(), 0x200000002, 0x100000001));
+}
+
+ASSEMBLER_TEST_GENERATE(ShiftLeftLogicalWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sllw(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ShiftLeftLogicalWord, test) {
+  EXPECT_DISASSEMBLY(
+      "00b5153b sllw a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(2176, Call(test->entry(), 17, 7));
+  EXPECT_EQ(-2176, Call(test->entry(), -17, 7));
+  EXPECT_EQ(34, Call(test->entry(), 17, 1));
+  EXPECT_EQ(-34, Call(test->entry(), -17, 1));
+  EXPECT_EQ(17, Call(test->entry(), 17, 0));
+  EXPECT_EQ(-17, Call(test->entry(), -17, 0));
+  EXPECT_EQ(0x10, Call(test->entry(), 0x10000001, 4));
+}
+
+ASSEMBLER_TEST_GENERATE(ShiftRightLogicalWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ srlw(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ShiftRightLogicalWord, test) {
+  EXPECT_DISASSEMBLY(
+      "00b5553b srlw a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 17, 7));
+  EXPECT_EQ(sign_extend(static_cast<uint32_t>(-17) >> 7),
+            Call(test->entry(), -17, 7));
+  EXPECT_EQ(8, Call(test->entry(), 17, 1));
+  EXPECT_EQ(sign_extend(static_cast<uint32_t>(-17) >> 1),
+            Call(test->entry(), -17, 1));
+  EXPECT_EQ(17, Call(test->entry(), 17, 0));
+  EXPECT_EQ(-17, Call(test->entry(), -17, 0));
+}
+
+ASSEMBLER_TEST_GENERATE(ShiftRightArithmeticWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sraw(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ShiftRightArithmeticWord, test) {
+  EXPECT_DISASSEMBLY(
+      "40b5553b sraw a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 17, 7));
+  EXPECT_EQ(-1, Call(test->entry(), -17, 7));
+  EXPECT_EQ(8, Call(test->entry(), 17, 1));
+  EXPECT_EQ(-9, Call(test->entry(), -17, 1));
+  EXPECT_EQ(17, Call(test->entry(), 17, 0));
+  EXPECT_EQ(-17, Call(test->entry(), -17, 0));
+}
+
+ASSEMBLER_TEST_GENERATE(NegateWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ negw(A0, A0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(NegateWord, test) {
+  EXPECT_DISASSEMBLY(
+      "40a0053b negw a0, a0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(-42, Call(test->entry(), 42));
+  EXPECT_EQ(42, Call(test->entry(), -42));
+  EXPECT_EQ(1, Call(test->entry(), 0x10FFFFFFFF));
+}
+
+ASSEMBLER_TEST_GENERATE(SignExtendWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ sextw(A0, A0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SignExtendWord, test) {
+  EXPECT_DISASSEMBLY(
+      "0005051b sext.w a0, a0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 0));
+  EXPECT_EQ(42, Call(test->entry(), 42));
+  EXPECT_EQ(-42, Call(test->entry(), -42));
+  EXPECT_EQ(-1, Call(test->entry(), 0x10FFFFFFFF));
+}
+#endif  // XLEN >= 64
+
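+// M-extension tests: mul yields the low XLEN bits of the product, while
+// mulh/mulhsu/mulhu yield the high XLEN bits under signed x signed,
+// signed x unsigned, and unsigned x unsigned interpretations respectively.
+// For example, -4 * 17 = -68 fits entirely in the low half, so mulh returns
+// only the sign bits: -1.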
+ASSEMBLER_TEST_GENERATE(Multiply, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ mul(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(Multiply, test) {
+  EXPECT_DISASSEMBLY(
+      "02b50533 mul a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(68, Call(test->entry(), 4, 17));
+  EXPECT_EQ(-68, Call(test->entry(), -4, 17));
+  EXPECT_EQ(-68, Call(test->entry(), 4, -17));
+  EXPECT_EQ(68, Call(test->entry(), -4, -17));
+  EXPECT_EQ(68, Call(test->entry(), 17, 4));
+  EXPECT_EQ(-68, Call(test->entry(), -17, 4));
+  EXPECT_EQ(-68, Call(test->entry(), 17, -4));
+  EXPECT_EQ(68, Call(test->entry(), -17, -4));
+}
+
+ASSEMBLER_TEST_GENERATE(MultiplyHigh, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ mulh(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(MultiplyHigh, test) {
+  EXPECT_DISASSEMBLY(
+      "02b51533 mulh a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 4, 17));
+  EXPECT_EQ(-1, Call(test->entry(), -4, 17));
+  EXPECT_EQ(-1, Call(test->entry(), 4, -17));
+  EXPECT_EQ(0, Call(test->entry(), -4, -17));
+  EXPECT_EQ(0, Call(test->entry(), 17, 4));
+  EXPECT_EQ(-1, Call(test->entry(), -17, 4));
+  EXPECT_EQ(-1, Call(test->entry(), 17, -4));
+  EXPECT_EQ(0, Call(test->entry(), -17, -4));
+}
+
+ASSEMBLER_TEST_GENERATE(MultiplyHighSignedUnsigned, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ mulhsu(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(MultiplyHighSignedUnsigned, test) {
+  EXPECT_DISASSEMBLY(
+      "02b52533 mulhsu a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 4, 17));
+  EXPECT_EQ(-1, Call(test->entry(), -4, 17));
+  EXPECT_EQ(3, Call(test->entry(), 4, -17));
+  EXPECT_EQ(-4, Call(test->entry(), -4, -17));
+  EXPECT_EQ(0, Call(test->entry(), 17, 4));
+  EXPECT_EQ(-1, Call(test->entry(), -17, 4));
+  EXPECT_EQ(16, Call(test->entry(), 17, -4));
+  EXPECT_EQ(-17, Call(test->entry(), -17, -4));
+}
+
+ASSEMBLER_TEST_GENERATE(MultiplyHighUnsigned, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ mulhu(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(MultiplyHighUnsigned, test) {
+  EXPECT_DISASSEMBLY(
+      "02b53533 mulhu a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 4, 17));
+  EXPECT_EQ(16, Call(test->entry(), -4, 17));
+  EXPECT_EQ(3, Call(test->entry(), 4, -17));
+  EXPECT_EQ(-21, Call(test->entry(), -4, -17));
+  EXPECT_EQ(0, Call(test->entry(), 17, 4));
+  EXPECT_EQ(3, Call(test->entry(), -17, 4));
+  EXPECT_EQ(16, Call(test->entry(), 17, -4));
+  EXPECT_EQ(-21, Call(test->entry(), -17, -4));
+}
+
+ASSEMBLER_TEST_GENERATE(Divide, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ div(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(Divide, test) {
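+  // div rounds the quotient toward zero, so -17 / 4 is -4 rather than -5.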
+  EXPECT_DISASSEMBLY(
+      "02b54533 div a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 4, 17));
+  EXPECT_EQ(0, Call(test->entry(), -4, 17));
+  EXPECT_EQ(0, Call(test->entry(), 4, -17));
+  EXPECT_EQ(0, Call(test->entry(), -4, -17));
+  EXPECT_EQ(4, Call(test->entry(), 17, 4));
+  EXPECT_EQ(-4, Call(test->entry(), -17, 4));
+  EXPECT_EQ(-4, Call(test->entry(), 17, -4));
+  EXPECT_EQ(4, Call(test->entry(), -17, -4));
+}
+
+ASSEMBLER_TEST_GENERATE(DivideUnsigned, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ divu(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DivideUnsigned, test) {
+  EXPECT_DISASSEMBLY(
+      "02b55533 divu a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 4, 17));
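+  // As an unsigned dividend, -4 reads as 2^XLEN - 4, so the quotient below
+  // depends on XLEN.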
+#if XLEN == 32
+  EXPECT_EQ(252645134, Call(test->entry(), -4, 17));
+#else
+  EXPECT_EQ(1085102592571150094, Call(test->entry(), -4, 17));
+#endif
+  EXPECT_EQ(0, Call(test->entry(), 4, -17));
+  EXPECT_EQ(1, Call(test->entry(), -4, -17));
+  EXPECT_EQ(4, Call(test->entry(), 17, 4));
+#if XLEN == 32
+  EXPECT_EQ(1073741819, Call(test->entry(), -17, 4));
+#else
+  EXPECT_EQ(4611686018427387899, Call(test->entry(), -17, 4));
+#endif
+  EXPECT_EQ(0, Call(test->entry(), 17, -4));
+  EXPECT_EQ(0, Call(test->entry(), -17, -4));
+}
+
+ASSEMBLER_TEST_GENERATE(Remainder, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ rem(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(Remainder, test) {
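+  // rem gives the remainder the sign of the dividend: -17 rem 4 is -1.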
+  EXPECT_DISASSEMBLY(
+      "02b56533 rem a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(4, Call(test->entry(), 4, 17));
+  EXPECT_EQ(-4, Call(test->entry(), -4, 17));
+  EXPECT_EQ(4, Call(test->entry(), 4, -17));
+  EXPECT_EQ(-4, Call(test->entry(), -4, -17));
+  EXPECT_EQ(1, Call(test->entry(), 17, 4));
+  EXPECT_EQ(-1, Call(test->entry(), -17, 4));
+  EXPECT_EQ(1, Call(test->entry(), 17, -4));
+  EXPECT_EQ(-1, Call(test->entry(), -17, -4));
+}
+
+ASSEMBLER_TEST_GENERATE(RemainderUnsigned, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ remu(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(RemainderUnsigned, test) {
+  EXPECT_DISASSEMBLY(
+      "02b57533 remu a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(4, Call(test->entry(), 4, 17));
+  EXPECT_EQ(14, Call(test->entry(), -4, 17));
+  EXPECT_EQ(4, Call(test->entry(), 4, -17));
+  EXPECT_EQ(13, Call(test->entry(), -4, -17));
+  EXPECT_EQ(1, Call(test->entry(), 17, 4));
+  EXPECT_EQ(3, Call(test->entry(), -17, 4));
+  EXPECT_EQ(17, Call(test->entry(), 17, -4));
+  EXPECT_EQ(-17, Call(test->entry(), -17, -4));
+}
+
+#if XLEN >= 64
+ASSEMBLER_TEST_GENERATE(MultiplyWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ mulw(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(MultiplyWord, test) {
+  EXPECT_DISASSEMBLY(
+      "02b5053b mulw a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(68, Call(test->entry(), 4, 17));
+  EXPECT_EQ(-68, Call(test->entry(), -4, 17));
+  EXPECT_EQ(-68, Call(test->entry(), 4, -17));
+  EXPECT_EQ(68, Call(test->entry(), -4, -17));
+  EXPECT_EQ(68, Call(test->entry(), 17, 4));
+  EXPECT_EQ(-68, Call(test->entry(), -17, 4));
+  EXPECT_EQ(-68, Call(test->entry(), 17, -4));
+  EXPECT_EQ(68, Call(test->entry(), -17, -4));
+}
+
+ASSEMBLER_TEST_GENERATE(DivideWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ divw(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DivideWord, test) {
+  EXPECT_DISASSEMBLY(
+      "02b5453b divw a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 4, 17));
+  EXPECT_EQ(0, Call(test->entry(), -4, 17));
+  EXPECT_EQ(0, Call(test->entry(), 4, -17));
+  EXPECT_EQ(0, Call(test->entry(), -4, -17));
+  EXPECT_EQ(4, Call(test->entry(), 17, 4));
+  EXPECT_EQ(-4, Call(test->entry(), -17, 4));
+  EXPECT_EQ(-4, Call(test->entry(), 17, -4));
+  EXPECT_EQ(4, Call(test->entry(), -17, -4));
+}
+
+ASSEMBLER_TEST_GENERATE(DivideUnsignedWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ divuw(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DivideUnsignedWord, test) {
+  EXPECT_DISASSEMBLY(
+      "02b5553b divuw a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, Call(test->entry(), 4, 17));
+  EXPECT_EQ(252645134, Call(test->entry(), -4, 17));
+  EXPECT_EQ(0, Call(test->entry(), 4, -17));
+  EXPECT_EQ(1, Call(test->entry(), -4, -17));
+  EXPECT_EQ(4, Call(test->entry(), 17, 4));
+  EXPECT_EQ(1073741819, Call(test->entry(), -17, 4));
+  EXPECT_EQ(0, Call(test->entry(), 17, -4));
+  EXPECT_EQ(0, Call(test->entry(), -17, -4));
+}
+
+ASSEMBLER_TEST_GENERATE(RemainderWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ remw(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(RemainderWord, test) {
+  EXPECT_DISASSEMBLY(
+      "02b5653b remw a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(4, Call(test->entry(), 4, 17));
+  EXPECT_EQ(-4, Call(test->entry(), -4, 17));
+  EXPECT_EQ(4, Call(test->entry(), 4, -17));
+  EXPECT_EQ(-4, Call(test->entry(), -4, -17));
+  EXPECT_EQ(1, Call(test->entry(), 17, 4));
+  EXPECT_EQ(-1, Call(test->entry(), -17, 4));
+  EXPECT_EQ(1, Call(test->entry(), 17, -4));
+  EXPECT_EQ(-1, Call(test->entry(), -17, -4));
+}
+
+ASSEMBLER_TEST_GENERATE(RemainderUnsignedWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ remuw(A0, A0, A1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(RemainderUnsignedWord, test) {
+  EXPECT_DISASSEMBLY(
+      "02b5753b remuw a0, a0, a1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(4, Call(test->entry(), 4, 17));
+  EXPECT_EQ(14, Call(test->entry(), -4, 17));
+  EXPECT_EQ(4, Call(test->entry(), 4, -17));
+  EXPECT_EQ(13, Call(test->entry(), -4, -17));
+  EXPECT_EQ(1, Call(test->entry(), 17, 4));
+  EXPECT_EQ(3, Call(test->entry(), -17, 4));
+  EXPECT_EQ(17, Call(test->entry(), 17, -4));
+  EXPECT_EQ(-17, Call(test->entry(), -17, -4));
+}
+#endif
+
+ASSEMBLER_TEST_GENERATE(LoadReserveStoreConditionalWord_Success, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lrw(T0, Address(A0));
+  __ addi(T0, T0, 1);
+  __ scw(A0, T0, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadReserveStoreConditionalWord_Success, test) {
+  EXPECT_DISASSEMBLY(
+      "100522af lr.w t0, (a0)\n"
+      "00128293 addi t0, t0, 1\n"
+      "1855252f sc.w a0, t0, (a0)\n"
+      "00008067 ret\n");
+
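+  // The lr.w reservation is still held, so sc.w succeeds, writing 0 to rd
+  // and committing the incremented value.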
+  int32_t* value = reinterpret_cast<int32_t*>(malloc(sizeof(int32_t)));
+  *value = 0b1100;
+  EXPECT_EQ(0, Call(test->entry(), reinterpret_cast<intx_t>(value)));
+  EXPECT_EQ(0b1101, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadReserveStoreConditionalWord_Failure, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ li(T0, 42);
+  __ scw(A0, T0, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadReserveStoreConditionalWord_Failure, test) {
+  EXPECT_DISASSEMBLY(
+      "02a00293 li t0, 42\n"
+      "1855252f sc.w a0, t0, (a0)\n"
+      "00008067 ret\n");
+
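+  // No lr.w precedes the sc.w, so there is no valid reservation: the store
+  // conditional fails with a nonzero rd and memory is left untouched.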
+  int32_t* value = reinterpret_cast<int32_t*>(malloc(sizeof(int32_t)));
+  *value = 0b1100;
+  EXPECT_EQ(false, 0 == Call(test->entry(), reinterpret_cast<intx_t>(value)));
+  EXPECT_EQ(0b1100, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoSwapWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amoswapw(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoSwapWord, test) {
+  EXPECT_DISASSEMBLY(
+      "08b5252f amoswap.w a0, a1, (a0)\n"
+      "00008067 ret\n");
+
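+  // AMOs return the original memory value in rd while storing the result.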
+  int32_t* value = reinterpret_cast<int32_t*>(malloc(sizeof(int32_t)));
+  *value = 0b1100;
+  EXPECT_EQ(0b1100,
+            Call(test->entry(), reinterpret_cast<intx_t>(value), 0b1010));
+  EXPECT_EQ(0b1010, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoAddWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amoaddw(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoAddWord, test) {
+  EXPECT_DISASSEMBLY(
+      "00b5252f amoadd.w a0, a1, (a0)\n"
+      "00008067 ret\n");
+
+  int32_t* value = reinterpret_cast<int32_t*>(malloc(sizeof(int32_t)));
+  *value = 42;
+  EXPECT_EQ(42, Call(test->entry(), reinterpret_cast<intx_t>(value), 10));
+  EXPECT_EQ(52, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoXorWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amoxorw(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoXorWord, test) {
+  EXPECT_DISASSEMBLY(
+      "20b5252f amoxor.w a0, a1, (a0)\n"
+      "00008067 ret\n");
+
+  int32_t* value = reinterpret_cast<int32_t*>(malloc(sizeof(int32_t)));
+  *value = 0b1100;
+  EXPECT_EQ(0b1100,
+            Call(test->entry(), reinterpret_cast<intx_t>(value), 0b1010));
+  EXPECT_EQ(0b0110, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoAndWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amoandw(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoAndWord, test) {
+  EXPECT_DISASSEMBLY(
+      "60b5252f amoand.w a0, a1, (a0)\n"
+      "00008067 ret\n");
+
+  int32_t* value = reinterpret_cast<int32_t*>(malloc(sizeof(int32_t)));
+  *value = 0b1100;
+  EXPECT_EQ(0b1100,
+            Call(test->entry(), reinterpret_cast<intx_t>(value), 0b1010));
+  EXPECT_EQ(0b1000, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoOrWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amoorw(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoOrWord, test) {
+  EXPECT_DISASSEMBLY(
+      "40b5252f amoor.w a0, a1, (a0)\n"
+      "00008067 ret\n");
+
+  int32_t* value = reinterpret_cast<int32_t*>(malloc(sizeof(int32_t)));
+  *value = 0b1100;
+  EXPECT_EQ(0b1100,
+            Call(test->entry(), reinterpret_cast<intx_t>(value), 0b1010));
+  EXPECT_EQ(0b1110, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoMinWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amominw(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoMinWord, test) {
+  EXPECT_DISASSEMBLY(
+      "80b5252f amomin.w a0, a1, (a0)\n"
+      "00008067 ret\n");
+
+  int32_t* value = reinterpret_cast<int32_t*>(malloc(sizeof(int32_t)));
+  *value = -7;
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -4));
+  EXPECT_EQ(-7, *value);
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -7));
+  EXPECT_EQ(-7, *value);
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -11));
+  EXPECT_EQ(-11, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoMaxWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amomaxw(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoMaxWord, test) {
+  EXPECT_DISASSEMBLY(
+      "a0b5252f amomax.w a0, a1, (a0)\n"
+      "00008067 ret\n");
+
+  int32_t* value = reinterpret_cast<int32_t*>(malloc(sizeof(int32_t)));
+  *value = -7;
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -11));
+  EXPECT_EQ(-7, *value);
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -7));
+  EXPECT_EQ(-7, *value);
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -4));
+  EXPECT_EQ(-4, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoMinUnsignedWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amominuw(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoMinUnsignedWord, test) {
+  EXPECT_DISASSEMBLY(
+      "c0b5252f amominu.w a0, a1, (a0)\n"
+      "00008067 ret\n");
+
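+  // amominu.w compares the 32-bit values as unsigned, so -7 (a large
+  // unsigned value) is only replaced by -11. On RV64, 32-bit AMOs
+  // sign-extend the old value placed in rd, hence the sign_extend below.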
+  int32_t* value = reinterpret_cast<int32_t*>(malloc(sizeof(int32_t)));
+  *value = -7;
+  EXPECT_EQ(sign_extend(static_cast<uint32_t>(-7)),
+            Call(test->entry(), reinterpret_cast<intx_t>(value), -4));
+  EXPECT_EQ(-7, *value);
+  EXPECT_EQ(sign_extend(static_cast<uint32_t>(-7)),
+            Call(test->entry(), reinterpret_cast<intx_t>(value), -7));
+  EXPECT_EQ(-7, *value);
+  EXPECT_EQ(sign_extend(static_cast<uint32_t>(-7)),
+            Call(test->entry(), reinterpret_cast<intx_t>(value), -11));
+  EXPECT_EQ(-11, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoMaxUnsignedWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amomaxuw(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoMaxUnsignedWord, test) {
+  EXPECT_DISASSEMBLY(
+      "e0b5252f amomaxu.w a0, a1, (a0)\n"
+      "00008067 ret\n");
+
+  int32_t* value = reinterpret_cast<int32_t*>(malloc(sizeof(int32_t)));
+  *value = -7;
+  EXPECT_EQ(sign_extend(static_cast<uint32_t>(-7)),
+            Call(test->entry(), reinterpret_cast<intx_t>(value), -11));
+  EXPECT_EQ(-7, *value);
+  EXPECT_EQ(sign_extend(static_cast<uint32_t>(-7)),
+            Call(test->entry(), reinterpret_cast<intx_t>(value), -7));
+  EXPECT_EQ(-7, *value);
+  EXPECT_EQ(sign_extend(static_cast<uint32_t>(-7)),
+            Call(test->entry(), reinterpret_cast<intx_t>(value), -4));
+  EXPECT_EQ(-4, *value);
+}
+
+#if XLEN >= 64
+ASSEMBLER_TEST_GENERATE(LoadReserveStoreConditionalDoubleWord_Success,
+                        assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ lrd(T0, Address(A0));
+  __ addi(T0, T0, 1);
+  __ scd(A0, T0, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadReserveStoreConditionalDoubleWord_Success, test) {
+  EXPECT_DISASSEMBLY(
+      "100532af lr.d t0, (a0)\n"
+      "00128293 addi t0, t0, 1\n"
+      "1855352f sc.d a0, t0, (a0)\n"
+      "00008067 ret\n");
+
+  int64_t* value = reinterpret_cast<int64_t*>(malloc(sizeof(int64_t)));
+  *value = 0b1100;
+  EXPECT_EQ(0, Call(test->entry(), reinterpret_cast<intx_t>(value)));
+  EXPECT_EQ(0b1101, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(LoadReserveStoreConditionalDoubleWord_Failure,
+                        assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ li(T0, 42);
+  __ scd(A0, T0, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadReserveStoreConditionalDoubleWord_Failure, test) {
+  EXPECT_DISASSEMBLY(
+      "02a00293 li t0, 42\n"
+      "1855352f sc.d a0, t0, (a0)\n"
+      "00008067 ret\n");
+
+  int64_t* value = reinterpret_cast<int64_t*>(malloc(sizeof(int64_t)));
+  *value = 0b1100;
+  EXPECT_EQ(false, 0 == Call(test->entry(), reinterpret_cast<intx_t>(value)));
+  EXPECT_EQ(0b1100, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoSwapDoubleWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amoswapd(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoSwapDoubleWord, test) {
+  EXPECT_DISASSEMBLY(
+      "08b5352f amoswap.d a0, a1, (a0)\n"
+      "00008067 ret\n");
+
+  int64_t* value = reinterpret_cast<int64_t*>(malloc(sizeof(int64_t)));
+  *value = 0b1100;
+  EXPECT_EQ(0b1100,
+            Call(test->entry(), reinterpret_cast<intx_t>(value), 0b1010));
+  EXPECT_EQ(0b1010, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoAddDoubleWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amoaddd(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoAddDoubleWord, test) {
+  EXPECT_DISASSEMBLY(
+      "00b5352f amoadd.d a0, a1, (a0)\n"
+      "00008067 ret\n");
+
+  int64_t* value = reinterpret_cast<int64_t*>(malloc(sizeof(int64_t)));
+  *value = 42;
+  EXPECT_EQ(42, Call(test->entry(), reinterpret_cast<intx_t>(value), 10));
+  EXPECT_EQ(52, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoXorDoubleWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amoxord(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoXorDoubleWord, test) {
+  EXPECT_DISASSEMBLY(
+      "20b5352f amoxor.d a0, a1, (a0)\n"
+      "00008067 ret\n");
+
+  int64_t* value = reinterpret_cast<int64_t*>(malloc(sizeof(int64_t)));
+  *value = 0b1100;
+  EXPECT_EQ(0b1100,
+            Call(test->entry(), reinterpret_cast<intx_t>(value), 0b1010));
+  EXPECT_EQ(0b0110, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoAndDoubleWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amoandd(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoAndDoubleWord, test) {
+  EXPECT_DISASSEMBLY(
+      "60b5352f amoand.d a0, a1, (a0)\n"
+      "00008067 ret\n");
+
+  int64_t* value = reinterpret_cast<int64_t*>(malloc(sizeof(int64_t)));
+  *value = 0b1100;
+  EXPECT_EQ(0b1100,
+            Call(test->entry(), reinterpret_cast<intx_t>(value), 0b1010));
+  EXPECT_EQ(0b1000, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoOrDoubleWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amoord(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoOrDoubleWord, test) {
+  EXPECT_DISASSEMBLY(
+      "40b5352f amoor.d a0, a1, (a0)\n"
+      "00008067 ret\n");
+
+  int64_t* value = reinterpret_cast<int64_t*>(malloc(sizeof(int64_t)));
+  *value = 0b1100;
+  EXPECT_EQ(0b1100,
+            Call(test->entry(), reinterpret_cast<intx_t>(value), 0b1010));
+  EXPECT_EQ(0b1110, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoMinDoubleWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amomind(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoMinDoubleWord, test) {
+  EXPECT_DISASSEMBLY(
+      "80b5352f amomin.d a0, a1, (a0)\n"
+      "00008067 ret\n");
+
+  int64_t* value = reinterpret_cast<int64_t*>(malloc(sizeof(int64_t)));
+  *value = -7;
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -4));
+  EXPECT_EQ(-7, *value);
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -7));
+  EXPECT_EQ(-7, *value);
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -11));
+  EXPECT_EQ(-11, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoMaxDoubleWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amomaxd(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoMaxDoubleWord, test) {
+  EXPECT_DISASSEMBLY(
+      "a0b5352f amomax.d a0, a1, (a0)\n"
+      "00008067 ret\n");
+
+  int64_t* value = reinterpret_cast<int64_t*>(malloc(sizeof(int64_t)));
+  *value = -7;
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -11));
+  EXPECT_EQ(-7, *value);
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -7));
+  EXPECT_EQ(-7, *value);
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -4));
+  EXPECT_EQ(-4, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoMinUnsignedDoubleWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amominud(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoMinUnsignedDoubleWord, test) {
+  EXPECT_DISASSEMBLY(
+      "c0b5352f amominu.d a0, a1, (a0)\n"
+      "00008067 ret\n");
+
+  int64_t* value = reinterpret_cast<int64_t*>(malloc(sizeof(int64_t)));
+  *value = -7;
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -4));
+  EXPECT_EQ(-7, *value);
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -7));
+  EXPECT_EQ(-7, *value);
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -11));
+  EXPECT_EQ(-11, *value);
+}
+
+ASSEMBLER_TEST_GENERATE(AmoMaxUnsignedDoubleWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ amomaxud(A0, A1, Address(A0));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(AmoMaxUnsignedDoubleWord, test) {
+  EXPECT_DISASSEMBLY(
+      "e0b5352f amomaxu.d a0, a1, (a0)\n"
+      "00008067 ret\n");
+
+  int64_t* value = reinterpret_cast<int64_t*>(malloc(sizeof(int64_t)));
+  *value = -7;
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -11));
+  EXPECT_EQ(-7, *value);
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -7));
+  EXPECT_EQ(-7, *value);
+  EXPECT_EQ(-7, Call(test->entry(), reinterpret_cast<intx_t>(value), -4));
+  EXPECT_EQ(-4, *value);
+}
+#endif
+
+ASSEMBLER_TEST_GENERATE(LoadSingleFloat, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ flw(FA0, Address(A0, 1 * sizeof(float)));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadSingleFloat, test) {
+  EXPECT_DISASSEMBLY(
+      "00452507 flw fa0, 4(a0)\n"
+      "00008067 ret\n");
+
+  float* data = reinterpret_cast<float*>(malloc(3 * sizeof(float)));
+  data[0] = 1.7f;
+  data[1] = 2.8f;
+  data[2] = 3.9f;
+  EXPECT_EQ(data[1], CallF(test->entry(), reinterpret_cast<intx_t>(data)));
+}
+
+ASSEMBLER_TEST_GENERATE(StoreSingleFloat, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fsw(FA0, Address(A0, 1 * sizeof(float)));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(StoreSingleFloat, test) {
+  EXPECT_DISASSEMBLY(
+      "00a52227 fsw fa0, 4(a0)\n"
+      "00008067 ret\n");
+
+  float* data = reinterpret_cast<float*>(malloc(3 * sizeof(float)));
+  data[0] = 1.7f;
+  data[1] = 2.8f;
+  data[2] = 3.9f;
+  CallF(test->entry(), reinterpret_cast<intx_t>(data), 4.2f);
+  EXPECT_EQ(4.2f, data[1]);
+}
+
+ASSEMBLER_TEST_GENERATE(SingleMultiplyAdd, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fmadds(FA0, FA0, FA1, FA2);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleMultiplyAdd, test) {
+  EXPECT_DISASSEMBLY(
+      "60b50543 fmadd.s fa0, fa0, fa1, fa2\n"
+      "00008067 ret\n");
+  EXPECT_EQ(22.0, CallF(test->entry(), 3.0, 5.0, 7.0));
+  EXPECT_EQ(-8.0, CallF(test->entry(), -3.0, 5.0, 7.0));
+  EXPECT_EQ(-8.0, CallF(test->entry(), 3.0, -5.0, 7.0));
+  EXPECT_EQ(8.0, CallF(test->entry(), 3.0, 5.0, -7.0));
+
+  EXPECT_EQ(26.0, CallF(test->entry(), 7.0, 3.0, 5.0));
+  EXPECT_EQ(-16.0, CallF(test->entry(), -7.0, 3.0, 5.0));
+  EXPECT_EQ(-16.0, CallF(test->entry(), 7.0, -3.0, 5.0));
+  EXPECT_EQ(16.0, CallF(test->entry(), 7.0, 3.0, -5.0));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleMultiplySubtract, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fmsubs(FA0, FA0, FA1, FA2);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleMultiplySubtract, test) {
+  EXPECT_DISASSEMBLY(
+      "60b50547 fmsub.s fa0, fa0, fa1, fa2\n"
+      "00008067 ret\n");
+  EXPECT_EQ(8.0, CallF(test->entry(), 3.0, 5.0, 7.0));
+  EXPECT_EQ(-22.0, CallF(test->entry(), -3.0, 5.0, 7.0));
+  EXPECT_EQ(-22.0, CallF(test->entry(), 3.0, -5.0, 7.0));
+  EXPECT_EQ(22.0, CallF(test->entry(), 3.0, 5.0, -7.0));
+
+  EXPECT_EQ(16.0, CallF(test->entry(), 7.0, 3.0, 5.0));
+  EXPECT_EQ(-26.0, CallF(test->entry(), -7.0, 3.0, 5.0));
+  EXPECT_EQ(-26.0, CallF(test->entry(), 7.0, -3.0, 5.0));
+  EXPECT_EQ(26.0, CallF(test->entry(), 7.0, 3.0, -5.0));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleNegateMultiplySubtract, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fnmsubs(FA0, FA0, FA1, FA2);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleNegateMultiplySubtract, test) {
+  EXPECT_DISASSEMBLY(
+      "60b5054b fnmsub.s fa0, fa0, fa1, fa2\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-8.0, CallF(test->entry(), 3.0, 5.0, 7.0));
+  EXPECT_EQ(22.0, CallF(test->entry(), -3.0, 5.0, 7.0));
+  EXPECT_EQ(22.0, CallF(test->entry(), 3.0, -5.0, 7.0));
+  EXPECT_EQ(-22.0, CallF(test->entry(), 3.0, 5.0, -7.0));
+
+  EXPECT_EQ(-16.0, CallF(test->entry(), 7.0, 3.0, 5.0));
+  EXPECT_EQ(26.0, CallF(test->entry(), -7.0, 3.0, 5.0));
+  EXPECT_EQ(26.0, CallF(test->entry(), 7.0, -3.0, 5.0));
+  EXPECT_EQ(-26.0, CallF(test->entry(), 7.0, 3.0, -5.0));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleNegateMultiplyAdd, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fnmadds(FA0, FA0, FA1, FA2);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleNegateMultiplyAdd, test) {
+  EXPECT_DISASSEMBLY(
+      "60b5054f fnmadd.s fa0, fa0, fa1, fa2\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-22.0, CallF(test->entry(), 3.0, 5.0, 7.0));
+  EXPECT_EQ(8.0, CallF(test->entry(), -3.0, 5.0, 7.0));
+  EXPECT_EQ(8.0, CallF(test->entry(), 3.0, -5.0, 7.0));
+  EXPECT_EQ(-8.0, CallF(test->entry(), 3.0, 5.0, -7.0));
+
+  EXPECT_EQ(-26.0, CallF(test->entry(), 7.0, 3.0, 5.0));
+  EXPECT_EQ(16.0, CallF(test->entry(), -7.0, 3.0, 5.0));
+  EXPECT_EQ(16.0, CallF(test->entry(), 7.0, -3.0, 5.0));
+  EXPECT_EQ(-16.0, CallF(test->entry(), 7.0, 3.0, -5.0));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleAdd, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fadds(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleAdd, test) {
+  EXPECT_DISASSEMBLY(
+      "00b50553 fadd.s fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(8.0f, CallF(test->entry(), 3.0f, 5.0f));
+  EXPECT_EQ(2.0f, CallF(test->entry(), -3.0f, 5.0f));
+  EXPECT_EQ(-2.0f, CallF(test->entry(), 3.0f, -5.0f));
+  EXPECT_EQ(-8.0f, CallF(test->entry(), -3.0f, -5.0f));
+
+  EXPECT_EQ(10.0f, CallF(test->entry(), 7.0f, 3.0f));
+  EXPECT_EQ(-4.0f, CallF(test->entry(), -7.0f, 3.0f));
+  EXPECT_EQ(4.0f, CallF(test->entry(), 7.0f, -3.0f));
+  EXPECT_EQ(-10.0f, CallF(test->entry(), -7.0f, -3.0f));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleSubtract, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fsubs(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleSubtract, test) {
+  EXPECT_DISASSEMBLY(
+      "08b50553 fsub.s fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-2.0f, CallF(test->entry(), 3.0f, 5.0f));
+  EXPECT_EQ(-8.0f, CallF(test->entry(), -3.0f, 5.0f));
+  EXPECT_EQ(8.0f, CallF(test->entry(), 3.0f, -5.0f));
+  EXPECT_EQ(2.0f, CallF(test->entry(), -3.0f, -5.0f));
+
+  EXPECT_EQ(4.0f, CallF(test->entry(), 7.0f, 3.0f));
+  EXPECT_EQ(-10.0f, CallF(test->entry(), -7.0f, 3.0f));
+  EXPECT_EQ(10.0f, CallF(test->entry(), 7.0f, -3.0f));
+  EXPECT_EQ(-4.0f, CallF(test->entry(), -7.0f, -3.0f));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleMultiply, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fmuls(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleMultiply, test) {
+  EXPECT_DISASSEMBLY(
+      "10b50553 fmul.s fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(15.0f, CallF(test->entry(), 3.0f, 5.0f));
+  EXPECT_EQ(-15.0f, CallF(test->entry(), -3.0f, 5.0f));
+  EXPECT_EQ(-15.0f, CallF(test->entry(), 3.0f, -5.0f));
+  EXPECT_EQ(15.0f, CallF(test->entry(), -3.0f, -5.0f));
+
+  EXPECT_EQ(21.0f, CallF(test->entry(), 7.0f, 3.0f));
+  EXPECT_EQ(-21.0f, CallF(test->entry(), -7.0f, 3.0f));
+  EXPECT_EQ(-21.0f, CallF(test->entry(), 7.0f, -3.0f));
+  EXPECT_EQ(21.0f, CallF(test->entry(), -7.0f, -3.0f));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleDivide, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fdivs(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleDivide, test) {
+  EXPECT_DISASSEMBLY(
+      "18b50553 fdiv.s fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(2.0f, CallF(test->entry(), 10.0f, 5.0f));
+  EXPECT_EQ(-2.0f, CallF(test->entry(), -10.0f, 5.0f));
+  EXPECT_EQ(-2.0f, CallF(test->entry(), 10.0f, -5.0f));
+  EXPECT_EQ(2.0f, CallF(test->entry(), -10.0f, -5.0f));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleSquareRoot, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fsqrts(FA0, FA0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleSquareRoot, test) {
+  EXPECT_DISASSEMBLY(
+      "58050553 fsqrt.s fa0, fa0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0.0f, CallF(test->entry(), 0.0f));
+  EXPECT_EQ(1.0f, CallF(test->entry(), 1.0f));
+  EXPECT_EQ(2.0f, CallF(test->entry(), 4.0f));
+  EXPECT_EQ(3.0f, CallF(test->entry(), 9.0f));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleSignInject, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fsgnjs(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleSignInject, test) {
+  EXPECT_DISASSEMBLY(
+      "20b50553 fsgnj.s fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(3.0f, CallF(test->entry(), 3.0f, 5.0f));
+  EXPECT_EQ(3.0f, CallF(test->entry(), -3.0f, 5.0f));
+  EXPECT_EQ(-3.0f, CallF(test->entry(), 3.0f, -5.0f));
+  EXPECT_EQ(-3.0f, CallF(test->entry(), -3.0f, -5.0f));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleNegatedSignInject, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fsgnjns(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleNegatedSignInject, test) {
+  EXPECT_DISASSEMBLY(
+      "20b51553 fsgnjn.s fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-3.0f, CallF(test->entry(), 3.0f, 5.0f));
+  EXPECT_EQ(-3.0f, CallF(test->entry(), -3.0f, 5.0f));
+  EXPECT_EQ(3.0f, CallF(test->entry(), 3.0f, -5.0f));
+  EXPECT_EQ(3.0f, CallF(test->entry(), -3.0f, -5.0f));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleXorSignInject, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fsgnjxs(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleXorSignInject, test) {
+  EXPECT_DISASSEMBLY(
+      "20b52553 fsgnjx.s fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(3.0f, CallF(test->entry(), 3.0f, 5.0f));
+  EXPECT_EQ(-3.0f, CallF(test->entry(), -3.0f, 5.0f));
+  EXPECT_EQ(-3.0f, CallF(test->entry(), 3.0f, -5.0f));
+  EXPECT_EQ(3.0f, CallF(test->entry(), -3.0f, -5.0f));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleMin, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fmins(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleMin, test) {
+  EXPECT_DISASSEMBLY(
+      "28b50553 fmin.s fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(1.0f, CallF(test->entry(), 3.0f, 1.0f));
+  EXPECT_EQ(3.0f, CallF(test->entry(), 3.0f, 3.0f));
+  EXPECT_EQ(3.0f, CallF(test->entry(), 3.0f, 5.0f));
+  EXPECT_EQ(-1.0f, CallF(test->entry(), 3.0f, -1.0f));
+  EXPECT_EQ(-3.0f, CallF(test->entry(), 3.0f, -3.0f));
+  EXPECT_EQ(-5.0f, CallF(test->entry(), 3.0f, -5.0f));
+  EXPECT_EQ(-3.0f, CallF(test->entry(), -3.0f, 1.0f));
+  EXPECT_EQ(-3.0f, CallF(test->entry(), -3.0f, 3.0f));
+  EXPECT_EQ(-3.0f, CallF(test->entry(), -3.0f, 5.0f));
+  EXPECT_EQ(-3.0f, CallF(test->entry(), -3.0f, -1.0f));
+  EXPECT_EQ(-3.0f, CallF(test->entry(), -3.0f, -3.0f));
+  EXPECT_EQ(-5.0f, CallF(test->entry(), -3.0f, -5.0f));
+
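+  // When exactly one operand is NaN, fmin/fmax return the non-NaN operand.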
+  float qNAN = std::numeric_limits<float>::quiet_NaN();
+  EXPECT_EQ(3.0f, CallF(test->entry(), 3.0f, qNAN));
+  EXPECT_EQ(3.0f, CallF(test->entry(), qNAN, 3.0f));
+  EXPECT_EQ(-3.0f, CallF(test->entry(), -3.0f, qNAN));
+  EXPECT_EQ(-3.0f, CallF(test->entry(), qNAN, -3.0f));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleMax, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fmaxs(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleMax, test) {
+  EXPECT_DISASSEMBLY(
+      "28b51553 fmax.s fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(3.0f, CallF(test->entry(), 3.0f, 1.0f));
+  EXPECT_EQ(3.0f, CallF(test->entry(), 3.0f, 3.0f));
+  EXPECT_EQ(5.0f, CallF(test->entry(), 3.0f, 5.0f));
+  EXPECT_EQ(3.0f, CallF(test->entry(), 3.0f, -1.0f));
+  EXPECT_EQ(3.0f, CallF(test->entry(), 3.0f, -3.0f));
+  EXPECT_EQ(3.0f, CallF(test->entry(), 3.0f, -5.0f));
+  EXPECT_EQ(1.0f, CallF(test->entry(), -3.0f, 1.0f));
+  EXPECT_EQ(3.0f, CallF(test->entry(), -3.0f, 3.0f));
+  EXPECT_EQ(5.0f, CallF(test->entry(), -3.0f, 5.0f));
+  EXPECT_EQ(-1.0f, CallF(test->entry(), -3.0f, -1.0f));
+  EXPECT_EQ(-3.0f, CallF(test->entry(), -3.0f, -3.0f));
+  EXPECT_EQ(-3.0f, CallF(test->entry(), -3.0f, -5.0f));
+
+  float qNAN = std::numeric_limits<float>::quiet_NaN();
+  EXPECT_EQ(3.0f, CallF(test->entry(), 3.0f, qNAN));
+  EXPECT_EQ(3.0f, CallF(test->entry(), qNAN, 3.0f));
+  EXPECT_EQ(-3.0f, CallF(test->entry(), -3.0f, qNAN));
+  EXPECT_EQ(-3.0f, CallF(test->entry(), qNAN, -3.0f));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleEqual, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ feqs(A0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleEqual, test) {
+  EXPECT_DISASSEMBLY(
+      "a0b52553 feq.s a0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, CallI(test->entry(), 3.0f, 1.0f));
+  EXPECT_EQ(1, CallI(test->entry(), 3.0f, 3.0f));
+  EXPECT_EQ(0, CallI(test->entry(), 3.0f, 5.0f));
+  EXPECT_EQ(0, CallI(test->entry(), 3.0f, -1.0f));
+  EXPECT_EQ(0, CallI(test->entry(), 3.0f, -3.0f));
+  EXPECT_EQ(0, CallI(test->entry(), 3.0f, -5.0f));
+  EXPECT_EQ(0, CallI(test->entry(), -3.0f, 1.0f));
+  EXPECT_EQ(0, CallI(test->entry(), -3.0f, 3.0f));
+  EXPECT_EQ(0, CallI(test->entry(), -3.0f, 5.0f));
+  EXPECT_EQ(0, CallI(test->entry(), -3.0f, -1.0f));
+  EXPECT_EQ(1, CallI(test->entry(), -3.0f, -3.0f));
+  EXPECT_EQ(0, CallI(test->entry(), -3.0f, -5.0f));
+
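+  // Comparisons involving NaN are always false; feq/flt/fle write 0.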
+  float qNAN = std::numeric_limits<float>::quiet_NaN();
+  EXPECT_EQ(0, CallI(test->entry(), 3.0f, qNAN));
+  EXPECT_EQ(0, CallI(test->entry(), qNAN, 3.0f));
+  EXPECT_EQ(0, CallI(test->entry(), -3.0f, qNAN));
+  EXPECT_EQ(0, CallI(test->entry(), qNAN, -3.0f));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleLessThan, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ flts(A0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleLessThan, test) {
+  EXPECT_DISASSEMBLY(
+      "a0b51553 flt.s a0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, CallI(test->entry(), 3.0f, 1.0f));
+  EXPECT_EQ(0, CallI(test->entry(), 3.0f, 3.0f));
+  EXPECT_EQ(1, CallI(test->entry(), 3.0f, 5.0f));
+  EXPECT_EQ(0, CallI(test->entry(), 3.0f, -1.0f));
+  EXPECT_EQ(0, CallI(test->entry(), 3.0f, -3.0f));
+  EXPECT_EQ(0, CallI(test->entry(), 3.0f, -5.0f));
+  EXPECT_EQ(1, CallI(test->entry(), -3.0f, 1.0f));
+  EXPECT_EQ(1, CallI(test->entry(), -3.0f, 3.0f));
+  EXPECT_EQ(1, CallI(test->entry(), -3.0f, 5.0f));
+  EXPECT_EQ(1, CallI(test->entry(), -3.0f, -1.0f));
+  EXPECT_EQ(0, CallI(test->entry(), -3.0f, -3.0f));
+  EXPECT_EQ(0, CallI(test->entry(), -3.0f, -5.0f));
+
+  float qNAN = std::numeric_limits<float>::quiet_NaN();
+  EXPECT_EQ(0, CallI(test->entry(), 3.0f, qNAN));
+  EXPECT_EQ(0, CallI(test->entry(), qNAN, 3.0f));
+  EXPECT_EQ(0, CallI(test->entry(), -3.0f, qNAN));
+  EXPECT_EQ(0, CallI(test->entry(), qNAN, -3.0f));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleLessOrEqual, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fles(A0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleLessOrEqual, test) {
+  EXPECT_DISASSEMBLY(
+      "a0b50553 fle.s a0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, CallI(test->entry(), 3.0f, 1.0f));
+  EXPECT_EQ(1, CallI(test->entry(), 3.0f, 3.0f));
+  EXPECT_EQ(1, CallI(test->entry(), 3.0f, 5.0f));
+  EXPECT_EQ(0, CallI(test->entry(), 3.0f, -1.0f));
+  EXPECT_EQ(0, CallI(test->entry(), 3.0f, -3.0f));
+  EXPECT_EQ(0, CallI(test->entry(), 3.0f, -5.0f));
+  EXPECT_EQ(1, CallI(test->entry(), -3.0f, 1.0f));
+  EXPECT_EQ(1, CallI(test->entry(), -3.0f, 3.0f));
+  EXPECT_EQ(1, CallI(test->entry(), -3.0f, 5.0f));
+  EXPECT_EQ(1, CallI(test->entry(), -3.0f, -1.0f));
+  EXPECT_EQ(1, CallI(test->entry(), -3.0f, -3.0f));
+  EXPECT_EQ(0, CallI(test->entry(), -3.0f, -5.0f));
+
+  float qNAN = std::numeric_limits<float>::quiet_NaN();
+  EXPECT_EQ(0, CallI(test->entry(), 3.0f, qNAN));
+  EXPECT_EQ(0, CallI(test->entry(), qNAN, 3.0f));
+  EXPECT_EQ(0, CallI(test->entry(), -3.0f, qNAN));
+  EXPECT_EQ(0, CallI(test->entry(), qNAN, -3.0f));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleClassify, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fclasss(A0, FA0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleClassify, test) {
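+  // fclass.s writes a one-hot mask in rd identifying the operand's class.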
+  EXPECT_DISASSEMBLY(
+      "e0051553 fclass.s a0, fa0\n"
+      "00008067 ret\n");
+  // Neg infinity
+  EXPECT_EQ(1 << 0,
+            CallI(test->entry(), -std::numeric_limits<float>::infinity()));
+  // Neg normal
+  EXPECT_EQ(1 << 1, CallI(test->entry(), -1.0f));
+  // Neg subnormal
+  EXPECT_EQ(1 << 2,
+            CallI(test->entry(), -std::numeric_limits<float>::min() / 2.0f));
+  // Neg zero
+  EXPECT_EQ(1 << 3, CallI(test->entry(), -0.0f));
+  // Pos zero
+  EXPECT_EQ(1 << 4, CallI(test->entry(), 0.0f));
+  // Pos subnormal
+  EXPECT_EQ(1 << 5,
+            CallI(test->entry(), std::numeric_limits<float>::min() / 2.0f));
+  // Pos normal
+  EXPECT_EQ(1 << 6, CallI(test->entry(), 1.0f));
+  // Pos infinity
+  EXPECT_EQ(1 << 7,
+            CallI(test->entry(), std::numeric_limits<float>::infinity()));
+  // Signaling NaN
+  EXPECT_EQ(1 << 8,
+            CallI(test->entry(), std::numeric_limits<float>::signaling_NaN()));
+  // Quiet NaN
+  EXPECT_EQ(1 << 9,
+            CallI(test->entry(), std::numeric_limits<float>::quiet_NaN()));
+}
+
+ASSEMBLER_TEST_GENERATE(ConvertSingleToWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtws(A0, FA0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertSingleToWord, test) {
+  EXPECT_DISASSEMBLY(
+      "c0050553 fcvt.w.s a0, fa0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-42, CallI(test->entry(), static_cast<float>(-42)));
+  EXPECT_EQ(0, CallI(test->entry(), static_cast<float>(0)));
+  EXPECT_EQ(42, CallI(test->entry(), static_cast<float>(42)));
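+  // Conversions saturate: values beyond the int32 range, including the
+  // infinities, clamp to kMinInt32/kMaxInt32, and NaN converts to kMaxInt32.
+  // Note that static_cast<float>(kMaxInt32) already rounds up to 2^31.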
+  EXPECT_EQ(sign_extend(kMinInt32),
+            CallI(test->entry(), static_cast<float>(kMinInt32)));
+  EXPECT_EQ(sign_extend(kMaxInt32),
+            CallI(test->entry(), static_cast<float>(kMaxInt32)));
+  EXPECT_EQ(sign_extend(kMaxInt32),
+            CallI(test->entry(), static_cast<float>(kMaxUint32)));
+  EXPECT_EQ(sign_extend(kMinInt32),
+            CallI(test->entry(), static_cast<float>(kMinInt64)));
+  EXPECT_EQ(sign_extend(kMaxInt32),
+            CallI(test->entry(), static_cast<float>(kMaxInt64)));
+  EXPECT_EQ(sign_extend(kMaxInt32),
+            CallI(test->entry(), static_cast<float>(kMaxUint64)));
+  EXPECT_EQ(sign_extend(kMinInt32),
+            CallI(test->entry(), -std::numeric_limits<float>::infinity()));
+  EXPECT_EQ(sign_extend(kMaxInt32),
+            CallI(test->entry(), std::numeric_limits<float>::infinity()));
+  EXPECT_EQ(sign_extend(kMaxInt32),
+            CallI(test->entry(), std::numeric_limits<float>::signaling_NaN()));
+}
+
+ASSEMBLER_TEST_GENERATE(ConvertSingleToWord_RNE, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtws(A0, FA0, RNE);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertSingleToWord_RNE, test) {
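+  // RNE rounds to nearest, ties to even: -43.5 -> -44 but -42.5 -> -42.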
+  EXPECT_DISASSEMBLY(
+      "c0050553 fcvt.w.s a0, fa0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-44, CallI(test->entry(), -43.6f));
+  EXPECT_EQ(-44, CallI(test->entry(), -43.5f));
+  EXPECT_EQ(-43, CallI(test->entry(), -43.4f));
+  EXPECT_EQ(-43, CallI(test->entry(), -43.0f));
+  EXPECT_EQ(-43, CallI(test->entry(), -42.6f));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.5f));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.4f));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.0f));
+  EXPECT_EQ(0, CallI(test->entry(), -0.0f));
+  EXPECT_EQ(0, CallI(test->entry(), +0.0f));
+  EXPECT_EQ(42, CallI(test->entry(), 42.0f));
+  EXPECT_EQ(42, CallI(test->entry(), 42.4f));
+  EXPECT_EQ(42, CallI(test->entry(), 42.5f));
+  EXPECT_EQ(43, CallI(test->entry(), 42.6f));
+  EXPECT_EQ(43, CallI(test->entry(), 43.0f));
+  EXPECT_EQ(43, CallI(test->entry(), 43.4f));
+  EXPECT_EQ(44, CallI(test->entry(), 43.5f));
+  EXPECT_EQ(44, CallI(test->entry(), 43.6f));
+}
+
+ASSEMBLER_TEST_GENERATE(ConvertSingleToWord_RTZ, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtws(A0, FA0, RTZ);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertSingleToWord_RTZ, test) {
+  EXPECT_DISASSEMBLY(
+      "c0051553 fcvt.w.s a0, fa0, rtz\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-43, CallI(test->entry(), -43.6f));
+  EXPECT_EQ(-43, CallI(test->entry(), -43.5f));
+  EXPECT_EQ(-43, CallI(test->entry(), -43.4f));
+  EXPECT_EQ(-43, CallI(test->entry(), -43.0f));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.6f));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.5f));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.4f));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.0f));
+  EXPECT_EQ(0, CallI(test->entry(), -0.0f));
+  EXPECT_EQ(0, CallI(test->entry(), +0.0f));
+  EXPECT_EQ(42, CallI(test->entry(), 42.0f));
+  EXPECT_EQ(42, CallI(test->entry(), 42.4f));
+  EXPECT_EQ(42, CallI(test->entry(), 42.5f));
+  EXPECT_EQ(42, CallI(test->entry(), 42.6f));
+  EXPECT_EQ(43, CallI(test->entry(), 43.0f));
+  EXPECT_EQ(43, CallI(test->entry(), 43.4f));
+  EXPECT_EQ(43, CallI(test->entry(), 43.5f));
+  EXPECT_EQ(43, CallI(test->entry(), 43.6f));
+}
+
+ASSEMBLER_TEST_GENERATE(ConvertSingleToWord_RDN, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtws(A0, FA0, RDN);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertSingleToWord_RDN, test) {
+  EXPECT_DISASSEMBLY(
+      "c0052553 fcvt.w.s a0, fa0, rdn\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-44, CallI(test->entry(), -43.6f));
+  EXPECT_EQ(-44, CallI(test->entry(), -43.5f));
+  EXPECT_EQ(-44, CallI(test->entry(), -43.4f));
+  EXPECT_EQ(-43, CallI(test->entry(), -43.0f));
+  EXPECT_EQ(-43, CallI(test->entry(), -42.6f));
+  EXPECT_EQ(-43, CallI(test->entry(), -42.5f));
+  EXPECT_EQ(-43, CallI(test->entry(), -42.4f));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.0f));
+  EXPECT_EQ(0, CallI(test->entry(), -0.0f));
+  EXPECT_EQ(0, CallI(test->entry(), +0.0f));
+  EXPECT_EQ(42, CallI(test->entry(), 42.0f));
+  EXPECT_EQ(42, CallI(test->entry(), 42.4f));
+  EXPECT_EQ(42, CallI(test->entry(), 42.5f));
+  EXPECT_EQ(42, CallI(test->entry(), 42.6f));
+  EXPECT_EQ(43, CallI(test->entry(), 43.0f));
+  EXPECT_EQ(43, CallI(test->entry(), 43.4f));
+  EXPECT_EQ(43, CallI(test->entry(), 43.5f));
+  EXPECT_EQ(43, CallI(test->entry(), 43.6f));
+}
+
+ASSEMBLER_TEST_GENERATE(ConvertSingleToWord_RUP, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtws(A0, FA0, RUP);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertSingleToWord_RUP, test) {
+  EXPECT_DISASSEMBLY(
+      "c0053553 fcvt.w.s a0, fa0, rup\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-43, CallI(test->entry(), -43.6f));
+  EXPECT_EQ(-43, CallI(test->entry(), -43.5f));
+  EXPECT_EQ(-43, CallI(test->entry(), -43.4f));
+  EXPECT_EQ(-43, CallI(test->entry(), -43.0f));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.6f));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.5f));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.4f));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.0f));
+  EXPECT_EQ(0, CallI(test->entry(), -0.0f));
+  EXPECT_EQ(0, CallI(test->entry(), +0.0f));
+  EXPECT_EQ(42, CallI(test->entry(), 42.0f));
+  EXPECT_EQ(43, CallI(test->entry(), 42.4f));
+  EXPECT_EQ(43, CallI(test->entry(), 42.5f));
+  EXPECT_EQ(43, CallI(test->entry(), 42.6f));
+  EXPECT_EQ(43, CallI(test->entry(), 43.0f));
+  EXPECT_EQ(44, CallI(test->entry(), 43.4f));
+  EXPECT_EQ(44, CallI(test->entry(), 43.5f));
+  EXPECT_EQ(44, CallI(test->entry(), 43.6f));
+}
+
+ASSEMBLER_TEST_GENERATE(ConvertSingleToWord_RMM, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtws(A0, FA0, RMM);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertSingleToWord_RMM, test) {
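+  // RMM rounds to nearest, ties away from zero: 42.5 -> 43, -42.5 -> -43.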
+  EXPECT_DISASSEMBLY(
+      "c0054553 fcvt.w.s a0, fa0, rmm\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-44, CallI(test->entry(), -43.6f));
+  EXPECT_EQ(-44, CallI(test->entry(), -43.5f));
+  EXPECT_EQ(-43, CallI(test->entry(), -43.4f));
+  EXPECT_EQ(-43, CallI(test->entry(), -43.0f));
+  EXPECT_EQ(-43, CallI(test->entry(), -42.6f));
+  EXPECT_EQ(-43, CallI(test->entry(), -42.5f));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.4f));
+  EXPECT_EQ(-42, CallI(test->entry(), -42.0f));
+  EXPECT_EQ(0, CallI(test->entry(), -0.0f));
+  EXPECT_EQ(0, CallI(test->entry(), +0.0f));
+  EXPECT_EQ(42, CallI(test->entry(), 42.0f));
+  EXPECT_EQ(42, CallI(test->entry(), 42.4f));
+  EXPECT_EQ(43, CallI(test->entry(), 42.5f));
+  EXPECT_EQ(43, CallI(test->entry(), 42.6f));
+  EXPECT_EQ(43, CallI(test->entry(), 43.0f));
+  EXPECT_EQ(43, CallI(test->entry(), 43.4f));
+  EXPECT_EQ(44, CallI(test->entry(), 43.5f));
+  EXPECT_EQ(44, CallI(test->entry(), 43.6f));
+}
+
+ASSEMBLER_TEST_GENERATE(ConvertSingleToUnsignedWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtwus(A0, FA0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertSingleToUnsignedWord, test) {
+  EXPECT_DISASSEMBLY(
+      "c0150553 fcvt.wu.s a0, fa0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, CallI(test->entry(), static_cast<float>(-42)));
+  EXPECT_EQ(0, CallI(test->entry(), static_cast<float>(0)));
+  EXPECT_EQ(42, CallI(test->entry(), static_cast<float>(42)));
+  EXPECT_EQ(sign_extend(0),
+            CallI(test->entry(), static_cast<float>(kMinInt32)));
+  // static_cast<float>(kMaxInt32) rounds up to 2^31, so the conversion
+  // yields 2147483648, which sign-extends to -2147483648.
+  EXPECT_EQ(-2147483648, CallI(test->entry(), static_cast<float>(kMaxInt32)));
+  EXPECT_EQ(sign_extend(kMaxUint32),
+            CallI(test->entry(), static_cast<float>(kMaxUint32)));
+  EXPECT_EQ(sign_extend(0),
+            CallI(test->entry(), static_cast<float>(kMinInt64)));
+  EXPECT_EQ(sign_extend(kMaxUint32),
+            CallI(test->entry(), static_cast<float>(kMaxInt64)));
+  EXPECT_EQ(sign_extend(kMaxUint32),
+            CallI(test->entry(), static_cast<float>(kMaxUint64)));
+  EXPECT_EQ(sign_extend(0),
+            CallI(test->entry(), -std::numeric_limits<float>::infinity()));
+  EXPECT_EQ(sign_extend(kMaxUint32),
+            CallI(test->entry(), std::numeric_limits<float>::infinity()));
+  EXPECT_EQ(sign_extend(kMaxUint32),
+            CallI(test->entry(), std::numeric_limits<float>::signaling_NaN()));
+}
+
+ASSEMBLER_TEST_GENERATE(ConvertWordToSingle, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtsw(FA0, A0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertWordToSingle, test) {
+  EXPECT_DISASSEMBLY(
+      "d0050553 fcvt.s.w fa0, a0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-42.0f, CallF(test->entry(), sign_extend(-42)));
+  EXPECT_EQ(0.0f, CallF(test->entry(), sign_extend(0)));
+  EXPECT_EQ(42.0f, CallF(test->entry(), sign_extend(42)));
+  EXPECT_EQ(static_cast<float>(kMinInt32),
+            CallF(test->entry(), sign_extend(kMinInt32)));
+  EXPECT_EQ(static_cast<float>(kMaxInt32),
+            CallF(test->entry(), sign_extend(kMaxInt32)));
+  EXPECT_EQ(-1.0f, CallF(test->entry(), sign_extend(kMaxUint32)));
+}
+
+ASSEMBLER_TEST_GENERATE(ConvertUnsignedWordToSingle, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtswu(FA0, A0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertUnsignedWordToSingle, test) {
+  EXPECT_DISASSEMBLY(
+      "d0150553 fcvt.s.wu fa0, a0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(
+      static_cast<float>(static_cast<uint32_t>(static_cast<int32_t>(-42))),
+      CallF(test->entry(), sign_extend(-42)));
+  EXPECT_EQ(0.0f, CallF(test->entry(), sign_extend(0)));
+  EXPECT_EQ(42.0f, CallF(test->entry(), sign_extend(42)));
+  EXPECT_EQ(static_cast<float>(static_cast<uint32_t>(kMinInt32)),
+            CallF(test->entry(), sign_extend(kMinInt32)));
+  EXPECT_EQ(static_cast<float>(kMaxInt32),
+            CallF(test->entry(), sign_extend(kMaxInt32)));
+  EXPECT_EQ(static_cast<float>(kMaxUint32),
+            CallF(test->entry(), sign_extend(kMaxUint32)));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleMove, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fmvs(FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleMove, test) {
+  EXPECT_DISASSEMBLY(
+      "20b58553 fmv.s fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(36.0f, CallF(test->entry(), 42.0f, 36.0f));
+  EXPECT_EQ(std::numeric_limits<float>::infinity(),
+            CallF(test->entry(), -std::numeric_limits<float>::infinity(),
+                  std::numeric_limits<float>::infinity()));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleAbsoluteValue, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fabss(FA0, FA0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleAbsoluteValue, test) {
+  EXPECT_DISASSEMBLY(
+      "20a52553 fabs.s fa0, fa0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0.0f, CallF(test->entry(), 0.0f));
+  EXPECT_EQ(0.0f, CallF(test->entry(), -0.0f));
+  EXPECT_EQ(42.0f, CallF(test->entry(), 42.0f));
+  EXPECT_EQ(42.0f, CallF(test->entry(), -42.0f));
+  EXPECT_EQ(std::numeric_limits<float>::infinity(),
+            CallF(test->entry(), std::numeric_limits<float>::infinity()));
+  EXPECT_EQ(std::numeric_limits<float>::infinity(),
+            CallF(test->entry(), -std::numeric_limits<float>::infinity()));
+}
+
+ASSEMBLER_TEST_GENERATE(SingleNegate, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fnegs(FA0, FA0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(SingleNegate, test) {
+  EXPECT_DISASSEMBLY(
+      "20a51553 fneg.s fa0, fa0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-0.0f, CallF(test->entry(), 0.0f));
+  EXPECT_EQ(0.0f, CallF(test->entry(), -0.0f));
+  EXPECT_EQ(-42.0f, CallF(test->entry(), 42.0f));
+  EXPECT_EQ(42.0f, CallF(test->entry(), -42.0f));
+  EXPECT_EQ(-std::numeric_limits<float>::infinity(),
+            CallF(test->entry(), std::numeric_limits<float>::infinity()));
+  EXPECT_EQ(std::numeric_limits<float>::infinity(),
+            CallF(test->entry(), -std::numeric_limits<float>::infinity()));
+}
+
+ASSEMBLER_TEST_GENERATE(BitCastSingleToInteger, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fmvxw(A0, FA0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BitCastSingleToInteger, test) {
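+  // fmv.x.w transfers the raw single-precision bit pattern; on RV64 the
+  // 32-bit pattern is sign-extended into rd.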
+  EXPECT_DISASSEMBLY(
+      "e0050553 fmv.x.w a0, fa0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(bit_cast<int32_t>(0.0f), CallI(test->entry(), 0.0f));
+  EXPECT_EQ(bit_cast<int32_t>(-0.0f), CallI(test->entry(), -0.0f));
+  EXPECT_EQ(bit_cast<int32_t>(42.0f), CallI(test->entry(), 42.0f));
+  EXPECT_EQ(bit_cast<int32_t>(-42.0f), CallI(test->entry(), -42.0f));
+  EXPECT_EQ(bit_cast<int32_t>(std::numeric_limits<float>::quiet_NaN()),
+            CallI(test->entry(), std::numeric_limits<float>::quiet_NaN()));
+  EXPECT_EQ(bit_cast<int32_t>(std::numeric_limits<float>::signaling_NaN()),
+            CallI(test->entry(), std::numeric_limits<float>::signaling_NaN()));
+  EXPECT_EQ(bit_cast<int32_t>(std::numeric_limits<float>::infinity()),
+            CallI(test->entry(), std::numeric_limits<float>::infinity()));
+  EXPECT_EQ(bit_cast<int32_t>(-std::numeric_limits<float>::infinity()),
+            CallI(test->entry(), -std::numeric_limits<float>::infinity()));
+}
+
+ASSEMBLER_TEST_GENERATE(BitCastIntegerToSingle, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fmvwx(FA0, A0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(BitCastIntegerToSingle, test) {
+  EXPECT_DISASSEMBLY(
+      "f0050553 fmv.w.x fa0, a0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0.0f, CallF(test->entry(), sign_extend(bit_cast<int32_t>(0.0f))));
+  EXPECT_EQ(-0.0f, CallF(test->entry(), sign_extend(bit_cast<int32_t>(-0.0f))));
+  EXPECT_EQ(42.0f, CallF(test->entry(), sign_extend(bit_cast<int32_t>(42.0f))));
+  EXPECT_EQ(-42.0f,
+            CallF(test->entry(), sign_extend(bit_cast<int32_t>(-42.0f))));
+  EXPECT_EQ(true, isnan(CallF(test->entry(),
+                              sign_extend(bit_cast<int32_t>(
+                                  std::numeric_limits<float>::quiet_NaN())))));
+  EXPECT_EQ(true,
+            isnan(CallF(test->entry(),
+                        sign_extend(bit_cast<int32_t>(
+                            std::numeric_limits<float>::signaling_NaN())))));
+  EXPECT_EQ(std::numeric_limits<float>::infinity(),
+            CallF(test->entry(), sign_extend(bit_cast<int32_t>(
+                                     std::numeric_limits<float>::infinity()))));
+  EXPECT_EQ(
+      -std::numeric_limits<float>::infinity(),
+      CallF(test->entry(), sign_extend(bit_cast<int32_t>(
+                               -std::numeric_limits<float>::infinity()))));
+}
+
+#if XLEN >= 64
+ASSEMBLER_TEST_GENERATE(ConvertSingleToDoubleWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtls(A0, FA0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertSingleToDoubleWord, test) {
+  EXPECT_DISASSEMBLY(
+      "c0250553 fcvt.l.s a0, fa0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-42, CallI(test->entry(), static_cast<float>(-42)));
+  EXPECT_EQ(0, CallI(test->entry(), static_cast<float>(0)));
+  EXPECT_EQ(42, CallI(test->entry(), static_cast<float>(42)));
+  EXPECT_EQ(static_cast<int64_t>(kMinInt32),
+            CallI(test->entry(), static_cast<float>(kMinInt32)));
+  // float rounds kMaxInt32 up to 2^31 and kMaxUint32 up to 2^32:
+  EXPECT_EQ(static_cast<int64_t>(kMaxInt32) + 1,
+            CallI(test->entry(), static_cast<float>(kMaxInt32)));
+  EXPECT_EQ(static_cast<int64_t>(kMaxUint32) + 1,
+            CallI(test->entry(), static_cast<float>(kMaxUint32)));
+  EXPECT_EQ(kMinInt64, CallI(test->entry(), static_cast<float>(kMinInt64)));
+  EXPECT_EQ(kMaxInt64, CallI(test->entry(), static_cast<float>(kMaxInt64)));
+  EXPECT_EQ(kMaxInt64, CallI(test->entry(), static_cast<float>(kMaxUint64)));
+  EXPECT_EQ(kMinInt64,
+            CallI(test->entry(), -std::numeric_limits<float>::infinity()));
+  EXPECT_EQ(kMaxInt64,
+            CallI(test->entry(), std::numeric_limits<float>::infinity()));
+  EXPECT_EQ(kMaxInt64,
+            CallI(test->entry(), std::numeric_limits<float>::signaling_NaN()));
+}
+
+ASSEMBLER_TEST_GENERATE(ConvertSingleToUnsignedDoubleWord, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtlus(A0, FA0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertSingleToUnsignedDoubleWord, test) {
+  EXPECT_DISASSEMBLY(
+      "c0350553 fcvt.lu.s a0, fa0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0, CallI(test->entry(), static_cast<float>(-42)));
+  EXPECT_EQ(0, CallI(test->entry(), static_cast<float>(0)));
+  EXPECT_EQ(42, CallI(test->entry(), static_cast<float>(42)));
+  EXPECT_EQ(static_cast<int64_t>(static_cast<uint64_t>(0)),
+            CallI(test->entry(), static_cast<float>(kMinInt32)));
+  EXPECT_EQ(static_cast<int64_t>(static_cast<uint64_t>(kMaxInt32) + 1),
+            CallI(test->entry(), static_cast<float>(kMaxInt32)));
+  EXPECT_EQ(static_cast<int64_t>(static_cast<uint64_t>(kMaxUint32) + 1),
+            CallI(test->entry(), static_cast<float>(kMaxUint32)));
+  EXPECT_EQ(static_cast<int64_t>(static_cast<uint64_t>(0)),
+            CallI(test->entry(), static_cast<float>(kMinInt64)));
+  EXPECT_EQ(static_cast<int64_t>(static_cast<uint64_t>(kMaxInt64) + 1),
+            CallI(test->entry(), static_cast<float>(kMaxInt64)));
+  EXPECT_EQ(static_cast<int64_t>(static_cast<uint64_t>(kMaxUint64)),
+            CallI(test->entry(), static_cast<float>(kMaxUint64)));
+  EXPECT_EQ(static_cast<int64_t>(static_cast<uint64_t>(0)),
+            CallI(test->entry(), -std::numeric_limits<float>::infinity()));
+  EXPECT_EQ(static_cast<int64_t>(static_cast<uint64_t>(kMaxUint64)),
+            CallI(test->entry(), std::numeric_limits<float>::infinity()));
+  EXPECT_EQ(static_cast<int64_t>(static_cast<uint64_t>(kMaxUint64)),
+            CallI(test->entry(), std::numeric_limits<float>::signaling_NaN()));
+}
+
+ASSEMBLER_TEST_GENERATE(ConvertDoubleWordToSingle, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtsl(FA0, A0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertDoubleWordToSingle, test) {
+  EXPECT_DISASSEMBLY(
+      "d0250553 fcvt.s.l fa0, a0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0.0f, CallF(test->entry(), sign_extend(0)));
+  EXPECT_EQ(42.0f, CallF(test->entry(), sign_extend(42)));
+  EXPECT_EQ(-42.0f, CallF(test->entry(), sign_extend(-42)));
+  EXPECT_EQ(static_cast<float>(kMinInt32),
+            CallF(test->entry(), sign_extend(kMinInt32)));
+  EXPECT_EQ(static_cast<float>(kMaxInt32),
+            CallF(test->entry(), sign_extend(kMaxInt32)));
+  EXPECT_EQ(static_cast<float>(sign_extend(kMaxUint32)),
+            CallF(test->entry(), sign_extend(kMaxUint32)));
+  EXPECT_EQ(static_cast<float>(kMinInt64),
+            CallF(test->entry(), sign_extend(kMinInt64)));
+  EXPECT_EQ(static_cast<float>(kMaxInt64),
+            CallF(test->entry(), sign_extend(kMaxInt64)));
+  EXPECT_EQ(static_cast<float>(sign_extend(kMaxUint64)),
+            CallF(test->entry(), sign_extend(kMaxUint64)));
+}
+
+ASSEMBLER_TEST_GENERATE(ConvertUnsignedDoubleWordToSingle, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtslu(FA0, A0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(ConvertUnsignedDoubleWordToSingle, test) {
+  EXPECT_DISASSEMBLY(
+      "d0350553 fcvt.s.lu fa0, a0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0.0f, CallF(test->entry(), sign_extend(0)));
+  EXPECT_EQ(42.0f, CallF(test->entry(), sign_extend(42)));
+  EXPECT_EQ(static_cast<float>(static_cast<uint64_t>(sign_extend(-42))),
+            CallF(test->entry(), sign_extend(-42)));
+  EXPECT_EQ(static_cast<float>(static_cast<uint64_t>(sign_extend(kMinInt32))),
+            CallF(test->entry(), sign_extend(kMinInt32)));
+  EXPECT_EQ(static_cast<float>(static_cast<uint64_t>(sign_extend(kMaxInt32))),
+            CallF(test->entry(), sign_extend(kMaxInt32)));
+  EXPECT_EQ(static_cast<float>(static_cast<uint64_t>(sign_extend(kMaxUint32))),
+            CallF(test->entry(), sign_extend(kMaxUint32)));
+  EXPECT_EQ(static_cast<float>(static_cast<uint64_t>(sign_extend(kMinInt64))),
+            CallF(test->entry(), sign_extend(kMinInt64)));
+  EXPECT_EQ(static_cast<float>(static_cast<uint64_t>(sign_extend(kMaxInt64))),
+            CallF(test->entry(), sign_extend(kMaxInt64)));
+  EXPECT_EQ(static_cast<float>(kMaxUint64),
+            CallF(test->entry(), sign_extend(kMaxUint64)));
+}
+#endif
+
+ASSEMBLER_TEST_GENERATE(LoadDoubleFloat, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fld(FA0, Address(A0, 1 * sizeof(double)));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(LoadDoubleFloat, test) {
+  EXPECT_DISASSEMBLY(
+      "00853507 fld fa0, 8(a0)\n"
+      "00008067 ret\n");
+
+  double* data = reinterpret_cast<double*>(malloc(3 * sizeof(double)));
+  data[0] = 1.7;
+  data[1] = 2.8;
+  data[2] = 3.9;
+  EXPECT_EQ(data[1], CallD(test->entry(), reinterpret_cast<intx_t>(data)));
+  free(data);
+}
+
+ASSEMBLER_TEST_GENERATE(StoreDoubleFloat, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fsd(FA0, Address(A0, 1 * sizeof(double)));
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(StoreDoubleFloat, test) {
+  EXPECT_DISASSEMBLY(
+      "00a53427 fsd fa0, 8(a0)\n"
+      "00008067 ret\n");
+
+  double* data = reinterpret_cast<double*>(malloc(3 * sizeof(double)));
+  data[0] = 1.7;
+  data[1] = 2.8;
+  data[2] = 3.9;
+  CallD(test->entry(), reinterpret_cast<intx_t>(data), 4.2);
+  EXPECT_EQ(4.2, data[1]);
+  free(data);
+}
+
+ASSEMBLER_TEST_GENERATE(DoubleMultiplyAdd, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fmaddd(FA0, FA0, FA1, FA2);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleMultiplyAdd, test) {
+  EXPECT_DISASSEMBLY(
+      "62b50543 fmadd.d fa0, fa0, fa1, fa2\n"
+      "00008067 ret\n");
+  EXPECT_EQ(22.0, CallD(test->entry(), 3.0, 5.0, 7.0));
+  EXPECT_EQ(-8.0, CallD(test->entry(), -3.0, 5.0, 7.0));
+  EXPECT_EQ(-8.0, CallD(test->entry(), 3.0, -5.0, 7.0));
+  EXPECT_EQ(8.0, CallD(test->entry(), 3.0, 5.0, -7.0));
+
+  EXPECT_EQ(26.0, CallD(test->entry(), 7.0, 3.0, 5.0));
+  EXPECT_EQ(-16.0, CallD(test->entry(), -7.0, 3.0, 5.0));
+  EXPECT_EQ(-16.0, CallD(test->entry(), 7.0, -3.0, 5.0));
+  EXPECT_EQ(16.0, CallD(test->entry(), 7.0, 3.0, -5.0));
+}
+
+ASSEMBLER_TEST_GENERATE(DoubleMultiplySubtract, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fmsubd(FA0, FA0, FA1, FA2);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleMultiplySubtract, test) {
+  EXPECT_DISASSEMBLY(
+      "62b50547 fmsub.d fa0, fa0, fa1, fa2\n"
+      "00008067 ret\n");
+  EXPECT_EQ(8.0, CallD(test->entry(), 3.0, 5.0, 7.0));
+  EXPECT_EQ(-22.0, CallD(test->entry(), -3.0, 5.0, 7.0));
+  EXPECT_EQ(-22.0, CallD(test->entry(), 3.0, -5.0, 7.0));
+  EXPECT_EQ(22.0, CallD(test->entry(), 3.0, 5.0, -7.0));
+
+  EXPECT_EQ(16.0, CallD(test->entry(), 7.0, 3.0, 5.0));
+  EXPECT_EQ(-26.0, CallD(test->entry(), -7.0, 3.0, 5.0));
+  EXPECT_EQ(-26.0, CallD(test->entry(), 7.0, -3.0, 5.0));
+  EXPECT_EQ(26.0, CallD(test->entry(), 7.0, 3.0, -5.0));
+}
+
+ASSEMBLER_TEST_GENERATE(DoubleNegateMultiplySubtract, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fnmsubd(FA0, FA0, FA1, FA2);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleNegateMultiplySubtract, test) {
+  EXPECT_DISASSEMBLY(
+      "62b5054b fnmsub.d fa0, fa0, fa1, fa2\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-8.0, CallD(test->entry(), 3.0, 5.0, 7.0));
+  EXPECT_EQ(22.0, CallD(test->entry(), -3.0, 5.0, 7.0));
+  EXPECT_EQ(22.0, CallD(test->entry(), 3.0, -5.0, 7.0));
+  EXPECT_EQ(-22.0, CallD(test->entry(), 3.0, 5.0, -7.0));
+
+  EXPECT_EQ(-16.0, CallD(test->entry(), 7.0, 3.0, 5.0));
+  EXPECT_EQ(26.0, CallD(test->entry(), -7.0, 3.0, 5.0));
+  EXPECT_EQ(26.0, CallD(test->entry(), 7.0, -3.0, 5.0));
+  EXPECT_EQ(-26.0, CallD(test->entry(), 7.0, 3.0, -5.0));
+}
+
+ASSEMBLER_TEST_GENERATE(DoubleNegateMultiplyAdd, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fnmaddd(FA0, FA0, FA1, FA2);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleNegateMultiplyAdd, test) {
+  EXPECT_DISASSEMBLY(
+      "62b5054f fnmadd.d fa0, fa0, fa1, fa2\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-22.0, CallD(test->entry(), 3.0, 5.0, 7.0));
+  EXPECT_EQ(8.0, CallD(test->entry(), -3.0, 5.0, 7.0));
+  EXPECT_EQ(8.0, CallD(test->entry(), 3.0, -5.0, 7.0));
+  EXPECT_EQ(-8.0, CallD(test->entry(), 3.0, 5.0, -7.0));
+
+  EXPECT_EQ(-26.0, CallD(test->entry(), 7.0, 3.0, 5.0));
+  EXPECT_EQ(16.0, CallD(test->entry(), -7.0, 3.0, 5.0));
+  EXPECT_EQ(16.0, CallD(test->entry(), 7.0, -3.0, 5.0));
+  EXPECT_EQ(-16.0, CallD(test->entry(), 7.0, 3.0, -5.0));
+}
+
+ASSEMBLER_TEST_GENERATE(DoubleAdd, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ faddd(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleAdd, test) {
+  EXPECT_DISASSEMBLY(
+      "02b50553 fadd.d fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(8.0, CallD(test->entry(), 3.0, 5.0));
+  EXPECT_EQ(2.0, CallD(test->entry(), -3.0, 5.0));
+  EXPECT_EQ(-2.0, CallD(test->entry(), 3.0, -5.0));
+  EXPECT_EQ(-8.0, CallD(test->entry(), -3.0, -5.0));
+
+  EXPECT_EQ(10.0, CallD(test->entry(), 7.0, 3.0));
+  EXPECT_EQ(-4.0, CallD(test->entry(), -7.0, 3.0));
+  EXPECT_EQ(4.0, CallD(test->entry(), 7.0, -3.0));
+  EXPECT_EQ(-10.0, CallD(test->entry(), -7.0, -3.0));
+}
+
+ASSEMBLER_TEST_GENERATE(DoubleSubtract, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fsubd(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleSubtract, test) {
+  EXPECT_DISASSEMBLY(
+      "0ab50553 fsub.d fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-2.0, CallD(test->entry(), 3.0, 5.0));
+  EXPECT_EQ(-8.0, CallD(test->entry(), -3.0, 5.0));
+  EXPECT_EQ(8.0, CallD(test->entry(), 3.0, -5.0));
+  EXPECT_EQ(2.0, CallD(test->entry(), -3.0, -5.0));
+
+  EXPECT_EQ(4.0, CallD(test->entry(), 7.0, 3.0));
+  EXPECT_EQ(-10.0, CallD(test->entry(), -7.0, 3.0));
+  EXPECT_EQ(10.0, CallD(test->entry(), 7.0, -3.0));
+  EXPECT_EQ(-4.0, CallD(test->entry(), -7.0, -3.0));
+}
+
+ASSEMBLER_TEST_GENERATE(DoubleMultiply, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fmuld(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleMultiply, test) {
+  EXPECT_DISASSEMBLY(
+      "12b50553 fmul.d fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(15.0, CallD(test->entry(), 3.0, 5.0));
+  EXPECT_EQ(-15.0, CallD(test->entry(), -3.0, 5.0));
+  EXPECT_EQ(-15.0, CallD(test->entry(), 3.0, -5.0));
+  EXPECT_EQ(15.0, CallD(test->entry(), -3.0, -5.0));
+
+  EXPECT_EQ(21.0, CallD(test->entry(), 7.0, 3.0));
+  EXPECT_EQ(-21.0, CallD(test->entry(), -7.0, 3.0));
+  EXPECT_EQ(-21.0, CallD(test->entry(), 7.0, -3.0));
+  EXPECT_EQ(21.0, CallD(test->entry(), -7.0, -3.0));
+}
+
+ASSEMBLER_TEST_GENERATE(DoubleDivide, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fdivd(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleDivide, test) {
+  EXPECT_DISASSEMBLY(
+      "1ab50553 fdiv.d fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(2.0, CallD(test->entry(), 10.0, 5.0));
+  EXPECT_EQ(-2.0, CallD(test->entry(), -10.0, 5.0));
+  EXPECT_EQ(-2.0, CallD(test->entry(), 10.0, -5.0));
+  EXPECT_EQ(2.0, CallD(test->entry(), -10.0, -5.0));
+}
+
+ASSEMBLER_TEST_GENERATE(DoubleSquareRoot, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fsqrtd(FA0, FA0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleSquareRoot, test) {
+  EXPECT_DISASSEMBLY(
+      "5a050553 fsqrt.d fa0, fa0\n"
+      "00008067 ret\n");
+  EXPECT_EQ(0.0, CallD(test->entry(), 0.0));
+  EXPECT_EQ(1.0, CallD(test->entry(), 1.0));
+  EXPECT_EQ(2.0, CallD(test->entry(), 4.0));
+  EXPECT_EQ(3.0, CallD(test->entry(), 9.0));
+}
+
+ASSEMBLER_TEST_GENERATE(DoubleSignInject, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fsgnjd(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleSignInject, test) {
+  EXPECT_DISASSEMBLY(
+      "22b50553 fsgnj.d fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(3.0, CallD(test->entry(), 3.0, 5.0));
+  EXPECT_EQ(3.0, CallD(test->entry(), -3.0, 5.0));
+  EXPECT_EQ(-3.0, CallD(test->entry(), 3.0, -5.0));
+  EXPECT_EQ(-3.0, CallD(test->entry(), -3.0, -5.0));
+}
+
+ASSEMBLER_TEST_GENERATE(DoubleNegatedSignInject, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fsgnjnd(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleNegatedSignInject, test) {
+  EXPECT_DISASSEMBLY(
+      "22b51553 fsgnjn.d fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(-3.0, CallD(test->entry(), 3.0, 5.0));
+  EXPECT_EQ(-3.0, CallD(test->entry(), -3.0, 5.0));
+  EXPECT_EQ(3.0, CallD(test->entry(), 3.0, -5.0));
+  EXPECT_EQ(3.0, CallD(test->entry(), -3.0, -5.0));
+}
+
+ASSEMBLER_TEST_GENERATE(DoubleXorSignInject, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fsgnjxd(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleXorSignInject, test) {
+  EXPECT_DISASSEMBLY(
+      "22b52553 fsgnjx.d fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(3.0, CallD(test->entry(), 3.0, 5.0));
+  EXPECT_EQ(-3.0, CallD(test->entry(), -3.0, 5.0));
+  EXPECT_EQ(-3.0, CallD(test->entry(), 3.0, -5.0));
+  EXPECT_EQ(3.0, CallD(test->entry(), -3.0, -5.0));
+}
+
+ASSEMBLER_TEST_GENERATE(DoubleMin, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fmind(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleMin, test) {
+  EXPECT_DISASSEMBLY(
+      "2ab50553 fmin.d fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(1.0, CallD(test->entry(), 3.0, 1.0));
+  EXPECT_EQ(3.0, CallD(test->entry(), 3.0, 3.0));
+  EXPECT_EQ(3.0, CallD(test->entry(), 3.0, 5.0));
+  EXPECT_EQ(-1.0, CallD(test->entry(), 3.0, -1.0));
+  EXPECT_EQ(-3.0, CallD(test->entry(), 3.0, -3.0));
+  EXPECT_EQ(-5.0, CallD(test->entry(), 3.0, -5.0));
+  EXPECT_EQ(-3.0, CallD(test->entry(), -3.0, 1.0));
+  EXPECT_EQ(-3.0, CallD(test->entry(), -3.0, 3.0));
+  EXPECT_EQ(-3.0, CallD(test->entry(), -3.0, 5.0));
+  EXPECT_EQ(-3.0, CallD(test->entry(), -3.0, -1.0));
+  EXPECT_EQ(-3.0, CallD(test->entry(), -3.0, -3.0));
+  EXPECT_EQ(-5.0, CallD(test->entry(), -3.0, -5.0));
+
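+  // With exactly one NaN operand, fmin.d returns the non-NaN operand
+  // (IEEE 754-2019 minimumNumber semantics).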
+  double qNAN = std::numeric_limits<double>::quiet_NaN();
+  EXPECT_EQ(3.0, CallD(test->entry(), 3.0, qNAN));
+  EXPECT_EQ(3.0, CallD(test->entry(), qNAN, 3.0));
+  EXPECT_EQ(-3.0, CallD(test->entry(), -3.0, qNAN));
+  EXPECT_EQ(-3.0, CallD(test->entry(), qNAN, -3.0));
+}
+
+ASSEMBLER_TEST_GENERATE(DoubleMax, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fmaxd(FA0, FA0, FA1);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleMax, test) {
+  EXPECT_DISASSEMBLY(
+      "2ab51553 fmax.d fa0, fa0, fa1\n"
+      "00008067 ret\n");
+  EXPECT_EQ(3.0, CallD(test->entry(), 3.0, 1.0));
+  EXPECT_EQ(3.0, CallD(test->entry(), 3.0, 3.0));
+  EXPECT_EQ(5.0, CallD(test->entry(), 3.0, 5.0));
+  EXPECT_EQ(3.0, CallD(test->entry(), 3.0, -1.0));
+  EXPECT_EQ(3.0, CallD(test->entry(), 3.0, -3.0));
+  EXPECT_EQ(3.0, CallD(test->entry(), 3.0, -5.0));
+  EXPECT_EQ(1.0, CallD(test->entry(), -3.0, 1.0));
+  EXPECT_EQ(3.0, CallD(test->entry(), -3.0, 3.0));
+  EXPECT_EQ(5.0, CallD(test->entry(), -3.0, 5.0));
+  EXPECT_EQ(-1.0, CallD(test->entry(), -3.0, -1.0));
+  EXPECT_EQ(-3.0, CallD(test->entry(), -3.0, -3.0));
+  EXPECT_EQ(-3.0, CallD(test->entry(), -3.0, -5.0));
+
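+  // fmax.d likewise returns the non-NaN operand when one input is NaN.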
+  double qNAN = std::numeric_limits<double>::quiet_NaN();
+  EXPECT_EQ(3.0, CallD(test->entry(), 3.0, qNAN));
+  EXPECT_EQ(3.0, CallD(test->entry(), qNAN, 3.0));
+  EXPECT_EQ(-3.0, CallD(test->entry(), -3.0, qNAN));
+  EXPECT_EQ(-3.0, CallD(test->entry(), qNAN, -3.0));
+}
+
+ASSEMBLER_TEST_GENERATE(DoubleToSingle, assembler) {
+  FLAG_use_compressed_instructions = false;
+  __ SetExtensions(RV_G);
+  __ fcvtsd(FA0, FA0);
+  __ ret();
+}
+ASSEMBLER_TEST_RUN(DoubleToSingle, test) {