Version 3.0.0-360.0.dev

Merge 2459634762075a640e76f57440d050bf2e8e00a7 into dev
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 57b4052..64cd46a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -163,12 +163,21 @@
 - `jsify` is now permissive and has inverse semantics to `dartify`.
 - `jsify` and `dartify` both handle types they understand natively more
   efficiently.
+- The signature of `callMethod` has been aligned with the other methods; it
+  now takes `Object` instead of `String`.
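
A hedged sketch of what this change allows (illustrative only; `jsDate` is an assumed JS `Date` object, and the snippet assumes an SDK that includes this change):

```dart
import 'dart:js_util' as js_util;

// The method-name argument is now typed `Object`, matching the other
// `dart:js_util` helpers, so a `String`-typed name is no longer required.
int fullYear(Object jsDate) {
  Object methodName = 'getFullYear';
  return js_util.callMethod(jsDate, methodName, const []);
}
```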
 
 ### Tools
 
 #### Web Dev Compiler (DDC)
 - Removed deprecated command line flags `-k`, `--kernel`, and `--dart-sdk`.
 
+#### Dart2js
+
+- Cleanup related to [#46100](https://github.com/dart-lang/sdk/issues/46100):
+  the internal dart2js snapshot fails unless it is called from a supported
+  interface, such as `dart compile js`, `flutter build`, or
+  `build_web_compilers`. This is not expected to be a user-visible change.
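
For context, a minimal sketch of driving the supported `dart compile js` interface from a script rather than invoking the internal snapshot directly (the input and output paths here are assumptions for illustration):

```dart
import 'dart:io';

// Assumes `web/main.dart` exists; `dart compile js` is a supported
// entrypoint, while calling the internal dart2js snapshot directly now fails.
Future<void> main() async {
  final result = await Process.run(
    'dart',
    ['compile', 'js', '-o', 'out/main.js', 'web/main.dart'],
  );
  stdout.write(result.stdout);
  stderr.write(result.stderr);
}
```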
+
 #### Formatter
 
 * Format `sync*` and `async*` functions with `=>` bodies.
diff --git a/pkg/analyzer/TRIAGE.md b/pkg/analyzer/TRIAGE.md
index 8b039c1..83bb51b 100644
--- a/pkg/analyzer/TRIAGE.md
+++ b/pkg/analyzer/TRIAGE.md
@@ -58,10 +58,10 @@
 
 ### P2
 
-* Incorrect analysis errors or warnings, on edge cases with simple workaround
+* Incorrect analysis errors, on edge cases with simple workaround
   * EXAMPLE: Disabling the error or warning 'fixes' the issue and unblocks
     users.
-* Incorrect analysis infos/hints, on edge cases
+* Incorrect analysis warnings, on edge cases
 * Incorrect resolution of symbols or libraries, edge cases only with workarounds
 * Incorrect data from analyzer API, edge cases without workaround
 * Automation resulting in incorrect code, edge cases
@@ -75,8 +75,8 @@
 
 * Uncaught exceptions caught by a fuzzer, but believed to be theoretical
   situations only
-* Incorrect analysis errors or warnings, theoretical
-* Incorrect analysis infos/hints, on edge cases with workaround
+* Incorrect analysis errors, theoretical
+* Incorrect analysis warnings, on edge cases with workaround
 * Incorrect resolution of symbols or libraries, theoretical
 * Incorrect data from analyzer API, edge case with workaround available
 * Performance regression impacting edge cases with workaround or without
@@ -89,7 +89,7 @@
 
 ### P4
 
-* Incorrect analysis infos/hints, theoretical
+* Incorrect analysis warnings, theoretical
 * Incorrect data from analyzer API, theoretical
 * Theoretical performance problems
 * An enhancement that may have some evidence that it isn't a good idea to
@@ -122,8 +122,7 @@
 * "corrupted code" - Modification of source code in such a way that it is
   more than just a bit wrong or having some symbols that don't exist, but is
   not valid Dart and would be painful to manually correct.
-* "diagnostic" - An error, warning, hint, or lint generated by the analyzer
-  or linter.
+* "diagnostic" - An error, warning, or lint generated by the analyzer.
 * "incorrect code" - Modification of code in a way that is known to be wrong,
   but would be trivial to figure out how to fix for the human using the tool.
 * "key users" - Flutter, Pub, Fuchsia, Dart, Google/1P
diff --git a/pkg/analyzer/lib/dart/analysis/analysis_options.dart b/pkg/analyzer/lib/dart/analysis/analysis_options.dart
index 05fbbaf..ddeab65 100644
--- a/pkg/analyzer/lib/dart/analysis/analysis_options.dart
+++ b/pkg/analyzer/lib/dart/analysis/analysis_options.dart
@@ -35,8 +35,8 @@
   /// analysis.
   List<String> get excludePatterns;
 
-  /// Return `true` if analysis is to generate hint results (e.g. type inference
-  /// based information and pub best practices).
+  /// Return `true` if analysis is to generate hint results (e.g. best practices
+  /// and analysis based on certain annotations).
   bool get hint;
 
   /// Return `true` if analysis is to generate lint warnings.
diff --git a/pkg/analyzer/lib/source/error_processor.dart b/pkg/analyzer/lib/source/error_processor.dart
index 207f15a..e34dad3 100644
--- a/pkg/analyzer/lib/source/error_processor.dart
+++ b/pkg/analyzer/lib/source/error_processor.dart
@@ -24,7 +24,7 @@
   /// Create an error config for the given error code map.
   /// For example:
   ///     new ErrorConfig({'missing_return' : 'error'});
-  /// will create a processor config that turns `missing_return` hints into
+  /// will create a processor config that turns `missing_return` warnings into
   /// errors.
   ErrorConfig(YamlNode? codeMap) {
     _processMap(codeMap);
diff --git a/pkg/analyzer/lib/src/dart/analysis/library_analyzer.dart b/pkg/analyzer/lib/src/dart/analysis/library_analyzer.dart
index eede372..accd399 100644
--- a/pkg/analyzer/lib/src/dart/analysis/library_analyzer.dart
+++ b/pkg/analyzer/lib/src/dart/analysis/library_analyzer.dart
@@ -250,7 +250,7 @@
     );
   }
 
-  /// Compute diagnostics in [units], including errors and warnings, hints,
+  /// Compute diagnostics in [units], including errors and warnings,
   /// lints, and a few other cases.
   void _computeDiagnostics(Map<FileState, CompilationUnitImpl> units) {
     units.forEach((file, unit) {
@@ -274,7 +274,7 @@
       }
       var usedElements = UsedLocalElements.merge(usedLocalElements);
       units.forEach((file, unit) {
-        _computeHints(
+        _computeWarnings(
           file,
           unit,
           usedImportedElements: usedImportedElements,
@@ -322,85 +322,6 @@
     }
   }
 
-  void _computeHints(
-    FileState file,
-    CompilationUnit unit, {
-    required List<UsedImportedElements> usedImportedElements,
-    required UsedLocalElements usedElements,
-  }) {
-    AnalysisErrorListener errorListener = _getErrorListener(file);
-    ErrorReporter errorReporter = _getErrorReporter(file);
-
-    if (!_libraryElement.isNonNullableByDefault) {
-      unit.accept(
-        LegacyDeadCodeVerifier(
-          errorReporter,
-          typeSystem: _typeSystem,
-        ),
-      );
-    }
-
-    UnicodeTextVerifier(errorReporter).verify(unit, file.content);
-
-    unit.accept(DeadCodeVerifier(errorReporter));
-
-    unit.accept(
-      BestPracticesVerifier(
-        errorReporter,
-        _typeProvider,
-        _libraryElement,
-        unit,
-        file.content,
-        declaredVariables: _declaredVariables,
-        typeSystem: _typeSystem,
-        inheritanceManager: _inheritance,
-        analysisOptions: _analysisOptions,
-        workspacePackage: _library.file.workspacePackage,
-      ),
-    );
-
-    unit.accept(OverrideVerifier(
-      _inheritance,
-      _libraryElement,
-      errorReporter,
-    ));
-
-    TodoFinder(errorReporter).findIn(unit);
-    LanguageVersionOverrideVerifier(errorReporter).verify(unit);
-
-    // Verify imports.
-    {
-      ImportsVerifier verifier = ImportsVerifier();
-      verifier.addImports(unit);
-      usedImportedElements.forEach(verifier.removeUsedElements);
-      verifier.generateDuplicateExportHints(errorReporter);
-      verifier.generateDuplicateImportHints(errorReporter);
-      verifier.generateDuplicateShownHiddenNameHints(errorReporter);
-      verifier.generateUnusedImportHints(errorReporter);
-      verifier.generateUnusedShownNameHints(errorReporter);
-      verifier.generateUnnecessaryImportHints(
-          errorReporter, usedImportedElements);
-    }
-
-    // Unused local elements.
-    {
-      UnusedLocalElementsVerifier visitor = UnusedLocalElementsVerifier(
-          errorListener, usedElements, _inheritance, _libraryElement);
-      unit.accept(visitor);
-    }
-
-    //
-    // Find code that uses features from an SDK version that does not satisfy
-    // the SDK constraints specified in analysis options.
-    //
-    var sdkVersionConstraint = _analysisOptions.sdkVersionConstraint;
-    if (sdkVersionConstraint != null) {
-      SdkConstraintVerifier verifier = SdkConstraintVerifier(
-          errorReporter, _libraryElement, _typeProvider, sdkVersionConstraint);
-      unit.accept(verifier);
-    }
-  }
-
   void _computeLints(
     FileState file,
     LinterContextUnit currentUnit,
@@ -478,6 +399,85 @@
     unit.accept(FfiVerifier(_typeSystem, errorReporter));
   }
 
+  void _computeWarnings(
+    FileState file,
+    CompilationUnit unit, {
+    required List<UsedImportedElements> usedImportedElements,
+    required UsedLocalElements usedElements,
+  }) {
+    AnalysisErrorListener errorListener = _getErrorListener(file);
+    ErrorReporter errorReporter = _getErrorReporter(file);
+
+    if (!_libraryElement.isNonNullableByDefault) {
+      unit.accept(
+        LegacyDeadCodeVerifier(
+          errorReporter,
+          typeSystem: _typeSystem,
+        ),
+      );
+    }
+
+    UnicodeTextVerifier(errorReporter).verify(unit, file.content);
+
+    unit.accept(DeadCodeVerifier(errorReporter));
+
+    unit.accept(
+      BestPracticesVerifier(
+        errorReporter,
+        _typeProvider,
+        _libraryElement,
+        unit,
+        file.content,
+        declaredVariables: _declaredVariables,
+        typeSystem: _typeSystem,
+        inheritanceManager: _inheritance,
+        analysisOptions: _analysisOptions,
+        workspacePackage: _library.file.workspacePackage,
+      ),
+    );
+
+    unit.accept(OverrideVerifier(
+      _inheritance,
+      _libraryElement,
+      errorReporter,
+    ));
+
+    TodoFinder(errorReporter).findIn(unit);
+    LanguageVersionOverrideVerifier(errorReporter).verify(unit);
+
+    // Verify imports.
+    {
+      ImportsVerifier verifier = ImportsVerifier();
+      verifier.addImports(unit);
+      usedImportedElements.forEach(verifier.removeUsedElements);
+      verifier.generateDuplicateExportWarnings(errorReporter);
+      verifier.generateDuplicateImportWarnings(errorReporter);
+      verifier.generateDuplicateShownHiddenNameWarnings(errorReporter);
+      verifier.generateUnusedImportHints(errorReporter);
+      verifier.generateUnusedShownNameHints(errorReporter);
+      verifier.generateUnnecessaryImportHints(
+          errorReporter, usedImportedElements);
+    }
+
+    // Unused local elements.
+    {
+      UnusedLocalElementsVerifier visitor = UnusedLocalElementsVerifier(
+          errorListener, usedElements, _inheritance, _libraryElement);
+      unit.accept(visitor);
+    }
+
+    //
+    // Find code that uses features from an SDK version that does not satisfy
+    // the SDK constraints specified in analysis options.
+    //
+    var sdkVersionConstraint = _analysisOptions.sdkVersionConstraint;
+    if (sdkVersionConstraint != null) {
+      SdkConstraintVerifier verifier = SdkConstraintVerifier(
+          errorReporter, _libraryElement, _typeProvider, sdkVersionConstraint);
+      unit.accept(verifier);
+    }
+  }
+
   /// Return a subset of the given [errors] that are not marked as ignored in
   /// the [file].
   List<AnalysisError> _filterIgnoredErrors(
diff --git a/pkg/analyzer/lib/src/dart/element/type_system.dart b/pkg/analyzer/lib/src/dart/element/type_system.dart
index c693202..3adb6dc 100644
--- a/pkg/analyzer/lib/src/dart/element/type_system.dart
+++ b/pkg/analyzer/lib/src/dart/element/type_system.dart
@@ -794,10 +794,10 @@
 
     // If the subtype relation goes the other way, allow the implicit downcast.
     if (isSubtypeOf(toType, fromType)) {
-      // TODO(leafp,jmesserly): we emit warnings/hints for these in
-      // src/task/strong/checker.dart, which is a bit inconsistent. That
-      // code should be handled into places that use isAssignableTo, such as
-      // ErrorVerifier.
+      // TODO(leafp,jmesserly): we emit warnings for these in
+      // `src/task/strong/checker.dart`, which is a bit inconsistent. That code
+      // should be moved into the places that use `isAssignableTo`, such as
+      // [ErrorVerifier].
       return true;
     }
 
diff --git a/pkg/analyzer/lib/src/error/best_practices_verifier.dart b/pkg/analyzer/lib/src/error/best_practices_verifier.dart
index 5b2cb5e..5cfaea2 100644
--- a/pkg/analyzer/lib/src/error/best_practices_verifier.dart
+++ b/pkg/analyzer/lib/src/error/best_practices_verifier.dart
@@ -292,8 +292,9 @@
         }
       } else {
         // Something other than a declaration was annotated. Whatever this is,
-        // it probably warrants a Hint, but this has not been specified on
-        // visibleForTemplate or visibleForTesting, so leave it alone for now.
+        // it probably warrants a Warning, but this has not been specified on
+        // `visibleForTemplate` or `visibleForTesting`, so leave it alone for
+        // now.
       }
     }
 
@@ -412,8 +413,6 @@
     }
 
     try {
-      // Commented out until we decide that we want this hint in the analyzer
-      //    checkForOverrideEqualsButNotHashCode(node);
       _checkForImmutable(node);
       _checkForInvalidSealedSuperclass(node);
       super.visitClassDeclaration(node);
@@ -444,11 +443,22 @@
   }
 
   @override
+  void visitConstantPattern(ConstantPattern node) {
+    if (node.expression.isDoubleNan) {
+      _errorReporter.reportErrorForNode(
+        WarningCode.UNNECESSARY_NAN_COMPARISON_FALSE,
+        node,
+      );
+    }
+    super.visitConstantPattern(node);
+  }
+
+  @override
   void visitConstructorDeclaration(ConstructorDeclaration node) {
     var element = node.declaredElement as ConstructorElementImpl;
     if (!_isNonNullableByDefault && element.isFactory) {
       if (node.body is BlockFunctionBody) {
-        // Check the block for a return statement, if not, create the hint.
+        // Check the block for a return statement.
         if (!ExitDetector.exits(node.body)) {
           _errorReporter.reportErrorForNode(
               WarningCode.MISSING_RETURN, node, [node.returnType.name]);
@@ -718,8 +728,6 @@
       _inDoNotStoreMember = true;
     }
     try {
-      // This was determined to not be a good hint, see: dartbug.com/16029
-      //checkForOverridingPrivateMember(node);
       _checkForMissingReturn(node.body, node);
       _mustCallSuperVerifier.checkMethodDeclaration(node);
       _checkForUnnecessaryNoSuchMethod(node);
@@ -764,7 +772,7 @@
   @override
   void visitMethodInvocation(MethodInvocation node) {
     _deprecatedVerifier.methodInvocation(node);
-    _checkForNullAwareHints(node, node.operator);
+    _checkForNullAwareWarnings(node, node.operator);
     _errorHandlerVerifier.verifyMethodInvocation(node);
     _nullSafeApiVerifier.methodInvocation(node);
     super.visitMethodInvocation(node);
@@ -832,7 +840,7 @@
 
   @override
   void visitPropertyAccess(PropertyAccess node) {
-    _checkForNullAwareHints(node, node.operator);
+    _checkForNullAwareWarnings(node, node.operator);
     super.visitPropertyAccess(node);
   }
 
@@ -894,14 +902,15 @@
     }
   }
 
-  /// Check for the passed is expression for the unnecessary type check hint
-  /// codes as well as null checks expressed using an is expression.
+  /// Checks the passed [IsExpression] for the unnecessary type check
+  /// warning codes, as well as for null checks expressed using an
+  /// [IsExpression].
   ///
-  /// @param node the is expression to check
-  /// @return `true` if and only if a hint code is generated on the passed node
-  /// See [HintCode.TYPE_CHECK_IS_NOT_NULL], [HintCode.TYPE_CHECK_IS_NULL],
-  /// [HintCode.UNNECESSARY_TYPE_CHECK_TRUE], and
-  /// [HintCode.UNNECESSARY_TYPE_CHECK_FALSE].
+  /// Returns `true` if a warning code is generated on [node].
+  /// See [WarningCode.TYPE_CHECK_IS_NOT_NULL],
+  /// [WarningCode.TYPE_CHECK_IS_NULL],
+  /// [WarningCode.UNNECESSARY_TYPE_CHECK_TRUE], and
+  /// [WarningCode.UNNECESSARY_TYPE_CHECK_FALSE].
   bool _checkAllTypeChecks(IsExpression node) {
     var leftNode = node.expression;
     var rightNode = node.type;
@@ -910,8 +919,8 @@
     void report() {
       _errorReporter.reportErrorForNode(
         node.notOperator == null
-            ? HintCode.UNNECESSARY_TYPE_CHECK_TRUE
-            : HintCode.UNNECESSARY_TYPE_CHECK_FALSE,
+            ? WarningCode.UNNECESSARY_TYPE_CHECK_TRUE
+            : WarningCode.UNNECESSARY_TYPE_CHECK_FALSE,
         node,
       );
     }
@@ -1050,7 +1059,7 @@
   ///
   /// If [node] is marked with [immutable] or inherits from a class or mixin
   /// marked with [immutable], this function searches the fields of [node] and
-  /// its superclasses, reporting a hint if any non-final instance fields are
+  /// its superclasses, reporting a warning if any non-final instance fields are
   /// found.
   void _checkForImmutable(NamedCompilationUnitMember node) {
     /// Return `true` if the given class [element] is annotated with the
@@ -1274,15 +1283,10 @@
       );
     }
 
-    bool isDoubleNan(Expression expression) =>
-        expression is PrefixedIdentifier &&
-        expression.prefix.name == 'double' &&
-        expression.identifier.name == 'nan';
-
     void checkLeftRight(ErrorCode errorCode) {
-      if (isDoubleNan(node.leftOperand)) {
+      if (node.leftOperand.isDoubleNan) {
         reportStartEnd(errorCode, node.leftOperand, node.operator);
-      } else if (isDoubleNan(node.rightOperand)) {
+      } else if (node.rightOperand.isDoubleNan) {
         reportStartEnd(errorCode, node.operator, node.rightOperand);
       }
     }
@@ -1352,10 +1356,10 @@
       if (constructorName.name != null) {
         fullConstructorName = '$fullConstructorName.${constructorName.name}';
       }
-      var hint = node.keyword?.keyword == Keyword.NEW
+      var warning = node.keyword?.keyword == Keyword.NEW
           ? WarningCode.NON_CONST_CALL_TO_LITERAL_CONSTRUCTOR_USING_NEW
           : WarningCode.NON_CONST_CALL_TO_LITERAL_CONSTRUCTOR;
-      _errorReporter.reportErrorForNode(hint, node, [fullConstructorName]);
+      _errorReporter.reportErrorForNode(warning, node, [fullConstructorName]);
     }
   }
 
@@ -1385,13 +1389,10 @@
     return false;
   }
 
-  /// Generate a hint for functions or methods that have a return type, but do
-  /// not have a return statement on all branches. At the end of blocks with no
-  /// return, Dart implicitly returns `null`. Avoiding these implicit returns
-  /// is considered a best practice.
-  ///
-  /// Note: for async functions/methods, this hint only applies when the
-  /// function has a return type that Future<Null> is not assignable to.
+  /// Generates a warning for functions that have a potentially non-nullable
+  /// return type, but do not have a return statement on all branches. At the
+  /// end of blocks with no return, Dart implicitly returns `null`. Avoiding
+  /// these implicit returns is considered a best practice.
   ///
   /// See [WarningCode.MISSING_RETURN].
   void _checkForMissingReturn(FunctionBody body, AstNode functionNode) {
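
A minimal example of code this check flags, assuming a legacy (pre-null-safety) library where `int` is nullable:

```dart
// @dart=2.9
// Not every branch returns, so the body can complete normally and Dart
// implicitly returns `null`; the analyzer reports MISSING_RETURN here.
int signish(bool positive) {
  if (positive) return 1;
  // Implicit `return null;` at the end of the block.
}
```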
@@ -1459,8 +1460,8 @@
     }
   }
 
-  /// Produce several null-aware related hints.
-  void _checkForNullAwareHints(Expression node, Token? operator) {
+  /// Produce several null-aware related warnings.
+  void _checkForNullAwareWarnings(Expression node, Token? operator) {
     if (_isNonNullableByDefault) {
       return;
     }
@@ -1559,10 +1560,10 @@
     }
   }
 
-  /// Generate a hint for `noSuchMethod` methods that do nothing except of
-  /// calling another `noSuchMethod` that is not defined by `Object`.
+  /// Generates a warning for `noSuchMethod` methods that do nothing except
+  /// call another `noSuchMethod` that is not defined by `Object`.
   ///
-  /// Return `true` if and only if a hint code is generated on the passed node.
+  /// Returns `true` if a warning code is generated for [node].
   bool _checkForUnnecessaryNoSuchMethod(MethodDeclaration node) {
     if (node.name.lexeme != FunctionElement.NO_SUCH_METHOD_METHOD_NAME) {
       return false;
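
A sketch of the pattern this check targets (class names are illustrative): the override adds nothing beyond forwarding to a non-`Object` `noSuchMethod`.

```dart
class Base {
  @override
  dynamic noSuchMethod(Invocation invocation) => 'handled';
}

class Derived extends Base {
  // Flagged: this does nothing except call another `noSuchMethod` that is
  // not defined by `Object`, so the override is unnecessary.
  @override
  dynamic noSuchMethod(Invocation invocation) => super.noSuchMethod(invocation);
}
```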
@@ -2008,18 +2009,18 @@
       : _inTemplateSource =
             _library.source.fullName.contains(_templateExtension);
 
-  /// Produces a hint if [identifier] is accessed from an invalid location.
+  /// Produces a warning if [identifier] is accessed from an invalid location.
   ///
-  /// In particular, a hint is produced in either of the two following cases:
+  /// In particular, a warning is produced in either of the two following cases:
   ///
   /// * The element associated with [identifier] is annotated with [internal],
   ///   and is accessed from outside the package in which the element is
   ///   declared.
   /// * The element associated with [identifier] is annotated with [protected],
-  ///   [visibleForTesting], and/or [visibleForTemplate], and is accessed from a
+  ///   [visibleForTesting], and/or `visibleForTemplate`, and is accessed from a
   ///   location which is invalid as per the rules of each such annotation.
   ///   Conversely, if the element is annotated with more than one of these
-  ///   annotations, the access is valid (and no hint will be produced) if it
+  ///   annotations, the access is valid (and no warning is produced) if it
   ///   conforms to the rules of at least one of the annotations.
   void verify(SimpleIdentifier identifier) {
     if (identifier.inDeclarationContext() || _inCommentReference(identifier)) {
@@ -2375,3 +2376,15 @@
     }
   }
 }
+
+extension on Expression {
+  /// Whether this is a [PrefixedIdentifier] referring to `double.nan`.
+  // TODO(srawlins): This will return the wrong answer for `prefixed.double.nan`
+  // and for `import 'foo.dart' as double; double.nan`.
+  bool get isDoubleNan {
+    final self = this;
+    return self is PrefixedIdentifier &&
+        self.prefix.name == 'double' &&
+        self.identifier.name == 'nan';
+  }
+}
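
For reference, a small example of user code the `double.nan` checks above would flag, and the conventional fix:

```dart
bool isNotANumber(double d) {
  // Flagged: NaN compares unequal to everything, including itself, so
  // `d == double.nan` is always false.
  // return d == double.nan;

  // The correct test:
  return d.isNaN;
}
```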
diff --git a/pkg/analyzer/lib/src/error/codes.g.dart b/pkg/analyzer/lib/src/error/codes.g.dart
index bb8bcbd..2ffda66 100644
--- a/pkg/analyzer/lib/src/error/codes.g.dart
+++ b/pkg/analyzer/lib/src/error/codes.g.dart
@@ -6090,8 +6090,8 @@
     hasPublishedDocs: true,
   );
 
-  ///  This hint is generated anywhere a @factory annotation is associated with
-  ///  anything other than a method.
+  ///  This warning is generated anywhere a @factory annotation is associated
+  ///  with anything other than a method.
   static const WarningCode INVALID_FACTORY_ANNOTATION = WarningCode(
     'INVALID_FACTORY_ANNOTATION',
     "Only methods can be annotated as factories.",
@@ -6113,8 +6113,8 @@
     hasPublishedDocs: true,
   );
 
-  ///  This hint is generated anywhere an @immutable annotation is associated with
-  ///  anything other than a class.
+  ///  This warning is generated anywhere an @immutable annotation is associated
+  ///  with anything other than a class.
   static const WarningCode INVALID_IMMUTABLE_ANNOTATION = WarningCode(
     'INVALID_IMMUTABLE_ANNOTATION',
     "Only classes can be annotated as being immutable.",
@@ -6249,7 +6249,7 @@
     hasPublishedDocs: true,
   );
 
-  ///  This hint is generated anywhere where `@nonVirtual` annotates something
+  ///  This warning is generated anywhere where `@nonVirtual` annotates something
   ///  other than a non-abstract instance member in a class or mixin.
   ///
   ///  No Parameters.
@@ -6261,7 +6261,7 @@
     hasPublishedDocs: true,
   );
 
-  ///  This hint is generated anywhere where an instance member annotated with
+  ///  This warning is generated anywhere where an instance member annotated with
   ///  `@nonVirtual` is overridden in a subclass.
   ///
   ///  Parameters:
@@ -6274,7 +6274,7 @@
     hasPublishedDocs: true,
   );
 
-  ///  This hint is generated anywhere where `@required` annotates a named
+  ///  This warning is generated anywhere where `@required` annotates a named
   ///  parameter with a default value.
   ///
   ///  Parameters:
@@ -6286,7 +6286,7 @@
     correctionMessage: "Remove @required.",
   );
 
-  ///  This hint is generated anywhere where `@required` annotates an optional
+  ///  This warning is generated anywhere where `@required` annotates an optional
   ///  positional parameter.
   ///
   ///  Parameters:
@@ -6299,8 +6299,8 @@
     correctionMessage: "Remove @required.",
   );
 
-  ///  This hint is generated anywhere where `@required` annotates a non optional
-  ///  positional parameter.
+  ///  This warning is generated anywhere where `@required` annotates a
+  ///  non-optional positional parameter.
   ///
   ///  Parameters:
   ///  0: the name of the member
@@ -6311,8 +6311,8 @@
     correctionMessage: "Remove @required.",
   );
 
-  ///  This hint is generated anywhere where `@sealed` annotates something other
-  ///  than a class.
+  ///  This warning is generated anywhere where `@sealed` annotates something
+  ///  other than a class.
   ///
   ///  No parameters.
   static const WarningCode INVALID_SEALED_ANNOTATION = WarningCode(
@@ -6330,8 +6330,8 @@
     hasPublishedDocs: true,
   );
 
-  ///  This hint is generated anywhere where a member annotated with `@protected`
-  ///  is used outside of an instance member of a subclass.
+  ///  This warning is generated anywhere where a member annotated with
+  ///  `@protected` is used outside of an instance member of a subclass.
   ///
   ///  Parameters:
   ///  0: the name of the member
@@ -6351,7 +6351,7 @@
     hasPublishedDocs: true,
   );
 
-  ///  This hint is generated anywhere where a member annotated with
+  ///  This warning is generated anywhere where a member annotated with
   ///  `@visibleForTemplate` is used outside of a "template" Dart file.
   ///
   ///  Parameters:
@@ -6363,7 +6363,7 @@
     "The member '{0}' can only be used within '{1}' or a template library.",
   );
 
-  ///  This hint is generated anywhere where a member annotated with
+  ///  This warning is generated anywhere where a member annotated with
   ///  `@visibleForTesting` is used outside the defining library, or a test.
   ///
   ///  Parameters:
@@ -6376,8 +6376,8 @@
     hasPublishedDocs: true,
   );
 
-  ///  This hint is generated anywhere where a private declaration is annotated
-  ///  with `@visibleForTemplate` or `@visibleForTesting`.
+  ///  This warning is generated anywhere where a private declaration is
+  ///  annotated with `@visibleForTemplate` or `@visibleForTesting`.
   ///
   ///  Parameters:
   ///  0: the name of the member
@@ -6434,8 +6434,8 @@
     uniqueName: 'MISSING_OVERRIDE_OF_MUST_BE_OVERRIDDEN_TWO',
   );
 
-  ///  Generate a hint for a constructor, function or method invocation where a
-  ///  required parameter is missing.
+  ///  Generates a warning for a constructor, function or method invocation where
+  ///  a required parameter is missing.
   ///
   ///  Parameters:
   ///  0: the name of the parameter
@@ -6445,8 +6445,8 @@
     hasPublishedDocs: true,
   );
 
-  ///  Generate a hint for a constructor, function or method invocation where a
-  ///  required parameter is missing.
+  ///  Generates a warning for a constructor, function or method invocation where
+  ///  a required parameter is missing.
   ///
   ///  Parameters:
   ///  0: the name of the parameter
@@ -6469,7 +6469,7 @@
     hasPublishedDocs: true,
   );
 
-  ///  This hint is generated anywhere where a `@sealed` class is used as a
+  ///  This warning is generated anywhere where a `@sealed` class is used as
   ///  a superclass constraint of a mixin.
   ///
   ///  Parameters:
@@ -6485,7 +6485,7 @@
     hasPublishedDocs: true,
   );
 
-  ///  Generate a hint for classes that inherit from classes annotated with
+  ///  Generates a warning for classes that inherit from classes annotated with
   ///  `@immutable` but that are not immutable.
   ///
   ///  Parameters:
@@ -6507,7 +6507,7 @@
     hasPublishedDocs: true,
   );
 
-  ///  Generate a hint for non-const instance creation using a constructor
+  ///  Generates a warning for non-const instance creation using a constructor
   ///  annotated with `@literal`.
   ///
   ///  Parameters:
@@ -6520,7 +6520,7 @@
     hasPublishedDocs: true,
   );
 
-  ///  Generate a hint for non-const instance creation (with the `new` keyword)
+  ///  Generates a warning for non-const instance creation (with the `new` keyword)
   ///  using a constructor annotated with `@literal`.
   ///
   ///  Parameters:
diff --git a/pkg/analyzer/lib/src/error/dead_code_verifier.dart b/pkg/analyzer/lib/src/error/dead_code_verifier.dart
index b963c43..70975a7 100644
--- a/pkg/analyzer/lib/src/error/dead_code_verifier.dart
+++ b/pkg/analyzer/lib/src/error/dead_code_verifier.dart
@@ -102,21 +102,21 @@
     Namespace namespace =
         NamespaceBuilder().createExportNamespaceForLibrary(library);
     NodeList<SimpleIdentifier> names;
-    ErrorCode hintCode;
+    ErrorCode warningCode;
     if (combinator is HideCombinator) {
       names = combinator.hiddenNames;
-      hintCode = WarningCode.UNDEFINED_HIDDEN_NAME;
+      warningCode = WarningCode.UNDEFINED_HIDDEN_NAME;
     } else {
       names = (combinator as ShowCombinator).shownNames;
-      hintCode = WarningCode.UNDEFINED_SHOWN_NAME;
+      warningCode = WarningCode.UNDEFINED_SHOWN_NAME;
     }
     for (SimpleIdentifier name in names) {
       String nameStr = name.name;
       Element? element = namespace.get(nameStr);
       element ??= namespace.get("$nameStr=");
       if (element == null) {
-        _errorReporter
-            .reportErrorForNode(hintCode, name, [library.identifier, nameStr]);
+        _errorReporter.reportErrorForNode(
+            warningCode, name, [library.identifier, nameStr]);
       }
     }
   }
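
An illustrative snippet of what this verifier reports (the `Missing` name is hypothetical):

```dart
// `Missing` is not exported by dart:math, so the analyzer reports
// UNDEFINED_SHOWN_NAME on it; an undefined name in a `hide` combinator
// would similarly get UNDEFINED_HIDDEN_NAME. The code still compiles.
import 'dart:math' show Random, Missing;

final rng = Random();
```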
diff --git a/pkg/analyzer/lib/src/error/imports_verifier.dart b/pkg/analyzer/lib/src/error/imports_verifier.dart
index baca24c..9db227a 100644
--- a/pkg/analyzer/lib/src/error/imports_verifier.dart
+++ b/pkg/analyzer/lib/src/error/imports_verifier.dart
@@ -189,7 +189,7 @@
 /// otherwise a [HintCode.UNUSED_IMPORT] hint is generated with
 /// [generateUnusedImportHints].
 ///
-/// Additionally, [generateDuplicateImportHints] generates
+/// Additionally, [generateDuplicateImportWarnings] generates
 /// [HintCode.DUPLICATE_IMPORT] hints and [HintCode.UNUSED_SHOWN_NAME] hints.
 ///
 /// While this class does not yet have support for an "Organize Imports" action,
@@ -316,9 +316,9 @@
 
   /// Any time after the defining compilation unit has been visited by this
   /// visitor, this method can be called to report an
-  /// [StaticWarningCode.DUPLICATE_EXPORT] hint for each of the export
+  /// [WarningCode.DUPLICATE_EXPORT] warning for each of the export
   /// directives in the [_duplicateExports] list.
-  void generateDuplicateExportHints(ErrorReporter errorReporter) {
+  void generateDuplicateExportWarnings(ErrorReporter errorReporter) {
     var length = _duplicateExports.length;
     for (var i = 0; i < length; i++) {
       errorReporter.reportErrorForNode(
@@ -328,9 +328,9 @@
 
   /// Any time after the defining compilation unit has been visited by this
   /// visitor, this method can be called to report an
-  /// [StaticWarningCode.DUPLICATE_IMPORT] hint for each of the import
+  /// [WarningCode.DUPLICATE_IMPORT] warning for each of the import
   /// directives in the [_duplicateImports] list.
-  void generateDuplicateImportHints(ErrorReporter errorReporter) {
+  void generateDuplicateImportWarnings(ErrorReporter errorReporter) {
     var length = _duplicateImports.length;
     for (var i = 0; i < length; i++) {
       errorReporter.reportErrorForNode(
@@ -338,13 +338,13 @@
     }
   }
 
-  /// Report a [StaticWarningCode.DUPLICATE_SHOWN_NAME] and
-  /// [StaticWarningCode.DUPLICATE_HIDDEN_NAME] hints for each duplicate shown
-  /// or hidden name.
+  /// Report a [WarningCode.DUPLICATE_SHOWN_NAME] or
+  /// [WarningCode.DUPLICATE_HIDDEN_NAME] warning for each duplicate shown or
+  /// hidden name.
   ///
   /// Only call this method after all of the compilation units have been visited
   /// by this visitor.
-  void generateDuplicateShownHiddenNameHints(ErrorReporter reporter) {
+  void generateDuplicateShownHiddenNameWarnings(ErrorReporter reporter) {
     _duplicateHiddenNamesMap.forEach(
         (NamespaceDirective directive, List<SimpleIdentifier> identifiers) {
       int length = identifiers.length;
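
A small example of the duplicate-directive case these methods report:

```dart
// Both directives resolve to the same library, so the second one is
// reported as a duplicate import by generateDuplicateImportWarnings.
import 'dart:math';
import 'dart:math';

final rng = Random();
```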
diff --git a/pkg/analyzer/messages.yaml b/pkg/analyzer/messages.yaml
index e477bf6..0be9aef 100644
--- a/pkg/analyzer/messages.yaml
+++ b/pkg/analyzer/messages.yaml
@@ -22328,8 +22328,8 @@
   INVALID_FACTORY_ANNOTATION:
     problemMessage: Only methods can be annotated as factories.
     comment: |-
-      This hint is generated anywhere a @factory annotation is associated with
-      anything other than a method.
+      This warning is generated anywhere a @factory annotation is associated
+      with anything other than a method.
   INVALID_FACTORY_METHOD_DECL:
     problemMessage: "Factory method '{0}' must have a return type."
     comment: |-
@@ -22421,8 +22421,8 @@
   INVALID_IMMUTABLE_ANNOTATION:
     problemMessage: Only classes can be annotated as being immutable.
     comment: |-
-      This hint is generated anywhere an @immutable annotation is associated with
-      anything other than a class.
+      This warning is generated anywhere an @immutable annotation is associated
+      with anything other than a class.
   INVALID_INTERNAL_ANNOTATION:
     problemMessage: "Only public elements in a package's private API can be annotated as being internal."
     comment: |-
@@ -22631,7 +22631,7 @@
     correctionMessage: Try removing '@nonVirtual'.
     hasPublishedDocs: true
     comment: |-
-      This hint is generated anywhere where `@nonVirtual` annotates something
+      This warning is generated anywhere where `@nonVirtual` annotates something
       other than a non-abstract instance member in a class or mixin.
 
       No Parameters.
@@ -22711,7 +22711,7 @@
     problemMessage: "The member '{0}' is declared non-virtual in '{1}' and can't be overridden in subclasses."
     hasPublishedDocs: true
     comment: |-
-      This hint is generated anywhere where an instance member annotated with
+      This warning is generated anywhere where an instance member annotated with
       `@nonVirtual` is overridden in a subclass.
 
       Parameters:
@@ -22777,7 +22777,7 @@
     problemMessage: "The type parameter '{0}' is annotated with @required but only named parameters without a default value can be annotated with it."
     correctionMessage: Remove @required.
     comment: |-
-      This hint is generated anywhere where `@required` annotates a named
+      This warning is generated anywhere where `@required` annotates a named
       parameter with a default value.
 
       Parameters:
@@ -22786,7 +22786,7 @@
     problemMessage: "Incorrect use of the annotation @required on the optional positional parameter '{0}'. Optional positional parameters cannot be required."
     correctionMessage: Remove @required.
     comment: |-
-      This hint is generated anywhere where `@required` annotates an optional
+      This warning is generated anywhere where `@required` annotates an optional
       positional parameter.
 
       Parameters:
@@ -22795,8 +22795,8 @@
     problemMessage: "Redundant use of the annotation @required on the required positional parameter '{0}'."
     correctionMessage: Remove @required.
     comment: |-
-      This hint is generated anywhere where `@required` annotates a non optional
-      positional parameter.
+      This warning is generated anywhere where `@required` annotates a
+      non-optional positional parameter.
 
       Parameters:
       0: the name of the member
@@ -22841,8 +22841,8 @@
   INVALID_USE_OF_PROTECTED_MEMBER:
     problemMessage: "The member '{0}' can only be used within instance members of subclasses of '{1}'."
     comment: |-
-      This hint is generated anywhere where a member annotated with `@protected`
-      is used outside of an instance member of a subclass.
+      This warning is generated anywhere where a member annotated with
+      `@protected` is used outside of an instance member of a subclass.
 
       Parameters:
       0: the name of the member
@@ -22895,7 +22895,7 @@
   INVALID_USE_OF_VISIBLE_FOR_TEMPLATE_MEMBER:
     problemMessage: "The member '{0}' can only be used within '{1}' or a template library."
     comment: |-
-      This hint is generated anywhere where a member annotated with
+      This warning is generated anywhere where a member annotated with
       `@visibleForTemplate` is used outside of a "template" Dart file.
 
       Parameters:
@@ -22905,7 +22905,7 @@
     problemMessage: "The member '{0}' can only be used within '{1}' or a test."
     hasPublishedDocs: true
     comment: |-
-      This hint is generated anywhere where a member annotated with
+      This warning is generated anywhere where a member annotated with
       `@visibleForTesting` is used outside the defining library, or a test.
 
       Parameters:
@@ -22967,8 +22967,8 @@
     problemMessage: "The member '{0}' is annotated with '{1}', but this annotation is only meaningful on declarations of public members."
     hasPublishedDocs: true
     comment: |-
-      This hint is generated anywhere where a private declaration is annotated
-      with `@visibleForTemplate` or `@visibleForTesting`.
+      This warning is generated anywhere where a private declaration is
+      annotated with `@visibleForTemplate` or `@visibleForTesting`.
 
       Parameters:
       0: the name of the member
@@ -23117,8 +23117,8 @@
     correctionMessage: Try removing the '@sealed' annotation.
     hasPublishedDocs: true
     comment: |-
-      This hint is generated anywhere where `@sealed` annotates something other
-      than a class.
+      This warning is generated anywhere where `@sealed` annotates something
+      other than a class.
 
       No parameters.
     documentation: |-
@@ -23233,8 +23233,8 @@
     problemMessage: "The parameter '{0}' is required."
     hasPublishedDocs: true
     comment: |-
-      Generate a hint for a constructor, function or method invocation where a
-      required parameter is missing.
+      Generates a warning for a constructor, function or method invocation where
+      a required parameter is missing.
 
       Parameters:
       0: the name of the parameter
@@ -23280,8 +23280,8 @@
     problemMessage: "The parameter '{0}' is required. {1}."
     hasPublishedDocs: true
     comment: |-
-      Generate a hint for a constructor, function or method invocation where a
-      required parameter is missing.
+      Generates a warning for a constructor, function or method invocation where
+      a required parameter is missing.
 
       Parameters:
       0: the name of the parameter
@@ -23323,7 +23323,7 @@
     correctionMessage: Try composing with this class, or refer to its documentation for more information.
     hasPublishedDocs: true
     comment: |-
-      This hint is generated anywhere where a `@sealed` class is used as a
+      This warning is generated anywhere where a `@sealed` class is used as a
       a superclass constraint of a mixin.
 
       Parameters:
@@ -23366,7 +23366,7 @@
     problemMessage: "This class (or a class that this class inherits from) is marked as '@immutable', but one or more of its instance fields aren't final: {0}"
     hasPublishedDocs: true
     comment: |-
-      Generate a hint for classes that inherit from classes annotated with
+      Generates a warning for classes that inherit from classes annotated with
       `@immutable` but that are not immutable.
 
       Parameters:
@@ -23479,7 +23479,7 @@
     correctionMessage: "Try replacing the 'new' keyword with 'const'."
     hasPublishedDocs: true
     comment: |-
-      Generate a hint for non-const instance creation (with the `new` keyword)
+      Generates a warning for non-const instance creation (with the `new` keyword)
       using a constructor annotated with `@literal`.
 
       Parameters:
@@ -23489,7 +23489,7 @@
     correctionMessage: "Try adding a 'const' keyword."
     hasPublishedDocs: true
     comment: |-
-      Generate a hint for non-const instance creation using a constructor
+      Generates a warning for non-const instance creation using a constructor
       annotated with `@literal`.
 
       Parameters:
diff --git a/pkg/analyzer/test/src/diagnostics/unnecessary_nan_comparison_test.dart b/pkg/analyzer/test/src/diagnostics/unnecessary_nan_comparison_test.dart
index 548d352..845ec1d 100644
--- a/pkg/analyzer/test/src/diagnostics/unnecessary_nan_comparison_test.dart
+++ b/pkg/analyzer/test/src/diagnostics/unnecessary_nan_comparison_test.dart
@@ -15,6 +15,18 @@
 
 @reflectiveTest
 class UnnecessaryNanComparisonTest extends PubPackageResolutionTest {
+  test_constantPattern() async {
+    await assertErrorsInCode('''
+void f(List<double> list) {
+  switch (list) {
+    case [double.nan]:
+  }
+}
+''', [
+      error(WarningCode.UNNECESSARY_NAN_COMPARISON_FALSE, 56, 10),
+    ]);
+  }
+
   test_equal() async {
     await assertErrorsInCode('''
 void f(double d) {
diff --git a/pkg/compiler/lib/src/dart2js.dart b/pkg/compiler/lib/src/dart2js.dart
index f99409b..2615a08 100644
--- a/pkg/compiler/lib/src/dart2js.dart
+++ b/pkg/compiler/lib/src/dart2js.dart
@@ -732,13 +732,6 @@
 
   parseCommandLine(handlers, argv);
 
-  if (invoker == null) {
-    warning("The 'dart2js' entrypoint script is deprecated, "
-        "please use 'dart compile js' instead.");
-  } else if (verbose != null && !wantHelp) {
-    print("Compiler invoked from: '$invoker'");
-  }
-
   final diagnostic = diagnosticHandler = FormattingDiagnosticHandler();
   if (verbose != null) {
     diagnostic.verbose = verbose!;
@@ -778,6 +771,22 @@
     helpAndExit(wantHelp, wantVersion, diagnostic.verbose);
   }
 
+  if (invoker == null) {
+    final message = "The 'dart2js' entrypoint script is deprecated, "
+        "please use 'dart compile js' instead.";
+    // Aside from asking for `-h`, dart2js fails when it is invoked from its
+    // snapshot directly rather than through a supported workflow. However, we
+    // allow invoking dart2js from Dart sources to support the dart2js team's
+    // local workflows and testing.
+    if (!Platform.script.path.endsWith(".dart")) {
+      _fail(message);
+    } else {
+      warning(message);
+    }
+  } else if (verbose != null) {
+    print("Compiler invoked from: '$invoker'");
+  }
+
   if (arguments.isEmpty &&
       entryUri == null &&
       inputDillUri == null &&
diff --git a/pkg/compiler/lib/src/inferrer/work_queue.dart b/pkg/compiler/lib/src/inferrer/work_queue.dart
index e7923d8..9accd91 100644
--- a/pkg/compiler/lib/src/inferrer/work_queue.dart
+++ b/pkg/compiler/lib/src/inferrer/work_queue.dart
@@ -5,40 +5,18 @@
 import 'dart:collection' show Queue;
 import 'type_graph_nodes.dart';
 
-const _numBuckets = 2;
-
-/// This function applies a bucket index to each type information.
-///
-/// Current strategy:
-/// Process call sites together effectively splitting intraprocedural type
-/// refinement and local type refinement.
-int _bucketForInfo(TypeInformation info) {
-  if (info is! CallSiteTypeInformation) return 0;
-  return 1;
-}
-
 /// A work queue for the inferrer. It filters out nodes that are tagged as
 /// [TypeInformation.doNotEnqueue], as well as ensures through
 /// [TypeInformation.inQueue] that a node is in the queue only once at
 /// a time.
-///
-/// The queue uses a bucketed approach to allow the inferrer to make progress
-/// on certain categories of types while also ensuring no category is starved
-/// of resources. The queue draws work items from a bucket until it is empty
-/// and then proceeds onto the next bucket with work remaining. This allows
-/// related work items to be processed closer to each other.
 class WorkQueue {
-  final List<Queue<TypeInformation>> buckets =
-      List.generate(_numBuckets, (_) => Queue());
-  int _length = 0;
-  int _activeBucket = 0;
+  final Queue<TypeInformation> queue = Queue<TypeInformation>();
 
   void add(TypeInformation element) {
     if (element.doNotEnqueue) return;
     if (element.inQueue) return;
-    buckets[_bucketForInfo(element)].addLast(element);
+    queue.addLast(element);
     element.inQueue = true;
-    _length++;
   }
 
   void addAll(Iterable<TypeInformation> all) {
@@ -46,18 +24,12 @@
   }
 
   TypeInformation remove() {
-    var bucket = buckets[_activeBucket];
-    while (bucket.isEmpty) {
-      if (++_activeBucket == buckets.length) _activeBucket = 0;
-      bucket = buckets[_activeBucket];
-    }
-    TypeInformation element = bucket.removeFirst();
+    TypeInformation element = queue.removeFirst();
     element.inQueue = false;
-    _length--;
     return element;
   }
 
-  bool get isEmpty => _length == 0;
+  bool get isEmpty => queue.isEmpty;
 
-  int get length => _length;
+  int get length => queue.length;
 }
diff --git a/pkg/compiler/lib/src/inferrer_experimental/work_queue.dart b/pkg/compiler/lib/src/inferrer_experimental/work_queue.dart
index e7923d8..6e02e81 100644
--- a/pkg/compiler/lib/src/inferrer_experimental/work_queue.dart
+++ b/pkg/compiler/lib/src/inferrer_experimental/work_queue.dart
@@ -24,19 +24,23 @@
 ///
 /// The queue uses a bucketed approach to allow the inferrer to make progress
 /// on certain categories of types while also ensuring no category is starved
-/// of resources. The queue draws work items from a bucket until it is empty
-/// and then proceeds onto the next bucket with work remaining. This allows
-/// related work items to be processed closer to each other.
+/// of resources. We grab a "snapshot" of the active bucket and process
+/// elements from it until it is empty. Anything added to the bucket after we
+/// have grabbed the snapshot is not processed immediately. Instead, once the
+/// snapshot is empty, we move on to the next bucket. By deferring elements
+/// added after the snapshot, we prevent the queue from getting stuck in loops
+/// within the same bucket.
 class WorkQueue {
   final List<Queue<TypeInformation>> buckets =
-      List.generate(_numBuckets, (_) => Queue());
+      List.generate(_numBuckets, (_) => Queue(), growable: false);
   int _length = 0;
-  int _activeBucket = 0;
+  Queue<TypeInformation> _activeQueue = Queue();
+  int _activeBucketIndex = 0;
 
   void add(TypeInformation element) {
     if (element.doNotEnqueue) return;
     if (element.inQueue) return;
-    buckets[_bucketForInfo(element)].addLast(element);
+    buckets[_bucketForInfo(element)].add(element);
     element.inQueue = true;
     _length++;
   }
@@ -46,17 +50,27 @@
   }
 
   TypeInformation remove() {
-    var bucket = buckets[_activeBucket];
-    while (bucket.isEmpty) {
-      if (++_activeBucket == buckets.length) _activeBucket = 0;
-      bucket = buckets[_activeBucket];
+    while (_activeQueue.isEmpty) {
+      assert(_length != 0);
+      final bucket = buckets[_activeBucketIndex];
+      if (bucket.isNotEmpty) {
+        final tmp = _activeQueue;
+        _activeQueue = buckets[_activeBucketIndex];
+        buckets[_activeBucketIndex] = tmp;
+      }
+      _incrementBucketIndex();
     }
-    TypeInformation element = bucket.removeFirst();
-    element.inQueue = false;
+    final element = _activeQueue.removeFirst();
     _length--;
+    element.inQueue = false;
     return element;
   }
 
+  void _incrementBucketIndex() {
+    _activeBucketIndex =
+        _activeBucketIndex == buckets.length - 1 ? 0 : _activeBucketIndex + 1;
+  }
+
   bool get isEmpty => _length == 0;
 
   int get length => _length;
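
A toy, self-contained illustration of the snapshot-swap behavior described above (simplified to `String` items rather than the compiler's `TypeInformation` types):

```dart
import 'dart:collection' show Queue;

void main() {
  final buckets = List.generate(2, (_) => Queue<String>(), growable: false);
  var active = Queue<String>();
  var index = 0;

  String remove() {
    while (active.isEmpty) {
      if (buckets[index].isNotEmpty) {
        // Swap the non-empty bucket in as the active snapshot.
        final tmp = active;
        active = buckets[index];
        buckets[index] = tmp;
      }
      index = index == buckets.length - 1 ? 0 : index + 1;
    }
    return active.removeFirst();
  }

  buckets[0].addAll(['a1', 'a2']);
  buckets[1].add('b1');

  print(remove()); // a1
  buckets[0].add('a3'); // Arrives after the snapshot was taken: deferred.
  print(remove()); // a2
  print(remove()); // b1 -- bucket 1 gets a turn before a3 is processed.
  print(remove()); // a3
}
```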
diff --git a/pkg/compiler/tool/modular_test_suite_helper.dart b/pkg/compiler/tool/modular_test_suite_helper.dart
index 566cf76..66c4861 100644
--- a/pkg/compiler/tool/modular_test_suite_helper.dart
+++ b/pkg/compiler/tool/modular_test_suite_helper.dart
@@ -269,6 +269,7 @@
       _dart2jsScript,
       Flags.soundNullSafety,
       if (_options.useSdk) '--libraries-spec=$_librarySpecForSnapshot',
+      if (_options.useSdk) '--invoker=modular_test',
       // If we have sources, then we aren't building the SDK, otherwise we
       // assume we are building the sdk and pass in a full dill.
       if (sources.isNotEmpty)
@@ -344,6 +345,7 @@
       _dart2jsScript,
       // TODO(sigmund): remove this dependency on libraries.json
       if (_options.useSdk) '--libraries-spec=$_librarySpecForSnapshot',
+      if (_options.useSdk) '--invoker=modular_test',
       Flags.soundNullSafety,
       '${Flags.entryUri}=$fakeRoot${module.mainSource}',
       '${Flags.inputDill}=${toUri(module, dillId)}',
@@ -405,6 +407,7 @@
       _dart2jsScript,
       // TODO(sigmund): remove this dependency on libraries.json
       if (_options.useSdk) '--libraries-spec=$_librarySpecForSnapshot',
+      if (_options.useSdk) '--invoker=modular_test',
       Flags.soundNullSafety,
       '${Flags.entryUri}=$fakeRoot${module.mainSource}',
       '${Flags.inputDill}=${toUri(module, fullDillId)}',
@@ -455,6 +458,7 @@
       _dart2jsScript,
       // TODO(sigmund): remove this dependency on libraries.json
       if (_options.useSdk) '--libraries-spec=$_librarySpecForSnapshot',
+      if (_options.useSdk) '--invoker=modular_test',
       Flags.soundNullSafety,
       '${Flags.entryUri}=$fakeRoot${module.mainSource}',
       '${Flags.inputDill}=${toUri(module, globalUpdatedDillId)}',
@@ -509,6 +513,7 @@
       '--packages=${sdkRoot.toFilePath()}/$packageConfigJsonPath',
       _dart2jsScript,
       if (_options.useSdk) '--libraries-spec=$_librarySpecForSnapshot',
+      if (_options.useSdk) '--invoker=modular_test',
       Flags.soundNullSafety,
       '${Flags.entryUri}=$fakeRoot${module.mainSource}',
       '${Flags.inputDill}=${toUri(module, globalUpdatedDillId)}',
@@ -563,6 +568,7 @@
       '--packages=${sdkRoot.toFilePath()}/$packageConfigJsonPath',
       _dart2jsScript,
       if (_options.useSdk) '--libraries-spec=$_librarySpecForSnapshot',
+      if (_options.useSdk) '--invoker=modular_test',
       Flags.soundNullSafety,
       '${Flags.entryUri}=$fakeRoot${module.mainSource}',
       '${Flags.inputDill}=${toUri(module, globalUpdatedDillId)}',
diff --git a/pkg/js/CHANGELOG.md b/pkg/js/CHANGELOG.md
index 027e056..b580d15 100644
--- a/pkg/js/CHANGELOG.md
+++ b/pkg/js/CHANGELOG.md
@@ -1,6 +1,7 @@
-## 0.6.8
+## 0.6.9-dev
 
 - Remove dependency on `dart:js`.
+- Update SDK lower constraint to 3.0.0-217.0.dev.
 - Update SDK upper constraint to 4.0.0.
 
 ## 0.6.7
diff --git a/pkg/js/pubspec.yaml b/pkg/js/pubspec.yaml
index fbbe092..b746a97 100644
--- a/pkg/js/pubspec.yaml
+++ b/pkg/js/pubspec.yaml
@@ -1,10 +1,10 @@
 name: js
-version: 0.6.8
+version: 0.6.9-dev
 description: Annotations to create static Dart interfaces for JavaScript APIs.
 repository: https://github.com/dart-lang/sdk/tree/main/pkg/js
 
 environment:
-  sdk: ">=2.19.0 <4.0.0"
+  sdk: ">=3.0.0-217.0.dev <4.0.0"
 
 dependencies:
   meta: ^1.7.0
diff --git a/runtime/vm/compiler/stub_code_compiler.cc b/runtime/vm/compiler/stub_code_compiler.cc
index 4df6db1..2bd4b6b 100644
--- a/runtime/vm/compiler/stub_code_compiler.cc
+++ b/runtime/vm/compiler/stub_code_compiler.cc
@@ -38,7 +38,7 @@
   return slots_from_fp;
 }
 
-void StubCodeCompiler::GenerateInitStaticFieldStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateInitStaticFieldStub() {
   __ EnterStubFrame();
   __ PushObject(NullObject());  // Make room for result.
   __ PushRegister(InitStaticFieldABI::kFieldReg);
@@ -49,8 +49,7 @@
   __ Ret();
 }
 
-void StubCodeCompiler::GenerateInitLateStaticFieldStub(Assembler* assembler,
-                                                       bool is_final) {
+void StubCodeCompiler::GenerateInitLateStaticFieldStub(bool is_final) {
   const Register kResultReg = InitStaticFieldABI::kResultReg;
   const Register kFieldReg = InitStaticFieldABI::kFieldReg;
   const Register kAddressReg = InitLateStaticFieldInternalRegs::kAddressReg;
@@ -100,16 +99,15 @@
   }
 }
 
-void StubCodeCompiler::GenerateInitLateStaticFieldStub(Assembler* assembler) {
-  GenerateInitLateStaticFieldStub(assembler, /*is_final=*/false);
+void StubCodeCompiler::GenerateInitLateStaticFieldStub() {
+  GenerateInitLateStaticFieldStub(/*is_final=*/false);
 }
 
-void StubCodeCompiler::GenerateInitLateFinalStaticFieldStub(
-    Assembler* assembler) {
-  GenerateInitLateStaticFieldStub(assembler, /*is_final=*/true);
+void StubCodeCompiler::GenerateInitLateFinalStaticFieldStub() {
+  GenerateInitLateStaticFieldStub(/*is_final=*/true);
 }
 
-void StubCodeCompiler::GenerateInitInstanceFieldStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateInitInstanceFieldStub() {
   __ EnterStubFrame();
   __ PushObject(NullObject());  // Make room for result.
   __ PushRegistersInOrder(
@@ -121,8 +119,7 @@
   __ Ret();
 }
 
-void StubCodeCompiler::GenerateInitLateInstanceFieldStub(Assembler* assembler,
-                                                         bool is_final) {
+void StubCodeCompiler::GenerateInitLateInstanceFieldStub(bool is_final) {
   const Register kInstanceReg = InitInstanceFieldABI::kInstanceReg;
   const Register kFieldReg = InitInstanceFieldABI::kFieldReg;
   const Register kAddressReg = InitLateInstanceFieldInternalRegs::kAddressReg;
@@ -196,16 +193,15 @@
   }
 }
 
-void StubCodeCompiler::GenerateInitLateInstanceFieldStub(Assembler* assembler) {
-  GenerateInitLateInstanceFieldStub(assembler, /*is_final=*/false);
+void StubCodeCompiler::GenerateInitLateInstanceFieldStub() {
+  GenerateInitLateInstanceFieldStub(/*is_final=*/false);
 }
 
-void StubCodeCompiler::GenerateInitLateFinalInstanceFieldStub(
-    Assembler* assembler) {
-  GenerateInitLateInstanceFieldStub(assembler, /*is_final=*/true);
+void StubCodeCompiler::GenerateInitLateFinalInstanceFieldStub() {
+  GenerateInitLateInstanceFieldStub(/*is_final=*/true);
 }
 
-void StubCodeCompiler::GenerateThrowStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateThrowStub() {
   __ EnterStubFrame();
   __ PushObject(NullObject());  // Make room for (unused) result.
   __ PushRegister(ThrowABI::kExceptionReg);
@@ -213,7 +209,7 @@
   __ Breakpoint();
 }
 
-void StubCodeCompiler::GenerateReThrowStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateReThrowStub() {
   __ EnterStubFrame();
   __ PushObject(NullObject());  // Make room for (unused) result.
   __ PushRegistersInOrder(
@@ -222,7 +218,7 @@
   __ Breakpoint();
 }
 
-void StubCodeCompiler::GenerateAssertBooleanStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAssertBooleanStub() {
   __ EnterStubFrame();
   __ PushObject(NullObject());  // Make room for (unused) result.
   __ PushRegister(AssertBooleanABI::kObjectReg);
@@ -230,7 +226,7 @@
   __ Breakpoint();
 }
 
-void StubCodeCompiler::GenerateAssertSubtypeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAssertSubtypeStub() {
   __ EnterStubFrame();
   __ PushRegistersInOrder({AssertSubtypeABI::kInstantiatorTypeArgumentsReg,
                            AssertSubtypeABI::kFunctionTypeArgumentsReg,
@@ -243,7 +239,7 @@
   __ Ret();
 }
 
-void StubCodeCompiler::GenerateAssertAssignableStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAssertAssignableStub() {
 #if !defined(TARGET_ARCH_IA32)
   __ Breakpoint();
 #else
@@ -277,8 +273,7 @@
 // - InstantiationABI::kResultTypeArgumentsReg: instantiated tav
 // Clobbers:
 // - InstantiationABI::kScratchReg
-void StubCodeCompiler::GenerateInstantiateTypeArgumentsStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateInstantiateTypeArgumentsStub() {
   // We only need the offset of the current entry up until we either call
   // the runtime or until we retrieve the instantiated type arguments out of it
   // to put in the result register, so we use the result register to store it.
@@ -495,8 +490,7 @@
 }
 
 void StubCodeCompiler::
-    GenerateInstantiateTypeArgumentsMayShareInstantiatorTAStub(
-        Assembler* assembler) {
+    GenerateInstantiateTypeArgumentsMayShareInstantiatorTAStub() {
   const Register kScratch1Reg = InstantiationABI::kResultTypeArgumentsReg;
   const Register kScratch2Reg = InstantiationABI::kScratchReg;
   // Return the instantiator type arguments if its nullability is compatible for
@@ -518,11 +512,11 @@
   __ Ret();
 
   __ Bind(&cache_lookup);
-  GenerateInstantiateTypeArgumentsStub(assembler);
+  GenerateInstantiateTypeArgumentsStub();
 }
 
-void StubCodeCompiler::GenerateInstantiateTypeArgumentsMayShareFunctionTAStub(
-    Assembler* assembler) {
+void StubCodeCompiler::
+    GenerateInstantiateTypeArgumentsMayShareFunctionTAStub() {
   const Register kScratch1Reg = InstantiationABI::kResultTypeArgumentsReg;
   const Register kScratch2Reg = InstantiationABI::kScratchReg;
   // Return the function type arguments if its nullability is compatible for
@@ -544,7 +538,7 @@
   __ Ret();
 
   __ Bind(&cache_lookup);
-  GenerateInstantiateTypeArgumentsStub(assembler);
+  GenerateInstantiateTypeArgumentsStub();
 }
 
 static void BuildInstantiateTypeRuntimeCall(Assembler* assembler) {
@@ -626,48 +620,45 @@
   BuildInstantiateTypeRuntimeCall(assembler);
 }
 
-void StubCodeCompiler::GenerateInstantiateTypeNonNullableClassTypeParameterStub(
-    Assembler* assembler) {
+void StubCodeCompiler::
+    GenerateInstantiateTypeNonNullableClassTypeParameterStub() {
   BuildInstantiateTypeParameterStub(assembler, Nullability::kNonNullable,
                                     /*is_function_parameter=*/false);
 }
 
-void StubCodeCompiler::GenerateInstantiateTypeNullableClassTypeParameterStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateInstantiateTypeNullableClassTypeParameterStub() {
   BuildInstantiateTypeParameterStub(assembler, Nullability::kNullable,
                                     /*is_function_parameter=*/false);
 }
 
-void StubCodeCompiler::GenerateInstantiateTypeLegacyClassTypeParameterStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateInstantiateTypeLegacyClassTypeParameterStub() {
   BuildInstantiateTypeParameterStub(assembler, Nullability::kLegacy,
                                     /*is_function_parameter=*/false);
 }
 
 void StubCodeCompiler::
-    GenerateInstantiateTypeNonNullableFunctionTypeParameterStub(
-        Assembler* assembler) {
+    GenerateInstantiateTypeNonNullableFunctionTypeParameterStub() {
   BuildInstantiateTypeParameterStub(assembler, Nullability::kNonNullable,
                                     /*is_function_parameter=*/true);
 }
 
-void StubCodeCompiler::GenerateInstantiateTypeNullableFunctionTypeParameterStub(
-    Assembler* assembler) {
+void StubCodeCompiler::
+    GenerateInstantiateTypeNullableFunctionTypeParameterStub() {
   BuildInstantiateTypeParameterStub(assembler, Nullability::kNullable,
                                     /*is_function_parameter=*/true);
 }
 
-void StubCodeCompiler::GenerateInstantiateTypeLegacyFunctionTypeParameterStub(
-    Assembler* assembler) {
+void StubCodeCompiler::
+    GenerateInstantiateTypeLegacyFunctionTypeParameterStub() {
   BuildInstantiateTypeParameterStub(assembler, Nullability::kLegacy,
                                     /*is_function_parameter=*/true);
 }
 
-void StubCodeCompiler::GenerateInstantiateTypeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateInstantiateTypeStub() {
   BuildInstantiateTypeRuntimeCall(assembler);
 }
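
The six type-parameter wrappers above are one generator fanned out over a (nullability, parameter-kind) grid so each combination gets its own stub entry point. The same pattern in miniature, with illustrative names:

#include <cstdio>

enum class Nullability { kNonNullable, kNullable, kLegacy };

// One parameterized generator...
void BuildInstantiateTypeParameterSketch(Nullability nullability,
                                         bool is_function_parameter) {
  std::printf("emit stub: nullability=%d, function_parameter=%d\n",
              static_cast<int>(nullability),
              static_cast<int>(is_function_parameter));
}

// ...and thin, individually addressable entry points.
void GenerateNonNullableClassSketch() {
  BuildInstantiateTypeParameterSketch(Nullability::kNonNullable,
                                      /*is_function_parameter=*/false);
}
void GenerateNullableFunctionSketch() {
  BuildInstantiateTypeParameterSketch(Nullability::kNullable,
                                      /*is_function_parameter=*/true);
}
// The remaining four combinations follow the same shape.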
 
-void StubCodeCompiler::GenerateInstanceOfStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateInstanceOfStub() {
   __ EnterStubFrame();
   __ PushObject(NullObject());  // Make room for the result.
   __ PushRegistersInOrder({TypeTestABI::kInstanceReg, TypeTestABI::kDstTypeReg,
@@ -806,16 +797,12 @@
   __ Jump(&check_top_type, compiler::Assembler::kNearJump);
 }
 
-void StubCodeCompiler::GenerateTypeIsTopTypeForSubtypingStub(
-    Assembler* assembler) {
-  GenerateTypeIsTopTypeForSubtyping(assembler,
-                                    /*null_safety=*/false);
+void StubCodeCompiler::GenerateTypeIsTopTypeForSubtypingStub() {
+  GenerateTypeIsTopTypeForSubtyping(assembler, /*null_safety=*/false);
 }
 
-void StubCodeCompiler::GenerateTypeIsTopTypeForSubtypingNullSafeStub(
-    Assembler* assembler) {
-  GenerateTypeIsTopTypeForSubtyping(assembler,
-                                    /*null_safety=*/true);
+void StubCodeCompiler::GenerateTypeIsTopTypeForSubtypingNullSafeStub() {
+  GenerateTypeIsTopTypeForSubtyping(assembler, /*null_safety=*/true);
 }
 
 // Version of Instance::NullIsAssignableTo(other, inst_tav, fun_tav) used when
@@ -955,16 +942,12 @@
   __ Ret();
 }
 
-void StubCodeCompiler::GenerateNullIsAssignableToTypeStub(
-    Assembler* assembler) {
-  GenerateNullIsAssignableToType(assembler,
-                                 /*null_safety=*/false);
+void StubCodeCompiler::GenerateNullIsAssignableToTypeStub() {
+  GenerateNullIsAssignableToType(assembler, /*null_safety=*/false);
 }
 
-void StubCodeCompiler::GenerateNullIsAssignableToTypeNullSafeStub(
-    Assembler* assembler) {
-  GenerateNullIsAssignableToType(assembler,
-                                 /*null_safety=*/true);
+void StubCodeCompiler::GenerateNullIsAssignableToTypeNullSafeStub() {
+  GenerateNullIsAssignableToType(assembler, /*null_safety=*/true);
 }
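
Both variants encode the same decision procedure and differ only in the null_safety flag. A simplified sketch of the predicate (the real logic also instantiates type parameters and handles FutureOr; the names here are illustrative):

enum class Nullability { kNonNullable, kNullable, kLegacy };

struct TypeSketch {
  Nullability nullability;
  bool is_top;  // dynamic, void, or a nullable/legacy Object
};

// null_safety mirrors the /*null_safety=*/ flag threaded through above.
bool NullIsAssignableTo(const TypeSketch& type, bool null_safety) {
  if (type.is_top) return true;
  if (type.nullability == Nullability::kNullable) return true;
  // Legacy (T*) types accept null; under sound null safety only the
  // nullable and top cases remain.
  if (!null_safety && type.nullability == Nullability::kLegacy) return true;
  return false;
}
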
 #if !defined(TARGET_ARCH_IA32)
 // The <X>TypeTestStubs are used to test whether a given value is of a given
@@ -986,15 +969,14 @@
 //
 // Note of warning: The caller will not populate CODE_REG, so we have no
 // access to the object pool.
-void StubCodeCompiler::GenerateDefaultTypeTestStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDefaultTypeTestStub() {
   __ LoadFromOffset(CODE_REG, THR,
                     target::Thread::slow_type_test_stub_offset());
   __ Jump(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
 }
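
The dispatch scheme is easiest to see as data: every type carries a test-stub pointer, and the default stub merely forwards to the shared slow path cached on the thread. A hedged sketch of that indirection:

// Illustrative model of type-testing-stub dispatch; the real stub loads
// CODE_REG from THR and jumps to the code's entry point, without a frame
// and without pool access.
struct ThreadSketch {
  void (*slow_type_test_stub)(const void* value, const void* dst_type);
};

void DefaultTypeTestStubSketch(ThreadSketch* thread,
                               const void* value,
                               const void* dst_type) {
  thread->slow_type_test_stub(value, dst_type);  // tail call
}

// Counterpart of GenerateTopTypeTypeTestStub: every value passes.
void TopTypeTypeTestStubSketch(ThreadSketch*, const void*, const void*) {}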
 
 // Used instead of DefaultTypeTestStub when null is assignable.
-void StubCodeCompiler::GenerateDefaultNullableTypeTestStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateDefaultNullableTypeTestStub() {
   Label done;
 
   // Fast case for 'null'.
@@ -1009,11 +991,11 @@
   __ Ret();
 }
 
-void StubCodeCompiler::GenerateTopTypeTypeTestStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateTopTypeTypeTestStub() {
   __ Ret();
 }
 
-void StubCodeCompiler::GenerateUnreachableTypeTestStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateUnreachableTypeTestStub() {
   __ Breakpoint();
 }
 
@@ -1057,12 +1039,11 @@
   __ Ret();
 }
 
-void StubCodeCompiler::GenerateNullableTypeParameterTypeTestStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateNullableTypeParameterTypeTestStub() {
   BuildTypeParameterTypeTestStub(assembler, /*allow_null=*/true);
 }
 
-void StubCodeCompiler::GenerateTypeParameterTypeTestStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateTypeParameterTypeTestStub() {
   BuildTypeParameterTypeTestStub(assembler, /*allow_null=*/false);
 }
 
@@ -1086,8 +1067,7 @@
   __ Drop(1);  // Discard return value.
 }
 
-void StubCodeCompiler::GenerateLazySpecializeTypeTestStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateLazySpecializeTypeTestStub() {
   __ LoadFromOffset(CODE_REG, THR,
                     target::Thread::lazy_specialize_type_test_stub_offset());
   __ EnterStubFrame();
@@ -1097,8 +1077,7 @@
 }
 
 // Used instead of LazySpecializeTypeTestStub when null is assignable.
-void StubCodeCompiler::GenerateLazySpecializeNullableTypeTestStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateLazySpecializeNullableTypeTestStub() {
   Label done;
 
   __ CompareObject(TypeTestABI::kInstanceReg, NullObject());
@@ -1114,7 +1093,7 @@
   __ Ret();
 }
 
-void StubCodeCompiler::GenerateSlowTypeTestStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSlowTypeTestStub() {
   Label done, call_runtime;
 
   if (!FLAG_precompiled_mode) {
@@ -1188,7 +1167,7 @@
 #else
 // Type testing stubs are not implemented on IA32.
 #define GENERATE_BREAKPOINT_STUB(Name)                                         \
-  void StubCodeCompiler::Generate##Name##Stub(Assembler* assembler) {          \
+  void StubCodeCompiler::Generate##Name##Stub() {                              \
     __ Breakpoint();                                                           \
   }
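
VM_STUB_CODE_LIST is an X-macro: the list applies a caller-supplied macro to every stub name, so the one-line GENERATE_BREAKPOINT_STUB definition above stamps out a full set of placeholder bodies. The technique in a self-contained form (demo names only):

#include <cstdio>

// The list macro names every item once; generators are applied over it.
#define DEMO_STUB_LIST(V)                                                      \
  V(DefaultTypeTest)                                                           \
  V(SlowTypeTest)

// Same shape as GENERATE_BREAKPOINT_STUB: one body per listed name.
#define GENERATE_PLACEHOLDER_STUB(Name)                                        \
  void Generate##Name##Stub() { std::puts("breakpoint: " #Name); }

DEMO_STUB_LIST(GENERATE_PLACEHOLDER_STUB)
#undef GENERATE_PLACEHOLDER_STUB

int main() {
  GenerateDefaultTypeTestStub();
  GenerateSlowTypeTestStub();
  return 0;
}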
 
@@ -1204,7 +1183,7 @@
 //   AllocateClosureABI::kResultReg: newly allocated Closure object.
 // Clobbered:
 //   AllocateClosureABI::kScratchReg
-void StubCodeCompiler::GenerateAllocateClosureStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateClosureStub() {
   const intptr_t instance_size =
       target::RoundedAllocationSize(target::Closure::InstanceSize());
   __ EnsureHasClassIdInDEBUG(kFunctionCid, AllocateClosureABI::kFunctionReg,
@@ -1280,7 +1259,7 @@
   __ PopRegister(AllocateClosureABI::kFunctionReg);
   __ PopRegister(AllocateClosureABI::kResultReg);
   ASSERT(target::WillAllocateNewOrRememberedObject(instance_size));
-  EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
+  EnsureIsNewOrRemembered(/*preserve_registers=*/false);
   __ LeaveStubFrame();
 
   // AllocateClosureABI::kResultReg: new object
@@ -1290,7 +1269,7 @@
 // Generates the allocation stub for the _GrowableList class.
 // This stub exists solely for performance reasons: the default allocation
 // stub is slower because it doesn't use specialized inline allocation.
-void StubCodeCompiler::GenerateAllocateGrowableArrayStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateGrowableArrayStub() {
 #if defined(TARGET_ARCH_IA32)
   // This stub is not used on IA32 because the IA32 version of
   // StubCodeCompiler::GenerateAllocationStubForClass uses inline
@@ -1324,7 +1303,7 @@
 #endif  // defined(TARGET_ARCH_IA32)
 }
 
-void StubCodeCompiler::GenerateAllocateRecordStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateRecordStub() {
   const Register result_reg = AllocateRecordABI::kResultReg;
   const Register shape_reg = AllocateRecordABI::kShapeReg;
   const Register temp_reg = AllocateRecordABI::kTemp1Reg;
@@ -1428,13 +1407,12 @@
   __ Drop(1);
   __ PopRegister(AllocateRecordABI::kResultReg);
 
-  EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
+  EnsureIsNewOrRemembered(/*preserve_registers=*/false);
   __ LeaveStubFrame();
   __ Ret();
 }
 
-void StubCodeCompiler::GenerateAllocateSmallRecordStub(Assembler* assembler,
-                                                       intptr_t num_fields,
+void StubCodeCompiler::GenerateAllocateSmallRecordStub(intptr_t num_fields,
                                                        bool has_named_fields) {
   ASSERT(num_fields == 2 || num_fields == 3);
   const Register result_reg = AllocateSmallRecordABI::kResultReg;
@@ -1505,31 +1483,30 @@
   __ Drop(4);
   __ PopRegister(result_reg);
 
-  EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
+  EnsureIsNewOrRemembered(/*preserve_registers=*/false);
   __ LeaveStubFrame();
   __ Ret();
 }
 
-void StubCodeCompiler::GenerateAllocateRecord2Stub(Assembler* assembler) {
-  GenerateAllocateSmallRecordStub(assembler, 2, /*has_named_fields=*/false);
+void StubCodeCompiler::GenerateAllocateRecord2Stub() {
+  GenerateAllocateSmallRecordStub(2, /*has_named_fields=*/false);
 }
 
-void StubCodeCompiler::GenerateAllocateRecord2NamedStub(Assembler* assembler) {
-  GenerateAllocateSmallRecordStub(assembler, 2, /*has_named_fields=*/true);
+void StubCodeCompiler::GenerateAllocateRecord2NamedStub() {
+  GenerateAllocateSmallRecordStub(2, /*has_named_fields=*/true);
 }
 
-void StubCodeCompiler::GenerateAllocateRecord3Stub(Assembler* assembler) {
-  GenerateAllocateSmallRecordStub(assembler, 3, /*has_named_fields=*/false);
+void StubCodeCompiler::GenerateAllocateRecord3Stub() {
+  GenerateAllocateSmallRecordStub(3, /*has_named_fields=*/false);
 }
 
-void StubCodeCompiler::GenerateAllocateRecord3NamedStub(Assembler* assembler) {
-  GenerateAllocateSmallRecordStub(assembler, 3, /*has_named_fields=*/true);
+void StubCodeCompiler::GenerateAllocateRecord3NamedStub() {
+  GenerateAllocateSmallRecordStub(3, /*has_named_fields=*/true);
 }
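
Records with two or three fields are common enough to earn shape-specialized stubs; everything else takes the generic AllocateRecord path. A sketch of the choice a caller-side compiler might make when picking a stub (illustrative; the real choice happens when the call is emitted):

#include <cstdint>
#include <cstdio>

void AllocateRecordGenericSketch(intptr_t shape) {
  std::printf("generic stub, shape=%lld\n", static_cast<long long>(shape));
}
void AllocateRecord2Sketch(bool named) {
  std::printf("specialized 2-field stub, named=%d\n", static_cast<int>(named));
}
void AllocateRecord3Sketch(bool named) {
  std::printf("specialized 3-field stub, named=%d\n", static_cast<int>(named));
}

void EmitRecordAllocation(intptr_t num_fields, bool has_named_fields,
                          intptr_t shape) {
  if (num_fields == 2) return AllocateRecord2Sketch(has_named_fields);
  if (num_fields == 3) return AllocateRecord3Sketch(has_named_fields);
  AllocateRecordGenericSketch(shape);  // uncommon shapes: general slow path
}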
 
 // The UnhandledException class lives in the VM isolate, so it cannot cache
 // an allocation stub for itself. Instead, we cache it in the stub code list.
-void StubCodeCompiler::GenerateAllocateUnhandledExceptionStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateUnhandledExceptionStub() {
   Thread* thread = Thread::Current();
   auto class_table = thread->isolate_group()->class_table();
   ASSERT(class_table->HasValidClassAt(kUnhandledExceptionCid));
@@ -1537,27 +1514,25 @@
                                       class_table->At(kUnhandledExceptionCid));
   ASSERT(!cls.IsNull());
 
-  GenerateAllocationStubForClass(assembler, nullptr, cls,
-                                 Code::Handle(Code::null()),
+  GenerateAllocationStubForClass(nullptr, cls, Code::Handle(Code::null()),
                                  Code::Handle(Code::null()));
 }
 
 #define TYPED_DATA_ALLOCATION_STUB(clazz)                                      \
-  void StubCodeCompiler::GenerateAllocate##clazz##Stub(Assembler* assembler) { \
-    GenerateAllocateTypedDataArrayStub(assembler, kTypedData##clazz##Cid);     \
+  void StubCodeCompiler::GenerateAllocate##clazz##Stub() {                     \
+    GenerateAllocateTypedDataArrayStub(kTypedData##clazz##Cid);                \
   }
 CLASS_LIST_TYPED_DATA(TYPED_DATA_ALLOCATION_STUB)
 #undef TYPED_DATA_ALLOCATION_STUB
 
-void StubCodeCompiler::GenerateLateInitializationError(Assembler* assembler,
-                                                       bool with_fpu_regs) {
+void StubCodeCompiler::GenerateLateInitializationError(bool with_fpu_regs) {
   auto perform_runtime_call = [&]() {
     __ PushRegister(LateInitializationErrorABI::kFieldReg);
     __ CallRuntime(kLateFieldNotInitializedErrorRuntimeEntry,
                    /*argument_count=*/1);
   };
   GenerateSharedStubGeneric(
-      assembler, /*save_fpu_registers=*/with_fpu_regs,
+      /*save_fpu_registers=*/with_fpu_regs,
       with_fpu_regs
           ? target::Thread::
                 late_initialization_error_shared_with_fpu_regs_stub_offset()
@@ -1566,125 +1541,109 @@
       /*allow_return=*/false, perform_runtime_call);
 }
 
-void StubCodeCompiler::GenerateLateInitializationErrorSharedWithoutFPURegsStub(
-    Assembler* assembler) {
-  GenerateLateInitializationError(assembler, /*with_fpu_regs=*/false);
+void StubCodeCompiler::
+    GenerateLateInitializationErrorSharedWithoutFPURegsStub() {
+  GenerateLateInitializationError(/*with_fpu_regs=*/false);
 }
 
-void StubCodeCompiler::GenerateLateInitializationErrorSharedWithFPURegsStub(
-    Assembler* assembler) {
-  GenerateLateInitializationError(assembler, /*with_fpu_regs=*/true);
+void StubCodeCompiler::GenerateLateInitializationErrorSharedWithFPURegsStub() {
+  GenerateLateInitializationError(/*with_fpu_regs=*/true);
 }
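
Every shared slow-path stub in this file comes in a with-FPU and a without-FPU flavor; the boolean selects both the register-save set and the per-thread slot holding the stub's own code object. The shape of that pairing, sketched with placeholder offsets:

#include <cstdint>
#include <cstdio>

// Placeholder values standing in for the two Thread slot offsets.
constexpr intptr_t kWithFpuStubOffset = 0x100;
constexpr intptr_t kWithoutFpuStubOffset = 0x108;

void EmitSharedStubSketch(bool save_fpu_registers,
                          intptr_t self_code_stub_offset_from_thread) {
  std::printf("save_fpu=%d, self code slot=+%lld\n",
              static_cast<int>(save_fpu_registers),
              static_cast<long long>(self_code_stub_offset_from_thread));
}

// One parameterized body, two thin entry points - the pattern used by the
// late-initialization, range and write error stubs above and below.
void EmitErrorStubSketch(bool with_fpu_regs) {
  EmitSharedStubSketch(
      /*save_fpu_registers=*/with_fpu_regs,
      with_fpu_regs ? kWithFpuStubOffset : kWithoutFpuStubOffset);
}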
 
-void StubCodeCompiler::GenerateNullErrorSharedWithoutFPURegsStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateNullErrorSharedWithoutFPURegsStub() {
   GenerateSharedStub(
-      assembler, /*save_fpu_registers=*/false, &kNullErrorRuntimeEntry,
+      /*save_fpu_registers=*/false, &kNullErrorRuntimeEntry,
       target::Thread::null_error_shared_without_fpu_regs_stub_offset(),
       /*allow_return=*/false);
 }
 
-void StubCodeCompiler::GenerateNullErrorSharedWithFPURegsStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateNullErrorSharedWithFPURegsStub() {
   GenerateSharedStub(
-      assembler, /*save_fpu_registers=*/true, &kNullErrorRuntimeEntry,
+      /*save_fpu_registers=*/true, &kNullErrorRuntimeEntry,
       target::Thread::null_error_shared_with_fpu_regs_stub_offset(),
       /*allow_return=*/false);
 }
 
-void StubCodeCompiler::GenerateNullArgErrorSharedWithoutFPURegsStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateNullArgErrorSharedWithoutFPURegsStub() {
   GenerateSharedStub(
-      assembler, /*save_fpu_registers=*/false, &kArgumentNullErrorRuntimeEntry,
+      /*save_fpu_registers=*/false, &kArgumentNullErrorRuntimeEntry,
       target::Thread::null_arg_error_shared_without_fpu_regs_stub_offset(),
       /*allow_return=*/false);
 }
 
-void StubCodeCompiler::GenerateNullArgErrorSharedWithFPURegsStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateNullArgErrorSharedWithFPURegsStub() {
   GenerateSharedStub(
-      assembler, /*save_fpu_registers=*/true, &kArgumentNullErrorRuntimeEntry,
+      /*save_fpu_registers=*/true, &kArgumentNullErrorRuntimeEntry,
       target::Thread::null_arg_error_shared_with_fpu_regs_stub_offset(),
       /*allow_return=*/false);
 }
 
-void StubCodeCompiler::GenerateNullCastErrorSharedWithoutFPURegsStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateNullCastErrorSharedWithoutFPURegsStub() {
   GenerateSharedStub(
-      assembler, /*save_fpu_registers=*/false, &kNullCastErrorRuntimeEntry,
+      /*save_fpu_registers=*/false, &kNullCastErrorRuntimeEntry,
       target::Thread::null_cast_error_shared_without_fpu_regs_stub_offset(),
       /*allow_return=*/false);
 }
 
-void StubCodeCompiler::GenerateNullCastErrorSharedWithFPURegsStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateNullCastErrorSharedWithFPURegsStub() {
   GenerateSharedStub(
-      assembler, /*save_fpu_registers=*/true, &kNullCastErrorRuntimeEntry,
+      /*save_fpu_registers=*/true, &kNullCastErrorRuntimeEntry,
       target::Thread::null_cast_error_shared_with_fpu_regs_stub_offset(),
       /*allow_return=*/false);
 }
 
-void StubCodeCompiler::GenerateStackOverflowSharedWithoutFPURegsStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateStackOverflowSharedWithoutFPURegsStub() {
   GenerateSharedStub(
-      assembler, /*save_fpu_registers=*/false,
-      &kInterruptOrStackOverflowRuntimeEntry,
+      /*save_fpu_registers=*/false, &kInterruptOrStackOverflowRuntimeEntry,
       target::Thread::stack_overflow_shared_without_fpu_regs_stub_offset(),
       /*allow_return=*/true);
 }
 
-void StubCodeCompiler::GenerateStackOverflowSharedWithFPURegsStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateStackOverflowSharedWithFPURegsStub() {
   GenerateSharedStub(
-      assembler, /*save_fpu_registers=*/true,
-      &kInterruptOrStackOverflowRuntimeEntry,
+      /*save_fpu_registers=*/true, &kInterruptOrStackOverflowRuntimeEntry,
       target::Thread::stack_overflow_shared_with_fpu_regs_stub_offset(),
       /*allow_return=*/true);
 }
 
-void StubCodeCompiler::GenerateRangeErrorSharedWithoutFPURegsStub(
-    Assembler* assembler) {
-  GenerateRangeError(assembler, /*with_fpu_regs=*/false);
+void StubCodeCompiler::GenerateRangeErrorSharedWithoutFPURegsStub() {
+  GenerateRangeError(/*with_fpu_regs=*/false);
 }
 
-void StubCodeCompiler::GenerateRangeErrorSharedWithFPURegsStub(
-    Assembler* assembler) {
-  GenerateRangeError(assembler, /*with_fpu_regs=*/true);
+void StubCodeCompiler::GenerateRangeErrorSharedWithFPURegsStub() {
+  GenerateRangeError(/*with_fpu_regs=*/true);
 }
 
-void StubCodeCompiler::GenerateWriteErrorSharedWithoutFPURegsStub(
-    Assembler* assembler) {
-  GenerateWriteError(assembler, /*with_fpu_regs=*/false);
+void StubCodeCompiler::GenerateWriteErrorSharedWithoutFPURegsStub() {
+  GenerateWriteError(/*with_fpu_regs=*/false);
 }
 
-void StubCodeCompiler::GenerateWriteErrorSharedWithFPURegsStub(
-    Assembler* assembler) {
-  GenerateWriteError(assembler, /*with_fpu_regs=*/true);
+void StubCodeCompiler::GenerateWriteErrorSharedWithFPURegsStub() {
+  GenerateWriteError(/*with_fpu_regs=*/true);
 }
 
-void StubCodeCompiler::GenerateFrameAwaitingMaterializationStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateFrameAwaitingMaterializationStub() {
   __ Breakpoint();  // Marker stub.
 }
 
-void StubCodeCompiler::GenerateAsynchronousGapMarkerStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAsynchronousGapMarkerStub() {
   __ Breakpoint();  // Marker stub.
 }
 
-void StubCodeCompiler::GenerateUnknownDartCodeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateUnknownDartCodeStub() {
   // Enter frame to include caller into the backtrace.
   __ EnterStubFrame();
   __ Breakpoint();  // Marker stub.
 }
 
-void StubCodeCompiler::GenerateNotLoadedStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateNotLoadedStub() {
   __ EnterStubFrame();
   __ CallRuntime(kNotLoadedRuntimeEntry, 0);
   __ Breakpoint();
 }
 
 #define EMIT_BOX_ALLOCATION(Name)                                              \
-  void StubCodeCompiler::GenerateAllocate##Name##Stub(Assembler* assembler) {  \
+  void StubCodeCompiler::GenerateAllocate##Name##Stub() {                      \
     Label call_runtime;                                                        \
     if (!FLAG_use_slow_path && FLAG_inline_alloc) {                            \
       __ TryAllocate(compiler::Name##Class(), &call_runtime,                   \
@@ -1735,13 +1694,13 @@
   __ Ret();
 }
 
-void StubCodeCompiler::GenerateBoxDoubleStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateBoxDoubleStub() {
   GenerateBoxFpuValueStub(assembler, compiler::DoubleClass(),
                           kBoxDoubleRuntimeEntry,
                           &Assembler::StoreUnboxedDouble);
 }
 
-void StubCodeCompiler::GenerateBoxFloat32x4Stub(Assembler* assembler) {
+void StubCodeCompiler::GenerateBoxFloat32x4Stub() {
 #if !defined(TARGET_ARCH_RISCV32) && !defined(TARGET_ARCH_RISCV64)
   GenerateBoxFpuValueStub(assembler, compiler::Float32x4Class(),
                           kBoxFloat32x4RuntimeEntry,
@@ -1751,7 +1710,7 @@
 #endif
 }
 
-void StubCodeCompiler::GenerateBoxFloat64x2Stub(Assembler* assembler) {
+void StubCodeCompiler::GenerateBoxFloat64x2Stub() {
 #if !defined(TARGET_ARCH_RISCV32) && !defined(TARGET_ARCH_RISCV64)
   GenerateBoxFpuValueStub(assembler, compiler::Float64x2Class(),
                           kBoxFloat64x2RuntimeEntry,
@@ -1761,7 +1720,7 @@
 #endif
 }
 
-void StubCodeCompiler::GenerateDoubleToIntegerStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDoubleToIntegerStub() {
   __ EnterStubFrame();
   __ StoreUnboxedDouble(DoubleToIntegerStubABI::kInputReg, THR,
                         target::Thread::unboxed_runtime_arg_offset());
@@ -1884,7 +1843,6 @@
 }
 
 void StubCodeCompiler::GenerateSuspendStub(
-    Assembler* assembler,
     bool call_suspend_function,
     bool pass_type_arguments,
     intptr_t suspend_entry_point_offset_in_thread,
@@ -2140,36 +2098,35 @@
   __ Jump(&call_dart);
 }
 
-void StubCodeCompiler::GenerateAwaitStub(Assembler* assembler) {
-  GenerateSuspendStub(assembler,
-                      /*call_suspend_function=*/true,
-                      /*pass_type_arguments=*/false,
-                      target::Thread::suspend_state_await_entry_point_offset(),
-                      target::ObjectStore::suspend_state_await_offset());
+void StubCodeCompiler::GenerateAwaitStub() {
+  GenerateSuspendStub(
+      /*call_suspend_function=*/true,
+      /*pass_type_arguments=*/false,
+      target::Thread::suspend_state_await_entry_point_offset(),
+      target::ObjectStore::suspend_state_await_offset());
 }
 
-void StubCodeCompiler::GenerateAwaitWithTypeCheckStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAwaitWithTypeCheckStub() {
   GenerateSuspendStub(
-      assembler,
       /*call_suspend_function=*/true,
       /*pass_type_arguments=*/true,
       target::Thread::suspend_state_await_with_type_check_entry_point_offset(),
       target::ObjectStore::suspend_state_await_with_type_check_offset());
 }
 
-void StubCodeCompiler::GenerateYieldAsyncStarStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateYieldAsyncStarStub() {
   GenerateSuspendStub(
-      assembler,
       /*call_suspend_function=*/true,
       /*pass_type_arguments=*/false,
       target::Thread::suspend_state_yield_async_star_entry_point_offset(),
       target::ObjectStore::suspend_state_yield_async_star_offset());
 }
 
-void StubCodeCompiler::GenerateSuspendSyncStarAtStartStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateSuspendSyncStarAtStartStub() {
   GenerateSuspendStub(
-      assembler,
       /*call_suspend_function=*/true,
       /*pass_type_arguments=*/false,
       target::Thread::
@@ -2177,15 +2134,13 @@
       target::ObjectStore::suspend_state_suspend_sync_star_at_start_offset());
 }
 
-void StubCodeCompiler::GenerateSuspendSyncStarAtYieldStub(
-    Assembler* assembler) {
-  GenerateSuspendStub(assembler,
-                      /*call_suspend_function=*/false,
-                      /*pass_type_arguments=*/false, -1, -1);
+void StubCodeCompiler::GenerateSuspendSyncStarAtYieldStub() {
+  GenerateSuspendStub(
+      /*call_suspend_function=*/false,
+      /*pass_type_arguments=*/false, -1, -1);
 }
 
 void StubCodeCompiler::GenerateInitSuspendableFunctionStub(
-    Assembler* assembler,
     intptr_t init_entry_point_offset_in_thread,
     intptr_t init_function_offset_in_object_store) {
   const Register kTypeArgs = InitSuspendableFunctionStubABI::kTypeArgsReg;
@@ -2205,27 +2160,25 @@
   __ Ret();
 }
 
-void StubCodeCompiler::GenerateInitAsyncStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateInitAsyncStub() {
   GenerateInitSuspendableFunctionStub(
-      assembler, target::Thread::suspend_state_init_async_entry_point_offset(),
+      target::Thread::suspend_state_init_async_entry_point_offset(),
       target::ObjectStore::suspend_state_init_async_offset());
 }
 
-void StubCodeCompiler::GenerateInitAsyncStarStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateInitAsyncStarStub() {
   GenerateInitSuspendableFunctionStub(
-      assembler,
       target::Thread::suspend_state_init_async_star_entry_point_offset(),
       target::ObjectStore::suspend_state_init_async_star_offset());
 }
 
-void StubCodeCompiler::GenerateInitSyncStarStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateInitSyncStarStub() {
   GenerateInitSuspendableFunctionStub(
-      assembler,
       target::Thread::suspend_state_init_sync_star_entry_point_offset(),
       target::ObjectStore::suspend_state_init_sync_star_offset());
 }
 
-void StubCodeCompiler::GenerateResumeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateResumeStub() {
   const Register kSuspendState = ResumeStubABI::kSuspendStateReg;
   const Register kTemp = ResumeStubABI::kTempReg;
   const Register kFrameSize = ResumeStubABI::kFrameSizeReg;
@@ -2406,7 +2359,6 @@
 }
 
 void StubCodeCompiler::GenerateReturnStub(
-    Assembler* assembler,
     intptr_t return_entry_point_offset_in_thread,
     intptr_t return_function_offset_in_object_store,
     intptr_t return_stub_offset_in_thread) {
@@ -2438,32 +2390,29 @@
   __ Ret();
 }
 
-void StubCodeCompiler::GenerateReturnAsyncStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateReturnAsyncStub() {
   GenerateReturnStub(
-      assembler,
       target::Thread::suspend_state_return_async_entry_point_offset(),
       target::ObjectStore::suspend_state_return_async_offset(),
       target::Thread::return_async_stub_offset());
 }
 
-void StubCodeCompiler::GenerateReturnAsyncNotFutureStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateReturnAsyncNotFutureStub() {
   GenerateReturnStub(
-      assembler,
       target::Thread::
           suspend_state_return_async_not_future_entry_point_offset(),
       target::ObjectStore::suspend_state_return_async_not_future_offset(),
       target::Thread::return_async_not_future_stub_offset());
 }
 
-void StubCodeCompiler::GenerateReturnAsyncStarStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateReturnAsyncStarStub() {
   GenerateReturnStub(
-      assembler,
       target::Thread::suspend_state_return_async_star_entry_point_offset(),
       target::ObjectStore::suspend_state_return_async_star_offset(),
       target::Thread::return_async_star_stub_offset());
 }
 
-void StubCodeCompiler::GenerateAsyncExceptionHandlerStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAsyncExceptionHandlerStub() {
   const Register kSuspendState = AsyncExceptionHandlerStubABI::kSuspendStateReg;
   ASSERT(kSuspendState != kExceptionObjectReg);
   ASSERT(kSuspendState != kStackTraceObjectReg);
@@ -2514,7 +2463,7 @@
   __ Breakpoint();
 }
 
-void StubCodeCompiler::GenerateCloneSuspendStateStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCloneSuspendStateStub() {
   const Register kSource = CloneSuspendStateStubABI::kSourceReg;
   const Register kDestination = CloneSuspendStateStubABI::kDestinationReg;
   const Register kTemp = CloneSuspendStateStubABI::kTempReg;
diff --git a/runtime/vm/compiler/stub_code_compiler.h b/runtime/vm/compiler/stub_code_compiler.h
index 73e2027..6a6613a 100644
--- a/runtime/vm/compiler/stub_code_compiler.h
+++ b/runtime/vm/compiler/stub_code_compiler.h
@@ -48,29 +48,28 @@
 
 using UnresolvedPcRelativeCalls = GrowableArray<UnresolvedPcRelativeCall*>;
 
-class StubCodeCompiler : public AllStatic {
+class StubCodeCompiler {
  public:
+  explicit StubCodeCompiler(Assembler* assembler_) : assembler(assembler_) {}
+
+  Assembler* assembler;
+
 #if !defined(TARGET_ARCH_IA32)
-  static void GenerateBuildMethodExtractorStub(
-      Assembler* assembler,
-      const Code& closure_allocation_stub,
-      const Code& context_allocation_stub,
-      bool generic);
+  void GenerateBuildMethodExtractorStub(const Code& closure_allocation_stub,
+                                        const Code& context_allocation_stub,
+                                        bool generic);
 #endif
 
-  static void EnsureIsNewOrRemembered(Assembler* assembler,
-                                      bool preserve_registers = true);
+  void EnsureIsNewOrRemembered(bool preserve_registers = true);
   static ArrayPtr BuildStaticCallsTable(
       Zone* zone,
       compiler::UnresolvedPcRelativeCalls* unresolved_calls);
 
-#define STUB_CODE_GENERATE(name)                                               \
-  static void Generate##name##Stub(Assembler* assembler);
+#define STUB_CODE_GENERATE(name) void Generate##name##Stub();
   VM_STUB_CODE_LIST(STUB_CODE_GENERATE)
 #undef STUB_CODE_GENERATE
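
This hunk is the heart of the change: the stubs stop being static functions that thread an Assembler* through every call and become instance methods over an assembler member, so intra-stub calls shed their first argument and the usual `__` shorthand (which expands to `assembler->` in the VM's compiler sources) keeps working unchanged. A minimal, simplified sketch of the new shape:

struct Assembler {
  void Ret() {}
};

class StubCodeCompilerSketch {
 public:
  explicit StubCodeCompilerSketch(Assembler* assembler)
      : assembler(assembler) {}

  Assembler* assembler;

#define __ assembler->
  // No Assembler* parameter: `__` now resolves against the member.
  void GenerateNopStub() { __ Ret(); }
  // Helper-to-helper calls also shrink at every call site.
  void GenerateForwardingStub() { GenerateNopStub(); }
#undef __
};

int main() {
  Assembler assembler;
  StubCodeCompilerSketch compiler(&assembler);
  compiler.GenerateForwardingStub();
  return 0;
}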
 
-  static void GenerateAllocationStubForClass(
-      Assembler* assembler,
+  void GenerateAllocationStubForClass(
       UnresolvedPcRelativeCalls* unresolved_calls,
       const Class& cls,
       const dart::Code& allocate_object,
@@ -88,16 +87,13 @@
     kCheckExactness,
     kIgnoreExactness,
   };
-  static void GenerateNArgsCheckInlineCacheStub(
-      Assembler* assembler,
-      intptr_t num_args,
-      const RuntimeEntry& handle_ic_miss,
-      Token::Kind kind,
-      Optimized optimized,
-      CallType type,
-      Exactness exactness);
-  static void GenerateNArgsCheckInlineCacheStubForEntryKind(
-      Assembler* assembler,
+  void GenerateNArgsCheckInlineCacheStub(intptr_t num_args,
+                                         const RuntimeEntry& handle_ic_miss,
+                                         Token::Kind kind,
+                                         Optimized optimized,
+                                         CallType type,
+                                         Exactness exactness);
+  void GenerateNArgsCheckInlineCacheStubForEntryKind(
       intptr_t num_args,
       const RuntimeEntry& handle_ic_miss,
       Token::Kind kind,
@@ -105,9 +101,8 @@
       CallType type,
       Exactness exactness,
       CodeEntryKind entry_kind);
-  static void GenerateUsageCounterIncrement(Assembler* assembler,
-                                            Register temp_reg);
-  static void GenerateOptimizedUsageCounterIncrement(Assembler* assembler);
+  void GenerateUsageCounterIncrement(Register temp_reg);
+  void GenerateOptimizedUsageCounterIncrement();
 
 #if defined(TARGET_ARCH_X64)
   static constexpr intptr_t kNativeCallbackTrampolineSize = 10;
@@ -145,8 +140,7 @@
 #error What architecture?
 #endif
 
-  static void GenerateJITCallbackTrampolines(Assembler* assembler,
-                                             intptr_t next_callback_id);
+  void GenerateJITCallbackTrampolines(intptr_t next_callback_id);
 
   // Calculates the offset (in words) from FP to the provided [cpu_register].
   //
@@ -161,64 +155,52 @@
   static intptr_t WordOffsetFromFpToCpuRegister(Register cpu_register);
 
  private:
+  DISALLOW_COPY_AND_ASSIGN(StubCodeCompiler);
+
   // Common function for generating InitLateStaticField and
   // InitLateFinalStaticField stubs.
-  static void GenerateInitLateStaticFieldStub(Assembler* assembler,
-                                              bool is_final);
+  void GenerateInitLateStaticFieldStub(bool is_final);
 
   // Common function for generating InitLateInstanceField and
   // InitLateFinalInstanceField stubs.
-  static void GenerateInitLateInstanceFieldStub(Assembler* assembler,
-                                                bool is_final);
+  void GenerateInitLateInstanceFieldStub(bool is_final);
 
   // Common function for generating Allocate<TypedData>Array stubs.
-  static void GenerateAllocateTypedDataArrayStub(Assembler* assembler,
-                                                 intptr_t cid);
+  void GenerateAllocateTypedDataArrayStub(intptr_t cid);
 
-  static void GenerateAllocateSmallRecordStub(Assembler* assembler,
-                                              intptr_t num_fields,
-                                              bool has_named_fields);
+  void GenerateAllocateSmallRecordStub(intptr_t num_fields,
+                                       bool has_named_fields);
 
-  static void GenerateSharedStubGeneric(
-      Assembler* assembler,
-      bool save_fpu_registers,
-      intptr_t self_code_stub_offset_from_thread,
-      bool allow_return,
-      std::function<void()> perform_runtime_call);
+  void GenerateSharedStubGeneric(bool save_fpu_registers,
+                                 intptr_t self_code_stub_offset_from_thread,
+                                 bool allow_return,
+                                 std::function<void()> perform_runtime_call);
 
   // Generates shared slow path stub which saves registers and calls
   // [target] runtime entry.
   // If [store_runtime_result_in_result_register], then stub puts result into
   // SharedSlowPathStubABI::kResultReg.
-  static void GenerateSharedStub(
-      Assembler* assembler,
-      bool save_fpu_registers,
-      const RuntimeEntry* target,
-      intptr_t self_code_stub_offset_from_thread,
-      bool allow_return,
-      bool store_runtime_result_in_result_register = false);
+  void GenerateSharedStub(bool save_fpu_registers,
+                          const RuntimeEntry* target,
+                          intptr_t self_code_stub_offset_from_thread,
+                          bool allow_return,
+                          bool store_runtime_result_in_result_register = false);
 
-  static void GenerateLateInitializationError(Assembler* assembler,
-                                              bool with_fpu_regs);
+  void GenerateLateInitializationError(bool with_fpu_regs);
 
-  static void GenerateRangeError(Assembler* assembler, bool with_fpu_regs);
-  static void GenerateWriteError(Assembler* assembler, bool with_fpu_regs);
+  void GenerateRangeError(bool with_fpu_regs);
+  void GenerateWriteError(bool with_fpu_regs);
 
-  static void GenerateSuspendStub(
-      Assembler* assembler,
-      bool call_suspend_function,
-      bool pass_type_arguments,
-      intptr_t suspend_entry_point_offset_in_thread,
-      intptr_t suspend_function_offset_in_object_store);
-  static void GenerateInitSuspendableFunctionStub(
-      Assembler* assembler,
+  void GenerateSuspendStub(bool call_suspend_function,
+                           bool pass_type_arguments,
+                           intptr_t suspend_entry_point_offset_in_thread,
+                           intptr_t suspend_function_offset_in_object_store);
+  void GenerateInitSuspendableFunctionStub(
       intptr_t init_entry_point_offset_in_thread,
       intptr_t init_function_offset_in_object_store);
-  static void GenerateReturnStub(
-      Assembler* assembler,
-      intptr_t return_entry_point_offset_in_thread,
-      intptr_t return_function_offset_in_object_store,
-      intptr_t return_stub_offset_in_thread);
+  void GenerateReturnStub(intptr_t return_entry_point_offset_in_thread,
+                          intptr_t return_function_offset_in_object_store,
+                          intptr_t return_stub_offset_in_thread);
 };
 
 }  // namespace compiler
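
One declaration worth pausing on: GenerateSharedStubGeneric takes the runtime call as a std::function<void()>, so a single emitter owns the frame setup and register save/restore while each caller injects only its stub-specific middle. A runnable sketch of the pattern, with illustrative output standing in for emitted code:

#include <cstdio>
#include <functional>

void EmitSharedSlowPathSketch(
    bool save_fpu_registers,
    const std::function<void()>& perform_runtime_call) {
  std::puts("enter stub frame; save general registers");
  if (save_fpu_registers) std::puts("save fpu registers");
  perform_runtime_call();  // caller-provided runtime call
  if (save_fpu_registers) std::puts("restore fpu registers");
  std::puts("restore general registers; leave stub frame");
}

int main() {
  // Mirrors how GenerateRangeError passes its perform_runtime_call lambda.
  EmitSharedSlowPathSketch(/*save_fpu_registers=*/true, [] {
    std::puts("push length and index; CallRuntime(kRangeErrorRuntimeEntry)");
  });
  return 0;
}
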
diff --git a/runtime/vm/compiler/stub_code_compiler_arm.cc b/runtime/vm/compiler/stub_code_compiler_arm.cc
index eb1d027..23af363 100644
--- a/runtime/vm/compiler/stub_code_compiler_arm.cc
+++ b/runtime/vm/compiler/stub_code_compiler_arm.cc
@@ -35,8 +35,7 @@
 //
 // WARNING: This might clobber all registers except for [R0], [THR] and [FP].
 // The caller should simply call LeaveStubFrame() and return.
-void StubCodeCompiler::EnsureIsNewOrRemembered(Assembler* assembler,
-                                               bool preserve_registers) {
+void StubCodeCompiler::EnsureIsNewOrRemembered(bool preserve_registers) {
   // If the object is not remembered, we call a leaf runtime to add it to the
   // remembered set.
   Label done;
@@ -62,7 +61,7 @@
 //   SP + 4*R4 : address of return value.
 //   R9 : address of the runtime function to call.
 //   R4 : number of arguments to the call.
-void StubCodeCompiler::GenerateCallToRuntimeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallToRuntimeStub() {
   const intptr_t thread_offset = target::NativeArguments::thread_offset();
   const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
   const intptr_t argv_offset = target::NativeArguments::argv_offset();
@@ -153,7 +152,6 @@
 }
 
 void StubCodeCompiler::GenerateSharedStubGeneric(
-    Assembler* assembler,
     bool save_fpu_registers,
     intptr_t self_code_stub_offset_from_thread,
     bool allow_return,
@@ -181,7 +179,6 @@
 }
 
 void StubCodeCompiler::GenerateSharedStub(
-    Assembler* assembler,
     bool save_fpu_registers,
     const RuntimeEntry* target,
     intptr_t self_code_stub_offset_from_thread,
@@ -203,7 +200,7 @@
                                  SharedSlowPathStubABI::kResultReg)));
     }
   };
-  GenerateSharedStubGeneric(assembler, save_fpu_registers,
+  GenerateSharedStubGeneric(save_fpu_registers,
                             self_code_stub_offset_from_thread, allow_return,
                             perform_runtime_call);
 }
@@ -212,7 +209,6 @@
 // R4: The type_arguments_field_offset (or 0)
 // SP+0: The object from which we are tearing a method off.
 void StubCodeCompiler::GenerateBuildMethodExtractorStub(
-    Assembler* assembler,
     const Code& closure_allocation_stub,
     const Code& context_allocation_stub,
     bool generic) {
@@ -300,7 +296,7 @@
   __ Ret();
 }
 
-void StubCodeCompiler::GenerateEnterSafepointStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateEnterSafepointStub() {
   RegisterSet all_registers;
   all_registers.AddAllGeneralRegisters();
   __ PushRegisters(all_registers);
@@ -338,13 +334,12 @@
   __ Ret();
 }
 
-void StubCodeCompiler::GenerateExitSafepointStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateExitSafepointStub() {
   GenerateExitSafepointStubCommon(
       assembler, kExitSafepointRuntimeEntry.OffsetFromThread());
 }
 
-void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub() {
   GenerateExitSafepointStubCommon(
       assembler,
       kExitSafepointIgnoreUnwindInProgressRuntimeEntry.OffsetFromThread());
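
The enter/exit safepoint stubs publish the thread's state so the GC can treat code parked here as already safe to scan; the ignore-unwind-in-progress variant exists for paths that must leave the safepoint even while an unwind error is pending. A loose sketch of the transition (the real implementation manipulates Thread state flags and takes a leaf runtime slow path):

#include <atomic>
#include <cstdio>

struct ThreadSketch {
  std::atomic<bool> at_safepoint{false};
  std::atomic<bool> safepoint_operation_pending{false};
};

void EnterSafepointSketch(ThreadSketch* thread) {
  // After this store the GC may proceed without stopping this thread.
  thread->at_safepoint.store(true, std::memory_order_release);
}

void ExitSafepointSketch(ThreadSketch* thread) {
  thread->at_safepoint.store(false, std::memory_order_release);
  if (thread->safepoint_operation_pending.load(std::memory_order_acquire)) {
    // Slow path: block until the safepoint operation finishes.
    std::puts("call exit-safepoint runtime entry");
  }
}
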
@@ -359,8 +354,7 @@
 // On exit:
 //   Stack: preserved
 //   NOTFP, R4: clobbered, although normally callee-saved
-void StubCodeCompiler::GenerateCallNativeThroughSafepointStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateCallNativeThroughSafepointStub() {
   COMPILE_ASSERT(IsAbiPreservedRegister(R4));
 
   // TransitionGeneratedToNative might clobber LR if it takes the slow path.
@@ -380,7 +374,6 @@
 
 #if !defined(DART_PRECOMPILER)
 void StubCodeCompiler::GenerateJITCallbackTrampolines(
-    Assembler* assembler,
     intptr_t next_callback_id) {
 #if defined(USING_SIMULATOR)
   // TODO(37299): FFI is not supported in SIMARM.
@@ -493,8 +486,7 @@
 }
 #endif  // !defined(DART_PRECOMPILER)
 
-void StubCodeCompiler::GenerateDispatchTableNullErrorStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateDispatchTableNullErrorStub() {
   __ EnterStubFrame();
   __ SmiTag(DispatchTableNullErrorABI::kClassIdReg);
   __ PushRegister(DispatchTableNullErrorABI::kClassIdReg);
@@ -503,8 +495,7 @@
   __ Breakpoint();
 }
 
-void StubCodeCompiler::GenerateRangeError(Assembler* assembler,
-                                          bool with_fpu_regs) {
+void StubCodeCompiler::GenerateRangeError(bool with_fpu_regs) {
   auto perform_runtime_call = [&]() {
     ASSERT(!GenericCheckBoundInstr::UseUnboxedRepresentation());
     __ PushRegistersInOrder(
@@ -514,22 +505,21 @@
   };
 
   GenerateSharedStubGeneric(
-      assembler, /*save_fpu_registers=*/with_fpu_regs,
+      /*save_fpu_registers=*/with_fpu_regs,
       with_fpu_regs
           ? target::Thread::range_error_shared_with_fpu_regs_stub_offset()
           : target::Thread::range_error_shared_without_fpu_regs_stub_offset(),
       /*allow_return=*/false, perform_runtime_call);
 }
 
-void StubCodeCompiler::GenerateWriteError(Assembler* assembler,
-                                          bool with_fpu_regs) {
+void StubCodeCompiler::GenerateWriteError(bool with_fpu_regs) {
   auto perform_runtime_call = [&]() {
     __ CallRuntime(kWriteErrorRuntimeEntry, /*argument_count=*/0);
     __ Breakpoint();
   };
 
   GenerateSharedStubGeneric(
-      assembler, /*save_fpu_registers=*/with_fpu_regs,
+      /*save_fpu_registers=*/with_fpu_regs,
       with_fpu_regs
           ? target::Thread::write_error_shared_with_fpu_regs_stub_offset()
           : target::Thread::write_error_shared_without_fpu_regs_stub_offset(),
@@ -632,14 +622,14 @@
   __ Ret();
 }
 
-void StubCodeCompiler::GenerateCallNoScopeNativeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallNoScopeNativeStub() {
   GenerateCallNativeWithWrapperStub(
       assembler,
       Address(THR,
               target::Thread::no_scope_native_wrapper_entry_point_offset()));
 }
 
-void StubCodeCompiler::GenerateCallAutoScopeNativeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallAutoScopeNativeStub() {
   GenerateCallNativeWithWrapperStub(
       assembler,
       Address(THR,
@@ -652,7 +642,7 @@
 //   R9 : address of the native function to call.
 //   R2 : address of first argument in argument array.
 //   R1 : argc_tag including number of arguments and function kind.
-void StubCodeCompiler::GenerateCallBootstrapNativeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallBootstrapNativeStub() {
   GenerateCallNativeWithWrapperStub(
       assembler,
       Address(THR,
@@ -661,7 +651,7 @@
 
 // Input parameters:
 //   ARGS_DESC_REG: arguments descriptor array.
-void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallStaticFunctionStub() {
   // Create a stub frame as we are pushing some objects on the stack before
   // calling into the runtime.
   __ EnterStubFrame();
@@ -681,7 +671,7 @@
 // Called from a static call only when an invalid code has been entered
 // (invalid because its function was optimized or deoptimized).
 // ARGS_DESC_REG: arguments descriptor array.
-void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateFixCallersTargetStub() {
   Label monomorphic;
   __ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);
 
@@ -731,8 +721,7 @@
 
 // Called from object allocate instruction when the allocation stub has been
 // disabled.
-void StubCodeCompiler::GenerateFixAllocationStubTargetStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateFixAllocationStubTargetStub() {
   // Load code pointer to this stub from the thread:
   // The one that is passed in is not correct - it points to the code object
   // that needs to be replaced.
@@ -754,8 +743,7 @@
 
 // Called from object allocate instruction when the allocation stub for a
 // generic class has been disabled.
-void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub() {
   // Load code pointer to this stub from the thread:
   // The one that is passed in is not correct - it points to the code object
   // that needs to be replaced.
@@ -965,8 +953,7 @@
 }
 
 // R0: result, must be preserved
-void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub() {
   // Push zap value instead of CODE_REG for lazy deopt.
   __ LoadImmediate(IP, kZapCodeReg);
   __ Push(IP);
@@ -980,8 +967,7 @@
 
 // R0: exception, must be preserved
 // R1: stacktrace, must be preserved
-void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub() {
   // Push zap value instead of CODE_REG for lazy deopt.
   __ LoadImmediate(IP, kZapCodeReg);
   __ Push(IP);
@@ -993,7 +979,7 @@
   __ Ret();
 }
 
-void StubCodeCompiler::GenerateDeoptimizeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptimizeStub() {
   __ Push(CODE_REG);
   __ ldr(CODE_REG, Address(THR, target::Thread::deoptimize_stub_offset()));
   GenerateDeoptimizationSequence(assembler, kEagerDeopt);
@@ -1051,8 +1037,7 @@
 // Input:
 //   ARGS_DESC_REG - arguments descriptor
 //   IC_DATA_REG - icdata/megamorphic_cache
-void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub() {
   GenerateNoSuchMethodDispatcherBody(assembler);
 }
 
@@ -1065,7 +1050,7 @@
 //   AllocateArrayABI::kResultReg: newly allocated array.
 // Clobbered:
 //   R3, R4, R8, R9
-void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateArrayStub() {
   if (!FLAG_use_slow_path && FLAG_inline_alloc) {
     Label slow_case;
     // Compute the size to be allocated; it is based on the array length
@@ -1196,8 +1181,7 @@
 }
 
 // Called for allocation of Mint.
-void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub() {
   // For testing, call the allocation stub without trying inline allocation.
   if (!FLAG_use_slow_path && FLAG_inline_alloc) {
     Label slow_case;
@@ -1209,16 +1193,14 @@
   }
   COMPILE_ASSERT(AllocateMintABI::kResultReg ==
                  SharedSlowPathStubABI::kResultReg);
-  GenerateSharedStub(assembler, /*save_fpu_registers=*/true,
-                     &kAllocateMintRuntimeEntry,
+  GenerateSharedStub(/*save_fpu_registers=*/true, &kAllocateMintRuntimeEntry,
                      target::Thread::allocate_mint_with_fpu_regs_stub_offset(),
                      /*allow_return=*/true,
                      /*store_runtime_result_in_result_register=*/true);
 }
 
 // Called for allocation of Mint.
-void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub() {
   // For testing, call the allocation stub without trying inline allocation.
   if (!FLAG_use_slow_path && FLAG_inline_alloc) {
     Label slow_case;
@@ -1231,7 +1213,7 @@
   COMPILE_ASSERT(AllocateMintABI::kResultReg ==
                  SharedSlowPathStubABI::kResultReg);
   GenerateSharedStub(
-      assembler, /*save_fpu_registers=*/false, &kAllocateMintRuntimeEntry,
+      /*save_fpu_registers=*/false, &kAllocateMintRuntimeEntry,
       target::Thread::allocate_mint_without_fpu_regs_stub_offset(),
       /*allow_return=*/true,
       /*store_runtime_result_in_result_register=*/true);
@@ -1244,7 +1226,7 @@
 //   R1 : arguments descriptor array.
 //   R2 : arguments array.
 //   R3 : current thread.
-void StubCodeCompiler::GenerateInvokeDartCodeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateInvokeDartCodeStub() {
   SPILLS_LR_TO_FRAME(__ EnterFrame((1 << FP) | (1 << LR), 0));
 
   // Push code object to PC marker slot.
@@ -1449,7 +1431,7 @@
 //   R0: newly allocated Context object.
 // Clobbered:
 //   Potentially any since it can go to runtime.
-void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateContextStub() {
   if (!FLAG_use_slow_path && FLAG_inline_alloc) {
     Label slow_case;
 
@@ -1494,7 +1476,7 @@
   // Write-barrier elimination might be enabled for this context (depending on
   // the size). To be sure, we check whether the allocated object is in old
   // space and, if so, call a leaf runtime to add it to the remembered set.
-  EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
+  EnsureIsNewOrRemembered(/*preserve_registers=*/false);
 
   // R0: new object
   // Restore the frame pointer.
@@ -1510,7 +1492,7 @@
 //   R0: newly allocated Context object.
 // Clobbered:
 //   Potentially any since it can go to runtime.
-void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCloneContextStub() {
   if (!FLAG_use_slow_path && FLAG_inline_alloc) {
     Label slow_case;
 
@@ -1569,7 +1551,7 @@
   // Write-barrier elimination might be enabled for this context (depending on
   // the size). To be sure, we check whether the allocated object is in old
   // space and, if so, call a leaf runtime to add it to the remembered set.
-  EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
+  EnsureIsNewOrRemembered(/*preserve_registers=*/false);
 
   // R0: new object
   // Restore the frame pointer.
@@ -1577,7 +1559,7 @@
   __ Ret();
 }
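
The invariant behind the comment above, stated in code: when write barriers were elided for stores into a fresh object, the object must end up either in new space or in the remembered set before the stub returns. An illustrative model, where a set stands in for the store buffer the leaf runtime appends to:

#include <cstdio>
#include <unordered_set>

struct HeapSketch {
  std::unordered_set<const void*> new_space;
  std::unordered_set<const void*> remembered_set;

  // Counterpart of EnsureIsNewOrRemembered: old-space objects that might
  // have skipped their write barriers are added to the remembered set.
  void EnsureIsNewOrRemembered(const void* object) {
    if (new_space.count(object) == 0) {
      remembered_set.insert(object);  // leaf-runtime call in the real stub
      std::puts("old-space object added to remembered set");
    }
  }
};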
 
-void StubCodeCompiler::GenerateWriteBarrierWrappersStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateWriteBarrierWrappersStub() {
   for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
     if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
 
@@ -1750,11 +1732,11 @@
   }
 }
 
-void StubCodeCompiler::GenerateWriteBarrierStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateWriteBarrierStub() {
   GenerateWriteBarrierStubHelper(assembler, false);
 }
 
-void StubCodeCompiler::GenerateArrayWriteBarrierStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateArrayWriteBarrierStub() {
   GenerateWriteBarrierStubHelper(assembler, true);
 }
 
@@ -1866,16 +1848,15 @@
 }
 
 // Called for inline allocation of objects (any class).
-void StubCodeCompiler::GenerateAllocateObjectStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateObjectStub() {
   GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/false);
 }
 
-void StubCodeCompiler::GenerateAllocateObjectParameterizedStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateObjectParameterizedStub() {
   GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/true);
 }
 
-void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateObjectSlowStub() {
   const Register kClsReg = R1;
 
   if (!FLAG_precompiled_mode) {
@@ -1905,14 +1886,13 @@
 
   // Write-barrier elimination is enabled for [cls] and we therefore need to
   // ensure that the object is in new-space or has the remembered bit set.
-  EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
+  EnsureIsNewOrRemembered(/*preserve_registers=*/false);
 
   __ LeaveDartFrameAndReturn();
 }
 
 // Called for inline allocation of objects.
 void StubCodeCompiler::GenerateAllocationStubForClass(
-    Assembler* assembler,
     UnresolvedPcRelativeCalls* unresolved_calls,
     const Class& cls,
     const Code& allocate_object,
@@ -1981,8 +1961,7 @@
 //  LR : return address.
 //  SP : address of last argument.
 //  R4: arguments descriptor array.
-void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub() {
   __ EnterStubFrame();
 
   // Load the receiver.
@@ -2020,8 +1999,7 @@
 //  R9: inline cache data object.
 // Cannot use function object from ICData as it may be the inlined
 // function and not the top-scope function.
-void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement() {
   Register ic_reg = R9;
   Register func_reg = R8;
   if (FLAG_precompiled_mode) {
@@ -2044,8 +2022,7 @@
 }
 
 // Loads function into 'temp_reg'.
-void StubCodeCompiler::GenerateUsageCounterIncrement(Assembler* assembler,
-                                                     Register temp_reg) {
+void StubCodeCompiler::GenerateUsageCounterIncrement(Register temp_reg) {
   if (FLAG_precompiled_mode) {
     __ Breakpoint();
     return;
@@ -2156,7 +2133,6 @@
 // - Match found -> jump to target.
 // - Match not found -> jump to IC miss.
 void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
-    Assembler* assembler,
     intptr_t num_args,
     const RuntimeEntry& handle_ic_miss,
     Token::Kind kind,
@@ -2174,9 +2150,9 @@
   }
 
   if (optimized == kOptimized) {
-    GenerateOptimizedUsageCounterIncrement(assembler);
+    GenerateOptimizedUsageCounterIncrement();
   } else {
-    GenerateUsageCounterIncrement(assembler, /* scratch */ R8);
+    GenerateUsageCounterIncrement(/* scratch */ R8);
   }
 
   ASSERT(exactness == kIgnoreExactness);  // Unimplemented.
@@ -2401,67 +2377,63 @@
 //  R0: receiver
 //  R9: ICData
 //  LR: return address
-void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
+      1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
       kUnoptimized, kInstanceCall, kIgnoreExactness);
 }
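
Behind these wrappers, the n-arg check stub walks the call site's ICData: rows pairing receiver class ids with a target, scanned linearly. A hit jumps to the cached target; a miss calls the handler, which resolves the target and appends a new row. A hedged sketch of the one-argument case, with illustrative names:

#include <cstdint>
#include <vector>

struct ICEntrySketch {
  intptr_t receiver_cid;  // class id checked against the receiver
  void (*target)();       // cached entry point for that class
};

void InlineCacheMissSketch(std::vector<ICEntrySketch>* cache,
                           intptr_t receiver_cid, void (*resolved)()) {
  cache->push_back({receiver_cid, resolved});  // grow the cache
  resolved();
}

void OneArgICDispatchSketch(std::vector<ICEntrySketch>* cache,
                            intptr_t receiver_cid, void (*resolved)()) {
  for (const ICEntrySketch& entry : *cache) {
    if (entry.receiver_cid == receiver_cid) {
      entry.target();  // match found -> jump to target
      return;
    }
  }
  InlineCacheMissSketch(cache, receiver_cid, resolved);  // match not found
}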
 
 //  R0: receiver
 //  R9: ICData
 //  LR: return address
-void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub() {
   __ Stop("Unimplemented");
 }
 
 //  R0: receiver
 //  R9: ICData
 //  LR: return address
-void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
+      2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
       kUnoptimized, kInstanceCall, kIgnoreExactness);
 }
 
 //  R0: receiver
 //  R9: ICData
 //  LR: return address
-void StubCodeCompiler::GenerateSmiAddInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSmiAddInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD,
-      kUnoptimized, kInstanceCall, kIgnoreExactness);
+      2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD, kUnoptimized,
+      kInstanceCall, kIgnoreExactness);
 }
 
 //  R0: receiver
 //  R9: ICData
 //  LR: return address
-void StubCodeCompiler::GenerateSmiLessInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSmiLessInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT,
-      kUnoptimized, kInstanceCall, kIgnoreExactness);
+      2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT, kUnoptimized,
+      kInstanceCall, kIgnoreExactness);
 }
 
 //  R0: receiver
 //  R9: ICData
 //  LR: return address
-void StubCodeCompiler::GenerateSmiEqualInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSmiEqualInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ,
-      kUnoptimized, kInstanceCall, kIgnoreExactness);
+      2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ, kUnoptimized,
+      kInstanceCall, kIgnoreExactness);
 }
 
 //  R0: receiver
 //  R9: ICData
 //  R8: Function
 //  LR: return address
-void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
-      kOptimized, kInstanceCall, kIgnoreExactness);
+      1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, kOptimized,
+      kInstanceCall, kIgnoreExactness);
 }
 
 //  R0: receiver
@@ -2469,8 +2441,7 @@
 //  R8: Function
 //  LR: return address
 void StubCodeCompiler::
-    GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub(
-        Assembler* assembler) {
+    GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub() {
   __ Stop("Unimplemented");
 }
 
@@ -2478,19 +2449,17 @@
 //  R9: ICData
 //  R8: Function
 //  LR: return address
-void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
+      2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
       kOptimized, kInstanceCall, kIgnoreExactness);
 }
 
 //  R9: ICData
 //  LR: return address
-void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub() {
   GenerateRecordEntryPoint(assembler);
-  GenerateUsageCounterIncrement(assembler, /* scratch */ R8);
+  GenerateUsageCounterIncrement(/* scratch */ R8);
 #if defined(DEBUG)
   {
     Label ok;
@@ -2559,28 +2528,26 @@
 
 //  R9: ICData
 //  LR: return address
-void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub(
-    Assembler* assembler) {
-  GenerateUsageCounterIncrement(assembler, /* scratch */ R8);
-  GenerateNArgsCheckInlineCacheStub(
-      assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
-      kUnoptimized, kStaticCall, kIgnoreExactness);
+void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub() {
+  GenerateUsageCounterIncrement(/* scratch */ R8);
+  GenerateNArgsCheckInlineCacheStub(1, kStaticCallMissHandlerOneArgRuntimeEntry,
+                                    Token::kILLEGAL, kUnoptimized, kStaticCall,
+                                    kIgnoreExactness);
 }
 
 //  R9: ICData
 //  LR: return address
-void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub(
-    Assembler* assembler) {
-  GenerateUsageCounterIncrement(assembler, /* scratch */ R8);
+void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub() {
+  GenerateUsageCounterIncrement(/* scratch */ R8);
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
+      2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
       kUnoptimized, kStaticCall, kIgnoreExactness);
 }
 
 // Stub for compiling a function and jumping to the compiled code.
 // ARGS_DESC_REG: Arguments descriptor.
 // FUNCTION_REG: Function.
-void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateLazyCompileStub() {
   __ EnterStubFrame();
   // Preserve arg desc, pass function.
   COMPILE_ASSERT(FUNCTION_REG < ARGS_DESC_REG);
@@ -2594,7 +2561,7 @@
 }
 
 // R9: Contains an ICData.
-void StubCodeCompiler::GenerateICCallBreakpointStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateICCallBreakpointStub() {
 #if defined(PRODUCT)
   __ Stop("No debugging in PRODUCT mode");
 #else
@@ -2611,8 +2578,7 @@
 #endif  // defined(PRODUCT)
 }
 
-void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub() {
 #if defined(PRODUCT)
   __ Stop("No debugging in PRODUCT mode");
 #else
@@ -2627,7 +2593,7 @@
 #endif  // defined(PRODUCT)
 }
 
-void StubCodeCompiler::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateRuntimeCallBreakpointStub() {
 #if defined(PRODUCT)
   __ Stop("No debugging in PRODUCT mode");
 #else
@@ -2643,7 +2609,7 @@
 }
 
 // Called only from unoptimized code. All relevant registers have been saved.
-void StubCodeCompiler::GenerateDebugStepCheckStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDebugStepCheckStub() {
 #if defined(PRODUCT)
   __ Stop("No debugging in PRODUCT mode");
 #else
@@ -2894,27 +2860,27 @@
 }
 
 // See comment on [GenerateSubtypeNTestCacheStub].
-void StubCodeCompiler::GenerateSubtype1TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype1TestCacheStub() {
   GenerateSubtypeNTestCacheStub(assembler, 1);
 }
 
 // See comment on [GenerateSubtypeNTestCacheStub].
-void StubCodeCompiler::GenerateSubtype3TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype3TestCacheStub() {
   GenerateSubtypeNTestCacheStub(assembler, 3);
 }
 
 // See comment on [GenerateSubtypeNTestCacheStub].
-void StubCodeCompiler::GenerateSubtype5TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype5TestCacheStub() {
   GenerateSubtypeNTestCacheStub(assembler, 5);
 }
 
 // See comment on [GenerateSubtypeNTestCacheStub].
-void StubCodeCompiler::GenerateSubtype7TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype7TestCacheStub() {
   GenerateSubtypeNTestCacheStub(assembler, 7);
 }
 
 // Return the current stack pointer address, used to do stack alignment checks.
-void StubCodeCompiler::GenerateGetCStackPointerStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateGetCStackPointerStub() {
   __ mov(R0, Operand(SP));
   __ Ret();
 }
@@ -2928,7 +2894,7 @@
 // Does not return.
 //
 // Notice: We need to keep this in sync with `Simulator::JumpToFrame()`.
-void StubCodeCompiler::GenerateJumpToFrameStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateJumpToFrameStub() {
   COMPILE_ASSERT(kExceptionObjectReg == R0);
   COMPILE_ASSERT(kStackTraceObjectReg == R1);
   COMPILE_ASSERT(IsAbiPreservedRegister(R4));
@@ -2982,7 +2948,7 @@
 //
 // The arguments are stored in the Thread object.
 // Does not return.
-void StubCodeCompiler::GenerateRunExceptionHandlerStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateRunExceptionHandlerStub() {
   WRITES_RETURN_ADDRESS_TO_LR(
       __ LoadFromOffset(LR, THR, target::Thread::resume_pc_offset()));
 
@@ -3006,7 +2972,7 @@
 // Deoptimize a frame on the call stack before rewinding.
 // The arguments are stored in the Thread object.
 // No result.
-void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptForRewindStub() {
   // Push zap value instead of CODE_REG.
   __ LoadImmediate(IP, kZapCodeReg);
   __ Push(IP);
@@ -3026,7 +2992,7 @@
 // Calls to the runtime to optimize the given function.
 // R8: function to be reoptimized.
 // ARGS_DESC_REG: argument descriptor (preserved).
-void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateOptimizeFunctionStub() {
   __ ldr(CODE_REG, Address(THR, target::Thread::optimize_stub_offset()));
   __ EnterStubFrame();
   __ Push(ARGS_DESC_REG);
@@ -3107,8 +3073,7 @@
 // SP + 4: left operand.
 // SP + 0: right operand.
 // Return Zero condition flag set if equal.
-void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub() {
 #if !defined(PRODUCT)
   // Check single stepping.
   Label stepping, done_stepping;
@@ -3142,8 +3107,7 @@
 // SP + 4: left operand.
 // SP + 0: right operand.
 // Return Zero condition flag set if equal.
-void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub() {
   const Register temp = R2;
   const Register left = R1;
   const Register right = R0;
@@ -3160,7 +3124,7 @@
 //  FUNCTION_REG: target function
 //  ARGS_DESC_REG: arguments descriptor
 //  CODE_REG: target Code
-void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateMegamorphicCallStub() {
   __ LoadTaggedClassIdMayBeSmi(R8, R0);
   // R8: receiver cid as Smi.
   __ ldr(R2,
@@ -3215,10 +3179,10 @@
   __ b(&loop);
 
   __ Bind(&miss);
-  GenerateSwitchableCallMissStub(assembler);
+  GenerateSwitchableCallMissStub();
 }
 
-void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateICCallThroughCodeStub() {
   Label loop, found, miss;
   __ ldr(R8, FieldAddress(IC_DATA_REG, target::ICData::entries_offset()));
   __ ldr(R4, FieldAddress(IC_DATA_REG,
@@ -3267,8 +3231,7 @@
 //   R9: MonomorphicSmiableCall object
 //
 //   R2, R3: clobbered
-void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub() {
   __ LoadClassIdMayBeSmi(IP, R0);
 
   // entrypoint_ should come right after expected_cid_
@@ -3301,7 +3264,7 @@
 
 // Called from switchable IC calls.
 //  R0: receiver
-void StubCodeCompiler::GenerateSwitchableCallMissStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSwitchableCallMissStub() {
   __ ldr(CODE_REG,
          Address(THR, target::Thread::switchable_call_miss_stub_offset()));
   __ EnterStubFrame();
@@ -3317,7 +3280,7 @@
 //  R9: SingleTargetCache
 // Passed to target:
 //  CODE_REG: target Code object
-void StubCodeCompiler::GenerateSingleTargetCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSingleTargetCallStub() {
   Label miss;
   __ LoadClassIdMayBeSmi(R1, R0);
   __ ldrh(R2,
@@ -3360,8 +3323,7 @@
   return -1;
 }
 
-void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(Assembler* assembler,
-                                                          intptr_t cid) {
+void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(intptr_t cid) {
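   // The element size for this cid determines both the new-space length limit
   // and the shift used to scale lengths into byte counts.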
   const intptr_t element_size = TypedDataElementSizeInBytes(cid);
   const intptr_t max_len = TypedDataMaxNewSpaceElements(cid);
   const intptr_t scale_shift = GetScaleFactor(element_size);
diff --git a/runtime/vm/compiler/stub_code_compiler_arm64.cc b/runtime/vm/compiler/stub_code_compiler_arm64.cc
index ba3f7a6..085253f 100644
--- a/runtime/vm/compiler/stub_code_compiler_arm64.cc
+++ b/runtime/vm/compiler/stub_code_compiler_arm64.cc
@@ -34,8 +34,7 @@
 //
 // WARNING: This might clobber all registers except for [R0], [THR] and [FP].
 // The caller should simply call LeaveStubFrame() and return.
-void StubCodeCompiler::EnsureIsNewOrRemembered(Assembler* assembler,
-                                               bool preserve_registers) {
+void StubCodeCompiler::EnsureIsNewOrRemembered(bool preserve_registers) {
   // If the object is not remembered we call a leaf-runtime to add it to the
   // remembered set.
   Label done;
@@ -154,7 +153,7 @@
 //   SP + 8*R4 : address of return value.
 //   R5 : address of the runtime function to call.
 //   R4 : number of arguments to the call.
-void StubCodeCompiler::GenerateCallToRuntimeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallToRuntimeStub() {
   const intptr_t thread_offset = target::NativeArguments::thread_offset();
   const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
   const intptr_t argv_offset = target::NativeArguments::argv_offset();
@@ -270,7 +269,6 @@
 }
 
 void StubCodeCompiler::GenerateSharedStubGeneric(
-    Assembler* assembler,
     bool save_fpu_registers,
     intptr_t self_code_stub_offset_from_thread,
     bool allow_return,
@@ -298,7 +296,6 @@
 }
 
 void StubCodeCompiler::GenerateSharedStub(
-    Assembler* assembler,
     bool save_fpu_registers,
     const RuntimeEntry* target,
     intptr_t self_code_stub_offset_from_thread,
@@ -318,12 +315,12 @@
                                  SharedSlowPathStubABI::kResultReg)));
     }
   };
-  GenerateSharedStubGeneric(assembler, save_fpu_registers,
+  GenerateSharedStubGeneric(save_fpu_registers,
                             self_code_stub_offset_from_thread, allow_return,
                             perform_runtime_call);
 }
 
-void StubCodeCompiler::GenerateEnterSafepointStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateEnterSafepointStub() {
   RegisterSet all_registers;
   all_registers.AddAllGeneralRegisters();
 
@@ -378,13 +375,12 @@
   __ Ret();
 }
 
-void StubCodeCompiler::GenerateExitSafepointStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateExitSafepointStub() {
   GenerateExitSafepointStubCommon(
       assembler, kExitSafepointRuntimeEntry.OffsetFromThread());
 }
 
-void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub() {
   GenerateExitSafepointStubCommon(
       assembler,
       kExitSafepointIgnoreUnwindInProgressRuntimeEntry.OffsetFromThread());
@@ -399,8 +395,7 @@
 // On exit:
 //   R19: clobbered, although normally callee-saved
 //   Stack: preserved, CSP == SP
-void StubCodeCompiler::GenerateCallNativeThroughSafepointStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateCallNativeThroughSafepointStub() {
   COMPILE_ASSERT(IsAbiPreservedRegister(R19));
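   // R19 is callee-saved in the native ABI, so the return address stashed
   // there below survives the native call and the safepoint transition.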
 
   SPILLS_RETURN_ADDRESS_FROM_LR_TO_REGISTER(__ mov(R19, LR));
@@ -432,7 +427,6 @@
 
 #if !defined(DART_PRECOMPILER)
 void StubCodeCompiler::GenerateJITCallbackTrampolines(
-    Assembler* assembler,
     intptr_t next_callback_id) {
 #if !defined(HOST_ARCH_ARM64)
   // TODO(37299): FFI is not supported in SIMARM64.
@@ -577,7 +571,6 @@
 // R1: The extracted method.
 // R4: The type_arguments_field_offset (or 0)
 void StubCodeCompiler::GenerateBuildMethodExtractorStub(
-    Assembler* assembler,
     const Code& closure_allocation_stub,
     const Code& context_allocation_stub,
     bool generic) {
@@ -670,8 +663,7 @@
   __ Ret();
 }
 
-void StubCodeCompiler::GenerateDispatchTableNullErrorStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateDispatchTableNullErrorStub() {
   __ EnterStubFrame();
   __ SmiTag(DispatchTableNullErrorABI::kClassIdReg);
   __ PushRegister(DispatchTableNullErrorABI::kClassIdReg);
@@ -680,8 +672,7 @@
   __ Breakpoint();
 }
 
-void StubCodeCompiler::GenerateRangeError(Assembler* assembler,
-                                          bool with_fpu_regs) {
+void StubCodeCompiler::GenerateRangeError(bool with_fpu_regs) {
   auto perform_runtime_call = [&]() {
     // If the generated code has unboxed index/length we need to box them before
     // calling the runtime entry.
@@ -730,22 +721,21 @@
   };
 
   GenerateSharedStubGeneric(
-      assembler, /*save_fpu_registers=*/with_fpu_regs,
+      /*save_fpu_registers=*/with_fpu_regs,
       with_fpu_regs
           ? target::Thread::range_error_shared_with_fpu_regs_stub_offset()
           : target::Thread::range_error_shared_without_fpu_regs_stub_offset(),
       /*allow_return=*/false, perform_runtime_call);
 }
 
-void StubCodeCompiler::GenerateWriteError(Assembler* assembler,
-                                          bool with_fpu_regs) {
+void StubCodeCompiler::GenerateWriteError(bool with_fpu_regs) {
   auto perform_runtime_call = [&]() {
     __ CallRuntime(kWriteErrorRuntimeEntry, /*argument_count=*/0);
     __ Breakpoint();
   };
 
   GenerateSharedStubGeneric(
-      assembler, /*save_fpu_registers=*/with_fpu_regs,
+      /*save_fpu_registers=*/with_fpu_regs,
       with_fpu_regs
           ? target::Thread::write_error_shared_with_fpu_regs_stub_offset()
           : target::Thread::write_error_shared_without_fpu_regs_stub_offset(),
@@ -865,14 +855,14 @@
   __ ret();
 }
 
-void StubCodeCompiler::GenerateCallNoScopeNativeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallNoScopeNativeStub() {
   GenerateCallNativeWithWrapperStub(
       assembler,
       Address(THR,
               target::Thread::no_scope_native_wrapper_entry_point_offset()));
 }
 
-void StubCodeCompiler::GenerateCallAutoScopeNativeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallAutoScopeNativeStub() {
   GenerateCallNativeWithWrapperStub(
       assembler,
       Address(THR,
@@ -885,7 +875,7 @@
 //   R5 : address of the native function to call.
 //   R2 : address of first argument in argument array.
 //   R1 : argc_tag including number of arguments and function kind.
-void StubCodeCompiler::GenerateCallBootstrapNativeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallBootstrapNativeStub() {
   GenerateCallNativeWithWrapperStub(
       assembler,
       Address(THR,
@@ -894,7 +884,7 @@
 
 // Input parameters:
 //   ARGS_DESC_REG: arguments descriptor array.
-void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallStaticFunctionStub() {
   // Create a stub frame as we are pushing some objects on the stack before
   // calling into the runtime.
   __ EnterStubFrame();
@@ -915,7 +905,7 @@
 // Called from a static call only when an invalid code has been entered
 // (invalid because its function was optimized or deoptimized).
 // ARGS_DESC_REG: arguments descriptor array.
-void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateFixCallersTargetStub() {
   Label monomorphic;
   __ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);
 
@@ -967,8 +957,7 @@
 
 // Called from object allocate instruction when the allocation stub has been
 // disabled.
-void StubCodeCompiler::GenerateFixAllocationStubTargetStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateFixAllocationStubTargetStub() {
   // Load code pointer to this stub from the thread:
   // The one that is passed in is not correct - it points to the code object
   // that needs to be replaced.
@@ -989,8 +978,7 @@
 
 // Called from object allocate instruction when the allocation stub for a
 // generic class has been disabled.
-void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub() {
   // Load code pointer to this stub from the thread:
   // The one that is passed in is not correct - it points to the code object
   // that needs to be replaced.
@@ -1210,8 +1198,7 @@
 }
 
 // R0: result, must be preserved
-void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub() {
   // Push zap value instead of CODE_REG for lazy deopt.
   __ LoadImmediate(TMP, kZapCodeReg);
   __ Push(TMP);
@@ -1225,8 +1212,7 @@
 
 // R0: exception, must be preserved
 // R1: stacktrace, must be preserved
-void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub() {
   // Push zap value instead of CODE_REG for lazy deopt.
   __ LoadImmediate(TMP, kZapCodeReg);
   __ Push(TMP);
@@ -1238,7 +1224,7 @@
   __ ret();
 }
 
-void StubCodeCompiler::GenerateDeoptimizeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptimizeStub() {
   __ Push(CODE_REG);
   __ ldr(CODE_REG, Address(THR, target::Thread::deoptimize_stub_offset()));
   GenerateDeoptimizationSequence(assembler, kEagerDeopt);
@@ -1296,8 +1282,7 @@
 // Input:
 //   ARGS_DESC_REG - arguments descriptor
 //   IC_DATA_REG - icdata/megamorphic_cache
-void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub() {
   GenerateNoSuchMethodDispatcherBody(assembler);
 }
 
@@ -1310,7 +1295,7 @@
 //   AllocateArrayABI::kResultReg: newly allocated array.
 // Clobbered:
 //   R3, R7
-void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateArrayStub() {
   if (!FLAG_use_slow_path && FLAG_inline_alloc) {
     Label slow_case;
     // Compute the size to be allocated: it is based on the array length
@@ -1467,8 +1452,7 @@
   __ ret();
 }
 
-void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub() {
   // For test purposes, call allocation stub without inline allocation attempt.
   if (!FLAG_use_slow_path && FLAG_inline_alloc) {
     Label slow_case;
@@ -1480,15 +1464,13 @@
   }
   COMPILE_ASSERT(AllocateMintABI::kResultReg ==
                  SharedSlowPathStubABI::kResultReg);
-  GenerateSharedStub(assembler, /*save_fpu_registers=*/true,
-                     &kAllocateMintRuntimeEntry,
+  GenerateSharedStub(/*save_fpu_registers=*/true, &kAllocateMintRuntimeEntry,
                      target::Thread::allocate_mint_with_fpu_regs_stub_offset(),
                      /*allow_return=*/true,
                      /*store_runtime_result_in_result_register=*/true);
 }
 
-void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub() {
   // For test purposes, call allocation stub without inline allocation attempt.
   if (!FLAG_use_slow_path && FLAG_inline_alloc) {
     Label slow_case;
@@ -1501,7 +1483,7 @@
   COMPILE_ASSERT(AllocateMintABI::kResultReg ==
                  SharedSlowPathStubABI::kResultReg);
   GenerateSharedStub(
-      assembler, /*save_fpu_registers=*/false, &kAllocateMintRuntimeEntry,
+      /*save_fpu_registers=*/false, &kAllocateMintRuntimeEntry,
       target::Thread::allocate_mint_without_fpu_regs_stub_offset(),
       /*allow_return=*/true,
       /*store_runtime_result_in_result_register=*/true);
@@ -1514,7 +1496,7 @@
 //   R1 : arguments descriptor array.
 //   R2 : arguments array.
 //   R3 : current thread.
-void StubCodeCompiler::GenerateInvokeDartCodeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateInvokeDartCodeStub() {
   __ Comment("InvokeDartCodeStub");
 
   // Copy the C stack pointer (CSP/R31) into the stack pointer we'll actually
@@ -1731,7 +1713,7 @@
 //   R0: new allocated Context object.
 // Clobbered:
 //   R2, R3, R4, TMP
-void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateContextStub() {
   if (!FLAG_use_slow_path && FLAG_inline_alloc) {
     Label slow_case;
 
@@ -1786,7 +1768,7 @@
   // Write-barrier elimination might be enabled for this context (depending on
   // the size). To be safe, we check whether the allocated object is in old
   // space and, if so, call a leaf runtime to add it to the remembered set.
-  EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
+  EnsureIsNewOrRemembered(/*preserve_registers=*/false);
 
   // R0: new object
   // Restore the frame pointer.
@@ -1802,7 +1784,7 @@
 //   R0: new allocated Context object.
 // Clobbered:
 //   R1, (R2), R3, R4, (TMP)
-void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCloneContextStub() {
   if (!FLAG_use_slow_path && FLAG_inline_alloc) {
     Label slow_case;
 
@@ -1862,7 +1844,7 @@
   // Write-barrier elimination might be enabled for this context (depending on
   // the size). To be safe, we check whether the allocated object is in old
   // space and, if so, call a leaf runtime to add it to the remembered set.
-  EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
+  EnsureIsNewOrRemembered(/*preserve_registers=*/false);
 
   // R0: new object
   // Restore the frame pointer.
@@ -1870,7 +1852,7 @@
   __ ret();
 }
 
-void StubCodeCompiler::GenerateWriteBarrierWrappersStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateWriteBarrierWrappersStub() {
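   // Emit one wrapper per register that is available to Dart code; registers
   // outside kDartAvailableCpuRegs are skipped.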
   for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
     if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
 
@@ -2068,11 +2050,11 @@
   }
 }
 
-void StubCodeCompiler::GenerateWriteBarrierStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateWriteBarrierStub() {
   GenerateWriteBarrierStubHelper(assembler, false);
 }
 
-void StubCodeCompiler::GenerateArrayWriteBarrierStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateArrayWriteBarrierStub() {
   GenerateWriteBarrierStubHelper(assembler, true);
 }
 
@@ -2182,16 +2164,15 @@
 }
 
 // Called for inline allocation of objects (any class).
-void StubCodeCompiler::GenerateAllocateObjectStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateObjectStub() {
   GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/false);
 }
 
-void StubCodeCompiler::GenerateAllocateObjectParameterizedStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateObjectParameterizedStub() {
   GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/true);
 }
 
-void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateObjectSlowStub() {
   if (!FLAG_precompiled_mode) {
     __ ldr(CODE_REG,
            Address(THR, target::Thread::call_to_runtime_stub_offset()));
@@ -2217,7 +2198,7 @@
 
   // Write-barrier elimination is enabled for [cls] and we therefore need to
   // ensure that the object is in new-space or has its remembered bit set.
-  EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
+  EnsureIsNewOrRemembered(/*preserve_registers=*/false);
 
   __ LeaveStubFrame();
 
@@ -2226,7 +2207,6 @@
 
 // Called for inline allocation of objects.
 void StubCodeCompiler::GenerateAllocationStubForClass(
-    Assembler* assembler,
     UnresolvedPcRelativeCalls* unresolved_calls,
     const Class& cls,
     const Code& allocate_object,
@@ -2299,8 +2279,7 @@
 //  LR : return address.
 //  SP : address of last argument.
 //  R4: arguments descriptor array.
-void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub() {
   __ EnterStubFrame();
 
   // Load the receiver.
@@ -2339,8 +2318,7 @@
 //  R5: inline cache data object.
 // Cannot use function object from ICData as it may be the inlined
 // function and not the top-scope function.
-void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement() {
   Register ic_reg = R5;
   Register func_reg = R6;
   if (FLAG_precompiled_mode) {
@@ -2367,8 +2345,7 @@
 }
 
 // Loads function into 'temp_reg'.
-void StubCodeCompiler::GenerateUsageCounterIncrement(Assembler* assembler,
-                                                     Register temp_reg) {
+void StubCodeCompiler::GenerateUsageCounterIncrement(Register temp_reg) {
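   // Usage counters feed the JIT's optimization heuristics; AOT-compiled code
   // must never reach this stub.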
   if (FLAG_precompiled_mode) {
     __ Breakpoint();
     return;
@@ -2482,7 +2459,6 @@
 // - Match found -> jump to target.
 // - Match not found -> jump to IC miss.
 void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
-    Assembler* assembler,
     intptr_t num_args,
     const RuntimeEntry& handle_ic_miss,
     Token::Kind kind,
@@ -2500,9 +2476,9 @@
   }
 
   if (optimized == kOptimized) {
-    GenerateOptimizedUsageCounterIncrement(assembler);
+    GenerateOptimizedUsageCounterIncrement();
   } else {
-    GenerateUsageCounterIncrement(assembler, /*scratch=*/R6);
+    GenerateUsageCounterIncrement(/*scratch=*/R6);
   }
 
   ASSERT(exactness == kIgnoreExactness);  // Unimplemented.
@@ -2735,67 +2711,63 @@
 // R0: receiver
 // R5: ICData
 // LR: return address
-void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
+      1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
       kUnoptimized, kInstanceCall, kIgnoreExactness);
 }
 
 // R0: receiver
 // R5: ICData
 // LR: return address
-void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub() {
   __ Stop("Unimplemented");
 }
 
 // R0: receiver
 // R5: ICData
 // LR: return address
-void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
+      2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
       kUnoptimized, kInstanceCall, kIgnoreExactness);
 }
 
 // R0: receiver
 // R5: ICData
 // LR: return address
-void StubCodeCompiler::GenerateSmiAddInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSmiAddInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD,
-      kUnoptimized, kInstanceCall, kIgnoreExactness);
+      2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD, kUnoptimized,
+      kInstanceCall, kIgnoreExactness);
 }
 
 // R0: receiver
 // R5: ICData
 // LR: return address
-void StubCodeCompiler::GenerateSmiLessInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSmiLessInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT,
-      kUnoptimized, kInstanceCall, kIgnoreExactness);
+      2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT, kUnoptimized,
+      kInstanceCall, kIgnoreExactness);
 }
 
 // R0: receiver
 // R5: ICData
 // LR: return address
-void StubCodeCompiler::GenerateSmiEqualInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSmiEqualInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ,
-      kUnoptimized, kInstanceCall, kIgnoreExactness);
+      2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ, kUnoptimized,
+      kInstanceCall, kIgnoreExactness);
 }
 
 // R0: receiver
 // R5: ICData
 // R6: Function
 // LR: return address
-void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
-      kOptimized, kInstanceCall, kIgnoreExactness);
+      1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, kOptimized,
+      kInstanceCall, kIgnoreExactness);
 }
 
 // R0: receiver
@@ -2803,8 +2775,7 @@
 // R6: Function
 // LR: return address
 void StubCodeCompiler::
-    GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub(
-        Assembler* assembler) {
+    GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub() {
   __ Stop("Unimplemented");
 }
 
@@ -2812,19 +2783,17 @@
 // R5: ICData
 // R6: Function
 // LR: return address
-void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
+      2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
       kOptimized, kInstanceCall, kIgnoreExactness);
 }
 
 // R5: ICData
 // LR: return address
-void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub() {
   GenerateRecordEntryPoint(assembler);
-  GenerateUsageCounterIncrement(assembler, /* scratch */ R6);
+  GenerateUsageCounterIncrement(/* scratch */ R6);
 #if defined(DEBUG)
   {
     Label ok;
@@ -2900,28 +2869,26 @@
 
 // R5: ICData
 // LR: return address
-void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub(
-    Assembler* assembler) {
-  GenerateUsageCounterIncrement(assembler, /* scratch */ R6);
-  GenerateNArgsCheckInlineCacheStub(
-      assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
-      kUnoptimized, kStaticCall, kIgnoreExactness);
+void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub() {
+  GenerateUsageCounterIncrement(/* scratch */ R6);
+  GenerateNArgsCheckInlineCacheStub(1, kStaticCallMissHandlerOneArgRuntimeEntry,
+                                    Token::kILLEGAL, kUnoptimized, kStaticCall,
+                                    kIgnoreExactness);
 }
 
 // R5: ICData
 // LR: return address
-void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub(
-    Assembler* assembler) {
-  GenerateUsageCounterIncrement(assembler, /* scratch */ R6);
+void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub() {
+  GenerateUsageCounterIncrement(/* scratch */ R6);
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
+      2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
       kUnoptimized, kStaticCall, kIgnoreExactness);
 }
 
 // Stub for compiling a function and jumping to the compiled code.
 // ARGS_DESC_REG: Arguments descriptor.
 // FUNCTION_REG: Function.
-void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateLazyCompileStub() {
   // Preserve arg desc.
   __ EnterStubFrame();
   __ Push(ARGS_DESC_REG);  // Save arg. desc.
@@ -2939,7 +2906,7 @@
 }
 
 // R5: Contains an ICData.
-void StubCodeCompiler::GenerateICCallBreakpointStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateICCallBreakpointStub() {
 #if defined(PRODUCT)
   __ Stop("No debugging in PRODUCT mode");
 #else
@@ -2957,8 +2924,7 @@
 #endif  // defined(PRODUCT)
 }
 
-void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub() {
 #if defined(PRODUCT)
   __ Stop("No debugging in PRODUCT mode");
 #else
@@ -2974,7 +2940,7 @@
 #endif  // defined(PRODUCT)
 }
 
-void StubCodeCompiler::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateRuntimeCallBreakpointStub() {
 #if defined(PRODUCT)
   __ Stop("No debugging in PRODUCT mode");
 #else
@@ -2989,7 +2955,7 @@
 }
 
 // Called only from unoptimized code. All relevant registers have been saved.
-void StubCodeCompiler::GenerateDebugStepCheckStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDebugStepCheckStub() {
 #if defined(PRODUCT)
   __ Stop("No debugging in PRODUCT mode");
 #else
@@ -3219,26 +3185,26 @@
 }
 
 // See comment on [GenerateSubtypeNTestCacheStub].
-void StubCodeCompiler::GenerateSubtype1TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype1TestCacheStub() {
   GenerateSubtypeNTestCacheStub(assembler, 1);
 }
 
 // See comment on [GenerateSubtypeNTestCacheStub].
-void StubCodeCompiler::GenerateSubtype3TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype3TestCacheStub() {
   GenerateSubtypeNTestCacheStub(assembler, 3);
 }
 
 // See comment on [GenerateSubtypeNTestCacheStub].
-void StubCodeCompiler::GenerateSubtype5TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype5TestCacheStub() {
   GenerateSubtypeNTestCacheStub(assembler, 5);
 }
 
 // See comment on [GenerateSubtypeNTestCacheStub].
-void StubCodeCompiler::GenerateSubtype7TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype7TestCacheStub() {
   GenerateSubtypeNTestCacheStub(assembler, 7);
 }
 
-void StubCodeCompiler::GenerateGetCStackPointerStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateGetCStackPointerStub() {
   __ mov(R0, CSP);
   __ ret();
 }
@@ -3252,7 +3218,7 @@
 // Does not return.
 //
 // Notice: We need to keep this in sync with `Simulator::JumpToFrame()`.
-void StubCodeCompiler::GenerateJumpToFrameStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateJumpToFrameStub() {
   ASSERT(kExceptionObjectReg == R0);
   ASSERT(kStackTraceObjectReg == R1);
   __ set_lr_state(compiler::LRState::Clobbered());
@@ -3305,7 +3271,7 @@
 //
 // The arguments are stored in the Thread object.
 // Does not return.
-void StubCodeCompiler::GenerateRunExceptionHandlerStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateRunExceptionHandlerStub() {
   WRITES_RETURN_ADDRESS_TO_LR(
       __ LoadFromOffset(LR, THR, target::Thread::resume_pc_offset()));
 
@@ -3328,7 +3294,7 @@
 // Deoptimize a frame on the call stack before rewinding.
 // The arguments are stored in the Thread object.
 // No result.
-void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptForRewindStub() {
   // Push zap value instead of CODE_REG.
   __ LoadImmediate(TMP, kZapCodeReg);
   __ Push(TMP);
@@ -3348,7 +3314,7 @@
 // Calls to the runtime to optimize the given function.
 // R6: function to be re-optimized.
 // ARGS_DESC_REG: argument descriptor (preserved).
-void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateOptimizeFunctionStub() {
   __ LoadFromOffset(CODE_REG, THR, target::Thread::optimize_stub_offset());
   __ EnterStubFrame();
   __ Push(ARGS_DESC_REG);
@@ -3416,8 +3382,7 @@
 // SP + 4: left operand.
 // SP + 0: right operand.
 // Return Zero condition flag set if equal.
-void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub() {
 #if !defined(PRODUCT)
   // Check single stepping.
   Label stepping, done_stepping;
@@ -3450,8 +3415,7 @@
 // SP + 4: left operand.
 // SP + 0: right operand.
 // Return Zero condition flag set if equal.
-void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub() {
   const Register left = R1;
   const Register right = R0;
   __ LoadFromOffset(left, SP, 1 * target::kWordSize);
@@ -3466,7 +3430,7 @@
 //  FUNCTION_REG: target function
 //  CODE_REG: target Code
 //  ARGS_DESC_REG: arguments descriptor
-void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateMegamorphicCallStub() {
   // Jump if receiver is a smi.
   Label smi_case;
   __ BranchIfSmi(R0, &smi_case);
@@ -3541,13 +3505,13 @@
   __ b(&cid_loaded);
 
   __ Bind(&miss);
-  GenerateSwitchableCallMissStub(assembler);
+  GenerateSwitchableCallMissStub();
 }
 
 // Input:
 //   R0 - receiver
 //   IC_DATA_REG - icdata
-void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateICCallThroughCodeStub() {
   Label loop, found, miss;
   __ ldr(R8, FieldAddress(IC_DATA_REG, target::ICData::entries_offset()));
   __ ldr(ARGS_DESC_REG,
@@ -3599,8 +3563,7 @@
 //   R5: MonomorphicSmiableCall object
 //
 //   R1: clobbered
-void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub() {
   Label miss;
   __ LoadClassIdMayBeSmi(IP0, R0);
 
@@ -3623,7 +3586,7 @@
 
 // Called from switchable IC calls.
 //  R0: receiver
-void StubCodeCompiler::GenerateSwitchableCallMissStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSwitchableCallMissStub() {
   __ ldr(CODE_REG,
          Address(THR, target::Thread::switchable_call_miss_stub_offset()));
   __ EnterStubFrame();
@@ -3650,7 +3613,7 @@
 //  R5: SingleTargetCache
 // Passed to target:
 //  CODE_REG: target Code object
-void StubCodeCompiler::GenerateSingleTargetCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSingleTargetCallStub() {
   Label miss;
   __ LoadClassIdMayBeSmi(R1, R0);
   __ ldr(R2, FieldAddress(R5, target::SingleTargetCache::lower_limit_offset()),
@@ -3705,8 +3668,7 @@
   return -1;
 }
 
-void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(Assembler* assembler,
-                                                          intptr_t cid) {
+void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(intptr_t cid) {
   const intptr_t element_size = TypedDataElementSizeInBytes(cid);
   const intptr_t max_len = TypedDataMaxNewSpaceElements(cid);
   const intptr_t scale_shift = GetScaleFactor(element_size);
diff --git a/runtime/vm/compiler/stub_code_compiler_ia32.cc b/runtime/vm/compiler/stub_code_compiler_ia32.cc
index 58d571e..2b5fbe9 100644
--- a/runtime/vm/compiler/stub_code_compiler_ia32.cc
+++ b/runtime/vm/compiler/stub_code_compiler_ia32.cc
@@ -33,8 +33,7 @@
 //
 // WARNING: This might clobber all registers except for [EAX], [THR] and [FP].
 // The caller should simply call LeaveFrame() and return.
-void StubCodeCompiler::EnsureIsNewOrRemembered(Assembler* assembler,
-                                               bool preserve_registers) {
+void StubCodeCompiler::EnsureIsNewOrRemembered(bool preserve_registers) {
   // If the object is not remembered we call a leaf-runtime to add it to the
   // remembered set.
   Label done;
@@ -61,7 +60,7 @@
 //   ECX : address of the runtime function to call.
 //   EDX : number of arguments to the call.
 // Must preserve callee saved registers EDI and EBX.
-void StubCodeCompiler::GenerateCallToRuntimeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallToRuntimeStub() {
   const intptr_t thread_offset = target::NativeArguments::thread_offset();
   const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
   const intptr_t argv_offset = target::NativeArguments::argv_offset();
@@ -137,7 +136,7 @@
   __ ret();
 }
 
-void StubCodeCompiler::GenerateEnterSafepointStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateEnterSafepointStub() {
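   // Save all general-purpose registers, then reserve 8 bytes of stack to
   // spill XMM0.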
   __ pushal();
   __ subl(SPREG, Immediate(8));
   __ movsd(Address(SPREG, 0), XMM0);
@@ -179,13 +178,12 @@
   __ ret();
 }
 
-void StubCodeCompiler::GenerateExitSafepointStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateExitSafepointStub() {
   GenerateExitSafepointStubCommon(
       assembler, kExitSafepointRuntimeEntry.OffsetFromThread());
 }
 
-void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub() {
   GenerateExitSafepointStubCommon(
       assembler,
       kExitSafepointIgnoreUnwindInProgressRuntimeEntry.OffsetFromThread());
@@ -200,8 +198,7 @@
 // On exit:
 //   Stack: preserved
 //   EBX: clobbered (even though it's normally callee-saved)
-void StubCodeCompiler::GenerateCallNativeThroughSafepointStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateCallNativeThroughSafepointStub() {
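   // Pop the caller's return address into EBX (clobbered, as noted above).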
   __ popl(EBX);
 
   __ movl(ECX, compiler::Immediate(target::Thread::exit_through_ffi()));
@@ -214,7 +211,6 @@
 }
 
 void StubCodeCompiler::GenerateJITCallbackTrampolines(
-    Assembler* assembler,
     intptr_t next_callback_id) {
   Label done, ret_4;
 
@@ -346,7 +342,6 @@
 }
 
 void StubCodeCompiler::GenerateSharedStubGeneric(
-    Assembler* assembler,
     bool save_fpu_registers,
     intptr_t self_code_stub_offset_from_thread,
     bool allow_return,
@@ -356,7 +351,6 @@
 }
 
 void StubCodeCompiler::GenerateSharedStub(
-    Assembler* assembler,
     bool save_fpu_registers,
     const RuntimeEntry* target,
     intptr_t self_code_stub_offset_from_thread,
@@ -366,20 +360,17 @@
   __ Breakpoint();
 }
 
-void StubCodeCompiler::GenerateRangeError(Assembler* assembler,
-                                          bool with_fpu_regs) {
+void StubCodeCompiler::GenerateRangeError(bool with_fpu_regs) {
   // Only used in AOT.
   __ Breakpoint();
 }
 
-void StubCodeCompiler::GenerateWriteError(Assembler* assembler,
-                                          bool with_fpu_regs) {
+void StubCodeCompiler::GenerateWriteError(bool with_fpu_regs) {
   // Only used in AOT.
   __ Breakpoint();
 }
 
-void StubCodeCompiler::GenerateDispatchTableNullErrorStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateDispatchTableNullErrorStub() {
   // Only used in AOT.
   __ Breakpoint();
 }
@@ -472,14 +463,14 @@
   __ ret();
 }
 
-void StubCodeCompiler::GenerateCallNoScopeNativeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallNoScopeNativeStub() {
   GenerateCallNativeWithWrapperStub(
       assembler,
       Address(THR,
               target::Thread::no_scope_native_wrapper_entry_point_offset()));
 }
 
-void StubCodeCompiler::GenerateCallAutoScopeNativeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallAutoScopeNativeStub() {
   GenerateCallNativeWithWrapperStub(
       assembler,
       Address(THR,
@@ -492,7 +483,7 @@
 //   EAX : address of first argument in argument array.
 //   ECX : address of the native function to call.
 //   EDX : argc_tag including number of arguments and function kind.
-void StubCodeCompiler::GenerateCallBootstrapNativeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallBootstrapNativeStub() {
   GenerateCallNativeWithWrapperStub(
       assembler,
       Address(THR,
@@ -501,7 +492,7 @@
 
 // Input parameters:
 //   ARGS_DESC_REG: arguments descriptor array.
-void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallStaticFunctionStub() {
   __ EnterStubFrame();
   __ pushl(ARGS_DESC_REG);  // Preserve arguments descriptor array.
   __ pushl(Immediate(0));  // Set up space on stack for return value.
@@ -517,7 +508,7 @@
 // Called from a static call only when an invalid code has been entered
 // (invalid because its function was optimized or deoptimized).
 // ARGS_DESC_REG: arguments descriptor array.
-void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateFixCallersTargetStub() {
   Label monomorphic;
   __ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);
 
@@ -552,8 +543,7 @@
 
 // Called from object allocate instruction when the allocation stub has been
 // disabled.
-void StubCodeCompiler::GenerateFixAllocationStubTargetStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateFixAllocationStubTargetStub() {
   __ EnterStubFrame();
   __ pushl(Immediate(0));  // Set up space on stack for return value.
   __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0);
@@ -566,8 +556,7 @@
 
 // Called from object allocate instruction when the allocation stub for a
 // generic class has been disabled.
-void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub() {
   __ EnterStubFrame();
   // Preserve type arguments register.
   __ pushl(AllocateObjectABI::kTypeArgumentsReg);
@@ -763,8 +752,7 @@
 }
 
 // EAX: result, must be preserved
-void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub() {
   // Return address for "call" to deopt stub.
   __ pushl(Immediate(kZapReturnAddress));
   GenerateDeoptimizationSequence(assembler, kLazyDeoptFromReturn);
@@ -773,15 +761,14 @@
 
 // EAX: exception, must be preserved
 // EDX: stacktrace, must be preserved
-void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub() {
   // Return address for "call" to deopt stub.
   __ pushl(Immediate(kZapReturnAddress));
   GenerateDeoptimizationSequence(assembler, kLazyDeoptFromThrow);
   __ ret();
 }
 
-void StubCodeCompiler::GenerateDeoptimizeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptimizeStub() {
   GenerateDeoptimizationSequence(assembler, kEagerDeopt);
   __ ret();
 }
@@ -832,8 +819,7 @@
   GenerateNoSuchMethodDispatcherCode(assembler);
 }
 
-void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub() {
   GenerateNoSuchMethodDispatcherCode(assembler);
 }
 
@@ -845,7 +831,7 @@
 //   AllocateArrayABI::kResultReg: newly allocated array.
 // Clobbered:
 //   EBX, EDI
-void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateArrayStub() {
   if (!FLAG_use_slow_path && FLAG_inline_alloc) {
     Label slow_case;
     // Compute the size to be allocated: it is based on the array length
@@ -998,7 +984,7 @@
 //   ESP + 12 : arguments array.
 //   ESP + 16 : current thread.
 // Uses EAX, EDX, ECX, EDI as temporary registers.
-void StubCodeCompiler::GenerateInvokeDartCodeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateInvokeDartCodeStub() {
   const intptr_t kTargetCodeOffset = 2 * target::kWordSize;
   const intptr_t kArgumentsDescOffset = 3 * target::kWordSize;
   const intptr_t kArgumentsOffset = 4 * target::kWordSize;
@@ -1207,7 +1193,7 @@
 // EAX: new allocated Context object.
 // Clobbered:
 // EBX, EDX
-void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateContextStub() {
   if (!FLAG_use_slow_path && FLAG_inline_alloc) {
     Label slow_case;
 
@@ -1257,7 +1243,7 @@
   // Write-barrier elimination might be enabled for this context (depending on
   // the size). To be safe, we check whether the allocated object is in old
   // space and, if so, call a leaf runtime to add it to the remembered set.
-  EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
+  EnsureIsNewOrRemembered(/*preserve_registers=*/false);
 
   // EAX: new object
   // Restore the frame pointer.
@@ -1273,7 +1259,7 @@
 //   EAX: new allocated Context object.
 // Clobbered:
 //   EBX, ECX, EDX
-void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCloneContextStub() {
   if (!FLAG_use_slow_path && FLAG_inline_alloc) {
     Label slow_case;
 
@@ -1331,7 +1317,7 @@
   // Write-barrier elimination might be enabled for this context (depending on
   // the size). To be safe, we check whether the allocated object is in old
   // space and, if so, call a leaf runtime to add it to the remembered set.
-  EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
+  EnsureIsNewOrRemembered(/*preserve_registers=*/false);
 
   // EAX: new object
   // Restore the frame pointer.
@@ -1339,7 +1325,7 @@
   __ ret();
 }
 
-void StubCodeCompiler::GenerateWriteBarrierWrappersStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateWriteBarrierWrappersStub() {
   for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
     if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
 
@@ -1367,8 +1353,7 @@
 COMPILE_ASSERT(kWriteBarrierObjectReg == EDX);
 COMPILE_ASSERT(kWriteBarrierValueReg == EBX);
 COMPILE_ASSERT(kWriteBarrierSlotReg == EDI);
-static void GenerateWriteBarrierStubHelper(Assembler* assembler,
-                                           bool cards) {
+static void GenerateWriteBarrierStubHelper(Assembler* assembler, bool cards) {
   // Save values being destroyed.
   __ pushl(EAX);
   __ pushl(ECX);
@@ -1524,24 +1509,23 @@
   }
 }
 
-void StubCodeCompiler::GenerateWriteBarrierStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateWriteBarrierStub() {
   GenerateWriteBarrierStubHelper(assembler, false);
 }
 
-void StubCodeCompiler::GenerateArrayWriteBarrierStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateArrayWriteBarrierStub() {
   GenerateWriteBarrierStubHelper(assembler, true);
 }
 
-void StubCodeCompiler::GenerateAllocateObjectStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateObjectStub() {
   __ int3();
 }
 
-void StubCodeCompiler::GenerateAllocateObjectParameterizedStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateObjectParameterizedStub() {
   __ int3();
 }
 
-void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateObjectSlowStub() {
   __ int3();
 }
 
@@ -1554,7 +1538,6 @@
 // Returns patch_code_pc offset where patching code for disabling the stub
 // has been generated (similar to regularly generated Dart code).
 void StubCodeCompiler::GenerateAllocationStubForClass(
-    Assembler* assembler,
     UnresolvedPcRelativeCalls* unresolved_calls,
     const Class& cls,
     const Code& allocate_object,
@@ -1682,7 +1665,7 @@
   if (AllocateObjectInstr::WillAllocateNewOrRemembered(cls)) {
     // Write-barrier elimination is enabled for [cls] and we therefore need to
     // ensure that the object is in new-space or has its remembered bit set.
-    EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
+    EnsureIsNewOrRemembered(/*preserve_registers=*/false);
   }
 
   // AllocateObjectABI::kResultReg: new object
@@ -1699,8 +1682,7 @@
 //   ESP + 4 : address of last argument.
 //   EDX : arguments descriptor array.
 // Uses EAX, EBX, EDI as temporary registers.
-void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub() {
   __ EnterStubFrame();
 
   // Load the receiver.
@@ -1738,8 +1720,7 @@
 
 // Cannot use function object from ICData as it may be the inlined
 // function and not the top-scope function.
-void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement() {
   Register ic_reg = ECX;
   Register func_reg = EAX;
   if (FLAG_trace_optimized_ic_calls) {
@@ -1759,8 +1740,7 @@
 }
 
 // Loads function into 'temp_reg'.
-void StubCodeCompiler::GenerateUsageCounterIncrement(Assembler* assembler,
-                                                     Register temp_reg) {
+void StubCodeCompiler::GenerateUsageCounterIncrement(Register temp_reg) {
   if (FLAG_optimization_counter_threshold >= 0) {
     Register func_reg = temp_reg;
     ASSERT(func_reg != IC_DATA_REG);
@@ -1856,24 +1836,22 @@
 // - Match found -> jump to target.
 // - Match not found -> jump to IC miss.
 void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
-    Assembler* assembler,
     intptr_t num_args,
     const RuntimeEntry& handle_ic_miss,
     Token::Kind kind,
     Optimized optimized,
     CallType type,
     Exactness exactness) {
-  GenerateNArgsCheckInlineCacheStubForEntryKind(
-      assembler, num_args, handle_ic_miss, kind, optimized, type, exactness,
-      CodeEntryKind::kNormal);
+  GenerateNArgsCheckInlineCacheStubForEntryKind(num_args, handle_ic_miss, kind,
+                                                optimized, type, exactness,
+                                                CodeEntryKind::kNormal);
   __ BindUncheckedEntryPoint();
-  GenerateNArgsCheckInlineCacheStubForEntryKind(
-      assembler, num_args, handle_ic_miss, kind, optimized, type, exactness,
-      CodeEntryKind::kUnchecked);
+  GenerateNArgsCheckInlineCacheStubForEntryKind(num_args, handle_ic_miss, kind,
+                                                optimized, type, exactness,
+                                                CodeEntryKind::kUnchecked);
 }
 
 void StubCodeCompiler::GenerateNArgsCheckInlineCacheStubForEntryKind(
-    Assembler* assembler,
     intptr_t num_args,
     const RuntimeEntry& handle_ic_miss,
     Token::Kind kind,
@@ -1882,9 +1860,9 @@
     Exactness exactness,
     CodeEntryKind entry_kind) {
   if (optimized == kOptimized) {
-    GenerateOptimizedUsageCounterIncrement(assembler);
+    GenerateOptimizedUsageCounterIncrement();
   } else {
-    GenerateUsageCounterIncrement(assembler, /* scratch */ EAX);
+    GenerateUsageCounterIncrement(/* scratch */ EAX);
   }
 
   ASSERT(exactness == kIgnoreExactness);  // Unimplemented.
@@ -2065,77 +2043,71 @@
 // EBX: receiver
 // ECX: ICData
 // ESP[0]: return address
-void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
+      1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
       kUnoptimized, kInstanceCall, kIgnoreExactness);
 }
 
 // EBX: receiver
 // ECX: ICData
 // ESP[0]: return address
-void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub() {
   __ Stop("Unimplemented");
 }
 
-void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub() {
   __ Stop("Unimplemented");
 }
 
-void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub() {
   __ Stop("Unimplemented");
 }
 
 // EBX: receiver
 // ECX: ICData
 // ESP[0]: return address
-void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
+      2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
       kUnoptimized, kInstanceCall, kIgnoreExactness);
 }
 
 // EBX: receiver
 // ECX: ICData
 // ESP[0]: return address
-void StubCodeCompiler::GenerateSmiAddInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSmiAddInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD,
-      kUnoptimized, kInstanceCall, kIgnoreExactness);
+      2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD, kUnoptimized,
+      kInstanceCall, kIgnoreExactness);
 }
 
 // EBX: receiver
 // ECX: ICData
 // ESP[0]: return address
-void StubCodeCompiler::GenerateSmiLessInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSmiLessInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT,
-      kUnoptimized, kInstanceCall, kIgnoreExactness);
+      2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT, kUnoptimized,
+      kInstanceCall, kIgnoreExactness);
 }
 
 // EBX: receiver
 // ECX: ICData
 // ESP[0]: return address
-void StubCodeCompiler::GenerateSmiEqualInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSmiEqualInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ,
-      kUnoptimized, kInstanceCall, kIgnoreExactness);
+      2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ, kUnoptimized,
+      kInstanceCall, kIgnoreExactness);
 }
 
 // EBX: receiver
 // ECX: ICData
 // EAX: Function
 // ESP[0]: return address
-void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
-      kOptimized, kInstanceCall, kIgnoreExactness);
+      1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, kOptimized,
+      kInstanceCall, kIgnoreExactness);
 }
 
 // EBX: receiver
@@ -2143,8 +2115,7 @@
 // EAX: Function
 // ESP[0]: return address
 void StubCodeCompiler::
-    GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub(
-        Assembler* assembler) {
+    GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub() {
   __ Stop("Unimplemented");
 }
 
@@ -2152,19 +2123,19 @@
 // ECX: ICData
 // EAX: Function
 // ESP[0]: return address
-void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
+      2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
       kOptimized, kInstanceCall, kIgnoreExactness);
 }
 
 // ECX: ICData
 // ESP[0]: return address
 static void GenerateZeroArgsUnoptimizedStaticCallForEntryKind(
-    Assembler* assembler,
+    StubCodeCompiler* stub_code_compiler,
     CodeEntryKind entry_kind) {
-  StubCodeCompiler::GenerateUsageCounterIncrement(assembler, /* scratch */ EAX);
+  stub_code_compiler->GenerateUsageCounterIncrement(/* scratch */ EAX);
+  auto* const assembler = stub_code_compiler->assembler;
 
 #if defined(DEBUG)
   {
@@ -2226,37 +2197,34 @@
 #endif
 }
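
File-local static helpers cannot see the new member, so the diff passes the compiler object itself and re-extracts the assembler, exactly as `GenerateZeroArgsUnoptimizedStaticCallForEntryKind` does above. A small self-contained sketch of that pattern (mock types, not VM code):

    #include <iostream>

    struct Assembler {
      void Emit(const char* what) { std::cout << what << "\n"; }
    };

    class Compiler {
     public:
      explicit Compiler(Assembler* a) : assembler(a) {}
      void GenerateUsageCounterIncrement() { assembler->Emit("inc counter"); }
      Assembler* const assembler;
    };

    // The free helper calls members on the compiler, then grabs the
    // assembler for direct emission, the same shape as the diff's
    // `auto* const assembler = stub_code_compiler->assembler;`.
    static void GenerateForEntryKind(Compiler* compiler, bool unchecked) {
      compiler->GenerateUsageCounterIncrement();
      auto* const assembler = compiler->assembler;
      assembler->Emit(unchecked ? "unchecked entry body" : "normal entry body");
    }

    int main() {
      Assembler a;
      Compiler c(&a);
      GenerateForEntryKind(&c, /*unchecked=*/false);
      GenerateForEntryKind(&c, /*unchecked=*/true);
      return 0;
    }
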
 
-void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub(
-    Assembler* assembler) {
-  GenerateZeroArgsUnoptimizedStaticCallForEntryKind(assembler,
+void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub() {
+  GenerateZeroArgsUnoptimizedStaticCallForEntryKind(this,
                                                     CodeEntryKind::kNormal);
   __ BindUncheckedEntryPoint();
-  GenerateZeroArgsUnoptimizedStaticCallForEntryKind(assembler,
+  GenerateZeroArgsUnoptimizedStaticCallForEntryKind(this,
                                                     CodeEntryKind::kUnchecked);
 }
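
The stub above emits its body twice, once per `CodeEntryKind`, with `BindUncheckedEntryPoint()` marking where the second copy begins. A toy model of that dual-entry layout, with a string emitter standing in for machine code:

    #include <cstdio>
    #include <string>
    #include <vector>

    enum class CodeEntryKind { kNormal, kUnchecked };

    struct Stub {
      std::vector<std::string> code;
      size_t unchecked_entry_offset = 0;
    };

    static void EmitBodyForEntryKind(Stub* stub, CodeEntryKind kind) {
      stub->code.push_back(kind == CodeEntryKind::kNormal ? "body(normal)"
                                                          : "body(unchecked)");
    }

    int main() {
      Stub stub;
      EmitBodyForEntryKind(&stub, CodeEntryKind::kNormal);
      // Mirrors __ BindUncheckedEntryPoint(): record where the second copy
      // of the body starts so callers can enter past the checked prologue.
      stub.unchecked_entry_offset = stub.code.size();
      EmitBodyForEntryKind(&stub, CodeEntryKind::kUnchecked);
      std::printf("unchecked entry at instruction %zu\n",
                  stub.unchecked_entry_offset);
      return 0;
    }
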
 
 // ECX: ICData
 // ESP[0]: return address
-void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
+      2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
       kUnoptimized, kStaticCall, kIgnoreExactness);
 }
 
 // ECX: ICData
 // ESP[0]: return address
-void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
+      2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
       kUnoptimized, kStaticCall, kIgnoreExactness);
 }
 
 // Stub for compiling a function and jumping to the compiled code.
 // ARGS_DESC_REG: Arguments descriptor.
 // FUNCTION_REG: Function.
-void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateLazyCompileStub() {
   __ EnterStubFrame();
   __ pushl(ARGS_DESC_REG);  // Preserve arguments descriptor array.
   __ pushl(FUNCTION_REG);   // Pass function.
@@ -2269,7 +2237,7 @@
 }
 
 // ECX: Contains an ICData.
-void StubCodeCompiler::GenerateICCallBreakpointStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateICCallBreakpointStub() {
 #if defined(PRODUCT)
   __ Stop("No debugging in PRODUCT mode");
 #else
@@ -2287,8 +2255,7 @@
 #endif  // defined(PRODUCT)
 }
 
-void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub() {
 #if defined(PRODUCT)
   __ Stop("No debugging in PRODUCT mode");
 #else
@@ -2304,7 +2271,7 @@
 #endif  // defined(PRODUCT)
 }
 
-void StubCodeCompiler::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateRuntimeCallBreakpointStub() {
 #if defined(PRODUCT)
   __ Stop("No debugging in PRODUCT mode");
 #else
@@ -2321,7 +2288,7 @@
 }
 
 // Called only from unoptimized code.
-void StubCodeCompiler::GenerateDebugStepCheckStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDebugStepCheckStub() {
 #if defined(PRODUCT)
   __ Stop("No debugging in PRODUCT mode");
 #else
@@ -2568,29 +2535,29 @@
 }
 
 // See comment on [GenerateSubtypeNTestCacheStub].
-void StubCodeCompiler::GenerateSubtype1TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype1TestCacheStub() {
   GenerateSubtypeNTestCacheStub(assembler, 1);
 }
 
 // See comment on [GenerateSubtypeNTestCacheStub].
-void StubCodeCompiler::GenerateSubtype3TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype3TestCacheStub() {
   GenerateSubtypeNTestCacheStub(assembler, 3);
 }
 
 // See comment on [GenerateSubtypeNTestCacheStub].
-void StubCodeCompiler::GenerateSubtype5TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype5TestCacheStub() {
   GenerateSubtypeNTestCacheStub(assembler, 5);
 }
 
 // See comment on [GenerateSubtypeNTestCacheStub].
-void StubCodeCompiler::GenerateSubtype7TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype7TestCacheStub() {
   GenerateSubtypeNTestCacheStub(assembler, 7);
 }
 
 // Return the current stack pointer address, used to do stack alignment checks.
 // TOS + 0: return address
 // Result in EAX.
-void StubCodeCompiler::GenerateGetCStackPointerStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateGetCStackPointerStub() {
   __ leal(EAX, Address(ESP, target::kWordSize));
   __ ret();
 }
@@ -2602,7 +2569,7 @@
 // TOS + 3: frame_pointer
 // TOS + 4: thread
 // No Result.
-void StubCodeCompiler::GenerateJumpToFrameStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateJumpToFrameStub() {
   __ movl(THR, Address(ESP, 4 * target::kWordSize));  // Load target thread.
   __ movl(EBP,
           Address(ESP, 3 * target::kWordSize));  // Load target frame_pointer.
@@ -2641,7 +2608,7 @@
 //
 // The arguments are stored in the Thread object.
 // No result.
-void StubCodeCompiler::GenerateRunExceptionHandlerStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateRunExceptionHandlerStub() {
   ASSERT(kExceptionObjectReg == EAX);
   ASSERT(kStackTraceObjectReg == EDX);
   __ movl(EBX, Address(THR, target::Thread::resume_pc_offset()));
@@ -2665,7 +2632,7 @@
 // Deoptimize a frame on the call stack before rewinding.
 // The arguments are stored in the Thread object.
 // No result.
-void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptForRewindStub() {
   // Push the deopt pc.
   __ pushl(Address(THR, target::Thread::resume_pc_offset()));
   GenerateDeoptimizationSequence(assembler, kEagerDeopt);
@@ -2680,7 +2647,7 @@
 // Calls to the runtime to optimize the given function.
 // EBX: function to be reoptimized.
 // ARGS_DESC_REG: argument descriptor (preserved).
-void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateOptimizeFunctionStub() {
   __ movl(CODE_REG, Address(THR, target::Thread::optimize_stub_offset()));
   __ EnterStubFrame();
   __ pushl(ARGS_DESC_REG);
@@ -2756,8 +2723,7 @@
 // TOS + 1: right argument.
 // TOS + 2: left argument.
 // Returns ZF set.
-void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub() {
 #if !defined(PRODUCT)
   // Check single stepping.
   Label stepping, done_stepping;
@@ -2790,8 +2756,7 @@
 // TOS + 1: right argument.
 // TOS + 2: left argument.
 // Returns ZF set.
-void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub() {
   const Register left = EAX;
   const Register right = EDX;
   const Register temp = ECX;
@@ -2808,7 +2773,7 @@
 //  EBX: target entry point
 //  FUNCTION_REG: target function
 //  ARGS_DESC_REG: argument descriptor
-void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateMegamorphicCallStub() {
   // Jump if receiver is a smi.
   Label smi_case;
   // Check if object (in tmp) is a Smi.
@@ -2880,21 +2845,20 @@
 
   __ Bind(&miss);
   __ popl(EBX);  // restore receiver
-  GenerateSwitchableCallMissStub(assembler);
+  GenerateSwitchableCallMissStub();
 }
 
-void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateICCallThroughCodeStub() {
   __ int3();  // AOT only.
 }
 
-void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub() {
   __ int3();  // AOT only.
 }
 
 // Called from switchable IC calls.
 //  EBX: receiver
-void StubCodeCompiler::GenerateSwitchableCallMissStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSwitchableCallMissStub() {
   __ movl(CODE_REG,
           Address(THR, target::Thread::switchable_call_miss_stub_offset()));
   __ EnterStubFrame();
@@ -2916,7 +2880,7 @@
   __ jmp(EAX);
 }
 
-void StubCodeCompiler::GenerateSingleTargetCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSingleTargetCallStub() {
   __ int3();  // AOT only.
 }
 
@@ -2937,8 +2901,7 @@
   return static_cast<ScaleFactor>(0);
 }
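
`GetScaleFactor` maps a typed-data element size to an addressing-mode scale; the supported sizes are powers of two, so the scale is just log2 of the size. A hypothetical standalone version of the mapping (the ia32 function returns a `ScaleFactor` enum; the RISC-V file later in this diff computes the equivalent `scale_shift` directly):

    #include <cassert>
    #include <cstdio>

    static int ScaleShiftForElementSize(int element_size) {
      assert(element_size > 0 &&
             (element_size & (element_size - 1)) == 0);  // power of two
      int shift = 0;
      while ((1 << shift) < element_size) shift++;
      return shift;  // 1 -> 0, 2 -> 1, 4 -> 2, 8 -> 3, 16 -> 4
    }

    int main() {
      const int kSizes[] = {1, 2, 4, 8, 16};
      for (int size : kSizes) {
        std::printf("element size %2d -> scale shift %d\n", size,
                    ScaleShiftForElementSize(size));
      }
      return 0;
    }
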
 
-void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(Assembler* assembler,
-                                                          intptr_t cid) {
+void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(intptr_t cid) {
   const intptr_t element_size = TypedDataElementSizeInBytes(cid);
   const intptr_t max_len = TypedDataMaxNewSpaceElements(cid);
   ScaleFactor scale_factor = GetScaleFactor(element_size);
diff --git a/runtime/vm/compiler/stub_code_compiler_riscv.cc b/runtime/vm/compiler/stub_code_compiler_riscv.cc
index 0b0d26c..64ae49d 100644
--- a/runtime/vm/compiler/stub_code_compiler_riscv.cc
+++ b/runtime/vm/compiler/stub_code_compiler_riscv.cc
@@ -34,8 +34,7 @@
 //
 // WARNING: This might clobber all registers except for [A0], [THR] and [FP].
 // The caller should simply call LeaveStubFrame() and return.
-void StubCodeCompiler::EnsureIsNewOrRemembered(Assembler* assembler,
-                                               bool preserve_registers) {
+void StubCodeCompiler::EnsureIsNewOrRemembered(bool preserve_registers) {
   // If the object is not remembered we call a leaf-runtime to add it to the
   // remembered set.
   Label done;
@@ -60,7 +59,7 @@
 //   SP + 8*T4 : address of return value.
 //   T5 : address of the runtime function to call.
 //   T4 : number of arguments to the call.
-void StubCodeCompiler::GenerateCallToRuntimeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallToRuntimeStub() {
   const intptr_t thread_offset = target::NativeArguments::thread_offset();
   const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
   const intptr_t argv_offset = target::NativeArguments::argv_offset();
@@ -160,7 +159,6 @@
 }
 
 void StubCodeCompiler::GenerateSharedStubGeneric(
-    Assembler* assembler,
     bool save_fpu_registers,
     intptr_t self_code_stub_offset_from_thread,
     bool allow_return,
@@ -188,7 +186,6 @@
 }
 
 void StubCodeCompiler::GenerateSharedStub(
-    Assembler* assembler,
     bool save_fpu_registers,
     const RuntimeEntry* target,
     intptr_t self_code_stub_offset_from_thread,
@@ -207,12 +204,12 @@
                                     SharedSlowPathStubABI::kResultReg)));
     }
   };
-  GenerateSharedStubGeneric(assembler, save_fpu_registers,
+  GenerateSharedStubGeneric(save_fpu_registers,
                             self_code_stub_offset_from_thread, allow_return,
                             perform_runtime_call);
 }
 
-void StubCodeCompiler::GenerateEnterSafepointStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateEnterSafepointStub() {
   RegisterSet all_registers;
   all_registers.AddAllGeneralRegisters();
 
@@ -253,13 +250,12 @@
   __ ret();
 }
 
-void StubCodeCompiler::GenerateExitSafepointStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateExitSafepointStub() {
   GenerateExitSafepointStubCommon(
       assembler, kExitSafepointRuntimeEntry.OffsetFromThread());
 }
 
-void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub() {
   GenerateExitSafepointStubCommon(
       assembler,
       kExitSafepointIgnoreUnwindInProgressRuntimeEntry.OffsetFromThread());
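
Both exit-safepoint stubs share one body and differ only in which runtime entry's thread offset they pass, which is also why `GenerateExitSafepointStubCommon` keeps its explicit `assembler` parameter. A sketch of that offset-parameterized sharing; the offsets below are invented:

    #include <cstdio>

    struct Assembler {
      void CallFromThreadOffset(int offset) {
        std::printf("call [THR + %d]\n", offset);
      }
    };

    static void GenerateExitSafepointStubCommon(Assembler* assembler,
                                                int runtime_entry_offset) {
      assembler->CallFromThreadOffset(runtime_entry_offset);
    }

    int main() {
      Assembler assembler;
      const int kExitSafepointOffset = 0x40;              // assumed value
      const int kExitSafepointIgnoreUnwindOffset = 0x48;  // assumed value
      GenerateExitSafepointStubCommon(&assembler, kExitSafepointOffset);
      GenerateExitSafepointStubCommon(&assembler,
                                      kExitSafepointIgnoreUnwindOffset);
      return 0;
    }
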
@@ -274,8 +270,7 @@
 // On exit:
 //   S3: clobbered, although normally callee-saved
 //   Stack: preserved, CSP == SP
-void StubCodeCompiler::GenerateCallNativeThroughSafepointStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateCallNativeThroughSafepointStub() {
   COMPILE_ASSERT(IsAbiPreservedRegister(S3));
   __ mv(S3, RA);
   __ LoadImmediate(T1, target::Thread::exit_through_ffi());
@@ -299,7 +294,6 @@
 
 #if !defined(DART_PRECOMPILER)
 void StubCodeCompiler::GenerateJITCallbackTrampolines(
-    Assembler* assembler,
     intptr_t next_callback_id) {
 #if defined(USING_SIMULATOR)
  // TODO(37299): FFI is not supported in SIMRISCV32/64.
@@ -421,7 +415,6 @@
 // T1: The extracted method.
 // T4: The type_arguments_field_offset (or 0)
 void StubCodeCompiler::GenerateBuildMethodExtractorStub(
-    Assembler* assembler,
     const Code& closure_allocation_stub,
     const Code& context_allocation_stub,
     bool generic) {
@@ -513,8 +506,7 @@
   __ Ret();
 }
 
-void StubCodeCompiler::GenerateDispatchTableNullErrorStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateDispatchTableNullErrorStub() {
   __ EnterStubFrame();
   __ SmiTag(DispatchTableNullErrorABI::kClassIdReg);
   __ PushRegister(DispatchTableNullErrorABI::kClassIdReg);
@@ -523,8 +515,7 @@
   __ Breakpoint();
 }
 
-void StubCodeCompiler::GenerateRangeError(Assembler* assembler,
-                                          bool with_fpu_regs) {
+void StubCodeCompiler::GenerateRangeError(bool with_fpu_regs) {
   auto perform_runtime_call = [&]() {
    // If the generated code has unboxed index/length, we need to box them
    // before calling the runtime entry.
@@ -569,22 +560,21 @@
   };
 
   GenerateSharedStubGeneric(
-      assembler, /*save_fpu_registers=*/with_fpu_regs,
+      /*save_fpu_registers=*/with_fpu_regs,
       with_fpu_regs
           ? target::Thread::range_error_shared_with_fpu_regs_stub_offset()
           : target::Thread::range_error_shared_without_fpu_regs_stub_offset(),
       /*allow_return=*/false, perform_runtime_call);
 }
 
-void StubCodeCompiler::GenerateWriteError(Assembler* assembler,
-                                          bool with_fpu_regs) {
+void StubCodeCompiler::GenerateWriteError(bool with_fpu_regs) {
   auto perform_runtime_call = [&]() {
     __ CallRuntime(kWriteErrorRuntimeEntry, /*argument_count=*/0);
     __ Breakpoint();
   };
 
   GenerateSharedStubGeneric(
-      assembler, /*save_fpu_registers=*/with_fpu_regs,
+      /*save_fpu_registers=*/with_fpu_regs,
       with_fpu_regs
           ? target::Thread::write_error_shared_with_fpu_regs_stub_offset()
           : target::Thread::write_error_shared_without_fpu_regs_stub_offset(),
@@ -684,14 +674,14 @@
   __ ret();
 }
 
-void StubCodeCompiler::GenerateCallNoScopeNativeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallNoScopeNativeStub() {
   GenerateCallNativeWithWrapperStub(
       assembler,
       Address(THR,
               target::Thread::no_scope_native_wrapper_entry_point_offset()));
 }
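
The no-scope, auto-scope, and bootstrap native-call stubs are likewise a single wrapper parameterized by a `Thread` entry-point offset. Illustrative sketch only; these offsets are invented stand-ins for the `target::Thread` values:

    #include <cstdio>

    enum class NativeCallKind { kNoScope, kAutoScope, kBootstrap };

    static int WrapperEntryPointOffset(NativeCallKind kind) {
      switch (kind) {
        case NativeCallKind::kNoScope:   return 0x80;  // assumed
        case NativeCallKind::kAutoScope: return 0x88;  // assumed
        case NativeCallKind::kBootstrap: return 0x90;  // assumed
      }
      return -1;  // unreachable
    }

    int main() {
      std::printf("no-scope wrapper at THR+%#x\n",
                  WrapperEntryPointOffset(NativeCallKind::kNoScope));
      return 0;
    }
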
 
-void StubCodeCompiler::GenerateCallAutoScopeNativeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallAutoScopeNativeStub() {
   GenerateCallNativeWithWrapperStub(
       assembler,
       Address(THR,
@@ -704,7 +694,7 @@
 //   R5 : address of the native function to call.
 //   R2 : address of first argument in argument array.
 //   R1 : argc_tag including number of arguments and function kind.
-void StubCodeCompiler::GenerateCallBootstrapNativeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallBootstrapNativeStub() {
   GenerateCallNativeWithWrapperStub(
       assembler,
       Address(THR,
@@ -713,7 +703,7 @@
 
 // Input parameters:
 //   ARGS_DESC_REG: arguments descriptor array.
-void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallStaticFunctionStub() {
   // Create a stub frame as we are pushing some objects on the stack before
   // calling into the runtime.
   __ EnterStubFrame();
@@ -735,7 +725,7 @@
 // Called from a static call only when invalid code has been entered
 // (invalid because its function was optimized or deoptimized).
 // ARGS_DESC_REG: arguments descriptor array.
-void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateFixCallersTargetStub() {
   Label monomorphic;
   __ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);
 
@@ -786,8 +776,7 @@
 
 // Called from object allocate instruction when the allocation stub has been
 // disabled.
-void StubCodeCompiler::GenerateFixAllocationStubTargetStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateFixAllocationStubTargetStub() {
   // Load code pointer to this stub from the thread:
  // The one that is passed in is not correct: it points to the code object
   // that needs to be replaced.
@@ -808,8 +797,7 @@
 
 // Called from object allocate instruction when the allocation stub for a
 // generic class has been disabled.
-void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub() {
   // Load code pointer to this stub from the thread:
  // The one that is passed in is not correct: it points to the code object
   // that needs to be replaced.
@@ -1029,8 +1017,7 @@
 }
 
 // A0: result, must be preserved
-void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub() {
   // Push zap value instead of CODE_REG for lazy deopt.
   __ LoadImmediate(TMP, kZapCodeReg);
   __ PushRegister(TMP);
@@ -1044,8 +1031,7 @@
 
 // A0: exception, must be preserved
 // A1: stacktrace, must be preserved
-void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub() {
   // Push zap value instead of CODE_REG for lazy deopt.
   __ LoadImmediate(TMP, kZapCodeReg);
   __ PushRegister(TMP);
@@ -1057,7 +1043,7 @@
   __ ret();
 }
 
-void StubCodeCompiler::GenerateDeoptimizeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptimizeStub() {
   __ PushRegister(CODE_REG);
   __ lx(CODE_REG, Address(THR, target::Thread::deoptimize_stub_offset()));
   GenerateDeoptimizationSequence(assembler, kEagerDeopt);
@@ -1114,8 +1100,7 @@
 // Input:
 //   ARGS_DESC_REG - arguments descriptor
 //   IC_DATA_REG - icdata/megamorphic_cache
-void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub() {
   GenerateNoSuchMethodDispatcherBody(assembler);
 }
 
@@ -1128,7 +1113,7 @@
 //   AllocateArrayABI::kResultReg: newly allocated array.
 // Clobbered:
 //   T3, T4, T5
-void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateArrayStub() {
   if (!FLAG_use_slow_path && FLAG_inline_alloc) {
     Label slow_case;
     // Compute the size to be allocated, it is based on the array length
@@ -1278,8 +1263,7 @@
   __ ret();
 }
 
-void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub() {
  // For testing, call the allocation stub without inline allocation.
   if (!FLAG_use_slow_path && FLAG_inline_alloc) {
     Label slow_case;
@@ -1291,15 +1275,13 @@
   }
   COMPILE_ASSERT(AllocateMintABI::kResultReg ==
                  SharedSlowPathStubABI::kResultReg);
-  GenerateSharedStub(assembler, /*save_fpu_registers=*/true,
-                     &kAllocateMintRuntimeEntry,
+  GenerateSharedStub(/*save_fpu_registers=*/true, &kAllocateMintRuntimeEntry,
                      target::Thread::allocate_mint_with_fpu_regs_stub_offset(),
                      /*allow_return=*/true,
                      /*store_runtime_result_in_result_register=*/true);
 }
 
-void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub() {
  // For testing, call the allocation stub without inline allocation.
   if (!FLAG_use_slow_path && FLAG_inline_alloc) {
     Label slow_case;
@@ -1312,7 +1294,7 @@
   COMPILE_ASSERT(AllocateMintABI::kResultReg ==
                  SharedSlowPathStubABI::kResultReg);
   GenerateSharedStub(
-      assembler, /*save_fpu_registers=*/false, &kAllocateMintRuntimeEntry,
+      /*save_fpu_registers=*/false, &kAllocateMintRuntimeEntry,
       target::Thread::allocate_mint_without_fpu_regs_stub_offset(),
       /*allow_return=*/true,
       /*store_runtime_result_in_result_register=*/true);
@@ -1326,7 +1308,7 @@
 //   A2 : arguments array.
 //   A3 : current thread.
 // Beware!  TMP == A3
-void StubCodeCompiler::GenerateInvokeDartCodeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateInvokeDartCodeStub() {
   __ Comment("InvokeDartCodeStub");
 
   __ EnterFrame(1 * target::kWordSize);
@@ -1529,7 +1511,7 @@
 //   T1: number of context variables.
 // Output:
 //   A0: new allocated Context object.
-void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateContextStub() {
   if (!FLAG_use_slow_path && FLAG_inline_alloc) {
     Label slow_case;
 
@@ -1578,7 +1560,7 @@
   // Write-barrier elimination might be enabled for this context (depending on
   // the size). To be sure we will check if the allocated object is in old
   // space and if so call a leaf runtime to add it to the remembered set.
-  EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
+  EnsureIsNewOrRemembered(/*preserve_registers=*/false);
 
   // A0: new object
   // Restore the frame pointer.
@@ -1591,7 +1573,7 @@
 //   T5: context variable to clone.
 // Output:
 //   A0: new allocated Context object.
-void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCloneContextStub() {
   if (!FLAG_use_slow_path && FLAG_inline_alloc) {
     Label slow_case;
 
@@ -1650,14 +1632,14 @@
   // Write-barrier elimination might be enabled for this context (depending on
   // the size). To be sure we will check if the allocated object is in old
   // space and if so call a leaf runtime to add it to the remembered set.
-  EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
+  EnsureIsNewOrRemembered(/*preserve_registers=*/false);
 
   // A0: new object
   __ LeaveStubFrame();
   __ ret();
 }
 
-void StubCodeCompiler::GenerateWriteBarrierWrappersStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateWriteBarrierWrappersStub() {
   for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
     if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
 
@@ -1863,11 +1845,11 @@
   }
 }
 
-void StubCodeCompiler::GenerateWriteBarrierStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateWriteBarrierStub() {
   GenerateWriteBarrierStubHelper(assembler, false);
 }
 
-void StubCodeCompiler::GenerateArrayWriteBarrierStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateArrayWriteBarrierStub() {
   GenerateWriteBarrierStubHelper(assembler, true);
 }
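
`GenerateWriteBarrierStub` and `GenerateArrayWriteBarrierStub` wrap one helper whose `cards` flag selects large-array card marking. A toy rendering of that split, with plain functions instead of emitted code:

    #include <cstdio>

    static void WriteBarrier(bool cards) {
      if (cards) {
        std::puts("array barrier: may dirty a card of a large old-space array");
      } else {
        std::puts("field barrier: remember the object or mark the value");
      }
    }

    int main() {
      WriteBarrier(/*cards=*/false);  // GenerateWriteBarrierStub
      WriteBarrier(/*cards=*/true);   // GenerateArrayWriteBarrierStub
      return 0;
    }
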
 
@@ -1971,16 +1953,15 @@
 }
 
 // Called for inline allocation of objects (any class).
-void StubCodeCompiler::GenerateAllocateObjectStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateObjectStub() {
   GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/false);
 }
 
-void StubCodeCompiler::GenerateAllocateObjectParameterizedStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateObjectParameterizedStub() {
   GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/true);
 }
 
-void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateObjectSlowStub() {
   if (!FLAG_precompiled_mode) {
     __ lx(CODE_REG,
           Address(THR, target::Thread::call_to_runtime_stub_offset()));
@@ -2005,7 +1986,7 @@
 
   // Write-barrier elimination is enabled for [cls] and we therefore need to
   // ensure that the object is in new-space or has remembered bit set.
-  EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
+  EnsureIsNewOrRemembered(/*preserve_registers=*/false);
 
   __ LeaveStubFrame();
 
@@ -2014,7 +1995,6 @@
 
 // Called for inline allocation of objects.
 void StubCodeCompiler::GenerateAllocationStubForClass(
-    Assembler* assembler,
     UnresolvedPcRelativeCalls* unresolved_calls,
     const Class& cls,
     const Code& allocate_object,
@@ -2087,8 +2067,7 @@
 //  RA : return address.
 //  SP : address of last argument.
 //  S4: arguments descriptor array.
-void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub() {
   __ EnterStubFrame();
 
   // Load the receiver.
@@ -2126,8 +2105,7 @@
 //  S5: inline cache data object.
 // Cannot use function object from ICData as it may be the inlined
 // function and not the top-scope function.
-void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement() {
   if (FLAG_precompiled_mode) {
     __ Breakpoint();
     return;
@@ -2143,8 +2121,7 @@
 }
 
 // Loads function into 'func_reg'.
-void StubCodeCompiler::GenerateUsageCounterIncrement(Assembler* assembler,
-                                                     Register func_reg) {
+void StubCodeCompiler::GenerateUsageCounterIncrement(Register func_reg) {
   if (FLAG_precompiled_mode) {
     __ trap();
     return;
@@ -2264,7 +2241,6 @@
 // - Match found -> jump to target.
 // - Match not found -> jump to IC miss.
 void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
-    Assembler* assembler,
     intptr_t num_args,
     const RuntimeEntry& handle_ic_miss,
     Token::Kind kind,
@@ -2283,9 +2259,9 @@
   }
 
   if (optimized == kOptimized) {
-    GenerateOptimizedUsageCounterIncrement(assembler);
+    GenerateOptimizedUsageCounterIncrement();
   } else {
-    GenerateUsageCounterIncrement(assembler, /*scratch=*/T0);
+    GenerateUsageCounterIncrement(/*scratch=*/T0);
   }
 
   ASSERT(exactness == kIgnoreExactness);  // Unimplemented.
@@ -2508,67 +2484,63 @@
 // A0: receiver
 // S5: ICData
 // RA: return address
-void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
+      1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
       kUnoptimized, kInstanceCall, kIgnoreExactness);
 }
 
 // A0: receiver
 // S5: ICData
 // RA: return address
-void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub() {
   __ Stop("Unimplemented");
 }
 
 // A0: receiver
 // S5: ICData
 // RA: return address
-void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
+      2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
       kUnoptimized, kInstanceCall, kIgnoreExactness);
 }
 
 // A0: receiver
 // S5: ICData
 // RA: return address
-void StubCodeCompiler::GenerateSmiAddInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSmiAddInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD,
-      kUnoptimized, kInstanceCall, kIgnoreExactness);
+      2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD, kUnoptimized,
+      kInstanceCall, kIgnoreExactness);
 }
 
 // A0: receiver
 // S5: ICData
 // RA: return address
-void StubCodeCompiler::GenerateSmiLessInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSmiLessInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT,
-      kUnoptimized, kInstanceCall, kIgnoreExactness);
+      2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT, kUnoptimized,
+      kInstanceCall, kIgnoreExactness);
 }
 
 // A0: receiver
 // S5: ICData
 // RA: return address
-void StubCodeCompiler::GenerateSmiEqualInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSmiEqualInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ,
-      kUnoptimized, kInstanceCall, kIgnoreExactness);
+      2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ, kUnoptimized,
+      kInstanceCall, kIgnoreExactness);
 }
 
 // A0: receiver
 // S5: ICData
 // A6: Function
 // RA: return address
-void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
-      kOptimized, kInstanceCall, kIgnoreExactness);
+      1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, kOptimized,
+      kInstanceCall, kIgnoreExactness);
 }
 
 // A0: receiver
@@ -2576,8 +2548,7 @@
 // A6: Function
 // RA: return address
 void StubCodeCompiler::
-    GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub(
-        Assembler* assembler) {
+    GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub() {
   __ Stop("Unimplemented");
 }
 
@@ -2585,19 +2556,17 @@
 // S5: ICData
 // A6: Function
 // RA: return address
-void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
+      2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
       kOptimized, kInstanceCall, kIgnoreExactness);
 }
 
 // S5: ICData
 // RA: return address
-void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub() {
   GenerateRecordEntryPoint(assembler);
-  GenerateUsageCounterIncrement(assembler, /* scratch */ T0);
+  GenerateUsageCounterIncrement(/* scratch */ T0);
 
 #if defined(DEBUG)
   {
@@ -2673,28 +2642,26 @@
 
 // S5: ICData
 // RA: return address
-void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub(
-    Assembler* assembler) {
-  GenerateUsageCounterIncrement(assembler, /* scratch */ T0);
-  GenerateNArgsCheckInlineCacheStub(
-      assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
-      kUnoptimized, kStaticCall, kIgnoreExactness);
+void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub() {
+  GenerateUsageCounterIncrement(/* scratch */ T0);
+  GenerateNArgsCheckInlineCacheStub(1, kStaticCallMissHandlerOneArgRuntimeEntry,
+                                    Token::kILLEGAL, kUnoptimized, kStaticCall,
+                                    kIgnoreExactness);
 }
 
 // S5: ICData
 // RA: return address
-void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub(
-    Assembler* assembler) {
-  GenerateUsageCounterIncrement(assembler, /* scratch */ T0);
+void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub() {
+  GenerateUsageCounterIncrement(/* scratch */ T0);
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
+      2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
       kUnoptimized, kStaticCall, kIgnoreExactness);
 }
 
 // Stub for compiling a function and jumping to the compiled code.
 // ARGS_DESC_REG: Arguments descriptor.
 // FUNCTION_REG: Function.
-void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateLazyCompileStub() {
   // Preserve arg desc.
   __ EnterStubFrame();
   // Save arguments descriptor and pass function.
@@ -2713,7 +2680,7 @@
 
 // A0: Receiver
 // S5: ICData
-void StubCodeCompiler::GenerateICCallBreakpointStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateICCallBreakpointStub() {
 #if defined(PRODUCT)
   __ Stop("No debugging in PRODUCT mode");
 #else
@@ -2733,8 +2700,7 @@
 }
 
 // S5: ICData
-void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub() {
 #if defined(PRODUCT)
   __ Stop("No debugging in PRODUCT mode");
 #else
@@ -2751,7 +2717,7 @@
 #endif  // defined(PRODUCT)
 }
 
-void StubCodeCompiler::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateRuntimeCallBreakpointStub() {
 #if defined(PRODUCT)
   __ Stop("No debugging in PRODUCT mode");
 #else
@@ -2767,7 +2733,7 @@
 }
 
 // Called only from unoptimized code. All relevant registers have been saved.
-void StubCodeCompiler::GenerateDebugStepCheckStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDebugStepCheckStub() {
 #if defined(PRODUCT)
   __ Stop("No debugging in PRODUCT mode");
 #else
@@ -2996,26 +2962,26 @@
 }
 
 // See comment on [GenerateSubtypeNTestCacheStub].
-void StubCodeCompiler::GenerateSubtype1TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype1TestCacheStub() {
   GenerateSubtypeNTestCacheStub(assembler, 1);
 }
 
 // See comment on [GenerateSubtypeNTestCacheStub].
-void StubCodeCompiler::GenerateSubtype3TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype3TestCacheStub() {
   GenerateSubtypeNTestCacheStub(assembler, 3);
 }
 
 // See comment on [GenerateSubtypeNTestCacheStub].
-void StubCodeCompiler::GenerateSubtype5TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype5TestCacheStub() {
   GenerateSubtypeNTestCacheStub(assembler, 5);
 }
 
 // See comment on [GenerateSubtypeNTestCacheStub].
-void StubCodeCompiler::GenerateSubtype7TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype7TestCacheStub() {
   GenerateSubtypeNTestCacheStub(assembler, 7);
 }
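
The four subtype-test-cache stubs are one generator instantiated at n = 1, 3, 5, 7, the number of inputs each cache entry discriminates on. Sketch with a mock emitter in place of `GenerateSubtypeNTestCacheStub`:

    #include <cstdio>

    static void GenerateSubtypeNTestCache(int n) {
      std::printf("emit cache probe comparing %d inputs per entry\n", n);
    }

    int main() {
      const int kInputCounts[] = {1, 3, 5, 7};
      for (int n : kInputCounts) GenerateSubtypeNTestCache(n);
      return 0;
    }
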
 
-void StubCodeCompiler::GenerateGetCStackPointerStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateGetCStackPointerStub() {
   __ mv(A0, SP);
   __ ret();
 }
@@ -3029,7 +2995,7 @@
 // Does not return.
 //
 // Notice: We need to keep this in sync with `Simulator::JumpToFrame()`.
-void StubCodeCompiler::GenerateJumpToFrameStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateJumpToFrameStub() {
   ASSERT(kExceptionObjectReg == A0);
   ASSERT(kStackTraceObjectReg == A1);
   __ mv(CALLEE_SAVED_TEMP, A0);  // Program counter.
@@ -3078,7 +3044,7 @@
 //
 // The arguments are stored in the Thread object.
 // Does not return.
-void StubCodeCompiler::GenerateRunExceptionHandlerStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateRunExceptionHandlerStub() {
   // Exception object.
   ASSERT(kExceptionObjectReg == A0);
   __ LoadFromOffset(A0, THR, target::Thread::active_exception_offset());
@@ -3096,7 +3062,7 @@
 // Deoptimize a frame on the call stack before rewinding.
 // The arguments are stored in the Thread object.
 // No result.
-void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptForRewindStub() {
   // Push zap value instead of CODE_REG.
   __ LoadImmediate(TMP, kZapCodeReg);
   __ PushRegister(TMP);
@@ -3115,7 +3081,7 @@
 // Calls to the runtime to optimize the given function.
 // A0: function to be re-optimized.
 // ARGS_DESC_REG: argument descriptor (preserved).
-void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateOptimizeFunctionStub() {
   __ LoadFromOffset(CODE_REG, THR, target::Thread::optimize_stub_offset());
   __ EnterStubFrame();
 
@@ -3203,8 +3169,7 @@
 // SP + 4: left operand.
 // SP + 0: right operand.
 // Return TMP set to 0 if equal.
-void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub() {
 #if !defined(PRODUCT)
   // Check single stepping.
   Label stepping, done_stepping;
@@ -3237,8 +3202,7 @@
 // SP + 4: left operand.
 // SP + 0: right operand.
 // Return TMP set to 0 if equal.
-void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub() {
   const Register left = A0;
   const Register right = A1;
   __ LoadFromOffset(left, SP, 1 * target::kWordSize);
@@ -3254,7 +3218,7 @@
 //  FUNCTION_REG: target function
 //  CODE_REG: target Code
 //  ARGS_DESC_REG: arguments descriptor
-void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateMegamorphicCallStub() {
   // Jump if receiver is a smi.
   Label smi_case;
   __ BranchIfSmi(A0, &smi_case);
@@ -3326,13 +3290,13 @@
   __ j(&cid_loaded);
 
   __ Bind(&miss);
-  GenerateSwitchableCallMissStub(assembler);
+  GenerateSwitchableCallMissStub();
 }
 
 // Input:
 //   A0 - receiver
 //   IC_DATA_REG - icdata
-void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateICCallThroughCodeStub() {
   Label loop, found, miss;
   __ lx(T1, FieldAddress(IC_DATA_REG, target::ICData::entries_offset()));
   __ lx(ARGS_DESC_REG,
@@ -3383,8 +3347,7 @@
 //   S5: MonomorphicSmiableCall object
 //
 //   T1,T2: clobbered
-void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub() {
   Label miss;
   __ LoadClassIdMayBeSmi(T1, A0);
 
@@ -3405,7 +3368,7 @@
 
 // Called from switchable IC calls.
 //  A0: receiver
-void StubCodeCompiler::GenerateSwitchableCallMissStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSwitchableCallMissStub() {
   __ lx(CODE_REG,
         Address(THR, target::Thread::switchable_call_miss_stub_offset()));
   __ EnterStubFrame();
@@ -3430,7 +3393,7 @@
 //  S5: SingleTargetCache
 // Passed to target:
 //  CODE_REG: target Code object
-void StubCodeCompiler::GenerateSingleTargetCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSingleTargetCallStub() {
   Label miss;
   __ LoadClassIdMayBeSmi(A1, A0);
   __ lhu(T2, FieldAddress(S5, target::SingleTargetCache::lower_limit_offset()));
@@ -3478,8 +3441,7 @@
   return -1;
 }
 
-void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(Assembler* assembler,
-                                                          intptr_t cid) {
+void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(intptr_t cid) {
   const intptr_t element_size = TypedDataElementSizeInBytes(cid);
   const intptr_t max_len = TypedDataMaxNewSpaceElements(cid);
   const intptr_t scale_shift = GetScaleFactor(element_size);
diff --git a/runtime/vm/compiler/stub_code_compiler_x64.cc b/runtime/vm/compiler/stub_code_compiler_x64.cc
index d9578dd..eacb4a6 100644
--- a/runtime/vm/compiler/stub_code_compiler_x64.cc
+++ b/runtime/vm/compiler/stub_code_compiler_x64.cc
@@ -37,8 +37,7 @@
 //
 // WARNING: This might clobber all registers except for [RAX], [THR] and [FP].
 // The caller should simply call LeaveStubFrame() and return.
-void StubCodeCompiler::EnsureIsNewOrRemembered(Assembler* assembler,
-                                               bool preserve_registers) {
+void StubCodeCompiler::EnsureIsNewOrRemembered(bool preserve_registers) {
   // If the object is not remembered we call a leaf-runtime to add it to the
   // remembered set.
   Label done;
@@ -159,7 +158,7 @@
 //   RBX : address of the runtime function to call.
 //   R10 : number of arguments to the call.
 // Must preserve callee saved registers R12 and R13.
-void StubCodeCompiler::GenerateCallToRuntimeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallToRuntimeStub() {
   const intptr_t thread_offset = target::NativeArguments::thread_offset();
   const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
   const intptr_t argv_offset = target::NativeArguments::argv_offset();
@@ -253,7 +252,6 @@
 }
 
 void StubCodeCompiler::GenerateSharedStubGeneric(
-    Assembler* assembler,
     bool save_fpu_registers,
     intptr_t self_code_stub_offset_from_thread,
     bool allow_return,
@@ -291,7 +289,6 @@
 }
 
 void StubCodeCompiler::GenerateSharedStub(
-    Assembler* assembler,
     bool save_fpu_registers,
     const RuntimeEntry* target,
     intptr_t self_code_stub_offset_from_thread,
@@ -310,12 +307,12 @@
               RAX);
     }
   };
-  GenerateSharedStubGeneric(assembler, save_fpu_registers,
+  GenerateSharedStubGeneric(save_fpu_registers,
                             self_code_stub_offset_from_thread, allow_return,
                             perform_runtime_call);
 }
 
-void StubCodeCompiler::GenerateEnterSafepointStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateEnterSafepointStub() {
   RegisterSet all_registers;
   all_registers.AddAllGeneralRegisters();
   __ PushRegisters(all_registers);
@@ -353,13 +350,12 @@
   __ ret();
 }
 
-void StubCodeCompiler::GenerateExitSafepointStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateExitSafepointStub() {
   GenerateExitSafepointStubCommon(
       assembler, kExitSafepointRuntimeEntry.OffsetFromThread());
 }
 
-void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub() {
   GenerateExitSafepointStubCommon(
       assembler,
       kExitSafepointIgnoreUnwindInProgressRuntimeEntry.OffsetFromThread());
@@ -374,8 +370,7 @@
 // On exit:
 //   Stack pointer lowered by shadow space
 //   RBX, R12 clobbered
-void StubCodeCompiler::GenerateCallNativeThroughSafepointStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateCallNativeThroughSafepointStub() {
   __ movq(R12, compiler::Immediate(target::Thread::exit_through_ffi()));
   __ TransitionGeneratedToNative(RBX, FPREG, R12,
                                  /*enter_safepoint=*/true);
@@ -396,7 +391,6 @@
     CallingConventions::kFpuArgumentRegisters);
 
 void StubCodeCompiler::GenerateJITCallbackTrampolines(
-    Assembler* assembler,
     intptr_t next_callback_id) {
   Label done;
 
@@ -511,7 +505,6 @@
 // RBX: The extracted method.
 // RDX: The type_arguments_field_offset (or 0)
 void StubCodeCompiler::GenerateBuildMethodExtractorStub(
-    Assembler* assembler,
     const Code& closure_allocation_stub,
     const Code& context_allocation_stub,
     bool generic) {
@@ -602,8 +595,7 @@
   __ Ret();
 }
 
-void StubCodeCompiler::GenerateDispatchTableNullErrorStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateDispatchTableNullErrorStub() {
   __ EnterStubFrame();
   __ SmiTag(DispatchTableNullErrorABI::kClassIdReg);
   __ PushRegister(DispatchTableNullErrorABI::kClassIdReg);
@@ -612,8 +604,7 @@
   __ Breakpoint();
 }
 
-void StubCodeCompiler::GenerateRangeError(Assembler* assembler,
-                                          bool with_fpu_regs) {
+void StubCodeCompiler::GenerateRangeError(bool with_fpu_regs) {
   auto perform_runtime_call = [&]() {
     // If the generated code has unboxed index/length, we need to box them
     // before calling the runtime entry.
@@ -664,22 +655,21 @@
   };
 
   GenerateSharedStubGeneric(
-      assembler, /*save_fpu_registers=*/with_fpu_regs,
+      /*save_fpu_registers=*/with_fpu_regs,
       with_fpu_regs
           ? target::Thread::range_error_shared_with_fpu_regs_stub_offset()
           : target::Thread::range_error_shared_without_fpu_regs_stub_offset(),
       /*allow_return=*/false, perform_runtime_call);
 }
 
-void StubCodeCompiler::GenerateWriteError(Assembler* assembler,
-                                          bool with_fpu_regs) {
+void StubCodeCompiler::GenerateWriteError(bool with_fpu_regs) {
   auto perform_runtime_call = [&]() {
     __ CallRuntime(kWriteErrorRuntimeEntry, /*argument_count=*/0);
     __ Breakpoint();
   };
 
   GenerateSharedStubGeneric(
-      assembler, /*save_fpu_registers=*/with_fpu_regs,
+      /*save_fpu_registers=*/with_fpu_regs,
       with_fpu_regs
           ? target::Thread::write_error_shared_with_fpu_regs_stub_offset()
           : target::Thread::write_error_shared_without_fpu_regs_stub_offset(),
@@ -781,14 +771,14 @@
   __ ret();
 }
 
-void StubCodeCompiler::GenerateCallNoScopeNativeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallNoScopeNativeStub() {
   GenerateCallNativeWithWrapperStub(
       assembler,
       Address(THR,
               target::Thread::no_scope_native_wrapper_entry_point_offset()));
 }
 
-void StubCodeCompiler::GenerateCallAutoScopeNativeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallAutoScopeNativeStub() {
   GenerateCallNativeWithWrapperStub(
       assembler,
       Address(THR,
@@ -801,7 +791,7 @@
 //   RAX : address of first argument in argument array.
 //   RBX : address of the native function to call.
 //   R10 : argc_tag including number of arguments and function kind.
-void StubCodeCompiler::GenerateCallBootstrapNativeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallBootstrapNativeStub() {
   GenerateCallNativeWithWrapperStub(
       assembler,
       Address(THR,
@@ -810,7 +800,7 @@
 
 // Input parameters:
 //   ARGS_DESC_REG: arguments descriptor array.
-void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallStaticFunctionStub() {
   __ EnterStubFrame();
   __ pushq(ARGS_DESC_REG);  // Preserve arguments descriptor array.
   // Setup space on stack for return value.
@@ -828,7 +818,7 @@
 // Called from a static call only when invalid code has been entered
 // (invalid because its function was optimized or deoptimized).
 // ARGS_DESC_REG: arguments descriptor array.
-void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateFixCallersTargetStub() {
   Label monomorphic;
   __ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);
 
@@ -874,8 +864,7 @@
 
 // Called from object allocate instruction when the allocation stub has been
 // disabled.
-void StubCodeCompiler::GenerateFixAllocationStubTargetStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateFixAllocationStubTargetStub() {
   // Load code pointer to this stub from the thread:
  // The one that is passed in is not correct: it points to the code object
   // that needs to be replaced.
@@ -894,8 +883,7 @@
 
 // Called from object allocate instruction when the allocation stub for a
 // generic class has been disabled.
-void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub() {
   // Load code pointer to this stub from the thread:
  // The one that is passed in is not correct: it points to the code object
   // that needs to be replaced.
@@ -1113,8 +1101,7 @@
 }
 
 // RAX: result, must be preserved
-void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub() {
   // Push zap value instead of CODE_REG for lazy deopt.
   __ pushq(Immediate(kZapCodeReg));
   // Return address for "call" to deopt stub.
@@ -1127,8 +1114,7 @@
 
 // RAX: exception, must be preserved
 // RDX: stacktrace, must be preserved
-void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub() {
   // Push zap value instead of CODE_REG for lazy deopt.
   __ pushq(Immediate(kZapCodeReg));
   // Return address for "call" to deopt stub.
@@ -1139,7 +1125,7 @@
   __ ret();
 }
 
-void StubCodeCompiler::GenerateDeoptimizeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptimizeStub() {
   __ popq(TMP);
   __ pushq(CODE_REG);
   __ pushq(TMP);
@@ -1204,8 +1190,7 @@
 // Input:
 //   IC_DATA_REG - icdata/megamorphic_cache
 //   RDX - receiver
-void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub() {
   __ EnterStubFrame();
 
   __ movq(ARGS_DESC_REG,
@@ -1225,7 +1210,7 @@
 //   AllocateArrayABI::kResultReg: newly allocated array.
 // Clobbered:
 //   RCX, RDI, R12
-void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateArrayStub() {
   if (!FLAG_use_slow_path && FLAG_inline_alloc) {
     Label slow_case;
     // Compute the size to be allocated, it is based on the array length
@@ -1355,14 +1340,13 @@
   // Write-barrier elimination might be enabled for this array (depending on the
   // array length). To be sure we will check if the allocated object is in old
   // space and if so call a leaf runtime to add it to the remembered set.
-  EnsureIsNewOrRemembered(assembler);
+  EnsureIsNewOrRemembered();
 
   __ LeaveStubFrame();
   __ ret();
 }
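
Note that the call above, `EnsureIsNewOrRemembered();`, passes no argument even though the definition takes `bool preserve_registers`; presumably the shared declaration supplies a default. An assumed sketch of that shape:

    #include <cstdio>

    struct Sketch {
      // Assumed form of the shared declaration: a defaulted parameter lets
      // call sites omit the argument, as the x64 code above does.
      void EnsureIsNewOrRemembered(bool preserve_registers = false) {
        std::printf("preserve_registers=%d\n", preserve_registers);
      }
    };

    int main() {
      Sketch s;
      s.EnsureIsNewOrRemembered();      // uses the default
      s.EnsureIsNewOrRemembered(true);  // explicit
      return 0;
    }
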
 
-void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub() {
  // For testing, call the allocation stub without inline allocation.
   if (!FLAG_use_slow_path && FLAG_inline_alloc) {
     Label slow_case;
@@ -1374,15 +1358,13 @@
   }
   COMPILE_ASSERT(AllocateMintABI::kResultReg ==
                  SharedSlowPathStubABI::kResultReg);
-  GenerateSharedStub(assembler, /*save_fpu_registers=*/true,
-                     &kAllocateMintRuntimeEntry,
+  GenerateSharedStub(/*save_fpu_registers=*/true, &kAllocateMintRuntimeEntry,
                      target::Thread::allocate_mint_with_fpu_regs_stub_offset(),
                      /*allow_return=*/true,
                      /*store_runtime_result_in_result_register=*/true);
 }
 
-void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub() {
  // For testing, call the allocation stub without inline allocation.
   if (!FLAG_use_slow_path && FLAG_inline_alloc) {
     Label slow_case;
@@ -1395,7 +1377,7 @@
   COMPILE_ASSERT(AllocateMintABI::kResultReg ==
                  SharedSlowPathStubABI::kResultReg);
   GenerateSharedStub(
-      assembler, /*save_fpu_registers=*/false, &kAllocateMintRuntimeEntry,
+      /*save_fpu_registers=*/false, &kAllocateMintRuntimeEntry,
       target::Thread::allocate_mint_without_fpu_regs_stub_offset(),
       /*allow_return=*/true,
       /*store_runtime_result_in_result_register=*/true);
@@ -1412,7 +1394,7 @@
 //   RSI : arguments descriptor array.
 //   RDX : arguments array.
 //   RCX : current thread.
-void StubCodeCompiler::GenerateInvokeDartCodeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateInvokeDartCodeStub() {
   __ EnterFrame(0);
 
   const Register kTargetReg = CallingConventions::kArg1Reg;
@@ -1643,7 +1625,7 @@
 //   RAX: new allocated Context object.
 // Clobbered:
 //   R9, R13
-void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateContextStub() {
   __ LoadObject(R9, NullObject());
   if (!FLAG_use_slow_path && FLAG_inline_alloc) {
     Label slow_case;
@@ -1696,7 +1678,7 @@
   // Write-barrier elimination might be enabled for this context (depending on
   // the size). To be sure we will check if the allocated object is in old
   // space and if so call a leaf runtime to add it to the remembered set.
-  EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
+  EnsureIsNewOrRemembered(/*preserve_registers=*/false);
 
   // RAX: new object
   // Restore the frame pointer.
@@ -1712,7 +1694,7 @@
 //   RAX: new allocated Context object.
 // Clobbered:
 //   R10, R13
-void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCloneContextStub() {
   if (!FLAG_use_slow_path && FLAG_inline_alloc) {
     Label slow_case;
 
@@ -1769,7 +1751,7 @@
   // Write-barrier elimination might be enabled for this context (depending on
   // the size). To be sure we will check if the allocated object is in old
   // space and if so call a leaf runtime to add it to the remembered set.
-  EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
+  EnsureIsNewOrRemembered(/*preserve_registers=*/false);
 
   // RAX: new object
   // Restore the frame pointer.
@@ -1778,7 +1760,7 @@
   __ ret();
 }
 
-void StubCodeCompiler::GenerateWriteBarrierWrappersStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateWriteBarrierWrappersStub() {
   for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
     if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
 
@@ -1805,8 +1787,7 @@
 COMPILE_ASSERT(kWriteBarrierObjectReg == RDX);
 COMPILE_ASSERT(kWriteBarrierValueReg == RAX);
 COMPILE_ASSERT(kWriteBarrierSlotReg == R13);
-static void GenerateWriteBarrierStubHelper(Assembler* assembler,
-                                           bool cards) {
+static void GenerateWriteBarrierStubHelper(Assembler* assembler, bool cards) {
   Label add_to_mark_stack, remember_card, lost_race;
   __ testq(RAX, Immediate(1 << target::ObjectAlignment::kNewObjectBitPosition));
   __ j(ZERO, &add_to_mark_stack);
@@ -1956,11 +1937,11 @@
   }
 }
 
-void StubCodeCompiler::GenerateWriteBarrierStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateWriteBarrierStub() {
   GenerateWriteBarrierStubHelper(assembler, false);
 }
 
-void StubCodeCompiler::GenerateArrayWriteBarrierStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateArrayWriteBarrierStub() {
   GenerateWriteBarrierStubHelper(assembler, true);
 }
 
@@ -2066,16 +2047,15 @@
 }
 
 // Called for inline allocation of objects (any class).
-void StubCodeCompiler::GenerateAllocateObjectStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateObjectStub() {
   GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/false);
 }
 
-void StubCodeCompiler::GenerateAllocateObjectParameterizedStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateObjectParameterizedStub() {
   GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/true);
 }
 
-void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateObjectSlowStub() {
   if (!FLAG_precompiled_mode) {
     __ movq(CODE_REG,
             Address(THR, target::Thread::call_to_runtime_stub_offset()));
@@ -2107,7 +2087,7 @@
 
   // Write-barrier elimination is enabled for [cls], so we need to ensure
   // that the object is in new-space or has its remembered bit set.
-  EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
+  EnsureIsNewOrRemembered(/*preserve_registers=*/false);
 
   // AllocateObjectABI::kResultReg: new object
   // Restore the frame pointer.
@@ -2118,7 +2098,6 @@
 
 // Called for inline allocation of objects.
 void StubCodeCompiler::GenerateAllocationStubForClass(
-    Assembler* assembler,
     UnresolvedPcRelativeCalls* unresolved_calls,
     const Class& cls,
     const Code& allocate_object,
@@ -2187,8 +2166,7 @@
 //   RSP : points to return address.
 //   RSP + 8 : address of last argument.
 //   R10 : arguments descriptor array.
-void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub() {
   __ EnterStubFrame();
 
   // Load the receiver.
@@ -2229,8 +2207,7 @@
 
 // Cannot use function object from ICData as it may be the inlined
 // function and not the top-scope function.
-void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement() {
   if (FLAG_precompiled_mode) {
     __ Breakpoint();
     return;
@@ -2254,8 +2231,7 @@
 }
 
 // Loads function into 'temp_reg', preserves IC_DATA_REG.
-void StubCodeCompiler::GenerateUsageCounterIncrement(Assembler* assembler,
-                                                     Register temp_reg) {
+void StubCodeCompiler::GenerateUsageCounterIncrement(Register temp_reg) {
   if (FLAG_precompiled_mode) {
     __ Breakpoint();
     return;
@@ -2372,7 +2348,6 @@
 // - Match found -> jump to target.
 // - Match not found -> jump to IC miss.
 void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
-    Assembler* assembler,
     intptr_t num_args,
     const RuntimeEntry& handle_ic_miss,
     Token::Kind kind,
@@ -2390,9 +2365,9 @@
   }
 
   if (optimized == kOptimized) {
-    GenerateOptimizedUsageCounterIncrement(assembler);
+    GenerateOptimizedUsageCounterIncrement();
   } else {
-    GenerateUsageCounterIncrement(assembler, /* scratch */ RCX);
+    GenerateUsageCounterIncrement(/* scratch */ RCX);
   }
 
   ASSERT(num_args == 1 || num_args == 2);
@@ -2648,69 +2623,65 @@
 //  RDX: receiver
 //  RBX: ICData
 //  RSP[0]: return address
-void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
+      1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
       kUnoptimized, kInstanceCall, kIgnoreExactness);
 }
 
 //  RDX: receiver
 //  RBX: ICData
 //  RSP[0]: return address
-void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
+      1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
       kUnoptimized, kInstanceCall, kCheckExactness);
 }
 
 //  RDX: receiver
 //  RBX: ICData
 //  RSP[0]: return address
-void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
+      2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
       kUnoptimized, kInstanceCall, kIgnoreExactness);
 }
 
 //  RDX: receiver
 //  RBX: ICData
 //  RSP[0]: return address
-void StubCodeCompiler::GenerateSmiAddInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSmiAddInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD,
-      kUnoptimized, kInstanceCall, kIgnoreExactness);
+      2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD, kUnoptimized,
+      kInstanceCall, kIgnoreExactness);
 }
 
 //  RDX: receiver
 //  RBX: ICData
 //  RSP[0]: return address
-void StubCodeCompiler::GenerateSmiLessInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSmiLessInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT,
-      kUnoptimized, kInstanceCall, kIgnoreExactness);
+      2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT, kUnoptimized,
+      kInstanceCall, kIgnoreExactness);
 }
 
 //  RDX: receiver
 //  RBX: ICData
 //  RSP[0]: return address
-void StubCodeCompiler::GenerateSmiEqualInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSmiEqualInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ,
-      kUnoptimized, kInstanceCall, kIgnoreExactness);
+      2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ, kUnoptimized,
+      kInstanceCall, kIgnoreExactness);
 }
 
 //  RDX: receiver
 //  RBX: ICData
 //  RDI: Function
 //  RSP[0]: return address
-void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
-      kOptimized, kInstanceCall, kIgnoreExactness);
+      1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, kOptimized,
+      kInstanceCall, kIgnoreExactness);
 }
 
 //  RDX: receiver
@@ -2718,30 +2689,27 @@
 //  RDI: Function
 //  RSP[0]: return address
 void StubCodeCompiler::
-    GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub(
-        Assembler* assembler) {
+    GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
-      kOptimized, kInstanceCall, kCheckExactness);
+      1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, kOptimized,
+      kInstanceCall, kCheckExactness);
 }
 
 //  RDX: receiver
 //  RBX: ICData
 //  RDI: Function
 //  RSP[0]: return address
-void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
+      2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
       kOptimized, kInstanceCall, kIgnoreExactness);
 }
 
 //  RBX: ICData
 //  RSP[0]: return address
-void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub() {
   GenerateRecordEntryPoint(assembler);
-  GenerateUsageCounterIncrement(assembler, /* scratch */ RCX);
+  GenerateUsageCounterIncrement(/* scratch */ RCX);
 #if defined(DEBUG)
   {
     Label ok;
@@ -2818,26 +2786,24 @@
 
 //  RBX: ICData
 //  RSP[0]: return address
-void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub(
-    Assembler* assembler) {
-  GenerateNArgsCheckInlineCacheStub(
-      assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
-      kUnoptimized, kStaticCall, kIgnoreExactness);
+void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub() {
+  GenerateNArgsCheckInlineCacheStub(1, kStaticCallMissHandlerOneArgRuntimeEntry,
+                                    Token::kILLEGAL, kUnoptimized, kStaticCall,
+                                    kIgnoreExactness);
 }
 
 //  RBX: ICData
 //  RSP[0]: return address
-void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub() {
   GenerateNArgsCheckInlineCacheStub(
-      assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
+      2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
       kUnoptimized, kStaticCall, kIgnoreExactness);
 }
 
 // Stub for compiling a function and jumping to the compiled code.
 // ARGS_DESC_REG: Arguments descriptor.
 // FUNCTION_REG: Function.
-void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateLazyCompileStub() {
   __ EnterStubFrame();
   __ pushq(ARGS_DESC_REG);  // Preserve arguments descriptor array.
   __ pushq(FUNCTION_REG);   // Pass function.
@@ -2855,7 +2821,7 @@
 
 // RBX: Contains an ICData.
 // TOS(0): return address (Dart code).
-void StubCodeCompiler::GenerateICCallBreakpointStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateICCallBreakpointStub() {
 #if defined(PRODUCT)
   __ Stop("No debugging in PRODUCT mode");
 #else
@@ -2874,8 +2840,7 @@
 #endif  // defined(PRODUCT)
 }
 
-void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub() {
 #if defined(PRODUCT)
   __ Stop("No debugging in PRODUCT mode");
 #else
@@ -2893,7 +2858,7 @@
 }
 
 //  TOS(0): return address (Dart code).
-void StubCodeCompiler::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateRuntimeCallBreakpointStub() {
 #if defined(PRODUCT)
   __ Stop("No debugging in PRODUCT mode");
 #else
@@ -2909,7 +2874,7 @@
 }
 
 // Called only from unoptimized code.
-void StubCodeCompiler::GenerateDebugStepCheckStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDebugStepCheckStub() {
 #if defined(PRODUCT)
   __ Stop("No debugging in PRODUCT mode");
 #else
@@ -3135,22 +3100,22 @@
 }
 
 // See comment on [GenerateSubtypeNTestCacheStub].
-void StubCodeCompiler::GenerateSubtype1TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype1TestCacheStub() {
   GenerateSubtypeNTestCacheStub(assembler, 1);
 }
 
 // See comment on [GenerateSubtypeNTestCacheStub].
-void StubCodeCompiler::GenerateSubtype3TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype3TestCacheStub() {
   GenerateSubtypeNTestCacheStub(assembler, 3);
 }
 
 // See comment on [GenerateSubtypeNTestCacheStub].
-void StubCodeCompiler::GenerateSubtype5TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype5TestCacheStub() {
   GenerateSubtypeNTestCacheStub(assembler, 5);
 }
 
 // See comment on [GenerateSubtypeNTestCacheStub].
-void StubCodeCompiler::GenerateSubtype7TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype7TestCacheStub() {
   GenerateSubtypeNTestCacheStub(assembler, 7);
 }
 
@@ -3158,7 +3123,7 @@
 // checks.
 // TOS + 0: return address
 // Result in RAX.
-void StubCodeCompiler::GenerateGetCStackPointerStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateGetCStackPointerStub() {
   __ leaq(RAX, Address(RSP, target::kWordSize));
   __ ret();
 }
@@ -3170,7 +3135,7 @@
 // Arg3: frame_pointer
 // Arg4: thread
 // No Result.
-void StubCodeCompiler::GenerateJumpToFrameStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateJumpToFrameStub() {
   __ movq(THR, CallingConventions::kArg4Reg);
   __ movq(RBP, CallingConventions::kArg3Reg);
   __ movq(RSP, CallingConventions::kArg2Reg);
@@ -3211,7 +3176,7 @@
 //
 // The arguments are stored in the Thread object.
 // No result.
-void StubCodeCompiler::GenerateRunExceptionHandlerStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateRunExceptionHandlerStub() {
   ASSERT(kExceptionObjectReg == RAX);
   ASSERT(kStackTraceObjectReg == RDX);
   __ movq(CallingConventions::kArg1Reg,
@@ -3238,7 +3203,7 @@
 // Deoptimize a frame on the call stack before rewinding.
 // The arguments are stored in the Thread object.
 // No result.
-void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptForRewindStub() {
   // Push zap value instead of CODE_REG.
   __ pushq(Immediate(kZapCodeReg));
 
@@ -3259,7 +3224,7 @@
 // Calls to the runtime to optimize the given function.
 // RDI: function to be reoptimized.
 // ARGS_DESC_REG: argument descriptor (preserved).
-void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateOptimizeFunctionStub() {
   __ movq(CODE_REG, Address(THR, target::Thread::optimize_stub_offset()));
   __ EnterStubFrame();
   __ pushq(ARGS_DESC_REG);  // Preserve args descriptor.
@@ -3323,8 +3288,7 @@
 // TOS + 1: right argument.
 // TOS + 2: left argument.
 // Returns ZF set.
-void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub() {
 #if !defined(PRODUCT)
   // Check single stepping.
   Label stepping, done_stepping;
@@ -3358,8 +3322,7 @@
 // TOS + 1: right argument.
 // TOS + 2: left argument.
 // Returns ZF set.
-void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub() {
   const Register left = RAX;
   const Register right = RDX;
 
@@ -3376,7 +3339,7 @@
 //  FUNCTION_REG: target function
 //  CODE_REG: target Code
 //  ARGS_DESC_REG: arguments descriptor
-void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateMegamorphicCallStub() {
   // Jump if receiver is a smi.
   Label smi_case;
   __ testq(RDX, Immediate(kSmiTagMask));
@@ -3451,13 +3414,13 @@
   __ jmp(&cid_loaded);
 
   __ Bind(&miss);
-  GenerateSwitchableCallMissStub(assembler);
+  GenerateSwitchableCallMissStub();
 }
 
 // Input:
 //  IC_DATA_REG - icdata
 //  RDX - receiver object
-void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateICCallThroughCodeStub() {
   Label loop, found, miss;
   __ movq(R13, FieldAddress(IC_DATA_REG, target::ICData::entries_offset()));
   __ movq(ARGS_DESC_REG,
@@ -3503,8 +3466,7 @@
   __ jmp(RCX);
 }
 
-void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub(
-    Assembler* assembler) {
+void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub() {
   Label have_cid, miss;
 
   __ movq(RAX, Immediate(kSmiCid));
@@ -3527,7 +3489,7 @@
 
 // Called from switchable IC calls.
 //  RDX: receiver
-void StubCodeCompiler::GenerateSwitchableCallMissStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSwitchableCallMissStub() {
   __ movq(CODE_REG,
           Address(THR, target::Thread::switchable_call_miss_stub_offset()));
   __ EnterStubFrame();
@@ -3554,7 +3516,7 @@
 //  RBX: SingleTargetCache
 // Passed to target::
 //  CODE_REG: target Code object
-void StubCodeCompiler::GenerateSingleTargetCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSingleTargetCallStub() {
   Label miss;
   __ LoadClassIdMayBeSmi(RAX, RDX);
   __ movzxw(R9,
@@ -3608,8 +3570,7 @@
   return static_cast<ScaleFactor>(0);
 }
 
-void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(Assembler* assembler,
-                                                          intptr_t cid) {
+void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(intptr_t cid) {
   const intptr_t element_size = TypedDataElementSizeInBytes(cid);
   const intptr_t max_len = TypedDataMaxNewSpaceElements(cid);
   ScaleFactor scale_factor = GetScaleFactor(element_size);
diff --git a/runtime/vm/ffi_callback_trampolines.cc b/runtime/vm/ffi_callback_trampolines.cc
index fd73c3c..8ce7605 100644
--- a/runtime/vm/ffi_callback_trampolines.cc
+++ b/runtime/vm/ffi_callback_trampolines.cc
@@ -60,8 +60,8 @@
     trampoline_pages_.Add(memory);
 
     compiler::Assembler assembler(/*object_pool_builder=*/nullptr);
-    compiler::StubCodeCompiler::GenerateJITCallbackTrampolines(
-        &assembler, next_callback_id_);
+    compiler::StubCodeCompiler stubCodeCompiler(&assembler);
+    stubCodeCompiler.GenerateJITCallbackTrampolines(next_callback_id_);
 
     MemoryRegion region(memory->address(), memory->size());
     assembler.FinalizeInstructions(region);
diff --git a/runtime/vm/object_test.cc b/runtime/vm/object_test.cc
index 8c74065..9046cc8 100644
--- a/runtime/vm/object_test.cc
+++ b/runtime/vm/object_test.cc
@@ -8193,10 +8193,16 @@
       zone, Function::New(signature, symbol, UntaggedFunction::kRegularFunction,
                           false, false, false, false, false, klass,
                           TokenPosition::kNoSource));
+
   compiler::ObjectPoolBuilder pool_builder;
-  const auto& invoke_instantiate_tav =
-      Code::Handle(zone, StubCode::Generate("InstantiateTAV", &pool_builder,
-                                            &GenerateInvokeInstantiateTAVStub));
+  SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock());
+  compiler::Assembler assembler(&pool_builder);
+  GenerateInvokeInstantiateTAVStub(&assembler);
+  const Code& invoke_instantiate_tav = Code::Handle(
+      Code::FinalizeCodeAndNotify("InstantiateTAV", nullptr, &assembler,
+                                  Code::PoolAttachment::kNotAttachPool,
+                                  /*optimized=*/false));
+
   const auto& pool =
       ObjectPool::Handle(zone, ObjectPool::NewFromBuilder(pool_builder));
   invoke_instantiate_tav.set_object_pool(pool.ptr());
diff --git a/runtime/vm/stub_code.cc b/runtime/vm/stub_code.cc
index 916fd1f..95c2617 100644
--- a/runtime/vm/stub_code.cc
+++ b/runtime/vm/stub_code.cc
@@ -29,7 +29,7 @@
 #define STUB_CODE_DECLARE(name) {nullptr, #name},
 #else
 #define STUB_CODE_DECLARE(name)                                                \
-  {nullptr, #name, compiler::StubCodeCompiler::Generate##name##Stub},
+  {nullptr, #name, &compiler::StubCodeCompiler::Generate##name##Stub},
 #endif
     VM_STUB_CODE_LIST(STUB_CODE_DECLARE)
 #undef STUB_CODE_DECLARE
@@ -91,15 +91,15 @@
 #undef STUB_CODE_GENERATE
 #undef STUB_CODE_SET_OBJECT_POOL
 
-CodePtr StubCode::Generate(
-    const char* name,
-    compiler::ObjectPoolBuilder* object_pool_builder,
-    void (*GenerateStub)(compiler::Assembler* assembler)) {
+CodePtr StubCode::Generate(const char* name,
+                           compiler::ObjectPoolBuilder* object_pool_builder,
+                           void (compiler::StubCodeCompiler::*GenerateStub)()) {
   auto thread = Thread::Current();
   SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock());
 
   compiler::Assembler assembler(object_pool_builder);
-  GenerateStub(&assembler);
+  compiler::StubCodeCompiler stubCodeCompiler(&assembler);
+  (stubCodeCompiler.*GenerateStub)();
   const Code& code = Code::Handle(Code::FinalizeCodeAndNotify(
       name, nullptr, &assembler, Code::PoolAttachment::kNotAttachPool,
       /*optimized=*/false));
@@ -221,8 +221,9 @@
     compiler::Assembler assembler(wrapper);
     compiler::UnresolvedPcRelativeCalls unresolved_calls;
     const char* name = cls.ToCString();
-    compiler::StubCodeCompiler::GenerateAllocationStubForClass(
-        &assembler, &unresolved_calls, cls, allocate_object_stub,
+    compiler::StubCodeCompiler stubCodeCompiler(&assembler);
+    stubCodeCompiler.GenerateAllocationStubForClass(
+        &unresolved_calls, cls, allocate_object_stub,
         allocate_object_parametrized_stub);
 
     const auto& static_calls_table =
@@ -316,8 +317,9 @@
 
   compiler::ObjectPoolBuilder object_pool_builder;
   compiler::Assembler assembler(pool != nullptr ? pool : &object_pool_builder);
-  compiler::StubCodeCompiler::GenerateBuildMethodExtractorStub(
-      &assembler, closure_allocation_stub, context_allocation_stub, generic);
+  compiler::StubCodeCompiler stubCodeCompiler(&assembler);
+  stubCodeCompiler.GenerateBuildMethodExtractorStub(
+      closure_allocation_stub, context_allocation_stub, generic);
 
   const char* name = generic ? "BuildGenericMethodExtractor"
                              : "BuildNonGenericMethodExtractor";
diff --git a/runtime/vm/stub_code.h b/runtime/vm/stub_code.h
index 8aeeaad..ab7eb15 100644
--- a/runtime/vm/stub_code.h
+++ b/runtime/vm/stub_code.h
@@ -83,7 +83,7 @@
   // code executable area.
   static CodePtr Generate(const char* name,
                           compiler::ObjectPoolBuilder* object_pool_builder,
-                          void (*GenerateStub)(compiler::Assembler* assembler));
+                          void (compiler::StubCodeCompiler::*GenerateStub)());
 #endif  // !defined(DART_PRECOMPILED_RUNTIME)
 
   static const Code& UnoptimizedStaticCallEntry(intptr_t num_args_tested);
@@ -104,7 +104,7 @@
       compiler::ObjectPoolBuilder* opw) {                                      \
     return StubCode::Generate(                                                 \
         "_iso_stub_" #name, opw,                                               \
-        compiler::StubCodeCompiler::Generate##name##Stub);                     \
+        &compiler::StubCodeCompiler::Generate##name##Stub);                    \
   }
   VM_STUB_CODE_LIST(GENERATE_STUB);
 #undef GENERATE_STUB
@@ -127,7 +127,7 @@
     Code* code;
     const char* name;
 #if !defined(DART_PRECOMPILED_RUNTIME)
-    void (*generator)(compiler::Assembler* assembler);
+    void (compiler::StubCodeCompiler::*generator)();
 #endif
   };
   static StubCodeEntry entries_[kNumStubEntries];
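The stub_code.cc and stub_code.h hunks above change the stub registry from free function pointers of type `void (*)(compiler::Assembler*)` to pointer-to-member functions of type `void (compiler::StubCodeCompiler::*)()`: the assembler becomes state owned by the `StubCodeCompiler` instance instead of a parameter threaded through every generator. A minimal Dart sketch of the same refactoring pattern (hypothetical names; the production code is C++ and uses the `(stubCodeCompiler.*GenerateStub)()` call syntax seen in `StubCode::Generate`):

    class Assembler {
      final StringBuffer buffer = StringBuffer();
    }

    class StubCodeCompiler {
      StubCodeCompiler(this.assembler);

      // The assembler is now instance state rather than a parameter that
      // every Generate* function has to thread through.
      final Assembler assembler;

      void generateExampleStub() => assembler.buffer.write('example stub');
    }

    // The registry stores "unbound" generators and binds each one to a fresh
    // compiler at generation time.
    typedef GenerateStub = void Function(StubCodeCompiler);

    String generate(String name, GenerateStub generateStub) {
      final compiler = StubCodeCompiler(Assembler());
      generateStub(compiler);
      return '$name: ${compiler.assembler.buffer}';
    }

    void main() {
      print(generate('Example', (c) => c.generateExampleStub()));
    }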
diff --git a/runtime/vm/type_testing_stubs_test.cc b/runtime/vm/type_testing_stubs_test.cc
index b5f6ff0..e3f4d41 100644
--- a/runtime/vm/type_testing_stubs_test.cc
+++ b/runtime/vm/type_testing_stubs_test.cc
@@ -479,10 +479,15 @@
         zone, Function::New(
                   signature, symbol, UntaggedFunction::kRegularFunction, false,
                   false, false, false, false, klass, TokenPosition::kNoSource));
+
     compiler::ObjectPoolBuilder pool_builder;
-    const auto& invoke_tts = Code::Handle(
-        zone,
-        StubCode::Generate("InvokeTTS", &pool_builder, &GenerateInvokeTTSStub));
+    SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock());
+    compiler::Assembler assembler(&pool_builder);
+    GenerateInvokeTTSStub(&assembler);
+    const Code& invoke_tts = Code::Handle(Code::FinalizeCodeAndNotify(
+        "InvokeTTS", nullptr, &assembler, Code::PoolAttachment::kNotAttachPool,
+        /*optimized=*/false));
+
     const auto& pool =
         ObjectPool::Handle(zone, ObjectPool::NewFromBuilder(pool_builder));
     invoke_tts.set_object_pool(pool.ptr());
diff --git a/sdk/lib/_internal/js_shared/lib/js_util_patch.dart b/sdk/lib/_internal/js_shared/lib/js_util_patch.dart
index 3bb8c67..c9ffa27 100644
--- a/sdk/lib/_internal/js_shared/lib/js_util_patch.dart
+++ b/sdk/lib/_internal/js_shared/lib/js_util_patch.dart
@@ -101,13 +101,13 @@
 }
 
 @patch
-T callMethod<T>(Object o, String method, List<Object?> args) {
+T callMethod<T>(Object o, Object method, List<Object?> args) {
   assertInteropArgs(args);
   return JS<dynamic>('Object|Null', '#[#].apply(#, #)', o, method, o, args);
 }
 
 /// Similar to [callMethod] but introduces an unsound implicit cast to `T`.
-T _callMethodTrustType<T>(Object o, String method, List<Object?> args) {
+T _callMethodTrustType<T>(Object o, Object method, List<Object?> args) {
   assertInteropArgs(args);
   return JS<T>('Object|Null', '#[#].apply(#, #)', o, method, o, args);
 }
@@ -118,7 +118,7 @@
 /// body of this method. Edit `ProgramCompiler.visitStaticInvocation` if you
 /// edit this method.
 @pragma('dart2js:tryInline')
-T _callMethodUnchecked0<T>(Object o, String method) {
+T _callMethodUnchecked0<T>(Object o, Object method) {
   return JS<dynamic>('Object|Null', '#[#]()', o, method);
 }
 
@@ -129,7 +129,7 @@
 /// body of this method. Edit `ProgramCompiler.visitStaticInvocation` if you
 /// edit this method.
 @pragma('dart2js:tryInline')
-T _callMethodUncheckedTrustType0<T>(Object o, String method) {
+T _callMethodUncheckedTrustType0<T>(Object o, Object method) {
   return JS<T>('Object|Null', '#[#]()', o, method);
 }
 
@@ -139,7 +139,7 @@
 /// body of this method. Edit `ProgramCompiler.visitStaticInvocation` if you
 /// edit this method.
 @pragma('dart2js:tryInline')
-T _callMethodUnchecked1<T>(Object o, String method, Object? arg1) {
+T _callMethodUnchecked1<T>(Object o, Object method, Object? arg1) {
   return JS<dynamic>('Object|Null', '#[#](#)', o, method, arg1);
 }
 
@@ -150,7 +150,7 @@
 /// body of this method. Edit `ProgramCompiler.visitStaticInvocation` if you
 /// edit this method.
 @pragma('dart2js:tryInline')
-T _callMethodUncheckedTrustType1<T>(Object o, String method, Object? arg1) {
+T _callMethodUncheckedTrustType1<T>(Object o, Object method, Object? arg1) {
   return JS<T>('Object|Null', '#[#](#)', o, method, arg1);
 }
 
@@ -161,7 +161,7 @@
 /// edit this method.
 @pragma('dart2js:tryInline')
 T _callMethodUnchecked2<T>(
-    Object o, String method, Object? arg1, Object? arg2) {
+    Object o, Object method, Object? arg1, Object? arg2) {
   return JS<dynamic>('Object|Null', '#[#](#, #)', o, method, arg1, arg2);
 }
 
@@ -173,7 +173,7 @@
 /// edit this method.
 @pragma('dart2js:tryInline')
 T _callMethodUncheckedTrustType2<T>(
-    Object o, String method, Object? arg1, Object? arg2) {
+    Object o, Object method, Object? arg1, Object? arg2) {
   return JS<T>('Object|Null', '#[#](#, #)', o, method, arg1, arg2);
 }
 
@@ -184,7 +184,7 @@
 /// edit this method.
 @pragma('dart2js:tryInline')
 T _callMethodUnchecked3<T>(
-    Object o, String method, Object? arg1, Object? arg2, Object? arg3) {
+    Object o, Object method, Object? arg1, Object? arg2, Object? arg3) {
   return JS<dynamic>(
       'Object|Null', '#[#](#, #, #)', o, method, arg1, arg2, arg3);
 }
@@ -197,7 +197,7 @@
 /// edit this method.
 @pragma('dart2js:tryInline')
 T _callMethodUncheckedTrustType3<T>(
-    Object o, String method, Object? arg1, Object? arg2, Object? arg3) {
+    Object o, Object method, Object? arg1, Object? arg2, Object? arg3) {
   return JS<T>('Object|Null', '#[#](#, #, #)', o, method, arg1, arg2, arg3);
 }
 
@@ -207,7 +207,7 @@
 /// body of this method. Edit `ProgramCompiler.visitStaticInvocation` if you
 /// edit this method.
 @pragma('dart2js:tryInline')
-T _callMethodUnchecked4<T>(Object o, String method, Object? arg1, Object? arg2,
+T _callMethodUnchecked4<T>(Object o, Object method, Object? arg1, Object? arg2,
     Object? arg3, Object? arg4) {
   return JS<dynamic>(
       'Object|Null', '#[#](#, #, #, #)', o, method, arg1, arg2, arg3, arg4);
@@ -220,7 +220,7 @@
 /// body of this method. Edit `ProgramCompiler.visitStaticInvocation` if you
 /// edit this method.
 @pragma('dart2js:tryInline')
-T _callMethodUncheckedTrustType4<T>(Object o, String method, Object? arg1,
+T _callMethodUncheckedTrustType4<T>(Object o, Object method, Object? arg1,
     Object? arg2, Object? arg3, Object? arg4) {
   return JS<T>(
       'Object|Null', '#[#](#, #, #, #)', o, method, arg1, arg2, arg3, arg4);
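The `_callMethodUnchecked{0..4}` and `_callMethodUncheckedTrustType{0..4}` helpers above back a CFE lowering: per the comment in sdk/lib/js_util/js_util.dart further down, a `callMethod` call whose `args` is a list literal or const list of at most 4 values, none statically a function, may be dispatched to a fixed-arity helper. A hedged sketch of the call shapes involved, on an assumed JS `console` object:

    import 'dart:js_util' as js_util;

    void logBoth(Object console) {
      // Eligible for the fixed-arity lowering: a list literal with at most
      // four values, none of which is statically a function.
      js_util.callMethod<void>(console, 'log', ['hello', 'world']);

      // Not eligible: `args` is not a list literal, so the generic varargs
      // path (`#[#].apply(#, #)`) is used instead.
      final List<Object?> args = ['hello'];
      js_util.callMethod<void>(console, 'log', args);
    }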
diff --git a/sdk/lib/_internal/wasm/lib/js_util_patch.dart b/sdk/lib/_internal/wasm/lib/js_util_patch.dart
index d98b1ad..9885c1f 100644
--- a/sdk/lib/_internal/wasm/lib/js_util_patch.dart
+++ b/sdk/lib/_internal/wasm/lib/js_util_patch.dart
@@ -86,7 +86,7 @@
         as T;
 
 @patch
-T callMethod<T>(Object o, String method, List<Object?> args) => dartifyRaw(
+T callMethod<T>(Object o, Object method, List<Object?> args) => dartifyRaw(
     callMethodVarArgsRaw(jsifyRaw(o), jsifyRaw(method), jsifyRaw(args))) as T;
 
 @patch
diff --git a/sdk/lib/html/dart2js/html_dart2js.dart b/sdk/lib/html/dart2js/html_dart2js.dart
index 0be73e5..d35c07d 100644
--- a/sdk/lib/html/dart2js/html_dart2js.dart
+++ b/sdk/lib/html/dart2js/html_dart2js.dart
@@ -38067,7 +38067,7 @@
   static const int Y = 89;
   static const int Z = 90;
   static const int META = 91;
-  static const int WIN_KEY_LEFT = 91;
+  static const int WIN_KEY_LEFT = 91; // Note that it's the same value as META.
   static const int WIN_KEY_RIGHT = 92;
   static const int CONTEXT_MENU = 93;
   static const int NUM_ZERO = 96;
@@ -38292,7 +38292,8 @@
         return _KeyName.UP;
       case KeyCode.WIN_IME:
       case KeyCode.WIN_KEY:
-      case KeyCode.WIN_KEY_LEFT:
+      // Covered by `KeyCode.META` above.
+      // case KeyCode.WIN_KEY_LEFT:
       case KeyCode.WIN_KEY_RIGHT:
         return _KeyName.WIN;
       default:
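This hunk (and its tools/dom source counterpart below; the sdk/lib/html file is generated) drops the `WIN_KEY_LEFT` case because it shares value 91 with `META`, so the arm can never be reached once `META` is handled. A small sketch of the collision; the lint name is an assumption about the kind of diagnostic involved:

    class KeyCode {
      static const int META = 91;
      static const int WIN_KEY_LEFT = 91; // Same value as META.
      static const int WIN_KEY_RIGHT = 92;
    }

    String keyName(int keyCode) {
      switch (keyCode) {
        case KeyCode.META:
        // Adding `case KeyCode.WIN_KEY_LEFT:` here would repeat the constant
        // 91 and could never match on its own; lints such as
        // `no_duplicate_case_values` flag exactly this.
        case KeyCode.WIN_KEY_RIGHT:
          return 'Win';
        default:
          return 'unknown';
      }
    }

    void main() {
      assert(KeyCode.META == KeyCode.WIN_KEY_LEFT); // Both are 91.
      print(keyName(91)); // Win
    }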
diff --git a/sdk/lib/js_util/js_util.dart b/sdk/lib/js_util/js_util.dart
index 16a406d..5c609d4 100644
--- a/sdk/lib/js_util/js_util.dart
+++ b/sdk/lib/js_util/js_util.dart
@@ -46,7 +46,7 @@
 // A CFE transformation may optimize calls to `callMethod` when [args] is
 // a list literal or const list containing at most 4 values, all of which are
 // statically known to be non-functions.
-external T callMethod<T>(Object o, String method, List<Object?> args);
+external T callMethod<T>(Object o, Object method, List<Object?> args);
 
 /// Check whether [o] is an instance of [type].
 ///
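With `callMethod`'s `method` parameter widened from `String` to `Object` here and in the patches above, a JS `Symbol` can serve as the method key, which the new wasm test below exercises. A hedged, web-only usage sketch against the public `dart:js_util` API; `jsObject` is assumed to carry a method keyed by `Symbol.for('symbolMethod')`:

    import 'dart:js_util' as js_util;

    String callBySymbol(Object jsObject) {
      // Look up the JS `Symbol` constructor and intern a symbol, all through
      // the public API.
      final symbolCtor =
          js_util.getProperty<Object>(js_util.globalThis, 'Symbol');
      final Object key =
          js_util.callMethod<Object>(symbolCtor, 'for', ['symbolMethod']);

      // Before this change the key had to be a String; a Symbol now works too.
      return js_util.callMethod<String>(jsObject, key, []);
    }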
diff --git a/tests/language/class_modifiers/base_transitivity/base_class_different_library_error_test.dart b/tests/language/class_modifiers/base_transitivity/base_class_different_library_error_test.dart
index c227fb5..9360566 100644
--- a/tests/language/class_modifiers/base_transitivity/base_class_different_library_error_test.dart
+++ b/tests/language/class_modifiers/base_transitivity/base_class_different_library_error_test.dart
@@ -104,14 +104,14 @@
 
 // Extending via an anonymous mixin application class.
 class SimpleExtendApplication = BaseClass with _MixinOnObject;
-//    ^
+//    ^^^^^^^^^^^^^^^^^^^^^^^
+// [analyzer] COMPILE_TIME_ERROR.SUBTYPE_OF_BASE_OR_FINAL_IS_NOT_BASE_FINAL_OR_SEALED
 // [cfe] The type 'SimpleExtendApplication' must be 'base', 'final' or 'sealed' because the supertype 'BaseClass' is 'base'.
-// [analyzer] unspecified
 
 interface class InterfaceExtendApplication = BaseClass with _MixinOnObject;
-//              ^
+//              ^^^^^^^^^^^^^^^^^^^^^^^^^^
+// [analyzer] COMPILE_TIME_ERROR.SUBTYPE_OF_BASE_OR_FINAL_IS_NOT_BASE_FINAL_OR_SEALED
 // [cfe] The type 'InterfaceExtendApplication' must be 'base', 'final' or 'sealed' because the supertype 'BaseClass' is 'base'.
-// [analyzer] unspecified
 
 /// It is an error if BaseClass is implemented by something which is not base,
 /// final or sealed.
@@ -164,7 +164,8 @@
 
 // Implementing with a mixin application class.
 class SimpleImplementApplication = Object
-//    ^
+//    ^^^^^^^^^^^^^^^^^^^^^^^^^^
+// [analyzer] COMPILE_TIME_ERROR.SUBTYPE_OF_BASE_OR_FINAL_IS_NOT_BASE_FINAL_OR_SEALED
 // [cfe] The type 'SimpleImplementApplication' must be 'base', 'final' or 'sealed' because the supertype 'BaseClass' is 'base'.
     with
         _MixinOnObject
@@ -182,7 +183,8 @@
 // [cfe] The class 'BaseClass' can't be implemented outside of its library because it's a base class.
 
 interface class InterfaceImplementApplication = Object
-//              ^
+//              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+// [analyzer] COMPILE_TIME_ERROR.SUBTYPE_OF_BASE_OR_FINAL_IS_NOT_BASE_FINAL_OR_SEALED
 // [cfe] The type 'InterfaceImplementApplication' must be 'base', 'final' or 'sealed' because the supertype 'BaseClass' is 'base'.
     with
         _MixinOnObject
diff --git a/tests/language/class_modifiers/base_transitivity/base_class_same_library_error_test.dart b/tests/language/class_modifiers/base_transitivity/base_class_same_library_error_test.dart
index 832a638..abeb93c 100644
--- a/tests/language/class_modifiers/base_transitivity/base_class_same_library_error_test.dart
+++ b/tests/language/class_modifiers/base_transitivity/base_class_same_library_error_test.dart
@@ -52,7 +52,7 @@
 // [cfe] The type 'InterfaceSealedExtendImplement' must be 'base', 'final' or 'sealed' because the supertype 'BaseClass' is 'base'.
 
 mixin class MixinClassSealedExtendImplement implements SealedExtend {}
-//          ^^^^^^^^^^^^^^^^^^^^^^^^^^
+//          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 // [analyzer] COMPILE_TIME_ERROR.SUBTYPE_OF_BASE_OR_FINAL_IS_NOT_BASE_FINAL_OR_SEALED
 // [cfe] The type 'MixinClassSealedExtendImplement' must be 'base', 'final' or 'sealed' because the supertype 'BaseClass' is 'base'.
 
@@ -65,7 +65,6 @@
 mixin MixinSealedExtendOn on SealedExtend {}
 //    ^^^^^^^^^^^^^^^^^^^
 // [analyzer] COMPILE_TIME_ERROR.SUBTYPE_OF_BASE_OR_FINAL_IS_NOT_BASE_FINAL_OR_SEALED
-//    ^
 // [cfe] The type 'MixinSealedExtendOn' must be 'base', 'final' or 'sealed' because the supertype 'BaseClass' is 'base'.
 
 // Extending via an anonymous mixin class.
@@ -81,13 +80,13 @@
 
 // Extending via an anonymous mixin application class.
 class SimpleExtendApplication = BaseClass with _MixinOnObject;
-//    ^
+//    ^^^^^^^^^^^^^^^^^^^^^^^
+// [analyzer] COMPILE_TIME_ERROR.SUBTYPE_OF_BASE_OR_FINAL_IS_NOT_BASE_FINAL_OR_SEALED
 // [cfe] The type 'SimpleExtendApplication' must be 'base', 'final' or 'sealed' because the supertype 'BaseClass' is 'base'.
-// [analyzer] unspecified
 interface class InterfaceExtendApplication = BaseClass with _MixinOnObject;
-//              ^
+//              ^^^^^^^^^^^^^^^^^^^^^^^^^^
+// [analyzer] COMPILE_TIME_ERROR.SUBTYPE_OF_BASE_OR_FINAL_IS_NOT_BASE_FINAL_OR_SEALED
 // [cfe] The type 'InterfaceExtendApplication' must be 'base', 'final' or 'sealed' because the supertype 'BaseClass' is 'base'.
-// [analyzer] unspecified
 
 /// It is an error if BaseClass is implemented by something which is not base,
 /// final or sealed.
@@ -154,16 +153,17 @@
 
 // Implementing with a mixin application class.
 interface class InterfaceImplementApplication = Object
-//              ^
+//              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+// [analyzer] COMPILE_TIME_ERROR.SUBTYPE_OF_BASE_OR_FINAL_IS_NOT_BASE_FINAL_OR_SEALED
 // [cfe] The type 'InterfaceImplementApplication' must be 'base', 'final' or 'sealed' because the supertype 'BaseClass' is 'base'.
     with
         _MixinOnObject
     implements
         BaseClass;
 class SimpleImplementApplication = Object
-//    ^
+//    ^^^^^^^^^^^^^^^^^^^^^^^^^^
+// [analyzer] COMPILE_TIME_ERROR.SUBTYPE_OF_BASE_OR_FINAL_IS_NOT_BASE_FINAL_OR_SEALED
 // [cfe] The type 'SimpleImplementApplication' must be 'base', 'final' or 'sealed' because the supertype 'BaseClass' is 'base'.
-// [analyzer] unspecified
     with
         _MixinOnObject
     implements
diff --git a/tests/language/class_modifiers/base_transitivity/final_class_same_library_error_test.dart b/tests/language/class_modifiers/base_transitivity/final_class_same_library_error_test.dart
index aab78e6..5e3a1df 100644
--- a/tests/language/class_modifiers/base_transitivity/final_class_same_library_error_test.dart
+++ b/tests/language/class_modifiers/base_transitivity/final_class_same_library_error_test.dart
@@ -80,14 +80,14 @@
 
 // Extending via an anonymous mixin application class.
 class SimpleExtendApplication = FinalClass with _MixinOnObject;
-//    ^
+//    ^^^^^^^^^^^^^^^^^^^^^^^
+// [analyzer] COMPILE_TIME_ERROR.SUBTYPE_OF_BASE_OR_FINAL_IS_NOT_BASE_FINAL_OR_SEALED
 // [cfe] The type 'SimpleExtendApplication' must be 'base', 'final' or 'sealed' because the supertype 'FinalClass' is 'final'.
-// [analyzer] unspecified
 
 interface class InterfaceExtendApplication = FinalClass with _MixinOnObject;
-//              ^
+//              ^^^^^^^^^^^^^^^^^^^^^^^^^^
+// [analyzer] COMPILE_TIME_ERROR.SUBTYPE_OF_BASE_OR_FINAL_IS_NOT_BASE_FINAL_OR_SEALED
 // [cfe] The type 'InterfaceExtendApplication' must be 'base', 'final' or 'sealed' because the supertype 'FinalClass' is 'final'.
-// [analyzer] unspecified
 
 /// It is an error if FinalClass is implemented by something which is not base,
 /// final or sealed.
@@ -154,17 +154,17 @@
 
 // Implementing with a mixin application class.
 interface class InterfaceImplementApplication = Object
-//              ^
+//              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+// [analyzer] COMPILE_TIME_ERROR.SUBTYPE_OF_BASE_OR_FINAL_IS_NOT_BASE_FINAL_OR_SEALED
 // [cfe] The type 'InterfaceImplementApplication' must be 'base', 'final' or 'sealed' because the supertype 'FinalClass' is 'final'.
-// [analyzer] unspecified
     with
         _MixinOnObject
     implements
         FinalClass;
 class SimpleImplementApplication = Object
-//    ^
+//    ^^^^^^^^^^^^^^^^^^^^^^^^^^
+// [analyzer] COMPILE_TIME_ERROR.SUBTYPE_OF_BASE_OR_FINAL_IS_NOT_BASE_FINAL_OR_SEALED
 // [cfe] The type 'SimpleImplementApplication' must be 'base', 'final' or 'sealed' because the supertype 'FinalClass' is 'final'.
-// [analyzer] unspecified
     with
         _MixinOnObject
     implements
diff --git a/tests/web/wasm/js_util_test.dart b/tests/web/wasm/js_util_test.dart
index e3239d7..89ec2f1 100644
--- a/tests/web/wasm/js_util_test.dart
+++ b/tests/web/wasm/js_util_test.dart
@@ -372,16 +372,24 @@
 external _JSSymbol get symbol;
 
 @JS()
+external _JSSymbol get symbol2;
+
+@JS()
 external JSString methodWithSymbol(_JSSymbol s);
 
 void symbolTest() {
   eval(r'''
-      var s = Symbol.for('symbol');
-      globalThis.symbol = s;
-      globalThis[s] = 'boo';
+      var s1 = Symbol.for('symbol');
+      globalThis.symbol = s1;
+      globalThis[s1] = 'boo';
       globalThis.methodWithSymbol = function(s) {
         return Symbol.keyFor(s);
       }
+      var symbol2 = Symbol.for('symbolMethod');
+      globalThis[symbol2] = function() {
+        return 'hello world';
+      }
+      globalThis.symbol2 = symbol2;
       ''');
   Expect.equals(
       _JSSymbol.keyFor(_JSSymbol._for('symbol'.toJS)).toDart, 'symbol');
@@ -394,6 +402,7 @@
   Expect.equals(
       _JSSymbol.keyFor(getProperty<_JSSymbol>(globalThis, 'symbol')).toDart,
       'symbol');
+  Expect.equals(callMethod<String>(globalThis, symbol2, []), 'hello world');
 }
 
 void main() async {
diff --git a/tools/VERSION b/tools/VERSION
index a373ed8..669a3ff 100644
--- a/tools/VERSION
+++ b/tools/VERSION
@@ -27,5 +27,5 @@
 MAJOR 3
 MINOR 0
 PATCH 0
-PRERELEASE 359
+PRERELEASE 360
 PRERELEASE_PATCH 0
diff --git a/tools/dom/src/KeyCode.dart b/tools/dom/src/KeyCode.dart
index a51d062..6ffde46 100644
--- a/tools/dom/src/KeyCode.dart
+++ b/tools/dom/src/KeyCode.dart
@@ -95,7 +95,7 @@
   static const int Y = 89;
   static const int Z = 90;
   static const int META = 91;
-  static const int WIN_KEY_LEFT = 91;
+  static const int WIN_KEY_LEFT = 91; // Note that it's the same value as META.
   static const int WIN_KEY_RIGHT = 92;
   static const int CONTEXT_MENU = 93;
   static const int NUM_ZERO = 96;
@@ -320,7 +320,8 @@
         return _KeyName.UP;
       case KeyCode.WIN_IME:
       case KeyCode.WIN_KEY:
-      case KeyCode.WIN_KEY_LEFT:
+      // Covered by `KeyCode.META` above.
+      // case KeyCode.WIN_KEY_LEFT:
       case KeyCode.WIN_KEY_RIGHT:
         return _KeyName.WIN;
       default:
diff --git a/utils/compiler/BUILD.gn b/utils/compiler/BUILD.gn
index 758637f..d8093f9 100644
--- a/utils/compiler/BUILD.gn
+++ b/utils/compiler/BUILD.gn
@@ -63,6 +63,7 @@
   vm_args = []
   main_dart = "$target_gen_dir/dart2js.dart"
   training_args = [
+    "--invoker=gn_build",
     "--packages=" +
         rebase_path("../../.dart_tool/package_config.json", root_build_dir),
     "--libraries-spec=" +
diff --git a/utils/dartdevc/BUILD.gn b/utils/dartdevc/BUILD.gn
index 05418a6..32b51a7 100644
--- a/utils/dartdevc/BUILD.gn
+++ b/utils/dartdevc/BUILD.gn
@@ -78,6 +78,7 @@
     args = [
       "$abs_main",
       "-m",
+      "--invoker=gn_build",
       "-o$abs_output",
       "--no-source-maps",
       "--platform-binaries=" + rebase_path("$root_out_dir"),