Version 2.18.0-62.0.dev

Merge commit 'bb47e41ab203a8b571b3b18cf8a8ff89bb1048ce' into 'dev'
diff --git a/pkg/analysis_server/benchmark/benchmarks.dart b/pkg/analysis_server/benchmark/benchmarks.dart
index cf25eb4..9dea223 100644
--- a/pkg/analysis_server/benchmark/benchmarks.dart
+++ b/pkg/analysis_server/benchmark/benchmarks.dart
@@ -118,7 +118,7 @@
 
   @override
   BenchMarkResult combine(BenchMarkResult other) {
-    BenchMarkResult _combine(BenchMarkResult? a, BenchMarkResult? b) {
+    BenchMarkResult combine(BenchMarkResult? a, BenchMarkResult? b) {
       if (a == null) return b!;
       if (b == null) return a;
       return a.combine(b);
@@ -133,7 +133,7 @@
         .toList();
 
     for (var key in keys) {
-      combined.add(key, _combine(results[key], o.results[key]));
+      combined.add(key, combine(results[key], o.results[key]));
     }
 
     return combined;
diff --git a/pkg/analysis_server/benchmark/perf/benchmarks_impl.dart b/pkg/analysis_server/benchmark/perf/benchmarks_impl.dart
index 501fb8d..94dd7c0 100644
--- a/pkg/analysis_server/benchmark/perf/benchmarks_impl.dart
+++ b/pkg/analysis_server/benchmark/perf/benchmarks_impl.dart
@@ -75,7 +75,7 @@
     var completionCount = 0;
     var stopwatch = Stopwatch()..start();
 
-    Future _complete(int offset) async {
+    Future complete(int offset) async {
       await test.complete(filePath, offset, isWarmUp: false);
       completionCount++;
     }
@@ -86,11 +86,11 @@
       var index =
           contents.indexOf(RegExp(r'\..*;$', multiLine: true), startIndex);
 
-      await _complete(index - 10);
-      await _complete(index - 1);
-      await _complete(index);
-      await _complete(index + 1);
-      await _complete(index + 10);
+      await complete(index - 10);
+      await complete(index - 1);
+      await complete(index);
+      await complete(index + 1);
+      await complete(index + 10);
 
       if (i + 1 < kGroupCount) {
         // mutate
diff --git a/pkg/analysis_server/lib/src/lsp/handlers/handler_document_color.dart b/pkg/analysis_server/lib/src/lsp/handlers/handler_document_color.dart
index e65353e..124caa1 100644
--- a/pkg/analysis_server/lib/src/lsp/handlers/handler_document_color.dart
+++ b/pkg/analysis_server/lib/src/lsp/handlers/handler_document_color.dart
@@ -40,7 +40,7 @@
   }
 
   ErrorOr<List<ColorInformation>> _getColors(ResolvedUnitResult unit) {
-    ColorInformation _toColorInformation(ColorReference reference) {
+    ColorInformation toColorInformation(ColorReference reference) {
       return ColorInformation(
         range: toRange(unit.lineInfo, reference.offset, reference.length),
         color: Color(
@@ -56,6 +56,6 @@
 
     final computer = ColorComputer(unit);
     final colors = computer.compute();
-    return success(colors.map(_toColorInformation).toList());
+    return success(colors.map(toColorInformation).toList());
   }
 }
diff --git a/pkg/analysis_server/lib/src/lsp/server_capabilities_computer.dart b/pkg/analysis_server/lib/src/lsp/server_capabilities_computer.dart
index 9146784..7a67a93 100644
--- a/pkg/analysis_server/lib/src/lsp/server_capabilities_computer.dart
+++ b/pkg/analysis_server/lib/src/lsp/server_capabilities_computer.dart
@@ -522,14 +522,14 @@
     // the hashcode of their registration options to allow for multiple
     // registrations of a single method.
 
-    String _registrationHash(Registration registration) =>
+    String registrationHash(Registration registration) =>
         '${registration.method}${registration.registerOptions.hashCode}';
 
     final newRegistrationsMap = Map.fromEntries(
-        newRegistrations.map((r) => MapEntry(r, _registrationHash(r))));
+        newRegistrations.map((r) => MapEntry(r, registrationHash(r))));
     final newRegistrationsJsons = newRegistrationsMap.values.toSet();
     final currentRegistrationsMap = Map.fromEntries(
-        currentRegistrations.map((r) => MapEntry(r, _registrationHash(r))));
+        currentRegistrations.map((r) => MapEntry(r, registrationHash(r))));
     final currentRegistrationJsons = currentRegistrationsMap.values.toSet();
 
     final registrationsToAdd = newRegistrationsMap.entries
diff --git a/pkg/analysis_server/test/analysis/update_content_test.dart b/pkg/analysis_server/test/analysis/update_content_test.dart
index ac230a7..a75ac82 100644
--- a/pkg/analysis_server/test/analysis/update_content_test.dart
+++ b/pkg/analysis_server/test/analysis/update_content_test.dart
@@ -30,9 +30,8 @@
   void processNotification(Notification notification) {
     if (notification.event == ANALYSIS_NOTIFICATION_ERRORS) {
       var decoded = AnalysisErrorsParams.fromNotification(notification);
-      String _format(AnalysisError e) =>
-          '${e.location.startLine}: ${e.message}';
-      filesErrors[getFile(decoded.file)] = decoded.errors.map(_format).toList();
+      String format(AnalysisError e) => '${e.location.startLine}: ${e.message}';
+      filesErrors[getFile(decoded.file)] = decoded.errors.map(format).toList();
     }
     if (notification.event == ANALYSIS_NOTIFICATION_NAVIGATION) {
       navigationCount++;
diff --git a/pkg/analysis_server/test/stress/completion/completion_runner.dart b/pkg/analysis_server/test/stress/completion/completion_runner.dart
index 301c2ad..ad61551 100644
--- a/pkg/analysis_server/test/stress/completion/completion_runner.dart
+++ b/pkg/analysis_server/test/stress/completion/completion_runner.dart
@@ -176,7 +176,7 @@
           output.writeln();
         }
 
-        void _printCounts(List<int> counts) {
+        void printCounts(List<int> counts) {
           var nearTopCount = 0;
           for (var i = 0; i < counts.length; i++) {
             var count = counts[i];
@@ -188,10 +188,10 @@
 
         output.writeln();
         output.writeln('By position in the list');
-        _printCounts(indexCount);
+        printCounts(indexCount);
         output.writeln();
         output.writeln('By position in the list (filtered by first character)');
-        _printCounts(filteredIndexCount);
+        printCounts(filteredIndexCount);
         output.writeln();
       }
     }
diff --git a/pkg/dart2js_info/bin/src/debug_info.dart b/pkg/dart2js_info/bin/src/debug_info.dart
index 1de2f7d..51a8c99 100644
--- a/pkg/dart2js_info/bin/src/debug_info.dart
+++ b/pkg/dart2js_info/bin/src/debug_info.dart
@@ -303,15 +303,15 @@
   // differently than 'deps' links
   int inUsesNotInDependencies = 0;
   int inDependenciesNotInUses = 0;
-  _sameEdges(f) {
+  sameEdges(f) {
     var targets1 = g1.targetsOf(f).toSet();
     var targets2 = g2.targetsOf(f).toSet();
     inUsesNotInDependencies += targets1.difference(targets2).length;
     inDependenciesNotInUses += targets2.difference(targets1).length;
   }
 
-  info.functions.forEach(_sameEdges);
-  info.fields.forEach(_sameEdges);
+  info.functions.forEach(sameEdges);
+  info.fields.forEach(sameEdges);
   if (inUsesNotInDependencies == 0 && inDependenciesNotInUses == 0) {
     _pass('dependency data is consistent');
   } else {
diff --git a/pkg/dart2js_info/bin/src/deferred_library_size.dart b/pkg/dart2js_info/bin/src/deferred_library_size.dart
index 46455ed..353d8ed 100644
--- a/pkg/dart2js_info/bin/src/deferred_library_size.dart
+++ b/pkg/dart2js_info/bin/src/deferred_library_size.dart
@@ -56,7 +56,7 @@
   int longest = importSizes.fold('Percent of code deferred'.length,
       (longest, importSize) => max(longest, importSize.import.length));
 
-  _printRow(label, data, {int width = 15}) {
+  printRow(label, data, {int width = 15}) {
     print('${label.toString().padRight(longest + 1)}'
         '${data.toString().padLeft(width)}');
   }
@@ -66,16 +66,16 @@
   print('-' * (longest + 16));
   for (var importSize in importSizes) {
     // TODO(het): split into specific and shared size
-    _printRow(importSize.import, importSize.size);
+    printRow(importSize.import, importSize.size);
   }
   print('-' * (longest + 16));
 
   var mainChunkSize = sizeByImport['main'];
   var deferredSize = programSize - mainChunkSize;
   var percentDeferred = (deferredSize * 100 / programSize).toStringAsFixed(2);
-  _printRow('Main chunk size', mainChunkSize);
-  _printRow('Deferred code size', deferredSize);
-  _printRow('Percent of code deferred', '$percentDeferred%');
+  printRow('Main chunk size', mainChunkSize);
+  printRow('Deferred code size', deferredSize);
+  printRow('Percent of code deferred', '$percentDeferred%');
 }
 
 Map<String, int> getSizeByImport(AllInfo info) {
diff --git a/pkg/dart2js_info/bin/src/library_size_split.dart b/pkg/dart2js_info/bin/src/library_size_split.dart
index 1721457..3246d1e 100644
--- a/pkg/dart2js_info/bin/src/library_size_split.dart
+++ b/pkg/dart2js_info/bin/src/library_size_split.dart
@@ -131,12 +131,12 @@
     var realTotal = info.program.size;
     var longest = 0;
     var rows = <_Row>[];
-    _addRow(String label, int value) {
+    addRow(String label, int value) {
       rows.add(_Row(label, value));
       longest = max(longest, label.length);
     }
 
-    _printRow(_Row row) {
+    printRow(_Row row) {
       if (row is _Divider) {
         print(' ${'-' * (longest + 18)}');
         return;
@@ -157,14 +157,14 @@
         lastCluster = entry.cluster;
       }
       var size = entry.size;
-      _addRow(name, size);
+      addRow(name, size);
     }
     rows.add(const _Divider());
-    _addRow("All libraries (excludes preambles, statics & consts)", allLibs);
-    _addRow("Shared consts", allConstants);
-    _addRow("Total accounted", allLibs + allConstants);
-    _addRow("Program Size", realTotal);
-    rows.forEach(_printRow);
+    addRow("All libraries (excludes preambles, statics & consts)", allLibs);
+    addRow("Shared consts", allConstants);
+    addRow("Total accounted", allLibs + allConstants);
+    addRow("Program Size", realTotal);
+    rows.forEach(printRow);
   }
 }
 
diff --git a/pkg/dev_compiler/lib/src/kernel/compiler.dart b/pkg/dev_compiler/lib/src/kernel/compiler.dart
index 7a7f204..5eca49b 100644
--- a/pkg/dev_compiler/lib/src/kernel/compiler.dart
+++ b/pkg/dev_compiler/lib/src/kernel/compiler.dart
@@ -905,13 +905,13 @@
 
     js_ast.Expression emitDeferredType(DartType t,
         {bool emitNullability = true}) {
-      js_ast.Expression _emitDeferredType(DartType t,
+      js_ast.Expression emitDeferredType(DartType t,
           {bool emitNullability = true}) {
         if (t is InterfaceType) {
           _declareBeforeUse(t.classNode);
           if (t.typeArguments.isNotEmpty) {
-            var typeRep = _emitGenericClassType(
-                t, t.typeArguments.map(_emitDeferredType));
+            var typeRep =
+                _emitGenericClassType(t, t.typeArguments.map(emitDeferredType));
             return emitNullability
                 ? _emitNullabilityWrapper(typeRep, t.declaredNullability)
                 : typeRep;
@@ -922,13 +922,13 @@
           if (normalizedType is FutureOrType) {
             _declareBeforeUse(_coreTypes.deprecatedFutureOrClass);
             var typeRep = _emitFutureOrTypeWithArgument(
-                _emitDeferredType(normalizedType.typeArgument));
+                emitDeferredType(normalizedType.typeArgument));
             return emitNullability
                 ? _emitNullabilityWrapper(
                     typeRep, normalizedType.declaredNullability)
                 : typeRep;
           }
-          return _emitDeferredType(normalizedType,
+          return emitDeferredType(normalizedType,
               emitNullability: emitNullability);
         } else if (t is TypeParameterType) {
           return _emitTypeParameterType(t, emitNullability: emitNullability);
@@ -940,7 +940,7 @@
       var savedEmittingDeferredType = _emittingDeferredType;
       _emittingDeferredType = true;
       var deferredClassRep =
-          _emitDeferredType(t, emitNullability: emitNullability);
+          emitDeferredType(t, emitNullability: emitNullability);
       _emittingDeferredType = savedEmittingDeferredType;
       return deferredClassRep;
     }
diff --git a/pkg/native_stack_traces/lib/src/elf.dart b/pkg/native_stack_traces/lib/src/elf.dart
index ac7cb50..c29b1e4 100644
--- a/pkg/native_stack_traces/lib/src/elf.dart
+++ b/pkg/native_stack_traces/lib/src/elf.dart
@@ -1045,7 +1045,7 @@
       entry.setName(sectionHeaderStringTable);
       sectionsByName.putIfAbsent(entry.name, () => {}).add(section);
     }
-    void _cacheSymbolNames(String stringTableTag, String symbolTableTag) {
+    void cacheSymbolNames(String stringTableTag, String symbolTableTag) {
       final stringTables = sectionsByName[stringTableTag]?.cast<StringTable>();
       if (stringTables == null) {
         return;
@@ -1068,8 +1068,8 @@
       }
     }
 
-    _cacheSymbolNames('.strtab', '.symtab');
-    _cacheSymbolNames('.dynstr', '.dynsym');
+    cacheSymbolNames('.strtab', '.symtab');
+    cacheSymbolNames('.dynstr', '.dynsym');
     // Set the wordSize and endian of the original reader before returning.
     elfReader.wordSize = reader.wordSize;
     elfReader.endian = reader.endian;
diff --git a/runtime/vm/compiler/assembler/assembler_arm64.h b/runtime/vm/compiler/assembler/assembler_arm64.h
index 4690df6e..4cd2832 100644
--- a/runtime/vm/compiler/assembler/assembler_arm64.h
+++ b/runtime/vm/compiler/assembler/assembler_arm64.h
@@ -2717,7 +2717,7 @@
         opc = B31;
         break;
       case kFourBytes:
-        opc = B30;
+        opc = op == LDP ? B30 : 0;
         break;
       case kUnsignedFourBytes:
         opc = 0;
diff --git a/runtime/vm/compiler/assembler/assembler_arm64_test.cc b/runtime/vm/compiler/assembler/assembler_arm64_test.cc
index a95b7fe..0010aba 100644
--- a/runtime/vm/compiler/assembler/assembler_arm64_test.cc
+++ b/runtime/vm/compiler/assembler/assembler_arm64_test.cc
@@ -751,6 +751,92 @@
       "ret\n");
 }
 
+ASSEMBLER_TEST_GENERATE(LoadStorePairUnsigned32, assembler) {
+  __ SetupDartSP();
+  __ LoadImmediate(R2, 0xAABBCCDDEEFF9988);
+  __ LoadImmediate(R3, 0xBBCCDDEEFF998877);
+  __ sub(SP, SP, Operand(4 * target::kWordSize));
+  __ andi(CSP, SP, Immediate(~15));  // Must not access beyond CSP.
+  __ stp(R2, R3,
+         Address(SP, 2 * sizeof(uint32_t), Address::PairOffset,
+                 compiler::kUnsignedFourBytes),
+         kUnsignedFourBytes);
+  __ ldp(R0, R1,
+         Address(SP, 2 * sizeof(uint32_t), Address::PairOffset,
+                 kUnsignedFourBytes),
+         kUnsignedFourBytes);
+  __ add(SP, SP, Operand(4 * target::kWordSize));
+  __ sub(R0, R0, Operand(R1));
+  __ RestoreCSP();
+  __ ret();
+}
+
+ASSEMBLER_TEST_RUN(LoadStorePairUnsigned32, test) {
+  typedef int64_t (*Int64Return)() DART_UNUSED;
+  EXPECT_EQ(-278523631, EXECUTE_TEST_CODE_INT64(Int64Return, test->entry()));
+  EXPECT_DISASSEMBLY(
+      "mov sp, csp\n"
+      "sub csp, csp, #0x1000\n"
+      "movz r2, #0x9988\n"
+      "movk r2, #0xeeff lsl 16\n"
+      "movk r2, #0xccdd lsl 32\n"
+      "movk r2, #0xaabb lsl 48\n"
+      "movz r3, #0x8877\n"
+      "movk r3, #0xff99 lsl 16\n"
+      "movk r3, #0xddee lsl 32\n"
+      "movk r3, #0xbbcc lsl 48\n"
+      "sub sp, sp, #0x20\n"
+      "and csp, sp, 0xfffffffffffffff0\n"
+      "stpw r2, r3, [sp, #8]\n"
+      "ldpw r0, r1, [sp, #8]\n"
+      "add sp, sp, #0x20\n"
+      "sub r0, r0, r1\n"
+      "mov csp, sp\n"
+      "ret\n");
+}
+
+ASSEMBLER_TEST_GENERATE(LoadStorePairSigned32, assembler) {
+  __ SetupDartSP();
+  __ LoadImmediate(R2, 0xAABBCCDDEEFF9988);
+  __ LoadImmediate(R3, 0xBBCCDDEEFF998877);
+  __ sub(SP, SP, Operand(4 * target::kWordSize));
+  __ andi(CSP, SP, Immediate(~15));  // Must not access beyond CSP.
+  __ stp(R2, R3,
+         Address(SP, 2 * sizeof(int32_t), Address::PairOffset, kFourBytes),
+         kFourBytes);
+  __ ldp(R0, R1,
+         Address(SP, 2 * sizeof(int32_t), Address::PairOffset, kFourBytes),
+         kFourBytes);
+  __ add(SP, SP, Operand(4 * target::kWordSize));
+  __ sub(R0, R0, Operand(R1));
+  __ RestoreCSP();
+  __ ret();
+}
+
+ASSEMBLER_TEST_RUN(LoadStorePairSigned32, test) {
+  typedef int64_t (*Int64Return)() DART_UNUSED;
+  EXPECT_EQ(-278523631, EXECUTE_TEST_CODE_INT64(Int64Return, test->entry()));
+  EXPECT_DISASSEMBLY(
+      "mov sp, csp\n"
+      "sub csp, csp, #0x1000\n"
+      "movz r2, #0x9988\n"
+      "movk r2, #0xeeff lsl 16\n"
+      "movk r2, #0xccdd lsl 32\n"
+      "movk r2, #0xaabb lsl 48\n"
+      "movz r3, #0x8877\n"
+      "movk r3, #0xff99 lsl 16\n"
+      "movk r3, #0xddee lsl 32\n"
+      "movk r3, #0xbbcc lsl 48\n"
+      "sub sp, sp, #0x20\n"
+      "and csp, sp, 0xfffffffffffffff0\n"
+      "stpw r2, r3, [sp, #8]\n"
+      "ldpsw r0, r1, [sp, #8]\n"
+      "add sp, sp, #0x20\n"
+      "sub r0, r0, r1\n"
+      "mov csp, sp\n"
+      "ret\n");
+}
+
 ASSEMBLER_TEST_GENERATE(PushRegisterPair, assembler) {
   __ SetupDartSP();
   __ LoadImmediate(R2, 12);
diff --git a/runtime/vm/compiler/assembler/disassembler_arm64.cc b/runtime/vm/compiler/assembler/disassembler_arm64.cc
index e742ef7..c16ca8e 100644
--- a/runtime/vm/compiler/assembler/disassembler_arm64.cc
+++ b/runtime/vm/compiler/assembler/disassembler_arm64.cc
@@ -552,7 +552,11 @@
       ASSERT(STRING_STARTS_WITH(format, "opc"));
       if (instr->Bit(26) == 0) {
         if (instr->Bit(31) == 0) {
-          Print("w");
+          if (instr->Bit(30) == 1) {
+            Print("sw");
+          } else {
+            Print("w");
+          }
         } else {
           // 64-bit width is most commonly used, no need to print "x".
         }
diff --git a/runtime/vm/compiler/runtime_api.h b/runtime/vm/compiler/runtime_api.h
index f36cdc7..f2c212d 100644
--- a/runtime/vm/compiler/runtime_api.h
+++ b/runtime/vm/compiler/runtime_api.h
@@ -63,7 +63,7 @@
 // constants and introduce compilation errors when used.
 //
 // target::kWordSize and target::ObjectAlignment give access to
-// word size and object aligment offsets for the target.
+// word size and object alignment offsets for the target.
 //
 // Similarly kHostWordSize gives access to the host word size.
 class InvalidClass {};
diff --git a/runtime/vm/compiler/stub_code_compiler_arm64.cc b/runtime/vm/compiler/stub_code_compiler_arm64.cc
index 0034a52..3a5737b 100644
--- a/runtime/vm/compiler/stub_code_compiler_arm64.cc
+++ b/runtime/vm/compiler/stub_code_compiler_arm64.cc
@@ -1376,15 +1376,22 @@
                     target::Array::data_offset() - kHeapObjectTag);
     // R3: iterator which initially points to the start of the variable
     // data area to be initialized.
-    Label loop, done;
+#if defined(DART_COMPRESSED_POINTERS)
+    const Register kWordOfNulls = TMP;
+    __ andi(kWordOfNulls, NULL_REG, Immediate(0xFFFFFFFF));
+    __ orr(kWordOfNulls, kWordOfNulls, Operand(kWordOfNulls, LSL, 32));
+#else
+    const Register kWordOfNulls = NULL_REG;
+#endif
+    Label loop;
     __ Bind(&loop);
-    // TODO(cshapiro): StoreIntoObjectNoBarrier
+    ASSERT(target::kObjectAlignment == 2 * target::kWordSize);
+    __ stp(kWordOfNulls, kWordOfNulls,
+           Address(R3, 2 * target::kWordSize, Address::PairPostIndex));
+    // Safe to only check every kObjectAlignment bytes instead of each word.
+    ASSERT(kAllocationRedZoneSize >= target::kObjectAlignment);
     __ CompareRegisters(R3, R7);
-    __ b(&done, CS);
-    __ str(NULL_REG, Address(R3), kObjectBytes);  // Store if unsigned lower.
-    __ AddImmediate(R3, target::kCompressedWordSize);
-    __ b(&loop);  // Loop until R3 == R7.
-    __ Bind(&done);
+    __ b(&loop, UNSIGNED_LESS);
 
     // Done allocating and initializing the array.
     // AllocateArrayABI::kResultReg: new object.
@@ -1697,17 +1704,25 @@
     // Initialize the context variables.
     // R0: new object.
     // R1: number of context variables.
-    {
-      Label loop, done;
-      __ AddImmediate(R3, R0,
-                      target::Context::variable_offset(0) - kHeapObjectTag);
-      __ Bind(&loop);
-      __ subs(R1, R1, Operand(1));
-      __ b(&done, MI);
-      __ str(NULL_REG, Address(R3, R1, UXTX, Address::Scaled), kObjectBytes);
-      __ b(&loop, NE);  // Loop if R1 not zero.
-      __ Bind(&done);
-    }
+    __ AddImmediate(R3, R0,
+                    target::Context::variable_offset(0) - kHeapObjectTag);
+#if defined(DART_COMPRESSED_POINTERS)
+    const Register kWordOfNulls = TMP;
+    __ andi(kWordOfNulls, NULL_REG, Immediate(0xFFFFFFFF));
+    __ orr(kWordOfNulls, kWordOfNulls, Operand(kWordOfNulls, LSL, 32));
+#else
+    const Register kWordOfNulls = NULL_REG;
+#endif
+    Label loop;
+    __ Bind(&loop);
+    ASSERT(target::kObjectAlignment == 2 * target::kWordSize);
+    __ stp(kWordOfNulls, kWordOfNulls,
+           Address(R3, 2 * target::kWordSize, Address::PairPostIndex));
+    // Safe to only check every kObjectAlignment bytes instead of each word.
+    ASSERT(kAllocationRedZoneSize >= target::kObjectAlignment);
+    __ subs(R1, R1,
+            Operand(target::kObjectAlignment / target::kCompressedWordSize));
+    __ b(&loop, HI);
 
     // Done allocating and initializing the context.
     // R0: new object.
@@ -2047,20 +2062,24 @@
     // Initialize the remaining words of the object.
     {
       const Register kFieldReg = R4;
-
       __ AddImmediate(kFieldReg, AllocateObjectABI::kResultReg,
                       target::Instance::first_field_offset());
-      Label done, init_loop;
-      __ Bind(&init_loop);
+#if defined(DART_COMPRESSED_POINTERS)
+      const Register kWordOfNulls = TMP;
+      __ andi(kWordOfNulls, NULL_REG, Immediate(0xFFFFFFFF));
+      __ orr(kWordOfNulls, kWordOfNulls, Operand(kWordOfNulls, LSL, 32));
+#else
+      const Register kWordOfNulls = NULL_REG;
+#endif
+      Label loop;
+      __ Bind(&loop);
+      ASSERT(target::kObjectAlignment == 2 * target::kWordSize);
+      __ stp(kWordOfNulls, kWordOfNulls,
+             Address(kFieldReg, 2 * target::kWordSize, Address::PairPostIndex));
+      // Safe to only check every kObjectAlignment bytes instead of each word.
+      ASSERT(kAllocationRedZoneSize >= target::kObjectAlignment);
       __ CompareRegisters(kFieldReg, kNewTopReg);
-      __ b(&done, UNSIGNED_GREATER_EQUAL);
-      __ str(
-          NULL_REG,
-          Address(kFieldReg, target::kCompressedWordSize, Address::PostIndex),
-          kObjectBytes);
-      __ b(&init_loop);
-
-      __ Bind(&done);
+      __ b(&loop, UNSIGNED_LESS);
     }  // kFieldReg = R4
 
     if (is_cls_parameterized) {
@@ -3832,14 +3851,12 @@
     __ AddImmediate(R2, R0, target::TypedData::HeaderSize() - 1);
     __ StoreInternalPointer(
         R0, FieldAddress(R0, target::PointerBase::data_offset()), R2);
-    Label init_loop, done;
-    __ Bind(&init_loop);
+    Label loop;
+    __ Bind(&loop);
+    ASSERT(target::kObjectAlignment == 2 * target::kWordSize);
+    __ stp(ZR, ZR, Address(R2, 2 * target::kWordSize, Address::PairPostIndex));
     __ cmp(R2, Operand(R1));
-    __ b(&done, CS);
-    __ str(ZR, Address(R2, 0));
-    __ add(R2, R2, Operand(target::kWordSize));
-    __ b(&init_loop);
-    __ Bind(&done);
+    __ b(&loop, UNSIGNED_LESS);
 
     __ Ret();
 
diff --git a/runtime/vm/compiler/stub_code_compiler_ia32.cc b/runtime/vm/compiler/stub_code_compiler_ia32.cc
index a40060d..05c9646 100644
--- a/runtime/vm/compiler/stub_code_compiler_ia32.cc
+++ b/runtime/vm/compiler/stub_code_compiler_ia32.cc
@@ -928,17 +928,19 @@
     __ leal(EBX, FieldAddress(AllocateArrayABI::kResultReg, EBX, TIMES_1, 0));
     __ leal(EDI, FieldAddress(AllocateArrayABI::kResultReg,
                               target::Array::header_size()));
-    Label done;
-    Label init_loop;
-    __ Bind(&init_loop);
+    Label loop;
+    __ Bind(&loop);
+    for (intptr_t offset = 0; offset < target::kObjectAlignment;
+         offset += target::kWordSize) {
+      // No generational barrier needed, since we are storing null.
+      __ StoreIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
+                                  Address(EDI, offset), NullObject());
+    }
+    // Safe to only check every kObjectAlignment bytes instead of each word.
+    ASSERT(kAllocationRedZoneSize >= target::kObjectAlignment);
+    __ addl(EDI, Immediate(target::kObjectAlignment));
     __ cmpl(EDI, EBX);
-    __ j(ABOVE_EQUAL, &done, Assembler::kNearJump);
-    // No generational barrier needed, since we are storing null.
-    __ StoreIntoObjectNoBarrier(AllocateArrayABI::kResultReg, Address(EDI, 0),
-                                NullObject());
-    __ addl(EDI, Immediate(target::kWordSize));
-    __ jmp(&init_loop, Assembler::kNearJump);
-    __ Bind(&done);
+    __ j(UNSIGNED_LESS, &loop);
     __ ret();
 
     // Unable to allocate the array using the fast inline code, just call
@@ -1570,16 +1572,18 @@
       // ECX: next word to be initialized.
       // AllocateObjectABI::kTypeArgumentsReg: new object type arguments
       //                                       (if is_cls_parameterized).
-      Label init_loop;
-      Label done;
-      __ Bind(&init_loop);
+      Label loop;
+      __ Bind(&loop);
+      for (intptr_t offset = 0; offset < target::kObjectAlignment;
+           offset += target::kWordSize) {
+        __ StoreIntoObjectNoBarrier(AllocateObjectABI::kResultReg,
+                                    Address(ECX, offset), NullObject());
+      }
+      // Safe to only check every kObjectAlignment bytes instead of each word.
+      ASSERT(kAllocationRedZoneSize >= target::kObjectAlignment);
+      __ addl(ECX, Immediate(target::kObjectAlignment));
       __ cmpl(ECX, EBX);
-      __ j(ABOVE_EQUAL, &done, Assembler::kNearJump);
-      __ StoreIntoObjectNoBarrier(AllocateObjectABI::kResultReg,
-                                  Address(ECX, 0), NullObject());
-      __ addl(ECX, Immediate(target::kWordSize));
-      __ jmp(&init_loop, Assembler::kNearJump);
-      __ Bind(&done);
+      __ j(UNSIGNED_LESS, &loop);
     }
     if (is_cls_parameterized) {
       // AllocateObjectABI::kResultReg: new object (tagged).
@@ -3066,14 +3070,17 @@
     __ leal(EDI, FieldAddress(EAX, target::TypedData::HeaderSize()));
     __ StoreInternalPointer(
         EAX, FieldAddress(EAX, target::PointerBase::data_offset()), EDI);
-    Label done, init_loop;
-    __ Bind(&init_loop);
+    Label loop;
+    __ Bind(&loop);
+    for (intptr_t offset = 0; offset < target::kObjectAlignment;
+         offset += target::kWordSize) {
+      __ movl(Address(EDI, offset), ECX);
+    }
+    // Safe to only check every kObjectAlignment bytes instead of each word.
+    ASSERT(kAllocationRedZoneSize >= target::kObjectAlignment);
+    __ addl(EDI, Immediate(target::kObjectAlignment));
     __ cmpl(EDI, EBX);
-    __ j(ABOVE_EQUAL, &done, Assembler::kNearJump);
-    __ movl(Address(EDI, 0), ECX);
-    __ addl(EDI, Immediate(target::kWordSize));
-    __ jmp(&init_loop, Assembler::kNearJump);
-    __ Bind(&done);
+    __ j(UNSIGNED_LESS, &loop);
 
     __ ret();
 
diff --git a/runtime/vm/compiler/stub_code_compiler_riscv.cc b/runtime/vm/compiler/stub_code_compiler_riscv.cc
index 3b3715c..b9b67fd 100644
--- a/runtime/vm/compiler/stub_code_compiler_riscv.cc
+++ b/runtime/vm/compiler/stub_code_compiler_riscv.cc
@@ -1189,15 +1189,17 @@
                     target::Array::data_offset() - kHeapObjectTag);
     // R3: iterator which initially points to the start of the variable
     // data area to be initialized.
-    Label loop, done;
+    Label loop;
     __ Bind(&loop);
-    // TODO(cshapiro): StoreIntoObjectNoBarrier
-    __ bgeu(T3, T4, &done);
-    __ sx(NULL_REG, Address(T3, 0));
-    __ sx(NULL_REG, Address(T3, target::kCompressedWordSize));
-    __ AddImmediate(T3, 2 * target::kCompressedWordSize);
-    __ j(&loop);  // Loop until T3 == T4.
-    __ Bind(&done);
+    for (intptr_t offset = 0; offset < target::kObjectAlignment;
+         offset += target::kCompressedWordSize) {
+      __ StoreCompressedIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
+                                            Address(T3, offset), NULL_REG);
+    }
+    // Safe to only check every kObjectAlignment bytes instead of each word.
+    ASSERT(kAllocationRedZoneSize >= target::kObjectAlignment);
+    __ addi(T3, T3, target::kObjectAlignment);
+    __ bltu(T3, T4, &loop);
 
     // Done allocating and initializing the array.
     // AllocateArrayABI::kResultReg: new object.
@@ -1873,15 +1875,18 @@
 
       __ AddImmediate(kFieldReg, AllocateObjectABI::kResultReg,
                       target::Instance::first_field_offset());
-      Label done, init_loop;
-      __ Bind(&init_loop);
-      __ CompareRegisters(kFieldReg, kNewTopReg);
-      __ BranchIf(UNSIGNED_GREATER_EQUAL, &done);
-      __ sx(NULL_REG, Address(kFieldReg, 0));
-      __ addi(kFieldReg, kFieldReg, target::kCompressedWordSize);
-      __ j(&init_loop);
-
-      __ Bind(&done);
+      Label loop;
+      __ Bind(&loop);
+      for (intptr_t offset = 0; offset < target::kObjectAlignment;
+           offset += target::kCompressedWordSize) {
+        __ StoreCompressedIntoObjectNoBarrier(AllocateObjectABI::kResultReg,
+                                              Address(kFieldReg, offset),
+                                              NULL_REG);
+      }
+      // Safe to only check every kObjectAlignment bytes instead of each word.
+      ASSERT(kAllocationRedZoneSize >= target::kObjectAlignment);
+      __ addi(kFieldReg, kFieldReg, target::kObjectAlignment);
+      __ bltu(kFieldReg, kNewTopReg, &loop);
     }  // kFieldReg = T4
 
     if (is_cls_parameterized) {
@@ -3627,13 +3632,16 @@
     __ AddImmediate(T3, A0, target::TypedData::HeaderSize() - 1);
     __ StoreInternalPointer(
         A0, FieldAddress(A0, target::PointerBase::data_offset()), T3);
-    Label init_loop, done;
-    __ Bind(&init_loop);
-    __ bgeu(T3, T4, &done);
-    __ sx(ZR, Address(T3, 0));
-    __ addi(T3, T3, target::kWordSize);
-    __ j(&init_loop);
-    __ Bind(&done);
+    Label loop;
+    __ Bind(&loop);
+    for (intptr_t offset = 0; offset < target::kObjectAlignment;
+         offset += target::kWordSize) {
+      __ sx(ZR, Address(T3, offset));
+    }
+    // Safe to only check every kObjectAlignment bytes instead of each word.
+    ASSERT(kAllocationRedZoneSize >= target::kObjectAlignment);
+    __ addi(T3, T3, target::kObjectAlignment);
+    __ bltu(T3, T4, &loop);
 
     __ Ret();
 
diff --git a/runtime/vm/compiler/stub_code_compiler_x64.cc b/runtime/vm/compiler/stub_code_compiler_x64.cc
index c817ad4..c549f75 100644
--- a/runtime/vm/compiler/stub_code_compiler_x64.cc
+++ b/runtime/vm/compiler/stub_code_compiler_x64.cc
@@ -1285,22 +1285,19 @@
     __ LoadObject(R12, NullObject());
     __ leaq(RDI, FieldAddress(AllocateArrayABI::kResultReg,
                               target::Array::header_size()));
-    Label done;
-    Label init_loop;
-    __ Bind(&init_loop);
+    Label loop;
+    __ Bind(&loop);
+    for (intptr_t offset = 0; offset < target::kObjectAlignment;
+         offset += target::kCompressedWordSize) {
+      // No generational barrier needed, since we are storing null.
+      __ StoreCompressedIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
+                                            Address(RDI, offset), R12);
+    }
+    // Safe to only check every kObjectAlignment bytes instead of each word.
+    ASSERT(kAllocationRedZoneSize >= target::kObjectAlignment);
+    __ addq(RDI, Immediate(target::kObjectAlignment));
     __ cmpq(RDI, RCX);
-#if defined(DEBUG)
-    static auto const kJumpLength = Assembler::kFarJump;
-#else
-    static auto const kJumpLength = Assembler::kNearJump;
-#endif  // DEBUG
-    __ j(ABOVE_EQUAL, &done, kJumpLength);
-    // No generational barrier needed, since we are storing null.
-    __ StoreCompressedIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
-                                          Address(RDI, 0), R12);
-    __ addq(RDI, Immediate(target::kCompressedWordSize));
-    __ jmp(&init_loop, kJumpLength);
-    __ Bind(&done);
+    __ j(UNSIGNED_LESS, &loop);
     __ ret();
 
     // Unable to allocate the array using the fast inline code, just call
@@ -1977,21 +1974,19 @@
       __ LoadObject(kNullReg, NullObject());
 
       // Loop until the whole object is initialized.
-      Label init_loop;
-      Label done;
-      __ Bind(&init_loop);
+      Label loop;
+      __ Bind(&loop);
+      for (intptr_t offset = 0; offset < target::kObjectAlignment;
+           offset += target::kCompressedWordSize) {
+        __ StoreCompressedIntoObjectNoBarrier(AllocateObjectABI::kResultReg,
+                                              Address(kNextFieldReg, offset),
+                                              kNullReg);
+      }
+      // Safe to only check every kObjectAlignment bytes instead of each word.
+      ASSERT(kAllocationRedZoneSize >= target::kObjectAlignment);
+      __ addq(kNextFieldReg, Immediate(target::kObjectAlignment));
       __ cmpq(kNextFieldReg, kNewTopReg);
-#if defined(DEBUG)
-      static auto const kJumpLength = Assembler::kFarJump;
-#else
-      static auto const kJumpLength = Assembler::kNearJump;
-#endif  // DEBUG
-      __ j(ABOVE_EQUAL, &done, kJumpLength);
-      __ StoreCompressedIntoObjectNoBarrier(
-          AllocateObjectABI::kResultReg, Address(kNextFieldReg, 0), kNullReg);
-      __ addq(kNextFieldReg, Immediate(target::kCompressedWordSize));
-      __ jmp(&init_loop, Assembler::kNearJump);
-      __ Bind(&done);
+      __ j(UNSIGNED_LESS, &loop);
     }  // kNextFieldReg = RDI, kNullReg = R10
 
     if (is_cls_parameterized) {
@@ -3774,18 +3769,19 @@
     /* RDI: iterator which initially points to the start of the variable */
     /* RBX: scratch register. */
     /* data area to be initialized. */
-    __ xorq(RBX, RBX); /* Zero. */
+    __ pxor(XMM0, XMM0); /* Zero. */
     __ leaq(RDI, FieldAddress(RAX, target::TypedData::HeaderSize()));
     __ StoreInternalPointer(
         RAX, FieldAddress(RAX, target::PointerBase::data_offset()), RDI);
-    Label done, init_loop;
-    __ Bind(&init_loop);
+    Label loop;
+    __ Bind(&loop);
+    ASSERT(target::kObjectAlignment == kFpuRegisterSize);
+    __ movups(Address(RDI, 0), XMM0);
+    // Safe to only check every kObjectAlignment bytes instead of each word.
+    ASSERT(kAllocationRedZoneSize >= target::kObjectAlignment);
+    __ addq(RDI, Immediate(target::kObjectAlignment));
     __ cmpq(RDI, RCX);
-    __ j(ABOVE_EQUAL, &done, Assembler::kNearJump);
-    __ movq(Address(RDI, 0), RBX);
-    __ addq(RDI, Immediate(target::kWordSize));
-    __ jmp(&init_loop, Assembler::kNearJump);
-    __ Bind(&done);
+    __ j(UNSIGNED_LESS, &loop, Assembler::kNearJump);
 
     __ ret();
 
diff --git a/runtime/vm/heap/scavenger.cc b/runtime/vm/heap/scavenger.cc
index 0648d60..a6b53d4 100644
--- a/runtime/vm/heap/scavenger.cc
+++ b/runtime/vm/heap/scavenger.cc
@@ -865,23 +865,32 @@
 
 intptr_t Scavenger::NewSizeInWords(intptr_t old_size_in_words,
                                    GCReason reason) const {
-  if (reason != GCReason::kNewSpace) {
-    // If we GC for a reason other than new-space being full, that's not an
-    // indication that new-space is too small.
-    return old_size_in_words;
+  bool grow = false;
+  if (2 * heap_->isolate_group()->MutatorCount() >
+      (old_size_in_words / kNewPageSizeInWords)) {
+    // Not enough TLABs to give two to each mutator.
+    grow = true;
   }
 
-  if (stats_history_.Size() != 0) {
-    double garbage = stats_history_.Get(0).ExpectedGarbageFraction();
-    if (garbage < (FLAG_new_gen_garbage_threshold / 100.0)) {
-      // Too much survived last time; grow new-space in the hope that a greater
-      // fraction of objects will become unreachable before new-space becomes
-      // full.
-      return Utils::Minimum(max_semi_capacity_in_words_,
-                            old_size_in_words * FLAG_new_gen_growth_factor);
+  if (reason == GCReason::kNewSpace) {
+    // Only a GC triggered by new-space filling up can indicate that
+    // new-space is too small; GCs for other reasons (full collection for
+    // old-space, store-buffer overflow) carry no such signal.
+    if (stats_history_.Size() != 0) {
+      double garbage = stats_history_.Get(0).ExpectedGarbageFraction();
+      if (garbage < (FLAG_new_gen_garbage_threshold / 100.0)) {
+        // Too much survived last time; grow new-space in the hope that a
+        // greater fraction of objects will become unreachable before new-space
+        // becomes full.
+        grow = true;
+      }
     }
   }
 
+  if (grow) {
+    return Utils::Minimum(max_semi_capacity_in_words_,
+                          old_size_in_words * FLAG_new_gen_growth_factor);
+  }
   return old_size_in_words;
 }
 
diff --git a/runtime/vm/heap/scavenger.h b/runtime/vm/heap/scavenger.h
index 229912a..9930fa8 100644
--- a/runtime/vm/heap/scavenger.h
+++ b/runtime/vm/heap/scavenger.h
@@ -33,6 +33,11 @@
 static constexpr intptr_t kNewPageSizeInWords = kNewPageSize / kWordSize;
 static constexpr intptr_t kNewPageMask = ~(kNewPageSize - 1);
 
+// Simplify initialization in allocation stubs by ensuring it is safe
+// to overshoot the object end by up to kAllocationRedZoneSize. (Just as the
+// stack red zone allows one to overshoot the stack pointer.)
+static constexpr intptr_t kAllocationRedZoneSize = kObjectAlignment;
+
 // A page containing new generation objects.
 class NewPage {
  public:
@@ -40,7 +45,7 @@
   void Deallocate();
 
   uword start() const { return memory_->start(); }
-  uword end() const { return memory_->end(); }
+  uword end() const { return memory_->end() - kAllocationRedZoneSize; }
   bool Contains(uword addr) const { return memory_->Contains(addr); }
   void WriteProtect(bool read_only) {
     memory_->Protect(read_only ? VirtualMemory::kReadOnly
@@ -54,6 +59,7 @@
 
   uword object_start() const { return start() + ObjectStartOffset(); }
   uword object_end() const { return owner_ != nullptr ? owner_->top() : top_; }
+  intptr_t used() const { return object_end() - object_start(); }
   void VisitObjects(ObjectVisitor* visitor) const {
     uword addr = object_start();
     uword end = object_end();
@@ -180,6 +186,13 @@
   bool Contains(uword addr) const;
   void WriteProtect(bool read_only);
 
+  intptr_t used_in_words() const {
+    intptr_t size = 0;
+    for (const NewPage* p = head_; p != nullptr; p = p->next()) {
+      size += p->used();
+    }
+    return size >> kWordSizeLog2;
+  }
   intptr_t capacity_in_words() const { return capacity_in_words_; }
   intptr_t max_capacity_in_words() const { return max_capacity_in_words_; }
 
@@ -281,7 +294,7 @@
 
   int64_t UsedInWords() const {
     MutexLocker ml(&space_lock_);
-    return to_->capacity_in_words();
+    return to_->used_in_words();
   }
   int64_t CapacityInWords() const { return to_->max_capacity_in_words(); }
   int64_t ExternalInWords() const { return external_size_ >> kWordSizeLog2; }
diff --git a/runtime/vm/isolate.h b/runtime/vm/isolate.h
index a71c5f5..269048c 100644
--- a/runtime/vm/isolate.h
+++ b/runtime/vm/isolate.h
@@ -573,6 +573,10 @@
 
   void IncreaseMutatorCount(Isolate* mutator, bool is_nested_reenter);
   void DecreaseMutatorCount(Isolate* mutator, bool is_nested_exit);
+  intptr_t MutatorCount() const {
+    ASSERT(Thread::Current()->IsAtSafepoint());  // Otherwise lock is needed.
+    return active_mutators_;
+  }
 
   bool HasTagHandler() const { return library_tag_handler() != nullptr; }
   ObjectPtr CallTagHandler(Dart_LibraryTag tag,
diff --git a/tools/VERSION b/tools/VERSION
index cb60cd8..6e3e47e 100644
--- a/tools/VERSION
+++ b/tools/VERSION
@@ -27,5 +27,5 @@
 MAJOR 2
 MINOR 18
 PATCH 0
-PRERELEASE 61
+PRERELEASE 62
 PRERELEASE_PATCH 0
\ No newline at end of file