Version 2.14.0-309.0.dev

Merge commit '298777c07d843bf79c0ac272405e64b927d01982' into 'dev'
diff --git a/CHANGELOG.md b/CHANGELOG.md
index ed07841..fb270be 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -246,51 +246,6 @@
 [#46545]: https://github.com/dart-lang/sdk/issues/46545
 [1]: https://dart.dev/faq#q-what-browsers-do-you-support-as-javascript-compilation-targets
 
-### Language
-
-*   Add an unsigned shift right operator `>>>`. It pads with zeroes, ignoring
-    the sign bit. On the web platform, `int.>>>` shifts the low 32 bits
-    interpreted as an unsigned integer, so `a >>> b` there gives the same
-    result as `a.toUnsigned(32) >>> b` on the VM.
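-
-    For example (an illustrative sketch; printed values assume the VM's
-    64-bit `int` representation):
-
-    ```dart
-    void main() {
-      print(-8 >> 1);   // Arithmetic shift keeps the sign: -4.
-      print(-8 >>> 1);  // VM: 9223372036854775804 (zero-padded 64 bits).
-      print((-8).toUnsigned(32) >>> 1);  // 2147483644, as `-8 >>> 1` on the web.
-    }
-    ```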
-
-*   Prior to Dart 2.14, metadata (annotations) were not permitted to be
-    specified with generic type arguments.  This restriction is lifted in
-    Dart 2.14.
-
-    ```dart
-    class C<T> {
-      const C();
-    }
-    @C();      // Previously permitted.
-    @C<int>(); // Previously an error, now permitted.
-    ```
-
-*   Prior to Dart 2.14, generic function types were not permitted as arguments
-    to generic classes or functions, nor as generic bounds.  This restriction
-    is lifted in Dart 2.14.
-
-    ```dart
-    T wrapWithLogging<T>(T f) {
-      if (f is void Function<T>(T x)) {
-        return <S>(S x) {
-          print("Call: f<$S>($x)");
-          var r = f<S>(x);
-          print("Return: $x");
-          return r;
-        } as T;
-      } // More cases here
-      return f;
-    }
-    void foo<T>(T x) {
-      print("Foo!");
-    }
-    void main() {
-      // Previously an error, now permitted.
-      var f = wrapWithLogging<void Function<T>(T)>(foo);
-      f<int>(3);
-    }
-    ```
-
 ## 2.13.4 - 2021-06-28
 
 This is a patch release that fixes:
diff --git a/pkg/analysis_server/lib/src/analysis_server.dart b/pkg/analysis_server/lib/src/analysis_server.dart
index ab4deff..94a7e01 100644
--- a/pkg/analysis_server/lib/src/analysis_server.dart
+++ b/pkg/analysis_server/lib/src/analysis_server.dart
@@ -679,83 +679,8 @@
   @override
   void listenAnalysisDriver(analysis.AnalysisDriver analysisDriver) {
     analysisDriver.results.listen((result) {
-      var path = result.path;
-      filesToFlush.add(path);
-      if (analysisServer.isAnalyzed(path)) {
-        _notificationManager.recordAnalysisErrors(NotificationManager.serverId,
-            path, server.doAnalysisError_listFromEngine(result));
-      }
-      analysisServer.getDocumentationCacheFor(result)?.cacheFromResult(result);
-      analysisServer.getExtensionCacheFor(result)?.cacheFromResult(result);
-      var unit = result.unit;
-      if (unit != null) {
-        if (analysisServer._hasAnalysisServiceSubscription(
-            AnalysisService.HIGHLIGHTS, path)) {
-          _runDelayed(() {
-            _notificationManager.recordHighlightRegions(
-                NotificationManager.serverId,
-                path,
-                _computeHighlightRegions(unit));
-          });
-        }
-        if (analysisServer._hasAnalysisServiceSubscription(
-            AnalysisService.NAVIGATION, path)) {
-          _runDelayed(() {
-            _notificationManager.recordNavigationParams(
-                NotificationManager.serverId,
-                path,
-                _computeNavigationParams(path, unit));
-          });
-        }
-        if (analysisServer._hasAnalysisServiceSubscription(
-            AnalysisService.OCCURRENCES, path)) {
-          _runDelayed(() {
-            _notificationManager.recordOccurrences(
-                NotificationManager.serverId, path, _computeOccurrences(unit));
-          });
-        }
-//          if (analysisServer._hasAnalysisServiceSubscription(
-//              AnalysisService.OUTLINE, path)) {
-//            _runDelayed(() {
-//              // TODO(brianwilkerson) Change NotificationManager to store params
-//              // so that fileKind and libraryName can be recorded / passed along.
-//              notificationManager.recordOutlines(NotificationManager.serverId,
-//                  path, _computeOutlineParams(path, unit, result.lineInfo));
-//            });
-//          }
-        if (analysisServer._hasAnalysisServiceSubscription(
-            AnalysisService.CLOSING_LABELS, path)) {
-          _runDelayed(() {
-            sendAnalysisNotificationClosingLabels(
-                analysisServer, path, result.lineInfo, unit);
-          });
-        }
-        if (analysisServer._hasAnalysisServiceSubscription(
-            AnalysisService.FOLDING, path)) {
-          _runDelayed(() {
-            sendAnalysisNotificationFolding(
-                analysisServer, path, result.lineInfo, unit);
-          });
-        }
-        if (analysisServer._hasAnalysisServiceSubscription(
-            AnalysisService.OUTLINE, path)) {
-          _runDelayed(() {
-            sendAnalysisNotificationOutline(analysisServer, result);
-          });
-        }
-        if (analysisServer._hasAnalysisServiceSubscription(
-            AnalysisService.OVERRIDES, path)) {
-          _runDelayed(() {
-            sendAnalysisNotificationOverrides(analysisServer, path, unit);
-          });
-        }
-        if (analysisServer._hasFlutterServiceSubscription(
-            FlutterService.OUTLINE, path)) {
-          _runDelayed(() {
-            sendFlutterNotificationOutline(analysisServer, result);
-          });
-        }
-        // TODO(scheglov) Implement notifications for AnalysisService.IMPLEMENTED.
+      if (result is FileResult) {
+        _handleFileResult(result);
       }
     });
     analysisDriver.exceptions.listen(analysisServer.logExceptionResult);
@@ -788,6 +713,100 @@
     return collector.allOccurrences;
   }
 
+  void _handleFileResult(FileResult result) {
+    var path = result.path;
+    filesToFlush.add(path);
+
+    if (result is AnalysisResultWithErrors) {
+      if (analysisServer.isAnalyzed(path)) {
+        _notificationManager.recordAnalysisErrors(NotificationManager.serverId,
+            path, server.doAnalysisError_listFromEngine(result));
+      }
+    }
+
+    if (result is ResolvedUnitResult) {
+      _handleResolvedUnitResult(result);
+    }
+  }
+
+  void _handleResolvedUnitResult(ResolvedUnitResult result) {
+    var path = result.path;
+
+    analysisServer.getDocumentationCacheFor(result)?.cacheFromResult(result);
+    analysisServer.getExtensionCacheFor(result)?.cacheFromResult(result);
+
+    var unit = result.unit;
+    if (unit != null) {
+      if (analysisServer._hasAnalysisServiceSubscription(
+          AnalysisService.HIGHLIGHTS, path)) {
+        _runDelayed(() {
+          _notificationManager.recordHighlightRegions(
+              NotificationManager.serverId,
+              path,
+              _computeHighlightRegions(unit));
+        });
+      }
+      if (analysisServer._hasAnalysisServiceSubscription(
+          AnalysisService.NAVIGATION, path)) {
+        _runDelayed(() {
+          _notificationManager.recordNavigationParams(
+              NotificationManager.serverId,
+              path,
+              _computeNavigationParams(path, unit));
+        });
+      }
+      if (analysisServer._hasAnalysisServiceSubscription(
+          AnalysisService.OCCURRENCES, path)) {
+        _runDelayed(() {
+          _notificationManager.recordOccurrences(
+              NotificationManager.serverId, path, _computeOccurrences(unit));
+        });
+      }
+      // if (analysisServer._hasAnalysisServiceSubscription(
+      //     AnalysisService.OUTLINE, path)) {
+      //   _runDelayed(() {
+      //     // TODO(brianwilkerson) Change NotificationManager to store params
+      //     // so that fileKind and libraryName can be recorded / passed along.
+      //     notificationManager.recordOutlines(NotificationManager.serverId, path,
+      //         _computeOutlineParams(path, unit, result.lineInfo));
+      //   });
+      // }
+      if (analysisServer._hasAnalysisServiceSubscription(
+          AnalysisService.CLOSING_LABELS, path)) {
+        _runDelayed(() {
+          sendAnalysisNotificationClosingLabels(
+              analysisServer, path, result.lineInfo, unit);
+        });
+      }
+      if (analysisServer._hasAnalysisServiceSubscription(
+          AnalysisService.FOLDING, path)) {
+        _runDelayed(() {
+          sendAnalysisNotificationFolding(
+              analysisServer, path, result.lineInfo, unit);
+        });
+      }
+      if (analysisServer._hasAnalysisServiceSubscription(
+          AnalysisService.OUTLINE, path)) {
+        _runDelayed(() {
+          sendAnalysisNotificationOutline(analysisServer, result);
+        });
+      }
+      if (analysisServer._hasAnalysisServiceSubscription(
+          AnalysisService.OVERRIDES, path)) {
+        _runDelayed(() {
+          sendAnalysisNotificationOverrides(analysisServer, path, unit);
+        });
+      }
+      if (analysisServer._hasFlutterServiceSubscription(
+          FlutterService.OUTLINE, path)) {
+        _runDelayed(() {
+          sendFlutterNotificationOutline(analysisServer, result);
+        });
+      }
+      // TODO(scheglov) Implement notifications for AnalysisService.IMPLEMENTED.
+    }
+  }
+
   /// Run [f] in a new [Future].
   ///
   /// This method is used to delay sending notifications. If there is a more
diff --git a/pkg/analysis_server/lib/src/lsp/lsp_analysis_server.dart b/pkg/analysis_server/lib/src/lsp/lsp_analysis_server.dart
index 31c8884..a09919e 100644
--- a/pkg/analysis_server/lib/src/lsp/lsp_analysis_server.dart
+++ b/pkg/analysis_server/lib/src/lsp/lsp_analysis_server.dart
@@ -825,37 +825,8 @@
     }
 
     analysisDriver.results.listen((result) {
-      var path = result.path;
-      filesToFlush.add(path);
-      if (analysisServer.isAnalyzed(path)) {
-        final serverErrors = protocol.doAnalysisError_listFromEngine(result);
-        recordAnalysisErrors(path, serverErrors);
-      }
-      analysisServer.getDocumentationCacheFor(result)?.cacheFromResult(result);
-      analysisServer.getExtensionCacheFor(result)?.cacheFromResult(result);
-      final unit = result.unit;
-      if (unit != null) {
-        if (analysisServer.shouldSendClosingLabelsFor(path)) {
-          final labels = DartUnitClosingLabelsComputer(result.lineInfo, unit)
-              .compute()
-              .map((l) => toClosingLabel(result.lineInfo, l))
-              .toList();
-
-          analysisServer.publishClosingLabels(path, labels);
-        }
-        if (analysisServer.shouldSendOutlineFor(path)) {
-          final outline = DartUnitOutlineComputer(
-            result,
-            withBasicFlutter: true,
-          ).compute();
-          final lspOutline = toOutline(result.lineInfo, outline);
-          analysisServer.publishOutline(path, lspOutline);
-        }
-        if (analysisServer.shouldSendFlutterOutlineFor(path)) {
-          final outline = FlutterOutlineComputer(result).compute();
-          final lspOutline = toFlutterOutline(result.lineInfo, outline);
-          analysisServer.publishFlutterOutline(path, lspOutline);
-        }
+      if (result is FileResult) {
+        _handleFileResult(result);
       }
     });
     analysisDriver.exceptions.listen(analysisServer.logExceptionResult);
@@ -870,6 +841,54 @@
         .recordAnalysisErrors(NotificationManager.serverId, path, errorsToSend);
   }
 
+  void _handleFileResult(FileResult result) {
+    var path = result.path;
+    filesToFlush.add(path);
+
+    if (result is AnalysisResultWithErrors) {
+      if (analysisServer.isAnalyzed(path)) {
+        final serverErrors = protocol.doAnalysisError_listFromEngine(result);
+        recordAnalysisErrors(path, serverErrors);
+      }
+    }
+
+    if (result is ResolvedUnitResult) {
+      _handleResolvedUnitResult(result);
+    }
+  }
+
+  void _handleResolvedUnitResult(ResolvedUnitResult result) {
+    var path = result.path;
+
+    analysisServer.getDocumentationCacheFor(result)?.cacheFromResult(result);
+    analysisServer.getExtensionCacheFor(result)?.cacheFromResult(result);
+
+    final unit = result.unit;
+    if (unit != null) {
+      if (analysisServer.shouldSendClosingLabelsFor(path)) {
+        final labels = DartUnitClosingLabelsComputer(result.lineInfo, unit)
+            .compute()
+            .map((l) => toClosingLabel(result.lineInfo, l))
+            .toList();
+
+        analysisServer.publishClosingLabels(path, labels);
+      }
+      if (analysisServer.shouldSendOutlineFor(path)) {
+        final outline = DartUnitOutlineComputer(
+          result,
+          withBasicFlutter: true,
+        ).compute();
+        final lspOutline = toOutline(result.lineInfo, outline);
+        analysisServer.publishOutline(path, lspOutline);
+      }
+      if (analysisServer.shouldSendFlutterOutlineFor(path)) {
+        final outline = FlutterOutlineComputer(result).compute();
+        final lspOutline = toFlutterOutline(result.lineInfo, outline);
+        analysisServer.publishFlutterOutline(path, lspOutline);
+      }
+    }
+  }
+
   bool _shouldSendError(protocol.AnalysisError error) =>
       error.code != ErrorType.TODO.name.toLowerCase() ||
       analysisServer.clientConfiguration.showTodos;
diff --git a/pkg/analysis_server/lib/src/protocol_server.dart b/pkg/analysis_server/lib/src/protocol_server.dart
index c03f74b..5283798 100644
--- a/pkg/analysis_server/lib/src/protocol_server.dart
+++ b/pkg/analysis_server/lib/src/protocol_server.dart
@@ -27,7 +27,7 @@
 /// Returns a list of AnalysisErrors corresponding to the given list of Engine
 /// errors.
 List<AnalysisError> doAnalysisError_listFromEngine(
-    engine.ResolvedUnitResult result) {
+    engine.AnalysisResultWithErrors result) {
   return mapEngineErrors(result, result.errors, newAnalysisError_fromEngine);
 }
 
@@ -79,9 +79,10 @@
 
 /// Translates engine errors through the ErrorProcessor.
 List<T> mapEngineErrors<T>(
-    engine.ResolvedUnitResult result,
+    engine.AnalysisResultWithErrors result,
     List<engine.AnalysisError> errors,
-    T Function(engine.ResolvedUnitResult result, engine.AnalysisError error,
+    T Function(
+            engine.AnalysisResultWithErrors result, engine.AnalysisError error,
             [engine.ErrorSeverity errorSeverity])
         constructor) {
   var analysisOptions = result.session.analysisContext.analysisOptions;
@@ -106,7 +107,7 @@
 ///
 /// If an [errorSeverity] is specified, it will override the one in [error].
 AnalysisError newAnalysisError_fromEngine(
-    engine.ResolvedUnitResult result, engine.AnalysisError error,
+    engine.AnalysisResultWithErrors result, engine.AnalysisError error,
     [engine.ErrorSeverity? errorSeverity]) {
   var errorCode = error.errorCode;
   // prepare location
@@ -155,7 +156,7 @@
 
 /// Create a DiagnosticMessage based on an [engine.DiagnosticMessage].
 DiagnosticMessage newDiagnosticMessage(
-    engine.ResolvedUnitResult result, engine.DiagnosticMessage message) {
+    engine.AnalysisResultWithErrors result, engine.DiagnosticMessage message) {
   var file = message.filePath;
   var offset = message.offset;
   var length = message.length;
diff --git a/pkg/analyzer/CHANGELOG.md b/pkg/analyzer/CHANGELOG.md
index 69c3b4a..e8557d9 100644
--- a/pkg/analyzer/CHANGELOG.md
+++ b/pkg/analyzer/CHANGELOG.md
@@ -4,6 +4,11 @@
 * Changed `ResolvedLibraryResult.element` to be non-nullable.
 * Changed `ResolvedLibraryResult.units` to be non-nullable.
 * Deprecated and renamed `AnalysisSession.getXyz2()` into `getXyz()`.
+* Changed `AnalysisDriver.results` to `Stream<Object>`.
+  It used to always produce `ResolvedUnitResult`s, but sometimes their
+  `content` and `unit` were `null` when the result actually contained only
+  errors. Now it produces either a `ResolvedUnitResult`, an `ErrorsResult`,
+  or other kinds of results that might be added in the future.
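+
+  For example, a listener can now dispatch on the runtime type (a minimal
+  sketch, assuming an existing `driver`):
+
+  ```dart
+  driver.results.listen((result) {
+    if (result is ResolvedUnitResult) {
+      // Fully resolved unit, e.g. for a priority file.
+    } else if (result is ErrorsResult) {
+      // Errors only; no resolved AST is available.
+    }
+  });
+  ```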
 
 ## 2.0.0
 * Removed deprecated `Scope.lookup2()`.
diff --git a/pkg/analyzer/lib/src/dart/analysis/driver.dart b/pkg/analyzer/lib/src/dart/analysis/driver.dart
index 28d520f..52d65fc 100644
--- a/pkg/analyzer/lib/src/dart/analysis/driver.dart
+++ b/pkg/analyzer/lib/src/dart/analysis/driver.dart
@@ -201,10 +201,10 @@
   final _partsToAnalyze = <String>{};
 
   /// The controller for the [results] stream.
-  final _resultController = StreamController<ResolvedUnitResult>();
+  final _resultController = StreamController<Object>();
 
   /// The stream that will be written to when analysis results are produced.
-  late final Stream<ResolvedUnitResult> _onResults;
+  late final Stream<Object> _onResults;
 
   /// Resolution signatures of the most recently produced results for files.
   final Map<String, String> _lastProducedSignatures = {};
@@ -383,6 +383,13 @@
   /// an analysis result is produced for every added file prior to the next time
   /// the analysis state transitions to "idle".
   ///
+  /// [ResolvedUnitResult]s are produced for:
+  /// 1. Files requested using [getResult2].
+  /// 2. Files passed to [addFile] which are also in [priorityFiles].
+  ///
+  /// [ErrorsResult]s are produced for:
+  /// 1. Files passed to [addFile] which are not in [priorityFiles].
+  ///
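+  /// For example (illustrative; assume `aPath` is in [priorityFiles] and
+  /// `bPath` is not):
+  ///
+  /// ```dart
+  /// driver.addFile(aPath); // Produces a [ResolvedUnitResult].
+  /// driver.addFile(bPath); // Produces an [ErrorsResult].
+  /// ```
+  ///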
   /// At least one analysis result is produced for every file passed to
   /// [addFile] or [changeFile] prior to the next time the analysis state
   /// transitions to "idle", unless the file is later removed from analysis
@@ -394,7 +401,7 @@
   ///
   /// Results might be produced even for files that have never been added
   /// using [addFile], for example when [getResult2] was called for a file.
-  Stream<ResolvedUnitResult> get results => _onResults;
+  Stream<Object> get results => _onResults;
 
   /// Return the search support for the driver.
   Search get search => _search;
diff --git a/pkg/analyzer/test/src/dart/analysis/base.dart b/pkg/analyzer/test/src/dart/analysis/base.dart
index 542e7f8..6b8cfd1 100644
--- a/pkg/analyzer/test/src/dart/analysis/base.dart
+++ b/pkg/analyzer/test/src/dart/analysis/base.dart
@@ -141,7 +141,11 @@
     driver = createAnalysisDriver();
     scheduler.start();
     scheduler.status.listen(allStatuses.add);
-    driver.results.listen(allResults.add);
+    driver.results.listen((result) {
+      if (result is ResolvedUnitResult) {
+        allResults.add(result);
+      }
+    });
     driver.exceptions.listen(allExceptions.add);
   }
 
diff --git a/pkg/analyzer/test/src/dart/analysis/driver_test.dart b/pkg/analyzer/test/src/dart/analysis/driver_test.dart
index b0c354c..998836a 100644
--- a/pkg/analyzer/test/src/dart/analysis/driver_test.dart
+++ b/pkg/analyzer/test/src/dart/analysis/driver_test.dart
@@ -74,7 +74,11 @@
       analysisOptions: AnalysisOptionsImpl(),
       packages: Packages.empty,
     );
-    driver.results.forEach(allResults.add);
+    driver.results.listen((result) {
+      if (result is ResolvedUnitResult) {
+        allResults.add(result);
+      }
+    });
     return driver;
   }
 
diff --git a/pkg/dev_compiler/lib/src/kernel/asset_file_system.dart b/pkg/dev_compiler/lib/src/kernel/asset_file_system.dart
index 4d73fb2..930fce7 100644
--- a/pkg/dev_compiler/lib/src/kernel/asset_file_system.dart
+++ b/pkg/dev_compiler/lib/src/kernel/asset_file_system.dart
@@ -93,7 +93,7 @@
       Future<T> Function(RetryTimeoutClient httpClient) body) async {
     RetryTimeoutClient httpClient;
     try {
-      httpClient = RetryTimeoutClient(HttpClient(), retries: 4);
+      httpClient = RetryTimeoutClient(HttpClient(), retries: 5);
       return await body(httpClient);
     } on Exception catch (e, s) {
       throw FileSystemException(uri, '$e:$s');
diff --git a/pkg/dev_compiler/lib/src/kernel/retry_timeout_client.dart b/pkg/dev_compiler/lib/src/kernel/retry_timeout_client.dart
index 36a768e..dfaa3d9 100644
--- a/pkg/dev_compiler/lib/src/kernel/retry_timeout_client.dart
+++ b/pkg/dev_compiler/lib/src/kernel/retry_timeout_client.dart
@@ -99,7 +99,8 @@
   void close({bool force = false}) => _inner.close(force: force);
 }
 
-bool _defaultWhen(HttpClientResponse response) => response.statusCode == 503;
+bool _defaultWhen(HttpClientResponse response) =>
+    response.statusCode == 500 || response.statusCode == 503;
 
 bool _defaultWhenError(Object error, StackTrace stackTrace) =>
     error is OSError ||
diff --git a/pkg/dev_compiler/test/expression_compiler/asset_file_system_test.dart b/pkg/dev_compiler/test/expression_compiler/asset_file_system_test.dart
index a11668a..94a6543 100644
--- a/pkg/dev_compiler/test/expression_compiler/asset_file_system_test.dart
+++ b/pkg/dev_compiler/test/expression_compiler/asset_file_system_test.dart
@@ -7,6 +7,7 @@
 import 'dart:async';
 import 'dart:convert';
 import 'dart:io' show HttpServer;
+import 'dart:math';
 
 import 'package:browser_launcher/browser_launcher.dart';
 import 'package:dev_compiler/src/kernel/asset_file_system.dart';
@@ -66,6 +67,34 @@
   return Response.internalServerError();
 }
 
+FutureOr<Response> unreliableHandler(Request request) {
+  final uri = request.requestedUri;
+  final headers = {
+    'content-length': '${utf8.encode(_smallFileContents).length}',
+    ...request.headers,
+  };
+
+  if (Random().nextInt(3) == 0) return Response.internalServerError();
+
+  if (request.method == 'HEAD') {
+    // 'exists'
+    return uri.pathSegments.last == _existingFile
+        ? Response.ok(null, headers: headers)
+        : Response.notFound(uri.toString());
+  }
+  if (request.method == 'GET') {
+    // 'readAsBytes'
+    return uri.pathSegments.last == _existingFile
+        ? Response.ok(_smallFileContents, headers: headers)
+        : Response.notFound(uri.toString());
+  }
+  return Response.internalServerError();
+}
+
+FutureOr<Response> alwaysFailingHandler(Request request) {
+  return Response.internalServerError();
+}
+
 void main() async {
   HttpServer server;
   AssetFileSystem fileSystem;
@@ -198,13 +227,117 @@
     test('can read a lot of files concurrently', () async {
       var entity = fileSystem.entityForUri(Uri.parse(_existingFile));
       var futures = [
-        for (var i = 0; i < 512; i++) entity.readAsBytes(),
+        for (var i = 0; i < 512; i++) entity.readAsString(),
       ];
       var results = await Future.wait(futures);
       var fileContents = _largeFileContents();
       for (var result in results) {
-        expect(utf8.decode(result), fileContents);
+        expect(result, fileContents);
       }
     }, timeout: const Timeout.factor(2));
   });
+
+  group('AssetFileSystem with an unreliable server', () {
+    setUpAll(() async {
+      var hostname = 'localhost';
+      var port = await findUnusedPort();
+
+      server = await HttpMultiServer.bind(hostname, port);
+      fileSystem =
+          AssetFileSystem(StandardFileSystem.instance, hostname, '$port');
+
+      serveRequests(server, unreliableHandler);
+    });
+
+    tearDownAll(() async {
+      await expectLater(server.close(), completes);
+    });
+
+    test('can tell if file exists', () async {
+      var entity = fileSystem.entityForUri(Uri.parse(_existingFile));
+      expect(await entity.exists(), true);
+    });
+
+    test('can tell if file does not exist', () async {
+      var entity = fileSystem.entityForUri(Uri.parse(_nonExistingFile));
+      expect(await entity.exists(), false);
+    });
+
+    test('can read existing file using readAsBytes', () async {
+      var entity = fileSystem.entityForUri(Uri.parse(_existingFile));
+      expect(await entity.readAsBytes(), _smallFileBytes);
+    });
+
+    test('can read and decode existing file using readAsBytes', () async {
+      var entity = fileSystem.entityForUri(Uri.parse(_existingFile));
+      expect(utf8.decode(await entity.readAsBytes()), _smallFileContents);
+    });
+
+    test('can read existing file using readAsString', () async {
+      var entity = fileSystem.entityForUri(Uri.parse(_existingFile));
+      expect(await entity.readAsString(), _smallFileContents);
+    });
+
+    test('cannot read non-existing file', () async {
+      var entity = fileSystem.entityForUri(Uri.parse(_nonExistingFile));
+      await expectLater(
+          entity.readAsBytes(), throwsA(isA<FileSystemException>()));
+    });
+
+    test('can read a lot of files concurrently', () async {
+      var entity = fileSystem.entityForUri(Uri.parse(_existingFile));
+      var futures = [
+        for (var i = 0; i < 512; i++) entity.readAsString(),
+      ];
+      var results = await Future.wait(futures);
+      for (var result in results) {
+        expect(result, _smallFileContents);
+      }
+    }, timeout: const Timeout.factor(2));
+  });
+
+  group('AssetFileSystem with failing server', () {
+    setUpAll(() async {
+      var hostname = 'localhost';
+      var port = await findUnusedPort();
+
+      server = await HttpMultiServer.bind(hostname, port);
+      fileSystem =
+          AssetFileSystem(StandardFileSystem.instance, hostname, '$port');
+
+      serveRequests(server, alwaysFailingHandler);
+    });
+
+    tearDownAll(() async {
+      await expectLater(server.close(), completes);
+    });
+
+    test('cannot tell if file exists', () async {
+      var entity = fileSystem.entityForUri(Uri.parse(_existingFile));
+      expect(await entity.exists(), false);
+    });
+
+    test('cannot tell if file does not exist', () async {
+      var entity = fileSystem.entityForUri(Uri.parse(_nonExistingFile));
+      expect(await entity.exists(), false);
+    });
+
+    test('cannot read existing file using readAsBytes', () async {
+      var entity = fileSystem.entityForUri(Uri.parse(_existingFile));
+      await expectLater(
+          entity.readAsBytes(), throwsA(isA<FileSystemException>()));
+    });
+
+    test('cannot read existing file using readAsString', () async {
+      var entity = fileSystem.entityForUri(Uri.parse(_existingFile));
+      await expectLater(
+          entity.readAsString(), throwsA(isA<FileSystemException>()));
+    });
+
+    test('cannot read non-existing file', () async {
+      var entity = fileSystem.entityForUri(Uri.parse(_nonExistingFile));
+      await expectLater(
+          entity.readAsBytes(), throwsA(isA<FileSystemException>()));
+    });
+  });
 }
diff --git a/runtime/bin/elf_loader.cc b/runtime/bin/elf_loader.cc
index 92d6685..43b152d 100644
--- a/runtime/bin/elf_loader.cc
+++ b/runtime/bin/elf_loader.cc
@@ -6,7 +6,6 @@
 #include <bin/file.h>
 #include <platform/elf.h>
 #include <platform/globals.h>
-#include <vm/bss_relocs.h>
 #include <vm/cpu.h>
 #include <vm/virtual_memory.h>
 
@@ -239,8 +238,6 @@
   const char* dynamic_string_table_ = nullptr;
   const dart::elf::Symbol* dynamic_symbol_table_ = nullptr;
   uword dynamic_symbol_count_ = 0;
-  uword* vm_bss_ = nullptr;
-  uword* isolate_bss_ = nullptr;
 
   DISALLOW_COPY_AND_ASSIGN(LoadedElf);
 };
@@ -468,20 +465,11 @@
       dynamic_symbol_table_ = reinterpret_cast<const dart::elf::Symbol*>(
           base_->start() + header.memory_offset);
       dynamic_symbol_count_ = header.file_size / sizeof(dart::elf::Symbol);
-    } else if (strcmp(name, ".bss") == 0) {
-      auto const bss_size =
-          (BSS::kVmEntryCount + BSS::kIsolateEntryCount) * kWordSize;
-      CHECK_ERROR(header.memory_offset != 0, ".bss must be loaded.");
-      CHECK_ERROR(header.file_size >= bss_size,
-                  ".bss does not have enough space.");
-      vm_bss_ = reinterpret_cast<uword*>(base_->start() + header.memory_offset);
-      isolate_bss_ = vm_bss_ + BSS::kVmEntryCount;
     }
   }
 
   CHECK_ERROR(dynamic_string_table_ != nullptr, "Couldn't find .dynstr.");
   CHECK_ERROR(dynamic_symbol_table_ != nullptr, "Couldn't find .dynsym.");
-  CHECK_ERROR(vm_bss_ != nullptr, "Couldn't find .bss.");
   return true;
 }
 
diff --git a/runtime/lib/object.cc b/runtime/lib/object.cc
index 129caeb..14dacfb 100644
--- a/runtime/lib/object.cc
+++ b/runtime/lib/object.cc
@@ -308,6 +308,11 @@
   return Object::null();
 }
 
+DEFINE_NATIVE_ENTRY(Internal_deoptimizeFunctionsOnStack, 0, 0) {
+  DeoptimizeFunctionsOnStack();
+  return Object::null();
+}
+
 static bool ExtractInterfaceTypeArgs(Zone* zone,
                                      const Class& instance_cls,
                                      const TypeArguments& instance_type_args,
diff --git a/runtime/platform/atomic.h b/runtime/platform/atomic.h
index 98bc900..ffb5bdc 100644
--- a/runtime/platform/atomic.h
+++ b/runtime/platform/atomic.h
@@ -21,9 +21,16 @@
   T load(std::memory_order order = std::memory_order_relaxed) const {
     return value_.load(order);
   }
+  T load(std::memory_order order = std::memory_order_relaxed) const volatile {
+    return value_.load(order);
+  }
   void store(T arg, std::memory_order order = std::memory_order_relaxed) {
     value_.store(arg, order);
   }
+  void store(T arg,
+             std::memory_order order = std::memory_order_relaxed) volatile {
+    value_.store(arg, order);
+  }
 
   T fetch_add(T arg, std::memory_order order = std::memory_order_relaxed) {
     return value_.fetch_add(arg, order);
@@ -44,6 +51,12 @@
       std::memory_order order = std::memory_order_relaxed) {
     return value_.compare_exchange_weak(expected, desired, order, order);
   }
+  bool compare_exchange_weak(
+      T& expected,  // NOLINT
+      T desired,
+      std::memory_order order = std::memory_order_relaxed) volatile {
+    return value_.compare_exchange_weak(expected, desired, order, order);
+  }
   bool compare_exchange_strong(
       T& expected,  // NOLINT
       T desired,
diff --git a/runtime/tests/vm/dart/isolates/regress_46539_test.dart b/runtime/tests/vm/dart/isolates/regress_46539_test.dart
new file mode 100644
index 0000000..b07d133
--- /dev/null
+++ b/runtime/tests/vm/dart/isolates/regress_46539_test.dart
@@ -0,0 +1,83 @@
+// Copyright (c) 2021, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+// VMOptions=--optimization-filter=foo --enable-isolate-groups --experimental-enable-isolate-groups-jit --no-use-osr --optimization-counter-threshold=1 --deterministic
+
+// Important: This is a regression test for a concurrency issue; if this test
+// is flaky, it is essentially failing!
+
+import 'dart:async';
+import 'dart:io';
+import 'dart:isolate';
+import 'dart:_internal' show VMInternalsForTesting;
+
+import 'package:expect/expect.dart';
+
+const int isolateCount = 3;
+const int deoptIsolateId = 0;
+const int polyIsolateId = 1;
+
+final bool isAOT = Platform.executable.contains('dart_precompiled_runtime');
+
+main() async {
+  // This test causes deoptimizations (via a helper in `dart:_internal`) and
+  // therefore does not run in AOT.
+  if (isAOT) return;
+
+  final onExit = ReceivePort();
+  final onError = ReceivePort()
+    ..listen((error) {
+      print('Error: $error');
+      exitCode = 250;
+    });
+  for (int i = 0; i < isolateCount; ++i) {
+    await Isolate.spawn(isolate, i,
+        onExit: onExit.sendPort, onError: onError.sendPort);
+  }
+  final onExits = StreamIterator(onExit);
+  for (int i = 0; i < isolateCount; ++i) {
+    Expect.isTrue(await onExits.moveNext());
+  }
+  onExits.cancel();
+  onError.close();
+}
+
+final globalA = A();
+final globalB = B();
+
+isolate(int isolateId) {
+  final A a = isolateId == polyIsolateId ? globalB : globalA;
+  if (isolateId == polyIsolateId) {
+    // Sleep for 0.5 seconds so the other isolates are already making calls.
+    sleep(500000);
+  }
+
+  // This runs in unoptimized mode and will therefore do switchable calls.
+  final sw = Stopwatch()..start();
+  while (sw.elapsedMicroseconds < 2000000) {
+    a.foo(isolateId);
+    a.foo(isolateId);
+    a.foo(isolateId);
+    a.foo(isolateId);
+  }
+}
+
+class A {
+  @pragma('vm:never-inline')
+  foo(int isolateId) {
+    if (isolateId == deoptIsolateId) {
+      VMInternalsForTesting.deoptimizeFunctionsOnStack();
+    }
+  }
+}
+
+class B implements A {
+  @pragma('vm:never-inline')
+  foo(int isolateId) {}
+}
+
+void sleep(int us) {
+  final sw = Stopwatch()..start();
+  while (sw.elapsedMicroseconds < us);
+}
diff --git a/runtime/tests/vm/dart_2/isolates/regress_46539_test.dart b/runtime/tests/vm/dart_2/isolates/regress_46539_test.dart
new file mode 100644
index 0000000..b07d133
--- /dev/null
+++ b/runtime/tests/vm/dart_2/isolates/regress_46539_test.dart
@@ -0,0 +1,83 @@
+// Copyright (c) 2021, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+// VMOptions=--optimization-filter=foo --enable-isolate-groups --experimental-enable-isolate-groups-jit --no-use-osr --optimization-counter-threshold=1 --deterministic
+
+// Important: This is a regression test for a concurrency issue; if this test
+// is flaky, it is essentially failing!
+
+import 'dart:async';
+import 'dart:io';
+import 'dart:isolate';
+import 'dart:_internal' show VMInternalsForTesting;
+
+import 'package:expect/expect.dart';
+
+const int isolateCount = 3;
+const int deoptIsolateId = 0;
+const int polyIsolateId = 1;
+
+final bool isAOT = Platform.executable.contains('dart_precompiled_runtime');
+
+main() async {
+  // This test causes deoptimizations (via a helper in `dart:_internal`) and
+  // therefore does not run in AOT.
+  if (isAOT) return;
+
+  final onExit = ReceivePort();
+  final onError = ReceivePort()
+    ..listen((error) {
+      print('Error: $error');
+      exitCode = 250;
+    });
+  for (int i = 0; i < isolateCount; ++i) {
+    await Isolate.spawn(isolate, i,
+        onExit: onExit.sendPort, onError: onError.sendPort);
+  }
+  final onExits = StreamIterator(onExit);
+  for (int i = 0; i < isolateCount; ++i) {
+    Expect.isTrue(await onExits.moveNext());
+  }
+  onExits.cancel();
+  onError.close();
+}
+
+final globalA = A();
+final globalB = B();
+
+isolate(int isolateId) {
+  final A a = isolateId == polyIsolateId ? globalB : globalA;
+  if (isolateId == polyIsolateId) {
+    // Sleep for 0.5 seconds so the other isolates are already making calls.
+    sleep(500000);
+  }
+
+  // This runs in unoptimized mode and will therefore do switchable calls.
+  final sw = Stopwatch()..start();
+  while (sw.elapsedMicroseconds < 2000000) {
+    a.foo(isolateId);
+    a.foo(isolateId);
+    a.foo(isolateId);
+    a.foo(isolateId);
+  }
+}
+
+class A {
+  @pragma('vm:never-inline')
+  foo(int isolateId) {
+    if (isolateId == deoptIsolateId) {
+      VMInternalsForTesting.deoptimizeFunctionsOnStack();
+    }
+  }
+}
+
+class B implements A {
+  @pragma('vm:never-inline')
+  foo(int isolateId) {}
+}
+
+void sleep(int us) {
+  final sw = Stopwatch()..start();
+  while (sw.elapsedMicroseconds < us);
+}
diff --git a/runtime/vm/bitfield.h b/runtime/vm/bitfield.h
index f96a34e..d6f60c7 100644
--- a/runtime/vm/bitfield.h
+++ b/runtime/vm/bitfield.h
@@ -226,22 +226,6 @@
                    void>::type>
     : public BitField<typename S::ContainedType, T, position, size, false> {};
 
-template <typename S, typename T, int position>
-class BitField<S,
-               T,
-               position,
-               (sizeof(S) * kBitsPerByte) - position,
-               false,
-               typename std::enable_if<
-                   std::is_base_of<AtomicBitFieldContainerBase, S>::value,
-                   void>::type>
-    : public BitField<typename S::ContainedType,
-                      T,
-                      position,
-                      (sizeof(typename S::ContainedType) * kBitsPerByte) -
-                          position,
-                      false> {};
-
 }  // namespace dart
 
 #endif  // RUNTIME_VM_BITFIELD_H_
diff --git a/runtime/vm/bitmap.h b/runtime/vm/bitmap.h
index 186f1e6..74c4c5b 100644
--- a/runtime/vm/bitmap.h
+++ b/runtime/vm/bitmap.h
@@ -22,7 +22,9 @@
   }
 
   BitmapBuilder(const BitmapBuilder& other)
-      : length_(other.length_), data_size_in_bytes_(other.data_size_in_bytes_) {
+      : ZoneAllocated(),
+        length_(other.length_),
+        data_size_in_bytes_(other.data_size_in_bytes_) {
     if (data_size_in_bytes_ == kInlineCapacityInBytes) {
       memmove(data_.inline_, other.data_.inline_, kInlineCapacityInBytes);
     } else {
diff --git a/runtime/vm/bootstrap_natives.h b/runtime/vm/bootstrap_natives.h
index 7eb3452..e9710df6 100644
--- a/runtime/vm/bootstrap_natives.h
+++ b/runtime/vm/bootstrap_natives.h
@@ -344,6 +344,7 @@
   V(Internal_allocateTwoByteString, 1)                                         \
   V(Internal_writeIntoOneByteString, 3)                                        \
   V(Internal_writeIntoTwoByteString, 3)                                        \
+  V(Internal_deoptimizeFunctionsOnStack, 0)                                    \
   V(InvocationMirror_unpackTypeArguments, 2)                                   \
   V(NoSuchMethodError_existingMethodSignature, 3)                              \
   V(WeakProperty_getKey, 1)                                                    \
diff --git a/runtime/vm/compiler/stub_code_compiler_arm.cc b/runtime/vm/compiler/stub_code_compiler_arm.cc
index d0216f6..40c6f95 100644
--- a/runtime/vm/compiler/stub_code_compiler_arm.cc
+++ b/runtime/vm/compiler/stub_code_compiler_arm.cc
@@ -689,13 +689,13 @@
   // calling into the runtime.
   __ EnterStubFrame();
   __ LoadImmediate(R1, 0);
-  __ Push(R9);  // Preserve cache (guarded CID as Smi).
+  __ Push(R1);  // Result slot.
   __ Push(R0);  // Preserve receiver.
-  __ Push(R1);
-  __ CallRuntime(kFixCallersTargetMonomorphicRuntimeEntry, 0);
-  __ Pop(CODE_REG);
+  __ Push(R9);  // Old cache value (also 2nd return value).
+  __ CallRuntime(kFixCallersTargetMonomorphicRuntimeEntry, 2);
+  __ Pop(R9);  // Get target cache object.
   __ Pop(R0);  // Restore receiver.
-  __ Pop(R9);  // Restore cache (guarded CID as Smi).
+  __ Pop(CODE_REG);  // Get target Code object.
   // Remove the stub frame.
   __ LeaveStubFrame();
   // Jump to the dart function.
diff --git a/runtime/vm/compiler/stub_code_compiler_arm64.cc b/runtime/vm/compiler/stub_code_compiler_arm64.cc
index d4bc8cf..943417e 100644
--- a/runtime/vm/compiler/stub_code_compiler_arm64.cc
+++ b/runtime/vm/compiler/stub_code_compiler_arm64.cc
@@ -804,13 +804,13 @@
   // Create a stub frame as we are pushing some objects on the stack before
   // calling into the runtime.
   __ EnterStubFrame();
-  __ Push(R5);  // Preserve cache (guarded CID as Smi).
+  __ Push(ZR);  // Result slot.
   __ Push(R0);  // Preserve receiver.
-  __ Push(ZR);
-  __ CallRuntime(kFixCallersTargetMonomorphicRuntimeEntry, 0);
-  __ Pop(CODE_REG);
-  __ Pop(R0);  // Restore receiver.
-  __ Pop(R5);  // Restore cache (guarded CID as Smi).
+  __ Push(R5);  // Old cache value (also 2nd return value).
+  __ CallRuntime(kFixCallersTargetMonomorphicRuntimeEntry, 2);
+  __ Pop(R5);        // Get target cache object.
+  __ Pop(R0);        // Restore receiver.
+  __ Pop(CODE_REG);  // Get target Code object.
   // Remove the stub frame.
   __ LeaveStubFrame();
   // Jump to the dart function.
diff --git a/runtime/vm/compiler/stub_code_compiler_ia32.cc b/runtime/vm/compiler/stub_code_compiler_ia32.cc
index 16ef27a..ea1b6a4 100644
--- a/runtime/vm/compiler/stub_code_compiler_ia32.cc
+++ b/runtime/vm/compiler/stub_code_compiler_ia32.cc
@@ -515,13 +515,13 @@
   __ Bind(&monomorphic);
   // This was a switchable call.
   __ EnterStubFrame();
-  __ pushl(ECX);           // Preserve cache (guarded CID as Smi).
-  __ pushl(EBX);           // Preserve receiver.
   __ pushl(Immediate(0));  // Result slot.
-  __ CallRuntime(kFixCallersTargetMonomorphicRuntimeEntry, 0);
-  __ popl(CODE_REG);  // Get Code object.
+  __ pushl(EBX);           // Preserve receiver.
+  __ pushl(ECX);           // Old cache value (also 2nd return value).
+  __ CallRuntime(kFixCallersTargetMonomorphicRuntimeEntry, 2);
+  __ popl(ECX);       // Get target cache object.
   __ popl(EBX);       // Restore receiver.
-  __ popl(ECX);       // Restore cache (guarded CID as Smi).
+  __ popl(CODE_REG);  // Get target Code object.
   __ movl(EAX, FieldAddress(CODE_REG, target::Code::entry_point_offset(
                                           CodeEntryKind::kMonomorphic)));
   __ LeaveFrame();
diff --git a/runtime/vm/compiler/stub_code_compiler_x64.cc b/runtime/vm/compiler/stub_code_compiler_x64.cc
index 6981255..221e515 100644
--- a/runtime/vm/compiler/stub_code_compiler_x64.cc
+++ b/runtime/vm/compiler/stub_code_compiler_x64.cc
@@ -721,13 +721,13 @@
   __ movq(CODE_REG,
           Address(THR, target::Thread::fix_callers_target_code_offset()));
   __ EnterStubFrame();
-  __ pushq(RBX);           // Preserve cache (guarded CID as Smi).
-  __ pushq(RDX);           // Preserve receiver.
   __ pushq(Immediate(0));  // Result slot.
-  __ CallRuntime(kFixCallersTargetMonomorphicRuntimeEntry, 0);
-  __ popq(CODE_REG);  // Get Code object.
+  __ pushq(RDX);           // Preserve receiver.
+  __ pushq(RBX);           // Old cache value (also 2nd return value).
+  __ CallRuntime(kFixCallersTargetMonomorphicRuntimeEntry, 2);
+  __ popq(RBX);       // Get target cache object.
   __ popq(RDX);       // Restore receiver.
-  __ popq(RBX);       // Restore cache (guarded CID as Smi).
+  __ popq(CODE_REG);  // Get target Code object.
   __ movq(RAX, FieldAddress(CODE_REG, target::Code::entry_point_offset(
                                           CodeEntryKind::kMonomorphic)));
   __ LeaveStubFrame();
diff --git a/runtime/vm/dart.cc b/runtime/vm/dart.cc
index 2fbb323..57c2217e 100644
--- a/runtime/vm/dart.cc
+++ b/runtime/vm/dart.cc
@@ -301,6 +301,7 @@
     vm_isolate_->isolate_object_store()->Init();
     TargetCPUFeatures::Init();
     Object::Init(vm_isolate_->group());
+    OffsetsTable::Init();
     ArgumentsDescriptor::Init();
     ICData::Init();
     SubtypeTestCache::Init();
@@ -635,6 +636,7 @@
   ICData::Cleanup();
   SubtypeTestCache::Cleanup();
   ArgumentsDescriptor::Cleanup();
+  OffsetsTable::Cleanup();
   TargetCPUFeatures::Cleanup();
   MarkingStack::Cleanup();
   StoreBuffer::Cleanup();
diff --git a/runtime/vm/elf.cc b/runtime/vm/elf.cc
index 942ed59..57b9e6d2 100644
--- a/runtime/vm/elf.cc
+++ b/runtime/vm/elf.cc
@@ -22,15 +22,24 @@
 class ElfWriteStream : public ValueObject {
  public:
   explicit ElfWriteStream(BaseWriteStream* stream, const Elf& elf)
-      : stream_(ASSERT_NOTNULL(stream)), elf_(elf) {}
+      : stream_(ASSERT_NOTNULL(stream)),
+        elf_(elf),
+        start_(stream_->Position()) {
+    // So that we can use the underlying stream's Align, as all alignments
+    // will be less than or equal to this alignment.
+    ASSERT(Utils::IsAligned(start_, Elf::kPageSize));
+  }
 
   // Subclasses of Section may need to query the Elf object during Write(),
   // so we store it in the ElfWriteStream for easy access.
   const Elf& elf() const { return elf_; }
 
-  intptr_t Position() const { return stream_->Position(); }
+  // We return positions in terms of the ELF content that has been written,
+  // ignoring any previous content on the stream.
+  intptr_t Position() const { return stream_->Position() - start_; }
   void Align(const intptr_t alignment) {
     ASSERT(Utils::IsPowerOfTwo(alignment));
+    ASSERT(alignment <= Elf::kPageSize);
     stream_->Align(alignment);
   }
   void WriteBytes(const uint8_t* b, intptr_t size) {
@@ -48,6 +57,7 @@
  private:
   BaseWriteStream* const stream_;
   const Elf& elf_;
+  const intptr_t start_;
 };
 
 static constexpr intptr_t kLinearInitValue = -1;
@@ -66,10 +76,24 @@
 
 #define DEFINE_LINEAR_FIELD(name) intptr_t name##_ = kLinearInitValue;
 
+// We only allow for dynamic casting to a subset of section types, since
+// these are the only ones we need to distinguish at runtime.
+#define FOR_EACH_SECTION_TYPE(V)                                               \
+  V(ReservedSection)                                                           \
+  V(SymbolTable)                                                               \
+  V(DynamicTable)                                                              \
+  V(BitsContainer)                                                             \
+  V(TextSection) V(DataSection) V(BssSection) V(PseudoSection) V(SectionTable)
+#define DEFINE_TYPE_CHECK_FOR(Type)                                            \
+  bool Is##Type() const { return true; }
+
+#define DECLARE_SECTION_TYPE_CLASS(Type) class Type;
+FOR_EACH_SECTION_TYPE(DECLARE_SECTION_TYPE_CLASS)
+#undef DECLARE_SECTION_TYPE_CLASS
+
 class BitsContainer;
 class Segment;
 
-static constexpr intptr_t kDefaultAlignment = -1;
 // Align note sections and segments to 4 byte boundaries.
 static constexpr intptr_t kNoteAlignment = 4;
 
@@ -79,14 +103,16 @@
           bool allocate,
           bool executable,
           bool writable,
-          intptr_t align = kDefaultAlignment)
+          intptr_t align = compiler::target::kWordSize)
       : type(t),
         flags(EncodeFlags(allocate, executable, writable)),
-        alignment(align == kDefaultAlignment ? DefaultAlignment(t) : align),
+        alignment(align),
         // Non-segments will never have a memory offset, here represented by 0.
         memory_offset_(allocate ? kLinearInitValue : 0) {
-    // Only sections with type SHT_NULL are allowed to have an alignment of 0.
-    ASSERT(type == elf::SectionHeaderType::SHT_NULL || alignment > 0);
+    // Only SHT_NULL sections (namely, the reserved section) are allowed to have
+    // an alignment of 0 (as the written section header entry for the reserved
+    // section must be all 0s).
+    ASSERT(alignment > 0 || type == elf::SectionHeaderType::SHT_NULL);
     // Non-zero alignments must be a power of 2.
     ASSERT(alignment == 0 || Utils::IsPowerOfTwo(alignment));
   }
@@ -94,6 +120,7 @@
   virtual ~Section() {}
 
   // Linker view.
+
   const elf::SectionHeaderType type;
   const intptr_t flags;
   const intptr_t alignment;
@@ -103,26 +130,38 @@
   intptr_t link = elf::SHN_UNDEF;
   intptr_t info = 0;
   intptr_t entry_size = 0;
-  const char* symbol_name = nullptr;
+  // This field is set for all sections, but due to reordering, we may set it
+  // more than once.
+  intptr_t index = elf::SHN_UNDEF;
 
 #define FOR_EACH_SECTION_LINEAR_FIELD(M)                                       \
   M(name)                                                                      \
-  M(index)                                                                     \
   M(file_offset)
 
   FOR_EACH_SECTION_LINEAR_FIELD(DEFINE_LINEAR_FIELD_METHODS);
 
-  virtual intptr_t FileSize() const = 0;
+  // Only needs to be overridden for sections that may not be allocated or
+  // for allocated sections where MemorySize() and FileSize() may differ.
+  virtual intptr_t FileSize() const {
+    if (!IsAllocated()) {
+      UNREACHABLE();
+    }
+    return MemorySize();
+  }
 
   // Loader view.
+
 #define FOR_EACH_SEGMENT_LINEAR_FIELD(M) M(memory_offset)
 
   FOR_EACH_SEGMENT_LINEAR_FIELD(DEFINE_LINEAR_FIELD_METHODS);
 
-  // Each section belongs to at most one PT_LOAD segment.
-  Segment* load_segment = nullptr;
-
-  virtual intptr_t MemorySize() const = 0;
+  // Only needs to be overridden for sections that may be allocated.
+  virtual intptr_t MemorySize() const {
+    if (IsAllocated()) {
+      UNREACHABLE();
+    }
+    return 0;
+  }
 
   // Other methods.
 
@@ -137,23 +176,37 @@
   // Returns whether the size of a section can change.
   bool HasBeenFinalized() const {
     // Sections can grow or shrink up until Elf::ComputeOffsets has been run,
-    // which sets the file offset (and memory offset for allocated sections).
+    // which sets the file (and memory, if applicable) offsets.
     return file_offset_is_set();
   }
 
-  virtual const BitsContainer* AsBitsContainer() const { return nullptr; }
+#define DEFINE_BASE_TYPE_CHECKS(Type)                                          \
+  Type* As##Type() {                                                           \
+    return Is##Type() ? reinterpret_cast<Type*>(this) : nullptr;               \
+  }                                                                            \
+  const Type* As##Type() const {                                               \
+    return const_cast<Type*>(const_cast<Section*>(this)->As##Type());          \
+  }                                                                            \
+  virtual bool Is##Type() const { return false; }
+
+  FOR_EACH_SECTION_TYPE(DEFINE_BASE_TYPE_CHECKS)
+#undef DEFINE_BASE_TYPE_CHECKS
+
+  // Only some sections support merging.
+  virtual bool CanMergeWith(const Section& other) const { return false; }
+  virtual void Merge(const Section& other) { UNREACHABLE(); }
 
   // Writes the file contents of the section.
-  virtual void Write(ElfWriteStream* stream) = 0;
+  virtual void Write(ElfWriteStream* stream) const { UNREACHABLE(); }
 
-  virtual void WriteSectionHeader(ElfWriteStream* stream) {
+  virtual void WriteSectionHeader(ElfWriteStream* stream) const {
 #if defined(TARGET_ARCH_IS_32_BIT)
     stream->WriteWord(name());
     stream->WriteWord(static_cast<uint32_t>(type));
     stream->WriteWord(flags);
     stream->WriteAddr(memory_offset());
     stream->WriteOff(file_offset());
-    stream->WriteWord(FileSize());  // Has different meaning for BSS.
+    stream->WriteWord(FileSize());
     stream->WriteWord(link);
     stream->WriteWord(info);
     stream->WriteWord(alignment);
@@ -164,7 +217,7 @@
     stream->WriteXWord(flags);
     stream->WriteAddr(memory_offset());
     stream->WriteOff(file_offset());
-    stream->WriteXWord(FileSize());  // Has different meaning for BSS.
+    stream->WriteXWord(FileSize());
     stream->WriteWord(link);
     stream->WriteWord(info);
     stream->WriteXWord(alignment);
@@ -174,25 +227,17 @@
 
  private:
   static intptr_t EncodeFlags(bool allocate, bool executable, bool writable) {
+    // Executable and writable only make sense if this is an allocated section.
+    ASSERT(allocate || (!executable && !writable));
     if (!allocate) return 0;
     intptr_t flags = elf::SHF_ALLOC;
+    // We currently don't allow sections that are both executable and writable.
+    ASSERT(!executable || !writable);
     if (executable) flags |= elf::SHF_EXECINSTR;
     if (writable) flags |= elf::SHF_WRITE;
     return flags;
   }
 
-  static intptr_t DefaultAlignment(elf::SectionHeaderType type) {
-    switch (type) {
-      case elf::SectionHeaderType::SHT_SYMTAB:
-      case elf::SectionHeaderType::SHT_DYNSYM:
-      case elf::SectionHeaderType::SHT_HASH:
-      case elf::SectionHeaderType::SHT_DYNAMIC:
-        return compiler::target::kWordSize;
-      default:
-        return 1;
-    }
-  }
-
   FOR_EACH_SECTION_LINEAR_FIELD(DEFINE_LINEAR_FIELD);
   FOR_EACH_SEGMENT_LINEAR_FIELD(DEFINE_LINEAR_FIELD);
 
@@ -217,49 +262,52 @@
     // so we never should pass this value.
     ASSERT(segment_type != elf::ProgramHeaderType::PT_NULL);
     // All segments should have at least one section.
+    ASSERT(initial_section != nullptr);
     ASSERT(initial_section->IsAllocated());
     sections_.Add(initial_section);
-    if (type == elf::ProgramHeaderType::PT_LOAD) {
-      ASSERT(initial_section->load_segment == nullptr);
-    }
   }
 
   virtual ~Segment() {}
 
-  static intptr_t Alignment(elf::ProgramHeaderType segment_type) {
-    switch (segment_type) {
+  const GrowableArray<Section*>& sections() const { return sections_; }
+
+  intptr_t Alignment() const {
+    switch (type) {
+      case elf::ProgramHeaderType::PT_LOAD:
+        return Elf::kPageSize;
       case elf::ProgramHeaderType::PT_PHDR:
       case elf::ProgramHeaderType::PT_DYNAMIC:
         return compiler::target::kWordSize;
       case elf::ProgramHeaderType::PT_NOTE:
         return kNoteAlignment;
       default:
-        return Elf::kPageSize;
+        UNREACHABLE();
+        return 0;
     }
   }
 
   bool IsExecutable() const { return (flags & elf::PF_X) == elf::PF_X; }
   bool IsWritable() const { return (flags & elf::PF_W) == elf::PF_W; }
 
-  void WriteProgramHeader(ElfWriteStream* stream) {
+  void WriteProgramHeader(ElfWriteStream* stream) const {
 #if defined(TARGET_ARCH_IS_32_BIT)
     stream->WriteWord(static_cast<uint32_t>(type));
     stream->WriteOff(FileOffset());
     stream->WriteAddr(MemoryOffset());  // Virtual address.
-    stream->WriteAddr(MemoryOffset());  // Physical address, not used.
+    stream->WriteAddr(MemoryOffset());  // Physical address.
     stream->WriteWord(FileSize());
     stream->WriteWord(MemorySize());
     stream->WriteWord(flags);
-    stream->WriteWord(Alignment(type));
+    stream->WriteWord(Alignment());
 #else
     stream->WriteWord(static_cast<uint32_t>(type));
     stream->WriteWord(flags);
     stream->WriteOff(FileOffset());
     stream->WriteAddr(MemoryOffset());  // Virtual address.
-    stream->WriteAddr(MemoryOffset());  // Physical address, not used.
+    stream->WriteAddr(MemoryOffset());  // Physical address.
     stream->WriteXWord(FileSize());
     stream->WriteXWord(MemorySize());
-    stream->WriteXWord(Alignment(type));
+    stream->WriteXWord(Alignment());
 #endif
   }
 
@@ -267,36 +315,16 @@
   // section was successfully added.
   bool Add(Section* section) {
     ASSERT(section != nullptr);
+    // We can't add if memory offsets have already been calculated.
+    ASSERT(!section->memory_offset_is_set());
     // We only add additional sections to load segments.
     ASSERT(type == elf::ProgramHeaderType::PT_LOAD);
-    // Don't use this to change a section's segment.
-    ASSERT(section->load_segment == nullptr);
     // We only add sections with the same executable and writable bits.
     if (IsExecutable() != section->IsExecutable() ||
         IsWritable() != section->IsWritable()) {
       return false;
     }
     sections_.Add(section);
-    section->load_segment = this;
-    return true;
-  }
-
-  bool Merge(Segment* other) {
-    ASSERT(other != nullptr);
-    // We only add additional sections to load segments.
-    ASSERT(type == elf::ProgramHeaderType::PT_LOAD);
-    // We only merge segments with the same executable and writable bits.
-    if (IsExecutable() != other->IsExecutable() ||
-        IsWritable() != other->IsWritable()) {
-      return false;
-    }
-    for (auto* section : other->sections_) {
-      // Don't merge segments where the memory offsets have already been
-      // calculated.
-      ASSERT(!section->memory_offset_is_set());
-      sections_.Add(section);
-      section->load_segment = this;
-    }
     return true;
   }
 
@@ -318,10 +346,10 @@
 
   intptr_t MemoryEnd() const { return MemoryOffset() + MemorySize(); }
 
- private:
-  static constexpr intptr_t kInitValue = -1;
-  static_assert(kInitValue < 0, "init value must be negative");
+  const elf::ProgramHeaderType type;
+  const intptr_t flags;
 
+ private:
   static intptr_t EncodeFlags(bool executable, bool writable) {
     intptr_t flags = elf::PF_R;
     if (executable) flags |= elf::PF_X;
@@ -329,11 +357,6 @@
     return flags;
   }
 
- public:
-  const elf::ProgramHeaderType type;
-  const intptr_t flags;
-
- private:
   GrowableArray<Section*> sections_;
 };
 
@@ -347,84 +370,11 @@
                 /*executable=*/false,
                 /*writable=*/false,
                 /*alignment=*/0) {
-    set_name(0);
-    set_index(0);
     set_file_offset(0);
   }
 
+  DEFINE_TYPE_CHECK_FOR(ReservedSection);
   intptr_t FileSize() const { return 0; }
-  intptr_t MemorySize() const { return 0; }
-  void Write(ElfWriteStream* stream) {}
-};
-
-// Represents portions of the file/memory space which do not correspond to
-// actual sections. Should never be added to sections_.
-class PseudoSection : public Section {
- public:
-  PseudoSection(bool executable,
-                bool writable,
-                intptr_t file_offset,
-                intptr_t file_size,
-                intptr_t memory_offset,
-                intptr_t memory_size)
-      : Section(elf::SectionHeaderType::SHT_NULL,
-                /*allocate=*/true,
-                executable,
-                writable,
-                /*alignment=*/0),
-        file_size_(file_size),
-        memory_size_(memory_size) {
-    set_file_offset(file_offset);
-    set_memory_offset(memory_offset);
-  }
-
-  intptr_t FileSize() const { return file_size_; }
-  intptr_t MemorySize() const { return memory_size_; }
-  void WriteSectionHeader(ElfWriteStream* stream) { UNREACHABLE(); }
-  void Write(ElfWriteStream* stream) { UNREACHABLE(); }
-
- private:
-  const intptr_t file_size_;
-  const intptr_t memory_size_;
-};
-
-// A segment for representing the program header table self-reference in the
-// program header table.
-class ProgramTableSelfSegment : public Segment {
- public:
-  ProgramTableSelfSegment(Zone* zone, intptr_t offset, intptr_t size)
-      : Segment(zone,
-                new (zone) PseudoSection(/*executable=*/false,
-                                         /*writable=*/false,
-                                         offset,
-                                         size,
-                                         offset,
-                                         size),
-                elf::ProgramHeaderType::PT_PHDR) {}
-};
-
-// A segment for representing the program header table load segment in the
-// program header table.
-class ProgramTableLoadSegment : public Segment {
- public:
-  // The Android dynamic linker in Jelly Bean incorrectly assumes that all
-  // non-writable segments are continguous. Since the BSS segment comes directly
-  // after the program header segment, we must make this segment writable so
-  // later non-writable segments does not cause the BSS to be also marked as
-  // read-only.
-  //
-  // The bug is here:
-  //   https://github.com/aosp-mirror/platform_bionic/blob/94963af28e445384e19775a838a29e6a71708179/linker/linker.c#L1991-L2001
-  explicit ProgramTableLoadSegment(Zone* zone, intptr_t size)
-      : Segment(zone,
-                // This segment should always start at address 0.
-                new (zone) PseudoSection(/*executable=*/false,
-                                         /*writable=*/true,
-                                         0,
-                                         size,
-                                         0,
-                                         size),
-                elf::ProgramHeaderType::PT_LOAD) {}
 };
 
 class StringTable : public Section {
@@ -437,18 +387,18 @@
         dynamic_(allocate),
         text_(zone, 128),
         text_indices_(zone) {
-    AddString("");
+    Add("");
   }
 
   intptr_t FileSize() const { return text_.length(); }
   intptr_t MemorySize() const { return dynamic_ ? FileSize() : 0; }
 
-  void Write(ElfWriteStream* stream) {
+  void Write(ElfWriteStream* stream) const {
     stream->WriteBytes(reinterpret_cast<const uint8_t*>(text_.buffer()),
                        text_.length());
   }
 
-  intptr_t AddString(const char* str) {
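+  // Adds str to the table if not already present. Returns the byte offset of
+  // str within the table's contents.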
+  intptr_t Add(const char* str) {
     ASSERT(str != nullptr);
     if (auto const kv = text_indices_.Lookup(str)) {
       return kv->value;
@@ -460,8 +410,8 @@
     return offset;
   }
 
-  const char* At(intptr_t index) {
-    ASSERT(index < text_.length());
+  const char* At(intptr_t index) const {
+    if (index >= text_.length()) return nullptr;
     return text_.buffer() + index;
   }
 
@@ -491,51 +441,31 @@
         dynamic_(dynamic),
         symbols_(zone, 1),
         by_name_index_(zone) {
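+    // A symbol table's sh_link field holds the section header index of its
+    // associated string table.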
+    link = table_->index;
     entry_size = sizeof(elf::Symbol);
     // The first symbol table entry is reserved and must be all zeros.
     // (String tables always have the empty string at the 0th index.)
-    const char* const kReservedName = "";
-    AddSymbol(kReservedName, elf::STB_LOCAL, elf::STT_NOTYPE, elf::SHN_UNDEF,
-              /*size=*/0);
-    FinalizeSymbol(kReservedName, elf::SHN_UNDEF, /*offset=*/0);
+    ASSERT_EQUAL(table_->Lookup(""), 0);
+    symbols_.Add({/*name_index=*/0, elf::STB_LOCAL, elf::STT_NOTYPE, /*size=*/0,
+                  elf::SHN_UNDEF, /*offset=*/0});
+    // The info field on a symbol table section holds the index of the first
+    // non-local symbol, so since there are none yet, it points past the single
+    // symbol we do have.
+    info = 1;
   }
 
-  intptr_t FileSize() const { return Length() * entry_size; }
+  DEFINE_TYPE_CHECK_FOR(SymbolTable)
+  const StringTable& strtab() const { return *table_; }
+  intptr_t FileSize() const { return symbols_.length() * entry_size; }
   intptr_t MemorySize() const { return dynamic_ ? FileSize() : 0; }
 
-  class Symbol : public ZoneAllocated {
-   public:
-    Symbol(const char* cstr,
-           intptr_t name,
-           intptr_t binding,
-           intptr_t type,
-           intptr_t initial_section_index,
-           intptr_t size)
-        : name_index(name),
-          binding(binding),
-          type(type),
-          size(size),
-          section_index(initial_section_index),
-          cstr_(cstr) {}
-
-    void Finalize(intptr_t final_section_index, intptr_t offset) {
-      ASSERT(!HasBeenFinalized());  // No symbol should be re-finalized.
-      section_index = final_section_index;
-      offset_ = offset;
-    }
-    bool HasBeenFinalized() const { return offset_ != kNotFinalizedMarker; }
-    intptr_t offset() const {
-      ASSERT(HasBeenFinalized());
-      // Only the reserved initial symbol should have an offset of 0.
-      ASSERT_EQUAL(type == elf::STT_NOTYPE, offset_ == 0);
-      return offset_;
-    }
-
+  struct Symbol {
     void Write(ElfWriteStream* stream) const {
       const intptr_t start = stream->Position();
+      ASSERT(section_index == elf::SHN_UNDEF || offset > 0);
       stream->WriteWord(name_index);
 #if defined(TARGET_ARCH_IS_32_BIT)
-      stream->WriteAddr(offset());
+      stream->WriteAddr(offset);
       stream->WriteWord(size);
       stream->WriteByte(elf::SymbolInfo(binding, type));
       stream->WriteByte(0);
@@ -544,35 +474,34 @@
       stream->WriteByte(elf::SymbolInfo(binding, type));
       stream->WriteByte(0);
       stream->WriteHalf(section_index);
-      stream->WriteAddr(offset());
+      stream->WriteAddr(offset);
       stream->WriteXWord(size);
 #endif
       ASSERT_EQUAL(stream->Position() - start, sizeof(elf::Symbol));
     }
 
-    const intptr_t name_index;
-    const intptr_t binding;
-    const intptr_t type;
-    const intptr_t size;
-    // Is set twice: once in Elf::AddSection to the section's initial index into
-    // sections_, and then in Elf::FinalizeSymbols to the section's final index
-    // into sections_ after reordering.
+    intptr_t name_index;
+    intptr_t binding;
+    intptr_t type;
+    intptr_t size;
+    // Must be updated whenever sections are reordered.
     intptr_t section_index;
+    // Initialized to the section-relative offset, must be updated to the
+    // snapshot-relative offset before writing.
+    intptr_t offset;
 
    private:
-    static const intptr_t kNotFinalizedMarker = -1;
-
-    const char* const cstr_;
-    intptr_t offset_ = kNotFinalizedMarker;
-
-    friend class SymbolHashTable;  // For cstr_ access.
+    DISALLOW_ALLOCATION();
   };
 
-  void Write(ElfWriteStream* stream) {
-    for (intptr_t i = 0; i < Length(); i++) {
-      auto const symbol = At(i);
+  const GrowableArray<Symbol>& symbols() const { return symbols_; }
+
+  void Initialize(const GrowableArray<Section*>& sections);
+
+  void Write(ElfWriteStream* stream) const {
+    for (const auto& symbol : symbols_) {
       const intptr_t start = stream->Position();
-      symbol->Write(stream);
+      symbol.Write(stream);
       ASSERT_EQUAL(stream->Position() - start, entry_size);
     }
   }
@@ -580,131 +509,159 @@
   void AddSymbol(const char* name,
                  intptr_t binding,
                  intptr_t type,
-                 intptr_t section_index,
-                 intptr_t size) {
+                 intptr_t size,
+                 intptr_t index,
+                 intptr_t offset) {
     ASSERT(!table_->HasBeenFinalized());
-    auto const name_index = table_->AddString(name);
-    ASSERT(by_name_index_.Lookup(name_index) == nullptr);
-    auto const symbol = new (zone_)
-        Symbol(name, name_index, binding, type, section_index, size);
-    symbols_.Add(symbol);
-    by_name_index_.Insert(name_index, symbol);
+    auto const name_index = table_->Add(name);
+    ASSERT(name_index != 0);
+    const intptr_t new_index = symbols_.length();
+    symbols_.Add({name_index, binding, type, size, index, offset});
+    by_name_index_.Insert(name_index, new_index);
     // The info field on a symbol table section holds the index of the first
     // non-local symbol, so they can be skipped if desired. Thus, we need to
     // make sure local symbols are before any non-local ones.
     if (binding == elf::STB_LOCAL) {
-      if (info != symbols_.length() - 1) {
+      if (info != new_index) {
         // There are non-local symbols, as otherwise [info] would be the
         // index of the new symbol. Since the order doesn't otherwise matter,
         // swap the new local symbol with the value at index [info], so when
         // [info] is incremented it will point just past the new local symbol.
-        ASSERT(symbols_[info]->binding != elf::STB_LOCAL);
-        symbols_.Swap(info, symbols_.length() - 1);
+        ASSERT(symbols_[info].binding != elf::STB_LOCAL);
+        symbols_.Swap(info, new_index);
+        // Since by_name_index_ holds indices into symbols_, we need to update it.
+        by_name_index_.Update({symbols_[info].name_index, info});
+        by_name_index_.Update({symbols_[new_index].name_index, new_index});
       }
       info += 1;
     }
   }
 
-  void FinalizeSymbol(const char* name,
-                      intptr_t final_section_index,
-                      intptr_t offset) {
-    const intptr_t name_index = table_->Lookup(name);
-    ASSERT(name_index != StringTable::kNotIndexed);
-    Symbol* symbol = by_name_index_.Lookup(name_index);
-    ASSERT(symbol != nullptr);
-    symbol->Finalize(final_section_index, offset);
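+  // Rewrites each symbol's section index using index_map, where index_map[i]
+  // is the new index of the section previously at index i.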
+  void UpdateSectionIndices(const GrowableArray<intptr_t>& index_map) {
+#if defined(DEBUG)
+    const intptr_t map_size = index_map.length();
+    // The first entry must be 0 so that symbols with index SHN_UNDEF, like
+    // the initial reserved symbol, are unchanged.
+    ASSERT_EQUAL(index_map[0], 0);
+    for (intptr_t i = 1; i < map_size; i++) {
+      ASSERT(index_map[i] != 0);
+      ASSERT(index_map[i] < map_size);
+    }
+#endif
+    for (auto& symbol : symbols_) {
+      DEBUG_ASSERT(symbol.section_index < map_size);
+      symbol.section_index = index_map[symbol.section_index];
+    }
   }
 
-  intptr_t Length() const { return symbols_.length(); }
-  const Symbol* At(intptr_t i) const { return symbols_[i]; }
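+  // Converts each symbol's section-relative offset into a snapshot-relative
+  // address by adding the memory offset of its section, as provided in
+  // address_map[i] for the section at index i.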
+  void Finalize(const GrowableArray<intptr_t>& address_map) {
+#if defined(DEBUG)
+    const intptr_t map_size = address_map.length();
+    // The first entry must be 0 so that symbols with index SHN_UNDEF, like
+    // the initial reserved symbol, are unchanged.
+    ASSERT_EQUAL(address_map[0], 0);
+    for (intptr_t i = 1; i < map_size; i++) {
+      // No section begins at the start of the snapshot.
+      ASSERT(address_map[i] != 0);
+    }
+#endif
+    for (auto& symbol : symbols_) {
+      DEBUG_ASSERT(symbol.section_index < map_size);
+      symbol.offset += address_map[symbol.section_index];
+    }
+  }
 
   const Symbol* Find(const char* name) const {
     ASSERT(name != nullptr);
-    auto const name_index = table_->Lookup(name);
-    return by_name_index_.Lookup(name_index);
+    const intptr_t name_index = table_->Lookup(name);
+    // 0 is kNoValue for by_name_index_, but luckily 0 is also the name index
+    // of the initial reserved symbol.
+    if (name_index == 0) return &symbols_[0];
+    const intptr_t symbols_index = by_name_index_.Lookup(name_index);
+    if (symbols_index == 0) return nullptr;  // Not found.
+    return &symbols_[symbols_index];
   }
 
  private:
   Zone* const zone_;
   StringTable* const table_;
   const bool dynamic_;
-  GrowableArray<Symbol*> symbols_;
-  mutable IntMap<Symbol*> by_name_index_;
+  GrowableArray<Symbol> symbols_;
+  // Maps name indexes in table_ to indexes in symbols_. Does not include an
+  // entry for the reserved symbol (name ""), as 0 is kNoValue.
+  IntMap<intptr_t> by_name_index_;
 };
 
-static uint32_t ElfHash(const unsigned char* name) {
-  uint32_t h = 0;
-  while (*name != '\0') {
-    h = (h << 4) + *name++;
-    uint32_t g = h & 0xf0000000;
-    h ^= g;
-    h ^= g >> 24;
-  }
-  return h;
-}
-
 class SymbolHashTable : public Section {
  public:
-  SymbolHashTable(Zone* zone, StringTable* strtab, SymbolTable* symtab)
+  SymbolHashTable(Zone* zone, SymbolTable* symtab)
       : Section(elf::SectionHeaderType::SHT_HASH,
                 /*allocate=*/true,
                 /*executable=*/false,
-                /*writable=*/false) {
+                /*writable=*/false),
+        buckets_(zone, 0),
+        chains_(zone, 0) {
+    link = symtab->index;
     entry_size = sizeof(int32_t);
 
-    nchain_ = symtab->Length();
-    nbucket_ = symtab->Length();
+    const auto& symbols = symtab->symbols();
+    const intptr_t num_symbols = symbols.length();
+    buckets_.FillWith(elf::STN_UNDEF, 0, num_symbols);
+    chains_.FillWith(elf::STN_UNDEF, 0, num_symbols);
 
-    bucket_ = zone->Alloc<int32_t>(nbucket_);
-    for (intptr_t i = 0; i < nbucket_; i++) {
-      bucket_[i] = elf::STN_UNDEF;
-    }
-
-    chain_ = zone->Alloc<int32_t>(nchain_);
-    for (intptr_t i = 0; i < nchain_; i++) {
-      chain_[i] = elf::STN_UNDEF;
-    }
-
-    for (intptr_t i = 1; i < symtab->Length(); i++) {
-      auto const symbol = symtab->At(i);
-      uint32_t hash = ElfHash((const unsigned char*)symbol->cstr_);
-      uint32_t probe = hash % nbucket_;
-      chain_[i] = bucket_[probe];  // next = head
-      bucket_[probe] = i;          // head = symbol
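+    // buckets_[b] is the index of the first symbol whose name hashes to b
+    // modulo the bucket count, and chains_[i] is the index of the next symbol
+    // in the same bucket as symbol i (STN_UNDEF if there are none). Symbol 0
+    // is the reserved undefined symbol, so chaining starts at index 1.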
+    for (intptr_t i = 1; i < num_symbols; i++) {
+      const auto& symbol = symbols[i];
+      uint32_t hash = HashSymbolName(symtab->strtab().At(symbol.name_index));
+      uint32_t probe = hash % num_symbols;
+      chains_[i] = buckets_[probe];  // next = head
+      buckets_[probe] = i;           // head = symbol
     }
   }
 
-  intptr_t FileSize() const { return entry_size * (nbucket_ + nchain_ + 2); }
-  intptr_t MemorySize() const { return FileSize(); }
+  intptr_t MemorySize() const {
+    return entry_size * (buckets_.length() + chains_.length() + 2);
+  }
 
-  void Write(ElfWriteStream* stream) {
-    stream->WriteWord(nbucket_);
-    stream->WriteWord(nchain_);
-    for (intptr_t i = 0; i < nbucket_; i++) {
-      stream->WriteWord(bucket_[i]);
+  void Write(ElfWriteStream* stream) const {
+    stream->WriteWord(buckets_.length());
+    stream->WriteWord(chains_.length());
+    for (const int32_t bucket : buckets_) {
+      stream->WriteWord(bucket);
     }
-    for (intptr_t i = 0; i < nchain_; i++) {
-      stream->WriteWord(chain_[i]);
+    for (const int32_t chain : chains_) {
+      stream->WriteWord(chain);
     }
   }
 
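+  // The hash function for symbol names specified for SHT_HASH sections in the
+  // System V ABI. The dynamic linker recomputes this hash during lookup, so
+  // the implementation must match the specification exactly.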
+  static uint32_t HashSymbolName(const void* p) {
+    auto* name = reinterpret_cast<const uint8_t*>(p);
+    uint32_t h = 0;
+    while (*name != '\0') {
+      h = (h << 4) + *name++;
+      uint32_t g = h & 0xf0000000;
+      h ^= g;
+      h ^= g >> 24;
+    }
+    return h;
+  }
+
  private:
-  int32_t nbucket_;
-  int32_t nchain_;
-  int32_t* bucket_;  // "Head"
-  int32_t* chain_;   // "Next"
+  GrowableArray<int32_t> buckets_;  // "Head"
+  GrowableArray<int32_t> chains_;   // "Next"
 };
 
 class DynamicTable : public Section {
  public:
-  explicit DynamicTable(Zone* zone)
+  DynamicTable(Zone* zone, SymbolTable* symtab, SymbolHashTable* hash)
       : Section(elf::SectionHeaderType::SHT_DYNAMIC,
                 /*allocate=*/true,
                 /*executable=*/false,
-                /*writable=*/true) {
+                /*writable=*/true),
+        symtab_(symtab),
+        hash_(hash) {
+    link = strtab().index;
     entry_size = sizeof(elf::DynamicEntry);
 
-    // Entries that are not constants are fixed during Elf::Finalize().
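+    // The values of these entries depend on the final content layout, so they
+    // are initialized to placeholders and patched in Finalize().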
     AddEntry(zone, elf::DynamicEntryType::DT_HASH, kInvalidEntry);
     AddEntry(zone, elf::DynamicEntryType::DT_STRTAB, kInvalidEntry);
     AddEntry(zone, elf::DynamicEntryType::DT_STRSZ, kInvalidEntry);
@@ -715,19 +672,30 @@
 
   static constexpr intptr_t kInvalidEntry = -1;
 
-  intptr_t FileSize() const { return entries_.length() * entry_size; }
-  intptr_t MemorySize() const { return FileSize(); }
+  DEFINE_TYPE_CHECK_FOR(DynamicTable)
+  const SymbolHashTable& hash() const { return *hash_; }
+  const SymbolTable& symtab() const { return *symtab_; }
+  const StringTable& strtab() const { return symtab().strtab(); }
+  intptr_t MemorySize() const { return entries_.length() * entry_size; }
 
-  void Write(ElfWriteStream* stream) {
+  void Write(ElfWriteStream* stream) const {
     for (intptr_t i = 0; i < entries_.length(); i++) {
       entries_[i]->Write(stream);
     }
   }
 
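+  // Patches the layout-dependent entries. Must only be called once the memory
+  // offsets of the hash, string, and symbol tables have been computed.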
+  void Finalize() {
+    FinalizeEntry(elf::DynamicEntryType::DT_HASH, hash().memory_offset());
+    FinalizeEntry(elf::DynamicEntryType::DT_STRTAB, strtab().memory_offset());
+    FinalizeEntry(elf::DynamicEntryType::DT_STRSZ, strtab().MemorySize());
+    FinalizeEntry(elf::DynamicEntryType::DT_SYMTAB, symtab().memory_offset());
+  }
+
+ private:
   struct Entry : public ZoneAllocated {
     Entry(elf::DynamicEntryType tag, intptr_t value) : tag(tag), value(value) {}
 
-    void Write(ElfWriteStream* stream) {
+    void Write(ElfWriteStream* stream) const {
       ASSERT(value != kInvalidEntry);
       const intptr_t start = stream->Position();
 #if defined(TARGET_ARCH_IS_32_BIT)
@@ -758,74 +726,28 @@
     }
   }
 
-  void FinalizeEntries(StringTable* strtab,
-                       SymbolTable* symtab,
-                       SymbolHashTable* hash) {
-    FinalizeEntry(elf::DynamicEntryType::DT_HASH, hash->memory_offset());
-    FinalizeEntry(elf::DynamicEntryType::DT_STRTAB, strtab->memory_offset());
-    FinalizeEntry(elf::DynamicEntryType::DT_STRSZ, strtab->MemorySize());
-    FinalizeEntry(elf::DynamicEntryType::DT_SYMTAB, symtab->memory_offset());
-  }
-
- private:
+  SymbolTable* const symtab_;
+  SymbolHashTable* const hash_;
   GrowableArray<Entry*> entries_;
 };
 
-// A segment for representing the dynamic table segment in the program header
-// table. There is no corresponding section for this segment.
-class DynamicSegment : public Segment {
- public:
-  explicit DynamicSegment(Zone* zone, DynamicTable* dynamic)
-      : Segment(zone, dynamic, elf::ProgramHeaderType::PT_DYNAMIC) {}
-};
-
-// A segment for representing the dynamic table segment in the program header
-// table. There is no corresponding section for this segment.
-class NoteSegment : public Segment {
- public:
-  NoteSegment(Zone* zone, Section* note)
-      : Segment(zone, note, elf::ProgramHeaderType::PT_NOTE) {
-    ASSERT_EQUAL(static_cast<uint32_t>(note->type),
-                 static_cast<uint32_t>(elf::SectionHeaderType::SHT_NOTE));
-  }
-};
-
 class BitsContainer : public Section {
  public:
-  // Fully specified BitsContainer information.
+  // Fully specified BitsContainer information. Unless otherwise specified,
+  // BitsContainers are aligned on byte boundaries (i.e., no padding is used).
   BitsContainer(elf::SectionHeaderType type,
                 bool allocate,
                 bool executable,
                 bool writable,
-                intptr_t size,
-                const uint8_t* bytes,
-                const ZoneGrowableArray<Elf::Relocation>* relocations,
-                const ZoneGrowableArray<Elf::SymbolData>* symbols,
-                int alignment = kDefaultAlignment)
-      : Section(type, allocate, executable, writable, alignment),
-        file_size_(type == elf::SectionHeaderType::SHT_NOBITS ? 0 : size),
-        memory_size_(allocate ? size : 0),
-        bytes_(bytes),
-        relocations_(relocations),
-        symbols_(symbols) {
-    ASSERT(type == elf::SectionHeaderType::SHT_NOBITS || bytes != nullptr);
-  }
+                int alignment = 1)
+      : Section(type, allocate, executable, writable, alignment) {}
 
-  // For BitsContainers used only as sections.
-  BitsContainer(elf::SectionHeaderType type,
-                intptr_t size,
-                const uint8_t* bytes,
-                const ZoneGrowableArray<Elf::Relocation>* relocations,
-                const ZoneGrowableArray<Elf::SymbolData>* symbols,
-                intptr_t alignment = kDefaultAlignment)
+  // For BitsContainers used only as unallocated sections.
+  explicit BitsContainer(elf::SectionHeaderType type, intptr_t alignment = 1)
       : BitsContainer(type,
                       /*allocate=*/false,
                       /*executable=*/false,
                       /*writable=*/false,
-                      size,
-                      bytes,
-                      relocations,
-                      symbols,
                       alignment) {}
 
  // For BitsContainers used as segments whose type depends on the type of the
@@ -834,61 +756,58 @@
   BitsContainer(Elf::Type t,
                 bool executable,
                 bool writable,
-                intptr_t size,
-                const uint8_t* bytes,
-                const ZoneGrowableArray<Elf::Relocation>* relocations,
-                const ZoneGrowableArray<Elf::SymbolData>* symbols,
-                intptr_t alignment = kDefaultAlignment)
+                intptr_t alignment = 1)
       : BitsContainer(t == Elf::Type::Snapshot
                           ? elf::SectionHeaderType::SHT_PROGBITS
                           : elf::SectionHeaderType::SHT_NOBITS,
                       /*allocate=*/true,
                       executable,
                       writable,
-                      size,
-                      bytes,
-                      relocations,
-                      symbols,
                       alignment) {}
 
-  const BitsContainer* AsBitsContainer() const { return this; }
-  const ZoneGrowableArray<Elf::SymbolData>* symbols() const { return symbols_; }
+  DEFINE_TYPE_CHECK_FOR(BitsContainer)
 
-  void Write(ElfWriteStream* stream) {
-    if (type == elf::SectionHeaderType::SHT_NOBITS) return;
-    if (relocations_ == nullptr) {
-      return stream->WriteBytes(bytes(), FileSize());
-    }
-    const SymbolTable* symtab = ASSERT_NOTNULL(stream->elf().symtab());
-    // Resolve relocations as we write.
-    intptr_t current_pos = 0;
-    for (const auto& reloc : *relocations_) {
-      // We assume here that the relocations are sorted in increasing order,
-      // with unique section offsets.
-      ASSERT(current_pos <= reloc.section_offset);
-      if (current_pos < reloc.section_offset) {
-        stream->WriteBytes(bytes_ + current_pos,
-                           reloc.section_offset - current_pos);
+  bool IsNoBits() const { return type == elf::SectionHeaderType::SHT_NOBITS; }
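+  // Whether the portions of this container carry bytes to be written. Since
+  // AddPortion checks that all portions are consistent in this regard, only
+  // the first portion needs checking.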
+  bool HasBytes() const {
+    return portions_.length() != 0 && portions_[0].bytes != nullptr;
+  }
+
+  struct Portion {
+    void Write(ElfWriteStream* stream, intptr_t section_start) const {
+      ASSERT(bytes != nullptr);
+      if (relocations == nullptr) {
+        stream->WriteBytes(bytes, size);
+        return;
       }
-      intptr_t source_address = reloc.source_offset;
-      intptr_t target_address = reloc.target_offset;
-      // Null symbols denote that the corresponding offset should be treated
-      // as an absolute offset in the ELF memory space.
-      if (reloc.source_symbol != nullptr) {
-        if (strcmp(reloc.source_symbol, ".") == 0) {
-          source_address += memory_offset() + reloc.section_offset;
-        } else {
-          auto* const source_symbol = symtab->Find(reloc.source_symbol);
-          ASSERT(source_symbol != nullptr);
-          source_address += source_symbol->offset();
+      const SymbolTable& symtab = stream->elf().symtab();
+      // Resolve relocations as we write.
+      intptr_t current_pos = 0;
+      for (const auto& reloc : *relocations) {
+        // We assume here that the relocations are sorted in increasing order,
+        // with unique section offsets.
+        ASSERT(current_pos <= reloc.section_offset);
+        if (current_pos < reloc.section_offset) {
+          stream->WriteBytes(bytes + current_pos,
+                             reloc.section_offset - current_pos);
         }
-      }
-      if (reloc.target_symbol != nullptr) {
-        if (strcmp(reloc.target_symbol, ".") == 0) {
-          target_address += memory_offset() + reloc.section_offset;
-        } else {
-          auto* const target_symbol = symtab->Find(reloc.target_symbol);
-          if (target_symbol == nullptr) {
+        intptr_t source_address = reloc.source_offset;
+        if (reloc.source_symbol != nullptr) {
+          if (strcmp(reloc.source_symbol, ".") == 0) {
+            source_address += section_start + offset + reloc.section_offset;
+          } else {
+            auto* const source_symbol = symtab.Find(reloc.source_symbol);
+            ASSERT(source_symbol != nullptr);
+            source_address += source_symbol->offset;
+          }
+        }
+        ASSERT(reloc.size_in_bytes <= kWordSize);
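+        // The written value is the difference between the relocated target
+        // and source: (target base + target_offset) - (source base +
+        // source_offset). A null symbol means the offset is an absolute
+        // address and "." refers to this portion's location in the snapshot.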
+        word to_write = reloc.target_offset - source_address;
+        if (reloc.target_symbol != nullptr) {
+          if (strcmp(reloc.target_symbol, ".") == 0) {
+            to_write += section_start + offset + reloc.section_offset;
+          } else if (auto* const symbol = symtab.Find(reloc.target_symbol)) {
+            to_write += symbol->offset;
+          } else {
             ASSERT_EQUAL(strcmp(reloc.target_symbol, kSnapshotBuildIdAsmSymbol),
                          0);
             ASSERT_EQUAL(reloc.target_offset, 0);
@@ -897,142 +816,381 @@
             // TODO(dartbug.com/43516): Special case for snapshots with deferred
             // sections that handles the build ID relocation in an
             // InstructionsSection when there is no build ID.
-            const word to_write = Image::kNoRelocatedAddress;
-            stream->WriteBytes(reinterpret_cast<const uint8_t*>(&to_write),
-                               reloc.size_in_bytes);
-            current_pos = reloc.section_offset + reloc.size_in_bytes;
-            continue;
+            to_write = Image::kNoRelocatedAddress;
           }
-          target_address += target_symbol->offset();
         }
+        ASSERT(Utils::IsInt(reloc.size_in_bytes * kBitsPerByte, to_write));
+        stream->WriteBytes(reinterpret_cast<const uint8_t*>(&to_write),
+                           reloc.size_in_bytes);
+        current_pos = reloc.section_offset + reloc.size_in_bytes;
       }
-      ASSERT(reloc.size_in_bytes <= kWordSize);
-      const word to_write = target_address - source_address;
-      ASSERT(Utils::IsInt(reloc.size_in_bytes * kBitsPerByte, to_write));
-      stream->WriteBytes(reinterpret_cast<const uint8_t*>(&to_write),
-                         reloc.size_in_bytes);
-      current_pos = reloc.section_offset + reloc.size_in_bytes;
+      stream->WriteBytes(bytes + current_pos, size - current_pos);
     }
-    stream->WriteBytes(bytes_ + current_pos, FileSize() - current_pos);
+
+    // The offset of this portion within the section's contents.
+    intptr_t offset;
+    // The name of the symbol, if any, attached to the start of this portion.
+    const char* symbol_name;
+    // The bytes of this portion, or nullptr for NOBITS portions.
+    const uint8_t* bytes;
+    intptr_t size;
+    // Relocations to resolve when writing, assumed to be sorted by
+    // section_offset with no duplicates.
+    const ZoneGrowableArray<Elf::Relocation>* relocations;
+    // Static symbols to be created for locations within this portion.
+    const ZoneGrowableArray<Elf::SymbolData>* symbols;
+
+   private:
+    DISALLOW_ALLOCATION();
+  };
+
+  const GrowableArray<Portion>& portions() const { return portions_; }
+
+  const Portion& AddPortion(
+      const uint8_t* bytes,
+      intptr_t size,
+      const ZoneGrowableArray<Elf::Relocation>* relocations = nullptr,
+      const ZoneGrowableArray<Elf::SymbolData>* symbols = nullptr,
+      const char* symbol_name = nullptr) {
+    ASSERT(IsNoBits() || bytes != nullptr);
+    ASSERT(bytes != nullptr || relocations == nullptr);
+    // Make sure all portions are consistent in containing bytes.
+    ASSERT(portions_.is_empty() || HasBytes() == (bytes != nullptr));
+    const intptr_t offset = Utils::RoundUp(total_size_, alignment);
+    portions_.Add({offset, symbol_name, bytes, size, relocations, symbols});
+    const Portion& portion = portions_.Last();
+    total_size_ = offset + size;
+    return portion;
   }
 
-  uint32_t Hash() const {
-    ASSERT(bytes() != nullptr);
-    return Utils::StringHash(bytes(), MemorySize());
+  void Write(ElfWriteStream* stream) const {
+    if (type == elf::SectionHeaderType::SHT_NOBITS) return;
+    intptr_t start_position = stream->Position();  // Used for checks.
+    for (const auto& portion : portions_) {
+      stream->Align(alignment);
+      ASSERT_EQUAL(stream->Position(), start_position + portion.offset);
+      portion.Write(stream, memory_offset());
+    }
+    ASSERT_EQUAL(stream->Position(), start_position + total_size_);
   }
 
-  intptr_t FileSize() const { return file_size_; }
-  intptr_t MemorySize() const { return memory_size_; }
-  const uint8_t* bytes() const { return bytes_; }
+  // Returns the hash for the portion corresponding to symbol_name.
+  // Returns 0 if the portion has no bytes or no portions have that name.
+  uint32_t Hash(const char* symbol_name) const {
+    for (const auto& portion : portions_) {
+      if (portion.symbol_name != nullptr &&
+          strcmp(symbol_name, portion.symbol_name) == 0) {
+        if (portion.bytes == nullptr) return 0;
+        const uint32_t hash = Utils::StringHash(portion.bytes, portion.size);
+        // Ensure a non-zero return.
+        return hash == 0 ? 1 : hash;
+      }
+    }
+    return 0;
+  }
+
+  intptr_t FileSize() const { return IsNoBits() ? 0 : total_size_; }
+  intptr_t MemorySize() const { return IsAllocated() ? total_size_ : 0; }
 
  private:
-  const intptr_t file_size_;
-  const intptr_t memory_size_;
-  const uint8_t* const bytes_;
-  const ZoneGrowableArray<Elf::Relocation>* const relocations_;
-  const ZoneGrowableArray<Elf::SymbolData>* const symbols_;
+  GrowableArray<Portion> portions_;
+  intptr_t total_size_ = 0;
 };
 
+class NoteSection : public BitsContainer {
+ public:
+  // While the build ID section does not need to be writable, the first load
+  // segment in our ELF files is writable (see Elf::WriteProgramTable). Thus,
+  // this ensures we can put it in that segment right after the program table.
+  NoteSection()
+      : BitsContainer(elf::SectionHeaderType::SHT_NOTE,
+                      /*allocate=*/true,
+                      /*executable=*/false,
+                      /*writable=*/true,
+                      kNoteAlignment) {}
+};
+
+// Abstract bits container that allows merging by just appending the portion
+// information (with properly adjusted offsets) of the other to this one.
+class ConcatenableBitsContainer : public BitsContainer {
+ public:
+  ConcatenableBitsContainer(Elf::Type type,
+                            bool executable,
+                            bool writable,
+                            intptr_t alignment)
+      : BitsContainer(type, executable, writable, alignment) {}
+
+  virtual bool CanMergeWith(const Section& other) const = 0;
+  virtual void Merge(const Section& other) {
+    ASSERT(other.IsBitsContainer());
+    ASSERT(CanMergeWith(other));
+    for (const auto& portion : other.AsBitsContainer()->portions()) {
+      AddPortion(portion.bytes, portion.size, portion.relocations,
+                 portion.symbols, portion.symbol_name);
+    }
+  }
+};
+
+class TextSection : public ConcatenableBitsContainer {
+ public:
+  explicit TextSection(Elf::Type t)
+      : ConcatenableBitsContainer(t,
+                                  /*executable=*/true,
+                                  /*writable=*/false,
+                                  ImageWriter::kTextAlignment) {}
+
+  DEFINE_TYPE_CHECK_FOR(TextSection);
+
+  virtual bool CanMergeWith(const Section& other) const {
+    return other.IsTextSection();
+  }
+};
+
+class DataSection : public ConcatenableBitsContainer {
+ public:
+  explicit DataSection(Elf::Type t)
+      : ConcatenableBitsContainer(t,
+                                  /*executable=*/false,
+                                  /*writable=*/false,
+                                  ImageWriter::kRODataAlignment) {}
+
+  DEFINE_TYPE_CHECK_FOR(DataSection);
+
+  virtual bool CanMergeWith(const Section& other) const {
+    return other.IsDataSection();
+  }
+};
+
+class BssSection : public ConcatenableBitsContainer {
+ public:
+  explicit BssSection(Elf::Type t)
+      : ConcatenableBitsContainer(t,
+                                  /*executable=*/false,
+                                  /*writable=*/true,
+                                  ImageWriter::kBssAlignment) {}
+
+  DEFINE_TYPE_CHECK_FOR(BssSection);
+
+  virtual bool CanMergeWith(const Section& other) const {
+    return other.IsBssSection();
+  }
+};
+
+// Represents portions of the file/memory space which do not correspond to
+// sections from the section header. Should never be added to the section table,
+// but may be added to segments.
+class PseudoSection : public Section {
+ public:
+  // All PseudoSections are aligned to target word size.
+  static const intptr_t kAlignment = compiler::target::kWordSize;
+
+  PseudoSection(bool allocate, bool executable, bool writable)
+      : Section(elf::SectionHeaderType::SHT_NULL,
+                allocate,
+                executable,
+                writable,
+                kAlignment) {}
+
+  DEFINE_TYPE_CHECK_FOR(PseudoSection)
+
+  virtual void Write(ElfWriteStream* stream) const = 0;
+};
+
+class ProgramTable : public PseudoSection {
+ public:
+  // See SectionTable::CreateProgramTable as to why this section is writable.
+  explicit ProgramTable(Zone* zone)
+      : PseudoSection(/*allocate=*/true,
+                      /*executable=*/false,
+                      /*writable=*/true),
+        segments_(zone, 0) {
+    entry_size = sizeof(elf::ProgramHeader);
+  }
+
+  const GrowableArray<Segment*>& segments() const { return segments_; }
+  intptr_t SegmentCount() const { return segments_.length(); }
+  intptr_t MemorySize() const {
+    return segments_.length() * sizeof(elf::ProgramHeader);
+  }
+
+  void Add(Segment* segment) {
+    ASSERT(segment != nullptr);
+    segments_.Add(segment);
+  }
+
+  void Write(ElfWriteStream* stream) const;
+
+ private:
+  GrowableArray<Segment*> segments_;
+};
+
+// This particular PseudoSection should not appear in segments either (hence
+// being marked non-allocated), but is directly held by the Elf object.
+class SectionTable : public PseudoSection {
+ public:
+  explicit SectionTable(Zone* zone)
+      : PseudoSection(/*allocate=*/false,
+                      /*executable=*/false,
+                      /*writable=*/false),
+        zone_(zone),
+        sections_(zone_, 2),
+        shstrtab_(zone_, /*allocate=*/false) {
+    entry_size = sizeof(elf::SectionHeader);
+    // The section at index 0 (elf::SHN_UNDEF) must be all 0s.
+    ASSERT_EQUAL(shstrtab_.Lookup(""), 0);
+    Add(new (zone_) ReservedSection(), "");
+    Add(&shstrtab_, ".shstrtab");
+  }
+
+  const GrowableArray<Section*>& sections() const { return sections_; }
+  intptr_t SectionCount() const { return sections_.length(); }
+  intptr_t StringTableIndex() const { return shstrtab_.index; }
+
+  bool HasSectionNamed(const char* name) {
+    return shstrtab_.Lookup(name) != StringTable::kNotIndexed;
+  }
+
+  void Add(Section* section, const char* name = nullptr) {
+    ASSERT(!section->IsPseudoSection());
+    ASSERT(name != nullptr || section->name_is_set());
+    if (name != nullptr) {
+      // First, check for an existing section with the same table name.
+      if (auto* const old_section = Find(name)) {
+        ASSERT(old_section->CanMergeWith(*section));
+        old_section->Merge(*section);
+        return;
+      }
+      // No existing section with this name.
+      const intptr_t name_index = shstrtab_.Add(name);
+      section->set_name(name_index);
+    }
+    section->index = sections_.length();
+    sections_.Add(section);
+  }
+
+  Section* Find(const char* name) {
+    const intptr_t name_index = shstrtab_.Lookup(name);
+    if (name_index == StringTable::kNotIndexed) {
+      // We're guaranteed that no section with this name has been added yet.
+      return nullptr;
+    }
+    // In DEBUG mode, we walk all sections to check that the name is unique.
+    Section* result = nullptr;
+    for (Section* const section : sections_) {
+      if (section->name() == name_index) {
+#if defined(DEBUG)
+        ASSERT(result == nullptr);
+        result = section;
+#else
+        return section;
+#endif
+      }
+    }
+    return result;
+  }
+
+  intptr_t FileSize() const {
+    return sections_.length() * sizeof(elf::SectionHeader);
+  }
+
+  void Write(ElfWriteStream* stream) const;
+
+  // Reorders the sections for creating a minimal amount of segments and
+  // creates and returns an appropriate program table.
+  //
+  // Also takes the static symbol table and adjusts its section indices, since
+  // that table is not recorded in sections_ for stripped outputs.
+  ProgramTable* CreateProgramTable(SymbolTable* symtab);
+
+ private:
+  Zone* const zone_;
+  GrowableArray<Section*> sections_;
+  StringTable shstrtab_;
+};
+
+class ElfHeader : public PseudoSection {
+ public:
+  // See SectionTable::CreateProgramTable as to why this section is writable.
+  ElfHeader(const ProgramTable& program_table,
+            const SectionTable& section_table)
+      : PseudoSection(/*allocate=*/true,
+                      /*executable=*/false,
+                      /*writable=*/true),
+        program_table_(program_table),
+        section_table_(section_table) {}
+
+  intptr_t MemorySize() const { return sizeof(elf::ElfHeader); }
+
+  void Write(ElfWriteStream* stream) const;
+
+ private:
+  const ProgramTable& program_table_;
+  const SectionTable& section_table_;
+};
+
+#undef DEFINE_TYPE_CHECK_FOR
+#undef FOR_EACH_SECTION_TYPE
+
 Elf::Elf(Zone* zone, BaseWriteStream* stream, Type type, Dwarf* dwarf)
     : zone_(zone),
       unwrapped_stream_(stream),
       type_(type),
       dwarf_(dwarf),
-      shstrtab_(new (zone) StringTable(zone, /*allocate=*/false)),
-      dynstrtab_(new (zone) StringTable(zone, /*allocate=*/true)),
-      dynsym_(new (zone) SymbolTable(zone, dynstrtab_, /*dynamic=*/true)),
-      strtab_(new (zone_) StringTable(zone_, /*allocate=*/false)),
-      symtab_(new (zone_) SymbolTable(zone, strtab_, /*dynamic=*/false)) {
+      section_table_(new (zone) SectionTable(zone)) {
   // Separate debugging information should always have a Dwarf object.
   ASSERT(type_ == Type::Snapshot || dwarf_ != nullptr);
   // Assumed by various offset logic in this file.
   ASSERT_EQUAL(unwrapped_stream_->Position(), 0);
 }
 
-void Elf::AddSection(Section* section,
-                     const char* name,
-                     const char* symbol_name) {
-  ASSERT(section_table_file_size_ < 0);
-  ASSERT(!shstrtab_->HasBeenFinalized());
-  section->set_name(shstrtab_->AddString(name));
-  // We do not set the section index yet, that will be done during Finalize().
-  sections_.Add(section);
-  // We do set the initial section index in initialized symbols for quick lookup
-  // until reordering happens.
-  const intptr_t initial_section_index = sections_.length() - 1;
-  if (symbol_name != nullptr) {
-    ASSERT(section->IsAllocated());
-    section->symbol_name = symbol_name;
-    // While elf::STT_SECTION might seem more appropriate, section symbols are
-    // usually local and dlsym won't return them.
-    ASSERT(!dynsym_->HasBeenFinalized());
-    dynsym_->AddSymbol(symbol_name, elf::STB_GLOBAL, elf::STT_FUNC,
-                       initial_section_index, section->MemorySize());
-    // Some tools assume the static symbol table is a superset of the dynamic
-    // symbol table when it exists (see dartbug.com/41783).
-    ASSERT(!symtab_->HasBeenFinalized());
-    symtab_->AddSymbol(symbol_name, elf::STB_GLOBAL, elf::STT_FUNC,
-                       initial_section_index, section->FileSize());
-  }
-  if (auto const container = section->AsBitsContainer()) {
-    if (container->symbols() != nullptr) {
-      ASSERT(section->IsAllocated());
-      for (const auto& symbol_data : *container->symbols()) {
-        ASSERT(!symtab_->HasBeenFinalized());
-        symtab_->AddSymbol(symbol_data.name, elf::STB_LOCAL, symbol_data.type,
-                           initial_section_index, symbol_data.size);
-      }
-    }
-  }
-}
-
 void Elf::AddText(const char* name,
                   const uint8_t* bytes,
                   intptr_t size,
                   const ZoneGrowableArray<Relocation>* relocations,
                   const ZoneGrowableArray<SymbolData>* symbols) {
-  auto const image =
-      new (zone_) BitsContainer(type_, /*executable=*/true,
-                                /*writable=*/false, size, bytes, relocations,
-                                symbols, ImageWriter::kTextAlignment);
-  AddSection(image, ".text", name);
+  auto* const container = new (zone_) TextSection(type_);
+  container->AddPortion(bytes, size, relocations, symbols, name);
+  section_table_->Add(container, kTextName);
 }
 
-// Here, both VM and isolate will be compiled into a single snapshot.
-// In assembly generation, each serialized text section gets a separate
-// pointer into the BSS segment and BSS slots are created for each, since
-// we may not serialize both VM and isolate. Here, we always serialize both,
-// so make a BSS segment large enough for both, with the VM entries coming
-// first.
-static constexpr intptr_t kBssVmSize =
-    BSS::kVmEntryCount * compiler::target::kWordSize;
-static constexpr intptr_t kBssIsolateSize =
-    BSS::kIsolateEntryCount * compiler::target::kWordSize;
-static constexpr intptr_t kBssSize = kBssVmSize + kBssIsolateSize;
-
 void Elf::CreateBSS() {
-  uint8_t* bytes = nullptr;
-  if (type_ == Type::Snapshot) {
-    // Ideally the BSS segment would take no space in the object, but Android's
-    // "strip" utility truncates the memory-size of our segments to their
-    // file-size.
-    //
-    // Therefore we must insert zero-filled data for the BSS.
-    bytes = zone_->Alloc<uint8_t>(kBssSize);
-    memset(bytes, 0, kBssSize);
+  // Not idempotent.
+  ASSERT(section_table_->Find(kBssName) == nullptr);
+  // No text section means no BSS section.
+  auto* const text_section = section_table_->Find(kTextName);
+  if (text_section == nullptr) return;
+  ASSERT(text_section->IsTextSection());
+
+  auto* const bss_container = new (zone_) BssSection(type_);
+  for (const auto& portion : text_section->AsBitsContainer()->portions()) {
+    size_t size;
+    const char* symbol_name;
+    // First determine whether this is the VM's text portion or the isolate's.
+    if (strcmp(portion.symbol_name, kVmSnapshotInstructionsAsmSymbol) == 0) {
+      size = BSS::kVmEntryCount * compiler::target::kWordSize;
+      symbol_name = kVmSnapshotBssAsmSymbol;
+    } else if (strcmp(portion.symbol_name,
+                      kIsolateSnapshotInstructionsAsmSymbol) == 0) {
+      size = BSS::kIsolateEntryCount * compiler::target::kWordSize;
+      symbol_name = kIsolateSnapshotBssAsmSymbol;
+    } else {
+      // Not VM or isolate text.
+      UNREACHABLE();
+      continue;
+    }
+
+    uint8_t* bytes = nullptr;
+    if (type_ == Type::Snapshot) {
+      // Ideally the BSS segment would take no space in the object, but
+      // Android's "strip" utility truncates the memory-size of our segments to
+      // their file-size.
+      //
+      // Therefore we must insert zero-filled data for the BSS.
+      bytes = zone_->Alloc<uint8_t>(size);
+      memset(bytes, 0, size);
+    }
+    // For the BSS section, we add the section symbols as local symbols in the
+    // static symbol table, as these addresses are only used for relocation.
+    // (This matches the behavior in the assembly output.)
+    auto* symbols = new (zone_) ZoneGrowableArray<Elf::SymbolData>();
+    symbols->Add({symbol_name, elf::STT_SECTION, 0, size});
+    bss_container->AddPortion(bytes, size, /*relocations=*/nullptr, symbols);
   }
-  // For the BSS section, we add two local symbols to the static symbol table,
-  // one for each isolate. We use local symbols because these addresses are only
-  // used for relocation. (This matches the behavior in the assembly output,
-  // where these symbols are also local.)
-  auto* bss_symbols = new (zone_) ZoneGrowableArray<Elf::SymbolData>();
-  bss_symbols->Add({kVmSnapshotBssAsmSymbol, elf::STT_SECTION, 0, kBssVmSize});
-  bss_symbols->Add({kIsolateSnapshotBssAsmSymbol, elf::STT_SECTION, kBssVmSize,
-                    kBssIsolateSize});
-  bss_ = new (zone_) BitsContainer(
-      type_, /*executable=*/false, /*writable=*/true, kBssSize, bytes,
-      /*relocations=*/nullptr, bss_symbols, ImageWriter::kBssAlignment);
-  AddSection(bss_, ".bss");
+
+  section_table_->Add(bss_container, kBssName);
 }
 
 void Elf::AddROData(const char* name,
@@ -1040,11 +1198,9 @@
                     intptr_t size,
                     const ZoneGrowableArray<Relocation>* relocations,
                     const ZoneGrowableArray<SymbolData>* symbols) {
-  auto const image =
-      new (zone_) BitsContainer(type_, /*executable=*/false,
-                                /*writable=*/false, size, bytes, relocations,
-                                symbols, ImageWriter::kRODataAlignment);
-  AddSection(image, ".rodata", name);
+  auto* const container = new (zone_) DataSection(type_);
+  container->AddPortion(bytes, size, relocations, symbols, name);
+  section_table_->Add(container, kDataName);
 }
 
 #if defined(DART_PRECOMPILER)
@@ -1057,6 +1213,7 @@
 
   const uint8_t* buffer() const { return stream_->buffer(); }
   intptr_t bytes_written() const { return stream_->bytes_written(); }
+  intptr_t Position() const { return stream_->Position(); }
 
   void sleb128(intptr_t value) { stream_->WriteSLEB128(value); }
   void uleb128(uintptr_t value) { stream_->WriteLEB128(value); }
@@ -1143,43 +1300,79 @@
 static constexpr intptr_t kInitialDwarfBufferSize = 64 * KB;
 #endif
 
-const Section* Elf::FindSectionBySymbolName(const char* name) const {
-  auto* const symbol = symtab_->Find(name);
-  if (symbol == nullptr) return nullptr;
-  // Should not be run between OrderSectionsAndCreateSegments (when section
-  // indices may change) and FinalizeSymbols() (sets the final section index).
-  ASSERT(segments_.length() == 0 || symbol->HasBeenFinalized());
-  const Section* const section = sections_[symbol->section_index];
-  ASSERT_EQUAL(strcmp(section->symbol_name, name), 0);
-  return section;
-}
-
-void Elf::FinalizeSymbols() {
-  // Must be run after OrderSectionsAndCreateSegments and ComputeOffsets.
-  ASSERT(segments_.length() > 0);
-  ASSERT(section_table_file_offset_ > 0);
-  for (const auto& section : sections_) {
-    if (section->symbol_name != nullptr) {
-      dynsym_->FinalizeSymbol(section->symbol_name, section->index(),
-                              section->memory_offset());
-      symtab_->FinalizeSymbol(section->symbol_name, section->index(),
-                              section->memory_offset());
-    }
-    if (auto const container = section->AsBitsContainer()) {
-      if (container->symbols() != nullptr) {
-        for (const auto& symbol_data : *container->symbols()) {
-          symtab_->FinalizeSymbol(
-              symbol_data.name, section->index(),
-              section->memory_offset() + symbol_data.offset);
+void SymbolTable::Initialize(const GrowableArray<Section*>& sections) {
+  for (auto* const section : sections) {
+    // The values of all added symbols are memory addresses.
+    if (!section->IsAllocated()) continue;
+    if (auto* const bits = section->AsBitsContainer()) {
+      for (const auto& portion : bits->portions()) {
+        if (portion.symbol_name != nullptr) {
+          // Global dynamic symbols for the content of a given section, which is
+          // always a single structured element (and thus we use STT_OBJECT).
+          const intptr_t binding = elf::STB_GLOBAL;
+          const intptr_t type = elf::STT_OBJECT;
+          // Some tools assume the static symbol table is a superset of the
+          // dynamic symbol table when it exists and only use it, so put all
+          // dynamic symbols there also (see dartbug.com/41783).
+          AddSymbol(portion.symbol_name, binding, type, portion.size,
+                    section->index, portion.offset);
+        }
+        if (!dynamic_ && portion.symbols != nullptr) {
+          for (const auto& symbol_data : *portion.symbols) {
+            // Local static-only symbols, e.g., code payloads or RO objects.
+            AddSymbol(symbol_data.name, elf::STB_LOCAL, symbol_data.type,
+                      symbol_data.size, section->index,
+                      portion.offset + symbol_data.offset);
+          }
         }
       }
     }
   }
 }
 
+void Elf::InitializeSymbolTables() {
+  // Not idempotent.
+  ASSERT(symtab_ == nullptr);
+
+  // Create static and dynamic symbol tables.
+  auto* const dynstrtab = new (zone_) StringTable(zone_, /*allocate=*/true);
+  section_table_->Add(dynstrtab, ".dynstr");
+  auto* const dynsym =
+      new (zone_) SymbolTable(zone_, dynstrtab, /*dynamic=*/true);
+  section_table_->Add(dynsym, ".dynsym");
+  dynsym->Initialize(section_table_->sections());
+  // Now the dynamic symbol table is populated, set up the hash table and
+  // dynamic table.
+  auto* const hash = new (zone_) SymbolHashTable(zone_, dynsym);
+  section_table_->Add(hash, ".hash");
+  auto* const dynamic = new (zone_) DynamicTable(zone_, dynsym, hash);
+  section_table_->Add(dynamic, kDynamicTableName);
+
+  // We only add the static string and symbol tables to the section table if
+  // this is an unstripped output, but we always create them as they are used
+  // to resolve relocations.
+  auto* const strtab = new (zone_) StringTable(zone_, /*allocate=*/false);
+  if (!IsStripped()) {
+    section_table_->Add(strtab, ".strtab");
+  }
+  symtab_ = new (zone_) SymbolTable(zone_, strtab, /*dynamic=*/false);
+  if (!IsStripped()) {
+    section_table_->Add(symtab_, ".symtab");
+  }
+  symtab_->Initialize(section_table_->sections());
+}
+
 void Elf::FinalizeEhFrame() {
-#if defined(DART_PRECOMPILER) &&                                               \
-    (defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64))
+#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
+  TextSection* text_section = nullptr;
+  if (auto* const section = section_table_->Find(kTextName)) {
+    text_section = section->AsTextSection();
+    ASSERT(text_section != nullptr);
+  }
+  // No text section added means no .eh_frame.
+  if (text_section == nullptr) return;
+
   // Multiplier which will be used to scale operands of DW_CFA_offset and
   // DW_CFA_val_offset.
   const intptr_t kDataAlignment = compiler::target::kWordSize;
@@ -1211,17 +1404,15 @@
   });
 
   // Emit an FDE covering each .text section.
-  const auto text_name = shstrtab_->Lookup(".text");
-  ASSERT(text_name != StringTable::kNotIndexed);
-  for (auto section : sections_) {
-    if (section->name() != text_name) continue;
+  for (const auto& portion : text_section->portions()) {
+    ASSERT(portion.symbol_name != nullptr);  // Needed for relocations.
     dwarf_stream.WritePrefixedLength([&]() {
       // Offset to CIE. Note that unlike pcrel this offset is encoded
       // backwards: it will be subtracted from the current position.
       dwarf_stream.u4(stream.Position() - cie_start);
       // Start address as a PC relative reference.
-      dwarf_stream.RelativeSymbolOffset<int32_t>(section->symbol_name);
-      dwarf_stream.u4(section->MemorySize());  // Size.
+      dwarf_stream.RelativeSymbolOffset<int32_t>(portion.symbol_name);
+      dwarf_stream.u4(portion.size);           // Size.
       dwarf_stream.u1(0);                      // Augmentation Data length.
 
       // FP at FP+kSavedCallerPcSlotFromFp*kWordSize
@@ -1248,25 +1439,28 @@
     });
   }
 
-  dwarf_stream.u4(0);  // end of section
+  dwarf_stream.u4(0);  // end of section (FDE with zero length)
 
-  auto const eh_frame = new (zone_)
-      BitsContainer(type_, /*writable=*/false, /*executable=*/false,
-                    dwarf_stream.bytes_written(), dwarf_stream.buffer(),
-                    dwarf_stream.relocations(), /*symbols=*/nullptr);
-  AddSection(eh_frame, ".eh_frame");
-#endif  // defined(DART_PRECOMPILER) && \
-        //   (defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64))
+  auto* const eh_frame = new (zone_)
+      BitsContainer(type_, /*executable=*/false, /*writable=*/false);
+  eh_frame->AddPortion(dwarf_stream.buffer(), dwarf_stream.bytes_written(),
+                       dwarf_stream.relocations());
+  section_table_->Add(eh_frame, ".eh_frame");
+#endif  // defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
 }
 
 void Elf::FinalizeDwarfSections() {
   if (dwarf_ == nullptr) return;
-#if defined(DART_PRECOMPILER)
+
+  // Currently we only output DWARF information involving code.
+  ASSERT(section_table_->HasSectionNamed(kTextName));
+
   auto add_debug = [&](const char* name, const DwarfElfStream& stream) {
-    auto const image = new (zone_) BitsContainer(
-        elf::SectionHeaderType::SHT_PROGBITS, stream.bytes_written(),
-        stream.buffer(), stream.relocations(), /*symbols=*/nullptr);
-    AddSection(image, name);
+    auto* const container =
+        new (zone_) BitsContainer(elf::SectionHeaderType::SHT_PROGBITS);
+    container->AddPortion(stream.buffer(), stream.bytes_written(),
+                          stream.relocations());
+    section_table_->Add(container, name);
   };
   {
     ZoneWriteStream stream(zone(), kInitialDwarfBufferSize);
@@ -1288,38 +1482,86 @@
     dwarf_->WriteLineNumberProgram(&dwarf_stream);
     add_debug(".debug_line", dwarf_stream);
   }
-#endif
 }
 
-void Elf::OrderSectionsAndCreateSegments() {
-  GrowableArray<Section*> reordered_sections;
-  // The first section in the section header table is always a reserved
-  // entry containing only 0 values.
-  reordered_sections.Add(new (zone_) ReservedSection());
+ProgramTable* SectionTable::CreateProgramTable(SymbolTable* symtab) {
+  const intptr_t num_sections = sections_.length();
+  // Should have at least the reserved entry in sections_.
+  ASSERT(!sections_.is_empty());
+  ASSERT_EQUAL(sections_[0]->alignment, 0);
+
+  // The new program table that collects the segments for allocated sections
+  // and a few special segments.
+  auto* const program_table = new (zone_) ProgramTable(zone_);
+
+  GrowableArray<Section*> reordered_sections(zone_, num_sections);
+  // Maps the old indices of sections to the new ones.
+  GrowableArray<intptr_t> index_map(zone_, num_sections);
+  index_map.FillWith(0, 0, num_sections);
 
   Segment* current_segment = nullptr;
+  // Only called for sections in the section table (i.e., not special sections
+  // appearing in segments only or the section table itself).
   auto add_to_reordered_sections = [&](Section* section) {
-    section->set_index(reordered_sections.length());
+    intptr_t new_index = reordered_sections.length();
+    index_map[section->index] = new_index;
+    section->index = new_index;
     reordered_sections.Add(section);
-    if (!section->IsAllocated()) return;
-    const bool was_added =
-        current_segment == nullptr ? false : current_segment->Add(section);
-    if (!was_added) {
-      // There is no current segment or it is incompatible for merging, so
-      // following compatible segments will be merged into this one if possible.
-      current_segment =
-          new (zone_) Segment(zone_, section, elf::ProgramHeaderType::PT_LOAD);
-      section->load_segment = current_segment;
-      segments_.Add(current_segment);
+    if (section->IsAllocated()) {
+      ASSERT(current_segment != nullptr);
+      if (!current_segment->Add(section)) {
+        // The current segment is incompatible with the current section, so
+        // create a new one.
+        current_segment = new (zone_)
+            Segment(zone_, section, elf::ProgramHeaderType::PT_LOAD);
+        program_table->Add(current_segment);
+      }
     }
   };
 
-  // Add writable, non-executable sections first, due to a bug in Jelly Bean's
-  // ELF loader when a writable segment is placed between two non-writable
-  // segments. See also Elf::WriteProgramTable(), which double-checks this.
+  // The first section in the section header table is always a reserved
+  // entry containing only 0 values, so copy it over from sections_.
+  add_to_reordered_sections(sections_[0]);
+
+  // Android requires the program header table be in the first load segment, so
+  // create PseudoSections representing the ELF header and program header
+  // table to initialize that segment.
+  //
+  // The Android dynamic linker in Jelly Bean incorrectly assumes that all
+  // non-writable segments are continguous. Thus, we make the first segment
+  // writable and put all writable sections (like the BSS) into it, which means
+  // we mark the created PseudoSections as writable to pass the segment checks.
+  //
+  // The bug is here:
+  //   https://github.com/aosp-mirror/platform_bionic/blob/94963af28e445384e19775a838a29e6a71708179/linker/linker.c#L1991-L2001
+  auto* const elf_header = new (zone_) ElfHeader(*program_table, *this);
+
+  // Self-reference to program header table. Required by Android but not by
+  // Linux. Must appear before any PT_LOAD entries.
+  program_table->Add(new (zone_) Segment(zone_, program_table,
+                                         elf::ProgramHeaderType::PT_PHDR));
+
+  // Create the initial load segment which contains the ELF header and program
+  // table.
+  current_segment =
+      new (zone_) Segment(zone_, elf_header, elf::ProgramHeaderType::PT_LOAD);
+  program_table->Add(current_segment);
+  current_segment->Add(program_table);
+
+  // We now do several passes over the collected sections to reorder them in
+  // a way that minimizes segments (and thus padding) in the resulting snapshot.
+
+  // If a build ID was created, we put it after the program table so it can
+  // be read with a minimum number of bytes from the ELF file.
+  auto* const build_id = Find(Elf::kBuildIdNoteName);
+  if (build_id != nullptr) {
+    ASSERT(build_id->type == elf::SectionHeaderType::SHT_NOTE);
+    add_to_reordered_sections(build_id);
+  }
+  // Now add all the other writable sections.
   for (auto* const section : sections_) {
-    if (section->IsAllocated() && section->IsWritable() &&
-        !section->IsExecutable()) {
+    if (section == build_id) continue;
+    if (section->IsWritable()) {  // Implies IsAllocated() && !IsExecutable()
       add_to_reordered_sections(section);
     }
   }
@@ -1335,101 +1577,133 @@
 
   // Now add the non-writable, executable sections in a new segment.
   for (auto* const section : sections_) {
-    if (section->IsAllocated() && !section->IsWritable() &&
-        section->IsExecutable()) {
+    if (section->IsExecutable()) {  // Implies IsAllocated() && !IsWritable()
       add_to_reordered_sections(section);
     }
   }
 
-  // We put all unallocated sections last because otherwise, they would
+  // We put all non-reserved unallocated sections last. Otherwise, they would
   // affect the file offset but not the memory offset of any following allocated
   // sections. Doing it in this order makes it easier to keep file and memory
   // offsets page-aligned with respect to each other, which is required for
   // some loaders.
-  for (auto* const section : sections_) {
-    if (!section->IsAllocated()) {
-      add_to_reordered_sections(section);
-    }
+  for (intptr_t i = 1; i < num_sections; i++) {
+    auto* const section = sections_[i];
+    if (section->IsAllocated()) continue;
+    add_to_reordered_sections(section);
   }
 
-  // Now replace sections_.
+  // All sections should have been accounted for in the loops above.
+  ASSERT_EQUAL(sections_.length(), reordered_sections.length());
+  // Replace the content of sections_ with the reordered sections.
   sections_.Clear();
   sections_.AddArray(reordered_sections);
-}
 
-void Elf::Finalize() {
-  ASSERT(program_table_file_size_ < 0);
+  // This must be true for uses of the map to be correct.
+  ASSERT_EQUAL(index_map[elf::SHN_UNDEF], elf::SHN_UNDEF);
 
-  // Generate the build ID now that we have all user-provided sections.
-  // Generating it at this point also means it'll be the first writable
-  // non-executable section added to sections_ and thus end up right after the
-  // program table after reordering. This limits how much of the ELF file needs
-  // to be read to get the build ID (header + program table + note segment).
-  GenerateBuildId();
-
-  // We add BSS in all cases, even to the separate debugging information ELF,
-  // to ensure that relocated addresses are consistent between ELF snapshots
-  // and ELF separate debugging information.
-  CreateBSS();
-
-  // Adding the dynamic symbol table and associated sections.
-  AddSection(dynstrtab_, ".dynstr");
-  AddSection(dynsym_, ".dynsym");
-
-  auto const hash = new (zone_) SymbolHashTable(zone_, dynstrtab_, dynsym_);
-  AddSection(hash, ".hash");
-
-  auto const dynamic = new (zone_) DynamicTable(zone_);
-  AddSection(dynamic, ".dynamic");
-
-  if (!IsStripped()) {
-    AddSection(strtab_, ".strtab");
-    AddSection(symtab_, ".symtab");
+  // Since the section indices have been updated, change links to match
+  // and update the indices of symbols in any symbol tables.
+  for (auto* const section : sections_) {
+    // SHN_UNDEF maps to SHN_UNDEF, so no need to check for it.
+    section->link = index_map[section->link];
+    if (auto* const table = section->AsSymbolTable()) {
+      table->UpdateSectionIndices(index_map);
+    }
   }
-  AddSection(shstrtab_, ".shstrtab");
-  FinalizeEhFrame();
-  FinalizeDwarfSections();
-
-  OrderSectionsAndCreateSegments();
-
-  // Now that the sections have indices, set up links between them as needed.
-  dynsym_->link = dynstrtab_->index();
-  hash->link = dynsym_->index();
-  dynamic->link = dynstrtab_->index();
-  if (!IsStripped()) {
-    symtab_->link = strtab_->index();
+  if (symtab->index == elf::SHN_UNDEF) {
+    // The output is stripped, so this wasn't finalized during the loop above.
+    symtab->UpdateSectionIndices(index_map);
   }
 
-  // Now add any special non-load segments.
-
-  if (build_id_ != nullptr) {
+  // Add any special non-load segments.
+  if (build_id != nullptr) {
     // Add a PT_NOTE segment for the build ID.
-    segments_.Add(new (zone_) NoteSegment(zone_, build_id_));
+    program_table->Add(
+        new (zone_) Segment(zone_, build_id, elf::ProgramHeaderType::PT_NOTE));
   }
 
   // Add a PT_DYNAMIC segment for the dynamic symbol table.
-  segments_.Add(new (zone_) DynamicSegment(zone_, dynamic));
+  ASSERT(HasSectionNamed(Elf::kDynamicTableName));
+  auto* const dynamic = Find(Elf::kDynamicTableName)->AsDynamicTable();
+  program_table->Add(
+      new (zone_) Segment(zone_, dynamic, elf::ProgramHeaderType::PT_DYNAMIC));
 
-  // At this point, all sections have been added and ordered and all sections
-  // appropriately grouped into segments. Add the program table and then
-  // calculate file and memory offsets.
-  FinalizeProgramTable();
+  return program_table;
+}
+
+void Elf::Finalize() {
+  // Generate the build ID now that we have all user-provided sections.
+  GenerateBuildId();
+
+  // We add a BSS section in all cases, even to the separate debugging
+  // information, to ensure that relocated addresses are consistent between ELF
+  // snapshots and the corresponding separate debugging information.
+  CreateBSS();
+
+  FinalizeEhFrame();
+  FinalizeDwarfSections();
+
+  // Create and initialize the dynamic and static symbol tables and any
+  // other associated sections now that all other sections have been added.
+  InitializeSymbolTables();
+  // Create a program table containing load segments for allocated sections
+  // and any other segments needed. This may reorder sections to minimize the
+  // number of load segments, so it also takes the static symbol table so that
+  // symbol section indices can be adjusted if needed.
+  program_table_ = section_table_->CreateProgramTable(symtab_);
+  // Calculate file and memory offsets, and finalize symbol values in any
+  // symbol tables.
   ComputeOffsets();
 
-  // Now that we have reordered the sections and set memory offsets, we can
-  // update the symbol tables to add index and address information. This must
-  // be done prior to writing the symbol tables and any sections with
-  // relocations.
-  FinalizeSymbols();
-  // Also update the entries in the dynamic table.
-  dynamic->FinalizeEntries(dynstrtab_, dynsym_, hash);
+#if defined(DEBUG)
+  if (type_ == Type::Snapshot) {
+    // For files that will be dynamically loaded, ensure the file offsets
+    // of allocated sections match their memory offsets modulo the page size.
+    for (auto* const segment : program_table_->segments()) {
+      for (auto* const section : segment->sections()) {
+        ASSERT_EQUAL(section->file_offset() % Elf::kPageSize,
+                     section->memory_offset() % Elf::kPageSize);
+      }
+    }
+  }
+#endif
 
   // Finally, write the ELF file contents.
   ElfWriteStream wrapped(unwrapped_stream_, *this);
-  WriteHeader(&wrapped);
-  WriteProgramTable(&wrapped);
-  WriteSections(&wrapped);
-  WriteSectionTable(&wrapped);
+
+  auto write_section = [&](const Section* section) {
+    wrapped.Align(section->alignment);
+    ASSERT_EQUAL(wrapped.Position(), section->file_offset());
+    section->Write(&wrapped);
+    ASSERT_EQUAL(wrapped.Position(),
+                 section->file_offset() + section->FileSize());
+  };
+
+  // To match ComputeOffsets, first we write allocated sections and then
+  // unallocated sections. We access the allocated sections via the load
+  // segments so we can properly align the stream when entering each segment.
+  intptr_t section_index = 1;  // We don't visit the reserved section.
+  for (auto* const segment : program_table_->segments()) {
+    if (segment->type != elf::ProgramHeaderType::PT_LOAD) continue;
+    wrapped.Align(segment->Alignment());
+    for (auto* const section : segment->sections()) {
+      ASSERT(section->IsAllocated());
+      write_section(section);
+      if (!section->IsPseudoSection()) {
+        ASSERT_EQUAL(section->index, section_index);
+        section_index++;
+      }
+    }
+  }
+  const auto& sections = section_table_->sections();
+  for (; section_index < sections.length(); section_index++) {
+    auto* const section = sections[section_index];
+    ASSERT(!section->IsAllocated());
+    write_section(section);
+  }
+  // Finally, write the section table.
+  write_section(section_table_);
 }
 
 // For the build ID, we generate a 128-bit hash, where each 32 bits is a hash of
@@ -1449,32 +1723,40 @@
     sizeof(elf::Note) + sizeof(elf::ELF_NOTE_GNU);
 
 void Elf::GenerateBuildId() {
+  // Not idempotent.
+  ASSERT(section_table_->Find(kBuildIdNoteName) == nullptr);
   uint32_t hashes[kBuildIdSegmentNamesLength];
+  // Currently, we construct the build ID out of data from two different
+  // sections: the .text section and the .rodata section. We only create a
+  // build ID when both sections are present and each of the four expected
+  // snapshot symbols has actual bytes to hash.
+  //
+  // TODO(dartbug.com/43274): Generate build IDs for separate debugging
+  // information for assembly snapshots.
+  //
+  // TODO(dartbug.com/43516): Generate build IDs for snapshots with deferred
+  // sections.
+  auto* const text_section = section_table_->Find(kTextName);
+  if (text_section == nullptr) return;
+  ASSERT(text_section->IsTextSection());
+  auto* const text_bits = text_section->AsBitsContainer();
+  auto* const data_section = section_table_->Find(kDataName);
+  if (data_section == nullptr) return;
+  ASSERT(data_section->IsDataSection());
+  auto* const data_bits = data_section->AsBitsContainer();
+  // Now try to find the hash for each of the expected snapshot symbols,
+  // checking the text section first and falling back to the data section.
   for (intptr_t i = 0; i < kBuildIdSegmentNamesLength; i++) {
-    auto const name = kBuildIdSegmentNames[i];
-    auto const section = FindSectionBySymbolName(name);
-    // If we're missing a section, then we don't generate a final build ID.
-    if (section == nullptr) return;
-    auto const bits = section->AsBitsContainer();
-    if (bits == nullptr) {
-      FATAL1("Section for symbol %s is not a BitsContainer", name);
+    auto* const name = kBuildIdSegmentNames[i];
+    hashes[i] = text_bits->Hash(name);
+    if (hashes[i] == 0) {
+      hashes[i] = data_bits->Hash(name);
     }
-    // For now, if we don't have section contents (because we're generating
-    // assembly), don't generate a final build ID, as we'll have different
-    // build IDs in the snapshot and the separate debugging information.
-    //
-    // TODO(dartbug.com/43274): Change once we generate consistent build IDs
-    // between assembly snapshots and their debugging information.
-    if (bits->bytes() == nullptr) return;
-    hashes[i] = bits->Hash();
+    // The symbol wasn't found in either section or there were no bytes
+    // associated with the symbol.
+    if (hashes[i] == 0) return;
   }
   auto const description_bytes = reinterpret_cast<uint8_t*>(hashes);
   const size_t description_length = sizeof(hashes);
-  // To ensure we can quickly check for a final build ID, we ensure the first
-  // byte contains a non-zero value.
-  if (description_bytes[0] == 0) {
-    description_bytes[0] = 1;
-  }
   // Now that we have the description field contents, create the section.
   ZoneWriteStream stream(zone(), kBuildIdHeaderSize + description_length);
   stream.WriteFixed<decltype(elf::Note::name_size)>(sizeof(elf::ELF_NOTE_GNU));
@@ -1484,101 +1766,93 @@
   stream.WriteBytes(elf::ELF_NOTE_GNU, sizeof(elf::ELF_NOTE_GNU));
   ASSERT_EQUAL(stream.bytes_written(), kBuildIdHeaderSize);
   stream.WriteBytes(description_bytes, description_length);
-  // While the build ID section does not need to be writable, the first segment
-  // in our ELF files is writable (see Elf::WriteProgramTable) and so this
-  // ensures we can put it right after the program table without padding.
-  build_id_ = new (zone_) BitsContainer(
-      elf::SectionHeaderType::SHT_NOTE,
-      /*allocate=*/true, /*executable=*/false,
-      /*writable=*/true, stream.bytes_written(), stream.buffer(),
-      /*relocations=*/nullptr, /*symbols=*/nullptr, kNoteAlignment);
-  AddSection(build_id_, kBuildIdNoteName, kSnapshotBuildIdAsmSymbol);
+  auto* const container = new (zone_) NoteSection();
+  container->AddPortion(stream.buffer(), stream.bytes_written(),
+                        /*relocations=*/nullptr, /*symbols=*/nullptr,
+                        kSnapshotBuildIdAsmSymbol);
+  section_table_->Add(container, kBuildIdNoteName);
 }
 
-void Elf::FinalizeProgramTable() {
-  ASSERT(program_table_file_size_ < 0);
-
-  program_table_file_offset_ = sizeof(elf::ElfHeader);
-
-  // There is one additional segment we need the size of the program table to
-  // create, so calculate it as if that segment were already in place.
-  program_table_file_size_ =
-      (1 + segments_.length()) * sizeof(elf::ProgramHeader);
-
-  auto const program_table_segment_size =
-      program_table_file_offset_ + program_table_file_size_;
-
-  // Segment for loading the initial part of the ELF file, including the
-  // program header table. Required by Android but not by Linux.
-  Segment* const initial_load =
-      new (zone_) ProgramTableLoadSegment(zone_, program_table_segment_size);
-  // Merge the initial writable segment into this one and replace it (so it
-  // doesn't change the number of segments).
-  const bool was_merged = initial_load->Merge(segments_[0]);
-  ASSERT(was_merged);
-  segments_[0] = initial_load;
-
-  // Self-reference to program header table. Required by Android but not by
-  // Linux. Must appear before any PT_LOAD entries.
-  segments_.InsertAt(
-      0, new (zone_) ProgramTableSelfSegment(zone_, program_table_file_offset_,
-                                             program_table_file_size_));
-}
-
-static const intptr_t kElfSectionTableAlignment = compiler::target::kWordSize;
-
 void Elf::ComputeOffsets() {
-  // We calculate the size and offset of the program header table during
-  // finalization.
-  ASSERT(program_table_file_offset_ > 0 && program_table_file_size_ > 0);
-  intptr_t file_offset = program_table_file_offset_ + program_table_file_size_;
-  // Program table memory size is same as file size.
-  intptr_t memory_offset = file_offset;
+  intptr_t file_offset = 0;
+  intptr_t memory_offset = 0;
 
-  // When calculating memory and file offsets for sections, we'll need to know
-  // if we've changed segments. Start with the one for the program table.
-  ASSERT(segments_[0]->type != elf::ProgramHeaderType::PT_LOAD);
-  const auto* current_segment = segments_[1];
-  ASSERT(current_segment->type == elf::ProgramHeaderType::PT_LOAD);
+  // Maps indices of allocated sections in the section table to memory offsets.
+  const intptr_t num_sections = section_table_->SectionCount();
+  GrowableArray<intptr_t> address_map(zone_, num_sections);
+  address_map.Add(0);  // Don't adjust offsets for symbols with index SHN_UNDEF.
 
-  // The non-reserved sections are output to the file in order after the program
-  // header table. If we're entering a new segment, then we need to align
-  // according to the PT_LOAD segment alignment as well to keep the file offsets
-  // aligned with the memory addresses.
-  for (intptr_t i = 1; i < sections_.length(); i++) {
-    auto const section = sections_[i];
+  auto calculate_section_offsets = [&](Section* section) {
     file_offset = Utils::RoundUp(file_offset, section->alignment);
-    memory_offset = Utils::RoundUp(memory_offset, section->alignment);
-    if (section->IsAllocated() && section->load_segment != current_segment) {
-      current_segment = section->load_segment;
-      ASSERT(current_segment->type == elf::ProgramHeaderType::PT_LOAD);
-      const intptr_t load_align = Segment::Alignment(current_segment->type);
-      file_offset = Utils::RoundUp(file_offset, load_align);
-      memory_offset = Utils::RoundUp(memory_offset, load_align);
-    }
     section->set_file_offset(file_offset);
-    if (section->IsAllocated()) {
-      section->set_memory_offset(memory_offset);
-#if defined(DEBUG)
-      if (type_ == Type::Snapshot) {
-        // For files that will be dynamically loaded, make sure the file offsets
-        // of allocated sections are page aligned to the memory offsets.
-        ASSERT_EQUAL(section->file_offset() % Elf::kPageSize,
-                     section->memory_offset() % Elf::kPageSize);
-      }
-#endif
-    }
     file_offset += section->FileSize();
-    memory_offset += section->MemorySize();
+    if (section->IsAllocated()) {
+      memory_offset = Utils::RoundUp(memory_offset, section->alignment);
+      section->set_memory_offset(memory_offset);
+      memory_offset += section->MemorySize();
+    }
+  };
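+  // Note that every section advances the file offset, but only allocated
+  // sections advance the memory offset.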
+
+  intptr_t section_index = 1;  // We don't visit the reserved section.
+  for (auto* const segment : program_table_->segments()) {
+    if (segment->type != elf::ProgramHeaderType::PT_LOAD) continue;
+    // Adjust file and memory offsets for segment alignment on entry.
+    file_offset = Utils::RoundUp(file_offset, segment->Alignment());
+    memory_offset = Utils::RoundUp(memory_offset, segment->Alignment());
+    for (auto* const section : segment->sections()) {
+      ASSERT(section->IsAllocated());
+      calculate_section_offsets(section);
+      if (!section->IsPseudoSection()) {
+        // Note: this assumes that the section header table lists all allocated
+        // sections before all (non-reserved) unallocated sections, in the same
+        // order as the load segments in the program table.
+        address_map.Add(section->memory_offset());
+        ASSERT_EQUAL(section->index, section_index);
+        section_index++;
+      }
+    }
   }
 
-  file_offset = Utils::RoundUp(file_offset, kElfSectionTableAlignment);
-  section_table_file_offset_ = file_offset;
-  section_table_file_size_ = sections_.length() * sizeof(elf::SectionHeader);
-  file_offset += section_table_file_size_;
+  const auto& sections = section_table_->sections();
+  for (; section_index < sections.length(); section_index++) {
+    auto* const section = sections[section_index];
+    ASSERT(!section->IsAllocated());
+    calculate_section_offsets(section);
+  }
+
+  ASSERT_EQUAL(section_index, sections.length());
+  // Now that all sections have been handled, set the file offset for the
+  // section table, as it will be written after the last section.
+  calculate_section_offsets(section_table_);
+
+#if defined(DEBUG)
+  // Double check that segment starts are aligned as expected.
+  for (auto* const segment : program_table_->segments()) {
+    ASSERT(Utils::IsAligned(segment->MemoryOffset(), segment->Alignment()));
+  }
+#endif
+
+  // This must be true for uses of the map to be correct.
+  ASSERT_EQUAL(address_map[elf::SHN_UNDEF], 0);
+  // Adjust addresses in symbol tables as we now have section memory offsets.
+  // Also finalize the entries of the dynamic table, as some are memory offsets.
+  for (auto* const section : sections) {
+    if (auto* const table = section->AsSymbolTable()) {
+      table->Finalize(address_map);
+    } else if (auto* const dynamic = section->AsDynamicTable()) {
+      dynamic->Finalize();
+    }
+  }
+  // Also adjust addresses in symtab for stripped snapshots.
+  if (IsStripped()) {
+    ASSERT_EQUAL(symtab_->index, elf::SHN_UNDEF);
+    symtab_->Finalize(address_map);
+  }
 }
 
-void Elf::WriteHeader(ElfWriteStream* stream) {
+void ElfHeader::Write(ElfWriteStream* stream) const {
+  ASSERT_EQUAL(file_offset(), 0);
+  ASSERT_EQUAL(memory_offset(), 0);
 #if defined(TARGET_ARCH_IS_32_BIT)
   uint8_t size = elf::ELFCLASS32;
 #else
@@ -1618,8 +1892,8 @@
 
   stream->WriteWord(elf::EV_CURRENT);  // Version
   stream->WriteAddr(0);                // "Entry point"
-  stream->WriteOff(program_table_file_offset_);
-  stream->WriteOff(section_table_file_offset_);
+  stream->WriteOff(program_table_.file_offset());
+  stream->WriteOff(section_table_.file_offset());
 
 #if defined(TARGET_ARCH_ARM)
   uword flags = elf::EF_ARM_ABI | (TargetCPUFeatures::hardfp_supported()
@@ -1631,26 +1905,32 @@
   stream->WriteWord(flags);
 
   stream->WriteHalf(sizeof(elf::ElfHeader));
-  stream->WriteHalf(sizeof(elf::ProgramHeader));
-  stream->WriteHalf(segments_.length());
-  stream->WriteHalf(sizeof(elf::SectionHeader));
-  stream->WriteHalf(sections_.length());
-  stream->WriteHalf(shstrtab_->index());
-
-  ASSERT_EQUAL(stream->Position(), sizeof(elf::ElfHeader));
+  stream->WriteHalf(program_table_.entry_size);
+  stream->WriteHalf(program_table_.SegmentCount());
+  stream->WriteHalf(section_table_.entry_size);
+  stream->WriteHalf(section_table_.SectionCount());
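+  // e_shstrndx: index of the section name string table in the section
+  // header table.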
+  stream->WriteHalf(stream->elf().section_table().StringTableIndex());
 }
 
-void Elf::WriteProgramTable(ElfWriteStream* stream) {
-  ASSERT(program_table_file_size_ >= 0);  // Check for finalization.
-  ASSERT(stream->Position() == program_table_file_offset_);
+void ProgramTable::Write(ElfWriteStream* stream) const {
+  ASSERT(segments_.length() > 0);
+  // Make sure all relevant segments were created by checking the type of the
+  // first.
+  ASSERT(segments_[0]->type == elf::ProgramHeaderType::PT_PHDR);
+  const intptr_t start = stream->Position();
+  // Should be immediately following the ELF header.
+  ASSERT_EQUAL(start, sizeof(elf::ElfHeader));
 #if defined(DEBUG)
   // Here, we count the number of times that a PT_LOAD writable segment is
-  // followed by a non-writable segment. We initialize last_writable to true so
-  // that we catch the case where the first segment is non-writable.
+  // followed by a non-writable segment. We initialize last_writable to true
+  // so that we catch the case where the first segment is non-writable.
   bool last_writable = true;
   int non_writable_groups = 0;
 #endif
-  for (auto const segment : segments_) {
+  for (intptr_t i = 0; i < segments_.length(); i++) {
+    const Segment* const segment = segments_[i];
+    ASSERT(segment->type != elf::ProgramHeaderType::PT_NULL);
+    ASSERT_EQUAL(i == 0, segment->type == elf::ProgramHeaderType::PT_PHDR);
 #if defined(DEBUG)
     if (segment->type == elf::ProgramHeaderType::PT_LOAD) {
       if (last_writable && !segment->IsWritable()) {
@@ -1662,7 +1942,7 @@
     const intptr_t start = stream->Position();
     segment->WriteProgramHeader(stream);
     const intptr_t end = stream->Position();
-    ASSERT_EQUAL(end - start, sizeof(elf::ProgramHeader));
+    ASSERT_EQUAL(end - start, entry_size);
   }
 #if defined(DEBUG)
   // All PT_LOAD non-writable segments must be contiguous. If not, some older
@@ -1672,49 +1952,16 @@
 #endif
 }
 
-void Elf::WriteSectionTable(ElfWriteStream* stream) {
-  ASSERT(section_table_file_size_ >= 0);  // Check for finalization.
-  stream->Align(kElfSectionTableAlignment);
-  ASSERT_EQUAL(stream->Position(), section_table_file_offset_);
-
-  for (auto const section : sections_) {
+void SectionTable::Write(ElfWriteStream* stream) const {
+  for (intptr_t i = 0; i < sections_.length(); i++) {
+    const Section* const section = sections_[i];
+    ASSERT_EQUAL(i == 0, section->IsReservedSection());
+    ASSERT_EQUAL(section->index, i);
+    ASSERT(section->link < sections_.length());
     const intptr_t start = stream->Position();
     section->WriteSectionHeader(stream);
     const intptr_t end = stream->Position();
-    ASSERT_EQUAL(end - start, sizeof(elf::SectionHeader));
-  }
-}
-
-void Elf::WriteSections(ElfWriteStream* stream) {
-  ASSERT(section_table_file_size_ >= 0);  // Check for finalization.
-  // Should be writing the first section immediately after the program table.
-  ASSERT_EQUAL(stream->Position(),
-               program_table_file_offset_ + program_table_file_size_);
-  // Skip the reserved first section, as its alignment is 0 (which will cause
-  // stream->Align() to fail) and it never contains file contents anyway.
-  ASSERT_EQUAL(static_cast<uint32_t>(sections_[0]->type),
-               static_cast<uint32_t>(elf::SectionHeaderType::SHT_NULL));
-  ASSERT_EQUAL(sections_[0]->alignment, 0);
-  // The program table is considered part of the first load segment (the
-  // second segment in segments_), so other sections in the same segment should
-  // not have extra segment alignment added.
-  ASSERT(segments_[0]->type != elf::ProgramHeaderType::PT_LOAD);
-  const Segment* current_segment = segments_[1];
-  ASSERT(current_segment->type == elf::ProgramHeaderType::PT_LOAD);
-  for (intptr_t i = 1; i < sections_.length(); i++) {
-    Section* section = sections_[i];
-    stream->Align(section->alignment);
-    if (section->IsAllocated() && section->load_segment != current_segment) {
-      // Changing segments, so align accordingly.
-      current_segment = section->load_segment;
-      ASSERT(current_segment->type == elf::ProgramHeaderType::PT_LOAD);
-      const intptr_t load_align = Segment::Alignment(current_segment->type);
-      stream->Align(load_align);
-    }
-    ASSERT_EQUAL(stream->Position(), section->file_offset());
-    section->Write(stream);
-    ASSERT_EQUAL(stream->Position(),
-                 section->file_offset() + section->FileSize());
+    ASSERT_EQUAL(end - start, entry_size);
   }
 }
 
diff --git a/runtime/vm/elf.h b/runtime/vm/elf.h
index c98f423..19400f7 100644
--- a/runtime/vm/elf.h
+++ b/runtime/vm/elf.h
@@ -16,10 +16,9 @@
 #if defined(DART_PRECOMPILER)
 
 class Dwarf;
-class ElfWriteStream;
+class ProgramTable;
 class Section;
-class Segment;
-class StringTable;
+class SectionTable;
 class SymbolTable;
 
 class Elf : public ZoneAllocated {
@@ -34,15 +33,21 @@
 
   Elf(Zone* zone, BaseWriteStream* stream, Type type, Dwarf* dwarf = nullptr);
 
+  // The max page size on all supported architectures. Used to determine
+  // the alignment of load segments so that they are guaranteed to be
+  // page-aligned; no ELF section or segment should have a larger alignment.
   static constexpr intptr_t kPageSize = 16 * KB;
-  static constexpr uword kNoSectionStart = 0;
 
   bool IsStripped() const { return dwarf_ == nullptr; }
 
   Zone* zone() const { return zone_; }
   const Dwarf* dwarf() const { return dwarf_; }
   Dwarf* dwarf() { return dwarf_; }
-  const SymbolTable* symtab() const { return symtab_; }
+  const SymbolTable& symtab() const {
+    ASSERT(symtab_ != nullptr);
+    return *symtab_;
+  }
+  const SectionTable& section_table() const { return *section_table_; }
 
   // Stores the information needed to appropriately generate a
   // relocation from the target to the source at the given section offset.
@@ -81,36 +86,18 @@
   void Finalize();
 
  private:
-  static constexpr const char* kBuildIdNoteName = ".note.gnu.build-id";
-
-  // Adds the section and also creates a PT_LOAD segment for the section if it
-  // is an allocated section.
-  //
-  // For allocated sections, if a symbol_name is provided, a symbol for the
-  // section will be added to the dynamic table (if allocated) and static
-  // table (if not stripped) during finalization.
-  void AddSection(Section* section,
-                  const char* name,
-                  const char* symbol_name = nullptr);
-
-  const Section* FindSectionBySymbolName(const char* symbol_name) const;
+  static constexpr const char kBuildIdNoteName[] = ".note.gnu.build-id";
+  static constexpr const char kTextName[] = ".text";
+  static constexpr const char kDataName[] = ".rodata";
+  static constexpr const char kBssName[] = ".bss";
+  static constexpr const char kDynamicTableName[] = ".dynamic";
 
   void CreateBSS();
   void GenerateBuildId();
-
-  void OrderSectionsAndCreateSegments();
-
-  void FinalizeSymbols();
+  void InitializeSymbolTables();
   void FinalizeDwarfSections();
-  void FinalizeProgramTable();
-  void ComputeOffsets();
-
   void FinalizeEhFrame();
-
-  void WriteHeader(ElfWriteStream* stream);
-  void WriteSectionTable(ElfWriteStream* stream);
-  void WriteProgramTable(ElfWriteStream* stream);
-  void WriteSections(ElfWriteStream* stream);
+  void ComputeOffsets();
 
   Zone* const zone_;
   BaseWriteStream* const unwrapped_stream_;
@@ -120,33 +107,18 @@
   // the static symbol table (and its corresponding string table).
   Dwarf* const dwarf_;
 
-  // All our strings would fit in a single page. However, we use separate
-  // .shstrtab and .dynstr to work around a bug in Android's strip utility.
-  StringTable* const shstrtab_;
-  StringTable* const dynstrtab_;
-  SymbolTable* const dynsym_;
+  // Contains all sections that will have entries in the section header table.
+  SectionTable* const section_table_;
+
+  // Contains all segments in the program header table. Set after finalizing
+  // the section table.
+  ProgramTable* program_table_ = nullptr;
 
   // The static tables are always created for use in relocation calculations,
   // even though they may not end up in the final ELF file.
-  StringTable* const strtab_;
-  SymbolTable* const symtab_;
+  SymbolTable* symtab_ = nullptr;
 
-  // We always create a BSS section for all Elf files to keep memory offsets
-  // consistent, though it is NOBITS for separate debugging information.
-  Section* bss_ = nullptr;
-
-  // We currently create a GNU build ID for all ELF snapshots and associated
-  // debugging information.
-  Section* build_id_ = nullptr;
-
-  GrowableArray<Section*> sections_;
-  GrowableArray<Segment*> segments_;
-
-  intptr_t memory_offset_;
-  intptr_t section_table_file_offset_ = -1;
-  intptr_t section_table_file_size_ = -1;
-  intptr_t program_table_file_offset_ = -1;
-  intptr_t program_table_file_size_ = -1;
+  friend class SectionTable;  // For section name static fields.
 };
 
 #endif  // DART_PRECOMPILER
diff --git a/runtime/vm/hash_map.h b/runtime/vm/hash_map.h
index 3273f4c..ec5ea08 100644
--- a/runtime/vm/hash_map.h
+++ b/runtime/vm/hash_map.h
@@ -631,7 +631,7 @@
     DirectChainedHashMap<IntKeyRawPointerValueTrait<V> >::Insert(pair);
   }
 
-  inline V Lookup(const Key& key) {
+  inline V Lookup(const Key& key) const {
     Pair* pair =
         DirectChainedHashMap<IntKeyRawPointerValueTrait<V> >::Lookup(key);
     if (pair == NULL) {
@@ -641,7 +641,7 @@
     }
   }
 
-  inline Pair* LookupPair(const Key& key) {
+  inline Pair* LookupPair(const Key& key) const {
     return DirectChainedHashMap<IntKeyRawPointerValueTrait<V> >::Lookup(key);
   }
 
diff --git a/runtime/vm/heap/safepoint.cc b/runtime/vm/heap/safepoint.cc
index 4a7e5de..52da48b 100644
--- a/runtime/vm/heap/safepoint.cc
+++ b/runtime/vm/heap/safepoint.cc
@@ -155,7 +155,7 @@
       if (!Thread::IsAtSafepoint(level_, state)) {
         // Send OOB message to get it to safepoint.
         if (current->IsMutatorThread()) {
-          current->ScheduleInterruptsLocked(Thread::kVMInterrupt);
+          current->ScheduleInterrupts(Thread::kVMInterrupt);
         }
         MonitorLocker sl(&parked_lock_);
         num_threads_not_parked_++;
diff --git a/runtime/vm/image_snapshot.cc b/runtime/vm/image_snapshot.cc
index bee78c8..e465cb6 100644
--- a/runtime/vm/image_snapshot.cc
+++ b/runtime/vm/image_snapshot.cc
@@ -613,7 +613,6 @@
       FLAG_precompiled_mode && FLAG_use_bare_instructions;
 
   // Start snapshot at page boundary.
-  ASSERT(ImageWriter::kTextAlignment >= VirtualMemory::PageSize());
   if (!EnterSection(ProgramSection::Text, vm, ImageWriter::kTextAlignment)) {
     return;
   }
diff --git a/runtime/vm/image_snapshot.h b/runtime/vm/image_snapshot.h
index 3b2ed09..09a1555 100644
--- a/runtime/vm/image_snapshot.h
+++ b/runtime/vm/image_snapshot.h
@@ -241,15 +241,8 @@
   // ROData sections contain objects wrapped in an Image object.
   static constexpr intptr_t kRODataAlignment = kMaxObjectAlignment;
   // Text sections contain objects (even in bare instructions mode) wrapped
-  // in an Image object, and for now we also align them to the same page
-  // size assumed by Elf objects.
-  static constexpr intptr_t kTextAlignment = 16 * KB;
-#if defined(DART_PRECOMPILER)
-  static_assert(kTextAlignment == Elf::kPageSize,
-                "Page alignment must be consistent with max object alignment");
-  static_assert(Elf::kPageSize >= kMaxObjectAlignment,
-                "Page alignment must be consistent with max object alignment");
-#endif
+  // in an Image object.
+  static constexpr intptr_t kTextAlignment = kMaxObjectAlignment;
 
   void ResetOffsets() {
     next_data_offset_ = Image::kHeaderSize;
diff --git a/runtime/vm/isolate.cc b/runtime/vm/isolate.cc
index cb00d29..8993e5f 100644
--- a/runtime/vm/isolate.cc
+++ b/runtime/vm/isolate.cc
@@ -867,19 +867,6 @@
   return Isolate::IsSystemIsolate(isolate);
 }
 
-NoOOBMessageScope::NoOOBMessageScope(Thread* thread)
-    : ThreadStackResource(thread) {
-  if (thread->isolate() != nullptr) {
-    thread->DeferOOBMessageInterrupts();
-  }
-}
-
-NoOOBMessageScope::~NoOOBMessageScope() {
-  if (thread()->isolate() != nullptr) {
-    thread()->RestoreOOBMessageInterrupts();
-  }
-}
-
 Bequest::~Bequest() {
   if (handle_ == nullptr) {
     return;
diff --git a/runtime/vm/isolate.h b/runtime/vm/isolate.h
index c17a81d..4c2d9f5 100644
--- a/runtime/vm/isolate.h
+++ b/runtime/vm/isolate.h
@@ -134,16 +134,6 @@
   DISALLOW_COPY_AND_ASSIGN(LambdaCallable);
 };
 
-// Disallow OOB message handling within this scope.
-class NoOOBMessageScope : public ThreadStackResource {
- public:
-  explicit NoOOBMessageScope(Thread* thread);
-  ~NoOOBMessageScope();
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(NoOOBMessageScope);
-};
-
 // Fixed cache for exception handler lookup.
 typedef FixedCache<intptr_t, ExceptionHandlerInfo, 16> HandlerInfoCache;
 // Fixed cache for catch entry state lookup.
diff --git a/runtime/vm/isolate_test.cc b/runtime/vm/isolate_test.cc
index 9402106..2003591 100644
--- a/runtime/vm/isolate_test.cc
+++ b/runtime/vm/isolate_test.cc
@@ -145,106 +145,4 @@
   barrier.Exit();
 }
 
-class IsolateTestHelper {
- public:
-  static uword GetStackLimit(Thread* thread) { return thread->stack_limit_; }
-  static uword GetSavedStackLimit(Thread* thread) {
-    return thread->saved_stack_limit_;
-  }
-  static uword GetDeferredInterruptsMask(Thread* thread) {
-    return thread->deferred_interrupts_mask_;
-  }
-  static uword GetDeferredInterrupts(Thread* thread) {
-    return thread->deferred_interrupts_;
-  }
-};
-
-TEST_CASE(NoOOBMessageScope) {
-  // Finish any GC in progress so that no kVMInterrupt is added for GC reasons.
-  {
-    TransitionNativeToVM transition(thread);
-    GCTestHelper::CollectAllGarbage();
-    const Error& error = Error::Handle(thread->HandleInterrupts());
-    RELEASE_ASSERT(error.IsNull());
-  }
-
-  // EXPECT_EQ is picky about type agreement for its arguments.
-  const uword kZero = 0;
-  const uword kMessageInterrupt = Thread::kMessageInterrupt;
-  const uword kVMInterrupt = Thread::kVMInterrupt;
-  uword stack_limit;
-  uword interrupt_bits;
-
-  // Initially no interrupts are scheduled or deferred.
-  EXPECT_EQ(IsolateTestHelper::GetStackLimit(thread),
-            IsolateTestHelper::GetSavedStackLimit(thread));
-  EXPECT_EQ(kZero, IsolateTestHelper::GetDeferredInterruptsMask(thread));
-  EXPECT_EQ(kZero, IsolateTestHelper::GetDeferredInterrupts(thread));
-
-  {
-    // Defer message interrupts.
-    NoOOBMessageScope no_msg_scope(thread);
-    EXPECT_EQ(IsolateTestHelper::GetStackLimit(thread),
-              IsolateTestHelper::GetSavedStackLimit(thread));
-    EXPECT_EQ(kMessageInterrupt,
-              IsolateTestHelper::GetDeferredInterruptsMask(thread));
-    EXPECT_EQ(kZero, IsolateTestHelper::GetDeferredInterrupts(thread));
-
-    // Schedule a message, it is deferred.
-    thread->ScheduleInterrupts(Thread::kMessageInterrupt);
-    EXPECT_EQ(IsolateTestHelper::GetStackLimit(thread),
-              IsolateTestHelper::GetSavedStackLimit(thread));
-    EXPECT_EQ(kMessageInterrupt,
-              IsolateTestHelper::GetDeferredInterruptsMask(thread));
-    EXPECT_EQ(kMessageInterrupt,
-              IsolateTestHelper::GetDeferredInterrupts(thread));
-
-    // Schedule a vm interrupt, it is not deferred.
-    thread->ScheduleInterrupts(Thread::kVMInterrupt);
-    stack_limit = IsolateTestHelper::GetStackLimit(thread);
-    EXPECT_NE(stack_limit, IsolateTestHelper::GetSavedStackLimit(thread));
-    EXPECT((stack_limit & Thread::kVMInterrupt) != 0);
-    EXPECT_EQ(kMessageInterrupt,
-              IsolateTestHelper::GetDeferredInterruptsMask(thread));
-    EXPECT_EQ(kMessageInterrupt,
-              IsolateTestHelper::GetDeferredInterrupts(thread));
-
-    // Clear the vm interrupt.  Message is still deferred.
-    interrupt_bits = thread->GetAndClearInterrupts();
-    EXPECT_EQ(kVMInterrupt, interrupt_bits);
-    EXPECT_EQ(IsolateTestHelper::GetStackLimit(thread),
-              IsolateTestHelper::GetSavedStackLimit(thread));
-    EXPECT_EQ(kMessageInterrupt,
-              IsolateTestHelper::GetDeferredInterruptsMask(thread));
-    EXPECT_EQ(kMessageInterrupt,
-              IsolateTestHelper::GetDeferredInterrupts(thread));
-  }
-
-  // Restore message interrupts.  Message is now pending.
-  stack_limit = IsolateTestHelper::GetStackLimit(thread);
-  EXPECT_NE(stack_limit, IsolateTestHelper::GetSavedStackLimit(thread));
-  EXPECT((stack_limit & Thread::kMessageInterrupt) != 0);
-  EXPECT_EQ(kZero, IsolateTestHelper::GetDeferredInterruptsMask(thread));
-  EXPECT_EQ(kZero, IsolateTestHelper::GetDeferredInterrupts(thread));
-
-  {
-    // Defer message interrupts, again.  The pending interrupt is deferred.
-    NoOOBMessageScope no_msg_scope(thread);
-    EXPECT_EQ(IsolateTestHelper::GetStackLimit(thread),
-              IsolateTestHelper::GetSavedStackLimit(thread));
-    EXPECT_EQ(kMessageInterrupt,
-              IsolateTestHelper::GetDeferredInterruptsMask(thread));
-    EXPECT_EQ(kMessageInterrupt,
-              IsolateTestHelper::GetDeferredInterrupts(thread));
-  }
-
-  // Restore, then clear interrupts.  The world is as it was.
-  interrupt_bits = thread->GetAndClearInterrupts();
-  EXPECT_EQ(kMessageInterrupt, interrupt_bits);
-  EXPECT_EQ(IsolateTestHelper::GetStackLimit(thread),
-            IsolateTestHelper::GetSavedStackLimit(thread));
-  EXPECT_EQ(kZero, IsolateTestHelper::GetDeferredInterruptsMask(thread));
-  EXPECT_EQ(kZero, IsolateTestHelper::GetDeferredInterrupts(thread));
-}
-
 }  // namespace dart
diff --git a/runtime/vm/object.cc b/runtime/vm/object.cc
index 7c9214a..8c08ad9 100644
--- a/runtime/vm/object.cc
+++ b/runtime/vm/object.cc
@@ -11162,7 +11162,6 @@
   }
 #endif  // !defined(DART_PRECOMPILED_RUNTIME)
 
-  NoOOBMessageScope no_msg_scope(thread);
   NoReloadScope no_reload_scope(thread);
   const Function& initializer = Function::Handle(EnsureInitializerFunction());
   return DartEntry::InvokeFunction(initializer, Object::empty_array());
diff --git a/runtime/vm/object_graph.cc b/runtime/vm/object_graph.cc
index d924ea2..e05b2ef 100644
--- a/runtime/vm/object_graph.cc
+++ b/runtime/vm/object_graph.cc
@@ -1162,11 +1162,10 @@
 
         intptr_t field_count = 0;
         intptr_t min_offset = kIntptrMax;
-        for (intptr_t j = 0; OffsetsTable::offsets_table[j].class_id != -1;
-             j++) {
-          if (OffsetsTable::offsets_table[j].class_id == cid) {
+        for (const auto& entry : OffsetsTable::offsets_table()) {
+          if (entry.class_id == cid) {
             field_count++;
-            intptr_t offset = OffsetsTable::offsets_table[j].offset;
+            intptr_t offset = entry.offset;
             min_offset = Utils::Minimum(min_offset, offset);
           }
         }
@@ -1187,16 +1186,15 @@
         }
 
         WriteUnsigned(field_count);
-        for (intptr_t j = 0; OffsetsTable::offsets_table[j].class_id != -1;
-             j++) {
-          if (OffsetsTable::offsets_table[j].class_id == cid) {
+        for (const auto& entry : OffsetsTable::offsets_table()) {
+          if (entry.class_id == cid) {
             intptr_t flags = 1;  // Strong.
             WriteUnsigned(flags);
-            intptr_t offset = OffsetsTable::offsets_table[j].offset;
+            intptr_t offset = entry.offset;
             intptr_t index = (offset - min_offset) / kCompressedWordSize;
             ASSERT(index >= 0);
             WriteUnsigned(index);
-            WriteUtf8(OffsetsTable::offsets_table[j].field_name);
+            WriteUtf8(entry.field_name);
             WriteUtf8("");  // Reserved
           }
         }
diff --git a/runtime/vm/raw_object_fields.cc b/runtime/vm/raw_object_fields.cc
index 9c0d887a..0c64af6 100644
--- a/runtime/vm/raw_object_fields.cc
+++ b/runtime/vm/raw_object_fields.cc
@@ -231,8 +231,7 @@
 #define NON_HEADER_HASH_CLASSES_AND_FIELDS(F) F(String, hash_)
 
 OffsetsTable::OffsetsTable(Zone* zone) : cached_offsets_(zone) {
-  for (intptr_t i = 0; offsets_table[i].class_id != -1; ++i) {
-    OffsetsTableEntry entry = offsets_table[i];
+  for (const OffsetsTableEntry& entry : OffsetsTable::offsets_table()) {
     cached_offsets_.Insert({{entry.class_id, entry.offset}, entry.field_name});
   }
 }
@@ -242,37 +241,46 @@
   return cached_offsets_.LookupValue({class_id, offset});
 }
 
-#define DEFINE_OFFSETS_TABLE_ENTRY(class_name, field_name)                     \
-  {class_name::kClassId, #field_name,                                          \
-   OFFSET_OF(Untagged##class_name, field_name)},
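+// Holds {class id, field name, field offset} entries. Populated by
+// OffsetsTable::Init() and cleared by OffsetsTable::Cleanup(), which are
+// expected to run during VM startup and shutdown respectively.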
+static MallocGrowableArray<OffsetsTable::OffsetsTableEntry> field_offsets_table;
 
-// clang-format off
-const OffsetsTable::OffsetsTableEntry OffsetsTable::offsets_table[] = {
-    COMMON_CLASSES_AND_FIELDS(DEFINE_OFFSETS_TABLE_ENTRY)
+const MallocGrowableArray<OffsetsTable::OffsetsTableEntry>&
+OffsetsTable::offsets_table() {
+  ASSERT(field_offsets_table.length() > 0);  // Initialized.
+  return field_offsets_table;
+}
+
+void OffsetsTable::Init() {
+#define DEFINE_OFFSETS_TABLE_ENTRY(class_name, field_name)                     \
+  field_offsets_table.Add({class_name::kClassId, #field_name,                  \
+                           OFFSET_OF(Untagged##class_name, field_name)});
+
+  COMMON_CLASSES_AND_FIELDS(DEFINE_OFFSETS_TABLE_ENTRY)
 #if !defined(PRODUCT)
-    NON_PRODUCT_CLASSES_AND_FIELDS(DEFINE_OFFSETS_TABLE_ENTRY)
+  NON_PRODUCT_CLASSES_AND_FIELDS(DEFINE_OFFSETS_TABLE_ENTRY)
 #endif
 
 #if !defined(HASH_IN_OBJECT_HEADER)
-    NON_HEADER_HASH_CLASSES_AND_FIELDS(DEFINE_OFFSETS_TABLE_ENTRY)
+  NON_HEADER_HASH_CLASSES_AND_FIELDS(DEFINE_OFFSETS_TABLE_ENTRY)
 #endif
 
 #if defined(DART_PRECOMPILED_RUNTIME)
-    AOT_CLASSES_AND_FIELDS(DEFINE_OFFSETS_TABLE_ENTRY)
+  AOT_CLASSES_AND_FIELDS(DEFINE_OFFSETS_TABLE_ENTRY)
 #if !defined(PRODUCT)
-    AOT_NON_PRODUCT_CLASSES_AND_FIELDS(DEFINE_OFFSETS_TABLE_ENTRY)
+  AOT_NON_PRODUCT_CLASSES_AND_FIELDS(DEFINE_OFFSETS_TABLE_ENTRY)
 #endif
 #else
-    JIT_CLASSES_AND_FIELDS(DEFINE_OFFSETS_TABLE_ENTRY)
+  JIT_CLASSES_AND_FIELDS(DEFINE_OFFSETS_TABLE_ENTRY)
 #if !defined(PRODUCT)
-    JIT_NON_PRODUCT_CLASSES_AND_FIELDS(DEFINE_OFFSETS_TABLE_ENTRY)
+  JIT_NON_PRODUCT_CLASSES_AND_FIELDS(DEFINE_OFFSETS_TABLE_ENTRY)
 #endif
 #endif
-    {-1, nullptr, -1}
-};
-// clang-format on
 
 #undef DEFINE_OFFSETS_TABLE_ENTRY
+}
+
+void OffsetsTable::Cleanup() {
+  field_offsets_table.Clear();
+}
 
 #endif
 
diff --git a/runtime/vm/raw_object_fields.h b/runtime/vm/raw_object_fields.h
index 6a4f572..be56a51 100644
--- a/runtime/vm/raw_object_fields.h
+++ b/runtime/vm/raw_object_fields.h
@@ -30,12 +30,14 @@
   const char* FieldNameForOffset(intptr_t cid, intptr_t offset);
 
   struct OffsetsTableEntry {
-    const intptr_t class_id;
-    const char* const field_name;
-    const intptr_t offset;
+    intptr_t class_id;
+    const char* field_name;
+    intptr_t offset;
   };
 
-  static const OffsetsTableEntry offsets_table[];
+  static const MallocGrowableArray<OffsetsTableEntry>& offsets_table();
+  static void Init();
+  static void Cleanup();
 
  private:
   struct IntAndIntToStringMapTraits {
@@ -71,6 +73,9 @@
   const char* FieldNameForOffset(intptr_t cid, intptr_t offset) {
     return nullptr;
   }
+
+  static void Init() {}
+  static void Cleanup() {}
 };
 
 #endif
diff --git a/runtime/vm/runtime_entry.cc b/runtime/vm/runtime_entry.cc
index 856d43f..8a5ad77 100644
--- a/runtime/vm/runtime_entry.cc
+++ b/runtime/vm/runtime_entry.cc
@@ -1613,6 +1613,7 @@
 enum class MissHandler {
   kInlineCacheMiss,
   kSwitchableCallMiss,
+  kFixCallersTargetMonomorphic,
 };
 
 // Handles updating of type feedback and possible patching of instance calls.
@@ -2132,12 +2133,21 @@
                                      const Function& target) {
-  // In JIT we can have two different miss handlers to which we return slightly
-  // differently.
+  // In JIT we can have three different miss handlers to which we return
+  // slightly differently.
-  if (miss_handler_ == MissHandler::kSwitchableCallMiss) {
-    arguments_.SetArgAt(0, stub);  // Second return value.
-    arguments_.SetReturn(data);
-  } else {
-    ASSERT(miss_handler_ == MissHandler::kInlineCacheMiss);
-    arguments_.SetReturn(target);
+  switch (miss_handler_) {
+    case MissHandler::kSwitchableCallMiss: {
+      arguments_.SetArgAt(0, stub);  // Second return value.
+      arguments_.SetReturn(data);
+      break;
+    }
+    case MissHandler::kFixCallersTargetMonomorphic: {
+      arguments_.SetArgAt(1, data);  // Second return value.
+      arguments_.SetReturn(stub);
+      break;
+    }
+    case MissHandler::kInlineCacheMiss: {
+      arguments_.SetReturn(target);
+      break;
+    }
   }
 }
 
@@ -2964,49 +2974,25 @@
 
 // The caller must be a monomorphic call from unoptimized code.
 // Patch call to point to new target.
-DEFINE_RUNTIME_ENTRY(FixCallersTargetMonomorphic, 0) {
+DEFINE_RUNTIME_ENTRY(FixCallersTargetMonomorphic, 2) {
 #if !defined(DART_PRECOMPILED_RUNTIME)
-  StackFrameIterator iterator(ValidationPolicy::kDontValidateFrames, thread,
-                              StackFrameIterator::kNoCrossThreadIteration);
-  StackFrame* frame = iterator.NextFrame();
-  ASSERT(frame != NULL);
-  while (frame->IsStubFrame() || frame->IsExitFrame()) {
-    frame = iterator.NextFrame();
-    ASSERT(frame != NULL);
-  }
-  if (frame->IsEntryFrame()) {
-    // Since function's current code is always unpatched, the entry frame always
-    // calls to unpatched code.
-    UNREACHABLE();
-  }
-  ASSERT(frame->IsDartFrame());
-  const Code& caller_code = Code::Handle(zone, frame->LookupDartCode());
-  RELEASE_ASSERT(!caller_code.is_optimized());
+  const Instance& receiver = Instance::CheckedHandle(zone, arguments.ArgAt(0));
+  const Array& switchable_call_data =
+      Array::CheckedHandle(zone, arguments.ArgAt(1));
 
-  Object& cache = Object::Handle(zone);
-  const Code& old_target_code = Code::Handle(
-      zone, CodePatcher::GetInstanceCallAt(frame->pc(), caller_code, &cache));
-  const Function& target_function =
-      Function::Handle(zone, old_target_code.function());
-  const Code& current_target_code =
-      Code::Handle(zone, target_function.EnsureHasCode());
-  CodePatcher::PatchInstanceCallAt(frame->pc(), caller_code, cache,
-                                   current_target_code);
-  if (FLAG_trace_patching) {
-    OS::PrintErr(
-        "FixCallersTargetMonomorphic: caller %#" Px
-        " "
-        "target '%s' -> %#" Px " (%s)\n",
-        frame->pc(), target_function.ToFullyQualifiedCString(),
-        current_target_code.EntryPoint(),
-        current_target_code.is_optimized() ? "optimized" : "unoptimized");
-  }
-  // With isolate groups enabled, it is possible that the target code
-  // has been deactivated just now(as a result of re-optimizatin for example),
-  // which will result in another run through FixCallersTarget.
-  ASSERT(!current_target_code.IsDisabled() ||
-         IsolateGroup::AreIsolateGroupsEnabled());
-  arguments.SetReturn(current_target_code);
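+  // This now reuses the switchable-call miss handling machinery: resolve the
+  // call for the given receiver, then return the stub and its data (as two
+  // return values) for the caller to be patched with.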
+  DartFrameIterator iterator(thread,
+                             StackFrameIterator::kNoCrossThreadIteration);
+  StackFrame* caller_frame = iterator.NextFrame();
+  const auto& caller_code = Code::Handle(zone, caller_frame->LookupDartCode());
+  const auto& caller_function =
+      Function::Handle(zone, caller_frame->LookupDartFunction());
+
+  GrowableArray<const Instance*> caller_arguments(1);
+  caller_arguments.Add(&receiver);
+  PatchableCallHandler handler(
+      thread, caller_arguments, MissHandler::kFixCallersTargetMonomorphic,
+      arguments, caller_frame, caller_code, caller_function);
+  handler.ResolveSwitchAndReturn(switchable_call_data);
 #else
   UNREACHABLE();
 #endif
diff --git a/runtime/vm/thread.cc b/runtime/vm/thread.cc
index 16a1f8f..45d99f9 100644
--- a/runtime/vm/thread.cc
+++ b/runtime/vm/thread.cc
@@ -91,9 +91,6 @@
       no_safepoint_scope_depth_(0),
 #endif
       reusable_handles_(),
-      defer_oob_messages_count_(0),
-      deferred_interrupts_mask_(0),
-      deferred_interrupts_(0),
       stack_overflow_count_(0),
       hierarchy_info_(NULL),
       type_usage_info_(NULL),
@@ -398,7 +395,7 @@
   MonitorLocker ml(&thread_lock_);
   if (!HasScheduledInterrupts()) {
     // No interrupt pending, set stack_limit_ too.
-    stack_limit_ = limit;
+    stack_limit_.store(limit);
   }
   saved_stack_limit_ = limit;
 }
@@ -408,95 +405,33 @@
 }
 
 void Thread::ScheduleInterrupts(uword interrupt_bits) {
-  MonitorLocker ml(&thread_lock_);
-  ScheduleInterruptsLocked(interrupt_bits);
-}
-
-void Thread::ScheduleInterruptsLocked(uword interrupt_bits) {
-  ASSERT(thread_lock_.IsOwnedByCurrentThread());
   ASSERT((interrupt_bits & ~kInterruptsMask) == 0);  // Must fit in mask.
 
-  // Check to see if any of the requested interrupts should be deferred.
-  uword defer_bits = interrupt_bits & deferred_interrupts_mask_;
-  if (defer_bits != 0) {
-    deferred_interrupts_ |= defer_bits;
-    interrupt_bits &= ~deferred_interrupts_mask_;
-    if (interrupt_bits == 0) {
-      return;
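+  // Lock-free update of the stack limit: either install an interrupt-tagged
+  // limit (when no interrupt is pending) or OR in the new bits (when one is).
+  // On failure, compare_exchange_weak reloads old_limit, so new_limit is
+  // recomputed from fresh state on each retry.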
+  uword old_limit = stack_limit_.load();
+  uword new_limit;
+  do {
+    if (old_limit == saved_stack_limit_) {
+      new_limit = (kInterruptStackLimit & ~kInterruptsMask) | interrupt_bits;
+    } else {
+      new_limit = old_limit | interrupt_bits;
     }
-  }
-
-  if (stack_limit_ == saved_stack_limit_) {
-    stack_limit_ = (kInterruptStackLimit & ~kInterruptsMask) | interrupt_bits;
-  } else {
-    stack_limit_ = stack_limit_ | interrupt_bits;
-  }
+  } while (!stack_limit_.compare_exchange_weak(old_limit, new_limit));
 }
 
 uword Thread::GetAndClearInterrupts() {
-  MonitorLocker ml(&thread_lock_);
-  if (stack_limit_ == saved_stack_limit_) {
-    return 0;  // No interrupt was requested.
-  }
-  uword interrupt_bits = stack_limit_ & kInterruptsMask;
-  stack_limit_ = saved_stack_limit_;
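+  // Atomically swap the interrupt-tagged stack limit back to the saved limit,
+  // accumulating any interrupt bits observed across retries so that no
+  // concurrently scheduled interrupt is lost.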
+  uword interrupt_bits = 0;
+  uword old_limit = stack_limit_.load();
+  uword new_limit = saved_stack_limit_;
+  do {
+    if (old_limit == saved_stack_limit_) {
+      return interrupt_bits;
+    }
+    interrupt_bits = interrupt_bits | (old_limit & kInterruptsMask);
+  } while (!stack_limit_.compare_exchange_weak(old_limit, new_limit));
+
   return interrupt_bits;
 }
 
-void Thread::DeferOOBMessageInterrupts() {
-  MonitorLocker ml(&thread_lock_);
-  defer_oob_messages_count_++;
-  if (defer_oob_messages_count_ > 1) {
-    // OOB message interrupts are already deferred.
-    return;
-  }
-  ASSERT(deferred_interrupts_mask_ == 0);
-  deferred_interrupts_mask_ = kMessageInterrupt;
-
-  if (stack_limit_ != saved_stack_limit_) {
-    // Defer any interrupts which are currently pending.
-    deferred_interrupts_ = stack_limit_ & deferred_interrupts_mask_;
-
-    // Clear deferrable interrupts, if present.
-    stack_limit_ = stack_limit_ & ~deferred_interrupts_mask_;
-
-    if ((stack_limit_ & kInterruptsMask) == 0) {
-      // No other pending interrupts.  Restore normal stack limit.
-      stack_limit_ = saved_stack_limit_;
-    }
-  }
-#if !defined(PRODUCT)
-  if (FLAG_trace_service && FLAG_trace_service_verbose) {
-    OS::PrintErr("[+%" Pd64 "ms] Isolate %s deferring OOB interrupts\n",
-                 Dart::UptimeMillis(), isolate()->name());
-  }
-#endif  // !defined(PRODUCT)
-}
-
-void Thread::RestoreOOBMessageInterrupts() {
-  MonitorLocker ml(&thread_lock_);
-  defer_oob_messages_count_--;
-  if (defer_oob_messages_count_ > 0) {
-    return;
-  }
-  ASSERT(defer_oob_messages_count_ == 0);
-  ASSERT(deferred_interrupts_mask_ == kMessageInterrupt);
-  deferred_interrupts_mask_ = 0;
-  if (deferred_interrupts_ != 0) {
-    if (stack_limit_ == saved_stack_limit_) {
-      stack_limit_ = kInterruptStackLimit & ~kInterruptsMask;
-    }
-    stack_limit_ = stack_limit_ | deferred_interrupts_;
-    deferred_interrupts_ = 0;
-  }
-#if !defined(PRODUCT)
-  if (FLAG_trace_service && FLAG_trace_service_verbose) {
-    OS::PrintErr("[+%" Pd64 "ms] Isolate %s restoring OOB interrupts\n",
-                 Dart::UptimeMillis(), isolate()->name());
-  }
-#endif  // !defined(PRODUCT)
-}
-
 ErrorPtr Thread::HandleInterrupts() {
   uword interrupt_bits = GetAndClearInterrupts();
   if ((interrupt_bits & kVMInterrupt) != 0) {
diff --git a/runtime/vm/thread.h b/runtime/vm/thread.h
index 2d80830..6e3ff67 100644
--- a/runtime/vm/thread.h
+++ b/runtime/vm/thread.h
@@ -417,11 +417,10 @@
   };
 
   void ScheduleInterrupts(uword interrupt_bits);
-  void ScheduleInterruptsLocked(uword interrupt_bits);
   ErrorPtr HandleInterrupts();
   uword GetAndClearInterrupts();
   bool HasScheduledInterrupts() const {
-    return (stack_limit_ & kInterruptsMask) != 0;
+    return (stack_limit_.load() & kInterruptsMask) != 0;
   }
 
   // Monitor corresponding to this thread.
@@ -1031,7 +1030,7 @@
   // in SIMARM(IA32) and ARM, and the same offsets in SIMARM64(X64) and ARM64.
   // We use only word-sized fields to avoid differences in struct packing on the
   // different architectures. See also CheckOffsets in dart.cc.
-  RelaxedAtomic<uword> stack_limit_;
+  volatile RelaxedAtomic<uword> stack_limit_;
   uword write_barrier_mask_;
   uword heap_base_;
   Isolate* isolate_;
@@ -1107,9 +1106,6 @@
   int32_t no_safepoint_scope_depth_;
 #endif
   VMHandles reusable_handles_;
-  intptr_t defer_oob_messages_count_;
-  uint16_t deferred_interrupts_mask_;
-  uint16_t deferred_interrupts_;
   int32_t stack_overflow_count_;
   uint32_t runtime_call_count_ = 0;
 
@@ -1205,9 +1201,6 @@
 
   static void SetCurrent(Thread* current) { OSThread::SetCurrentTLS(current); }
 
-  void DeferOOBMessageInterrupts();
-  void RestoreOOBMessageInterrupts();
-
 #define REUSABLE_FRIEND_DECLARATION(name)                                      \
   friend class Reusable##name##HandleScope;
   REUSABLE_HANDLE_LIST(REUSABLE_FRIEND_DECLARATION)
@@ -1218,9 +1211,7 @@
   friend class InterruptChecker;
   friend class Isolate;
   friend class IsolateGroup;
-  friend class IsolateTestHelper;
   friend class NoActiveIsolateScope;
-  friend class NoOOBMessageScope;
   friend class NoReloadScope;
   friend class Simulator;
   friend class StackZone;
diff --git a/sdk/lib/_internal/vm/lib/internal_patch.dart b/sdk/lib/_internal/vm/lib/internal_patch.dart
index 6cc2388..67501fb 100644
--- a/sdk/lib/_internal/vm/lib/internal_patch.dart
+++ b/sdk/lib/_internal/vm/lib/internal_patch.dart
@@ -193,6 +193,9 @@
 abstract class VMInternalsForTesting {
   // This function can be used by tests to enforce garbage collection.
   static void collectAllGarbage() native "Internal_collectAllGarbage";
+
+  static void deoptimizeFunctionsOnStack()
+      native "Internal_deoptimizeFunctionsOnStack";
 }
 
 @patch
diff --git a/tests/lib/isolate/kill_infinite_loop_in_initializer_test.dart b/tests/lib/isolate/kill_infinite_loop_in_initializer_test.dart
new file mode 100644
index 0000000..321fbda4
--- /dev/null
+++ b/tests/lib/isolate/kill_infinite_loop_in_initializer_test.dart
@@ -0,0 +1,44 @@
+// Copyright (c) 2021, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+// VMOptions=--enable-isolate-groups --experimental-enable-isolate-groups-jit
+// VMOptions=--no-enable-isolate-groups
+
+// Regression test checking that out-of-band messages (here, an isolate kill
+// request) are not blocked during lazy static field initialization.
+
+import "dart:isolate";
+import "dart:async";
+import "package:expect/expect.dart";
+import "package:async_helper/async_helper.dart";
+
+dynamic staticFieldWithBadInitializer = badInitializer();
+
+badInitializer() {
+  print("badInitializer");
+  for (;;) {}
+  return 42; // Unreachable.
+}
+
+child(message) {
+  print("child");
+  RawReceivePort port = new RawReceivePort();
+  print(staticFieldWithBadInitializer);
+  port.close(); // Unreachable.
+}
+
+void main() {
+  asyncStart();
+  Isolate.spawn(child, null).then((Isolate isolate) {
+    print("spawned");
+    late RawReceivePort exitSignal;
+    exitSignal = new RawReceivePort((_) {
+      print("onExit");
+      exitSignal.close();
+      asyncEnd();
+    });
+    isolate.addOnExitListener(exitSignal.sendPort);
+    isolate.kill(priority: Isolate.immediate);
+  });
+}
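
The kill-and-wait-for-exit sequence above is the core of the test: `Isolate.kill` with `Isolate.immediate` sends an out-of-band message that must be serviced even though the child never returns to its event loop. The same sequence can be factored into a reusable helper; a minimal sketch using only the `dart:isolate` API already shown (the helper name is hypothetical):

```dart
// Kills `isolate` immediately and completes when its exit notification
// arrives. Hypothetical helper; uses only the APIs from the test above.
import 'dart:async';
import 'dart:isolate';

Future<void> killAndAwaitExit(Isolate isolate) {
  final done = Completer<void>();
  late final RawReceivePort exitSignal;
  exitSignal = RawReceivePort((_) {
    exitSignal.close(); // Let the current isolate shut down afterwards.
    done.complete();
  });
  isolate.addOnExitListener(exitSignal.sendPort);
  isolate.kill(priority: Isolate.immediate);
  return done.future;
}
```
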
diff --git a/tests/lib/js/is_check_and_as_cast_test.dart b/tests/lib/js/is_check_and_as_cast_test.dart
index b84e625..40216d6 100644
--- a/tests/lib/js/is_check_and_as_cast_test.dart
+++ b/tests/lib/js/is_check_and_as_cast_test.dart
@@ -34,13 +34,6 @@
   external int get a;
 }
 
-@JS()
-class Baz {
-  external Baz(int a, int b);
-  external int get a;
-  external int get b;
-}
-
 // JS object literals
 @JS()
 @anonymous
@@ -60,6 +53,11 @@
 
 class DartClass {}
 
+// Defeat static type optimization by routing values through this function.
+@pragma('dart2js:noInline')
+@pragma('dart2js:assumeDynamic')
+confuse(x) => x;
+
 void main() {
   eval(r"""
     function Foo(a) {
@@ -68,11 +66,6 @@
     function Bar(a) {
       this.a = a;
     }
-    function Baz(a, b) {
-      Foo.call(this, a);
-      this.b = b;
-    }
-    Baz.prototype.__proto__ = Foo.prototype;
     var a = {
       x: 1,
     };
@@ -84,6 +77,7 @@
   // A JS class object can be checked and cast against itself.
   var foo = Foo(42);
   expect(foo is Foo, isTrue);
+  expect(confuse(foo) is Foo, isTrue);
   expect(() => (foo as Foo), returnsNormally);
 
   // Try it with dynamic.
@@ -93,64 +87,73 @@
 
   // Casts are allowed between any JS class objects.
   expect(foo is Bar, isTrue);
+  expect(confuse(foo) is Bar, isTrue);
   expect(d is Bar, isTrue);
   expect(() => (foo as Bar), returnsNormally);
   expect(() => (d as Bar), returnsNormally);
 
-  // Type-checking and casting works regardless of the inheritance chain.
-  var baz = Baz(42, 43);
-  expect(baz is Foo, isTrue);
-  expect(() => (baz as Foo), returnsNormally);
-  expect(foo is Baz, isTrue);
-  expect(() => (foo as Baz), returnsNormally);
-
   // BarCopy is the same JS class as Bar.
   var barCopy = BarCopy(42);
   expect(barCopy is Bar, isTrue);
+  expect(confuse(barCopy) is Bar, isTrue);
   expect(() => (barCopy as Bar), returnsNormally);
 
   // A JS object literal can be checked and cast against itself.
   expect(a is LiteralA, isTrue);
+  expect(confuse(a) is LiteralA, isTrue);
   expect(() => (a as LiteralA), returnsNormally);
 
   // Like class objects, casts are allowed between any object literals.
   expect(a is LiteralB, isTrue);
+  expect(confuse(a) is LiteralB, isTrue);
   expect(() => (a as LiteralB), returnsNormally);
 
   // Similarly, casts are allowed between any class objects and object literals.
   expect(foo is LiteralB, isTrue);
+  expect(confuse(foo) is LiteralB, isTrue);
   expect(() => (foo as LiteralB), returnsNormally);
   expect(a is Foo, isTrue);
+  expect(confuse(a) is Foo, isTrue);
   expect(() => (a as Foo), returnsNormally);
 
   // You cannot cast between JS interop objects and Dart objects, however.
   var dartClass = DartClass();
   expect(dartClass is Foo, isFalse);
+  expect(confuse(dartClass) is Foo, isFalse);
   expect(() => (dartClass as Foo), throws);
   expect(dartClass is LiteralA, isFalse);
+  expect(confuse(dartClass) is LiteralA, isFalse);
   expect(() => (dartClass as LiteralA), throws);
 
   expect(foo is DartClass, isFalse);
+  expect(confuse(foo) is DartClass, isFalse);
   expect(() => (foo as DartClass), throws);
   expect(a is DartClass, isFalse);
+  expect(confuse(a) is DartClass, isFalse);
   expect(() => (a as DartClass), throws);
 
   // Test that nullability is still respected with JS types.
   expect(foo is Foo?, isTrue);
+  expect(confuse(foo) is Foo?, isTrue);
   expect(() => (foo as Foo?), returnsNormally);
   Foo? nullableFoo = null;
   expect(nullableFoo is Foo?, isTrue);
+  expect(confuse(nullableFoo) is Foo?, isTrue);
   expect(() => (nullableFoo as Foo?), returnsNormally);
   expect(nullableFoo is Foo, isFalse);
+  expect(confuse(nullableFoo) is Foo, isFalse);
   expect(() => (nullableFoo as Foo),
       hasUnsoundNullSafety ? returnsNormally : throws);
 
   expect(a is LiteralA?, isTrue);
+  expect(confuse(a) is LiteralA?, isTrue);
   expect(() => (a as LiteralA?), returnsNormally);
   LiteralA? nullableA = null;
   expect(nullableA is LiteralA?, isTrue);
+  expect(confuse(nullableA) is LiteralA?, isTrue);
   expect(() => (nullableA as LiteralA?), returnsNormally);
   expect(nullableA is LiteralA, isFalse);
+  expect(confuse(nullableA) is LiteralA, isFalse);
   expect(() => (nullableA as LiteralA),
       hasUnsoundNullSafety ? returnsNormally : throws);
 }
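
The `confuse` helper added throughout this test is the standard dart2js trick for keeping type tests honest: `assumeDynamic` makes the compiler treat the parameter as if it could hold anything, and `noInline` prevents the call from being inlined and re-specialized, so each `is` check compiles to a genuine runtime check instead of being folded to a constant from static types. A minimal standalone illustration of the pattern:

```dart
// Without `confuse`, dart2js may fold `x is int` to `true` because the static
// type of `x` is known; routed through `confuse`, the check runs at runtime.
@pragma('dart2js:noInline')
@pragma('dart2js:assumeDynamic')
Object? confuse(Object? x) => x;

void main() {
  final x = 42;
  print(x is int);          // Can be constant-folded at compile time.
  print(confuse(x) is int); // Forces a genuine runtime type check.
}
```
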
diff --git a/tests/lib_2/isolate/kill_infinite_loop_in_initializer_test.dart b/tests/lib_2/isolate/kill_infinite_loop_in_initializer_test.dart
new file mode 100644
index 0000000..af8650e
--- /dev/null
+++ b/tests/lib_2/isolate/kill_infinite_loop_in_initializer_test.dart
@@ -0,0 +1,44 @@
+// Copyright (c) 2021, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+// VMOptions=--enable-isolate-groups --experimental-enable-isolate-groups-jit
+// VMOptions=--no-enable-isolate-groups
+
+// Regression test checking that out-of-band messages (here, an isolate kill
+// request) are not blocked during lazy static field initialization.
+
+import "dart:isolate";
+import "dart:async";
+import "package:expect/expect.dart";
+import "package:async_helper/async_helper.dart";
+
+dynamic staticFieldWithBadInitializer = badInitializer();
+
+badInitializer() {
+  print("badInitializer");
+  for (;;) {}
+  return 42; // Unreachable.
+}
+
+child(message) {
+  print("child");
+  RawReceivePort port = new RawReceivePort();
+  print(staticFieldWithBadInitializer);
+  port.close(); // Unreachable.
+}
+
+void main() {
+  asyncStart();
+  Isolate.spawn(child, null).then((Isolate isolate) {
+    print("spawned");
+    RawReceivePort exitSignal;
+    exitSignal = new RawReceivePort((_) {
+      print("onExit");
+      exitSignal.close();
+      asyncEnd();
+    });
+    isolate.addOnExitListener(exitSignal.sendPort);
+    isolate.kill(priority: Isolate.immediate);
+  });
+}
diff --git a/tests/lib_2/js/is_check_and_as_cast_test.dart b/tests/lib_2/js/is_check_and_as_cast_test.dart
new file mode 100644
index 0000000..fe11da5
--- /dev/null
+++ b/tests/lib_2/js/is_check_and_as_cast_test.dart
@@ -0,0 +1,133 @@
+// Copyright (c) 2021, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+// Tests `is` checks and `as` casts between various JS objects. Currently, all
+// checks and casts should be allowed between JS objects.
+
+@JS()
+library is_check_and_as_cast_test;
+
+import 'package:js/js.dart';
+import 'package:expect/minitest.dart';
+
+@JS()
+external void eval(String code);
+
+@JS()
+class Foo {
+  external Foo(int a);
+  external int get a;
+}
+
+// Class with the same structure as Foo but a separate JS class.
+@JS()
+class Bar {
+  external Bar(int a);
+  external int get a;
+}
+
+@JS('Bar')
+class BarCopy {
+  external BarCopy(int a);
+  external int get a;
+}
+
+// JS object literals
+@JS()
+@anonymous
+class LiteralA {
+  external int get x;
+}
+
+@JS()
+@anonymous
+class LiteralB {
+  external int get y;
+}
+
+// The library is annotated with @JS, so we don't need the annotation here.
+external LiteralA get a;
+external LiteralB get b;
+
+class DartClass {}
+
+// Defeat static type optimization by routing values through this function.
+@pragma('dart2js:noInline')
+@pragma('dart2js:assumeDynamic')
+confuse(x) => x;
+
+void main() {
+  eval(r"""
+    function Foo(a) {
+      this.a = a;
+    }
+    function Bar(a) {
+      this.a = a;
+    }
+    var a = {
+      x: 1,
+    };
+    var b = {
+      y: 2,
+    };
+      """);
+
+  // A JS class object can be checked and cast against itself.
+  var foo = Foo(42);
+  expect(foo is Foo, isTrue);
+  expect(confuse(foo) is Foo, isTrue);
+  expect(() => (foo as Foo), returnsNormally);
+
+  // Try it with dynamic.
+  dynamic d = Foo(42);
+  expect(d is Foo, isTrue);
+  expect(() => (d as Foo), returnsNormally);
+
+  // Casts are allowed between any JS class objects.
+  expect(foo is Bar, isTrue);
+  expect(confuse(foo) is Bar, isTrue);
+  expect(d is Bar, isTrue);
+  expect(() => (foo as Bar), returnsNormally);
+  expect(() => (d as Bar), returnsNormally);
+
+  // BarCopy is the same JS class as Bar.
+  var barCopy = BarCopy(42);
+  expect(barCopy is Bar, isTrue);
+  expect(confuse(barCopy) is Bar, isTrue);
+  expect(() => (barCopy as Bar), returnsNormally);
+
+  // A JS object literal can be checked and cast against itself.
+  expect(a is LiteralA, isTrue);
+  expect(confuse(a) is LiteralA, isTrue);
+  expect(() => (a as LiteralA), returnsNormally);
+
+  // Like class objects, casts are allowed between any object literals.
+  expect(a is LiteralB, isTrue);
+  expect(confuse(a) is LiteralB, isTrue);
+  expect(() => (a as LiteralB), returnsNormally);
+
+  // Similarly, casts are allowed between any class objects and object literals.
+  expect(foo is LiteralB, isTrue);
+  expect(confuse(foo) is LiteralB, isTrue);
+  expect(() => (foo as LiteralB), returnsNormally);
+  expect(a is Foo, isTrue);
+  expect(confuse(a) is Foo, isTrue);
+  expect(() => (a as Foo), returnsNormally);
+
+  // You cannot cast between JS interop objects and Dart objects, however.
+  var dartClass = DartClass();
+  expect(dartClass is Foo, isFalse);
+  expect(confuse(dartClass) is Foo, isFalse);
+  expect(() => (dartClass as Foo), throws);
+  expect(dartClass is LiteralA, isFalse);
+  expect(confuse(dartClass) is LiteralA, isFalse);
+  expect(() => (dartClass as LiteralA), throws);
+
+  expect(foo is DartClass, isFalse);
+  expect(confuse(foo) is DartClass, isFalse);
+  expect(() => (foo as DartClass), throws);
+  expect(a is DartClass, isFalse);
+  expect(confuse(a) is DartClass, isFalse);
+  expect(() => (a as DartClass), throws);
+}
diff --git a/tools/VERSION b/tools/VERSION
index 8225f5d..67f5806 100644
--- a/tools/VERSION
+++ b/tools/VERSION
@@ -27,5 +27,5 @@
 MAJOR 2
 MINOR 14
 PATCH 0
-PRERELEASE 308
+PRERELEASE 309
 PRERELEASE_PATCH 0
\ No newline at end of file
diff --git a/tools/bots/test_matrix.json b/tools/bots/test_matrix.json
index 0120f7e..6120112 100644
--- a/tools/bots/test_matrix.json
+++ b/tools/bots/test_matrix.json
@@ -1727,11 +1727,44 @@
       },
       "steps": [
         {
-          "name": "build dart",
+          "name": "build dart ia32",
           "script": "tools/build.py",
           "arguments": [
             "--mode=all",
-            "--arch=all",
+            "--arch=ia32",
+            "--no-clang",
+            "--no-goma",
+            "runtime"
+          ]
+        },
+        {
+          "name": "build dart x64",
+          "script": "tools/build.py",
+          "arguments": [
+            "--mode=all",
+            "--arch=x64",
+            "--no-clang",
+            "--no-goma",
+            "runtime"
+          ]
+        },
+        {
+          "name": "build dart simarm",
+          "script": "tools/build.py",
+          "arguments": [
+            "--mode=all",
+            "--arch=simarm",
+            "--no-clang",
+            "--no-goma",
+            "runtime"
+          ]
+        },
+        {
+          "name": "build dart simarm64",
+          "script": "tools/build.py",
+          "arguments": [
+            "--mode=all",
+            "--arch=simarm64",
             "--no-clang",
             "--no-goma",
             "runtime"