Version 2.12.0-62.0.dev

Merge commit 'db263169e5b6f3e20d38e6933fa63497e52dcdda' into 'dev'
diff --git a/pkg/analysis_server/lib/src/domain_completion.dart b/pkg/analysis_server/lib/src/domain_completion.dart
index 98e3a03..0aafdda 100644
--- a/pkg/analysis_server/lib/src/domain_completion.dart
+++ b/pkg/analysis_server/lib/src/domain_completion.dart
@@ -20,6 +20,7 @@
 import 'package:analysis_server/src/services/completion/yaml/analysis_options_generator.dart';
 import 'package:analysis_server/src/services/completion/yaml/fix_data_generator.dart';
 import 'package:analysis_server/src/services/completion/yaml/pubspec_generator.dart';
+import 'package:analysis_server/src/services/completion/yaml/yaml_completion_generator.dart';
 import 'package:analyzer/dart/analysis/results.dart';
 import 'package:analyzer/dart/analysis/session.dart';
 import 'package:analyzer/exception/exception.dart';
@@ -150,7 +151,7 @@
 
   /// Return the suggestions that should be presented in the YAML [file] at the
   /// given [offset].
-  List<CompletionSuggestion> computeYamlSuggestions(String file, int offset) {
+  YamlCompletionResults computeYamlSuggestions(String file, int offset) {
     var provider = server.resourceProvider;
     if (AnalysisEngine.isAnalysisOptionsFileName(file)) {
       var generator = AnalysisOptionsGenerator(provider);
@@ -164,7 +165,7 @@
       var generator = FixDataGenerator(provider);
       return generator.getSuggestions(file, offset);
     }
-    return <CompletionSuggestion>[];
+    return const YamlCompletionResults.empty();
   }
 
   /// Process a `completion.getSuggestionDetails` request.
@@ -325,11 +326,12 @@
         server.sendResponse(CompletionGetSuggestionsResult(completionId)
             .toResponse(request.id));
         // Send a notification with results.
+        final suggestions = computeYamlSuggestions(file, offset);
         sendCompletionNotification(
           completionId,
-          0, // replacementOffset
-          0, // replacementLength,
-          computeYamlSuggestions(file, offset),
+          suggestions.replacementOffset,
+          suggestions.replacementLength,
+          suggestions.suggestions,
           null,
           null,
           null,
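
The legacy protocol previously hard-coded a replacement range of 0/0 for YAML
completions; the notification now forwards the range computed by the
generators. A minimal Dart sketch of what that range means to a consumer,
assuming a hypothetical applyCompletion helper (not part of the server):

  // Replace the span being completed with the suggestion's completion text.
  String applyCompletion(String content, String completion,
      int replacementOffset, int replacementLength) {
    return content.replaceRange(
        replacementOffset, replacementOffset + replacementLength, completion);
  }

  void main() {
    // With 'li' typed, the generator reports offset 0 and length 2, so the
    // prefix is replaced rather than appended to.
    print(applyCompletion('li', 'linter: ', 0, 2)); // prints 'linter: '
  }
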
diff --git a/pkg/analysis_server/lib/src/lsp/handlers/handler_completion.dart b/pkg/analysis_server/lib/src/lsp/handlers/handler_completion.dart
index 88c5e97..c0b5988 100644
--- a/pkg/analysis_server/lib/src/lsp/handlers/handler_completion.dart
+++ b/pkg/analysis_server/lib/src/lsp/handlers/handler_completion.dart
@@ -16,8 +16,13 @@
 import 'package:analysis_server/src/services/completion/completion_performance.dart';
 import 'package:analysis_server/src/services/completion/dart/completion_manager.dart';
 import 'package:analysis_server/src/services/completion/filtering/fuzzy_matcher.dart';
+import 'package:analysis_server/src/services/completion/yaml/analysis_options_generator.dart';
+import 'package:analysis_server/src/services/completion/yaml/fix_data_generator.dart';
+import 'package:analysis_server/src/services/completion/yaml/pubspec_generator.dart';
+import 'package:analysis_server/src/services/completion/yaml/yaml_completion_generator.dart';
 import 'package:analyzer/dart/analysis/results.dart';
 import 'package:analyzer/source/line_info.dart';
+import 'package:analyzer/src/generated/engine.dart';
 import 'package:analyzer/src/services/available_declarations.dart';
 import 'package:analyzer_plugin/protocol/protocol_common.dart';
 import 'package:analyzer_plugin/protocol/protocol_generated.dart' as plugin;
@@ -89,18 +94,47 @@
         await lineInfo.mapResult((lineInfo) => toOffset(lineInfo, pos));
 
     return offset.mapResult((offset) async {
-      // For server results we need a valid unit, but if we don't have one
-      // we shouldn't consider this an error when merging with plugin results.
-      final serverResultsFuture = unit.isError
-          ? Future.value(success(const <CompletionItem>[]))
-          : _getServerItems(
-              completionCapabilities,
-              clientSupportedCompletionKinds,
-              includeSuggestionSets,
-              unit.result,
-              offset,
-              token,
-            );
+      Future<ErrorOr<List<CompletionItem>>> serverResultsFuture;
+      final pathContext = server.resourceProvider.pathContext;
+      final filename = pathContext.basename(path.result);
+      final fileExtension = pathContext.extension(path.result);
+
+      if (fileExtension == '.dart' && !unit.isError) {
+        serverResultsFuture = _getServerDartItems(
+          completionCapabilities,
+          clientSupportedCompletionKinds,
+          includeSuggestionSets,
+          unit.result,
+          offset,
+          token,
+        );
+      } else if (fileExtension == '.yaml') {
+        YamlCompletionGenerator generator;
+        switch (filename) {
+          case AnalysisEngine.PUBSPEC_YAML_FILE:
+            generator = PubspecGenerator(server.resourceProvider);
+            break;
+          case AnalysisEngine.ANALYSIS_OPTIONS_YAML_FILE:
+            generator = AnalysisOptionsGenerator(server.resourceProvider);
+            break;
+          case AnalysisEngine.FIX_DATA_FILE:
+            generator = FixDataGenerator(server.resourceProvider);
+            break;
+        }
+        if (generator != null) {
+          serverResultsFuture = _getServerYamlItems(
+            generator,
+            completionCapabilities,
+            clientSupportedCompletionKinds,
+            path.result,
+            lineInfo.result,
+            offset,
+            token,
+          );
+        }
+      }
+
+      serverResultsFuture ??= Future.value(success(const <CompletionItem>[]));
 
       final pluginResultsFuture = _getPluginResults(completionCapabilities,
           clientSupportedCompletionKinds, lineInfo.result, path.result, offset);
@@ -175,7 +209,7 @@
     ).toList());
   }
 
-  Future<ErrorOr<List<CompletionItem>>> _getServerItems(
+  Future<ErrorOr<List<CompletionItem>>> _getServerDartItems(
     CompletionClientCapabilities completionCapabilities,
     HashSet<CompletionItemKind> clientSupportedCompletionKinds,
     bool includeSuggestionSets,
@@ -355,6 +389,33 @@
     });
   }
 
+  Future<ErrorOr<List<CompletionItem>>> _getServerYamlItems(
+    YamlCompletionGenerator generator,
+    CompletionClientCapabilities completionCapabilities,
+    HashSet<CompletionItemKind> clientSupportedCompletionKinds,
+    String path,
+    LineInfo lineInfo,
+    int offset,
+    CancellationToken token,
+  ) async {
+    final suggestions = generator.getSuggestions(path, offset);
+    final completionItems = suggestions.suggestions
+        .map(
+          (item) => toCompletionItem(
+            completionCapabilities,
+            clientSupportedCompletionKinds,
+            lineInfo,
+            item,
+            suggestions.replacementOffset,
+            suggestions.replacementLength,
+            includeCommitCharacters: false,
+            completeFunctionCalls: false,
+          ),
+        )
+        .toList();
+    return success(completionItems);
+  }
+
   Iterable<CompletionItem> _pluginResultsToItems(
     CompletionClientCapabilities completionCapabilities,
     HashSet<CompletionItemKind> clientSupportedCompletionKinds,
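
_getServerYamlItems relies on toCompletionItem to turn the generator's
offset/length pair into an LSP text edit. A sketch of that conversion using
the analyzer's LineInfo API (the map literals stand in for the LSP
Position/Range types; this is not the server's actual code):

  import 'package:analyzer/source/line_info.dart';

  // Convert a server-side span (offset/length) into the zero-based
  // line/character positions the LSP protocol expects.
  Map<String, Object> lspRange(LineInfo lineInfo, int offset, int length) {
    // Analyzer locations are one-based; LSP positions are zero-based.
    Map<String, Object> position(int o) {
      final loc = lineInfo.getLocation(o);
      return {'line': loc.lineNumber - 1, 'character': loc.columnNumber - 1};
    }

    return {'start': position(offset), 'end': position(offset + length)};
  }
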
diff --git a/pkg/analysis_server/lib/src/lsp/server_capabilities_computer.dart b/pkg/analysis_server/lib/src/lsp/server_capabilities_computer.dart
index fe82284..08e8739 100644
--- a/pkg/analysis_server/lib/src/lsp/server_capabilities_computer.dart
+++ b/pkg/analysis_server/lib/src/lsp/server_capabilities_computer.dart
@@ -242,15 +242,25 @@
         // folders as well.
         .map((glob) => DocumentFilter(scheme: 'file', pattern: '**/$glob'));
 
-    final allTypes = {dartFiles, ...pluginTypes}.toList();
+    final fullySupportedTypes = {dartFiles, ...pluginTypes}.toList();
 
     // Add pubspec + analysis options only for synchronisation. We do not support
     // things like hovers/formatting/etc. for these files so there's no point
     // in having the client send those requests (plus, for things like formatting
     // this could result in the editor reporting "multiple formatters installed"
     // and prevent a built-in YAML formatter from being selected).
-    final allSynchronisedTypes = {
-      ...allTypes,
+    final synchronisedTypes = {
+      ...fullySupportedTypes,
+      pubspecFile,
+      analysisOptionsFile,
+      fixDataFile,
+    }.toList();
+
+    // Completion is supported for some synchronised files that we don't _fully_
+    // support (e.g. YAML). If these gain support for things like hover, we may
+    // wish to move them to fullySupportedTypes but add an exclusion for formatting.
+    final completionSupportedTypes = {
+      ...fullySupportedTypes,
       pubspecFile,
       analysisOptionsFile,
       fixDataFile,
@@ -278,25 +288,25 @@
     register(
       dynamicRegistrations.textSync,
       Method.textDocument_didOpen,
-      TextDocumentRegistrationOptions(documentSelector: allSynchronisedTypes),
+      TextDocumentRegistrationOptions(documentSelector: synchronisedTypes),
     );
     register(
       dynamicRegistrations.textSync,
       Method.textDocument_didClose,
-      TextDocumentRegistrationOptions(documentSelector: allSynchronisedTypes),
+      TextDocumentRegistrationOptions(documentSelector: synchronisedTypes),
     );
     register(
       dynamicRegistrations.textSync,
       Method.textDocument_didChange,
       TextDocumentChangeRegistrationOptions(
           syncKind: TextDocumentSyncKind.Incremental,
-          documentSelector: allSynchronisedTypes),
+          documentSelector: synchronisedTypes),
     );
     register(
       dynamicRegistrations.completion,
       Method.textDocument_completion,
       CompletionRegistrationOptions(
-        documentSelector: allTypes,
+        documentSelector: completionSupportedTypes,
         triggerCharacters: dartCompletionTriggerCharacters,
         allCommitCharacters:
             previewCommitCharacters ? dartCompletionCommitCharacters : null,
@@ -306,13 +316,13 @@
     register(
       dynamicRegistrations.hover,
       Method.textDocument_hover,
-      TextDocumentRegistrationOptions(documentSelector: allTypes),
+      TextDocumentRegistrationOptions(documentSelector: fullySupportedTypes),
     );
     register(
       dynamicRegistrations.signatureHelp,
       Method.textDocument_signatureHelp,
       SignatureHelpRegistrationOptions(
-        documentSelector: allTypes,
+        documentSelector: fullySupportedTypes,
         triggerCharacters: dartSignatureHelpTriggerCharacters,
         retriggerCharacters: dartSignatureHelpRetriggerCharacters,
       ),
@@ -320,22 +330,22 @@
     register(
       dynamicRegistrations.references,
       Method.textDocument_references,
-      TextDocumentRegistrationOptions(documentSelector: allTypes),
+      TextDocumentRegistrationOptions(documentSelector: fullySupportedTypes),
     );
     register(
       dynamicRegistrations.documentHighlights,
       Method.textDocument_documentHighlight,
-      TextDocumentRegistrationOptions(documentSelector: allTypes),
+      TextDocumentRegistrationOptions(documentSelector: fullySupportedTypes),
     );
     register(
       dynamicRegistrations.documentSymbol,
       Method.textDocument_documentSymbol,
-      TextDocumentRegistrationOptions(documentSelector: allTypes),
+      TextDocumentRegistrationOptions(documentSelector: fullySupportedTypes),
     );
     register(
       enableFormatter && dynamicRegistrations.formatting,
       Method.textDocument_formatting,
-      TextDocumentRegistrationOptions(documentSelector: allTypes),
+      TextDocumentRegistrationOptions(documentSelector: fullySupportedTypes),
     );
     register(
       enableFormatter && dynamicRegistrations.typeFormatting,
@@ -356,18 +366,18 @@
     register(
       dynamicRegistrations.definition,
       Method.textDocument_definition,
-      TextDocumentRegistrationOptions(documentSelector: allTypes),
+      TextDocumentRegistrationOptions(documentSelector: fullySupportedTypes),
     );
     register(
       dynamicRegistrations.implementation,
       Method.textDocument_implementation,
-      TextDocumentRegistrationOptions(documentSelector: allTypes),
+      TextDocumentRegistrationOptions(documentSelector: fullySupportedTypes),
     );
     register(
       dynamicRegistrations.codeActions,
       Method.textDocument_codeAction,
       CodeActionRegistrationOptions(
-        documentSelector: allTypes,
+        documentSelector: fullySupportedTypes,
         codeActionKinds: DartCodeActionKind.serverSupportedKinds,
       ),
     );
@@ -375,12 +385,12 @@
       dynamicRegistrations.rename,
       Method.textDocument_rename,
       RenameRegistrationOptions(
-          documentSelector: allTypes, prepareProvider: true),
+          documentSelector: fullySupportedTypes, prepareProvider: true),
     );
     register(
       dynamicRegistrations.folding,
       Method.textDocument_foldingRange,
-      TextDocumentRegistrationOptions(documentSelector: allTypes),
+      TextDocumentRegistrationOptions(documentSelector: fullySupportedTypes),
     );
     register(
       dynamicRegistrations.didChangeConfiguration,
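
The net effect: Dart and plugin-supplied files keep the full feature set, the
three YAML files are registered for text synchronisation and (new here)
completion, and nothing else. Roughly the wire shape of the resulting
completion registration (field names per the LSP spec; the selector entries
are illustrative approximations of the DocumentFilters built above):

  final completionRegistration = {
    'method': 'textDocument/completion',
    'registerOptions': {
      'documentSelector': [
        {'language': 'dart', 'scheme': 'file'}, // assumption: Dart filter
        {'scheme': 'file', 'pattern': '**/pubspec.yaml'},
        {'scheme': 'file', 'pattern': '**/analysis_options.yaml'},
        {'scheme': 'file', 'pattern': '**/fix_data.yaml'},
      ],
      // triggerCharacters/allCommitCharacters unchanged from before.
    },
  };
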
diff --git a/pkg/analysis_server/lib/src/services/completion/yaml/yaml_completion_generator.dart b/pkg/analysis_server/lib/src/services/completion/yaml/yaml_completion_generator.dart
index 3d0b64c..b00768b 100644
--- a/pkg/analysis_server/lib/src/services/completion/yaml/yaml_completion_generator.dart
+++ b/pkg/analysis_server/lib/src/services/completion/yaml/yaml_completion_generator.dart
@@ -24,7 +24,7 @@
 
   /// Return the completion suggestions appropriate for the given [offset] in
   /// the file at the given [filePath].
-  List<CompletionSuggestion> getSuggestions(String filePath, int offset) {
+  YamlCompletionResults getSuggestions(String filePath, int offset) {
     var file = resourceProvider.getFile(filePath);
     String content;
     try {
@@ -32,12 +32,12 @@
     } on FileSystemException {
       // If the file doesn't exist or can't be read, then there are no
       // suggestions.
-      return const <CompletionSuggestion>[];
+      return const YamlCompletionResults.empty();
     }
     var root = _parseYaml(content);
     if (root == null) {
       // If the contents can't be parsed, then there are no suggestions.
-      return const <CompletionSuggestion>[];
+      return const YamlCompletionResults.empty();
     }
     var path = _pathToOffset(root, offset);
     var completionNode = path.last;
@@ -52,17 +52,16 @@
       return getSuggestionsForPath(path, offset);
     }
     // There are no completions at the given location.
-    return const <CompletionSuggestion>[];
+    return const YamlCompletionResults.empty();
   }
 
   /// Given a [path] to the node in which completions are being requested and
   /// the offset of the cursor, return the completions appropriate at that
   /// location.
-  List<CompletionSuggestion> getSuggestionsForPath(
-      List<YamlNode> path, int offset) {
+  YamlCompletionResults getSuggestionsForPath(List<YamlNode> path, int offset) {
     var producer = _producerForPath(path);
     if (producer == null) {
-      return const <CompletionSuggestion>[];
+      return const YamlCompletionResults.empty();
     }
     var invalidSuggestions = _siblingsOnPath(path);
     var suggestions = <CompletionSuggestion>[];
@@ -71,7 +70,12 @@
         suggestions.add(suggestion);
       }
     }
-    return suggestions;
+    final node = path.isNotEmpty ? path.last : null;
+    final replaceNode = node is YamlScalar && node.containsOffset(offset);
+    final replacementOffset = replaceNode ? node.span.start.offset : offset;
+    final replacementLength = replaceNode ? node.span.length : 0;
+    return YamlCompletionResults(
+        suggestions, replacementOffset, replacementLength);
   }
 
   /// Return the result of parsing the file [content] into a YAML node.
@@ -164,6 +168,20 @@
   }
 }
 
+class YamlCompletionResults {
+  final List<CompletionSuggestion> suggestions;
+  final int replacementOffset;
+  final int replacementLength;
+
+  const YamlCompletionResults(
+      this.suggestions, this.replacementOffset, this.replacementLength);
+
+  const YamlCompletionResults.empty()
+      : suggestions = const [],
+        replacementOffset = 0,
+        replacementLength = 0;
+}
+
 extension on YamlMap {
   /// Return the node representing the key that corresponds to the value
   /// represented by the [value] node.
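
The replacement range rule added to getSuggestionsForPath: when the cursor
sits inside a YamlScalar, the whole scalar is replaced; otherwise the
suggestion is inserted at the cursor with length 0. A standalone sketch of
the rule (illustrative names, not the server code):

  class Replacement {
    final int offset;
    final int length;
    const Replacement(this.offset, this.length);
  }

  Replacement replacementFor(
      int cursor, int scalarStart, int scalarLength, bool cursorInScalar) {
    return cursorInScalar
        ? Replacement(scalarStart, scalarLength) // replace the typed prefix
        : Replacement(cursor, 0); // plain insertion at the cursor
  }

  void main() {
    // 'li^' in analysis_options.yaml: the scalar 'li' spans [0, 2) and the
    // cursor is inside it, so 'linter: ' replaces offsets 0..2.
    final r = replacementFor(2, 0, 2, true);
    print('${r.offset}, ${r.length}'); // 0, 2
  }
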
diff --git a/pkg/analysis_server/test/lsp/completion_test.dart b/pkg/analysis_server/test/lsp/completion_dart_test.dart
similarity index 100%
rename from pkg/analysis_server/test/lsp/completion_test.dart
rename to pkg/analysis_server/test/lsp/completion_dart_test.dart
diff --git a/pkg/analysis_server/test/lsp/completion_yaml_test.dart b/pkg/analysis_server/test/lsp/completion_yaml_test.dart
new file mode 100644
index 0000000..0802d65
--- /dev/null
+++ b/pkg/analysis_server/test/lsp/completion_yaml_test.dart
@@ -0,0 +1,302 @@
+// Copyright (c) 2020, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+import 'package:linter/src/rules.dart';
+import 'package:test/test.dart';
+import 'package:test_reflective_loader/test_reflective_loader.dart';
+
+import 'server_abstract.dart';
+
+void main() {
+  defineReflectiveSuite(() {
+    defineReflectiveTests(PubspecCompletionTest);
+    defineReflectiveTests(AnalysisOptionsCompletionTest);
+    defineReflectiveTests(FixDataCompletionTest);
+  });
+}
+
+@reflectiveTest
+class AnalysisOptionsCompletionTest extends AbstractLspAnalysisServerTest
+    with CompletionTestMixin {
+  @override
+  void setUp() {
+    registerLintRules();
+    super.setUp();
+  }
+
+  Future<void> test_nested() async {
+    final content = '''
+linter:
+  rules:
+    - ^''';
+
+    final expected = '''
+linter:
+  rules:
+    - annotate_overrides''';
+
+    await verifyCompletions(
+      analysisOptionsUri,
+      content,
+      expectCompletions: [
+        'always_declare_return_types',
+        'annotate_overrides',
+      ],
+      verifyEditsFor: 'annotate_overrides',
+      expectedContent: expected,
+    );
+  }
+
+  Future<void> test_nested_prefix() async {
+    final content = '''
+linter:
+  rules:
+    - ann^''';
+
+    final expected = '''
+linter:
+  rules:
+    - annotate_overrides''';
+
+    await verifyCompletions(
+      analysisOptionsUri,
+      content,
+      expectCompletions: ['annotate_overrides'],
+      verifyEditsFor: 'annotate_overrides',
+      expectedContent: expected,
+    );
+  }
+
+  Future<void> test_topLevel() async {
+    final content = '''
+^''';
+    final expected = '''
+linter: ''';
+
+    await verifyCompletions(
+      analysisOptionsUri,
+      content,
+      expectCompletions: ['linter: '],
+      verifyEditsFor: 'linter: ',
+      expectedContent: expected,
+    );
+  }
+
+  Future<void> test_topLevel_prefix() async {
+    final content = '''
+li^''';
+    final expected = '''
+linter: ''';
+
+    await verifyCompletions(
+      analysisOptionsUri,
+      content,
+      expectCompletions: ['linter: '],
+      verifyEditsFor: 'linter: ',
+      expectedContent: expected,
+    );
+  }
+}
+
+mixin CompletionTestMixin on AbstractLspAnalysisServerTest {
+  Future<void> verifyCompletions(
+    Uri fileUri,
+    String content, {
+    List<String> expectCompletions,
+    String verifyEditsFor,
+    String expectedContent,
+  }) async {
+    await initialize();
+    await openFile(fileUri, withoutMarkers(content));
+    final res = await getCompletion(fileUri, positionFromMarker(content));
+
+    for (final expectedCompletion in expectCompletions) {
+      expect(
+        res.any((c) => c.label == expectedCompletion),
+        isTrue,
+        reason:
+            '"$expectedCompletion" was not in ${res.map((c) => '"${c.label}"')}',
+      );
+    }
+
+    // Check the edits apply correctly.
+    if (verifyEditsFor != null) {
+      final item = res.singleWhere((c) => c.label == verifyEditsFor);
+      expect(item.insertTextFormat, isNull);
+      expect(item.insertText, isNull);
+      final updated = applyTextEdits(withoutMarkers(content), [item.textEdit]);
+      expect(updated, equals(expectedContent));
+    }
+  }
+}
+
+@reflectiveTest
+class FixDataCompletionTest extends AbstractLspAnalysisServerTest
+    with CompletionTestMixin {
+  Uri fixDataUri;
+
+  @override
+  void setUp() {
+    super.setUp();
+    fixDataUri = Uri.file(join(projectFolderPath, 'lib', 'fix_data.yaml'));
+  }
+
+  Future<void> test_nested() async {
+    final content = '''
+version: 1.0.0
+transforms:
+  - changes:
+    - ^''';
+    final expected = '''
+version: 1.0.0
+transforms:
+  - changes:
+    - kind: ''';
+
+    await verifyCompletions(
+      fixDataUri,
+      content,
+      expectCompletions: ['kind: '],
+      verifyEditsFor: 'kind: ',
+      expectedContent: expected,
+    );
+  }
+
+  Future<void> test_nested_prefix() async {
+    final content = '''
+version: 1.0.0
+transforms:
+  - changes:
+    - ki^''';
+    final expected = '''
+version: 1.0.0
+transforms:
+  - changes:
+    - kind: ''';
+
+    await verifyCompletions(
+      fixDataUri,
+      content,
+      expectCompletions: ['kind: '],
+      verifyEditsFor: 'kind: ',
+      expectedContent: expected,
+    );
+  }
+
+  Future<void> test_topLevel() async {
+    final content = '''
+version: 1.0.0
+^''';
+    final expected = '''
+version: 1.0.0
+transforms:''';
+
+    await verifyCompletions(
+      fixDataUri,
+      content,
+      expectCompletions: ['transforms:'],
+      verifyEditsFor: 'transforms:',
+      expectedContent: expected,
+    );
+  }
+
+  Future<void> test_topLevel_prefix() async {
+    final content = '''
+tra^''';
+    final expected = '''
+transforms:''';
+
+    await verifyCompletions(
+      fixDataUri,
+      content,
+      expectCompletions: ['transforms:'],
+      verifyEditsFor: 'transforms:',
+      expectedContent: expected,
+    );
+  }
+}
+
+@reflectiveTest
+class PubspecCompletionTest extends AbstractLspAnalysisServerTest
+    with CompletionTestMixin {
+  Future<void> test_nested() async {
+    final content = '''
+name: foo
+version: 1.0.0
+
+environment:
+  ^''';
+
+    final expected = '''
+name: foo
+version: 1.0.0
+
+environment:
+  sdk: ''';
+
+    await verifyCompletions(
+      pubspecFileUri,
+      content,
+      expectCompletions: ['flutter: ', 'sdk: '],
+      verifyEditsFor: 'sdk: ',
+      expectedContent: expected,
+    );
+  }
+
+  Future<void> test_nested_prefix() async {
+    final content = '''
+name: foo
+version: 1.0.0
+
+environment:
+  sd^''';
+
+    final expected = '''
+name: foo
+version: 1.0.0
+
+environment:
+  sdk: ''';
+
+    await verifyCompletions(
+      pubspecFileUri,
+      content,
+      expectCompletions: ['flutter: ', 'sdk: '],
+      verifyEditsFor: 'sdk: ',
+      expectedContent: expected,
+    );
+  }
+
+  Future<void> test_topLevel() async {
+    final content = '''
+version: 1.0.0
+^''';
+    final expected = '''
+version: 1.0.0
+name: ''';
+
+    await verifyCompletions(
+      pubspecFileUri,
+      content,
+      expectCompletions: ['name: ', 'description: '],
+      verifyEditsFor: 'name: ',
+      expectedContent: expected,
+    );
+  }
+
+  Future<void> test_topLevel_prefix() async {
+    final content = '''
+na^''';
+    final expected = '''
+name: ''';
+
+    await verifyCompletions(
+      pubspecFileUri,
+      content,
+      expectCompletions: ['name: ', 'description: '],
+      verifyEditsFor: 'name: ',
+      expectedContent: expected,
+    );
+  }
+}
diff --git a/pkg/analysis_server/test/lsp/test_all.dart b/pkg/analysis_server/test/lsp/test_all.dart
index 628b142..ea56793 100644
--- a/pkg/analysis_server/test/lsp/test_all.dart
+++ b/pkg/analysis_server/test/lsp/test_all.dart
@@ -13,7 +13,8 @@
 import 'code_actions_fixes_test.dart' as code_actions_fixes;
 import 'code_actions_refactor_test.dart' as code_actions_refactor;
 import 'code_actions_source_test.dart' as code_actions_source;
-import 'completion_test.dart' as completion;
+import 'completion_dart_test.dart' as completion_dart;
+import 'completion_yaml_test.dart' as completion_yaml;
 import 'configuration_test.dart' as configuration;
 import 'definition_test.dart' as definition;
 import 'diagnostic_test.dart' as diagnostic;
@@ -48,7 +49,8 @@
     code_actions_fixes.main();
     code_actions_source.main();
     code_actions_refactor.main();
-    completion.main();
+    completion_dart.main();
+    completion_yaml.main();
     configuration.main();
     definition.main();
     diagnostic.main();
diff --git a/pkg/analysis_server/test/src/services/completion/yaml/yaml_generator_test_support.dart b/pkg/analysis_server/test/src/services/completion/yaml/yaml_generator_test_support.dart
index 7e4cc84..3baebe1 100644
--- a/pkg/analysis_server/test/src/services/completion/yaml/yaml_generator_test_support.dart
+++ b/pkg/analysis_server/test/src/services/completion/yaml/yaml_generator_test_support.dart
@@ -59,6 +59,6 @@
     // Add the file to the file system.
     var file = newFile('/home/test/$fileName', content: content);
     // Generate completions.
-    results = generator.getSuggestions(file.path, completionOffset);
+    results = generator.getSuggestions(file.path, completionOffset).suggestions;
   }
 }
diff --git a/pkg/front_end/test/scanner_fasta_test.dart b/pkg/front_end/test/scanner_fasta_test.dart
index ea17792..08f0bc6 100644
--- a/pkg/front_end/test/scanner_fasta_test.dart
+++ b/pkg/front_end/test/scanner_fasta_test.dart
@@ -8,11 +8,8 @@
 import 'package:_fe_analyzer_shared/src/scanner/scanner.dart'
     as usedForFuzzTesting;
 import 'package:_fe_analyzer_shared/src/scanner/scanner.dart';
-import 'package:_fe_analyzer_shared/src/scanner/string_scanner.dart' as fasta;
 import 'package:_fe_analyzer_shared/src/scanner/token.dart' as fasta;
 import 'package:_fe_analyzer_shared/src/scanner/token_constants.dart' as fasta;
-import 'package:_fe_analyzer_shared/src/scanner/utf8_bytes_scanner.dart'
-    as fasta;
 import 'package:_fe_analyzer_shared/src/scanner/errors.dart';
 import 'package:_fe_analyzer_shared/src/scanner/token.dart';
 import 'package:front_end/src/fasta/fasta_codes.dart';
diff --git a/pkg/frontend_server/lib/src/to_string_transformer.dart b/pkg/frontend_server/lib/src/to_string_transformer.dart
new file mode 100644
index 0000000..8969e51
--- /dev/null
+++ b/pkg/frontend_server/lib/src/to_string_transformer.dart
@@ -0,0 +1,94 @@
+// Copyright (c) 2020, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+import 'package:kernel/ast.dart';
+import 'package:kernel/visitor.dart';
+import '../frontend_server.dart';
+
+// Transformer/visitor for toString
+// If we add any more of these, they really should go into a separate library.
+
+/// A [RecursiveVisitor] that replaces [Object.toString] overrides with
+/// `super.toString()`.
+class ToStringVisitor extends RecursiveVisitor<void> {
+  /// The [packageUris] must not be null.
+  ToStringVisitor(this._packageUris) : assert(_packageUris != null);
+
+  /// A set of package URIs to apply this transformer to, e.g. 'dart:ui' and
+  /// 'package:flutter/foundation.dart'.
+  final Set<String> _packageUris;
+
+  /// Turn 'dart:ui' into 'dart:ui', or
+  /// 'package:flutter/src/semantics_event.dart' into 'package:flutter'.
+  String _importUriToPackage(Uri importUri) =>
+      '${importUri.scheme}:${importUri.pathSegments.first}';
+
+  bool _isInTargetPackage(Procedure node) {
+    return _packageUris
+        .contains(_importUriToPackage(node.enclosingLibrary.importUri));
+  }
+
+  bool _hasKeepAnnotation(Procedure node) {
+    for (ConstantExpression expression
+        in node.annotations.whereType<ConstantExpression>()) {
+      if (expression.constant is! InstanceConstant) {
+        continue;
+      }
+      final InstanceConstant constant = expression.constant as InstanceConstant;
+      if (constant.classNode.name == '_KeepToString' &&
+          constant.classNode.enclosingLibrary.importUri.toString() ==
+              'dart:ui') {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  @override
+  void visitProcedure(Procedure node) {
+    if (node.name.text == 'toString' &&
+        node.enclosingClass != null &&
+        node.enclosingLibrary != null &&
+        !node.isStatic &&
+        !node.isAbstract &&
+        !node.enclosingClass.isEnum &&
+        _isInTargetPackage(node) &&
+        !_hasKeepAnnotation(node)) {
+      node.function.body.replaceWith(
+        ReturnStatement(
+          SuperMethodInvocation(
+            node.name,
+            Arguments(<Expression>[]),
+          ),
+        ),
+      );
+    }
+  }
+
+  @override
+  void defaultMember(Member node) {}
+}
+
+/// Replaces [Object.toString] overrides with calls to super for the specified
+/// [packageUris].
+class ToStringTransformer extends ProgramTransformer {
+  /// The [packageUris] parameter must not be null, but may be empty.
+  ToStringTransformer(this._child, this._packageUris)
+      : assert(_packageUris != null);
+
+  final ProgramTransformer _child;
+
+  /// A set of package URIs to apply this transformer to, e.g. 'dart:ui' and
+  /// 'package:flutter/foundation.dart'.
+  final Set<String> _packageUris;
+
+  @override
+  void transform(Component component) {
+    assert(_child is! ToStringTransformer);
+    if (_packageUris.isNotEmpty) {
+      component.visitChildren(ToStringVisitor(_packageUris));
+    }
+    _child?.transform(component);
+  }
+}
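
Usage is straightforward: construct the transformer with an optional child
and the set of package URIs, then run it over the compiled kernel component.
A minimal sketch (how the frontend server populates the URI set from its
command line is outside this excerpt):

  // Strip toString() overrides from dart:ui and package:flutter classes,
  // except those annotated with dart:ui's _KeepToString.
  final transformer = ToStringTransformer(
    null, // no child transformer
    {'dart:ui', 'package:flutter'},
  );
  // Once the kernel Component has been built:
  //   transformer.transform(component);
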
diff --git a/runtime/vm/class_id.h b/runtime/vm/class_id.h
index 4c973f4..8421944 100644
--- a/runtime/vm/class_id.h
+++ b/runtime/vm/class_id.h
@@ -352,21 +352,29 @@
 }
 
 inline bool IsFfiTypeClassId(intptr_t index) {
-  // Make sure this is updated when new Ffi types are added.
-  COMPILE_ASSERT(kFfiNativeFunctionCid == kFfiPointerCid + 1 &&
-                 kFfiInt8Cid == kFfiPointerCid + 2 &&
-                 kFfiInt16Cid == kFfiPointerCid + 3 &&
-                 kFfiInt32Cid == kFfiPointerCid + 4 &&
-                 kFfiInt64Cid == kFfiPointerCid + 5 &&
-                 kFfiUint8Cid == kFfiPointerCid + 6 &&
-                 kFfiUint16Cid == kFfiPointerCid + 7 &&
-                 kFfiUint32Cid == kFfiPointerCid + 8 &&
-                 kFfiUint64Cid == kFfiPointerCid + 9 &&
-                 kFfiIntPtrCid == kFfiPointerCid + 10 &&
-                 kFfiFloatCid == kFfiPointerCid + 11 &&
-                 kFfiDoubleCid == kFfiPointerCid + 12 &&
-                 kFfiVoidCid == kFfiPointerCid + 13);
-  return (index >= kFfiPointerCid && index <= kFfiVoidCid);
+  switch (index) {
+    case kFfiPointerCid:
+    case kFfiNativeFunctionCid:
+#define CASE_FFI_CID(name) case kFfi##name##Cid:
+      CLASS_LIST_FFI_TYPE_MARKER(CASE_FFI_CID)
+#undef CASE_FFI_CID
+      return true;
+    default:
+      return false;
+  }
+  UNREACHABLE();
+}
+
+inline bool IsFfiPredefinedClassId(classid_t class_id) {
+  switch (class_id) {
+#define CASE_FFI_CID(name) case kFfi##name##Cid:
+    CLASS_LIST_FFI(CASE_FFI_CID)
+#undef CASE_FFI_CID
+    return true;
+    default:
+      return false;
+  }
+  UNREACHABLE();
 }
 
 inline bool IsFfiTypeIntClassId(intptr_t index) {
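
Both predicates expand the VM's X-macro class lists into runs of case labels.
A standalone C++ sketch of the pattern with a made-up three-entry list (the
real CLASS_LIST_FFI covers every predefined FFI class):

  #define DEMO_CLASS_LIST(V)                                                 \
    V(Int8)                                                                  \
    V(Int16)                                                                 \
    V(Float)

  enum DemoClassId { kFfiInt8Cid, kFfiInt16Cid, kFfiFloatCid, kOtherCid };

  inline bool IsDemoFfiClassId(int index) {
    switch (index) {
  #define CASE_FFI_CID(name) case kFfi##name##Cid:
      DEMO_CLASS_LIST(CASE_FFI_CID)
  #undef CASE_FFI_CID
      return true;
      default:
        return false;
    }
  }
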
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler.cc b/runtime/vm/compiler/backend/flow_graph_compiler.cc
index 57ba87a..20428a1 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler.cc
@@ -3255,8 +3255,10 @@
       !destination.IsFpuRegisters()) {
     // TODO(40209): If this is stack to stack, we could use FpuTMP.
     // Test the impact on code size and speed.
-    EmitNativeMove(destination.Split(zone_, 0), source.Split(zone_, 0), temp);
-    EmitNativeMove(destination.Split(zone_, 1), source.Split(zone_, 1), temp);
+    EmitNativeMove(destination.Split(zone_, 2, 0), source.Split(zone_, 2, 0),
+                   temp);
+    EmitNativeMove(destination.Split(zone_, 2, 1), source.Split(zone_, 2, 1),
+                   temp);
     return;
   }
 
@@ -3331,7 +3333,7 @@
     for (intptr_t i : {0, 1}) {
       const auto& src_split = compiler::ffi::NativeLocation::FromPairLocation(
           zone_, src_loc, src_type, i);
-      EmitNativeMove(dst.Split(zone_, i), src_split, temp);
+      EmitNativeMove(dst.Split(zone_, 2, i), src_split, temp);
     }
   } else {
     const auto& src =
@@ -3351,7 +3353,7 @@
     for (intptr_t i : {0, 1}) {
       const auto& dest_split = compiler::ffi::NativeLocation::FromPairLocation(
           zone_, dst_loc, dst_type, i);
-      EmitNativeMove(dest_split, src.Split(zone_, i), temp);
+      EmitNativeMove(dest_split, src.Split(zone_, 2, i), temp);
     }
   } else {
     const auto& dest =
@@ -3398,7 +3400,7 @@
             compiler::ffi::NativeLocation::FromLocation(zone_, intermediate,
                                                         src_type_split);
         EmitMove(intermediate, src.AsPairLocation()->At(i), temp);
-        EmitNativeMove(dst.Split(zone_, i), intermediate_native, temp);
+        EmitNativeMove(dst.Split(zone_, 2, i), intermediate_native, temp);
       }
     } else {
       const auto& intermediate_native =
diff --git a/runtime/vm/compiler/ffi/native_calling_convention.cc b/runtime/vm/compiler/ffi/native_calling_convention.cc
index 5b21cf2..e53a1e8 100644
--- a/runtime/vm/compiler/ffi/native_calling_convention.cc
+++ b/runtime/vm/compiler/ffi/native_calling_convention.cc
@@ -53,6 +53,13 @@
   return rep;
 }
 
+// The native dual of `kUnboxedFfiIntPtr`.
+//
+// It has the same signedness as `kUnboxedFfiIntPtr` to avoid sign conversions
+// when converting between the two.
+const PrimitiveType kFfiIntPtr =
+    compiler::target::kWordSize == 8 ? kInt64 : kUint32;
+
 // Represents the state of a stack frame going into a call, between allocations
 // of argument locations.
 class ArgumentAllocator : public ValueObject {
@@ -62,57 +69,287 @@
   const NativeLocation& AllocateArgument(const NativeType& payload_type) {
     const auto& payload_type_converted = ConvertIfSoftFp(zone_, payload_type);
     if (payload_type_converted.IsFloat()) {
-      const auto kind = FpuRegKind(payload_type);
-      const intptr_t reg_index = FirstFreeFpuRegisterIndex(kind);
-      if (reg_index != kNoFpuRegister) {
-        AllocateFpuRegisterAtIndex(kind, reg_index);
-        if (CallingConventions::kArgumentIntRegXorFpuReg) {
-          cpu_regs_used++;
-        }
-        return *new (zone_) NativeFpuRegistersLocation(
-            payload_type, payload_type, kind, reg_index);
-      } else {
-        BlockAllFpuRegisters();
-        if (CallingConventions::kArgumentIntRegXorFpuReg) {
-          ASSERT(cpu_regs_used == CallingConventions::kNumArgRegs);
-        }
-        // Transfer on stack.
+      return AllocateFloat(payload_type);
+    }
+    if (payload_type_converted.IsInt()) {
+      return AllocateInt(payload_type);
+    }
+
+    // Compounds are laid out differently per ABI, so they are implemented
+    // per ABI.
+    //
+    // Compounds always have a PointerToMemory, Stack, or Multiple location;
+    // even if the parts of a compound fit in one CPU or FPU register, they
+    // are nested in a MultipleNativeLocations.
+    const NativeCompoundType& compound_type = payload_type.AsCompound();
+    return AllocateCompound(compound_type);
+  }
+
+ private:
+  const NativeLocation& AllocateFloat(const NativeType& payload_type) {
+    const auto kind = FpuRegKind(payload_type);
+    const intptr_t reg_index = FirstFreeFpuRegisterIndex(kind);
+    if (reg_index != kNoFpuRegister) {
+      AllocateFpuRegisterAtIndex(kind, reg_index);
+      if (CallingConventions::kArgumentIntRegXorFpuReg) {
+        cpu_regs_used++;
       }
-    } else if (payload_type_converted.IsInt()) {
-      // Some calling conventions require the callee to make the lowest 32 bits
-      // in registers non-garbage.
-      const auto& container_type =
-          CallingConventions::kArgumentRegisterExtension == kExtendedTo4
-              ? payload_type_converted.WidenTo4Bytes(zone_)
-              : payload_type_converted;
-      if (target::kWordSize == 4 && payload_type.SizeInBytes() == 8) {
-        if (CallingConventions::kArgumentRegisterAlignment ==
-            kAlignedToWordSizeBut8AlignedTo8) {
-          cpu_regs_used += cpu_regs_used % 2;
-        }
-        if (cpu_regs_used + 2 <= CallingConventions::kNumArgRegs) {
-          const Register register_1 = AllocateCpuRegister();
-          const Register register_2 = AllocateCpuRegister();
-          return *new (zone_) NativeRegistersLocation(
-              zone_, payload_type, container_type, register_1, register_2);
-        }
-      } else {
-        ASSERT(payload_type.SizeInBytes() <= target::kWordSize);
-        if (cpu_regs_used + 1 <= CallingConventions::kNumArgRegs) {
-          return *new (zone_) NativeRegistersLocation(
-              zone_, payload_type, container_type, AllocateCpuRegister());
-        } else {
-          // Transfer on stack.
-        }
+      return *new (zone_) NativeFpuRegistersLocation(payload_type, payload_type,
+                                                     kind, reg_index);
+    }
+
+    BlockAllFpuRegisters();
+    if (CallingConventions::kArgumentIntRegXorFpuReg) {
+      ASSERT(cpu_regs_used == CallingConventions::kNumArgRegs);
+    }
+    return AllocateStack(payload_type);
+  }
+
+  const NativeLocation& AllocateInt(const NativeType& payload_type) {
+    const auto& payload_type_converted = ConvertIfSoftFp(zone_, payload_type);
+
+    // Some calling conventions require the callee to make the lowest 32 bits
+    // in registers non-garbage.
+    const auto& container_type =
+        CallingConventions::kArgumentRegisterExtension == kExtendedTo4
+            ? payload_type_converted.WidenTo4Bytes(zone_)
+            : payload_type_converted;
+    if (target::kWordSize == 4 && payload_type.SizeInBytes() == 8) {
+      if (CallingConventions::kArgumentRegisterAlignment ==
+          kAlignedToWordSizeBut8AlignedTo8) {
+        cpu_regs_used += cpu_regs_used % 2;
+      }
+      if (cpu_regs_used + 2 <= CallingConventions::kNumArgRegs) {
+        const Register register_1 = AllocateCpuRegister();
+        const Register register_2 = AllocateCpuRegister();
+        return *new (zone_) NativeRegistersLocation(
+            zone_, payload_type, container_type, register_1, register_2);
       }
     } else {
-      UNREACHABLE();
+      ASSERT(payload_type.SizeInBytes() <= target::kWordSize);
+      if (cpu_regs_used + 1 <= CallingConventions::kNumArgRegs) {
+        return *new (zone_) NativeRegistersLocation(
+            zone_, payload_type, container_type, AllocateCpuRegister());
+      }
+    }
+    return AllocateStack(payload_type);
+  }
+
+#if defined(TARGET_ARCH_X64) && !defined(TARGET_OS_WINDOWS)
+  // If it fits in two FPU and/or CPU registers, transfer in those. Otherwise,
+  // transfer on the stack.
+  const NativeLocation& AllocateCompound(
+      const NativeCompoundType& payload_type) {
+    const intptr_t size = payload_type.SizeInBytes();
+    if (size <= 16 && size > 0) {
+      intptr_t required_regs =
+          payload_type.NumberOfWordSizeChunksNotOnlyFloat();
+      intptr_t required_xmm_regs =
+          payload_type.NumberOfWordSizeChunksOnlyFloat();
+      const bool regs_available =
+          cpu_regs_used + required_regs <= CallingConventions::kNumArgRegs;
+      const bool fpu_regs_available =
+          FirstFreeFpuRegisterIndex(kQuadFpuReg) != kNoFpuRegister &&
+          FirstFreeFpuRegisterIndex(kQuadFpuReg) + required_xmm_regs <=
+              CallingConventions::kNumFpuArgRegs;
+      if (regs_available && fpu_regs_available) {
+        // Transfer in registers.
+        NativeLocations& multiple_locations = *new (zone_) NativeLocations(
+            zone_, required_regs + required_xmm_regs);
+        for (intptr_t offset = 0; offset < size;
+             offset += compiler::target::kWordSize) {
+          if (payload_type.ContainsOnlyFloats(
+                  offset, Utils::Minimum<intptr_t>(size - offset, 8))) {
+            const intptr_t reg_index = FirstFreeFpuRegisterIndex(kQuadFpuReg);
+            AllocateFpuRegisterAtIndex(kQuadFpuReg, reg_index);
+            const auto& type = *new (zone_) NativePrimitiveType(kDouble);
+            multiple_locations.Add(new (zone_) NativeFpuRegistersLocation(
+                type, type, kQuadFpuReg, reg_index));
+          } else {
+            const auto& type = *new (zone_) NativePrimitiveType(kInt64);
+            multiple_locations.Add(new (zone_) NativeRegistersLocation(
+                zone_, type, type, AllocateCpuRegister()));
+          }
+        }
+        return *new (zone_)
+            MultipleNativeLocations(payload_type, multiple_locations);
+      }
+    }
+    return AllocateStack(payload_type);
+  }
+#endif  // defined(TARGET_ARCH_X64) && !defined(TARGET_OS_WINDOWS)
+
+#if defined(TARGET_ARCH_X64) && defined(TARGET_OS_WINDOWS)
+  // If the struct fits in a single register and its size is a power of two,
+  // then use a single register and sign-extend.
+  // Otherwise, pass a pointer to a copy.
+  const NativeLocation& AllocateCompound(
+      const NativeCompoundType& payload_type) {
+    const NativeCompoundType& compound_type = payload_type.AsCompound();
+    const intptr_t size = compound_type.SizeInBytes();
+    if (size <= 8 && Utils::IsPowerOfTwo(size)) {
+      if (cpu_regs_used < CallingConventions::kNumArgRegs) {
+        NativeLocations& multiple_locations =
+            *new (zone_) NativeLocations(zone_, 1);
+        const auto& type = *new (zone_) NativePrimitiveType(
+            PrimitiveTypeFromSizeInBytes(size));
+        multiple_locations.Add(new (zone_) NativeRegistersLocation(
+            zone_, type, type, AllocateCpuRegister()));
+        return *new (zone_)
+            MultipleNativeLocations(compound_type, multiple_locations);
+      }
+
+    } else if (size > 0) {
+      // Pointer in register if available, else pointer on stack.
+      const auto& pointer_type = *new (zone_) NativePrimitiveType(kFfiIntPtr);
+      const auto& pointer_location = AllocateArgument(pointer_type);
+      return *new (zone_)
+          PointerToMemoryLocation(pointer_location, compound_type);
     }
 
     return AllocateStack(payload_type);
   }
+#endif  // defined(TARGET_ARCH_X64) && defined(TARGET_OS_WINDOWS)
 
- private:
+#if defined(TARGET_ARCH_IA32)
+  const NativeLocation& AllocateCompound(
+      const NativeCompoundType& payload_type) {
+    return AllocateStack(payload_type);
+  }
+#endif  // defined(TARGET_ARCH_IA32)
+
+#if defined(TARGET_ARCH_ARM)
+  // Transfer homogeneous floats in FPU registers, and allocate the rest
+  // in 4- or 8-byte chunks in registers and on the stack.
+  const NativeLocation& AllocateCompound(
+      const NativeCompoundType& payload_type) {
+    const auto& compound_type = payload_type.AsCompound();
+    if (compound_type.ContainsHomogenuousFloats() && !SoftFpAbi() &&
+        compound_type.members().length() <= 4) {
+      const auto& elem_type = *(compound_type.members().At(0));
+      const intptr_t size = compound_type.SizeInBytes();
+      const intptr_t elem_size = compound_type.members().At(0)->SizeInBytes();
+      const auto reg_kind = FpuRegisterKindFromSize(elem_size);
+      ASSERT(size % elem_size == 0);
+      const intptr_t num_registers = size / elem_size;
+      const intptr_t first_reg =
+          FirstFreeFpuRegisterIndex(reg_kind, num_registers);
+      if (first_reg != kNoFpuRegister) {
+        AllocateFpuRegisterAtIndex(reg_kind, first_reg, num_registers);
+
+        NativeLocations& multiple_locations =
+            *new (zone_) NativeLocations(zone_, num_registers);
+        for (int i = 0; i < num_registers; i++) {
+          const intptr_t reg_index = first_reg + i;
+          multiple_locations.Add(new (zone_) NativeFpuRegistersLocation(
+              elem_type, elem_type, reg_kind, reg_index));
+        }
+        return *new (zone_)
+            MultipleNativeLocations(compound_type, multiple_locations);
+
+      } else {
+        BlockAllFpuRegisters();
+        return AllocateStack(payload_type);
+      }
+    } else {
+      const intptr_t chunk_size = payload_type.AlignmentInBytesStack();
+      ASSERT(chunk_size == 4 || chunk_size == 8);
+      const intptr_t size_rounded =
+          Utils::RoundUp(payload_type.SizeInBytes(), chunk_size);
+      const intptr_t num_chunks = size_rounded / chunk_size;
+      const auto& chunk_type =
+          *new (zone_) NativePrimitiveType(chunk_size == 4 ? kInt32 : kInt64);
+
+      NativeLocations& multiple_locations =
+          *new (zone_) NativeLocations(zone_, num_chunks);
+      for (int i = 0; i < num_chunks; i++) {
+        const auto& allocated_chunk = &AllocateArgument(chunk_type);
+        // The last chunk should not be 8 bytes if the struct only has 4
+        // remaining bytes to be allocated.
+        if (i == num_chunks - 1 && chunk_size == 8 &&
+            Utils::RoundUp(payload_type.SizeInBytes(), 4) % 8 == 4) {
+          const auto& small_chunk_type = *new (zone_) NativePrimitiveType(
+              kInt32);  // chunk_size is 8 here, so the remainder is 4 bytes.
+          multiple_locations.Add(&allocated_chunk->WithOtherNativeType(
+              zone_, small_chunk_type, small_chunk_type));
+        } else {
+          multiple_locations.Add(allocated_chunk);
+        }
+      }
+      return *new (zone_)
+          MultipleNativeLocations(compound_type, multiple_locations);
+    }
+  }
+#endif  // defined(TARGET_ARCH_ARM)
+
+#if defined(TARGET_ARCH_ARM64)
+  // Slightly different from Arm32: FPU registers do not alias the same way,
+  // structs up to 16 bytes block the remaining registers if they do not fit
+  // in registers, and larger structs are passed as a pointer to memory.
+  const NativeLocation& AllocateCompound(
+      const NativeCompoundType& payload_type) {
+    const auto& compound_type = payload_type.AsCompound();
+    const intptr_t size = compound_type.SizeInBytes();
+    if (compound_type.ContainsHomogenuousFloats() &&
+        compound_type.members().length() <= 4) {
+      const auto& elem_type = *(compound_type.members().At(0));
+      const intptr_t elem_size = compound_type.members().At(0)->SizeInBytes();
+      const auto reg_kind = kQuadFpuReg;
+      ASSERT(size % elem_size == 0);
+      const intptr_t num_registers = size / elem_size;
+      const intptr_t first_reg =
+          FirstFreeFpuRegisterIndex(reg_kind, num_registers);
+      if (first_reg != kNoFpuRegister) {
+        AllocateFpuRegisterAtIndex(reg_kind, first_reg, num_registers);
+
+        NativeLocations& multiple_locations =
+            *new (zone_) NativeLocations(zone_, num_registers);
+        for (int i = 0; i < num_registers; i++) {
+          const intptr_t reg_index = first_reg + i;
+          multiple_locations.Add(new (zone_) NativeFpuRegistersLocation(
+              elem_type, elem_type, reg_kind, reg_index));
+        }
+        return *new (zone_)
+            MultipleNativeLocations(compound_type, multiple_locations);
+      }
+      BlockAllFpuRegisters();
+      return AllocateStack(payload_type);
+    }
+
+    if (size <= 16) {
+      const intptr_t required_regs = size / 8;
+      const bool regs_available =
+          cpu_regs_used + required_regs <= CallingConventions::kNumArgRegs;
+
+      if (regs_available) {
+        const intptr_t size_rounded =
+            Utils::RoundUp(payload_type.SizeInBytes(), 8);
+        const intptr_t num_chunks = size_rounded / 8;
+        const auto& chunk_type = *new (zone_) NativePrimitiveType(kInt64);
+
+        NativeLocations& multiple_locations =
+            *new (zone_) NativeLocations(zone_, num_chunks);
+        for (int i = 0; i < num_chunks; i++) {
+          const auto& allocated_chunk = &AllocateArgument(chunk_type);
+          multiple_locations.Add(allocated_chunk);
+        }
+        return *new (zone_)
+            MultipleNativeLocations(compound_type, multiple_locations);
+
+      } else {
+        // Block all CPU registers.
+        cpu_regs_used = CallingConventions::kNumArgRegs;
+        return AllocateStack(payload_type);
+      }
+    }
+
+    const auto& pointer_location =
+        AllocateArgument(*new (zone_) NativePrimitiveType(kInt64));
+    return *new (zone_)
+        PointerToMemoryLocation(pointer_location, compound_type);
+  }
+#endif  // defined(TARGET_ARCH_ARM64)
+
   static FpuRegisterKind FpuRegKind(const NativeType& payload_type) {
 #if defined(TARGET_ARCH_ARM)
     return FpuRegisterKindFromSize(payload_type.SizeInBytes());
@@ -164,13 +401,13 @@
   }
 
   // If no register is free, returns -1.
-  int FirstFreeFpuRegisterIndex(FpuRegisterKind kind) {
+  int FirstFreeFpuRegisterIndex(FpuRegisterKind kind, int amount = 1) {
     const intptr_t size = SizeFromFpuRegisterKind(kind) / 4;
     ASSERT(size == 1 || size == 2 || size == 4);
     if (fpu_reg_parts_used == -1) return kNoFpuRegister;
-    const intptr_t mask = (1 << size) - 1;
+    const intptr_t mask = (1 << (size * amount)) - 1;
     intptr_t index = 0;
-    while (index < NumFpuRegisters(kind)) {
+    while (index + amount <= NumFpuRegisters(kind)) {
       const intptr_t mask_shifted = mask << (index * size);
       if ((fpu_reg_parts_used & mask_shifted) == 0) {
         return index;
@@ -180,10 +417,12 @@
     return kNoFpuRegister;
   }
 
-  void AllocateFpuRegisterAtIndex(FpuRegisterKind kind, int index) {
+  void AllocateFpuRegisterAtIndex(FpuRegisterKind kind,
+                                  int index,
+                                  int amount = 1) {
     const intptr_t size = SizeFromFpuRegisterKind(kind) / 4;
     ASSERT(size == 1 || size == 2 || size == 4);
-    const intptr_t mask = (1 << size) - 1;
+    const intptr_t mask = (1 << (size * amount)) - 1;
     const intptr_t mask_shifted = (mask << (index * size));
     ASSERT((mask_shifted & fpu_reg_parts_used) == 0);
     fpu_reg_parts_used |= mask_shifted;
@@ -215,12 +454,24 @@
 // Location for the arguments of a C signature function.
 static NativeLocations& ArgumentLocations(
     Zone* zone,
-    const ZoneGrowableArray<const NativeType*>& arg_reps) {
+    const ZoneGrowableArray<const NativeType*>& arg_reps,
+    const NativeLocation& return_location) {
   intptr_t num_arguments = arg_reps.length();
   auto& result = *new (zone) NativeLocations(zone, num_arguments);
 
   // Loop through all arguments and assign a register or a stack location.
+  // Allocate result pointer for composite returns first.
   ArgumentAllocator frame_state(zone);
+#if !defined(TARGET_ARCH_ARM64)
+  // Arm64 allocates the pointer in R8, which is not an argument location.
+  if (return_location.IsPointerToMemory()) {
+    const auto& pointer_location =
+        return_location.AsPointerToMemory().pointer_location();
+    const auto& pointer_location_allocated =
+        frame_state.AllocateArgument(pointer_location.payload_type());
+    ASSERT(pointer_location.Equals(pointer_location_allocated));
+  }
+#endif
   for (intptr_t i = 0; i < num_arguments; i++) {
     const NativeType& rep = *arg_reps[i];
     result.Add(&frame_state.AllocateArgument(rep));
@@ -228,6 +479,192 @@
   return result;
 }
 
+#if !defined(TARGET_ARCH_IA32)
+static const NativeLocation& PointerToMemoryResultLocation(
+    Zone* zone,
+    const NativeCompoundType& payload_type) {
+  const auto& pointer_type = *new (zone) NativePrimitiveType(kFfiIntPtr);
+  const auto& pointer_location = *new (zone) NativeRegistersLocation(
+      zone, pointer_type, pointer_type,
+      CallingConventions::kPointerToReturnStructRegisterCall);
+  const auto& pointer_return_location = *new (zone) NativeRegistersLocation(
+      zone, pointer_type, pointer_type,
+      CallingConventions::kPointerToReturnStructRegisterReturn);
+  return *new (zone) PointerToMemoryLocation(
+      pointer_location, pointer_return_location, payload_type);
+}
+#endif  // !defined(TARGET_ARCH_IA32)
+
+#if defined(TARGET_ARCH_IA32)
+// ia32 passes pointers to result locations on the stack.
+static const NativeLocation& PointerToMemoryResultLocation(
+    Zone* zone,
+    const NativeCompoundType& payload_type) {
+  const auto& pointer_type = *new (zone) NativePrimitiveType(kFfiIntPtr);
+  const auto& pointer_location = *new (zone) NativeStackLocation(
+      pointer_type, pointer_type, CallingConventions::kStackPointerRegister, 0);
+  const auto& pointer_return_location = *new (zone) NativeRegistersLocation(
+      zone, pointer_type, pointer_type,
+      CallingConventions::kPointerToReturnStructRegisterReturn);
+  return *new (zone) PointerToMemoryLocation(
+      pointer_location, pointer_return_location, payload_type);
+}
+#endif  // defined(TARGET_ARCH_IA32)
+
+#if defined(TARGET_ARCH_X64) && !defined(TARGET_OS_WINDOWS)
+static const NativeLocation& CompoundResultLocation(
+    Zone* zone,
+    const NativeCompoundType& payload_type) {
+  const intptr_t size = payload_type.SizeInBytes();
+  if (size <= 16 && size > 0) {
+    // Allocate the same as argument, but use return registers instead of
+    // argument registers.
+    NativeLocations& multiple_locations =
+        *new (zone) NativeLocations(zone, size > 8 ? 2 : 1);
+    intptr_t used_regs = 0;
+    intptr_t used_xmm_regs = 0;
+
+    const auto& double_type = *new (zone) NativePrimitiveType(kDouble);
+    const auto& int64_type = *new (zone) NativePrimitiveType(kInt64);
+
+    const bool first_half_in_xmm =
+        payload_type.ContainsOnlyFloats(0, Utils::Minimum<intptr_t>(size, 8));
+    if (first_half_in_xmm) {
+      multiple_locations.Add(new (zone) NativeFpuRegistersLocation(
+          double_type, double_type, kQuadFpuReg,
+          CallingConventions::kReturnFpuReg));
+      used_xmm_regs++;
+    } else {
+      multiple_locations.Add(new (zone) NativeRegistersLocation(
+          zone, int64_type, int64_type, CallingConventions::kReturnReg));
+      used_regs++;
+    }
+    if (size > 8) {
+      const bool second_half_in_xmm = payload_type.ContainsOnlyFloats(
+          8, Utils::Minimum<intptr_t>(size - 8, 8));
+      if (second_half_in_xmm) {
+        const FpuRegister reg = used_xmm_regs == 0
+                                    ? CallingConventions::kReturnFpuReg
+                                    : CallingConventions::kSecondReturnFpuReg;
+        multiple_locations.Add(new (zone) NativeFpuRegistersLocation(
+            double_type, double_type, kQuadFpuReg, reg));
+        used_xmm_regs++;
+      } else {
+        const Register reg = used_regs == 0
+                                 ? CallingConventions::kReturnReg
+                                 : CallingConventions::kSecondReturnReg;
+        multiple_locations.Add(new (zone) NativeRegistersLocation(
+            zone, int64_type, int64_type, reg));
+        used_regs++;
+      }
+    }
+    return *new (zone)
+        MultipleNativeLocations(payload_type, multiple_locations);
+  }
+  return PointerToMemoryResultLocation(zone, payload_type);
+}
+#endif  // defined(TARGET_ARCH_X64) && !defined(TARGET_OS_WINDOWS)
+
+#if defined(TARGET_ARCH_X64) && defined(TARGET_OS_WINDOWS)
+// If the struct fits in a single register, do that and sign-extend.
+// Otherwise, pass a pointer to memory.
+static const NativeLocation& CompoundResultLocation(
+    Zone* zone,
+    const NativeCompoundType& payload_type) {
+  const intptr_t size = payload_type.SizeInBytes();
+  if (size <= 8 && size > 0 && Utils::IsPowerOfTwo(size)) {
+    NativeLocations& multiple_locations = *new (zone) NativeLocations(zone, 1);
+    const auto& type =
+        *new (zone) NativePrimitiveType(PrimitiveTypeFromSizeInBytes(size));
+    multiple_locations.Add(new (zone) NativeRegistersLocation(
+        zone, type, type, CallingConventions::kReturnReg));
+    return *new (zone)
+        MultipleNativeLocations(payload_type, multiple_locations);
+  }
+  return PointerToMemoryResultLocation(zone, payload_type);
+}
+#endif  // defined(TARGET_ARCH_X64) && defined(TARGET_OS_WINDOWS)
+
+#if defined(TARGET_ARCH_IA32) && !defined(TARGET_OS_WINDOWS)
+static const NativeLocation& CompoundResultLocation(
+    Zone* zone,
+    const NativeCompoundType& payload_type) {
+  return PointerToMemoryResultLocation(zone, payload_type);
+}
+#endif  // defined(TARGET_ARCH_IA32) && !defined(TARGET_OS_WINDOWS)
+
+#if defined(TARGET_ARCH_IA32) && defined(TARGET_OS_WINDOWS)
+// Windows uses up to two return registers, while Linux does not.
+static const NativeLocation& CompoundResultLocation(
+    Zone* zone,
+    const NativeCompoundType& payload_type) {
+  const intptr_t size = payload_type.SizeInBytes();
+  if (size <= 8 && Utils::IsPowerOfTwo(size)) {
+    NativeLocations& multiple_locations =
+        *new (zone) NativeLocations(zone, size > 4 ? 2 : 1);
+    const auto& type = *new (zone) NativePrimitiveType(kUint32);
+    multiple_locations.Add(new (zone) NativeRegistersLocation(
+        zone, type, type, CallingConventions::kReturnReg));
+    if (size > 4) {
+      multiple_locations.Add(new (zone) NativeRegistersLocation(
+          zone, type, type, CallingConventions::kSecondReturnReg));
+    }
+    return *new (zone)
+        MultipleNativeLocations(payload_type, multiple_locations);
+  }
+  return PointerToMemoryResultLocation(zone, payload_type);
+}
+#endif  // defined(TARGET_ARCH_IA32) && defined(TARGET_OS_WINDOWS)
+
+#if defined(TARGET_ARCH_ARM)
+// Arm returns homogeneous float values in FPU registers and small
+// composites (<= 4 bytes) in a single integer register. Anything else is
+// stored into the memory location passed in by pointer.
+static const NativeLocation& CompoundResultLocation(
+    Zone* zone,
+    const NativeCompoundType& payload_type) {
+  const intptr_t num_members = payload_type.members().length();
+  if (payload_type.ContainsHomogenuousFloats() && !SoftFpAbi() &&
+      num_members <= 4) {
+    NativeLocations& multiple_locations =
+        *new (zone) NativeLocations(zone, num_members);
+    for (intptr_t i = 0; i < num_members; i++) {
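+      // Homogeneous floats: every member has the same primitive type, so
+      // member 0 is representative for each part.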
+      const auto& member = payload_type.members().At(0)->AsPrimitive();
+      multiple_locations.Add(new (zone) NativeFpuRegistersLocation(
+          member, member, FpuRegisterKindFromSize(member.SizeInBytes()), i));
+    }
+    return *new (zone)
+        MultipleNativeLocations(payload_type, multiple_locations);
+  }
+  const intptr_t size = payload_type.SizeInBytes();
+  if (size <= 4) {
+    NativeLocations& multiple_locations = *new (zone) NativeLocations(zone, 1);
+    const auto& type = *new (zone) NativePrimitiveType(kUint32);
+    multiple_locations.Add(new (zone)
+                               NativeRegistersLocation(zone, type, type, R0));
+    return *new (zone)
+        MultipleNativeLocations(payload_type, multiple_locations);
+  }
+  return PointerToMemoryResultLocation(zone, payload_type);
+}
+#endif  // defined(TARGET_ARCH_ARM)
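For illustration (compare struct16bytesHomogenousx10/arm_linux.expect and
struct3bytesx10/arm_android.expect below):

    struct F4 { float a, b, c, d; };  // homogeneous floats, hardfp:
                                      //   M(s0 float, s1 float, s2 float, s3 float)
    struct S3 { char a, b, c; };      // 3 bytes:
                                      //   M(r0 uint32)
    // Everything else is returned through the pointer in r0: P(r0 uint32).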
+
+#if defined(TARGET_ARCH_ARM64)
+// If the compound would be allocated to integer or FPU registers as an
+// argument, it is returned the same way. Otherwise, a pointer to the result
+// location is passed in.
+static const NativeLocation& CompoundResultLocation(
+    Zone* zone,
+    const NativeCompoundType& payload_type) {
+  ArgumentAllocator frame_state(zone);
+  const auto& location_as_argument = frame_state.AllocateArgument(payload_type);
+  if (!location_as_argument.IsStack() &&
+      !location_as_argument.IsPointerToMemory()) {
+    return location_as_argument;
+  }
+  return PointerToMemoryResultLocation(zone, payload_type);
+}
+#endif  // defined(TARGET_ARCH_ARM64)
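For illustration, mirroring the argument allocation (compare the arm64
*.expect files below):

    struct F4 { float a, b, c, d; };  // FPU registers as argument:
                                      //   returned in M(v0 ... v3 float)
    struct Big { int64_t a[16]; };    // stack as argument:
                                      //   returned via the pointer in r8,
                                      //   printed as P(r8 int64)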
+
 // Location for the result of a C signature function.
 static const NativeLocation& ResultLocation(Zone* zone,
                                             const NativeType& payload_type) {
@@ -236,29 +673,38 @@
       CallingConventions::kReturnRegisterExtension == kExtendedTo4
           ? payload_type_converted.WidenTo4Bytes(zone)
           : payload_type_converted;
+
   if (container_type.IsFloat()) {
     return *new (zone) NativeFpuRegistersLocation(
         payload_type, container_type, CallingConventions::kReturnFpuReg);
   }
 
-  ASSERT(container_type.IsInt() || container_type.IsVoid());
-  if (container_type.SizeInBytes() == 8 && target::kWordSize == 4) {
+  if (container_type.IsInt() || container_type.IsVoid()) {
+    if (container_type.SizeInBytes() == 8 && target::kWordSize == 4) {
+      return *new (zone) NativeRegistersLocation(
+          zone, payload_type, container_type, CallingConventions::kReturnReg,
+          CallingConventions::kSecondReturnReg);
+    }
+
+    ASSERT(container_type.SizeInBytes() <= target::kWordSize);
     return *new (zone) NativeRegistersLocation(
-        zone, payload_type, container_type, CallingConventions::kReturnReg,
-        CallingConventions::kSecondReturnReg);
+        zone, payload_type, container_type, CallingConventions::kReturnReg);
   }
 
-  ASSERT(container_type.SizeInBytes() <= target::kWordSize);
-  return *new (zone) NativeRegistersLocation(zone, payload_type, container_type,
-                                             CallingConventions::kReturnReg);
+  // Compounds are laid out differently per ABI, so their result locations
+  // are implemented per ABI.
+  const auto& compound_type = payload_type.AsCompound();
+  return CompoundResultLocation(zone, compound_type);
 }
 
 const NativeCallingConvention& NativeCallingConvention::FromSignature(
     Zone* zone,
     const NativeFunctionType& signature) {
-  const auto& argument_locations =
-      ArgumentLocations(zone, signature.argument_types());
+  // With struct return values, a pointer to the return value may occupy an
+  // argument position. Hence, allocate the return value location first.
   const auto& return_location = ResultLocation(zone, signature.return_type());
+  const auto& argument_locations =
+      ArgumentLocations(zone, signature.argument_types(), return_location);
   return *new (zone)
       NativeCallingConvention(argument_locations, return_location);
 }
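For illustration of that ordering, from struct128bytesx1/x64_linux.expect
below: the hidden result pointer claims rdi first, so the int32 argument
shifts to rsi instead of edi:

    S+0 Compound(size: 128)
    rsi int32
    =>
    P(rdi int64, ret:rax int64) Compound(size: 128)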
diff --git a/runtime/vm/compiler/ffi/native_calling_convention_test.cc b/runtime/vm/compiler/ffi/native_calling_convention_test.cc
index aed5fc7..8b27760 100644
--- a/runtime/vm/compiler/ffi/native_calling_convention_test.cc
+++ b/runtime/vm/compiler/ffi/native_calling_convention_test.cc
@@ -80,6 +80,221 @@
   RunSignatureTest(Z, "floatx10", arguments, floatType);
 }
 
+// Test with a 3-byte struct.
+//
+// On ia32, the result pointer is passed on the stack and passed back in eax.
+//
+// On x64, the struct is passed and returned in registers, except on Windows,
+// where it is passed on the stack because its size is not a power of two.
+//
+// See the *.expect in ./unit_tests for this behavior.
+UNIT_TEST_CASE_WITH_ZONE(NativeCallingConvention_struct3bytesx10) {
+  const auto& int8type = *new (Z) NativePrimitiveType(kInt8);
+
+  auto& member_types = *new (Z) NativeTypes(Z, 3);
+  member_types.Add(&int8type);
+  member_types.Add(&int8type);
+  member_types.Add(&int8type);
+  const auto& struct_type =
+      NativeCompoundType::FromNativeTypes(Z, member_types);
+
+  auto& arguments = *new (Z) NativeTypes(Z, 10);
+  arguments.Add(&struct_type);
+  arguments.Add(&struct_type);
+  arguments.Add(&struct_type);
+  arguments.Add(&struct_type);
+  arguments.Add(&struct_type);
+  arguments.Add(&struct_type);
+  arguments.Add(&struct_type);
+  arguments.Add(&struct_type);
+  arguments.Add(&struct_type);
+  arguments.Add(&struct_type);
+
+  RunSignatureTest(Z, "struct3bytesx10", arguments, struct_type);
+}
+
+// Test with a homogeneous float struct.
+//
+// On arm softfp, the return pointer is passed in the first int register, and
+// the first struct is passed in the next 3 registers and 1 stack slot.
+//
+// On arm hardfp, arm64, and non-Windows x64, the structs are passed in FPU
+// registers until those are exhausted, the rest are passed on the stack, and
+// the struct is returned in FPU registers.
+//
+// On ia32, the return pointer and all arguments are passed on the stack.
+//
+// On Windows x64, the structs are passed by pointer, and a pointer to the
+// return value is passed in.
+//
+// See the *.expect in ./unit_tests for this behavior.
+UNIT_TEST_CASE_WITH_ZONE(NativeCallingConvention_struct16bytesHomogenousx10) {
+  const auto& float_type = *new (Z) NativePrimitiveType(kFloat);
+  const auto& int8type = *new (Z) NativePrimitiveType(kInt8);
+
+  // If passed in FPU registers, this uses an even number of them.
+  auto& member_types = *new (Z) NativeTypes(Z, 4);
+  member_types.Add(&float_type);
+  member_types.Add(&float_type);
+  member_types.Add(&float_type);
+  member_types.Add(&float_type);
+  const auto& struct_type =
+      NativeCompoundType::FromNativeTypes(Z, member_types);
+
+  auto& arguments = *new (Z) NativeTypes(Z, 13);
+  arguments.Add(&struct_type);
+  arguments.Add(&float_type);  // Claim a single FPU register.
+  arguments.Add(&struct_type);
+  arguments.Add(&struct_type);
+  arguments.Add(&struct_type);
+  arguments.Add(&struct_type);
+  arguments.Add(&struct_type);
+  arguments.Add(&struct_type);
+  arguments.Add(&struct_type);
+  arguments.Add(&struct_type);
+  arguments.Add(&float_type);   // Check float register back filling, if any.
+  arguments.Add(&int8type);     // Check integer register back filling, if any.
+  arguments.Add(&struct_type);  // Check stack alignment of struct.
+
+  RunSignatureTest(Z, "struct16bytesHomogenousx10", arguments, struct_type);
+}
+
+// A fairly big struct.
+//
+// On arm, it is split up into 8-byte chunks. The first chunk goes into two
+// registers, the rest onto the stack. Note that r1 goes unused and is not
+// backfilled.
+//
+// On arm64 and Windows x64, it is passed by pointer to a copy.
+//
+// On ia32, it is passed entirely on the stack.
+//
+// On non-Windows x64, it is passed entirely on the stack, and the integer
+// argument backfills a still-unoccupied integer register.
+//
+// See the *.expect in ./unit_tests for this behavior.
+UNIT_TEST_CASE_WITH_ZONE(NativeCallingConvention_struct128bytesx1) {
+  const auto& int32_type = *new (Z) NativePrimitiveType(kInt32);
+  const auto& int64_type = *new (Z) NativePrimitiveType(kInt64);
+
+  auto& member_types = *new (Z) NativeTypes(Z, 16);
+  member_types.Add(&int64_type);
+  member_types.Add(&int64_type);
+  member_types.Add(&int64_type);
+  member_types.Add(&int64_type);
+  member_types.Add(&int64_type);
+  member_types.Add(&int64_type);
+  member_types.Add(&int64_type);
+  member_types.Add(&int64_type);
+  member_types.Add(&int64_type);
+  member_types.Add(&int64_type);
+  member_types.Add(&int64_type);
+  member_types.Add(&int64_type);
+  member_types.Add(&int64_type);
+  member_types.Add(&int64_type);
+  member_types.Add(&int64_type);
+  member_types.Add(&int64_type);
+  const auto& struct_type =
+      NativeCompoundType::FromNativeTypes(Z, member_types);
+
+  auto& arguments = *new (Z) NativeTypes(Z, 2);
+  arguments.Add(&struct_type);
+  arguments.Add(&int32_type);  // Check integer register backfilling, if any.
+
+  RunSignatureTest(Z, "struct128bytesx1", arguments, struct_type);
+}
+
+#if defined(TARGET_ARCH_X64)
+// On non-Windows x64, a struct can be spread over an FPU register and an
+// integer register.
+//
+// See the *.expect in ./unit_tests for this behavior.
+UNIT_TEST_CASE_WITH_ZONE(NativeCallingConvention_struct16bytesMixedx10) {
+  const auto& float_type = *new (Z) NativePrimitiveType(kFloat);
+  const auto& int32_type = *new (Z) NativePrimitiveType(kInt32);
+
+  auto& member_types = *new (Z) NativeTypes(Z, 4);
+  member_types.Add(&float_type);
+  member_types.Add(&float_type);
+  member_types.Add(&int32_type);
+  member_types.Add(&int32_type);
+  const auto& struct_type =
+      NativeCompoundType::FromNativeTypes(Z, member_types);
+
+  auto& arguments = *new (Z) NativeTypes(Z, 11);
+  arguments.Add(&struct_type);
+  arguments.Add(&struct_type);
+  arguments.Add(&struct_type);
+  arguments.Add(&struct_type);
+  arguments.Add(&struct_type);
+  arguments.Add(&struct_type);
+  arguments.Add(&struct_type);  // Integer registers exhausted, on stack.
+  arguments.Add(&struct_type);
+  arguments.Add(&struct_type);
+  arguments.Add(&struct_type);
+  arguments.Add(&float_type);  // Use remaining FPU register.
+
+  RunSignatureTest(Z, "struct16bytesMixedx10", arguments, struct_type);
+}
+
+// On non-Windows x64, a struct can be spread over an FPU register and an
+// integer register.
+//
+// See the *.expect in ./unit_tests for this behavior.
+UNIT_TEST_CASE_WITH_ZONE(NativeCallingConvention_struct16bytesMixedx10_2) {
+  const auto& float_type = *new (Z) NativePrimitiveType(kFloat);
+  const auto& int32_type = *new (Z) NativePrimitiveType(kInt32);
+
+  auto& member_types = *new (Z) NativeTypes(Z, 4);
+  member_types.Add(&float_type);
+  member_types.Add(&float_type);
+  member_types.Add(&int32_type);
+  member_types.Add(&int32_type);
+  const auto& struct_type =
+      NativeCompoundType::FromNativeTypes(Z, member_types);
+
+  auto& arguments = *new (Z) NativeTypes(Z, 15);
+  arguments.Add(&float_type);
+  arguments.Add(&float_type);
+  arguments.Add(&float_type);
+  arguments.Add(&float_type);
+  arguments.Add(&struct_type);
+  arguments.Add(&struct_type);
+  arguments.Add(&struct_type);
+  arguments.Add(&struct_type);
+  arguments.Add(&struct_type);  // FPU registers exhausted, on stack.
+  arguments.Add(&struct_type);
+  arguments.Add(&struct_type);
+  arguments.Add(&struct_type);
+  arguments.Add(&struct_type);
+  arguments.Add(&struct_type);
+  arguments.Add(&int32_type);  // Use remaining integer register.
+
+  RunSignatureTest(Z, "struct16bytesMixedx10_2", arguments, struct_type);
+}
+#endif  // defined(TARGET_ARCH_X64)
+
+// On ia32 Windows, a struct can be returned in registers; on non-Windows
+// ia32 it cannot.
+//
+// See the *.expect in ./unit_tests for this behavior.
+UNIT_TEST_CASE_WITH_ZONE(NativeCallingConvention_struct8bytesx1) {
+  const auto& int8type = *new (Z) NativePrimitiveType(kInt8);
+
+  auto& member_types = *new (Z) NativeTypes(Z, 8);
+  member_types.Add(&int8type);
+  member_types.Add(&int8type);
+  member_types.Add(&int8type);
+  member_types.Add(&int8type);
+  member_types.Add(&int8type);
+  member_types.Add(&int8type);
+  member_types.Add(&int8type);
+  member_types.Add(&int8type);
+  const auto& struct_type =
+      NativeCompoundType::FromNativeTypes(Z, member_types);
+
+  auto& arguments = *new (Z) NativeTypes(Z, 1);
+  arguments.Add(&struct_type);
+
+  RunSignatureTest(Z, "struct8bytesx1", arguments, struct_type);
+}
+
 }  // namespace ffi
 }  // namespace compiler
 }  // namespace dart
diff --git a/runtime/vm/compiler/ffi/native_location.cc b/runtime/vm/compiler/ffi/native_location.cc
index 12439aa..f757a31 100644
--- a/runtime/vm/compiler/ffi/native_location.cc
+++ b/runtime/vm/compiler/ffi/native_location.cc
@@ -24,7 +24,6 @@
       break;
   }
   if (loc.IsPairLocation()) {
-    // TODO(36730): We could possibly consume a pair location as struct.
     return false;
   }
   return false;
@@ -33,7 +32,6 @@
 NativeLocation& NativeLocation::FromLocation(Zone* zone,
                                              Location loc,
                                              Representation rep) {
-  // TODO(36730): We could possibly consume a pair location as struct.
   ASSERT(LocationCanBeExpressed(loc, rep));
 
   const NativeType& native_rep =
@@ -61,7 +59,6 @@
   UNREACHABLE();
 }
 
-// TODO(36730): Remove when being able to consume as struct.
 NativeLocation& NativeLocation::FromPairLocation(Zone* zone,
                                                  Location pair_loc,
                                                  Representation pair_rep,
@@ -92,6 +89,16 @@
   return static_cast<const NativeStackLocation&>(*this);
 }
 
+const MultipleNativeLocations& NativeLocation::AsMultiple() const {
+  ASSERT(IsMultiple());
+  return static_cast<const MultipleNativeLocations&>(*this);
+}
+
+const PointerToMemoryLocation& NativeLocation::AsPointerToMemory() const {
+  ASSERT(IsPointerToMemory());
+  return static_cast<const PointerToMemoryLocation&>(*this);
+}
+
 #if !defined(FFI_UNIT_TESTS)
 Location NativeRegistersLocation::AsLocation() const {
   ASSERT(IsExpressibleAsLocation());
@@ -132,21 +139,46 @@
 #endif
 
 NativeRegistersLocation& NativeRegistersLocation::Split(Zone* zone,
+                                                        intptr_t num_parts,
                                                         intptr_t index) const {
-  ASSERT(num_regs() == 2);
+  ASSERT(num_parts == 2);
+  ASSERT(num_regs() == num_parts);
   return *new (zone) NativeRegistersLocation(
       zone, payload_type().Split(zone, index),
       container_type().Split(zone, index), reg_at(index));
 }
 
 NativeStackLocation& NativeStackLocation::Split(Zone* zone,
+                                                intptr_t num_parts,
                                                 intptr_t index) const {
-  ASSERT(index == 0 || index == 1);
   const intptr_t size = payload_type().SizeInBytes();
 
-  return *new (zone) NativeStackLocation(
-      payload_type().Split(zone, index), container_type().Split(zone, index),
-      base_register_, offset_in_bytes_ + size / 2 * index);
+  if (payload_type().IsPrimitive()) {
+    ASSERT(num_parts == 2);
+    return *new (zone) NativeStackLocation(
+        payload_type().Split(zone, index), container_type().Split(zone, index),
+        base_register_, offset_in_bytes_ + size / num_parts * index);
+  } else {
+    const intptr_t size_rounded_up =
+        Utils::RoundUp(size, compiler::target::kWordSize);
+    ASSERT(size_rounded_up / compiler::target::kWordSize == num_parts);
+
+    // Blocks of compiler::target::kWordSize.
+    return *new (zone) NativeStackLocation(
+        *new (zone) NativePrimitiveType(
+            compiler::target::kWordSize == 8 ? kInt64 : kInt32),
+        *new (zone) NativePrimitiveType(
+            compiler::target::kWordSize == 8 ? kInt64 : kInt32),
+        base_register_, offset_in_bytes_ + compiler::target::kWordSize * index);
+  }
+}
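For illustration of the compound branch above: a 12-byte compound at stack
offset 0 on a 32-bit target (kWordSize == 4) rounds up to 12 bytes, so
num_parts == 3, and each part is one word-sized chunk:

    loc.Split(zone, 3, 0);  // -> S+0 int32
    loc.Split(zone, 3, 1);  // -> S+4 int32
    loc.Split(zone, 3, 2);  // -> S+8 int32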
+
+intptr_t MultipleNativeLocations::StackTopInBytes() const {
+  intptr_t height = 0;
+  for (intptr_t i = 0; i < locations_.length(); i++) {
+    height = Utils::Maximum(height, locations_[i]->StackTopInBytes());
+  }
+  return height;
 }
 
 NativeLocation& NativeLocation::WidenTo4Bytes(Zone* zone) const {
@@ -210,6 +242,17 @@
   return other_stack.offset_in_bytes_ == offset_in_bytes_;
 }
 
+bool PointerToMemoryLocation::Equals(const NativeLocation& other) const {
+  if (!other.IsPointerToMemory()) {
+    return false;
+  }
+  const auto& other_pointer = other.AsPointerToMemory();
+  if (!other_pointer.pointer_location_.Equals(pointer_location_)) {
+    return false;
+  }
+  return other_pointer.payload_type().Equals(payload_type());
+}
+
 #if !defined(FFI_UNIT_TESTS)
 compiler::Address NativeLocationToStackSlotAddress(
     const NativeStackLocation& loc) {
@@ -219,10 +262,10 @@
 
 static void PrintRepresentations(BaseTextBuffer* f, const NativeLocation& loc) {
   f->AddString(" ");
-  loc.container_type().PrintTo(f);
+  loc.container_type().PrintTo(f, /*multi_line=*/false, /*verbose=*/false);
   if (!loc.container_type().Equals(loc.payload_type())) {
     f->AddString("[");
-    loc.payload_type().PrintTo(f);
+    loc.payload_type().PrintTo(f, /*multi_line=*/false, /*verbose=*/false);
     f->AddString("]");
   }
 }
@@ -238,7 +281,9 @@
   } else {
     f->AddString("(");
     for (intptr_t i = 0; i < num_regs(); i++) {
-      if (i != 0) f->Printf(", ");
+      if (i != 0) {
+        f->Printf(", ");
+      }
       f->Printf("%s", RegisterNames::RegisterName(regs_->At(i)));
     }
     f->AddString(")");
@@ -277,6 +322,27 @@
   return textBuffer.buffer();
 }
 
+void PointerToMemoryLocation::PrintTo(BaseTextBuffer* f) const {
+  f->Printf("P(");
+  pointer_location().PrintTo(f);
+  if (!pointer_location().Equals(pointer_return_location())) {
+    f->Printf(", ret:");
+    pointer_return_location().PrintTo(f);
+  }
+  f->Printf(")");
+  PrintRepresentations(f, *this);
+}
+
+void MultipleNativeLocations::PrintTo(BaseTextBuffer* f) const {
+  f->Printf("M(");
+  for (intptr_t i = 0; i < locations_.length(); i++) {
+    if (i != 0) {
+      f->Printf(", ");
+    }
+    locations_[i]->PrintTo(f);
+  }
+  f->Printf(")");
+  PrintRepresentations(f, *this);
+}
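A key for reading the printed locations in the new *.expect files below: each
line before the => is one argument location, and the line after it is the
return location. M(...) is a MultipleNativeLocations, P(loc) is a
PointerToMemoryLocation (with ret: showing where the pointer is passed back
when that differs), and S+n is a stack slot at byte offset n. For example:

    P(S+0 uint32, ret:eax uint32) Compound(size: 16)

is a pointer to a 16-byte compound, passed at stack offset 0 and passed back
in eax.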
+
 #if !defined(FFI_UNIT_TESTS)
 const char* NativeLocation::ToCString() const {
   return ToCString(Thread::Current()->zone());
diff --git a/runtime/vm/compiler/ffi/native_location.h b/runtime/vm/compiler/ffi/native_location.h
index 614798a..9108b21 100644
--- a/runtime/vm/compiler/ffi/native_location.h
+++ b/runtime/vm/compiler/ffi/native_location.h
@@ -30,6 +30,8 @@
 class NativeRegistersLocation;
 class NativeFpuRegistersLocation;
 class NativeStackLocation;
+class MultipleNativeLocations;
+class PointerToMemoryLocation;
 
 // NativeLocation objects are used in the FFI to describe argument and return
 // value locations in all native ABIs that the FFI supports.
@@ -95,6 +97,8 @@
   virtual bool IsRegisters() const { return false; }
   virtual bool IsFpuRegisters() const { return false; }
   virtual bool IsStack() const { return false; }
+  virtual bool IsMultiple() const { return false; }
+  virtual bool IsPointerToMemory() const { return false; }
 
   virtual bool IsExpressibleAsLocation() const { return false; }
 #if !defined(FFI_UNIT_TESTS)
@@ -113,13 +117,18 @@
   const NativeRegistersLocation& AsRegisters() const;
   const NativeFpuRegistersLocation& AsFpuRegisters() const;
   const NativeStackLocation& AsStack() const;
+  const MultipleNativeLocations& AsMultiple() const;
+  const PointerToMemoryLocation& AsPointerToMemory() const;
 
-  virtual NativeLocation& Split(Zone* zone, intptr_t index) const {
-    ASSERT(index == 0 || index == 1);
+  // Retrieve one part from this location when it is split into multiple parts.
+  virtual NativeLocation& Split(Zone* zone,
+                                intptr_t num_parts,
+                                intptr_t index) const {
     UNREACHABLE();
   }
 
-  // Return the top of the stack in bytes.
+  // Return the top of the stack in bytes. Recurses over its constituents in
+  // the case of MultipleNativeLocations.
   virtual intptr_t StackTopInBytes() const { return 0; }
 
   // Equality of location, ignores the payload and container native types.
@@ -187,7 +196,9 @@
   intptr_t num_regs() const { return regs_->length(); }
   Register reg_at(intptr_t index) const { return regs_->At(index); }
 
-  virtual NativeRegistersLocation& Split(Zone* zone, intptr_t index) const;
+  virtual NativeRegistersLocation& Split(Zone* zone,
+                                         intptr_t num_parts,
+                                         intptr_t index) const;
 
   virtual void PrintTo(BaseTextBuffer* f) const;
 
@@ -326,7 +337,9 @@
   }
 #endif
 
-  virtual NativeStackLocation& Split(Zone* zone, intptr_t index) const;
+  virtual NativeStackLocation& Split(Zone* zone,
+                                     intptr_t num_parts,
+                                     intptr_t index) const;
 
   virtual intptr_t StackTopInBytes() const {
     return offset_in_bytes() + container_type().SizeInBytes();
@@ -351,6 +364,96 @@
   DISALLOW_COPY_AND_ASSIGN(NativeStackLocation);
 };
 
+// The location of a pointer pointing to a compound.
+//
+// For arguments, a pointer to a copy of the object. The backing copy of the
+// object typically resides on the stack.
+//
+// For return values, a pointer to empty space that should hold the object.
+// This space also typically resides on the stack.
+class PointerToMemoryLocation : public NativeLocation {
+ public:
+  PointerToMemoryLocation(const NativeLocation& pointer_location,
+                          const NativeCompoundType& object_pointed_to)
+      : NativeLocation(object_pointed_to, object_pointed_to),
+        pointer_location_(pointer_location),
+        pointer_return_location_(pointer_location) {
+    ASSERT(pointer_location.IsRegisters() || pointer_location.IsStack());
+  }
+  PointerToMemoryLocation(const NativeLocation& pointer_location,
+                          const NativeLocation& pointer_return_location,
+                          const NativeCompoundType& object_pointed_to)
+      : NativeLocation(object_pointed_to, object_pointed_to),
+        pointer_location_(pointer_location),
+        pointer_return_location_(pointer_return_location) {
+    ASSERT(pointer_location.IsRegisters() || pointer_location.IsStack());
+  }
+
+  virtual ~PointerToMemoryLocation() {}
+
+  virtual bool IsPointerToMemory() const { return true; }
+
+  virtual void PrintTo(BaseTextBuffer* f) const;
+
+  virtual bool Equals(const NativeLocation& other) const;
+
+  virtual NativeLocation& WithOtherNativeType(
+      Zone* zone,
+      const NativeType& new_payload_type,
+      const NativeType& new_container_type) const {
+    UNREACHABLE();
+  }
+
+  virtual intptr_t StackTopInBytes() const {
+    return pointer_location().StackTopInBytes();
+  }
+
+  // The location where the pointer is passed to the function.
+  const NativeLocation& pointer_location() const { return pointer_location_; }
+
+  // The location where the pointer is returned from the function.
+  const NativeLocation& pointer_return_location() const {
+    return pointer_return_location_;
+  }
+
+ private:
+  const NativeLocation& pointer_location_;
+  // The return location is only in use for return values, not for arguments.
+  const NativeLocation& pointer_return_location_;
+
+  DISALLOW_COPY_AND_ASSIGN(PointerToMemoryLocation);
+};
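At the C level this models the usual hidden out-pointer lowering; a sketch,
with the register assignments as in the x64 System V expects:

    // Big make_big(void);        // as written in the signature
    // void make_big(Big* out);   // as lowered: out in rdi, passed back in rax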
+
+using NativeLocations = ZoneGrowableArray<const NativeLocation*>;
+
+// A struct broken up over multiple native locations.
+class MultipleNativeLocations : public NativeLocation {
+ public:
+  MultipleNativeLocations(const NativeCompoundType& payload_type,
+                          const NativeLocations& locations)
+      : NativeLocation(payload_type, payload_type), locations_(locations) {}
+  virtual ~MultipleNativeLocations() {}
+
+  virtual bool IsMultiple() const { return true; }
+
+  virtual void PrintTo(BaseTextBuffer* f) const;
+
+  virtual NativeLocation& WithOtherNativeType(
+      Zone* zone,
+      const NativeType& new_payload_type,
+      const NativeType& new_container_type) const {
+    UNREACHABLE();
+  }
+
+  virtual intptr_t StackTopInBytes() const;
+
+  const NativeLocations& locations() const { return locations_; }
+
+ private:
+  const NativeLocations& locations_;
+  DISALLOW_COPY_AND_ASSIGN(MultipleNativeLocations);
+};
+
 #if !defined(FFI_UNIT_TESTS)
 // Return a memory operand for stack slot locations.
 compiler::Address NativeLocationToStackSlotAddress(
diff --git a/runtime/vm/compiler/ffi/native_location_test.cc b/runtime/vm/compiler/ffi/native_location_test.cc
index ea1fb27..8ba10d9 100644
--- a/runtime/vm/compiler/ffi/native_location_test.cc
+++ b/runtime/vm/compiler/ffi/native_location_test.cc
@@ -28,8 +28,8 @@
   const auto& native_location =
       *new (Z) NativeStackLocation(native_type, native_type, SPREG, 0);
 
-  const auto& half_0 = native_location.Split(Z, 0);
-  const auto& half_1 = native_location.Split(Z, 1);
+  const auto& half_0 = native_location.Split(Z, 2, 0);
+  const auto& half_1 = native_location.Split(Z, 2, 1);
 
   EXPECT_EQ(0, half_0.offset_in_bytes());
   EXPECT_EQ(4, half_1.offset_in_bytes());
diff --git a/runtime/vm/compiler/ffi/native_type.cc b/runtime/vm/compiler/ffi/native_type.cc
index 20c33dd..1ffb1a2e 100644
--- a/runtime/vm/compiler/ffi/native_type.cc
+++ b/runtime/vm/compiler/ffi/native_type.cc
@@ -24,6 +24,23 @@
 
 namespace ffi {
 
+PrimitiveType PrimitiveTypeFromSizeInBytes(intptr_t size) {
+  ASSERT(size <= 8);
+  ASSERT(size > 0);
+  switch (size) {
+    case 1:
+      return kUint8;
+    case 2:
+      return kUint16;
+    case 4:
+      return kUint32;
+    case 8:
+      // Dart's unboxed representation is the same for signed and unsigned.
+      return kInt64;
+  }
+  UNREACHABLE();
+}
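For illustration, this is how the Windows x64 result path above consumes the
helper: a 2-byte compound maps to kUint16, so it is returned as a uint16
container in the return register.

    const auto& type =
        *new (zone) NativePrimitiveType(PrimitiveTypeFromSizeInBytes(2));
    // type is now the kUint16 primitive type.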
+
 const NativePrimitiveType& NativeType::AsPrimitive() const {
   ASSERT(IsPrimitive());
   return static_cast<const NativePrimitiveType&>(*this);
@@ -320,20 +337,8 @@
   }
 }
 
-static bool IsPredefinedFfiCid(classid_t class_id) {
-  switch (class_id) {
-#define CASE_FFI_CID_TRUE(name)                                                \
-  case kFfi##name##Cid:                                                        \
-    return true;
-    CLASS_LIST_FFI(CASE_FFI_CID_TRUE)
-    default:
-      return false;
-  }
-  UNREACHABLE();
-}
-
 NativeType& NativeType::FromTypedDataClassId(Zone* zone, classid_t class_id) {
-  ASSERT(IsPredefinedFfiCid(class_id));
+  ASSERT(IsFfiPredefinedClassId(class_id));
   const auto fundamental_rep = TypeRepresentation(class_id);
   return *new (zone) NativePrimitiveType(fundamental_rep);
 }
@@ -341,7 +346,7 @@
 #if !defined(FFI_UNIT_TESTS)
 NativeType& NativeType::FromAbstractType(Zone* zone, const AbstractType& type) {
   const classid_t class_id = type.type_class_id();
-  if (IsPredefinedFfiCid(class_id)) {
+  if (IsFfiPredefinedClassId(class_id)) {
     return NativeType::FromTypedDataClassId(zone, class_id);
   }
 
@@ -394,15 +399,17 @@
 }
 #endif  // !defined(DART_PRECOMPILED_RUNTIME) && !defined(FFI_UNIT_TESTS)
 
-const char* NativeType::ToCString(Zone* zone, bool multi_line) const {
+const char* NativeType::ToCString(Zone* zone,
+                                  bool multi_line,
+                                  bool verbose) const {
   ZoneTextBuffer textBuffer(zone);
-  PrintTo(&textBuffer, multi_line);
+  PrintTo(&textBuffer, multi_line, verbose);
   return textBuffer.buffer();
 }
 
 #if !defined(FFI_UNIT_TESTS)
-const char* NativeType::ToCString(bool multi_line) const {
-  return ToCString(Thread::Current()->zone(), multi_line);
+const char* NativeType::ToCString() const {
+  return ToCString(Thread::Current()->zone());
 }
 #endif
 
@@ -437,11 +444,15 @@
   }
 }
 
-void NativeType::PrintTo(BaseTextBuffer* f, bool multi_line) const {
+void NativeType::PrintTo(BaseTextBuffer* f,
+                         bool multi_line,
+                         bool verbose) const {
   f->AddString("I");
 }
 
-void NativePrimitiveType::PrintTo(BaseTextBuffer* f, bool multi_line) const {
+void NativePrimitiveType::PrintTo(BaseTextBuffer* f,
+                                  bool multi_line,
+                                  bool verbose) const {
   f->Printf("%s", PrimitiveTypeToCString(representation_));
 }
 
@@ -451,30 +462,35 @@
   return textBuffer.buffer();
 }
 
-void NativeCompoundType::PrintTo(BaseTextBuffer* f, bool multi_line) const {
+void NativeCompoundType::PrintTo(BaseTextBuffer* f,
+                                 bool multi_line,
+                                 bool verbose) const {
   f->AddString("Compound(");
-  f->Printf("size: %" Pd ", ", SizeInBytes());
-  f->Printf("field alignment: %" Pd ", ", AlignmentInBytesField());
-  f->Printf("stack alignment: %" Pd ", ", AlignmentInBytesStack());
-  f->AddString("members: {");
-  if (multi_line) {
-    f->AddString("\n  ");
-  }
-  for (intptr_t i = 0; i < members_.length(); i++) {
-    if (i > 0) {
-      if (multi_line) {
-        f->AddString(",\n  ");
-      } else {
-        f->AddString(", ");
-      }
+  f->Printf("size: %" Pd "", SizeInBytes());
+  if (verbose) {
+    f->Printf(", field alignment: %" Pd ", ", AlignmentInBytesField());
+    f->Printf("stack alignment: %" Pd ", ", AlignmentInBytesStack());
+    f->AddString("members: {");
+    if (multi_line) {
+      f->AddString("\n  ");
     }
-    f->Printf("%" Pd ": ", member_offsets_[i]);
-    members_[i]->PrintTo(f);
+    for (intptr_t i = 0; i < members_.length(); i++) {
+      if (i > 0) {
+        if (multi_line) {
+          f->AddString(",\n  ");
+        } else {
+          f->AddString(", ");
+        }
+      }
+      f->Printf("%" Pd ": ", member_offsets_[i]);
+      members_[i]->PrintTo(f);
+    }
+    if (multi_line) {
+      f->AddString("\n");
+    }
+    f->AddString("}");
   }
-  if (multi_line) {
-    f->AddString("\n");
-  }
-  f->AddString("})");
+  f->AddString(")");
   if (multi_line) {
     f->AddString("\n");
   }
diff --git a/runtime/vm/compiler/ffi/native_type.h b/runtime/vm/compiler/ffi/native_type.h
index eaf4129..140c54f 100644
--- a/runtime/vm/compiler/ffi/native_type.h
+++ b/runtime/vm/compiler/ffi/native_type.h
@@ -107,10 +107,14 @@
   // Otherwise, return original representation.
   const NativeType& WidenTo4Bytes(Zone* zone) const;
 
-  virtual void PrintTo(BaseTextBuffer* f, bool multi_line = false) const;
-  const char* ToCString(Zone* zone, bool multi_line = false) const;
+  virtual void PrintTo(BaseTextBuffer* f,
+                       bool multi_line = false,
+                       bool verbose = true) const;
+  const char* ToCString(Zone* zone,
+                        bool multi_line = false,
+                        bool verbose = true) const;
 #if !defined(FFI_UNIT_TESTS)
-  const char* ToCString(bool multi_line = false) const;
+  const char* ToCString() const;
 #endif
 
   virtual ~NativeType() {}
@@ -135,6 +139,8 @@
   // TODO(37470): Add packed data structures.
 };
 
+PrimitiveType PrimitiveTypeFromSizeInBytes(intptr_t size);
+
 // Represents a primitive native type.
 //
 // These are called object types in the C standard (ISO/IEC 9899:2011) and
@@ -167,7 +173,9 @@
   virtual bool Equals(const NativeType& other) const;
   virtual NativePrimitiveType& Split(Zone* zone, intptr_t part) const;
 
-  virtual void PrintTo(BaseTextBuffer* f, bool multi_line = false) const;
+  virtual void PrintTo(BaseTextBuffer* f,
+                       bool multi_line = false,
+                       bool verbose = true) const;
 
   virtual ~NativePrimitiveType() {}
 
@@ -200,7 +208,9 @@
 
   virtual bool Equals(const NativeType& other) const;
 
-  virtual void PrintTo(BaseTextBuffer* f, bool multi_line = false) const;
+  virtual void PrintTo(BaseTextBuffer* f,
+                       bool multi_line = false,
+                       bool verbose = true) const;
 
   // Whether a range within a struct contains only floats.
   //
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/arm64_android.expect b/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/arm64_android.expect
new file mode 100644
index 0000000..1170c02
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/arm64_android.expect
@@ -0,0 +1,4 @@
+P(r0 int64) Compound(size: 128)
+r1 int32
+=>
+P(r8 int64) Compound(size: 128)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/arm64_ios.expect b/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/arm64_ios.expect
new file mode 100644
index 0000000..1170c02
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/arm64_ios.expect
@@ -0,0 +1,4 @@
+P(r0 int64) Compound(size: 128)
+r1 int32
+=>
+P(r8 int64) Compound(size: 128)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/arm64_linux.expect b/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/arm64_linux.expect
new file mode 100644
index 0000000..1170c02
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/arm64_linux.expect
@@ -0,0 +1,4 @@
+P(r0 int64) Compound(size: 128)
+r1 int32
+=>
+P(r8 int64) Compound(size: 128)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/arm64_macos.expect b/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/arm64_macos.expect
new file mode 100644
index 0000000..1170c02
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/arm64_macos.expect
@@ -0,0 +1,4 @@
+P(r0 int64) Compound(size: 128)
+r1 int32
+=>
+P(r8 int64) Compound(size: 128)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/arm_android.expect b/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/arm_android.expect
new file mode 100644
index 0000000..230a6ab
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/arm_android.expect
@@ -0,0 +1,4 @@
+M((r2, r3) int64, S+0 int64, S+8 int64, S+16 int64, S+24 int64, S+32 int64, S+40 int64, S+48 int64, S+56 int64, S+64 int64, S+72 int64, S+80 int64, S+88 int64, S+96 int64, S+104 int64, S+112 int64) Compound(size: 128)
+S+120 int32
+=>
+P(r0 uint32) Compound(size: 128)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/arm_ios.expect b/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/arm_ios.expect
new file mode 100644
index 0000000..230a6ab
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/arm_ios.expect
@@ -0,0 +1,4 @@
+M((r2, r3) int64, S+0 int64, S+8 int64, S+16 int64, S+24 int64, S+32 int64, S+40 int64, S+48 int64, S+56 int64, S+64 int64, S+72 int64, S+80 int64, S+88 int64, S+96 int64, S+104 int64, S+112 int64) Compound(size: 128)
+S+120 int32
+=>
+P(r0 uint32) Compound(size: 128)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/arm_linux.expect b/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/arm_linux.expect
new file mode 100644
index 0000000..230a6ab
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/arm_linux.expect
@@ -0,0 +1,4 @@
+M((r2, r3) int64, S+0 int64, S+8 int64, S+16 int64, S+24 int64, S+32 int64, S+40 int64, S+48 int64, S+56 int64, S+64 int64, S+72 int64, S+80 int64, S+88 int64, S+96 int64, S+104 int64, S+112 int64) Compound(size: 128)
+S+120 int32
+=>
+P(r0 uint32) Compound(size: 128)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/ia32_android.expect b/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/ia32_android.expect
new file mode 100644
index 0000000..c765e39
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/ia32_android.expect
@@ -0,0 +1,4 @@
+S+4 Compound(size: 128)
+S+132 int32
+=>
+P(S+0 uint32, ret:eax uint32) Compound(size: 128)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/ia32_linux.expect b/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/ia32_linux.expect
new file mode 100644
index 0000000..c765e39
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/ia32_linux.expect
@@ -0,0 +1,4 @@
+S+4 Compound(size: 128)
+S+132 int32
+=>
+P(S+0 uint32, ret:eax uint32) Compound(size: 128)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/ia32_win.expect b/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/ia32_win.expect
new file mode 100644
index 0000000..c765e39
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/ia32_win.expect
@@ -0,0 +1,4 @@
+S+4 Compound(size: 128)
+S+132 int32
+=>
+P(S+0 uint32, ret:eax uint32) Compound(size: 128)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/x64_ios.expect b/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/x64_ios.expect
new file mode 100644
index 0000000..f5ce3da
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/x64_ios.expect
@@ -0,0 +1,4 @@
+S+0 Compound(size: 128)
+rsi int32
+=>
+P(rdi int64, ret:rax int64) Compound(size: 128)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/x64_linux.expect b/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/x64_linux.expect
new file mode 100644
index 0000000..f5ce3da
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/x64_linux.expect
@@ -0,0 +1,4 @@
+S+0 Compound(size: 128)
+rsi int32
+=>
+P(rdi int64, ret:rax int64) Compound(size: 128)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/x64_macos.expect b/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/x64_macos.expect
new file mode 100644
index 0000000..f5ce3da
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/x64_macos.expect
@@ -0,0 +1,4 @@
+S+0 Compound(size: 128)
+rsi int32
+=>
+P(rdi int64, ret:rax int64) Compound(size: 128)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/x64_win.expect b/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/x64_win.expect
new file mode 100644
index 0000000..ff78425
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct128bytesx1/x64_win.expect
@@ -0,0 +1,4 @@
+P(rdx int64) Compound(size: 128)
+r8 int32
+=>
+P(rcx int64, ret:rax int64) Compound(size: 128)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/arm64_android.expect b/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/arm64_android.expect
new file mode 100644
index 0000000..f4b87ed
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/arm64_android.expect
@@ -0,0 +1,15 @@
+M(v0 float, v1 float, v2 float, v3 float) Compound(size: 16)
+v4 float
+S+0 Compound(size: 16)
+S+16 Compound(size: 16)
+S+32 Compound(size: 16)
+S+48 Compound(size: 16)
+S+64 Compound(size: 16)
+S+80 Compound(size: 16)
+S+96 Compound(size: 16)
+S+112 Compound(size: 16)
+S+128 float
+r0 int8
+S+136 Compound(size: 16)
+=>
+M(v0 float, v1 float, v2 float, v3 float) Compound(size: 16)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/arm64_ios.expect b/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/arm64_ios.expect
new file mode 100644
index 0000000..70977c2
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/arm64_ios.expect
@@ -0,0 +1,15 @@
+M(v0 float, v1 float, v2 float, v3 float) Compound(size: 16)
+v4 float
+S+0 Compound(size: 16)
+S+16 Compound(size: 16)
+S+32 Compound(size: 16)
+S+48 Compound(size: 16)
+S+64 Compound(size: 16)
+S+80 Compound(size: 16)
+S+96 Compound(size: 16)
+S+112 Compound(size: 16)
+S+128 float
+r0 int32[int8]
+S+132 Compound(size: 16)
+=>
+M(v0 float, v1 float, v2 float, v3 float) Compound(size: 16)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/arm64_linux.expect b/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/arm64_linux.expect
new file mode 100644
index 0000000..f4b87ed
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/arm64_linux.expect
@@ -0,0 +1,15 @@
+M(v0 float, v1 float, v2 float, v3 float) Compound(size: 16)
+v4 float
+S+0 Compound(size: 16)
+S+16 Compound(size: 16)
+S+32 Compound(size: 16)
+S+48 Compound(size: 16)
+S+64 Compound(size: 16)
+S+80 Compound(size: 16)
+S+96 Compound(size: 16)
+S+112 Compound(size: 16)
+S+128 float
+r0 int8
+S+136 Compound(size: 16)
+=>
+M(v0 float, v1 float, v2 float, v3 float) Compound(size: 16)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/arm64_macos.expect b/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/arm64_macos.expect
new file mode 100644
index 0000000..f4b87ed
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/arm64_macos.expect
@@ -0,0 +1,15 @@
+M(v0 float, v1 float, v2 float, v3 float) Compound(size: 16)
+v4 float
+S+0 Compound(size: 16)
+S+16 Compound(size: 16)
+S+32 Compound(size: 16)
+S+48 Compound(size: 16)
+S+64 Compound(size: 16)
+S+80 Compound(size: 16)
+S+96 Compound(size: 16)
+S+112 Compound(size: 16)
+S+128 float
+r0 int8
+S+136 Compound(size: 16)
+=>
+M(v0 float, v1 float, v2 float, v3 float) Compound(size: 16)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/arm_android.expect b/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/arm_android.expect
new file mode 100644
index 0000000..24fce41
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/arm_android.expect
@@ -0,0 +1,15 @@
+M(r1 int32, r2 int32, r3 int32, S+0 int32) Compound(size: 16)
+S+4 float
+M(S+8 int32, S+12 int32, S+16 int32, S+20 int32) Compound(size: 16)
+M(S+24 int32, S+28 int32, S+32 int32, S+36 int32) Compound(size: 16)
+M(S+40 int32, S+44 int32, S+48 int32, S+52 int32) Compound(size: 16)
+M(S+56 int32, S+60 int32, S+64 int32, S+68 int32) Compound(size: 16)
+M(S+72 int32, S+76 int32, S+80 int32, S+84 int32) Compound(size: 16)
+M(S+88 int32, S+92 int32, S+96 int32, S+100 int32) Compound(size: 16)
+M(S+104 int32, S+108 int32, S+112 int32, S+116 int32) Compound(size: 16)
+M(S+120 int32, S+124 int32, S+128 int32, S+132 int32) Compound(size: 16)
+S+136 float
+S+140 int32[int8]
+M(S+144 int32, S+148 int32, S+152 int32, S+156 int32) Compound(size: 16)
+=>
+P(r0 uint32) Compound(size: 16)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/arm_ios.expect b/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/arm_ios.expect
new file mode 100644
index 0000000..124f1cd
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/arm_ios.expect
@@ -0,0 +1,15 @@
+M(s0 float, s1 float, s2 float, s3 float) Compound(size: 16)
+s4 float
+M(s5 float, s6 float, s7 float, s8 float) Compound(size: 16)
+M(s9 float, s10 float, s11 float, s12 float) Compound(size: 16)
+S+0 Compound(size: 16)
+S+16 Compound(size: 16)
+S+32 Compound(size: 16)
+S+48 Compound(size: 16)
+S+64 Compound(size: 16)
+S+80 Compound(size: 16)
+S+96 float
+r0 int32[int8]
+S+100 Compound(size: 16)
+=>
+M(s0 float, s1 float, s2 float, s3 float) Compound(size: 16)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/arm_linux.expect b/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/arm_linux.expect
new file mode 100644
index 0000000..124f1cd
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/arm_linux.expect
@@ -0,0 +1,15 @@
+M(s0 float, s1 float, s2 float, s3 float) Compound(size: 16)
+s4 float
+M(s5 float, s6 float, s7 float, s8 float) Compound(size: 16)
+M(s9 float, s10 float, s11 float, s12 float) Compound(size: 16)
+S+0 Compound(size: 16)
+S+16 Compound(size: 16)
+S+32 Compound(size: 16)
+S+48 Compound(size: 16)
+S+64 Compound(size: 16)
+S+80 Compound(size: 16)
+S+96 float
+r0 int32[int8]
+S+100 Compound(size: 16)
+=>
+M(s0 float, s1 float, s2 float, s3 float) Compound(size: 16)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/ia32_android.expect b/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/ia32_android.expect
new file mode 100644
index 0000000..3c127f9
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/ia32_android.expect
@@ -0,0 +1,15 @@
+S+4 Compound(size: 16)
+S+20 float
+S+24 Compound(size: 16)
+S+40 Compound(size: 16)
+S+56 Compound(size: 16)
+S+72 Compound(size: 16)
+S+88 Compound(size: 16)
+S+104 Compound(size: 16)
+S+120 Compound(size: 16)
+S+136 Compound(size: 16)
+S+152 float
+S+156 int32[int8]
+S+160 Compound(size: 16)
+=>
+P(S+0 uint32, ret:eax uint32) Compound(size: 16)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/ia32_linux.expect b/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/ia32_linux.expect
new file mode 100644
index 0000000..3c127f9
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/ia32_linux.expect
@@ -0,0 +1,15 @@
+S+4 Compound(size: 16)
+S+20 float
+S+24 Compound(size: 16)
+S+40 Compound(size: 16)
+S+56 Compound(size: 16)
+S+72 Compound(size: 16)
+S+88 Compound(size: 16)
+S+104 Compound(size: 16)
+S+120 Compound(size: 16)
+S+136 Compound(size: 16)
+S+152 float
+S+156 int32[int8]
+S+160 Compound(size: 16)
+=>
+P(S+0 uint32, ret:eax uint32) Compound(size: 16)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/ia32_win.expect b/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/ia32_win.expect
new file mode 100644
index 0000000..3c127f9
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/ia32_win.expect
@@ -0,0 +1,15 @@
+S+4 Compound(size: 16)
+S+20 float
+S+24 Compound(size: 16)
+S+40 Compound(size: 16)
+S+56 Compound(size: 16)
+S+72 Compound(size: 16)
+S+88 Compound(size: 16)
+S+104 Compound(size: 16)
+S+120 Compound(size: 16)
+S+136 Compound(size: 16)
+S+152 float
+S+156 int32[int8]
+S+160 Compound(size: 16)
+=>
+P(S+0 uint32, ret:eax uint32) Compound(size: 16)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/x64_ios.expect b/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/x64_ios.expect
new file mode 100644
index 0000000..991a252
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/x64_ios.expect
@@ -0,0 +1,15 @@
+M(xmm0 double, xmm1 double) Compound(size: 16)
+xmm2 float
+M(xmm3 double, xmm4 double) Compound(size: 16)
+M(xmm5 double, xmm6 double) Compound(size: 16)
+S+0 Compound(size: 16)
+S+16 Compound(size: 16)
+S+32 Compound(size: 16)
+S+48 Compound(size: 16)
+S+64 Compound(size: 16)
+S+80 Compound(size: 16)
+xmm7 float
+rdi int32[int8]
+S+96 Compound(size: 16)
+=>
+M(xmm0 double, xmm1 double) Compound(size: 16)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/x64_linux.expect b/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/x64_linux.expect
new file mode 100644
index 0000000..991a252
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/x64_linux.expect
@@ -0,0 +1,15 @@
+M(xmm0 double, xmm1 double) Compound(size: 16)
+xmm2 float
+M(xmm3 double, xmm4 double) Compound(size: 16)
+M(xmm5 double, xmm6 double) Compound(size: 16)
+S+0 Compound(size: 16)
+S+16 Compound(size: 16)
+S+32 Compound(size: 16)
+S+48 Compound(size: 16)
+S+64 Compound(size: 16)
+S+80 Compound(size: 16)
+xmm7 float
+rdi int32[int8]
+S+96 Compound(size: 16)
+=>
+M(xmm0 double, xmm1 double) Compound(size: 16)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/x64_macos.expect b/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/x64_macos.expect
new file mode 100644
index 0000000..991a252
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/x64_macos.expect
@@ -0,0 +1,15 @@
+M(xmm0 double, xmm1 double) Compound(size: 16)
+xmm2 float
+M(xmm3 double, xmm4 double) Compound(size: 16)
+M(xmm5 double, xmm6 double) Compound(size: 16)
+S+0 Compound(size: 16)
+S+16 Compound(size: 16)
+S+32 Compound(size: 16)
+S+48 Compound(size: 16)
+S+64 Compound(size: 16)
+S+80 Compound(size: 16)
+xmm7 float
+rdi int32[int8]
+S+96 Compound(size: 16)
+=>
+M(xmm0 double, xmm1 double) Compound(size: 16)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/x64_win.expect b/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/x64_win.expect
new file mode 100644
index 0000000..8e31945
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct16bytesHomogenousx10/x64_win.expect
@@ -0,0 +1,15 @@
+P(rdx int64) Compound(size: 16)
+xmm2 float
+P(r9 int64) Compound(size: 16)
+P(S+0 int64) Compound(size: 16)
+P(S+8 int64) Compound(size: 16)
+P(S+16 int64) Compound(size: 16)
+P(S+24 int64) Compound(size: 16)
+P(S+32 int64) Compound(size: 16)
+P(S+40 int64) Compound(size: 16)
+P(S+48 int64) Compound(size: 16)
+S+56 float
+S+64 int8
+P(S+72 int64) Compound(size: 16)
+=>
+P(rcx int64, ret:rax int64) Compound(size: 16)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct16bytesMixedx10/x64_ios.expect b/runtime/vm/compiler/ffi/unit_tests/struct16bytesMixedx10/x64_ios.expect
new file mode 100644
index 0000000..2b50280
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct16bytesMixedx10/x64_ios.expect
@@ -0,0 +1,13 @@
+M(xmm0 double, rdi int64) Compound(size: 16)
+M(xmm1 double, rsi int64) Compound(size: 16)
+M(xmm2 double, rdx int64) Compound(size: 16)
+M(xmm3 double, rcx int64) Compound(size: 16)
+M(xmm4 double, r8 int64) Compound(size: 16)
+M(xmm5 double, r9 int64) Compound(size: 16)
+S+0 Compound(size: 16)
+S+16 Compound(size: 16)
+S+32 Compound(size: 16)
+S+48 Compound(size: 16)
+xmm6 float
+=>
+M(xmm0 double, rax int64) Compound(size: 16)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct16bytesMixedx10/x64_linux.expect b/runtime/vm/compiler/ffi/unit_tests/struct16bytesMixedx10/x64_linux.expect
new file mode 100644
index 0000000..2b50280
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct16bytesMixedx10/x64_linux.expect
@@ -0,0 +1,13 @@
+M(xmm0 double, rdi int64) Compound(size: 16)
+M(xmm1 double, rsi int64) Compound(size: 16)
+M(xmm2 double, rdx int64) Compound(size: 16)
+M(xmm3 double, rcx int64) Compound(size: 16)
+M(xmm4 double, r8 int64) Compound(size: 16)
+M(xmm5 double, r9 int64) Compound(size: 16)
+S+0 Compound(size: 16)
+S+16 Compound(size: 16)
+S+32 Compound(size: 16)
+S+48 Compound(size: 16)
+xmm6 float
+=>
+M(xmm0 double, rax int64) Compound(size: 16)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct16bytesMixedx10/x64_macos.expect b/runtime/vm/compiler/ffi/unit_tests/struct16bytesMixedx10/x64_macos.expect
new file mode 100644
index 0000000..2b50280
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct16bytesMixedx10/x64_macos.expect
@@ -0,0 +1,13 @@
+M(xmm0 double, rdi int64) Compound(size: 16)
+M(xmm1 double, rsi int64) Compound(size: 16)
+M(xmm2 double, rdx int64) Compound(size: 16)
+M(xmm3 double, rcx int64) Compound(size: 16)
+M(xmm4 double, r8 int64) Compound(size: 16)
+M(xmm5 double, r9 int64) Compound(size: 16)
+S+0 Compound(size: 16)
+S+16 Compound(size: 16)
+S+32 Compound(size: 16)
+S+48 Compound(size: 16)
+xmm6 float
+=>
+M(xmm0 double, rax int64) Compound(size: 16)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct16bytesMixedx10/x64_win.expect b/runtime/vm/compiler/ffi/unit_tests/struct16bytesMixedx10/x64_win.expect
new file mode 100644
index 0000000..d21ee2b
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct16bytesMixedx10/x64_win.expect
@@ -0,0 +1,13 @@
+P(rdx int64) Compound(size: 16)
+P(r8 int64) Compound(size: 16)
+P(r9 int64) Compound(size: 16)
+P(S+0 int64) Compound(size: 16)
+P(S+8 int64) Compound(size: 16)
+P(S+16 int64) Compound(size: 16)
+P(S+24 int64) Compound(size: 16)
+P(S+32 int64) Compound(size: 16)
+P(S+40 int64) Compound(size: 16)
+P(S+48 int64) Compound(size: 16)
+S+56 float
+=>
+P(rcx int64, ret:rax int64) Compound(size: 16)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct16bytesMixedx10_2/x64_ios.expect b/runtime/vm/compiler/ffi/unit_tests/struct16bytesMixedx10_2/x64_ios.expect
new file mode 100644
index 0000000..bcdf3b5
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct16bytesMixedx10_2/x64_ios.expect
@@ -0,0 +1,17 @@
+xmm0 float
+xmm1 float
+xmm2 float
+xmm3 float
+M(xmm4 double, rdi int64) Compound(size: 16)
+M(xmm5 double, rsi int64) Compound(size: 16)
+M(xmm6 double, rdx int64) Compound(size: 16)
+M(xmm7 double, rcx int64) Compound(size: 16)
+S+0 Compound(size: 16)
+S+16 Compound(size: 16)
+S+32 Compound(size: 16)
+S+48 Compound(size: 16)
+S+64 Compound(size: 16)
+S+80 Compound(size: 16)
+r8 int32
+=>
+M(xmm0 double, rax int64) Compound(size: 16)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct16bytesMixedx10_2/x64_linux.expect b/runtime/vm/compiler/ffi/unit_tests/struct16bytesMixedx10_2/x64_linux.expect
new file mode 100644
index 0000000..bcdf3b5
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct16bytesMixedx10_2/x64_linux.expect
@@ -0,0 +1,17 @@
+xmm0 float
+xmm1 float
+xmm2 float
+xmm3 float
+M(xmm4 double, rdi int64) Compound(size: 16)
+M(xmm5 double, rsi int64) Compound(size: 16)
+M(xmm6 double, rdx int64) Compound(size: 16)
+M(xmm7 double, rcx int64) Compound(size: 16)
+S+0 Compound(size: 16)
+S+16 Compound(size: 16)
+S+32 Compound(size: 16)
+S+48 Compound(size: 16)
+S+64 Compound(size: 16)
+S+80 Compound(size: 16)
+r8 int32
+=>
+M(xmm0 double, rax int64) Compound(size: 16)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct16bytesMixedx10_2/x64_macos.expect b/runtime/vm/compiler/ffi/unit_tests/struct16bytesMixedx10_2/x64_macos.expect
new file mode 100644
index 0000000..bcdf3b5
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct16bytesMixedx10_2/x64_macos.expect
@@ -0,0 +1,17 @@
+xmm0 float
+xmm1 float
+xmm2 float
+xmm3 float
+M(xmm4 double, rdi int64) Compound(size: 16)
+M(xmm5 double, rsi int64) Compound(size: 16)
+M(xmm6 double, rdx int64) Compound(size: 16)
+M(xmm7 double, rcx int64) Compound(size: 16)
+S+0 Compound(size: 16)
+S+16 Compound(size: 16)
+S+32 Compound(size: 16)
+S+48 Compound(size: 16)
+S+64 Compound(size: 16)
+S+80 Compound(size: 16)
+r8 int32
+=>
+M(xmm0 double, rax int64) Compound(size: 16)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct16bytesMixedx10_2/x64_win.expect b/runtime/vm/compiler/ffi/unit_tests/struct16bytesMixedx10_2/x64_win.expect
new file mode 100644
index 0000000..bb695c2
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct16bytesMixedx10_2/x64_win.expect
@@ -0,0 +1,17 @@
+xmm1 float
+xmm2 float
+xmm3 float
+S+0 float
+P(S+8 int64) Compound(size: 16)
+P(S+16 int64) Compound(size: 16)
+P(S+24 int64) Compound(size: 16)
+P(S+32 int64) Compound(size: 16)
+P(S+40 int64) Compound(size: 16)
+P(S+48 int64) Compound(size: 16)
+P(S+56 int64) Compound(size: 16)
+P(S+64 int64) Compound(size: 16)
+P(S+72 int64) Compound(size: 16)
+P(S+80 int64) Compound(size: 16)
+S+88 int32
+=>
+P(rcx int64, ret:rax int64) Compound(size: 16)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/arm64_android.expect b/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/arm64_android.expect
new file mode 100644
index 0000000..121dccb
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/arm64_android.expect
@@ -0,0 +1,12 @@
+M(r0 int64) Compound(size: 3)
+M(r1 int64) Compound(size: 3)
+M(r2 int64) Compound(size: 3)
+M(r3 int64) Compound(size: 3)
+M(r4 int64) Compound(size: 3)
+M(r5 int64) Compound(size: 3)
+M(r6 int64) Compound(size: 3)
+M(r7 int64) Compound(size: 3)
+M(S+0 int64) Compound(size: 3)
+M(S+8 int64) Compound(size: 3)
+=>
+M(r0 int64) Compound(size: 3)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/arm64_ios.expect b/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/arm64_ios.expect
new file mode 100644
index 0000000..121dccb
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/arm64_ios.expect
@@ -0,0 +1,12 @@
+M(r0 int64) Compound(size: 3)
+M(r1 int64) Compound(size: 3)
+M(r2 int64) Compound(size: 3)
+M(r3 int64) Compound(size: 3)
+M(r4 int64) Compound(size: 3)
+M(r5 int64) Compound(size: 3)
+M(r6 int64) Compound(size: 3)
+M(r7 int64) Compound(size: 3)
+M(S+0 int64) Compound(size: 3)
+M(S+8 int64) Compound(size: 3)
+=>
+M(r0 int64) Compound(size: 3)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/arm64_linux.expect b/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/arm64_linux.expect
new file mode 100644
index 0000000..121dccb
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/arm64_linux.expect
@@ -0,0 +1,12 @@
+M(r0 int64) Compound(size: 3)
+M(r1 int64) Compound(size: 3)
+M(r2 int64) Compound(size: 3)
+M(r3 int64) Compound(size: 3)
+M(r4 int64) Compound(size: 3)
+M(r5 int64) Compound(size: 3)
+M(r6 int64) Compound(size: 3)
+M(r7 int64) Compound(size: 3)
+M(S+0 int64) Compound(size: 3)
+M(S+8 int64) Compound(size: 3)
+=>
+M(r0 int64) Compound(size: 3)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/arm64_macos.expect b/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/arm64_macos.expect
new file mode 100644
index 0000000..121dccb
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/arm64_macos.expect
@@ -0,0 +1,12 @@
+M(r0 int64) Compound(size: 3)
+M(r1 int64) Compound(size: 3)
+M(r2 int64) Compound(size: 3)
+M(r3 int64) Compound(size: 3)
+M(r4 int64) Compound(size: 3)
+M(r5 int64) Compound(size: 3)
+M(r6 int64) Compound(size: 3)
+M(r7 int64) Compound(size: 3)
+M(S+0 int64) Compound(size: 3)
+M(S+8 int64) Compound(size: 3)
+=>
+M(r0 int64) Compound(size: 3)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/arm_android.expect b/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/arm_android.expect
new file mode 100644
index 0000000..572a779
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/arm_android.expect
@@ -0,0 +1,12 @@
+M(r0 int32) Compound(size: 3)
+M(r1 int32) Compound(size: 3)
+M(r2 int32) Compound(size: 3)
+M(r3 int32) Compound(size: 3)
+M(S+0 int32) Compound(size: 3)
+M(S+4 int32) Compound(size: 3)
+M(S+8 int32) Compound(size: 3)
+M(S+12 int32) Compound(size: 3)
+M(S+16 int32) Compound(size: 3)
+M(S+20 int32) Compound(size: 3)
+=>
+M(r0 uint32) Compound(size: 3)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/arm_ios.expect b/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/arm_ios.expect
new file mode 100644
index 0000000..572a779
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/arm_ios.expect
@@ -0,0 +1,12 @@
+M(r0 int32) Compound(size: 3)
+M(r1 int32) Compound(size: 3)
+M(r2 int32) Compound(size: 3)
+M(r3 int32) Compound(size: 3)
+M(S+0 int32) Compound(size: 3)
+M(S+4 int32) Compound(size: 3)
+M(S+8 int32) Compound(size: 3)
+M(S+12 int32) Compound(size: 3)
+M(S+16 int32) Compound(size: 3)
+M(S+20 int32) Compound(size: 3)
+=>
+M(r0 uint32) Compound(size: 3)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/arm_linux.expect b/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/arm_linux.expect
new file mode 100644
index 0000000..572a779
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/arm_linux.expect
@@ -0,0 +1,12 @@
+M(r0 int32) Compound(size: 3)
+M(r1 int32) Compound(size: 3)
+M(r2 int32) Compound(size: 3)
+M(r3 int32) Compound(size: 3)
+M(S+0 int32) Compound(size: 3)
+M(S+4 int32) Compound(size: 3)
+M(S+8 int32) Compound(size: 3)
+M(S+12 int32) Compound(size: 3)
+M(S+16 int32) Compound(size: 3)
+M(S+20 int32) Compound(size: 3)
+=>
+M(r0 uint32) Compound(size: 3)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/ia32_android.expect b/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/ia32_android.expect
new file mode 100644
index 0000000..91c0595
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/ia32_android.expect
@@ -0,0 +1,12 @@
+S+4 Compound(size: 3)
+S+8 Compound(size: 3)
+S+12 Compound(size: 3)
+S+16 Compound(size: 3)
+S+20 Compound(size: 3)
+S+24 Compound(size: 3)
+S+28 Compound(size: 3)
+S+32 Compound(size: 3)
+S+36 Compound(size: 3)
+S+40 Compound(size: 3)
+=>
+P(S+0 uint32, ret:eax uint32) Compound(size: 3)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/ia32_linux.expect b/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/ia32_linux.expect
new file mode 100644
index 0000000..91c0595
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/ia32_linux.expect
@@ -0,0 +1,12 @@
+S+4 Compound(size: 3)
+S+8 Compound(size: 3)
+S+12 Compound(size: 3)
+S+16 Compound(size: 3)
+S+20 Compound(size: 3)
+S+24 Compound(size: 3)
+S+28 Compound(size: 3)
+S+32 Compound(size: 3)
+S+36 Compound(size: 3)
+S+40 Compound(size: 3)
+=>
+P(S+0 uint32, ret:eax uint32) Compound(size: 3)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/ia32_win.expect b/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/ia32_win.expect
new file mode 100644
index 0000000..91c0595
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/ia32_win.expect
@@ -0,0 +1,12 @@
+S+4 Compound(size: 3)
+S+8 Compound(size: 3)
+S+12 Compound(size: 3)
+S+16 Compound(size: 3)
+S+20 Compound(size: 3)
+S+24 Compound(size: 3)
+S+28 Compound(size: 3)
+S+32 Compound(size: 3)
+S+36 Compound(size: 3)
+S+40 Compound(size: 3)
+=>
+P(S+0 uint32, ret:eax uint32) Compound(size: 3)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/x64_ios.expect b/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/x64_ios.expect
new file mode 100644
index 0000000..7f90b7a
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/x64_ios.expect
@@ -0,0 +1,12 @@
+M(rdi int64) Compound(size: 3)
+M(rsi int64) Compound(size: 3)
+M(rdx int64) Compound(size: 3)
+M(rcx int64) Compound(size: 3)
+M(r8 int64) Compound(size: 3)
+M(r9 int64) Compound(size: 3)
+S+0 Compound(size: 3)
+S+8 Compound(size: 3)
+S+16 Compound(size: 3)
+S+24 Compound(size: 3)
+=>
+M(rax int64) Compound(size: 3)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/x64_linux.expect b/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/x64_linux.expect
new file mode 100644
index 0000000..7f90b7a
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/x64_linux.expect
@@ -0,0 +1,12 @@
+M(rdi int64) Compound(size: 3)
+M(rsi int64) Compound(size: 3)
+M(rdx int64) Compound(size: 3)
+M(rcx int64) Compound(size: 3)
+M(r8 int64) Compound(size: 3)
+M(r9 int64) Compound(size: 3)
+S+0 Compound(size: 3)
+S+8 Compound(size: 3)
+S+16 Compound(size: 3)
+S+24 Compound(size: 3)
+=>
+M(rax int64) Compound(size: 3)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/x64_macos.expect b/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/x64_macos.expect
new file mode 100644
index 0000000..7f90b7a
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/x64_macos.expect
@@ -0,0 +1,12 @@
+M(rdi int64) Compound(size: 3)
+M(rsi int64) Compound(size: 3)
+M(rdx int64) Compound(size: 3)
+M(rcx int64) Compound(size: 3)
+M(r8 int64) Compound(size: 3)
+M(r9 int64) Compound(size: 3)
+S+0 Compound(size: 3)
+S+8 Compound(size: 3)
+S+16 Compound(size: 3)
+S+24 Compound(size: 3)
+=>
+M(rax int64) Compound(size: 3)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/x64_win.expect b/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/x64_win.expect
new file mode 100644
index 0000000..4b80330
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct3bytesx10/x64_win.expect
@@ -0,0 +1,12 @@
+P(rdx int64) Compound(size: 3)
+P(r8 int64) Compound(size: 3)
+P(r9 int64) Compound(size: 3)
+P(S+0 int64) Compound(size: 3)
+P(S+8 int64) Compound(size: 3)
+P(S+16 int64) Compound(size: 3)
+P(S+24 int64) Compound(size: 3)
+P(S+32 int64) Compound(size: 3)
+P(S+40 int64) Compound(size: 3)
+P(S+48 int64) Compound(size: 3)
+=>
+P(rcx int64, ret:rax int64) Compound(size: 3)
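Editorial aside: the Windows x64 file above differs from the other x64 files in kind, not just in register names. Win64 passes an aggregate by value in a register only when its size is 1, 2, 4, or 8 bytes; a 3-byte (or 16-byte) struct is instead passed by reference, which is what the `P(... int64)` pointer locations encode. A hedged C sketch of that lowering:

    typedef struct { char a, b, c; } S3;          /* 3 bytes: not 1/2/4/8  */
    S3 f(S3, S3, S3, S3, S3, S3, S3, S3, S3, S3);
    /* Win64 lowering, roughly:
         rcx                  = pointer to caller-provided return memory
         rdx, r8, r9, stack   = pointers to copies of the S3 arguments
         on return, rax echoes the return-memory pointer (ret:rax above). */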
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/arm64_android.expect b/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/arm64_android.expect
new file mode 100644
index 0000000..aae2f45
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/arm64_android.expect
@@ -0,0 +1,3 @@
+M(r0 int64) Compound(size: 8)
+=>
+M(r0 int64) Compound(size: 8)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/arm64_ios.expect b/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/arm64_ios.expect
new file mode 100644
index 0000000..aae2f45
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/arm64_ios.expect
@@ -0,0 +1,3 @@
+M(r0 int64) Compound(size: 8)
+=>
+M(r0 int64) Compound(size: 8)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/arm64_linux.expect b/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/arm64_linux.expect
new file mode 100644
index 0000000..aae2f45
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/arm64_linux.expect
@@ -0,0 +1,3 @@
+M(r0 int64) Compound(size: 8)
+=>
+M(r0 int64) Compound(size: 8)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/arm64_macos.expect b/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/arm64_macos.expect
new file mode 100644
index 0000000..aae2f45
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/arm64_macos.expect
@@ -0,0 +1,3 @@
+M(r0 int64) Compound(size: 8)
+=>
+M(r0 int64) Compound(size: 8)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/arm_android.expect b/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/arm_android.expect
new file mode 100644
index 0000000..0dac74d
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/arm_android.expect
@@ -0,0 +1,3 @@
+M(r1 int32, r2 int32) Compound(size: 8)
+=>
+P(r0 uint32) Compound(size: 8)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/arm_ios.expect b/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/arm_ios.expect
new file mode 100644
index 0000000..0dac74d
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/arm_ios.expect
@@ -0,0 +1,3 @@
+M(r1 int32, r2 int32) Compound(size: 8)
+=>
+P(r0 uint32) Compound(size: 8)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/arm_linux.expect b/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/arm_linux.expect
new file mode 100644
index 0000000..0dac74d
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/arm_linux.expect
@@ -0,0 +1,3 @@
+M(r1 int32, r2 int32) Compound(size: 8)
+=>
+P(r0 uint32) Compound(size: 8)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/ia32_android.expect b/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/ia32_android.expect
new file mode 100644
index 0000000..b6e748c
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/ia32_android.expect
@@ -0,0 +1,3 @@
+S+4 Compound(size: 8)
+=>
+P(S+0 uint32, ret:eax uint32) Compound(size: 8)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/ia32_linux.expect b/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/ia32_linux.expect
new file mode 100644
index 0000000..b6e748c
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/ia32_linux.expect
@@ -0,0 +1,3 @@
+S+4 Compound(size: 8)
+=>
+P(S+0 uint32, ret:eax uint32) Compound(size: 8)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/ia32_win.expect b/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/ia32_win.expect
new file mode 100644
index 0000000..3778ea7
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/ia32_win.expect
@@ -0,0 +1,3 @@
+S+0 Compound(size: 8)
+=>
+M(eax uint32, edx uint32) Compound(size: 8)
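Same architecture, two ABIs: on ia32 Linux/Android (SysV i386) an 8-byte struct is returned through a hidden pointer pushed as the first stack argument and echoed in eax (`P(S+0 uint32, ret:eax uint32)` above), while 32-bit Windows returns the same struct directly in the eax:edx pair, which is what `kSecondReturnReg = EDX` in the constants_ia32.h hunk later in this diff supports. An editorial sketch:

    typedef struct { int32_t lo, hi; } S8;        /* 8 bytes */
    S8 f(void);
    /* ia32 Linux/Android: caller passes &result at S+0; callee echoes it
       back in eax.
       ia32 Windows:       result comes back directly in eax (lo) : edx (hi). */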
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/x64_ios.expect b/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/x64_ios.expect
new file mode 100644
index 0000000..80d98b3
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/x64_ios.expect
@@ -0,0 +1,3 @@
+M(rdi int64) Compound(size: 8)
+=>
+M(rax int64) Compound(size: 8)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/x64_linux.expect b/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/x64_linux.expect
new file mode 100644
index 0000000..80d98b3
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/x64_linux.expect
@@ -0,0 +1,3 @@
+M(rdi int64) Compound(size: 8)
+=>
+M(rax int64) Compound(size: 8)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/x64_macos.expect b/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/x64_macos.expect
new file mode 100644
index 0000000..80d98b3
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/x64_macos.expect
@@ -0,0 +1,3 @@
+M(rdi int64) Compound(size: 8)
+=>
+M(rax int64) Compound(size: 8)
diff --git a/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/x64_win.expect b/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/x64_win.expect
new file mode 100644
index 0000000..5610272
--- /dev/null
+++ b/runtime/vm/compiler/ffi/unit_tests/struct8bytesx1/x64_win.expect
@@ -0,0 +1,3 @@
+M(rcx int64) Compound(size: 8)
+=>
+M(rax int64) Compound(size: 8)
diff --git a/runtime/vm/constants_arm.h b/runtime/vm/constants_arm.h
index 82a86da..5c2d42f 100644
--- a/runtime/vm/constants_arm.h
+++ b/runtime/vm/constants_arm.h
@@ -487,7 +487,7 @@
   static const intptr_t kArgumentRegisters = kAbiArgumentCpuRegs;
   static const Register ArgumentRegisters[];
   static const intptr_t kNumArgRegs = 4;
-  static const Register kPointerToReturnStructRegister = R0;
+  static const Register kPointerToReturnStructRegisterCall = R0;
 
   static const intptr_t kFpuArgumentRegisters = 0;
 
@@ -527,6 +527,7 @@
   static constexpr Register kReturnReg = R0;
   static constexpr Register kSecondReturnReg = R1;
   static constexpr FpuRegister kReturnFpuReg = Q0;
+  static constexpr Register kPointerToReturnStructRegisterReturn = kReturnReg;
 
  // We choose these so they overlap neither each other nor reserved registers.
   static constexpr Register kFirstNonArgumentRegister = R8;
@@ -536,7 +537,7 @@
 
   COMPILE_ASSERT(
       ((R(kFirstNonArgumentRegister) | R(kSecondNonArgumentRegister)) &
-       (kArgumentRegisters | R(kPointerToReturnStructRegister))) == 0);
+       (kArgumentRegisters | R(kPointerToReturnStructRegisterCall))) == 0);
 };
 
 #undef R
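The rename above is the substantive change running through these headers: the single kPointerToReturnStructRegister is split into a Call variant (where the caller places the hidden result pointer when making the call) and a Return variant (where the pointer is expected to live when the callee returns). The two can differ: on ia32 the pointer goes out on the stack but comes back in eax, and on x64 it goes out in the first argument register but comes back in rax. A hedged sketch of the convention, not VM code, using the new constant names:

    typedef struct { char bytes[12]; } Big;       /* too big for registers */
    Big make(void);
    /* lowers to roughly:
         caller:  Big tmp;
                  <kPointerToReturnStructRegisterCall or stack> = &tmp;
                  call make;
                  result address = <kPointerToReturnStructRegisterReturn>;  */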
diff --git a/runtime/vm/constants_arm64.h b/runtime/vm/constants_arm64.h
index da0e6d7..5e94310 100644
--- a/runtime/vm/constants_arm64.h
+++ b/runtime/vm/constants_arm64.h
@@ -346,7 +346,8 @@
   // The native ABI uses R8 to pass the pointer to the memory preallocated for
   // struct return values. Arm64 is the only ABI in which this pointer is _not_
   // in ArgumentRegisters[0] or on the stack.
-  static const Register kPointerToReturnStructRegister = R8;
+  static const Register kPointerToReturnStructRegisterCall = R8;
+  static const Register kPointerToReturnStructRegisterReturn = R8;
 
   static const FpuRegister FpuArgumentRegisters[];
   static const intptr_t kFpuArgumentRegisters =
@@ -363,6 +364,9 @@
 
   // How stack arguments are aligned.
 #if defined(TARGET_OS_MACOS_IOS)
+  // > Function arguments may consume slots on the stack that are not multiples
+  // > of 8 bytes.
+  // https://developer.apple.com/documentation/xcode/writing_arm64_code_for_apple_platforms
   static constexpr AlignmentStrategy kArgumentStackAlignment =
       kAlignedToValueSize;
 #else
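The Apple deviation documented in the new comment is worth spelling out: standard AAPCS64 rounds every stack argument up to an 8-byte slot, while Apple's arm64 ABI packs stack arguments to their natural size and alignment, which is what kAlignedToValueSize models. A small editorial example with a hypothetical signature whose last three arguments spill to the stack:

    void f(int64_t, int64_t, int64_t, int64_t,    /* x0..x3 */
           int64_t, int64_t, int64_t, int64_t,    /* x4..x7 */
           int8_t a, int8_t b, int16_t c);        /* stack  */
    /* Standard AAPCS64 (kAlignedToWordSize): a at S+0, b at S+8, c at S+16.
       Apple arm64 (kAlignedToValueSize):     a at S+0, b at S+1, c at S+2.  */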
@@ -395,7 +399,7 @@
 
   COMPILE_ASSERT(
       ((R(kFirstNonArgumentRegister) | R(kSecondNonArgumentRegister)) &
-       (kArgumentRegisters | R(kPointerToReturnStructRegister))) == 0);
+       (kArgumentRegisters | R(kPointerToReturnStructRegisterCall))) == 0);
 };
 
 #undef R
diff --git a/runtime/vm/constants_ia32.h b/runtime/vm/constants_ia32.h
index eba92da..8e47d4d 100644
--- a/runtime/vm/constants_ia32.h
+++ b/runtime/vm/constants_ia32.h
@@ -251,7 +251,7 @@
   static const intptr_t kArgumentRegisters = 0;
   static const intptr_t kFpuArgumentRegisters = 0;
   static const intptr_t kNumArgRegs = 0;
-  static const Register kPointerToReturnStructRegister = kNoRegister;
+  static const Register kPointerToReturnStructRegisterCall = kNoRegister;
 
   static const XmmRegister FpuArgumentRegisters[];
   static const intptr_t kXmmArgumentRegisters = 0;
@@ -264,6 +264,7 @@
 
   static constexpr Register kReturnReg = EAX;
   static constexpr Register kSecondReturnReg = EDX;
+  static constexpr Register kPointerToReturnStructRegisterReturn = kReturnReg;
 
   // Floating point values are returned on the "FPU stack" (in "ST" registers).
   // However, we use XMM0 in our compiler pipeline as the location.
diff --git a/runtime/vm/constants_x64.h b/runtime/vm/constants_x64.h
index e154cb2..824a871 100644
--- a/runtime/vm/constants_x64.h
+++ b/runtime/vm/constants_x64.h
@@ -309,12 +309,12 @@
   static const intptr_t kArgumentRegisters =
       R(kArg1Reg) | R(kArg2Reg) | R(kArg3Reg) | R(kArg4Reg);
   static const intptr_t kNumArgRegs = 4;
+  static const Register kPointerToReturnStructRegisterCall = kArg1Reg;
 
   static const XmmRegister FpuArgumentRegisters[];
   static const intptr_t kFpuArgumentRegisters =
       R(XMM0) | R(XMM1) | R(XMM2) | R(XMM3);
   static const intptr_t kNumFpuArgRegs = 4;
-  static const intptr_t kPointerToReturnStructRegister = kArg1Reg;
 
  // Can ArgumentRegisters[i] and XmmArgumentRegisters[i] both be used at the
  // same time? (Windows: no; all other ABIs: yes.)
@@ -354,6 +354,7 @@
   static constexpr Register kReturnReg = RAX;
   static constexpr Register kSecondReturnReg = kNoRegister;
   static constexpr FpuRegister kReturnFpuReg = XMM0;
+  static constexpr Register kPointerToReturnStructRegisterReturn = kReturnReg;
 
   // Whether larger than wordsize arguments are aligned to even registers.
   static constexpr AlignmentStrategy kArgumentRegisterAlignment =
@@ -384,7 +385,7 @@
                                              R(kArg3Reg) | R(kArg4Reg) |
                                              R(kArg5Reg) | R(kArg6Reg);
   static const intptr_t kNumArgRegs = 6;
-  static const Register kPointerToReturnStructRegister = kArg1Reg;
+  static const Register kPointerToReturnStructRegisterCall = kArg1Reg;
 
   static const XmmRegister FpuArgumentRegisters[];
   static const intptr_t kFpuArgumentRegisters = R(XMM0) | R(XMM1) | R(XMM2) |
@@ -415,8 +416,10 @@
   static const XmmRegister xmmFirstNonParameterReg = XMM8;
 
   static constexpr Register kReturnReg = RAX;
-  static constexpr Register kSecondReturnReg = kNoRegister;
+  static constexpr Register kSecondReturnReg = RDX;
   static constexpr FpuRegister kReturnFpuReg = XMM0;
+  static constexpr FpuRegister kSecondReturnFpuReg = XMM1;
+  static constexpr Register kPointerToReturnStructRegisterReturn = kReturnReg;
 
   // Whether larger than wordsize arguments are aligned to even registers.
   static constexpr AlignmentStrategy kArgumentRegisterAlignment =
@@ -451,7 +454,7 @@
 
   COMPILE_ASSERT(
       ((R(kFirstNonArgumentRegister) | R(kSecondNonArgumentRegister)) &
-       (kArgumentRegisters | R(kPointerToReturnStructRegister))) == 0);
+       (kArgumentRegisters | R(kPointerToReturnStructRegisterCall))) == 0);
 };
 
 #undef R
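kSecondReturnReg and the new kSecondReturnFpuReg in the SysV section exist because System V x86-64 returns small aggregates in up to two registers, chosen per eightbyte by classification; the mixed 16-byte expect files earlier in this diff (`M(xmm0 double, rax int64)`) are exactly this case. Editorial examples of the three shapes:

    typedef struct { int64_t a, b; } IPair;      /* INTEGER,INTEGER -> rax:rdx   */
    typedef struct { double x, y;  } DPair;      /* SSE,SSE         -> xmm0:xmm1 */
    typedef struct { double x; int64_t a; } Mix; /* SSE,INTEGER     -> xmm0, rax */
    IPair f1(void); DPair f2(void); Mix f3(void);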
diff --git a/runtime/vm/object.cc b/runtime/vm/object.cc
index 83e255f..2e907af 100644
--- a/runtime/vm/object.cc
+++ b/runtime/vm/object.cc
@@ -24247,6 +24247,8 @@
   // for each frame.
   intptr_t frame_index = 0;
   uint32_t frame_skip = 0;
+  // If we're already in a gap, don't print multiple gap markers.
+  bool in_gap = false;
   do {
     for (intptr_t i = frame_skip; i < stack_trace.Length(); i++) {
       code_object = stack_trace.CodeAtFrame(i);
@@ -24260,72 +24262,87 @@
           // To account for gap frames.
           frame_index += Smi::Value(stack_trace.PcOffsetAtFrame(i));
         }
-      } else if (code_object.raw() == StubCode::AsynchronousGapMarker().raw()) {
-        buffer.AddString("<asynchronous suspension>\n");
-      } else {
-        intptr_t pc_offset = Smi::Value(stack_trace.PcOffsetAtFrame(i));
-        ASSERT(code_object.IsCode());
-        code ^= code_object.raw();
-        ASSERT(code.IsFunctionCode());
-        function = code.function();
-        const uword pc = code.PayloadStart() + pc_offset;
-#if defined(DART_PRECOMPILED_RUNTIME)
-        // When printing non-symbolic frames, we normally print call
-        // addresses, not return addresses, by subtracting one from the PC to
-        // get an address within the preceding instruction.
-        //
-        // The one exception is a normal closure registered as a listener on a
-        // future. In this case, the returned pc_offset is 0, as the closure
-        // is invoked with the value of the resolved future. Thus, we must
-        // report the return address, as returning a value before the closure
-        // payload will cause failures to decode the frame using DWARF info.
-        const bool is_future_listener = pc_offset == 0;
-        const uword call_addr = is_future_listener ? pc : pc - 1;
-        if (FLAG_dwarf_stack_traces_mode) {
-          // If we have access to the owning function and it would be
-          // invisible in a symbolic stack trace, don't show this frame.
-          // (We can't do the same for inlined functions, though.)
-          if (!FLAG_show_invisible_frames && !function.IsNull() &&
-              !function.is_visible()) {
-            continue;
-          }
-          // This output is formatted like Android's debuggerd. Note debuggerd
-          // prints call addresses instead of return addresses.
-          buffer.Printf("    #%02" Pd " abs %" Pp "", frame_index, call_addr);
-          PrintNonSymbolicStackFrameBody(&buffer, call_addr,
-                                         isolate_instructions, vm_instructions);
-          frame_index++;
-          continue;
-        } else if (function.IsNull()) {
-          // We can't print the symbolic information since the owner was not
-          // retained, so instead print the static symbol + offset like the
-          // non-symbolic stack traces.
-          PrintSymbolicStackFrameIndex(&buffer, frame_index);
-          PrintNonSymbolicStackFrameBody(&buffer, call_addr,
-                                         isolate_instructions, vm_instructions);
-          frame_index++;
-          continue;
-        }
-#endif
-        if (code.is_optimized() && stack_trace.expand_inlined()) {
-          code.GetInlinedFunctionsAtReturnAddress(pc_offset, &inlined_functions,
-                                                  &inlined_token_positions);
-          ASSERT(inlined_functions.length() >= 1);
-          for (intptr_t j = inlined_functions.length() - 1; j >= 0; j--) {
-            const auto& inlined = *inlined_functions[j];
-            auto const pos = inlined_token_positions[j];
-            if (FLAG_show_invisible_frames || function.is_visible()) {
-              PrintSymbolicStackFrame(zone, &buffer, inlined, pos, frame_index);
-              frame_index++;
-            }
-          }
-        } else if (FLAG_show_invisible_frames || function.is_visible()) {
-          auto const pos = code.GetTokenIndexOfPC(pc);
-          PrintSymbolicStackFrame(zone, &buffer, function, pos, frame_index);
-          frame_index++;
-        }
+        continue;
       }
+
+      if (code_object.raw() == StubCode::AsynchronousGapMarker().raw()) {
+        if (!in_gap) {
+          buffer.AddString("<asynchronous suspension>\n");
+        }
+        in_gap = true;
+        continue;
+      }
+
+      intptr_t pc_offset = Smi::Value(stack_trace.PcOffsetAtFrame(i));
+      ASSERT(code_object.IsCode());
+      code ^= code_object.raw();
+      ASSERT(code.IsFunctionCode());
+      function = code.function();
+      const uword pc = code.PayloadStart() + pc_offset;
+
+      // Skip frames belonging to functions that should not be shown.
+      if (!FLAG_show_invisible_frames && !function.IsNull() &&
+          !function.is_visible()) {
+        continue;
+      }
+
+      // A visible frame ends any gap we might be in.
+      in_gap = false;
+
+#if defined(DART_PRECOMPILED_RUNTIME)
+      // When printing non-symbolic frames, we normally print call
+      // addresses, not return addresses, by subtracting one from the PC to
+      // get an address within the preceding instruction.
+      //
+      // The one exception is a normal closure registered as a listener on a
+      // future. In this case, the returned pc_offset is 0, as the closure
+      // is invoked with the value of the resolved future. Thus, we must
+      // report the return address, as returning a value before the closure
+      // payload will cause failures to decode the frame using DWARF info.
+      const bool is_future_listener = pc_offset == 0;
+      const uword call_addr = is_future_listener ? pc : pc - 1;
+
+      if (FLAG_dwarf_stack_traces_mode) {
+        // This output is formatted like Android's debuggerd. Note debuggerd
+        // prints call addresses instead of return addresses.
+        buffer.Printf("    #%02" Pd " abs %" Pp "", frame_index, call_addr);
+        PrintNonSymbolicStackFrameBody(&buffer, call_addr, isolate_instructions,
+                                       vm_instructions);
+        frame_index++;
+        continue;
+      }
+
+      if (function.IsNull()) {
+        in_gap = false;
+        // We can't print the symbolic information since the owner was not
+        // retained, so instead print the static symbol + offset like the
+        // non-symbolic stack traces.
+        PrintSymbolicStackFrameIndex(&buffer, frame_index);
+        PrintNonSymbolicStackFrameBody(&buffer, call_addr, isolate_instructions,
+                                       vm_instructions);
+        frame_index++;
+        continue;
+      }
+#endif
+
+      if (code.is_optimized() && stack_trace.expand_inlined()) {
+        code.GetInlinedFunctionsAtReturnAddress(pc_offset, &inlined_functions,
+                                                &inlined_token_positions);
+        ASSERT(inlined_functions.length() >= 1);
+        for (intptr_t j = inlined_functions.length() - 1; j >= 0; j--) {
+          const auto& inlined = *inlined_functions[j];
+          auto const pos = inlined_token_positions[j];
+          PrintSymbolicStackFrame(zone, &buffer, inlined, pos, frame_index);
+          frame_index++;
+        }
+        continue;
+      }
+
+      auto const pos = code.GetTokenIndexOfPC(pc);
+      PrintSymbolicStackFrame(zone, &buffer, function, pos, frame_index);
+      frame_index++;
     }
+
     // Follow the link.
     frame_skip = stack_trace.skip_sync_start_in_parent_stack()
                      ? StackTrace::kSyncAsyncCroppedFrames
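The object.cc restructuring above does two things: it flattens the nested else-if chain into early `continue` guards, and it threads an in_gap flag through the loop so that consecutive AsynchronousGapMarker entries, which end up adjacent once the invisible frames between them are skipped, print a single `<asynchronous suspension>` line. The pattern in isolation, as an editorial sketch rather than the VM code (Frame is a hypothetical simplified type):

    #include <cstdio>
    #include <vector>

    struct Frame { bool is_gap_marker; bool visible; };

    void Print(const std::vector<Frame>& frames) {
      bool in_gap = false;
      for (const Frame& f : frames) {
        if (f.is_gap_marker) {
          if (!in_gap) printf("<asynchronous suspension>\n");
          in_gap = true;             // swallow adjacent gap markers
          continue;
        }
        if (!f.visible) continue;    // hidden frames keep the gap open
        in_gap = false;              // a printed frame closes the gap
        printf("frame\n");
      }
    }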
diff --git a/tools/VERSION b/tools/VERSION
index 82152ac..69c5f6b 100644
--- a/tools/VERSION
+++ b/tools/VERSION
@@ -27,5 +27,5 @@
 MAJOR 2
 MINOR 12
 PATCH 0
-PRERELEASE 61
+PRERELEASE 62
 PRERELEASE_PATCH 0
\ No newline at end of file