Version 2.14.0-30.0.dev
Merge commit '6616bb46967a616b39eb4cc457c133f53a4a8da2' into 'dev'
diff --git a/pkg/_fe_analyzer_shared/test/flow_analysis/why_not_promoted/data/nullable_operator_call_error.dart b/pkg/_fe_analyzer_shared/test/flow_analysis/why_not_promoted/data/nullable_operator_call_error.dart
new file mode 100644
index 0000000..c619de5
--- /dev/null
+++ b/pkg/_fe_analyzer_shared/test/flow_analysis/why_not_promoted/data/nullable_operator_call_error.dart
@@ -0,0 +1,32 @@
+// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+// This test contains a test case for each condition that can lead to the front
+// end's `NullableOperatorCallError` error, for which we wish to report "why not
+// promoted" context information.
+
+class C1 {
+ int? bad;
+}
+
+userDefinableBinaryOpLhs(C1 c) {
+ if (c.bad == null) return;
+ c.bad
+ /*cfe.invoke: notPromoted(propertyNotPromoted(target: member:C1.bad, type: int?))*/
+ /*analyzer.notPromoted(propertyNotPromoted(target: member:C1.bad, type: int?))*/
+ +
+ 1;
+}
+
+class C2 {
+ int? bad;
+}
+
+userDefinableUnaryOp(C2 c) {
+ if (c.bad == null) return;
+ /*cfe.invoke: notPromoted(propertyNotPromoted(target: member:C2.bad, type: int?))*/
+ -c.
+ /*analyzer.notPromoted(propertyNotPromoted(target: member:C2.bad, type: int?))*/
+ bad;
+}
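Note (illustrative sketch, not part of the change): the non-promotion reported above is the usual Dart limitation that instance fields are never promoted by a null check; copying the field into a local variable promotes as normal. A minimal sketch with hypothetical names:

  class C {
    int? bad;
  }

  int promoteViaLocal(C c) {
    final value = c.bad;           // copy the field into a local
    if (value == null) return 0;
    return value + 1;              // `value` is promoted to `int` here
  }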
diff --git a/pkg/front_end/lib/src/fasta/kernel/inference_visitor.dart b/pkg/front_end/lib/src/fasta/kernel/inference_visitor.dart
index 20a67a7..69a9aa7 100644
--- a/pkg/front_end/lib/src/fasta/kernel/inference_visitor.dart
+++ b/pkg/front_end/lib/src/fasta/kernel/inference_visitor.dart
@@ -666,7 +666,8 @@
read,
readType,
node.binaryName,
- node.rhs);
+ node.rhs,
+ null);
Expression binary = binaryResult.expression;
DartType binaryType = binaryResult.inferredType;
@@ -2906,7 +2907,8 @@
read,
readType,
node.binaryName,
- node.rhs);
+ node.rhs,
+ null);
DartType binaryType = binaryResult.inferredType;
Expression binary =
@@ -3999,7 +4001,8 @@
Expression left,
DartType leftType,
Name binaryName,
- Expression right) {
+ Expression right,
+ Map<DartType, NonPromotionReason> Function() whyNotPromoted) {
assert(binaryName != equalsName);
ObjectAccessTarget binaryTarget = inferrer.findInterfaceMember(
@@ -4203,6 +4206,10 @@
}
if (!inferrer.isTopLevel && binaryTarget.isNullable) {
+ List<LocatedMessage> context = inferrer.getWhyNotPromotedContext(
+ whyNotPromoted?.call(),
+ binary,
+ (type) => !type.isPotentiallyNullable);
return new ExpressionInferenceResult(
binaryType,
inferrer.helper.wrapInProblem(
@@ -4210,7 +4217,8 @@
templateNullableOperatorCallError.withArguments(
binaryName.text, leftType, inferrer.isNonNullableByDefault),
binary.fileOffset,
- binaryName.text.length));
+ binaryName.text.length,
+ context: context));
}
return new ExpressionInferenceResult(binaryType, binary);
}
@@ -4220,8 +4228,12 @@
///
/// [fileOffset] is used as the file offset for created nodes.
/// [expressionType] is the already inferred type of the [expression].
- ExpressionInferenceResult _computeUnaryExpression(int fileOffset,
- Expression expression, DartType expressionType, Name unaryName) {
+ ExpressionInferenceResult _computeUnaryExpression(
+ int fileOffset,
+ Expression expression,
+ DartType expressionType,
+ Name unaryName,
+ Map<DartType, NonPromotionReason> Function() whyNotPromoted) {
ObjectAccessTarget unaryTarget = inferrer.findInterfaceMember(
expressionType, unaryName, fileOffset,
includeExtensionMethods: true);
@@ -4352,6 +4364,8 @@
}
if (!inferrer.isTopLevel && unaryTarget.isNullable) {
+ List<LocatedMessage> context = inferrer.getWhyNotPromotedContext(
+ whyNotPromoted?.call(), unary, (type) => !type.isPotentiallyNullable);
// TODO(johnniwinther): Special case 'unary-' in messages. It should
// probably be referred to as "Unary operator '-' ...".
return new ExpressionInferenceResult(
@@ -4361,7 +4375,8 @@
templateNullableOperatorCallError.withArguments(unaryName.text,
expressionType, inferrer.isNonNullableByDefault),
unary.fileOffset,
- unaryName == unaryMinusName ? 1 : unaryName.text.length));
+ unaryName == unaryMinusName ? 1 : unaryName.text.length,
+ context: context));
}
return new ExpressionInferenceResult(unaryType, unary);
}
@@ -5121,7 +5136,8 @@
left,
readType,
node.binaryName,
- node.rhs);
+ node.rhs,
+ null);
Expression binary = binaryResult.expression;
DartType binaryType = binaryResult.inferredType;
@@ -5263,7 +5279,8 @@
left,
readType,
node.binaryName,
- node.rhs);
+ node.rhs,
+ null);
Expression binary = binaryResult.expression;
DartType binaryType = binaryResult.inferredType;
@@ -5417,7 +5434,8 @@
left,
readType,
node.binaryName,
- node.rhs);
+ node.rhs,
+ null);
Expression binary = binaryResult.expression;
DartType binaryType = binaryResult.inferredType;
@@ -5596,7 +5614,8 @@
left,
readType,
node.binaryName,
- node.rhs);
+ node.rhs,
+ null);
Expression binary = binaryResult.expression;
DartType binaryType = binaryResult.inferredType;
@@ -6922,13 +6941,16 @@
BinaryExpression node, DartType typeContext) {
ExpressionInferenceResult leftResult =
inferrer.inferExpression(node.left, const UnknownType(), true);
+ Map<DartType, NonPromotionReason> Function() whyNotPromoted =
+ inferrer.flowAnalysis?.whyNotPromoted(leftResult.expression);
return _computeBinaryExpression(
node.fileOffset,
typeContext,
leftResult.expression,
leftResult.inferredType,
node.binaryName,
- node.right);
+ node.right,
+ whyNotPromoted);
}
ExpressionInferenceResult visitUnary(
@@ -7001,8 +7023,10 @@
expressionResult =
inferrer.inferExpression(node.expression, const UnknownType(), true);
}
+ Map<DartType, NonPromotionReason> Function() whyNotPromoted =
+ inferrer.flowAnalysis?.whyNotPromoted(expressionResult.expression);
return _computeUnaryExpression(node.fileOffset, expressionResult.expression,
- expressionResult.inferredType, node.unaryName);
+ expressionResult.inferredType, node.unaryName, whyNotPromoted);
}
ExpressionInferenceResult visitParenthesized(
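Note (illustrative sketch, not part of the change): the new `whyNotPromoted` parameter is a lazily invoked callback; the map of non-promotion reasons is only computed via `?.call()` when a nullable-target error is actually reported, and call sites without flow information simply pass null. A minimal sketch of the pattern, with hypothetical names:

  typedef WhyNotPromoted = Map<String, String> Function();

  void reportNullableCall(bool targetIsNullable, WhyNotPromoted? whyNotPromoted) {
    if (!targetIsNullable) return;                  // common case: callback never runs
    final reasons = whyNotPromoted?.call() ?? const {};
    for (final entry in reasons.entries) {
      print('not promoted: ${entry.key} (${entry.value})');
    }
  }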
diff --git a/pkg/native_stack_traces/CHANGELOG.md b/pkg/native_stack_traces/CHANGELOG.md
index 9b16d1c..4a9f873 100644
--- a/pkg/native_stack_traces/CHANGELOG.md
+++ b/pkg/native_stack_traces/CHANGELOG.md
@@ -1,5 +1,9 @@
# Changelog
+## 0.4.1
+
+- Exported some ELF utilities in lib/elf.dart for use in Dart tests.
+
## 0.4.0
- Stable null safe version of package.
diff --git a/pkg/native_stack_traces/lib/elf.dart b/pkg/native_stack_traces/lib/elf.dart
new file mode 100644
index 0000000..de32cd3
--- /dev/null
+++ b/pkg/native_stack_traces/lib/elf.dart
@@ -0,0 +1,11 @@
+// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+export 'src/elf.dart' show Elf, Symbol;
+export 'src/constants.dart'
+ show
+ isolateDataSymbolName,
+ isolateSymbolName,
+ vmDataSymbolName,
+ vmSymbolName;
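Note (illustrative sketch, not part of the change): the VM tests below consume these exports roughly as follows; a minimal sketch, assuming the caller supplies a path to an ELF snapshot:

  import 'package:native_stack_traces/elf.dart';

  int? textSectionsSize(String snapshotPath) {
    final elf = Elf.fromFile(snapshotPath);
    if (elf == null) return null;                       // not a readable ELF file
    final vmText = elf.dynamicSymbolFor(vmSymbolName);
    final isolateText = elf.dynamicSymbolFor(isolateSymbolName);
    if (vmText == null || isolateText == null) return null;
    return vmText.size + isolateText.size;              // instructions sections only
  }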
diff --git a/pkg/native_stack_traces/lib/src/constants.dart b/pkg/native_stack_traces/lib/src/constants.dart
index c44d615..ca987f5 100644
--- a/pkg/native_stack_traces/lib/src/constants.dart
+++ b/pkg/native_stack_traces/lib/src/constants.dart
@@ -12,5 +12,11 @@
// The dynamic symbol name for the VM instructions section.
const String vmSymbolName = "_kDartVmSnapshotInstructions";
+// The dynamic symbol name for the VM data section.
+const String vmDataSymbolName = "_kDartVmSnapshotData";
+
// The dynamic symbol name for the isolate instructions section.
const String isolateSymbolName = "_kDartIsolateSnapshotInstructions";
+
+// The dynamic symbol name for the isolate data section.
+const String isolateDataSymbolName = "_kDartIsolateSnapshotData";
diff --git a/pkg/native_stack_traces/pubspec.yaml b/pkg/native_stack_traces/pubspec.yaml
index df71c5a..f93e01b 100644
--- a/pkg/native_stack_traces/pubspec.yaml
+++ b/pkg/native_stack_traces/pubspec.yaml
@@ -1,6 +1,6 @@
name: native_stack_traces
description: Utilities for working with non-symbolic stack traces.
-version: 0.4.0
+version: 0.4.1
homepage: https://github.com/dart-lang/sdk/tree/master/pkg/native_stack_traces
diff --git a/runtime/tests/vm/dart/use_flag_test_helper.dart b/runtime/tests/vm/dart/use_flag_test_helper.dart
index d6854ac..431fce5 100644
--- a/runtime/tests/vm/dart/use_flag_test_helper.dart
+++ b/runtime/tests/vm/dart/use_flag_test_helper.dart
@@ -116,13 +116,16 @@
print('Running $executable ${args.join(' ')}');
final result = await Process.run(executable, args);
+ print('Subcommand terminated with exit code ${result.exitCode}.');
if (result.stdout.isNotEmpty) {
print('Subcommand stdout:');
print(result.stdout);
}
- if (result.stderr.isNotEmpty) {
- print('Subcommand stderr:');
- print(result.stderr);
+ if (result.exitCode != 0) {
+ if (result.stderr.isNotEmpty) {
+ print('Subcommand stderr:');
+ print(result.stderr);
+ }
}
return result;
diff --git a/runtime/tests/vm/dart/v8_snapshot_profile_writer_test.dart b/runtime/tests/vm/dart/v8_snapshot_profile_writer_test.dart
index aa7eac0..8719b76 100644
--- a/runtime/tests/vm/dart/v8_snapshot_profile_writer_test.dart
+++ b/runtime/tests/vm/dart/v8_snapshot_profile_writer_test.dart
@@ -6,6 +6,7 @@
import 'dart:io';
import 'package:expect/expect.dart';
+import 'package:native_stack_traces/elf.dart';
import 'package:path/path.dart' as path;
import 'package:vm_snapshot_analysis/v8_profile.dart';
@@ -14,33 +15,112 @@
// Used to ensure we don't have multiple equivalent calls to test.
final _seenDescriptions = <String>{};
-Future<void> test(String dillPath,
+Snapshot testProfile(String profilePath) {
+ final profile =
+ Snapshot.fromJson(jsonDecode(File(profilePath).readAsStringSync()));
+
+ // Verify that there are no "unknown" nodes. These are emitted when we see a
+ // reference to an some object but no other metadata about the object was
+ // recorded. We should at least record the type for every object in the
+ // graph (in some cases the shallow size can legitimately be 0, e.g. for
+ // "base objects" not written to the snapshot or artificial nodes).
+ for (final node in profile.nodes) {
+ Expect.notEquals("Unknown", node.type, "unknown node ${node}");
+ }
+
+ final root = profile.nodeAt(0);
+ final reachable = <Node>{};
+
+ // HeapSnapshotWorker.HeapSnapshot.calculateDistances (from HeapSnapshot.js)
+ // assumes that the graph root has at most one edge to any other node
+ // (most likely an oversight).
+ for (final edge in root.edges) {
+ Expect.isTrue(
+ reachable.add(edge.target),
+ "root\n\n$root\n\nhas multiple edges to node\n\n${edge.target}:\n\n"
+ "${root.edges.where((e) => e.target == edge.target).toList()}");
+ }
+
+ // Check that all other nodes are reachable from the root.
+ final stack = <Node>[...reachable];
+ while (!stack.isEmpty) {
+ final next = stack.removeLast();
+ for (final edge in next.edges) {
+ if (reachable.add(edge.target)) {
+ stack.add(edge.target);
+ }
+ }
+ }
+
+ final unreachable =
+ profile.nodes.skip(1).where((Node n) => !reachable.contains(n)).toSet();
+ Expect.isEmpty(unreachable);
+
+ return profile;
+}
+
+Future<void> testJIT(String dillPath) async {
+ final description = 'jit';
+ Expect.isTrue(_seenDescriptions.add(description),
+ "test configuration $description would be run multiple times");
+
+ await withTempDir('v8-snapshot-profile-$description', (String tempDir) async {
+ // Generate the snapshot profile.
+ final profilePath = path.join(tempDir, 'profile.heapsnapshot');
+ final vmTextPath = path.join(tempDir, 'vm_instructions.bin');
+ final isolateTextPath = path.join(tempDir, 'isolate_instructions.bin');
+ final vmDataPath = path.join(tempDir, 'vm_data.bin');
+ final isolateDataPath = path.join(tempDir, 'isolate_data.bin');
+
+ await run(genSnapshot, <String>[
+ '--snapshot-kind=core-jit',
+ '--vm_snapshot_instructions=$vmTextPath',
+ '--isolate_snapshot_instructions=$isolateTextPath',
+ '--vm_snapshot_data=$vmDataPath',
+ '--isolate_snapshot_data=$isolateDataPath',
+ "--write-v8-snapshot-profile-to=$profilePath",
+ dillPath,
+ ]);
+
+ print("Snapshot profile generated at $profilePath.");
+
+ final profile = testProfile(profilePath);
+
+ // Verify that the total size of the snapshot text and data sections is
+ // the same as the sum of the shallow sizes of all objects in the profile.
+ // This ensures that all bytes are accounted for in some way.
+ final actualSize = await File(vmTextPath).length() +
+ await File(isolateTextPath).length() +
+ await File(vmDataPath).length() +
+ await File(isolateDataPath).length();
+ final expectedSize =
+ profile.nodes.fold<int>(0, (size, n) => size + n.selfSize);
+
+ Expect.equals(expectedSize, actualSize, "failed on $description snapshot");
+ });
+}
+
+Future<void> testAOT(String dillPath,
{bool useAsm = false,
bool useBare = true,
bool forceDrops = false,
bool useDispatch = true,
- bool stripUtil = false, // Note: forced if useAsm.
- bool stripFlag = false, // Note: forced if !stripUtil (and thus !useAsm).
+ bool stripUtil = false, // Note: forced true if useAsm.
+ bool stripFlag = false,
bool disassemble = false}) async {
- // We don't assume forced disassembler support in Product mode, so skip any
- // disassembly test.
- if (!const bool.fromEnvironment('dart.vm.product') && disassemble) {
- return;
+ if (const bool.fromEnvironment('dart.vm.product') && disassemble) {
+ Expect.isFalse(disassemble, 'no use of disassembler in PRODUCT mode');
}
- // The assembler may add extra unnecessary information to the compiled
- // snapshot whether or not we generate DWARF information in the assembly, so
- // we force the use of a utility when generating assembly.
+ // For assembly, we can't test the sizes of the snapshot sections, since we
+ // don't have a Mach-O reader for Mac snapshots, and for ELF the assembler
+ // merges the text/data sections and the VM/isolate section symbols may not
+ // have length information. Thus, we force external stripping so we can test
+ // the approximate size of the stripped snapshot.
if (useAsm) {
stripUtil = true;
}
- // We must strip the output in some way when generating ELF snapshots,
- // else the debugging information added will cause the test to fail.
- if (!stripUtil) {
- stripFlag = true;
- }
-
final descriptionBuilder = StringBuffer()..write(useAsm ? 'assembly' : 'elf');
if (!useBare) {
descriptionBuilder.write('-nonbare');
@@ -70,7 +150,7 @@
final profilePath = path.join(tempDir, 'profile.heapsnapshot');
final snapshotPath = path.join(tempDir, 'test.snap');
final commonSnapshotArgs = [
- if (stripFlag) '--strip',
+ if (stripFlag) '--strip', // gen_snapshot specific and not a VM flag.
useBare ? '--use-bare-instructions' : '--no-use-bare-instructions',
"--write-v8-snapshot-profile-to=$profilePath",
if (forceDrops) ...[
@@ -78,9 +158,8 @@
'--no-retain-function-objects',
'--no-retain-code-objects'
],
- if (!useDispatch) '--no-use-table-dispatch',
- if (disassemble) '--disassemble',
- '--ignore-unrecognized-flags',
+ useDispatch ? '--use-table-dispatch' : '--no-use-table-dispatch',
+ if (disassemble) '--disassemble', // Not defined in PRODUCT mode.
dillPath,
];
@@ -102,70 +181,70 @@
]);
}
- String strippedPath;
- if (stripUtil) {
- strippedPath = snapshotPath + '.stripped';
- await stripSnapshot(snapshotPath, strippedPath, forceElf: !useAsm);
- } else {
- strippedPath = snapshotPath;
- }
-
+ print("Snapshot generated at $snapshotPath.");
print("Snapshot profile generated at $profilePath.");
- final profile =
- Snapshot.fromJson(jsonDecode(File(profilePath).readAsStringSync()));
+ final profile = testProfile(profilePath);
- // Verify that there are no "unknown" nodes. These are emitted when we see a
- // reference to an some object but no other metadata about the object was
- // recorded. We should at least record the type for every object in the
- // graph (in some cases the shallow size can legitimately be 0, e.g. for
- // "base objects" not written to the snapshot or artificial nodes).
- for (final node in profile.nodes) {
- Expect.notEquals("Unknown", node.type, "unknown node ${node}");
+ final expectedSize =
+ profile.nodes.fold<int>(0, (size, n) => size + n.selfSize);
+
+ var checkedSize = false;
+ if (!useAsm) {
+ // Verify that the total size of the snapshot text and data sections is
+ // the same as the sum of the shallow sizes of all objects in the profile.
+ // This ensures that all bytes are accounted for in some way.
+ final elf = Elf.fromFile(snapshotPath);
+ Expect.isNotNull(elf);
+ elf!; // To refine type to non-nullable version.
+
+ final vmTextSectionSymbol = elf.dynamicSymbolFor(vmSymbolName);
+ Expect.isNotNull(vmTextSectionSymbol);
+ final vmDataSectionSymbol = elf.dynamicSymbolFor(vmDataSymbolName);
+ Expect.isNotNull(vmDataSectionSymbol);
+ final isolateTextSectionSymbol = elf.dynamicSymbolFor(isolateSymbolName);
+ Expect.isNotNull(isolateTextSectionSymbol);
+ final isolateDataSectionSymbol =
+ elf.dynamicSymbolFor(isolateDataSymbolName);
+ Expect.isNotNull(isolateDataSectionSymbol);
+
+ final actualSize = vmTextSectionSymbol!.size +
+ vmDataSectionSymbol!.size +
+ isolateTextSectionSymbol!.size +
+ isolateDataSectionSymbol!.size;
+
+ Expect.equals(
+ expectedSize, actualSize, "failed on $description snapshot");
+ checkedSize = true;
}
- final root = profile.nodeAt(0);
- final reachable = <Node>{};
-
- // HeapSnapshotWorker.HeapSnapshot.calculateDistances (from HeapSnapshot.js)
- // assumes that the graph root has at most one edge to any other node
- // (most likely an oversight).
- for (final edge in root.edges) {
- Expect.isTrue(
- reachable.add(edge.target),
- "root\n\n$root\n\nhas multiple edges to node\n\n${edge.target}:\n\n"
- "${root.edges.where((e) => e.target == edge.target).toList()}");
- }
-
- // Check that all other nodes are reachable from the root.
- final stack = <Node>[...reachable];
- while (!stack.isEmpty) {
- final next = stack.removeLast();
- for (final edge in next.edges) {
- if (reachable.add(edge.target)) {
- stack.add(edge.target);
- }
+ if (stripUtil || stripFlag) {
+ var strippedSnapshotPath = snapshotPath;
+ if (stripUtil) {
+ strippedSnapshotPath = snapshotPath + '.stripped';
+ await stripSnapshot(snapshotPath, strippedSnapshotPath,
+ forceElf: !useAsm);
+ print("Stripped snapshot generated at $strippedSnapshotPath.");
}
+
+ // Verify that the actual size of the stripped snapshot is close to the
+ // sum of the shallow sizes of all objects in the profile. They will not
+ // be exactly equal because of global headers and padding.
+ final actualSize = await File(strippedSnapshotPath).length();
+
+ // See Elf::kPages in runtime/vm/elf.h, which is also used for assembly
+ // padding.
+ final segmentAlignment = 16 * 1024;
+ // Not every byte is accounted for by the snapshot profile, and data and
+ // instruction segments are padded to an alignment boundary.
+ final tolerance = 0.03 * actualSize + 2 * segmentAlignment;
+
+ Expect.approxEquals(expectedSize, actualSize, tolerance,
+ "failed on $description snapshot");
+ checkedSize = true;
}
- final unreachable =
- profile.nodes.skip(1).where((Node n) => !reachable.contains(n)).toSet();
- Expect.isEmpty(unreachable);
-
- // Verify that the actual size of the snapshot is close to the sum of the
- // shallow sizes of all objects in the profile. They will not be exactly
- // equal because of global headers and padding.
- final actual = await File(strippedPath).length();
- final expected = profile.nodes.fold<int>(0, (size, n) => size + n.selfSize);
-
- // See Elf::kPages in runtime/vm/elf.h.
- final segmentAlignment = 16 * 1024;
- // Not every byte is accounted for by the snapshot profile, and data and
- // instruction segments are padded to an alignment boundary.
- final tolerance = 0.03 * actual + 2 * segmentAlignment;
-
- Expect.approxEquals(
- expected, actual, tolerance, "failed on $description snapshot");
+ Expect.isTrue(checkedSize, "no snapshot size checks were performed");
});
}
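Note (illustration, not part of the change): the tolerance above scales with the stripped snapshot size. For a hypothetical 10 MiB snapshot, 0.03 * 10485760 + 2 * 16384 = 314572.8 + 32768 ≈ 347341 bytes, i.e. about 3% of the file plus two 16 KiB alignment pads.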
@@ -264,10 +343,24 @@
await testMacros();
await withTempDir('v8-snapshot-profile-writer', (String tempDir) async {
- // We only need to generate the dill file once.
+ // We only need to generate the dill file once for all JIT tests.
final _thisTestPath = path.join(sdkDir, 'runtime', 'tests', 'vm', 'dart',
'v8_snapshot_profile_writer_test.dart');
- final dillPath = path.join(tempDir, 'test.dill');
+ final jitDillPath = path.join(tempDir, 'jit_test.dill');
+ await run(genKernel, <String>[
+ '--platform',
+ platformDill,
+ ...Platform.executableArguments.where((arg) =>
+ arg.startsWith('--enable-experiment=') ||
+ arg == '--sound-null-safety' ||
+ arg == '--no-sound-null-safety'),
+ '-o',
+ jitDillPath,
+ _thisTestPath
+ ]);
+
+ // We only need to generate the dill file once for all AOT tests.
+ final aotDillPath = path.join(tempDir, 'aot_test.dill');
await run(genKernel, <String>[
'--aot',
'--platform',
@@ -277,26 +370,38 @@
arg == '--sound-null-safety' ||
arg == '--no-sound-null-safety'),
'-o',
- dillPath,
+ aotDillPath,
_thisTestPath
]);
- // Just as a reminder (these rules are applied in order inside test):
- // If useAsm is true, then stripUtil is forced (as the assembler may add
- // extra information that needs stripping).
- // If stripUtil is false, then stripFlag is forced (as the output must be
- // stripped in some way to remove DWARF information).
+ // Just as a reminder for AOT tests:
+ // * If useAsm is true, then stripUtil is forced (as the assembler may add
+ // extra information that needs stripping), so no need to specify
+ // stripUtil for useAsm tests.
- // Test stripped ELF generation directly.
- await test(dillPath);
- await test(dillPath, useBare: false);
- await test(dillPath, forceDrops: true);
- await test(dillPath, forceDrops: true, useBare: false);
- await test(dillPath, forceDrops: true, useDispatch: false);
- await test(dillPath, forceDrops: true, useDispatch: false, useBare: false);
+ // Test profile generation with a core JIT snapshot.
+ await testJIT(jitDillPath);
- // Regression test for dartbug.com/41149.
- await test(dillPath, useBare: false, disassemble: true);
+ // Test unstripped ELF generation directly.
+ await testAOT(aotDillPath);
+ await testAOT(aotDillPath, useBare: false);
+ await testAOT(aotDillPath, forceDrops: true);
+ await testAOT(aotDillPath, forceDrops: true, useBare: false);
+ await testAOT(aotDillPath, forceDrops: true, useDispatch: false);
+ await testAOT(aotDillPath,
+ forceDrops: true, useDispatch: false, useBare: false);
+
+ // Test flag-stripped ELF generation.
+ await testAOT(aotDillPath, stripFlag: true);
+ await testAOT(aotDillPath, useBare: false, stripFlag: true);
+
+ // Since we can't force disassembler support after the fact when running
+ // in PRODUCT mode, skip any --disassemble tests. Do these tests last, as
+ // they produce a lot of output and may cause the log to be truncated.
+ if (!const bool.fromEnvironment('dart.vm.product')) {
+ // Regression test for dartbug.com/41149.
+ await testAOT(aotDillPath, useBare: false, disassemble: true);
+ }
// We neither generate assembly nor have a stripping utility on Windows.
if (Platform.isWindows) {
@@ -308,24 +413,24 @@
if (Platform.isMacOS && clangBuildToolsDir == null) {
printSkip('ELF external stripping test');
} else {
- // Test unstripped ELF generation that is then stripped externally.
- await test(dillPath, stripUtil: true);
- await test(dillPath, stripUtil: true, useBare: false);
+ // Test unstripped ELF generation that is then externally stripped.
+ await testAOT(aotDillPath, stripUtil: true);
+ await testAOT(aotDillPath, stripUtil: true, useBare: false);
}
// TODO(sstrickl): Currently we can't assemble for SIMARM64 on MacOSX.
- // For example, the test runner still uses blobs for dartkp-mac-*-simarm64.
- // Change assembleSnapshot and remove this check when we can.
+ // For example, the test runner still uses blobs for
+ // dartkp-mac-*-simarm64. Change assembleSnapshot and remove this check
+ // when we can.
if (Platform.isMacOS && buildDir.endsWith('SIMARM64')) {
printSkip('assembly tests');
return;
}
-
- // Test unstripped assembly generation that is then compiled and stripped.
- await test(dillPath, useAsm: true);
- await test(dillPath, useAsm: true, useBare: false);
- // Test stripped assembly generation that is then compiled and stripped.
- await test(dillPath, useAsm: true, stripFlag: true);
- await test(dillPath, useAsm: true, stripFlag: true, useBare: false);
+ // Test unstripped assembly generation that is then externally stripped.
+ await testAOT(aotDillPath, useAsm: true);
+ await testAOT(aotDillPath, useAsm: true, useBare: false);
+ // Test stripped assembly generation that is then externally stripped.
+ await testAOT(aotDillPath, useAsm: true, stripFlag: true);
+ await testAOT(aotDillPath, useAsm: true, stripFlag: true, useBare: false);
});
}
diff --git a/runtime/tests/vm/dart_2/use_flag_test_helper.dart b/runtime/tests/vm/dart_2/use_flag_test_helper.dart
index 24f0687..650a9bf 100644
--- a/runtime/tests/vm/dart_2/use_flag_test_helper.dart
+++ b/runtime/tests/vm/dart_2/use_flag_test_helper.dart
@@ -114,13 +114,16 @@
print('Running $executable ${args.join(' ')}');
final result = await Process.run(executable, args);
+ print('Subcommand terminated with exit code ${result.exitCode}.');
if (result.stdout.isNotEmpty) {
print('Subcommand stdout:');
print(result.stdout);
}
- if (result.stderr.isNotEmpty) {
- print('Subcommand stderr:');
- print(result.stderr);
+ if (result.exitCode != 0) {
+ if (result.stderr.isNotEmpty) {
+ print('Subcommand stderr:');
+ print(result.stderr);
+ }
}
return result;
diff --git a/runtime/tests/vm/dart_2/v8_snapshot_profile_writer_test.dart b/runtime/tests/vm/dart_2/v8_snapshot_profile_writer_test.dart
index 7977a71..0628aa0 100644
--- a/runtime/tests/vm/dart_2/v8_snapshot_profile_writer_test.dart
+++ b/runtime/tests/vm/dart_2/v8_snapshot_profile_writer_test.dart
@@ -6,6 +6,7 @@
import 'dart:io';
import 'package:expect/expect.dart';
+import 'package:native_stack_traces/elf.dart';
import 'package:path/path.dart' as path;
import 'package:vm_snapshot_analysis/v8_profile.dart';
@@ -14,33 +15,112 @@
// Used to ensure we don't have multiple equivalent calls to test.
final _seenDescriptions = <String>{};
-Future<void> test(String dillPath,
+Snapshot testProfile(String profilePath) {
+ final profile =
+ Snapshot.fromJson(jsonDecode(File(profilePath).readAsStringSync()));
+
+ // Verify that there are no "unknown" nodes. These are emitted when we see a
+ // reference to some object but no other metadata about the object was
+ // recorded. We should at least record the type for every object in the
+ // graph (in some cases the shallow size can legitimately be 0, e.g. for
+ // "base objects" not written to the snapshot or artificial nodes).
+ for (final node in profile.nodes) {
+ Expect.notEquals("Unknown", node.type, "unknown node ${node}");
+ }
+
+ final root = profile.nodeAt(0);
+ final reachable = <Node>{};
+
+ // HeapSnapshotWorker.HeapSnapshot.calculateDistances (from HeapSnapshot.js)
+ // assumes that the graph root has at most one edge to any other node
+ // (most likely an oversight).
+ for (final edge in root.edges) {
+ Expect.isTrue(
+ reachable.add(edge.target),
+ "root\n\n$root\n\nhas multiple edges to node\n\n${edge.target}:\n\n"
+ "${root.edges.where((e) => e.target == edge.target).toList()}");
+ }
+
+ // Check that all other nodes are reachable from the root.
+ final stack = <Node>[...reachable];
+ while (!stack.isEmpty) {
+ final next = stack.removeLast();
+ for (final edge in next.edges) {
+ if (reachable.add(edge.target)) {
+ stack.add(edge.target);
+ }
+ }
+ }
+
+ final unreachable =
+ profile.nodes.skip(1).where((Node n) => !reachable.contains(n)).toSet();
+ Expect.isEmpty(unreachable);
+
+ return profile;
+}
+
+Future<void> testJIT(String dillPath) async {
+ final description = 'jit';
+ Expect.isTrue(_seenDescriptions.add(description),
+ "test configuration $description would be run multiple times");
+
+ await withTempDir('v8-snapshot-profile-$description', (String tempDir) async {
+ // Generate the snapshot profile.
+ final profilePath = path.join(tempDir, 'profile.heapsnapshot');
+ final vmTextPath = path.join(tempDir, 'vm_instructions.bin');
+ final isolateTextPath = path.join(tempDir, 'isolate_instructions.bin');
+ final vmDataPath = path.join(tempDir, 'vm_data.bin');
+ final isolateDataPath = path.join(tempDir, 'isolate_data.bin');
+
+ await run(genSnapshot, <String>[
+ '--snapshot-kind=core-jit',
+ '--vm_snapshot_instructions=$vmTextPath',
+ '--isolate_snapshot_instructions=$isolateTextPath',
+ '--vm_snapshot_data=$vmDataPath',
+ '--isolate_snapshot_data=$isolateDataPath',
+ "--write-v8-snapshot-profile-to=$profilePath",
+ dillPath,
+ ]);
+
+ print("Snapshot profile generated at $profilePath.");
+
+ final profile = testProfile(profilePath);
+
+ // Verify that the total size of the snapshot text and data sections is
+ // the same as the sum of the shallow sizes of all objects in the profile.
+ // This ensures that all bytes are accounted for in some way.
+ final actualSize = await File(vmTextPath).length() +
+ await File(isolateTextPath).length() +
+ await File(vmDataPath).length() +
+ await File(isolateDataPath).length();
+ final expectedSize =
+ profile.nodes.fold<int>(0, (size, n) => size + n.selfSize);
+
+ Expect.equals(expectedSize, actualSize, "failed on $description snapshot");
+ });
+}
+
+Future<void> testAOT(String dillPath,
{bool useAsm = false,
bool useBare = true,
bool forceDrops = false,
bool useDispatch = true,
- bool stripUtil = false, // Note: forced if useAsm.
- bool stripFlag = false, // Note: forced if !stripUtil (and thus !useAsm).
+ bool stripUtil = false, // Note: forced true if useAsm.
+ bool stripFlag = false,
bool disassemble = false}) async {
- // We don't assume forced disassembler support in Product mode, so skip any
- // disassembly test.
- if (!const bool.fromEnvironment('dart.vm.product') && disassemble) {
- return;
+ if (const bool.fromEnvironment('dart.vm.product') && disassemble) {
+ Expect.isFalse(disassemble, 'no use of disassembler in PRODUCT mode');
}
- // The assembler may add extra unnecessary information to the compiled
- // snapshot whether or not we generate DWARF information in the assembly, so
- // we force the use of a utility when generating assembly.
+ // For assembly, we can't test the sizes of the snapshot sections, since we
+ // don't have a Mach-O reader for Mac snapshots, and for ELF the assembler
+ // merges the text/data sections and the VM/isolate section symbols may not
+ // have length information. Thus, we force external stripping so we can test
+ // the approximate size of the stripped snapshot.
if (useAsm) {
stripUtil = true;
}
- // We must strip the output in some way when generating ELF snapshots,
- // else the debugging information added will cause the test to fail.
- if (!stripUtil) {
- stripFlag = true;
- }
-
final descriptionBuilder = StringBuffer()..write(useAsm ? 'assembly' : 'elf');
if (!useBare) {
descriptionBuilder.write('-nonbare');
@@ -70,7 +150,7 @@
final profilePath = path.join(tempDir, 'profile.heapsnapshot');
final snapshotPath = path.join(tempDir, 'test.snap');
final commonSnapshotArgs = [
- if (stripFlag) '--strip',
+ if (stripFlag) '--strip', // gen_snapshot specific and not a VM flag.
useBare ? '--use-bare-instructions' : '--no-use-bare-instructions',
"--write-v8-snapshot-profile-to=$profilePath",
if (forceDrops) ...[
@@ -78,9 +158,8 @@
'--no-retain-function-objects',
'--no-retain-code-objects'
],
- if (!useDispatch) '--no-use-table-dispatch',
- if (disassemble) '--disassemble',
- '--ignore-unrecognized-flags',
+ useDispatch ? '--use-table-dispatch' : '--no-use-table-dispatch',
+ if (disassemble) '--disassemble', // Not defined in PRODUCT mode.
dillPath,
];
@@ -110,62 +189,69 @@
strippedPath = snapshotPath;
}
+ print("Snapshot generated at $snapshotPath.");
print("Snapshot profile generated at $profilePath.");
- final profile =
- Snapshot.fromJson(jsonDecode(File(profilePath).readAsStringSync()));
+ final profile = testProfile(profilePath);
- // Verify that there are no "unknown" nodes. These are emitted when we see a
- // reference to an some object but no other metadata about the object was
- // recorded. We should at least record the type for every object in the
- // graph (in some cases the shallow size can legitimately be 0, e.g. for
- // "base objects" not written to the snapshot or artificial nodes).
- for (final node in profile.nodes) {
- Expect.notEquals("Unknown", node.type, "unknown node ${node}");
+ final expectedSize =
+ profile.nodes.fold<int>(0, (size, n) => size + n.selfSize);
+
+ var checkedSize = false;
+ if (!useAsm) {
+ // Verify that the total size of the snapshot text and data sections is
+ // the same as the sum of the shallow sizes of all objects in the profile.
+ // This ensures that all bytes are accounted for in some way.
+ final elf = Elf.fromFile(snapshotPath);
+ Expect.isNotNull(elf);
+
+ final vmTextSectionSymbol = elf.dynamicSymbolFor(vmSymbolName);
+ Expect.isNotNull(vmTextSectionSymbol);
+ final vmDataSectionSymbol = elf.dynamicSymbolFor(vmDataSymbolName);
+ Expect.isNotNull(vmDataSectionSymbol);
+ final isolateTextSectionSymbol = elf.dynamicSymbolFor(isolateSymbolName);
+ Expect.isNotNull(isolateTextSectionSymbol);
+ final isolateDataSectionSymbol =
+ elf.dynamicSymbolFor(isolateDataSymbolName);
+ Expect.isNotNull(isolateDataSectionSymbol);
+
+ final actualSize = vmTextSectionSymbol.size +
+ vmDataSectionSymbol.size +
+ isolateTextSectionSymbol.size +
+ isolateDataSectionSymbol.size;
+
+ Expect.equals(
+ expectedSize, actualSize, "failed on $description snapshot");
+ checkedSize = true;
}
- final root = profile.nodeAt(0);
- final reachable = <Node>{};
-
- // HeapSnapshotWorker.HeapSnapshot.calculateDistances (from HeapSnapshot.js)
- // assumes that the graph root has at most one edge to any other node
- // (most likely an oversight).
- for (final edge in root.edges) {
- Expect.isTrue(
- reachable.add(edge.target),
- "root\n\n$root\n\nhas multiple edges to node\n\n${edge.target}:\n\n"
- "${root.edges.where((e) => e.target == edge.target).toList()}");
- }
-
- // Check that all other nodes are reachable from the root.
- final stack = <Node>[...reachable];
- while (!stack.isEmpty) {
- final next = stack.removeLast();
- for (final edge in next.edges) {
- if (reachable.add(edge.target)) {
- stack.add(edge.target);
- }
+ if (stripUtil || stripFlag) {
+ var strippedSnapshotPath = snapshotPath;
+ if (stripUtil) {
+ strippedSnapshotPath = snapshotPath + '.stripped';
+ await stripSnapshot(snapshotPath, strippedSnapshotPath,
+ forceElf: !useAsm);
+ print("Stripped snapshot generated at $strippedSnapshotPath.");
}
+
+ // Verify that the actual size of the stripped snapshot is close to the
+ // sum of the shallow sizes of all objects in the profile. They will not
+ // be exactly equal because of global headers and padding.
+ final actualSize = await File(strippedSnapshotPath).length();
+
+ // See Elf::kPages in runtime/vm/elf.h, which is also used for assembly
+ // padding.
+ final segmentAlignment = 16 * 1024;
+ // Not every byte is accounted for by the snapshot profile, and data and
+ // instruction segments are padded to an alignment boundary.
+ final tolerance = 0.03 * actualSize + 2 * segmentAlignment;
+
+ Expect.approxEquals(expectedSize, actualSize, tolerance,
+ "failed on $description snapshot");
+ checkedSize = true;
}
- final unreachable =
- profile.nodes.skip(1).where((Node n) => !reachable.contains(n)).toSet();
- Expect.isEmpty(unreachable);
-
- // Verify that the actual size of the snapshot is close to the sum of the
- // shallow sizes of all objects in the profile. They will not be exactly
- // equal because of global headers and padding.
- final actual = await File(strippedPath).length();
- final expected = profile.nodes.fold<int>(0, (size, n) => size + n.selfSize);
-
- // See Elf::kPages in runtime/vm/elf.h.
- final segmentAlignment = 16 * 1024;
- // Not every byte is accounted for by the snapshot profile, and data and
- // instruction segments are padded to an alignment boundary.
- final tolerance = 0.03 * actual + 2 * segmentAlignment;
-
- Expect.approxEquals(
- expected, actual, tolerance, "failed on $description snapshot");
+ Expect.isTrue(checkedSize, "no snapshot size checks were performed");
});
}
@@ -264,35 +350,52 @@
await testMacros();
await withTempDir('v8-snapshot-profile-writer', (String tempDir) async {
- // We only need to generate the dill file once.
- final _thisTestPath = path.join(sdkDir, 'runtime', 'tests', 'vm', 'dart_2',
+ // We only need to generate the dill file once for all JIT tests.
+ final _thisTestPath = path.join(sdkDir, 'runtime', 'tests', 'vm', 'dart',
'v8_snapshot_profile_writer_test.dart');
- final dillPath = path.join(tempDir, 'test.dill');
+ final jitDillPath = path.join(tempDir, 'jit_test.dill');
+ await run(genKernel,
+ <String>['--platform', platformDill, '-o', jitDillPath, _thisTestPath]);
+
+ // We only need to generate the dill file once for all AOT tests.
+ final aotDillPath = path.join(tempDir, 'aot_test.dill');
await run(genKernel, <String>[
'--aot',
'--platform',
platformDill,
'-o',
- dillPath,
+ aotDillPath,
_thisTestPath
]);
- // Just as a reminder (these rules are applied in order inside test):
- // If useAsm is true, then stripUtil is forced (as the assembler may add
- // extra information that needs stripping).
- // If stripUtil is false, then stripFlag is forced (as the output must be
- // stripped in some way to remove DWARF information).
+ // Just as a reminder for AOT tests:
+ // * If useAsm is true, then stripUtil is forced (as the assembler may add
+ // extra information that needs stripping), so no need to specify
+ // stripUtil for useAsm tests.
- // Test stripped ELF generation directly.
- await test(dillPath);
- await test(dillPath, useBare: false);
- await test(dillPath, forceDrops: true);
- await test(dillPath, forceDrops: true, useBare: false);
- await test(dillPath, forceDrops: true, useDispatch: false);
- await test(dillPath, forceDrops: true, useDispatch: false, useBare: false);
+ // Test profile generation with a core JIT snapshot.
+ await testJIT(jitDillPath);
- // Regression test for dartbug.com/41149.
- await test(dillPath, useBare: false, disassemble: true);
+ // Test unstripped ELF generation directly.
+ await testAOT(aotDillPath);
+ await testAOT(aotDillPath, useBare: false);
+ await testAOT(aotDillPath, forceDrops: true);
+ await testAOT(aotDillPath, forceDrops: true, useBare: false);
+ await testAOT(aotDillPath, forceDrops: true, useDispatch: false);
+ await testAOT(aotDillPath,
+ forceDrops: true, useDispatch: false, useBare: false);
+
+ // Test flag-stripped ELF generation.
+ await testAOT(aotDillPath, stripFlag: true);
+ await testAOT(aotDillPath, useBare: false, stripFlag: true);
+
+ // Since we can't force disassembler support after the fact when running
+ // in PRODUCT mode, skip any --disassemble tests. Do these tests last, as
+ // they produce a lot of output and may cause the log to be truncated.
+ if (!const bool.fromEnvironment('dart.vm.product')) {
+ // Regression test for dartbug.com/41149.
+ await testAOT(aotDillPath, useBare: false, disassemble: true);
+ }
// We neither generate assembly nor have a stripping utility on Windows.
if (Platform.isWindows) {
@@ -304,24 +407,24 @@
if (Platform.isMacOS && clangBuildToolsDir == null) {
printSkip('ELF external stripping test');
} else {
- // Test unstripped ELF generation that is then stripped externally.
- await test(dillPath, stripUtil: true);
- await test(dillPath, stripUtil: true, useBare: false);
+ // Test unstripped ELF generation that is then externally stripped.
+ await testAOT(aotDillPath, stripUtil: true);
+ await testAOT(aotDillPath, stripUtil: true, useBare: false);
}
// TODO(sstrickl): Currently we can't assemble for SIMARM64 on MacOSX.
- // For example, the test runner still uses blobs for dartkp-mac-*-simarm64.
- // Change assembleSnapshot and remove this check when we can.
+ // For example, the test runner still uses blobs for
+ // dartkp-mac-*-simarm64. Change assembleSnapshot and remove this check
+ // when we can.
if (Platform.isMacOS && buildDir.endsWith('SIMARM64')) {
printSkip('assembly tests');
return;
}
-
- // Test unstripped assembly generation that is then compiled and stripped.
- await test(dillPath, useAsm: true);
- await test(dillPath, useAsm: true, useBare: false);
- // Test stripped assembly generation that is then compiled and stripped.
- await test(dillPath, useAsm: true, stripFlag: true);
- await test(dillPath, useAsm: true, stripFlag: true, useBare: false);
+ // Test unstripped assembly generation that is then externally stripped.
+ await testAOT(aotDillPath, useAsm: true);
+ await testAOT(aotDillPath, useAsm: true, useBare: false);
+ // Test stripped assembly generation that is then externally stripped.
+ await testAOT(aotDillPath, useAsm: true, stripFlag: true);
+ await testAOT(aotDillPath, useAsm: true, stripFlag: true, useBare: false);
});
}
diff --git a/runtime/vm/clustered_snapshot.cc b/runtime/vm/clustered_snapshot.cc
index 4689812..851945b 100644
--- a/runtime/vm/clustered_snapshot.cc
+++ b/runtime/vm/clustered_snapshot.cc
@@ -163,6 +163,9 @@
intptr_t start_size = serializer->bytes_written();
intptr_t start_data = serializer->GetDataSize();
intptr_t start_objects = serializer->next_ref_index();
+ uint64_t cid_and_canonical =
+ (static_cast<uint64_t>(cid_) << 1) | (is_canonical() ? 0x1 : 0x0);
+ serializer->Write<uint64_t>(cid_and_canonical);
WriteAlloc(serializer);
intptr_t stop_size = serializer->bytes_written();
intptr_t stop_data = serializer->GetDataSize();
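Note (illustration, not part of the change): the cluster header now packs the class id and the canonical flag into a single value as cid << 1 | canonical, so WriteAlloc no longer writes the cid separately (hence the s->WriteCid(...) removals below). A worked example of the packing, shown in Dart for brevity with a hypothetical cid:

  void main() {
    const cid = 5;                          // hypothetical class id
    const isCanonical = true;
    final packed = (cid << 1) | (isCanonical ? 1 : 0);
    print(packed);                          // 11: cid in the upper bits, flag in bit 0
    print(packed >> 1);                     // 5: recover the cid
    print(packed & 1);                      // 1: recover the canonical flag
  }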
@@ -176,7 +179,9 @@
}
size_ += (stop_size - start_size) + (stop_data - start_data);
num_objects_ += (stop_objects - start_objects);
- target_memory_size_ += num_objects_ * target_instance_size_;
+ if (target_instance_size_ != kSizeVaries) {
+ target_memory_size_ += num_objects_ * target_instance_size_;
+ }
}
void SerializationCluster::WriteAndMeasureFill(Serializer* serializer) {
@@ -226,7 +231,9 @@
class ClassSerializationCluster : public SerializationCluster {
public:
explicit ClassSerializationCluster(intptr_t num_cids)
- : SerializationCluster("Class", compiler::target::Class::InstanceSize()),
+ : SerializationCluster("Class",
+ kClassCid,
+ compiler::target::Class::InstanceSize()),
predefined_(kNumPredefinedCids),
objects_(num_cids) {}
~ClassSerializationCluster() {}
@@ -252,7 +259,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kClassCid);
intptr_t count = predefined_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -339,7 +345,7 @@
ClassDeserializationCluster() : DeserializationCluster("Class") {}
~ClassDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
predefined_start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
intptr_t count = d->ReadUnsigned();
@@ -361,7 +367,7 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
+ void ReadFill(Deserializer* d, bool primary) {
ClassTable* table = d->isolate_group()->class_table();
for (intptr_t id = predefined_start_index_; id < predefined_stop_index_;
@@ -482,10 +488,12 @@
bool kAllCanonicalObjectsAreIncludedIntoSet = true>
class CanonicalSetSerializationCluster : public SerializationCluster {
protected:
- CanonicalSetSerializationCluster(bool represents_canonical_set,
+ CanonicalSetSerializationCluster(intptr_t cid,
+ bool is_canonical,
+ bool represents_canonical_set,
const char* name,
intptr_t target_instance_size = 0)
- : SerializationCluster(name, target_instance_size),
+ : SerializationCluster(name, cid, target_instance_size, is_canonical),
represents_canonical_set_(represents_canonical_set) {}
virtual bool IsInCanonicalSet(Serializer* s, PointerType ptr) {
@@ -581,13 +589,15 @@
template <typename SetType, bool kAllCanonicalObjectsAreIncludedIntoSet = true>
class CanonicalSetDeserializationCluster : public DeserializationCluster {
public:
- CanonicalSetDeserializationCluster(bool is_root_unit, const char* name)
- : DeserializationCluster(name),
+ CanonicalSetDeserializationCluster(bool is_canonical,
+ bool is_root_unit,
+ const char* name)
+ : DeserializationCluster(name, is_canonical),
is_root_unit_(is_root_unit),
table_(Array::Handle()) {}
- void BuildCanonicalSetFromLayout(Deserializer* d, bool is_canonical) {
- if (!is_root_unit_ || !is_canonical) {
+ void BuildCanonicalSetFromLayout(Deserializer* d) {
+ if (!is_root_unit_ || !is_canonical()) {
return;
}
@@ -681,8 +691,11 @@
TypeArguments,
TypeArgumentsPtr> {
public:
- explicit TypeArgumentsSerializationCluster(bool represents_canonical_set)
- : CanonicalSetSerializationCluster(represents_canonical_set,
+ TypeArgumentsSerializationCluster(bool is_canonical,
+ bool represents_canonical_set)
+ : CanonicalSetSerializationCluster(kTypeArgumentsCid,
+ is_canonical,
+ represents_canonical_set,
"TypeArguments") {}
~TypeArgumentsSerializationCluster() {}
@@ -698,7 +711,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kTypeArgumentsCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
ReorderObjects(s);
@@ -738,11 +750,14 @@
class TypeArgumentsDeserializationCluster
: public CanonicalSetDeserializationCluster<CanonicalTypeArgumentsSet> {
public:
- explicit TypeArgumentsDeserializationCluster(bool is_root_unit)
- : CanonicalSetDeserializationCluster(is_root_unit, "TypeArguments") {}
+ explicit TypeArgumentsDeserializationCluster(bool is_canonical,
+ bool is_root_unit)
+ : CanonicalSetDeserializationCluster(is_canonical,
+ is_root_unit,
+ "TypeArguments") {}
~TypeArgumentsDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -752,16 +767,16 @@
TypeArguments::InstanceSize(length)));
}
stop_index_ = d->next_index();
- BuildCanonicalSetFromLayout(d, stamp_canonical);
+ BuildCanonicalSetFromLayout(d);
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
+ void ReadFill(Deserializer* d, bool primary) {
for (intptr_t id = start_index_; id < stop_index_; id++) {
TypeArgumentsPtr type_args = static_cast<TypeArgumentsPtr>(d->Ref(id));
const intptr_t length = d->ReadUnsigned();
Deserializer::InitializeHeader(type_args, kTypeArgumentsCid,
TypeArguments::InstanceSize(length),
- stamp_canonical);
+ primary && is_canonical());
type_args->untag()->length_ = Smi::New(length);
type_args->untag()->hash_ = Smi::New(d->Read<int32_t>());
type_args->untag()->nullability_ = Smi::New(d->ReadUnsigned());
@@ -773,13 +788,13 @@
}
}
- void PostLoad(Deserializer* d, const Array& refs, bool canonicalize) {
+ void PostLoad(Deserializer* d, const Array& refs, bool primary) {
if (!table_.IsNull()) {
auto object_store = d->isolate_group()->object_store();
VerifyCanonicalSet(
d, refs, Array::Handle(object_store->canonical_type_arguments()));
object_store->set_canonical_type_arguments(table_);
- } else if (canonicalize) {
+ } else if (!primary && is_canonical()) {
TypeArguments& type_arg = TypeArguments::Handle(d->zone());
for (intptr_t i = start_index_; i < stop_index_; i++) {
type_arg ^= refs.At(i);
@@ -795,6 +810,7 @@
public:
PatchClassSerializationCluster()
: SerializationCluster("PatchClass",
+ kPatchClassCid,
compiler::target::PatchClass::InstanceSize()) {}
~PatchClassSerializationCluster() {}
@@ -805,7 +821,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kPatchClassCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -836,7 +851,7 @@
PatchClassDeserializationCluster() : DeserializationCluster("PatchClass") {}
~PatchClassDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -847,8 +862,8 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
- ASSERT(!stamp_canonical); // Never canonical.
+ void ReadFill(Deserializer* d, bool primary) {
+ ASSERT(!is_canonical()); // Never canonical.
for (intptr_t id = start_index_; id < stop_index_; id++) {
PatchClassPtr cls = static_cast<PatchClassPtr>(d->Ref(id));
Deserializer::InitializeHeader(cls, kPatchClassCid,
@@ -868,6 +883,7 @@
public:
FunctionSerializationCluster()
: SerializationCluster("Function",
+ kFunctionCid,
compiler::target::Function::InstanceSize()) {}
~FunctionSerializationCluster() {}
@@ -887,7 +903,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kFunctionCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -948,7 +963,7 @@
FunctionDeserializationCluster() : DeserializationCluster("Function") {}
~FunctionDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -958,8 +973,8 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
- ASSERT(!stamp_canonical); // Never canonical.
+ void ReadFill(Deserializer* d, bool primary) {
+ ASSERT(!is_canonical()); // Never canonical.
Snapshot::Kind kind = d->kind();
for (intptr_t id = start_index_; id < stop_index_; id++) {
@@ -1015,7 +1030,7 @@
}
}
- void PostLoad(Deserializer* d, const Array& refs, bool canonicalize) {
+ void PostLoad(Deserializer* d, const Array& refs, bool primary) {
if (d->kind() == Snapshot::kFullAOT) {
Function& func = Function::Handle(d->zone());
for (intptr_t i = start_index_; i < stop_index_; i++) {
@@ -1060,6 +1075,7 @@
public:
ClosureDataSerializationCluster()
: SerializationCluster("ClosureData",
+ kClosureDataCid,
compiler::target::ClosureData::InstanceSize()) {}
~ClosureDataSerializationCluster() {}
@@ -1076,7 +1092,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kClosureDataCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -1111,7 +1126,7 @@
ClosureDataDeserializationCluster() : DeserializationCluster("ClosureData") {}
~ClosureDataDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -1122,8 +1137,8 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
- ASSERT(!stamp_canonical); // Never canonical.
+ void ReadFill(Deserializer* d, bool primary) {
+ ASSERT(!is_canonical()); // Never canonical.
for (intptr_t id = start_index_; id < stop_index_; id++) {
ClosureDataPtr data = static_cast<ClosureDataPtr>(d->Ref(id));
Deserializer::InitializeHeader(data, kClosureDataCid,
@@ -1150,6 +1165,7 @@
FfiTrampolineDataSerializationCluster()
: SerializationCluster(
"FfiTrampolineData",
+ kFfiTrampolineDataCid,
compiler::target::FfiTrampolineData::InstanceSize()) {}
~FfiTrampolineDataSerializationCluster() {}
@@ -1160,7 +1176,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kFfiTrampolineDataCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -1195,7 +1210,7 @@
: DeserializationCluster("FfiTrampolineData") {}
~FfiTrampolineDataDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -1206,8 +1221,8 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
- ASSERT(!stamp_canonical); // Never canonical.
+ void ReadFill(Deserializer* d, bool primary) {
+ ASSERT(!is_canonical()); // Never canonical.
for (intptr_t id = start_index_; id < stop_index_; id++) {
FfiTrampolineDataPtr data = static_cast<FfiTrampolineDataPtr>(d->Ref(id));
Deserializer::InitializeHeader(data, kFfiTrampolineDataCid,
@@ -1223,8 +1238,9 @@
class FieldSerializationCluster : public SerializationCluster {
public:
FieldSerializationCluster()
- : SerializationCluster("Field", compiler::target::Field::InstanceSize()) {
- }
+ : SerializationCluster("Field",
+ kFieldCid,
+ compiler::target::Field::InstanceSize()) {}
~FieldSerializationCluster() {}
void Trace(Serializer* s, ObjectPtr object) {
@@ -1256,7 +1272,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kFieldCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -1316,7 +1331,7 @@
FieldDeserializationCluster() : DeserializationCluster("Field") {}
~FieldDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -1326,8 +1341,8 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
- ASSERT(!stamp_canonical); // Never canonical.
+ void ReadFill(Deserializer* d, bool primary) {
+ ASSERT(!is_canonical()); // Never canonical.
Snapshot::Kind kind = d->kind();
for (intptr_t id = start_index_; id < stop_index_; id++) {
@@ -1383,7 +1398,7 @@
}
}
- void PostLoad(Deserializer* d, const Array& refs, bool canonicalize) {
+ void PostLoad(Deserializer* d, const Array& refs, bool primary) {
Field& field = Field::Handle(d->zone());
if (!IsolateGroup::Current()->use_field_guards()) {
for (intptr_t i = start_index_; i < stop_index_; i++) {
@@ -1410,6 +1425,7 @@
public:
ScriptSerializationCluster()
: SerializationCluster("Script",
+ kScriptCid,
compiler::target::Script::InstanceSize()) {}
~ScriptSerializationCluster() {}
@@ -1420,7 +1436,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kScriptCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -1461,7 +1476,7 @@
ScriptDeserializationCluster() : DeserializationCluster("Script") {}
~ScriptDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -1471,8 +1486,8 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
- ASSERT(!stamp_canonical); // Never canonical.
+ void ReadFill(Deserializer* d, bool primary) {
+ ASSERT(!is_canonical()); // Never canonical.
for (intptr_t id = start_index_; id < stop_index_; id++) {
ScriptPtr script = static_cast<ScriptPtr>(d->Ref(id));
Deserializer::InitializeHeader(script, kScriptCid,
@@ -1494,6 +1509,7 @@
public:
LibrarySerializationCluster()
: SerializationCluster("Library",
+ kLibraryCid,
compiler::target::Library::InstanceSize()) {}
~LibrarySerializationCluster() {}
@@ -1504,7 +1520,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kLibraryCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -1539,7 +1554,7 @@
LibraryDeserializationCluster() : DeserializationCluster("Library") {}
~LibraryDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -1549,8 +1564,8 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
- ASSERT(!stamp_canonical); // Never canonical.
+ void ReadFill(Deserializer* d, bool primary) {
+ ASSERT(!is_canonical()); // Never canonical.
for (intptr_t id = start_index_; id < stop_index_; id++) {
LibraryPtr lib = static_cast<LibraryPtr>(d->Ref(id));
Deserializer::InitializeHeader(lib, kLibraryCid, Library::InstanceSize());
@@ -1576,6 +1591,7 @@
public:
NamespaceSerializationCluster()
: SerializationCluster("Namespace",
+ kNamespaceCid,
compiler::target::Namespace::InstanceSize()) {}
~NamespaceSerializationCluster() {}
@@ -1586,7 +1602,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kNamespaceCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -1614,7 +1629,7 @@
NamespaceDeserializationCluster() : DeserializationCluster("Namespace") {}
~NamespaceDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -1624,8 +1639,8 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
- ASSERT(!stamp_canonical); // Never canonical.
+ void ReadFill(Deserializer* d, bool primary) {
+ ASSERT(!is_canonical()); // Never canonical.
for (intptr_t id = start_index_; id < stop_index_; id++) {
NamespacePtr ns = static_cast<NamespacePtr>(d->Ref(id));
Deserializer::InitializeHeader(ns, kNamespaceCid,
@@ -1642,6 +1657,7 @@
KernelProgramInfoSerializationCluster()
: SerializationCluster(
"KernelProgramInfo",
+ kKernelProgramInfoCid,
compiler::target::KernelProgramInfo::InstanceSize()) {}
~KernelProgramInfoSerializationCluster() {}
@@ -1652,7 +1668,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kKernelProgramInfoCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -1683,7 +1698,7 @@
: DeserializationCluster("KernelProgramInfo") {}
~KernelProgramInfoDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -1694,8 +1709,8 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
- ASSERT(!stamp_canonical); // Never canonical.
+ void ReadFill(Deserializer* d, bool primary) {
+ ASSERT(!is_canonical()); // Never canonical.
for (intptr_t id = start_index_; id < stop_index_; id++) {
KernelProgramInfoPtr info = static_cast<KernelProgramInfoPtr>(d->Ref(id));
Deserializer::InitializeHeader(info, kKernelProgramInfoCid,
@@ -1705,7 +1720,7 @@
}
}
- void PostLoad(Deserializer* d, const Array& refs, bool canonicalize) {
+ void PostLoad(Deserializer* d, const Array& refs, bool primary) {
Array& array = Array::Handle(d->zone());
KernelProgramInfo& info = KernelProgramInfo::Handle(d->zone());
for (intptr_t id = start_index_; id < stop_index_; id++) {
@@ -1721,7 +1736,7 @@
class CodeSerializationCluster : public SerializationCluster {
public:
explicit CodeSerializationCluster(Heap* heap)
- : SerializationCluster("Code"), array_(Array::Handle()) {}
+ : SerializationCluster("Code", kCodeCid), array_(Array::Handle()) {}
~CodeSerializationCluster() {}
void Trace(Serializer* s, ObjectPtr object) {
@@ -1881,7 +1896,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kCodeCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -2057,7 +2071,7 @@
CodeDeserializationCluster() : DeserializationCluster("Code") {}
~CodeDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
PageSpace* old_space = d->heap()->old_space();
start_index_ = d->next_index();
d->set_code_start_index(start_index_);
@@ -2087,8 +2101,8 @@
}
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
- ASSERT(!stamp_canonical); // Never canonical.
+ void ReadFill(Deserializer* d, bool primary) {
+ ASSERT(!is_canonical()); // Never canonical.
for (intptr_t id = start_index_; id < stop_index_; id++) {
ReadFill(d, id, false);
}
@@ -2150,7 +2164,7 @@
#endif
}
- void PostLoad(Deserializer* d, const Array& refs, bool canonicalize) {
+ void PostLoad(Deserializer* d, const Array& refs, bool primary) {
d->EndInstructions();
#if !defined(PRODUCT)
@@ -2191,7 +2205,8 @@
#if !defined(DART_PRECOMPILED_RUNTIME)
class ObjectPoolSerializationCluster : public SerializationCluster {
public:
- ObjectPoolSerializationCluster() : SerializationCluster("ObjectPool") {}
+ ObjectPoolSerializationCluster()
+ : SerializationCluster("ObjectPool", kObjectPoolCid) {}
~ObjectPoolSerializationCluster() {}
void Trace(Serializer* s, ObjectPtr object) {
@@ -2213,7 +2228,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kObjectPoolCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -2283,7 +2297,7 @@
ObjectPoolDeserializationCluster() : DeserializationCluster("ObjectPool") {}
~ObjectPoolDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -2295,8 +2309,8 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
- ASSERT(!stamp_canonical); // Never canonical.
+ void ReadFill(Deserializer* d, bool primary) {
+ ASSERT(!is_canonical()); // Never canonical.
fill_position_ = d->position();
for (intptr_t id = start_index_; id < stop_index_; id++) {
@@ -2329,7 +2343,7 @@
}
}
- void PostLoad(Deserializer* d, const Array& refs, bool canonicalize) {
+ void PostLoad(Deserializer* d, const Array& refs, bool primary) {
if (d->is_non_root_unit()) {
// If this is a non-root unit, some pool entries that should be canonical
// may have been replaced with other objects during canonicalization.
@@ -2410,23 +2424,11 @@
};
#endif
-#if defined(DART_PRECOMPILED_RUNTIME)
-class WeakSerializationReferenceDeserializationCluster
- : public DeserializationCluster {
- public:
- WeakSerializationReferenceDeserializationCluster()
- : DeserializationCluster("WeakSerializationReference") {}
- ~WeakSerializationReferenceDeserializationCluster() {}
-
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {}
- void ReadFill(Deserializer* d, bool stamp_canonical) {}
-};
-#endif
-
#if !defined(DART_PRECOMPILED_RUNTIME)
class PcDescriptorsSerializationCluster : public SerializationCluster {
public:
- PcDescriptorsSerializationCluster() : SerializationCluster("PcDescriptors") {}
+ PcDescriptorsSerializationCluster()
+ : SerializationCluster("PcDescriptors", kPcDescriptorsCid) {}
~PcDescriptorsSerializationCluster() {}
void Trace(Serializer* s, ObjectPtr object) {
@@ -2435,7 +2437,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kPcDescriptorsCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -2472,7 +2473,7 @@
: DeserializationCluster("PcDescriptors") {}
~PcDescriptorsDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -2484,8 +2485,8 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
- ASSERT(!stamp_canonical); // Never canonical.
+ void ReadFill(Deserializer* d, bool primary) {
+ ASSERT(!is_canonical()); // Never canonical.
for (intptr_t id = start_index_; id < stop_index_; id++) {
const intptr_t length = d->ReadUnsigned();
PcDescriptorsPtr desc = static_cast<PcDescriptorsPtr>(d->Ref(id));
@@ -2501,7 +2502,8 @@
#if !defined(DART_PRECOMPILED_RUNTIME)
class CodeSourceMapSerializationCluster : public SerializationCluster {
public:
- CodeSourceMapSerializationCluster() : SerializationCluster("CodeSourceMap") {}
+ CodeSourceMapSerializationCluster()
+ : SerializationCluster("CodeSourceMap", kCodeSourceMapCid) {}
~CodeSourceMapSerializationCluster() {}
void Trace(Serializer* s, ObjectPtr object) {
@@ -2510,7 +2512,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kCodeSourceMapCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -2547,7 +2548,7 @@
: DeserializationCluster("CodeSourceMap") {}
~CodeSourceMapDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool is_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -2559,7 +2560,7 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool is_canonical) {
+ void ReadFill(Deserializer* d, bool primary) {
for (intptr_t id = start_index_; id < stop_index_; id++) {
const intptr_t length = d->ReadUnsigned();
CodeSourceMapPtr map = static_cast<CodeSourceMapPtr>(d->Ref(id));
@@ -2576,7 +2577,7 @@
class CompressedStackMapsSerializationCluster : public SerializationCluster {
public:
CompressedStackMapsSerializationCluster()
- : SerializationCluster("CompressedStackMaps") {}
+ : SerializationCluster("CompressedStackMaps", kCompressedStackMapsCid) {}
~CompressedStackMapsSerializationCluster() {}
void Trace(Serializer* s, ObjectPtr object) {
@@ -2585,7 +2586,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kCompressedStackMapsCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -2625,7 +2625,7 @@
: DeserializationCluster("CompressedStackMaps") {}
~CompressedStackMapsDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool is_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -2637,7 +2637,7 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool is_canonical) {
+ void ReadFill(Deserializer* d, bool primary) {
for (intptr_t id = start_index_; id < stop_index_; id++) {
const intptr_t flags_and_size = d->ReadUnsigned();
const intptr_t length =
@@ -2665,6 +2665,8 @@
intptr_t cid,
bool is_canonical)
: CanonicalSetSerializationCluster(
+ cid,
+ is_canonical,
is_canonical && IsStringClassId(cid),
ImageWriter::TagObjectTypeAsReadOnly(zone, type)),
zone_(zone),
@@ -2689,7 +2691,6 @@
void WriteAlloc(Serializer* s) {
const bool is_string_cluster = IsStringClassId(cid_);
- s->WriteCid(cid_);
intptr_t count = objects_.length();
s->WriteUnsigned(count);
@@ -2729,11 +2730,16 @@
class RODataDeserializationCluster
: public CanonicalSetDeserializationCluster<CanonicalStringSet> {
public:
- explicit RODataDeserializationCluster(bool is_root_unit, intptr_t cid)
- : CanonicalSetDeserializationCluster(is_root_unit, "ROData"), cid_(cid) {}
+ explicit RODataDeserializationCluster(bool is_canonical,
+ bool is_root_unit,
+ intptr_t cid)
+ : CanonicalSetDeserializationCluster(is_canonical,
+ is_root_unit,
+ "ROData"),
+ cid_(cid) {}
~RODataDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
intptr_t count = d->ReadUnsigned();
uint32_t running_offset = 0;
@@ -2743,14 +2749,16 @@
d->AssignRef(object);
}
stop_index_ = d->next_index();
- BuildCanonicalSetFromLayout(d, cid_ == kStringCid);
+ if (cid_ == kStringCid) {
+ BuildCanonicalSetFromLayout(d);
+ }
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
+ void ReadFill(Deserializer* d, bool primary) {
// No-op.
}
- void PostLoad(Deserializer* d, const Array& refs, bool canonicalize) {
+ void PostLoad(Deserializer* d, const Array& refs, bool primary) {
if (!table_.IsNull()) {
auto object_store = d->isolate_group()->object_store();
VerifyCanonicalSet(d, refs, Array::Handle(object_store->symbol_table()));
@@ -2758,7 +2766,7 @@
if (d->isolate_group() == Dart::vm_isolate_group()) {
Symbols::InitFromSnapshot(d->isolate_group());
}
- } else if (canonicalize) {
+ } else if (!primary && is_canonical()) {
FATAL("Cannot recanonicalize RO objects.");
}
}
@@ -2772,7 +2780,7 @@
class ExceptionHandlersSerializationCluster : public SerializationCluster {
public:
ExceptionHandlersSerializationCluster()
- : SerializationCluster("ExceptionHandlers") {}
+ : SerializationCluster("ExceptionHandlers", kExceptionHandlersCid) {}
~ExceptionHandlersSerializationCluster() {}
void Trace(Serializer* s, ObjectPtr object) {
@@ -2783,7 +2791,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kExceptionHandlersCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -2827,7 +2834,7 @@
: DeserializationCluster("ExceptionHandlers") {}
~ExceptionHandlersDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -2839,8 +2846,8 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
- ASSERT(!stamp_canonical); // Never canonical.
+ void ReadFill(Deserializer* d, bool primary) {
+ ASSERT(!is_canonical()); // Never canonical.
for (intptr_t id = start_index_; id < stop_index_; id++) {
ExceptionHandlersPtr handlers =
static_cast<ExceptionHandlersPtr>(d->Ref(id));
@@ -2865,7 +2872,8 @@
#if !defined(DART_PRECOMPILED_RUNTIME)
class ContextSerializationCluster : public SerializationCluster {
public:
- ContextSerializationCluster() : SerializationCluster("Context") {}
+ ContextSerializationCluster()
+ : SerializationCluster("Context", kContextCid) {}
~ContextSerializationCluster() {}
void Trace(Serializer* s, ObjectPtr object) {
@@ -2880,7 +2888,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kContextCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -2917,7 +2924,7 @@
ContextDeserializationCluster() : DeserializationCluster("Context") {}
~ContextDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -2929,8 +2936,8 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
- ASSERT(!stamp_canonical); // Never canonical.
+ void ReadFill(Deserializer* d, bool primary) {
+ ASSERT(!is_canonical()); // Never canonical.
for (intptr_t id = start_index_; id < stop_index_; id++) {
ContextPtr context = static_cast<ContextPtr>(d->Ref(id));
const intptr_t length = d->ReadUnsigned();
@@ -2948,7 +2955,8 @@
#if !defined(DART_PRECOMPILED_RUNTIME)
class ContextScopeSerializationCluster : public SerializationCluster {
public:
- ContextScopeSerializationCluster() : SerializationCluster("ContextScope") {}
+ ContextScopeSerializationCluster()
+ : SerializationCluster("ContextScope", kContextScopeCid) {}
~ContextScopeSerializationCluster() {}
void Trace(Serializer* s, ObjectPtr object) {
@@ -2960,7 +2968,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kContextScopeCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -2997,7 +3004,7 @@
: DeserializationCluster("ContextScope") {}
~ContextScopeDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -3009,8 +3016,8 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
- ASSERT(!stamp_canonical); // Never canonical.
+ void ReadFill(Deserializer* d, bool primary) {
+ ASSERT(!is_canonical()); // Never canonical.
for (intptr_t id = start_index_; id < stop_index_; id++) {
ContextScopePtr scope = static_cast<ContextScopePtr>(d->Ref(id));
const intptr_t length = d->ReadUnsigned();
@@ -3028,6 +3035,7 @@
public:
UnlinkedCallSerializationCluster()
: SerializationCluster("UnlinkedCall",
+ kUnlinkedCallCid,
compiler::target::UnlinkedCall::InstanceSize()) {}
~UnlinkedCallSerializationCluster() {}
@@ -3038,7 +3046,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kUnlinkedCallCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -3068,7 +3075,7 @@
: DeserializationCluster("UnlinkedCall") {}
~UnlinkedCallDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -3079,8 +3086,8 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
- ASSERT(!stamp_canonical); // Never canonical.
+ void ReadFill(Deserializer* d, bool primary) {
+ ASSERT(!is_canonical()); // Never canonical.
for (intptr_t id = start_index_; id < stop_index_; id++) {
UnlinkedCallPtr unlinked = static_cast<UnlinkedCallPtr>(d->Ref(id));
Deserializer::InitializeHeader(unlinked, kUnlinkedCallCid,
@@ -3096,6 +3103,7 @@
public:
ICDataSerializationCluster()
: SerializationCluster("ICData",
+ kICDataCid,
compiler::target::ICData::InstanceSize()) {}
~ICDataSerializationCluster() {}
@@ -3106,7 +3114,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kICDataCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -3139,7 +3146,7 @@
ICDataDeserializationCluster() : DeserializationCluster("ICData") {}
~ICDataDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -3149,8 +3156,8 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
- ASSERT(!stamp_canonical); // Never canonical.
+ void ReadFill(Deserializer* d, bool primary) {
+ ASSERT(!is_canonical()); // Never canonical.
for (intptr_t id = start_index_; id < stop_index_; id++) {
ICDataPtr ic = static_cast<ICDataPtr>(d->Ref(id));
Deserializer::InitializeHeader(ic, kICDataCid, ICData::InstanceSize());
@@ -3167,6 +3174,7 @@
MegamorphicCacheSerializationCluster()
: SerializationCluster(
"MegamorphicCache",
+ kMegamorphicCacheCid,
compiler::target::MegamorphicCache::InstanceSize()) {}
~MegamorphicCacheSerializationCluster() {}
@@ -3177,7 +3185,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kMegamorphicCacheCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -3207,7 +3214,7 @@
: DeserializationCluster("MegamorphicCache") {}
~MegamorphicCacheDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -3218,8 +3225,8 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
- ASSERT(!stamp_canonical); // Never canonical.
+ void ReadFill(Deserializer* d, bool primary) {
+ ASSERT(!is_canonical()); // Never canonical.
for (intptr_t id = start_index_; id < stop_index_; id++) {
MegamorphicCachePtr cache = static_cast<MegamorphicCachePtr>(d->Ref(id));
Deserializer::InitializeHeader(cache, kMegamorphicCacheCid,
@@ -3230,7 +3237,7 @@
}
#if defined(DART_PRECOMPILED_RUNTIME)
- void PostLoad(Deserializer* d, const Array& refs, bool canonicalize) {
+ void PostLoad(Deserializer* d, const Array& refs, bool primary) {
if (FLAG_use_bare_instructions) {
// By default, every megamorphic call site will load the target
// [Function] from the hash table and call indirectly via loading the
@@ -3255,6 +3262,7 @@
SubtypeTestCacheSerializationCluster()
: SerializationCluster(
"SubtypeTestCache",
+ kSubtypeTestCacheCid,
compiler::target::SubtypeTestCache::InstanceSize()) {}
~SubtypeTestCacheSerializationCluster() {}
@@ -3265,7 +3273,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kSubtypeTestCacheCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -3294,7 +3301,7 @@
: DeserializationCluster("SubtypeTestCache") {}
~SubtypeTestCacheDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -3305,8 +3312,8 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
- ASSERT(!stamp_canonical); // Never canonical.
+ void ReadFill(Deserializer* d, bool primary) {
+ ASSERT(!is_canonical()); // Never canonical.
for (intptr_t id = start_index_; id < stop_index_; id++) {
SubtypeTestCachePtr cache = static_cast<SubtypeTestCachePtr>(d->Ref(id));
Deserializer::InitializeHeader(cache, kSubtypeTestCacheCid,
@@ -3321,6 +3328,7 @@
public:
LoadingUnitSerializationCluster()
: SerializationCluster("LoadingUnit",
+ kLoadingUnitCid,
compiler::target::LoadingUnit::InstanceSize()) {}
~LoadingUnitSerializationCluster() {}
@@ -3331,7 +3339,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kLoadingUnitCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -3360,7 +3367,7 @@
LoadingUnitDeserializationCluster() : DeserializationCluster("LoadingUnit") {}
~LoadingUnitDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -3371,8 +3378,8 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
- ASSERT(!stamp_canonical); // Never canonical.
+ void ReadFill(Deserializer* d, bool primary) {
+ ASSERT(!is_canonical()); // Never canonical.
for (intptr_t id = start_index_; id < stop_index_; id++) {
LoadingUnitPtr unit = static_cast<LoadingUnitPtr>(d->Ref(id));
Deserializer::InitializeHeader(unit, kLoadingUnitCid,
@@ -3391,6 +3398,7 @@
public:
LanguageErrorSerializationCluster()
: SerializationCluster("LanguageError",
+ kLanguageErrorCid,
compiler::target::LanguageError::InstanceSize()) {}
~LanguageErrorSerializationCluster() {}
@@ -3401,7 +3409,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kLanguageErrorCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -3433,7 +3440,7 @@
: DeserializationCluster("LanguageError") {}
~LanguageErrorDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -3444,8 +3451,8 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
- ASSERT(!stamp_canonical); // Never canonical.
+ void ReadFill(Deserializer* d, bool primary) {
+ ASSERT(!is_canonical()); // Never canonical.
for (intptr_t id = start_index_; id < stop_index_; id++) {
LanguageErrorPtr error = static_cast<LanguageErrorPtr>(d->Ref(id));
Deserializer::InitializeHeader(error, kLanguageErrorCid,
@@ -3464,6 +3471,7 @@
UnhandledExceptionSerializationCluster()
: SerializationCluster(
"UnhandledException",
+ kUnhandledExceptionCid,
compiler::target::UnhandledException::InstanceSize()) {}
~UnhandledExceptionSerializationCluster() {}
@@ -3474,7 +3482,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kUnhandledExceptionCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -3503,7 +3510,7 @@
: DeserializationCluster("UnhandledException") {}
~UnhandledExceptionDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -3514,8 +3521,8 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
- ASSERT(!stamp_canonical); // Never canonical.
+ void ReadFill(Deserializer* d, bool primary) {
+ ASSERT(!is_canonical()); // Never canonical.
for (intptr_t id = start_index_; id < stop_index_; id++) {
UnhandledExceptionPtr exception =
static_cast<UnhandledExceptionPtr>(d->Ref(id));
@@ -3529,8 +3536,8 @@
#if !defined(DART_PRECOMPILED_RUNTIME)
class InstanceSerializationCluster : public SerializationCluster {
public:
- explicit InstanceSerializationCluster(intptr_t cid)
- : SerializationCluster("Instance"), cid_(cid) {
+ InstanceSerializationCluster(bool is_canonical, intptr_t cid)
+ : SerializationCluster("Instance", cid, kSizeVaries, is_canonical) {
ClassPtr cls = IsolateGroup::Current()->class_table()->At(cid);
host_next_field_offset_in_words_ =
cls->untag()->host_next_field_offset_in_words_;
@@ -3564,7 +3571,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(cid_);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
@@ -3610,7 +3616,6 @@
}
private:
- const intptr_t cid_;
intptr_t host_next_field_offset_in_words_;
intptr_t target_next_field_offset_in_words_;
intptr_t target_instance_size_in_words_;
@@ -3620,12 +3625,13 @@
class AbstractInstanceDeserializationCluster : public DeserializationCluster {
protected:
- explicit AbstractInstanceDeserializationCluster(const char* name)
- : DeserializationCluster(name) {}
+ explicit AbstractInstanceDeserializationCluster(const char* name,
+ bool is_canonical)
+ : DeserializationCluster(name, is_canonical) {}
public:
- void PostLoad(Deserializer* d, const Array& refs, bool canonicalize) {
- if (canonicalize) {
+ void PostLoad(Deserializer* d, const Array& refs, bool primary) {
+ if (!primary && is_canonical()) {
SafepointMutexLocker ml(
d->isolate_group()->constant_canonicalization_mutex());
Instance& instance = Instance::Handle(d->zone());
@@ -3641,11 +3647,12 @@
class InstanceDeserializationCluster
: public AbstractInstanceDeserializationCluster {
public:
- explicit InstanceDeserializationCluster(intptr_t cid)
- : AbstractInstanceDeserializationCluster("Instance"), cid_(cid) {}
+ explicit InstanceDeserializationCluster(intptr_t cid, bool is_canonical)
+ : AbstractInstanceDeserializationCluster("Instance", is_canonical),
+ cid_(cid) {}
~InstanceDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -3659,7 +3666,7 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
+ void ReadFill(Deserializer* d, bool primary) {
intptr_t next_field_offset = next_field_offset_in_words_ << kWordSizeLog2;
intptr_t instance_size =
Object::RoundedAllocationSize(instance_size_in_words_ * kWordSize);
@@ -3668,7 +3675,7 @@
for (intptr_t id = start_index_; id < stop_index_; id++) {
InstancePtr instance = static_cast<InstancePtr>(d->Ref(id));
Deserializer::InitializeHeader(instance, cid_, instance_size,
- stamp_canonical);
+ primary && is_canonical());
intptr_t offset = Instance::NextFieldOffset();
while (offset < next_field_offset) {
if (unboxed_fields_bitmap.Get(offset / kWordSize)) {
@@ -3704,6 +3711,7 @@
public:
LibraryPrefixSerializationCluster()
: SerializationCluster("LibraryPrefix",
+ kLibraryPrefixCid,
compiler::target::LibraryPrefix::InstanceSize()) {}
~LibraryPrefixSerializationCluster() {}
@@ -3714,7 +3722,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kLibraryPrefixCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -3745,7 +3752,7 @@
: DeserializationCluster("LibraryPrefix") {}
~LibraryPrefixDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -3756,8 +3763,8 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
- ASSERT(!stamp_canonical); // Never canonical.
+ void ReadFill(Deserializer* d, bool primary) {
+ ASSERT(!is_canonical()); // Never canonical.
for (intptr_t id = start_index_; id < stop_index_; id++) {
LibraryPrefixPtr prefix = static_cast<LibraryPrefixPtr>(d->Ref(id));
Deserializer::InitializeHeader(prefix, kLibraryPrefixCid,
@@ -3781,8 +3788,10 @@
TypePtr,
/*kAllCanonicalObjectsAreIncludedIntoSet=*/false> {
public:
- explicit TypeSerializationCluster(bool represents_canonical_set)
+ TypeSerializationCluster(bool is_canonical, bool represents_canonical_set)
: CanonicalSetSerializationCluster(
+ kTypeCid,
+ is_canonical,
represents_canonical_set,
"Type",
compiler::target::Type::InstanceSize()) {}
@@ -3806,7 +3815,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kTypeCid);
intptr_t count = objects_.length();
s->WriteUnsigned(count);
ReorderObjects(s);
@@ -3869,11 +3877,12 @@
CanonicalTypeSet,
/*kAllCanonicalObjectsAreIncludedIntoSet=*/false> {
public:
- explicit TypeDeserializationCluster(bool is_root_unit)
- : CanonicalSetDeserializationCluster(is_root_unit, "Type") {}
+ explicit TypeDeserializationCluster(bool is_canonical, bool is_root_unit)
+ : CanonicalSetDeserializationCluster(is_canonical, is_root_unit, "Type") {
+ }
~TypeDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -3882,14 +3891,14 @@
d->AssignRef(object);
}
stop_index_ = d->next_index();
- BuildCanonicalSetFromLayout(d, stamp_canonical);
+ BuildCanonicalSetFromLayout(d);
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
+ void ReadFill(Deserializer* d, bool primary) {
for (intptr_t id = start_index_; id < stop_index_; id++) {
TypePtr type = static_cast<TypePtr>(d->Ref(id));
Deserializer::InitializeHeader(type, kTypeCid, Type::InstanceSize(),
- stamp_canonical);
+ primary && is_canonical());
ReadFromTo(type);
const uint8_t combined = d->Read<uint8_t>();
type->untag()->type_state_ = combined >> kNullabilityBitSize;
@@ -3897,13 +3906,13 @@
}
}
- void PostLoad(Deserializer* d, const Array& refs, bool canonicalize) {
+ void PostLoad(Deserializer* d, const Array& refs, bool primary) {
if (!table_.IsNull()) {
auto object_store = d->isolate_group()->object_store();
VerifyCanonicalSet(d, refs,
Array::Handle(object_store->canonical_types()));
object_store->set_canonical_types(table_);
- } else if (canonicalize) {
+ } else if (!primary && is_canonical()) {
AbstractType& type = AbstractType::Handle(d->zone());
for (intptr_t i = start_index_; i < stop_index_; i++) {
type ^= refs.At(i);
@@ -3937,8 +3946,11 @@
FunctionType,
FunctionTypePtr> {
public:
- explicit FunctionTypeSerializationCluster(bool represents_canonical_set)
+ explicit FunctionTypeSerializationCluster(bool is_canonical,
+ bool represents_canonical_set)
: CanonicalSetSerializationCluster(
+ kFunctionTypeCid,
+ is_canonical,
represents_canonical_set,
"FunctionType",
compiler::target::FunctionType::InstanceSize()) {}
@@ -3951,7 +3963,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kFunctionTypeCid);
intptr_t count = objects_.length();
s->WriteUnsigned(count);
ReorderObjects(s);
@@ -3995,11 +4006,14 @@
class FunctionTypeDeserializationCluster
: public CanonicalSetDeserializationCluster<CanonicalFunctionTypeSet> {
public:
- explicit FunctionTypeDeserializationCluster(bool is_root_unit)
- : CanonicalSetDeserializationCluster(is_root_unit, "FunctionType") {}
+ explicit FunctionTypeDeserializationCluster(bool is_canonical,
+ bool is_root_unit)
+ : CanonicalSetDeserializationCluster(is_canonical,
+ is_root_unit,
+ "FunctionType") {}
~FunctionTypeDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -4009,15 +4023,15 @@
d->AssignRef(object);
}
stop_index_ = d->next_index();
- BuildCanonicalSetFromLayout(d, stamp_canonical);
+ BuildCanonicalSetFromLayout(d);
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
+ void ReadFill(Deserializer* d, bool primary) {
for (intptr_t id = start_index_; id < stop_index_; id++) {
FunctionTypePtr type = static_cast<FunctionTypePtr>(d->Ref(id));
Deserializer::InitializeHeader(type, kFunctionTypeCid,
FunctionType::InstanceSize(),
- stamp_canonical);
+ primary && is_canonical());
ReadFromTo(type);
const uint8_t combined = d->Read<uint8_t>();
type->untag()->type_state_ = combined >> kNullabilityBitSize;
@@ -4026,13 +4040,13 @@
}
}
- void PostLoad(Deserializer* d, const Array& refs, bool canonicalize) {
+ void PostLoad(Deserializer* d, const Array& refs, bool primary) {
if (!table_.IsNull()) {
auto object_store = d->isolate_group()->object_store();
VerifyCanonicalSet(
d, refs, Array::Handle(object_store->canonical_function_types()));
object_store->set_canonical_function_types(table_);
- } else if (canonicalize) {
+ } else if (!primary && is_canonical()) {
AbstractType& type = AbstractType::Handle(d->zone());
for (intptr_t i = start_index_; i < stop_index_; i++) {
type ^= refs.At(i);
@@ -4065,6 +4079,7 @@
public:
TypeRefSerializationCluster()
: SerializationCluster("TypeRef",
+ kTypeRefCid,
compiler::target::TypeRef::InstanceSize()) {}
~TypeRefSerializationCluster() {}
@@ -4075,7 +4090,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kTypeRefCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -4103,7 +4117,7 @@
TypeRefDeserializationCluster() : DeserializationCluster("TypeRef") {}
~TypeRefDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -4113,17 +4127,17 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
+ void ReadFill(Deserializer* d, bool primary) {
for (intptr_t id = start_index_; id < stop_index_; id++) {
TypeRefPtr type = static_cast<TypeRefPtr>(d->Ref(id));
Deserializer::InitializeHeader(type, kTypeRefCid, TypeRef::InstanceSize(),
- stamp_canonical);
+ primary && is_canonical());
ReadFromTo(type);
}
}
- void PostLoad(Deserializer* d, const Array& refs, bool canonicalize) {
- if (canonicalize) {
+ void PostLoad(Deserializer* d, const Array& refs, bool primary) {
+ if (!primary && is_canonical()) {
AbstractType& type = AbstractType::Handle(d->zone());
for (intptr_t i = start_index_; i < stop_index_; i++) {
type ^= refs.At(i);
@@ -4158,9 +4172,11 @@
TypeParameter,
TypeParameterPtr> {
public:
- explicit TypeParameterSerializationCluster(
- bool cluster_represents_canonical_set)
+ TypeParameterSerializationCluster(bool is_canonical,
+ bool cluster_represents_canonical_set)
: CanonicalSetSerializationCluster(
+ kTypeParameterCid,
+ is_canonical,
cluster_represents_canonical_set,
"TypeParameter",
compiler::target::TypeParameter::InstanceSize()) {}
@@ -4174,7 +4190,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kTypeParameterCid);
intptr_t count = objects_.length();
s->WriteUnsigned(count);
ReorderObjects(s);
@@ -4216,11 +4231,14 @@
class TypeParameterDeserializationCluster
: public CanonicalSetDeserializationCluster<CanonicalTypeParameterSet> {
public:
- explicit TypeParameterDeserializationCluster(bool is_root_unit)
- : CanonicalSetDeserializationCluster(is_root_unit, "TypeParameter") {}
+ explicit TypeParameterDeserializationCluster(bool is_canonical,
+ bool is_root_unit)
+ : CanonicalSetDeserializationCluster(is_canonical,
+ is_root_unit,
+ "TypeParameter") {}
~TypeParameterDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -4229,15 +4247,15 @@
AllocateUninitialized(old_space, TypeParameter::InstanceSize()));
}
stop_index_ = d->next_index();
- BuildCanonicalSetFromLayout(d, stamp_canonical);
+ BuildCanonicalSetFromLayout(d);
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
+ void ReadFill(Deserializer* d, bool primary) {
for (intptr_t id = start_index_; id < stop_index_; id++) {
TypeParameterPtr type = static_cast<TypeParameterPtr>(d->Ref(id));
Deserializer::InitializeHeader(type, kTypeParameterCid,
TypeParameter::InstanceSize(),
- stamp_canonical);
+ primary && is_canonical());
ReadFromTo(type);
type->untag()->parameterized_class_id_ = d->Read<int32_t>();
type->untag()->base_ = d->Read<uint16_t>();
@@ -4248,13 +4266,13 @@
}
}
- void PostLoad(Deserializer* d, const Array& refs, bool canonicalize) {
+ void PostLoad(Deserializer* d, const Array& refs, bool primary) {
if (!table_.IsNull()) {
auto object_store = d->isolate_group()->object_store();
VerifyCanonicalSet(
d, refs, Array::Handle(object_store->canonical_type_parameters()));
object_store->set_canonical_type_parameters(table_);
- } else if (canonicalize) {
+ } else if (!primary && is_canonical()) {
TypeParameter& type_param = TypeParameter::Handle(d->zone());
for (intptr_t i = start_index_; i < stop_index_; i++) {
type_param ^= refs.At(i);
@@ -4286,9 +4304,11 @@
#if !defined(DART_PRECOMPILED_RUNTIME)
class ClosureSerializationCluster : public SerializationCluster {
public:
- ClosureSerializationCluster()
+ explicit ClosureSerializationCluster(bool is_canonical)
: SerializationCluster("Closure",
- compiler::target::Closure::InstanceSize()) {}
+ kClosureCid,
+ compiler::target::Closure::InstanceSize(),
+ is_canonical) {}
~ClosureSerializationCluster() {}
void Trace(Serializer* s, ObjectPtr object) {
@@ -4298,7 +4318,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kClosureCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -4324,11 +4343,11 @@
class ClosureDeserializationCluster
: public AbstractInstanceDeserializationCluster {
public:
- ClosureDeserializationCluster()
- : AbstractInstanceDeserializationCluster("Closure") {}
+ explicit ClosureDeserializationCluster(bool is_canonical)
+ : AbstractInstanceDeserializationCluster("Closure", is_canonical) {}
~ClosureDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -4338,11 +4357,12 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
+ void ReadFill(Deserializer* d, bool primary) {
for (intptr_t id = start_index_; id < stop_index_; id++) {
ClosurePtr closure = static_cast<ClosurePtr>(d->Ref(id));
Deserializer::InitializeHeader(closure, kClosureCid,
- Closure::InstanceSize(), stamp_canonical);
+ Closure::InstanceSize(),
+ primary && is_canonical());
ReadFromTo(closure);
}
}
@@ -4351,8 +4371,8 @@
#if !defined(DART_PRECOMPILED_RUNTIME)
class MintSerializationCluster : public SerializationCluster {
public:
- MintSerializationCluster()
- : SerializationCluster("int", compiler::target::Mint::InstanceSize()) {}
+ explicit MintSerializationCluster(bool is_canonical)
+ : SerializationCluster("int", kMintCid, kSizeVaries, is_canonical) {}
~MintSerializationCluster() {}
void Trace(Serializer* s, ObjectPtr object) {
@@ -4366,20 +4386,26 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kMintCid);
-
s->WriteUnsigned(smis_.length() + mints_.length());
for (intptr_t i = 0; i < smis_.length(); i++) {
SmiPtr smi = smis_[i];
s->AssignRef(smi);
AutoTraceObject(smi);
- s->Write<int64_t>(Smi::Value(smi));
+ const int64_t value = Smi::Value(smi);
+ s->Write<int64_t>(value);
+ if (!Smi::IsValid(value)) {
+ // This Smi will become a Mint when loaded.
+ target_memory_size_ += compiler::target::Mint::InstanceSize();
+ }
}
for (intptr_t i = 0; i < mints_.length(); i++) {
MintPtr mint = mints_[i];
s->AssignRef(mint);
AutoTraceObject(mint);
s->Write<int64_t>(mint->untag()->value_);
+ // All Mints on the host should be Mints on the target.
+ ASSERT(!Smi::IsValid(mint->untag()->value_));
+ target_memory_size_ += compiler::target::Mint::InstanceSize();
}
}
@@ -4393,10 +4419,11 @@
class MintDeserializationCluster : public DeserializationCluster {
public:
- MintDeserializationCluster() : DeserializationCluster("int") {}
+ explicit MintDeserializationCluster(bool is_canonical)
+ : DeserializationCluster("int", is_canonical) {}
~MintDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
PageSpace* old_space = d->heap()->old_space();
start_index_ = d->next_index();
@@ -4409,7 +4436,7 @@
MintPtr mint = static_cast<MintPtr>(
AllocateUninitialized(old_space, Mint::InstanceSize()));
Deserializer::InitializeHeader(mint, kMintCid, Mint::InstanceSize(),
- stamp_canonical);
+ is_canonical());
mint->untag()->value_ = value;
d->AssignRef(mint);
}
@@ -4417,10 +4444,10 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {}
+ void ReadFill(Deserializer* d, bool primary) {}
- void PostLoad(Deserializer* d, const Array& refs, bool canonicalize) {
- if (canonicalize) {
+ void PostLoad(Deserializer* d, const Array& refs, bool primary) {
+ if (!primary && is_canonical()) {
const Class& mint_cls = Class::Handle(
d->zone(), d->isolate_group()->object_store()->mint_class());
Object& number = Object::Handle(d->zone());
@@ -4446,9 +4473,11 @@
#if !defined(DART_PRECOMPILED_RUNTIME)
class DoubleSerializationCluster : public SerializationCluster {
public:
- DoubleSerializationCluster()
+ explicit DoubleSerializationCluster(bool is_canonical)
: SerializationCluster("double",
- compiler::target::Double::InstanceSize()) {}
+ kDoubleCid,
+ compiler::target::Double::InstanceSize(),
+ is_canonical) {}
~DoubleSerializationCluster() {}
void Trace(Serializer* s, ObjectPtr object) {
@@ -4457,7 +4486,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kDoubleCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -4482,10 +4510,11 @@
class DoubleDeserializationCluster : public DeserializationCluster {
public:
- DoubleDeserializationCluster() : DeserializationCluster("double") {}
+ explicit DoubleDeserializationCluster(bool is_canonical)
+ : DeserializationCluster("double", is_canonical) {}
~DoubleDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -4495,17 +4524,17 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
+ void ReadFill(Deserializer* d, bool primary) {
for (intptr_t id = start_index_; id < stop_index_; id++) {
DoublePtr dbl = static_cast<DoublePtr>(d->Ref(id));
Deserializer::InitializeHeader(dbl, kDoubleCid, Double::InstanceSize(),
- stamp_canonical);
+ primary && is_canonical());
dbl->untag()->value_ = d->Read<double>();
}
}
- void PostLoad(Deserializer* d, const Array& refs, bool canonicalize) {
- if (canonicalize) {
+ void PostLoad(Deserializer* d, const Array& refs, bool primary) {
+ if (!primary && is_canonical()) {
auto Z = d->zone();
auto isolate_group = d->isolate_group();
const Class& cls =
@@ -4533,6 +4562,7 @@
GrowableObjectArraySerializationCluster()
: SerializationCluster(
"GrowableObjectArray",
+ kGrowableObjectArrayCid,
compiler::target::GrowableObjectArray::InstanceSize()) {}
~GrowableObjectArraySerializationCluster() {}
@@ -4543,7 +4573,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kGrowableObjectArrayCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -4573,7 +4602,7 @@
: DeserializationCluster("GrowableObjectArray") {}
~GrowableObjectArrayDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -4584,13 +4613,12 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
+ void ReadFill(Deserializer* d, bool primary) {
for (intptr_t id = start_index_; id < stop_index_; id++) {
GrowableObjectArrayPtr list =
static_cast<GrowableObjectArrayPtr>(d->Ref(id));
Deserializer::InitializeHeader(list, kGrowableObjectArrayCid,
- GrowableObjectArray::InstanceSize(),
- stamp_canonical);
+ GrowableObjectArray::InstanceSize());
ReadFromTo(list);
}
}
@@ -4600,7 +4628,7 @@
class TypedDataSerializationCluster : public SerializationCluster {
public:
explicit TypedDataSerializationCluster(intptr_t cid)
- : SerializationCluster("TypedData"), cid_(cid) {}
+ : SerializationCluster("TypedData", cid) {}
~TypedDataSerializationCluster() {}
void Trace(Serializer* s, ObjectPtr object) {
@@ -4609,7 +4637,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(cid_);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
const intptr_t element_size = TypedData::ElementSizeInBytes(cid_);
@@ -4638,7 +4665,6 @@
}
private:
- const intptr_t cid_;
GrowableArray<TypedDataPtr> objects_;
};
#endif // !DART_PRECOMPILED_RUNTIME
@@ -4649,7 +4675,7 @@
: DeserializationCluster("TypedData"), cid_(cid) {}
~TypedDataDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -4662,8 +4688,8 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
- ASSERT(!stamp_canonical); // Never canonical.
+ void ReadFill(Deserializer* d, bool primary) {
+ ASSERT(!is_canonical()); // Never canonical.
intptr_t element_size = TypedData::ElementSizeInBytes(cid_);
for (intptr_t id = start_index_; id < stop_index_; id++) {
@@ -4671,8 +4697,7 @@
const intptr_t length = d->ReadUnsigned();
const intptr_t length_in_bytes = length * element_size;
Deserializer::InitializeHeader(data, cid_,
- TypedData::InstanceSize(length_in_bytes),
- stamp_canonical);
+ TypedData::InstanceSize(length_in_bytes));
data->untag()->length_ = Smi::New(length);
data->untag()->RecomputeDataField();
uint8_t* cdata = reinterpret_cast<uint8_t*>(data->untag()->data());
@@ -4689,8 +4714,8 @@
public:
explicit TypedDataViewSerializationCluster(intptr_t cid)
: SerializationCluster("TypedDataView",
- compiler::target::TypedDataView::InstanceSize()),
- cid_(cid) {}
+ cid,
+ compiler::target::TypedDataView::InstanceSize()) {}
~TypedDataViewSerializationCluster() {}
void Trace(Serializer* s, ObjectPtr object) {
@@ -4702,7 +4727,6 @@
void WriteAlloc(Serializer* s) {
const intptr_t count = objects_.length();
- s->WriteCid(cid_);
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
TypedDataViewPtr view = objects_[i];
@@ -4720,7 +4744,6 @@
}
private:
- const intptr_t cid_;
GrowableArray<TypedDataViewPtr> objects_;
};
#endif // !DART_PRECOMPILED_RUNTIME
@@ -4731,7 +4754,7 @@
: DeserializationCluster("TypedDataView"), cid_(cid) {}
~TypedDataViewDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -4742,8 +4765,8 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
- ASSERT(!stamp_canonical); // Never canonical.
+ void ReadFill(Deserializer* d, bool primary) {
+ ASSERT(!is_canonical()); // Never canonical.
for (intptr_t id = start_index_; id < stop_index_; id++) {
TypedDataViewPtr view = static_cast<TypedDataViewPtr>(d->Ref(id));
Deserializer::InitializeHeader(view, cid_, TypedDataView::InstanceSize());
@@ -4751,8 +4774,8 @@
}
}
- void PostLoad(Deserializer* d, const Array& refs, bool canonicalize) {
- ASSERT(!canonicalize);
+ void PostLoad(Deserializer* d, const Array& refs, bool primary) {
+ ASSERT(primary || !is_canonical());
auto& view = TypedDataView::Handle(d->zone());
for (intptr_t id = start_index_; id < stop_index_; id++) {
view ^= refs.At(id);
@@ -4770,8 +4793,8 @@
explicit ExternalTypedDataSerializationCluster(intptr_t cid)
: SerializationCluster(
"ExternalTypedData",
- compiler::target::ExternalTypedData::InstanceSize()),
- cid_(cid) {}
+ cid,
+ compiler::target::ExternalTypedData::InstanceSize()) {}
~ExternalTypedDataSerializationCluster() {}
void Trace(Serializer* s, ObjectPtr object) {
@@ -4780,7 +4803,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(cid_);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -4804,7 +4826,6 @@
}
private:
- const intptr_t cid_;
GrowableArray<ExternalTypedDataPtr> objects_;
};
#endif // !DART_PRECOMPILED_RUNTIME
@@ -4815,7 +4836,7 @@
: DeserializationCluster("ExternalTypedData"), cid_(cid) {}
~ExternalTypedDataDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -4826,8 +4847,8 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
- ASSERT(!stamp_canonical); // Never canonical.
+ void ReadFill(Deserializer* d, bool primary) {
+ ASSERT(!is_canonical()); // Never canonical.
intptr_t element_size = ExternalTypedData::ElementSizeInBytes(cid_);
for (intptr_t id = start_index_; id < stop_index_; id++) {
@@ -4852,6 +4873,7 @@
public:
StackTraceSerializationCluster()
: SerializationCluster("StackTrace",
+ kStackTraceCid,
compiler::target::StackTrace::InstanceSize()) {}
~StackTraceSerializationCluster() {}
@@ -4862,7 +4884,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kStackTraceCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -4890,7 +4911,7 @@
StackTraceDeserializationCluster() : DeserializationCluster("StackTrace") {}
~StackTraceDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -4901,8 +4922,8 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
- ASSERT(!stamp_canonical); // Never canonical.
+ void ReadFill(Deserializer* d, bool primary) {
+ ASSERT(!is_canonical()); // Never canonical.
for (intptr_t id = start_index_; id < stop_index_; id++) {
StackTracePtr trace = static_cast<StackTracePtr>(d->Ref(id));
Deserializer::InitializeHeader(trace, kStackTraceCid,
@@ -4917,6 +4938,7 @@
public:
RegExpSerializationCluster()
: SerializationCluster("RegExp",
+ kRegExpCid,
compiler::target::RegExp::InstanceSize()) {}
~RegExpSerializationCluster() {}
@@ -4927,7 +4949,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kRegExpCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -4958,7 +4979,7 @@
RegExpDeserializationCluster() : DeserializationCluster("RegExp") {}
~RegExpDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -4968,8 +4989,8 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
- ASSERT(!stamp_canonical); // Never canonical.
+ void ReadFill(Deserializer* d, bool primary) {
+ ASSERT(!is_canonical()); // Never canonical.
for (intptr_t id = start_index_; id < stop_index_; id++) {
RegExpPtr regexp = static_cast<RegExpPtr>(d->Ref(id));
Deserializer::InitializeHeader(regexp, kRegExpCid,
@@ -4987,6 +5008,7 @@
public:
WeakPropertySerializationCluster()
: SerializationCluster("WeakProperty",
+ kWeakPropertyCid,
compiler::target::WeakProperty::InstanceSize()) {}
~WeakPropertySerializationCluster() {}
@@ -5005,7 +5027,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kWeakPropertyCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -5041,7 +5062,7 @@
: DeserializationCluster("WeakProperty") {}
~WeakPropertyDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -5052,8 +5073,8 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
- ASSERT(!stamp_canonical); // Never canonical.
+ void ReadFill(Deserializer* d, bool primary) {
+ ASSERT(!is_canonical()); // Never canonical.
for (intptr_t id = start_index_; id < stop_index_; id++) {
WeakPropertyPtr property = static_cast<WeakPropertyPtr>(d->Ref(id));
Deserializer::InitializeHeader(property, kWeakPropertyCid,
@@ -5069,6 +5090,7 @@
public:
LinkedHashMapSerializationCluster()
: SerializationCluster("LinkedHashMap",
+ kLinkedHashMapCid,
compiler::target::LinkedHashMap::InstanceSize()) {}
~LinkedHashMapSerializationCluster() {}
@@ -5092,7 +5114,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kLinkedHashMapCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -5137,11 +5158,11 @@
class LinkedHashMapDeserializationCluster
: public AbstractInstanceDeserializationCluster {
public:
- LinkedHashMapDeserializationCluster()
- : AbstractInstanceDeserializationCluster("LinkedHashMap") {}
+ explicit LinkedHashMapDeserializationCluster(bool is_canonical)
+ : AbstractInstanceDeserializationCluster("LinkedHashMap", is_canonical) {}
~LinkedHashMapDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -5152,14 +5173,14 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
+ void ReadFill(Deserializer* d, bool primary) {
PageSpace* old_space = d->heap()->old_space();
for (intptr_t id = start_index_; id < stop_index_; id++) {
LinkedHashMapPtr map = static_cast<LinkedHashMapPtr>(d->Ref(id));
Deserializer::InitializeHeader(map, kLinkedHashMapCid,
LinkedHashMap::InstanceSize(),
- stamp_canonical);
+ primary && is_canonical());
map->untag()->type_arguments_ =
static_cast<TypeArgumentsPtr>(d->ReadRef());
@@ -5195,8 +5216,8 @@
#if !defined(DART_PRECOMPILED_RUNTIME)
class ArraySerializationCluster : public SerializationCluster {
public:
- explicit ArraySerializationCluster(intptr_t cid)
- : SerializationCluster("Array"), cid_(cid) {}
+ ArraySerializationCluster(bool is_canonical, intptr_t cid)
+ : SerializationCluster("Array", cid, kSizeVaries, is_canonical) {}
~ArraySerializationCluster() {}
void Trace(Serializer* s, ObjectPtr object) {
@@ -5211,7 +5232,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(cid_);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -5239,7 +5259,6 @@
}
private:
- intptr_t cid_;
GrowableArray<ArrayPtr> objects_;
};
#endif // !DART_PRECOMPILED_RUNTIME
@@ -5247,11 +5266,12 @@
class ArrayDeserializationCluster
: public AbstractInstanceDeserializationCluster {
public:
- explicit ArrayDeserializationCluster(intptr_t cid)
- : AbstractInstanceDeserializationCluster("Array"), cid_(cid) {}
+ explicit ArrayDeserializationCluster(bool is_canonical, intptr_t cid)
+ : AbstractInstanceDeserializationCluster("Array", is_canonical),
+ cid_(cid) {}
~ArrayDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -5263,12 +5283,12 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
+ void ReadFill(Deserializer* d, bool primary) {
for (intptr_t id = start_index_; id < stop_index_; id++) {
ArrayPtr array = static_cast<ArrayPtr>(d->Ref(id));
const intptr_t length = d->ReadUnsigned();
Deserializer::InitializeHeader(array, cid_, Array::InstanceSize(length),
- stamp_canonical);
+ primary && is_canonical());
array->untag()->type_arguments_ =
static_cast<TypeArgumentsPtr>(d->ReadRef());
array->untag()->length_ = Smi::New(length);
@@ -5285,7 +5305,11 @@
#if !defined(DART_PRECOMPILED_RUNTIME)
class OneByteStringSerializationCluster : public SerializationCluster {
public:
- OneByteStringSerializationCluster() : SerializationCluster("OneByteString") {}
+ explicit OneByteStringSerializationCluster(bool is_canonical)
+ : SerializationCluster("OneByteString",
+ kOneByteStringCid,
+ kSizeVaries,
+ is_canonical) {}
~OneByteStringSerializationCluster() {}
void Trace(Serializer* s, ObjectPtr object) {
@@ -5294,7 +5318,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kOneByteStringCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -5327,12 +5350,12 @@
class StringDeserializationCluster : public DeserializationCluster {
protected:
- explicit StringDeserializationCluster(const char* name)
- : DeserializationCluster(name) {}
+ StringDeserializationCluster(const char* name, bool is_canonical)
+ : DeserializationCluster(name, is_canonical) {}
public:
- void PostLoad(Deserializer* d, const Array& refs, bool canonicalize) {
- if (canonicalize) {
+ void PostLoad(Deserializer* d, const Array& refs, bool primary) {
+ if (!primary && is_canonical()) {
auto Z = d->zone();
auto isolate_group = d->isolate_group();
SafepointMutexLocker ml(isolate_group->constant_canonicalization_mutex());
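
PostLoad for string clusters now keys off the cluster's own canonical flag rather than a caller-supplied bit: only a non-primary load of a canonical cluster re-canonicalizes. A hedged sketch of the pattern, with a hypothetical Canonicalize callback in place of the VM's symbol table:

#include <string>
#include <vector>

// Hypothetical sketch: in a secondary load, each object of a canonical
// cluster is replaced by its canonical representative so later references
// observe the already-existing instance.
void PostLoadStrings(std::vector<const std::string*>* refs,
                     bool primary,
                     bool cluster_is_canonical,
                     const std::string* (*Canonicalize)(const std::string&)) {
  if (primary || !cluster_is_canonical) return;
  for (auto& ref : *refs) {
    ref = Canonicalize(*ref);  // may return an existing canonical object
  }
}
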
@@ -5357,11 +5380,11 @@
class OneByteStringDeserializationCluster
: public StringDeserializationCluster {
public:
- OneByteStringDeserializationCluster()
- : StringDeserializationCluster("OneByteString") {}
+ explicit OneByteStringDeserializationCluster(bool is_canonical)
+ : StringDeserializationCluster("OneByteString", is_canonical) {}
~OneByteStringDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -5373,13 +5396,13 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
+ void ReadFill(Deserializer* d, bool primary) {
for (intptr_t id = start_index_; id < stop_index_; id++) {
OneByteStringPtr str = static_cast<OneByteStringPtr>(d->Ref(id));
const intptr_t length = d->ReadUnsigned();
Deserializer::InitializeHeader(str, kOneByteStringCid,
OneByteString::InstanceSize(length),
- stamp_canonical);
+ primary && is_canonical());
str->untag()->length_ = Smi::New(length);
StringHasher hasher;
for (intptr_t j = 0; j < length; j++) {
@@ -5395,7 +5418,11 @@
#if !defined(DART_PRECOMPILED_RUNTIME)
class TwoByteStringSerializationCluster : public SerializationCluster {
public:
- TwoByteStringSerializationCluster() : SerializationCluster("TwoByteString") {}
+ explicit TwoByteStringSerializationCluster(bool is_canonical)
+ : SerializationCluster("TwoByteString",
+ kTwoByteStringCid,
+ kSizeVaries,
+ is_canonical) {}
~TwoByteStringSerializationCluster() {}
void Trace(Serializer* s, ObjectPtr object) {
@@ -5404,7 +5431,6 @@
}
void WriteAlloc(Serializer* s) {
- s->WriteCid(kTwoByteStringCid);
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
@@ -5439,11 +5465,11 @@
class TwoByteStringDeserializationCluster
: public StringDeserializationCluster {
public:
- TwoByteStringDeserializationCluster()
- : StringDeserializationCluster("TwoByteString") {}
+ explicit TwoByteStringDeserializationCluster(bool is_canonical)
+ : StringDeserializationCluster("TwoByteString", is_canonical) {}
~TwoByteStringDeserializationCluster() {}
- void ReadAlloc(Deserializer* d, bool stamp_canonical) {
+ void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
@@ -5455,13 +5481,13 @@
stop_index_ = d->next_index();
}
- void ReadFill(Deserializer* d, bool stamp_canonical) {
+ void ReadFill(Deserializer* d, bool primary) {
for (intptr_t id = start_index_; id < stop_index_; id++) {
TwoByteStringPtr str = static_cast<TwoByteStringPtr>(d->Ref(id));
const intptr_t length = d->ReadUnsigned();
Deserializer::InitializeHeader(str, kTwoByteStringCid,
TwoByteString::InstanceSize(length),
- stamp_canonical);
+ primary && is_canonical());
str->untag()->length_ = Smi::New(length);
StringHasher hasher;
for (intptr_t j = 0; j < length; j++) {
@@ -5482,7 +5508,7 @@
intptr_t num_objects,
intptr_t size,
intptr_t target_memory_size = 0)
- : SerializationCluster(name) {
+ : SerializationCluster(name, -1) {
num_objects_ = num_objects;
size_ = size;
target_memory_size_ = target_memory_size;
@@ -6470,7 +6496,7 @@
Zone* Z = zone_;
if (cid >= kNumPredefinedCids || cid == kInstanceCid) {
Push(isolate_group()->class_table()->At(cid));
- return new (Z) InstanceSerializationCluster(cid);
+ return new (Z) InstanceSerializationCluster(is_canonical, cid);
}
if (IsTypedDataViewClassId(cid)) {
return new (Z) TypedDataViewSerializationCluster(cid);
@@ -6505,8 +6531,8 @@
case kClassCid:
return new (Z) ClassSerializationCluster(num_cids_ + num_tlc_cids_);
case kTypeArgumentsCid:
- return new (Z)
- TypeArgumentsSerializationCluster(cluster_represents_canonical_set);
+ return new (Z) TypeArgumentsSerializationCluster(
+ is_canonical, cluster_represents_canonical_set);
case kPatchClassCid:
return new (Z) PatchClassSerializationCluster();
case kFunctionCid:
@@ -6558,21 +6584,22 @@
case kLibraryPrefixCid:
return new (Z) LibraryPrefixSerializationCluster();
case kTypeCid:
- return new (Z) TypeSerializationCluster(cluster_represents_canonical_set);
+ return new (Z) TypeSerializationCluster(is_canonical,
+ cluster_represents_canonical_set);
case kFunctionTypeCid:
- return new (Z)
- FunctionTypeSerializationCluster(cluster_represents_canonical_set);
+ return new (Z) FunctionTypeSerializationCluster(
+ is_canonical, cluster_represents_canonical_set);
case kTypeRefCid:
return new (Z) TypeRefSerializationCluster();
case kTypeParameterCid:
- return new (Z)
- TypeParameterSerializationCluster(cluster_represents_canonical_set);
+ return new (Z) TypeParameterSerializationCluster(
+ is_canonical, cluster_represents_canonical_set);
case kClosureCid:
- return new (Z) ClosureSerializationCluster();
+ return new (Z) ClosureSerializationCluster(is_canonical);
case kMintCid:
- return new (Z) MintSerializationCluster();
+ return new (Z) MintSerializationCluster(is_canonical);
case kDoubleCid:
- return new (Z) DoubleSerializationCluster();
+ return new (Z) DoubleSerializationCluster(is_canonical);
case kGrowableObjectArrayCid:
return new (Z) GrowableObjectArraySerializationCluster();
case kStackTraceCid:
@@ -6584,13 +6611,14 @@
case kLinkedHashMapCid:
return new (Z) LinkedHashMapSerializationCluster();
case kArrayCid:
- return new (Z) ArraySerializationCluster(kArrayCid);
+ return new (Z) ArraySerializationCluster(is_canonical, kArrayCid);
case kImmutableArrayCid:
- return new (Z) ArraySerializationCluster(kImmutableArrayCid);
+ return new (Z)
+ ArraySerializationCluster(is_canonical, kImmutableArrayCid);
case kOneByteStringCid:
- return new (Z) OneByteStringSerializationCluster();
+ return new (Z) OneByteStringSerializationCluster(is_canonical);
case kTwoByteStringCid:
- return new (Z) TwoByteStringSerializationCluster();
+ return new (Z) TwoByteStringSerializationCluster(is_canonical);
case kWeakSerializationReferenceCid:
#if defined(DART_PRECOMPILER)
ASSERT(kind_ == Snapshot::kFullAOT);
@@ -6631,17 +6659,14 @@
intptr_t Serializer::PrepareInstructions() {
if (!Snapshot::IncludesCode(kind())) return 0;
- CodeSerializationCluster* cluster =
- static_cast<CodeSerializationCluster*>(clusters_by_cid_[kCodeCid]);
-
// Code objects that have identical/duplicate instructions must be adjacent in
// the order that Code objects are written because the encoding of the
// reference from the Code to the Instructions assumes monotonically
// increasing offsets as part of a delta encoding. Also the code order table
// that allows for mapping return addresses back to Code objects depends on
// this sorting.
- if (cluster != nullptr) {
- CodeSerializationCluster::Sort(cluster->objects());
+ if (code_cluster_ != nullptr) {
+ CodeSerializationCluster::Sort(code_cluster_->objects());
}
if ((loading_units_ != nullptr) &&
(current_loading_unit_id_ == LoadingUnit::kRootId)) {
@@ -6649,8 +6674,9 @@
i++) {
auto unit_objects = loading_units_->At(i)->deferred_objects();
CodeSerializationCluster::Sort(unit_objects);
+ ASSERT(unit_objects->length() == 0 || code_cluster_ != nullptr);
for (intptr_t j = 0; j < unit_objects->length(); j++) {
- cluster->deferred_objects()->Add(unit_objects->At(j)->ptr());
+ code_cluster_->deferred_objects()->Add(unit_objects->At(j)->ptr());
}
}
}
@@ -6664,8 +6690,8 @@
// instructions are now written by UnitSerializationRoots. This order needs
// to be known to finalize bare-instructions-mode's PC-relative calls.
GrowableArray<CodePtr> code_objects;
- if (cluster != nullptr) {
- auto in = cluster->objects();
+ if (code_cluster_ != nullptr) {
+ auto in = code_cluster_->objects();
for (intptr_t i = 0; i < in->length(); i++) {
code_objects.Add(in->At(i));
}
@@ -6817,6 +6843,12 @@
}
SerializationCluster* cluster = *cluster_ref;
ASSERT(cluster != nullptr);
+ if (cluster->is_canonical() != is_canonical) {
+ FATAL("cluster for %s (cid %" Pd ") %s as canonical, but %s",
+ cluster->name(), cid,
+ cluster->is_canonical() ? "marked" : "not marked",
+ is_canonical ? "should be" : "should not be");
+ }
#if defined(SNAPSHOT_BACKTRACE)
current_parent_ = object;
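
With canonical and non-canonical objects split into separate clusters per class id, the serializer can sanity-check that every traced object lands in a cluster whose canonical flag matches its own, as the FATAL above does. A simplified standalone version of that check, with a hypothetical signature:

#include <cstdio>
#include <cstdlib>

// Simplified check mirroring the FATAL above (illustrative only).
void CheckCanonicalAgreement(const char* cluster_name,
                             bool cluster_is_canonical,
                             bool object_is_canonical) {
  if (cluster_is_canonical != object_is_canonical) {
    std::fprintf(stderr, "cluster for %s %s as canonical, but %s\n",
                 cluster_name,
                 cluster_is_canonical ? "marked" : "not marked",
                 object_is_canonical ? "should be" : "should not be");
    std::abort();
  }
}
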
@@ -6939,17 +6971,22 @@
// Now that we have computed the reachability fixpoint, we remove the
// count of now-reachable WSRs as they are not actually serialized.
num_written_objects_ -= cluster->Count(this);
+ // We don't need to write this cluster, so remove it from consideration.
+ clusters_by_cid_[kWeakSerializationReferenceCid] = nullptr;
}
+ ASSERT(clusters_by_cid_[kWeakSerializationReferenceCid] == nullptr);
#endif
- GrowableArray<SerializationCluster*> canonical_clusters;
+ code_cluster_ = CID_CLUSTER(Code);
+
+ GrowableArray<SerializationCluster*> clusters;
// The order that PostLoad runs matters for some classes because of
// assumptions during canonicalization of some classes about what is already
// canonical. Explicitly place these clusters first, then add the rest
// ordered by class id.
#define ADD_NEXT(cid) \
- if (canonical_clusters_by_cid_[cid] != nullptr) { \
- canonical_clusters.Add(canonical_clusters_by_cid_[cid]); \
+ if (auto const cluster = canonical_clusters_by_cid_[cid]) { \
+ clusters.Add(cluster); \
canonical_clusters_by_cid_[cid] = nullptr; \
}
ADD_NEXT(kOneByteStringCid)
@@ -6961,28 +6998,31 @@
ADD_NEXT(kTypeArgumentsCid)
ADD_NEXT(kClosureCid)
#undef ADD_NEXT
+ const intptr_t out_of_order_clusters = clusters.length();
for (intptr_t cid = 0; cid < num_cids_; cid++) {
- if (canonical_clusters_by_cid_[cid] != nullptr) {
- canonical_clusters.Add(canonical_clusters_by_cid_[cid]);
+ if (auto const cluster = canonical_clusters_by_cid_[cid]) {
+ clusters.Add(cluster);
}
}
- GrowableArray<SerializationCluster*> clusters;
+ // Put these back so they'll show up in PrintSnapshotSizes.
+ for (intptr_t i = 0; i < out_of_order_clusters; i++) {
+ auto const cluster = clusters.At(i);
+ canonical_clusters_by_cid_[cluster->cid()] = cluster;
+ }
// Code cluster should be deserialized before Function as
// FunctionDeserializationCluster::ReadFill uses instructions table
// which is filled in CodeDeserializationCluster::ReadFill.
- if (clusters_by_cid_[kCodeCid] != nullptr) {
- clusters.Add(clusters_by_cid_[kCodeCid]);
+ if (auto const cluster = clusters_by_cid_[kCodeCid]) {
+ clusters.Add(cluster);
+ clusters_by_cid_[kCodeCid] = nullptr;
}
for (intptr_t cid = 0; cid < num_cids_; cid++) {
- // We don't actually have any WSR objects, references to them are replaced
- // either with the target or replacement.
- if (cid == kWeakSerializationReferenceCid) continue;
- // The code serialization cluster is already handled above.
- if (cid == kCodeCid) continue;
- if (clusters_by_cid_[cid] != nullptr) {
+ if (auto const cluster = clusters_by_cid_[cid]) {
clusters.Add(clusters_by_cid_[cid]);
}
}
+ // Put this back so it'll show up in PrintSnapshotSizes if present.
+ clusters_by_cid_[kCodeCid] = code_cluster_;
instructions_table_len_ = PrepareInstructions();
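
Canonical clusters no longer get their own write pass; they are folded into the single cluster list, with a handful placed up front because their PostLoad canonicalization assumes other classes are already canonical, and the Code cluster placed ahead of the rest so Function fill can use the instructions table. A small sketch of the ordering idea, using plain ints for class ids:

#include <algorithm>
#include <vector>

// Illustrative only: emit a fixed priority list first, then everything else
// in ascending class-id order.
std::vector<int> OrderClusters(std::vector<int> cids,
                               const std::vector<int>& priority) {
  std::vector<int> ordered;
  for (int cid : priority) {
    auto it = std::find(cids.begin(), cids.end(), cid);
    if (it == cids.end()) continue;
    ordered.push_back(cid);
    cids.erase(it);
  }
  std::sort(cids.begin(), cids.end());
  ordered.insert(ordered.end(), cids.begin(), cids.end());
  return ordered;
}
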
@@ -6995,7 +7035,6 @@
WriteUnsigned(num_base_objects_);
WriteUnsigned(num_objects);
- WriteUnsigned(canonical_clusters.length());
WriteUnsigned(clusters.length());
// TODO(dartbug.com/36097): Not every snapshot carries the field table.
if (current_loading_unit_id_ <= LoadingUnit::kRootId) {
@@ -7007,13 +7046,6 @@
(FLAG_precompiled_mode && FLAG_use_bare_instructions));
WriteUnsigned(instructions_table_len_);
- for (SerializationCluster* cluster : canonical_clusters) {
- cluster->WriteAndMeasureAlloc(this);
- bytes_heap_allocated_ += cluster->target_memory_size();
-#if defined(DEBUG)
- Write<int32_t>(next_ref_index_);
-#endif
- }
for (SerializationCluster* cluster : clusters) {
cluster->WriteAndMeasureAlloc(this);
bytes_heap_allocated_ += cluster->target_memory_size();
@@ -7027,12 +7059,6 @@
// And recorded them all in [objects_].
ASSERT(objects_->length() == num_objects);
- for (SerializationCluster* cluster : canonical_clusters) {
- cluster->WriteAndMeasureFill(this);
-#if defined(DEBUG)
- Write<int32_t>(kSectionMarker);
-#endif
- }
for (SerializationCluster* cluster : clusters) {
cluster->WriteAndMeasureFill(this);
#if defined(DEBUG)
@@ -7110,11 +7136,10 @@
return;
}
- auto const code_cluster = CID_CLUSTER(Code);
- ASSERT(code_cluster != nullptr);
+ ASSERT(code_cluster_ != nullptr);
// Reference IDs in a cluster are allocated sequentially, so we can use the
// first code object's reference ID to calculate the cluster index.
- const intptr_t first_code_id = RefId(code_cluster->objects()->At(0));
+ const intptr_t first_code_id = RefId(code_cluster_->objects()->At(0));
// The first object in the code cluster must have its reference ID allocated.
ASSERT(IsAllocatedReference(first_code_id));
@@ -7194,9 +7219,16 @@
void Serializer::PrintSnapshotSizes() {
#if !defined(DART_PRECOMPILED_RUNTIME)
if (FLAG_print_snapshot_sizes_verbose) {
- OS::PrintErr(
- " Cluster Objs Size Fraction Cumulative "
- " HeapSize\n");
+ TextBuffer buffer(1024);
+ // Header, using format sizes matching those below to ensure alignment.
+ buffer.Printf("%25s", "Cluster");
+ buffer.Printf(" %6s", "Objs");
+ buffer.Printf(" %8s", "Size");
+ buffer.Printf(" %8s", "Fraction");
+ buffer.Printf(" %10s", "Cumulative");
+ buffer.Printf(" %8s", "HeapSize");
+ buffer.Printf(" %5s", "Cid");
+ buffer.AddString("\n");
GrowableArray<SerializationCluster*> clusters_by_size;
for (intptr_t cid = 1; cid < num_cids_; cid++) {
if (auto const cluster = canonical_clusters_by_cid_[cid]) {
@@ -7251,11 +7283,20 @@
SerializationCluster* cluster = clusters_by_size[i];
double fraction = static_cast<double>(cluster->size()) / total_size;
cumulative_fraction += fraction;
- OS::PrintErr("%25s %6" Pd " %8" Pd " %lf %lf %8" Pd "\n",
- cluster->name(), cluster->num_objects(), cluster->size(),
- fraction, cumulative_fraction,
- cluster->target_memory_size());
+ buffer.Printf("%25s", cluster->name());
+ buffer.Printf(" %6" Pd "", cluster->num_objects());
+ buffer.Printf(" %8" Pd "", cluster->size());
+ buffer.Printf(" %1.6lf", fraction);
+ buffer.Printf(" %1.8lf", cumulative_fraction);
+ buffer.Printf(" %8" Pd "", cluster->target_memory_size());
+ if (cluster->cid() != -1) {
+ buffer.Printf(" %5" Pd "", cluster->cid());
+ } else {
+ buffer.Printf(" %5s", "");
+ }
+ buffer.AddString("\n");
}
+ OS::PrintErr("%s", buffer.buffer());
}
#endif // !defined(DART_PRECOMPILED_RUNTIME)
}
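
The verbose size report is now accumulated in one buffer and gains a Cid column; the header and data rows use matching printf field widths so the columns stay aligned, and the sentinel cid of -1 prints as an empty cell. A tiny illustrative sketch of that formatting convention (hypothetical row printer, not the VM's code):

#include <cstdio>

// Illustrative alignment: rows share the header's field widths, and a
// sentinel cid of -1 leaves the Cid column blank.
void PrintRow(const char* name, long objs, long size, long cid) {
  std::printf("%25s %6ld %8ld", name, objs, size);
  if (cid != -1) {
    std::printf(" %5ld\n", cid);
  } else {
    std::printf(" %5s\n", "");
  }
}
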
@@ -7277,7 +7318,6 @@
refs_(nullptr),
next_ref_index_(kFirstReference),
previous_text_offset_(0),
- canonical_clusters_(nullptr),
clusters_(nullptr),
initial_field_table_(thread->isolate_group()->initial_field_table()),
is_non_root_unit_(is_non_root_unit),
@@ -7291,23 +7331,27 @@
}
Deserializer::~Deserializer() {
- delete[] canonical_clusters_;
delete[] clusters_;
}
DeserializationCluster* Deserializer::ReadCluster() {
- intptr_t cid = ReadCid();
+ const uint64_t cid_and_canonical = Read<uint64_t>();
+ const intptr_t cid = (cid_and_canonical >> 1) & kMaxUint32;
+ const bool is_canonical = (cid_and_canonical & 0x1) == 0x1;
Zone* Z = zone_;
if (cid >= kNumPredefinedCids || cid == kInstanceCid) {
- return new (Z) InstanceDeserializationCluster(cid);
+ return new (Z) InstanceDeserializationCluster(cid, is_canonical);
}
if (IsTypedDataViewClassId(cid)) {
+ ASSERT(!is_canonical);
return new (Z) TypedDataViewDeserializationCluster(cid);
}
if (IsExternalTypedDataClassId(cid)) {
+ ASSERT(!is_canonical);
return new (Z) ExternalTypedDataDeserializationCluster(cid);
}
if (IsTypedDataClassId(cid)) {
+ ASSERT(!is_canonical);
return new (Z) TypedDataDeserializationCluster(cid);
}
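
Each cluster's header in the snapshot now carries both the class id and the canonical flag in one unsigned value: the low bit is the canonical flag and the remaining bits hold the cid, which is why the per-cluster WriteCid calls disappear on the serializer side. A minimal sketch of the packing and the unpacking performed in ReadCluster above; the write side is inferred from this read side, so treat the encoder as an assumption:

#include <cstdint>

// Assumed encoding, mirrored from the decode in ReadCluster above.
uint64_t PackClusterHeader(uint32_t cid, bool is_canonical) {
  return (static_cast<uint64_t>(cid) << 1) | (is_canonical ? 1u : 0u);
}

void UnpackClusterHeader(uint64_t word, uint32_t* cid, bool* is_canonical) {
  *cid = static_cast<uint32_t>((word >> 1) & 0xFFFFFFFFu);
  *is_canonical = (word & 0x1) == 0x1;
}
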
@@ -7317,113 +7361,148 @@
case kPcDescriptorsCid:
case kCodeSourceMapCid:
case kCompressedStackMapsCid:
- return new (Z) RODataDeserializationCluster(!is_non_root_unit_, cid);
+ return new (Z)
+ RODataDeserializationCluster(is_canonical, !is_non_root_unit_, cid);
case kOneByteStringCid:
case kTwoByteStringCid:
if (!is_non_root_unit_) {
- return new (Z) RODataDeserializationCluster(!is_non_root_unit_, cid);
+ return new (Z) RODataDeserializationCluster(is_canonical,
+ !is_non_root_unit_, cid);
}
break;
case kStringCid:
RELEASE_ASSERT(!is_non_root_unit_);
- return new (Z) RODataDeserializationCluster(!is_non_root_unit_, cid);
+ return new (Z)
+ RODataDeserializationCluster(is_canonical, !is_non_root_unit_, cid);
}
}
#endif
switch (cid) {
case kClassCid:
+ ASSERT(!is_canonical);
return new (Z) ClassDeserializationCluster();
case kTypeArgumentsCid:
- return new (Z) TypeArgumentsDeserializationCluster(!is_non_root_unit_);
+ return new (Z)
+ TypeArgumentsDeserializationCluster(is_canonical, !is_non_root_unit_);
case kPatchClassCid:
+ ASSERT(!is_canonical);
return new (Z) PatchClassDeserializationCluster();
case kFunctionCid:
+ ASSERT(!is_canonical);
return new (Z) FunctionDeserializationCluster();
case kClosureDataCid:
+ ASSERT(!is_canonical);
return new (Z) ClosureDataDeserializationCluster();
case kFfiTrampolineDataCid:
+ ASSERT(!is_canonical);
return new (Z) FfiTrampolineDataDeserializationCluster();
case kFieldCid:
+ ASSERT(!is_canonical);
return new (Z) FieldDeserializationCluster();
case kScriptCid:
+ ASSERT(!is_canonical);
return new (Z) ScriptDeserializationCluster();
case kLibraryCid:
+ ASSERT(!is_canonical);
return new (Z) LibraryDeserializationCluster();
case kNamespaceCid:
+ ASSERT(!is_canonical);
return new (Z) NamespaceDeserializationCluster();
#if !defined(DART_PRECOMPILED_RUNTIME)
case kKernelProgramInfoCid:
+ ASSERT(!is_canonical);
return new (Z) KernelProgramInfoDeserializationCluster();
#endif // !DART_PRECOMPILED_RUNTIME
case kCodeCid:
+ ASSERT(!is_canonical);
return new (Z) CodeDeserializationCluster();
case kObjectPoolCid:
+ ASSERT(!is_canonical);
return new (Z) ObjectPoolDeserializationCluster();
case kPcDescriptorsCid:
+ ASSERT(!is_canonical);
return new (Z) PcDescriptorsDeserializationCluster();
case kCodeSourceMapCid:
+ ASSERT(!is_canonical);
return new (Z) CodeSourceMapDeserializationCluster();
case kCompressedStackMapsCid:
+ ASSERT(!is_canonical);
return new (Z) CompressedStackMapsDeserializationCluster();
case kExceptionHandlersCid:
+ ASSERT(!is_canonical);
return new (Z) ExceptionHandlersDeserializationCluster();
case kContextCid:
+ ASSERT(!is_canonical);
return new (Z) ContextDeserializationCluster();
case kContextScopeCid:
+ ASSERT(!is_canonical);
return new (Z) ContextScopeDeserializationCluster();
case kUnlinkedCallCid:
+ ASSERT(!is_canonical);
return new (Z) UnlinkedCallDeserializationCluster();
case kICDataCid:
+ ASSERT(!is_canonical);
return new (Z) ICDataDeserializationCluster();
case kMegamorphicCacheCid:
+ ASSERT(!is_canonical);
return new (Z) MegamorphicCacheDeserializationCluster();
case kSubtypeTestCacheCid:
+ ASSERT(!is_canonical);
return new (Z) SubtypeTestCacheDeserializationCluster();
case kLoadingUnitCid:
+ ASSERT(!is_canonical);
return new (Z) LoadingUnitDeserializationCluster();
case kLanguageErrorCid:
+ ASSERT(!is_canonical);
return new (Z) LanguageErrorDeserializationCluster();
case kUnhandledExceptionCid:
+ ASSERT(!is_canonical);
return new (Z) UnhandledExceptionDeserializationCluster();
case kLibraryPrefixCid:
+ ASSERT(!is_canonical);
return new (Z) LibraryPrefixDeserializationCluster();
case kTypeCid:
- return new (Z) TypeDeserializationCluster(!is_non_root_unit_);
+ return new (Z)
+ TypeDeserializationCluster(is_canonical, !is_non_root_unit_);
case kFunctionTypeCid:
- return new (Z) FunctionTypeDeserializationCluster(!is_non_root_unit_);
+ return new (Z)
+ FunctionTypeDeserializationCluster(is_canonical, !is_non_root_unit_);
case kTypeRefCid:
+ ASSERT(!is_canonical);
return new (Z) TypeRefDeserializationCluster();
case kTypeParameterCid:
- return new (Z) TypeParameterDeserializationCluster(!is_non_root_unit_);
+ return new (Z)
+ TypeParameterDeserializationCluster(is_canonical, !is_non_root_unit_);
case kClosureCid:
- return new (Z) ClosureDeserializationCluster();
+ return new (Z) ClosureDeserializationCluster(is_canonical);
case kMintCid:
- return new (Z) MintDeserializationCluster();
+ return new (Z) MintDeserializationCluster(is_canonical);
case kDoubleCid:
- return new (Z) DoubleDeserializationCluster();
+ return new (Z) DoubleDeserializationCluster(is_canonical);
case kGrowableObjectArrayCid:
+ ASSERT(!is_canonical);
return new (Z) GrowableObjectArrayDeserializationCluster();
case kStackTraceCid:
+ ASSERT(!is_canonical);
return new (Z) StackTraceDeserializationCluster();
case kRegExpCid:
+ ASSERT(!is_canonical);
return new (Z) RegExpDeserializationCluster();
case kWeakPropertyCid:
+ ASSERT(!is_canonical);
return new (Z) WeakPropertyDeserializationCluster();
case kLinkedHashMapCid:
- return new (Z) LinkedHashMapDeserializationCluster();
+ return new (Z) LinkedHashMapDeserializationCluster(is_canonical);
case kArrayCid:
- return new (Z) ArrayDeserializationCluster(kArrayCid);
+ return new (Z) ArrayDeserializationCluster(is_canonical, kArrayCid);
case kImmutableArrayCid:
- return new (Z) ArrayDeserializationCluster(kImmutableArrayCid);
+ return new (Z)
+ ArrayDeserializationCluster(is_canonical, kImmutableArrayCid);
case kOneByteStringCid:
- return new (Z) OneByteStringDeserializationCluster();
+ return new (Z) OneByteStringDeserializationCluster(is_canonical);
case kTwoByteStringCid:
- return new (Z) TwoByteStringDeserializationCluster();
- case kWeakSerializationReferenceCid:
-#if defined(DART_PRECOMPILED_RUNTIME)
- return new (Z) WeakSerializationReferenceDeserializationCluster();
-#endif
+ return new (Z) TwoByteStringDeserializationCluster(is_canonical);
default:
break;
}
@@ -7772,12 +7851,10 @@
Array& refs = Array::Handle(zone_);
num_base_objects_ = ReadUnsigned();
num_objects_ = ReadUnsigned();
- num_canonical_clusters_ = ReadUnsigned();
num_clusters_ = ReadUnsigned();
const intptr_t initial_field_table_len = ReadUnsigned();
const intptr_t instructions_table_len = ReadUnsigned();
- canonical_clusters_ = new DeserializationCluster*[num_canonical_clusters_];
clusters_ = new DeserializationCluster*[num_clusters_];
refs = Array::New(num_objects_ + kFirstReference, Heap::kOld);
if (initial_field_table_len > 0) {
@@ -7825,19 +7902,10 @@
{
TIMELINE_DURATION(thread(), Isolate, "ReadAlloc");
- for (intptr_t i = 0; i < num_canonical_clusters_; i++) {
- canonical_clusters_[i] = ReadCluster();
- TIMELINE_DURATION(thread(), Isolate, canonical_clusters_[i]->name());
- canonical_clusters_[i]->ReadAlloc(this, /*is_canonical*/ true);
-#if defined(DEBUG)
- intptr_t serializers_next_ref_index_ = Read<int32_t>();
- ASSERT_EQUAL(serializers_next_ref_index_, next_ref_index_);
-#endif
- }
for (intptr_t i = 0; i < num_clusters_; i++) {
clusters_[i] = ReadCluster();
TIMELINE_DURATION(thread(), Isolate, clusters_[i]->name());
- clusters_[i]->ReadAlloc(this, /*is_canonical*/ false);
+ clusters_[i]->ReadAlloc(this);
#if defined(DEBUG)
intptr_t serializers_next_ref_index_ = Read<int32_t>();
ASSERT_EQUAL(serializers_next_ref_index_, next_ref_index_);
@@ -7850,18 +7918,9 @@
{
TIMELINE_DURATION(thread(), Isolate, "ReadFill");
- for (intptr_t i = 0; i < num_canonical_clusters_; i++) {
- TIMELINE_DURATION(thread(), Isolate, canonical_clusters_[i]->name());
- bool stamp_canonical = primary;
- canonical_clusters_[i]->ReadFill(this, stamp_canonical);
-#if defined(DEBUG)
- int32_t section_marker = Read<int32_t>();
- ASSERT(section_marker == kSectionMarker);
-#endif
- }
for (intptr_t i = 0; i < num_clusters_; i++) {
TIMELINE_DURATION(thread(), Isolate, clusters_[i]->name());
- clusters_[i]->ReadFill(this, /*stamp_canonical*/ false);
+ clusters_[i]->ReadFill(this, primary);
#if defined(DEBUG)
int32_t section_marker = Read<int32_t>();
ASSERT(section_marker == kSectionMarker);
@@ -7891,14 +7950,9 @@
{
TIMELINE_DURATION(thread(), Isolate, "PostLoad");
- for (intptr_t i = 0; i < num_canonical_clusters_; i++) {
- TIMELINE_DURATION(thread(), Isolate, canonical_clusters_[i]->name());
- bool canonicalize = !primary;
- canonical_clusters_[i]->PostLoad(this, refs, canonicalize);
- }
for (intptr_t i = 0; i < num_clusters_; i++) {
TIMELINE_DURATION(thread(), Isolate, clusters_[i]->name());
- clusters_[i]->PostLoad(this, refs, /*canonicalize*/ false);
+ clusters_[i]->PostLoad(this, refs, primary);
}
}
}
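
With the separate canonical cluster arrays removed, deserialization runs a single ReadAlloc/ReadFill/PostLoad sequence over one cluster list, forwarding only whether this is the primary load. A stripped-down sketch of that control flow, using a hypothetical Cluster interface rather than DeserializationCluster:

#include <vector>

// Hypothetical interface standing in for DeserializationCluster.
struct Cluster {
  virtual ~Cluster() = default;
  virtual void ReadAlloc() = 0;
  virtual void ReadFill(bool primary) = 0;
  virtual void PostLoad(bool primary) = 0;
};

void RunClusters(const std::vector<Cluster*>& clusters, bool primary) {
  for (Cluster* c : clusters) c->ReadAlloc();        // allocate all objects
  for (Cluster* c : clusters) c->ReadFill(primary);  // then fill contents
  for (Cluster* c : clusters) c->PostLoad(primary);  // then canonicalize etc.
}
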
diff --git a/runtime/vm/clustered_snapshot.h b/runtime/vm/clustered_snapshot.h
index a7a1e6a..ac3e31b 100644
--- a/runtime/vm/clustered_snapshot.h
+++ b/runtime/vm/clustered_snapshot.h
@@ -79,13 +79,17 @@
class SerializationCluster : public ZoneAllocated {
public:
+ static constexpr intptr_t kSizeVaries = -1;
explicit SerializationCluster(const char* name,
- intptr_t target_instance_size = 0)
+ intptr_t cid,
+ intptr_t target_instance_size = kSizeVaries,
+ bool is_canonical = false)
: name_(name),
- size_(0),
- num_objects_(0),
+ cid_(cid),
target_instance_size_(target_instance_size),
- target_memory_size_(0) {}
+ is_canonical_(is_canonical) {
+ ASSERT(target_instance_size == kSizeVaries || target_instance_size >= 0);
+ }
virtual ~SerializationCluster() {}
// Add [object] to the cluster and push its outgoing references.
@@ -103,6 +107,8 @@
void WriteAndMeasureFill(Serializer* serializer);
const char* name() const { return name_; }
+ intptr_t cid() const { return cid_; }
+ bool is_canonical() const { return is_canonical_; }
intptr_t size() const { return size_; }
intptr_t num_objects() const { return num_objects_; }
@@ -116,40 +122,47 @@
intptr_t target_memory_size() const { return target_memory_size_; }
protected:
- const char* name_;
- intptr_t size_;
- intptr_t num_objects_;
+ const char* const name_;
+ const intptr_t cid_;
const intptr_t target_instance_size_;
- intptr_t target_memory_size_;
+ const bool is_canonical_;
+ intptr_t size_ = 0;
+ intptr_t num_objects_ = 0;
+ intptr_t target_memory_size_ = 0;
};
class DeserializationCluster : public ZoneAllocated {
public:
- explicit DeserializationCluster(const char* name)
- : name_(name), start_index_(-1), stop_index_(-1) {}
+ explicit DeserializationCluster(const char* name, bool is_canonical = false)
+ : name_(name),
+ is_canonical_(is_canonical),
+ start_index_(-1),
+ stop_index_(-1) {}
virtual ~DeserializationCluster() {}
// Allocate memory for all objects in the cluster and write their addresses
// into the ref array. Do not touch this memory.
- virtual void ReadAlloc(Deserializer* deserializer, bool stamp_canonical) = 0;
+ virtual void ReadAlloc(Deserializer* deserializer) = 0;
// Initialize the cluster's objects. Do not touch the memory of other objects.
- virtual void ReadFill(Deserializer* deserializer, bool stamp_canonical) = 0;
+ virtual void ReadFill(Deserializer* deserializer, bool primary) = 0;
// Complete any action that requires the full graph to be deserialized, such
// as rehashing.
virtual void PostLoad(Deserializer* deserializer,
const Array& refs,
- bool canonicalize) {
- if (canonicalize) {
+ bool primary) {
+ if (!primary && is_canonical()) {
FATAL1("%s needs canonicalization but doesn't define PostLoad", name());
}
}
const char* name() const { return name_; }
+ bool is_canonical() const { return is_canonical_; }
protected:
- const char* name_;
+ const char* const name_;
+ const bool is_canonical_;
// The range of the ref array that belongs to this cluster.
intptr_t start_index_;
intptr_t stop_index_;
@@ -198,6 +211,8 @@
return ref == kUnallocatedReference || IsAllocatedReference(ref);
}
+class CodeSerializationCluster;
+
class Serializer : public ThreadStackResource {
public:
Serializer(Thread* thread,
@@ -473,6 +488,7 @@
ImageWriter* image_writer_;
SerializationCluster** canonical_clusters_by_cid_;
SerializationCluster** clusters_by_cid_;
+ CodeSerializationCluster* code_cluster_ = nullptr;
GrowableArray<ObjectPtr> stack_;
intptr_t num_cids_;
intptr_t num_tlc_cids_;
@@ -533,7 +549,6 @@
#define WriteCompressedField(obj, name) \
s->WritePropertyRef(obj->untag()->name(), #name "_")
-
// This class can be used to read version and features from a snapshot before
// the VM has been initialized.
class SnapshotHeaderReader {
@@ -693,14 +708,12 @@
ImageReader* image_reader_;
intptr_t num_base_objects_;
intptr_t num_objects_;
- intptr_t num_canonical_clusters_;
intptr_t num_clusters_;
ArrayPtr refs_;
intptr_t next_ref_index_;
intptr_t previous_text_offset_;
intptr_t code_start_index_ = 0;
intptr_t instructions_index_ = 0;
- DeserializationCluster** canonical_clusters_;
DeserializationCluster** clusters_;
FieldTable* initial_field_table_;
const bool is_non_root_unit_;
diff --git a/runtime/vm/image_snapshot.cc b/runtime/vm/image_snapshot.cc
index 344ae25..eaffe44 100644
--- a/runtime/vm/image_snapshot.cc
+++ b/runtime/vm/image_snapshot.cc
@@ -651,14 +651,14 @@
const char* bss_symbol = SectionSymbol(ProgramSection::Bss, vm);
ASSERT(bss_symbol != nullptr);
- if (FLAG_precompiled_mode) {
- if (profile_writer_ != nullptr) {
- profile_writer_->SetObjectTypeAndName(parent_id, image_type_,
- instructions_symbol);
- profile_writer_->AttributeBytesTo(parent_id, Image::kHeaderSize);
- profile_writer_->AddRoot(parent_id);
- }
+ if (profile_writer_ != nullptr) {
+ profile_writer_->SetObjectTypeAndName(parent_id, image_type_,
+ instructions_symbol);
+ profile_writer_->AttributeBytesTo(parent_id, Image::kHeaderSize);
+ profile_writer_->AddRoot(parent_id);
+ }
+ if (FLAG_precompiled_mode) {
const intptr_t section_header_length =
compiler::target::InstructionsSection::HeaderSize();
// Calculated using next_text_offset_, which doesn't include post-payload
diff --git a/runtime/vm/v8_snapshot_writer.cc b/runtime/vm/v8_snapshot_writer.cc
index 6fb5898..351c831 100644
--- a/runtime/vm/v8_snapshot_writer.cc
+++ b/runtime/vm/v8_snapshot_writer.cc
@@ -29,8 +29,6 @@
idx = edge_types_.Add("internal");
ASSERT_EQUAL(idx, static_cast<intptr_t>(Edge::Type::kInternal));
- unknown_type_string_index_ = node_types_.Add("Unknown");
-
SetObjectTypeAndName(kArtificialRootId, "ArtificialRoot",
"<artificial root>");
}
@@ -113,14 +111,10 @@
void V8SnapshotProfileWriter::NodeInfo::Write(JSONWriter* writer) const {
ASSERT(id.space() != IdSpace::kInvalid);
- ASSERT(type != kInvalidString);
if (type == kInvalidString) {
- // Fall back on this string in non-DEBUG modes. See
- // https://github.com/dart-lang/sdk/issues/45787 for context.
- writer->PrintValue(profile_writer_->unknown_type_string_index_);
- } else {
- writer->PrintValue(type);
+ FATAL("No type given for node %s", id.ToCString(profile_writer_->zone_));
}
+ writer->PrintValue(type);
if (name != kInvalidString) {
writer->PrintValue(name);
} else {
diff --git a/runtime/vm/v8_snapshot_writer.h b/runtime/vm/v8_snapshot_writer.h
index 9aededc..ea0a0e7 100644
--- a/runtime/vm/v8_snapshot_writer.h
+++ b/runtime/vm/v8_snapshot_writer.h
@@ -331,7 +331,6 @@
StringsTable edge_types_;
StringsTable strings_;
DirectChainedHashMap<ObjectIdSetKeyValueTrait> roots_;
- intptr_t unknown_type_string_index_;
#endif
};
diff --git a/tests/co19/co19-dartdevc.status b/tests/co19/co19-dartdevc.status
index d97af6e..f0c1a51 100644
--- a/tests/co19/co19-dartdevc.status
+++ b/tests/co19/co19-dartdevc.status
@@ -3,62 +3,11 @@
# BSD-style license that can be found in the LICENSE file.
[ $compiler == dartdevc || $compiler == dartdevk ]
-Language/Classes/Constructors/Generative_Constructors/formal_parameter_t07: SkipSlow
-Language/Classes/Constructors/Generative_Constructors/fresh_instance_t01: SkipSlow
-Language/Classes/Constructors/Generative_Constructors/implicit_superinitializer_t01: SkipSlow
-Language/Classes/Constructors/Generative_Constructors/implicit_superinitializer_t02: SkipSlow
-Language/Classes/Constructors/Generative_Constructors/initializers_t01: SkipSlow
-Language/Classes/Constructors/Generative_Constructors/initializers_t15: SkipSlow
-Language/Classes/Constructors/Generative_Constructors/initializing_formals_execution_t01: SkipSlow
-Language/Classes/Constructors/Generative_Constructors/initializing_this_t01: SkipSlow
-Language/Classes/Constructors/Generative_Constructors/redirection_t01: SkipSlow
-Language/Classes/Constructors/Generative_Constructors/syntax_t01: SkipSlow
-Language/Classes/Constructors/implicit_constructor_t01: SkipSlow
-Language/Classes/Constructors/implicit_constructor_t02: SkipSlow
-Language/Classes/Constructors/name_t01: SkipSlow
-Language/Classes/Constructors/name_t02: SkipSlow
-Language/Classes/Constructors/name_t03: SkipSlow
-Language/Classes/Getters/instance_getter_t01: SkipSlow
-Language/Classes/Getters/instance_getter_t02: SkipSlow
-Language/Classes/Getters/instance_getter_t03: SkipSlow
-Language/Classes/Getters/instance_getter_t04: SkipSlow
-Language/Classes/Getters/instance_getter_t05: SkipSlow
-Language/Classes/Getters/instance_getter_t06: SkipSlow
-Language/Classes/Getters/override_t04: SkipSlow
-Language/Classes/Getters/return_type_t01: SkipSlow
-Language/Classes/Getters/static_t01/none: SkipSlow
-Language/Classes/Getters/static_t02: SkipSlow
-Language/Classes/Getters/syntax_t01: SkipSlow
-Language/Classes/Getters/void_return_type_t01: SkipSlow
-Language/Classes/Instance_Methods/Operators/allowed_names_t01: SkipSlow
-Language/Classes/Instance_Methods/Operators/arity_0_or_1_t01: SkipSlow
-Language/Classes/Instance_Methods/Operators/arity_0_t01: SkipSlow
-Language/Classes/Instance_Methods/Operators/syntax_t01: SkipSlow
-Language/Classes/Instance_Methods/Operators/syntax_t03: SkipSlow
-Language/Classes/Instance_Methods/override_named_parameters_t03: SkipSlow
-Language/Classes/Instance_Methods/override_named_parameters_t04: SkipSlow
-Language/Classes/Instance_Methods/override_named_parameters_t06: SkipSlow
-Language/Classes/Instance_Methods/override_subtype_t05: SkipSlow
-Language/Classes/Instance_Methods/override_subtype_t06: SkipSlow
-Language/Classes/Instance_Methods/same_name_static_member_in_superclass_t01: SkipSlow
-Language/Classes/Instance_Methods/same_name_static_member_in_superclass_t02: SkipSlow
-Language/Classes/Instance_Methods/same_name_static_member_in_superclass_t04: SkipSlow
-Language/Classes/Instance_Methods/same_name_static_member_in_superclass_t05: SkipSlow
-Language/Classes/Instance_Methods/same_name_static_member_in_superclass_t06: SkipSlow
-Language/Classes/Instance_Methods/same_name_static_member_in_superclass_t07: SkipSlow
-Language/Classes/Instance_Methods/same_name_static_member_in_superclass_t08: SkipSlow
-Language/Classes/Instance_Methods/same_name_static_member_in_superclass_t09: SkipSlow
-Language/Classes/Instance_Variables/definition_t01: SkipSlow
-Language/Classes/Instance_Variables/definition_t02: SkipSlow
-Language/Classes/Instance_Variables/definition_t04: SkipSlow
-Language/Classes/Setters/instance_setter_t01: SkipSlow
Language/Expressions/Constants/integer_size_t03: SkipByDesign # uses integer literal not representable as JavaScript number
Language/Expressions/Constants/integer_size_t04: SkipByDesign # uses integer literal not representable as JavaScript number
Language/Expressions/Constants/literal_number_t01: SkipByDesign # uses integer literal not representable as JavaScript number
Language/Expressions/Constants/math_operators_t01: SkipByDesign # uses integer literal not representable as JavaScript number
Language/Expressions/Constants/math_operators_t06: SkipByDesign # uses integer literal not representable as JavaScript number
-Language/Expressions/Function_Invocation/async_generator_invokation_t08: SkipSlow
-Language/Expressions/Function_Invocation/async_generator_invokation_t10: SkipSlow
Language/Expressions/Null/instance_of_class_null_t01: SkipByDesign # dart:mirrors not supported https://github.com/dart-lang/co19/issues/522
Language/Expressions/Numbers/integer_size_t03: SkipByDesign # uses integer literal not representable as JavaScript number
Language/Expressions/Numbers/static_type_of_int_t01: SkipByDesign # uses integer literal not representable as JavaScript number
@@ -74,8 +23,6 @@
Language/Metadata/before*: SkipByDesign # dart:mirrors not supported https://github.com/dart-lang/co19/issues/523
Language/Metadata/syntax_t10: SkipByDesign # dart:mirrors is not supported
Language/Reference/Operator_Precedence/precedence_15_unary_prefix_t08: SkipByDesign # binary '~' produces different results in JavaScript and Dart
-Language/Types/Interface_Types/subtype_t27: SkipSlow
-Language/Types/Interface_Types/subtype_t28: SkipSlow
LanguageFeatures/Abstract-external-fields/static_analysis_external_A01_t01: SkipByDesign # External variables are not supported
LanguageFeatures/Abstract-external-fields/static_analysis_external_A01_t02: SkipByDesign # External variables are not supported
LanguageFeatures/Abstract-external-fields/static_analysis_external_A01_t03: SkipByDesign # External variables are not supported
@@ -105,12 +52,8 @@
LibTest/core/int/parse_A01_t02: SkipByDesign # big integers cannot be represented in JavaScript
LibTest/core/int/remainder_A01_t03: SkipByDesign # Division by zero is not an error in JavaScript
LibTest/ffi/*: SkipByDesign # dart:ffi is not supported
-LibTest/html/Element/blur_A01_t01: SkipSlow
-LibTest/html/Element/focus_A01_t01: SkipSlow
LibTest/html/HttpRequest/responseText_A01_t02: Skip # https://github.com/dart-lang/co19/issues/932
LibTest/html/HttpRequestUpload/*: Skip # https://github.com/dart-lang/co19/issues/932
-LibTest/html/IFrameElement/blur_A01_t01: SkipSlow
-LibTest/html/IFrameElement/focus_A01_t01: SkipSlow
LibTest/io/*: SkipByDesign # dart:io not supported.
LibTest/isolate/*: SkipByDesign # dart:isolate not supported.
LibTest/mirrors/*: SkipByDesign # dart:mirrors is not supported
diff --git a/tests/co19/co19-kernel.status b/tests/co19/co19-kernel.status
index 088865a..c62ddcc 100644
--- a/tests/co19/co19-kernel.status
+++ b/tests/co19/co19-kernel.status
@@ -3,14 +3,7 @@
# BSD-style license that can be found in the LICENSE file.
[ $compiler == dartk ]
-Language/Libraries_and_Scripts/Scripts/top_level_main_t01: Crash
-LibTest/isolate/SendPort/send_A01_t02: Crash
-LibTest/isolate/SendPort/send_A01_t03: Crash
-
-[ $compiler == fasta ]
-Language/Statements/For/syntax_t13: Crash # Assertion error: kernel_shadow_ast.dart: 'receiver == null': is not true.
-Language/Statements/For/syntax_t20: Crash # Assertion error: kernel_shadow_ast.dart: 'receiver == null': is not true.
-LanguageFeatures/Constant-update-2018/NewOperators_A01_t06/none: Crash
+Language/Libraries_and_Scripts/Scripts/top_level_main_t01: Crash # https://github.com/dart-lang/sdk/issues/42487
[ $runtime == dart_precompiled ]
Language/Metadata/syntax_t10: SkipByDesign # dart:mirrors is not supported
@@ -30,9 +23,6 @@
LibTest/core/List/List_class_A01_t02: Slow, Pass # Does many calls
LibTest/io/RawDatagramSocket/*: Skip # RawDatagramSocket are flaky. https://github.com/dart-lang/co19/issues/195
-[ $compiler == dartk && $runtime == vm && $system == linux ]
-LibTest/isolate/Isolate/spawn_A06_t03: Crash
-
[ $compiler == dartk && $runtime == vm && $system == macos ]
LibTest/collection/ListBase/ListBase_class_A01_t02: Slow, Pass
LibTest/collection/ListBase/ListBase_class_A01_t03: Slow, Pass
@@ -41,9 +31,6 @@
LibTest/core/List/List_class_A01_t02: Slow, Pass
LibTest/core/List/List_class_A01_t03: Slow, Pass
-[ $compiler == dartk && $runtime != vm ]
-Language/Classes/Constructors/Constant_Constructors/potentially_constant_expression_t01: Crash
-
[ $runtime == dart_precompiled && ($arch == simarm64 || $arch == simarm64c) ]
LibTest/collection/ListBase/ListBase_class_A01_t01: SkipSlow # Issue 43036
LibTest/collection/ListMixin/ListMixin_class_A01_t01: SkipSlow # Issue 43036
diff --git a/tests/co19/co19-runtime.status b/tests/co19/co19-runtime.status
index 80fa459..f810625 100644
--- a/tests/co19/co19-runtime.status
+++ b/tests/co19/co19-runtime.status
@@ -2,17 +2,10 @@
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
-[ $runtime != none ]
-LibTest/core/Uri/hasEmptyPath_A01_t01: RuntimeError
-LibTest/core/Uri/parse_A05_t01: RuntimeError
-
[ $system == windows ]
LibTest/io/Stdin/readByteSync_A01_t01: Skip # Issue 43645
LibTest/io/Stdin/readByteSync_A01_t02: Skip # Issue 43645
-[ $compiler != dart2js && $runtime != none && $runtime != vm && !$checked ]
-LibTest/async/Future/catchError_A03_t05: RuntimeError
-
[ $mode == debug && $runtime == dart_precompiled ]
LibTest/collection/ListBase/ListBase_class_A01_t03: SkipSlow # Very slow compilation in debug mode.
LibTest/collection/ListBase/ListBase_class_A01_t04: SkipSlow # Very slow compilation in debug mode.
diff --git a/tests/language/why_not_promoted/nullable_operator_call_error.dart b/tests/language/why_not_promoted/nullable_operator_call_error.dart
new file mode 100644
index 0000000..a145f34
--- /dev/null
+++ b/tests/language/why_not_promoted/nullable_operator_call_error.dart
@@ -0,0 +1,37 @@
+// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+// This test contains a test case for each condition that can lead to the front
+// end's `NullableOperatorCallError` error, for which we wish to report "why not
+// promoted" context information.
+
+class C1 {
+ int? bad;
+ // ^^^
+ // [context 2] 'bad' refers to a property so it couldn't be promoted. See http://dart.dev/go/non-promo-property
+ // [context 3] 'bad' refers to a property so it couldn't be promoted.
+}
+
+userDefinableBinaryOpLhs(C1 c) {
+ if (c.bad == null) return;
+ c.bad + 1;
+ // ^
+ // [analyzer 2] COMPILE_TIME_ERROR.UNCHECKED_USE_OF_NULLABLE_VALUE
+ // [cfe 3] Operator '+' cannot be called on 'int?' because it is potentially null.
+}
+
+class C2 {
+ int? bad;
+ // ^^^
+ // [context 1] 'bad' refers to a property so it couldn't be promoted. See http://dart.dev/go/non-promo-property
+ // [context 4] 'bad' refers to a property so it couldn't be promoted.
+}
+
+userDefinableUnaryOp(C2 c) {
+ if (c.bad == null) return;
+ -c.bad;
+//^
+// [analyzer 1] COMPILE_TIME_ERROR.UNCHECKED_USE_OF_NULLABLE_VALUE
+// [cfe 4] Operator 'unary-' cannot be called on 'int?' because it is potentially null.
+}
diff --git a/tools/VERSION b/tools/VERSION
index 0db4e9f..4eca9c2 100644
--- a/tools/VERSION
+++ b/tools/VERSION
@@ -27,5 +27,5 @@
MAJOR 2
MINOR 14
PATCH 0
-PRERELEASE 29
+PRERELEASE 30
PRERELEASE_PATCH 0
\ No newline at end of file