Version 2.14.0-156.0.dev
Merge commit '57e467b6ee5364b5740864967ce2e29b03cee3f7' into 'dev'
diff --git a/pkg/front_end/lib/src/fasta/kernel/collections.dart b/pkg/front_end/lib/src/fasta/kernel/collections.dart
index c4b3d01..29984f2 100644
--- a/pkg/front_end/lib/src/fasta/kernel/collections.dart
+++ b/pkg/front_end/lib/src/fasta/kernel/collections.dart
@@ -2,8 +2,6 @@
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
-// @dart = 2.9
-
library fasta.collections;
import 'package:kernel/ast.dart';
@@ -52,7 +50,7 @@
/// [IfElement] is converted to a [ForMapEntry], [ForInMapEntry], or
/// [IfMapEntry], respectively.
// TODO(johnniwinther): Merge this with [convertToMapEntry].
- MapLiteralEntry toMapLiteralEntry(
+ MapLiteralEntry? toMapLiteralEntry(
void onConvertElement(TreeNode from, TreeNode to));
}
@@ -65,37 +63,44 @@
///
/// It is set during type inference and is used to add appropriate type casts
/// during the desugaring.
- DartType elementType;
+ DartType? elementType;
- SpreadElement(this.expression, this.isNullAware) {
- expression?.parent = this;
+ SpreadElement(this.expression, {required this.isNullAware})
+ // ignore: unnecessary_null_comparison
+ : assert(expression != null),
+ // ignore: unnecessary_null_comparison
+ assert(isNullAware != null) {
+ expression.parent = this;
}
@override
- void visitChildren(Visitor<Object> v) {
- expression?.accept(v);
+ void visitChildren(Visitor v) {
+ expression.accept(v);
}
@override
void transformChildren(Transformer v) {
+ // ignore: unnecessary_null_comparison
if (expression != null) {
expression = v.transform(expression);
- expression?.parent = this;
+ expression.parent = this;
}
}
@override
void transformOrRemoveChildren(RemovingTransformer v) {
+ // ignore: unnecessary_null_comparison
if (expression != null) {
- expression = v.transformOrRemoveExpression(expression);
- expression?.parent = this;
+ expression = v.transform(expression);
+ expression.parent = this;
}
}
@override
SpreadMapEntry toMapLiteralEntry(
void onConvertElement(TreeNode from, TreeNode to)) {
- return new SpreadMapEntry(expression, isNullAware)..fileOffset = fileOffset;
+ return new SpreadMapEntry(expression, isNullAware: isNullAware)
+ ..fileOffset = fileOffset;
}
@override
@@ -117,63 +122,73 @@
class IfElement extends Expression with ControlFlowElement {
Expression condition;
Expression then;
- Expression otherwise;
+ Expression? otherwise;
- IfElement(this.condition, this.then, this.otherwise) {
- condition?.parent = this;
- then?.parent = this;
+ IfElement(this.condition, this.then, this.otherwise)
+ // ignore: unnecessary_null_comparison
+ : assert(condition != null),
+ // ignore: unnecessary_null_comparison
+ assert(then != null) {
+ condition.parent = this;
+ then.parent = this;
otherwise?.parent = this;
}
@override
- void visitChildren(Visitor<Object> v) {
- condition?.accept(v);
- then?.accept(v);
+ void visitChildren(Visitor v) {
+ condition.accept(v);
+ then.accept(v);
otherwise?.accept(v);
}
@override
void transformChildren(Transformer v) {
+ // ignore: unnecessary_null_comparison
if (condition != null) {
condition = v.transform(condition);
- condition?.parent = this;
+ condition.parent = this;
}
+ // ignore: unnecessary_null_comparison
if (then != null) {
then = v.transform(then);
- then?.parent = this;
+ then.parent = this;
}
if (otherwise != null) {
- otherwise = v.transform(otherwise);
+ otherwise = v.transform(otherwise!);
otherwise?.parent = this;
}
}
@override
void transformOrRemoveChildren(RemovingTransformer v) {
+ // ignore: unnecessary_null_comparison
if (condition != null) {
- condition = v.transformOrRemoveExpression(condition);
- condition?.parent = this;
+ condition = v.transform(condition);
+ condition.parent = this;
}
+ // ignore: unnecessary_null_comparison
if (then != null) {
- then = v.transformOrRemoveExpression(then);
- then?.parent = this;
+ then = v.transform(then);
+ then.parent = this;
}
if (otherwise != null) {
- otherwise = v.transformOrRemoveExpression(otherwise);
+ otherwise = v.transformOrRemoveExpression(otherwise!);
otherwise?.parent = this;
}
}
@override
- MapLiteralEntry toMapLiteralEntry(
+ MapLiteralEntry? toMapLiteralEntry(
void onConvertElement(TreeNode from, TreeNode to)) {
- MapLiteralEntry thenEntry;
+ MapLiteralEntry? thenEntry;
+ Expression then = this.then;
if (then is ControlFlowElement) {
ControlFlowElement thenElement = then;
thenEntry = thenElement.toMapLiteralEntry(onConvertElement);
}
if (thenEntry == null) return null;
- MapLiteralEntry otherwiseEntry;
+ MapLiteralEntry? otherwiseEntry;
+ Expression? otherwise = this.otherwise;
if (otherwise != null) {
if (otherwise is ControlFlowElement) {
ControlFlowElement otherwiseElement = otherwise;
@@ -200,7 +215,7 @@
printer.writeExpression(then);
if (otherwise != null) {
printer.write(' else ');
- printer.writeExpression(otherwise);
+ printer.writeExpression(otherwise!);
}
}
}
@@ -208,36 +223,39 @@
/// A 'for' element in a list or set literal.
class ForElement extends Expression with ControlFlowElement {
final List<VariableDeclaration> variables; // May be empty, but not null.
- Expression condition; // May be null.
+ Expression? condition; // May be null.
final List<Expression> updates; // May be empty, but not null.
Expression body;
- ForElement(this.variables, this.condition, this.updates, this.body) {
+ ForElement(this.variables, this.condition, this.updates, this.body)
+ // ignore: unnecessary_null_comparison
+ : assert(body != null) {
setParents(variables, this);
condition?.parent = this;
setParents(updates, this);
- body?.parent = this;
+ body.parent = this;
}
@override
- void visitChildren(Visitor<Object> v) {
+ void visitChildren(Visitor v) {
visitList(variables, v);
condition?.accept(v);
visitList(updates, v);
- body?.accept(v);
+ body.accept(v);
}
@override
void transformChildren(Transformer v) {
v.transformList(variables, this);
if (condition != null) {
- condition = v.transform(condition);
+ condition = v.transform(condition!);
condition?.parent = this;
}
v.transformList(updates, this);
+ // ignore: unnecessary_null_comparison
if (body != null) {
body = v.transform(body);
- body?.parent = this;
+ body.parent = this;
}
}
@@ -245,20 +263,22 @@
void transformOrRemoveChildren(RemovingTransformer v) {
v.transformVariableDeclarationList(variables, this);
if (condition != null) {
- condition = v.transformOrRemoveExpression(condition);
+ condition = v.transformOrRemoveExpression(condition!);
condition?.parent = this;
}
v.transformExpressionList(updates, this);
+ // ignore: unnecessary_null_comparison
if (body != null) {
- body = v.transformOrRemoveExpression(body);
- body?.parent = this;
+ body = v.transform(body);
+ body.parent = this;
}
}
@override
- MapLiteralEntry toMapLiteralEntry(
+ MapLiteralEntry? toMapLiteralEntry(
void onConvertElement(TreeNode from, TreeNode to)) {
- MapLiteralEntry bodyEntry;
+ MapLiteralEntry? bodyEntry;
+ Expression body = this.body;
if (body is ControlFlowElement) {
ControlFlowElement bodyElement = body;
bodyEntry = bodyElement.toMapLiteralEntry(onConvertElement);
@@ -286,99 +306,111 @@
class ForInElement extends Expression with ControlFlowElement {
VariableDeclaration variable; // Has no initializer.
Expression iterable;
- Expression syntheticAssignment; // May be null.
- Statement expressionEffects; // May be null.
+ Expression? syntheticAssignment; // May be null.
+ Statement? expressionEffects; // May be null.
Expression body;
- Expression problem; // May be null.
+ Expression? problem; // May be null.
bool isAsync; // True if this is an 'await for' loop.
ForInElement(this.variable, this.iterable, this.syntheticAssignment,
this.expressionEffects, this.body, this.problem,
- {this.isAsync: false}) {
- variable?.parent = this;
- iterable?.parent = this;
+ {this.isAsync: false})
+ // ignore: unnecessary_null_comparison
+ : assert(variable != null),
+ // ignore: unnecessary_null_comparison
+ assert(iterable != null),
+ // ignore: unnecessary_null_comparison
+ assert(body != null) {
+ variable.parent = this;
+ iterable.parent = this;
syntheticAssignment?.parent = this;
expressionEffects?.parent = this;
- body?.parent = this;
+ body.parent = this;
problem?.parent = this;
}
- Statement get prologue => syntheticAssignment != null
- ? (new ExpressionStatement(syntheticAssignment)
- ..fileOffset = syntheticAssignment.fileOffset)
+ Statement? get prologue => syntheticAssignment != null
+ ? (new ExpressionStatement(syntheticAssignment!)
+ ..fileOffset = syntheticAssignment!.fileOffset)
: expressionEffects;
- void visitChildren(Visitor<Object> v) {
- variable?.accept(v);
- iterable?.accept(v);
+ void visitChildren(Visitor v) {
+ variable.accept(v);
+ iterable.accept(v);
syntheticAssignment?.accept(v);
expressionEffects?.accept(v);
- body?.accept(v);
+ body.accept(v);
problem?.accept(v);
}
void transformChildren(Transformer v) {
+ // ignore: unnecessary_null_comparison
if (variable != null) {
variable = v.transform(variable);
- variable?.parent = this;
+ variable.parent = this;
}
+ // ignore: unnecessary_null_comparison
if (iterable != null) {
iterable = v.transform(iterable);
- iterable?.parent = this;
+ iterable.parent = this;
}
if (syntheticAssignment != null) {
- syntheticAssignment = v.transform(syntheticAssignment);
+ syntheticAssignment = v.transform(syntheticAssignment!);
syntheticAssignment?.parent = this;
}
if (expressionEffects != null) {
- expressionEffects = v.transform(expressionEffects);
+ expressionEffects = v.transform(expressionEffects!);
expressionEffects?.parent = this;
}
+ // ignore: unnecessary_null_comparison
if (body != null) {
body = v.transform(body);
- body?.parent = this;
+ body.parent = this;
}
if (problem != null) {
- problem = v.transform(problem);
+ problem = v.transform(problem!);
problem?.parent = this;
}
}
@override
void transformOrRemoveChildren(RemovingTransformer v) {
+ // ignore: unnecessary_null_comparison
if (variable != null) {
- variable = v.transformOrRemoveVariableDeclaration(variable);
- variable?.parent = this;
+ variable = v.transform(variable);
+ variable.parent = this;
}
+ // ignore: unnecessary_null_comparison
if (iterable != null) {
- iterable = v.transformOrRemoveExpression(iterable);
- iterable?.parent = this;
+ iterable = v.transform(iterable);
+ iterable.parent = this;
}
if (syntheticAssignment != null) {
- syntheticAssignment = v.transformOrRemoveExpression(syntheticAssignment);
+ syntheticAssignment = v.transformOrRemoveExpression(syntheticAssignment!);
syntheticAssignment?.parent = this;
}
if (expressionEffects != null) {
- expressionEffects = v.transformOrRemoveStatement(expressionEffects);
+ expressionEffects = v.transformOrRemoveStatement(expressionEffects!);
expressionEffects?.parent = this;
}
+ // ignore: unnecessary_null_comparison
if (body != null) {
- body = v.transformOrRemoveExpression(body);
- body?.parent = this;
+ body = v.transform(body);
+ body.parent = this;
}
if (problem != null) {
- problem = v.transformOrRemoveExpression(problem);
+ problem = v.transformOrRemoveExpression(problem!);
problem?.parent = this;
}
}
@override
- MapLiteralEntry toMapLiteralEntry(
+ MapLiteralEntry? toMapLiteralEntry(
void onConvertElement(TreeNode from, TreeNode to)) {
- MapLiteralEntry bodyEntry;
+ MapLiteralEntry? bodyEntry;
+ Expression body = this.body;
if (body is ControlFlowElement) {
- ControlFlowElement bodyElement = body;
- bodyEntry = bodyElement.toMapLiteralEntry(onConvertElement);
+ bodyEntry = body.toMapLiteralEntry(onConvertElement);
}
if (bodyEntry == null) return null;
ForInMapEntry result = new ForInMapEntry(variable, iterable,
@@ -447,30 +479,36 @@
///
/// It is set during type inference and is used to add appropriate type casts
/// during the desugaring.
- DartType entryType;
+ DartType? entryType;
- SpreadMapEntry(this.expression, this.isNullAware) {
- expression?.parent = this;
+ SpreadMapEntry(this.expression, {required this.isNullAware})
+ // ignore: unnecessary_null_comparison
+ : assert(expression != null),
+ // ignore: unnecessary_null_comparison
+ assert(isNullAware != null) {
+ expression.parent = this;
}
@override
- void visitChildren(Visitor<Object> v) {
- expression?.accept(v);
+ void visitChildren(Visitor v) {
+ expression.accept(v);
}
@override
void transformChildren(Transformer v) {
+ // ignore: unnecessary_null_comparison
if (expression != null) {
expression = v.transform(expression);
- expression?.parent = this;
+ expression.parent = this;
}
}
@override
void transformOrRemoveChildren(RemovingTransformer v) {
+ // ignore: unnecessary_null_comparison
if (expression != null) {
- expression = v.transformOrRemoveExpression(expression);
- expression?.parent = this;
+ expression = v.transform(expression);
+ expression.parent = this;
}
}
@@ -489,49 +527,57 @@
class IfMapEntry extends TreeNode with ControlFlowMapEntry {
Expression condition;
MapLiteralEntry then;
- MapLiteralEntry otherwise;
+ MapLiteralEntry? otherwise;
- IfMapEntry(this.condition, this.then, this.otherwise) {
- condition?.parent = this;
- then?.parent = this;
+ IfMapEntry(this.condition, this.then, this.otherwise)
+ // ignore: unnecessary_null_comparison
+ : assert(condition != null),
+ // ignore: unnecessary_null_comparison
+ assert(then != null) {
+ condition.parent = this;
+ then.parent = this;
otherwise?.parent = this;
}
@override
- void visitChildren(Visitor<Object> v) {
- condition?.accept(v);
- then?.accept(v);
+ void visitChildren(Visitor v) {
+ condition.accept(v);
+ then.accept(v);
otherwise?.accept(v);
}
@override
void transformChildren(Transformer v) {
+ // ignore: unnecessary_null_comparison
if (condition != null) {
condition = v.transform(condition);
- condition?.parent = this;
+ condition.parent = this;
}
+ // ignore: unnecessary_null_comparison
if (then != null) {
then = v.transform(then);
- then?.parent = this;
+ then.parent = this;
}
if (otherwise != null) {
- otherwise = v.transform(otherwise);
+ otherwise = v.transform(otherwise!);
otherwise?.parent = this;
}
}
@override
void transformOrRemoveChildren(RemovingTransformer v) {
+ // ignore: unnecessary_null_comparison
if (condition != null) {
- condition = v.transformOrRemoveExpression(condition);
- condition?.parent = this;
+ condition = v.transform(condition);
+ condition.parent = this;
}
+ // ignore: unnecessary_null_comparison
if (then != null) {
- then = v.transformOrRemove(then, dummyMapLiteralEntry);
- then?.parent = this;
+ then = v.transform(then);
+ then.parent = this;
}
if (otherwise != null) {
- otherwise = v.transformOrRemove(otherwise, dummyMapLiteralEntry);
+ otherwise = v.transformOrRemove(otherwise!, dummyMapLiteralEntry);
otherwise?.parent = this;
}
}
@@ -550,36 +596,39 @@
/// A 'for' element in a map literal.
class ForMapEntry extends TreeNode with ControlFlowMapEntry {
final List<VariableDeclaration> variables; // May be empty, but not null.
- Expression condition; // May be null.
+ Expression? condition; // May be null.
final List<Expression> updates; // May be empty, but not null.
MapLiteralEntry body;
- ForMapEntry(this.variables, this.condition, this.updates, this.body) {
+ ForMapEntry(this.variables, this.condition, this.updates, this.body)
+ // ignore: unnecessary_null_comparison
+ : assert(body != null) {
setParents(variables, this);
condition?.parent = this;
setParents(updates, this);
- body?.parent = this;
+ body.parent = this;
}
@override
- void visitChildren(Visitor<Object> v) {
+ void visitChildren(Visitor v) {
visitList(variables, v);
condition?.accept(v);
visitList(updates, v);
- body?.accept(v);
+ body.accept(v);
}
@override
void transformChildren(Transformer v) {
v.transformList(variables, this);
if (condition != null) {
- condition = v.transform(condition);
+ condition = v.transform(condition!);
condition?.parent = this;
}
v.transformList(updates, this);
+ // ignore: unnecessary_null_comparison
if (body != null) {
body = v.transform(body);
- body?.parent = this;
+ body.parent = this;
}
}
@@ -587,13 +636,14 @@
void transformOrRemoveChildren(RemovingTransformer v) {
v.transformVariableDeclarationList(variables, this);
if (condition != null) {
- condition = v.transformOrRemoveExpression(condition);
+ condition = v.transformOrRemoveExpression(condition!);
condition?.parent = this;
}
v.transformExpressionList(updates, this);
+ // ignore: unnecessary_null_comparison
if (body != null) {
- body = v.transformOrRemove(body, dummyMapLiteralEntry);
- body?.parent = this;
+ body = v.transform(body);
+ body.parent = this;
}
}
@@ -612,89 +662,100 @@
class ForInMapEntry extends TreeNode with ControlFlowMapEntry {
VariableDeclaration variable; // Has no initializer.
Expression iterable;
- Expression syntheticAssignment; // May be null.
- Statement expressionEffects; // May be null.
+ Expression? syntheticAssignment; // May be null.
+ Statement? expressionEffects; // May be null.
MapLiteralEntry body;
- Expression problem; // May be null.
+ Expression? problem; // May be null.
bool isAsync; // True if this is an 'await for' loop.
ForInMapEntry(this.variable, this.iterable, this.syntheticAssignment,
this.expressionEffects, this.body, this.problem,
- {this.isAsync})
- : assert(isAsync != null) {
- variable?.parent = this;
- iterable?.parent = this;
+ {required this.isAsync})
+ // ignore: unnecessary_null_comparison
+ : assert(iterable != null),
+ // ignore: unnecessary_null_comparison
+ assert(body != null),
+ // ignore: unnecessary_null_comparison
+ assert(isAsync != null) {
+ variable.parent = this;
+ iterable.parent = this;
syntheticAssignment?.parent = this;
expressionEffects?.parent = this;
- body?.parent = this;
+ body.parent = this;
problem?.parent = this;
}
- Statement get prologue => syntheticAssignment != null
- ? (new ExpressionStatement(syntheticAssignment)
- ..fileOffset = syntheticAssignment.fileOffset)
+ Statement? get prologue => syntheticAssignment != null
+ ? (new ExpressionStatement(syntheticAssignment!)
+ ..fileOffset = syntheticAssignment!.fileOffset)
: expressionEffects;
- void visitChildren(Visitor<Object> v) {
- variable?.accept(v);
- iterable?.accept(v);
+ void visitChildren(Visitor v) {
+ variable.accept(v);
+ iterable.accept(v);
syntheticAssignment?.accept(v);
expressionEffects?.accept(v);
- body?.accept(v);
+ body.accept(v);
problem?.accept(v);
}
void transformChildren(Transformer v) {
+ // ignore: unnecessary_null_comparison
if (variable != null) {
variable = v.transform(variable);
- variable?.parent = this;
+ variable.parent = this;
}
+ // ignore: unnecessary_null_comparison
if (iterable != null) {
iterable = v.transform(iterable);
- iterable?.parent = this;
+ iterable.parent = this;
}
if (syntheticAssignment != null) {
- syntheticAssignment = v.transform(syntheticAssignment);
+ syntheticAssignment = v.transform(syntheticAssignment!);
syntheticAssignment?.parent = this;
}
if (expressionEffects != null) {
- expressionEffects = v.transform(expressionEffects);
+ expressionEffects = v.transform(expressionEffects!);
expressionEffects?.parent = this;
}
+ // ignore: unnecessary_null_comparison
if (body != null) {
body = v.transform(body);
- body?.parent = this;
+ body.parent = this;
}
if (problem != null) {
- problem = v.transform(problem);
+ problem = v.transform(problem!);
problem?.parent = this;
}
}
@override
void transformOrRemoveChildren(RemovingTransformer v) {
+ // ignore: unnecessary_null_comparison
if (variable != null) {
- variable = v.transformOrRemoveVariableDeclaration(variable);
- variable?.parent = this;
+ variable = v.transform(variable);
+ variable.parent = this;
}
+ // ignore: unnecessary_null_comparison
if (iterable != null) {
- iterable = v.transformOrRemoveExpression(iterable);
- iterable?.parent = this;
+ iterable = v.transform(iterable);
+ iterable.parent = this;
}
if (syntheticAssignment != null) {
- syntheticAssignment = v.transformOrRemoveExpression(syntheticAssignment);
+ syntheticAssignment = v.transformOrRemoveExpression(syntheticAssignment!);
syntheticAssignment?.parent = this;
}
if (expressionEffects != null) {
- expressionEffects = v.transformOrRemoveStatement(expressionEffects);
+ expressionEffects = v.transformOrRemoveStatement(expressionEffects!);
expressionEffects?.parent = this;
}
+ // ignore: unnecessary_null_comparison
if (body != null) {
- body = v.transformOrRemove(body, dummyMapLiteralEntry);
- body?.parent = this;
+ body = v.transform(body);
+ body.parent = this;
}
if (problem != null) {
- problem = v.transformOrRemoveExpression(problem);
+ problem = v.transformOrRemoveExpression(problem!);
problem?.parent = this;
}
}
@@ -720,7 +781,7 @@
Expression convertToElement(MapLiteralEntry entry, InferenceHelper helper,
void onConvertMapEntry(TreeNode from, TreeNode to)) {
if (entry is SpreadMapEntry) {
- return new SpreadElement(entry.expression, entry.isNullAware)
+ return new SpreadElement(entry.expression, isNullAware: entry.isNullAware)
..fileOffset = entry.expression.fileOffset;
}
if (entry is IfMapEntry) {
@@ -729,7 +790,7 @@
convertToElement(entry.then, helper, onConvertMapEntry),
entry.otherwise == null
? null
- : convertToElement(entry.otherwise, helper, onConvertMapEntry))
+ : convertToElement(entry.otherwise!, helper, onConvertMapEntry))
..fileOffset = entry.fileOffset;
onConvertMapEntry(entry, result);
return result;
@@ -774,7 +835,7 @@
if (element is IfElement) {
return isConvertibleToMapEntry(element.then) &&
(element.otherwise == null ||
- isConvertibleToMapEntry(element.otherwise));
+ isConvertibleToMapEntry(element.otherwise!));
}
if (element is ForElement) {
return isConvertibleToMapEntry(element.body);
@@ -795,7 +856,8 @@
MapLiteralEntry convertToMapEntry(Expression element, InferenceHelper helper,
void onConvertElement(TreeNode from, TreeNode to)) {
if (element is SpreadElement) {
- return new SpreadMapEntry(element.expression, element.isNullAware)
+ return new SpreadMapEntry(element.expression,
+ isNullAware: element.isNullAware)
..fileOffset = element.expression.fileOffset;
}
if (element is IfElement) {
@@ -804,7 +866,7 @@
convertToMapEntry(element.then, helper, onConvertElement),
element.otherwise == null
? null
- : convertToMapEntry(element.otherwise, helper, onConvertElement))
+ : convertToMapEntry(element.otherwise!, helper, onConvertElement))
..fileOffset = element.fileOffset;
onConvertElement(element, result);
return result;
diff --git a/pkg/front_end/lib/src/fasta/kernel/forest.dart b/pkg/front_end/lib/src/fasta/kernel/forest.dart
index 9839741..441c09f 100644
--- a/pkg/front_end/lib/src/fasta/kernel/forest.dart
+++ b/pkg/front_end/lib/src/fasta/kernel/forest.dart
@@ -225,7 +225,8 @@
{bool isNullAware}) {
assert(fileOffset != null);
assert(isNullAware != null);
- return new SpreadElement(expression, isNullAware)..fileOffset = fileOffset;
+ return new SpreadElement(expression, isNullAware: isNullAware)
+ ..fileOffset = fileOffset;
}
Expression createIfElement(
diff --git a/pkg/front_end/lib/src/fasta/kernel/inference_visitor.dart b/pkg/front_end/lib/src/fasta/kernel/inference_visitor.dart
index 1fbc4ba..f489134 100644
--- a/pkg/front_end/lib/src/fasta/kernel/inference_visitor.dart
+++ b/pkg/front_end/lib/src/fasta/kernel/inference_visitor.dart
@@ -1995,7 +1995,7 @@
entry,
(type) => !type.isPotentiallyNullable));
_copyNonPromotionReasonToReplacement(entry, problem);
- replacement = new SpreadMapEntry(problem, false)
+ replacement = new SpreadMapEntry(problem, isNullAware: false)
..fileOffset = entry.fileOffset;
}
diff --git a/pkg/front_end/test/test_generator_test.dart b/pkg/front_end/test/test_generator_test.dart
index 449a33f..e08987a 100644
--- a/pkg/front_end/test/test_generator_test.dart
+++ b/pkg/front_end/test/test_generator_test.dart
@@ -18,7 +18,8 @@
import 'incremental_suite.dart' as helper;
main() async {
- TestCompiler compiler = await TestCompiler.initialize();
+ CompilerAndOptions compilerAndOptions = TestCompiler.initialize();
+ TestCompiler compiler = compilerAndOptions.compiler;
bool hasNewline = true;
int numErrors = 0;
List<String> errorSource = [];
@@ -31,6 +32,7 @@
String source = outerContext
.generate(innerContext.generate(expression.generate("")));
String compileResult = await compiler.compile(source);
+ compilerAndOptions.options.skipPlatformVerification = true;
if (compileResult != "") {
if (!hasNewline) print("");
hasNewline = true;
@@ -122,13 +124,13 @@
return sb.toString();
}
- static Future<TestCompiler> initialize() async {
+ static CompilerAndOptions initialize() {
final Uri base = Uri.parse("org-dartlang-test:///");
final Uri sdkSummary = base.resolve("vm_platform_strong.dill");
final Uri sdkRoot = computePlatformBinariesLocation(forceBuildDir: true);
Uri platformUri = sdkRoot.resolve("vm_platform_strong.dill");
final List<int> sdkSummaryData =
- await new File.fromUri(platformUri).readAsBytes();
+ new File.fromUri(platformUri).readAsBytesSync();
MemoryFileSystem fs = new MemoryFileSystem(base);
fs.entityForUri(sdkSummary).writeAsBytesSync(sdkSummaryData);
@@ -169,8 +171,10 @@
helper.TestIncrementalCompiler compiler =
new helper.TestIncrementalCompiler(options, testUri);
- return new TestCompiler._(testUri, fs, formattedErrors, formattedWarnings,
- formattedErrorsCodes, formattedWarningsCodes, compiler);
+ return new CompilerAndOptions(
+ new TestCompiler._(testUri, fs, formattedErrors, formattedWarnings,
+ formattedErrorsCodes, formattedWarningsCodes, compiler),
+ options);
}
}
@@ -228,3 +232,10 @@
return "// @dart = 2.9\n${beforePlug}${plug}${afterPlug}";
}
}
+
+class CompilerAndOptions {
+ final TestCompiler compiler;
+ final CompilerOptions options;
+
+ CompilerAndOptions(this.compiler, this.options);
+}
diff --git a/pkg/front_end/test/text_representation/internal_ast_text_representation_test.dart b/pkg/front_end/test/text_representation/internal_ast_text_representation_test.dart
index 543a79f..b706ee71 100644
--- a/pkg/front_end/test/text_representation/internal_ast_text_representation_test.dart
+++ b/pkg/front_end/test/text_representation/internal_ast_text_representation_test.dart
@@ -785,9 +785,9 @@
}
void _testSpreadElement() {
- testExpression(new SpreadElement(new IntLiteral(0), false), '''
+ testExpression(new SpreadElement(new IntLiteral(0), isNullAware: false), '''
...0''');
- testExpression(new SpreadElement(new IntLiteral(0), true), '''
+ testExpression(new SpreadElement(new IntLiteral(0), isNullAware: true), '''
...?0''');
}
diff --git a/pkg/vm/lib/transformations/ffi_definitions.dart b/pkg/vm/lib/transformations/ffi_definitions.dart
index 62e13b7..4ececb0 100644
--- a/pkg/vm/lib/transformations/ffi_definitions.dart
+++ b/pkg/vm/lib/transformations/ffi_definitions.dart
@@ -568,8 +568,13 @@
final sizeAnnotations = _getArraySizeAnnotations(m).toList();
if (sizeAnnotations.length == 1) {
final arrayDimensions = sizeAnnotations.single;
- type = NativeTypeCfe(this, dartType,
- compoundCache: compoundCache, arrayDimensions: arrayDimensions);
+ arrayDimensions.length;
+ if (this.arrayDimensions(dartType) == arrayDimensions.length) {
+ type = NativeTypeCfe(this, dartType,
+ compoundCache: compoundCache, arrayDimensions: arrayDimensions);
+ } else {
+ type = InvalidNativeTypeCfe("Invalid array dimensions.");
+ }
}
} else if (isPointerType(dartType) || isCompoundSubtype(dartType)) {
type = NativeTypeCfe(this, dartType, compoundCache: compoundCache);
@@ -888,7 +893,6 @@
.whereType<IntConstant>()
.map((e) => e.value)
.toList();
- assert(result.length > 0);
return result;
}
}
@@ -982,12 +986,15 @@
if (compoundCache.containsKey(clazz)) {
return compoundCache[clazz];
} else {
- throw "$clazz not found in compoundCache";
+ throw "Class '$clazz' not found in compoundCache.";
}
}
if (transformer.isArrayType(dartType)) {
if (arrayDimensions == null) {
- throw "Must have array dimensions for ArrayType";
+ throw "Must have array dimensions for ArrayType.";
+ }
+ if (arrayDimensions.length == 0) {
+ throw "Must have a size for this array dimension.";
}
final elementType = transformer.arraySingleElementType(dartType);
final elementCfeType =
diff --git a/runtime/vm/compiler/backend/il.cc b/runtime/vm/compiler/backend/il.cc
index 43a677c..62f28bc 100644
--- a/runtime/vm/compiler/backend/il.cc
+++ b/runtime/vm/compiler/backend/il.cc
@@ -2961,7 +2961,7 @@
break;
}
} else if (CreateArrayInstr* create_array = array->AsCreateArray()) {
- return create_array->element_type()->definition();
+ return create_array->type_arguments()->definition();
} else if (LoadFieldInstr* load_array = array->AsLoadField()) {
const Slot& slot = load_array->slot();
switch (slot.kind()) {
diff --git a/runtime/vm/compiler/backend/il.h b/runtime/vm/compiler/backend/il.h
index e5800c4..a53501e 100644
--- a/runtime/vm/compiler/backend/il.h
+++ b/runtime/vm/compiler/backend/il.h
@@ -6164,6 +6164,7 @@
class AllocateObjectInstr : public AllocationInstr {
public:
+ enum { kTypeArgumentsPos = 0 };
AllocateObjectInstr(const InstructionSource& source,
const Class& cls,
intptr_t deopt_id,
@@ -6173,7 +6174,7 @@
type_arguments_(type_arguments) {
ASSERT((cls.NumTypeArguments() > 0) == (type_arguments != nullptr));
if (type_arguments != nullptr) {
- SetInputAt(0, type_arguments);
+ SetInputAt(kTypeArgumentsPos, type_arguments);
}
}
@@ -6187,7 +6188,7 @@
return (type_arguments_ != nullptr) ? 1 : 0;
}
virtual Value* InputAt(intptr_t i) const {
- ASSERT(type_arguments_ != nullptr && i == 0);
+ ASSERT(type_arguments_ != nullptr && i == kTypeArgumentsPos);
return type_arguments_;
}
@@ -6205,7 +6206,7 @@
private:
virtual void RawSetInputAt(intptr_t i, Value* value) {
- ASSERT((type_arguments_ != nullptr) && (i == 0));
+ ASSERT((type_arguments_ != nullptr) && (i == kTypeArgumentsPos));
ASSERT(value != nullptr);
type_arguments_ = value;
}
@@ -6405,20 +6406,20 @@
class CreateArrayInstr : public TemplateArrayAllocation<2> {
public:
CreateArrayInstr(const InstructionSource& source,
- Value* element_type,
+ Value* type_arguments,
Value* num_elements,
intptr_t deopt_id)
: TemplateArrayAllocation(source, deopt_id) {
- SetInputAt(kElementTypePos, element_type);
+ SetInputAt(kTypeArgumentsPos, type_arguments);
SetInputAt(kLengthPos, num_elements);
}
- enum { kElementTypePos = 0, kLengthPos = 1 };
+ enum { kTypeArgumentsPos = 0, kLengthPos = 1 };
DECLARE_INSTRUCTION(CreateArray)
virtual CompileType ComputeType() const;
- Value* element_type() const { return inputs_[kElementTypePos]; }
+ Value* type_arguments() const { return inputs_[kTypeArgumentsPos]; }
virtual Value* num_elements() const { return inputs_[kLengthPos]; }
virtual bool HasUnknownSideEffects() const { return false; }
diff --git a/runtime/vm/compiler/backend/il_arm.cc b/runtime/vm/compiler/backend/il_arm.cc
index 2358f4e..a0d6420 100644
--- a/runtime/vm/compiler/backend/il_arm.cc
+++ b/runtime/vm/compiler/backend/il_arm.cc
@@ -3114,9 +3114,11 @@
const intptr_t kNumTemps = 0;
LocationSummary* locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
- locs->set_in(kElementTypePos, Location::RegisterLocation(R1));
- locs->set_in(kLengthPos, Location::RegisterLocation(R2));
- locs->set_out(0, Location::RegisterLocation(R0));
+ locs->set_in(kTypeArgumentsPos,
+ Location::RegisterLocation(AllocateArrayABI::kTypeArgumentsReg));
+ locs->set_in(kLengthPos,
+ Location::RegisterLocation(AllocateArrayABI::kLengthReg));
+ locs->set_out(0, Location::RegisterLocation(AllocateArrayABI::kResultReg));
return locs;
}
@@ -3126,31 +3128,31 @@
compiler::Label* slow_path,
compiler::Label* done) {
const int kInlineArraySize = 12; // Same as kInlineInstanceSize.
- const Register kLengthReg = R2;
- const Register kElemTypeReg = R1;
const intptr_t instance_size = Array::InstanceSize(num_elements);
__ TryAllocateArray(kArrayCid, instance_size, slow_path,
- R0, // instance
- R3, // end address
+ AllocateArrayABI::kResultReg, // instance
+ R3, // end address
R8, R6);
- // R0: new object start as a tagged pointer.
+ // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
// R3: new object end address.
// Store the type argument field.
__ StoreIntoObjectNoBarrier(
- R0,
- compiler::FieldAddress(R0,
+ AllocateArrayABI::kResultReg,
+ compiler::FieldAddress(AllocateArrayABI::kResultReg,
compiler::target::Array::type_arguments_offset()),
- kElemTypeReg);
+ AllocateArrayABI::kTypeArgumentsReg);
// Set the length field.
__ StoreIntoObjectNoBarrier(
- R0, compiler::FieldAddress(R0, compiler::target::Array::length_offset()),
- kLengthReg);
+ AllocateArrayABI::kResultReg,
+ compiler::FieldAddress(AllocateArrayABI::kResultReg,
+ compiler::target::Array::length_offset()),
+ AllocateArrayABI::kLengthReg);
// Initialize all array elements to raw_null.
- // R0: new object start as a tagged pointer.
+ // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
// R3: new object end address.
// R6: iterator which initially points to the start of the variable
// data area to be initialized.
@@ -3166,12 +3168,15 @@
__ LoadImmediate(R9, 0x1);
#endif // DEBUG
}
- __ AddImmediate(R6, R0, sizeof(UntaggedArray) - kHeapObjectTag);
+ __ AddImmediate(R6, AllocateArrayABI::kResultReg,
+ sizeof(UntaggedArray) - kHeapObjectTag);
if (array_size < (kInlineArraySize * compiler::target::kWordSize)) {
__ InitializeFieldsNoBarrierUnrolled(
- R0, R6, 0, num_elements * compiler::target::kWordSize, R8, R9);
+ AllocateArrayABI::kResultReg, R6, 0,
+ num_elements * compiler::target::kWordSize, R8, R9);
} else {
- __ InitializeFieldsNoBarrier(R0, R6, R3, R8, R9);
+ __ InitializeFieldsNoBarrier(AllocateArrayABI::kResultReg, R6, R3, R8,
+ R9);
}
}
__ b(done);
@@ -3183,16 +3188,9 @@
const Class& list_class =
Class::Handle(compiler->isolate_group()->class_table()->At(kArrayCid));
RegisterTypeArgumentsUse(compiler->function(), type_usage_info, list_class,
- element_type()->definition());
+ type_arguments()->definition());
}
- const Register kLengthReg = R2;
- const Register kElemTypeReg = R1;
- const Register kResultReg = R0;
-
- ASSERT(locs()->in(kElementTypePos).reg() == kElemTypeReg);
- ASSERT(locs()->in(kLengthPos).reg() == kLengthReg);
-
compiler::Label slow_path, done;
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
if (compiler->is_optimizing() && !FLAG_precompiled_mode &&
@@ -3214,7 +3212,6 @@
UntaggedPcDescriptors::kOther, locs(), deopt_id(),
env());
__ Bind(&done);
- ASSERT(locs()->out(0).reg() == kResultReg);
}
LocationSummary* LoadFieldInstr::MakeLocationSummary(Zone* zone,
@@ -7539,10 +7536,10 @@
LocationSummary* locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
if (type_arguments() != nullptr) {
- locs->set_in(0,
- Location::RegisterLocation(kAllocationStubTypeArgumentsReg));
+ locs->set_in(kTypeArgumentsPos, Location::RegisterLocation(
+ AllocateObjectABI::kTypeArgumentsReg));
}
- locs->set_out(0, Location::RegisterLocation(R0));
+ locs->set_out(0, Location::RegisterLocation(AllocateObjectABI::kResultReg));
return locs;
}
diff --git a/runtime/vm/compiler/backend/il_arm64.cc b/runtime/vm/compiler/backend/il_arm64.cc
index 9f3bfa7..8402871 100644
--- a/runtime/vm/compiler/backend/il_arm64.cc
+++ b/runtime/vm/compiler/backend/il_arm64.cc
@@ -2674,9 +2674,11 @@
const intptr_t kNumTemps = 0;
LocationSummary* locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
- locs->set_in(kElementTypePos, Location::RegisterLocation(R1));
- locs->set_in(kLengthPos, Location::RegisterLocation(R2));
- locs->set_out(0, Location::RegisterLocation(R0));
+ locs->set_in(kTypeArgumentsPos,
+ Location::RegisterLocation(AllocateArrayABI::kTypeArgumentsReg));
+ locs->set_in(kLengthPos,
+ Location::RegisterLocation(AllocateArrayABI::kLengthReg));
+ locs->set_out(0, Location::RegisterLocation(AllocateArrayABI::kResultReg));
return locs;
}
@@ -2686,29 +2688,32 @@
compiler::Label* slow_path,
compiler::Label* done) {
const int kInlineArraySize = 12; // Same as kInlineInstanceSize.
- const Register kLengthReg = R2;
- const Register kElemTypeReg = R1;
const intptr_t instance_size = Array::InstanceSize(num_elements);
__ TryAllocateArray(kArrayCid, instance_size, slow_path,
- R0, // instance
- R3, // end address
+ AllocateArrayABI::kResultReg, // instance
+ R3, // end address
R6, R8);
- // R0: new object start as a tagged pointer.
+ // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
// R3: new object end address.
// Store the type argument field.
__ StoreIntoObjectNoBarrier(
- R0, compiler::FieldAddress(R0, Array::type_arguments_offset()),
- kElemTypeReg);
+ AllocateArrayABI::kResultReg,
+ compiler::FieldAddress(AllocateArrayABI::kResultReg,
+ Array::type_arguments_offset()),
+ AllocateArrayABI::kTypeArgumentsReg);
// Set the length field.
__ StoreIntoObjectNoBarrier(
- R0, compiler::FieldAddress(R0, Array::length_offset()), kLengthReg);
+ AllocateArrayABI::kResultReg,
+ compiler::FieldAddress(AllocateArrayABI::kResultReg,
+ Array::length_offset()),
+ AllocateArrayABI::kLengthReg);
// TODO(zra): Use stp once added.
// Initialize all array elements to raw_null.
- // R0: new object start as a tagged pointer.
+ // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
// R3: new object end address.
// R8: iterator which initially points to the start of the variable
// data area to be initialized.
@@ -2716,7 +2721,8 @@
if (num_elements > 0) {
const intptr_t array_size = instance_size - sizeof(UntaggedArray);
__ LoadObject(R6, Object::null_object());
- __ AddImmediate(R8, R0, sizeof(UntaggedArray) - kHeapObjectTag);
+ __ AddImmediate(R8, AllocateArrayABI::kResultReg,
+ sizeof(UntaggedArray) - kHeapObjectTag);
if (array_size < (kInlineArraySize * kWordSize)) {
intptr_t current_offset = 0;
while (current_offset < array_size) {
@@ -2743,16 +2749,9 @@
const Class& list_class =
Class::Handle(compiler->isolate_group()->class_table()->At(kArrayCid));
RegisterTypeArgumentsUse(compiler->function(), type_usage_info, list_class,
- element_type()->definition());
+ type_arguments()->definition());
}
- const Register kLengthReg = R2;
- const Register kElemTypeReg = R1;
- const Register kResultReg = R0;
-
- ASSERT(locs()->in(kElementTypePos).reg() == kElemTypeReg);
- ASSERT(locs()->in(kLengthPos).reg() == kLengthReg);
-
compiler::Label slow_path, done;
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
if (compiler->is_optimizing() && !FLAG_precompiled_mode &&
@@ -2773,7 +2772,6 @@
compiler->GenerateStubCall(source(), allocate_array_stub,
UntaggedPcDescriptors::kOther, locs(), deopt_id(),
env());
- ASSERT(locs()->out(0).reg() == kResultReg);
__ Bind(&done);
}
@@ -6555,10 +6553,10 @@
LocationSummary* locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
if (type_arguments() != nullptr) {
- locs->set_in(0,
- Location::RegisterLocation(kAllocationStubTypeArgumentsReg));
+ locs->set_in(kTypeArgumentsPos, Location::RegisterLocation(
+ AllocateObjectABI::kTypeArgumentsReg));
}
- locs->set_out(0, Location::RegisterLocation(R0));
+ locs->set_out(0, Location::RegisterLocation(AllocateObjectABI::kResultReg));
return locs;
}
diff --git a/runtime/vm/compiler/backend/il_ia32.cc b/runtime/vm/compiler/backend/il_ia32.cc
index f237309..d165691 100644
--- a/runtime/vm/compiler/backend/il_ia32.cc
+++ b/runtime/vm/compiler/backend/il_ia32.cc
@@ -2393,9 +2393,11 @@
const intptr_t kNumTemps = 0;
LocationSummary* locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
- locs->set_in(0, Location::RegisterLocation(ECX));
- locs->set_in(1, Location::RegisterLocation(EDX));
- locs->set_out(0, Location::RegisterLocation(EAX));
+ locs->set_in(kTypeArgumentsPos,
+ Location::RegisterLocation(AllocateArrayABI::kTypeArgumentsReg));
+ locs->set_in(kLengthPos,
+ Location::RegisterLocation(AllocateArrayABI::kLengthReg));
+ locs->set_out(0, Location::RegisterLocation(AllocateArrayABI::kResultReg));
return locs;
}
@@ -2405,29 +2407,32 @@
compiler::Label* slow_path,
compiler::Label* done) {
const int kInlineArraySize = 12; // Same as kInlineInstanceSize.
- const Register kLengthReg = EDX;
- const Register kElemTypeReg = ECX;
const intptr_t instance_size = Array::InstanceSize(num_elements);
- // Instance in EAX.
+ // Instance in AllocateArrayABI::kResultReg.
// Object end address in EBX.
__ TryAllocateArray(kArrayCid, instance_size, slow_path,
compiler::Assembler::kFarJump,
- EAX, // instance
- EBX, // end address
- EDI); // temp
+ AllocateArrayABI::kResultReg, // instance
+ EBX, // end address
+ EDI); // temp
// Store the type argument field.
__ StoreIntoObjectNoBarrier(
- EAX, compiler::FieldAddress(EAX, Array::type_arguments_offset()),
- kElemTypeReg);
+ AllocateArrayABI::kResultReg,
+ compiler::FieldAddress(AllocateArrayABI::kResultReg,
+ Array::type_arguments_offset()),
+ AllocateArrayABI::kTypeArgumentsReg);
// Set the length field.
__ StoreIntoObjectNoBarrier(
- EAX, compiler::FieldAddress(EAX, Array::length_offset()), kLengthReg);
+ AllocateArrayABI::kResultReg,
+ compiler::FieldAddress(AllocateArrayABI::kResultReg,
+ Array::length_offset()),
+ AllocateArrayABI::kLengthReg);
// Initialize all array elements to raw_null.
- // EAX: new object start as a tagged pointer.
+ // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
// EBX: new object end address.
// EDI: iterator which initially points to the start of the variable
// data area to be initialized.
@@ -2435,19 +2440,22 @@
const intptr_t array_size = instance_size - sizeof(UntaggedArray);
const compiler::Immediate& raw_null =
compiler::Immediate(static_cast<intptr_t>(Object::null()));
- __ leal(EDI, compiler::FieldAddress(EAX, sizeof(UntaggedArray)));
+ __ leal(EDI, compiler::FieldAddress(AllocateArrayABI::kResultReg,
+ sizeof(UntaggedArray)));
if (array_size < (kInlineArraySize * kWordSize)) {
intptr_t current_offset = 0;
__ movl(EBX, raw_null);
while (current_offset < array_size) {
- __ StoreIntoObjectNoBarrier(EAX, compiler::Address(EDI, current_offset),
+ __ StoreIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
+ compiler::Address(EDI, current_offset),
EBX);
current_offset += kWordSize;
}
} else {
compiler::Label init_loop;
__ Bind(&init_loop);
- __ StoreIntoObjectNoBarrier(EAX, compiler::Address(EDI, 0),
+ __ StoreIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
+ compiler::Address(EDI, 0),
Object::null_object());
__ addl(EDI, compiler::Immediate(kWordSize));
__ cmpl(EDI, EBX);
@@ -2458,13 +2466,6 @@
}
void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- // Allocate the array. EDX = length, ECX = element type.
- const Register kLengthReg = EDX;
- const Register kElemTypeReg = ECX;
- const Register kResultReg = EAX;
- ASSERT(locs()->in(0).reg() == kElemTypeReg);
- ASSERT(locs()->in(1).reg() == kLengthReg);
-
compiler::Label slow_path, done;
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
if (compiler->is_optimizing() && num_elements()->BindsToConstant() &&
@@ -2485,7 +2486,6 @@
UntaggedPcDescriptors::kOther, locs(), deopt_id(),
env());
__ Bind(&done);
- ASSERT(locs()->out(0).reg() == kResultReg);
}
LocationSummary* LoadFieldInstr::MakeLocationSummary(Zone* zone,
@@ -6665,10 +6665,10 @@
LocationSummary* locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
if (type_arguments() != nullptr) {
- locs->set_in(0,
- Location::RegisterLocation(kAllocationStubTypeArgumentsReg));
+ locs->set_in(kTypeArgumentsPos, Location::RegisterLocation(
+ AllocateObjectABI::kTypeArgumentsReg));
}
- locs->set_out(0, Location::RegisterLocation(EAX));
+ locs->set_out(0, Location::RegisterLocation(AllocateObjectABI::kResultReg));
return locs;
}
diff --git a/runtime/vm/compiler/backend/il_x64.cc b/runtime/vm/compiler/backend/il_x64.cc
index 43930b0..da71487 100644
--- a/runtime/vm/compiler/backend/il_x64.cc
+++ b/runtime/vm/compiler/backend/il_x64.cc
@@ -2769,9 +2769,11 @@
const intptr_t kNumTemps = 0;
LocationSummary* locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
- locs->set_in(0, Location::RegisterLocation(RBX));
- locs->set_in(1, Location::RegisterLocation(R10));
- locs->set_out(0, Location::RegisterLocation(RAX));
+ locs->set_in(kTypeArgumentsPos,
+ Location::RegisterLocation(AllocateArrayABI::kTypeArgumentsReg));
+ locs->set_in(kLengthPos,
+ Location::RegisterLocation(AllocateArrayABI::kLengthReg));
+ locs->set_out(0, Location::RegisterLocation(AllocateArrayABI::kResultReg));
return locs;
}
@@ -2781,46 +2783,52 @@
compiler::Label* slow_path,
compiler::Label* done) {
const int kInlineArraySize = 12; // Same as kInlineInstanceSize.
- const Register kLengthReg = R10;
- const Register kElemTypeReg = RBX;
const intptr_t instance_size = Array::InstanceSize(num_elements);
__ TryAllocateArray(kArrayCid, instance_size, slow_path,
compiler::Assembler::kFarJump,
- RAX, // instance
- RCX, // end address
- R13); // temp
+ AllocateArrayABI::kResultReg, // instance
+ RCX, // end address
+ R13); // temp
// RAX: new object start as a tagged pointer.
// Store the type argument field.
__ StoreIntoObjectNoBarrier(
- RAX, compiler::FieldAddress(RAX, Array::type_arguments_offset()),
- kElemTypeReg);
+ AllocateArrayABI::kResultReg,
+ compiler::FieldAddress(AllocateArrayABI::kResultReg,
+ Array::type_arguments_offset()),
+ AllocateArrayABI::kTypeArgumentsReg);
// Set the length field.
__ StoreIntoObjectNoBarrier(
- RAX, compiler::FieldAddress(RAX, Array::length_offset()), kLengthReg);
+ AllocateArrayABI::kResultReg,
+ compiler::FieldAddress(AllocateArrayABI::kResultReg,
+ Array::length_offset()),
+ AllocateArrayABI::kLengthReg);
// Initialize all array elements to raw_null.
- // RAX: new object start as a tagged pointer.
+ // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
// RCX: new object end address.
// RDI: iterator which initially points to the start of the variable
// data area to be initialized.
if (num_elements > 0) {
const intptr_t array_size = instance_size - sizeof(UntaggedArray);
__ LoadObject(R12, Object::null_object());
- __ leaq(RDI, compiler::FieldAddress(RAX, sizeof(UntaggedArray)));
+ __ leaq(RDI, compiler::FieldAddress(AllocateArrayABI::kResultReg,
+ sizeof(UntaggedArray)));
if (array_size < (kInlineArraySize * kWordSize)) {
intptr_t current_offset = 0;
while (current_offset < array_size) {
- __ StoreIntoObjectNoBarrier(RAX, compiler::Address(RDI, current_offset),
+ __ StoreIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
+ compiler::Address(RDI, current_offset),
R12);
current_offset += kWordSize;
}
} else {
compiler::Label init_loop;
__ Bind(&init_loop);
- __ StoreIntoObjectNoBarrier(RAX, compiler::Address(RDI, 0), R12);
+ __ StoreIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
+ compiler::Address(RDI, 0), R12);
__ addq(RDI, compiler::Immediate(kWordSize));
__ cmpq(RDI, RCX);
__ j(BELOW, &init_loop, compiler::Assembler::kNearJump);
@@ -2835,16 +2843,9 @@
const Class& list_class =
Class::Handle(compiler->isolate_group()->class_table()->At(kArrayCid));
RegisterTypeArgumentsUse(compiler->function(), type_usage_info, list_class,
- element_type()->definition());
+ type_arguments()->definition());
}
- // Allocate the array. R10 = length, RBX = element type.
- const Register kLengthReg = R10;
- const Register kElemTypeReg = RBX;
- const Register kResultReg = RAX;
- ASSERT(locs()->in(0).reg() == kElemTypeReg);
- ASSERT(locs()->in(1).reg() == kLengthReg);
-
compiler::Label slow_path, done;
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
if (compiler->is_optimizing() && !FLAG_precompiled_mode &&
@@ -2866,7 +2867,6 @@
UntaggedPcDescriptors::kOther, locs(), deopt_id(),
env());
__ Bind(&done);
- ASSERT(locs()->out(0).reg() == kResultReg);
}
LocationSummary* LoadFieldInstr::MakeLocationSummary(Zone* zone,
@@ -6966,10 +6966,10 @@
LocationSummary* locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
if (type_arguments() != nullptr) {
- locs->set_in(0,
- Location::RegisterLocation(kAllocationStubTypeArgumentsReg));
+ locs->set_in(kTypeArgumentsPos, Location::RegisterLocation(
+ AllocateObjectABI::kTypeArgumentsReg));
}
- locs->set_out(0, Location::RegisterLocation(RAX));
+ locs->set_out(0, Location::RegisterLocation(AllocateObjectABI::kResultReg));
return locs;
}
diff --git a/runtime/vm/compiler/backend/redundancy_elimination.cc b/runtime/vm/compiler/backend/redundancy_elimination.cc
index 9114a2a..5cbc45e 100644
--- a/runtime/vm/compiler/backend/redundancy_elimination.cc
+++ b/runtime/vm/compiler/backend/redundancy_elimination.cc
@@ -2111,7 +2111,7 @@
if (auto load = use->instruction()->AsLoadField()) {
if (load->slot().IsTypeArguments()) {
place_id = GetPlaceId(load);
- forward_def = alloc->element_type()->definition();
+ forward_def = alloc->type_arguments()->definition();
}
} else if (use->instruction()->IsLoadIndexed() ||
use->instruction()->IsStoreIndexed()) {
diff --git a/runtime/vm/compiler/frontend/constant_reader.cc b/runtime/vm/compiler/frontend/constant_reader.cc
index 572e967..d85926f 100644
--- a/runtime/vm/compiler/frontend/constant_reader.cc
+++ b/runtime/vm/compiler/frontend/constant_reader.cc
@@ -87,21 +87,20 @@
{
SafepointMutexLocker ml(
H.thread()->isolate_group()->kernel_constants_mutex());
- KernelConstantsMap constant_map(H.info().constants());
- result_ ^= constant_map.GetOrNull(constant_index);
- ASSERT(constant_map.Release().ptr() == H.info().constants());
+ const auto& constants_array = Array::Handle(Z, H.info().constants());
+ ASSERT(constant_index < constants_array.Length());
+ result_ ^= constants_array.At(constant_index);
}
// On miss, evaluate, and insert value.
- if (result_.IsNull()) {
+ if (result_.ptr() == Object::sentinel().ptr()) {
LeaveCompilerScope cs(H.thread());
result_ = ReadConstantInternal(constant_index);
SafepointMutexLocker ml(
H.thread()->isolate_group()->kernel_constants_mutex());
- KernelConstantsMap constant_map(H.info().constants());
- auto insert = constant_map.InsertNewOrGetValue(constant_index, result_);
- ASSERT(insert == result_.ptr());
- H.info().set_constants(constant_map.Release()); // update!
+ const auto& constants_array = Array::Handle(Z, H.info().constants());
+ ASSERT(constant_index < constants_array.Length());
+ constants_array.SetAt(constant_index, result_);
}
return result_.ptr();
}
@@ -120,12 +119,22 @@
return false;
}
-intptr_t ConstantReader::NavigateToIndex(KernelReaderHelper* reader,
- intptr_t constant_index) {
+intptr_t ConstantReader::NumConstants() {
+ ASSERT(!H.constants_table().IsNull());
+ KernelReaderHelper reader(Z, &H, script_, H.constants_table(), 0);
+ return NumConstants(&reader);
+}
+
+intptr_t ConstantReader::NumConstants(KernelReaderHelper* reader) {
// Get reader directly into raw bytes of constant table/constant mapping.
// Get the length of the constants (at the end of the mapping).
reader->SetOffset(reader->ReaderSize() - 4);
- const intptr_t num_constants = reader->ReadUInt32();
+ return reader->ReadUInt32();
+}
+
+intptr_t ConstantReader::NavigateToIndex(KernelReaderHelper* reader,
+ intptr_t constant_index) {
+ const intptr_t num_constants = NumConstants(reader);
// Get the binary offset of the constant at the wanted index.
reader->SetOffset(reader->ReaderSize() - 4 - (num_constants * 4) +
diff --git a/runtime/vm/compiler/frontend/constant_reader.h b/runtime/vm/compiler/frontend/constant_reader.h
index bb65f68..b4e52c4 100644
--- a/runtime/vm/compiler/frontend/constant_reader.h
+++ b/runtime/vm/compiler/frontend/constant_reader.h
@@ -35,9 +35,12 @@
// into sub-constants).
InstancePtr ReadConstant(intptr_t constant_index);
+ intptr_t NumConstants();
+
private:
InstancePtr ReadConstantInternal(intptr_t constant_index);
intptr_t NavigateToIndex(KernelReaderHelper* reader, intptr_t constant_index);
+ intptr_t NumConstants(KernelReaderHelper* reader);
KernelReaderHelper* helper_;
Zone* zone_;
@@ -49,38 +52,6 @@
DISALLOW_COPY_AND_ASSIGN(ConstantReader);
};
-class KernelConstMapKeyEqualsTraits : public AllStatic {
- public:
- static const char* Name() { return "KernelConstMapKeyEqualsTraits"; }
- static bool ReportStats() { return false; }
-
- static bool IsMatch(const Object& a, const Object& b) {
- const Smi& key1 = Smi::Cast(a);
- const Smi& key2 = Smi::Cast(b);
- return (key1.Value() == key2.Value());
- }
- static bool IsMatch(const intptr_t key1, const Object& b) {
- return KeyAsSmi(key1) == Smi::Cast(b).ptr();
- }
- static uword Hash(const Object& obj) {
- const Smi& key = Smi::Cast(obj);
- return HashValue(key.Value());
- }
- static uword Hash(const intptr_t key) {
- return HashValue(Smi::Value(KeyAsSmi(key)));
- }
- static ObjectPtr NewKey(const intptr_t key) { return KeyAsSmi(key); }
-
- private:
- static uword HashValue(intptr_t pos) { return pos % (Smi::kMaxValue - 13); }
-
- static SmiPtr KeyAsSmi(const intptr_t key) {
- ASSERT(key >= 0);
- return Smi::New(key);
- }
-};
-typedef UnorderedHashMap<KernelConstMapKeyEqualsTraits> KernelConstantsMap;
-
} // namespace kernel
} // namespace dart
diff --git a/runtime/vm/compiler/stub_code_compiler_arm.cc b/runtime/vm/compiler/stub_code_compiler_arm.cc
index 9f15dbb..5380c8c 100644
--- a/runtime/vm/compiler/stub_code_compiler_arm.cc
+++ b/runtime/vm/compiler/stub_code_compiler_arm.cc
@@ -1000,11 +1000,14 @@
}
// Called for inline allocation of arrays.
-// Input parameters:
+// Input registers (preserved):
// LR: return address.
-// R1: array element type (either NULL or an instantiated type).
-// R2: array length as Smi (must be preserved).
-// The newly allocated object is returned in R0.
+// AllocateArrayABI::kLengthReg: array length as Smi.
+// AllocateArrayABI::kTypeArgumentsReg: type arguments of array.
+// Output registers:
+// AllocateArrayABI::kResultReg: newly allocated array.
+// Clobbered:
+// R3, R4, R8, R9
void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@@ -1012,7 +1015,7 @@
// and is computed as:
// RoundedAllocationSize(
// (array_length * kwordSize) + target::Array::header_size()).
- __ mov(R3, Operand(R2)); // Array length.
+ __ mov(R3, Operand(AllocateArrayABI::kLengthReg)); // Array length.
// Check that length is a Smi.
__ tst(R3, Operand(kSmiTagMask));
__ b(&slow_case, NE);
@@ -1037,12 +1040,14 @@
// R9: Allocation size.
// Potential new object start.
- __ ldr(R0, Address(THR, target::Thread::top_offset()));
- __ adds(R3, R0, Operand(R9)); // Potential next object start.
+ __ ldr(AllocateArrayABI::kResultReg,
+ Address(THR, target::Thread::top_offset()));
+ __ adds(R3, AllocateArrayABI::kResultReg,
+ Operand(R9)); // Potential next object start.
__ b(&slow_case, CS); // Branch if unsigned overflow.
// Check if the allocation fits into the remaining space.
- // R0: potential new object start.
+ // AllocateArrayABI::kResultReg: potential new object start.
// R3: potential next object start.
// R9: allocation size.
__ ldr(TMP, Address(THR, target::Thread::end_offset()));
@@ -1052,10 +1057,11 @@
// Successfully allocated the object(s), now update top to point to
// next object start and initialize the object.
__ str(R3, Address(THR, target::Thread::top_offset()));
- __ add(R0, R0, Operand(kHeapObjectTag));
+ __ add(AllocateArrayABI::kResultReg, AllocateArrayABI::kResultReg,
+ Operand(kHeapObjectTag));
// Initialize the tags.
- // R0: new object start as a tagged pointer.
+ // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
// R3: new object end address.
// R9: allocation size.
{
@@ -1072,22 +1078,27 @@
target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
__ LoadImmediate(TMP, tags);
__ orr(R8, R8, Operand(TMP));
- __ str(R8,
- FieldAddress(R0, target::Array::tags_offset())); // Store tags.
+ __ str(R8, FieldAddress(AllocateArrayABI::kResultReg,
+ target::Array::tags_offset())); // Store tags.
}
- // R0: new object start as a tagged pointer.
+ // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
// R3: new object end address.
// Store the type argument field.
__ StoreIntoObjectNoBarrier(
- R0, FieldAddress(R0, target::Array::type_arguments_offset()), R1);
+ AllocateArrayABI::kResultReg,
+ FieldAddress(AllocateArrayABI::kResultReg,
+ target::Array::type_arguments_offset()),
+ AllocateArrayABI::kTypeArgumentsReg);
// Set the length field.
- __ StoreIntoObjectNoBarrier(
- R0, FieldAddress(R0, target::Array::length_offset()), R2);
+ __ StoreIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
+ FieldAddress(AllocateArrayABI::kResultReg,
+ target::Array::length_offset()),
+ AllocateArrayABI::kLengthReg);
// Initialize all array elements to raw_null.
- // R0: new object start as a tagged pointer.
+ // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
// R8, R9: null
// R4: iterator which initially points to the start of the variable
// data area to be initialized.
@@ -1096,9 +1107,10 @@
__ LoadObject(R8, NullObject());
__ mov(R9, Operand(R8));
- __ AddImmediate(R4, R0, target::Array::header_size() - kHeapObjectTag);
- __ InitializeFieldsNoBarrier(R0, R4, R3, R8, R9);
- __ Ret(); // Returns the newly allocated object in R0.
+ __ AddImmediate(R4, AllocateArrayABI::kResultReg,
+ target::Array::header_size() - kHeapObjectTag);
+ __ InitializeFieldsNoBarrier(AllocateArrayABI::kResultReg, R4, R3, R8, R9);
+ __ Ret();
// Unable to allocate the array using the fast inline code, just call
// into the runtime.
__ Bind(&slow_case);
@@ -1110,11 +1122,13 @@
__ LoadImmediate(TMP, 0);
// Setup space on stack for return value.
// Push array length as Smi and element type.
- __ PushList((1 << R1) | (1 << R2) | (1 << IP));
+ __ PushList((1 << AllocateArrayABI::kTypeArgumentsReg) |
+ (1 << AllocateArrayABI::kLengthReg) | (1 << IP));
__ CallRuntime(kAllocateArrayRuntimeEntry, 2);
// Pop arguments; result is popped in IP.
- __ PopList((1 << R1) | (1 << R2) | (1 << IP)); // R2 is restored.
- __ mov(R0, Operand(IP));
+ __ PopList((1 << AllocateArrayABI::kTypeArgumentsReg) |
+ (1 << AllocateArrayABI::kLengthReg) | (1 << IP));
+ __ mov(AllocateArrayABI::kResultReg, Operand(IP));
__ LeaveStubFrame();
// Write-barrier elimination might be enabled for this array (depending on the
@@ -1705,10 +1719,7 @@
static void GenerateAllocateObjectHelper(Assembler* assembler,
bool is_cls_parameterized) {
- const Register kInstanceReg = R0;
- // R1
const Register kTagsReg = R2;
- // kAllocationStubTypeArgumentsReg = R3
{
Label slow_case;
@@ -1723,10 +1734,12 @@
__ ExtractInstanceSizeFromTags(kInstanceSizeReg, kTagsReg);
// Load two words from Thread::top: top and end.
- // kInstanceReg: potential next object start.
- __ ldrd(kInstanceReg, kEndReg, THR, target::Thread::top_offset());
+ // AllocateObjectABI::kResultReg: potential next object start.
+ __ ldrd(AllocateObjectABI::kResultReg, kEndReg, THR,
+ target::Thread::top_offset());
- __ add(kNewTopReg, kInstanceReg, Operand(kInstanceSizeReg));
+ __ add(kNewTopReg, AllocateObjectABI::kResultReg,
+ Operand(kInstanceSizeReg));
__ CompareRegisters(kEndReg, kNewTopReg);
__ b(&slow_case, UNSIGNED_LESS_EQUAL);
@@ -1737,7 +1750,8 @@
} // kEndReg = R1, kInstanceSizeReg = R9
// Tags.
- __ str(kTagsReg, Address(kInstanceReg, target::Object::tags_offset()));
+ __ str(kTagsReg, Address(AllocateObjectABI::kResultReg,
+ target::Object::tags_offset()));
// Initialize the remaining words of the object.
{
@@ -1746,7 +1760,7 @@
__ LoadObject(kNullReg, NullObject());
- __ AddImmediate(kFieldReg, kInstanceReg,
+ __ AddImmediate(kFieldReg, AllocateObjectABI::kResultReg,
target::Instance::first_field_offset());
Label done, init_loop;
__ Bind(&init_loop);
@@ -1778,14 +1792,16 @@
// Set the type arguments in the new object.
__ StoreIntoObjectNoBarrier(
- kInstanceReg,
- Address(kInstanceReg, kTypeOffestReg, LSL, target::kWordSizeLog2),
- kAllocationStubTypeArgumentsReg);
+ AllocateObjectABI::kResultReg,
+ Address(AllocateObjectABI::kResultReg, kTypeOffestReg, LSL,
+ target::kWordSizeLog2),
+ AllocateObjectABI::kTypeArgumentsReg);
__ Bind(¬_parameterized_case);
} // kClsIdReg = R1, kTypeOffestReg = R9
- __ AddImmediate(kInstanceReg, kInstanceReg, kHeapObjectTag);
+ __ AddImmediate(AllocateObjectABI::kResultReg,
+ AllocateObjectABI::kResultReg, kHeapObjectTag);
__ Ret();
@@ -1797,7 +1813,7 @@
const Register kStubReg = R8;
if (!is_cls_parameterized) {
- __ LoadObject(kAllocationStubTypeArgumentsReg, NullObject());
+ __ LoadObject(AllocateObjectABI::kTypeArgumentsReg, NullObject());
}
// Tail call to generic allocation stub.
@@ -1819,10 +1835,8 @@
}
void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
- const Register kInstanceReg = R0;
const Register kClsReg = R1;
const Register kTagsReg = R2;
- // kAllocationStubTypeArgumentsReg = R3
if (!FLAG_use_bare_instructions) {
__ ldr(CODE_REG,
@@ -1833,21 +1847,21 @@
// calling into the runtime.
__ EnterStubFrame();
- __ ExtractClassIdFromTags(kInstanceReg, kTagsReg);
- __ LoadClassById(kClsReg, kInstanceReg);
+ __ ExtractClassIdFromTags(AllocateObjectABI::kResultReg, kTagsReg);
+ __ LoadClassById(kClsReg, AllocateObjectABI::kResultReg);
- __ LoadObject(kInstanceReg, NullObject());
+ __ LoadObject(AllocateObjectABI::kResultReg, NullObject());
// Pushes result slot, then parameter class.
- __ PushRegisterPair(kClsReg, kInstanceReg);
+ __ PushRegisterPair(kClsReg, AllocateObjectABI::kResultReg);
// Should be Object::null() if class is non-parameterized.
- __ Push(kAllocationStubTypeArgumentsReg);
+ __ Push(AllocateObjectABI::kTypeArgumentsReg);
__ CallRuntime(kAllocateObjectRuntimeEntry, 2);
// Load result off the stack into result register.
- __ ldr(kInstanceReg, Address(SP, 2 * target::kWordSize));
+ __ ldr(AllocateObjectABI::kResultReg, Address(SP, 2 * target::kWordSize));
// Write-barrier elimination is enabled for [cls] and we therefore need to
// ensure that the object is in new-space or has remembered bit set.
@@ -1880,10 +1894,7 @@
const uword tags =
target::MakeTagWordForNewSpaceObject(cls_id, instance_size);
- // Note: Keep in sync with helper function.
- // kInstanceReg = R0
const Register kTagsReg = R2;
- // kAllocationStubTypeArgumentsReg = R3
__ LoadImmediate(kTagsReg, tags);
@@ -1919,7 +1930,7 @@
}
} else {
if (!is_cls_parameterized) {
- __ LoadObject(kAllocationStubTypeArgumentsReg, NullObject());
+ __ LoadObject(AllocateObjectABI::kTypeArgumentsReg, NullObject());
}
__ ldr(PC,
Address(THR,
diff --git a/runtime/vm/compiler/stub_code_compiler_arm64.cc b/runtime/vm/compiler/stub_code_compiler_arm64.cc
index b2420bd..7f677dd 100644
--- a/runtime/vm/compiler/stub_code_compiler_arm64.cc
+++ b/runtime/vm/compiler/stub_code_compiler_arm64.cc
@@ -1118,12 +1118,14 @@
}
// Called for inline allocation of arrays.
-// Input parameters:
+// Input registers (preserved):
// LR: return address.
-// R2: array length as Smi.
-// R1: array element type (either NULL or an instantiated type).
-// NOTE: R2 cannot be clobbered here as the caller relies on it being saved.
-// The newly allocated object is returned in R0.
+// AllocateArrayABI::kLengthReg: array length as Smi.
+// AllocateArrayABI::kTypeArgumentsReg: type arguments of array.
+// Output registers:
+// AllocateArrayABI::kResultReg: newly allocated array.
+// Clobbered:
+// R3, R7
void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@@ -1132,12 +1134,12 @@
// RoundedAllocationSize(
// (array_length * kwordSize) + target::Array::header_size()).
// Check that length is a Smi.
- __ BranchIfNotSmi(R2, &slow_case);
+ __ BranchIfNotSmi(AllocateArrayABI::kLengthReg, &slow_case);
// Check length >= 0 && length <= kMaxNewSpaceElements
const intptr_t max_len =
target::ToRawSmi(target::Array::kMaxNewSpaceElements);
- __ CompareImmediate(R2, max_len, kObjectBytes);
+ __ CompareImmediate(AllocateArrayABI::kLengthReg, max_len, kObjectBytes);
__ b(&slow_case, HI);
const intptr_t cid = kArrayCid;
@@ -1145,26 +1147,28 @@
// Calculate and align allocation size.
// Load new object start and calculate next object start.
- // R1: array element type.
- // R2: array length as Smi.
- __ ldr(R0, Address(THR, target::Thread::top_offset()));
+ // AllocateArrayABI::kTypeArgumentsReg: type arguments of array.
+ // AllocateArrayABI::kLengthReg: array length as Smi.
+ __ ldr(AllocateArrayABI::kResultReg,
+ Address(THR, target::Thread::top_offset()));
intptr_t fixed_size_plus_alignment_padding =
target::Array::header_size() +
target::ObjectAlignment::kObjectAlignment - 1;
__ LoadImmediate(R3, fixed_size_plus_alignment_padding);
- __ add(R3, R3, Operand(R2, LSL, 2), kObjectBytes); // R2 is Smi.
+ __ add(R3, R3, Operand(AllocateArrayABI::kLengthReg, LSL, 2),
+ kObjectBytes); // kLengthReg is Smi.
ASSERT(kSmiTagShift == 1);
__ andi(R3, R3,
Immediate(~(target::ObjectAlignment::kObjectAlignment - 1)));
- // R0: potential new object start.
+ // AllocateArrayABI::kResultReg: potential new object start.
// R3: object size in bytes.
- __ adds(R7, R3, Operand(R0));
+ __ adds(R7, R3, Operand(AllocateArrayABI::kResultReg));
__ b(&slow_case, CS); // Branch if unsigned overflow.
// Check if the allocation fits into the remaining space.
- // R0: potential new object start.
- // R1: array element type.
- // R2: array length as Smi.
+ // AllocateArrayABI::kResultReg: potential new object start.
+ // AllocateArrayABI::kTypeArgumentsReg: type arguments of array.
+ // AllocateArrayABI::kLengthReg: array length as Smi.
// R3: array size.
// R7: potential next object start.
__ LoadFromOffset(TMP, THR, target::Thread::end_offset());
@@ -1173,66 +1177,72 @@
// Successfully allocated the object(s), now update top to point to
// next object start and initialize the object.
- // R0: potential new object start.
+ // AllocateArrayABI::kResultReg: potential new object start.
// R3: array size.
// R7: potential next object start.
__ str(R7, Address(THR, target::Thread::top_offset()));
- __ add(R0, R0, Operand(kHeapObjectTag));
+ __ add(AllocateArrayABI::kResultReg, AllocateArrayABI::kResultReg,
+ Operand(kHeapObjectTag));
- // R0: new object start as a tagged pointer.
- // R1: array element type.
- // R2: array length as Smi.
+ // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
+ // AllocateArrayABI::kTypeArgumentsReg: type arguments of array.
+ // AllocateArrayABI::kLengthReg: array length as Smi.
// R3: array size.
// R7: new object end address.
// Store the type argument field.
- __ StoreIntoObjectOffsetNoBarrier(
- R0, target::Array::type_arguments_offset(), R1);
+ __ StoreIntoObjectOffsetNoBarrier(AllocateArrayABI::kResultReg,
+ target::Array::type_arguments_offset(),
+ AllocateArrayABI::kTypeArgumentsReg);
// Set the length field.
- __ StoreIntoObjectOffsetNoBarrier(R0, target::Array::length_offset(), R2);
+ __ StoreIntoObjectOffsetNoBarrier(AllocateArrayABI::kResultReg,
+ target::Array::length_offset(),
+ AllocateArrayABI::kLengthReg);
// Calculate the size tag.
- // R0: new object start as a tagged pointer.
- // R2: array length as Smi.
+ // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
+ // AllocateArrayABI::kLengthReg: array length as Smi.
// R3: array size.
// R7: new object end address.
const intptr_t shift = target::UntaggedObject::kTagBitsSizeTagPos -
target::ObjectAlignment::kObjectAlignmentLog2;
__ CompareImmediate(R3, target::UntaggedObject::kSizeTagMaxSizeTag);
- // If no size tag overflow, shift R1 left, else set R1 to zero.
+ // If no size tag overflow, shift R3 left, else set R3 to zero.
__ LslImmediate(TMP, R3, shift);
- __ csel(R1, TMP, R1, LS);
- __ csel(R1, ZR, R1, HI);
+ __ csel(R3, TMP, R3, LS);
+ __ csel(R3, ZR, R3, HI);
// Get the class index and insert it into the tags.
const uword tags =
target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
__ LoadImmediate(TMP, tags);
- __ orr(R1, R1, Operand(TMP));
- __ StoreFieldToOffset(R1, R0, target::Array::tags_offset());
+ __ orr(R3, R3, Operand(TMP));
+ __ StoreFieldToOffset(R3, AllocateArrayABI::kResultReg,
+ target::Array::tags_offset());
// Initialize all array elements to raw_null.
- // R0: new object start as a tagged pointer.
+ // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
// R7: new object end address.
- // R2: array length as Smi.
- __ AddImmediate(R1, R0, target::Array::data_offset() - kHeapObjectTag);
- // R1: iterator which initially points to the start of the variable
+ // AllocateArrayABI::kLengthReg: array length as Smi.
+ __ AddImmediate(R3, AllocateArrayABI::kResultReg,
+ target::Array::data_offset() - kHeapObjectTag);
+ // R3: iterator which initially points to the start of the variable
// data area to be initialized.
Label loop, done;
__ Bind(&loop);
// TODO(cshapiro): StoreIntoObjectNoBarrier
- __ CompareRegisters(R1, R7);
+ __ CompareRegisters(R3, R7);
__ b(&done, CS);
- __ str(NULL_REG, Address(R1)); // Store if unsigned lower.
- __ AddImmediate(R1, target::kWordSize);
- __ b(&loop); // Loop until R1 == R7.
+ __ str(NULL_REG, Address(R3)); // Store if unsigned lower.
+ __ AddImmediate(R3, target::kWordSize);
+ __ b(&loop); // Loop until R3 == R7.
__ Bind(&done);
// Done allocating and initializing the array.
- // R0: new object.
- // R2: array length as Smi (preserved for the caller.)
+ // AllocateArrayABI::kResultReg: new object.
+ // AllocateArrayABI::kLengthReg: array length as Smi (preserved).
__ ret();
// Unable to allocate the array using the fast inline code, just call
@@ -1245,13 +1255,13 @@
// Setup space on stack for return value.
// Push array length as Smi and element type.
__ Push(ZR);
- __ Push(R2);
- __ Push(R1);
+ __ Push(AllocateArrayABI::kLengthReg);
+ __ Push(AllocateArrayABI::kTypeArgumentsReg);
__ CallRuntime(kAllocateArrayRuntimeEntry, 2);
// Pop arguments; result is popped in IP.
- __ Pop(R1);
- __ Pop(R2);
- __ Pop(R0);
+ __ Pop(AllocateArrayABI::kTypeArgumentsReg);
+ __ Pop(AllocateArrayABI::kLengthReg);
+ __ Pop(AllocateArrayABI::kResultReg);
__ LeaveStubFrame();
// Write-barrier elimination might be enabled for this array (depending on the
@@ -1855,8 +1865,6 @@
static void GenerateAllocateObjectHelper(Assembler* assembler,
bool is_cls_parameterized) {
- const Register kInstanceReg = R0;
- // kAllocationStubTypeArgumentsReg = R1
const Register kTagsReg = R2;
{
@@ -1872,11 +1880,12 @@
__ ExtractInstanceSizeFromTags(kInstanceSizeReg, kTagsReg);
// Load two words from Thread::top: top and end.
- // kInstanceReg: potential next object start.
- __ ldp(kInstanceReg, kEndReg,
+ // AllocateObjectABI::kResultReg: potential next object start.
+ __ ldp(AllocateObjectABI::kResultReg, kEndReg,
Address(THR, target::Thread::top_offset(), Address::PairOffset));
- __ add(kNewTopReg, kInstanceReg, Operand(kInstanceSizeReg));
+ __ add(kNewTopReg, AllocateObjectABI::kResultReg,
+ Operand(kInstanceSizeReg));
__ CompareRegisters(kEndReg, kNewTopReg);
__ b(&slow_case, UNSIGNED_LESS_EQUAL);
@@ -1887,13 +1896,14 @@
} // kInstanceSizeReg = R4, kEndReg = R5
// Tags.
- __ str(kTagsReg, Address(kInstanceReg, target::Object::tags_offset()));
+ __ str(kTagsReg, Address(AllocateObjectABI::kResultReg,
+ target::Object::tags_offset()));
// Initialize the remaining words of the object.
{
const Register kFieldReg = R4;
- __ AddImmediate(kFieldReg, kInstanceReg,
+ __ AddImmediate(kFieldReg, AllocateObjectABI::kResultReg,
target::Instance::first_field_offset());
Label done, init_loop;
__ Bind(&init_loop);
@@ -1925,14 +1935,16 @@
// Set the type arguments in the new object.
__ StoreIntoObjectNoBarrier(
- kInstanceReg,
- Address(kInstanceReg, kTypeOffestReg, UXTX, Address::Scaled),
- kAllocationStubTypeArgumentsReg);
+ AllocateObjectABI::kResultReg,
+ Address(AllocateObjectABI::kResultReg, kTypeOffestReg, UXTX,
+ Address::Scaled),
+ AllocateObjectABI::kTypeArgumentsReg);
__ Bind(¬_parameterized_case);
} // kClsIdReg = R4, kTypeOffestReg = R5
- __ AddImmediate(kInstanceReg, kInstanceReg, kHeapObjectTag);
+ __ AddImmediate(AllocateObjectABI::kResultReg,
+ AllocateObjectABI::kResultReg, kHeapObjectTag);
__ ret();
@@ -1941,7 +1953,7 @@
// Fall back on slow case:
if (!is_cls_parameterized) {
- __ mov(kAllocationStubTypeArgumentsReg, NULL_REG);
+ __ mov(AllocateObjectABI::kTypeArgumentsReg, NULL_REG);
}
// Tail call to generic allocation stub.
__ ldr(
@@ -1961,8 +1973,6 @@
}
void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
- const Register kInstanceReg = R0;
- // kAllocationStubTypeArgumentsReg = R1
const Register kTagsToClsIdReg = R2;
if (!FLAG_use_bare_instructions) {
@@ -1980,12 +1990,12 @@
__ PushPair(R0, NULL_REG); // Pushes result slot, then class object.
// Should be Object::null() if class is non-parameterized.
- __ Push(kAllocationStubTypeArgumentsReg);
+ __ Push(AllocateObjectABI::kTypeArgumentsReg);
__ CallRuntime(kAllocateObjectRuntimeEntry, 2);
// Load result off the stack into result register.
- __ ldr(kInstanceReg, Address(SP, 2 * target::kWordSize));
+ __ ldr(AllocateObjectABI::kResultReg, Address(SP, 2 * target::kWordSize));
// Write-barrier elimination is enabled for [cls] and we therefore need to
// ensure that the object is in new-space or has remembered bit set.
@@ -2003,9 +2013,6 @@
const Class& cls,
const Code& allocate_object,
const Code& allocat_object_parametrized) {
- static_assert(kAllocationStubTypeArgumentsReg == R1,
- "Adjust register allocation in the AllocationStub");
-
classid_t cls_id = target::Class::GetId(cls);
ASSERT(cls_id != kIllegalCid);
@@ -2024,8 +2031,6 @@
target::MakeTagWordForNewSpaceObject(cls_id, instance_size);
// Note: Keep in sync with helper function.
- // kInstanceReg = R0
- // kAllocationStubTypeArgumentsReg = R1
const Register kTagsReg = R2;
__ LoadImmediate(kTagsReg, tags);
@@ -2064,7 +2069,7 @@
}
} else {
if (!is_cls_parameterized) {
- __ LoadObject(kAllocationStubTypeArgumentsReg, NullObject());
+ __ LoadObject(AllocateObjectABI::kTypeArgumentsReg, NullObject());
}
__ ldr(R4,
Address(THR,
diff --git a/runtime/vm/compiler/stub_code_compiler_ia32.cc b/runtime/vm/compiler/stub_code_compiler_ia32.cc
index 940b20f..16ef27a 100644
--- a/runtime/vm/compiler/stub_code_compiler_ia32.cc
+++ b/runtime/vm/compiler/stub_code_compiler_ia32.cc
@@ -791,11 +791,13 @@
}
// Called for inline allocation of arrays.
-// Input parameters:
-// EDX : Array length as Smi (must be preserved).
-// ECX : array element type (either NULL or an instantiated type).
-// Uses EAX, EBX, ECX, EDI as temporary registers.
-// The newly allocated object is returned in EAX.
+// Input registers (preserved):
+// AllocateArrayABI::kLengthReg: array length as Smi.
+// AllocateArrayABI::kTypeArgumentsReg: type arguments of array.
+// Output registers:
+// AllocateArrayABI::kResultReg: newly allocated array.
+// Clobbered:
+// EBX, EDI
void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@@ -804,54 +806,57 @@
// RoundedAllocationSize(
// (array_length * kwordSize) + target::Array::header_size()).
// Assert that length is a Smi.
- __ testl(EDX, Immediate(kSmiTagMask));
+ __ testl(AllocateArrayABI::kLengthReg, Immediate(kSmiTagMask));
__ j(NOT_ZERO, &slow_case);
// Check for maximum allowed length.
const Immediate& max_len =
Immediate(target::ToRawSmi(target::Array::kMaxNewSpaceElements));
- __ cmpl(EDX, max_len);
+ __ cmpl(AllocateArrayABI::kLengthReg, max_len);
__ j(ABOVE, &slow_case);
- NOT_IN_PRODUCT(__ MaybeTraceAllocation(kArrayCid, EAX, &slow_case,
- Assembler::kFarJump));
+ NOT_IN_PRODUCT(__ MaybeTraceAllocation(kArrayCid,
+ AllocateArrayABI::kResultReg,
+ &slow_case, Assembler::kFarJump));
const intptr_t fixed_size_plus_alignment_padding =
target::Array::header_size() +
target::ObjectAlignment::kObjectAlignment - 1;
- // EDX is Smi.
- __ leal(EBX, Address(EDX, TIMES_2, fixed_size_plus_alignment_padding));
+ // AllocateArrayABI::kLengthReg is Smi.
+ __ leal(EBX, Address(AllocateArrayABI::kLengthReg, TIMES_2,
+ fixed_size_plus_alignment_padding));
ASSERT(kSmiTagShift == 1);
__ andl(EBX, Immediate(-target::ObjectAlignment::kObjectAlignment));
- // ECX: array element type.
- // EDX: array length as Smi.
+ // AllocateArrayABI::kTypeArgumentsReg: array type arguments.
+ // AllocateArrayABI::kLengthReg: array length as Smi.
// EBX: allocation size.
const intptr_t cid = kArrayCid;
- __ movl(EAX, Address(THR, target::Thread::top_offset()));
- __ addl(EBX, EAX);
+ __ movl(AllocateArrayABI::kResultReg,
+ Address(THR, target::Thread::top_offset()));
+ __ addl(EBX, AllocateArrayABI::kResultReg);
__ j(CARRY, &slow_case);
// Check if the allocation fits into the remaining space.
- // EAX: potential new object start.
+ // AllocateArrayABI::kResultReg: potential new object start.
// EBX: potential next object start.
- // ECX: array element type.
- // EDX: array length as Smi).
+ // AllocateArrayABI::kTypeArgumentsReg: array type arguments.
+ // AllocateArrayABI::kLengthReg: array length as Smi.
__ cmpl(EBX, Address(THR, target::Thread::end_offset()));
__ j(ABOVE_EQUAL, &slow_case);
// Successfully allocated the object(s), now update top to point to
// next object start and initialize the object.
__ movl(Address(THR, target::Thread::top_offset()), EBX);
- __ subl(EBX, EAX);
- __ addl(EAX, Immediate(kHeapObjectTag));
+ __ subl(EBX, AllocateArrayABI::kResultReg);
+ __ addl(AllocateArrayABI::kResultReg, Immediate(kHeapObjectTag));
// Initialize the tags.
- // EAX: new object start as a tagged pointer.
+ // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
// EBX: allocation size.
- // ECX: array element type.
- // EDX: array length as Smi.
+ // AllocateArrayABI::kTypeArgumentsReg: array type arguments.
+ // AllocateArrayABI::kLengthReg: array length as Smi.
{
Label size_tag_overflow, done;
__ movl(EDI, EBX);
@@ -868,41 +873,50 @@
// Get the class index and insert it into the tags.
uword tags = target::MakeTagWordForNewSpaceObject(cid, 0);
__ orl(EDI, Immediate(tags));
- __ movl(FieldAddress(EAX, target::Object::tags_offset()), EDI); // Tags.
+ __ movl(FieldAddress(AllocateArrayABI::kResultReg,
+ target::Object::tags_offset()),
+ EDI); // Tags.
}
- // EAX: new object start as a tagged pointer.
+ // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
// EBX: allocation size.
- // ECX: array element type.
- // EDX: Array length as Smi (preserved).
+ // AllocateArrayABI::kTypeArgumentsReg: array type arguments.
+ // AllocateArrayABI::kLengthReg: Array length as Smi (preserved).
// Store the type argument field.
// No generational barrier needed, since we store into a new object.
__ StoreIntoObjectNoBarrier(
- EAX, FieldAddress(EAX, target::Array::type_arguments_offset()), ECX);
+ AllocateArrayABI::kResultReg,
+ FieldAddress(AllocateArrayABI::kResultReg,
+ target::Array::type_arguments_offset()),
+ AllocateArrayABI::kTypeArgumentsReg);
// Set the length field.
- __ StoreIntoObjectNoBarrier(
- EAX, FieldAddress(EAX, target::Array::length_offset()), EDX);
+ __ StoreIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
+ FieldAddress(AllocateArrayABI::kResultReg,
+ target::Array::length_offset()),
+ AllocateArrayABI::kLengthReg);
// Initialize all array elements to raw_null.
- // EAX: new object start as a tagged pointer.
+ // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
// EBX: allocation size.
// EDI: iterator which initially points to the start of the variable
// data area to be initialized.
- // ECX: array element type.
- // EDX: array length as Smi.
- __ leal(EBX, FieldAddress(EAX, EBX, TIMES_1, 0));
- __ leal(EDI, FieldAddress(EAX, target::Array::header_size()));
+ // AllocateArrayABI::kTypeArgumentsReg: array type arguments.
+ // AllocateArrayABI::kLengthReg: array length as Smi.
+ __ leal(EBX, FieldAddress(AllocateArrayABI::kResultReg, EBX, TIMES_1, 0));
+ __ leal(EDI, FieldAddress(AllocateArrayABI::kResultReg,
+ target::Array::header_size()));
Label done;
Label init_loop;
__ Bind(&init_loop);
__ cmpl(EDI, EBX);
__ j(ABOVE_EQUAL, &done, Assembler::kNearJump);
// No generational barrier needed, since we are storing null.
- __ StoreIntoObjectNoBarrier(EAX, Address(EDI, 0), NullObject());
+ __ StoreIntoObjectNoBarrier(AllocateArrayABI::kResultReg, Address(EDI, 0),
+ NullObject());
__ addl(EDI, Immediate(target::kWordSize));
__ jmp(&init_loop, Assembler::kNearJump);
__ Bind(&done);
- __ ret(); // returns the newly allocated object in EAX.
+ __ ret();
// Unable to allocate the array using the fast inline code, just call
// into the runtime.
@@ -912,12 +926,12 @@
// calling into the runtime.
__ EnterStubFrame();
__ pushl(Immediate(0)); // Setup space on stack for return value.
- __ pushl(EDX); // Array length as Smi.
- __ pushl(ECX); // Element type.
+ __ pushl(AllocateArrayABI::kLengthReg); // Array length as Smi.
+ __ pushl(AllocateArrayABI::kTypeArgumentsReg); // Type arguments.
__ CallRuntime(kAllocateArrayRuntimeEntry, 2);
- __ popl(EAX); // Pop element type argument.
- __ popl(EDX); // Pop array length argument (preserved).
- __ popl(EAX); // Pop return value from return slot.
+ __ popl(AllocateArrayABI::kTypeArgumentsReg); // Pop type arguments.
+ __ popl(AllocateArrayABI::kLengthReg); // Pop array length argument.
+ __ popl(AllocateArrayABI::kResultReg); // Pop return value from return slot.
// Write-barrier elimination might be enabled for this array (depending on the
// array length). To be sure we will check if the allocated object is in old
@@ -1439,9 +1453,9 @@
// Called for inline allocation of objects.
// Input parameters:
// ESP : points to return address.
-// kAllocationStubTypeArgumentsReg (EDX) : type arguments object
-// (only if class is parameterized).
-// Uses EAX, EBX, ECX, EDX, EDI as temporary registers.
+// AllocateObjectABI::kTypeArgumentsReg : type arguments object
+// (only if class is parameterized).
+// Uses AllocateObjectABI::kResultReg, EBX, ECX, EDI as temporary registers.
// Returns patch_code_pc offset where patching code for disabling the stub
// has been generated (similar to regularly generated Dart code).
void StubCodeCompiler::GenerateAllocationStubForClass(
@@ -1462,41 +1476,45 @@
const intptr_t instance_size = target::Class::GetInstanceSize(cls);
ASSERT(instance_size > 0);
- // EDX: instantiated type arguments (if is_cls_parameterized).
- static_assert(kAllocationStubTypeArgumentsReg == EDX,
- "Adjust register allocation in the AllocationStub");
-
+ // AllocateObjectABI::kTypeArgumentsReg: new object type arguments
+ // (if is_cls_parameterized).
if (!FLAG_use_slow_path && FLAG_inline_alloc &&
target::Heap::IsAllocatableInNewSpace(instance_size) &&
!target::Class::TraceAllocation(cls)) {
Label slow_case;
// Allocate the object and update top to point to
// next object start and initialize the allocated object.
- // EDX: instantiated type arguments (if is_cls_parameterized).
- __ movl(EAX, Address(THR, target::Thread::top_offset()));
- __ leal(EBX, Address(EAX, instance_size));
+ // AllocateObjectABI::kTypeArgumentsReg: new object type arguments
+ // (if is_cls_parameterized).
+ __ movl(AllocateObjectABI::kResultReg,
+ Address(THR, target::Thread::top_offset()));
+ __ leal(EBX, Address(AllocateObjectABI::kResultReg, instance_size));
// Check if the allocation fits into the remaining space.
- // EAX: potential new object start.
+ // AllocateObjectABI::kResultReg: potential new object start.
// EBX: potential next object start.
__ cmpl(EBX, Address(THR, target::Thread::end_offset()));
__ j(ABOVE_EQUAL, &slow_case);
__ movl(Address(THR, target::Thread::top_offset()), EBX);
- // EAX: new object start (untagged).
+ // AllocateObjectABI::kResultReg: new object start (untagged).
// EBX: next object start.
- // EDX: new object type arguments (if is_cls_parameterized).
+ // AllocateObjectABI::kTypeArgumentsReg: new object type arguments
+ // (if is_cls_parameterized).
// Set the tags.
ASSERT(target::Class::GetId(cls) != kIllegalCid);
uword tags = target::MakeTagWordForNewSpaceObject(target::Class::GetId(cls),
instance_size);
- __ movl(Address(EAX, target::Object::tags_offset()), Immediate(tags));
- __ addl(EAX, Immediate(kHeapObjectTag));
+ __ movl(
+ Address(AllocateObjectABI::kResultReg, target::Object::tags_offset()),
+ Immediate(tags));
+ __ addl(AllocateObjectABI::kResultReg, Immediate(kHeapObjectTag));
// Initialize the remaining words of the object.
- // EAX: new object (tagged).
+ // AllocateObjectABI::kResultReg: new object (tagged).
// EBX: next object start.
- // EDX: new object type arguments (if is_cls_parameterized).
+ // AllocateObjectABI::kTypeArgumentsReg: new object type arguments
+ // (if is_cls_parameterized).
// First try inlining the initialization without a loop.
if (instance_size < (kInlineInstanceSize * target::kWordSize)) {
// Check if the object contains any non-header fields.
@@ -1504,42 +1522,49 @@
for (intptr_t current_offset = target::Instance::first_field_offset();
current_offset < instance_size;
current_offset += target::kWordSize) {
- __ StoreIntoObjectNoBarrier(EAX, FieldAddress(EAX, current_offset),
- NullObject());
+ __ StoreIntoObjectNoBarrier(
+ AllocateObjectABI::kResultReg,
+ FieldAddress(AllocateObjectABI::kResultReg, current_offset),
+ NullObject());
}
} else {
- __ leal(ECX, FieldAddress(EAX, target::Instance::first_field_offset()));
+ __ leal(ECX, FieldAddress(AllocateObjectABI::kResultReg,
+ target::Instance::first_field_offset()));
// Loop until the whole object is initialized.
- // EAX: new object (tagged).
+ // AllocateObjectABI::kResultReg: new object (tagged).
// EBX: next object start.
// ECX: next word to be initialized.
- // EDX: new object type arguments (if is_cls_parameterized).
+ // AllocateObjectABI::kTypeArgumentsReg: new object type arguments
+ // (if is_cls_parameterized).
Label init_loop;
Label done;
__ Bind(&init_loop);
__ cmpl(ECX, EBX);
__ j(ABOVE_EQUAL, &done, Assembler::kNearJump);
- __ StoreIntoObjectNoBarrier(EAX, Address(ECX, 0), NullObject());
+ __ StoreIntoObjectNoBarrier(AllocateObjectABI::kResultReg,
+ Address(ECX, 0), NullObject());
__ addl(ECX, Immediate(target::kWordSize));
__ jmp(&init_loop, Assembler::kNearJump);
__ Bind(&done);
}
if (is_cls_parameterized) {
- // EAX: new object (tagged).
- // EDX: new object type arguments.
+ // AllocateObjectABI::kResultReg: new object (tagged).
+ // AllocateObjectABI::kTypeArgumentsReg: new object type arguments.
// Set the type arguments in the new object.
const intptr_t offset = target::Class::TypeArgumentsFieldOffset(cls);
- __ StoreIntoObjectNoBarrier(EAX, FieldAddress(EAX, offset),
- kAllocationStubTypeArgumentsReg);
+ __ StoreIntoObjectNoBarrier(
+ AllocateObjectABI::kResultReg,
+ FieldAddress(AllocateObjectABI::kResultReg, offset),
+ AllocateObjectABI::kTypeArgumentsReg);
}
// Done allocating and initializing the instance.
- // EAX: new object (tagged).
+ // AllocateObjectABI::kResultReg: new object (tagged).
__ ret();
__ Bind(&slow_case);
}
// If is_cls_parameterized:
- // EDX: new object type arguments.
+ // AllocateObjectABI::kTypeArgumentsReg: new object type arguments.
// Create a stub frame as we are pushing some objects on the stack before
// calling into the runtime.
__ EnterStubFrame();
@@ -1548,14 +1573,14 @@
CastHandle<Object>(cls)); // Push class of object to be allocated.
if (is_cls_parameterized) {
// Push type arguments of object to be allocated.
- __ pushl(kAllocationStubTypeArgumentsReg);
+ __ pushl(AllocateObjectABI::kTypeArgumentsReg);
} else {
__ pushl(raw_null); // Push null type arguments.
}
__ CallRuntime(kAllocateObjectRuntimeEntry, 2); // Allocate object.
- __ popl(EAX); // Pop argument (type arguments of object).
- __ popl(EAX); // Pop argument (class of object).
- __ popl(EAX); // Pop result (newly allocated object).
+ __ popl(AllocateObjectABI::kResultReg); // Drop type arguments.
+ __ popl(AllocateObjectABI::kResultReg); // Drop class.
+ __ popl(AllocateObjectABI::kResultReg); // Pop allocated object.
if (AllocateObjectInstr::WillAllocateNewOrRemembered(cls)) {
// Write-barrier elimination is enabled for [cls] and we therefore need to
@@ -1563,7 +1588,7 @@
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
}
- // EAX: new object
+ // AllocateObjectABI::kResultReg: new object
// Restore the frame pointer.
__ LeaveFrame();
__ ret();
diff --git a/runtime/vm/compiler/stub_code_compiler_x64.cc b/runtime/vm/compiler/stub_code_compiler_x64.cc
index 138d7ce..b65cc5e 100644
--- a/runtime/vm/compiler/stub_code_compiler_x64.cc
+++ b/runtime/vm/compiler/stub_code_compiler_x64.cc
@@ -1048,11 +1048,13 @@
}
// Called for inline allocation of arrays.
-// Input parameters:
-// R10 : Array length as Smi.
-// RBX : array element type (either NULL or an instantiated type).
-// NOTE: R10 cannot be clobbered here as the caller relies on it being saved.
-// The newly allocated object is returned in RAX.
+// Input registers (preserved):
+// AllocateArrayABI::kLengthReg: array length as Smi.
+// AllocateArrayABI::kTypeArgumentsReg: type arguments of array.
+// Output registers:
+// AllocateArrayABI::kResultReg: newly allocated array.
+// Clobbered:
+// RCX, RDI, R12
void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@@ -1060,7 +1062,7 @@
// and is computed as:
// RoundedAllocationSize(
// (array_length * target::kwordSize) + target::Array::header_size()).
- __ movq(RDI, R10); // Array Length.
+ __ movq(RDI, AllocateArrayABI::kLengthReg); // Array Length.
// Check that length is Smi.
__ testq(RDI, Immediate(kSmiTagMask));
__ j(NOT_ZERO, &slow_case);
@@ -1084,15 +1086,16 @@
__ andq(RDI, Immediate(-target::ObjectAlignment::kObjectAlignment));
const intptr_t cid = kArrayCid;
- __ movq(RAX, Address(THR, target::Thread::top_offset()));
+ __ movq(AllocateArrayABI::kResultReg,
+ Address(THR, target::Thread::top_offset()));
// RDI: allocation size.
- __ movq(RCX, RAX);
+ __ movq(RCX, AllocateArrayABI::kResultReg);
__ addq(RCX, RDI);
__ j(CARRY, &slow_case);
// Check if the allocation fits into the remaining space.
- // RAX: potential new object start.
+ // AllocateArrayABI::kResultReg: potential new object start.
// RCX: potential next object start.
// RDI: allocation size.
__ cmpq(RCX, Address(THR, target::Thread::end_offset()));
@@ -1101,10 +1104,10 @@
// Successfully allocated the object(s), now update top to point to
// next object start and initialize the object.
__ movq(Address(THR, target::Thread::top_offset()), RCX);
- __ addq(RAX, Immediate(kHeapObjectTag));
+ __ addq(AllocateArrayABI::kResultReg, Immediate(kHeapObjectTag));
// Initialize the tags.
- // RAX: new object start as a tagged pointer.
+ // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
// RDI: allocation size.
{
Label size_tag_overflow, done;
@@ -1124,23 +1127,29 @@
__ movq(FieldAddress(RAX, target::Array::tags_offset()), RDI); // Tags.
}
- // RAX: new object start as a tagged pointer.
+ // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
// Store the type argument field.
// No generational barrier needed, since we store into a new object.
__ StoreIntoObjectNoBarrier(
- RAX, FieldAddress(RAX, target::Array::type_arguments_offset()), RBX);
+ AllocateArrayABI::kResultReg,
+ FieldAddress(AllocateArrayABI::kResultReg,
+ target::Array::type_arguments_offset()),
+ AllocateArrayABI::kTypeArgumentsReg);
// Set the length field.
- __ StoreIntoObjectNoBarrier(
- RAX, FieldAddress(RAX, target::Array::length_offset()), R10);
+ __ StoreIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
+ FieldAddress(AllocateArrayABI::kResultReg,
+ target::Array::length_offset()),
+ AllocateArrayABI::kLengthReg);
// Initialize all array elements to raw_null.
- // RAX: new object start as a tagged pointer.
+ // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
// RCX: new object end address.
// RDI: iterator which initially points to the start of the variable
// data area to be initialized.
__ LoadObject(R12, NullObject());
- __ leaq(RDI, FieldAddress(RAX, target::Array::header_size()));
+ __ leaq(RDI, FieldAddress(AllocateArrayABI::kResultReg,
+ target::Array::header_size()));
Label done;
Label init_loop;
__ Bind(&init_loop);
@@ -1152,11 +1161,12 @@
#endif // DEBUG
__ j(ABOVE_EQUAL, &done, kJumpLength);
// No generational barrier needed, since we are storing null.
- __ StoreIntoObjectNoBarrier(RAX, Address(RDI, 0), R12);
+ __ StoreIntoObjectNoBarrier(AllocateArrayABI::kResultReg, Address(RDI, 0),
+ R12);
__ addq(RDI, Immediate(target::kWordSize));
__ jmp(&init_loop, kJumpLength);
__ Bind(&done);
- __ ret(); // returns the newly allocated object in RAX.
+ __ ret();
// Unable to allocate the array using the fast inline code, just call
// into the runtime.
@@ -1165,14 +1175,13 @@
// Create a stub frame as we are pushing some objects on the stack before
// calling into the runtime.
__ EnterStubFrame();
- // Setup space on stack for return value.
- __ pushq(Immediate(0));
- __ pushq(R10); // Array length as Smi.
- __ pushq(RBX); // Element type.
+ __ pushq(Immediate(0)); // Space for return value.
+ __ pushq(AllocateArrayABI::kLengthReg); // Array length as Smi.
+ __ pushq(AllocateArrayABI::kTypeArgumentsReg); // Element type.
__ CallRuntime(kAllocateArrayRuntimeEntry, 2);
- __ popq(RAX); // Pop element type argument.
- __ popq(R10); // Pop array length argument.
- __ popq(RAX); // Pop return value from return slot.
+ __ popq(AllocateArrayABI::kTypeArgumentsReg); // Pop element type argument.
+ __ popq(AllocateArrayABI::kLengthReg); // Pop array length argument.
+ __ popq(AllocateArrayABI::kResultReg); // Pop allocated object.
// Write-barrier elimination might be enabled for this array (depending on the
// array length). To be sure we will check if the allocated object is in old
@@ -1781,7 +1790,6 @@
static void GenerateAllocateObjectHelper(Assembler* assembler,
bool is_cls_parameterized) {
// Note: Keep in sync with calling function.
- // kAllocationStubTypeArgumentsReg = RDX
const Register kTagsReg = R8;
{
@@ -1795,8 +1803,10 @@
__ ExtractInstanceSizeFromTags(kInstanceSizeReg, kTagsReg);
- __ movq(RAX, Address(THR, target::Thread::top_offset()));
- __ leaq(kNewTopReg, Address(RAX, kInstanceSizeReg, TIMES_1, 0));
+ __ movq(AllocateObjectABI::kResultReg,
+ Address(THR, target::Thread::top_offset()));
+ __ leaq(kNewTopReg, Address(AllocateObjectABI::kResultReg,
+ kInstanceSizeReg, TIMES_1, 0));
// Check if the allocation fits into the remaining space.
__ cmpq(kNewTopReg, Address(THR, target::Thread::end_offset()));
__ j(ABOVE_EQUAL, &slow_case);
@@ -1806,15 +1816,18 @@
// Set the tags.
// 64 bit store also zeros the identity hash field.
- __ movq(Address(RAX, target::Object::tags_offset()), kTagsReg);
+ __ movq(
+ Address(AllocateObjectABI::kResultReg, target::Object::tags_offset()),
+ kTagsReg);
- __ addq(RAX, Immediate(kHeapObjectTag));
+ __ addq(AllocateObjectABI::kResultReg, Immediate(kHeapObjectTag));
// Initialize the remaining words of the object.
{
const Register kNextFieldReg = RDI;
__ leaq(kNextFieldReg,
- FieldAddress(RAX, target::Instance::first_field_offset()));
+ FieldAddress(AllocateObjectABI::kResultReg,
+ target::Instance::first_field_offset()));
const Register kNullReg = R10;
__ LoadObject(kNullReg, NullObject());
@@ -1830,7 +1843,8 @@
static auto const kJumpLength = Assembler::kNearJump;
#endif // DEBUG
__ j(ABOVE_EQUAL, &done, kJumpLength);
- __ StoreIntoObjectNoBarrier(RAX, Address(kNextFieldReg, 0), kNullReg);
+ __ StoreIntoObjectNoBarrier(AllocateObjectABI::kResultReg,
+ Address(kNextFieldReg, 0), kNullReg);
__ addq(kNextFieldReg, Immediate(target::kWordSize));
__ jmp(&init_loop, Assembler::kNearJump);
__ Bind(&done);
@@ -1853,8 +1867,10 @@
host_type_arguments_field_offset_in_words_offset()));
// Set the type arguments in the new object.
- __ StoreIntoObject(RAX, FieldAddress(RAX, kTypeOffsetReg, TIMES_8, 0),
- kAllocationStubTypeArgumentsReg);
+ __ StoreIntoObject(AllocateObjectABI::kResultReg,
+ FieldAddress(AllocateObjectABI::kResultReg,
+ kTypeOffsetReg, TIMES_8, 0),
+ AllocateObjectABI::kTypeArgumentsReg);
__ Bind(¬_parameterized_case);
} // kTypeOffsetReg = RDI;
@@ -1866,7 +1882,7 @@
// Fall back on slow case:
if (!is_cls_parameterized) {
- __ LoadObject(kAllocationStubTypeArgumentsReg, NullObject());
+ __ LoadObject(AllocateObjectABI::kTypeArgumentsReg, NullObject());
}
// Tail call to generic allocation stub.
__ jmp(
@@ -1884,8 +1900,6 @@
}
void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
- // Note: Keep in sync with calling stub.
- // kAllocationStubTypeArgumentsReg = RDX
const Register kTagsToClsIdReg = R8;
if (!FLAG_use_bare_instructions) {
@@ -1900,27 +1914,27 @@
__ EnterStubFrame();
// Setup space on stack for return value.
- __ LoadObject(RAX, NullObject());
- __ pushq(RAX);
+ __ LoadObject(AllocateObjectABI::kResultReg, NullObject());
+ __ pushq(AllocateObjectABI::kResultReg);
// Push class of object to be allocated.
- __ LoadClassById(RAX, kTagsToClsIdReg);
- __ pushq(RAX);
+ __ LoadClassById(AllocateObjectABI::kResultReg, kTagsToClsIdReg);
+ __ pushq(AllocateObjectABI::kResultReg);
// Must be Object::null() if non-parameterized class.
- __ pushq(kAllocationStubTypeArgumentsReg);
+ __ pushq(AllocateObjectABI::kTypeArgumentsReg);
__ CallRuntime(kAllocateObjectRuntimeEntry, 2);
- __ popq(RAX); // Pop argument (type arguments of object).
- __ popq(RAX); // Pop argument (class of object).
- __ popq(RAX); // Pop result (newly allocated object).
+ __ popq(AllocateObjectABI::kResultReg); // Drop type arguments.
+ __ popq(AllocateObjectABI::kResultReg); // Drop class.
+ __ popq(AllocateObjectABI::kResultReg); // Pop newly allocated object.
// Write-barrier elimination is enabled for [cls] and we therefore need to
// ensure that the object is in new-space or has remembered bit set.
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
- // RAX: new object
+ // AllocateObjectABI::kResultReg: new object
// Restore the frame pointer.
__ LeaveStubFrame();
@@ -1934,9 +1948,6 @@
const Class& cls,
const Code& allocate_object,
const Code& allocat_object_parametrized) {
- static_assert(kAllocationStubTypeArgumentsReg == RDX,
- "Adjust register allocation in the AllocationStub");
-
classid_t cls_id = target::Class::GetId(cls);
ASSERT(cls_id != kIllegalCid);
@@ -1958,8 +1969,6 @@
const uword tags =
target::MakeTagWordForNewSpaceObject(cls_id, instance_size);
- // Note: Keep in sync with helper function.
- // kAllocationStubTypeArgumentsReg = RDX
const Register kTagsReg = R8;
__ movq(kTagsReg, Immediate(tags));
@@ -1995,7 +2004,7 @@
}
} else {
if (!is_cls_parameterized) {
- __ LoadObject(kAllocationStubTypeArgumentsReg, NullObject());
+ __ LoadObject(AllocateObjectABI::kTypeArgumentsReg, NullObject());
}
__ jmp(Address(THR,
target::Thread::allocate_object_slow_entry_point_offset()));
diff --git a/runtime/vm/constants_arm.h b/runtime/vm/constants_arm.h
index 5ef978d..8313e51 100644
--- a/runtime/vm/constants_arm.h
+++ b/runtime/vm/constants_arm.h
@@ -320,9 +320,6 @@
const Register kWriteBarrierValueReg = R0;
const Register kWriteBarrierSlotReg = R9;
-// ABI for allocation stubs.
-const Register kAllocationStubTypeArgumentsReg = R3;
-
// Common ABI for shared slow path stubs.
struct SharedSlowPathStubABI {
static const Register kResultReg = R0;
@@ -451,29 +448,42 @@
static const Register kIndexReg = R1;
};
+// ABI for AllocateObjectStub.
+struct AllocateObjectABI {
+ static const Register kResultReg = R0;
+ static const Register kTypeArgumentsReg = R3;
+};
+
// ABI for AllocateClosureStub.
struct AllocateClosureABI {
- static const Register kResultReg = R0;
+ static const Register kResultReg = AllocateObjectABI::kResultReg;
static const Register kFunctionReg = R1;
static const Register kScratchReg = R4;
};
// ABI for AllocateMintShared*Stub.
struct AllocateMintABI {
- static const Register kResultReg = R0;
+ static const Register kResultReg = AllocateObjectABI::kResultReg;
static const Register kTempReg = R1;
};
// ABI for Allocate{Mint,Double,Float32x4,Float64x2}Stub.
struct AllocateBoxABI {
- static const Register kResultReg = R0;
+ static const Register kResultReg = AllocateObjectABI::kResultReg;
static const Register kTempReg = R1;
};
-// ABI for Allocate<TypedData>ArrayStub.
+// ABI for AllocateArrayStub.
+struct AllocateArrayABI {
+ static const Register kResultReg = AllocateObjectABI::kResultReg;
+ static const Register kLengthReg = R2;
+ static const Register kTypeArgumentsReg = R1;
+};
+
+// ABI for AllocateTypedDataArrayStub.
struct AllocateTypedDataArrayABI {
+ static const Register kResultReg = AllocateObjectABI::kResultReg;
static const Register kLengthReg = R4;
- static const Register kResultReg = R0;
};
// ABI for DispatchTableNullErrorStub and consequently for all dispatch
diff --git a/runtime/vm/constants_arm64.h b/runtime/vm/constants_arm64.h
index 69f5cf0..53d827a 100644
--- a/runtime/vm/constants_arm64.h
+++ b/runtime/vm/constants_arm64.h
@@ -153,9 +153,6 @@
const Register kWriteBarrierValueReg = R0;
const Register kWriteBarrierSlotReg = R25;
-// ABI for allocation stubs.
-const Register kAllocationStubTypeArgumentsReg = R1;
-
// Common ABI for shared slow path stubs.
struct SharedSlowPathStubABI {
static const Register kResultReg = R0;
@@ -291,29 +288,42 @@
static const Register kIndexReg = R1;
};
+// ABI for AllocateObjectStub.
+struct AllocateObjectABI {
+ static const Register kResultReg = R0;
+ static const Register kTypeArgumentsReg = R1;
+};
+
// ABI for AllocateClosureStub.
struct AllocateClosureABI {
- static const Register kResultReg = R0;
+ static const Register kResultReg = AllocateObjectABI::kResultReg;
static const Register kFunctionReg = R1;
static const Register kScratchReg = R4;
};
// ABI for AllocateMintShared*Stub.
struct AllocateMintABI {
- static const Register kResultReg = R0;
+ static const Register kResultReg = AllocateObjectABI::kResultReg;
static const Register kTempReg = R1;
};
// ABI for Allocate{Mint,Double,Float32x4,Float64x2}Stub.
struct AllocateBoxABI {
- static const Register kResultReg = R0;
+ static const Register kResultReg = AllocateObjectABI::kResultReg;
static const Register kTempReg = R1;
};
-// ABI for Allocate<TypedData>ArrayStub.
+// ABI for AllocateArrayStub.
+struct AllocateArrayABI {
+ static const Register kResultReg = AllocateObjectABI::kResultReg;
+ static const Register kLengthReg = R2;
+ static const Register kTypeArgumentsReg = R1;
+};
+
+// ABI for AllocateTypedDataArrayStub.
struct AllocateTypedDataArrayABI {
+ static const Register kResultReg = AllocateObjectABI::kResultReg;
static const Register kLengthReg = R4;
- static const Register kResultReg = R0;
};
// ABI for DispatchTableNullErrorStub and consequently for all dispatch
diff --git a/runtime/vm/constants_ia32.h b/runtime/vm/constants_ia32.h
index 3d36faf..9216fcc 100644
--- a/runtime/vm/constants_ia32.h
+++ b/runtime/vm/constants_ia32.h
@@ -88,9 +88,6 @@
const Register kWriteBarrierValueReg = kNoRegister;
const Register kWriteBarrierSlotReg = EDI;
-// ABI for allocation stubs.
-const Register kAllocationStubTypeArgumentsReg = EDX;
-
// Common ABI for shared slow path stubs.
struct SharedSlowPathStubABI {
static const Register kResultReg = EAX;
@@ -196,23 +193,36 @@
static const Register kIndexReg = EBX;
};
+// ABI for AllocateObjectStub.
+struct AllocateObjectABI {
+ static const Register kResultReg = EAX;
+ static const Register kTypeArgumentsReg = EDX;
+};
+
// ABI for Allocate{Mint,Double,Float32x4,Float64x2}Stub.
struct AllocateBoxABI {
- static const Register kResultReg = EAX;
+ static const Register kResultReg = AllocateObjectABI::kResultReg;
static const Register kTempReg = EBX;
};
// ABI for AllocateClosureStub.
struct AllocateClosureABI {
- static const Register kResultReg = EAX;
+ static const Register kResultReg = AllocateObjectABI::kResultReg;
static const Register kFunctionReg = EBX;
static const Register kScratchReg = EDX;
};
-// ABI for Allocate<TypedData>ArrayStub.
+// ABI for AllocateArrayStub.
+struct AllocateArrayABI {
+ static const Register kResultReg = AllocateObjectABI::kResultReg;
+ static const Register kLengthReg = EDX;
+ static const Register kTypeArgumentsReg = ECX;
+};
+
+// ABI for AllocateTypedDataArrayStub.
struct AllocateTypedDataArrayABI {
- static const Register kLengthReg = EAX;
- static const Register kResultReg = EAX;
+ static const Register kResultReg = AllocateObjectABI::kResultReg;
+ static const Register kLengthReg = kResultReg;
};
// ABI for DispatchTableNullErrorStub and consequently for all dispatch
diff --git a/runtime/vm/constants_x64.h b/runtime/vm/constants_x64.h
index 953c2ce..dc02fa4 100644
--- a/runtime/vm/constants_x64.h
+++ b/runtime/vm/constants_x64.h
@@ -129,9 +129,6 @@
const Register kWriteBarrierValueReg = RAX;
const Register kWriteBarrierSlotReg = R13;
-// ABI for allocation stubs.
-const Register kAllocationStubTypeArgumentsReg = RDX;
-
// Common ABI for shared slow path stubs.
struct SharedSlowPathStubABI {
static const Register kResultReg = RAX;
@@ -263,29 +260,42 @@
static const Register kIndexReg = RBX;
};
+// ABI for AllocateObjectStub.
+struct AllocateObjectABI {
+ static const Register kResultReg = RAX;
+ static const Register kTypeArgumentsReg = RDX;
+};
+
// ABI for AllocateClosureStub.
struct AllocateClosureABI {
- static const Register kResultReg = RAX;
+ static const Register kResultReg = AllocateObjectABI::kResultReg;
static const Register kFunctionReg = RBX;
static const Register kScratchReg = R13;
};
// ABI for AllocateMintShared*Stub.
struct AllocateMintABI {
- static const Register kResultReg = RAX;
+ static const Register kResultReg = AllocateObjectABI::kResultReg;
static const Register kTempReg = RBX;
};
// ABI for Allocate{Mint,Double,Float32x4,Float64x2}Stub.
struct AllocateBoxABI {
- static const Register kResultReg = RAX;
+ static const Register kResultReg = AllocateObjectABI::kResultReg;
static const Register kTempReg = RBX;
};
-// ABI for Allocate<TypedData>ArrayStub.
+// ABI for AllocateArrayStub.
+struct AllocateArrayABI {
+ static const Register kResultReg = AllocateObjectABI::kResultReg;
+ static const Register kLengthReg = R10;
+ static const Register kTypeArgumentsReg = RBX;
+};
+
+// ABI for AllocateTypedDataArrayStub.
struct AllocateTypedDataArrayABI {
- static const Register kLengthReg = RAX;
- static const Register kResultReg = RAX;
+ static const Register kResultReg = AllocateObjectABI::kResultReg;
+ static const Register kLengthReg = kResultReg;
};
// ABI for DispatchTableNullErrorStub and consequently for all dispatch
diff --git a/runtime/vm/kernel_loader.cc b/runtime/vm/kernel_loader.cc
index 47f0205..c9f1b29 100644
--- a/runtime/vm/kernel_loader.cc
+++ b/runtime/vm/kernel_loader.cc
@@ -699,13 +699,16 @@
}
}
- // Sets the constants array to an empty hash and leaves the constant
- // table's raw bytes in place for lazy reading. We can fix up all
- // "pending" processing now, and must ensure we don't create new
- // ones from this point on.
+ // Sets the constants array to an empty array with the length equal to
+ // the number of constants. The array gets filled lazily while reading
+ // constants.
ASSERT(kernel_program_info_.constants_table() != ExternalTypedData::null());
- const Array& array =
- Array::Handle(Z, HashTables::New<KernelConstantsMap>(16, Heap::kOld));
+ ConstantReader constant_reader(&helper_, &active_class_);
+ const intptr_t num_consts = constant_reader.NumConstants();
+ const Array& array = Array::Handle(Z, Array::New(num_consts, Heap::kOld));
+ for (intptr_t i = 0; i < num_consts; i++) {
+ array.SetAt(i, Object::sentinel());
+ }
kernel_program_info_.set_constants(array);
H.SetConstants(array); // for caching
AnnotateNativeProcedures();
diff --git a/tests/ffi/regress_46085_test.dart b/tests/ffi/regress_46085_test.dart
new file mode 100644
index 0000000..5f681d6
--- /dev/null
+++ b/tests/ffi/regress_46085_test.dart
@@ -0,0 +1,19 @@
+// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+import "dart:ffi";
+
+class MyStruct extends Struct {
+ external Pointer<Int8> notEmpty;
+
+ @Array.multi([]) //# 01: compile-time error
+ external Array<Int16> a0; //# 01: compile-time error
+
+ @Array.multi([1]) //# 02: compile-time error
+ external Array<Array<Int16>> a1; //# 02: compile-time error
+}
+
+void main() {
+ MyStruct? ms = null;
+}
diff --git a/tests/ffi_2/regress_46085_test.dart b/tests/ffi_2/regress_46085_test.dart
new file mode 100644
index 0000000..0f90373
--- /dev/null
+++ b/tests/ffi_2/regress_46085_test.dart
@@ -0,0 +1,19 @@
+// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+import "dart:ffi";
+
+class MyStruct extends Struct {
+ Pointer<Int8> notEmpty;
+
+ @Array.multi([]) //# 01: compile-time error
+ Array<Int16> a0; //# 01: compile-time error
+
+ @Array.multi([1]) //# 02: compile-time error
+ Array<Array<Int16>> a1; //# 02: compile-time error
+}
+
+void main() {
+ MyStruct ms = null;
+}
diff --git a/tools/VERSION b/tools/VERSION
index 245eac3..9e0a3a9 100644
--- a/tools/VERSION
+++ b/tools/VERSION
@@ -27,5 +27,5 @@
MAJOR 2
MINOR 14
PATCH 0
-PRERELEASE 155
+PRERELEASE 156
PRERELEASE_PATCH 0
\ No newline at end of file