Version 2.11.0-192.0.dev
Merge commit '1df5c58d290e4a580d089afb97eaa4a736914c63' into 'dev'
diff --git a/DEPS b/DEPS
index 6c3060c..88be3b3 100644
--- a/DEPS
+++ b/DEPS
@@ -97,7 +97,7 @@
"dart_style_tag": "1.3.7", # Please see the note above before updating.
"chromedriver_tag": "83.0.4103.39",
- "dartdoc_rev" : "2bef0f260594b822f55c8c8f777d9c4c1ea8f76c",
+ "dartdoc_rev" : "v0.35.0",
"ffi_rev": "454ab0f9ea6bd06942a983238d8a6818b1357edb",
"fixnum_rev": "16d3890c6dc82ca629659da1934e412292508bba",
"glob_rev": "e9f4e6b7ae8abe5071461cf8f47191bb19cf7ef6",
diff --git a/pkg/analyzer/lib/src/error/best_practices_verifier.dart b/pkg/analyzer/lib/src/error/best_practices_verifier.dart
index 8517699..66e0d5c 100644
--- a/pkg/analyzer/lib/src/error/best_practices_verifier.dart
+++ b/pkg/analyzer/lib/src/error/best_practices_verifier.dart
@@ -23,6 +23,7 @@
import 'package:analyzer/src/dart/resolver/exit_detector.dart';
import 'package:analyzer/src/dart/resolver/scope.dart';
import 'package:analyzer/src/error/codes.dart';
+import 'package:analyzer/src/error/must_call_super_verifier.dart';
import 'package:analyzer/src/generated/constant.dart';
import 'package:analyzer/src/generated/engine.dart';
import 'package:analyzer/src/generated/resolver.dart';
@@ -69,6 +70,8 @@
final _InvalidAccessVerifier _invalidAccessVerifier;
+ final MustCallSuperVerifier _mustCallSuperVerifier;
+
/// The [WorkspacePackage] in which [_currentLibrary] is declared.
final WorkspacePackage _workspacePackage;
@@ -103,6 +106,7 @@
_inheritanceManager = inheritanceManager,
_invalidAccessVerifier = _InvalidAccessVerifier(
_errorReporter, _currentLibrary, workspacePackage),
+ _mustCallSuperVerifier = MustCallSuperVerifier(_errorReporter),
_workspacePackage = workspacePackage {
_inDeprecatedMember = _currentLibrary.hasDeprecated;
_inDoNotStoreMember = _currentLibrary.hasDoNotStore;
@@ -589,6 +593,7 @@
// This was determined to not be a good hint, see: dartbug.com/16029
//checkForOverridingPrivateMember(node);
_checkForMissingReturn(node.returnType, node.body, element, node);
+ _mustCallSuperVerifier.checkMethodDeclaration(node);
_checkForUnnecessaryNoSuchMethod(node);
if (!node.isSetter && !elementIsOverride()) {
diff --git a/pkg/analyzer/lib/src/error/must_call_super_verifier.dart b/pkg/analyzer/lib/src/error/must_call_super_verifier.dart
new file mode 100644
index 0000000..f3a5942
--- /dev/null
+++ b/pkg/analyzer/lib/src/error/must_call_super_verifier.dart
@@ -0,0 +1,125 @@
+// Copyright (c) 2020, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+import 'dart:collection';
+
+import 'package:analyzer/dart/ast/ast.dart';
+import 'package:analyzer/dart/ast/visitor.dart';
+import 'package:analyzer/dart/element/element.dart';
+import 'package:analyzer/error/listener.dart';
+import 'package:analyzer/src/dart/error/hint_codes.dart';
+
+class MustCallSuperVerifier {
+ final ErrorReporter _errorReporter;
+
+ MustCallSuperVerifier(this._errorReporter);
+
+ void checkMethodDeclaration(MethodDeclaration node) {
+ if (node.isStatic || node.isAbstract) {
+ return;
+ }
+ ExecutableElement overridden =
+ _findOverriddenMemberWithMustCallSuper(node.declaredElement);
+ if (overridden != null && _hasConcreteSuperMethod(node.declaredElement)) {
+ _SuperCallVerifier verifier = _SuperCallVerifier(overridden.name);
+ node.accept(verifier);
+ if (!verifier.superIsCalled) {
+ _errorReporter.reportErrorForNode(HintCode.MUST_CALL_SUPER, node.name,
+ [overridden.enclosingElement.name]);
+ }
+ }
+ }
+
+  /// Find a method which is overridden by [element] and which is annotated
+ /// `@mustCallSuper`.
+ ///
+ /// As per the definition of `mustCallSuper` [1], every method which overrides
+ /// a method annotated with `@mustCallSuper` is implicitly annotated with
+ /// `@mustCallSuper`.
+ ///
+ /// [1]: https://pub.dev/documentation/meta/latest/meta/mustCallSuper-constant.html
+ ExecutableElement _findOverriddenMemberWithMustCallSuper(
+ ExecutableElement element) {
+ //Element member = node.declaredElement;
+ if (element.enclosingElement is! ClassElement) {
+ return null;
+ }
+ ClassElement classElement = element.enclosingElement;
+ String name = element.name;
+
+ // Walk up the type hierarchy from [classElement], ignoring direct
+ // interfaces.
+ Queue<ClassElement> superclasses =
+ Queue.of(classElement.mixins.map((i) => i.element))
+ ..addAll(classElement.superclassConstraints.map((i) => i.element))
+ ..add(classElement.supertype?.element);
+ var visitedClasses = <ClassElement>{};
+ while (superclasses.isNotEmpty) {
+ ClassElement ancestor = superclasses.removeFirst();
+ if (ancestor == null || !visitedClasses.add(ancestor)) {
+ continue;
+ }
+ ExecutableElement member = ancestor.getMethod(name) ??
+ ancestor.getGetter(name) ??
+ ancestor.getSetter(name);
+ if (member is MethodElement && member.hasMustCallSuper) {
+ return member;
+ }
+ superclasses
+ ..addAll(ancestor.mixins.map((i) => i.element))
+ ..addAll(ancestor.superclassConstraints.map((i) => i.element))
+ ..add(ancestor.supertype?.element);
+ }
+ return null;
+ }
+
+  /// Returns whether [element] overrides a concrete method.
+ bool _hasConcreteSuperMethod(MethodElement element) {
+ ClassElement classElement = element.enclosingElement;
+ String name = element.name;
+
+ bool isConcrete(ClassElement element) =>
+ element.lookUpConcreteMethod(name, element.library) != null;
+
+ if (classElement.mixins.map((i) => i.element).any(isConcrete)) {
+ return true;
+ }
+ if (classElement.superclassConstraints
+ .map((i) => i.element)
+ .any(isConcrete)) {
+ return true;
+ }
+ if (classElement.supertype != null &&
+ isConcrete(classElement.supertype.element)) {
+ return true;
+ }
+
+ return false;
+ }
+}
+
+/// Recursively visits an AST, looking for method invocations.
+class _SuperCallVerifier extends RecursiveAstVisitor<void> {
+ bool superIsCalled = false;
+
+ final String name;
+
+ _SuperCallVerifier(this.name);
+
+ @override
+ void visitBinaryExpression(BinaryExpression node) {
+ if (node.leftOperand is SuperExpression && node.operator.lexeme == name) {
+ superIsCalled = true;
+ }
+ super.visitBinaryExpression(node);
+ }
+
+ @override
+ void visitMethodInvocation(MethodInvocation node) {
+ if (node.target is SuperExpression && node.methodName.name == name) {
+ superIsCalled = true;
+ }
+ super.visitMethodInvocation(node);
+ }
+}
diff --git a/pkg/analyzer/lib/src/generated/error_verifier.dart b/pkg/analyzer/lib/src/generated/error_verifier.dart
index bb5328a..81e2c6a 100644
--- a/pkg/analyzer/lib/src/generated/error_verifier.dart
+++ b/pkg/analyzer/lib/src/generated/error_verifier.dart
@@ -928,7 +928,6 @@
_checkForTypeAnnotationDeferredClass(returnType);
_returnTypeVerifier.verifyReturnType(returnType);
_checkForImplicitDynamicReturn(node, node.declaredElement);
- _checkForMustCallSuper(node);
_checkForWrongTypeParameterVarianceInMethod(node);
super.visitMethodDeclaration(node);
});
@@ -3451,21 +3450,6 @@
}
}
- void _checkForMustCallSuper(MethodDeclaration node) {
- if (node.isStatic || node.isAbstract) {
- return;
- }
- MethodElement element = _findOverriddenMemberThatMustCallSuper(node);
- if (element != null && _hasConcreteSuperMethod(node)) {
- _InvocationCollector collector = _InvocationCollector();
- node.accept(collector);
- if (!collector.superCalls.contains(element.name)) {
- _errorReporter.reportErrorForNode(HintCode.MUST_CALL_SUPER, node.name,
- [element.enclosingElement.name]);
- }
- }
- }
-
/// Checks to ensure that the given native function [body] is in SDK code.
///
/// See [ParserErrorCode.NATIVE_FUNCTION_BODY_IN_NON_SDK_CODE].
@@ -5117,47 +5101,6 @@
return result;
}
- /// Find a method which is overridden by [node] and which is annotated with
- /// `@mustCallSuper`.
- ///
- /// As per the definition of `mustCallSuper` [1], every method which overrides
- /// a method annotated with `@mustCallSuper` is implicitly annotated with
- /// `@mustCallSuper`.
- ///
- /// [1] https://pub.dev/documentation/meta/latest/meta/mustCallSuper-constant.html
- MethodElement _findOverriddenMemberThatMustCallSuper(MethodDeclaration node) {
- Element member = node.declaredElement;
- if (member.enclosingElement is! ClassElement) {
- return null;
- }
- ClassElement classElement = member.enclosingElement;
- String name = member.name;
-
- // Walk up the type hierarchy from [classElement], ignoring direct interfaces.
- Queue<ClassElement> superclasses =
- Queue.of(classElement.mixins.map((i) => i.element))
- ..addAll(classElement.superclassConstraints.map((i) => i.element))
- ..add(classElement.supertype?.element);
- Set<ClassElement> visitedClasses = <ClassElement>{};
- while (superclasses.isNotEmpty) {
- ClassElement ancestor = superclasses.removeFirst();
- if (ancestor == null || !visitedClasses.add(ancestor)) {
- continue;
- }
- ExecutableElement member = ancestor.getMethod(name) ??
- ancestor.getGetter(name) ??
- ancestor.getSetter(name);
- if (member is MethodElement && member.hasMustCallSuper) {
- return member;
- }
- superclasses
- ..addAll(ancestor.mixins.map((i) => i.element))
- ..addAll(ancestor.superclassConstraints.map((i) => i.element))
- ..add(ancestor.supertype?.element);
- }
- return null;
- }
-
/// Given an [expression] in a switch case whose value is expected to be an
/// enum constant, return the name of the constant.
String _getConstantName(Expression expression) {
@@ -5242,21 +5185,6 @@
return buffer.toString();
}
- /// Returns whether [node] overrides a concrete method.
- bool _hasConcreteSuperMethod(MethodDeclaration node) {
- ClassElement classElement = node.declaredElement.enclosingElement;
- String name = node.declaredElement.name;
-
- Queue<ClassElement> superclasses =
- Queue.of(classElement.mixins.map((i) => i.element))
- ..addAll(classElement.superclassConstraints.map((i) => i.element));
- if (classElement.supertype != null) {
- superclasses.add(classElement.supertype.element);
- }
- return superclasses.any(
- (parent) => parent.lookUpConcreteMethod(name, parent.library) != null);
- }
-
/// Return `true` if the given [constructor] redirects to itself, directly or
/// indirectly.
bool _hasRedirectingFactoryConstructorCycle(ConstructorElement constructor) {
@@ -5443,27 +5371,6 @@
}
}
-/// Recursively visits an AST, looking for method invocations.
-class _InvocationCollector extends RecursiveAstVisitor<void> {
- final List<String> superCalls = <String>[];
-
- @override
- void visitBinaryExpression(BinaryExpression node) {
- if (node.leftOperand is SuperExpression) {
- superCalls.add(node.operator.lexeme);
- }
- super.visitBinaryExpression(node);
- }
-
- @override
- void visitMethodInvocation(MethodInvocation node) {
- if (node.target is SuperExpression) {
- superCalls.add(node.methodName.name);
- }
- super.visitMethodInvocation(node);
- }
-}
-
/// Recursively visits a type annotation, looking uninstantiated bounds.
class _UninstantiatedBoundChecker extends RecursiveAstVisitor<void> {
final ErrorReporter _errorReporter;
diff --git a/pkg/analyzer/test/src/diagnostics/must_call_super_test.dart b/pkg/analyzer/test/src/diagnostics/must_call_super_test.dart
index f871116..44f695f 100644
--- a/pkg/analyzer/test/src/diagnostics/must_call_super_test.dart
+++ b/pkg/analyzer/test/src/diagnostics/must_call_super_test.dart
@@ -46,8 +46,7 @@
}
class B extends A {
@override
- void a()
- {}
+ void a() {}
}
''', [
error(HintCode.MUST_CALL_SUPER, 115, 1),
@@ -82,6 +81,38 @@
''');
}
+ test_fromExtendingClass_genericClass() async {
+ await assertErrorsInCode(r'''
+import 'package:meta/meta.dart';
+class A<T> {
+ @mustCallSuper
+ void a() {}
+}
+class B extends A<int> {
+ @override
+ void a() {}
+}
+''', [
+ error(HintCode.MUST_CALL_SUPER, 123, 1),
+ ]);
+ }
+
+ test_fromExtendingClass_genericMethod() async {
+ await assertErrorsInCode(r'''
+import 'package:meta/meta.dart';
+class A {
+ @mustCallSuper
+ void a<T>() {}
+}
+class B extends A {
+ @override
+ void a<T>() {}
+}
+''', [
+ error(HintCode.MUST_CALL_SUPER, 118, 1),
+ ]);
+ }
+
test_fromExtendingClass_operator() async {
await assertErrorsInCode(r'''
import 'package:meta/meta.dart';
@@ -142,6 +173,23 @@
]);
}
+ test_fromMixin_throughExtendingClass() async {
+ await assertErrorsInCode(r'''
+import 'package:meta/meta.dart';
+mixin M {
+ @mustCallSuper
+ void a() {}
+}
+class C with M {}
+class D extends C {
+ @override
+ void a() {}
+}
+''', [
+ error(HintCode.MUST_CALL_SUPER, 133, 1),
+ ]);
+ }
+
test_indirectlyInherited() async {
await assertErrorsInCode(r'''
import 'package:meta/meta.dart';
diff --git a/pkg/front_end/lib/src/fasta/kernel/body_builder.dart b/pkg/front_end/lib/src/fasta/kernel/body_builder.dart
index 563ab17..73e6525 100644
--- a/pkg/front_end/lib/src/fasta/kernel/body_builder.dart
+++ b/pkg/front_end/lib/src/fasta/kernel/body_builder.dart
@@ -4590,6 +4590,11 @@
@override
void handleElseControlFlow(Token elseToken) {
+ // Resolve the top of the stack so that if it's a delayed assignment it
+ // happens before we go into the else block.
+ Object node = pop();
+ if (node is! MapEntry) node = toValue(node);
+ push(node);
typePromoter?.enterElse();
}
@@ -4599,8 +4604,6 @@
Object entry = pop();
Object condition = pop(); // parenthesized expression
Token ifToken = pop();
- typePromoter?.enterElse();
- typePromoter?.exitConditional();
transformCollections = true;
if (entry is MapEntry) {
@@ -4610,6 +4613,8 @@
push(forest.createIfElement(
offsetForToken(ifToken), toValue(condition), toValue(entry)));
}
+ typePromoter?.enterElse();
+ typePromoter?.exitConditional();
}
@override
diff --git a/pkg/front_end/lib/src/fasta/kernel/internal_ast.dart b/pkg/front_end/lib/src/fasta/kernel/internal_ast.dart
index 6b8510c..2ed8f56 100644
--- a/pkg/front_end/lib/src/fasta/kernel/internal_ast.dart
+++ b/pkg/front_end/lib/src/fasta/kernel/internal_ast.dart
@@ -1354,18 +1354,6 @@
}
@override
- void setVariableMutatedAnywhere(VariableDeclaration variable) {
- if (variable is VariableDeclarationImpl) {
- variable.mutatedAnywhere = true;
- } else {
- // Hack to deal with the fact that BodyBuilder still creates raw
- // VariableDeclaration objects sometimes.
- // TODO(paulberry): get rid of this once the type parameter is
- // KernelVariableDeclaration.
- }
- }
-
- @override
void setVariableMutatedInClosure(VariableDeclaration variable) {
if (variable is VariableDeclarationImpl) {
variable.mutatedInClosure = true;
@@ -1376,19 +1364,6 @@
// KernelVariableDeclaration.
}
}
-
- @override
- bool wasVariableMutatedAnywhere(VariableDeclaration variable) {
- if (variable is VariableDeclarationImpl) {
- return variable.mutatedAnywhere;
- } else {
- // Hack to deal with the fact that BodyBuilder still creates raw
- // VariableDeclaration objects sometimes.
- // TODO(paulberry): get rid of this once the type parameter is
- // KernelVariableDeclaration.
- return true;
- }
- }
}
/// Front end specific implementation of [VariableDeclaration].
@@ -1415,9 +1390,6 @@
// be close to zero).
bool mutatedInClosure = false;
- // TODO(ahe): Investigate if this can be removed.
- bool mutatedAnywhere = false;
-
/// Determines whether the given [VariableDeclarationImpl] represents a
/// local function.
///
diff --git a/pkg/front_end/lib/src/fasta/type_inference/type_promotion.dart b/pkg/front_end/lib/src/fasta/type_inference/type_promotion.dart
index 4a8dca0..af42f14 100644
--- a/pkg/front_end/lib/src/fasta/type_inference/type_promotion.dart
+++ b/pkg/front_end/lib/src/fasta/type_inference/type_promotion.dart
@@ -190,6 +190,11 @@
/// created.
int _lastFactSequenceNumber = 0;
+ /// Map from variables to the set of scopes in which the variable is mutated.
+ /// If a variable is missing from the map, it is not mutated anywhere.
+ Map<VariableDeclaration, Set<TypePromotionScope>> _variableMutationScopes =
+ new Map<VariableDeclaration, Set<TypePromotionScope>>.identity();
+
TypePromoterImpl.private(TypeSchemaEnvironment typeSchemaEnvironment)
: this._(typeSchemaEnvironment, new _NullFact());
@@ -207,7 +212,8 @@
DartType computePromotedType(
TypePromotionFact fact, TypePromotionScope scope, bool mutatedInClosure) {
if (mutatedInClosure) return null;
- return fact?._computePromotedType(this, scope);
+ return fact?._computePromotedType(
+ this, scope, _variableMutationScopes[fact.variable]);
}
/// For internal debugging use, optionally prints the current state followed
@@ -220,28 +226,28 @@
@override
void enterElse() {
debugEvent('enterElse');
+ // Pop the scope and restore the facts to the state they were in before we
+ // entered the conditional. No promotion happens in the "else" branch.
_ConditionalScope scope = _currentScope;
- // Record the current fact state so that once we exit the "else" branch, we
- // can merge facts from the two branches.
- scope.afterTrue = _currentFacts;
- // While processing the "else" block, assume the condition was false.
+ _currentScope = _currentScope._enclosing;
_currentFacts = scope.beforeElse;
}
@override
void enterLogicalExpression(Expression lhs, String operator) {
debugEvent('enterLogicalExpression');
- // Figure out what the facts are based on possible LHS outcomes.
- TypePromotionFact trueFacts = _factsWhenTrue(lhs);
- TypePromotionFact falseFacts = _factsWhenFalse(lhs);
- // Record the fact that we are entering a new scope, and save the
- // appropriate facts for the case where the expression gets short-cut.
- bool isAnd = identical(operator, '&&');
- _currentScope =
- new _LogicalScope(_currentScope, isAnd, isAnd ? falseFacts : trueFacts);
- // While processing the RHS, assume the condition was false or true,
- // depending on the type of logical expression.
- _currentFacts = isAnd ? trueFacts : falseFacts;
+ if (!identical(operator, '&&')) {
+ // We don't promote for `||`.
+ _currentScope = new _LogicalScope(_currentScope, false, _currentFacts);
+ } else {
+ // Figure out what the facts are based on possible LHS outcomes.
+ TypePromotionFact trueFacts = _factsWhenTrue(lhs);
+ // Record the fact that we are entering a new scope, and save the
+ // appropriate facts for the case where the expression gets short-cut.
+ _currentScope = new _LogicalScope(_currentScope, true, _currentFacts);
+ // While processing the RHS, assume the condition was true.
+ _currentFacts = _addBlockingScopeToFacts(trueFacts);
+ }
}
@override
@@ -249,36 +255,29 @@
debugEvent('enterThen');
// Figure out what the facts are based on possible condition outcomes.
TypePromotionFact trueFacts = _factsWhenTrue(condition);
- TypePromotionFact falseFacts = _factsWhenFalse(condition);
- // Record the fact that we are entering a new scope, and save the "false"
+ // Record the fact that we are entering a new scope, and save the current
// facts for when we enter the "else" branch.
- _currentScope = new _ConditionalScope(_currentScope, falseFacts);
+ _currentScope = new _ConditionalScope(_currentScope, _currentFacts);
// While processing the "then" block, assume the condition was true.
- _currentFacts = trueFacts;
+ _currentFacts = _addBlockingScopeToFacts(trueFacts);
}
@override
void exitConditional() {
debugEvent('exitConditional');
- _ConditionalScope scope = _currentScope;
- _currentScope = _currentScope._enclosing;
- _currentFacts = _mergeFacts(scope.afterTrue, _currentFacts);
}
@override
void exitLogicalExpression(Expression rhs, Expression logicalExpression) {
debugEvent('exitLogicalExpression');
_LogicalScope scope = _currentScope;
- _currentScope = _currentScope._enclosing;
if (scope.isAnd) {
- _recordPromotionExpression(logicalExpression, _factsWhenTrue(rhs),
- _mergeFacts(scope.shortcutFacts, _currentFacts));
- } else {
+ TypePromotionFact factsWhenTrue = _factsWhenTrue(rhs);
+ _currentFacts = scope.shortcutFacts;
_recordPromotionExpression(
- logicalExpression,
- _mergeFacts(scope.shortcutFacts, _currentFacts),
- _factsWhenFalse(rhs));
+ logicalExpression, _addBlockingScopeToFacts(factsWhenTrue));
}
+ _currentScope = _currentScope._enclosing;
}
@override
@@ -317,9 +316,9 @@
_currentFacts,
_computeCurrentFactMap()[variable],
functionNestingLevel,
- type);
+ type, []);
if (!isInverted) {
- _recordPromotionExpression(isExpression, isCheck, _currentFacts);
+ _recordPromotionExpression(isExpression, isCheck);
}
}
@@ -335,27 +334,57 @@
/// mutated.
void mutateVariable(VariableDeclaration variable, int functionNestingLevel) {
debugEvent('mutateVariable');
- TypePromotionFact fact = _computeCurrentFactMap()[variable];
- TypePromotionFact._recordMutatedInScope(fact, _currentScope);
+ (_variableMutationScopes[variable] ??=
+ new Set<TypePromotionScope>.identity())
+ .add(_currentScope);
if (getVariableFunctionNestingLevel(variable) < functionNestingLevel) {
setVariableMutatedInClosure(variable);
}
- setVariableMutatedAnywhere(variable);
}
/// Determines whether [a] and [b] represent the same expression, after
/// dropping redundant enclosing parentheses.
bool sameExpressions(Expression a, Expression b);
- /// Records that the given variable was mutated somewhere inside the method.
- void setVariableMutatedAnywhere(VariableDeclaration variable);
-
/// Records that the given variable was mutated inside a closure.
void setVariableMutatedInClosure(VariableDeclaration variable);
- /// Indicates whether [setVariableMutatedAnywhere] has been called for the
- /// given [variable].
- bool wasVariableMutatedAnywhere(VariableDeclaration variable);
+ /// Updates any facts that are present in [facts] but not in [_currentFacts]
+ /// so that they include [_currentScope] in their list of blocking scopes, and
+ /// returns the resulting new linked list of facts.
+ ///
+ /// This is used when entering the body of a conditional, or the RHS of a
+ /// logical "and", to ensure that promotions are blocked if the construct
+ /// being entered contains any modifications of the corresponding variables.
+ /// It is also used when leaving the RHS of a logical "and", to ensure that
+ /// any promotions induced by the RHS of the "and" are blocked if the RHS of
+ /// the "and" contains any modifications of the corresponding variables.
+ TypePromotionFact _addBlockingScopeToFacts(TypePromotionFact facts) {
+ List<TypePromotionFact> factsToUpdate = [];
+ while (facts != _currentFacts) {
+ factsToUpdate.add(facts);
+ facts = facts.previous;
+ }
+ Map<VariableDeclaration, TypePromotionFact> factMap =
+ _computeCurrentFactMap();
+ for (TypePromotionFact fact in factsToUpdate.reversed) {
+ _IsCheck isCheck = fact as _IsCheck;
+ VariableDeclaration variable = isCheck.variable;
+ facts = new _IsCheck(
+ ++_lastFactSequenceNumber,
+ variable,
+ facts,
+ factMap[variable],
+ isCheck.functionNestingLevel,
+ isCheck.checkedType,
+ []
+ ..addAll(isCheck._blockingScopes)
+ ..add(_currentScope));
+ factMap[variable] = facts;
+ _factCacheState = facts;
+ }
+ return facts;
+ }
/// Returns a map from variable declaration to the most recent
/// [TypePromotionFact] associated with the variable.
@@ -401,20 +430,6 @@
}
/// Returns the set of facts known to be true after the execution of [e]
- /// assuming it evaluates to `false`.
- ///
- /// [e] must be the most recently parsed expression or statement.
- TypePromotionFact _factsWhenFalse(Expression e) {
- // Type promotion currently only occurs when an "is" or logical expression
- // evaluates to `true`, so no special logic is required; we just use
- // [_currentFacts].
- //
- // TODO(paulberry): experiment with supporting promotion in cases like
- // `if (x is! T) { ... } else { ...access x... }`
- return _currentFacts;
- }
-
- /// Returns the set of facts known to be true after the execution of [e]
/// assuming it evaluates to `true`.
///
/// [e] must be the most recently parsed expression or statement.
@@ -423,31 +438,6 @@
? _trueFactsForPromotionExpression
: _currentFacts;
- /// Returns the set of facts known to be true after two branches of execution
- /// rejoin.
- TypePromotionFact _mergeFacts(TypePromotionFact a, TypePromotionFact b) {
- // Type promotion currently doesn't support any mechanism for facts to
- // accumulate along a straight-line execution path (they can only accumulate
- // when entering a scope), so we can simply find the common ancestor fact.
- //
- // TODO(paulberry): experiment with supporting promotion in cases like:
- // if (...) {
- // if (x is! T) return;
- // } else {
- // if (x is! T) return;
- // }
- // ...access x...
- while (a.sequenceNumber != b.sequenceNumber) {
- if (a.sequenceNumber > b.sequenceNumber) {
- a = a.previous;
- } else {
- b = b.previous;
- }
- }
- assert(identical(a, b));
- return a;
- }
-
/// For internal debugging use, prints the current state followed by the event
/// name.
// ignore: unused_element
@@ -466,26 +456,19 @@
if (_promotionExpression != null) {
print(' _promotionExpression: $_promotionExpression');
if (!identical(_trueFactsForPromotionExpression, _currentFacts)) {
- print(' if true: $_trueFactsForPromotionExpression');
+ print(' if true: '
+ '${factChain(_trueFactsForPromotionExpression).join(' -> ')}');
}
}
print(name);
}
/// Records that after the evaluation of [expression], the facts will be
- /// [ifTrue] on a branch where the expression evaluated to `true`, and
- /// [ifFalse] on a branch where the expression evaluated to `false` (or where
- /// the truth value of the expression doesn't matter).
- ///
- /// TODO(paulberry): when we start handling promotion in "else" clauses, we'll
- /// need to split [ifFalse] into two cases, one for when the expression
- /// evaluated to `false`, and one where the truth value of the expression
- /// doesn't matter.
- void _recordPromotionExpression(Expression expression,
- TypePromotionFact ifTrue, TypePromotionFact ifFalse) {
+ /// [ifTrue] on a branch where the expression evaluated to `true`.
+ void _recordPromotionExpression(
+ Expression expression, TypePromotionFact ifTrue) {
_promotionExpression = expression;
_trueFactsForPromotionExpression = ifTrue;
- _currentFacts = ifFalse;
}
}
@@ -533,19 +516,9 @@
/// The function nesting level of the expression that led to this fact.
final int functionNestingLevel;
- /// If this fact's variable was mutated within any scopes the
- /// fact applies to, a set of the corresponding scopes. Otherwise `null`.
- ///
- /// TODO(paulberry): the size of this set is probably very small most of the
- /// time. Would it be better to use a list?
- Set<TypePromotionScope> _mutatedInScopes;
-
- /// If this fact's variable was accessed inside a closure within any scopes
- /// the fact applies to, a set of the corresponding scopes. Otherwise `null`.
- ///
- /// TODO(paulberry): the size of this set is probably very small most of the
- /// time. Would it be better to use a list?
- Set<TypePromotionScope> _accessedInClosureInScopes;
+ /// Indicates whether this fact's variable was accessed inside a closure
+ /// within the scope the fact applies to.
+ bool _accessedInClosureInScope = false;
TypePromotionFact(this.sequenceNumber, this.variable, this.previous,
this.previousForVariable, this.functionNestingLevel);
@@ -553,9 +526,13 @@
/// Computes the promoted type for [variable] at a location in the code where
/// this fact applies.
///
+ /// [scope] is the scope containing the read that might be promoted, and
+ /// [mutationScopes] is the set of scopes in which the variable is mutated, or
+ /// `null` if the variable isn't mutated anywhere.
+ ///
/// Should not be called until after parsing of the entire method is complete.
- DartType _computePromotedType(
- TypePromoterImpl promoter, TypePromotionScope scope);
+ DartType _computePromotedType(TypePromoterImpl promoter,
+ TypePromotionScope scope, Iterable<TypePromotionScope> mutationScopes);
/// Records the fact that the variable referenced by [fact] was accessed
/// within the given scope, at the given function nesting level.
@@ -569,26 +546,17 @@
// to testing it against getVariableFunctionNestingLevel(variable)).
while (fact != null) {
if (functionNestingLevel > fact.functionNestingLevel) {
- fact._accessedInClosureInScopes ??=
- new Set<TypePromotionScope>.identity();
- if (!fact._accessedInClosureInScopes.add(scope)) return;
+ if (fact._accessedInClosureInScope) {
+ // The variable has already been accessed in a closure in the scope of
+ // the current promotion (and this, any enclosing promotions), so
+ // no further information needs to be updated.
+ return;
+ }
+ fact._accessedInClosureInScope = true;
}
fact = fact.previousForVariable;
}
}
-
- /// Records the fact that the variable referenced by [fact] was mutated
- /// within the given scope.
- ///
- /// If `null` is passed in for [fact], there is no effect.
- static void _recordMutatedInScope(
- TypePromotionFact fact, TypePromotionScope scope) {
- while (fact != null) {
- fact._mutatedInScopes ??= new Set<TypePromotionScope>.identity();
- if (!fact._mutatedInScopes.add(scope)) return;
- fact = fact.previousForVariable;
- }
- }
}
/// Represents a contiguous block of program text in which variables may or may
@@ -629,9 +597,6 @@
/// The fact state in effect at the top of the "else" block.
final TypePromotionFact beforeElse;
- /// The fact state which was in effect at the bottom of the "then" block.
- TypePromotionFact afterTrue;
-
_ConditionalScope(TypePromotionScope enclosing, this.beforeElse)
: super(enclosing);
}
@@ -641,13 +606,18 @@
/// The type appearing on the right hand side of "is".
final DartType checkedType;
+ /// List of the scopes in which a mutation to the variable would block
+ /// promotion.
+ final List<TypePromotionScope> _blockingScopes;
+
_IsCheck(
int sequenceNumber,
VariableDeclaration variable,
TypePromotionFact previous,
TypePromotionFact previousForVariable,
int functionNestingLevel,
- this.checkedType)
+ this.checkedType,
+ this._blockingScopes)
: super(sequenceNumber, variable, previous, previousForVariable,
functionNestingLevel);
@@ -655,30 +625,27 @@
String toString() => 'isCheck($checkedType)';
@override
- DartType _computePromotedType(
- TypePromoterImpl promoter, TypePromotionScope scope) {
- DartType previousPromotedType =
- previousForVariable?._computePromotedType(promoter, scope);
+ DartType _computePromotedType(TypePromoterImpl promoter,
+ TypePromotionScope scope, Iterable<TypePromotionScope> mutationScopes) {
+ DartType previousPromotedType = previousForVariable?._computePromotedType(
+ promoter, scope, mutationScopes);
- // If the variable was mutated somewhere in the scope of the potential
- // promotion, promotion does not occur.
- if (_mutatedInScopes != null) {
- for (TypePromotionScope assignmentScope in _mutatedInScopes) {
- if (assignmentScope.containsScope(scope)) {
- return previousPromotedType;
+ if (mutationScopes != null) {
+      // If the variable was mutated somewhere in a scope that blocks the
+ // promotion does not occur.
+ for (TypePromotionScope blockingScope in _blockingScopes) {
+ for (TypePromotionScope mutationScope in mutationScopes) {
+ if (blockingScope.containsScope(mutationScope)) {
+ return previousPromotedType;
+ }
}
}
- }
- // If the variable was mutated anywhere, and it was accessed inside a
- // closure somewhere in the scope of the potential promotion, promotion does
- // not occur.
- if (promoter.wasVariableMutatedAnywhere(variable) &&
- _accessedInClosureInScopes != null) {
- for (TypePromotionScope accessScope in _accessedInClosureInScopes) {
- if (accessScope.containsScope(scope)) {
- return previousPromotedType;
- }
+ // If the variable was mutated anywhere, and it was accessed inside a
+ // closure somewhere in the scope of the potential promotion, promotion
+ // does not occur.
+ if (_accessedInClosureInScope) {
+ return previousPromotedType;
}
}
@@ -725,8 +692,8 @@
String toString() => 'null';
@override
- DartType _computePromotedType(
- TypePromoter promoter, TypePromotionScope scope) {
+ DartType _computePromotedType(TypePromoter promoter, TypePromotionScope scope,
+ Iterable<TypePromotionScope> mutationScopes) {
throw new StateError('Tried to create promoted type for no variable');
}
}
diff --git a/pkg/front_end/test/spell_checking_list_common.txt b/pkg/front_end/test/spell_checking_list_common.txt
index 9bd21db..267154d 100644
--- a/pkg/front_end/test/spell_checking_list_common.txt
+++ b/pkg/front_end/test/spell_checking_list_common.txt
@@ -300,6 +300,7 @@
bitwise
black
block
+blocked
blocks
blogs
blue
@@ -797,6 +798,7 @@
derivation
derive
derived
+descendant
descendants
describe
described
diff --git a/pkg/vm/lib/transformations/type_flow/summary_collector.dart b/pkg/vm/lib/transformations/type_flow/summary_collector.dart
index a9fbc07..b545936 100644
--- a/pkg/vm/lib/transformations/type_flow/summary_collector.dart
+++ b/pkg/vm/lib/transformations/type_flow/summary_collector.dart
@@ -1833,12 +1833,6 @@
if (v == null) {
throw 'Unable to find variable ${node.variable} at ${node.location}';
}
-
- if ((node.promotedType != null) &&
- (node.promotedType != const DynamicType())) {
- return _makeNarrowAfterSuccessfulIsCheck(v, node.promotedType);
- }
-
return v;
}
diff --git a/runtime/include/dart_api.h b/runtime/include/dart_api.h
index 821ae12..5268f5d 100644
--- a/runtime/include/dart_api.h
+++ b/runtime/include/dart_api.h
@@ -3695,22 +3695,28 @@
#define kSnapshotBuildIdCSymbol "kDartSnapshotBuildId"
#define kVmSnapshotDataCSymbol "kDartVmSnapshotData"
#define kVmSnapshotInstructionsCSymbol "kDartVmSnapshotInstructions"
+#define kVmSnapshotBssCSymbol "kDartVmSnapshotBss"
#define kIsolateSnapshotDataCSymbol "kDartIsolateSnapshotData"
#define kIsolateSnapshotInstructionsCSymbol "kDartIsolateSnapshotInstructions"
+#define kIsolateSnapshotBssCSymbol "kDartIsolateSnapshotBss"
#else
#define kSnapshotBuildIdCSymbol "_kDartSnapshotBuildId"
#define kVmSnapshotDataCSymbol "_kDartVmSnapshotData"
#define kVmSnapshotInstructionsCSymbol "_kDartVmSnapshotInstructions"
+#define kVmSnapshotBssCSymbol "_kDartVmSnapshotBss"
#define kIsolateSnapshotDataCSymbol "_kDartIsolateSnapshotData"
#define kIsolateSnapshotInstructionsCSymbol "_kDartIsolateSnapshotInstructions"
+#define kIsolateSnapshotBssCSymbol "_kDartIsolateSnapshotBss"
#endif
#define kSnapshotBuildIdAsmSymbol "_kDartSnapshotBuildId"
#define kVmSnapshotDataAsmSymbol "_kDartVmSnapshotData"
#define kVmSnapshotInstructionsAsmSymbol "_kDartVmSnapshotInstructions"
+#define kVmSnapshotBssAsmSymbol "_kDartVmSnapshotBss"
#define kIsolateSnapshotDataAsmSymbol "_kDartIsolateSnapshotData"
#define kIsolateSnapshotInstructionsAsmSymbol \
"_kDartIsolateSnapshotInstructions"
+#define kIsolateSnapshotBssAsmSymbol "_kDartIsolateSnapshotBss"
/**
* Creates a precompiled snapshot.
diff --git a/runtime/platform/elf.h b/runtime/platform/elf.h
index 42c2f87..8f58005 100644
--- a/runtime/platform/elf.h
+++ b/runtime/platform/elf.h
@@ -189,11 +189,29 @@
static const intptr_t STB_LOCAL = 0;
static const intptr_t STB_GLOBAL = 1;
+static const intptr_t STT_NOTYPE = 0;
static const intptr_t STT_OBJECT = 1; // I.e., data.
static const intptr_t STT_FUNC = 2;
static const intptr_t STT_SECTION = 3;
-static constexpr const char* ELF_NOTE_GNU = "GNU";
+static constexpr const char ELF_NOTE_GNU[] = "GNU";
+
+// Creates symbol info from the given STB and STT values.
+constexpr decltype(Symbol::info) SymbolInfo(intptr_t binding, intptr_t type) {
+ // Take the low nibble of each value in case, though the upper bits should
+ // all be zero as long as STB/STT constants are used.
+ return (binding & 0xf) << 4 | (type & 0xf);
+}
+
+// Retrieves the STB binding value for the given symbol info.
+constexpr intptr_t SymbolBinding(const decltype(Symbol::info) info) {
+ return (info >> 4) & 0xf;
+}
+
+// Retrieves the STT type value for the given symbol info.
+constexpr intptr_t SymbolType(const decltype(Symbol::info) info) {
+ return info & 0xf;
+}
} // namespace elf
} // namespace dart
diff --git a/runtime/platform/globals.h b/runtime/platform/globals.h
index 46ea17b..1796c33 100644
--- a/runtime/platform/globals.h
+++ b/runtime/platform/globals.h
@@ -449,112 +449,136 @@
#define strtoll _strtoi64
#endif
+// Byte sizes.
+constexpr int kInt8SizeLog2 = 0;
+constexpr int kInt8Size = 1 << kInt8SizeLog2;
+static_assert(kInt8Size == sizeof(int8_t), "Mismatched int8 size constant");
+constexpr int kInt16SizeLog2 = 1;
+constexpr int kInt16Size = 1 << kInt16SizeLog2;
+static_assert(kInt16Size == sizeof(int16_t), "Mismatched int16 size constant");
+constexpr int kInt32SizeLog2 = 2;
+constexpr int kInt32Size = 1 << kInt32SizeLog2;
+static_assert(kInt32Size == sizeof(int32_t), "Mismatched int32 size constant");
+constexpr int kInt64SizeLog2 = 3;
+constexpr int kInt64Size = 1 << kInt64SizeLog2;
+static_assert(kInt64Size == sizeof(int64_t), "Mismatched int64 size constant");
+
+constexpr int kDoubleSize = sizeof(double);
+constexpr int kFloatSize = sizeof(float);
+constexpr int kQuadSize = 4 * kFloatSize;
+constexpr int kSimd128Size = sizeof(simd128_value_t);
+
+// Bit sizes.
+constexpr int kBitsPerByteLog2 = 3;
+constexpr int kBitsPerByte = 1 << kBitsPerByteLog2;
+constexpr int kBitsPerInt8 = kInt8Size * kBitsPerByte;
+constexpr int kBitsPerInt16 = kInt16Size * kBitsPerByte;
+constexpr int kBitsPerInt32 = kInt32Size * kBitsPerByte;
+constexpr int kBitsPerInt64 = kInt64Size * kBitsPerByte;
+
// The following macro works on both 32 and 64-bit platforms.
// Usage: instead of writing 0x1234567890123456ULL
// write DART_2PART_UINT64_C(0x12345678,90123456);
#define DART_2PART_UINT64_C(a, b) \
- (((static_cast<uint64_t>(a) << 32) + 0x##b##u))
+ (((static_cast<uint64_t>(a) << kBitsPerInt32) + 0x##b##u))
// Integer constants.
-const int8_t kMinInt8 = 0x80;
-const int8_t kMaxInt8 = 0x7F;
-const uint8_t kMaxUint8 = 0xFF;
-const int16_t kMinInt16 = 0x8000;
-const int16_t kMaxInt16 = 0x7FFF;
-const uint16_t kMaxUint16 = 0xFFFF;
-const int32_t kMinInt32 = 0x80000000;
-const int32_t kMaxInt32 = 0x7FFFFFFF;
-const uint32_t kMaxUint32 = 0xFFFFFFFF;
-const int64_t kMinInt64 = DART_INT64_C(0x8000000000000000);
-const int64_t kMaxInt64 = DART_INT64_C(0x7FFFFFFFFFFFFFFF);
-const int kMinInt = INT_MIN;
-const int kMaxInt = INT_MAX;
-const int64_t kMinInt64RepresentableAsDouble = kMinInt64;
-const int64_t kMaxInt64RepresentableAsDouble = DART_INT64_C(0x7FFFFFFFFFFFFC00);
-const uint64_t kMaxUint64 = DART_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF);
-const int64_t kSignBitDouble = DART_INT64_C(0x8000000000000000);
+constexpr int8_t kMinInt8 = 0x80;
+constexpr int8_t kMaxInt8 = 0x7F;
+constexpr uint8_t kMaxUint8 = 0xFF;
+constexpr int16_t kMinInt16 = 0x8000;
+constexpr int16_t kMaxInt16 = 0x7FFF;
+constexpr uint16_t kMaxUint16 = 0xFFFF;
+constexpr int32_t kMinInt32 = 0x80000000;
+constexpr int32_t kMaxInt32 = 0x7FFFFFFF;
+constexpr uint32_t kMaxUint32 = 0xFFFFFFFF;
+constexpr int64_t kMinInt64 = DART_INT64_C(0x8000000000000000);
+constexpr int64_t kMaxInt64 = DART_INT64_C(0x7FFFFFFFFFFFFFFF);
+constexpr uint64_t kMaxUint64 = DART_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF);
+
+constexpr int kMinInt = INT_MIN;
+constexpr int kMaxInt = INT_MAX;
+constexpr int kMaxUint = UINT_MAX;
+
+constexpr int64_t kMinInt64RepresentableAsDouble = kMinInt64;
+constexpr int64_t kMaxInt64RepresentableAsDouble =
+ DART_INT64_C(0x7FFFFFFFFFFFFC00);
+constexpr int64_t kSignBitDouble = DART_INT64_C(0x8000000000000000);
// Types for native machine words. Guaranteed to be able to hold pointers and
// integers.
typedef intptr_t word;
typedef uintptr_t uword;
+// Byte sizes for native machine words.
+#ifdef ARCH_IS_32_BIT
+constexpr int kWordSizeLog2 = kInt32SizeLog2;
+#else
+constexpr int kWordSizeLog2 = kInt64SizeLog2;
+#endif
+constexpr int kWordSize = 1 << kWordSizeLog2;
+static_assert(kWordSize == sizeof(word), "Mismatched word size constant");
+
+// Bit sizes for native machine words.
+constexpr int kBitsPerWordLog2 = kWordSizeLog2 + kBitsPerByteLog2;
+constexpr int kBitsPerWord = 1 << kBitsPerWordLog2;
+
+// Integer constants for native machine words.
+constexpr word kWordMin = static_cast<uword>(1) << (kBitsPerWord - 1);
+constexpr word kWordMax = (static_cast<uword>(1) << (kBitsPerWord - 1)) - 1;
+constexpr uword kUwordMax = static_cast<uword>(-1);
+
// Size of a class id assigned to concrete, abstract and top-level classes.
//
// We use a signed integer type here to make it comparable with intptr_t.
typedef int32_t classid_t;
-// Byte sizes.
-const int kWordSize = sizeof(word);
-const int kDoubleSize = sizeof(double); // NOLINT
-const int kFloatSize = sizeof(float); // NOLINT
-const int kQuadSize = 4 * kFloatSize;
-const int kSimd128Size = sizeof(simd128_value_t); // NOLINT
-const int kInt64Size = sizeof(int64_t); // NOLINT
-const int kInt32Size = sizeof(int32_t); // NOLINT
-const int kInt16Size = sizeof(int16_t); // NOLINT
-#ifdef ARCH_IS_32_BIT
-const int kWordSizeLog2 = 2;
-const uword kUwordMax = kMaxUint32;
-#else
-const int kWordSizeLog2 = 3;
-const uword kUwordMax = kMaxUint64;
-#endif
-
-// Bit sizes.
-const int kBitsPerByte = 8;
-const int kBitsPerByteLog2 = 3;
-const int kBitsPerInt32 = kInt32Size * kBitsPerByte;
-const int kBitsPerInt64 = kInt64Size * kBitsPerByte;
-const int kBitsPerWord = kWordSize * kBitsPerByte;
-const int kBitsPerWordLog2 = kWordSizeLog2 + kBitsPerByteLog2;
-
// System-wide named constants.
-const intptr_t KB = 1024;
-const intptr_t KBLog2 = 10;
-const intptr_t MB = KB * KB;
-const intptr_t MBLog2 = KBLog2 + KBLog2;
-const intptr_t GB = MB * KB;
-const intptr_t GBLog2 = MBLog2 + KBLog2;
+constexpr intptr_t KBLog2 = 10;
+constexpr intptr_t KB = 1 << KBLog2;
+constexpr intptr_t MBLog2 = KBLog2 + KBLog2;
+constexpr intptr_t MB = 1 << MBLog2;
+constexpr intptr_t GBLog2 = MBLog2 + KBLog2;
+constexpr intptr_t GB = 1 << GBLog2;
-const intptr_t KBInWords = KB >> kWordSizeLog2;
-const intptr_t KBInWordsLog2 = KBLog2 - kWordSizeLog2;
-const intptr_t MBInWords = KB * KBInWords;
-const intptr_t MBInWordsLog2 = KBLog2 + KBInWordsLog2;
-const intptr_t GBInWords = MB * KBInWords;
-const intptr_t GBInWordsLog2 = MBLog2 + KBInWordsLog2;
+constexpr intptr_t KBInWordsLog2 = KBLog2 - kWordSizeLog2;
+constexpr intptr_t KBInWords = 1 << KBInWordsLog2;
+constexpr intptr_t MBInWordsLog2 = KBLog2 + KBInWordsLog2;
+constexpr intptr_t MBInWords = 1 << MBInWordsLog2;
+constexpr intptr_t GBInWordsLog2 = MBLog2 + KBInWordsLog2;
+constexpr intptr_t GBInWords = 1 << GBInWordsLog2;
// Helpers to round memory sizes to human readable values.
-inline intptr_t RoundWordsToKB(intptr_t size_in_words) {
+constexpr intptr_t RoundWordsToKB(intptr_t size_in_words) {
return (size_in_words + (KBInWords >> 1)) >> KBInWordsLog2;
}
-inline intptr_t RoundWordsToMB(intptr_t size_in_words) {
+constexpr intptr_t RoundWordsToMB(intptr_t size_in_words) {
return (size_in_words + (MBInWords >> 1)) >> MBInWordsLog2;
}
-inline intptr_t RoundWordsToGB(intptr_t size_in_words) {
+constexpr intptr_t RoundWordsToGB(intptr_t size_in_words) {
return (size_in_words + (GBInWords >> 1)) >> GBInWordsLog2;
}
-const intptr_t kIntptrOne = 1;
-const intptr_t kIntptrMin = (kIntptrOne << (kBitsPerWord - 1));
-const intptr_t kIntptrMax = ~kIntptrMin;
+constexpr intptr_t kIntptrOne = 1;
+constexpr intptr_t kIntptrMin = (kIntptrOne << (kBitsPerWord - 1));
+constexpr intptr_t kIntptrMax = ~kIntptrMin;
// Time constants.
-const int kMillisecondsPerSecond = 1000;
-const int kMicrosecondsPerMillisecond = 1000;
-const int kMicrosecondsPerSecond =
+constexpr int kMillisecondsPerSecond = 1000;
+constexpr int kMicrosecondsPerMillisecond = 1000;
+constexpr int kMicrosecondsPerSecond =
(kMicrosecondsPerMillisecond * kMillisecondsPerSecond);
-const int kNanosecondsPerMicrosecond = 1000;
-const int kNanosecondsPerMillisecond =
+constexpr int kNanosecondsPerMicrosecond = 1000;
+constexpr int kNanosecondsPerMillisecond =
(kNanosecondsPerMicrosecond * kMicrosecondsPerMillisecond);
-const int kNanosecondsPerSecond =
+constexpr int kNanosecondsPerSecond =
(kNanosecondsPerMicrosecond * kMicrosecondsPerSecond);
// Helpers to scale micro second times to human understandable values.
-inline double MicrosecondsToSeconds(int64_t micros) {
+constexpr double MicrosecondsToSeconds(int64_t micros) {
return static_cast<double>(micros) / kMicrosecondsPerSecond;
}
-inline double MicrosecondsToMilliseconds(int64_t micros) {
+constexpr double MicrosecondsToMilliseconds(int64_t micros) {
return static_cast<double>(micros) / kMicrosecondsPerMillisecond;
}
diff --git a/runtime/tests/vm/dart/regress_43679_test.dart b/runtime/tests/vm/dart/regress_43679_test.dart
new file mode 100644
index 0000000..9738391
--- /dev/null
+++ b/runtime/tests/vm/dart/regress_43679_test.dart
@@ -0,0 +1,24 @@
+// Copyright (c) 2020, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+// VMOptions=--deterministic --optimization_counter_threshold=80
+
+// Verifies that JIT compiler doesn't crash when typed data allocation is
+// used in unreachable LoadField(Array.length) due to polymorphic inlining.
+// Regression test for https://github.com/dart-lang/sdk/issues/43679.
+
+import 'dart:typed_data';
+import 'package:expect/expect.dart';
+
+main() {
+ List<List> lists = [];
+ for (int i = 0; i < 100; ++i) {
+ lists.add(Uint32List.fromList(List<int>.filled(0, 0)));
+ lists.add(Uint32List.fromList(Uint8List(3)));
+ }
+ for (int i = 0; i < lists.length; i += 2) {
+ Expect.equals(0, lists[i].length);
+ Expect.equals(3, lists[i + 1].length);
+ }
+}
diff --git a/runtime/tests/vm/dart/use_dwarf_stack_traces_flag_test.dart b/runtime/tests/vm/dart/use_dwarf_stack_traces_flag_test.dart
index 5d9fe49..052ddda 100644
--- a/runtime/tests/vm/dart/use_dwarf_stack_traces_flag_test.dart
+++ b/runtime/tests/vm/dart/use_dwarf_stack_traces_flag_test.dart
@@ -115,12 +115,16 @@
final dwarf = Dwarf.fromFile(scriptDwarfDebugInfo)!;
// Check that build IDs match for traces.
+ Expect.isNotNull(dwarf.buildId);
+ print('Dwarf build ID: "${dwarf.buildId!}"');
final buildId1 = buildId(dwarfTrace1);
Expect.isFalse(buildId1.isEmpty);
- Expect.equals(dwarf.buildId!, buildId1);
+ print('Trace 1 build ID: "${buildId1}"');
+ Expect.equals(dwarf.buildId, buildId1);
final buildId2 = buildId(dwarfTrace2);
Expect.isFalse(buildId2.isEmpty);
- Expect.equals(dwarf.buildId!, buildId2);
+ print('Trace 2 build ID: "${buildId2}"');
+ Expect.equals(dwarf.buildId, buildId2);
final translatedDwarfTrace1 = await Stream.fromIterable(dwarfTrace1)
.transform(DwarfStackTraceDecoder(dwarf))
diff --git a/runtime/tests/vm/dart/use_save_debugging_info_flag_test.dart b/runtime/tests/vm/dart/use_save_debugging_info_flag_test.dart
index cfc8f16..091087d 100644
--- a/runtime/tests/vm/dart/use_save_debugging_info_flag_test.dart
+++ b/runtime/tests/vm/dart/use_save_debugging_info_flag_test.dart
@@ -121,13 +121,13 @@
.transform(DwarfStackTraceDecoder(debugDwarf))
.toList();
print("\nStack trace converted using separate debugging info:");
- print(fromDebug.join());
+ print(fromDebug.join('\n'));
final fromWhole = await Stream.fromIterable(strippedTrace)
.transform(DwarfStackTraceDecoder(wholeDwarf))
.toList();
print("\nStack trace converted using unstripped ELF file:");
- print(fromWhole.join());
+ print(fromWhole.join('\n'));
Expect.deepEquals(fromDebug, fromWhole);
});
diff --git a/runtime/tests/vm/dart_2/regress_43679_test.dart b/runtime/tests/vm/dart_2/regress_43679_test.dart
new file mode 100644
index 0000000..9738391
--- /dev/null
+++ b/runtime/tests/vm/dart_2/regress_43679_test.dart
@@ -0,0 +1,24 @@
+// Copyright (c) 2020, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+// VMOptions=--deterministic --optimization_counter_threshold=80
+
+// Verifies that JIT compiler doesn't crash when typed data allocation is
+// used in unreachable LoadField(Array.length) due to polymorphic inlining.
+// Regression test for https://github.com/dart-lang/sdk/issues/43679.
+
+import 'dart:typed_data';
+import 'package:expect/expect.dart';
+
+main() {
+ List<List> lists = [];
+ for (int i = 0; i < 100; ++i) {
+ lists.add(Uint32List.fromList(List<int>.filled(0, 0)));
+ lists.add(Uint32List.fromList(Uint8List(3)));
+ }
+ for (int i = 0; i < lists.length; i += 2) {
+ Expect.equals(0, lists[i].length);
+ Expect.equals(3, lists[i + 1].length);
+ }
+}
diff --git a/runtime/tests/vm/dart_2/use_dwarf_stack_traces_flag_test.dart b/runtime/tests/vm/dart_2/use_dwarf_stack_traces_flag_test.dart
index 7f3557e..967c027 100644
--- a/runtime/tests/vm/dart_2/use_dwarf_stack_traces_flag_test.dart
+++ b/runtime/tests/vm/dart_2/use_dwarf_stack_traces_flag_test.dart
@@ -117,11 +117,14 @@
// Check that build IDs match for traces.
Expect.isNotNull(dwarf.buildId);
+ print('Dwarf build ID: "${dwarf.buildId}"');
final buildId1 = buildId(dwarfTrace1);
Expect.isFalse(buildId1.isEmpty);
+ print('Trace 1 build ID: "${buildId1}"');
Expect.equals(dwarf.buildId, buildId1);
final buildId2 = buildId(dwarfTrace2);
Expect.isFalse(buildId2.isEmpty);
+ print('Trace 2 build ID: "${buildId2}"');
Expect.equals(dwarf.buildId, buildId2);
final translatedDwarfTrace1 = await Stream.fromIterable(dwarfTrace1)
diff --git a/runtime/tests/vm/dart_2/use_save_debugging_info_flag_test.dart b/runtime/tests/vm/dart_2/use_save_debugging_info_flag_test.dart
index e098c5a..1a21971 100644
--- a/runtime/tests/vm/dart_2/use_save_debugging_info_flag_test.dart
+++ b/runtime/tests/vm/dart_2/use_save_debugging_info_flag_test.dart
@@ -121,13 +121,13 @@
.transform(DwarfStackTraceDecoder(debugDwarf))
.toList();
print("\nStack trace converted using separate debugging info:");
- print(fromDebug.join());
+ print(fromDebug.join('\n'));
final fromWhole = await Stream.fromIterable(strippedTrace)
.transform(DwarfStackTraceDecoder(wholeDwarf))
.toList();
print("\nStack trace converted using unstripped ELF file:");
- print(fromWhole.join());
+ print(fromWhole.join('\n'));
Expect.deepEquals(fromDebug, fromWhole);
});
diff --git a/runtime/vm/bss_relocs.cc b/runtime/vm/bss_relocs.cc
index 2cb3157..867d82a 100644
--- a/runtime/vm/bss_relocs.cc
+++ b/runtime/vm/bss_relocs.cc
@@ -32,7 +32,7 @@
current->isolate_group()->source()->snapshot_instructions);
uword dso_base;
// Needed for assembly snapshots. For ELF snapshots, we set up the relocated
- // address information directly in the text segment ImageHeader.
+ // address information directly in the text segment InstructionsSection.
if (NativeSymbolResolver::LookupSharedObject(instructions, &dso_base)) {
InitializeBSSEntry(Relocation::InstructionsRelocatedAddress,
instructions - dso_base, bss_start);
diff --git a/runtime/vm/class_id.h b/runtime/vm/class_id.h
index 97f9325..0664cbb 100644
--- a/runtime/vm/class_id.h
+++ b/runtime/vm/class_id.h
@@ -91,8 +91,7 @@
V(FutureOr) \
V(UserTag) \
V(TransferableTypedData) \
- V(WeakSerializationReference) \
- V(ImageHeader)
+ V(WeakSerializationReference)
#define CLASS_LIST_ARRAYS(V) \
V(Array) \
diff --git a/runtime/vm/compiler/backend/il.cc b/runtime/vm/compiler/backend/il.cc
index 05f8a8d..cf1fd3a 100644
--- a/runtime/vm/compiler/backend/il.cc
+++ b/runtime/vm/compiler/backend/il.cc
@@ -2918,8 +2918,9 @@
}
} else if (AllocateTypedDataInstr* alloc_typed_data =
array->AsAllocateTypedData()) {
- ASSERT(slot().kind() == Slot::Kind::kTypedDataBase_length);
- return alloc_typed_data->num_elements()->definition();
+ if (slot().kind() == Slot::Kind::kTypedDataBase_length) {
+ return alloc_typed_data->num_elements()->definition();
+ }
} else if (LoadFieldInstr* load_array = array->AsLoadField()) {
// For arrays with guarded lengths, replace the length load
// with a constant.
diff --git a/runtime/vm/compiler/runtime_api.cc b/runtime/vm/compiler/runtime_api.cc
index 06bf60c..d3627d0 100644
--- a/runtime/vm/compiler/runtime_api.cc
+++ b/runtime/vm/compiler/runtime_api.cc
@@ -664,19 +664,19 @@
// For InstructionsSections and Instructions, we define these by hand, because
// they depend on flags or #defines.
+// Used for InstructionsSection and Instructions methods, since we don't
+// serialize Instructions objects in bare instructions mode, just payloads.
+DART_FORCE_INLINE static bool BareInstructionsPayloads() {
+ return FLAG_precompiled_mode && FLAG_use_bare_instructions;
+}
+
word InstructionsSection::HeaderSize() {
// We only create InstructionsSections in precompiled mode.
ASSERT(FLAG_precompiled_mode);
- return Utils::RoundUp(UnalignedHeaderSize(),
+ return Utils::RoundUp(InstructionsSection::UnalignedHeaderSize(),
Instructions::kBarePayloadAlignment);
}
-// Used for Instructions methods, since we don't serialize Instructions objects
-// in bare instructions mode, just payloads.
-DART_FORCE_INLINE static bool BareInstructionsPayloads() {
- return FLAG_precompiled_mode && FLAG_use_bare_instructions;
-}
-
word Instructions::HeaderSize() {
return BareInstructionsPayloads()
? 0
@@ -842,10 +842,6 @@
return 0;
}
-word ImageHeader::InstanceSize() {
- return RoundedAllocationSize(UnroundedSize());
-}
-
word Instance::NextFieldOffset() {
return TranslateOffsetInWords(dart::Instance::NextFieldOffset());
}
@@ -854,10 +850,6 @@
return TranslateOffsetInWords(dart::Pointer::NextFieldOffset());
}
-word ImageHeader::NextFieldOffset() {
- return -kWordSize;
-}
-
word WeakSerializationReference::NextFieldOffset() {
return -kWordSize;
}
diff --git a/runtime/vm/compiler/runtime_api.h b/runtime/vm/compiler/runtime_api.h
index e4468ab..ac5ff81 100644
--- a/runtime/vm/compiler/runtime_api.h
+++ b/runtime/vm/compiler/runtime_api.h
@@ -68,9 +68,16 @@
extern InvalidClass kWordSize;
extern InvalidClass kWordSizeLog2;
extern InvalidClass kBitsPerWord;
+extern InvalidClass kBitsPerWordLog2;
+extern InvalidClass kWordMin;
+extern InvalidClass kWordMax;
+extern InvalidClass kUwordMax;
extern InvalidClass kNewObjectAlignmentOffset;
extern InvalidClass kOldObjectAlignmentOffset;
extern InvalidClass kNewObjectBitPosition;
+extern InvalidClass kOldPageSize;
+extern InvalidClass kOldPageSizeInWords;
+extern InvalidClass kOldPageMask;
extern InvalidClass kObjectAlignment;
extern InvalidClass kObjectAlignmentLog2;
extern InvalidClass kObjectAlignmentMask;
@@ -1191,13 +1198,6 @@
static word NextFieldOffset();
};
-class ImageHeader : public AllStatic {
- public:
- static word UnroundedSize();
- static word InstanceSize();
- static word NextFieldOffset();
-};
-
class WeakSerializationReference : public AllStatic {
public:
static word InstanceSize();
diff --git a/runtime/vm/compiler/runtime_offsets_extracted.h b/runtime/vm/compiler/runtime_offsets_extracted.h
index 349f146..4a6d9e0 100644
--- a/runtime/vm/compiler/runtime_offsets_extracted.h
+++ b/runtime/vm/compiler/runtime_offsets_extracted.h
@@ -456,12 +456,11 @@
static constexpr dart::compiler::target::word GrowableObjectArray_InstanceSize =
16;
static constexpr dart::compiler::target::word ICData_InstanceSize = 32;
-static constexpr dart::compiler::target::word ImageHeader_UnroundedSize = 20;
static constexpr dart::compiler::target::word Instance_InstanceSize = 4;
static constexpr dart::compiler::target::word Instructions_UnalignedHeaderSize =
8;
static constexpr dart::compiler::target::word
- InstructionsSection_UnalignedHeaderSize = 8;
+ InstructionsSection_UnalignedHeaderSize = 20;
static constexpr dart::compiler::target::word Int32x4_InstanceSize = 24;
static constexpr dart::compiler::target::word Integer_InstanceSize = 4;
static constexpr dart::compiler::target::word KernelProgramInfo_InstanceSize =
@@ -968,12 +967,11 @@
static constexpr dart::compiler::target::word GrowableObjectArray_InstanceSize =
32;
static constexpr dart::compiler::target::word ICData_InstanceSize = 56;
-static constexpr dart::compiler::target::word ImageHeader_UnroundedSize = 40;
static constexpr dart::compiler::target::word Instance_InstanceSize = 8;
static constexpr dart::compiler::target::word Instructions_UnalignedHeaderSize =
12;
static constexpr dart::compiler::target::word
- InstructionsSection_UnalignedHeaderSize = 16;
+ InstructionsSection_UnalignedHeaderSize = 40;
static constexpr dart::compiler::target::word Int32x4_InstanceSize = 24;
static constexpr dart::compiler::target::word Integer_InstanceSize = 8;
static constexpr dart::compiler::target::word KernelProgramInfo_InstanceSize =
@@ -1471,12 +1469,11 @@
static constexpr dart::compiler::target::word GrowableObjectArray_InstanceSize =
16;
static constexpr dart::compiler::target::word ICData_InstanceSize = 32;
-static constexpr dart::compiler::target::word ImageHeader_UnroundedSize = 20;
static constexpr dart::compiler::target::word Instance_InstanceSize = 4;
static constexpr dart::compiler::target::word Instructions_UnalignedHeaderSize =
8;
static constexpr dart::compiler::target::word
- InstructionsSection_UnalignedHeaderSize = 8;
+ InstructionsSection_UnalignedHeaderSize = 20;
static constexpr dart::compiler::target::word Int32x4_InstanceSize = 24;
static constexpr dart::compiler::target::word Integer_InstanceSize = 4;
static constexpr dart::compiler::target::word KernelProgramInfo_InstanceSize =
@@ -1984,12 +1981,11 @@
static constexpr dart::compiler::target::word GrowableObjectArray_InstanceSize =
32;
static constexpr dart::compiler::target::word ICData_InstanceSize = 56;
-static constexpr dart::compiler::target::word ImageHeader_UnroundedSize = 40;
static constexpr dart::compiler::target::word Instance_InstanceSize = 8;
static constexpr dart::compiler::target::word Instructions_UnalignedHeaderSize =
12;
static constexpr dart::compiler::target::word
- InstructionsSection_UnalignedHeaderSize = 16;
+ InstructionsSection_UnalignedHeaderSize = 40;
static constexpr dart::compiler::target::word Int32x4_InstanceSize = 24;
static constexpr dart::compiler::target::word Integer_InstanceSize = 8;
static constexpr dart::compiler::target::word KernelProgramInfo_InstanceSize =
@@ -2486,12 +2482,11 @@
static constexpr dart::compiler::target::word GrowableObjectArray_InstanceSize =
16;
static constexpr dart::compiler::target::word ICData_InstanceSize = 32;
-static constexpr dart::compiler::target::word ImageHeader_UnroundedSize = 20;
static constexpr dart::compiler::target::word Instance_InstanceSize = 4;
static constexpr dart::compiler::target::word Instructions_UnalignedHeaderSize =
8;
static constexpr dart::compiler::target::word
- InstructionsSection_UnalignedHeaderSize = 8;
+ InstructionsSection_UnalignedHeaderSize = 20;
static constexpr dart::compiler::target::word Int32x4_InstanceSize = 24;
static constexpr dart::compiler::target::word Integer_InstanceSize = 4;
static constexpr dart::compiler::target::word KernelProgramInfo_InstanceSize =
@@ -2992,12 +2987,11 @@
static constexpr dart::compiler::target::word GrowableObjectArray_InstanceSize =
32;
static constexpr dart::compiler::target::word ICData_InstanceSize = 56;
-static constexpr dart::compiler::target::word ImageHeader_UnroundedSize = 40;
static constexpr dart::compiler::target::word Instance_InstanceSize = 8;
static constexpr dart::compiler::target::word Instructions_UnalignedHeaderSize =
12;
static constexpr dart::compiler::target::word
- InstructionsSection_UnalignedHeaderSize = 16;
+ InstructionsSection_UnalignedHeaderSize = 40;
static constexpr dart::compiler::target::word Int32x4_InstanceSize = 24;
static constexpr dart::compiler::target::word Integer_InstanceSize = 8;
static constexpr dart::compiler::target::word KernelProgramInfo_InstanceSize =
@@ -3489,12 +3483,11 @@
static constexpr dart::compiler::target::word GrowableObjectArray_InstanceSize =
16;
static constexpr dart::compiler::target::word ICData_InstanceSize = 32;
-static constexpr dart::compiler::target::word ImageHeader_UnroundedSize = 20;
static constexpr dart::compiler::target::word Instance_InstanceSize = 4;
static constexpr dart::compiler::target::word Instructions_UnalignedHeaderSize =
8;
static constexpr dart::compiler::target::word
- InstructionsSection_UnalignedHeaderSize = 8;
+ InstructionsSection_UnalignedHeaderSize = 20;
static constexpr dart::compiler::target::word Int32x4_InstanceSize = 24;
static constexpr dart::compiler::target::word Integer_InstanceSize = 4;
static constexpr dart::compiler::target::word KernelProgramInfo_InstanceSize =
@@ -3996,12 +3989,11 @@
static constexpr dart::compiler::target::word GrowableObjectArray_InstanceSize =
32;
static constexpr dart::compiler::target::word ICData_InstanceSize = 56;
-static constexpr dart::compiler::target::word ImageHeader_UnroundedSize = 40;
static constexpr dart::compiler::target::word Instance_InstanceSize = 8;
static constexpr dart::compiler::target::word Instructions_UnalignedHeaderSize =
12;
static constexpr dart::compiler::target::word
- InstructionsSection_UnalignedHeaderSize = 16;
+ InstructionsSection_UnalignedHeaderSize = 40;
static constexpr dart::compiler::target::word Int32x4_InstanceSize = 24;
static constexpr dart::compiler::target::word Integer_InstanceSize = 8;
static constexpr dart::compiler::target::word KernelProgramInfo_InstanceSize =
@@ -4543,13 +4535,11 @@
static constexpr dart::compiler::target::word
AOT_GrowableObjectArray_InstanceSize = 16;
static constexpr dart::compiler::target::word AOT_ICData_InstanceSize = 24;
-static constexpr dart::compiler::target::word AOT_ImageHeader_UnroundedSize =
- 20;
static constexpr dart::compiler::target::word AOT_Instance_InstanceSize = 4;
static constexpr dart::compiler::target::word
AOT_Instructions_UnalignedHeaderSize = 8;
static constexpr dart::compiler::target::word
- AOT_InstructionsSection_UnalignedHeaderSize = 8;
+ AOT_InstructionsSection_UnalignedHeaderSize = 20;
static constexpr dart::compiler::target::word AOT_Int32x4_InstanceSize = 24;
static constexpr dart::compiler::target::word AOT_Integer_InstanceSize = 4;
static constexpr dart::compiler::target::word
@@ -5105,13 +5095,11 @@
static constexpr dart::compiler::target::word
AOT_GrowableObjectArray_InstanceSize = 32;
static constexpr dart::compiler::target::word AOT_ICData_InstanceSize = 48;
-static constexpr dart::compiler::target::word AOT_ImageHeader_UnroundedSize =
- 40;
static constexpr dart::compiler::target::word AOT_Instance_InstanceSize = 8;
static constexpr dart::compiler::target::word
AOT_Instructions_UnalignedHeaderSize = 12;
static constexpr dart::compiler::target::word
- AOT_InstructionsSection_UnalignedHeaderSize = 16;
+ AOT_InstructionsSection_UnalignedHeaderSize = 40;
static constexpr dart::compiler::target::word AOT_Int32x4_InstanceSize = 24;
static constexpr dart::compiler::target::word AOT_Integer_InstanceSize = 8;
static constexpr dart::compiler::target::word
@@ -5671,13 +5659,11 @@
static constexpr dart::compiler::target::word
AOT_GrowableObjectArray_InstanceSize = 32;
static constexpr dart::compiler::target::word AOT_ICData_InstanceSize = 48;
-static constexpr dart::compiler::target::word AOT_ImageHeader_UnroundedSize =
- 40;
static constexpr dart::compiler::target::word AOT_Instance_InstanceSize = 8;
static constexpr dart::compiler::target::word
AOT_Instructions_UnalignedHeaderSize = 12;
static constexpr dart::compiler::target::word
- AOT_InstructionsSection_UnalignedHeaderSize = 16;
+ AOT_InstructionsSection_UnalignedHeaderSize = 40;
static constexpr dart::compiler::target::word AOT_Int32x4_InstanceSize = 24;
static constexpr dart::compiler::target::word AOT_Integer_InstanceSize = 8;
static constexpr dart::compiler::target::word
@@ -6225,13 +6211,11 @@
static constexpr dart::compiler::target::word
AOT_GrowableObjectArray_InstanceSize = 16;
static constexpr dart::compiler::target::word AOT_ICData_InstanceSize = 24;
-static constexpr dart::compiler::target::word AOT_ImageHeader_UnroundedSize =
- 20;
static constexpr dart::compiler::target::word AOT_Instance_InstanceSize = 4;
static constexpr dart::compiler::target::word
AOT_Instructions_UnalignedHeaderSize = 8;
static constexpr dart::compiler::target::word
- AOT_InstructionsSection_UnalignedHeaderSize = 8;
+ AOT_InstructionsSection_UnalignedHeaderSize = 20;
static constexpr dart::compiler::target::word AOT_Int32x4_InstanceSize = 24;
static constexpr dart::compiler::target::word AOT_Integer_InstanceSize = 4;
static constexpr dart::compiler::target::word
@@ -6780,13 +6764,11 @@
static constexpr dart::compiler::target::word
AOT_GrowableObjectArray_InstanceSize = 32;
static constexpr dart::compiler::target::word AOT_ICData_InstanceSize = 48;
-static constexpr dart::compiler::target::word AOT_ImageHeader_UnroundedSize =
- 40;
static constexpr dart::compiler::target::word AOT_Instance_InstanceSize = 8;
static constexpr dart::compiler::target::word
AOT_Instructions_UnalignedHeaderSize = 12;
static constexpr dart::compiler::target::word
- AOT_InstructionsSection_UnalignedHeaderSize = 16;
+ AOT_InstructionsSection_UnalignedHeaderSize = 40;
static constexpr dart::compiler::target::word AOT_Int32x4_InstanceSize = 24;
static constexpr dart::compiler::target::word AOT_Integer_InstanceSize = 8;
static constexpr dart::compiler::target::word
@@ -7339,13 +7321,11 @@
static constexpr dart::compiler::target::word
AOT_GrowableObjectArray_InstanceSize = 32;
static constexpr dart::compiler::target::word AOT_ICData_InstanceSize = 48;
-static constexpr dart::compiler::target::word AOT_ImageHeader_UnroundedSize =
- 40;
static constexpr dart::compiler::target::word AOT_Instance_InstanceSize = 8;
static constexpr dart::compiler::target::word
AOT_Instructions_UnalignedHeaderSize = 12;
static constexpr dart::compiler::target::word
- AOT_InstructionsSection_UnalignedHeaderSize = 16;
+ AOT_InstructionsSection_UnalignedHeaderSize = 40;
static constexpr dart::compiler::target::word AOT_Int32x4_InstanceSize = 24;
static constexpr dart::compiler::target::word AOT_Integer_InstanceSize = 8;
static constexpr dart::compiler::target::word
diff --git a/runtime/vm/compiler/runtime_offsets_list.h b/runtime/vm/compiler/runtime_offsets_list.h
index fca139c..76eb49f 100644
--- a/runtime/vm/compiler/runtime_offsets_list.h
+++ b/runtime/vm/compiler/runtime_offsets_list.h
@@ -315,7 +315,6 @@
SIZEOF(FutureOr, InstanceSize, FutureOrLayout) \
SIZEOF(GrowableObjectArray, InstanceSize, GrowableObjectArrayLayout) \
SIZEOF(ICData, InstanceSize, ICDataLayout) \
- SIZEOF(ImageHeader, UnroundedSize, ImageHeaderLayout) \
SIZEOF(Instance, InstanceSize, InstanceLayout) \
SIZEOF(Instructions, UnalignedHeaderSize, InstructionsLayout) \
SIZEOF(InstructionsSection, UnalignedHeaderSize, InstructionsSectionLayout) \
diff --git a/runtime/vm/dart_api_impl.cc b/runtime/vm/dart_api_impl.cc
index df7a2b7..291f8f7 100644
--- a/runtime/vm/dart_api_impl.cc
+++ b/runtime/vm/dart_api_impl.cc
@@ -6683,13 +6683,12 @@
strip ? new (Z) Dwarf(Z) : dwarf)
: nullptr;
- BlobImageWriter vm_image_writer(T, &vm_snapshot_instructions, debug_elf,
- elf);
- BlobImageWriter isolate_image_writer(T, &isolate_snapshot_instructions,
- debug_elf, elf);
+ BlobImageWriter image_writer(T, &vm_snapshot_instructions,
+ &isolate_snapshot_instructions, debug_elf,
+ elf);
FullSnapshotWriter writer(Snapshot::kFullAOT, &vm_snapshot_data,
- &isolate_snapshot_data, &vm_image_writer,
- &isolate_image_writer);
+ &isolate_snapshot_data, &image_writer,
+ &image_writer);
if (unit == nullptr || unit->id() == LoadingUnit::kRootId) {
writer.WriteFullSnapshot(units);
@@ -7030,11 +7029,11 @@
ZoneWriteStream isolate_snapshot_instructions(
Api::TopScope(T)->zone(), FullSnapshotWriter::kInitialSize);
- BlobImageWriter vm_image_writer(T, &vm_snapshot_instructions);
- BlobImageWriter isolate_image_writer(T, &isolate_snapshot_instructions);
+ BlobImageWriter image_writer(T, &vm_snapshot_instructions,
+ &isolate_snapshot_instructions);
FullSnapshotWriter writer(Snapshot::kFullJIT, &vm_snapshot_data,
- &isolate_snapshot_data, &vm_image_writer,
- &isolate_image_writer);
+ &isolate_snapshot_data, &image_writer,
+ &image_writer);
writer.WriteFullSnapshot();
*vm_snapshot_data_buffer = vm_snapshot_data.buffer();
@@ -7118,9 +7117,10 @@
FullSnapshotWriter::kInitialSize);
ZoneWriteStream isolate_snapshot_instructions(
Api::TopScope(T)->zone(), FullSnapshotWriter::kInitialSize);
- BlobImageWriter isolate_image_writer(T, &isolate_snapshot_instructions);
+ BlobImageWriter image_writer(T, /*vm_instructions=*/nullptr,
+ &isolate_snapshot_instructions);
FullSnapshotWriter writer(Snapshot::kFullJIT, nullptr, &isolate_snapshot_data,
- nullptr, &isolate_image_writer);
+ nullptr, &image_writer);
writer.WriteFullSnapshot();
*isolate_snapshot_data_buffer = isolate_snapshot_data.buffer();
diff --git a/runtime/vm/datastream.h b/runtime/vm/datastream.h
index c6b3e04..81f2c82 100644
--- a/runtime/vm/datastream.h
+++ b/runtime/vm/datastream.h
@@ -316,13 +316,14 @@
DART_FORCE_INLINE intptr_t bytes_written() const { return Position(); }
virtual intptr_t Position() const { return current_ - buffer_; }
- void Align(intptr_t alignment) {
+ intptr_t Align(intptr_t alignment) {
const intptr_t position_before = Position();
const intptr_t position_after = Utils::RoundUp(position_before, alignment);
const intptr_t length = position_after - position_before;
EnsureSpace(length);
memset(current_, 0, length);
SetPosition(position_after);
+ return length;
}
template <int N, typename T>
diff --git a/runtime/vm/dwarf.cc b/runtime/vm/dwarf.cc
index da8bb08..cfafd48 100644
--- a/runtime/vm/dwarf.cc
+++ b/runtime/vm/dwarf.cc
@@ -115,66 +115,33 @@
: zone_(zone),
reverse_obfuscation_trie_(CreateReverseObfuscationTrie(zone)),
codes_(zone, 1024),
- code_to_address_(zone),
+ code_to_name_(zone),
functions_(zone, 1024),
function_to_index_(zone),
scripts_(zone, 1024),
script_to_index_(zone),
temp_(0) {}
-SegmentRelativeOffset Dwarf::CodeAddress(const Code& code) const {
- const auto& pair = code_to_address_.LookupValue(&code);
- // This is only used by Elf::Finalize(), and the image writers always give a
- // text offset when calling AddCode() for an Elf object's Dwarf object. Thus,
- // we should have known code offsets for each code object in the map.
- ASSERT(pair.offset != SegmentRelativeOffset::kUnknownOffset);
- return pair;
-}
-
-intptr_t Dwarf::AddCode(const Code& orig_code,
- const SegmentRelativeOffset& offset) {
+void Dwarf::AddCode(const Code& orig_code, const char* name) {
ASSERT(!orig_code.IsNull());
- // We should never get the no-argument constructed version here.
- ASSERT(offset.offset != SegmentRelativeOffset::kInvalidOffset);
- // Generate an appropriately zoned ZoneHandle for storing.
- const auto& code = Code::ZoneHandle(zone_, orig_code.raw());
+ ASSERT(name != nullptr);
- // For now, we assume one of two flows for a given code object:
- // ELF: Calls to AddCode(code, vm, offset), vm and offset are the same over
- // all calls.
- // Assembly: An initial call to AddCode(code, vm) (assembly), possibly
- // followed by a later call to AddCode(code, vm, offset)
- // (separate debugging info ELF)
- if (offset.offset == SegmentRelativeOffset::kUnknownOffset) {
- // A call without an address should always come before any calls with
- // addresses.
- ASSERT(code_to_address_.Lookup(&code) == nullptr);
- // Insert a marker so on later calls, we know we've already added to codes_.
- code_to_address_.Insert(CodeAddressPair(&code, offset));
- } else {
- const auto& old_value = code_to_address_.LookupValue(&code);
- // ELF does not need to know the index. If we've already added this Code
- // object to codes_ in a previous call, don't bother scanning codes_ to find
- // the corresponding index, just return -1 instead.
- switch (old_value.offset) {
- case SegmentRelativeOffset::kInvalidOffset:
- code_to_address_.Insert(CodeAddressPair(&code, offset));
- break; // Still need to add to codes_.
- case SegmentRelativeOffset::kUnknownOffset:
- // Code objects should only be associated with either the VM or isolate.
- ASSERT_EQUAL(old_value.vm, offset.vm);
- code_to_address_.Update(CodeAddressPair(&code, offset));
- return -1;
- default:
- // The information for the code object shouldn't have changed since the
- // previous update.
- ASSERT(old_value == offset);
- return -1;
- }
+ if (auto const old_pair = code_to_name_.Lookup(&orig_code)) {
+ // Dwarf objects can be shared, so we may get the same information for a
+ // given code object in different calls. In DEBUG mode, make sure the
+ // information is the same before returning.
+ ASSERT(old_pair->value != nullptr);
+ ASSERT_EQUAL(strcmp(old_pair->value, name), 0);
+ return;
}
- const intptr_t index = codes_.length();
+ // Generate an appropriately zoned ZoneHandle for storing.
+ const auto& code = Code::ZoneHandle(zone_, orig_code.raw());
codes_.Add(&code);
+ // Currently assumes the name has the same lifetime as the Zone of the
+ // Dwarf object (which is currently true). Otherwise, need to copy.
+ code_to_name_.Insert({&code, name});
+
if (code.IsFunctionCode()) {
const Function& function = Function::Handle(zone_, code.function());
AddFunction(function);
@@ -188,7 +155,6 @@
AddFunction(function);
}
}
- return index;
}
intptr_t Dwarf::AddFunction(const Function& function) {
@@ -309,8 +275,6 @@
}
void Dwarf::WriteDebugInfo(DwarfWriteStream* stream) {
- SnapshotTextObjectNamer namer(zone_);
-
// 7.5.1.1 Compilation Unit Header
// Unit length.
@@ -343,10 +307,15 @@
// The highest instruction address in this object file that is part of our
// compilation unit. Dwarf consumers use this to quickly decide which
// compilation unit DIE to consult for a given pc.
- intptr_t last_code_index = codes_.length() - 1;
- const Code& last_code = *(codes_[last_code_index]);
- auto const last_code_name = namer.SnapshotNameFor(last_code_index, last_code);
- stream->OffsetFromSymbol(last_code_name, last_code.Size());
+ if (codes_.is_empty()) {
+ // No code objects in this program, so set high_pc to same as low_pc.
+ stream->OffsetFromSymbol(kIsolateSnapshotInstructionsAsmSymbol, 0);
+ } else {
+ const Code& last_code = *codes_.Last();
+ auto const last_code_name = code_to_name_.LookupValue(&last_code);
+ ASSERT(last_code_name != nullptr);
+ stream->OffsetFromSymbol(last_code_name, last_code.Size());
+ }
// DW_AT_stmt_list (offset into .debug_line)
// Indicates which line number program is associated with this compilation
@@ -388,7 +357,6 @@
void Dwarf::WriteConcreteFunctions(DwarfWriteStream* stream) {
Function& function = Function::Handle(zone_);
Script& script = Script::Handle(zone_);
- SnapshotTextObjectNamer namer(zone_);
for (intptr_t i = 0; i < codes_.length(); i++) {
const Code& code = *(codes_[i]);
RELEASE_ASSERT(!code.IsNull());
@@ -399,7 +367,8 @@
function = code.function();
intptr_t function_index = LookupFunction(function);
script = function.script();
- const char* asm_name = namer.SnapshotNameFor(i, code);
+ const char* asm_name = code_to_name_.LookupValue(&code);
+ ASSERT(asm_name != nullptr);
stream->uleb128(kConcreteFunction);
// DW_AT_abstract_origin
@@ -415,7 +384,7 @@
if (node != NULL) {
for (InliningNode* child = node->children_head; child != NULL;
child = child->children_next) {
- WriteInliningNode(stream, child, asm_name, script, &namer);
+ WriteInliningNode(stream, child, asm_name, script);
}
}
@@ -514,8 +483,7 @@
void Dwarf::WriteInliningNode(DwarfWriteStream* stream,
InliningNode* node,
const char* root_asm_name,
- const Script& parent_script,
- SnapshotTextObjectNamer* namer) {
+ const Script& parent_script) {
intptr_t file = LookupScript(parent_script);
intptr_t function_index = LookupFunction(node->function);
const Script& script = Script::Handle(zone_, node->function.script());
@@ -538,7 +506,7 @@
for (InliningNode* child = node->children_head; child != NULL;
child = child->children_next) {
- WriteInliningNode(stream, child, root_asm_name, script, namer);
+ WriteInliningNode(stream, child, root_asm_name, script);
}
stream->uleb128(0); // End of children.
@@ -617,11 +585,11 @@
Array& functions = Array::Handle(zone_);
GrowableArray<const Function*> function_stack(zone_, 8);
GrowableArray<DwarfPosition> token_positions(zone_, 8);
- SnapshotTextObjectNamer namer(zone_);
for (intptr_t i = 0; i < codes_.length(); i++) {
const Code& code = *(codes_[i]);
- auto const asm_name = namer.SnapshotNameFor(i, code);
+ auto const asm_name = code_to_name_.LookupValue(&code);
+ ASSERT(asm_name != nullptr);
map = code.code_source_map();
if (map.IsNull()) {
@@ -731,8 +699,8 @@
const intptr_t last_code_index = codes_.length() - 1;
const Code& last_code = *(codes_[last_code_index]);
const intptr_t last_pc_offset = last_code.Size();
- const char* last_asm_name =
- namer.SnapshotNameFor(last_code_index, last_code);
+ const char* last_asm_name = code_to_name_.LookupValue(&last_code);
+ ASSERT(last_asm_name != nullptr);
stream->u1(DW_LNS_advance_pc);
if (previous_asm_name != nullptr) {
diff --git a/runtime/vm/dwarf.h b/runtime/vm/dwarf.h
index 65d612c..22df262 100644
--- a/runtime/vm/dwarf.h
+++ b/runtime/vm/dwarf.h
@@ -6,6 +6,7 @@
#define RUNTIME_VM_DWARF_H_
#include "vm/allocation.h"
+#include "vm/hash.h"
#include "vm/hash_map.h"
#include "vm/object.h"
#include "vm/zone.h"
@@ -15,7 +16,6 @@
#ifdef DART_PRECOMPILER
class InliningNode;
-class SnapshotTextObjectNamer;
struct ScriptIndexPair {
// Typedefs needed for the DirectChainedHashMap template.
@@ -82,64 +82,46 @@
typedef DirectChainedHashMap<FunctionIndexPair> FunctionIndexMap;
-struct SegmentRelativeOffset {
- // Used for the empty constructor (for hash map usage).
- static constexpr intptr_t kInvalidOffset = -2;
- // Used for cases where we know which segment, but don't know the offset.
- static constexpr intptr_t kUnknownOffset = -1;
-
- SegmentRelativeOffset(bool vm, intptr_t offset) : vm(vm), offset(offset) {
- ASSERT(offset >= 0);
- }
- explicit SegmentRelativeOffset(bool vm) : vm(vm), offset(kUnknownOffset) {}
- SegmentRelativeOffset() : vm(false), offset(kInvalidOffset) {}
-
- bool operator==(const SegmentRelativeOffset& b) const {
- return vm == b.vm && offset == b.offset;
- }
- bool operator==(const SegmentRelativeOffset& b) {
- return *const_cast<const SegmentRelativeOffset*>(this) == b;
- }
- bool operator!=(const SegmentRelativeOffset& b) { return !(*this == b); }
-
- // Whether or not this is an offset into the VM text segment.
- bool vm;
- // The byte offset into the segment contents.
- intptr_t offset;
-};
-
-struct CodeAddressPair {
+// Assumes T has a copy constructor and is CopyAssignable.
+template <typename T>
+struct DwarfCodeKeyValueTrait {
// Typedefs needed for the DirectChainedHashMap template.
typedef const Code* Key;
- typedef SegmentRelativeOffset Value;
- typedef CodeAddressPair Pair;
+ typedef T Value;
+
+ struct Pair {
+ Pair(const Code* c, const T v) : code(c), value(v) {
+ ASSERT(c != nullptr);
+ ASSERT(!c->IsNull());
+ ASSERT(c->IsNotTemporaryScopedHandle());
+ }
+ Pair() : code(nullptr), value{} {}
+
+ // Don't implcitly delete copy and copy assigment constructors.
+ Pair(const Pair& other) = default;
+ Pair& operator=(const Pair& other) = default;
+
+ const Code* code;
+ T value;
+ };
static Key KeyOf(Pair kv) { return kv.code; }
- static Value ValueOf(Pair kv) { return kv.segment_offset; }
+ static Value ValueOf(Pair kv) { return kv.value; }
static inline intptr_t Hashcode(Key key) {
- // Code objects are always allocated in old space, so they don't move.
- return key->PayloadStart();
+ // Instructions are always allocated in old space, so they don't move.
+ return FinalizeHash(key->PayloadStart(), 32);
}
static inline bool IsKeyEqual(Pair pair, Key key) {
+ // Code objects are always allocated in old space, so they don't move.
return pair.code->raw() == key->raw();
}
-
- CodeAddressPair(const Code* c, const SegmentRelativeOffset& o)
- : code(c), segment_offset(o) {
- ASSERT(!c->IsNull());
- ASSERT(c->IsNotTemporaryScopedHandle());
- ASSERT(o.offset == SegmentRelativeOffset::kUnknownOffset || o.offset >= 0);
- }
- CodeAddressPair() : code(nullptr), segment_offset() {}
-
- const Code* code;
- SegmentRelativeOffset segment_offset;
};
-typedef DirectChainedHashMap<CodeAddressPair> CodeAddressMap;
+template <typename T>
+using DwarfCodeMap = DirectChainedHashMap<DwarfCodeKeyValueTrait<T>>;
template <typename T>
class Trie : public ZoneAllocated {
@@ -256,15 +238,7 @@
const ZoneGrowableArray<const Code*>& codes() const { return codes_; }
// Stores the code object for later creating the line number program.
- //
- // Returns the stored index of the code object when the relocated address
- // is not known at snapshot generation time (that is, when offset.offset is
- // SegmentRelativeOffset::kUnknownOffset).
- intptr_t AddCode(const Code& code, const SegmentRelativeOffset& offset);
-
- // Returns the stored segment offset for the given Code object. If no
- // address is stored, the second element will be kNoCodeAddressPairOffset.
- SegmentRelativeOffset CodeAddress(const Code& code) const;
+ void AddCode(const Code& code, const char* name);
intptr_t AddFunction(const Function& function);
intptr_t AddScript(const Script& script);
@@ -331,8 +305,7 @@
void WriteInliningNode(DwarfWriteStream* stream,
InliningNode* node,
const char* root_code_name,
- const Script& parent_script,
- SnapshotTextObjectNamer* namer);
+ const Script& parent_script);
const char* Deobfuscate(const char* cstr);
static Trie<const char>* CreateReverseObfuscationTrie(Zone* zone);
@@ -340,7 +313,7 @@
Zone* const zone_;
Trie<const char>* const reverse_obfuscation_trie_;
ZoneGrowableArray<const Code*> codes_;
- CodeAddressMap code_to_address_;
+ DwarfCodeMap<const char*> code_to_name_;
ZoneGrowableArray<const Function*> functions_;
FunctionIndexMap function_to_index_;
ZoneGrowableArray<const Script*> scripts_;
diff --git a/runtime/vm/elf.cc b/runtime/vm/elf.cc
index 30c25bf..5098285 100644
--- a/runtime/vm/elf.cc
+++ b/runtime/vm/elf.cc
@@ -96,10 +96,6 @@
intptr_t info = 0;
intptr_t entry_size = 0;
- // Stores the name for the symbol that should be created in the dynamic (and
- // static, if unstripped) tables for this section.
- const char* symbol_name = nullptr;
-
#define FOR_EACH_SECTION_LINEAR_FIELD(M) \
M(name) \
M(index) \
@@ -493,17 +489,17 @@
alignment) {}
// For BitsContainers used as segments whose type differ on the type of the
- // ELF file. Creates an elf::SHT_NOBITS section if type is DebugInfo,
- // otherwise creates an elf::SHT_PROGBITS section.
+ // ELF file. Creates an elf::SHT_PROGBITS section if type is Snapshot,
+ // otherwise creates an elf::SHT_NOBITS section.
BitsContainer(Elf::Type t,
bool executable,
bool writable,
intptr_t size,
const uint8_t* bytes,
intptr_t alignment = kDefaultAlignment)
- : BitsContainer(t == Elf::Type::DebugInfo
- ? elf::SectionHeaderType::SHT_NOBITS
- : elf::SectionHeaderType::SHT_PROGBITS,
+ : BitsContainer(t == Elf::Type::Snapshot
+ ? elf::SectionHeaderType::SHT_PROGBITS
+ : elf::SectionHeaderType::SHT_NOBITS,
/*allocate=*/true,
executable,
writable,
@@ -552,6 +548,7 @@
}
intptr_t AddString(const char* str) {
+ ASSERT(str != nullptr);
if (auto const kv = text_indices_.Lookup(str)) return kv->value - 1;
intptr_t offset = text_.length();
text_.AddString(str);
@@ -578,12 +575,14 @@
public:
Symbol(const char* cstr,
intptr_t name,
- intptr_t info,
+ intptr_t binding,
+ intptr_t type,
intptr_t section,
intptr_t offset,
intptr_t size)
: name_index(name),
- info(info),
+ binding(binding),
+ type(type),
section_index(section),
offset(offset),
size(size),
@@ -595,11 +594,11 @@
#if defined(TARGET_ARCH_IS_32_BIT)
stream->WriteAddr(offset);
stream->WriteWord(size);
- stream->WriteByte(info);
+ stream->WriteByte(elf::SymbolInfo(binding, type));
stream->WriteByte(0);
stream->WriteHalf(section_index);
#else
- stream->WriteByte(info);
+ stream->WriteByte(elf::SymbolInfo(binding, type));
stream->WriteByte(0);
stream->WriteHalf(section_index);
stream->WriteAddr(offset);
@@ -609,7 +608,8 @@
}
const intptr_t name_index;
- const intptr_t info;
+ const intptr_t binding;
+ const intptr_t type;
const intptr_t section_index;
const intptr_t offset;
const intptr_t size;
@@ -622,19 +622,22 @@
class SymbolTable : public Section {
public:
- SymbolTable(Zone* zone, bool dynamic)
+ SymbolTable(Zone* zone, StringTable* table, bool dynamic)
: Section(dynamic ? elf::SectionHeaderType::SHT_DYNSYM
: elf::SectionHeaderType::SHT_SYMTAB,
dynamic,
/*executable=*/false,
/*writable=*/false),
+ zone_(zone),
+ table_(table),
dynamic_(dynamic),
- reserved_("", 0, 0, 0, 0, 0),
- symbols_(zone, 1) {
+ symbols_(zone, 1),
+ by_name_index_(zone) {
entry_size = sizeof(elf::Symbol);
// The first symbol table entry is reserved and must be all zeros.
- symbols_.Add(&reserved_);
- info = 1; // One "local" symbol, the reserved first entry.
+ // (String tables always have the empty string at the 0th index.)
+ AddSymbol("", elf::STB_LOCAL, elf::STT_NOTYPE, elf::SHN_UNDEF, /*offset=*/0,
+ /*size=*/0);
}
intptr_t FileSize() const { return Length() * entry_size; }
@@ -649,22 +652,49 @@
}
}
- void AddSymbol(const Symbol* symbol) { symbols_.Add(symbol); }
+ void AddSymbol(const char* name,
+ intptr_t binding,
+ intptr_t type,
+ intptr_t section_index,
+ intptr_t offset,
+ intptr_t size) {
+ ASSERT(!table_->HasBeenFinalized());
+ auto const name_index = table_->AddString(name);
+ ASSERT(by_name_index_.Lookup(name_index) == nullptr);
+ auto const symbol = new (zone_)
+ Symbol(name, name_index, binding, type, section_index, offset, size);
+ symbols_.Add(symbol);
+ by_name_index_.Insert(name_index, symbol);
+ // The info field on a symbol table section holds the index of the first
+ // non-local symbol, so they can be skipped if desired. Thus, we need to
+ // make sure local symbols are before any non-local ones.
+ if (binding == elf::STB_LOCAL) {
+ if (info != symbols_.length() - 1) {
+ // There are non-local symbols, as otherwise [info] would be the
+ // index of the new symbol. Since the order doesn't otherwise matter,
+ // swap the new local symbol with the value at index [info], so when
+ // [info] is incremented it will point just past the new local symbol.
+ ASSERT(symbols_[info]->binding != elf::STB_LOCAL);
+ symbols_.Swap(info, symbols_.length() - 1);
+ }
+ info += 1;
+ }
+ }
intptr_t Length() const { return symbols_.length(); }
const Symbol* At(intptr_t i) const { return symbols_[i]; }
- const Symbol* FindSymbolWithNameIndex(intptr_t name_index) const {
- for (intptr_t i = 0; i < Length(); i++) {
- auto const symbol = At(i);
- if (symbol->name_index == name_index) return symbol;
- }
- return nullptr;
+ const Symbol* Find(const char* name) const {
+ ASSERT(name != nullptr);
+ auto const name_index = table_->Lookup(name);
+ return by_name_index_.Lookup(name_index);
}
private:
+ Zone* const zone_;
+ StringTable* const table_;
const bool dynamic_;
- const Symbol reserved_;
GrowableArray<const Symbol*> symbols_;
+ mutable IntMap<const Symbol*> by_name_index_;
};
static uint32_t ElfHash(const unsigned char* name) {
@@ -809,7 +839,7 @@
};
// We assume that the final program table fits in a single page of memory.
-static const intptr_t kProgramTableSegmentSize = Elf::kPageSize;
+static constexpr intptr_t kProgramTableSegmentSize = Elf::kPageSize;
// Here, both VM and isolate will be compiled into a single snapshot.
// In assembly generation, each serialized text section gets a separate
@@ -817,11 +847,27 @@
// we may not serialize both VM and isolate. Here, we always serialize both,
// so make a BSS segment large enough for both, with the VM entries coming
// first.
-static constexpr const char* kSnapshotBssAsmSymbol = "_kDartBSSData";
-static const intptr_t kBssIsolateOffset =
+static constexpr intptr_t kBssVmSize =
BSS::kVmEntryCount * compiler::target::kWordSize;
-static const intptr_t kBssSize =
- kBssIsolateOffset + BSS::kIsolateEntryCount * compiler::target::kWordSize;
+static constexpr intptr_t kBssIsolateSize =
+ BSS::kIsolateEntryCount * compiler::target::kWordSize;
+static constexpr intptr_t kBssSize = kBssVmSize + kBssIsolateSize;
+
+// For the build ID, we generate a 128-bit hash, where each 32 bits is a hash of
+// the contents of the following segments in order:
+//
+// .text(VM) | .text(Isolate) | .rodata(VM) | .rodata(Isolate)
+static constexpr const char* kBuildIdSegmentNames[]{
+ kVmSnapshotInstructionsAsmSymbol,
+ kIsolateSnapshotInstructionsAsmSymbol,
+ kVmSnapshotDataAsmSymbol,
+ kIsolateSnapshotDataAsmSymbol,
+};
+static constexpr intptr_t kBuildIdSegmentNamesLength =
+ ARRAY_SIZE(kBuildIdSegmentNames);
+// Includes the note name, but not the description.
+static constexpr intptr_t kBuildIdHeaderSize =
+ sizeof(elf::Note) + sizeof(elf::ELF_NOTE_GNU);
Elf::Elf(Zone* zone, BaseWriteStream* stream, Type type, Dwarf* dwarf)
: zone_(zone),
@@ -831,7 +877,7 @@
bss_(CreateBSS(zone, type, kBssSize)),
shstrtab_(new (zone) StringTable(zone, /*allocate=*/false)),
dynstrtab_(new (zone) StringTable(zone, /*allocate=*/true)),
- dynsym_(new (zone) SymbolTable(zone, /*dynamic=*/true)) {
+ dynsym_(new (zone) SymbolTable(zone, dynstrtab_, /*dynamic=*/true)) {
// Separate debugging information should always have a Dwarf object.
ASSERT(type_ == Type::Snapshot || dwarf_ != nullptr);
// Assumed by various offset logic in this file.
@@ -842,7 +888,7 @@
if (!IsStripped()) {
// Not a stripped ELF file, so allocate static string and symbol tables.
strtab_ = new (zone_) StringTable(zone_, /* allocate= */ false);
- symtab_ = new (zone_) SymbolTable(zone, /*dynamic=*/false);
+ symtab_ = new (zone_) SymbolTable(zone, strtab_, /*dynamic=*/false);
}
// We add an initial segment to represent reserved space for the program
// header, and so we can always assume there's at least one segment in the
@@ -852,14 +898,17 @@
new (zone_) ProgramTableLoadSegment(zone_, kProgramTableSegmentSize);
segments_.Add(start_segment);
// We allocate an initial build ID of all zeroes, since we need the build ID
- // memory offset during ImageHeader creation (see BlobImageWriter::WriteText).
+ // memory offset for the InstructionsSection (see BlobImageWriter::WriteText).
// We replace it with the real build ID during finalization. (We add this
// prior to BSS because we make the BuildID section writable also, so they are
// placed in the same segment before any non-writable ones, and if we add it
// after, then in separate debugging information, it'll go into a separate
// segment because the BSS section for debugging info is NOBITS.)
- build_id_ = GenerateBuildId();
- AddSection(build_id_, kBuildIdNoteName, kSnapshotBuildIdAsmSymbol);
+ {
+ uint32_t zeroes[kBuildIdSegmentNamesLength] = {0};
+ build_id_ = CreateBuildIdNote(&zeroes, sizeof(zeroes));
+ AddSection(build_id_, kBuildIdNoteName, kSnapshotBuildIdAsmSymbol);
+ }
// Note that the BSS segment must be in the first user-defined segment because
// it cannot be placed in between any two non-writable segments, due to a bug
// in Jelly Bean's ELF loader. (For this reason, the program table segments
@@ -869,7 +918,16 @@
// We add it in all cases, even to the separate debugging information ELF,
// to ensure that relocated addresses are consistent between ELF snapshots
// and ELF separate debugging information.
- AddSection(bss_, ".bss", kSnapshotBssAsmSymbol);
+ auto const bss_start = AddSection(bss_, ".bss");
+ // For the BSS section, we add two local symbols to the static symbol table,
+ // one for each isolate. We use local symbols because these addresses are only
+ // used for relocation. (This matches the behavior in the assembly output,
+ // where these symbols are also local.)
+ AddStaticSymbol(kVmSnapshotBssAsmSymbol, elf::STB_LOCAL, elf::STT_SECTION,
+ bss_->index(), bss_start, kBssVmSize);
+ AddStaticSymbol(kIsolateSnapshotBssAsmSymbol, elf::STB_LOCAL,
+ elf::STT_SECTION, bss_->index(), bss_start + kBssVmSize,
+ kBssIsolateSize);
}
intptr_t Elf::NextMemoryOffset(intptr_t alignment) const {
@@ -882,8 +940,30 @@
return Utils::RoundUp(LastLoadSegment()->MemoryEnd(), alignment);
}
-uword Elf::BssStart(bool vm) const {
- return bss_->memory_offset() + (vm ? 0 : kBssIsolateOffset);
+uword Elf::SymbolAddress(const char* name) const {
+ ASSERT(name != nullptr);
+ // Check the static symbol table first if it exists, since the dynamic
+ // table is a subset of it. Fall back on the dynamic otherwise.
+ if (symtab_ != nullptr) {
+ if (auto const symbol = symtab_->Find(name)) {
+ return symbol->offset;
+ }
+ } else if (auto const symbol = dynsym_->Find(name)) {
+ return symbol->offset;
+ }
+ // If stripping, then we won't have symbols for the BSS sections because
+ // they're only added to the static symbol table. Check for these special
+ // cases before returning kNoSectionStart.
+ if (strcmp(name, kVmSnapshotBssAsmSymbol) == 0) {
+ ASSERT(bss_ != nullptr);
+ ASSERT(bss_->memory_offset_is_set());
+ return bss_->memory_offset();
+ } else if (strcmp(name, kIsolateSnapshotBssAsmSymbol) == 0) {
+ ASSERT(bss_ != nullptr);
+ ASSERT(bss_->memory_offset_is_set());
+ return bss_->memory_offset() + kBssVmSize;
+ }
+ return kNoSectionStart;
}
intptr_t Elf::AddSection(Section* section,
@@ -913,7 +993,11 @@
segments_.Add(segment);
}
if (symbol_name != nullptr) {
- section->symbol_name = symbol_name;
+ // While elf::STT_SECTION might seem more appropriate, section symbols are
+ // usually local and dlsym won't return them.
+ AddDynamicSymbol(symbol_name, elf::STB_GLOBAL, elf::STT_FUNC,
+ section->index(), section->memory_offset(),
+ section->MemorySize());
}
return section->memory_offset();
}
@@ -937,9 +1021,6 @@
}
intptr_t Elf::AddText(const char* name, const uint8_t* bytes, intptr_t size) {
- // When making a separate debugging info file for assembly, we don't have
- // the binary text segment contents.
- ASSERT(type_ == Type::DebugInfo || bytes != nullptr);
auto const image = new (zone_) BitsContainer(type_, /*executable=*/true,
/*writable=*/false, size, bytes,
ImageWriter::kTextAlignment);
@@ -948,7 +1029,7 @@
Section* Elf::CreateBSS(Zone* zone, Type type, intptr_t size) {
uint8_t* bytes = nullptr;
- if (type != Type::DebugInfo) {
+ if (type == Type::Snapshot) {
// Ideally the BSS segment would take no space in the object, but Android's
// "strip" utility truncates the memory-size of our segments to their
// file-size.
@@ -962,7 +1043,6 @@
}
intptr_t Elf::AddROData(const char* name, const uint8_t* bytes, intptr_t size) {
- ASSERT(bytes != nullptr);
auto const image = new (zone_) BitsContainer(type_, /*executable=*/false,
/*writable=*/false, size, bytes,
ImageWriter::kRODataAlignment);
@@ -977,33 +1057,43 @@
AddSection(image, name);
}
+void Elf::AddLocalSymbol(const char* name,
+ intptr_t type,
+ intptr_t offset,
+ intptr_t size) {
+ const intptr_t section_index = sections_.length();
+ // Assume the next section will go into its own segment (currently true
+ // because we write writable sections, data vm (non-writable, non-executable),
+ // text vm (executable), data isolate (non-executable), text isolate
+ // (executable), and we only call this for data and text sections).
+ const intptr_t address =
+ NextMemoryOffset(ImageWriter::kTextAlignment) + offset;
+ AddStaticSymbol(name, elf::STB_LOCAL, type, section_index, address, size);
+}
+
void Elf::AddDynamicSymbol(const char* name,
- intptr_t info,
+ intptr_t binding,
+ intptr_t type,
intptr_t section_index,
intptr_t address,
intptr_t size) {
- ASSERT(!dynstrtab_->HasBeenFinalized() && !dynsym_->HasBeenFinalized());
- auto const name_index = dynstrtab_->AddString(name);
- auto const symbol =
- new (zone_) Symbol(name, name_index, info, section_index, address, size);
- dynsym_->AddSymbol(symbol);
+ ASSERT(!dynsym_->HasBeenFinalized());
+ dynsym_->AddSymbol(name, binding, type, section_index, address, size);
// Some tools assume the static symbol table is a superset of the dynamic
// symbol table when it exists (see dartbug.com/41783).
- AddStaticSymbol(name, info, section_index, address, size);
+ AddStaticSymbol(name, binding, type, section_index, address, size);
}
void Elf::AddStaticSymbol(const char* name,
- intptr_t info,
+ intptr_t binding,
+ intptr_t type,
intptr_t section_index,
intptr_t address,
intptr_t size) {
if (IsStripped()) return; // No static info kept in stripped ELF files.
- ASSERT(!symtab_->HasBeenFinalized() && !strtab_->HasBeenFinalized());
- auto const name_index = strtab_->AddString(name);
- auto const symbol =
- new (zone_) Symbol(name, name_index, info, section_index, address, size);
- symtab_->AddSymbol(symbol);
+ ASSERT(!symtab_->HasBeenFinalized());
+ symtab_->AddSymbol(name, binding, type, section_index, address, size);
}
#if defined(DART_PRECOMPILER)
@@ -1011,10 +1101,10 @@
public:
explicit DwarfElfStream(Zone* zone,
NonStreamingWriteStream* stream,
- const CStringMap<intptr_t>& address_map)
+ const SymbolTable* table)
: zone_(ASSERT_NOTNULL(zone)),
stream_(ASSERT_NOTNULL(stream)),
- address_map_(address_map) {}
+ table_(table) {}
void sleb128(intptr_t value) {
bool is_last_part = false;
@@ -1070,19 +1160,15 @@
stream_->SetPosition(old_position);
}
void OffsetFromSymbol(const char* symbol, intptr_t offset) {
- auto const address = address_map_.LookupValue(symbol);
- ASSERT(address != 0);
- addr(address + offset);
+ addr(RelocatedAddress(symbol, offset));
}
void DistanceBetweenSymbolOffsets(const char* symbol1,
intptr_t offset1,
const char* symbol2,
intptr_t offset2) {
- auto const address1 = address_map_.LookupValue(symbol1);
- ASSERT(address1 != 0);
- auto const address2 = address_map_.LookupValue(symbol2);
- ASSERT(address2 != 0);
- auto const delta = (address1 + offset1) - (address2 + offset2);
+ auto const address1 = RelocatedAddress(symbol1, offset1);
+ auto const address2 = RelocatedAddress(symbol2, offset2);
+ auto const delta = address1 - address2;
RELEASE_ASSERT(delta >= 0);
uleb128(delta);
}
@@ -1098,6 +1184,12 @@
void AbstractOrigin(intptr_t index) { u4(abstract_origins_[index]); }
private:
+ uword RelocatedAddress(const char* name, intptr_t offset) {
+ auto const symbol = table_->Find(name);
+ ASSERT(symbol != nullptr);
+ return symbol->offset + offset;
+ }
+
void addr(uword value) {
#if defined(TARGET_ARCH_IS_32_BIT)
u4(value);
@@ -1108,7 +1200,7 @@
Zone* const zone_;
NonStreamingWriteStream* const stream_;
- const CStringMap<intptr_t>& address_map_;
+ const SymbolTable* table_;
uint32_t* abstract_origins_ = nullptr;
intptr_t abstract_origins_size_ = -1;
@@ -1141,86 +1233,29 @@
return nullptr;
}
-void Elf::AddSectionSymbols() {
- for (auto const section : sections_) {
- if (section->symbol_name == nullptr) continue;
- ASSERT(section->memory_offset_is_set());
- // While elf::STT_SECTION might seem more appropriate, those symbols are
- // usually local and dlsym won't return them.
- auto const info = (elf::STB_GLOBAL << 4) | elf::STT_FUNC;
- AddDynamicSymbol(section->symbol_name, info, section->index(),
- section->memory_offset(), section->MemorySize());
- }
-}
-
void Elf::FinalizeDwarfSections() {
if (dwarf_ == nullptr) return;
#if defined(DART_PRECOMPILER)
- // Add all the static symbols for Code objects. We'll keep a table of
- // symbol names to relocated addresses for use in the DwarfElfStream.
- // The default kNoValue of 0 is okay here, as no symbols are defined for
- // relocated address 0.
- CStringMap<intptr_t> symbol_to_address_map;
- // Prime the map with any existing static symbols.
- if (symtab_ != nullptr) {
- ASSERT(strtab_ != nullptr);
- // Skip the initial reserved entry in the symbol table.
- for (intptr_t i = 1; i < symtab_->Length(); i++) {
- auto const symbol = symtab_->At(i);
- auto const name = strtab_->At(symbol->name_index);
- symbol_to_address_map.Insert({name, symbol->offset});
- }
- }
-
- // Need these to turn offsets into relocated addresses.
- auto const vm_start =
- symbol_to_address_map.LookupValue(kVmSnapshotInstructionsAsmSymbol);
- // vm_start is absent in deferred loading peices.
- auto const isolate_start =
- symbol_to_address_map.LookupValue(kIsolateSnapshotInstructionsAsmSymbol);
- ASSERT(isolate_start > 0);
- auto const vm_text = FindSectionForAddress(vm_start);
- // vm_text is absent in deferred loading peices.
- auto const isolate_text = FindSectionForAddress(isolate_start);
- ASSERT(isolate_text != nullptr);
-
- SnapshotTextObjectNamer namer(zone_);
- const auto& codes = dwarf_->codes();
- if (codes.length() == 0) {
- return;
- }
- for (intptr_t i = 0; i < codes.length(); i++) {
- const auto& code = *codes[i];
- auto const name = namer.SnapshotNameFor(i, code);
- const auto& pair = dwarf_->CodeAddress(code);
- ASSERT(pair.offset > 0);
- auto const section = pair.vm ? vm_text : isolate_text;
- const intptr_t address = section->memory_offset() + pair.offset;
- auto const info = (elf::STB_GLOBAL << 4) | elf::STT_FUNC;
- AddStaticSymbol(name, info, section->index(), address, code.Size());
- symbol_to_address_map.Insert({name, address});
- }
-
- // TODO(rmacnak): Generate .debug_frame / .eh_frame / .arm.exidx to
- // provide unwinding information.
-
{
ZoneWriteStream stream(zone(), kInitialDwarfBufferSize);
- DwarfElfStream dwarf_stream(zone_, &stream, symbol_to_address_map);
+ // We can use symtab_ without checking because this is an unstripped
+ // snapshot or separate debugging information, both of which have static
+ // symbol tables, and the static symbol table is a superset of the dynamic.
+ DwarfElfStream dwarf_stream(zone_, &stream, symtab_);
dwarf_->WriteAbbreviations(&dwarf_stream);
AddDebug(".debug_abbrev", stream.buffer(), stream.bytes_written());
}
{
ZoneWriteStream stream(zone(), kInitialDwarfBufferSize);
- DwarfElfStream dwarf_stream(zone_, &stream, symbol_to_address_map);
+ DwarfElfStream dwarf_stream(zone_, &stream, symtab_);
dwarf_->WriteDebugInfo(&dwarf_stream);
AddDebug(".debug_info", stream.buffer(), stream.bytes_written());
}
{
ZoneWriteStream stream(zone(), kInitialDwarfBufferSize);
- DwarfElfStream dwarf_stream(zone_, &stream, symbol_to_address_map);
+ DwarfElfStream dwarf_stream(zone_, &stream, symtab_);
dwarf_->WriteLineNumberProgram(&dwarf_stream);
AddDebug(".debug_line", stream.buffer(), stream.bytes_written());
}
@@ -1228,15 +1263,7 @@
}
void Elf::Finalize() {
- AddSectionSymbols();
-
- // The Build ID depends on the symbols being in place, so must be run after
- // AddSectionSymbols(). Unfortunately, it currently depends on the contents
- // of the .text and .rodata sections, so it can't come earlier in the file
- // without changing how we add the .text and .rodata sections (since we
- // determine memory offsets for those sections when we add them, and the
- // text sections must have the memory offsets to do BSS relocations).
- if (auto const new_build_id = GenerateBuildId()) {
+ if (auto const new_build_id = GenerateFinalBuildId()) {
ReplaceSection(build_id_, new_build_id);
// Add a PT_NOTE segment for the build ID.
@@ -1268,13 +1295,13 @@
// extra padding _or_ determine file offsets before memory offsets. The
// latter would require us to handle BSS relocations during ELF finalization,
// instead of while writing the .text section content.
- FinalizeDwarfSections();
if (!IsStripped()) {
AddSection(strtab_, ".strtab");
AddSection(symtab_, ".symtab");
symtab_->link = strtab_->index();
}
AddSection(shstrtab_, ".shstrtab");
+ FinalizeDwarfSections();
// At this point, all non-programmatically calculated sections and segments
// have been added. Add any programatically calculated sections and segments
@@ -1290,39 +1317,13 @@
WriteSectionTable(&wrapped);
}
-// Need to include the final \0 terminator in both byte count and byte output.
-static const uint32_t kBuildIdNameLength = strlen(elf::ELF_NOTE_GNU) + 1;
-// We generate a 128-bit hash, where each 32 bits is a hash of the contents of
-// the following segments in order:
-//
-// .text(VM) | .text(Isolate) | .rodata(VM) | .rodata(Isolate)
-static constexpr intptr_t kBuildIdSegmentNamesLength = 4;
-static constexpr const char* kBuildIdSegmentNames[kBuildIdSegmentNamesLength]{
- kVmSnapshotInstructionsAsmSymbol,
- kIsolateSnapshotInstructionsAsmSymbol,
- kVmSnapshotDataAsmSymbol,
- kIsolateSnapshotDataAsmSymbol,
-};
-static constexpr uint32_t kBuildIdDescriptionLength =
- kBuildIdSegmentNamesLength * sizeof(uint32_t);
-static const intptr_t kBuildIdDescriptionOffset =
- sizeof(elf::Note) + kBuildIdNameLength;
-static const intptr_t kBuildIdSize =
- kBuildIdDescriptionOffset + kBuildIdDescriptionLength;
-
-static const Symbol* LookupSymbol(StringTable* strings,
- SymbolTable* symbols,
- const char* name) {
- ASSERT(strings != nullptr);
- ASSERT(symbols != nullptr);
- auto const name_index = strings->Lookup(name);
- if (name_index < 0) return nullptr;
- return symbols->FindSymbolWithNameIndex(name_index);
-}
-
static uint32_t HashBitsContainer(const BitsContainer* bits) {
uint32_t hash = 0;
auto const size = bits->MemorySize();
+ if (bits->bytes() == nullptr) {
+ // Just hash the size as a fallback if this section has no contents.
+ return FinalizeHash(size, 32);
+ }
auto const end = bits->bytes() + size;
auto const non_word_size = size % kWordSize;
auto const end_of_words =
@@ -1338,41 +1339,51 @@
return FinalizeHash(hash, 32);
}
-uword Elf::BuildIdStart(intptr_t* size) {
- ASSERT(size != nullptr);
- ASSERT(build_id_ != nullptr);
- *size = kBuildIdDescriptionLength;
- return build_id_->memory_offset() + kBuildIdDescriptionOffset;
-}
-
-Section* Elf::GenerateBuildId() {
- ZoneWriteStream stream(zone(), kBuildIdSize);
- stream.WriteFixed(kBuildIdNameLength);
- stream.WriteFixed(kBuildIdDescriptionLength);
- stream.WriteFixed(static_cast<uint32_t>(elf::NoteType::NT_GNU_BUILD_ID));
- stream.WriteBytes(elf::ELF_NOTE_GNU, kBuildIdNameLength);
- const intptr_t description_start = stream.bytes_written();
+Section* Elf::GenerateFinalBuildId() {
+ uint32_t hashes[kBuildIdSegmentNamesLength];
for (intptr_t i = 0; i < kBuildIdSegmentNamesLength; i++) {
auto const name = kBuildIdSegmentNames[i];
- auto const symbol = LookupSymbol(dynstrtab_, dynsym_, name);
+ auto const symbol = dynsym_->Find(name);
if (symbol == nullptr) {
- stream.WriteFixed(static_cast<uint32_t>(0));
- continue;
+ // If we're missing a section, then we don't generate a final build ID.
+ return nullptr;
}
auto const bits = sections_[symbol->section_index]->AsBitsContainer();
if (bits == nullptr) {
FATAL1("Section for symbol %s is not a BitsContainer", name);
}
+ if (bits->bytes() == nullptr) {
+ // For now, if we don't have section contents (because we're generating
+ // assembly), don't generate a final build ID, as we'll have different
+ // build IDs in the snapshot and the separate debugging information.
+ //
+ // TODO(dartbug.com/43274): Change once we generate consistent build IDs
+ // between assembly snapshots and their debugging information.
+ return nullptr;
+ }
ASSERT_EQUAL(bits->MemorySize(), symbol->size);
- // We don't actually have the bytes (i.e., this is a separate debugging
- // info file for an assembly snapshot), so we can't calculate the build ID.
- if (bits->bytes() == nullptr) return nullptr;
-
- stream.WriteFixed(HashBitsContainer(bits));
+ hashes[i] = HashBitsContainer(bits);
}
- ASSERT_EQUAL(stream.bytes_written() - description_start,
- kBuildIdDescriptionLength);
- ASSERT_EQUAL(stream.bytes_written(), kBuildIdSize);
+ // To ensure we can quickly check for a final build ID, we ensure the first
+ // byte contains a non-zero value.
+ auto const bytes = reinterpret_cast<uint8_t*>(hashes);
+ if (bytes[0] == 0) {
+ bytes[0] = 1;
+ }
+ return CreateBuildIdNote(&hashes, sizeof(hashes));
+}
+
+Section* Elf::CreateBuildIdNote(const void* description_bytes,
+ intptr_t description_length) {
+ ASSERT(description_length == 0 || description_bytes != nullptr);
+ ZoneWriteStream stream(zone(), kBuildIdHeaderSize + description_length);
+ stream.WriteFixed<decltype(elf::Note::name_size)>(sizeof(elf::ELF_NOTE_GNU));
+ stream.WriteFixed<decltype(elf::Note::description_size)>(description_length);
+ stream.WriteFixed<decltype(elf::Note::type)>(elf::NoteType::NT_GNU_BUILD_ID);
+ ASSERT_EQUAL(stream.Position(), sizeof(elf::Note));
+ stream.WriteBytes(elf::ELF_NOTE_GNU, sizeof(elf::ELF_NOTE_GNU));
+ ASSERT_EQUAL(stream.bytes_written(), kBuildIdHeaderSize);
+ stream.WriteBytes(description_bytes, description_length);
// While the build ID section does not need to be writable, it and the
// BSS section are allocated segments at the same time. Having the same flags
// ensures they will be combined in the same segment and not unnecessarily
diff --git a/runtime/vm/elf.h b/runtime/vm/elf.h
index 37b2fc8..d0f8a96 100644
--- a/runtime/vm/elf.h
+++ b/runtime/vm/elf.h
@@ -33,6 +33,7 @@
Elf(Zone* zone, BaseWriteStream* stream, Type type, Dwarf* dwarf = nullptr);
static constexpr intptr_t kPageSize = 4096;
+ static constexpr uword kNoSectionStart = 0;
bool IsStripped() const { return dwarf_ == nullptr; }
@@ -40,18 +41,25 @@
const Dwarf* dwarf() const { return dwarf_; }
Dwarf* dwarf() { return dwarf_; }
- uword BssStart(bool vm) const;
- uword BuildIdStart(intptr_t* size);
+ // Returns the relocated address for the symbol with the given name or
+ // kNoSectionStart if the symbol was not found.
+ uword SymbolAddress(const char* name) const;
// What the next memory offset for an appropriately aligned section would be.
//
- // Only used by BlobImageWriter::WriteText() to determine the memory offset
- // for the text section before it is added.
+ // Only used by AssemblyImageWriter and BlobImageWriter methods.
intptr_t NextMemoryOffset(intptr_t alignment) const;
intptr_t AddText(const char* name, const uint8_t* bytes, intptr_t size);
intptr_t AddROData(const char* name, const uint8_t* bytes, intptr_t size);
void AddDebug(const char* name, const uint8_t* bytes, intptr_t size);
+ // Adds a local symbol for the given offset and size in the "current" section,
+ // that is, the section index for the symbol is for the next added section.
+ void AddLocalSymbol(const char* name,
+ intptr_t type,
+ intptr_t offset,
+ intptr_t size);
+
void Finalize();
private:
@@ -76,19 +84,23 @@
void ReplaceSection(Section* old_section, Section* new_section);
void AddStaticSymbol(const char* name,
- intptr_t info,
+ intptr_t binding,
+ intptr_t type,
intptr_t section_index,
intptr_t address,
intptr_t size);
void AddDynamicSymbol(const char* name,
- intptr_t info,
+ intptr_t binding,
+ intptr_t type,
intptr_t section_index,
intptr_t address,
intptr_t size);
Segment* LastLoadSegment() const;
const Section* FindSectionForAddress(intptr_t address) const;
- Section* GenerateBuildId();
+ Section* CreateBuildIdNote(const void* description_bytes,
+ intptr_t description_length);
+ Section* GenerateFinalBuildId();
void AddSectionSymbols();
void FinalizeDwarfSections();
@@ -123,7 +135,7 @@
SymbolTable* symtab_ = nullptr;
// We always create a GNU build ID for all Elf files. In order to create
- // the appropriate offset to it in an ImageHeader object, we create an
+ // the appropriate offset to it in an InstructionsSection object, we create an
// initial build ID section as a placeholder and then replace that section
// during finalization once we have the information to calculate the real one.
Section* build_id_;
diff --git a/runtime/vm/image_snapshot.cc b/runtime/vm/image_snapshot.cc
index ba82175..4e53de4 100644
--- a/runtime/vm/image_snapshot.cc
+++ b/runtime/vm/image_snapshot.cc
@@ -6,6 +6,7 @@
#include "include/dart_api.h"
#include "platform/assert.h"
+#include "platform/elf.h"
#include "vm/bss_relocs.h"
#include "vm/class_id.h"
#include "vm/compiler/runtime_api.h"
@@ -41,14 +42,24 @@
"Print sizes of all instruction objects to the given file");
#endif
-const ImageHeaderLayout* Image::ExtraInfo(const uword raw_memory,
- const uword size) {
+const InstructionsSectionLayout* Image::ExtraInfo(const uword raw_memory,
+ const uword size) {
#if defined(DART_PRECOMPILED_RUNTIME)
- auto const raw_value = FieldValue(raw_memory, HeaderField::ImageHeaderOffset);
- if (raw_value != kNoImageHeader) {
+ auto const raw_value =
+ FieldValue(raw_memory, HeaderField::InstructionsSectionOffset);
+ if (raw_value != kNoInstructionsSection) {
ASSERT(raw_value >= kHeaderSize);
- ASSERT(raw_value <= size - ImageHeader::InstanceSize());
- return reinterpret_cast<const ImageHeaderLayout*>(raw_memory + raw_value);
+ ASSERT(raw_value <= size - InstructionsSection::HeaderSize());
+ auto const layout = reinterpret_cast<const InstructionsSectionLayout*>(
+ raw_memory + raw_value);
+ // The instructions section is likely non-empty in bare instructions mode
+ // (unless splitting into multiple outputs and there are no Code objects
+ // in this particular output), but is guaranteed empty otherwise (the
+ // instructions follow the InstructionsSection object instead).
+ ASSERT(FLAG_use_bare_instructions || layout->payload_length_ == 0);
+ ASSERT(raw_value <=
+ size - InstructionsSection::InstanceSize(layout->payload_length_));
+ return layout;
}
#endif
return nullptr;
@@ -87,8 +98,15 @@
#if defined(DART_PRECOMPILED_RUNTIME)
ASSERT(extra_info_ != nullptr);
if (extra_info_->build_id_offset_ != kNoBuildId) {
- return reinterpret_cast<const uint8_t*>(raw_memory_ +
- extra_info_->build_id_offset_);
+ auto const note = reinterpret_cast<elf::Note*>(
+ raw_memory_ + extra_info_->build_id_offset_);
+ // Check that we have a final build ID. A non-final build ID will either
+ // have a description length of 0 or an initial byte of 0.
+ auto const description = note->data + note->name_size;
+ auto const length = note->description_size;
+ if (length != 0 && description[0] != 0) {
+ return description;
+ }
}
#endif
return nullptr;
@@ -97,10 +115,19 @@
intptr_t Image::build_id_length() const {
#if defined(DART_PRECOMPILED_RUNTIME)
ASSERT(extra_info_ != nullptr);
- return extra_info_->build_id_length_;
-#else
- return 0;
+ if (extra_info_->build_id_offset_ != kNoBuildId) {
+ auto const note = reinterpret_cast<elf::Note*>(
+ raw_memory_ + extra_info_->build_id_offset_);
+ // Check that we have a final build ID. A non-final build ID will either
+ // have a description length of 0 or an initial byte of 0.
+ auto const description = note->data + note->name_size;
+ auto const length = note->description_size;
+ if (length != 0 && description[0] != 0) {
+ return length;
+ }
+ }
#endif
+ return 0;
}
bool Image::compiled_to_elf() const {
@@ -164,7 +191,6 @@
objects_(),
instructions_(),
image_type_(TagObjectTypeAsReadOnly(t->zone(), "Image")),
- image_header_type_(TagObjectTypeAsReadOnly(t->zone(), "ImageHeader")),
instructions_section_type_(
TagObjectTypeAsReadOnly(t->zone(), "InstructionsSection")),
instructions_type_(TagObjectTypeAsReadOnly(t->zone(), "Instructions")),
@@ -423,7 +449,7 @@
}
// Needs to happen before WriteText, as we add information about the
- // BSSsection in the text section as an initial ImageHeader object.
+  // BSS section in the text section as an initial InstructionsSection object.
WriteBss(vm);
offset_space_ = vm ? V8SnapshotProfileWriter::kVmText
@@ -449,10 +475,10 @@
intptr_t section_start = stream->Position();
stream->WriteWord(next_data_offset_); // Data length.
- stream->WriteWord(0); // No ImageHeader object in data sections.
+ stream->WriteWord(Image::kNoInstructionsSection);
// Zero values for the rest of the Image object header bytes.
stream->Align(Image::kHeaderSize);
- ASSERT(stream->Position() - section_start == Image::kHeaderSize);
+ ASSERT_EQUAL(stream->Position() - section_start, Image::kHeaderSize);
#if defined(DART_PRECOMPILER)
if (profile_writer_ != nullptr) {
const intptr_t end_position = stream->Position();
@@ -466,7 +492,9 @@
for (intptr_t i = 0; i < objects_.length(); i++) {
const Object& obj = *objects_[i].obj_;
+#if defined(DART_PRECOMPILER)
AutoTraceImage(obj, section_start, stream);
+#endif
auto const object_start = stream->Position();
NoSafepointScope no_safepoint;
@@ -537,7 +565,302 @@
GetMarkedTags(obj.raw()->GetClassId(), SizeInSnapshot(obj));
}
+const char* ImageWriter::SectionSymbol(ProgramSection section, bool vm) const {
+ switch (section) {
+ case ProgramSection::Text:
+ return vm ? kVmSnapshotInstructionsAsmSymbol
+ : kIsolateSnapshotInstructionsAsmSymbol;
+ case ProgramSection::Data:
+ return vm ? kVmSnapshotDataAsmSymbol : kIsolateSnapshotDataAsmSymbol;
+ case ProgramSection::Bss:
+ return vm ? kVmSnapshotBssAsmSymbol : kIsolateSnapshotBssAsmSymbol;
+ case ProgramSection::BuildId:
+ return kSnapshotBuildIdAsmSymbol;
+ }
+ return nullptr;
+}
+
+void ImageWriter::WriteText(bool vm) {
+ Zone* zone = Thread::Current()->zone();
+
+ const bool bare_instruction_payloads =
+ FLAG_precompiled_mode && FLAG_use_bare_instructions;
+
+ // Start snapshot at page boundary.
+ ASSERT(ImageWriter::kTextAlignment >= VirtualMemory::PageSize());
+ if (!EnterSection(ProgramSection::Text, vm, ImageWriter::kTextAlignment)) {
+ return;
+ }
+
+ intptr_t text_offset = 0;
#if defined(DART_PRECOMPILER)
+ // Parent used for later profile objects. Starts off as the Image. When
+ // writing bare instructions payloads, this is later updated with the
+ // InstructionsSection object which contains all the bare payloads.
+ V8SnapshotProfileWriter::ObjectId parent_id(offset_space_, text_offset);
+#endif
+
+  // This header also provides the gap to make the instructions snapshot
+  // look like an OldPage.
+ const intptr_t image_size = Utils::RoundUp(
+ next_text_offset_, compiler::target::ObjectAlignment::kObjectAlignment);
+ text_offset += WriteTargetWord(image_size);
+ // Output the offset to the InstructionsSection object from the start of the
+ // image, if any.
+ text_offset +=
+ WriteTargetWord(FLAG_precompiled_mode ? Image::kHeaderSize
+ : Image::kNoInstructionsSection);
+ // Zero values for the rest of the Image object header bytes.
+ text_offset += Align(Image::kHeaderSize, text_offset);
+ ASSERT_EQUAL(text_offset, Image::kHeaderSize);
+
+#if defined(DART_PRECOMPILER)
+ const char* instructions_symbol = SectionSymbol(ProgramSection::Text, vm);
+ ASSERT(instructions_symbol != nullptr);
+ const char* bss_symbol = SectionSymbol(ProgramSection::Bss, vm);
+ ASSERT(bss_symbol != nullptr);
+
+ if (FLAG_precompiled_mode) {
+ if (profile_writer_ != nullptr) {
+ profile_writer_->SetObjectTypeAndName(parent_id, image_type_,
+ instructions_symbol);
+ profile_writer_->AttributeBytesTo(parent_id, Image::kHeaderSize);
+ profile_writer_->AddRoot(parent_id);
+ }
+
+ const intptr_t section_header_length =
+ compiler::target::InstructionsSection::HeaderSize();
+ // Calculated using next_text_offset_, which doesn't include post-payload
+ // padding to object alignment. Note that if not in bare instructions mode,
+ // the section has no contents, instead the instructions objects follow it.
+ const intptr_t section_payload_length =
+ bare_instruction_payloads
+ ? next_text_offset_ - text_offset - section_header_length
+ : 0;
+ const intptr_t section_size =
+ compiler::target::InstructionsSection::InstanceSize(
+ section_payload_length);
+
+ const V8SnapshotProfileWriter::ObjectId id(offset_space_, text_offset);
+ if (profile_writer_ != nullptr) {
+ profile_writer_->SetObjectTypeAndName(id, instructions_section_type_,
+ instructions_symbol);
+ profile_writer_->AttributeBytesTo(id,
+ section_size - section_payload_length);
+ const intptr_t element_offset = id.second - parent_id.second;
+ profile_writer_->AttributeReferenceTo(
+ parent_id,
+ {id, V8SnapshotProfileWriter::Reference::kElement, element_offset});
+ // Later objects will have the InstructionsSection as a parent if in
+ // bare instructions mode, otherwise the image.
+ if (bare_instruction_payloads) {
+ parent_id = id;
+ }
+ }
+
+ // Add the RawInstructionsSection header.
+ text_offset +=
+ WriteTargetWord(GetMarkedTags(kInstructionsSectionCid, section_size));
+ // An InstructionsSection has five fields:
+ // 1) The length of the payload.
+ text_offset += WriteTargetWord(section_payload_length);
+ // 2) The BSS offset from this section.
+ text_offset += Relocation(text_offset, instructions_symbol, bss_symbol);
+ // 3) The relocated address of the instructions.
+ text_offset += WriteTargetWord(RelocatedAddress(instructions_symbol));
+ // 4) The GNU build ID note offset from this section.
+ text_offset += Relocation(text_offset, instructions_symbol,
+ SectionSymbol(ProgramSection::BuildId, vm));
+
+ const intptr_t section_contents_alignment =
+ bare_instruction_payloads
+ ? compiler::target::Instructions::kBarePayloadAlignment
+ : compiler::target::ObjectAlignment::kObjectAlignment;
+ const intptr_t expected_size =
+ bare_instruction_payloads
+ ? compiler::target::InstructionsSection::HeaderSize()
+ : compiler::target::InstructionsSection::InstanceSize(0);
+ text_offset += Align(section_contents_alignment, text_offset);
+ ASSERT_EQUAL(text_offset - id.second, expected_size);
+ }
+#endif
+
+ FrameUnwindPrologue();
+
+ PcDescriptors& descriptors = PcDescriptors::Handle(zone);
+#if defined(DART_PRECOMPILER)
+ SnapshotTextObjectNamer namer(zone);
+#endif
+
+ ASSERT(offset_space_ != V8SnapshotProfileWriter::kSnapshot);
+ for (intptr_t i = 0; i < instructions_.length(); i++) {
+ auto& data = instructions_[i];
+ const bool is_trampoline = data.trampoline_bytes != nullptr;
+ ASSERT_EQUAL(data.text_offset_, text_offset);
+
+#if defined(DART_PRECOMPILER)
+ // We won't add trampolines as symbols, so their name need not be unique
+ // across different WriteText() calls.
+ const char* object_name = namer.SnapshotNameFor(
+ is_trampoline ? i : unique_symbol_counter_++, data);
+
+ if (profile_writer_ != nullptr) {
+ const V8SnapshotProfileWriter::ObjectId id(offset_space_, text_offset);
+ auto const type = is_trampoline ? trampoline_type_ : instructions_type_;
+ const intptr_t size = is_trampoline ? data.trampoline_length
+ : SizeInSnapshot(data.insns_->raw());
+ profile_writer_->SetObjectTypeAndName(id, type, object_name);
+ profile_writer_->AttributeBytesTo(id, size);
+ const intptr_t element_offset = id.second - parent_id.second;
+ profile_writer_->AttributeReferenceTo(
+ parent_id,
+ {id, V8SnapshotProfileWriter::Reference::kElement, element_offset});
+ }
+#endif
+
+ if (is_trampoline) {
+ text_offset += WriteBytes(data.trampoline_bytes, data.trampoline_length);
+ delete[] data.trampoline_bytes;
+ data.trampoline_bytes = nullptr;
+ continue;
+ }
+
+ const intptr_t instr_start = text_offset;
+ const auto& code = *data.code_;
+ const auto& insns = *data.insns_;
+
+ // 1. Write from the object start to the payload start. This includes the
+ // object header and the fixed fields. Not written for AOT snapshots using
+ // bare instructions.
+ if (!bare_instruction_payloads) {
+ NoSafepointScope no_safepoint;
+
+ // Write Instructions with the mark and read-only bits set.
+ text_offset += WriteTargetWord(GetMarkedTags(insns));
+ text_offset += WriteFixed(insns.raw_ptr()->size_and_flags_);
+ text_offset +=
+ Align(compiler::target::Instructions::kNonBarePayloadAlignment,
+ text_offset);
+ }
+
+ ASSERT_EQUAL(text_offset - instr_start,
+ compiler::target::Instructions::HeaderSize());
+
+#if defined(DART_PRECOMPILER)
+ // 2. Add a symbol for the code at the entry point in precompiled snapshots.
+ // Linux's perf uses these labels.
+ AddCodeSymbol(code, object_name, text_offset);
+#endif
+
+ {
+ NoSafepointScope no_safepoint;
+
+ // 3. Write from the payload start to payload end. For AOT snapshots
+ // with bare instructions, this is the only part serialized other than
+ // any padding needed for alignment.
+ auto const payload_start =
+ reinterpret_cast<const uint8_t*>(insns.PayloadStart());
+ // Double-check the payload alignment, since we will load and write
+ // target-sized words starting from that address.
+ ASSERT(Utils::IsAligned(payload_start, compiler::target::kWordSize));
+ const uword payload_size = insns.Size();
+ descriptors = code.pc_descriptors();
+ PcDescriptors::Iterator iterator(
+ descriptors, /*kind_mask=*/PcDescriptorsLayout::kBSSRelocation);
+
+ auto const payload_end = payload_start + payload_size;
+ auto cursor = payload_start;
+ while (iterator.MoveNext()) {
+ ASSERT(FLAG_precompiled_mode);
+ auto const next_reloc_offset = iterator.PcOffset();
+ auto const next_reloc_address = payload_start + next_reloc_offset;
+ // We only generate BSS relocations that are target word-sized and at
+      // target word-aligned offsets in the payload. Double-check this.
+ ASSERT(
+ Utils::IsAligned(next_reloc_address, compiler::target::kWordSize));
+ text_offset += WriteBytes(cursor, next_reloc_address - cursor);
+
+#if defined(DART_PRECOMPILER)
+ // The instruction stream at the relocation position holds an offset
+ // into BSS corresponding to the symbol being resolved. This addend is
+ // factored into the relocation.
+ const auto addend = *reinterpret_cast<const compiler::target::word*>(
+ next_reloc_address);
+ text_offset += Relocation(text_offset, instructions_symbol, text_offset,
+ bss_symbol, /*target_offset=*/0, addend);
+#endif
+ cursor = next_reloc_address + compiler::target::kWordSize;
+ }
+ text_offset += WriteBytes(cursor, payload_end - cursor);
+ }
+
+ // 4. Add appropriate padding. Note we can't simply copy from the object
+ // because the host object may have less alignment filler than the target
+ // object in the cross-word case.
+ const intptr_t alignment =
+ bare_instruction_payloads
+ ? compiler::target::Instructions::kBarePayloadAlignment
+ : compiler::target::ObjectAlignment::kObjectAlignment;
+ text_offset += AlignWithBreakInstructions(alignment, text_offset);
+
+ ASSERT_EQUAL(text_offset - instr_start, SizeInSnapshot(insns.raw()));
+ }
+
+ // Should be a no-op unless writing bare instruction payloads, in which case
+ // we need to add post-payload padding for the InstructionsSection object.
+ // Since this follows instructions, we'll use break instructions for padding.
+ ASSERT(bare_instruction_payloads ||
+ Utils::IsAligned(text_offset,
+ compiler::target::ObjectAlignment::kObjectAlignment));
+ text_offset += AlignWithBreakInstructions(
+ compiler::target::ObjectAlignment::kObjectAlignment, text_offset);
+
+ ASSERT_EQUAL(text_offset, image_size);
+
+ FrameUnwindEpilogue();
+
+ ExitSection(ProgramSection::Text, vm, text_offset);
+}
+
+intptr_t ImageWriter::AlignWithBreakInstructions(intptr_t alignment,
+ intptr_t offset) {
+ intptr_t bytes_written = 0;
+ uword remaining;
+ for (remaining = Utils::RoundUp(offset, alignment) - offset;
+ remaining >= compiler::target::kWordSize;
+ remaining -= compiler::target::kWordSize) {
+ bytes_written += WriteTargetWord(kBreakInstructionFiller);
+ }
+#if defined(TARGET_ARCH_ARM)
+ // All instructions are 4 bytes long on ARM architectures, so on 32-bit ARM
+ // there won't be any padding.
+ ASSERT_EQUAL(remaining, 0);
+#elif defined(TARGET_ARCH_ARM64)
+ // All instructions are 4 bytes long on ARM architectures, so on 64-bit ARM
+ // there is only 0 or 4 bytes of padding.
+ if (remaining != 0) {
+ ASSERT_EQUAL(remaining, 4);
+ bytes_written += WriteBytes(&kBreakInstructionFiller, remaining);
+ }
+#elif defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_IA32)
+ // The break instruction is a single byte, repeated to fill a word.
+ bytes_written += WriteBytes(&kBreakInstructionFiller, remaining);
+#else
+#error Unexpected architecture.
+#endif
+ ASSERT_EQUAL(bytes_written, Utils::RoundUp(offset, alignment) - offset);
+ return bytes_written;
+}
+
+#if defined(DART_PRECOMPILER)
+
+// Indices are log2(size in bytes).
+static constexpr const char* kSizeDirectives[] = {".byte", ".2byte", ".long",
+ ".quad"};
+
+static constexpr const char* kWordDirective =
+ kSizeDirectives[compiler::target::kWordSizeLog2];
+
class DwarfAssemblyStream : public DwarfWriteStream {
public:
explicit DwarfAssemblyStream(BaseWriteStream* stream)
@@ -547,11 +870,19 @@
void uleb128(uintptr_t value) {
stream_->Printf(".uleb128 %" Pd "\n", value);
}
- void u1(uint8_t value) { stream_->Printf(".byte %u\n", value); }
- void u2(uint16_t value) { stream_->Printf(".2byte %u\n", value); }
- void u4(uint32_t value) { stream_->Printf(".4byte %" Pu32 "\n", value); }
- void u8(uint64_t value) { stream_->Printf(".8byte %" Pu64 "\n", value); }
- void string(const char* cstr) { // NOLINT
+ void u1(uint8_t value) {
+ stream_->Printf("%s %u\n", kSizeDirectives[kInt8SizeLog2], value);
+ }
+ void u2(uint16_t value) {
+ stream_->Printf("%s %u\n", kSizeDirectives[kInt16SizeLog2], value);
+ }
+ void u4(uint32_t value) {
+ stream_->Printf("%s %" Pu32 "\n", kSizeDirectives[kInt32SizeLog2], value);
+ }
+ void u8(uint64_t value) {
+ stream_->Printf("%s %" Pu64 "\n", kSizeDirectives[kInt64SizeLog2], value);
+ }
+ void string(const char* cstr) { // NOLINT
stream_->Printf(".string \"%s\"\n", cstr); // NOLINT
}
// Uses labels, so doesn't output to start or return a useful fixup position.
@@ -559,7 +890,7 @@
// Assignment to temp works around buggy Mac assembler.
stream_->Printf("L%s_size = .L%s_end - .L%s_start\n", prefix, prefix,
prefix);
- stream_->Printf(".4byte L%s_size\n", prefix);
+ stream_->Printf("%s L%s_size\n", kSizeDirectives[kInt32SizeLog2], prefix);
stream_->Printf(".L%s_start:\n", prefix);
return -1;
}
@@ -593,7 +924,8 @@
// Assignment to temp works around buggy Mac assembler.
stream_->Printf("Ltemp%" Pd " = .Lfunc%" Pd " - %s\n", temp_, index,
kDebugInfoLabel);
- stream_->Printf(".4byte Ltemp%" Pd "\n", temp_);
+ stream_->Printf("%s Ltemp%" Pd "\n", kSizeDirectives[kInt32SizeLog2],
+ temp_);
temp_++;
}
@@ -634,30 +966,20 @@
private:
static constexpr const char* kDebugInfoLabel = ".Ldebug_info";
-#if defined(TARGET_ARCH_IS_32_BIT)
-#define FORM_ADDR ".4byte"
-#elif defined(TARGET_ARCH_IS_64_BIT)
-#define FORM_ADDR ".8byte"
-#endif
-
void PrintNamedAddress(const char* name) {
- stream_->Printf(FORM_ADDR " %s\n", name);
+ stream_->Printf("%s %s\n", kWordDirective, name);
}
void PrintNamedAddressWithOffset(const char* name, intptr_t offset) {
- stream_->Printf(FORM_ADDR " %s + %" Pd "\n", name, offset);
+ stream_->Printf("%s %s + %" Pd "\n", kWordDirective, name, offset);
}
-#undef FORM_ADDR
-
BaseWriteStream* const stream_;
intptr_t temp_ = 0;
DISALLOW_COPY_AND_ASSIGN(DwarfAssemblyStream);
};
-#endif
static inline Dwarf* AddDwarfIfUnstripped(Zone* zone, bool strip, Elf* elf) {
-#if defined(DART_PRECOMPILER)
if (!strip) {
if (elf != nullptr) {
// Reuse the existing DWARF object.
@@ -666,7 +988,6 @@
}
return new (zone) Dwarf(zone);
}
-#endif
return nullptr;
}
@@ -680,7 +1001,6 @@
debug_elf_(debug_elf) {}
void AssemblyImageWriter::Finalize() {
-#if defined(DART_PRECOMPILER)
if (assembly_dwarf_ != nullptr) {
DwarfAssemblyStream dwarf_stream(assembly_stream_);
dwarf_stream.AbbreviationsPrologue();
@@ -693,10 +1013,8 @@
if (debug_elf_ != nullptr) {
debug_elf_->Finalize();
}
-#endif
}
-#if !defined(DART_PRECOMPILED_RUNTIME)
static void EnsureAssemblerIdentifier(char* label) {
for (char c = *label; c != '\0'; c = *++label) {
if (((c >= 'a') && (c <= 'z')) || ((c >= 'A') && (c <= 'Z')) ||
@@ -706,7 +1024,6 @@
*label = '_';
}
}
-#endif // !defined(DART_PRECOMPILED_RUNTIME)
const char* SnapshotTextObjectNamer::SnapshotNameFor(intptr_t code_index,
const Code& code) {
@@ -751,377 +1068,174 @@
return SnapshotNameFor(index, *data.code_);
}
-#if defined(DART_PRECOMPILER)
-static const char* const kVmSnapshotBssAsmSymbol = "_kDartVmSnapshotBss";
-static const char* const kIsolateSnapshotBssAsmSymbol =
- "_kDartIsolateSnapshotBss";
-#endif
-
void AssemblyImageWriter::WriteBss(bool vm) {
-#if defined(DART_PRECOMPILER)
- auto const bss_symbol =
- vm ? kVmSnapshotBssAsmSymbol : kIsolateSnapshotBssAsmSymbol;
- assembly_stream_->WriteString(".bss\n");
- // Align the BSS contents as expected by the Image class.
- Align(ImageWriter::kBssAlignment);
- assembly_stream_->Printf("%s:\n", bss_symbol);
-
+ EnterSection(ProgramSection::Bss, vm, ImageWriter::kBssAlignment);
auto const entry_count = vm ? BSS::kVmEntryCount : BSS::kIsolateEntryCount;
for (intptr_t i = 0; i < entry_count; i++) {
- WriteWordLiteralText(0);
+ // All bytes in the .bss section must be zero.
+ WriteTargetWord(0);
}
-#endif
+ ExitSection(ProgramSection::Bss, vm,
+ entry_count * compiler::target::kWordSize);
}
void AssemblyImageWriter::WriteROData(NonStreamingWriteStream* clustered_stream,
bool vm) {
ImageWriter::WriteROData(clustered_stream, vm);
-#if defined(DART_PRECOMPILED_RUNTIME)
- UNREACHABLE();
-#else
-#if defined(TARGET_OS_LINUX) || defined(TARGET_OS_ANDROID) || \
- defined(TARGET_OS_FUCHSIA)
- assembly_stream_->WriteString(".section .rodata\n");
-#elif defined(TARGET_OS_MACOS) || defined(TARGET_OS_MACOS_IOS)
- assembly_stream_->WriteString(".const\n");
-#else
- UNIMPLEMENTED();
-#endif
-
- const char* data_symbol =
- vm ? kVmSnapshotDataAsmSymbol : kIsolateSnapshotDataAsmSymbol;
- assembly_stream_->Printf(".globl %s\n", data_symbol);
- Align(ImageWriter::kRODataAlignment);
- assembly_stream_->Printf("%s:\n", data_symbol);
- const uword buffer = reinterpret_cast<uword>(clustered_stream->buffer());
- const intptr_t length = clustered_stream->bytes_written();
- WriteByteSequence(buffer, buffer + length);
-#if defined(DART_PRECOMPILER)
- if (debug_elf_ != nullptr) {
- // Add a NoBits section for the ROData as well.
- debug_elf_->AddROData(data_symbol, clustered_stream->buffer(), length);
+ if (!EnterSection(ProgramSection::Data, vm, ImageWriter::kRODataAlignment)) {
+ return;
}
-#endif // defined(DART_PRECOMPILER)
-#endif // !defined(DART_PRECOMPILED_RUNTIME)
+ WriteBytes(clustered_stream->buffer(), clustered_stream->bytes_written());
+ ExitSection(ProgramSection::Data, vm, clustered_stream->bytes_written());
}
-void AssemblyImageWriter::WriteText(bool vm) {
-#if defined(DART_PRECOMPILED_RUNTIME)
- UNREACHABLE();
+bool AssemblyImageWriter::EnterSection(ProgramSection section,
+ bool vm,
+ intptr_t alignment) {
+ ASSERT(FLAG_precompiled_mode);
+ ASSERT(current_section_symbol_ == nullptr);
+ bool global_symbol = false;
+ switch (section) {
+ case ProgramSection::Text:
+ assembly_stream_->WriteString(".text\n");
+ global_symbol = true;
+ break;
+ case ProgramSection::Data:
+#if defined(TARGET_OS_LINUX) || defined(TARGET_OS_ANDROID) || \
+ defined(TARGET_OS_FUCHSIA)
+ assembly_stream_->WriteString(".section .rodata\n");
+#elif defined(TARGET_OS_MACOS) || defined(TARGET_OS_MACOS_IOS)
+ assembly_stream_->WriteString(".const\n");
#else
- Zone* zone = Thread::Current()->zone();
-
- const bool bare_instruction_payloads =
- FLAG_precompiled_mode && FLAG_use_bare_instructions;
-
- const char* instructions_symbol = vm ? kVmSnapshotInstructionsAsmSymbol
- : kIsolateSnapshotInstructionsAsmSymbol;
- assembly_stream_->WriteString(".text\n");
- assembly_stream_->Printf(".globl %s\n", instructions_symbol);
-
- // Start snapshot at page boundary.
- ASSERT(ImageWriter::kTextAlignment >= VirtualMemory::PageSize());
- Align(ImageWriter::kTextAlignment);
- assembly_stream_->Printf("%s:\n", instructions_symbol);
-
-#if defined(DART_PRECOMPILER)
- auto const bss_symbol =
- vm ? kVmSnapshotBssAsmSymbol : kIsolateSnapshotBssAsmSymbol;
- intptr_t debug_segment_base = 0;
- if (debug_elf_ != nullptr) {
- debug_segment_base =
- debug_elf_->NextMemoryOffset(ImageWriter::kTextAlignment);
+ UNIMPLEMENTED();
+#endif
+ global_symbol = true;
+ break;
+ case ProgramSection::Bss:
+ assembly_stream_->WriteString(".bss\n");
+ break;
+ case ProgramSection::BuildId:
+ break;
}
-#endif
+ current_section_symbol_ = SectionSymbol(section, vm);
+ ASSERT(current_section_symbol_ != nullptr);
+ if (global_symbol) {
+ assembly_stream_->Printf(".globl %s\n", current_section_symbol_);
+ }
+ Align(alignment);
+ assembly_stream_->Printf("%s:\n", current_section_symbol_);
+ return true;
+}
- intptr_t text_offset = 0;
-#if defined(DART_PRECOMPILER)
- // Parent used for later profile objects. Starts off as the Image. When
- // writing bare instructions payloads, this is later updated with the
- // InstructionsSection object which contains all the bare payloads.
- V8SnapshotProfileWriter::ObjectId parent_id(offset_space_, text_offset);
-#endif
+static void ElfAddSection(Elf* elf,
+ ImageWriter::ProgramSection section,
+ const char* symbol,
+ const uint8_t* bytes,
+ intptr_t size) {
+ if (elf == nullptr) return;
+ switch (section) {
+ case ImageWriter::ProgramSection::Text:
+ elf->AddText(symbol, bytes, size);
+ break;
+ case ImageWriter::ProgramSection::Data:
+ elf->AddROData(symbol, bytes, size);
+ break;
+ default:
+ // Other sections are handled by the Elf object internally.
+ break;
+ }
+}
- // This head also provides the gap to make the instructions snapshot
- // look like a OldPage.
- const intptr_t image_size = Utils::RoundUp(
- next_text_offset_, compiler::target::ObjectAlignment::kObjectAlignment);
- text_offset += WriteWordLiteralText(image_size);
- if (FLAG_precompiled_mode) {
- // Output the offset to the ImageHeader object from the start of the image.
- text_offset += WriteWordLiteralText(Image::kHeaderSize);
+void AssemblyImageWriter::ExitSection(ProgramSection name,
+ bool vm,
+ intptr_t size) {
+ // We should still be in the same section as the last EnterSection.
+ ASSERT(current_section_symbol_ != nullptr);
+ ASSERT_EQUAL(strcmp(SectionSymbol(name, vm), current_section_symbol_), 0);
+ // We need to generate a text segment of the appropriate size in the ELF
+ // for two reasons:
+ //
+  //  * We need unique virtual addresses for each text section in the DWARF
+  //    file, and the virtual addresses for payloads within those sections
+  //    must not overlap.
+ //
+ // * Our tools for converting DWARF stack traces back to "normal" Dart
+ // stack traces calculate an offset into the appropriate instructions
+ // section, and then add that offset to the virtual address of the
+ // corresponding segment to get the virtual address for the frame.
+ //
+ // Since we don't want to add the actual contents of the segment in the
+ // separate debugging information, we pass nullptr for the bytes, which
+ // creates an appropriate NOBITS section instead of PROGBITS.
+ ElfAddSection(debug_elf_, name, current_section_symbol_, /*bytes=*/nullptr,
+ size);
+ current_section_symbol_ = nullptr;
+}
+
+intptr_t AssemblyImageWriter::WriteTargetWord(word value) {
+ ASSERT(compiler::target::kBitsPerWord == kBitsPerWord ||
+ Utils::IsAbsoluteUint(compiler::target::kBitsPerWord, value));
+ // Padding is helpful for comparing the .S with --disassemble.
+ assembly_stream_->Printf("%s 0x%0.*" Px "\n", kWordDirective,
+ 2 * compiler::target::kWordSize, value);
+ return compiler::target::kWordSize;
+}
+
+intptr_t AssemblyImageWriter::Relocation(intptr_t section_offset,
+ const char* source_symbol,
+ intptr_t source_offset,
+ const char* target_symbol,
+ intptr_t target_offset,
+ intptr_t target_addend) {
+ ASSERT(source_symbol != nullptr);
+ ASSERT(target_symbol != nullptr);
+
+ // TODO(dartbug.com/43274): Remove once we generate consistent build IDs
+ // between assembly snapshots and their debugging information.
+ const char* build_id_symbol =
+ SectionSymbol(ProgramSection::BuildId, /*vm=*/false);
+ if (strcmp(target_symbol, build_id_symbol) == 0) {
+ return WriteTargetWord(Image::kNoBuildId);
+ }
+
+ // All relocations are word-sized.
+ assembly_stream_->Printf("%s ", kWordDirective);
+ if (strcmp(target_symbol, current_section_symbol_) == 0 &&
+ target_offset == section_offset) {
+ assembly_stream_->WriteString("(.)");
} else {
- text_offset += WriteWordLiteralText(Image::kNoImageHeader);
- }
- // Zero values for the rest of the Image object header bytes.
- text_offset += Align(Image::kHeaderSize, text_offset);
- ASSERT_EQUAL(text_offset, Image::kHeaderSize);
-
-#if defined(DART_PRECOMPILER)
- if (FLAG_precompiled_mode) {
- if (profile_writer_ != nullptr) {
- profile_writer_->SetObjectTypeAndName(parent_id, image_type_,
- instructions_symbol);
- // Assign post-instruction padding to the Image, unless we're writing bare
- // instruction payloads, in which case we'll assign it to the
- // InstructionsSection object.
- const intptr_t padding =
- bare_instruction_payloads ? 0 : image_size - next_text_offset_;
- profile_writer_->AttributeBytesTo(parent_id,
- Image::kHeaderSize + padding);
- profile_writer_->AddRoot(parent_id);
- }
-
- // Write the ImageHeader object, starting with the header.
- const intptr_t image_header_size =
- compiler::target::ImageHeader::InstanceSize();
- if (profile_writer_ != nullptr) {
- const V8SnapshotProfileWriter::ObjectId id(offset_space_, text_offset);
- profile_writer_->SetObjectTypeAndName(id, image_header_type_,
- instructions_symbol);
- profile_writer_->AttributeBytesTo(id, image_header_size);
- const intptr_t element_offset = id.second - parent_id.second;
- profile_writer_->AttributeReferenceTo(
- parent_id,
- {id, V8SnapshotProfileWriter::Reference::kElement, element_offset});
- }
- text_offset +=
- WriteWordLiteralText(GetMarkedTags(kImageHeaderCid, image_header_size));
-
- // An ImageHeader has four fields:
- // 1) The BSS offset from this section.
- assembly_stream_->Printf("%s %s - %s\n", kLiteralPrefix, bss_symbol,
- instructions_symbol);
- text_offset += compiler::target::kWordSize;
- // 2) The relocated address of the instructions.
- //
- // For assembly snapshots, we can't generate assembly to get the absolute
- // address of the text section, as using the section symbol gives us a
- // relative offset from the section start, which is 0. Instead, depend on
- // the BSS initialization to retrieve this for us at runtime. As a side
- // effect, this field also doubles as a way to detect whether we compiled to
- // assembly or directly to ELF.
- text_offset += WriteWordLiteralText(Image::kNoRelocatedAddress);
- // TODO(dartbug.com/43274): Change once we generate consistent build IDs
- // between assembly snapshots and their debugging information.
- // 3) The GNU build ID offset from this section.
- text_offset += WriteWordLiteralText(Image::kNoBuildId);
- // 4) The GNU build ID length.
- text_offset += WriteWordLiteralText(0);
- text_offset +=
- Align(compiler::target::ObjectAlignment::kObjectAlignment, text_offset);
-
- ASSERT_EQUAL(text_offset, Image::kHeaderSize + image_header_size);
-
- if (bare_instruction_payloads) {
- if (profile_writer_ != nullptr) {
- const V8SnapshotProfileWriter::ObjectId id(offset_space_, text_offset);
- profile_writer_->SetObjectTypeAndName(id, instructions_section_type_,
- instructions_symbol);
- const intptr_t padding = image_size - next_text_offset_;
- profile_writer_->AttributeBytesTo(
- id, compiler::target::InstructionsSection::HeaderSize() + padding);
- const intptr_t element_offset = id.second - parent_id.second;
- profile_writer_->AttributeReferenceTo(
- parent_id,
- {id, V8SnapshotProfileWriter::Reference::kElement, element_offset});
- // Later objects will have the InstructionsSection as a parent.
- parent_id = id;
- }
- const intptr_t section_size = image_size - text_offset;
- // Calculated using next_text_offset_, which doesn't include post-payload
- // padding to object alignment.
- const intptr_t instructions_length =
- next_text_offset_ - text_offset -
- compiler::target::InstructionsSection::HeaderSize();
- // Add the RawInstructionsSection header.
- text_offset += WriteWordLiteralText(
- GetMarkedTags(kInstructionsSectionCid, section_size));
- text_offset += WriteWordLiteralText(instructions_length);
- text_offset += Align(
- compiler::target::Instructions::kBarePayloadAlignment, text_offset);
+ assembly_stream_->Printf("%s", target_symbol);
+ if (target_offset != 0) {
+ assembly_stream_->Printf(" + %" Pd "", target_offset);
}
}
-#endif
-
- FrameUnwindPrologue();
-
-#if defined(DART_PRECOMPILER)
- PcDescriptors& descriptors = PcDescriptors::Handle(zone);
-#endif
- SnapshotTextObjectNamer namer(zone);
-
- ASSERT(offset_space_ != V8SnapshotProfileWriter::kSnapshot);
- for (intptr_t i = 0; i < instructions_.length(); i++) {
- auto& data = instructions_[i];
- const bool is_trampoline = data.trampoline_bytes != nullptr;
- ASSERT_EQUAL(data.text_offset_, text_offset);
-
- intptr_t dwarf_index = i;
-#if defined(DART_PRECOMPILER)
- if (!is_trampoline && assembly_dwarf_ != nullptr) {
- dwarf_index =
- assembly_dwarf_->AddCode(*data.code_, SegmentRelativeOffset(vm));
- }
-#endif
- const auto object_name = namer.SnapshotNameFor(dwarf_index, data);
-
-#if defined(DART_PRECOMPILER)
- if (profile_writer_ != nullptr) {
- const V8SnapshotProfileWriter::ObjectId id(offset_space_, text_offset);
- auto const type = is_trampoline ? trampoline_type_ : instructions_type_;
- const intptr_t size =
- is_trampoline ? data.trampoline_length : SizeInSnapshot(*data.insns_);
- profile_writer_->SetObjectTypeAndName(id, type, object_name);
- profile_writer_->AttributeBytesTo(id, size);
- const intptr_t element_offset = id.second - parent_id.second;
- profile_writer_->AttributeReferenceTo(
- parent_id,
- {id, V8SnapshotProfileWriter::Reference::kElement, element_offset});
- }
-#endif
-
- if (is_trampoline) {
- const auto start = reinterpret_cast<uword>(data.trampoline_bytes);
- const auto end = start + data.trampoline_length;
- text_offset += WriteByteSequence(start, end);
- delete[] data.trampoline_bytes;
- data.trampoline_bytes = nullptr;
- continue;
- }
-
- const intptr_t instr_start = text_offset;
- const auto& insns = *data.insns_;
-
- // 1. Write from the object start to the payload start. This includes the
- // object header and the fixed fields. Not written for AOT snapshots using
- // bare instructions.
- if (!bare_instruction_payloads) {
- NoSafepointScope no_safepoint;
-
- // Write Instructions with the mark and read-only bits set.
- text_offset += WriteWordLiteralText(GetMarkedTags(insns));
- text_offset += WriteWordLiteralText(insns.raw_ptr()->size_and_flags_);
- text_offset +=
- Align(compiler::target::Instructions::kNonBarePayloadAlignment,
- text_offset);
- }
-
- ASSERT_EQUAL(text_offset - instr_start,
- compiler::target::Instructions::HeaderSize());
-
-#if defined(DART_PRECOMPILER)
- const auto& code = *data.code_;
- if (debug_elf_ != nullptr) {
- debug_elf_->dwarf()->AddCode(code, {vm, text_offset});
- }
-#endif
- // 2. Write a label at the entry point.
- // Linux's perf uses these labels.
- assembly_stream_->Printf("%s:\n", object_name);
-
- {
- // 3. Write from the payload start to payload end. For AOT snapshots
- // with bare instructions, this is the only non-padding part serialized.
- NoSafepointScope no_safepoint;
- const uword payload_start = insns.PayloadStart();
- // Double-check the payload alignment, since we will load and write
- // target-sized words starting from that address.
- ASSERT(Utils::IsAligned(payload_start, compiler::target::kWordSize));
- const uword payload_size = insns.Size();
- const uword payload_end = payload_start + payload_size;
-
-#if defined(DART_PRECOMPILER)
- descriptors = code.pc_descriptors();
- PcDescriptors::Iterator iterator(descriptors,
- PcDescriptorsLayout::kBSSRelocation);
-
- uword cursor = payload_start;
- while (iterator.MoveNext()) {
- const uword next_reloc_address = payload_start + iterator.PcOffset();
- // We only generate BSS relocations that are target word-sized and at
- // target word-aligned offsets in the payload. Double-check this.
- ASSERT(
- Utils::IsAligned(next_reloc_address, compiler::target::kWordSize));
- text_offset += WriteByteSequence(cursor, next_reloc_address);
- const word addend =
- *reinterpret_cast<compiler::target::word*>(next_reloc_address);
- assembly_stream_->Printf("%s %s - (.) + %" Pd "\n", kLiteralPrefix,
- bss_symbol, addend);
- text_offset += compiler::target::kWordSize;
- cursor = next_reloc_address + compiler::target::kWordSize;
- }
- text_offset += WriteByteSequence(cursor, payload_end);
-#else
- text_offset += WriteByteSequence(payload_start, payload_end);
-#endif
- }
-
- // 4. Write from the payload end to object end. Note we can't simply copy
- // from the object because the host object may have less alignment filler
- // than the target object in the cross-word case.
- uword unpadded_end = text_offset - instr_start;
- uword padding_size = SizeInSnapshot(insns) - unpadded_end;
- for (; padding_size >= compiler::target::kWordSize;
- padding_size -= compiler::target::kWordSize) {
- text_offset += WriteWordLiteralText(kBreakInstructionFiller);
- }
-#if defined(TARGET_ARCH_ARM)
- // ARM never needs more padding, as instructions are word sized and all
- // alignments are multiples of the word size.
- ASSERT_EQUAL(padding_size, 0);
-#elif defined(TARGET_ARCH_ARM64)
- // ARM64 may need 4 bytes of padding, but the break instruction filler
- // is two copies of the 4-byte break instruction, so this works.
- ASSERT(padding_size == 0 || padding_size == 4);
-#elif defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_IA32)
- // The break instruction filler is the same single byte instruction filling
- // a uword, so no checks needed.
-#else
-#error Unexpected target architecture.
-#endif
- text_offset += WriteByteSequence(
- reinterpret_cast<uword>(&kBreakInstructionFiller),
- reinterpret_cast<uword>(&kBreakInstructionFiller) + padding_size);
-
- ASSERT_EQUAL(text_offset - instr_start, SizeInSnapshot(insns.raw()));
+ if (target_addend != 0) {
+ assembly_stream_->Printf(" + %" Pd "", target_addend);
}
+ if (strcmp(source_symbol, current_section_symbol_) == 0 &&
+ source_offset == section_offset) {
+ assembly_stream_->WriteString(" - (.)");
+ } else {
+ assembly_stream_->Printf(" - %s", source_symbol);
+ if (source_offset != 0) {
+ assembly_stream_->Printf(" - %" Pd "", source_offset);
+ }
+ }
+ assembly_stream_->WriteString("\n");
+ return compiler::target::kWordSize;
+}
- // Should be a no-op unless writing bare instruction payloads, in which case
- // we need to add post-payload padding to the object alignment. The alignment
- // needs to match the one we used for image_size above.
- text_offset +=
- Align(compiler::target::ObjectAlignment::kObjectAlignment, text_offset);
-
- ASSERT_EQUAL(text_offset, image_size);
-
- FrameUnwindEpilogue();
-
-#if defined(DART_PRECOMPILER)
+void AssemblyImageWriter::AddCodeSymbol(const Code& code,
+ const char* symbol,
+ intptr_t offset) {
+ if (assembly_dwarf_ != nullptr) {
+ assembly_dwarf_->AddCode(code, symbol);
+ }
if (debug_elf_ != nullptr) {
- // We need to generate a text segment of the appropriate size in the ELF
- // for two reasons:
- //
- // * We need unique virtual addresses for each text section in the DWARF
- // file and that the virtual addresses for payloads within those sections
- // do not overlap.
- //
- // * Our tools for converting DWARF stack traces back to "normal" Dart
- // stack traces calculate an offset into the appropriate instructions
- // section, and then add that offset to the virtual address of the
- // corresponding segment to get the virtual address for the frame.
- //
- // Since we don't want to add the actual contents of the segment in the
- // separate debugging information, we pass nullptr for the bytes, which
- // creates an appropriate NOBITS section instead of PROGBITS.
- auto const debug_segment_base2 = debug_elf_->AddText(
- instructions_symbol, /*bytes=*/nullptr, text_offset);
- // Double-check that no other ELF sections were added in the middle of
- // writing the text section.
- ASSERT(debug_segment_base2 == debug_segment_base);
+ debug_elf_->dwarf()->AddCode(code, symbol);
+ debug_elf_->AddLocalSymbol(symbol, elf::STT_FUNC, offset, code.Size());
}
-#endif
-#endif // !defined(DART_PRECOMPILED_RUNTIME)
+ assembly_stream_->Printf("%s:\n", symbol);
}
void AssemblyImageWriter::FrameUnwindPrologue() {
@@ -1193,7 +1307,6 @@
assembly_stream_->WriteString(".save {r11, lr}\n");
assembly_stream_->WriteString(".setfp r11, sp, #0\n");
#endif
-
#endif
}
@@ -1206,55 +1319,55 @@
assembly_stream_->WriteString(".cfi_endproc\n");
}
-intptr_t AssemblyImageWriter::WriteByteSequence(uword start, uword end) {
- assert(end >= start);
+intptr_t AssemblyImageWriter::WriteBytes(const void* bytes, intptr_t size) {
+ ASSERT(size >= 0);
+ auto const start = reinterpret_cast<const uint8_t*>(bytes);
auto const end_of_words =
- Utils::RoundDown(end, sizeof(compiler::target::uword));
- for (auto cursor = reinterpret_cast<compiler::target::uword*>(start);
- cursor < reinterpret_cast<compiler::target::uword*>(end_of_words);
+ start + Utils::RoundDown(size, compiler::target::kWordSize);
+ for (auto cursor = reinterpret_cast<const compiler::target::word*>(start);
+ cursor < reinterpret_cast<const compiler::target::word*>(end_of_words);
cursor++) {
- WriteWordLiteralText(*cursor);
+ WriteTargetWord(*cursor);
}
+ auto const end = start + size;
if (end != end_of_words) {
- auto start_of_rest = reinterpret_cast<const uint8_t*>(end_of_words);
- assembly_stream_->WriteString(".byte ");
- for (auto cursor = start_of_rest;
- cursor < reinterpret_cast<const uint8_t*>(end); cursor++) {
- if (cursor != start_of_rest) {
- assembly_stream_->WriteString(", ");
- }
- assembly_stream_->Printf("0x%0.2x", *cursor);
+ assembly_stream_->WriteString(kSizeDirectives[kInt8SizeLog2]);
+ for (auto cursor = end_of_words; cursor < end; cursor++) {
+ assembly_stream_->Printf("%s 0x%0.2x", cursor != end_of_words ? "," : "",
+ *cursor);
}
assembly_stream_->WriteString("\n");
}
- return end - start;
+ return size;
}
-intptr_t AssemblyImageWriter::Align(intptr_t alignment, uword position) {
- const uword next_position = Utils::RoundUp(position, alignment);
+intptr_t AssemblyImageWriter::Align(intptr_t alignment, intptr_t position) {
+ const intptr_t next_position = Utils::RoundUp(position, alignment);
assembly_stream_->Printf(".balign %" Pd ", 0\n", alignment);
return next_position - position;
}
+#endif // defined(DART_PRECOMPILER)
BlobImageWriter::BlobImageWriter(Thread* thread,
- NonStreamingWriteStream* stream,
+ NonStreamingWriteStream* vm_instructions,
+ NonStreamingWriteStream* isolate_instructions,
Elf* debug_elf,
Elf* elf)
: ImageWriter(thread),
- instructions_blob_stream_(ASSERT_NOTNULL(stream)),
+ vm_instructions_(vm_instructions),
+ isolate_instructions_(isolate_instructions),
elf_(elf),
debug_elf_(debug_elf) {
#if defined(DART_PRECOMPILER)
+ ASSERT_EQUAL(FLAG_precompiled_mode, elf_ != nullptr);
ASSERT(debug_elf_ == nullptr || debug_elf_->dwarf() != nullptr);
#else
RELEASE_ASSERT(elf_ == nullptr);
#endif
}
-intptr_t BlobImageWriter::WriteByteSequence(uword start, uword end) {
- const uword size = end - start;
- instructions_blob_stream_->WriteBytes(reinterpret_cast<const void*>(start),
- size);
+intptr_t BlobImageWriter::WriteBytes(const void* bytes, intptr_t size) {
+ current_section_stream_->WriteBytes(bytes, size);
return size;
}
@@ -1262,343 +1375,120 @@
#if defined(DART_PRECOMPILER)
// We don't actually write a BSS segment, it's created as part of the
  // Elf constructor, but make sure it has a non-zero start.
- ASSERT(elf_ == nullptr || elf_->BssStart(vm) != 0);
+ ASSERT(elf_ == nullptr ||
+ elf_->SymbolAddress(vm ? kVmSnapshotBssAsmSymbol
+ : kIsolateSnapshotBssAsmSymbol) != 0);
#endif
}
void BlobImageWriter::WriteROData(NonStreamingWriteStream* clustered_stream,
bool vm) {
ImageWriter::WriteROData(clustered_stream, vm);
-#if defined(DART_PRECOMPILER)
- auto const data_symbol =
- vm ? kVmSnapshotDataAsmSymbol : kIsolateSnapshotDataAsmSymbol;
- if (elf_ != nullptr) {
- elf_->AddROData(data_symbol, clustered_stream->buffer(),
- clustered_stream->bytes_written());
+ current_section_stream_ = clustered_stream;
+ if (!EnterSection(ProgramSection::Data, vm, ImageWriter::kRODataAlignment)) {
+ return;
}
- if (debug_elf_ != nullptr) {
- // To keep memory addresses consistent, we create elf::SHT_NOBITS sections
- // in the debugging information. We still pass along the buffers because
- // we'll need the buffer bytes at generation time to calculate the build ID
- // so it'll match the one in the snapshot.
- debug_elf_->AddROData(data_symbol, clustered_stream->buffer(),
- clustered_stream->bytes_written());
- }
-#endif
+ ExitSection(ProgramSection::Data, vm, clustered_stream->bytes_written());
}
-void BlobImageWriter::WriteText(bool vm) {
- const bool bare_instruction_payloads =
- FLAG_precompiled_mode && FLAG_use_bare_instructions;
- auto const zone = Thread::Current()->zone();
-
+bool BlobImageWriter::EnterSection(ProgramSection section,
+ bool vm,
+ intptr_t alignment) {
#if defined(DART_PRECOMPILER)
- auto const instructions_symbol = vm ? kVmSnapshotInstructionsAsmSymbol
- : kIsolateSnapshotInstructionsAsmSymbol;
- intptr_t segment_base = 0;
- if (elf_ != nullptr) {
- segment_base = elf_->NextMemoryOffset(ImageWriter::kTextAlignment);
+ ASSERT_EQUAL(elf_ != nullptr, FLAG_precompiled_mode);
+#endif
+  // For now, we set current_section_stream_ in ::WriteROData.
+ ASSERT(section == ProgramSection::Data || current_section_stream_ == nullptr);
+ ASSERT(current_section_symbol_ == nullptr);
+ switch (section) {
+ case ProgramSection::Text:
+ current_section_stream_ =
+ ASSERT_NOTNULL(vm ? vm_instructions_ : isolate_instructions_);
+ break;
+ case ProgramSection::Data:
+ break;
+ case ProgramSection::Bss:
+ // The BSS section is pre-made in the Elf object for precompiled snapshots
+ // and unused otherwise, so there's no work that needs doing here.
+ return false;
+ case ProgramSection::BuildId:
+ // The GNU build ID is handled specially in the Elf object, and does not
+ // get used for non-precompiled snapshots.
+ return false;
}
- intptr_t debug_segment_base = 0;
- if (debug_elf_ != nullptr) {
- debug_segment_base =
- debug_elf_->NextMemoryOffset(ImageWriter::kTextAlignment);
- // If we're also generating an ELF snapshot, we want the virtual addresses
- // in it and the separately saved DWARF information to match.
- ASSERT(elf_ == nullptr || segment_base == debug_segment_base);
- }
-#endif
-
- intptr_t text_offset = 0;
-#if defined(DART_PRECOMPILER)
- // Parent used for later profile objects. Starts off as the Image. When
- // writing bare instructions payloads, this is later updated with the
- // InstructionsSection object which contains all the bare payloads.
- V8SnapshotProfileWriter::ObjectId parent_id(offset_space_, text_offset);
-#endif
-
- // This header provides the gap to make the instructions snapshot look like a
- // OldPage.
- const intptr_t image_size = Utils::RoundUp(
- next_text_offset_, compiler::target::ObjectAlignment::kObjectAlignment);
- instructions_blob_stream_->WriteTargetWord(image_size);
- if (FLAG_precompiled_mode) {
- // Output the offset to the ImageHeader object from the start of the image.
- instructions_blob_stream_->WriteTargetWord(Image::kHeaderSize);
- } else {
- instructions_blob_stream_->WriteTargetWord(0); // No ImageHeader object.
- }
- // Zero values for the rest of the Image object header bytes.
- instructions_blob_stream_->Align(Image::kHeaderSize);
- ASSERT_EQUAL(instructions_blob_stream_->Position(), Image::kHeaderSize);
- text_offset += Image::kHeaderSize;
-
-#if defined(DART_PRECOMPILER)
- if (FLAG_precompiled_mode) {
- if (profile_writer_ != nullptr) {
- profile_writer_->SetObjectTypeAndName(parent_id, image_type_,
- instructions_symbol);
- // Assign post-instruction padding to the Image, unless we're writing bare
- // instruction payloads, in which case we'll assign it to the
- // InstructionsSection object.
- const intptr_t padding =
- bare_instruction_payloads ? 0 : image_size - next_text_offset_;
- profile_writer_->AttributeBytesTo(parent_id,
- Image::kHeaderSize + padding);
- profile_writer_->AddRoot(parent_id);
- }
-
- // Write the ImageHeader object, starting with the header.
- const intptr_t image_header_size =
- compiler::target::ImageHeader::InstanceSize();
- if (profile_writer_ != nullptr) {
- const V8SnapshotProfileWriter::ObjectId id(offset_space_, text_offset);
- profile_writer_->SetObjectTypeAndName(id, image_header_type_,
- instructions_symbol);
- profile_writer_->AttributeBytesTo(id, image_header_size);
- const intptr_t element_offset = id.second - parent_id.second;
- profile_writer_->AttributeReferenceTo(
- parent_id,
- {id, V8SnapshotProfileWriter::Reference::kElement, element_offset});
- }
- instructions_blob_stream_->WriteTargetWord(
- GetMarkedTags(kImageHeaderCid, image_header_size));
-
- ASSERT(elf_ != nullptr);
- // An ImageHeader has four fields:
- // 1) The BSS offset from this section.
- const word bss_offset = elf_->BssStart(vm) - segment_base;
- ASSERT(bss_offset != Image::kNoBssSection);
- instructions_blob_stream_->WriteTargetWord(bss_offset);
- // 2) The relocated address of the instructions.
- //
- // Since we set this to a non-zero value for ELF snapshots, we also use this
- // to detect compiled-to-ELF snapshots.
- ASSERT(segment_base != Image::kNoRelocatedAddress);
- instructions_blob_stream_->WriteTargetWord(segment_base);
- // 3) The GNU build ID offset from this section.
- intptr_t build_id_length = 0;
- const word build_id_offset =
- elf_->BuildIdStart(&build_id_length) - segment_base;
- ASSERT(build_id_offset != Image::kNoBuildId);
- instructions_blob_stream_->WriteTargetWord(build_id_offset);
- // 4) The GNU build ID length.
- ASSERT(build_id_length != 0);
- instructions_blob_stream_->WriteTargetWord(build_id_length);
- instructions_blob_stream_->Align(
- compiler::target::ObjectAlignment::kObjectAlignment);
-
- ASSERT_EQUAL(instructions_blob_stream_->Position() - text_offset,
- image_header_size);
- text_offset += image_header_size;
-
- if (bare_instruction_payloads) {
- if (profile_writer_ != nullptr) {
- const V8SnapshotProfileWriter::ObjectId id(offset_space_, text_offset);
- profile_writer_->SetObjectTypeAndName(id, instructions_section_type_,
- instructions_symbol);
- const intptr_t padding = image_size - next_text_offset_;
- profile_writer_->AttributeBytesTo(
- id, compiler::target::InstructionsSection::HeaderSize() + padding);
- const intptr_t element_offset = id.second - parent_id.second;
- profile_writer_->AttributeReferenceTo(
- parent_id,
- {id, V8SnapshotProfileWriter::Reference::kElement, element_offset});
- // Later objects will have the InstructionsSection as a parent.
- parent_id = id;
- }
- const intptr_t section_size = image_size - text_offset;
- // Uses next_text_offset_ to avoid any post-payload padding.
- const intptr_t instructions_length =
- next_text_offset_ - text_offset -
- compiler::target::InstructionsSection::HeaderSize();
- // Add the RawInstructionsSection header.
- instructions_blob_stream_->WriteTargetWord(
- GetMarkedTags(kInstructionsSectionCid, section_size));
- instructions_blob_stream_->WriteTargetWord(instructions_length);
- instructions_blob_stream_->Align(
- compiler::target::Instructions::kBarePayloadAlignment);
- ASSERT_EQUAL(instructions_blob_stream_->Position() - text_offset,
- compiler::target::InstructionsSection::HeaderSize());
- text_offset += compiler::target::InstructionsSection::HeaderSize();
- }
- }
-#endif
-
- ASSERT_EQUAL(text_offset, instructions_blob_stream_->Position());
-
-#if defined(DART_PRECOMPILER)
- auto& descriptors = PcDescriptors::Handle(zone);
-#endif
- SnapshotTextObjectNamer namer(zone);
-
- NoSafepointScope no_safepoint;
- for (intptr_t i = 0; i < instructions_.length(); i++) {
- auto& data = instructions_[i];
- const bool is_trampoline = data.trampoline_bytes != nullptr;
- ASSERT(data.text_offset_ == text_offset);
-
-#if defined(DART_PRECOMPILER)
- const auto object_name = namer.SnapshotNameFor(i, data);
- if (profile_writer_ != nullptr) {
- const V8SnapshotProfileWriter::ObjectId id(offset_space_, text_offset);
- auto const type = is_trampoline ? trampoline_type_ : instructions_type_;
- const intptr_t size = is_trampoline ? data.trampoline_length
- : SizeInSnapshot(data.insns_->raw());
- profile_writer_->SetObjectTypeAndName(id, type, object_name);
- profile_writer_->AttributeBytesTo(id, size);
- // If the object is wrapped in an InstructionSection, then add an
- // element reference.
- const intptr_t element_offset = id.second - parent_id.second;
- profile_writer_->AttributeReferenceTo(
- parent_id,
- {id, V8SnapshotProfileWriter::Reference::kElement, element_offset});
- }
-#endif
-
- if (is_trampoline) {
- instructions_blob_stream_->WriteBytes(
- reinterpret_cast<const void*>(data.trampoline_bytes),
- data.trampoline_length);
- text_offset += data.trampoline_length;
- delete[] data.trampoline_bytes;
- data.trampoline_bytes = nullptr;
- continue;
- }
-
- const auto& insns = *data.insns_;
- const intptr_t instr_start = instructions_blob_stream_->Position();
-
- if (!bare_instruction_payloads) {
- // Write Instructions with the mark and read-only bits set.
- instructions_blob_stream_->WriteTargetWord(GetMarkedTags(insns));
- instructions_blob_stream_->WriteFixed<uint32_t>(
- insns.raw_ptr()->size_and_flags_);
- instructions_blob_stream_->Align(
- compiler::target::Instructions::kNonBarePayloadAlignment);
- }
-
- ASSERT_EQUAL(instructions_blob_stream_->Position() - instr_start,
- compiler::target::Instructions::HeaderSize());
-
-#if defined(DART_PRECOMPILER)
- const auto& code = *data.code_;
- auto const payload_offset = instructions_blob_stream_->Position();
- if (elf_ != nullptr && elf_->dwarf() != nullptr) {
- elf_->dwarf()->AddCode(code, {vm, payload_offset});
- }
- if (debug_elf_ != nullptr) {
- debug_elf_->dwarf()->AddCode(code, {vm, payload_offset});
- }
-#endif
-
- auto const payload_start =
- reinterpret_cast<const uint8_t*>(insns.PayloadStart());
- // Double-check the payload alignment, since we will load and write
- // target-sized words starting from that address.
- ASSERT(Utils::IsAligned(payload_start, compiler::target::kWordSize));
- const intptr_t payload_size = insns.Size();
- // Don't patch the relocation if we're not generating ELF. The regular blobs
- // format does not yet support these relocations. Use
- // Code::VerifyBSSRelocations to check whether the relocations are patched
- // or not after loading.
- if (elf_ != nullptr) {
-#if defined(DART_PRECOMPILER)
- descriptors = code.pc_descriptors();
- PcDescriptors::Iterator iterator(
- descriptors, /*kind_mask=*/PcDescriptorsLayout::kBSSRelocation);
-
- auto const payload_end = payload_start + payload_size;
- const intptr_t bss_offset = elf_->BssStart(vm) - segment_base;
- auto cursor = payload_start;
- while (iterator.MoveNext()) {
- auto const next_reloc_offset = iterator.PcOffset();
- auto const next_reloc_address = payload_start + next_reloc_offset;
- // We only generate BSS relocations that are target word-sized and at
- // target word-aligned offsets in the payload. Double-check this..
- ASSERT(
- Utils::IsAligned(next_reloc_address, compiler::target::kWordSize));
- instructions_blob_stream_->WriteBytes(cursor,
- next_reloc_address - cursor);
-
- // The instruction stream at the relocation position holds an offset
- // into BSS corresponding to the symbol being resolved. This addend is
- // factored into the relocation.
- const auto addend = *reinterpret_cast<const compiler::target::word*>(
- next_reloc_address);
-
- const word reloc_value =
- bss_offset + addend - (payload_offset + next_reloc_offset);
- instructions_blob_stream_->WriteTargetWord(reloc_value);
- cursor = next_reloc_address + compiler::target::kWordSize;
- }
- instructions_blob_stream_->WriteBytes(cursor, payload_end - cursor);
-#endif
- } else {
- instructions_blob_stream_->WriteBytes(payload_start, payload_size);
- }
-
- // Create padding containing break instructions. Instead of copying it out
- // of the object, we recreate it to handle the crossword case where the
- // amount of padding may differ.
- uword unpadded_end = instructions_blob_stream_->Position() - instr_start;
- uword padding_size = SizeInSnapshot(insns) - unpadded_end;
- for (; padding_size >= compiler::target::kWordSize;
- padding_size -= compiler::target::kWordSize) {
- instructions_blob_stream_->WriteTargetWord(kBreakInstructionFiller);
- }
-#if defined(TARGET_ARCH_ARM)
- // ARM never needs more padding, as instructions are word sized and all
- // alignments are multiples of the word size.
- ASSERT_EQUAL(padding_size, 0);
-#elif defined(TARGET_ARCH_ARM64)
- // ARM64 may need 4 bytes of padding, but the break instruction filler
- // is two copies of the 4-byte break instruction, so this works.
- ASSERT(padding_size == 0 || padding_size == 4);
-#elif defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_IA32)
- // The break instruction filler is the same single byte instruction filling
- // a uword, so no checks needed.
-#else
-#error Unexpected target architecture.
-#endif
- instructions_blob_stream_->WriteBytes(
- reinterpret_cast<const void*>(&kBreakInstructionFiller), padding_size);
-
- const intptr_t instr_end = instructions_blob_stream_->Position();
- ASSERT_EQUAL(instr_end - instr_start, SizeInSnapshot(insns.raw()));
- text_offset += instr_end - instr_start;
- }
-
- // Should be a no-op unless writing bare instruction payloads, in which case
- // we need to add post-payload padding to the object alignment. The alignment
- // should match the alignment used in image_size above.
- instructions_blob_stream_->Align(
- compiler::target::ObjectAlignment::kObjectAlignment);
- text_offset = Utils::RoundUp(
- text_offset, compiler::target::ObjectAlignment::kObjectAlignment);
-
- ASSERT_EQUAL(text_offset, instructions_blob_stream_->Position());
- ASSERT_EQUAL(text_offset, image_size);
-
-#if defined(DART_PRECOMPILER)
- if (elf_ != nullptr) {
- auto const segment_base2 =
- elf_->AddText(instructions_symbol, instructions_blob_stream_->buffer(),
- instructions_blob_stream_->bytes_written());
- ASSERT_EQUAL(segment_base2, segment_base);
- }
- if (debug_elf_ != nullptr) {
- // To keep memory addresses consistent, we create elf::SHT_NOBITS sections
- // in the debugging information. We still pass along the buffers because
- // we'll need the buffer bytes at generation time to calculate the build ID
- // so it'll match the one in the snapshot.
- auto const debug_segment_base2 = debug_elf_->AddText(
- instructions_symbol, instructions_blob_stream_->buffer(),
- instructions_blob_stream_->bytes_written());
- ASSERT_EQUAL(debug_segment_base2, debug_segment_base);
- }
-#endif
+ current_section_symbol_ = SectionSymbol(section, vm);
+ current_section_stream_->Align(alignment);
+ return true;
}
+
+void BlobImageWriter::ExitSection(ProgramSection name, bool vm, intptr_t size) {
+ // We should still be in the same section as the last EnterSection.
+ ASSERT(current_section_symbol_ != nullptr);
+ ASSERT_EQUAL(strcmp(SectionSymbol(name, vm), current_section_symbol_), 0);
+#if defined(DART_PRECOMPILER)
+ ElfAddSection(elf_, name, current_section_symbol_,
+ current_section_stream_->buffer(), size);
+ // We create the corresponding segment in the debugging information as well,
+ // since it needs the contents to create the correct build ID.
+ ElfAddSection(debug_elf_, name, current_section_symbol_,
+ current_section_stream_->buffer(), size);
+#endif
+ current_section_symbol_ = nullptr;
+ current_section_stream_ = nullptr;
+}
+
+intptr_t BlobImageWriter::WriteTargetWord(word value) {
+ current_section_stream_->WriteTargetWord(value);
+ return compiler::target::kWordSize;
+}
+
+intptr_t BlobImageWriter::Align(intptr_t alignment, intptr_t offset) {
+ const intptr_t stream_padding = current_section_stream_->Align(alignment);
+ // Double-check that the offset has the same alignment.
+ ASSERT_EQUAL(Utils::RoundUp(offset, alignment) - offset, stream_padding);
+ return stream_padding;
+}
+
+#if defined(DART_PRECOMPILER)
+intptr_t BlobImageWriter::Relocation(intptr_t section_offset,
+ const char* source_symbol,
+ intptr_t source_offset,
+ const char* target_symbol,
+ intptr_t target_offset,
+ intptr_t target_addend) {
+ ASSERT(FLAG_precompiled_mode);
+ const uword source_address = RelocatedAddress(source_symbol) + source_offset;
+ const uword target_address = RelocatedAddress(target_symbol) + target_offset;
+ return WriteTargetWord(target_address + target_addend - source_address);
+}
+
+uword BlobImageWriter::RelocatedAddress(const char* symbol) {
+ ASSERT(FLAG_precompiled_mode);
+ ASSERT(symbol != nullptr);
+ if (strcmp(symbol, current_section_symbol_) == 0) {
+ // Cheating a bit here, assuming that the current section will go into its
+ // own load segment (and that the load segment alignment is the same as
+ // the text section alignment).
+ return elf_->NextMemoryOffset(ImageWriter::kTextAlignment);
+ }
+ const uword start = elf_->SymbolAddress(symbol);
+ ASSERT(start != Elf::kNoSectionStart);
+ return start;
+}
+
+void BlobImageWriter::AddCodeSymbol(const Code& code,
+ const char* symbol,
+ intptr_t offset) {
+ if (elf_ != nullptr && elf_->dwarf() != nullptr) {
+ elf_->dwarf()->AddCode(code, symbol);
+ elf_->AddLocalSymbol(symbol, elf::STT_FUNC, offset, code.Size());
+ }
+ if (debug_elf_ != nullptr) {
+ debug_elf_->dwarf()->AddCode(code, symbol);
+ debug_elf_->AddLocalSymbol(symbol, elf::STT_FUNC, offset, code.Size());
+ }
+}
+#endif // defined(DART_PRECOMPILER)
#endif // !defined(DART_PRECOMPILED_RUNTIME)
ImageReader::ImageReader(const uint8_t* data_image,
diff --git a/runtime/vm/image_snapshot.h b/runtime/vm/image_snapshot.h
index 0190cae..060f3a9 100644
--- a/runtime/vm/image_snapshot.h
+++ b/runtime/vm/image_snapshot.h
@@ -82,10 +82,10 @@
enum class HeaderField : intptr_t {
// The size of the image (total of header and payload).
ImageSize,
- // The offset of the ImageHeader object in the image. Note this offset
- // is from the start of the _image_, _not_ from its payload start, so we
- // can detect images without ImageHeaders by a 0 value here.
- ImageHeaderOffset,
+ // The offset of the InstructionsSection object in the image. Note this
+ // offset is from the start of the _image_, _not_ from its payload start,
+ // so we can detect images without an InstructionsSection by a 0 value here.
+ InstructionsSectionOffset,
// If adding more fields, update kHeaderFields below. (However, more
// fields _can't_ be added on 64-bit architectures, see the restrictions
// on kHeaderSize below.)
@@ -93,7 +93,7 @@
// Number of fields described by the HeaderField enum.
static constexpr intptr_t kHeaderFields =
- static_cast<intptr_t>(HeaderField::ImageHeaderOffset) + 1;
+ static_cast<intptr_t>(HeaderField::InstructionsSectionOffset) + 1;
static uword FieldValue(uword raw_memory, HeaderField field) {
return reinterpret_cast<const uword*>(
@@ -101,8 +101,8 @@
}
// Constants used to denote special values for the offsets in the Image
- // object header and the fields of the ImageHeader object.
- static constexpr intptr_t kNoImageHeader = 0;
+ // object header and the fields of the InstructionsSection object.
+ static constexpr intptr_t kNoInstructionsSection = 0;
static constexpr intptr_t kNoBssSection = 0;
static constexpr intptr_t kNoRelocatedAddress = 0;
static constexpr intptr_t kNoBuildId = 0;
@@ -126,13 +126,13 @@
// We don't use a handle or the tagged pointer because this object cannot be
// moved in memory by the GC.
- static const ImageHeaderLayout* ExtraInfo(const uword raw_memory,
- const uword size);
+ static const InstructionsSectionLayout* ExtraInfo(const uword raw_memory,
+ const uword size);
// Most internal uses would cast this to uword, so just store it as such.
const uword raw_memory_;
const intptr_t snapshot_size_;
- const ImageHeaderLayout* const extra_info_;
+ const InstructionsSectionLayout* const extra_info_;
// For access to private constants.
friend class AssemblyImageWriter;
@@ -249,15 +249,15 @@
next_text_offset_ = Image::kHeaderSize;
#if defined(DART_PRECOMPILER)
if (FLAG_precompiled_mode) {
- // We reserve space for the initial ImageHeader object. It is manually
- // serialized since it involves offsets to other parts of the snapshot.
- next_text_offset_ += compiler::target::ImageHeader::InstanceSize();
- if (FLAG_use_bare_instructions) {
- // For bare instructions mode, we wrap all the instruction payloads
- // in a single InstructionsSection object.
- next_text_offset_ +=
- compiler::target::InstructionsSection::HeaderSize();
- }
+ // We reserve space for the initial InstructionsSection object. It is
+ // manually serialized since it includes offsets to other snapshot parts.
+ // In bare instructions mode, it contains all the payloads and so we
+ // start after the header, whereas in non-bare mode, it contains no
+ // payload and Instructions start after it.
+ next_text_offset_ +=
+ FLAG_use_bare_instructions
+ ? compiler::target::InstructionsSection::HeaderSize()
+ : compiler::target::InstructionsSection::InstanceSize(0);
}
#endif
objects_.Clear();
@@ -302,10 +302,22 @@
const char* ObjectTypeForProfile(const Object& object) const;
static const char* TagObjectTypeAsReadOnly(Zone* zone, const char* type);
+ enum class ProgramSection {
+ Text, // Instructions.
+ Data, // Read-only data.
+ Bss, // Statically allocated variables initialized at load.
+ BuildId, // GNU build ID (when applicable)
+ };
+
protected:
virtual void WriteBss(bool vm) = 0;
virtual void WriteROData(NonStreamingWriteStream* clustered_stream, bool vm);
- virtual void WriteText(bool vm) = 0;
+ void WriteText(bool vm);
+
+ // Returns the standard Dart dynamic symbol name for the given VM isolate (if
+ // vm is true) or application isolate (otherwise) section. Some sections are
+ // shared by both.
+ const char* SectionSymbol(ProgramSection section, bool vm) const;
static uword GetMarkedTags(classid_t cid, intptr_t size);
static uword GetMarkedTags(const Object& obj);
@@ -353,6 +365,69 @@
};
};
+ // Methods abstracting out the particulars of the underlying concrete writer.
+
+ // Marks the entrance into a particular ProgramSection for either the VM
+ // isolate (if vm is true) or application isolate (if not). Returns false if
+ // this section should not be written.
+ virtual bool EnterSection(ProgramSection name,
+ bool vm,
+ intptr_t alignment) = 0;
+ // Marks the exit from a particular ProgramSection, allowing subclasses to
+ // do any post-writing work.
+ virtual void ExitSection(ProgramSection name, bool vm, intptr_t size) = 0;
+ // Writes a prologue to the text section that describes how to interpret
+ // Dart stack frames using DWARF's Call Frame Information (CFI).
+ virtual void FrameUnwindPrologue() = 0;
+ // Writes an epilogue to the text section that marks the end of instructions
+ // covered by the CFI information in the prologue.
+ virtual void FrameUnwindEpilogue() = 0;
+ // Writes a target uword-sized value to the section contents.
+ virtual intptr_t WriteTargetWord(word value) = 0;
+ // Writes a sequence of bytes of length [size] from address [bytes] to the
+ // section contents.
+ virtual intptr_t WriteBytes(const void* bytes, intptr_t size) = 0;
+ // Pads the section contents to a given alignment with zeroes.
+ virtual intptr_t Align(intptr_t alignment, intptr_t offset) = 0;
+#if defined(DART_PRECOMPILER)
+ // Writes a target word-sized value that depends on the final relocated
+ // addresses of the sections named by the two symbols. If T is the final
+ // relocated address of the target section and S is the final relocated
+ // address of the source, the final value is:
+ // (T + target_offset + target_addend) - (S + source_offset)
+ virtual intptr_t Relocation(intptr_t section_offset,
+ const char* source_symbol,
+ intptr_t source_offset,
+ const char* target_symbol,
+ intptr_t target_offset,
+ intptr_t target_addend) = 0;
+ // Returns the final relocated address for the section represented by the
+ // symbol. May not be supported by some writers.
+ virtual uword RelocatedAddress(const char* symbol) = 0;
+ // Creates a static symbol for the given Code object when appropriate.
+ virtual void AddCodeSymbol(const Code& code,
+ const char* symbol,
+ intptr_t section_offset) = 0;
+
+ // Overloaded convenience versions of the above virtual methods.
+
+ // An overload of Relocation where the target and source offsets and
+ // target addend are 0.
+ intptr_t Relocation(intptr_t section_offset,
+ const char* source_symbol,
+ const char* target_symbol) {
+ return Relocation(section_offset, source_symbol, 0, target_symbol, 0, 0);
+ }
+#endif
+ // Writes a fixed-sized value of type T to the section contents.
+ template <typename T>
+ intptr_t WriteFixed(T value) {
+ return WriteBytes(&value, sizeof(value));
+ }
+ // Like Align, but instead of padding with zeroes, the appropriate break
+ // instruction for the target architecture is used.
+ intptr_t AlignWithBreakInstructions(intptr_t alignment, intptr_t offset);
+
Heap* heap_;  // Used for mapping RawInstructions to object ids.
intptr_t next_data_offset_;
intptr_t next_text_offset_;
@@ -363,11 +438,13 @@
V8SnapshotProfileWriter::kSnapshot;
V8SnapshotProfileWriter* profile_writer_ = nullptr;
const char* const image_type_;
- const char* const image_header_type_;
const char* const instructions_section_type_;
const char* const instructions_type_;
const char* const trampoline_type_;
+ // Used to make sure Code symbols are unique across text sections.
+ intptr_t unique_symbol_counter_ = 0;
+
template <class T>
friend class TraceImageObjectScope;
friend class SnapshotTextObjectNamer; // For InstructionsData.
@@ -376,13 +453,14 @@
DISALLOW_COPY_AND_ASSIGN(ImageWriter);
};
+#if defined(DART_PRECOMPILER)
#define AutoTraceImage(object, section_offset, stream) \
- auto AutoTraceImagObjectScopeVar##__COUNTER__ = \
- TraceImageObjectScope<std::remove_pointer<decltype(stream)>::type>( \
- this, section_offset, stream, object);
+ TraceImageObjectScope<std::remove_pointer<decltype(stream)>::type> \
+ AutoTraceImageObjectScopeVar##__COUNTER__(this, section_offset, stream, \
+ object);
template <typename T>
-class TraceImageObjectScope {
+class TraceImageObjectScope : ValueObject {
public:
TraceImageObjectScope(ImageWriter* writer,
intptr_t section_offset,
@@ -410,12 +488,14 @@
const intptr_t section_offset_;
const intptr_t start_offset_;
const char* const object_type_;
+
+ DISALLOW_COPY_AND_ASSIGN(TraceImageObjectScope);
};
-class SnapshotTextObjectNamer {
+class SnapshotTextObjectNamer : ValueObject {
public:
explicit SnapshotTextObjectNamer(Zone* zone)
- : zone_(zone),
+ : zone_(ASSERT_NOTNULL(zone)),
owner_(Object::Handle(zone)),
string_(String::Handle(zone)),
insns_(Instructions::Handle(zone)),
@@ -434,6 +514,8 @@
Instructions& insns_;
ObjectStore* const store_;
TypeTestingStubNamer namer_;
+
+ DISALLOW_COPY_AND_ASSIGN(SnapshotTextObjectNamer);
};
class AssemblyImageWriter : public ImageWriter {
@@ -447,57 +529,91 @@
private:
virtual void WriteBss(bool vm);
virtual void WriteROData(NonStreamingWriteStream* clustered_stream, bool vm);
- virtual void WriteText(bool vm);
- void FrameUnwindPrologue();
- void FrameUnwindEpilogue();
- intptr_t WriteByteSequence(uword start, uword end);
- intptr_t Align(intptr_t alignment, uword position = 0);
-
-#if defined(TARGET_ARCH_IS_64_BIT)
- const char* kLiteralPrefix = ".quad";
-#else
- const char* kLiteralPrefix = ".long";
-#endif
-
- intptr_t WriteWordLiteralText(word value) {
- ASSERT(compiler::target::kBitsPerWord == kBitsPerWord ||
- Utils::IsAbsoluteUint(compiler::target::kBitsPerWord, value));
- // Padding is helpful for comparing the .S with --disassemble.
- assembly_stream_->Printf("%s 0x%0.*" Px "\n", kLiteralPrefix,
- 2 * compiler::target::kWordSize, value);
- return compiler::target::kWordSize;
+ virtual bool EnterSection(ProgramSection section,
+ bool vm,
+ intptr_t alignment);
+ virtual void ExitSection(ProgramSection name, bool vm, intptr_t size);
+ virtual intptr_t WriteTargetWord(word value);
+ virtual intptr_t WriteBytes(const void* bytes, intptr_t size);
+ virtual intptr_t Align(intptr_t alignment, intptr_t offset = 0);
+ virtual intptr_t Relocation(intptr_t section_offset,
+ const char* source_symbol,
+ intptr_t source_offset,
+ const char* target_symbol,
+ intptr_t target_offset,
+ intptr_t target_addend);
+ // We can't generate the relocated address in assembly, so it'll be
+ // retrieved and stored in the BSS during BSS initialization instead.
+ virtual uword RelocatedAddress(const char* symbol) {
+ return Image::kNoRelocatedAddress;
}
+ virtual void FrameUnwindPrologue();
+ virtual void FrameUnwindEpilogue();
+ virtual void AddCodeSymbol(const Code& code,
+ const char* symbol,
+ intptr_t offset);
BaseWriteStream* const assembly_stream_;
Dwarf* const assembly_dwarf_;
Elf* const debug_elf_;
+ // Used in Relocation to output "(.)" for relocations involving the current
+ // section position and creating local symbols in AddCodeSymbol.
+ const char* current_section_symbol_ = nullptr;
+
DISALLOW_COPY_AND_ASSIGN(AssemblyImageWriter);
};
+#endif
class BlobImageWriter : public ImageWriter {
public:
BlobImageWriter(Thread* thread,
- NonStreamingWriteStream* stream,
+ NonStreamingWriteStream* vm_instructions,
+ NonStreamingWriteStream* isolate_instructions,
Elf* debug_elf = nullptr,
Elf* elf = nullptr);
- intptr_t InstructionsBlobSize() const {
- return instructions_blob_stream_->bytes_written();
- }
-
private:
virtual void WriteBss(bool vm);
virtual void WriteROData(NonStreamingWriteStream* clustered_stream, bool vm);
- virtual void WriteText(bool vm);
- intptr_t WriteByteSequence(uword start, uword end);
+ virtual bool EnterSection(ProgramSection section,
+ bool vm,
+ intptr_t alignment);
+ virtual void ExitSection(ProgramSection name, bool vm, intptr_t size);
+ virtual intptr_t WriteTargetWord(word value);
+ virtual intptr_t WriteBytes(const void* bytes, intptr_t size);
+ virtual intptr_t Align(intptr_t alignment, intptr_t offset);
+ // TODO(rmacnak): Generate .debug_frame / .eh_frame / .arm.exidx to
+ // provide unwinding information.
+ virtual void FrameUnwindPrologue() {}
+ virtual void FrameUnwindEpilogue() {}
+#if defined(DART_PRECOMPILER)
+ virtual intptr_t Relocation(intptr_t section_offset,
+ const char* source_symbol,
+ intptr_t source_offset,
+ const char* target_symbol,
+ intptr_t target_offset,
+ intptr_t target_addend);
+ virtual uword RelocatedAddress(const char* symbol);
+ virtual void AddCodeSymbol(const Code& code,
+ const char* symbol,
+ intptr_t offset);
+#endif
- NonStreamingWriteStream* instructions_blob_stream_;
+ NonStreamingWriteStream* const vm_instructions_;
+ NonStreamingWriteStream* const isolate_instructions_;
Elf* const elf_;
Elf* const debug_elf_;
+ // Used to detect relocations or relocated address requests involving the
+ // current section and creating local symbols in AddCodeSymbol.
+ const char* current_section_symbol_ = nullptr;
+ // Set on section entrance to the stream that should be used by the writing
+ // methods.
+ NonStreamingWriteStream* current_section_stream_ = nullptr;
+
DISALLOW_COPY_AND_ASSIGN(BlobImageWriter);
};
diff --git a/runtime/vm/object.cc b/runtime/vm/object.cc
index c9be18c..2b1195d 100644
--- a/runtime/vm/object.cc
+++ b/runtime/vm/object.cc
@@ -16051,10 +16051,6 @@
}
#endif
-const char* ImageHeader::ToCString() const {
- return "ImageHeader";
-}
-
const char* WeakSerializationReference::ToCString() const {
#if defined(DART_PRECOMPILED_RUNTIME)
return Symbols::OptimizedOut().ToCString();
diff --git a/runtime/vm/object.h b/runtime/vm/object.h
index 9577ff4..9a9702f 100644
--- a/runtime/vm/object.h
+++ b/runtime/vm/object.h
@@ -5601,12 +5601,19 @@
friend class ImageWriter;
};
-// Used only to provide memory accounting for the bare instruction payloads
-// we serialize, since they are no longer part of RawInstructions objects.
+// An InstructionsSection contains extra information about serialized AOT
+// snapshots.
+//
+// To avoid changing the embedder to return more information about an AOT
+// snapshot and possibly disturbing existing clients of that interface, we
+// serialize a single InstructionsSection object at the start of any text
+// segments. In bare instructions mode, it also has the benefit of providing
+// memory accounting for the instructions payloads and avoiding special casing
+// Images with bare instructions payloads in the GC. Otherwise, it is empty
+// and the Instructions objects come after it in the Image.
class InstructionsSection : public Object {
public:
// Excludes HeaderSize().
- intptr_t Size() const { return raw_ptr()->payload_length_; }
static intptr_t Size(const InstructionsSectionPtr instr) {
return instr->ptr()->payload_length_;
}
@@ -5625,7 +5632,14 @@
Instructions::kBarePayloadAlignment);
}
+ // There are no public instance methods for the InstructionsSection class, as
+ // all access to the contents is handled by methods on the Image class.
+
private:
+ // Note there are no New() methods for InstructionsSection. Instead, the
+ // serializer writes the InstructionsSectionLayout object manually at the
+ // start of instructions Images in precompiled snapshots.
+
FINAL_HEAP_OBJECT_IMPLEMENTATION(InstructionsSection, Object);
friend class Class;
};
@@ -5988,26 +6002,6 @@
friend class Object;
};
-// An ImageHeader contains extra information about serialized AOT snapshots.
-//
-// To avoid changing the embedder to return more information about an AOT
-// snapshot and possibly disturbing existing clients of that interface, we
-// serialize a single ImageHeader object at the start of any text segments.
-class ImageHeader : public Object {
- public:
- static intptr_t InstanceSize() {
- return RoundedAllocationSize(sizeof(ImageHeaderLayout));
- }
- // There are no public methods for the ImageHeader contents, because
- // all access to the contents is handled by methods on the Image class.
-
- private:
- // Note there are no New() methods for ImageHeaders. Unstead, the serializer
- // writes the ImageHeaderLayout object manually at the start of the text
- // segment in precompiled snapshots.
- FINAL_HEAP_OBJECT_IMPLEMENTATION(ImageHeader, Object);
-};
-
// A WeakSerializationReference (WSR) denotes a type of weak reference to a
// target object. In particular, objects that can only be reached from roots via
// WSR edges during serialization of AOT snapshots should not be serialized. Of
diff --git a/runtime/vm/object_service.cc b/runtime/vm/object_service.cc
index 80eb1e8..c70282b 100644
--- a/runtime/vm/object_service.cc
+++ b/runtime/vm/object_service.cc
@@ -684,10 +684,6 @@
Object::PrintJSONImpl(stream, ref);
}
-void ImageHeader::PrintJSONImpl(JSONStream* stream, bool ref) const {
- Object::PrintJSONImpl(stream, ref);
-}
-
void WeakSerializationReference::PrintJSONImpl(JSONStream* stream,
bool ref) const {
JSONObject jsobj(stream);
diff --git a/runtime/vm/raw_object.cc b/runtime/vm/raw_object.cc
index de63846..3dfd910 100644
--- a/runtime/vm/raw_object.cc
+++ b/runtime/vm/raw_object.cc
@@ -223,10 +223,6 @@
instance_size = element->HeapSize();
break;
}
- case kImageHeaderCid: {
- instance_size = ImageHeader::InstanceSize();
- break;
- }
case kWeakSerializationReferenceCid: {
instance_size = WeakSerializationReference::InstanceSize();
break;
@@ -568,7 +564,6 @@
NULL_VISITOR(Capability)
NULL_VISITOR(SendPort)
NULL_VISITOR(TransferableTypedData)
-NULL_VISITOR(ImageHeader)
REGULAR_VISITOR(Pointer)
NULL_VISITOR(DynamicLibrary)
VARIABLE_NULL_VISITOR(Instructions, Instructions::Size(raw_obj))
diff --git a/runtime/vm/raw_object.h b/runtime/vm/raw_object.h
index cb853c6..9dd7f8f 100644
--- a/runtime/vm/raw_object.h
+++ b/runtime/vm/raw_object.h
@@ -1415,25 +1415,6 @@
}
};
-class ImageHeaderLayout : public ObjectLayout {
- RAW_HEAP_OBJECT_IMPLEMENTATION(ImageHeader);
-
- VISIT_NOTHING();
- // The offset of the corresponding BSS section from this text section.
- uword bss_offset_;
- // The relocated address of this text section in the shared object. Properly
- // filled for ELF snapshots, always 0 in assembly snapshots. (For the latter,
- // we instead get the value during BSS initialization and store it there.)
- uword instructions_relocated_address_;
- // The offset of the GNU build ID section description field from this text
- // section.
- uword build_id_offset_;
- // The length of the GNU build ID section description field.
- uword build_id_length_;
-
- friend class Image;
-};
-
class WeakSerializationReferenceLayout : public ObjectLayout {
RAW_HEAP_OBJECT_IMPLEMENTATION(WeakSerializationReference);
@@ -1649,17 +1630,29 @@
friend class BlobImageWriter;
};
-// Used only to provide memory accounting for the bare instruction payloads
-// we serialize, since they are no longer part of RawInstructions objects.
+// Used to carry extra information to the VM without changing the embedder
+// interface, to provide memory accounting for the bare instruction payloads
+// we serialize, since they are no longer part of RawInstructions objects,
+// and to avoid special casing bare instructions payload Images in the GC.
class InstructionsSectionLayout : public ObjectLayout {
RAW_HEAP_OBJECT_IMPLEMENTATION(InstructionsSection);
VISIT_NOTHING();
// Instructions section payload length in bytes.
uword payload_length_;
+ // The offset of the corresponding BSS section from this text section.
+ word bss_offset_;
+ // The relocated address of this text section in the shared object. Properly
+ // filled for ELF snapshots, always 0 in assembly snapshots. (For the latter,
+ // we instead get the value during BSS initialization and store it there.)
+ uword instructions_relocated_address_;
+ // The offset of the GNU build ID note section from this text section.
+ word build_id_offset_;
// Variable length data follows here.
uint8_t* data() { OPEN_ARRAY_START(uint8_t, uint8_t); }
+
+ friend class Image;
};
class PcDescriptorsLayout : public ObjectLayout {
diff --git a/runtime/vm/raw_object_snapshot.cc b/runtime/vm/raw_object_snapshot.cc
index 83e457d..2f36c97 100644
--- a/runtime/vm/raw_object_snapshot.cc
+++ b/runtime/vm/raw_object_snapshot.cc
@@ -582,7 +582,6 @@
MESSAGE_SNAPSHOT_UNREACHABLE(UnwindError);
MESSAGE_SNAPSHOT_UNREACHABLE(FutureOr);
MESSAGE_SNAPSHOT_UNREACHABLE(WeakSerializationReference);
-MESSAGE_SNAPSHOT_UNREACHABLE(ImageHeader);
MESSAGE_SNAPSHOT_ILLEGAL(DynamicLibrary);
MESSAGE_SNAPSHOT_ILLEGAL(MirrorReference);
diff --git a/runtime/vm/tagged_pointer.h b/runtime/vm/tagged_pointer.h
index 0cd8548..6a8ce03 100644
--- a/runtime/vm/tagged_pointer.h
+++ b/runtime/vm/tagged_pointer.h
@@ -241,7 +241,6 @@
DEFINE_TAGGED_POINTER(Library, Object)
DEFINE_TAGGED_POINTER(Namespace, Object)
DEFINE_TAGGED_POINTER(KernelProgramInfo, Object)
-DEFINE_TAGGED_POINTER(ImageHeader, Object)
DEFINE_TAGGED_POINTER(WeakSerializationReference, Object)
DEFINE_TAGGED_POINTER(Code, Object)
DEFINE_TAGGED_POINTER(Bytecode, Object)
diff --git a/tests/language_2/type_promotion/assignment_defeats_promotion_and_test.dart b/tests/language_2/type_promotion/assignment_defeats_promotion_and_test.dart
new file mode 100644
index 0000000..c47460b
--- /dev/null
+++ b/tests/language_2/type_promotion/assignment_defeats_promotion_and_test.dart
@@ -0,0 +1,240 @@
+// Copyright (c) 2020, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+// Test that an assignment on the right hand side of `&&` defeats promotion
+// after the entire `&&` expression, not just in the right hand side.
+
+class A {}
+
+class B extends A {
+ // An invocation of the form `x.checkB()` verifies that the static type of `x`
+ // is `B`, since this method is not defined anywhere else.
+ dynamic checkB() => null;
+}
+
+class C extends A {}
+
+class D extends B {}
+
+// An invocation of the form `checkNotB(x)` verifies that the static type of `x`
+// is not `B`, since `B` is not assignable to `C`.
+dynamic checkNotB(C c) => null;
+
+// An invocation of the form `alwaysTrue(x)` always returns `true` regardless of
+// `x`.
+bool alwaysTrue(dynamic x) => true;
+
+ifSimple([A a]) {
+ if (a is B && alwaysTrue(a = null)) {
+ checkNotB(a);
+ }
+}
+
+ifChainedAndsUnparenthesizedUnrelated([A a, Object x]) {
+ if (a is B && alwaysTrue(a = null) && x is int) {
+ checkNotB(a);
+ }
+}
+
+ifChainedAndsUnparenthesizedRePromote([A a]) {
+ if (a is B && alwaysTrue(a = null) && a is B) {
+ a.checkB();
+ }
+}
+
+ifChainedAndsUnparenthesizedAssignLast([A a, Object x]) {
+ if (a is B && x is int && alwaysTrue(a = null)) {
+ checkNotB(a);
+ }
+}
+
+ifChainedAndsUnparenthesizedDeeperPromote([A a]) {
+ if (a is B && a is D && alwaysTrue(a = null)) {
+ checkNotB(a);
+ }
+}
+
+ifChainedAndsParenLeftUnrelated([A a, Object x]) {
+ if ((a is B && alwaysTrue(a = null)) && x is int) {
+ checkNotB(a);
+ }
+}
+
+ifChainedAndsParenLeftRePromote([A a]) {
+ if ((a is B && alwaysTrue(a = null)) && a is B) {
+ a.checkB();
+ }
+}
+
+ifChainedAndsParenLeftAssignLast([A a, Object x]) {
+ if ((a is B && x is int) && alwaysTrue(a = null)) {
+ checkNotB(a);
+ }
+}
+
+ifChainedAndsParenLeftDeeperPromote([A a]) {
+ if ((a is B && a is D) && alwaysTrue(a = null)) {
+ checkNotB(a);
+ }
+}
+
+ifChainedAndsParenRightUnrelated([A a, Object x]) {
+ if (a is B && (alwaysTrue(a = null) && x is int)) {
+ checkNotB(a);
+ }
+}
+
+ifChainedAndsParenRightRePromote([A a]) {
+ if (a is B && (alwaysTrue(a = null) && a is B)) {
+ checkNotB(a);
+ }
+}
+
+ifChainedAndsParenRightAssignLast([A a, Object x]) {
+ if (a is B && (x is int && alwaysTrue(a = null))) {
+ checkNotB(a);
+ }
+}
+
+ifChainedAndsParenRightDeeperPromote([A a]) {
+ if (a is B && (a is D && alwaysTrue(a = null))) {
+ checkNotB(a);
+ }
+}
+
+conditionalSimple([A a]) {
+ a is B && alwaysTrue(a = null) ? checkNotB(a) : null;
+}
+
+conditionalChainedAndsUnparenthesizedUnrelated([A a, Object x]) {
+ a is B && alwaysTrue(a = null) && x is int ? checkNotB(a) : null;
+}
+
+conditionalChainedAndsUnparenthesizedRePromote([A a]) {
+ a is B && alwaysTrue(a = null) && a is B ? a.checkB() : null;
+}
+
+conditionalChainedAndsUnparenthesizedAssignLast([A a, Object x]) {
+ a is B && x is int && alwaysTrue(a = null) ? checkNotB(a) : null;
+}
+
+conditionalChainedAndsUnparenthesizedDeeperPromote([A a]) {
+ a is B && a is D && alwaysTrue(a = null) ? checkNotB(a) : null;
+}
+
+conditionalChainedAndsParenLeftUnrelated([A a, Object x]) {
+ (a is B && alwaysTrue(a = null)) && x is int ? checkNotB(a) : null;
+}
+
+conditionalChainedAndsParenLeftRePromote([A a]) {
+ (a is B && alwaysTrue(a = null)) && a is B ? a.checkB() : null;
+}
+
+conditionalChainedAndsParenLeftAssignLast([A a, Object x]) {
+ (a is B && x is int) && alwaysTrue(a = null) ? checkNotB(a) : null;
+}
+
+conditionalChainedAndsParenLeftDeeperPromote([A a]) {
+ (a is B && a is D) && alwaysTrue(a = null) ? checkNotB(a) : null;
+}
+
+conditionalChainedAndsParenRightUnrelated([A a, Object x]) {
+ a is B && (alwaysTrue(a = null) && x is int) ? checkNotB(a) : null;
+}
+
+conditionalChainedAndsParenRightRePromote([A a]) {
+ a is B && (alwaysTrue(a = null) && a is B) ? checkNotB(a) : null;
+}
+
+conditionalChainedAndsParenRightAssignLast([A a, Object x]) {
+ a is B && (x is int && alwaysTrue(a = null)) ? checkNotB(a) : null;
+}
+
+conditionalChainedAndsParenRightDeeperPromote([A a]) {
+ a is B && (a is D && alwaysTrue(a = null)) ? checkNotB(a) : null;
+}
+
+andSimple([A a]) {
+ a is B && alwaysTrue(a = null) && checkNotB(a);
+}
+
+andChainedAndsUnparenthesizedUnrelated([A a, Object x]) {
+ a is B && alwaysTrue(a = null) && x is int && checkNotB(a);
+}
+
+andChainedAndsUnparenthesizedAssignLast([A a, Object x]) {
+ a is B && x is int && alwaysTrue(a = null) && checkNotB(a);
+}
+
+andChainedAndsUnparenthesizedDeeperPromote([A a]) {
+ a is B && a is D && alwaysTrue(a = null) && checkNotB(a);
+}
+
+andChainedAndsParenLeftUnrelated([A a, Object x]) {
+ (a is B && alwaysTrue(a = null)) && x is int && checkNotB(a);
+}
+
+andChainedAndsParenLeftAssignLast([A a, Object x]) {
+ (a is B && x is int) && alwaysTrue(a = null) && checkNotB(a);
+}
+
+andChainedAndsParenLeftDeeperPromote([A a]) {
+ (a is B && a is D) && alwaysTrue(a = null) && checkNotB(a);
+}
+
+andChainedAndsParenRightUnrelated([A a, Object x]) {
+ a is B && (alwaysTrue(a = null) && x is int) && checkNotB(a);
+}
+
+andChainedAndsParenRightRePromote([A a]) {
+ a is B && (alwaysTrue(a = null) && a is B) && checkNotB(a);
+}
+
+andChainedAndsParenRightAssignLast([A a, Object x]) {
+ a is B && (x is int && alwaysTrue(a = null)) && checkNotB(a);
+}
+
+andChainedAndsParenRightDeeperPromote([A a]) {
+ a is B && (a is D && alwaysTrue(a = null)) && checkNotB(a);
+}
+
+main() {
+ ifSimple();
+ ifChainedAndsUnparenthesizedUnrelated();
+ ifChainedAndsUnparenthesizedRePromote();
+ ifChainedAndsUnparenthesizedAssignLast();
+ ifChainedAndsUnparenthesizedDeeperPromote();
+ ifChainedAndsParenLeftUnrelated();
+ ifChainedAndsParenLeftRePromote();
+ ifChainedAndsParenLeftAssignLast();
+ ifChainedAndsParenLeftDeeperPromote();
+ ifChainedAndsParenRightUnrelated();
+ ifChainedAndsParenRightRePromote();
+ ifChainedAndsParenRightAssignLast();
+ ifChainedAndsParenRightDeeperPromote();
+ conditionalSimple();
+ conditionalChainedAndsUnparenthesizedUnrelated();
+ conditionalChainedAndsUnparenthesizedRePromote();
+ conditionalChainedAndsUnparenthesizedAssignLast();
+ conditionalChainedAndsUnparenthesizedDeeperPromote();
+ conditionalChainedAndsParenLeftUnrelated();
+ conditionalChainedAndsParenLeftRePromote();
+ conditionalChainedAndsParenLeftAssignLast();
+ conditionalChainedAndsParenLeftDeeperPromote();
+ conditionalChainedAndsParenRightUnrelated();
+ conditionalChainedAndsParenRightRePromote();
+ conditionalChainedAndsParenRightAssignLast();
+ conditionalChainedAndsParenRightDeeperPromote();
+ andSimple();
+ andChainedAndsUnparenthesizedUnrelated();
+ andChainedAndsUnparenthesizedAssignLast();
+ andChainedAndsUnparenthesizedDeeperPromote();
+ andChainedAndsParenLeftUnrelated();
+ andChainedAndsParenLeftAssignLast();
+ andChainedAndsParenLeftDeeperPromote();
+ andChainedAndsParenRightUnrelated();
+ andChainedAndsParenRightRePromote();
+ andChainedAndsParenRightAssignLast();
+ andChainedAndsParenRightDeeperPromote();
+}
diff --git a/tests/language_2/type_promotion/assignment_defeats_promotion_cascaded_test.dart b/tests/language_2/type_promotion/assignment_defeats_promotion_cascaded_test.dart
new file mode 100644
index 0000000..9b65f00
--- /dev/null
+++ b/tests/language_2/type_promotion/assignment_defeats_promotion_cascaded_test.dart
@@ -0,0 +1,34 @@
+// Copyright (c) 2020, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+// Test that an assignment inside a complex promotion scope defeats all pending
+// promotions, whether introduced directly or through LHS or RHS of a logical
+// and expression.
+
+class A {}
+
+class B extends A {}
+
+class C extends A {}
+
+class D extends B {}
+
+class E extends D {}
+
+// An invocation of the form `checkNotB(x)` verifies that the static type of `x`
+// is not `B`, since `B` is not assignable to `C`.
+dynamic checkNotB(C c) => null;
+
+test([A a]) {
+ if (a is B && a is D) {
+ if (a is E) {
+ checkNotB(a);
+ a = null;
+ }
+ }
+}
+
+main() {
+ test();
+}
diff --git a/tests/language_2/type_promotion/assignment_defeats_promotion_immediate_test.dart b/tests/language_2/type_promotion/assignment_defeats_promotion_immediate_test.dart
new file mode 100644
index 0000000..f128749
--- /dev/null
+++ b/tests/language_2/type_promotion/assignment_defeats_promotion_immediate_test.dart
@@ -0,0 +1,83 @@
+// Copyright (c) 2020, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+// Test that an assignment inside a promotion scope defeats the promotion, even
+// if the assignment fills the scope (there are no intervening syntactic
+// constructs).
+
+class A {}
+
+class B extends A {}
+
+class C extends A {}
+
+// An invocation of the form `checkNotB(x)` verifies that the static type of `x`
+// is not `B`, since `B` is not assignable to `C`.
+dynamic checkNotB(C c) => null;
+
+conditional([A a]) {
+ a is B ? a = checkNotB(a) : null;
+}
+
+ifStatementWithoutElse([A a]) {
+ if (a is B) a = checkNotB(a);
+}
+
+ifStatementWithElse([A a]) {
+ if (a is B)
+ a = checkNotB(a);
+ else
+ null;
+}
+
+ifElementWithoutElseList([A a]) {
+ [if (a is B) a = checkNotB(a)];
+}
+
+ifElementWithoutElseSet([A a]) {
+ ({if (a is B) a = checkNotB(a)});
+}
+
+ifElementWithoutElseMapKey([A a]) {
+ ({if (a is B) a = checkNotB(a): null});
+}
+
+ifElementWithoutElseMapValue([A a]) {
+ ({if (a is B) null: a = checkNotB(a)});
+}
+
+ifElementWithElseList([A a]) {
+ [if (a is B) a = checkNotB(a) else null];
+}
+
+ifElementWithElseSet([A a]) {
+ ({if (a is B) a = checkNotB(a) else null});
+}
+
+ifElementWithElseMapKey([A a]) {
+ ({if (a is B) a = checkNotB(a): null else null: null});
+}
+
+ifElementWithElseMapValue([A a]) {
+ ({if (a is B) null: a = checkNotB(a) else null: null});
+}
+
+logicalAnd([A a]) {
+ a is B && (a = checkNotB(a));
+}
+
+main() {
+ conditional();
+ ifStatementWithoutElse();
+ ifStatementWithElse();
+ ifElementWithoutElseList();
+ ifElementWithoutElseSet();
+ ifElementWithoutElseMapKey();
+ ifElementWithoutElseMapValue();
+ ifElementWithElseList();
+ ifElementWithElseSet();
+ ifElementWithElseMapKey();
+ ifElementWithElseMapValue();
+ logicalAnd();
+}
diff --git a/tests/language_2/type_promotion/assignment_defeats_promotion_lhs_and_test.dart b/tests/language_2/type_promotion/assignment_defeats_promotion_lhs_and_test.dart
new file mode 100644
index 0000000..f917dd4
--- /dev/null
+++ b/tests/language_2/type_promotion/assignment_defeats_promotion_lhs_and_test.dart
@@ -0,0 +1,37 @@
+// Copyright (c) 2020, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+// Test that an assignment on the left hand side of `&&` defeats promotion after
+// the entire `&&` expression, even if the promotion is on the right hand side
+// of `&&`.
+//
+// Note that it is not strictly necessary for soundness to defeat promotion
+// under this circumstance, but it is in the spec.
+
+class A {}
+
+class B extends A {}
+
+class C extends A {}
+
+// An invocation of the form `checkNotB(x)` verifies that the static type of `x`
+// is not `B`, since `B` is not assignable to `C`.
+dynamic checkNotB(C c) => null;
+
+// An invocation of the form `alwaysTrue(x)` always returns `true` regardless of
+// `x`.
+bool alwaysTrue(dynamic x) => true;
+
+andChainedAndsUnparenthesizedRePromote([A a]) {
+ a is B && alwaysTrue(a = null) && a is B && checkNotB(a);
+}
+
+andChainedAndsParenLeftRePromote([A a]) {
+ (a is B && alwaysTrue(a = null)) && a is B && checkNotB(a);
+}
+
+main() {
+ andChainedAndsUnparenthesizedRePromote();
+ andChainedAndsParenLeftRePromote();
+}
diff --git a/tests/language_2/type_promotion/assignment_defeats_promotion_nested_other_bool_test.dart b/tests/language_2/type_promotion/assignment_defeats_promotion_nested_other_bool_test.dart
new file mode 100644
index 0000000..4df8b11
--- /dev/null
+++ b/tests/language_2/type_promotion/assignment_defeats_promotion_nested_other_bool_test.dart
@@ -0,0 +1,118 @@
+// Copyright (c) 2020, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+// Test that an assignment inside a promotion scope defeats the promotion, even
+// if it is nested inside another unrelated promotion scope.
+
+class A {}
+
+class B extends A {}
+
+class C extends A {}
+
+// An invocation of the form `checkNotB(x)` verifies that the static type of `x`
+// is not `B`, since `B` is not assignable to `C`.
+checkNotB(C c) {}
+
+// An invocation of the form `alwaysTrue(x)` always returns `true` regardless of
+// `x`.
+bool alwaysTrue(dynamic x) => true;
+
+noNesting([A a]) {
+ if (a is B) {
+ checkNotB(a);
+ a = null;
+ }
+}
+
+nestedInsideConditionalExpressionThen([A a, bool b = true]) {
+ if (a is B) {
+ checkNotB(a);
+ b ? a = null : null;
+ }
+}
+
+nestedInsideConditionalExpressionElse([A a, bool b = true]) {
+ if (a is B) {
+ checkNotB(a);
+ b ? null : a = null;
+ }
+}
+
+nestedInsideIfStatementThenNoElse([A a, bool b = true]) {
+ if (a is B) {
+ checkNotB(a);
+ if (b) {
+ a = null;
+ }
+ }
+}
+
+nestedInsideIfStatementThenWithElse([A a, bool b = true]) {
+ if (a is B) {
+ checkNotB(a);
+ if (b) {
+ a = null;
+ } else {}
+ }
+}
+
+nestedInsideIfStatementElse([A a, bool b = true]) {
+ if (a is B) {
+ checkNotB(a);
+ if (b) {
+ } else {
+ a = null;
+ }
+ }
+}
+
+nestedInsideIfElementThenNoElse([A a, bool b = true]) {
+ if (a is B) {
+ checkNotB(a);
+ [if (b) alwaysTrue(a = null)];
+ }
+}
+
+nestedInsideIfElementThenWithElse([A a, bool b = true]) {
+ if (a is B) {
+ checkNotB(a);
+ [if (b) alwaysTrue(a = null) else 0];
+ }
+}
+
+nestedInsideIfElementElse([A a, bool b = true]) {
+ if (a is B) {
+ checkNotB(a);
+ [if (b) 0 else alwaysTrue(a = null)];
+ }
+}
+
+nestedInsideRhsOfAnd([A a, bool b = true]) {
+ if (a is B) {
+ checkNotB(a);
+ b && alwaysTrue(a = null);
+ }
+}
+
+nestedInsideRhsOfOr([A a, bool b = true]) {
+ if (a is B) {
+ checkNotB(a);
+ b || alwaysTrue(a = null);
+ }
+}
+
+main() {
+ noNesting();
+ nestedInsideConditionalExpressionThen();
+ nestedInsideConditionalExpressionElse();
+ nestedInsideIfStatementThenNoElse();
+ nestedInsideIfStatementThenWithElse();
+ nestedInsideIfStatementElse();
+ nestedInsideIfElementThenNoElse();
+ nestedInsideIfElementThenWithElse();
+ nestedInsideIfElementElse();
+ nestedInsideRhsOfAnd();
+ nestedInsideRhsOfOr();
+}
diff --git a/tests/language_2/type_promotion/assignment_defeats_promotion_nested_other_test.dart b/tests/language_2/type_promotion/assignment_defeats_promotion_nested_other_test.dart
new file mode 100644
index 0000000..c87a85d
--- /dev/null
+++ b/tests/language_2/type_promotion/assignment_defeats_promotion_nested_other_test.dart
@@ -0,0 +1,118 @@
+// Copyright (c) 2020, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+// Test that an assignment inside a promotion scope defeats the promotion, even
+// if it is nested inside another unrelated promotion scope.
+
+class A {}
+
+class B extends A {}
+
+class C extends A {}
+
+// An invocation of the form `checkNotB(x)` verifies that the static type of `x`
+// is not `B`, since `B` is not assignable to `C`.
+checkNotB(C c) {}
+
+// An invocation of the form `alwaysTrue(x)` always returns `true` regardless of
+// `x`.
+bool alwaysTrue(dynamic x) => true;
+
+noNesting([A a]) {
+ if (a is B) {
+ checkNotB(a);
+ a = null;
+ }
+}
+
+nestedInsideConditionalExpressionThen([A a, Object x]) {
+ if (a is B) {
+ checkNotB(a);
+ x is int ? a = null : null;
+ }
+}
+
+nestedInsideConditionalExpressionElse([A a, Object x]) {
+ if (a is B) {
+ checkNotB(a);
+ x is int ? null : a = null;
+ }
+}
+
+nestedInsideIfStatementThenNoElse([A a, Object x]) {
+ if (a is B) {
+ checkNotB(a);
+ if (x is int) {
+ a = null;
+ }
+ }
+}
+
+nestedInsideIfStatementThenWithElse([A a, Object x]) {
+ if (a is B) {
+ checkNotB(a);
+ if (x is int) {
+ a = null;
+ } else {}
+ }
+}
+
+nestedInsideIfStatementElse([A a, Object x]) {
+ if (a is B) {
+ checkNotB(a);
+ if (x is int) {
+ } else {
+ a = null;
+ }
+ }
+}
+
+nestedInsideIfElementThenNoElse([A a, Object x]) {
+ if (a is B) {
+ checkNotB(a);
+ [if (x is int) alwaysTrue(a = null)];
+ }
+}
+
+nestedInsideIfElementThenWithElse([A a, Object x]) {
+ if (a is B) {
+ checkNotB(a);
+ [if (x is int) alwaysTrue(a = null) else 0];
+ }
+}
+
+nestedInsideIfElementElse([A a, Object x]) {
+ if (a is B) {
+ checkNotB(a);
+ [if (x is int) 0 else alwaysTrue(a = null)];
+ }
+}
+
+nestedInsideRhsOfAnd([A a, Object x]) {
+ if (a is B) {
+ checkNotB(a);
+ x is int && alwaysTrue(a = null);
+ }
+}
+
+nestedInsideRhsOfOr([A a, Object x]) {
+ if (a is B) {
+ checkNotB(a);
+ x is int || alwaysTrue(a = null);
+ }
+}
+
+main() {
+ noNesting();
+ nestedInsideConditionalExpressionThen();
+ nestedInsideConditionalExpressionElse();
+ nestedInsideIfStatementThenNoElse();
+ nestedInsideIfStatementThenWithElse();
+ nestedInsideIfStatementElse();
+ nestedInsideIfElementThenNoElse();
+ nestedInsideIfElementThenWithElse();
+ nestedInsideIfElementElse();
+ nestedInsideRhsOfAnd();
+ nestedInsideRhsOfOr();
+}
diff --git a/tests/language_2/type_promotion/assignment_defeats_promotion_nested_same_test.dart b/tests/language_2/type_promotion/assignment_defeats_promotion_nested_same_test.dart
new file mode 100644
index 0000000..09a3785
--- /dev/null
+++ b/tests/language_2/type_promotion/assignment_defeats_promotion_nested_same_test.dart
@@ -0,0 +1,121 @@
+// Copyright (c) 2020, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+// Test that an assignment inside a promotion scope defeats the promotion, even
+// if it is nested inside another promotion scope that promotes the same
+// variable.
+
+class A {}
+
+class B extends A {}
+
+class C extends A {}
+
+class D extends B {}
+
+// An invocation of the form `checkNotB(x)` verifies that the static type of `x`
+// is not `B`, since `B` is not assignable to `C`.
+checkNotB(C c) {}
+
+// An invocation of the form `alwaysTrue(x)` always returns `true` regardless of
+// `x`.
+bool alwaysTrue(dynamic x) => true;
+
+noNesting([A a]) {
+ if (a is B) {
+ checkNotB(a);
+ a = null;
+ }
+}
+
+nestedInsideConditionalExpressionThen([A a]) {
+ if (a is B) {
+ checkNotB(a);
+ a is D ? a = null : null;
+ }
+}
+
+nestedInsideConditionalExpressionElse([A a]) {
+ if (a is B) {
+ checkNotB(a);
+ a is D ? null : a = null;
+ }
+}
+
+nestedInsideIfStatementThenNoElse([A a]) {
+ if (a is B) {
+ checkNotB(a);
+ if (a is D) {
+ a = null;
+ }
+ }
+}
+
+nestedInsideIfStatementThenWithElse([A a]) {
+ if (a is B) {
+ checkNotB(a);
+ if (a is D) {
+ a = null;
+ } else {}
+ }
+}
+
+nestedInsideIfStatementElse([A a]) {
+ if (a is B) {
+ checkNotB(a);
+ if (a is D) {
+ } else {
+ a = null;
+ }
+ }
+}
+
+nestedInsideIfElementThenNoElse([A a]) {
+ if (a is B) {
+ checkNotB(a);
+ [if (a is D) alwaysTrue(a = null)];
+ }
+}
+
+nestedInsideIfElementThenWithElse([A a]) {
+ if (a is B) {
+ checkNotB(a);
+ [if (a is D) alwaysTrue(a = null) else 0];
+ }
+}
+
+nestedInsideIfElementElse([A a]) {
+ if (a is B) {
+ checkNotB(a);
+ [if (a is D) 0 else alwaysTrue(a = null)];
+ }
+}
+
+nestedInsideRhsOfAnd([A a]) {
+ if (a is B) {
+ checkNotB(a);
+ a is D && alwaysTrue(a = null);
+ }
+}
+
+nestedInsideRhsOfOr([A a]) {
+ if (a is B) {
+ checkNotB(a);
+ a is D || alwaysTrue(a = null);
+ }
+}
+
+main() {
+ noNesting();
+ nestedInsideConditionalExpressionThen();
+ nestedInsideConditionalExpressionElse();
+ nestedInsideIfStatementThenNoElse();
+ nestedInsideIfStatementThenWithElse();
+ nestedInsideIfStatementElse();
+ nestedInsideIfElementThenNoElse();
+ nestedInsideIfElementThenWithElse();
+ nestedInsideIfElementElse();
+ nestedInsideRhsOfAnd();
+ nestedInsideRhsOfOr();
+}
diff --git a/tests/language_2/type_promotion/logical_and_test.dart b/tests/language_2/type_promotion/logical_and_test.dart
index 79945ba..8168615 100644
--- a/tests/language_2/type_promotion/logical_and_test.dart
+++ b/tests/language_2/type_promotion/logical_and_test.dart
@@ -34,6 +34,7 @@
b = a.d;
// ^
// [analyzer] COMPILE_TIME_ERROR.UNDEFINED_GETTER
+ // [cfe] The getter 'd' isn't defined for the class 'A'.
}
if (a is D && (b = a.d)) {
b = a.d;
diff --git a/tools/VERSION b/tools/VERSION
index d970f4f..cc68fb0 100644
--- a/tools/VERSION
+++ b/tools/VERSION
@@ -27,5 +27,5 @@
MAJOR 2
MINOR 11
PATCH 0
-PRERELEASE 191
+PRERELEASE 192
PRERELEASE_PATCH 0
\ No newline at end of file