Version 2.17.0-203.0.dev
Merge commit 'd70e97f8a850f44b30e018a42dc2cd92011fb38d' into 'dev'
diff --git a/pkg/compiler/lib/src/deferred_load/program_split_constraints/builder.dart b/pkg/compiler/lib/src/deferred_load/program_split_constraints/builder.dart
index 2428f3e..1bd334f 100644
--- a/pkg/compiler/lib/src/deferred_load/program_split_constraints/builder.dart
+++ b/pkg/compiler/lib/src/deferred_load/program_split_constraints/builder.dart
@@ -150,9 +150,8 @@
Map<ImportEntity, Set<ImportEntity>> singletonTransitions = {};
Map<Constraint, SetTransition> setTransitions = {};
Map<Constraint, Set<ImportEntity>> processed = {};
- Queue<_WorkItem> queue = Queue.from(nodeToConstraintMap.values
- .where((node) => node.successors.isEmpty)
- .map((node) => _WorkItem(node)));
+ Queue<_WorkItem> queue =
+ Queue.from(nodeToConstraintMap.values.map((node) => _WorkItem(node)));
while (queue.isNotEmpty) {
var item = queue.removeFirst();
var constraint = item.child;
diff --git a/pkg/compiler/test/custom_split/custom_split_test.dart b/pkg/compiler/test/custom_split/custom_split_test.dart
index 7f4c4dc..2e62442 100644
--- a/pkg/compiler/test/custom_split/custom_split_test.dart
+++ b/pkg/compiler/test/custom_split/custom_split_test.dart
@@ -28,6 +28,7 @@
'fuse_with_or',
'two_step',
'two_branch',
+ 'just_fuse',
];
Map<String, List<String>> createPerTestOptions() {
diff --git a/pkg/compiler/test/custom_split/data/just_fuse/constraints.dart b/pkg/compiler/test/custom_split/data/just_fuse/constraints.dart
new file mode 100644
index 0000000..3b4280a
--- /dev/null
+++ b/pkg/compiler/test/custom_split/data/just_fuse/constraints.dart
@@ -0,0 +1,22 @@
+// Copyright (c) 2022, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+import 'dart:isolate';
+
+import 'package:compiler/src/deferred_load/program_split_constraints/nodes.dart';
+import '../../constraint_harness.dart';
+
+void main(List<String> args, SendPort sendPort) {
+ waitForImportsAndInvoke(sendPort, processDeferredImports);
+}
+
+List<Node> processDeferredImports(List<String> imports) {
+ var lib1 = 'memory:sdk/tests/web/native/lib1.dart#b1';
+ var lib2 = 'memory:sdk/tests/web/native/lib2.dart#b2';
+ var builder = ProgramSplitBuilder();
+ return [
+ ...imports.map(builder.referenceNode),
+ builder.fuseNode({lib1, lib2}),
+ ];
+}
diff --git a/pkg/compiler/test/custom_split/data/just_fuse/constraints.json b/pkg/compiler/test/custom_split/data/just_fuse/constraints.json
new file mode 100644
index 0000000..81702d9
--- /dev/null
+++ b/pkg/compiler/test/custom_split/data/just_fuse/constraints.json
@@ -0,0 +1,29 @@
+[
+ {
+ "type": "reference",
+ "name": "memory:sdk/tests/web/native/lib1.dart#b1",
+ "import": "memory:sdk/tests/web/native/lib1.dart#b1"
+ },
+ {
+ "type": "reference",
+ "name": "memory:sdk/tests/web/native/lib2.dart#b2",
+ "import": "memory:sdk/tests/web/native/lib2.dart#b2"
+ },
+ {
+ "type": "reference",
+ "name": "memory:sdk/tests/web/native/lib3.dart#b3",
+ "import": "memory:sdk/tests/web/native/lib3.dart#b3"
+ },
+ {
+ "type": "reference",
+ "name": "memory:sdk/tests/web/native/lib4.dart#b4",
+ "import": "memory:sdk/tests/web/native/lib4.dart#b4"
+ },
+ {
+ "type": "fuse",
+ "nodes": [
+ "memory:sdk/tests/web/native/lib1.dart#b1",
+ "memory:sdk/tests/web/native/lib2.dart#b2"
+ ]
+ }
+]
\ No newline at end of file
diff --git a/pkg/compiler/test/custom_split/data/just_fuse/lib1.dart b/pkg/compiler/test/custom_split/data/just_fuse/lib1.dart
new file mode 100644
index 0000000..33e2c05
--- /dev/null
+++ b/pkg/compiler/test/custom_split/data/just_fuse/lib1.dart
@@ -0,0 +1,12 @@
+// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+// This file was autogenerated by the pkg/compiler/tool/graph_isomorphizer.dart.
+import 'lib_100_0.dart' deferred as b1;
+
+/*member: entryLib1:member_unit=main{}*/
+entryLib1() async {
+ await b1.loadLibrary();
+ b1.g_100_0();
+}
diff --git a/pkg/compiler/test/custom_split/data/just_fuse/lib2.dart b/pkg/compiler/test/custom_split/data/just_fuse/lib2.dart
new file mode 100644
index 0000000..1ad0cd3
--- /dev/null
+++ b/pkg/compiler/test/custom_split/data/just_fuse/lib2.dart
@@ -0,0 +1,12 @@
+// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+// This file was autogenerated by the pkg/compiler/tool/graph_isomorphizer.dart.
+import 'lib_010_0.dart' deferred as b2;
+
+/*member: entryLib2:member_unit=main{}*/
+entryLib2() async {
+ await b2.loadLibrary();
+ b2.g_010_0();
+}
diff --git a/pkg/compiler/test/custom_split/data/just_fuse/lib3.dart b/pkg/compiler/test/custom_split/data/just_fuse/lib3.dart
new file mode 100644
index 0000000..1d2b9d5
--- /dev/null
+++ b/pkg/compiler/test/custom_split/data/just_fuse/lib3.dart
@@ -0,0 +1,12 @@
+// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+// This file was autogenerated by the pkg/compiler/tool/graph_isomorphizer.dart.
+import 'lib_001_0.dart' deferred as b3;
+
+/*member: entryLib3:member_unit=main{}*/
+entryLib3() async {
+ await b3.loadLibrary();
+ b3.g_001_0();
+}
diff --git a/pkg/compiler/test/custom_split/data/just_fuse/lib4.dart b/pkg/compiler/test/custom_split/data/just_fuse/lib4.dart
new file mode 100644
index 0000000..1eedd87
--- /dev/null
+++ b/pkg/compiler/test/custom_split/data/just_fuse/lib4.dart
@@ -0,0 +1,12 @@
+// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+// This file was autogenerated by the pkg/compiler/tool/graph_isomorphizer.dart.
+import 'lib_000_1.dart' deferred as b4;
+
+/*member: entryLib4:member_unit=main{}*/
+entryLib4() async {
+ await b4.loadLibrary();
+ b4.g_000_1();
+}
diff --git a/pkg/compiler/test/custom_split/data/just_fuse/libImport.dart b/pkg/compiler/test/custom_split/data/just_fuse/libImport.dart
new file mode 100644
index 0000000..1f64eba
--- /dev/null
+++ b/pkg/compiler/test/custom_split/data/just_fuse/libImport.dart
@@ -0,0 +1,58 @@
+// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+// This file was autogenerated by the pkg/compiler/tool/graph_isomorphizer.dart.
+import "package:expect/expect.dart";
+
+/*member: v:member_unit=2{b1, b2, b3, b4}*/
+void v(Set<String> u, String name, int bit) {
+ Expect.isTrue(u.add(name));
+ Expect.equals(name[bit], '1');
+}
+
+@pragma('dart2js:noInline')
+/*member: f_100_0:member_unit=1{b1, b2}*/
+f_100_0(Set<String> u, int b) => v(u, '1000', b);
+@pragma('dart2js:noInline')
+/*member: f_100_1:member_unit=3{b1, b2, b4}*/
+f_100_1(Set<String> u, int b) => v(u, '1001', b);
+@pragma('dart2js:noInline')
+/*member: f_101_0:member_unit=4{b1, b2, b3}*/
+f_101_0(Set<String> u, int b) => v(u, '1010', b);
+@pragma('dart2js:noInline')
+/*member: f_101_1:member_unit=2{b1, b2, b3, b4}*/
+f_101_1(Set<String> u, int b) => v(u, '1011', b);
+@pragma('dart2js:noInline')
+/*member: f_110_0:member_unit=1{b1, b2}*/
+f_110_0(Set<String> u, int b) => v(u, '1100', b);
+@pragma('dart2js:noInline')
+/*member: f_110_1:member_unit=3{b1, b2, b4}*/
+f_110_1(Set<String> u, int b) => v(u, '1101', b);
+@pragma('dart2js:noInline')
+/*member: f_111_0:member_unit=4{b1, b2, b3}*/
+f_111_0(Set<String> u, int b) => v(u, '1110', b);
+@pragma('dart2js:noInline')
+/*member: f_111_1:member_unit=2{b1, b2, b3, b4}*/
+f_111_1(Set<String> u, int b) => v(u, '1111', b);
+@pragma('dart2js:noInline')
+/*member: f_010_0:member_unit=1{b1, b2}*/
+f_010_0(Set<String> u, int b) => v(u, '0100', b);
+@pragma('dart2js:noInline')
+/*member: f_010_1:member_unit=3{b1, b2, b4}*/
+f_010_1(Set<String> u, int b) => v(u, '0101', b);
+@pragma('dart2js:noInline')
+/*member: f_011_0:member_unit=4{b1, b2, b3}*/
+f_011_0(Set<String> u, int b) => v(u, '0110', b);
+@pragma('dart2js:noInline')
+/*member: f_011_1:member_unit=2{b1, b2, b3, b4}*/
+f_011_1(Set<String> u, int b) => v(u, '0111', b);
+@pragma('dart2js:noInline')
+/*member: f_001_0:member_unit=5{b3}*/
+f_001_0(Set<String> u, int b) => v(u, '0010', b);
+@pragma('dart2js:noInline')
+/*member: f_001_1:member_unit=6{b3, b4}*/
+f_001_1(Set<String> u, int b) => v(u, '0011', b);
+@pragma('dart2js:noInline')
+/*member: f_000_1:member_unit=7{b4}*/
+f_000_1(Set<String> u, int b) => v(u, '0001', b);
diff --git a/pkg/compiler/test/custom_split/data/just_fuse/lib_000_1.dart b/pkg/compiler/test/custom_split/data/just_fuse/lib_000_1.dart
new file mode 100644
index 0000000..189dcd0
--- /dev/null
+++ b/pkg/compiler/test/custom_split/data/just_fuse/lib_000_1.dart
@@ -0,0 +1,25 @@
+// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+// This file was autogenerated by the pkg/compiler/tool/graph_isomorphizer.dart.
+import "package:expect/expect.dart";
+
+import 'libImport.dart';
+
+@pragma('dart2js:noInline')
+/*member: g_000_1:member_unit=7{b4}*/
+g_000_1() {
+ Set<String> uniques = {};
+
+ // f_***_1;
+ f_000_1(uniques, 3);
+ f_001_1(uniques, 3);
+ f_010_1(uniques, 3);
+ f_011_1(uniques, 3);
+ f_100_1(uniques, 3);
+ f_101_1(uniques, 3);
+ f_110_1(uniques, 3);
+ f_111_1(uniques, 3);
+ Expect.equals(8, uniques.length);
+}
diff --git a/pkg/compiler/test/custom_split/data/just_fuse/lib_001_0.dart b/pkg/compiler/test/custom_split/data/just_fuse/lib_001_0.dart
new file mode 100644
index 0000000..9320c80
--- /dev/null
+++ b/pkg/compiler/test/custom_split/data/just_fuse/lib_001_0.dart
@@ -0,0 +1,25 @@
+// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+// This file was autogenerated by the pkg/compiler/tool/graph_isomorphizer.dart.
+import "package:expect/expect.dart";
+
+import 'libImport.dart';
+
+@pragma('dart2js:noInline')
+/*member: g_001_0:member_unit=5{b3}*/
+g_001_0() {
+ Set<String> uniques = {};
+
+ // f_**1_*;
+ f_001_0(uniques, 2);
+ f_001_1(uniques, 2);
+ f_011_0(uniques, 2);
+ f_011_1(uniques, 2);
+ f_101_0(uniques, 2);
+ f_101_1(uniques, 2);
+ f_111_0(uniques, 2);
+ f_111_1(uniques, 2);
+ Expect.equals(8, uniques.length);
+}
diff --git a/pkg/compiler/test/custom_split/data/just_fuse/lib_010_0.dart b/pkg/compiler/test/custom_split/data/just_fuse/lib_010_0.dart
new file mode 100644
index 0000000..b2575c3
--- /dev/null
+++ b/pkg/compiler/test/custom_split/data/just_fuse/lib_010_0.dart
@@ -0,0 +1,25 @@
+// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+// This file was autogenerated by the pkg/compiler/tool/graph_isomorphizer.dart.
+import "package:expect/expect.dart";
+
+import 'libImport.dart';
+
+@pragma('dart2js:noInline')
+/*member: g_010_0:member_unit=1{b1, b2}*/
+g_010_0() {
+ Set<String> uniques = {};
+
+ // f_*1*_*;
+ f_010_0(uniques, 1);
+ f_010_1(uniques, 1);
+ f_011_0(uniques, 1);
+ f_011_1(uniques, 1);
+ f_110_0(uniques, 1);
+ f_110_1(uniques, 1);
+ f_111_0(uniques, 1);
+ f_111_1(uniques, 1);
+ Expect.equals(8, uniques.length);
+}
diff --git a/pkg/compiler/test/custom_split/data/just_fuse/lib_100_0.dart b/pkg/compiler/test/custom_split/data/just_fuse/lib_100_0.dart
new file mode 100644
index 0000000..4d8ad55
--- /dev/null
+++ b/pkg/compiler/test/custom_split/data/just_fuse/lib_100_0.dart
@@ -0,0 +1,25 @@
+// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+// This file was autogenerated by the pkg/compiler/tool/graph_isomorphizer.dart.
+import "package:expect/expect.dart";
+
+import 'libImport.dart';
+
+@pragma('dart2js:noInline')
+/*member: g_100_0:member_unit=1{b1, b2}*/
+g_100_0() {
+ Set<String> uniques = {};
+
+ // f_1**_*;
+ f_100_0(uniques, 0);
+ f_100_1(uniques, 0);
+ f_101_0(uniques, 0);
+ f_101_1(uniques, 0);
+ f_110_0(uniques, 0);
+ f_110_1(uniques, 0);
+ f_111_0(uniques, 0);
+ f_111_1(uniques, 0);
+ Expect.equals(8, uniques.length);
+}
diff --git a/pkg/compiler/test/custom_split/data/just_fuse/main.dart b/pkg/compiler/test/custom_split/data/just_fuse/main.dart
new file mode 100644
index 0000000..7cceec0
--- /dev/null
+++ b/pkg/compiler/test/custom_split/data/just_fuse/main.dart
@@ -0,0 +1,41 @@
+// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+/*library:
+ a_pre_fragments=[
+ p1: {units: [7{b4}], usedBy: [], needs: []},
+ p2: {units: [5{b3}], usedBy: [], needs: []},
+ p3: {units: [6{b3, b4}], usedBy: [], needs: []},
+ p4: {units: [1{b1, b2}], usedBy: [], needs: []},
+ p5: {units: [3{b1, b2, b4}], usedBy: [], needs: []},
+ p6: {units: [4{b1, b2, b3}], usedBy: [], needs: []},
+ p7: {units: [2{b1, b2, b3, b4}], usedBy: [], needs: []}],
+ b_finalized_fragments=[
+ f1: [7{b4}],
+ f2: [5{b3}],
+ f3: [6{b3, b4}],
+ f4: [1{b1, b2}],
+ f5: [3{b1, b2, b4}],
+ f6: [4{b1, b2, b3}],
+ f7: [2{b1, b2, b3, b4}]],
+ c_steps=[
+ b1=(f7, f6, f5, f4),
+ b2=(f7, f6, f5, f4),
+ b3=(f7, f6, f3, f2),
+ b4=(f7, f5, f3, f1)]
+*/
+
+// This file was autogenerated by the pkg/compiler/tool/graph_isomorphizer.dart.
+import 'lib1.dart';
+import 'lib2.dart';
+import 'lib3.dart';
+import 'lib4.dart';
+
+/*member: main:member_unit=main{}*/
+main() {
+ entryLib1();
+ entryLib2();
+ entryLib3();
+ entryLib4();
+}
diff --git a/runtime/platform/globals.h b/runtime/platform/globals.h
index 28bf82c..f2ede3d 100644
--- a/runtime/platform/globals.h
+++ b/runtime/platform/globals.h
@@ -721,14 +721,17 @@
// Undefine math.h definition which clashes with our condition names.
#undef OVERFLOW
-// Include IL printer functionality into non-PRODUCT builds or in all AOT
-// compiler builds or when forced.
+// Include IL printer and disassembler functionality into non-PRODUCT builds,
+// into all AOT compiler builds, or when forced.
#if !defined(PRODUCT) || defined(DART_PRECOMPILER) || \
defined(FORCE_INCLUDE_DISASSEMBLER)
#if defined(DART_PRECOMPILED_RUNTIME) && defined(PRODUCT)
#error Requested to include IL printer into PRODUCT AOT runtime
#endif
#define INCLUDE_IL_PRINTER 1
+#if !defined(FORCE_INCLUDE_DISASSEMBLER)
+#define FORCE_INCLUDE_DISASSEMBLER 1
+#endif
#endif
} // namespace dart
diff --git a/runtime/vm/compiler/aot/precompiler.cc b/runtime/vm/compiler/aot/precompiler.cc
index e1830d1..36fd740 100644
--- a/runtime/vm/compiler/aot/precompiler.cc
+++ b/runtime/vm/compiler/aot/precompiler.cc
@@ -3194,11 +3194,8 @@
&speculative_policy, pass_state.inline_id_to_function,
pass_state.inline_id_to_token_pos, pass_state.caller_inline_id,
ic_data_array, function_stats);
- {
- COMPILER_TIMINGS_TIMER_SCOPE(thread(), EmitCode);
- TIMELINE_DURATION(thread(), CompilerVerbose, "CompileGraph");
- graph_compiler.CompileGraph();
- }
+ pass_state.graph_compiler = &graph_compiler;
+ CompilerPass::GenerateCode(&pass_state);
{
COMPILER_TIMINGS_TIMER_SCOPE(thread(), FinalizeCode);
TIMELINE_DURATION(thread(), CompilerVerbose, "FinalizeCompilation");
diff --git a/runtime/vm/compiler/assembler/disassembler.cc b/runtime/vm/compiler/assembler/disassembler.cc
index 9d8e13c..e376b04 100644
--- a/runtime/vm/compiler/assembler/disassembler.cc
+++ b/runtime/vm/compiler/assembler/disassembler.cc
@@ -58,53 +58,6 @@
va_end(args);
}
-void DisassembleToJSONStream::ConsumeInstruction(char* hex_buffer,
- intptr_t hex_size,
- char* human_buffer,
- intptr_t human_size,
- Object* object,
- uword pc) {
- // Instructions are represented as four consecutive values in a JSON array.
- // The first is the address of the instruction, the second is the hex string,
- // of the code, and the third is a human readable string, and the fourth is
- // the object loaded by the instruction.
- jsarr_.AddValueF("%" Pp "", pc);
- jsarr_.AddValue(hex_buffer);
- jsarr_.AddValue(human_buffer);
-
- if (object != NULL) {
- jsarr_.AddValue(*object);
- } else {
- jsarr_.AddValueNull(); // Not a reference to null.
- }
-}
-
-void DisassembleToJSONStream::Print(const char* format, ...) {
- va_list measure_args;
- va_start(measure_args, format);
- intptr_t len = Utils::VSNPrint(NULL, 0, format, measure_args);
- va_end(measure_args);
-
- char* p = reinterpret_cast<char*>(malloc(len + 1));
- va_list print_args;
- va_start(print_args, format);
- intptr_t len2 = Utils::VSNPrint(p, len, format, print_args);
- va_end(print_args);
- ASSERT(len == len2);
- for (intptr_t i = 0; i < len; i++) {
- if (p[i] == '\n' || p[i] == '\r') {
- p[i] = ' ';
- }
- }
- // Instructions are represented as four consecutive values in a JSON array.
- // Comments only use the third slot. See above comment for more information.
- jsarr_.AddValueNull();
- jsarr_.AddValueNull();
- jsarr_.AddValue(p);
- jsarr_.AddValueNull();
- free(p);
-}
-
void DisassembleToMemory::ConsumeInstruction(char* hex_buffer,
intptr_t hex_size,
char* human_buffer,
@@ -519,4 +472,53 @@
bool optimized) {}
#endif // !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER)
+#if !defined(PRODUCT)
+void DisassembleToJSONStream::ConsumeInstruction(char* hex_buffer,
+ intptr_t hex_size,
+ char* human_buffer,
+ intptr_t human_size,
+ Object* object,
+ uword pc) {
+ // Instructions are represented as four consecutive values in a JSON array.
+  // The first is the address of the instruction, the second is the hex string
+  // of the code, the third is a human readable string, and the fourth is
+ // the object loaded by the instruction.
+ jsarr_.AddValueF("%" Pp "", pc);
+ jsarr_.AddValue(hex_buffer);
+ jsarr_.AddValue(human_buffer);
+
+ if (object != NULL) {
+ jsarr_.AddValue(*object);
+ } else {
+ jsarr_.AddValueNull(); // Not a reference to null.
+ }
+}
+
+void DisassembleToJSONStream::Print(const char* format, ...) {
+ va_list measure_args;
+ va_start(measure_args, format);
+ intptr_t len = Utils::VSNPrint(NULL, 0, format, measure_args);
+ va_end(measure_args);
+
+ char* p = reinterpret_cast<char*>(malloc(len + 1));
+ va_list print_args;
+ va_start(print_args, format);
+ intptr_t len2 = Utils::VSNPrint(p, len, format, print_args);
+ va_end(print_args);
+ ASSERT(len == len2);
+ for (intptr_t i = 0; i < len; i++) {
+ if (p[i] == '\n' || p[i] == '\r') {
+ p[i] = ' ';
+ }
+ }
+ // Instructions are represented as four consecutive values in a JSON array.
+ // Comments only use the third slot. See above comment for more information.
+ jsarr_.AddValueNull();
+ jsarr_.AddValueNull();
+ jsarr_.AddValue(p);
+ jsarr_.AddValueNull();
+ free(p);
+}
+#endif // !defined(PRODUCT)
+
} // namespace dart
diff --git a/runtime/vm/compiler/assembler/disassembler.h b/runtime/vm/compiler/assembler/disassembler.h
index 77ad2ce..d58a0c9 100644
--- a/runtime/vm/compiler/assembler/disassembler.h
+++ b/runtime/vm/compiler/assembler/disassembler.h
@@ -61,6 +61,7 @@
DISALLOW_COPY_AND_ASSIGN(DisassembleToStdout);
};
+#if !defined(PRODUCT)
// Disassemble into a JSONStream.
class DisassembleToJSONStream : public DisassemblyFormatter {
public:
@@ -82,6 +83,7 @@
DISALLOW_ALLOCATION();
DISALLOW_COPY_AND_ASSIGN(DisassembleToJSONStream);
};
+#endif // !defined(PRODUCT)
#if !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER)
// Basic disassembly formatter that outputs the disassembled instruction
diff --git a/runtime/vm/compiler/backend/constant_propagator.cc b/runtime/vm/compiler/backend/constant_propagator.cc
index eed551a..9cf2ade 100644
--- a/runtime/vm/compiler/backend/constant_propagator.cc
+++ b/runtime/vm/compiler/backend/constant_propagator.cc
@@ -1454,6 +1454,10 @@
SetValue(instr, non_constant_);
}
+void ConstantPropagator::VisitLoadThread(LoadThreadInstr* instr) {
+ SetValue(instr, non_constant_);
+}
+
void ConstantPropagator::VisitUnaryUint32Op(UnaryUint32OpInstr* instr) {
// TODO(kmillikin): Handle unary operations.
SetValue(instr, non_constant_);
diff --git a/runtime/vm/compiler/backend/il.cc b/runtime/vm/compiler/backend/il.cc
index 5a742d0..e69dc63 100644
--- a/runtime/vm/compiler/backend/il.cc
+++ b/runtime/vm/compiler/backend/il.cc
@@ -13,6 +13,7 @@
#include "vm/compiler/backend/flow_graph_compiler.h"
#include "vm/compiler/backend/linearscan.h"
#include "vm/compiler/backend/locations.h"
+#include "vm/compiler/backend/locations_helpers.h"
#include "vm/compiler/backend/loops.h"
#include "vm/compiler/backend/range_analysis.h"
#include "vm/compiler/ffi/frame_rebase.h"
@@ -6863,6 +6864,11 @@
return marshaller_.RepInFfiCall(compiler::ffi::kResultIndex);
}
+// TODO(http://dartbug.com/48543): integrate with register allocator directly.
+DEFINE_BACKEND(LoadThread, (Register out)) {
+ __ MoveRegister(out, THR);
+}
+
// SIMD
SimdOpInstr::Kind SimdOpInstr::KindForOperator(MethodRecognizer::Kind kind) {
@@ -7038,12 +7044,10 @@
// Define the metadata array.
static const SimdOpInfo simd_op_information[] = {
-#define PP_APPLY(M, Args) M Args
#define CASE(Arity, Mask, Name, Args, Result) \
{Arity, HAS_##Mask, REP(Result), {PP_APPLY(ENCODE_INPUTS_##Arity, Args)}},
SIMD_OP_LIST(CASE, CASE)
#undef CASE
-#undef PP_APPLY
};
// Undef all auxiliary macros.
diff --git a/runtime/vm/compiler/backend/il.h b/runtime/vm/compiler/backend/il.h
index 809c16c..6e916cd 100644
--- a/runtime/vm/compiler/backend/il.h
+++ b/runtime/vm/compiler/backend/il.h
@@ -5,7 +5,6 @@
#ifndef RUNTIME_VM_COMPILER_BACKEND_IL_H_
#define RUNTIME_VM_COMPILER_BACKEND_IL_H_
-#include "vm/hash_map.h"
#if defined(DART_PRECOMPILED_RUNTIME)
#error "AOT runtime should not use compiler sources (including header files)"
#endif // defined(DART_PRECOMPILED_RUNTIME)
@@ -513,6 +512,7 @@
M(BoxSmallInt, kNoGC) \
M(IntConverter, kNoGC) \
M(BitCast, kNoGC) \
+ M(LoadThread, kNoGC) \
M(Deoptimize, kNoGC) \
M(SimdOp, kNoGC)
@@ -5358,6 +5358,14 @@
DISALLOW_COPY_AND_ASSIGN(AllocateHandleInstr);
};
+// Populates the untagged base + offset outside the heap with a tagged value.
+//
+// The store must be outside of the heap, does not emit a store barrier.
+// For stores in the heap, use StoreIndexedInstr, which emits store barriers.
+//
+// Does not have a dual RawLoadFieldInstr, because for loads we do not have to
+// distinguish between loading from within the heap or outside the heap.
+// Use FlowGraphBuilder::RawLoadField.
class RawStoreFieldInstr : public TemplateInstruction<2, NoThrow> {
public:
RawStoreFieldInstr(Value* base, Value* value, int32_t offset)
@@ -9272,6 +9280,32 @@
DISALLOW_COPY_AND_ASSIGN(BitCastInstr);
};
+class LoadThreadInstr : public TemplateDefinition<0, NoThrow, Pure> {
+ public:
+ LoadThreadInstr() : TemplateDefinition(DeoptId::kNone) {}
+
+ virtual bool ComputeCanDeoptimize() const { return false; }
+
+ virtual Representation representation() const { return kUntagged; }
+
+ virtual Representation RequiredInputRepresentation(intptr_t idx) const {
+ UNREACHABLE();
+ }
+
+ virtual CompileType ComputeType() const { return CompileType::Int(); }
+
+ // CSE is allowed. The thread should always be the same value.
+ virtual bool AttributesEqual(const Instruction& other) const {
+ ASSERT(other.IsLoadThread());
+ return true;
+ }
+
+ DECLARE_INSTRUCTION(LoadThread);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(LoadThreadInstr);
+};
+
// SimdOpInstr
//
// All SIMD intrinsics and recognized methods are represented via instances
@@ -9833,7 +9867,6 @@
return (constant == nullptr) || constant->value().ptr() == value.ptr();
}
-
} // namespace dart
#endif // RUNTIME_VM_COMPILER_BACKEND_IL_H_
diff --git a/runtime/vm/compiler/backend/il_test.cc b/runtime/vm/compiler/backend/il_test.cc
index d29658c..65dadc0 100644
--- a/runtime/vm/compiler/backend/il_test.cc
+++ b/runtime/vm/compiler/backend/il_test.cc
@@ -679,4 +679,277 @@
}));
}
+#ifdef DART_TARGET_OS_WINDOWS
+const char* pointer_prefix = "0x";
+#else
+const char* pointer_prefix = "";
+#endif
+
+ISOLATE_UNIT_TEST_CASE(IRTest_RawStoreField) {
+ InstancePtr ptr = Smi::New(100);
+ OS::Print("&ptr %p\n", &ptr);
+
+ // clang-format off
+ auto kScript = Utils::CStringUniquePtr(OS::SCreate(nullptr, R"(
+ import 'dart:ffi';
+
+ void myFunction() {
+ final pointer = Pointer<IntPtr>.fromAddress(%s%p);
+ anotherFunction();
+ }
+
+ void anotherFunction() {}
+ )", pointer_prefix, &ptr), std::free);
+ // clang-format on
+
+ const auto& root_library = Library::Handle(LoadTestScript(kScript.get()));
+ Invoke(root_library, "myFunction");
+ EXPECT_EQ(Smi::New(100), ptr);
+
+ const auto& my_function =
+ Function::Handle(GetFunction(root_library, "myFunction"));
+
+ TestPipeline pipeline(my_function, CompilerPass::kJIT);
+ FlowGraph* flow_graph = pipeline.RunPasses({
+ CompilerPass::kComputeSSA,
+ });
+
+ Zone* const zone = Thread::Current()->zone();
+
+ StaticCallInstr* pointer = nullptr;
+ StaticCallInstr* another_function_call = nullptr;
+ {
+ ILMatcher cursor(flow_graph, flow_graph->graph_entry()->normal_entry());
+
+ EXPECT(cursor.TryMatch({
+ kMoveGlob,
+ {kMatchAndMoveStaticCall, &pointer},
+ {kMatchAndMoveStaticCall, &another_function_call},
+ }));
+ }
+ auto pointer_value = Value(pointer);
+ auto* const load_untagged_instr = new (zone) LoadUntaggedInstr(
+ &pointer_value, compiler::target::PointerBase::data_offset());
+ flow_graph->InsertBefore(another_function_call, load_untagged_instr, nullptr,
+ FlowGraph::kValue);
+ auto load_untagged_value = Value(load_untagged_instr);
+ auto pointer_value2 = Value(pointer);
+ auto* const raw_store_field_instr =
+ new (zone) RawStoreFieldInstr(&load_untagged_value, &pointer_value2, 0);
+ flow_graph->InsertBefore(another_function_call, raw_store_field_instr,
+ nullptr, FlowGraph::kEffect);
+ another_function_call->RemoveFromGraph();
+
+ {
+ // Check we constructed the right graph.
+ ILMatcher cursor(flow_graph, flow_graph->graph_entry()->normal_entry());
+ EXPECT(cursor.TryMatch({
+ kMoveGlob,
+ kMatchAndMoveStaticCall,
+ kMatchAndMoveLoadUntagged,
+ kMatchAndMoveRawStoreField,
+ }));
+ }
+
+ pipeline.RunForcedOptimizedAfterSSAPasses();
+
+ {
+#if !defined(PRODUCT)
+ SetFlagScope<bool> sfs(&FLAG_disassemble_optimized, true);
+#endif
+ pipeline.CompileGraphAndAttachFunction();
+ }
+
+ // Ensure we can successfully invoke the function.
+ Invoke(root_library, "myFunction");
+
+ // Might be garbage if we ran a GC, but should never be a Smi.
+ EXPECT(!ptr.IsSmi());
+}
+
+// We do not have a RawLoadFieldInstr; instead we just use LoadIndexed for
+// loading from outside the heap.
+//
+// This test constructs two instructions from FlowGraphBuilder::RawLoadField
+// and exercises them to do a load from outside the heap.
+ISOLATE_UNIT_TEST_CASE(IRTest_RawLoadField) {
+ InstancePtr ptr = Smi::New(100);
+ intptr_t ptr2 = 100;
+ OS::Print("&ptr %p &ptr2 %p\n", &ptr, &ptr2);
+
+ // clang-format off
+ auto kScript = Utils::CStringUniquePtr(OS::SCreate(nullptr, R"(
+ import 'dart:ffi';
+
+ void myFunction() {
+ final pointer = Pointer<IntPtr>.fromAddress(%s%p);
+ anotherFunction();
+ final pointer2 = Pointer<IntPtr>.fromAddress(%s%p);
+ pointer2.value = 3;
+ }
+
+ void anotherFunction() {}
+ )", pointer_prefix, &ptr, pointer_prefix, &ptr2), std::free);
+ // clang-format on
+
+ const auto& root_library = Library::Handle(LoadTestScript(kScript.get()));
+ Invoke(root_library, "myFunction");
+ EXPECT_EQ(Smi::New(100), ptr);
+ EXPECT_EQ(3, ptr2);
+
+ const auto& my_function =
+ Function::Handle(GetFunction(root_library, "myFunction"));
+
+ TestPipeline pipeline(my_function, CompilerPass::kJIT);
+ FlowGraph* flow_graph = pipeline.RunPasses({
+ CompilerPass::kComputeSSA,
+ });
+
+ Zone* const zone = Thread::Current()->zone();
+
+ StaticCallInstr* pointer = nullptr;
+ StaticCallInstr* another_function_call = nullptr;
+ StaticCallInstr* pointer2 = nullptr;
+ StaticCallInstr* pointer2_store = nullptr;
+ {
+ ILMatcher cursor(flow_graph, flow_graph->graph_entry()->normal_entry());
+
+ EXPECT(cursor.TryMatch({
+ kMoveGlob,
+ {kMatchAndMoveStaticCall, &pointer},
+ {kMatchAndMoveStaticCall, &another_function_call},
+ {kMatchAndMoveStaticCall, &pointer2},
+ {kMatchAndMoveStaticCall, &pointer2_store},
+ }));
+ }
+ auto pointer_value = Value(pointer);
+ auto* const load_untagged_instr = new (zone) LoadUntaggedInstr(
+ &pointer_value, compiler::target::PointerBase::data_offset());
+ flow_graph->InsertBefore(another_function_call, load_untagged_instr, nullptr,
+ FlowGraph::kValue);
+ auto load_untagged_value = Value(load_untagged_instr);
+ auto* const constant_instr = new (zone) UnboxedConstantInstr(
+ Integer::ZoneHandle(zone, Integer::New(0, Heap::kOld)), kUnboxedIntPtr);
+ flow_graph->InsertBefore(another_function_call, constant_instr, nullptr,
+ FlowGraph::kValue);
+ auto constant_value = Value(constant_instr);
+ auto* const load_indexed_instr = new (zone)
+ LoadIndexedInstr(&load_untagged_value, &constant_value,
+ /*index_unboxed=*/true, /*index_scale=*/1, kArrayCid,
+ kAlignedAccess, DeoptId::kNone, InstructionSource());
+ flow_graph->InsertBefore(another_function_call, load_indexed_instr, nullptr,
+ FlowGraph::kValue);
+
+ another_function_call->RemoveFromGraph();
+ pointer2_store->InputAt(2)->definition()->ReplaceUsesWith(load_indexed_instr);
+
+ {
+ // Check we constructed the right graph.
+ ILMatcher cursor(flow_graph, flow_graph->graph_entry()->normal_entry());
+ EXPECT(cursor.TryMatch({
+ kMoveGlob,
+ kMatchAndMoveStaticCall,
+ kMatchAndMoveLoadUntagged,
+ kMatchAndMoveUnboxedConstant,
+ kMatchAndMoveLoadIndexed,
+ kMatchAndMoveStaticCall,
+ kMatchAndMoveStaticCall,
+ }));
+ }
+
+ pipeline.RunForcedOptimizedAfterSSAPasses();
+
+ {
+#if !defined(PRODUCT)
+ SetFlagScope<bool> sfs(&FLAG_disassemble_optimized, true);
+#endif
+ pipeline.CompileGraphAndAttachFunction();
+ }
+
+ // Ensure we can successfully invoke the function.
+ Invoke(root_library, "myFunction");
+ EXPECT_EQ(Smi::New(100), ptr);
+ EXPECT_EQ(100, ptr2);
+}
+
+ISOLATE_UNIT_TEST_CASE(IRTest_LoadThread) {
+ // clang-format off
+ auto kScript = R"(
+ import 'dart:ffi';
+
+ int myFunction() {
+ return 100;
+ }
+
+ void anotherFunction() {}
+ )";
+ // clang-format on
+
+ const auto& root_library = Library::Handle(LoadTestScript(kScript));
+ Zone* const zone = Thread::Current()->zone();
+ auto& invoke_result = Instance::Handle(zone);
+ invoke_result ^= Invoke(root_library, "myFunction");
+ EXPECT_EQ(Smi::New(100), invoke_result.ptr());
+
+ const auto& my_function =
+ Function::Handle(GetFunction(root_library, "myFunction"));
+
+ TestPipeline pipeline(my_function, CompilerPass::kJIT);
+ FlowGraph* flow_graph = pipeline.RunPasses({
+ CompilerPass::kComputeSSA,
+ });
+
+ ReturnInstr* return_instr = nullptr;
+ {
+ ILMatcher cursor(flow_graph, flow_graph->graph_entry()->normal_entry());
+
+ EXPECT(cursor.TryMatch({
+ kMoveGlob,
+ {kMatchReturn, &return_instr},
+ }));
+ }
+
+ auto* const load_thread_instr = new (zone) LoadThreadInstr();
+ flow_graph->InsertBefore(return_instr, load_thread_instr, nullptr,
+ FlowGraph::kValue);
+ auto load_thread_value = Value(load_thread_instr);
+
+ auto* const convert_instr = new (zone) IntConverterInstr(
+ kUntagged, kUnboxedFfiIntPtr, &load_thread_value, DeoptId::kNone);
+ flow_graph->InsertBefore(return_instr, convert_instr, nullptr,
+ FlowGraph::kValue);
+ auto convert_value = Value(convert_instr);
+
+ auto* const box_instr = BoxInstr::Create(kUnboxedFfiIntPtr, &convert_value);
+ flow_graph->InsertBefore(return_instr, box_instr, nullptr, FlowGraph::kValue);
+
+ return_instr->InputAt(0)->definition()->ReplaceUsesWith(box_instr);
+
+ {
+ // Check we constructed the right graph.
+ ILMatcher cursor(flow_graph, flow_graph->graph_entry()->normal_entry());
+ EXPECT(cursor.TryMatch({
+ kMoveGlob,
+ kMatchAndMoveLoadThread,
+ kMatchAndMoveIntConverter,
+ kMatchAndMoveBox,
+ kMatchReturn,
+ }));
+ }
+
+ pipeline.RunForcedOptimizedAfterSSAPasses();
+
+ {
+#if !defined(PRODUCT)
+ SetFlagScope<bool> sfs(&FLAG_disassemble_optimized, true);
+#endif
+ pipeline.CompileGraphAndAttachFunction();
+ }
+
+ // Ensure we can successfully invoke the function.
+ invoke_result ^= Invoke(root_library, "myFunction");
+ intptr_t result_int = Integer::Cast(invoke_result).AsInt64Value();
+ EXPECT_EQ(reinterpret_cast<intptr_t>(thread), result_int);
+}
+
} // namespace dart
diff --git a/runtime/vm/compiler/backend/il_test_helper.cc b/runtime/vm/compiler/backend/il_test_helper.cc
index df48df4..e1b3f68 100644
--- a/runtime/vm/compiler/backend/il_test_helper.cc
+++ b/runtime/vm/compiler/backend/il_test_helper.cc
@@ -5,6 +5,7 @@
#include "vm/compiler/backend/il_test_helper.h"
#include "vm/compiler/aot/aot_call_specializer.h"
+#include "vm/compiler/assembler/disassembler.h"
#include "vm/compiler/backend/block_scheduler.h"
#include "vm/compiler/backend/flow_graph.h"
#include "vm/compiler/backend/flow_graph_compiler.h"
@@ -16,6 +17,7 @@
#include "vm/compiler/jit/compiler.h"
#include "vm/compiler/jit/jit_call_specializer.h"
#include "vm/dart_api_impl.h"
+#include "vm/flags.h"
#include "vm/parser.h"
#include "vm/unit_test.h"
@@ -186,6 +188,32 @@
pass_state_->call_specializer = nullptr;
}
+// Keep in sync with CompilerPass::RunForceOptimizedPipeline.
+void TestPipeline::RunForcedOptimizedAfterSSAPasses() {
+ RunAdditionalPasses({
+ CompilerPass::kSetOuterInliningId,
+ CompilerPass::kTypePropagation,
+ CompilerPass::kCanonicalize,
+ CompilerPass::kBranchSimplify,
+ CompilerPass::kIfConvert,
+ CompilerPass::kConstantPropagation,
+ CompilerPass::kTypePropagation,
+ CompilerPass::kWidenSmiToInt32,
+ CompilerPass::kSelectRepresentations_Final,
+ CompilerPass::kTypePropagation,
+ CompilerPass::kTryCatchOptimization,
+ CompilerPass::kEliminateEnvironments,
+ CompilerPass::kEliminateDeadPhis,
+ CompilerPass::kDCE,
+ CompilerPass::kCanonicalize,
+ CompilerPass::kDelayAllocations,
+ CompilerPass::kEliminateWriteBarriers,
+ CompilerPass::kFinalizeGraph,
+ CompilerPass::kAllocateRegisters,
+ CompilerPass::kReorderBlocks,
+ });
+}
+
void TestPipeline::CompileGraphAndAttachFunction() {
Zone* zone = thread_->zone();
const bool optimized = true;
@@ -248,6 +276,12 @@
if (mode_ == CompilerPass::kAOT) {
EXPECT(deopt_info_array.IsNull() || deopt_info_array.Length() == 0);
}
+
+#if !defined(PRODUCT)
+ if (FLAG_disassemble_optimized) {
+ Disassembler::DisassembleCode(function_, code, optimized);
+ }
+#endif
}
bool ILMatcher::TryMatch(std::initializer_list<MatchCode> match_codes,
diff --git a/runtime/vm/compiler/backend/il_test_helper.h b/runtime/vm/compiler/backend/il_test_helper.h
index babe427..6e77ecc 100644
--- a/runtime/vm/compiler/backend/il_test_helper.h
+++ b/runtime/vm/compiler/backend/il_test_helper.h
@@ -91,6 +91,8 @@
void RunAdditionalPasses(std::initializer_list<CompilerPass::Id> passes);
+ void RunForcedOptimizedAfterSSAPasses();
+
void CompileGraphAndAttachFunction();
private:
diff --git a/runtime/vm/compiler/compiler_pass.cc b/runtime/vm/compiler/compiler_pass.cc
index 33079e7..2ea7890 100644
--- a/runtime/vm/compiler/compiler_pass.cc
+++ b/runtime/vm/compiler/compiler_pass.cc
@@ -297,6 +297,7 @@
INVOKE_PASS(TryOptimizePatterns);
}
+// Keep in sync with TestPipeline::RunForcedOptimizedAfterSSAPasses.
FlowGraph* CompilerPass::RunForceOptimizedPipeline(
PipelineMode mode,
CompilerPassState* pass_state) {
diff --git a/runtime/vm/compiler/frontend/kernel_to_il.cc b/runtime/vm/compiler/frontend/kernel_to_il.cc
index 2fab100..006ea70 100644
--- a/runtime/vm/compiler/frontend/kernel_to_il.cc
+++ b/runtime/vm/compiler/frontend/kernel_to_il.cc
@@ -3905,6 +3905,13 @@
return code;
}
+Fragment FlowGraphBuilder::RawLoadField(int32_t offset) {
+ Fragment code;
+ code += UnboxedIntConstant(offset, kUnboxedIntPtr);
+ code += LoadIndexed(kArrayCid, /*index_scale=*/1, /*index_unboxed=*/true);
+ return code;
+}
+
Fragment FlowGraphBuilder::RawStoreField(int32_t offset) {
Fragment code;
Value* value = Pop();
@@ -3931,9 +3938,7 @@
Fragment FlowGraphBuilder::UnwrapHandle() {
Fragment code;
code += ConvertUnboxedToUntagged(kUnboxedIntPtr);
- code += IntConstant(compiler::target::LocalHandle::ptr_offset());
- code += UnboxTruncate(kUnboxedIntPtr);
- code += LoadIndexed(kArrayCid, /*index_scale=*/1, /*index_unboxed=*/true);
+ code += RawLoadField(compiler::target::LocalHandle::ptr_offset());
return code;
}
@@ -4674,8 +4679,7 @@
if (marshaller.IsPointer(compiler::ffi::kResultIndex) ||
marshaller.IsVoid(compiler::ffi::kResultIndex)) {
ASSERT(function.FfiCallbackExceptionalReturn() == Object::null());
- catch_body += IntConstant(0);
- catch_body += UnboxTruncate(kUnboxedFfiIntPtr);
+ catch_body += UnboxedIntConstant(0, kUnboxedFfiIntPtr);
} else if (marshaller.IsHandle(compiler::ffi::kResultIndex)) {
catch_body += UnhandledException();
catch_body +=
diff --git a/runtime/vm/compiler/frontend/kernel_to_il.h b/runtime/vm/compiler/frontend/kernel_to_il.h
index f226a12..03a18f6 100644
--- a/runtime/vm/compiler/frontend/kernel_to_il.h
+++ b/runtime/vm/compiler/frontend/kernel_to_il.h
@@ -381,7 +381,12 @@
// Leaves a `LocalHandle` on the stack.
Fragment AllocateHandle(LocalVariable* api_local_scope);
- // Populates the base + offset with a tagged value.
+ // Loads a tagged value from an untagged base + offset from outside the heap.
+ Fragment RawLoadField(int32_t offset);
+
+ // Populates the untagged base + offset outside the heap with a tagged value.
+ //
+ // The store must be outside of the heap, does not emit a store barrier.
Fragment RawStoreField(int32_t offset);
// Wraps an `Object` from the stack and leaves a `LocalHandle` on the stack.
diff --git a/runtime/vm/constants_arm.h b/runtime/vm/constants_arm.h
index d560e44..a8d33d5 100644
--- a/runtime/vm/constants_arm.h
+++ b/runtime/vm/constants_arm.h
@@ -575,7 +575,7 @@
const QRegister kDartLastVolatileFpuReg = Q3;
const int kDartVolatileFpuRegCount = 4;
-#define R(REG) (1 << REG)
+#define R(reg) (static_cast<RegList>(1) << (reg))
class CallingConventions {
public:
diff --git a/runtime/vm/constants_arm64.h b/runtime/vm/constants_arm64.h
index 9b4df71..8977115 100644
--- a/runtime/vm/constants_arm64.h
+++ b/runtime/vm/constants_arm64.h
@@ -384,7 +384,7 @@
// See "Procedure Call Standard for the ARM 64-bit Architecture", document
// number "ARM IHI 0055B", May 22 2013.
-#define R(REG) (1 << REG)
+#define R(reg) (static_cast<RegList>(1) << (reg))
// C++ ABI call registers.
const RegList kAbiArgumentCpuRegs =
diff --git a/runtime/vm/constants_riscv.h b/runtime/vm/constants_riscv.h
index 645b69e..4979017 100644
--- a/runtime/vm/constants_riscv.h
+++ b/runtime/vm/constants_riscv.h
@@ -388,7 +388,7 @@
typedef uint32_t RegList;
const RegList kAllCpuRegistersList = 0xFFFFFFFF;
-#define R(REG) (1 << REG)
+#define R(reg) (static_cast<RegList>(1) << (reg))
// C++ ABI call registers.
diff --git a/runtime/vm/constants_x64.h b/runtime/vm/constants_x64.h
index a880647..b8bc0e7 100644
--- a/runtime/vm/constants_x64.h
+++ b/runtime/vm/constants_x64.h
@@ -383,7 +383,7 @@
TIMES_COMPRESSED_HALF_WORD_SIZE = TIMES_COMPRESSED_WORD_SIZE - 1,
};
-#define R(reg) (1 << (reg))
+#define R(reg) (static_cast<RegList>(1) << (reg))
class CallingConventions {
public:
diff --git a/runtime/vm/dart_api_impl.cc b/runtime/vm/dart_api_impl.cc
index 6398852..400aff2 100644
--- a/runtime/vm/dart_api_impl.cc
+++ b/runtime/vm/dart_api_impl.cc
@@ -6994,7 +6994,7 @@
}
DART_EXPORT void Dart_DumpNativeStackTrace(void* context) {
-#ifndef PRODUCT
+#if !defined(PRODUCT) || defined(DART_PRECOMPILER)
Profiler::DumpStackTrace(context);
#endif
}
diff --git a/runtime/vm/profiler.cc b/runtime/vm/profiler.cc
index dcf17e7..c5c5e3b 100644
--- a/runtime/vm/profiler.cc
+++ b/runtime/vm/profiler.cc
@@ -63,12 +63,462 @@
"default is ~4 seconds. Large values will greatly increase memory "
"consumption.");
+// Include native stack dumping helpers into AOT compiler even in PRODUCT
+// mode. This allows to report more informative errors when gen_snapshot
+// crashes.
+#if !defined(PRODUCT) || defined(DART_PRECOMPILER)
+ProfilerCounters Profiler::counters_ = {};
+
+static void DumpStackFrame(intptr_t frame_index, uword pc, uword fp) {
+ uword start = 0;
+ if (auto const name = NativeSymbolResolver::LookupSymbolName(pc, &start)) {
+ uword offset = pc - start;
+ OS::PrintErr(" pc 0x%" Pp " fp 0x%" Pp " %s+0x%" Px "\n", pc, fp, name,
+ offset);
+ NativeSymbolResolver::FreeSymbolName(name);
+ return;
+ }
+
+ char* dso_name;
+ uword dso_base;
+ if (NativeSymbolResolver::LookupSharedObject(pc, &dso_base, &dso_name)) {
+ uword dso_offset = pc - dso_base;
+ OS::PrintErr(" pc 0x%" Pp " fp 0x%" Pp " %s+0x%" Px "\n", pc, fp, dso_name,
+ dso_offset);
+ NativeSymbolResolver::FreeSymbolName(dso_name);
+ return;
+ }
+
+ OS::PrintErr(" pc 0x%" Pp " fp 0x%" Pp " Unknown symbol\n", pc, fp);
+}
+
+class ProfilerStackWalker : public ValueObject {
+ public:
+ ProfilerStackWalker(Dart_Port port_id,
+ Sample* head_sample,
+ SampleBuffer* sample_buffer,
+ intptr_t skip_count = 0)
+ : port_id_(port_id),
+ sample_(head_sample),
+ sample_buffer_(sample_buffer),
+ skip_count_(skip_count),
+ frames_skipped_(0),
+ frame_index_(0),
+ total_frames_(0) {
+ if (sample_ == NULL) {
+ ASSERT(sample_buffer_ == NULL);
+ } else {
+ ASSERT(sample_buffer_ != NULL);
+ ASSERT(sample_->head_sample());
+ }
+ }
+
+ bool Append(uword pc, uword fp) {
+ if (frames_skipped_ < skip_count_) {
+ frames_skipped_++;
+ return true;
+ }
+
+ if (sample_ == NULL) {
+ DumpStackFrame(frame_index_, pc, fp);
+ frame_index_++;
+ total_frames_++;
+ return true;
+ }
+ if (total_frames_ >= FLAG_max_profile_depth) {
+ sample_->set_truncated_trace(true);
+ return false;
+ }
+ ASSERT(sample_ != NULL);
+ if (frame_index_ == Sample::kPCArraySizeInWords) {
+ Sample* new_sample = sample_buffer_->ReserveSampleAndLink(sample_);
+ if (new_sample == NULL) {
+ // Could not reserve new sample- mark this as truncated.
+ sample_->set_truncated_trace(true);
+ return false;
+ }
+ frame_index_ = 0;
+ sample_ = new_sample;
+ }
+ ASSERT(frame_index_ < Sample::kPCArraySizeInWords);
+ sample_->SetAt(frame_index_, pc);
+ frame_index_++;
+ total_frames_++;
+ return true;
+ }
+
+ protected:
+ Dart_Port port_id_;
+ Sample* sample_;
+ SampleBuffer* sample_buffer_;
+ intptr_t skip_count_;
+ intptr_t frames_skipped_;
+ intptr_t frame_index_;
+ intptr_t total_frames_;
+};
+
+// The layout of C stack frames.
+#if defined(HOST_ARCH_IA32) || defined(HOST_ARCH_X64) || \
+ defined(HOST_ARCH_ARM) || defined(HOST_ARCH_ARM64)
+// +-------------+
+// | saved IP/LR |
+// +-------------+
+// | saved FP | <- FP
+// +-------------+
+static constexpr intptr_t kHostSavedCallerPcSlotFromFp = 1;
+static constexpr intptr_t kHostSavedCallerFpSlotFromFp = 0;
+#elif defined(HOST_ARCH_RISCV32) || defined(HOST_ARCH_RISCV64)
+// +-------------+
+// | | <- FP
+// +-------------+
+// | saved RA |
+// +-------------+
+// | saved FP |
+// +-------------+
+static constexpr intptr_t kHostSavedCallerPcSlotFromFp = -1;
+static constexpr intptr_t kHostSavedCallerFpSlotFromFp = -2;
+#else
+#error What architecture?
+#endif
+
+// If the VM is compiled without frame pointers (which is the default on
+// recent GCC versions with optimizing enabled) the stack walking code may
+// fail.
+//
+class ProfilerNativeStackWalker : public ProfilerStackWalker {
+ public:
+ ProfilerNativeStackWalker(ProfilerCounters* counters,
+ Dart_Port port_id,
+ Sample* sample,
+ SampleBuffer* sample_buffer,
+ uword stack_lower,
+ uword stack_upper,
+ uword pc,
+ uword fp,
+ uword sp,
+ intptr_t skip_count = 0)
+ : ProfilerStackWalker(port_id, sample, sample_buffer, skip_count),
+ counters_(counters),
+ stack_upper_(stack_upper),
+ original_pc_(pc),
+ original_fp_(fp),
+ original_sp_(sp),
+ lower_bound_(stack_lower) {}
+
+ void walk() {
+ const uword kMaxStep = VirtualMemory::PageSize();
+
+ Append(original_pc_, original_fp_);
+
+ uword* pc = reinterpret_cast<uword*>(original_pc_);
+ uword* fp = reinterpret_cast<uword*>(original_fp_);
+ uword* previous_fp = fp;
+
+ uword gap = original_fp_ - original_sp_;
+ if (gap >= kMaxStep) {
+ // Gap between frame pointer and stack pointer is
+ // too large.
+ counters_->incomplete_sample_fp_step.fetch_add(1);
+ return;
+ }
+
+ if (!ValidFramePointer(fp)) {
+ counters_->incomplete_sample_fp_bounds.fetch_add(1);
+ return;
+ }
+
+ while (true) {
+ pc = CallerPC(fp);
+ previous_fp = fp;
+ fp = CallerFP(fp);
+
+ if (fp == NULL) {
+ return;
+ }
+
+ if (fp <= previous_fp) {
+ // Frame pointer did not move to a higher address.
+ counters_->incomplete_sample_fp_step.fetch_add(1);
+ return;
+ }
+
+ gap = fp - previous_fp;
+ if (gap >= kMaxStep) {
+ // Frame pointer step is too large.
+ counters_->incomplete_sample_fp_step.fetch_add(1);
+ return;
+ }
+
+ if (!ValidFramePointer(fp)) {
+ // Frame pointer is outside of isolate stack boundary.
+ counters_->incomplete_sample_fp_bounds.fetch_add(1);
+ return;
+ }
+
+ const uword pc_value = reinterpret_cast<uword>(pc);
+ if ((pc_value + 1) < pc_value) {
+ // It is not uncommon to encounter an invalid pc as we
+ // traverse a stack frame. Most of these we can tolerate. If
+ // the pc is so large that adding one to it will cause an
+ // overflow it is invalid and it will cause headaches later
+ // while we are building the profile. Discard it.
+ counters_->incomplete_sample_bad_pc.fetch_add(1);
+ return;
+ }
+
+ // Move the lower bound up.
+ lower_bound_ = reinterpret_cast<uword>(fp);
+
+ if (!Append(pc_value, reinterpret_cast<uword>(fp))) {
+ return;
+ }
+ }
+ }
+
+ private:
+ uword* CallerPC(uword* fp) const {
+ ASSERT(fp != NULL);
+ uword* caller_pc_ptr = fp + kHostSavedCallerPcSlotFromFp;
+ // This may actually be uninitialized, by design (see class comment above).
+ MSAN_UNPOISON(caller_pc_ptr, kWordSize);
+ ASAN_UNPOISON(caller_pc_ptr, kWordSize);
+ return reinterpret_cast<uword*>(*caller_pc_ptr);
+ }
+
+ uword* CallerFP(uword* fp) const {
+ ASSERT(fp != NULL);
+ uword* caller_fp_ptr = fp + kHostSavedCallerFpSlotFromFp;
+ // This may actually be uninitialized, by design (see class comment above).
+ MSAN_UNPOISON(caller_fp_ptr, kWordSize);
+ ASAN_UNPOISON(caller_fp_ptr, kWordSize);
+ return reinterpret_cast<uword*>(*caller_fp_ptr);
+ }
+
+ bool ValidFramePointer(uword* fp) const {
+ if (fp == NULL) {
+ return false;
+ }
+ uword cursor = reinterpret_cast<uword>(fp);
+ cursor += sizeof(fp);
+ bool r = (cursor >= lower_bound_) && (cursor < stack_upper_);
+ return r;
+ }
+
+ ProfilerCounters* const counters_;
+ const uword stack_upper_;
+ const uword original_pc_;
+ const uword original_fp_;
+ const uword original_sp_;
+ uword lower_bound_;
+};
+
+static bool ValidateThreadStackBounds(uintptr_t fp,
+ uintptr_t sp,
+ uword stack_lower,
+ uword stack_upper) {
+ if (stack_lower >= stack_upper) {
+ // Stack boundary is invalid.
+ return false;
+ }
+
+ if ((sp < stack_lower) || (sp >= stack_upper)) {
+ // Stack pointer is outside thread's stack boundary.
+ return false;
+ }
+
+ if ((fp < stack_lower) || (fp >= stack_upper)) {
+ // Frame pointer is outside threads's stack boundary.
+ return false;
+ }
+
+ return true;
+}
+
+// Get |thread|'s stack boundary and verify that |sp| and |fp| are within
+// it. Return |false| if anything looks suspicious.
+static bool GetAndValidateThreadStackBounds(OSThread* os_thread,
+ Thread* thread,
+ uintptr_t fp,
+ uintptr_t sp,
+ uword* stack_lower,
+ uword* stack_upper) {
+ ASSERT(os_thread != NULL);
+ ASSERT(stack_lower != NULL);
+ ASSERT(stack_upper != NULL);
+
+#if defined(USING_SIMULATOR)
+ const bool use_simulator_stack_bounds =
+ thread != NULL && thread->IsExecutingDartCode();
+ if (use_simulator_stack_bounds) {
+ Isolate* isolate = thread->isolate();
+ ASSERT(isolate != NULL);
+ Simulator* simulator = isolate->simulator();
+ *stack_lower = simulator->stack_limit();
+ *stack_upper = simulator->stack_base();
+ }
+#else
+ const bool use_simulator_stack_bounds = false;
+#endif // defined(USING_SIMULATOR)
+
+ if (!use_simulator_stack_bounds) {
+ *stack_lower = os_thread->stack_limit();
+ *stack_upper = os_thread->stack_base();
+ }
+
+ if ((*stack_lower == 0) || (*stack_upper == 0)) {
+ return false;
+ }
+
+ if (!use_simulator_stack_bounds && (sp > *stack_lower)) {
+ // The stack pointer gives us a tighter lower bound.
+ *stack_lower = sp;
+ }
+
+ return ValidateThreadStackBounds(fp, sp, *stack_lower, *stack_upper);
+}
+
+// Some simple sanity checking of |pc|, |fp|, and |sp|.
+static bool InitialRegisterCheck(uintptr_t pc, uintptr_t fp, uintptr_t sp) {
+ if ((sp == 0) || (fp == 0) || (pc == 0)) {
+ // None of these registers should be zero.
+ return false;
+ }
+
+ if (sp > fp) {
+ // Assuming the stack grows down, we should never have a stack pointer above
+ // the frame pointer.
+ return false;
+ }
+
+ return true;
+}
+
+void Profiler::DumpStackTrace(void* context) {
+ if (context == NULL) {
+ DumpStackTrace(/*for_crash=*/true);
+ return;
+ }
+#if defined(DART_HOST_OS_LINUX) || defined(DART_HOST_OS_MACOS) || \
+ defined(DART_HOST_OS_ANDROID)
+ ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
+ mcontext_t mcontext = ucontext->uc_mcontext;
+ uword pc = SignalHandler::GetProgramCounter(mcontext);
+ uword fp = SignalHandler::GetFramePointer(mcontext);
+ uword sp = SignalHandler::GetCStackPointer(mcontext);
+ DumpStackTrace(sp, fp, pc, /*for_crash=*/true);
+#elif defined(DART_HOST_OS_WINDOWS)
+ CONTEXT* ctx = reinterpret_cast<CONTEXT*>(context);
+#if defined(HOST_ARCH_IA32)
+ uword pc = static_cast<uword>(ctx->Eip);
+ uword fp = static_cast<uword>(ctx->Ebp);
+ uword sp = static_cast<uword>(ctx->Esp);
+#elif defined(HOST_ARCH_X64)
+ uword pc = static_cast<uword>(ctx->Rip);
+ uword fp = static_cast<uword>(ctx->Rbp);
+ uword sp = static_cast<uword>(ctx->Rsp);
+#else
+#error Unsupported architecture.
+#endif
+ DumpStackTrace(sp, fp, pc, /*for_crash=*/true);
+#else
+// TODO(fschneider): Add support for more platforms.
+// Do nothing on unsupported platforms.
+#endif
+}
+
+void Profiler::DumpStackTrace(bool for_crash) {
+ uintptr_t sp = OSThread::GetCurrentStackPointer();
+ uintptr_t fp = 0;
+ uintptr_t pc = OS::GetProgramCounter();
+
+ COPY_FP_REGISTER(fp);
+
+ DumpStackTrace(sp, fp, pc, for_crash);
+}
+
+void Profiler::DumpStackTrace(uword sp, uword fp, uword pc, bool for_crash) {
+ if (for_crash) {
+ // Allow only one stack trace to prevent recursively printing stack traces
+ // if we hit an assert while printing the stack.
+ static RelaxedAtomic<uintptr_t> started_dump = 0;
+ if (started_dump.fetch_add(1u) != 0) {
+ OS::PrintErr("Aborting re-entrant request for stack trace.\n");
+ return;
+ }
+ }
+
+ auto os_thread = OSThread::Current();
+ ASSERT(os_thread != nullptr);
+ auto thread = Thread::Current(); // NULL if no current isolate.
+ auto isolate = thread == nullptr ? nullptr : thread->isolate();
+ auto isolate_group = thread == nullptr ? nullptr : thread->isolate_group();
+ auto source = isolate_group == nullptr ? nullptr : isolate_group->source();
+ auto vm_source =
+ Dart::vm_isolate() == nullptr ? nullptr : Dart::vm_isolate()->source();
+ const char* isolate_group_name =
+ isolate_group == nullptr ? "(nil)" : isolate_group->source()->name;
+ const char* isolate_name = isolate == nullptr ? "(nil)" : isolate->name();
+#if defined(PRODUCT)
+ const intptr_t thread_id = -1;
+#else
+ const intptr_t thread_id = OSThread::ThreadIdToIntPtr(os_thread->trace_id());
+#endif
+
+ OS::PrintErr("version=%s\n", Version::String());
+ OS::PrintErr("pid=%" Pd ", thread=%" Pd
+ ", isolate_group=%s(%p), isolate=%s(%p)\n",
+ static_cast<intptr_t>(OS::ProcessId()), thread_id,
+ isolate_group_name, isolate_group, isolate_name, isolate);
+ OS::PrintErr("isolate_instructions=%" Px ", vm_instructions=%" Px "\n",
+ source == nullptr
+ ? 0
+ : reinterpret_cast<uword>(source->snapshot_instructions),
+ vm_source == nullptr
+ ? 0
+ : reinterpret_cast<uword>(vm_source->snapshot_instructions));
+
+ if (!InitialRegisterCheck(pc, fp, sp)) {
+ OS::PrintErr("Stack dump aborted because InitialRegisterCheck failed.\n");
+ return;
+ }
+
+ uword stack_lower = 0;
+ uword stack_upper = 0;
+ if (!GetAndValidateThreadStackBounds(os_thread, thread, fp, sp, &stack_lower,
+ &stack_upper)) {
+ OS::PrintErr(
+ "Stack dump aborted because GetAndValidateThreadStackBounds failed.\n");
+ return;
+ }
+
+ ProfilerNativeStackWalker native_stack_walker(&counters_, ILLEGAL_PORT, NULL,
+ NULL, stack_lower, stack_upper,
+ pc, fp, sp,
+ /*skip_count=*/0);
+ native_stack_walker.walk();
+ OS::PrintErr("-- End of DumpStackTrace\n");
+
+ if (thread != nullptr) {
+ if (thread->execution_state() == Thread::kThreadInNative) {
+ TransitionNativeToVM transition(thread);
+ StackFrame::DumpCurrentTrace();
+ } else if (thread->execution_state() == Thread::kThreadInVM) {
+ StackFrame::DumpCurrentTrace();
+#if !defined(DART_PRECOMPILED_RUNTIME)
+ if (thread->HasCompilerState()) {
+ thread->compiler_state().ReportCrash();
+ }
+#endif
+ }
+ }
+}
+#endif // !defined(PRODUCT) || defined(DART_PRECOMPILER)
+
#ifndef PRODUCT
RelaxedAtomic<bool> Profiler::initialized_ = false;
SampleBlockBuffer* Profiler::sample_block_buffer_ = nullptr;
AllocationSampleBuffer* Profiler::allocation_sample_buffer_ = nullptr;
-ProfilerCounters Profiler::counters_ = {};
bool SampleBlockProcessor::initialized_ = false;
bool SampleBlockProcessor::shutdown_ = false;
@@ -570,94 +1020,6 @@
sample->Clear();
}
-static void DumpStackFrame(intptr_t frame_index, uword pc, uword fp) {
- uword start = 0;
- if (auto const name = NativeSymbolResolver::LookupSymbolName(pc, &start)) {
- uword offset = pc - start;
- OS::PrintErr(" pc 0x%" Pp " fp 0x%" Pp " %s+0x%" Px "\n", pc, fp, name,
- offset);
- NativeSymbolResolver::FreeSymbolName(name);
- return;
- }
-
- char* dso_name;
- uword dso_base;
- if (NativeSymbolResolver::LookupSharedObject(pc, &dso_base, &dso_name)) {
- uword dso_offset = pc - dso_base;
- OS::PrintErr(" pc 0x%" Pp " fp 0x%" Pp " %s+0x%" Px "\n", pc, fp, dso_name,
- dso_offset);
- NativeSymbolResolver::FreeSymbolName(dso_name);
- return;
- }
-
- OS::PrintErr(" pc 0x%" Pp " fp 0x%" Pp " Unknown symbol\n", pc, fp);
-}
-
-class ProfilerStackWalker : public ValueObject {
- public:
- ProfilerStackWalker(Dart_Port port_id,
- Sample* head_sample,
- SampleBuffer* sample_buffer,
- intptr_t skip_count = 0)
- : port_id_(port_id),
- sample_(head_sample),
- sample_buffer_(sample_buffer),
- skip_count_(skip_count),
- frames_skipped_(0),
- frame_index_(0),
- total_frames_(0) {
- if (sample_ == NULL) {
- ASSERT(sample_buffer_ == NULL);
- } else {
- ASSERT(sample_buffer_ != NULL);
- ASSERT(sample_->head_sample());
- }
- }
-
- bool Append(uword pc, uword fp) {
- if (frames_skipped_ < skip_count_) {
- frames_skipped_++;
- return true;
- }
-
- if (sample_ == NULL) {
- DumpStackFrame(frame_index_, pc, fp);
- frame_index_++;
- total_frames_++;
- return true;
- }
- if (total_frames_ >= FLAG_max_profile_depth) {
- sample_->set_truncated_trace(true);
- return false;
- }
- ASSERT(sample_ != NULL);
- if (frame_index_ == Sample::kPCArraySizeInWords) {
- Sample* new_sample = sample_buffer_->ReserveSampleAndLink(sample_);
- if (new_sample == NULL) {
- // Could not reserve new sample- mark this as truncated.
- sample_->set_truncated_trace(true);
- return false;
- }
- frame_index_ = 0;
- sample_ = new_sample;
- }
- ASSERT(frame_index_ < Sample::kPCArraySizeInWords);
- sample_->SetAt(frame_index_, pc);
- frame_index_++;
- total_frames_++;
- return true;
- }
-
- protected:
- Dart_Port port_id_;
- Sample* sample_;
- SampleBuffer* sample_buffer_;
- intptr_t skip_count_;
- intptr_t frames_skipped_;
- intptr_t frame_index_;
- intptr_t total_frames_;
-};
-
// Executing Dart code, walk the stack.
class ProfilerDartStackWalker : public ProfilerStackWalker {
public:
@@ -711,11 +1073,12 @@
sample_->set_exit_frame_sample(has_exit_frame);
#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_X64)
- if (!has_exit_frame && (CallerPC() == EntryMarker())) {
+ const bool is_entry_frame = (CallerPC() == EntryMarker());
#else
- if (!has_exit_frame &&
- (StubCode::InInvocationStub(reinterpret_cast<uword>(lr_)))) {
+ const bool is_entry_frame =
+ (StubCode::InInvocationStub(reinterpret_cast<uword>(lr_)));
#endif
+ if (!has_exit_frame && is_entry_frame) {
// During the prologue of a function, CallerPC will return the caller's
// caller. For most frames, the missing PC will be added during profile
// processing. However, during this stack walk, it can cause us to fail
@@ -799,161 +1162,6 @@
uword* lr_;
};
-// The layout of C stack frames.
-#if defined(HOST_ARCH_IA32) || defined(HOST_ARCH_X64) || \
- defined(HOST_ARCH_ARM) || defined(HOST_ARCH_ARM64)
-// +-------------+
-// | saved IP/LR |
-// +-------------+
-// | saved FP | <- FP
-// +-------------+
-static constexpr intptr_t kHostSavedCallerPcSlotFromFp = 1;
-static constexpr intptr_t kHostSavedCallerFpSlotFromFp = 0;
-#elif defined(HOST_ARCH_RISCV32) || defined(HOST_ARCH_RISCV64)
-// +-------------+
-// | | <- FP
-// +-------------+
-// | saved RA |
-// +-------------+
-// | saved FP |
-// +-------------+
-static constexpr intptr_t kHostSavedCallerPcSlotFromFp = -1;
-static constexpr intptr_t kHostSavedCallerFpSlotFromFp = -2;
-#else
-#error What architecture?
-#endif
-
-// If the VM is compiled without frame pointers (which is the default on
-// recent GCC versions with optimizing enabled) the stack walking code may
-// fail.
-//
-class ProfilerNativeStackWalker : public ProfilerStackWalker {
- public:
- ProfilerNativeStackWalker(ProfilerCounters* counters,
- Dart_Port port_id,
- Sample* sample,
- SampleBuffer* sample_buffer,
- uword stack_lower,
- uword stack_upper,
- uword pc,
- uword fp,
- uword sp,
- intptr_t skip_count = 0)
- : ProfilerStackWalker(port_id, sample, sample_buffer, skip_count),
- counters_(counters),
- stack_upper_(stack_upper),
- original_pc_(pc),
- original_fp_(fp),
- original_sp_(sp),
- lower_bound_(stack_lower) {}
-
- void walk() {
- const uword kMaxStep = VirtualMemory::PageSize();
-
- Append(original_pc_, original_fp_);
-
- uword* pc = reinterpret_cast<uword*>(original_pc_);
- uword* fp = reinterpret_cast<uword*>(original_fp_);
- uword* previous_fp = fp;
-
- uword gap = original_fp_ - original_sp_;
- if (gap >= kMaxStep) {
- // Gap between frame pointer and stack pointer is
- // too large.
- counters_->incomplete_sample_fp_step.fetch_add(1);
- return;
- }
-
- if (!ValidFramePointer(fp)) {
- counters_->incomplete_sample_fp_bounds.fetch_add(1);
- return;
- }
-
- while (true) {
- pc = CallerPC(fp);
- previous_fp = fp;
- fp = CallerFP(fp);
-
- if (fp == NULL) {
- return;
- }
-
- if (fp <= previous_fp) {
- // Frame pointer did not move to a higher address.
- counters_->incomplete_sample_fp_step.fetch_add(1);
- return;
- }
-
- gap = fp - previous_fp;
- if (gap >= kMaxStep) {
- // Frame pointer step is too large.
- counters_->incomplete_sample_fp_step.fetch_add(1);
- return;
- }
-
- if (!ValidFramePointer(fp)) {
- // Frame pointer is outside of isolate stack boundary.
- counters_->incomplete_sample_fp_bounds.fetch_add(1);
- return;
- }
-
- const uword pc_value = reinterpret_cast<uword>(pc);
- if ((pc_value + 1) < pc_value) {
- // It is not uncommon to encounter an invalid pc as we
- // traverse a stack frame. Most of these we can tolerate. If
- // the pc is so large that adding one to it will cause an
- // overflow it is invalid and it will cause headaches later
- // while we are building the profile. Discard it.
- counters_->incomplete_sample_bad_pc.fetch_add(1);
- return;
- }
-
- // Move the lower bound up.
- lower_bound_ = reinterpret_cast<uword>(fp);
-
- if (!Append(pc_value, reinterpret_cast<uword>(fp))) {
- return;
- }
- }
- }
-
- private:
- uword* CallerPC(uword* fp) const {
- ASSERT(fp != NULL);
- uword* caller_pc_ptr = fp + kHostSavedCallerPcSlotFromFp;
- // This may actually be uninitialized, by design (see class comment above).
- MSAN_UNPOISON(caller_pc_ptr, kWordSize);
- ASAN_UNPOISON(caller_pc_ptr, kWordSize);
- return reinterpret_cast<uword*>(*caller_pc_ptr);
- }
-
- uword* CallerFP(uword* fp) const {
- ASSERT(fp != NULL);
- uword* caller_fp_ptr = fp + kHostSavedCallerFpSlotFromFp;
- // This may actually be uninitialized, by design (see class comment above).
- MSAN_UNPOISON(caller_fp_ptr, kWordSize);
- ASAN_UNPOISON(caller_fp_ptr, kWordSize);
- return reinterpret_cast<uword*>(*caller_fp_ptr);
- }
-
- bool ValidFramePointer(uword* fp) const {
- if (fp == NULL) {
- return false;
- }
- uword cursor = reinterpret_cast<uword>(fp);
- cursor += sizeof(fp);
- bool r = (cursor >= lower_bound_) && (cursor < stack_upper_);
- return r;
- }
-
- ProfilerCounters* const counters_;
- const uword stack_upper_;
- const uword original_pc_;
- const uword original_fp_;
- const uword original_sp_;
- uword lower_bound_;
-};
-
static void CopyStackBuffer(Sample* sample, uword sp_addr) {
ASSERT(sample != NULL);
uword* sp = reinterpret_cast<uword*>(sp_addr);
@@ -1046,87 +1254,6 @@
#endif
}
-static bool ValidateThreadStackBounds(uintptr_t fp,
- uintptr_t sp,
- uword stack_lower,
- uword stack_upper) {
- if (stack_lower >= stack_upper) {
- // Stack boundary is invalid.
- return false;
- }
-
- if ((sp < stack_lower) || (sp >= stack_upper)) {
- // Stack pointer is outside thread's stack boundary.
- return false;
- }
-
- if ((fp < stack_lower) || (fp >= stack_upper)) {
- // Frame pointer is outside threads's stack boundary.
- return false;
- }
-
- return true;
-}
-
-// Get |thread|'s stack boundary and verify that |sp| and |fp| are within
-// it. Return |false| if anything looks suspicious.
-static bool GetAndValidateThreadStackBounds(OSThread* os_thread,
- Thread* thread,
- uintptr_t fp,
- uintptr_t sp,
- uword* stack_lower,
- uword* stack_upper) {
- ASSERT(os_thread != NULL);
- ASSERT(stack_lower != NULL);
- ASSERT(stack_upper != NULL);
-
-#if defined(USING_SIMULATOR)
- const bool use_simulator_stack_bounds =
- thread != NULL && thread->IsExecutingDartCode();
- if (use_simulator_stack_bounds) {
- Isolate* isolate = thread->isolate();
- ASSERT(isolate != NULL);
- Simulator* simulator = isolate->simulator();
- *stack_lower = simulator->stack_limit();
- *stack_upper = simulator->stack_base();
- }
-#else
- const bool use_simulator_stack_bounds = false;
-#endif // defined(USING_SIMULATOR)
-
- if (!use_simulator_stack_bounds) {
- *stack_lower = os_thread->stack_limit();
- *stack_upper = os_thread->stack_base();
- }
-
- if ((*stack_lower == 0) || (*stack_upper == 0)) {
- return false;
- }
-
- if (!use_simulator_stack_bounds && (sp > *stack_lower)) {
- // The stack pointer gives us a tighter lower bound.
- *stack_lower = sp;
- }
-
- return ValidateThreadStackBounds(fp, sp, *stack_lower, *stack_upper);
-}
-
-// Some simple sanity checking of |pc|, |fp|, and |sp|.
-static bool InitialRegisterCheck(uintptr_t pc, uintptr_t fp, uintptr_t sp) {
- if ((sp == 0) || (fp == 0) || (pc == 0)) {
- // None of these registers should be zero.
- return false;
- }
-
- if (sp > fp) {
- // Assuming the stack grows down, we should never have a stack pointer above
- // the frame pointer.
- return false;
- }
-
- return true;
-}
-
static Sample* SetupSample(Thread* thread,
bool allocation_sample,
ThreadId tid) {
@@ -1184,122 +1311,6 @@
return isolate != Dart::vm_isolate();
}
-void Profiler::DumpStackTrace(void* context) {
- if (context == NULL) {
- DumpStackTrace(/*for_crash=*/true);
- return;
- }
-#if defined(DART_HOST_OS_LINUX) || defined(DART_HOST_OS_MACOS) || \
- defined(DART_HOST_OS_ANDROID)
- ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
- mcontext_t mcontext = ucontext->uc_mcontext;
- uword pc = SignalHandler::GetProgramCounter(mcontext);
- uword fp = SignalHandler::GetFramePointer(mcontext);
- uword sp = SignalHandler::GetCStackPointer(mcontext);
- DumpStackTrace(sp, fp, pc, /*for_crash=*/true);
-#elif defined(DART_HOST_OS_WINDOWS)
- CONTEXT* ctx = reinterpret_cast<CONTEXT*>(context);
-#if defined(HOST_ARCH_IA32)
- uword pc = static_cast<uword>(ctx->Eip);
- uword fp = static_cast<uword>(ctx->Ebp);
- uword sp = static_cast<uword>(ctx->Esp);
-#elif defined(HOST_ARCH_X64)
- uword pc = static_cast<uword>(ctx->Rip);
- uword fp = static_cast<uword>(ctx->Rbp);
- uword sp = static_cast<uword>(ctx->Rsp);
-#else
-#error Unsupported architecture.
-#endif
- DumpStackTrace(sp, fp, pc, /*for_crash=*/true);
-#else
-// TODO(fschneider): Add support for more platforms.
-// Do nothing on unsupported platforms.
-#endif
-}
-
-void Profiler::DumpStackTrace(bool for_crash) {
- uintptr_t sp = OSThread::GetCurrentStackPointer();
- uintptr_t fp = 0;
- uintptr_t pc = OS::GetProgramCounter();
-
- COPY_FP_REGISTER(fp);
-
- DumpStackTrace(sp, fp, pc, for_crash);
-}
-
-void Profiler::DumpStackTrace(uword sp, uword fp, uword pc, bool for_crash) {
- if (for_crash) {
- // Allow only one stack trace to prevent recursively printing stack traces
- // if we hit an assert while printing the stack.
- static RelaxedAtomic<uintptr_t> started_dump = 0;
- if (started_dump.fetch_add(1u) != 0) {
- OS::PrintErr("Aborting re-entrant request for stack trace.\n");
- return;
- }
- }
-
- auto os_thread = OSThread::Current();
- ASSERT(os_thread != nullptr);
- auto thread = Thread::Current(); // NULL if no current isolate.
- auto isolate = thread == nullptr ? nullptr : thread->isolate();
- auto isolate_group = thread == nullptr ? nullptr : thread->isolate_group();
- auto source = isolate_group == nullptr ? nullptr : isolate_group->source();
- auto vm_source =
- Dart::vm_isolate() == nullptr ? nullptr : Dart::vm_isolate()->source();
- const char* isolate_group_name =
- isolate_group == nullptr ? "(nil)" : isolate_group->source()->name;
- const char* isolate_name = isolate == nullptr ? "(nil)" : isolate->name();
-
- OS::PrintErr("version=%s\n", Version::String());
- OS::PrintErr("pid=%" Pd ", thread=%" Pd
- ", isolate_group=%s(%p), isolate=%s(%p)\n",
- static_cast<intptr_t>(OS::ProcessId()),
- OSThread::ThreadIdToIntPtr(os_thread->trace_id()),
- isolate_group_name, isolate_group, isolate_name, isolate);
- OS::PrintErr("isolate_instructions=%" Px ", vm_instructions=%" Px "\n",
- source == nullptr
- ? 0
- : reinterpret_cast<uword>(source->snapshot_instructions),
- vm_source == nullptr
- ? 0
- : reinterpret_cast<uword>(vm_source->snapshot_instructions));
-
- if (!InitialRegisterCheck(pc, fp, sp)) {
- OS::PrintErr("Stack dump aborted because InitialRegisterCheck failed.\n");
- return;
- }
-
- uword stack_lower = 0;
- uword stack_upper = 0;
- if (!GetAndValidateThreadStackBounds(os_thread, thread, fp, sp, &stack_lower,
- &stack_upper)) {
- OS::PrintErr(
- "Stack dump aborted because GetAndValidateThreadStackBounds failed.\n");
- return;
- }
-
- ProfilerNativeStackWalker native_stack_walker(&counters_, ILLEGAL_PORT, NULL,
- NULL, stack_lower, stack_upper,
- pc, fp, sp,
- /*skip_count=*/0);
- native_stack_walker.walk();
- OS::PrintErr("-- End of DumpStackTrace\n");
-
- if (thread != nullptr) {
- if (thread->execution_state() == Thread::kThreadInNative) {
- TransitionNativeToVM transition(thread);
- StackFrame::DumpCurrentTrace();
- } else if (thread->execution_state() == Thread::kThreadInVM) {
- StackFrame::DumpCurrentTrace();
-#if !defined(DART_PRECOMPILED_RUNTIME)
- if (thread->HasCompilerState()) {
- thread->compiler_state().ReportCrash();
- }
-#endif
- }
- }
-}
-
void Profiler::SampleAllocation(Thread* thread,
intptr_t cid,
uint32_t identity_hash) {
diff --git a/tools/VERSION b/tools/VERSION
index bd4b086..18b3435 100644
--- a/tools/VERSION
+++ b/tools/VERSION
@@ -27,5 +27,5 @@
MAJOR 2
MINOR 17
PATCH 0
-PRERELEASE 202
+PRERELEASE 203
PRERELEASE_PATCH 0
\ No newline at end of file