Version 2.18.0-87.0.dev
Merge commit '35246aee33b46cf6528c7a04549abc31826ec6a3' into 'dev'
diff --git a/DEPS b/DEPS
index 52a18ec..3c7752c 100644
--- a/DEPS
+++ b/DEPS
@@ -76,7 +76,7 @@
"gperftools_revision": "180bfa10d7cb38e8b3784d60943d50e8fcef0dcb",
# Revisions of /third_party/* dependencies.
- "args_rev": "3b3f55766af13d895d2020ec001a28e8dc147f91",
+ "args_rev": "862d929b980b993334974d38485a39d891d83918",
"async_rev": "f3ed5f690e2ec9dbe1bfc5184705575b4f6480e5",
"bazel_worker_rev": "ceeba0982d4ff40d32371c9d35f3d2dc1868de20",
"benchmark_harness_rev": "0530da692a5d689f4b5450a7c8d1a8abe3e2d555",
@@ -111,12 +111,12 @@
"devtools_rev": "6ca0b315ccafb7e2a5c6081063d28b546a0f7101",
"ffi_rev": "4dd32429880a57b64edaf54c9d5af8a9fa9a4ffb",
"file_rev": "1ebc38852ffed24b564910317982298b56c2cedd",
- "fixnum_rev": "848341f061359ef7ddc0cad472c2ecbb036b28ac",
+ "fixnum_rev": "3bfc2ed1eea7e7acb79ad4f17392f92c816fc5ce",
"glob_rev": "da1f4595ee2f87982cbcc663d4cac244822d9227",
"html_rev": "f108bce59d136c584969fd24a5006914796cf213",
"http_io_rev": "2fa188caf7937e313026557713f7feffedd4978b",
"http_multi_server_rev": "34bf7f04b61cce561f47f7f275c2cc811534a05a",
- "http_parser_rev": "202391286ddc13c4c3c284ac5b511f04697250ed",
+ "http_parser_rev": "9126ee04e77fd8e4e2e6435b503ee4dd708d7ddc",
"http_rev": "2c9b418f5086f999c150d18172d2eec1f963de7b",
"icu_rev": "81d656878ec611cb0b42d52c82e9dae93920d9ba",
"intl_rev": "9669926609e7efc17dfd74fbb44ec719a7e573cc",
@@ -132,7 +132,7 @@
"mockito_rev": "1e977a727e82a2e1bdb49b79ef1dce0f23aa1faa",
"oauth2_rev": "7cd3284049fe5badbec9f2bea2afc41d14c01057",
"package_config_rev": "8731bf10b5375542792a32a0f7c8a6f370583d96",
- "path_rev": "baedce9d2ca11ea2cdf54395a74eb038087777a4",
+ "path_rev": "3d41ea582f5b0b18de3d90008809b877ff3f69bc",
"platform_rev": "1ffad63428bbd1b3ecaa15926bacfb724023648c",
"ply_rev": "604b32590ffad5cbb82e4afef1d305512d06ae93",
"pool_rev": "7abe634002a1ba8a0928eded086062f1307ccfae",
@@ -153,14 +153,14 @@
"sse_rev": "9a54f1cdd91c8d79a6bf5ef8e849a12756607453",
"stack_trace_rev": "5220580872625ddee41e9ca9a5f3364789b2f0f6",
"stream_channel_rev": "3fa3e40c75c210d617b8b943b9b8f580e9866a89",
- "string_scanner_rev": "0e53bf9059e8e22a3b346aac7ec755a0f8314eb6",
+ "string_scanner_rev": "6579871b528036767b3200b390a3ecef28e4900d",
"sync_http_rev": "b59c134f2e34d12acac110d4f17f83e5a7db4330",
"term_glyph_rev": "4885b7f8af6931e23d3aa6d1767ee3f9a626923d",
"test_descriptor_rev": "ead23c1e7df079ac0f6457a35f7a71432892e527",
"test_process_rev": "7c73ec8a8a6e0e63d0ec27d70c21ca4323fb5e8f",
"test_reflective_loader_rev": "fcfce37666672edac849d2af6dffc0f8df236a94",
"test_rev": "d54846bc2b5cfa4e1445fda85c5e48a00940aa68",
- "typed_data_rev": "29ce5a92b03326d0b8035916ac04f528874994bd",
+ "typed_data_rev": "8b19e29bcf4077147de4d67adeabeb48270c65eb",
"usage_rev": "e85d575d6decb921c57a43b9844bba3607479f56",
"vector_math_rev": "0cbed0914d49a6a44555e6d5444c438a4a4c3fc1",
"watcher_rev": "f76997ab0c857dc5537ac0975a9ada92b54ef949",
diff --git a/pkg/nnbd_migration/lib/src/preview/preview_site.dart b/pkg/nnbd_migration/lib/src/preview/preview_site.dart
index 5748c5b..6ac9ec8 100644
--- a/pkg/nnbd_migration/lib/src/preview/preview_site.dart
+++ b/pkg/nnbd_migration/lib/src/preview/preview_site.dart
@@ -7,8 +7,8 @@
import 'dart:math';
import 'dart:typed_data';
+import 'package:analyzer/src/manifest/charcodes.dart';
import 'package:analyzer_plugin/protocol/protocol_common.dart';
-import 'package:charcode/charcode.dart';
import 'package:cli_util/cli_logging.dart';
import 'package:meta/meta.dart';
import 'package:nnbd_migration/src/edit_plan.dart';
diff --git a/pkg/nnbd_migration/pubspec.yaml b/pkg/nnbd_migration/pubspec.yaml
index 4ed12f1..e89625b 100644
--- a/pkg/nnbd_migration/pubspec.yaml
+++ b/pkg/nnbd_migration/pubspec.yaml
@@ -11,7 +11,6 @@
analyzer: any
analyzer_plugin: any
args: ^2.3.0
- charcode: ^1.1.0
cli_util: ^0.3.5
collection: ^1.15.0
crypto: ^3.0.1
diff --git a/runtime/vm/compiler/asm_intrinsifier_arm.cc b/runtime/vm/compiler/asm_intrinsifier_arm.cc
index c45f72f..770e7ff 100644
--- a/runtime/vm/compiler/asm_intrinsifier_arm.cc
+++ b/runtime/vm/compiler/asm_intrinsifier_arm.cc
@@ -1863,16 +1863,16 @@
__ LoadClassId(R1, R1);
__ AddImmediate(R1, -kOneByteStringCid);
__ add(R1, R2, Operand(R1, LSL, target::kWordSizeLog2));
- __ ldr(R0, FieldAddress(R1, target::RegExp::function_offset(kOneByteStringCid,
- sticky)));
+ __ ldr(FUNCTION_REG, FieldAddress(R1, target::RegExp::function_offset(
+ kOneByteStringCid, sticky)));
// Registers are now set up for the lazy compile stub. It expects the function
// in R0, the argument descriptor in R4, and IC-Data in R9.
__ eor(R9, R9, Operand(R9));
// Tail-call the function.
- __ ldr(CODE_REG, FieldAddress(R0, target::Function::code_offset()));
- __ Branch(FieldAddress(R0, target::Function::entry_point_offset()));
+ __ ldr(CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
+ __ Branch(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
}
void AsmIntrinsifier::UserTag_defaultTag(Assembler* assembler,
diff --git a/runtime/vm/compiler/asm_intrinsifier_arm64.cc b/runtime/vm/compiler/asm_intrinsifier_arm64.cc
index 7d0fb15..570c844 100644
--- a/runtime/vm/compiler/asm_intrinsifier_arm64.cc
+++ b/runtime/vm/compiler/asm_intrinsifier_arm64.cc
@@ -2105,17 +2105,19 @@
#else
__ add(R1, R2, Operand(R1, LSL, target::kWordSizeLog2 - 1));
#endif
- __ LoadCompressed(R0, FieldAddress(R1, target::RegExp::function_offset(
- kOneByteStringCid, sticky)));
+ __ LoadCompressed(FUNCTION_REG,
+ FieldAddress(R1, target::RegExp::function_offset(
+ kOneByteStringCid, sticky)));
// Registers are now set up for the lazy compile stub. It expects the function
// in R0, the argument descriptor in R4, and IC-Data in R5.
__ eor(R5, R5, Operand(R5));
// Tail-call the function.
- __ LoadCompressed(CODE_REG,
- FieldAddress(R0, target::Function::code_offset()));
- __ ldr(R1, FieldAddress(R0, target::Function::entry_point_offset()));
+ __ LoadCompressed(
+ CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
+ __ ldr(R1,
+ FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
__ br(R1);
}
diff --git a/runtime/vm/compiler/asm_intrinsifier_ia32.cc b/runtime/vm/compiler/asm_intrinsifier_ia32.cc
index fce0555..538ce05 100644
--- a/runtime/vm/compiler/asm_intrinsifier_ia32.cc
+++ b/runtime/vm/compiler/asm_intrinsifier_ia32.cc
@@ -1888,16 +1888,16 @@
__ movl(EDI, Address(ESP, kStringParamOffset));
__ LoadClassId(EDI, EDI);
__ SubImmediate(EDI, Immediate(kOneByteStringCid));
- __ movl(EAX, FieldAddress(
- EBX, EDI, TIMES_4,
- target::RegExp::function_offset(kOneByteStringCid, sticky)));
+ __ movl(FUNCTION_REG, FieldAddress(EBX, EDI, TIMES_4,
+ target::RegExp::function_offset(
+ kOneByteStringCid, sticky)));
// Registers are now set up for the lazy compile stub. It expects the function
// in EAX, the argument descriptor in EDX, and IC-Data in ECX.
__ xorl(ECX, ECX);
// Tail-call the function.
- __ jmp(FieldAddress(EAX, target::Function::entry_point_offset()));
+ __ jmp(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
}
void AsmIntrinsifier::UserTag_defaultTag(Assembler* assembler,
diff --git a/runtime/vm/compiler/asm_intrinsifier_riscv.cc b/runtime/vm/compiler/asm_intrinsifier_riscv.cc
index eefc6ef..39b610c 100644
--- a/runtime/vm/compiler/asm_intrinsifier_riscv.cc
+++ b/runtime/vm/compiler/asm_intrinsifier_riscv.cc
@@ -1770,16 +1770,16 @@
__ AddImmediate(T1, -kOneByteStringCid);
__ slli(T1, T1, target::kWordSizeLog2);
__ add(T1, T1, T2);
- __ lx(T0, FieldAddress(T1, target::RegExp::function_offset(kOneByteStringCid,
- sticky)));
+ __ lx(FUNCTION_REG, FieldAddress(T1, target::RegExp::function_offset(
+ kOneByteStringCid, sticky)));
// Registers are now set up for the lazy compile stub. It expects the function
// in T0, the argument descriptor in S4, and IC-Data in S5.
__ li(S5, 0);
// Tail-call the function.
- __ lx(CODE_REG, FieldAddress(T0, target::Function::code_offset()));
- __ lx(T1, FieldAddress(T0, target::Function::entry_point_offset()));
+ __ lx(CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
+ __ lx(T1, FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
__ jr(T1);
}
diff --git a/runtime/vm/compiler/asm_intrinsifier_x64.cc b/runtime/vm/compiler/asm_intrinsifier_x64.cc
index bbdc515..9ff337a 100644
--- a/runtime/vm/compiler/asm_intrinsifier_x64.cc
+++ b/runtime/vm/compiler/asm_intrinsifier_x64.cc
@@ -1976,13 +1976,13 @@
__ LoadClassId(RDI, RDI);
__ SubImmediate(RDI, Immediate(kOneByteStringCid));
#if !defined(DART_COMPRESSED_POINTERS)
- __ movq(RAX, FieldAddress(
- RBX, RDI, TIMES_8,
- target::RegExp::function_offset(kOneByteStringCid, sticky)));
+ __ movq(FUNCTION_REG, FieldAddress(RBX, RDI, TIMES_8,
+ target::RegExp::function_offset(
+ kOneByteStringCid, sticky)));
#else
- __ LoadCompressed(RAX, FieldAddress(RBX, RDI, TIMES_4,
- target::RegExp::function_offset(
- kOneByteStringCid, sticky)));
+ __ LoadCompressed(FUNCTION_REG, FieldAddress(RBX, RDI, TIMES_4,
+ target::RegExp::function_offset(
+ kOneByteStringCid, sticky)));
#endif
// Registers are now set up for the lazy compile stub. It expects the function
@@ -1990,9 +1990,10 @@
__ xorq(RCX, RCX);
// Tail-call the function.
- __ LoadCompressed(CODE_REG,
- FieldAddress(RAX, target::Function::code_offset()));
- __ movq(RDI, FieldAddress(RAX, target::Function::entry_point_offset()));
+ __ LoadCompressed(
+ CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
+ __ movq(RDI,
+ FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
__ jmp(RDI);
}
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_arm.cc b/runtime/vm/compiler/backend/flow_graph_compiler_arm.cc
index c03025e..6bdd476 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_arm.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_arm.cc
@@ -522,7 +522,7 @@
__ LoadObject(R8, parsed_function().function());
__ LoadFromOffset(R0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
- __ LoadUniqueObject(R9, ic_data);
+ __ LoadUniqueObject(IC_DATA_REG, ic_data);
GenerateDartCall(deopt_id, source, stub, UntaggedPcDescriptors::kIcCall, locs,
entry_kind);
__ Drop(ic_data.SizeWithTypeArgs());
@@ -539,7 +539,7 @@
entry_kind == Code::EntryKind::kUnchecked);
ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
__ LoadFromOffset(R0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
- __ LoadUniqueObject(R9, ic_data);
+ __ LoadUniqueObject(IC_DATA_REG, ic_data);
__ LoadUniqueObject(CODE_REG, stub);
const intptr_t entry_point_offset =
entry_kind == Code::EntryKind::kNormal
@@ -573,10 +573,10 @@
// The AOT runtime will replace the slot in the object pool with the
// entrypoint address - see app_snapshot.cc.
CLOBBERS_LR(__ LoadUniqueObject(LR, StubCode::MegamorphicCall()));
- __ LoadUniqueObject(R9, cache);
+ __ LoadUniqueObject(IC_DATA_REG, cache);
CLOBBERS_LR(__ blx(LR));
} else {
- __ LoadUniqueObject(R9, cache);
+ __ LoadUniqueObject(IC_DATA_REG, cache);
__ LoadUniqueObject(CODE_REG, StubCode::MegamorphicCall());
__ Call(compiler::FieldAddress(
CODE_REG, Code::entry_point_offset(Code::EntryKind::kMonomorphic)));
@@ -672,10 +672,10 @@
ASSERT(CanCallDart());
ASSERT(!function.IsClosureFunction());
if (function.PrologueNeedsArgumentsDescriptor()) {
- __ LoadObject(R4, arguments_descriptor);
+ __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
} else {
if (!FLAG_precompiled_mode) {
- __ LoadImmediate(R4, 0); // GC safe smi zero because of stub.
+ __ LoadImmediate(ARGS_DESC_REG, 0); // GC safe smi zero because of stub.
}
}
// Do not use the code from the function, but let the code be patched so that
@@ -815,7 +815,7 @@
// Load receiver into R0.
__ LoadFromOffset(
R0, SP, (count_without_type_args - 1) * compiler::target::kWordSize);
- __ LoadObject(R4, arguments_descriptor);
+ __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
}
void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label,
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc b/runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc
index 362e199..b4f95bd 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc
@@ -505,7 +505,7 @@
__ LoadObject(R6, parsed_function().function());
__ LoadFromOffset(R0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
- __ LoadUniqueObject(R5, ic_data);
+ __ LoadUniqueObject(IC_DATA_REG, ic_data);
GenerateDartCall(deopt_id, source, stub, UntaggedPcDescriptors::kIcCall, locs,
entry_kind);
__ Drop(ic_data.SizeWithTypeArgs());
@@ -529,7 +529,7 @@
const intptr_t stub_index =
op.AddObject(stub, ObjectPool::Patchability::kPatchable);
ASSERT((ic_data_index + 1) == stub_index);
- __ LoadDoubleWordFromPoolIndex(R5, CODE_REG, ic_data_index);
+ __ LoadDoubleWordFromPoolIndex(IC_DATA_REG, CODE_REG, ic_data_index);
const intptr_t entry_point_offset =
entry_kind == Code::EntryKind::kNormal
? Code::entry_point_offset(Code::EntryKind::kMonomorphic)
@@ -567,9 +567,9 @@
if (FLAG_precompiled_mode) {
// The AOT runtime will replace the slot in the object pool with the
// entrypoint address - see app_snapshot.cc.
- CLOBBERS_LR(__ LoadDoubleWordFromPoolIndex(R5, LR, data_index));
+ CLOBBERS_LR(__ LoadDoubleWordFromPoolIndex(IC_DATA_REG, LR, data_index));
} else {
- __ LoadDoubleWordFromPoolIndex(R5, CODE_REG, data_index);
+ __ LoadDoubleWordFromPoolIndex(IC_DATA_REG, CODE_REG, data_index);
CLOBBERS_LR(__ ldr(LR, compiler::FieldAddress(
CODE_REG, Code::entry_point_offset(
Code::EntryKind::kMonomorphic))));
@@ -673,10 +673,10 @@
ASSERT(CanCallDart());
ASSERT(!function.IsClosureFunction());
if (function.PrologueNeedsArgumentsDescriptor()) {
- __ LoadObject(R4, arguments_descriptor);
+ __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
} else {
if (!FLAG_precompiled_mode) {
- __ LoadImmediate(R4, 0); // GC safe smi zero because of stub.
+ __ LoadImmediate(ARGS_DESC_REG, 0); // GC safe smi zero because of stub.
}
}
// Do not use the code from the function, but let the code be patched so that
@@ -823,7 +823,7 @@
__ Comment("EmitTestAndCall");
// Load receiver into R0.
__ LoadFromOffset(R0, SP, (count_without_type_args - 1) * kWordSize);
- __ LoadObject(R4, arguments_descriptor);
+ __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
}
void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label,
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_ia32.cc b/runtime/vm/compiler/backend/flow_graph_compiler_ia32.cc
index 257243a..e4b5071 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_ia32.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_ia32.cc
@@ -555,7 +555,7 @@
// Load receiver into EBX.
__ movl(EBX, compiler::Address(
ESP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize));
- __ LoadObject(ECX, ic_data);
+ __ LoadObject(IC_DATA_REG, ic_data);
GenerateDartCall(deopt_id, source, stub, UntaggedPcDescriptors::kIcCall, locs,
entry_kind);
__ Drop(ic_data.SizeWithTypeArgs());
@@ -574,7 +574,7 @@
// Load receiver into EBX.
__ movl(EBX, compiler::Address(
ESP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize));
- __ LoadObject(ECX, ic_data, true);
+ __ LoadObject(IC_DATA_REG, ic_data, true);
__ LoadObject(CODE_REG, stub, true);
const intptr_t entry_point_offset =
entry_kind == Code::EntryKind::kNormal
@@ -602,7 +602,7 @@
__ Comment("MegamorphicCall");
// Load receiver into EBX.
__ movl(EBX, compiler::Address(ESP, (args_desc.Count() - 1) * kWordSize));
- __ LoadObject(ECX, cache, true);
+ __ LoadObject(IC_DATA_REG, cache, true);
__ LoadObject(CODE_REG, StubCode::MegamorphicCall(), true);
__ call(compiler::FieldAddress(
CODE_REG, Code::entry_point_offset(Code::EntryKind::kMonomorphic)));
@@ -643,9 +643,9 @@
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
if (function.PrologueNeedsArgumentsDescriptor()) {
- __ LoadObject(EDX, arguments_descriptor);
+ __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
} else {
- __ xorl(EDX, EDX); // GC safe smi zero because of stub.
+ __ xorl(ARGS_DESC_REG, ARGS_DESC_REG); // GC safe smi zero because of stub.
}
// Do not use the code from the function, but let the code be patched so that
// we can record the outgoing edges to other code.
@@ -810,7 +810,7 @@
// Load receiver into EAX.
__ movl(EAX,
compiler::Address(ESP, (count_without_type_args - 1) * kWordSize));
- __ LoadObject(EDX, arguments_descriptor);
+ __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
}
void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label,
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_x64.cc b/runtime/vm/compiler/backend/flow_graph_compiler_x64.cc
index 59db090..fe8bf0d 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_x64.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_x64.cc
@@ -525,7 +525,7 @@
// Load receiver into RDX.
__ movq(RDX, compiler::Address(
RSP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize));
- __ LoadUniqueObject(RBX, ic_data);
+ __ LoadUniqueObject(IC_DATA_REG, ic_data);
GenerateDartCall(deopt_id, source, stub, UntaggedPcDescriptors::kIcCall, locs,
entry_kind);
__ Drop(ic_data.SizeWithTypeArgs(), RCX);
@@ -544,7 +544,7 @@
// Load receiver into RDX.
__ movq(RDX, compiler::Address(
RSP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize));
- __ LoadUniqueObject(RBX, ic_data);
+ __ LoadUniqueObject(IC_DATA_REG, ic_data);
__ LoadUniqueObject(CODE_REG, stub);
const intptr_t entry_point_offset =
entry_kind == Code::EntryKind::kNormal
@@ -577,10 +577,10 @@
// The AOT runtime will replace the slot in the object pool with the
// entrypoint address - see app_snapshot.cc.
__ LoadUniqueObject(RCX, StubCode::MegamorphicCall());
- __ LoadUniqueObject(RBX, cache);
+ __ LoadUniqueObject(IC_DATA_REG, cache);
__ call(RCX);
} else {
- __ LoadUniqueObject(RBX, cache);
+ __ LoadUniqueObject(IC_DATA_REG, cache);
__ LoadUniqueObject(CODE_REG, StubCode::MegamorphicCall());
__ call(compiler::FieldAddress(
CODE_REG, Code::entry_point_offset(Code::EntryKind::kMonomorphic)));
@@ -656,10 +656,11 @@
ASSERT(CanCallDart());
ASSERT(!function.IsClosureFunction());
if (function.PrologueNeedsArgumentsDescriptor()) {
- __ LoadObject(R10, arguments_descriptor);
+ __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
} else {
if (!FLAG_precompiled_mode) {
- __ xorl(R10, R10); // GC safe smi zero because of stub.
+ __ xorl(ARGS_DESC_REG,
+ ARGS_DESC_REG); // GC safe smi zero because of stub.
}
}
// Do not use the code from the function, but let the code be patched so that
@@ -792,7 +793,7 @@
// Load receiver into RAX.
__ movq(RAX,
compiler::Address(RSP, (count_without_type_args - 1) * kWordSize));
- __ LoadObject(R10, arguments_descriptor);
+ __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
}
void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label,
diff --git a/runtime/vm/compiler/backend/il_arm.cc b/runtime/vm/compiler/backend/il_arm.cc
index 493238e..fad4204 100644
--- a/runtime/vm/compiler/backend/il_arm.cc
+++ b/runtime/vm/compiler/backend/il_arm.cc
@@ -593,36 +593,40 @@
const intptr_t kNumTemps = 0;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
- summary->set_in(0, Location::RegisterLocation(R0)); // Function.
+ summary->set_in(
+ 0, Location::RegisterLocation(FLAG_precompiled_mode ? R0 : FUNCTION_REG));
return MakeCallSummary(zone, this, summary);
}
void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- // Load arguments descriptor in R4.
+ // Load arguments descriptor in ARGS_DESC_REG.
const intptr_t argument_count = ArgumentCount(); // Includes type args.
const Array& arguments_descriptor =
Array::ZoneHandle(Z, GetArgumentsDescriptor());
- __ LoadObject(R4, arguments_descriptor);
+ __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
- ASSERT(locs()->in(0).reg() == R0);
if (FLAG_precompiled_mode) {
+ ASSERT(locs()->in(0).reg() == R0);
// R0: Closure with a cached entry point.
__ ldr(R2, compiler::FieldAddress(
R0, compiler::target::Closure::entry_point_offset()));
} else {
- // R0: Function.
- __ ldr(CODE_REG, compiler::FieldAddress(
- R0, compiler::target::Function::code_offset()));
+ ASSERT(locs()->in(0).reg() == FUNCTION_REG);
+ // FUNCTION_REG: Function.
+ __ ldr(CODE_REG,
+ compiler::FieldAddress(FUNCTION_REG,
+ compiler::target::Function::code_offset()));
// Closure functions only have one entry point.
- __ ldr(R2, compiler::FieldAddress(
- R0, compiler::target::Function::entry_point_offset()));
+ __ ldr(R2,
+ compiler::FieldAddress(
+ FUNCTION_REG, compiler::target::Function::entry_point_offset()));
}
- // R4: Arguments descriptor array.
+ // ARGS_DESC_REG: Arguments descriptor array.
// R2: instructions entry point.
if (!FLAG_precompiled_mode) {
// R9: Smi 0 (no IC data; the lazy-compile stub expects a GC-safe value).
- __ LoadImmediate(R9, 0);
+ __ LoadImmediate(IC_DATA_REG, 0);
}
__ blx(R2);
compiler->EmitCallsiteMetadata(source(), deopt_id(),
diff --git a/runtime/vm/compiler/backend/il_arm64.cc b/runtime/vm/compiler/backend/il_arm64.cc
index 4d0bf7c..fd52cc1 100644
--- a/runtime/vm/compiler/backend/il_arm64.cc
+++ b/runtime/vm/compiler/backend/il_arm64.cc
@@ -517,36 +517,38 @@
const intptr_t kNumTemps = 0;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
- summary->set_in(0, Location::RegisterLocation(R0)); // Function.
+ summary->set_in(
+ 0, Location::RegisterLocation(FLAG_precompiled_mode ? R0 : FUNCTION_REG));
return MakeCallSummary(zone, this, summary);
}
void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- // Load arguments descriptor in R4.
+ // Load arguments descriptor in ARGS_DESC_REG.
const intptr_t argument_count = ArgumentCount(); // Includes type args.
const Array& arguments_descriptor =
Array::ZoneHandle(Z, GetArgumentsDescriptor());
- __ LoadObject(R4, arguments_descriptor);
+ __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
- ASSERT(locs()->in(0).reg() == R0);
if (FLAG_precompiled_mode) {
+ ASSERT(locs()->in(0).reg() == R0);
// R0: Closure with a cached entry point.
__ LoadFieldFromOffset(R2, R0,
compiler::target::Closure::entry_point_offset());
} else {
- // R0: Function.
- __ LoadCompressedFieldFromOffset(CODE_REG, R0,
+ ASSERT(locs()->in(0).reg() == FUNCTION_REG);
+ // FUNCTION_REG: Function.
+ __ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
compiler::target::Function::code_offset());
// Closure functions only have one entry point.
- __ LoadFieldFromOffset(R2, R0,
+ __ LoadFieldFromOffset(R2, FUNCTION_REG,
compiler::target::Function::entry_point_offset());
}
- // R4: Arguments descriptor array.
+ // ARGS_DESC_REG: Arguments descriptor array.
// R2: instructions entry point.
if (!FLAG_precompiled_mode) {
// R5: Smi 0 (no IC data; the lazy-compile stub expects a GC-safe value).
- __ LoadImmediate(R5, 0);
+ __ LoadImmediate(IC_DATA_REG, 0);
}
__ blr(R2);
compiler->EmitCallsiteMetadata(source(), deopt_id(),
diff --git a/runtime/vm/compiler/backend/il_ia32.cc b/runtime/vm/compiler/backend/il_ia32.cc
index f2ebdda..8a2ec5a 100644
--- a/runtime/vm/compiler/backend/il_ia32.cc
+++ b/runtime/vm/compiler/backend/il_ia32.cc
@@ -6580,7 +6580,7 @@
const intptr_t kNumTemps = 0;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
- summary->set_in(0, Location::RegisterLocation(EAX)); // Function.
+ summary->set_in(0, Location::RegisterLocation(FUNCTION_REG)); // Function.
summary->set_out(0, Location::RegisterLocation(EAX));
return summary;
}
@@ -6590,16 +6590,17 @@
const intptr_t argument_count = ArgumentCount(); // Includes type args.
const Array& arguments_descriptor =
Array::ZoneHandle(Z, GetArgumentsDescriptor());
- __ LoadObject(EDX, arguments_descriptor);
+ __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
// EBX: Code (compiled code or lazy compile stub).
- ASSERT(locs()->in(0).reg() == EAX);
- __ movl(EBX, compiler::FieldAddress(EAX, Function::entry_point_offset()));
+ ASSERT(locs()->in(0).reg() == FUNCTION_REG);
+ __ movl(EBX,
+ compiler::FieldAddress(FUNCTION_REG, Function::entry_point_offset()));
- // EAX: Function.
- // EDX: Arguments descriptor array.
+ // FUNCTION_REG: Function.
+ // ARGS_DESC_REG: Arguments descriptor array.
// ECX: Smi 0 (no IC data; the lazy-compile stub expects a GC-safe value).
- __ xorl(ECX, ECX);
+ __ xorl(IC_DATA_REG, IC_DATA_REG);
__ call(EBX);
compiler->EmitCallsiteMetadata(source(), deopt_id(),
UntaggedPcDescriptors::kOther, locs(), env());
diff --git a/runtime/vm/compiler/backend/il_riscv.cc b/runtime/vm/compiler/backend/il_riscv.cc
index 74459f7..3355081 100644
--- a/runtime/vm/compiler/backend/il_riscv.cc
+++ b/runtime/vm/compiler/backend/il_riscv.cc
@@ -570,33 +570,35 @@
const intptr_t kNumTemps = 0;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
- summary->set_in(0, Location::RegisterLocation(T0)); // Function.
+ summary->set_in(
+ 0, Location::RegisterLocation(FLAG_precompiled_mode ? T0 : FUNCTION_REG));
return MakeCallSummary(zone, this, summary);
}
void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- // Load arguments descriptor in S4.
+ // Load arguments descriptor in ARGS_DESC_REG.
const intptr_t argument_count = ArgumentCount(); // Includes type args.
const Array& arguments_descriptor =
Array::ZoneHandle(Z, GetArgumentsDescriptor());
__ LoadObject(ARGS_DESC_REG, arguments_descriptor);
- ASSERT(locs()->in(0).reg() == T0);
if (FLAG_precompiled_mode) {
+ ASSERT(locs()->in(0).reg() == T0);
// T0: Closure with a cached entry point.
__ LoadFieldFromOffset(A1, T0,
compiler::target::Closure::entry_point_offset());
} else {
- // T0: Function.
- __ LoadCompressedFieldFromOffset(CODE_REG, T0,
+ ASSERT(locs()->in(0).reg() == FUNCTION_REG);
+ // FUNCTION_REG: Function.
+ __ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
compiler::target::Function::code_offset());
// Closure functions only have one entry point.
- __ LoadFieldFromOffset(A1, T0,
+ __ LoadFieldFromOffset(A1, FUNCTION_REG,
compiler::target::Function::entry_point_offset());
}
- // T0: Function (argument to lazy compile stub)
- // S4: Arguments descriptor array.
+ // FUNCTION_REG: Function (argument to lazy compile stub)
+ // ARGS_DESC_REG: Arguments descriptor array.
// A1: instructions entry point.
if (!FLAG_precompiled_mode) {
// S5: Smi 0 (no IC data; the lazy-compile stub expects a GC-safe value).
diff --git a/runtime/vm/compiler/backend/il_x64.cc b/runtime/vm/compiler/backend/il_x64.cc
index 170932f..236ff1a 100644
--- a/runtime/vm/compiler/backend/il_x64.cc
+++ b/runtime/vm/compiler/backend/il_x64.cc
@@ -6916,37 +6916,40 @@
const intptr_t kNumTemps = 0;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
- summary->set_in(0, Location::RegisterLocation(RAX)); // Function.
+ summary->set_in(0, Location::RegisterLocation(
+ FLAG_precompiled_mode ? RAX : FUNCTION_REG));
return MakeCallSummary(zone, this, summary);
}
void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- // Arguments descriptor is expected in R10.
+ // Arguments descriptor is expected in ARGS_DESC_REG.
const intptr_t argument_count = ArgumentCount(); // Includes type args.
const Array& arguments_descriptor =
Array::ZoneHandle(Z, GetArgumentsDescriptor());
- __ LoadObject(R10, arguments_descriptor);
+ __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
- ASSERT(locs()->in(0).reg() == RAX);
if (FLAG_precompiled_mode) {
+ ASSERT(locs()->in(0).reg() == RAX);
// RAX: Closure with cached entry point.
__ movq(RCX, compiler::FieldAddress(
RAX, compiler::target::Closure::entry_point_offset()));
} else {
- // RAX: Function.
+ ASSERT(locs()->in(0).reg() == FUNCTION_REG);
+ // FUNCTION_REG: Function.
__ LoadCompressed(
- CODE_REG,
- compiler::FieldAddress(RAX, compiler::target::Function::code_offset()));
+ CODE_REG, compiler::FieldAddress(
+ FUNCTION_REG, compiler::target::Function::code_offset()));
// Closure functions only have one entry point.
__ movq(RCX, compiler::FieldAddress(
- RAX, compiler::target::Function::entry_point_offset()));
+ FUNCTION_REG,
+ compiler::target::Function::entry_point_offset()));
}
- // R10: Arguments descriptor array.
+ // ARGS_DESC_REG: Arguments descriptor array.
// RCX: instructions entry point.
if (!FLAG_precompiled_mode) {
// RBX: Smi 0 (no IC data; the lazy-compile stub expects a GC-safe value).
- __ xorq(RBX, RBX);
+ __ xorq(IC_DATA_REG, IC_DATA_REG);
}
__ call(RCX);
compiler->EmitCallsiteMetadata(source(), deopt_id(),
diff --git a/runtime/vm/compiler/stub_code_compiler.cc b/runtime/vm/compiler/stub_code_compiler.cc
index fd5b486..bd7219f 100644
--- a/runtime/vm/compiler/stub_code_compiler.cc
+++ b/runtime/vm/compiler/stub_code_compiler.cc
@@ -51,7 +51,6 @@
void StubCodeCompiler::GenerateInitLateStaticFieldStub(Assembler* assembler,
bool is_final) {
const Register kResultReg = InitStaticFieldABI::kResultReg;
- const Register kFunctionReg = InitLateStaticFieldInternalRegs::kFunctionReg;
const Register kFieldReg = InitStaticFieldABI::kFieldReg;
const Register kAddressReg = InitLateStaticFieldInternalRegs::kAddressReg;
const Register kScratchReg = InitLateStaticFieldInternalRegs::kScratchReg;
@@ -61,14 +60,14 @@
__ Comment("Calling initializer function");
__ PushRegister(kFieldReg);
__ LoadCompressedFieldFromOffset(
- kFunctionReg, kFieldReg, target::Field::initializer_function_offset());
+ FUNCTION_REG, kFieldReg, target::Field::initializer_function_offset());
if (!FLAG_precompiled_mode) {
- __ LoadCompressedFieldFromOffset(CODE_REG, kFunctionReg,
+ __ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
target::Function::code_offset());
// Load a GC-safe value for the arguments descriptor (unused but tagged).
__ LoadImmediate(ARGS_DESC_REG, 0);
}
- __ Call(FieldAddress(kFunctionReg, target::Function::entry_point_offset()));
+ __ Call(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
__ MoveRegister(kResultReg, CallingConventions::kReturnReg);
__ PopRegister(kFieldReg);
__ LoadStaticFieldAddress(kAddressReg, kFieldReg, kScratchReg);
@@ -123,7 +122,6 @@
void StubCodeCompiler::GenerateInitLateInstanceFieldStub(Assembler* assembler,
bool is_final) {
- const Register kFunctionReg = InitLateInstanceFieldInternalRegs::kFunctionReg;
const Register kInstanceReg = InitInstanceFieldABI::kInstanceReg;
const Register kFieldReg = InitInstanceFieldABI::kFieldReg;
const Register kAddressReg = InitLateInstanceFieldInternalRegs::kAddressReg;
@@ -139,15 +137,15 @@
"Result is a return value from initializer");
__ LoadCompressedFieldFromOffset(
- kFunctionReg, InitInstanceFieldABI::kFieldReg,
+ FUNCTION_REG, InitInstanceFieldABI::kFieldReg,
target::Field::initializer_function_offset());
if (!FLAG_precompiled_mode) {
- __ LoadCompressedFieldFromOffset(CODE_REG, kFunctionReg,
+ __ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
target::Function::code_offset());
// Load a GC-safe value for the arguments descriptor (unused but tagged).
__ LoadImmediate(ARGS_DESC_REG, 0);
}
- __ Call(FieldAddress(kFunctionReg, target::Function::entry_point_offset()));
+ __ Call(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
__ Drop(1); // Drop argument.
__ PopRegisterPair(kInstanceReg, kFieldReg);
diff --git a/runtime/vm/compiler/stub_code_compiler_arm.cc b/runtime/vm/compiler/stub_code_compiler_arm.cc
index c252342..5af92dc 100644
--- a/runtime/vm/compiler/stub_code_compiler_arm.cc
+++ b/runtime/vm/compiler/stub_code_compiler_arm.cc
@@ -645,17 +645,17 @@
}
// Input parameters:
-// R4: arguments descriptor array.
+// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
// Create a stub frame as we are pushing some objects on the stack before
// calling into the runtime.
__ EnterStubFrame();
// Setup space on stack for return value and preserve arguments descriptor.
__ LoadImmediate(R0, 0);
- __ PushList((1 << R0) | (1 << R4));
+ __ PushList((1 << R0) | (1 << ARGS_DESC_REG));
__ CallRuntime(kPatchStaticCallRuntimeEntry, 0);
// Get Code object result and restore arguments descriptor array.
- __ PopList((1 << R0) | (1 << R4));
+ __ PopList((1 << R0) | (1 << ARGS_DESC_REG));
// Remove the stub frame.
__ LeaveStubFrame();
// Jump to the dart function.
@@ -665,7 +665,7 @@
// Called from a static call only when an invalid code has been entered
// (invalid because its function was optimized or deoptimized).
-// R4: arguments descriptor array.
+// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
Label monomorphic;
__ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);
@@ -680,10 +680,10 @@
__ EnterStubFrame();
// Setup space on stack for return value and preserve arguments descriptor.
__ LoadImmediate(R0, 0);
- __ PushList((1 << R0) | (1 << R4));
+ __ PushList((1 << R0) | (1 << ARGS_DESC_REG));
__ CallRuntime(kFixCallersTargetRuntimeEntry, 0);
// Get Code object result and restore arguments descriptor array.
- __ PopList((1 << R0) | (1 << R4));
+ __ PopList((1 << R0) | (1 << ARGS_DESC_REG));
// Remove the stub frame.
__ LeaveStubFrame();
// Jump to the dart function.
@@ -958,27 +958,29 @@
__ Ret();
}
-// R9: ICData/MegamorphicCache
+// IC_DATA_REG: ICData/MegamorphicCache
static void GenerateNoSuchMethodDispatcherBody(Assembler* assembler) {
__ EnterStubFrame();
- __ ldr(R4,
- FieldAddress(R9, target::CallSiteData::arguments_descriptor_offset()));
+ __ ldr(ARGS_DESC_REG,
+ FieldAddress(IC_DATA_REG,
+ target::CallSiteData::arguments_descriptor_offset()));
// Load the receiver.
- __ ldr(R2, FieldAddress(R4, target::ArgumentsDescriptor::size_offset()));
+ __ ldr(R2, FieldAddress(ARGS_DESC_REG,
+ target::ArgumentsDescriptor::size_offset()));
__ add(IP, FP, Operand(R2, LSL, 1)); // R2 is Smi.
__ ldr(R8, Address(IP, target::frame_layout.param_end_from_fp *
target::kWordSize));
__ LoadImmediate(IP, 0);
__ Push(IP); // Result slot.
__ Push(R8); // Receiver.
- __ Push(R9); // ICData/MegamorphicCache.
- __ Push(R4); // Arguments descriptor.
+ __ Push(IC_DATA_REG); // ICData/MegamorphicCache.
+ __ Push(ARGS_DESC_REG); // Arguments descriptor.
// Adjust arguments count.
- __ ldr(R3,
- FieldAddress(R4, target::ArgumentsDescriptor::type_args_len_offset()));
+ __ ldr(R3, FieldAddress(ARGS_DESC_REG,
+ target::ArgumentsDescriptor::type_args_len_offset()));
__ cmp(R3, Operand(0));
__ AddImmediate(R2, R2, target::ToRawSmi(1),
NE); // Include the type arguments.
@@ -1005,8 +1007,8 @@
}
// Input:
-// R4 - arguments descriptor
-// R9 - icdata/megamorphic_cache
+// ARGS_DESC_REG - arguments descriptor
+// IC_DATA_REG - icdata/megamorphic_cache
void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub(
Assembler* assembler) {
GenerateNoSuchMethodDispatcherBody(assembler);
@@ -2011,11 +2013,10 @@
return;
}
if (FLAG_optimization_counter_threshold >= 0) {
- Register ic_reg = R9;
Register func_reg = temp_reg;
ASSERT(temp_reg == R8);
__ Comment("Increment function counter");
- __ ldr(func_reg, FieldAddress(ic_reg, target::ICData::owner_offset()));
+ __ ldr(func_reg, FieldAddress(IC_DATA_REG, target::ICData::owner_offset()));
__ ldr(TMP,
FieldAddress(func_reg, target::Function::usage_counter_offset()));
__ add(TMP, TMP, Operand(1));
@@ -2185,22 +2186,26 @@
if (type == kInstanceCall) {
__ LoadTaggedClassIdMayBeSmi(R0, R0);
- __ ldr(R4, FieldAddress(
- R9, target::CallSiteData::arguments_descriptor_offset()));
+ __ ldr(
+ ARGS_DESC_REG,
+ FieldAddress(R9, target::CallSiteData::arguments_descriptor_offset()));
if (num_args == 2) {
- __ ldr(R1, FieldAddress(R4, target::ArgumentsDescriptor::count_offset()));
+ __ ldr(R1, FieldAddress(ARGS_DESC_REG,
+ target::ArgumentsDescriptor::count_offset()));
__ sub(R1, R1, Operand(target::ToRawSmi(2)));
__ ldr(R1, Address(SP, R1, LSL, 1)); // R1 (argument_count - 2) is Smi.
__ LoadTaggedClassIdMayBeSmi(R1, R1);
}
} else {
// Load arguments descriptor into R4.
- __ ldr(R4, FieldAddress(
- R9, target::CallSiteData::arguments_descriptor_offset()));
+ __ ldr(
+ ARGS_DESC_REG,
+ FieldAddress(R9, target::CallSiteData::arguments_descriptor_offset()));
// Get the receiver's class ID (first read number of arguments from
// arguments descriptor array and then access the receiver from the stack).
- __ ldr(R1, FieldAddress(R4, target::ArgumentsDescriptor::count_offset()));
+ __ ldr(R1, FieldAddress(ARGS_DESC_REG,
+ target::ArgumentsDescriptor::count_offset()));
__ sub(R1, R1, Operand(target::ToRawSmi(1)));
// R1: argument_count - 1 (smi).
@@ -2255,7 +2260,8 @@
__ Bind(&miss);
__ Comment("IC miss");
// Compute address of arguments.
- __ ldr(R1, FieldAddress(R4, target::ArgumentsDescriptor::count_offset()));
+ __ ldr(R1, FieldAddress(ARGS_DESC_REG,
+ target::ArgumentsDescriptor::count_offset()));
__ sub(R1, R1, Operand(target::ToRawSmi(1)));
// R1: argument_count - 1 (smi).
__ add(R1, SP, Operand(R1, LSL, 1)); // R1 is Smi.
@@ -2266,7 +2272,7 @@
__ LoadImmediate(R0, 0);
// Preserve IC data object and arguments descriptor array and
// setup space on stack for result (target code object).
- RegList regs = (1 << R0) | (1 << R4) | (1 << R9);
+ RegList regs = (1 << R0) | (1 << ARGS_DESC_REG) | (1 << R9);
if (save_entry_point) {
__ SmiTag(R3);
regs |= 1 << R3;
@@ -2284,6 +2290,7 @@
__ Drop(num_args + 1);
// Pop returned function object into R0.
// Restore arguments descriptor array and IC data array.
+ COMPILE_ASSERT(FUNCTION_REG == R0);
__ PopList(regs);
if (save_entry_point) {
__ SmiUntag(R3);
@@ -2303,7 +2310,7 @@
target::ICData::TargetIndexFor(num_args) * target::kWordSize;
const intptr_t count_offset =
target::ICData::CountIndexFor(num_args) * target::kWordSize;
- __ LoadFromOffset(R0, R8, kIcDataOffset + target_offset);
+ __ LoadFromOffset(FUNCTION_REG, R8, kIcDataOffset + target_offset);
if (FLAG_optimization_counter_threshold >= 0) {
__ Comment("Update caller's counter");
@@ -2316,12 +2323,13 @@
__ Comment("Call target");
__ Bind(&call_target_function);
// R0: target function.
- __ ldr(CODE_REG, FieldAddress(R0, target::Function::code_offset()));
+ __ ldr(CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
if (save_entry_point) {
- __ Branch(Address(R0, R3));
+ __ Branch(Address(FUNCTION_REG, R3));
} else {
- __ Branch(FieldAddress(R0, target::Function::entry_point_offset()));
+ __ Branch(
+ FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
}
#if !defined(PRODUCT)
@@ -2488,14 +2496,14 @@
}
// Load arguments descriptor into R4.
- __ ldr(R4,
+ __ ldr(ARGS_DESC_REG,
FieldAddress(R9, target::CallSiteData::arguments_descriptor_offset()));
// Get function and call it, if possible.
- __ LoadFromOffset(R0, R8, target_offset);
- __ ldr(CODE_REG, FieldAddress(R0, target::Function::code_offset()));
+ __ LoadFromOffset(FUNCTION_REG, R8, target_offset);
+ __ ldr(CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
- __ Branch(Address(R0, R3));
+ __ Branch(Address(FUNCTION_REG, R3));
#if !defined(PRODUCT)
__ Bind(&stepping);
@@ -2532,17 +2540,19 @@
}
// Stub for compiling a function and jumping to the compiled code.
-// R4: Arguments descriptor.
-// R0: Function.
+// ARGS_DESC_REG: Arguments descriptor.
+// FUNCTION_REG: Function.
void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
__ EnterStubFrame();
- __ PushList((1 << R0) | (1 << R4)); // Preserve arg desc, pass function.
+ // Preserve arg desc, pass function.
+ COMPILE_ASSERT(FUNCTION_REG < ARGS_DESC_REG);
+ __ PushList((1 << FUNCTION_REG) | (1 << ARGS_DESC_REG));
__ CallRuntime(kCompileFunctionRuntimeEntry, 1);
- __ PopList((1 << R0) | (1 << R4));
+ __ PopList((1 << FUNCTION_REG) | (1 << ARGS_DESC_REG));
__ LeaveStubFrame();
- __ ldr(CODE_REG, FieldAddress(R0, target::Function::code_offset()));
- __ Branch(FieldAddress(R0, target::Function::entry_point_offset()));
+ __ ldr(CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
+ __ Branch(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
}
// R9: Contains an ICData.
@@ -2977,21 +2987,21 @@
// Calls to the runtime to optimize the given function.
// R8: function to be reoptimized.
-// R4: argument descriptor (preserved).
+// ARGS_DESC_REG: argument descriptor (preserved).
void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) {
__ ldr(CODE_REG, Address(THR, target::Thread::optimize_stub_offset()));
__ EnterStubFrame();
- __ Push(R4);
+ __ Push(ARGS_DESC_REG);
__ LoadImmediate(IP, 0);
__ Push(IP); // Setup space on stack for return value.
__ Push(R8);
__ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1);
__ Pop(R0); // Discard argument.
- __ Pop(R0); // Get Function object
- __ Pop(R4); // Restore argument descriptor.
+ __ Pop(FUNCTION_REG); // Get Function object
+ __ Pop(ARGS_DESC_REG); // Restore argument descriptor.
__ LeaveStubFrame();
- __ ldr(CODE_REG, FieldAddress(R0, target::Function::code_offset()));
- __ Branch(FieldAddress(R0, target::Function::entry_point_offset()));
+ __ ldr(CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
+ __ Branch(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
__ bkpt(0);
}
@@ -3107,16 +3117,18 @@
// Called from megamorphic calls.
// R0: receiver
-// R9: MegamorphicCache (preserved)
+// IC_DATA_REG: MegamorphicCache (preserved)
// Passed to target:
-// R0: function
-// R4: arguments descriptor
+// FUNCTION_REG: target function
+// ARGS_DESC_REG: arguments descriptor
// CODE_REG: target Code
void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
__ LoadTaggedClassIdMayBeSmi(R8, R0);
// R8: receiver cid as Smi.
- __ ldr(R2, FieldAddress(R9, target::MegamorphicCache::buckets_offset()));
- __ ldr(R1, FieldAddress(R9, target::MegamorphicCache::mask_offset()));
+ __ ldr(R2,
+ FieldAddress(IC_DATA_REG, target::MegamorphicCache::buckets_offset()));
+ __ ldr(R1,
+ FieldAddress(IC_DATA_REG, target::MegamorphicCache::mask_offset()));
// R2: cache buckets array.
// R1: mask as a smi.
@@ -3143,13 +3155,15 @@
// proper target for the given name and arguments descriptor. If the
// illegal class id was found, the target is a cache miss handler that can
// be invoked as a normal Dart function.
- __ ldr(R0, FieldAddress(IP, base + target::kWordSize));
+ __ ldr(FUNCTION_REG, FieldAddress(IP, base + target::kWordSize));
if (!FLAG_precompiled_mode) {
- __ ldr(CODE_REG, FieldAddress(R0, target::Function::code_offset()));
+ __ ldr(CODE_REG,
+ FieldAddress(FUNCTION_REG, target::Function::code_offset()));
}
__ ldr(ARGS_DESC_REG,
- FieldAddress(R9, target::CallSiteData::arguments_descriptor_offset()));
- __ Branch(FieldAddress(R0, target::Function::entry_point_offset()));
+ FieldAddress(IC_DATA_REG,
+ target::CallSiteData::arguments_descriptor_offset()));
+ __ Branch(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
// Probe failed, check if it is a miss.
__ Bind(&probe_failed);
@@ -3168,9 +3182,9 @@
void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
Label loop, found, miss;
- __ ldr(R8, FieldAddress(R9, target::ICData::entries_offset()));
- __ ldr(R4,
- FieldAddress(R9, target::CallSiteData::arguments_descriptor_offset()));
+ __ ldr(R8, FieldAddress(IC_DATA_REG, target::ICData::entries_offset()));
+ __ ldr(R4, FieldAddress(IC_DATA_REG,
+ target::CallSiteData::arguments_descriptor_offset()));
__ AddImmediate(R8, target::Array::data_offset() - kHeapObjectTag);
// R8: first IC entry
__ LoadTaggedClassIdMayBeSmi(R1, R0);
diff --git a/runtime/vm/compiler/stub_code_compiler_arm64.cc b/runtime/vm/compiler/stub_code_compiler_arm64.cc
index 0779276..ee14dd9 100644
--- a/runtime/vm/compiler/stub_code_compiler_arm64.cc
+++ b/runtime/vm/compiler/stub_code_compiler_arm64.cc
@@ -878,18 +878,18 @@
}
// Input parameters:
-// R4: arguments descriptor array.
+// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
// Create a stub frame as we are pushing some objects on the stack before
// calling into the runtime.
__ EnterStubFrame();
// Setup space on stack for return value and preserve arguments descriptor.
- __ Push(R4);
+ __ Push(ARGS_DESC_REG);
__ Push(ZR);
__ CallRuntime(kPatchStaticCallRuntimeEntry, 0);
// Get Code object result and restore arguments descriptor array.
__ Pop(CODE_REG);
- __ Pop(R4);
+ __ Pop(ARGS_DESC_REG);
// Remove the stub frame.
__ LeaveStubFrame();
// Jump to the dart function.
@@ -899,7 +899,7 @@
// Called from a static call only when an invalid code has been entered
// (invalid because its function was optimized or deoptimized).
-// R4: arguments descriptor array.
+// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
Label monomorphic;
__ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);
@@ -913,12 +913,12 @@
// calling into the runtime.
__ EnterStubFrame();
// Setup space on stack for return value and preserve arguments descriptor.
- __ Push(R4);
+ __ Push(ARGS_DESC_REG);
__ Push(ZR);
__ CallRuntime(kFixCallersTargetRuntimeEntry, 0);
// Get Code object result and restore arguments descriptor array.
__ Pop(CODE_REG);
- __ Pop(R4);
+ __ Pop(ARGS_DESC_REG);
// Remove the stub frame.
__ LeaveStubFrame();
// Jump to the dart function.
@@ -1204,27 +1204,28 @@
__ ret();
}
-// R5: ICData/MegamorphicCache
+// IC_DATA_REG: ICData/MegamorphicCache
static void GenerateNoSuchMethodDispatcherBody(Assembler* assembler) {
__ EnterStubFrame();
- __ ldr(R4,
- FieldAddress(R5, target::CallSiteData::arguments_descriptor_offset()));
+ __ ldr(ARGS_DESC_REG,
+ FieldAddress(IC_DATA_REG,
+ target::CallSiteData::arguments_descriptor_offset()));
// Load the receiver.
__ LoadCompressedSmiFieldFromOffset(
- R2, R4, target::ArgumentsDescriptor::size_offset());
+ R2, ARGS_DESC_REG, target::ArgumentsDescriptor::size_offset());
__ add(TMP, FP, Operand(R2, LSL, target::kWordSizeLog2 - 1)); // R2 is Smi.
__ LoadFromOffset(R6, TMP,
target::frame_layout.param_end_from_fp * target::kWordSize);
__ Push(ZR); // Result slot.
__ Push(R6); // Receiver.
- __ Push(R5); // ICData/MegamorphicCache.
- __ Push(R4); // Arguments descriptor.
+ __ Push(IC_DATA_REG); // ICData/MegamorphicCache.
+ __ Push(ARGS_DESC_REG); // Arguments descriptor.
// Adjust arguments count.
__ LoadCompressedSmiFieldFromOffset(
- R3, R4, target::ArgumentsDescriptor::type_args_len_offset());
+ R3, ARGS_DESC_REG, target::ArgumentsDescriptor::type_args_len_offset());
__ AddImmediate(TMP, R2, 1, kObjectBytes); // Include the type arguments.
__ cmp(R3, Operand(0), kObjectBytes);
// R2 <- (R3 == 0) ? R2 : TMP + 1 (R2 : R2 + 2).
@@ -1252,8 +1253,8 @@
}
// Input:
-// R4 - arguments descriptor
-// R5 - icdata/megamorphic_cache
+// ARGS_DESC_REG - arguments descriptor
+// IC_DATA_REG - icdata/megamorphic_cache
void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub(
Assembler* assembler) {
GenerateNoSuchMethodDispatcherBody(assembler);
@@ -2325,11 +2326,11 @@
return;
}
if (FLAG_optimization_counter_threshold >= 0) {
- Register ic_reg = R5;
Register func_reg = temp_reg;
ASSERT(temp_reg == R6);
__ Comment("Increment function counter");
- __ LoadFieldFromOffset(func_reg, ic_reg, target::ICData::owner_offset());
+ __ LoadFieldFromOffset(func_reg, IC_DATA_REG,
+ target::ICData::owner_offset());
__ LoadFieldFromOffset(
R7, func_reg, target::Function::usage_counter_offset(), kFourBytes);
__ AddImmediate(R7, 1);
@@ -2503,11 +2504,11 @@
if (type == kInstanceCall) {
__ LoadTaggedClassIdMayBeSmi(R0, R0);
- __ LoadFieldFromOffset(R4, R5,
+ __ LoadFieldFromOffset(ARGS_DESC_REG, R5,
target::CallSiteData::arguments_descriptor_offset());
if (num_args == 2) {
__ LoadCompressedSmiFieldFromOffset(
- R7, R4, target::ArgumentsDescriptor::count_offset());
+ R7, ARGS_DESC_REG, target::ArgumentsDescriptor::count_offset());
__ SmiUntag(R7); // Untag so we can use the LSL 3 addressing mode.
__ sub(R7, R7, Operand(2));
// R1 <- [SP + (R1 << 3)]
@@ -2515,12 +2516,12 @@
__ LoadTaggedClassIdMayBeSmi(R1, R1);
}
} else {
- __ LoadFieldFromOffset(R4, R5,
+ __ LoadFieldFromOffset(ARGS_DESC_REG, R5,
target::CallSiteData::arguments_descriptor_offset());
// Get the receiver's class ID (first read number of arguments from
// arguments descriptor array and then access the receiver from the stack).
__ LoadCompressedSmiFieldFromOffset(
- R7, R4, target::ArgumentsDescriptor::count_offset());
+ R7, ARGS_DESC_REG, target::ArgumentsDescriptor::count_offset());
__ SmiUntag(R7); // Untag so we can use the LSL 3 addressing mode.
__ sub(R7, R7, Operand(1));
// R0 <- [SP + (R7 << 3)]
@@ -2577,7 +2578,7 @@
// Compute address of arguments.
__ LoadCompressedSmiFieldFromOffset(
- R7, R4, target::ArgumentsDescriptor::count_offset());
+ R7, ARGS_DESC_REG, target::ArgumentsDescriptor::count_offset());
__ SmiUntag(R7); // Untag so we can use the LSL 3 addressing mode.
__ sub(R7, R7, Operand(1));
// R7: argument_count - 1 (untagged).
@@ -2589,7 +2590,7 @@
__ EnterStubFrame();
// Preserve IC data object and arguments descriptor array and
// setup space on stack for result (target code object).
- __ Push(R4); // Preserve arguments descriptor array.
+ __ Push(ARGS_DESC_REG); // Preserve arguments descriptor array.
__ Push(R5); // Preserve IC Data.
if (save_entry_point) {
__ SmiTag(R8);
@@ -2609,13 +2610,13 @@
__ Drop(num_args + 1);
// Pop returned function object into R0.
// Restore arguments descriptor array and IC data array.
- __ Pop(R0); // Pop returned function object into R0.
+ __ Pop(FUNCTION_REG); // Pop returned function object into R0.
if (save_entry_point) {
__ Pop(R8);
__ SmiUntag(R8);
}
__ Pop(R5); // Restore IC Data.
- __ Pop(R4); // Restore arguments descriptor array.
+ __ Pop(ARGS_DESC_REG); // Restore arguments descriptor array.
__ RestoreCodePointer();
__ LeaveStubFrame();
Label call_target_function;
@@ -2632,7 +2633,7 @@
target::ICData::TargetIndexFor(num_args) * target::kCompressedWordSize;
const intptr_t count_offset =
target::ICData::CountIndexFor(num_args) * target::kCompressedWordSize;
- __ LoadCompressedFromOffset(R0, R6, target_offset);
+ __ LoadCompressedFromOffset(FUNCTION_REG, R6, target_offset);
if (FLAG_optimization_counter_threshold >= 0) {
// Update counter, ignore overflow.
@@ -2644,13 +2645,14 @@
__ Comment("Call target");
__ Bind(&call_target_function);
// R0: target function.
- __ LoadCompressedFieldFromOffset(CODE_REG, R0,
+ __ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
target::Function::code_offset());
if (save_entry_point) {
- __ add(R2, R0, Operand(R8));
+ __ add(R2, FUNCTION_REG, Operand(R8));
__ ldr(R2, Address(R2, 0));
} else {
- __ LoadFieldFromOffset(R2, R0, target::Function::entry_point_offset());
+ __ LoadFieldFromOffset(R2, FUNCTION_REG,
+ target::Function::entry_point_offset());
}
__ br(R2);
@@ -2821,14 +2823,14 @@
}
// Load arguments descriptor into R4.
- __ LoadFieldFromOffset(R4, R5,
+ __ LoadFieldFromOffset(ARGS_DESC_REG, R5,
target::CallSiteData::arguments_descriptor_offset());
// Get function and call it, if possible.
- __ LoadCompressedFromOffset(R0, R6, target_offset);
- __ LoadCompressedFieldFromOffset(CODE_REG, R0,
+ __ LoadCompressedFromOffset(FUNCTION_REG, R6, target_offset);
+ __ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
target::Function::code_offset());
- __ add(R2, R0, Operand(R8));
+ __ add(R2, FUNCTION_REG, Operand(R8));
__ ldr(R2, Address(R2, 0));
__ br(R2);
@@ -2869,21 +2871,22 @@
}
// Stub for compiling a function and jumping to the compiled code.
-// R4: Arguments descriptor.
-// R0: Function.
+// ARGS_DESC_REG: Arguments descriptor.
+// FUNCTION_REG: Function.
void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
// Preserve arg desc.
__ EnterStubFrame();
- __ Push(R4); // Save arg. desc.
- __ Push(R0); // Pass function.
+ __ Push(ARGS_DESC_REG); // Save arg. desc.
+ __ Push(FUNCTION_REG); // Pass function.
__ CallRuntime(kCompileFunctionRuntimeEntry, 1);
- __ Pop(R0); // Restore argument.
- __ Pop(R4); // Restore arg desc.
+ __ Pop(FUNCTION_REG); // Restore function.
+ __ Pop(ARGS_DESC_REG); // Restore arg desc.
__ LeaveStubFrame();
- __ LoadCompressedFieldFromOffset(CODE_REG, R0,
+ __ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
target::Function::code_offset());
- __ LoadFieldFromOffset(R2, R0, target::Function::entry_point_offset());
+ __ LoadFieldFromOffset(R2, FUNCTION_REG,
+ target::Function::entry_point_offset());
__ br(R2);
}
@@ -3308,21 +3311,22 @@
// Calls to the runtime to optimize the given function.
// R6: function to be re-optimized.
-// R4: argument descriptor (preserved).
+// ARGS_DESC_REG: argument descriptor (preserved).
void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) {
__ LoadFromOffset(CODE_REG, THR, target::Thread::optimize_stub_offset());
__ EnterStubFrame();
- __ Push(R4);
+ __ Push(ARGS_DESC_REG);
// Setup space on stack for the return value.
__ Push(ZR);
__ Push(R6);
__ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1);
__ Pop(R0); // Discard argument.
- __ Pop(R0); // Get Function object
- __ Pop(R4); // Restore argument descriptor.
- __ LoadCompressedFieldFromOffset(CODE_REG, R0,
+ __ Pop(FUNCTION_REG); // Get Function object
+ __ Pop(ARGS_DESC_REG); // Restore argument descriptor.
+ __ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
target::Function::code_offset());
- __ LoadFieldFromOffset(R1, R0, target::Function::entry_point_offset());
+ __ LoadFieldFromOffset(R1, FUNCTION_REG,
+ target::Function::entry_point_offset());
__ LeaveStubFrame();
__ br(R1);
__ brk(0);
@@ -3421,12 +3425,11 @@
// Called from megamorphic call sites.
// R0: receiver (passed to target)
-// R5: MegamorphicCache (preserved)
+// IC_DATA_REG: MegamorphicCache (preserved)
// Passed to target:
-// R0: receiver
+// FUNCTION_REG: target function
// CODE_REG: target Code
-// R4: arguments descriptor
-// R5: MegamorphicCache
+// ARGS_DESC_REG: arguments descriptor
void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
// Jump if receiver is a smi.
Label smi_case;
@@ -3437,8 +3440,10 @@
Label cid_loaded;
__ Bind(&cid_loaded);
- __ ldr(R2, FieldAddress(R5, target::MegamorphicCache::buckets_offset()));
- __ ldr(R1, FieldAddress(R5, target::MegamorphicCache::mask_offset()));
+ __ ldr(R2,
+ FieldAddress(IC_DATA_REG, target::MegamorphicCache::buckets_offset()));
+ __ ldr(R1,
+ FieldAddress(IC_DATA_REG, target::MegamorphicCache::mask_offset()));
// R2: cache buckets array.
// R1: mask as a smi.
@@ -3471,13 +3476,16 @@
// illegal class id was found, the target is a cache miss handler that can
// be invoked as a normal Dart function.
__ LoadCompressed(
- R0, FieldAddress(TMP, base + target::kCompressedWordSize, kObjectBytes));
- __ ldr(R1, FieldAddress(R0, target::Function::entry_point_offset()));
+ FUNCTION_REG,
+ FieldAddress(TMP, base + target::kCompressedWordSize, kObjectBytes));
+ __ ldr(R1,
+ FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
__ ldr(ARGS_DESC_REG,
- FieldAddress(R5, target::CallSiteData::arguments_descriptor_offset()));
+ FieldAddress(IC_DATA_REG,
+ target::CallSiteData::arguments_descriptor_offset()));
if (!FLAG_precompiled_mode) {
- __ LoadCompressed(CODE_REG,
- FieldAddress(R0, target::Function::code_offset()));
+ __ LoadCompressed(
+ CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
}
__ br(R1);
@@ -3503,12 +3511,13 @@
// Input:
// R0 - receiver
-// R5 - icdata
+// IC_DATA_REG - icdata
void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
Label loop, found, miss;
- __ ldr(R8, FieldAddress(R5, target::ICData::entries_offset()));
- __ ldr(R4,
- FieldAddress(R5, target::CallSiteData::arguments_descriptor_offset()));
+ __ ldr(R8, FieldAddress(IC_DATA_REG, target::ICData::entries_offset()));
+ __ ldr(ARGS_DESC_REG,
+ FieldAddress(IC_DATA_REG,
+ target::CallSiteData::arguments_descriptor_offset()));
__ AddImmediate(R8, target::Array::data_offset() - kHeapObjectTag);
// R8: first IC entry
__ LoadTaggedClassIdMayBeSmi(R1, R0);
diff --git a/runtime/vm/compiler/stub_code_compiler_ia32.cc b/runtime/vm/compiler/stub_code_compiler_ia32.cc
index 60760eb..ad1f072 100644
--- a/runtime/vm/compiler/stub_code_compiler_ia32.cc
+++ b/runtime/vm/compiler/stub_code_compiler_ia32.cc
@@ -494,14 +494,14 @@
}
// Input parameters:
-// EDX: arguments descriptor array.
+// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
__ EnterStubFrame();
- __ pushl(EDX); // Preserve arguments descriptor array.
+ __ pushl(ARGS_DESC_REG); // Preserve arguments descriptor array.
__ pushl(Immediate(0)); // Setup space on stack for return value.
__ CallRuntime(kPatchStaticCallRuntimeEntry, 0);
__ popl(EAX); // Get Code object result.
- __ popl(EDX); // Restore arguments descriptor array.
+ __ popl(ARGS_DESC_REG); // Restore arguments descriptor array.
// Remove the stub frame as we are about to jump to the dart function.
__ LeaveFrame();
@@ -510,18 +510,18 @@
// Called from a static call only when an invalid code has been entered
// (invalid because its function was optimized or deoptimized).
-// EDX: arguments descriptor array.
+// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
Label monomorphic;
__ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);
// This was a static call.
__ EnterStubFrame();
- __ pushl(EDX); // Preserve arguments descriptor array.
+ __ pushl(ARGS_DESC_REG); // Preserve arguments descriptor array.
__ pushl(Immediate(0)); // Setup space on stack for return value.
__ CallRuntime(kFixCallersTargetRuntimeEntry, 0);
__ popl(EAX); // Get Code object.
- __ popl(EDX); // Restore arguments descriptor array.
+ __ popl(ARGS_DESC_REG); // Restore arguments descriptor array.
__ movl(EAX, FieldAddress(EAX, target::Code::entry_point_offset()));
__ LeaveFrame();
__ jmp(EAX);
@@ -1701,11 +1701,11 @@
void StubCodeCompiler::GenerateUsageCounterIncrement(Assembler* assembler,
Register temp_reg) {
if (FLAG_optimization_counter_threshold >= 0) {
- Register ic_reg = ECX;
Register func_reg = temp_reg;
- ASSERT(ic_reg != func_reg);
+ ASSERT(func_reg != IC_DATA_REG);
__ Comment("Increment function counter");
- __ movl(func_reg, FieldAddress(ic_reg, target::ICData::owner_offset()));
+ __ movl(func_reg,
+ FieldAddress(IC_DATA_REG, target::ICData::owner_offset()));
__ incl(FieldAddress(func_reg, target::Function::usage_counter_offset()));
}
}
@@ -1862,8 +1862,9 @@
__ Comment("Extract ICData initial values and receiver cid");
// ECX: IC data object (preserved).
// Load arguments descriptor into EDX.
- __ movl(EDX, FieldAddress(
- ECX, target::CallSiteData::arguments_descriptor_offset()));
+ __ movl(
+ ARGS_DESC_REG,
+ FieldAddress(ECX, target::CallSiteData::arguments_descriptor_offset()));
// Loop that checks if there is an IC data match.
Label loop, found, miss;
// ECX: IC data object (preserved).
@@ -1876,7 +1877,8 @@
// last time we need the argument descriptor, and we reuse EAX for the
// class IDs from the IC descriptor. In the 2-argument case we preserve
// the argument descriptor in EAX.
- __ movl(EAX, FieldAddress(EDX, target::ArgumentsDescriptor::count_offset()));
+ __ movl(EAX, FieldAddress(ARGS_DESC_REG,
+ target::ArgumentsDescriptor::count_offset()));
if (num_args == 1) {
// Load receiver into EDI.
__ movl(EDI,
@@ -1937,12 +1939,13 @@
__ Comment("IC miss");
// Compute address of arguments (first read number of arguments from
// arguments descriptor array and then compute address on the stack).
- __ movl(EAX, FieldAddress(EDX, target::ArgumentsDescriptor::count_offset()));
+ __ movl(EAX, FieldAddress(ARGS_DESC_REG,
+ target::ArgumentsDescriptor::count_offset()));
__ leal(EAX, Address(ESP, EAX, TIMES_2, 0)); // EAX is Smi.
// Create a stub frame as we are pushing some objects on the stack before
// calling into the runtime.
__ EnterStubFrame();
- __ pushl(EDX); // Preserve arguments descriptor array.
+ __ pushl(ARGS_DESC_REG); // Preserve arguments descriptor array.
__ pushl(ECX); // Preserve IC data object.
__ pushl(Immediate(0)); // Result slot.
// Push call arguments.
@@ -1956,9 +1959,9 @@
for (intptr_t i = 0; i < num_args + 1; i++) {
__ popl(EAX);
}
- __ popl(EAX); // Pop returned function object into EAX.
+ __ popl(FUNCTION_REG); // Pop returned function object into EAX.
__ popl(ECX); // Restore IC data array.
- __ popl(EDX); // Restore arguments descriptor array.
+ __ popl(ARGS_DESC_REG); // Restore arguments descriptor array.
__ LeaveFrame();
Label call_target_function;
if (!FLAG_lazy_dispatchers) {
@@ -1976,11 +1979,12 @@
__ addl(Address(EBX, count_offset), Immediate(target::ToRawSmi(1)));
}
- __ movl(EAX, Address(EBX, target_offset));
+ __ movl(FUNCTION_REG, Address(EBX, target_offset));
__ Bind(&call_target_function);
__ Comment("Call target");
// EAX: Target function.
- __ jmp(FieldAddress(EAX, target::Function::entry_point_offset(entry_kind)));
+ __ jmp(FieldAddress(FUNCTION_REG,
+ target::Function::entry_point_offset(entry_kind)));
#if !defined(PRODUCT)
if (optimized == kUnoptimized) {
@@ -2141,12 +2145,14 @@
}
// Load arguments descriptor into EDX.
- __ movl(EDX, FieldAddress(
- ECX, target::CallSiteData::arguments_descriptor_offset()));
+ __ movl(
+ ARGS_DESC_REG,
+ FieldAddress(ECX, target::CallSiteData::arguments_descriptor_offset()));
// Get function and call it, if possible.
- __ movl(EAX, Address(EBX, target_offset));
- __ jmp(FieldAddress(EAX, target::Function::entry_point_offset(entry_kind)));
+ __ movl(FUNCTION_REG, Address(EBX, target_offset));
+ __ jmp(FieldAddress(FUNCTION_REG,
+ target::Function::entry_point_offset(entry_kind)));
#if !defined(PRODUCT)
__ Bind(&stepping);
@@ -2187,18 +2193,18 @@
}
// Stub for compiling a function and jumping to the compiled code.
-// EDX: Arguments descriptor.
-// EAX: Function.
+// ARGS_DESC_REG: Arguments descriptor.
+// FUNCTION_REG: Function.
void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
__ EnterStubFrame();
- __ pushl(EDX); // Preserve arguments descriptor array.
- __ pushl(EAX); // Pass function.
+ __ pushl(ARGS_DESC_REG); // Preserve arguments descriptor array.
+ __ pushl(FUNCTION_REG); // Pass function.
__ CallRuntime(kCompileFunctionRuntimeEntry, 1);
- __ popl(EAX); // Restore function.
- __ popl(EDX); // Restore arguments descriptor array.
+ __ popl(FUNCTION_REG); // Restore function.
+ __ popl(ARGS_DESC_REG); // Restore arguments descriptor array.
__ LeaveFrame();
- __ jmp(FieldAddress(EAX, target::Function::entry_point_offset()));
+ __ jmp(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
}
// ECX: Contains an ICData.
@@ -2612,20 +2618,21 @@
// Calls to the runtime to optimize the given function.
// EBX: function to be reoptimized.
-// EDX: argument descriptor (preserved).
+// ARGS_DESC_REG: argument descriptor (preserved).
void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) {
__ movl(CODE_REG, Address(THR, target::Thread::optimize_stub_offset()));
__ EnterStubFrame();
- __ pushl(EDX);
+ __ pushl(ARGS_DESC_REG);
__ pushl(Immediate(0)); // Setup space on stack for return value.
__ pushl(EBX);
__ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1);
__ popl(EAX); // Discard argument.
- __ popl(EAX); // Get Function object
- __ popl(EDX); // Restore argument descriptor.
+ __ popl(FUNCTION_REG); // Get Function object
+ __ popl(ARGS_DESC_REG); // Restore argument descriptor.
__ LeaveFrame();
- __ movl(CODE_REG, FieldAddress(EAX, target::Function::code_offset()));
- __ jmp(FieldAddress(EAX, target::Function::entry_point_offset()));
+ __ movl(CODE_REG,
+ FieldAddress(FUNCTION_REG, target::Function::code_offset()));
+ __ jmp(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
__ int3();
}
@@ -2735,10 +2742,11 @@
// Called from megamorphic calls.
// EBX: receiver (passed to target)
-// ECX: target::MegamorphicCache (preserved)
+// IC_DATA_REG: target::MegamorphicCache (preserved)
// Passed to target:
// EBX: target entry point
-// EDX: argument descriptor
+// FUNCTION_REG: target function
+// ARGS_DESC_REG: argument descriptor
void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
// Jump if receiver is a smi.
Label smi_case;
@@ -2753,8 +2761,10 @@
Label cid_loaded;
__ Bind(&cid_loaded);
__ pushl(EBX); // save receiver
- __ movl(EBX, FieldAddress(ECX, target::MegamorphicCache::mask_offset()));
- __ movl(EDI, FieldAddress(ECX, target::MegamorphicCache::buckets_offset()));
+ __ movl(EBX,
+ FieldAddress(IC_DATA_REG, target::MegamorphicCache::mask_offset()));
+ __ movl(EDI, FieldAddress(IC_DATA_REG,
+ target::MegamorphicCache::buckets_offset()));
// EDI: cache buckets array.
// EBX: mask as a smi.
@@ -2783,11 +2793,13 @@
// proper target for the given name and arguments descriptor. If the
// illegal class id was found, the target is a cache miss handler that can
// be invoked as a normal Dart function.
- __ movl(EAX, FieldAddress(EDI, EDX, TIMES_4, base + target::kWordSize));
- __ movl(EDX, FieldAddress(
- ECX, target::CallSiteData::arguments_descriptor_offset()));
+ __ movl(FUNCTION_REG,
+ FieldAddress(EDI, EDX, TIMES_4, base + target::kWordSize));
+ __ movl(ARGS_DESC_REG,
+ FieldAddress(IC_DATA_REG,
+ target::CallSiteData::arguments_descriptor_offset()));
__ popl(EBX); // restore receiver
- __ jmp(FieldAddress(EAX, target::Function::entry_point_offset()));
+ __ jmp(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
__ Bind(&probe_failed);
// Probe failed, check if it is a miss.
diff --git a/runtime/vm/compiler/stub_code_compiler_riscv.cc b/runtime/vm/compiler/stub_code_compiler_riscv.cc
index 9fcc71b..0faaa88 100644
--- a/runtime/vm/compiler/stub_code_compiler_riscv.cc
+++ b/runtime/vm/compiler/stub_code_compiler_riscv.cc
@@ -697,17 +697,19 @@
}
// Input parameters:
-// S4: arguments descriptor array.
+// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
// Create a stub frame as we are pushing some objects on the stack before
// calling into the runtime.
__ EnterStubFrame();
__ subi(SP, SP, 2 * target::kWordSize);
- __ sx(S4, Address(SP, 1 * target::kWordSize)); // Preserve args descriptor.
+ __ sx(ARGS_DESC_REG,
+ Address(SP, 1 * target::kWordSize)); // Preserve args descriptor.
__ sx(ZR, Address(SP, 0 * target::kWordSize)); // Result slot.
__ CallRuntime(kPatchStaticCallRuntimeEntry, 0);
__ lx(CODE_REG, Address(SP, 0 * target::kWordSize)); // Result.
- __ lx(S4, Address(SP, 1 * target::kWordSize)); // Restore args descriptor.
+ __ lx(ARGS_DESC_REG,
+ Address(SP, 1 * target::kWordSize)); // Restore args descriptor.
__ addi(SP, SP, 2 * target::kWordSize);
__ LeaveStubFrame();
// Jump to the dart function.
@@ -717,7 +719,7 @@
// Called from a static call only when an invalid code has been entered
// (invalid because its function was optimized or deoptimized).
-// S4: arguments descriptor array.
+// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
Label monomorphic;
__ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);
@@ -731,11 +733,11 @@
// calling into the runtime.
__ EnterStubFrame();
// Setup space on stack for return value and preserve arguments descriptor.
- __ PushRegistersInOrder({S4, ZR});
+ __ PushRegistersInOrder({ARGS_DESC_REG, ZR});
__ CallRuntime(kFixCallersTargetRuntimeEntry, 0);
// Get Code object result and restore arguments descriptor array.
__ PopRegister(CODE_REG);
- __ PopRegister(S4);
+ __ PopRegister(ARGS_DESC_REG);
// Remove the stub frame.
__ LeaveStubFrame();
// Jump to the dart function.
@@ -1021,27 +1023,28 @@
__ ret();
}
-// S5: ICData/MegamorphicCache
+// IC_DATA_REG: ICData/MegamorphicCache
static void GenerateNoSuchMethodDispatcherBody(Assembler* assembler) {
__ EnterStubFrame();
- __ lx(S4,
- FieldAddress(S5, target::CallSiteData::arguments_descriptor_offset()));
+ __ lx(ARGS_DESC_REG,
+ FieldAddress(IC_DATA_REG,
+ target::CallSiteData::arguments_descriptor_offset()));
// Load the receiver.
__ LoadCompressedSmiFieldFromOffset(
- T2, S4, target::ArgumentsDescriptor::size_offset());
+ T2, ARGS_DESC_REG, target::ArgumentsDescriptor::size_offset());
__ slli(TMP, T2, target::kWordSizeLog2 - 1); // T2 is Smi.
__ add(TMP, TMP, FP);
__ LoadFromOffset(A0, TMP,
target::frame_layout.param_end_from_fp * target::kWordSize);
// Push: result slot, receiver, ICData/MegamorphicCache,
// arguments descriptor.
- __ PushRegistersInOrder({ZR, A0, S5, S4});
+ __ PushRegistersInOrder({ZR, A0, IC_DATA_REG, ARGS_DESC_REG});
// Adjust arguments count.
__ LoadCompressedSmiFieldFromOffset(
- T3, S4, target::ArgumentsDescriptor::type_args_len_offset());
+ T3, ARGS_DESC_REG, target::ArgumentsDescriptor::type_args_len_offset());
Label args_count_ok;
__ beqz(T3, &args_count_ok, Assembler::kNearJump);
// Include the type arguments.
@@ -1069,8 +1072,8 @@
}
// Input:
-// S4 - arguments descriptor
-// S5 - icdata/megamorphic_cache
+// ARGS_DESC_REG - arguments descriptor
+// IC_DATA_REG - icdata/megamorphic_cache
void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub(
Assembler* assembler) {
GenerateNoSuchMethodDispatcherBody(assembler);
@@ -2398,7 +2401,7 @@
__ Drop(num_args + 1);
// Pop returned function object into R0.
// Restore arguments descriptor array and IC data array.
- __ PopRegister(T0); // Pop returned function object into T0.
+  __ PopRegister(FUNCTION_REG);  // Pop returned function object into FUNCTION_REG.
if (save_entry_point) {
__ PopRegister(T6);
__ SmiUntag(T6);
@@ -2421,7 +2424,7 @@
target::ICData::TargetIndexFor(num_args) * target::kCompressedWordSize;
const intptr_t count_offset =
target::ICData::CountIndexFor(num_args) * target::kCompressedWordSize;
- __ LoadCompressedFromOffset(T0, A1, target_offset);
+ __ LoadCompressedFromOffset(FUNCTION_REG, A1, target_offset);
if (FLAG_optimization_counter_threshold >= 0) {
// Update counter, ignore overflow.
@@ -2433,15 +2436,16 @@
__ Comment("Call target");
__ Bind(&call_target_function);
// T0: target function.
- __ LoadCompressedFieldFromOffset(CODE_REG, T0,
+ __ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
target::Function::code_offset());
if (save_entry_point) {
- __ add(A7, T0, T6);
+ __ add(A7, FUNCTION_REG, T6);
__ lx(A7, Address(A7, 0));
} else {
- __ LoadFieldFromOffset(A7, T0, target::Function::entry_point_offset());
+ __ LoadFieldFromOffset(A7, FUNCTION_REG,
+ target::Function::entry_point_offset());
}
- __ jr(A7); // T0: Function, argument to lazy compile stub.
+ __ jr(A7); // FUNCTION_REG: Function, argument to lazy compile stub.
#if !defined(PRODUCT)
if (optimized == kUnoptimized) {
@@ -2614,12 +2618,12 @@
target::CallSiteData::arguments_descriptor_offset());
// Get function and call it, if possible.
- __ LoadCompressedFromOffset(T0, A0, target_offset);
- __ LoadCompressedFieldFromOffset(CODE_REG, T0,
+ __ LoadCompressedFromOffset(FUNCTION_REG, A0, target_offset);
+ __ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
target::Function::code_offset());
- __ add(A0, T0, T6);
+ __ add(A0, FUNCTION_REG, T6);
__ lx(TMP, Address(A0, 0));
- __ jr(TMP); // T0: Function, argument to lazy compile stub.
+ __ jr(TMP); // FUNCTION_REG: Function, argument to lazy compile stub.
#if !defined(PRODUCT)
__ Bind(&stepping);
@@ -2658,21 +2662,22 @@
}
// Stub for compiling a function and jumping to the compiled code.
-// S4: Arguments descriptor.
-// T0: Function.
+// ARGS_DESC_REG: Arguments descriptor.
+// FUNCTION_REG: Function.
void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
// Preserve arg desc.
__ EnterStubFrame();
// Save arguments descriptor and pass function.
- __ PushRegistersInOrder({ARGS_DESC_REG, T0});
+ __ PushRegistersInOrder({ARGS_DESC_REG, FUNCTION_REG});
__ CallRuntime(kCompileFunctionRuntimeEntry, 1);
- __ PopRegister(T0); // Restore argument.
+ __ PopRegister(FUNCTION_REG); // Restore function.
__ PopRegister(ARGS_DESC_REG); // Restore arg desc.
__ LeaveStubFrame();
- __ LoadCompressedFieldFromOffset(CODE_REG, T0,
+ __ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
target::Function::code_offset());
- __ LoadFieldFromOffset(TMP, T0, target::Function::entry_point_offset());
+ __ LoadFieldFromOffset(TMP, FUNCTION_REG,
+ target::Function::entry_point_offset());
__ jr(TMP);
}
@@ -3077,23 +3082,26 @@
// Calls to the runtime to optimize the given function.
// A0: function to be re-optimized.
-// S4: argument descriptor (preserved).
+// ARGS_DESC_REG: argument descriptor (preserved).
void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) {
__ LoadFromOffset(CODE_REG, THR, target::Thread::optimize_stub_offset());
__ EnterStubFrame();
__ subi(SP, SP, 3 * target::kWordSize);
- __ sx(S4, Address(SP, 2 * target::kWordSize)); // Preserves args descriptor.
+ __ sx(ARGS_DESC_REG,
+ Address(SP, 2 * target::kWordSize)); // Preserves args descriptor.
__ sx(ZR, Address(SP, 1 * target::kWordSize)); // Result slot.
__ sx(A0, Address(SP, 0 * target::kWordSize)); // Function argument.
__ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1);
- __ lx(T0, Address(SP, 1 * target::kWordSize)); // Function result.
- __ lx(S4, Address(SP, 2 * target::kWordSize)); // Restore args descriptor.
+ __ lx(FUNCTION_REG, Address(SP, 1 * target::kWordSize)); // Function result.
+ __ lx(ARGS_DESC_REG,
+ Address(SP, 2 * target::kWordSize)); // Restore args descriptor.
__ addi(SP, SP, 3 * target::kWordSize);
- __ LoadCompressedFieldFromOffset(CODE_REG, T0,
+ __ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
target::Function::code_offset());
- __ LoadFieldFromOffset(A1, T0, target::Function::entry_point_offset());
+ __ LoadFieldFromOffset(A1, FUNCTION_REG,
+ target::Function::entry_point_offset());
__ LeaveStubFrame();
__ jr(A1);
__ ebreak();
@@ -3209,12 +3217,11 @@
// Called from megamorphic call sites.
// A0: receiver (passed to target)
-// S5: MegamorphicCache (preserved)
+// IC_DATA_REG: MegamorphicCache (preserved)
// Passed to target:
-// A0: receiver
+// FUNCTION_REG: target function
// CODE_REG: target Code
-// S4: arguments descriptor
-// S5: MegamorphicCache
+// ARGS_DESC_REG: arguments descriptor
void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
// Jump if receiver is a smi.
Label smi_case;
@@ -3225,8 +3232,9 @@
Label cid_loaded;
__ Bind(&cid_loaded);
- __ lx(T2, FieldAddress(S5, target::MegamorphicCache::buckets_offset()));
- __ lx(T1, FieldAddress(S5, target::MegamorphicCache::mask_offset()));
+ __ lx(T2,
+ FieldAddress(IC_DATA_REG, target::MegamorphicCache::buckets_offset()));
+ __ lx(T1, FieldAddress(IC_DATA_REG, target::MegamorphicCache::mask_offset()));
// T2: cache buckets array.
// T1: mask as a smi.
@@ -3259,13 +3267,15 @@
// proper target for the given name and arguments descriptor. If the
// illegal class id was found, the target is a cache miss handler that can
// be invoked as a normal Dart function.
- __ LoadCompressed(T0, FieldAddress(TMP, base + target::kCompressedWordSize));
- __ lx(A1, FieldAddress(T0, target::Function::entry_point_offset()));
+ __ LoadCompressed(FUNCTION_REG,
+ FieldAddress(TMP, base + target::kCompressedWordSize));
+ __ lx(A1, FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
__ lx(ARGS_DESC_REG,
- FieldAddress(S5, target::CallSiteData::arguments_descriptor_offset()));
+ FieldAddress(IC_DATA_REG,
+ target::CallSiteData::arguments_descriptor_offset()));
if (!FLAG_precompiled_mode) {
- __ LoadCompressed(CODE_REG,
- FieldAddress(T0, target::Function::code_offset()));
+ __ LoadCompressed(
+ CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
}
__ jr(A1); // T0: Function, argument to lazy compile stub.
@@ -3290,12 +3300,13 @@
// Input:
// A0 - receiver
-// S5 - icdata
+// IC_DATA_REG - icdata
void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
Label loop, found, miss;
- __ lx(T1, FieldAddress(S5, target::ICData::entries_offset()));
- __ lx(S4,
- FieldAddress(S5, target::CallSiteData::arguments_descriptor_offset()));
+ __ lx(T1, FieldAddress(IC_DATA_REG, target::ICData::entries_offset()));
+ __ lx(ARGS_DESC_REG,
+ FieldAddress(IC_DATA_REG,
+ target::CallSiteData::arguments_descriptor_offset()));
__ AddImmediate(T1, target::Array::data_offset() - kHeapObjectTag);
// T1: first IC entry
__ LoadTaggedClassIdMayBeSmi(A1, A0);
diff --git a/runtime/vm/compiler/stub_code_compiler_x64.cc b/runtime/vm/compiler/stub_code_compiler_x64.cc
index ebbf71e..2b8bb2a 100644
--- a/runtime/vm/compiler/stub_code_compiler_x64.cc
+++ b/runtime/vm/compiler/stub_code_compiler_x64.cc
@@ -794,15 +794,15 @@
}
// Input parameters:
-// R10: arguments descriptor array.
+// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
__ EnterStubFrame();
- __ pushq(R10); // Preserve arguments descriptor array.
+ __ pushq(ARGS_DESC_REG); // Preserve arguments descriptor array.
// Setup space on stack for return value.
__ pushq(Immediate(0));
__ CallRuntime(kPatchStaticCallRuntimeEntry, 0);
__ popq(CODE_REG); // Get Code object result.
- __ popq(R10); // Restore arguments descriptor array.
+ __ popq(ARGS_DESC_REG); // Restore arguments descriptor array.
// Remove the stub frame as we are about to jump to the dart function.
__ LeaveStubFrame();
@@ -812,7 +812,7 @@
// Called from a static call only when an invalid code has been entered
// (invalid because its function was optimized or deoptimized).
-// R10: arguments descriptor array.
+// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
Label monomorphic;
__ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);
@@ -824,12 +824,12 @@
__ movq(CODE_REG,
Address(THR, target::Thread::fix_callers_target_code_offset()));
__ EnterStubFrame();
- __ pushq(R10); // Preserve arguments descriptor array.
+ __ pushq(ARGS_DESC_REG); // Preserve arguments descriptor array.
// Setup space on stack for return value.
__ pushq(Immediate(0));
__ CallRuntime(kFixCallersTargetRuntimeEntry, 0);
__ popq(CODE_REG); // Get Code object.
- __ popq(R10); // Restore arguments descriptor array.
+ __ popq(ARGS_DESC_REG); // Restore arguments descriptor array.
__ movq(RAX, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
__ LeaveStubFrame();
__ jmp(RAX);
@@ -1112,19 +1112,19 @@
}
// Input:
-// RBX - icdata/megamorphic_cache
+// IC_DATA_REG - icdata/megamorphic_cache
// RDI - arguments descriptor size
static void GenerateNoSuchMethodDispatcherBody(Assembler* assembler,
Register receiver_reg) {
__ pushq(Immediate(0)); // Setup space on stack for result.
__ pushq(receiver_reg); // Receiver.
- __ pushq(RBX); // ICData/MegamorphicCache.
- __ pushq(R10); // Arguments descriptor array.
+ __ pushq(IC_DATA_REG); // ICData/MegamorphicCache.
+ __ pushq(ARGS_DESC_REG); // Arguments descriptor array.
// Adjust arguments count.
- __ OBJ(cmp)(
- FieldAddress(R10, target::ArgumentsDescriptor::type_args_len_offset()),
- Immediate(0));
+ __ OBJ(cmp)(FieldAddress(ARGS_DESC_REG,
+ target::ArgumentsDescriptor::type_args_len_offset()),
+ Immediate(0));
__ OBJ(mov)(R10, RDI);
Label args_count_ok;
__ j(EQUAL, &args_count_ok, Assembler::kNearJump);
@@ -1143,8 +1143,8 @@
}
// Input:
-// RBX - icdata/megamorphic_cache
-// R10 - argument descriptor
+// IC_DATA_REG - icdata/megamorphic_cache
+// ARGS_DESC_REG - argument descriptor
static void GenerateDispatcherCode(Assembler* assembler,
Label* call_target_function) {
__ Comment("NoSuchMethodDispatch");
@@ -1155,8 +1155,8 @@
__ EnterStubFrame();
// Load the receiver.
- __ OBJ(mov)(RDI,
- FieldAddress(R10, target::ArgumentsDescriptor::size_offset()));
+ __ OBJ(mov)(RDI, FieldAddress(ARGS_DESC_REG,
+ target::ArgumentsDescriptor::size_offset()));
__ movq(RAX,
Address(RBP, RDI, TIMES_HALF_WORD_SIZE,
target::frame_layout.param_end_from_fp * target::kWordSize));
@@ -1165,16 +1165,17 @@
}
// Input:
-// RBX - icdata/megamorphic_cache
+// IC_DATA_REG - icdata/megamorphic_cache
// RDX - receiver
void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub(
Assembler* assembler) {
__ EnterStubFrame();
- __ movq(R10, FieldAddress(
- RBX, target::CallSiteData::arguments_descriptor_offset()));
- __ OBJ(mov)(RDI,
- FieldAddress(R10, target::ArgumentsDescriptor::size_offset()));
+ __ movq(ARGS_DESC_REG,
+ FieldAddress(IC_DATA_REG,
+ target::CallSiteData::arguments_descriptor_offset()));
+ __ OBJ(mov)(RDI, FieldAddress(ARGS_DESC_REG,
+ target::ArgumentsDescriptor::size_offset()));
GenerateNoSuchMethodDispatcherBody(assembler, /*receiver_reg=*/RDX);
}
@@ -2221,7 +2222,7 @@
__ incl(FieldAddress(func_reg, target::Function::usage_counter_offset()));
}
-// Loads function into 'temp_reg', preserves 'ic_reg'.
+// Loads function into 'temp_reg', preserves IC_DATA_REG.
void StubCodeCompiler::GenerateUsageCounterIncrement(Assembler* assembler,
Register temp_reg) {
if (FLAG_precompiled_mode) {
@@ -2229,11 +2230,11 @@
return;
}
if (FLAG_optimization_counter_threshold >= 0) {
- Register ic_reg = RBX;
Register func_reg = temp_reg;
- ASSERT(ic_reg != func_reg);
+ ASSERT(func_reg != IC_DATA_REG);
__ Comment("Increment function counter");
- __ movq(func_reg, FieldAddress(ic_reg, target::ICData::owner_offset()));
+ __ movq(func_reg,
+ FieldAddress(IC_DATA_REG, target::ICData::owner_offset()));
__ incl(FieldAddress(func_reg, target::Function::usage_counter_offset()));
}
}
@@ -2405,19 +2406,22 @@
if (type == kInstanceCall) {
__ LoadTaggedClassIdMayBeSmi(RAX, RDX);
- __ movq(R10, FieldAddress(
- RBX, target::CallSiteData::arguments_descriptor_offset()));
+ __ movq(
+ ARGS_DESC_REG,
+ FieldAddress(RBX, target::CallSiteData::arguments_descriptor_offset()));
if (num_args == 2) {
- __ OBJ(mov)(
- RCX, FieldAddress(R10, target::ArgumentsDescriptor::count_offset()));
+ __ OBJ(mov)(RCX,
+ FieldAddress(ARGS_DESC_REG,
+ target::ArgumentsDescriptor::count_offset()));
__ movq(R9, Address(RSP, RCX, TIMES_4, -target::kWordSize));
__ LoadTaggedClassIdMayBeSmi(RCX, R9);
}
} else {
- __ movq(R10, FieldAddress(
- RBX, target::CallSiteData::arguments_descriptor_offset()));
- __ OBJ(mov)(RCX,
- FieldAddress(R10, target::ArgumentsDescriptor::count_offset()));
+ __ movq(
+ ARGS_DESC_REG,
+ FieldAddress(RBX, target::CallSiteData::arguments_descriptor_offset()));
+ __ OBJ(mov)(RCX, FieldAddress(ARGS_DESC_REG,
+ target::ArgumentsDescriptor::count_offset()));
__ movq(RDX, Address(RSP, RCX, TIMES_4, 0));
__ LoadTaggedClassIdMayBeSmi(RAX, RDX);
if (num_args == 2) {
@@ -2474,15 +2478,15 @@
__ Comment("IC miss");
// Compute address of arguments (first read number of arguments from
// arguments descriptor array and then compute address on the stack).
- __ OBJ(mov)(RAX,
- FieldAddress(R10, target::ArgumentsDescriptor::count_offset()));
+ __ OBJ(mov)(RAX, FieldAddress(ARGS_DESC_REG,
+ target::ArgumentsDescriptor::count_offset()));
__ leaq(RAX, Address(RSP, RAX, TIMES_4, 0)); // RAX is Smi.
__ EnterStubFrame();
if (save_entry_point) {
__ SmiTag(R8); // Entry-point offset is not Smi.
__ pushq(R8); // Preserve entry point.
}
- __ pushq(R10); // Preserve arguments descriptor array.
+ __ pushq(ARGS_DESC_REG); // Preserve arguments descriptor array.
__ pushq(RBX); // Preserve IC data object.
__ pushq(Immediate(0)); // Result slot.
// Push call arguments.
@@ -2496,9 +2500,9 @@
for (intptr_t i = 0; i < num_args + 1; i++) {
__ popq(RAX);
}
- __ popq(RAX); // Pop returned function object into RAX.
+  __ popq(FUNCTION_REG);  // Pop returned function object into FUNCTION_REG.
__ popq(RBX); // Restore IC data array.
- __ popq(R10); // Restore arguments descriptor array.
+ __ popq(ARGS_DESC_REG); // Restore arguments descriptor array.
if (save_entry_point) {
__ popq(R8); // Restore entry point.
__ SmiUntag(R8); // Entry-point offset is not Smi.
@@ -2547,7 +2551,7 @@
StaticTypeExactnessState::NotExact().Encode())));
__ Bind(&exactness_ok);
}
- __ LoadCompressed(RAX, Address(R13, target_offset));
+ __ LoadCompressed(FUNCTION_REG, Address(R13, target_offset));
if (FLAG_optimization_counter_threshold >= 0) {
__ Comment("Update ICData counter");
@@ -2558,13 +2562,13 @@
__ Comment("Call target (via specified entry point)");
__ Bind(&call_target_function);
// RAX: Target function.
- __ LoadCompressed(CODE_REG,
- FieldAddress(RAX, target::Function::code_offset()));
+ __ LoadCompressed(
+ CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
if (save_entry_point) {
__ addq(R8, RAX);
__ jmp(Address(R8, 0));
} else {
- __ jmp(FieldAddress(RAX, target::Function::entry_point_offset()));
+ __ jmp(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
}
if (exactness == kCheckExactness) {
@@ -2575,11 +2579,11 @@
__ addq(Address(R13, count_offset), Immediate(target::ToRawSmi(1)));
}
__ Comment("Call target (via unchecked entry point)");
- __ LoadCompressed(RAX, Address(R13, target_offset));
- __ LoadCompressed(CODE_REG,
- FieldAddress(RAX, target::Function::code_offset()));
- __ jmp(FieldAddress(
- RAX, target::Function::entry_point_offset(CodeEntryKind::kUnchecked)));
+ __ LoadCompressed(FUNCTION_REG, Address(R13, target_offset));
+ __ LoadCompressed(
+ CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
+ __ jmp(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset(
+ CodeEntryKind::kUnchecked)));
}
#if !defined(PRODUCT)
@@ -2753,15 +2757,16 @@
}
// Load arguments descriptor into R10.
- __ movq(R10, FieldAddress(
- RBX, target::CallSiteData::arguments_descriptor_offset()));
+ __ movq(
+ ARGS_DESC_REG,
+ FieldAddress(RBX, target::CallSiteData::arguments_descriptor_offset()));
// Get function and call it, if possible.
- __ LoadCompressed(RAX, Address(R12, target_offset));
- __ LoadCompressed(CODE_REG,
- FieldAddress(RAX, target::Function::code_offset()));
+ __ LoadCompressed(FUNCTION_REG, Address(R12, target_offset));
+ __ LoadCompressed(
+ CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
- __ addq(R8, RAX);
+ __ addq(R8, FUNCTION_REG);
__ jmp(Address(R8, 0));
#if !defined(PRODUCT)
@@ -2799,20 +2804,21 @@
}
// Stub for compiling a function and jumping to the compiled code.
-// R10: Arguments descriptor.
-// RAX: Function.
+// ARGS_DESC_REG: Arguments descriptor.
+// FUNCTION_REG: Function.
void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
__ EnterStubFrame();
- __ pushq(R10); // Preserve arguments descriptor array.
- __ pushq(RAX); // Pass function.
+ __ pushq(ARGS_DESC_REG); // Preserve arguments descriptor array.
+ __ pushq(FUNCTION_REG); // Pass function.
__ CallRuntime(kCompileFunctionRuntimeEntry, 1);
- __ popq(RAX); // Restore function.
- __ popq(R10); // Restore arguments descriptor array.
+ __ popq(FUNCTION_REG); // Restore function.
+ __ popq(ARGS_DESC_REG); // Restore arguments descriptor array.
__ LeaveStubFrame();
- __ LoadCompressed(CODE_REG,
- FieldAddress(RAX, target::Function::code_offset()));
- __ movq(RCX, FieldAddress(RAX, target::Function::entry_point_offset()));
+ __ LoadCompressed(
+ CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
+ __ movq(RCX,
+ FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
__ jmp(RCX);
}
@@ -3221,21 +3227,22 @@
// Calls to the runtime to optimize the given function.
// RDI: function to be reoptimized.
-// R10: argument descriptor (preserved).
+// ARGS_DESC_REG: argument descriptor (preserved).
void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) {
__ movq(CODE_REG, Address(THR, target::Thread::optimize_stub_offset()));
__ EnterStubFrame();
- __ pushq(R10); // Preserve args descriptor.
+ __ pushq(ARGS_DESC_REG); // Preserve args descriptor.
__ pushq(Immediate(0)); // Result slot.
__ pushq(RDI); // Arg0: function to optimize
__ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1);
__ popq(RAX); // Discard argument.
- __ popq(RAX); // Get Code object.
- __ popq(R10); // Restore argument descriptor.
+ __ popq(FUNCTION_REG); // Get Function object.
+ __ popq(ARGS_DESC_REG); // Restore argument descriptor.
__ LeaveStubFrame();
- __ LoadCompressed(CODE_REG,
- FieldAddress(RAX, target::Function::code_offset()));
- __ movq(RCX, FieldAddress(RAX, target::Function::entry_point_offset()));
+ __ LoadCompressed(
+ CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
+ __ movq(RCX,
+ FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
__ jmp(RCX);
__ int3();
}
@@ -3333,10 +3340,11 @@
// Called from megamorphic calls.
// RDX: receiver (passed to target)
-// RBX: target::MegamorphicCache (preserved)
+// IC_DATA_REG: target::MegamorphicCache (preserved)
// Passed to target:
+// FUNCTION_REG: target function
// CODE_REG: target Code
-// R10: arguments descriptor
+// ARGS_DESC_REG: arguments descriptor
void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
// Jump if receiver is a smi.
Label smi_case;
@@ -3349,8 +3357,10 @@
Label cid_loaded;
__ Bind(&cid_loaded);
- __ movq(R9, FieldAddress(RBX, target::MegamorphicCache::mask_offset()));
- __ movq(RDI, FieldAddress(RBX, target::MegamorphicCache::buckets_offset()));
+ __ movq(R9,
+ FieldAddress(IC_DATA_REG, target::MegamorphicCache::mask_offset()));
+ __ movq(RDI, FieldAddress(IC_DATA_REG,
+ target::MegamorphicCache::buckets_offset()));
// R9: mask as a smi.
// RDI: cache buckets array.
@@ -3379,14 +3389,17 @@
// proper target for the given name and arguments descriptor. If the
// illegal class id was found, the target is a cache miss handler that can
// be invoked as a normal Dart function.
- __ LoadCompressed(RAX, FieldAddress(RDI, RCX, TIMES_COMPRESSED_WORD_SIZE,
- base + target::kCompressedWordSize));
- __ movq(R10, FieldAddress(
- RBX, target::CallSiteData::arguments_descriptor_offset()));
- __ movq(RCX, FieldAddress(RAX, target::Function::entry_point_offset()));
+ __ LoadCompressed(FUNCTION_REG,
+ FieldAddress(RDI, RCX, TIMES_COMPRESSED_WORD_SIZE,
+ base + target::kCompressedWordSize));
+ __ movq(ARGS_DESC_REG,
+ FieldAddress(IC_DATA_REG,
+ target::CallSiteData::arguments_descriptor_offset()));
+ __ movq(RCX,
+ FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
if (!FLAG_precompiled_mode) {
- __ LoadCompressed(CODE_REG,
- FieldAddress(RAX, target::Function::code_offset()));
+ __ LoadCompressed(
+ CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
}
__ jmp(RCX);
@@ -3411,13 +3424,14 @@
}
// Input:
-// RBX - icdata
+// IC_DATA_REG - icdata
// RDX - receiver object
void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
Label loop, found, miss;
- __ movq(R13, FieldAddress(RBX, target::ICData::entries_offset()));
- __ movq(R10, FieldAddress(
- RBX, target::CallSiteData::arguments_descriptor_offset()));
+ __ movq(R13, FieldAddress(IC_DATA_REG, target::ICData::entries_offset()));
+ __ movq(ARGS_DESC_REG,
+ FieldAddress(IC_DATA_REG,
+ target::CallSiteData::arguments_descriptor_offset()));
__ leaq(R13, FieldAddress(R13, target::Array::data_offset()));
// R13: first IC entry
__ LoadTaggedClassIdMayBeSmi(RAX, RDX);
diff --git a/runtime/vm/constants_arm.h b/runtime/vm/constants_arm.h
index b63efec..7b0d4ce 100644
--- a/runtime/vm/constants_arm.h
+++ b/runtime/vm/constants_arm.h
@@ -316,8 +316,11 @@
const Register DISPATCH_TABLE_REG = NOTFP; // Dispatch table register.
const Register SPREG = SP; // Stack pointer register.
const Register FPREG = FP; // Frame pointer register.
+const Register IC_DATA_REG = R9; // ICData/MegamorphicCache register.
const Register ARGS_DESC_REG = R4;
const Register CODE_REG = R6;
+// Set when calling Dart functions in JIT mode, used by LazyCompileStub.
+const Register FUNCTION_REG = R0;
const Register THR = R10; // Caches current thread in generated code.
const Register CALLEE_SAVED_TEMP = R8;
@@ -432,7 +435,6 @@
// Registers used inside the implementation of InitLateStaticFieldStub.
struct InitLateStaticFieldInternalRegs {
- static const Register kFunctionReg = R0;
static const Register kAddressReg = R3;
static const Register kScratchReg = R4;
};
@@ -446,7 +448,6 @@
// Registers used inside the implementation of InitLateInstanceFieldStub.
struct InitLateInstanceFieldInternalRegs {
- static const Register kFunctionReg = R0;
static const Register kAddressReg = R3;
static const Register kScratchReg = R4;
};
diff --git a/runtime/vm/constants_arm64.h b/runtime/vm/constants_arm64.h
index 8ec2b3e..0cd27b6 100644
--- a/runtime/vm/constants_arm64.h
+++ b/runtime/vm/constants_arm64.h
@@ -143,8 +143,11 @@
const Register PP = R27; // Caches object pool pointer in generated code.
const Register DISPATCH_TABLE_REG = R21; // Dispatch table register.
const Register CODE_REG = R24;
+// Set when calling Dart functions in JIT mode, used by LazyCompileStub.
+const Register FUNCTION_REG = R0;
const Register FPREG = FP; // Frame pointer register.
const Register SPREG = R15; // Stack pointer register.
+const Register IC_DATA_REG = R5; // ICData/MegamorphicCache register.
const Register ARGS_DESC_REG = R4; // Arguments descriptor register.
const Register THR = R26; // Caches current thread in generated code.
const Register CALLEE_SAVED_TEMP = R19;
@@ -266,7 +269,6 @@
// Registers used inside the implementation of InitLateStaticFieldStub.
struct InitLateStaticFieldInternalRegs {
- static const Register kFunctionReg = R0;
static const Register kAddressReg = R3;
static const Register kScratchReg = R4;
};
@@ -280,7 +282,6 @@
// Registers used inside the implementation of InitLateInstanceFieldStub.
struct InitLateInstanceFieldInternalRegs {
- static const Register kFunctionReg = R0;
static const Register kAddressReg = R3;
static const Register kScratchReg = R4;
};
diff --git a/runtime/vm/constants_ia32.h b/runtime/vm/constants_ia32.h
index 1169aa4..47f2797 100644
--- a/runtime/vm/constants_ia32.h
+++ b/runtime/vm/constants_ia32.h
@@ -82,9 +82,12 @@
const Register TMP = kNoRegister; // No scratch register used by assembler.
const Register TMP2 = kNoRegister; // No second assembler scratch register.
const Register CODE_REG = EDI;
+// Set when calling Dart functions in JIT mode, used by LazyCompileStub.
+const Register FUNCTION_REG = EAX;
const Register PP = kNoRegister; // No object pool pointer.
const Register SPREG = ESP; // Stack pointer register.
const Register FPREG = EBP; // Frame pointer register.
+const Register IC_DATA_REG = ECX; // ICData/MegamorphicCache register.
const Register ARGS_DESC_REG = EDX; // Arguments descriptor register.
const Register THR = ESI; // Caches current thread in generated code.
const Register CALLEE_SAVED_TEMP = EBX;
@@ -166,7 +169,6 @@
// Registers used inside the implementation of InitLateStaticFieldStub.
struct InitLateStaticFieldInternalRegs {
- static const Register kFunctionReg = EAX;
static const Register kAddressReg = ECX;
static const Register kScratchReg = EDI;
};
@@ -180,7 +182,6 @@
// Registers used inside the implementation of InitLateInstanceFieldStub.
struct InitLateInstanceFieldInternalRegs {
- static const Register kFunctionReg = EAX;
static const Register kAddressReg = ECX;
static const Register kScratchReg = EDI;
};
diff --git a/runtime/vm/constants_riscv.h b/runtime/vm/constants_riscv.h
index 4fb57fc..91e9d78 100644
--- a/runtime/vm/constants_riscv.h
+++ b/runtime/vm/constants_riscv.h
@@ -153,6 +153,8 @@
constexpr Register PP = A5; // Caches object pool pointer in generated code.
constexpr Register DISPATCH_TABLE_REG = S9; // Dispatch table register.
constexpr Register CODE_REG = A2;
+// Set when calling Dart functions in JIT mode, used by LazyCompileStub.
+constexpr Register FUNCTION_REG = T0;
constexpr Register FPREG = FP; // Frame pointer register.
constexpr Register SPREG = SP; // Stack pointer register.
constexpr Register IC_DATA_REG = S5; // ICData/MegamorphicCache register.
@@ -277,7 +279,6 @@
// Registers used inside the implementation of InitLateStaticFieldStub.
struct InitLateStaticFieldInternalRegs {
- static const Register kFunctionReg = T0;
static const Register kAddressReg = T3;
static const Register kScratchReg = T4;
};
@@ -291,8 +292,6 @@
// Registers used inside the implementation of InitLateInstanceFieldStub.
struct InitLateInstanceFieldInternalRegs {
- static constexpr Register kFunctionReg =
- T0; // Must agreee with lazy compile stub.
static constexpr Register kAddressReg = T3;
static constexpr Register kScratchReg = T4;
};
diff --git a/runtime/vm/constants_x64.h b/runtime/vm/constants_x64.h
index 51828ea..c2a1162 100644
--- a/runtime/vm/constants_x64.h
+++ b/runtime/vm/constants_x64.h
@@ -120,8 +120,11 @@
const Register PP = R15;
const Register SPREG = RSP; // Stack pointer register.
const Register FPREG = RBP; // Frame pointer register.
+const Register IC_DATA_REG = RBX; // ICData/MegamorphicCache register.
const Register ARGS_DESC_REG = R10; // Arguments descriptor register.
const Register CODE_REG = R12;
+// Set when calling Dart functions in JIT mode, used by LazyCompileStub.
+const Register FUNCTION_REG = RAX;
const Register THR = R14; // Caches current thread in generated code.
const Register CALLEE_SAVED_TEMP = RBX;
@@ -235,7 +238,6 @@
// Registers used inside the implementation of InitLateStaticFieldStub.
struct InitLateStaticFieldInternalRegs {
- static const Register kFunctionReg = RAX;
static const Register kAddressReg = RCX;
static const Register kScratchReg = RSI;
};
@@ -249,7 +251,6 @@
// Registers used inside the implementation of InitLateInstanceFieldStub.
struct InitLateInstanceFieldInternalRegs {
- static const Register kFunctionReg = RAX;
static const Register kAddressReg = RCX;
static const Register kScratchReg = RSI;
};
diff --git a/tools/VERSION b/tools/VERSION
index d56cb22..7b8f920 100644
--- a/tools/VERSION
+++ b/tools/VERSION
@@ -27,5 +27,5 @@
MAJOR 2
MINOR 18
PATCH 0
-PRERELEASE 86
+PRERELEASE 87
PRERELEASE_PATCH 0
\ No newline at end of file