[vm] Share BoxAllocationSlowPath across architectures and use AllocateBoxABI::kResultReg
Slightly related to
Issue https://github.com/dart-lang/sdk/issues/45213
TEST=Existing test coverage.
Change-Id: I59953c9734b1e8cdba13a03c5734d084fc333130
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/196929
Commit-Queue: Martin Kustermann <kustermann@google.com>
Reviewed-by: Tess Strickland <sstrickl@google.com>
diff --git a/runtime/vm/compiler/asm_intrinsifier_arm.cc b/runtime/vm/compiler/asm_intrinsifier_arm.cc
index 770d456..2cfe3ad 100644
--- a/runtime/vm/compiler/asm_intrinsifier_arm.cc
+++ b/runtime/vm/compiler/asm_intrinsifier_arm.cc
@@ -56,7 +56,7 @@
// Try allocating in new space.
const Class& cls = GrowableObjectArrayClass();
- __ TryAllocate(cls, normal_ir_body, R0, R1);
+ __ TryAllocate(cls, normal_ir_body, Assembler::kFarJump, R0, R1);
// Store backing array object in growable array object.
__ ldr(R1, Address(SP, kArrayOffset)); // Data argument.
@@ -128,7 +128,7 @@
__ mov(R1, Operand(R1, LSL, R0)); // R1 gets the low bits.
const Class& mint_class = MintClass();
- __ TryAllocate(mint_class, normal_ir_body, R0, R2);
+ __ TryAllocate(mint_class, normal_ir_body, Assembler::kFarJump, R0, R2);
__ str(R1, FieldAddress(R0, target::Mint::value_offset()));
__ str(R8,
@@ -871,7 +871,7 @@
UNREACHABLE();
}
const Class& double_class = DoubleClass();
- __ TryAllocate(double_class, normal_ir_body, R0,
+ __ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump, R0,
R1); // Result register.
__ StoreDToOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
__ Ret();
@@ -917,7 +917,7 @@
__ LoadDFromOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
__ vmuld(D0, D0, D1);
const Class& double_class = DoubleClass();
- __ TryAllocate(double_class, normal_ir_body, R0,
+ __ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump, R0,
R1); // Result register.
__ StoreDToOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
__ Ret();
@@ -938,7 +938,7 @@
__ vmovsr(S0, R0);
__ vcvtdi(D0, S0);
const Class& double_class = DoubleClass();
- __ TryAllocate(double_class, normal_ir_body, R0,
+ __ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump, R0,
R1); // Result register.
__ StoreDToOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
__ Ret();
@@ -1094,7 +1094,7 @@
__ Bind(&double_op);
__ vsqrtd(D0, D1);
const Class& double_class = DoubleClass();
- __ TryAllocate(double_class, normal_ir_body, R0,
+ __ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump, R0,
R1); // Result register.
__ StoreDToOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
__ Ret();
diff --git a/runtime/vm/compiler/asm_intrinsifier_arm64.cc b/runtime/vm/compiler/asm_intrinsifier_arm64.cc
index 167e089..a976141 100644
--- a/runtime/vm/compiler/asm_intrinsifier_arm64.cc
+++ b/runtime/vm/compiler/asm_intrinsifier_arm64.cc
@@ -59,7 +59,7 @@
// Try allocating in new space.
const Class& cls = GrowableObjectArrayClass();
- __ TryAllocate(cls, normal_ir_body, R0, R1);
+ __ TryAllocate(cls, normal_ir_body, Assembler::kFarJump, R0, R1);
// Store backing array object in growable array object.
__ ldr(R1, Address(SP, kArrayOffset)); // Data argument.
@@ -989,7 +989,7 @@
UNREACHABLE();
}
const Class& double_class = DoubleClass();
- __ TryAllocate(double_class, normal_ir_body, R0, R1);
+ __ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump, R0, R1);
__ StoreDFieldToOffset(V0, R0, target::Double::value_offset());
__ ret();
@@ -1030,7 +1030,7 @@
__ LoadDFieldFromOffset(V0, R0, target::Double::value_offset());
__ fmuld(V0, V0, V1);
const Class& double_class = DoubleClass();
- __ TryAllocate(double_class, normal_ir_body, R0, R1);
+ __ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump, R0, R1);
__ StoreDFieldToOffset(V0, R0, target::Double::value_offset());
__ ret();
__ Bind(normal_ir_body);
@@ -1048,7 +1048,7 @@
__ scvtfdw(V0, R0);
#endif
const Class& double_class = DoubleClass();
- __ TryAllocate(double_class, normal_ir_body, R0, R1);
+ __ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump, R0, R1);
__ StoreDFieldToOffset(V0, R0, target::Double::value_offset());
__ ret();
__ Bind(normal_ir_body);
@@ -1196,7 +1196,7 @@
__ Bind(&double_op);
__ fsqrtd(V0, V1);
const Class& double_class = DoubleClass();
- __ TryAllocate(double_class, normal_ir_body, R0, R1);
+ __ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump, R0, R1);
__ StoreDFieldToOffset(V0, R0, target::Double::value_offset());
__ ret();
__ Bind(&is_smi);
diff --git a/runtime/vm/compiler/assembler/assembler_arm.cc b/runtime/vm/compiler/assembler/assembler_arm.cc
index 221f079..fcc76b4 100644
--- a/runtime/vm/compiler/assembler/assembler_arm.cc
+++ b/runtime/vm/compiler/assembler/assembler_arm.cc
@@ -3423,6 +3423,7 @@
void Assembler::TryAllocate(const Class& cls,
Label* failure,
+ JumpDistance distance,
Register instance_reg,
Register temp_reg) {
ASSERT(failure != NULL);
diff --git a/runtime/vm/compiler/assembler/assembler_arm.h b/runtime/vm/compiler/assembler/assembler_arm.h
index a3a12aa..263f5fb 100644
--- a/runtime/vm/compiler/assembler/assembler_arm.h
+++ b/runtime/vm/compiler/assembler/assembler_arm.h
@@ -1280,6 +1280,7 @@
// Only the tags field of the object is initialized.
void TryAllocate(const Class& cls,
Label* failure,
+ JumpDistance distance,
Register instance_reg,
Register temp_reg);
diff --git a/runtime/vm/compiler/assembler/assembler_arm64.cc b/runtime/vm/compiler/assembler/assembler_arm64.cc
index 0300eb4..11cc859 100644
--- a/runtime/vm/compiler/assembler/assembler_arm64.cc
+++ b/runtime/vm/compiler/assembler/assembler_arm64.cc
@@ -1843,6 +1843,7 @@
void Assembler::TryAllocate(const Class& cls,
Label* failure,
+ JumpDistance distance,
Register instance_reg,
Register top_reg,
bool tag_result) {
diff --git a/runtime/vm/compiler/assembler/assembler_arm64.h b/runtime/vm/compiler/assembler/assembler_arm64.h
index 5726ac1..7d0921b 100644
--- a/runtime/vm/compiler/assembler/assembler_arm64.h
+++ b/runtime/vm/compiler/assembler/assembler_arm64.h
@@ -1999,6 +1999,7 @@
// * [top_reg] will contain Thread::top_offset()
void TryAllocate(const Class& cls,
Label* failure,
+ JumpDistance distance,
Register instance_reg,
Register top_reg,
bool tag_result = true);
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler.h b/runtime/vm/compiler/backend/flow_graph_compiler.h
index 6973f62..f613c15 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler.h
+++ b/runtime/vm/compiler/backend/flow_graph_compiler.h
@@ -278,6 +278,26 @@
}
};
+class BoxAllocationSlowPath : public TemplateSlowPathCode<Instruction> {
+ public:
+ BoxAllocationSlowPath(Instruction* instruction,
+ const Class& cls,
+ Register result)
+ : TemplateSlowPathCode(instruction), cls_(cls), result_(result) {}
+
+ virtual void EmitNativeCode(FlowGraphCompiler* compiler);
+
+ static void Allocate(FlowGraphCompiler* compiler,
+ Instruction* instruction,
+ const Class& cls,
+ Register result,
+ Register temp);
+
+ private:
+ const Class& cls_;
+ const Register result_;
+};
+
// Slow path code which calls runtime entry to throw an exception.
class ThrowErrorSlowPathCode : public TemplateSlowPathCode<Instruction> {
public:
diff --git a/runtime/vm/compiler/backend/il.cc b/runtime/vm/compiler/backend/il.cc
index 5737444..66a645f 100644
--- a/runtime/vm/compiler/backend/il.cc
+++ b/runtime/vm/compiler/backend/il.cc
@@ -5545,6 +5545,44 @@
compiler->AddNullCheck(check_null->source(), check_null->function_name());
}
+void BoxAllocationSlowPath::EmitNativeCode(FlowGraphCompiler* compiler) {
+ if (compiler::Assembler::EmittingComments()) {
+ __ Comment("%s slow path allocation of %s", instruction()->DebugName(),
+ String::Handle(cls_.ScrubbedName()).ToCString());
+ }
+ __ Bind(entry_label());
+ const auto& stub = Code::ZoneHandle(
+ compiler->zone(), StubCode::GetAllocationStubForClass(cls_));
+
+ LocationSummary* locs = instruction()->locs();
+
+ locs->live_registers()->Remove(Location::RegisterLocation(result_));
+ compiler->SaveLiveRegisters(locs);
+ compiler->GenerateStubCall(InstructionSource(), // No token position.
+ stub, UntaggedPcDescriptors::kOther, locs);
+ __ MoveRegister(result_, AllocateBoxABI::kResultReg);
+ compiler->RestoreLiveRegisters(locs);
+ __ Jump(exit_label());
+}
+
+void BoxAllocationSlowPath::Allocate(FlowGraphCompiler* compiler,
+ Instruction* instruction,
+ const Class& cls,
+ Register result,
+ Register temp) {
+ if (compiler->intrinsic_mode()) {
+ __ TryAllocate(cls, compiler->intrinsic_slow_path_label(),
+ compiler::Assembler::kFarJump, result, temp);
+ } else {
+ auto slow_path = new BoxAllocationSlowPath(instruction, cls, result);
+ compiler->AddSlowPathCode(slow_path);
+
+ __ TryAllocate(cls, slow_path->entry_label(), compiler::Assembler::kFarJump,
+ result, temp);
+ __ Bind(slow_path->exit_label());
+ }
+}
+
void RangeErrorSlowPath::EmitSharedStubCall(FlowGraphCompiler* compiler,
bool save_fpu_registers) {
#if defined(TARGET_ARCH_IA32)
diff --git a/runtime/vm/compiler/backend/il_arm.cc b/runtime/vm/compiler/backend/il_arm.cc
index ba61166..bd70c29 100644
--- a/runtime/vm/compiler/backend/il_arm.cc
+++ b/runtime/vm/compiler/backend/il_arm.cc
@@ -2550,56 +2550,6 @@
DEFINE_UNIMPLEMENTED_INSTRUCTION(GuardFieldTypeInstr)
DEFINE_UNIMPLEMENTED_INSTRUCTION(CheckConditionInstr)
-class BoxAllocationSlowPath : public TemplateSlowPathCode<Instruction> {
- public:
- BoxAllocationSlowPath(Instruction* instruction,
- const Class& cls,
- Register result)
- : TemplateSlowPathCode(instruction), cls_(cls), result_(result) {}
-
- virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
- if (compiler::Assembler::EmittingComments()) {
- __ Comment("%s slow path allocation of %s", instruction()->DebugName(),
- String::Handle(cls_.ScrubbedName()).ToCString());
- }
- __ Bind(entry_label());
- const Code& stub = Code::ZoneHandle(
- compiler->zone(), StubCode::GetAllocationStubForClass(cls_));
-
- LocationSummary* locs = instruction()->locs();
-
- locs->live_registers()->Remove(Location::RegisterLocation(result_));
-
- compiler->SaveLiveRegisters(locs);
- compiler->GenerateStubCall(InstructionSource(), // No token position.
- stub, UntaggedPcDescriptors::kOther, locs);
- __ MoveRegister(result_, R0);
- compiler->RestoreLiveRegisters(locs);
- __ b(exit_label());
- }
-
- static void Allocate(FlowGraphCompiler* compiler,
- Instruction* instruction,
- const Class& cls,
- Register result,
- Register temp) {
- if (compiler->intrinsic_mode()) {
- __ TryAllocate(cls, compiler->intrinsic_slow_path_label(), result, temp);
- } else {
- BoxAllocationSlowPath* slow_path =
- new BoxAllocationSlowPath(instruction, cls, result);
- compiler->AddSlowPathCode(slow_path);
-
- __ TryAllocate(cls, slow_path->entry_label(), result, temp);
- __ Bind(slow_path->exit_label());
- }
- }
-
- private:
- const Class& cls_;
- const Register result_;
-};
-
LocationSummary* LoadCodeUnitsInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const bool might_box = (representation() == kTagged) && !can_pack_into_smi();
@@ -4807,7 +4757,8 @@
if (compiler->intrinsic_mode()) {
__ TryAllocate(compiler->mint_class(),
- compiler->intrinsic_slow_path_label(), out_reg, tmp);
+ compiler->intrinsic_slow_path_label(),
+ compiler::Assembler::kNearJump, out_reg, tmp);
} else if (locs()->call_on_shared_slow_path()) {
auto object_store = compiler->isolate_group()->object_store();
const bool live_fpu_regs = locs()->live_registers()->FpuRegisterCount() > 0;
diff --git a/runtime/vm/compiler/backend/il_arm64.cc b/runtime/vm/compiler/backend/il_arm64.cc
index 1466497..95e5666 100644
--- a/runtime/vm/compiler/backend/il_arm64.cc
+++ b/runtime/vm/compiler/backend/il_arm64.cc
@@ -2303,56 +2303,6 @@
}
}
-class BoxAllocationSlowPath : public TemplateSlowPathCode<Instruction> {
- public:
- BoxAllocationSlowPath(Instruction* instruction,
- const Class& cls,
- Register result)
- : TemplateSlowPathCode(instruction), cls_(cls), result_(result) {}
-
- virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
- if (compiler::Assembler::EmittingComments()) {
- __ Comment("%s slow path allocation of %s", instruction()->DebugName(),
- String::Handle(cls_.ScrubbedName()).ToCString());
- }
- __ Bind(entry_label());
- const Code& stub = Code::ZoneHandle(
- compiler->zone(), StubCode::GetAllocationStubForClass(cls_));
-
- LocationSummary* locs = instruction()->locs();
-
- locs->live_registers()->Remove(Location::RegisterLocation(result_));
-
- compiler->SaveLiveRegisters(locs);
- compiler->GenerateStubCall(InstructionSource(), // No token position.
- stub, UntaggedPcDescriptors::kOther, locs);
- __ MoveRegister(result_, R0);
- compiler->RestoreLiveRegisters(locs);
- __ b(exit_label());
- }
-
- static void Allocate(FlowGraphCompiler* compiler,
- Instruction* instruction,
- const Class& cls,
- Register result,
- Register temp) {
- if (compiler->intrinsic_mode()) {
- __ TryAllocate(cls, compiler->intrinsic_slow_path_label(), result, temp);
- } else {
- BoxAllocationSlowPath* slow_path =
- new BoxAllocationSlowPath(instruction, cls, result);
- compiler->AddSlowPathCode(slow_path);
-
- __ TryAllocate(cls, slow_path->entry_label(), result, temp);
- __ Bind(slow_path->exit_label());
- }
- }
-
- private:
- const Class& cls_;
- const Register result_;
-};
-
static void EnsureMutableBox(FlowGraphCompiler* compiler,
StoreInstanceFieldInstr* instruction,
Register box_reg,
@@ -4074,7 +4024,8 @@
Register temp = locs()->temp(0).reg();
if (compiler->intrinsic_mode()) {
__ TryAllocate(compiler->mint_class(),
- compiler->intrinsic_slow_path_label(), out, temp);
+ compiler->intrinsic_slow_path_label(),
+ compiler::Assembler::kNearJump, out, temp);
} else if (locs()->call_on_shared_slow_path()) {
auto object_store = compiler->isolate_group()->object_store();
const bool live_fpu_regs = locs()->live_registers()->FpuRegisterCount() > 0;
diff --git a/runtime/vm/compiler/backend/il_ia32.cc b/runtime/vm/compiler/backend/il_ia32.cc
index 4ab37c5..c1aeb05 100644
--- a/runtime/vm/compiler/backend/il_ia32.cc
+++ b/runtime/vm/compiler/backend/il_ia32.cc
@@ -2039,58 +2039,6 @@
}
}
-class BoxAllocationSlowPath : public TemplateSlowPathCode<Instruction> {
- public:
- BoxAllocationSlowPath(Instruction* instruction,
- const Class& cls,
- Register result)
- : TemplateSlowPathCode(instruction), cls_(cls), result_(result) {}
-
- virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
- if (compiler::Assembler::EmittingComments()) {
- __ Comment("%s slow path allocation of %s", instruction()->DebugName(),
- String::Handle(cls_.ScrubbedName()).ToCString());
- }
- __ Bind(entry_label());
- const Code& stub = Code::ZoneHandle(
- compiler->zone(), StubCode::GetAllocationStubForClass(cls_));
-
- LocationSummary* locs = instruction()->locs();
-
- locs->live_registers()->Remove(Location::RegisterLocation(result_));
-
- compiler->SaveLiveRegisters(locs);
- compiler->GenerateStubCall(InstructionSource(), stub,
- UntaggedPcDescriptors::kOther, locs);
- __ MoveRegister(result_, EAX);
- compiler->RestoreLiveRegisters(locs);
- __ jmp(exit_label());
- }
-
- static void Allocate(FlowGraphCompiler* compiler,
- Instruction* instruction,
- const Class& cls,
- Register result,
- Register temp) {
- if (compiler->intrinsic_mode()) {
- __ TryAllocate(cls, compiler->intrinsic_slow_path_label(),
- compiler::Assembler::kFarJump, result, temp);
- } else {
- BoxAllocationSlowPath* slow_path =
- new BoxAllocationSlowPath(instruction, cls, result);
- compiler->AddSlowPathCode(slow_path);
-
- __ TryAllocate(cls, slow_path->entry_label(),
- compiler::Assembler::kFarJump, result, temp);
- __ Bind(slow_path->exit_label());
- }
- }
-
- private:
- const Class& cls_;
- const Register result_;
-};
-
LocationSummary* StoreInstanceFieldInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 2;
diff --git a/runtime/vm/compiler/backend/il_x64.cc b/runtime/vm/compiler/backend/il_x64.cc
index fefe242..b60e119 100644
--- a/runtime/vm/compiler/backend/il_x64.cc
+++ b/runtime/vm/compiler/backend/il_x64.cc
@@ -1626,58 +1626,6 @@
}
}
-class BoxAllocationSlowPath : public TemplateSlowPathCode<Instruction> {
- public:
- BoxAllocationSlowPath(Instruction* instruction,
- const Class& cls,
- Register result)
- : TemplateSlowPathCode(instruction), cls_(cls), result_(result) {}
-
- virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
- if (compiler::Assembler::EmittingComments()) {
- __ Comment("%s slow path allocation of %s", instruction()->DebugName(),
- String::Handle(cls_.ScrubbedName()).ToCString());
- }
- __ Bind(entry_label());
- const Code& stub = Code::ZoneHandle(
- compiler->zone(), StubCode::GetAllocationStubForClass(cls_));
-
- LocationSummary* locs = instruction()->locs();
-
- locs->live_registers()->Remove(Location::RegisterLocation(result_));
-
- compiler->SaveLiveRegisters(locs);
- compiler->GenerateStubCall(InstructionSource(), // No token position.
- stub, UntaggedPcDescriptors::kOther, locs);
- __ MoveRegister(result_, RAX);
- compiler->RestoreLiveRegisters(locs);
- __ jmp(exit_label());
- }
-
- static void Allocate(FlowGraphCompiler* compiler,
- Instruction* instruction,
- const Class& cls,
- Register result,
- Register temp) {
- if (compiler->intrinsic_mode()) {
- __ TryAllocate(cls, compiler->intrinsic_slow_path_label(),
- compiler::Assembler::kFarJump, result, temp);
- } else {
- BoxAllocationSlowPath* slow_path =
- new BoxAllocationSlowPath(instruction, cls, result);
- compiler->AddSlowPathCode(slow_path);
-
- __ TryAllocate(cls, slow_path->entry_label(),
- compiler::Assembler::kFarJump, result, temp);
- __ Bind(slow_path->exit_label());
- }
- }
-
- private:
- const Class& cls_;
- const Register result_;
-};
-
LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 2;
diff --git a/runtime/vm/compiler/stub_code_compiler.cc b/runtime/vm/compiler/stub_code_compiler.cc
index 7d04fee..5f54c67 100644
--- a/runtime/vm/compiler/stub_code_compiler.cc
+++ b/runtime/vm/compiler/stub_code_compiler.cc
@@ -19,6 +19,9 @@
namespace dart {
+DECLARE_FLAG(bool, inline_alloc);
+DECLARE_FLAG(bool, use_slow_path);
+
namespace compiler {
intptr_t StubCodeCompiler::WordOffsetFromFpToCpuRegister(
@@ -901,19 +904,12 @@
__ Breakpoint();
}
-#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_X64)
-#define EMIT_JUMP_DIST(...) __VA_ARGS__
-#else
-#define EMIT_JUMP_DIST(...)
-#endif
-
#define EMIT_BOX_ALLOCATION(Name) \
void StubCodeCompiler::GenerateAllocate##Name##Stub(Assembler* assembler) { \
Label call_runtime; \
if (!FLAG_use_slow_path && FLAG_inline_alloc) { \
__ TryAllocate(compiler::Name##Class(), &call_runtime, \
- EMIT_JUMP_DIST(Assembler::kNearJump, ) /* NOLINT */ \
- AllocateBoxABI::kResultReg, \
+ Assembler::kNearJump, AllocateBoxABI::kResultReg, \
AllocateBoxABI::kTempReg); \
__ Ret(); \
} \
@@ -932,7 +928,6 @@
EMIT_BOX_ALLOCATION(Float64x2)
#undef EMIT_BOX_ALLOCATION
-#undef EMIT_JUMP_DIST
} // namespace compiler
diff --git a/runtime/vm/compiler/stub_code_compiler_arm.cc b/runtime/vm/compiler/stub_code_compiler_arm.cc
index bcc1dcd..af6c94f 100644
--- a/runtime/vm/compiler/stub_code_compiler_arm.cc
+++ b/runtime/vm/compiler/stub_code_compiler_arm.cc
@@ -1124,7 +1124,7 @@
// For test purpose call allocation stub without inline allocation attempt.
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
- __ TryAllocate(compiler::MintClass(), &slow_case,
+ __ TryAllocate(compiler::MintClass(), &slow_case, Assembler::kNearJump,
AllocateMintABI::kResultReg, AllocateMintABI::kTempReg);
__ Ret();
@@ -1145,7 +1145,7 @@
// For test purpose call allocation stub without inline allocation attempt.
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
- __ TryAllocate(compiler::MintClass(), &slow_case,
+ __ TryAllocate(compiler::MintClass(), &slow_case, Assembler::kNearJump,
AllocateMintABI::kResultReg, AllocateMintABI::kTempReg);
__ Ret();
diff --git a/runtime/vm/compiler/stub_code_compiler_arm64.cc b/runtime/vm/compiler/stub_code_compiler_arm64.cc
index 209f6e0..3d5f6c4 100644
--- a/runtime/vm/compiler/stub_code_compiler_arm64.cc
+++ b/runtime/vm/compiler/stub_code_compiler_arm64.cc
@@ -1260,7 +1260,7 @@
// For test purpose call allocation stub without inline allocation attempt.
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
- __ TryAllocate(compiler::MintClass(), &slow_case,
+ __ TryAllocate(compiler::MintClass(), &slow_case, Assembler::kNearJump,
AllocateMintABI::kResultReg, AllocateMintABI::kTempReg);
__ Ret();
@@ -1280,7 +1280,7 @@
// For test purpose call allocation stub without inline allocation attempt.
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
- __ TryAllocate(compiler::MintClass(), &slow_case,
+ __ TryAllocate(compiler::MintClass(), &slow_case, Assembler::kNearJump,
AllocateMintABI::kResultReg, AllocateMintABI::kTempReg);
__ Ret();