[vm] Decouple intrinsifier code from runtime
This is the next step towards preventing the compiler from directly peeking
into the runtime and instead having it interact with the runtime through a
well-defined surface.
This CL decouples the hand-written intrinsifier code from the runtime:
* the intrinsifier is split into a GraphIntrinsifier and an AsmIntrinsifier
* the recognized methods list is moved to a separate .h file
* all intrinsifier code is moved into the dart::compiler namespace
* the AsmIntrinsifier interacts with the runtime only through runtime_api.h
  (see the sketch below)
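The intended layering, as a simplified sketch (names taken from the new
asm_intrinsifier.h / asm_intrinsifier_arm.cc in this CL; not a literal
excerpt):

  // Compiler-side intrinsics live in dart::compiler and must not include
  // runtime headers directly (enforced via SHOULD_NOT_INCLUDE_RUNTIME).
  namespace dart {
  namespace compiler {

  class AsmIntrinsifier : public AllStatic {
   public:
    static void IntrinsicCallPrologue(Assembler* assembler);
    static void IntrinsicCallEpilogue(Assembler* assembler);
    // One hand-written stub per recognized method, e.g.:
    static void Integer_add(Assembler* assembler, Label* normal_ir_body);
  };

  // Runtime object layout and constants are obtained only through the
  // compiler::target wrappers declared in runtime_api.h, e.g.
  // target::kWordSize, target::Thread::top_offset() and
  // target::GrowableObjectArray::data_offset().

  }  // namespace compiler
  }  // namespace dart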
Issue https://github.com/dart-lang/sdk/issues/31709
Change-Id: I0a73ad620e051dd49c9db7da3241212b3b74ccdd
Reviewed-on: https://dart-review.googlesource.com/c/92740
Commit-Queue: Martin Kustermann <kustermann@google.com>
Reviewed-by: Aart Bik <ajcbik@google.com>
diff --git a/runtime/vm/compiler/asm_intrinsifier.cc b/runtime/vm/compiler/asm_intrinsifier.cc
new file mode 100644
index 0000000..6cc847a
--- /dev/null
+++ b/runtime/vm/compiler/asm_intrinsifier.cc
@@ -0,0 +1,44 @@
+// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+// Class for intrinsifying functions.
+
+#if !defined(DART_PRECOMPILED_RUNTIME)
+
+#define SHOULD_NOT_INCLUDE_RUNTIME
+
+#include "vm/compiler/asm_intrinsifier.h"
+
+namespace dart {
+namespace compiler {
+
+#if !defined(TARGET_ARCH_DBC)
+
+void AsmIntrinsifier::String_identityHash(Assembler* assembler,
+ Label* normal_ir_body) {
+ String_getHashCode(assembler, normal_ir_body);
+}
+
+void AsmIntrinsifier::Double_identityHash(Assembler* assembler,
+ Label* normal_ir_body) {
+ Double_hashCode(assembler, normal_ir_body);
+}
+
+void AsmIntrinsifier::RegExp_ExecuteMatch(Assembler* assembler,
+ Label* normal_ir_body) {
+ AsmIntrinsifier::IntrinsifyRegExpExecuteMatch(assembler, normal_ir_body,
+ /*sticky=*/false);
+}
+
+void AsmIntrinsifier::RegExp_ExecuteMatchSticky(Assembler* assembler,
+ Label* normal_ir_body) {
+ AsmIntrinsifier::IntrinsifyRegExpExecuteMatch(assembler, normal_ir_body,
+ /*sticky=*/true);
+}
+
+#endif // !defined(TARGET_ARCH_DBC)
+
+} // namespace compiler
+} // namespace dart
+
+#endif // !defined(DART_PRECOMPILED_RUNTIME)
diff --git a/runtime/vm/compiler/asm_intrinsifier.h b/runtime/vm/compiler/asm_intrinsifier.h
new file mode 100644
index 0000000..9f7e5d7
--- /dev/null
+++ b/runtime/vm/compiler/asm_intrinsifier.h
@@ -0,0 +1,60 @@
+// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+// Class for intrinsifying functions.
+
+#ifndef RUNTIME_VM_COMPILER_ASM_INTRINSIFIER_H_
+#define RUNTIME_VM_COMPILER_ASM_INTRINSIFIER_H_
+
+#include "vm/allocation.h"
+#include "vm/compiler/recognized_methods_list.h"
+
+namespace dart {
+
+// Forward declarations.
+class FlowGraphCompiler;
+class Function;
+class TargetEntryInstr;
+class ParsedFunction;
+class FlowGraph;
+
+namespace compiler {
+class Assembler;
+class Label;
+
+class AsmIntrinsifier : public AllStatic {
+ public:
+ static intptr_t ParameterSlotFromSp();
+
+ static void IntrinsicCallPrologue(Assembler* assembler);
+ static void IntrinsicCallEpilogue(Assembler* assembler);
+
+ private:
+ friend class Intrinsifier;
+
+ // The "_A" value used in the intrinsification of
+ // `runtime/lib/math_patch.dart:_Random._nextState()`
+ static const int64_t kRandomAValue = 0xffffda61;
+
+ static bool CanIntrinsify(const Function& function);
+
+#define DECLARE_FUNCTION(class_name, function_name, enum_name, fp) \
+ static void enum_name(Assembler* assembler, Label* normal_ir_body);
+ ALL_INTRINSICS_LIST(DECLARE_FUNCTION)
+
+ // On DBC all intrinsics are handled the same way.
+#if defined(TARGET_ARCH_DBC)
+ GRAPH_INTRINSICS_LIST(DECLARE_FUNCTION)
+#endif // defined(TARGET_ARCH_DBC)
+
+#undef DECLARE_FUNCTION
+
+ static void IntrinsifyRegExpExecuteMatch(Assembler* assembler,
+ Label* normal_ir_body,
+ bool sticky);
+};
+
+} // namespace compiler
+} // namespace dart
+
+#endif // RUNTIME_VM_COMPILER_ASM_INTRINSIFIER_H_
diff --git a/runtime/vm/compiler/intrinsifier_arm.cc b/runtime/vm/compiler/asm_intrinsifier_arm.cc
similarity index 70%
rename from runtime/vm/compiler/intrinsifier_arm.cc
rename to runtime/vm/compiler/asm_intrinsifier_arm.cc
index beb9d4e..c00607e 100644
--- a/runtime/vm/compiler/intrinsifier_arm.cc
+++ b/runtime/vm/compiler/asm_intrinsifier_arm.cc
@@ -1,23 +1,18 @@
-// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
+// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM.
#if defined(TARGET_ARCH_ARM) && !defined(DART_PRECOMPILED_RUNTIME)
-#include "vm/compiler/intrinsifier.h"
+#define SHOULD_NOT_INCLUDE_RUNTIME
+#include "vm/class_id.h"
+#include "vm/compiler/asm_intrinsifier.h"
#include "vm/compiler/assembler/assembler.h"
-#include "vm/compiler/backend/flow_graph_compiler.h"
-#include "vm/cpu.h"
-#include "vm/dart_entry.h"
-#include "vm/object.h"
-#include "vm/object_store.h"
-#include "vm/regexp_assembler.h"
-#include "vm/symbols.h"
-#include "vm/timeline.h"
namespace dart {
+namespace compiler {
// When entering intrinsics code:
// R4: Arguments descriptor
@@ -29,7 +24,7 @@
#define __ assembler->
-intptr_t Intrinsifier::ParameterSlotFromSp() {
+intptr_t AsmIntrinsifier::ParameterSlotFromSp() {
return -1;
}
@@ -37,7 +32,7 @@
return ((1 << reg) & kAbiPreservedCpuRegs) != 0;
}
-void Intrinsifier::IntrinsicCallPrologue(Assembler* assembler) {
+void AsmIntrinsifier::IntrinsicCallPrologue(Assembler* assembler) {
ASSERT(IsABIPreservedRegister(CODE_REG));
ASSERT(IsABIPreservedRegister(ARGS_DESC_REG));
ASSERT(IsABIPreservedRegister(CALLEE_SAVED_TEMP));
@@ -47,49 +42,50 @@
assembler->mov(CALLEE_SAVED_TEMP, Operand(LR));
}
-void Intrinsifier::IntrinsicCallEpilogue(Assembler* assembler) {
+void AsmIntrinsifier::IntrinsicCallEpilogue(Assembler* assembler) {
// Restore LR.
assembler->Comment("IntrinsicCallEpilogue");
assembler->mov(LR, Operand(CALLEE_SAVED_TEMP));
}
-// Allocate a GrowableObjectArray using the backing array specified.
+// Allocate a GrowableObjectArray:: using the backing array specified.
// On stack: type argument (+1), data (+0).
-void Intrinsifier::GrowableArray_Allocate(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::GrowableArray_Allocate(Assembler* assembler,
+ Label* normal_ir_body) {
// The newly allocated object is returned in R0.
- const intptr_t kTypeArgumentsOffset = 1 * kWordSize;
- const intptr_t kArrayOffset = 0 * kWordSize;
+ const intptr_t kTypeArgumentsOffset = 1 * target::kWordSize;
+ const intptr_t kArrayOffset = 0 * target::kWordSize;
// Try allocating in new space.
- const Class& cls = Class::Handle(
- Isolate::Current()->object_store()->growable_object_array_class());
+ const Class& cls = GrowableObjectArrayClass();
__ TryAllocate(cls, normal_ir_body, R0, R1);
// Store backing array object in growable array object.
__ ldr(R1, Address(SP, kArrayOffset)); // Data argument.
// R0 is new, no barrier needed.
__ StoreIntoObjectNoBarrier(
- R0, FieldAddress(R0, GrowableObjectArray::data_offset()), R1);
+ R0, FieldAddress(R0, target::GrowableObjectArray::data_offset()), R1);
// R0: new growable array object start as a tagged pointer.
// Store the type argument field in the growable array object.
__ ldr(R1, Address(SP, kTypeArgumentsOffset)); // Type argument.
__ StoreIntoObjectNoBarrier(
- R0, FieldAddress(R0, GrowableObjectArray::type_arguments_offset()), R1);
+ R0,
+ FieldAddress(R0, target::GrowableObjectArray::type_arguments_offset()),
+ R1);
// Set the length field in the growable array object to 0.
__ LoadImmediate(R1, 0);
__ StoreIntoObjectNoBarrier(
- R0, FieldAddress(R0, GrowableObjectArray::length_offset()), R1);
+ R0, FieldAddress(R0, target::GrowableObjectArray::length_offset()), R1);
__ Ret(); // Returns the newly allocated object in R0.
__ Bind(normal_ir_body);
}
-#define TYPED_ARRAY_ALLOCATION(type_name, cid, max_len, scale_shift) \
+#define TYPED_ARRAY_ALLOCATION(cid, max_len, scale_shift) \
Label fall_through; \
- const intptr_t kArrayLengthStackOffset = 0 * kWordSize; \
+ const intptr_t kArrayLengthStackOffset = 0 * target::kWordSize; \
NOT_IN_PRODUCT(__ LoadAllocationStatsAddress(R2, cid)); \
NOT_IN_PRODUCT(__ MaybeTraceAllocation(R2, normal_ir_body)); \
__ ldr(R2, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \
@@ -106,10 +102,11 @@
__ b(normal_ir_body, GT); \
__ mov(R2, Operand(R2, LSL, scale_shift)); \
const intptr_t fixed_size_plus_alignment_padding = \
- sizeof(Raw##type_name) + kObjectAlignment - 1; \
+ target::TypedData::InstanceSize() + \
+ target::ObjectAlignment::kObjectAlignment - 1; \
__ AddImmediate(R2, fixed_size_plus_alignment_padding); \
- __ bic(R2, R2, Operand(kObjectAlignment - 1)); \
- __ ldr(R0, Address(THR, Thread::top_offset())); \
+ __ bic(R2, R2, Operand(target::ObjectAlignment::kObjectAlignment - 1)); \
+ __ ldr(R0, Address(THR, target::Thread::top_offset())); \
\
/* R2: allocation size. */ \
__ adds(R1, R0, Operand(R2)); \
@@ -119,14 +116,14 @@
/* R0: potential new object start. */ \
/* R1: potential next object start. */ \
/* R2: allocation size. */ \
- __ ldr(IP, Address(THR, Thread::end_offset())); \
+ __ ldr(IP, Address(THR, target::Thread::end_offset())); \
__ cmp(R1, Operand(IP)); \
__ b(normal_ir_body, CS); \
\
/* Successfully allocated the object(s), now update top to point to */ \
/* next object start and initialize the object. */ \
NOT_IN_PRODUCT(__ LoadAllocationStatsAddress(R4, cid)); \
- __ str(R1, Address(THR, Thread::top_offset())); \
+ __ str(R1, Address(THR, target::Thread::top_offset())); \
__ AddImmediate(R0, kHeapObjectTag); \
/* Initialize the tags. */ \
/* R0: new object start as a tagged pointer. */ \
@@ -134,19 +131,20 @@
/* R2: allocation size. */ \
/* R4: allocation stats address */ \
{ \
- __ CompareImmediate(R2, RawObject::SizeTag::kMaxSizeTag); \
+ __ CompareImmediate(R2, target::RawObject::kSizeTagMaxSizeTag); \
__ mov(R3, \
- Operand(R2, LSL, RawObject::kSizeTagPos - kObjectAlignmentLog2), \
+ Operand(R2, LSL, \
+ target::RawObject::kTagBitsSizeTagPos - \
+ target::ObjectAlignment::kObjectAlignmentLog2), \
LS); \
__ mov(R3, Operand(0), HI); \
\
/* Get the class index and insert it into the tags. */ \
- uint32_t tags = 0; \
- tags = RawObject::ClassIdTag::update(cid, tags); \
- tags = RawObject::NewBit::update(true, tags); \
+ uint32_t tags = \
+ target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0); \
__ LoadImmediate(TMP, tags); \
__ orr(R3, R3, Operand(TMP)); \
- __ str(R3, FieldAddress(R0, type_name::tags_offset())); /* Tags. */ \
+ __ str(R3, FieldAddress(R0, target::Object::tags_offset())); /* Tags. */ \
} \
/* Set the length field. */ \
/* R0: new object start as a tagged pointer. */ \
@@ -155,7 +153,7 @@
/* R4: allocation stats address. */ \
__ ldr(R3, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \
__ StoreIntoObjectNoBarrier( \
- R0, FieldAddress(R0, type_name::length_offset()), R3); \
+ R0, FieldAddress(R0, target::TypedData::length_offset()), R3); \
/* Initialize all array elements to 0. */ \
/* R0: new object start as a tagged pointer. */ \
/* R1: new object end address. */ \
@@ -166,14 +164,14 @@
/* data area to be initialized. */ \
__ LoadImmediate(R8, 0); \
__ mov(R9, Operand(R8)); \
- __ AddImmediate(R3, R0, sizeof(Raw##type_name) - 1); \
+ __ AddImmediate(R3, R0, target::TypedData::InstanceSize() - 1); \
Label init_loop; \
__ Bind(&init_loop); \
- __ AddImmediate(R3, 2 * kWordSize); \
+ __ AddImmediate(R3, 2 * target::kWordSize); \
__ cmp(R3, Operand(R1)); \
- __ strd(R8, R9, R3, -2 * kWordSize, LS); \
+ __ strd(R8, R9, R3, -2 * target::kWordSize, LS); \
__ b(&init_loop, CC); \
- __ str(R8, Address(R3, -2 * kWordSize), HI); \
+ __ str(R8, Address(R3, -2 * target::kWordSize), HI); \
\
NOT_IN_PRODUCT(__ IncrementAllocationStatsWithSize(R4, R2)); \
__ Ret(); \
@@ -197,12 +195,12 @@
}
#define TYPED_DATA_ALLOCATOR(clazz) \
- void Intrinsifier::TypedData_##clazz##_factory(Assembler* assembler, \
- Label* normal_ir_body) { \
- intptr_t size = TypedData::ElementSizeInBytes(kTypedData##clazz##Cid); \
- intptr_t max_len = TypedData::MaxNewSpaceElements(kTypedData##clazz##Cid); \
+ void AsmIntrinsifier::TypedData_##clazz##_factory(Assembler* assembler, \
+ Label* normal_ir_body) { \
+ intptr_t size = TypedDataElementSizeInBytes(kTypedData##clazz##Cid); \
+ intptr_t max_len = TypedDataMaxNewSpaceElements(kTypedData##clazz##Cid); \
int shift = GetScaleFactor(size); \
- TYPED_ARRAY_ALLOCATION(TypedData, kTypedData##clazz##Cid, max_len, shift); \
+ TYPED_ARRAY_ALLOCATION(kTypedData##clazz##Cid, max_len, shift); \
}
CLASS_LIST_TYPED_DATA(TYPED_DATA_ALLOCATOR)
#undef TYPED_DATA_ALLOCATOR
@@ -210,28 +208,28 @@
// Loads args from stack into R0 and R1
// Tests if they are smis, jumps to label not_smi if not.
static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) {
- __ ldr(R0, Address(SP, +0 * kWordSize));
- __ ldr(R1, Address(SP, +1 * kWordSize));
+ __ ldr(R0, Address(SP, +0 * target::kWordSize));
+ __ ldr(R1, Address(SP, +1 * target::kWordSize));
__ orr(TMP, R0, Operand(R1));
__ tst(TMP, Operand(kSmiTagMask));
__ b(not_smi, NE);
}
-void Intrinsifier::Integer_addFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_addFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
TestBothArgumentsSmis(assembler, normal_ir_body); // Checks two smis.
- __ adds(R0, R0, Operand(R1)); // Adds.
- __ bx(LR, VC); // Return if no overflow.
+ __ adds(R0, R0, Operand(R1)); // Adds.
+ __ bx(LR, VC); // Return if no overflow.
// Otherwise fall through.
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_add(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_add(Assembler* assembler, Label* normal_ir_body) {
Integer_addFromInteger(assembler, normal_ir_body);
}
-void Intrinsifier::Integer_subFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_subFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
TestBothArgumentsSmis(assembler, normal_ir_body);
__ subs(R0, R0, Operand(R1)); // Subtract.
__ bx(LR, VC); // Return if no overflow.
@@ -239,7 +237,7 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_sub(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_sub(Assembler* assembler, Label* normal_ir_body) {
TestBothArgumentsSmis(assembler, normal_ir_body);
__ subs(R0, R1, Operand(R0)); // Subtract.
__ bx(LR, VC); // Return if no overflow.
@@ -247,8 +245,8 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_mulFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_mulFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
TestBothArgumentsSmis(assembler, normal_ir_body); // checks two smis
__ SmiUntag(R0); // Untags R0. We only want result shifted by one.
__ smull(R0, IP, R0, R1); // IP:R0 <- R0 * R1.
@@ -257,7 +255,7 @@
__ Bind(normal_ir_body); // Fall through on overflow.
}
-void Intrinsifier::Integer_mul(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_mul(Assembler* assembler, Label* normal_ir_body) {
Integer_mulFromInteger(assembler, normal_ir_body);
}
@@ -316,14 +314,14 @@
// res = res + right;
// }
// }
-void Intrinsifier::Integer_moduloFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_moduloFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
if (!TargetCPUFeatures::can_divide()) {
return;
}
// Check to see if we have integer division
- __ ldr(R1, Address(SP, +0 * kWordSize));
- __ ldr(R0, Address(SP, +1 * kWordSize));
+ __ ldr(R1, Address(SP, +0 * target::kWordSize));
+ __ ldr(R0, Address(SP, +1 * target::kWordSize));
__ orr(TMP, R0, Operand(R1));
__ tst(TMP, Operand(kSmiTagMask));
__ b(normal_ir_body, NE);
@@ -349,8 +347,8 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_truncDivide(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_truncDivide(Assembler* assembler,
+ Label* normal_ir_body) {
if (!TargetCPUFeatures::can_divide()) {
return;
}
@@ -373,9 +371,10 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_negate(Assembler* assembler, Label* normal_ir_body) {
- __ ldr(R0, Address(SP, +0 * kWordSize)); // Grab first argument.
- __ tst(R0, Operand(kSmiTagMask)); // Test for Smi.
+void AsmIntrinsifier::Integer_negate(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ ldr(R0, Address(SP, +0 * target::kWordSize)); // Grab first argument.
+ __ tst(R0, Operand(kSmiTagMask)); // Test for Smi.
__ b(normal_ir_body, NE);
__ rsbs(R0, R0, Operand(0)); // R0 is a Smi. R0 <- 0 - R0.
__ bx(LR, VC); // Return if there wasn't overflow, fall through otherwise.
@@ -383,8 +382,8 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_bitAndFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_bitAndFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
TestBothArgumentsSmis(assembler, normal_ir_body); // checks two smis
__ and_(R0, R0, Operand(R1));
@@ -392,12 +391,13 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_bitAnd(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_bitAnd(Assembler* assembler,
+ Label* normal_ir_body) {
Integer_bitAndFromInteger(assembler, normal_ir_body);
}
-void Intrinsifier::Integer_bitOrFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_bitOrFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
TestBothArgumentsSmis(assembler, normal_ir_body); // checks two smis
__ orr(R0, R0, Operand(R1));
@@ -405,12 +405,13 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_bitOr(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_bitOr(Assembler* assembler,
+ Label* normal_ir_body) {
Integer_bitOrFromInteger(assembler, normal_ir_body);
}
-void Intrinsifier::Integer_bitXorFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_bitXorFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
TestBothArgumentsSmis(assembler, normal_ir_body); // checks two smis
__ eor(R0, R0, Operand(R1));
@@ -418,15 +419,16 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_bitXor(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_bitXor(Assembler* assembler,
+ Label* normal_ir_body) {
Integer_bitXorFromInteger(assembler, normal_ir_body);
}
-void Intrinsifier::Integer_shl(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_shl(Assembler* assembler, Label* normal_ir_body) {
ASSERT(kSmiTagShift == 1);
ASSERT(kSmiTag == 0);
TestBothArgumentsSmis(assembler, normal_ir_body);
- __ CompareImmediate(R0, Smi::RawValue(Smi::kBits));
+ __ CompareImmediate(R0, target::ToRawSmi(target::Smi::kBits));
__ b(normal_ir_body, HI);
__ SmiUntag(R0);
@@ -459,12 +461,12 @@
// Now NOTFP has the bits that fall off of R1 on a left shift.
__ mov(R1, Operand(R1, LSL, R0)); // R1 gets the low bits.
- const Class& mint_class =
- Class::Handle(Isolate::Current()->object_store()->mint_class());
+ const Class& mint_class = MintClass();
__ TryAllocate(mint_class, normal_ir_body, R0, R2);
- __ str(R1, FieldAddress(R0, Mint::value_offset()));
- __ str(NOTFP, FieldAddress(R0, Mint::value_offset() + kWordSize));
+ __ str(R1, FieldAddress(R0, target::Mint::value_offset()));
+ __ str(NOTFP,
+ FieldAddress(R0, target::Mint::value_offset() + target::kWordSize));
__ Ret();
__ Bind(normal_ir_body);
}
@@ -489,8 +491,9 @@
__ b(not_smi_or_mint, NE);
// Mint.
- __ ldr(res_lo, FieldAddress(reg, Mint::value_offset()));
- __ ldr(res_hi, FieldAddress(reg, Mint::value_offset() + kWordSize));
+ __ ldr(res_lo, FieldAddress(reg, target::Mint::value_offset()));
+ __ ldr(res_hi,
+ FieldAddress(reg, target::Mint::value_offset() + target::kWordSize));
__ Bind(&done);
}
@@ -504,10 +507,10 @@
__ cmp(R1, Operand(R0));
__ b(&is_true, true_condition);
__ Bind(&is_false);
- __ LoadObject(R0, Bool::False());
+ __ LoadObject(R0, CastHandle<Object>(FalseObject()));
__ Ret();
__ Bind(&is_true);
- __ LoadObject(R0, Bool::True());
+ __ LoadObject(R0, CastHandle<Object>(TrueObject()));
__ Ret();
// 64-bit comparison
@@ -551,39 +554,39 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_greaterThanFromInt(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_greaterThanFromInt(Assembler* assembler,
+ Label* normal_ir_body) {
CompareIntegers(assembler, normal_ir_body, LT);
}
-void Intrinsifier::Integer_lessThan(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_lessThan(Assembler* assembler,
+ Label* normal_ir_body) {
Integer_greaterThanFromInt(assembler, normal_ir_body);
}
-void Intrinsifier::Integer_greaterThan(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_greaterThan(Assembler* assembler,
+ Label* normal_ir_body) {
CompareIntegers(assembler, normal_ir_body, GT);
}
-void Intrinsifier::Integer_lessEqualThan(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_lessEqualThan(Assembler* assembler,
+ Label* normal_ir_body) {
CompareIntegers(assembler, normal_ir_body, LE);
}
-void Intrinsifier::Integer_greaterEqualThan(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_greaterEqualThan(Assembler* assembler,
+ Label* normal_ir_body) {
CompareIntegers(assembler, normal_ir_body, GE);
}
// This is called for Smi and Mint receivers. The right argument
// can be Smi, Mint or double.
-void Intrinsifier::Integer_equalToInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_equalToInteger(Assembler* assembler,
+ Label* normal_ir_body) {
Label true_label, check_for_mint;
// For integer receiver '===' check first.
- __ ldr(R0, Address(SP, 0 * kWordSize));
- __ ldr(R1, Address(SP, 1 * kWordSize));
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
+ __ ldr(R1, Address(SP, 1 * target::kWordSize));
__ cmp(R0, Operand(R1));
__ b(&true_label, EQ);
@@ -592,10 +595,10 @@
__ b(&check_for_mint, NE); // If R0 or R1 is not a smi do Mint checks.
// Both arguments are smi, '===' is good enough.
- __ LoadObject(R0, Bool::False());
+ __ LoadObject(R0, CastHandle<Object>(FalseObject()));
__ Ret();
__ Bind(&true_label);
- __ LoadObject(R0, Bool::True());
+ __ LoadObject(R0, CastHandle<Object>(TrueObject()));
__ Ret();
// At least one of the arguments was not Smi.
@@ -611,7 +614,8 @@
__ CompareClassId(R0, kDoubleCid, R2);
__ b(normal_ir_body, EQ);
- __ LoadObject(R0, Bool::False()); // Smi == Mint -> false.
+ __ LoadObject(R0,
+ CastHandle<Object>(FalseObject())); // Smi == Mint -> false.
__ Ret();
__ Bind(&receiver_not_smi);
@@ -621,18 +625,19 @@
__ b(normal_ir_body, NE);
// Receiver is Mint, return false if right is Smi.
__ tst(R0, Operand(kSmiTagMask));
- __ LoadObject(R0, Bool::False(), EQ);
+ __ LoadObject(R0, CastHandle<Object>(FalseObject()), EQ);
__ bx(LR, EQ);
// TODO(srdjan): Implement Mint == Mint comparison.
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_equal(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_equal(Assembler* assembler,
+ Label* normal_ir_body) {
Integer_equalToInteger(assembler, normal_ir_body);
}
-void Intrinsifier::Integer_sar(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_sar(Assembler* assembler, Label* normal_ir_body) {
TestBothArgumentsSmis(assembler, normal_ir_body);
// Shift amount in R0. Value to shift in R1.
@@ -651,15 +656,17 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::Smi_bitNegate(Assembler* assembler, Label* normal_ir_body) {
- __ ldr(R0, Address(SP, 0 * kWordSize));
+void AsmIntrinsifier::Smi_bitNegate(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
__ mvn(R0, Operand(R0));
__ bic(R0, R0, Operand(kSmiTagMask)); // Remove inverted smi-tag.
__ Ret();
}
-void Intrinsifier::Smi_bitLength(Assembler* assembler, Label* normal_ir_body) {
- __ ldr(R0, Address(SP, 0 * kWordSize));
+void AsmIntrinsifier::Smi_bitLength(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
__ SmiUntag(R0);
// XOR with sign bit to complement bits if value is negative.
__ eor(R0, R0, Operand(R0, ASR, 31));
@@ -669,29 +676,29 @@
__ Ret();
}
-void Intrinsifier::Smi_bitAndFromSmi(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Smi_bitAndFromSmi(Assembler* assembler,
+ Label* normal_ir_body) {
Integer_bitAndFromInteger(assembler, normal_ir_body);
}
-void Intrinsifier::Bigint_lsh(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Bigint_lsh(Assembler* assembler, Label* normal_ir_body) {
// static void _lsh(Uint32List x_digits, int x_used, int n,
// Uint32List r_digits)
// R0 = x_used, R1 = x_digits, x_used > 0, x_used is Smi.
- __ ldrd(R0, R1, SP, 2 * kWordSize);
+ __ ldrd(R0, R1, SP, 2 * target::kWordSize);
// R2 = r_digits, R3 = n, n is Smi, n % _DIGIT_BITS != 0.
- __ ldrd(R2, R3, SP, 0 * kWordSize);
+ __ ldrd(R2, R3, SP, 0 * target::kWordSize);
__ SmiUntag(R3);
// R4 = n ~/ _DIGIT_BITS
__ Asr(R4, R3, Operand(5));
// R8 = &x_digits[0]
- __ add(R8, R1, Operand(TypedData::data_offset() - kHeapObjectTag));
+ __ add(R8, R1, Operand(target::TypedData::data_offset() - kHeapObjectTag));
// NOTFP = &x_digits[x_used]
__ add(NOTFP, R8, Operand(R0, LSL, 1));
// R6 = &r_digits[1]
__ add(R6, R2,
- Operand(TypedData::data_offset() - kHeapObjectTag +
+ Operand(target::TypedData::data_offset() - kHeapObjectTag +
kBytesPerBigIntDigit));
// R6 = &r_digits[x_used + n ~/ _DIGIT_BITS + 1]
__ add(R4, R4, Operand(R0, ASR, 1));
@@ -710,25 +717,25 @@
__ teq(NOTFP, Operand(R8));
__ b(&loop, NE);
__ str(R9, Address(R6, -kBytesPerBigIntDigit, Address::PreIndex));
- __ LoadObject(R0, Object::null_object());
+ __ LoadObject(R0, NullObject());
__ Ret();
}
-void Intrinsifier::Bigint_rsh(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Bigint_rsh(Assembler* assembler, Label* normal_ir_body) {
// static void _rsh(Uint32List x_digits, int x_used, int n,
// Uint32List r_digits)
// R0 = x_used, R1 = x_digits, x_used > 0, x_used is Smi.
- __ ldrd(R0, R1, SP, 2 * kWordSize);
+ __ ldrd(R0, R1, SP, 2 * target::kWordSize);
// R2 = r_digits, R3 = n, n is Smi, n % _DIGIT_BITS != 0.
- __ ldrd(R2, R3, SP, 0 * kWordSize);
+ __ ldrd(R2, R3, SP, 0 * target::kWordSize);
__ SmiUntag(R3);
// R4 = n ~/ _DIGIT_BITS
__ Asr(R4, R3, Operand(5));
// R6 = &r_digits[0]
- __ add(R6, R2, Operand(TypedData::data_offset() - kHeapObjectTag));
+ __ add(R6, R2, Operand(target::TypedData::data_offset() - kHeapObjectTag));
// NOTFP = &x_digits[n ~/ _DIGIT_BITS]
- __ add(NOTFP, R1, Operand(TypedData::data_offset() - kHeapObjectTag));
+ __ add(NOTFP, R1, Operand(target::TypedData::data_offset() - kHeapObjectTag));
__ add(NOTFP, NOTFP, Operand(R4, LSL, 2));
// R8 = &r_digits[x_used - n ~/ _DIGIT_BITS - 1]
__ add(R4, R4, Operand(1));
@@ -753,29 +760,30 @@
__ teq(R6, Operand(R8));
__ b(&loop, NE);
__ str(R9, Address(R6, 0));
- __ LoadObject(R0, Object::null_object());
+ __ LoadObject(R0, NullObject());
__ Ret();
}
-void Intrinsifier::Bigint_absAdd(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Bigint_absAdd(Assembler* assembler,
+ Label* normal_ir_body) {
// static void _absAdd(Uint32List digits, int used,
// Uint32List a_digits, int a_used,
// Uint32List r_digits)
// R0 = used, R1 = digits
- __ ldrd(R0, R1, SP, 3 * kWordSize);
+ __ ldrd(R0, R1, SP, 3 * target::kWordSize);
// R1 = &digits[0]
- __ add(R1, R1, Operand(TypedData::data_offset() - kHeapObjectTag));
+ __ add(R1, R1, Operand(target::TypedData::data_offset() - kHeapObjectTag));
// R2 = a_used, R3 = a_digits
- __ ldrd(R2, R3, SP, 1 * kWordSize);
+ __ ldrd(R2, R3, SP, 1 * target::kWordSize);
// R3 = &a_digits[0]
- __ add(R3, R3, Operand(TypedData::data_offset() - kHeapObjectTag));
+ __ add(R3, R3, Operand(target::TypedData::data_offset() - kHeapObjectTag));
// R8 = r_digits
- __ ldr(R8, Address(SP, 0 * kWordSize));
+ __ ldr(R8, Address(SP, 0 * target::kWordSize));
// R8 = &r_digits[0]
- __ add(R8, R8, Operand(TypedData::data_offset() - kHeapObjectTag));
+ __ add(R8, R8, Operand(target::TypedData::data_offset() - kHeapObjectTag));
// NOTFP = &digits[a_used >> 1], a_used is Smi.
__ add(NOTFP, R1, Operand(R2, LSL, 1));
@@ -812,29 +820,30 @@
__ adc(R4, R4, Operand(0));
__ str(R4, Address(R8, 0));
- __ LoadObject(R0, Object::null_object());
+ __ LoadObject(R0, NullObject());
__ Ret();
}
-void Intrinsifier::Bigint_absSub(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Bigint_absSub(Assembler* assembler,
+ Label* normal_ir_body) {
// static void _absSub(Uint32List digits, int used,
// Uint32List a_digits, int a_used,
// Uint32List r_digits)
// R0 = used, R1 = digits
- __ ldrd(R0, R1, SP, 3 * kWordSize);
+ __ ldrd(R0, R1, SP, 3 * target::kWordSize);
// R1 = &digits[0]
- __ add(R1, R1, Operand(TypedData::data_offset() - kHeapObjectTag));
+ __ add(R1, R1, Operand(target::TypedData::data_offset() - kHeapObjectTag));
// R2 = a_used, R3 = a_digits
- __ ldrd(R2, R3, SP, 1 * kWordSize);
+ __ ldrd(R2, R3, SP, 1 * target::kWordSize);
// R3 = &a_digits[0]
- __ add(R3, R3, Operand(TypedData::data_offset() - kHeapObjectTag));
+ __ add(R3, R3, Operand(target::TypedData::data_offset() - kHeapObjectTag));
// R8 = r_digits
- __ ldr(R8, Address(SP, 0 * kWordSize));
+ __ ldr(R8, Address(SP, 0 * target::kWordSize));
// R8 = &r_digits[0]
- __ add(R8, R8, Operand(TypedData::data_offset() - kHeapObjectTag));
+ __ add(R8, R8, Operand(target::TypedData::data_offset() - kHeapObjectTag));
// NOTFP = &digits[a_used >> 1], a_used is Smi.
__ add(NOTFP, R1, Operand(R2, LSL, 1));
@@ -867,11 +876,12 @@
__ b(&carry_loop, NE);
__ Bind(&done);
- __ LoadObject(R0, Object::null_object());
+ __ LoadObject(R0, NullObject());
__ Ret();
}
-void Intrinsifier::Bigint_mulAdd(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Bigint_mulAdd(Assembler* assembler,
+ Label* normal_ir_body) {
// Pseudo code:
// static int _mulAdd(Uint32List x_digits, int xi,
// Uint32List m_digits, int i,
@@ -901,26 +911,26 @@
Label done;
// R3 = x, no_op if x == 0
- __ ldrd(R0, R1, SP, 5 * kWordSize); // R0 = xi as Smi, R1 = x_digits.
+ __ ldrd(R0, R1, SP, 5 * target::kWordSize); // R0 = xi as Smi, R1 = x_digits.
__ add(R1, R1, Operand(R0, LSL, 1));
- __ ldr(R3, FieldAddress(R1, TypedData::data_offset()));
+ __ ldr(R3, FieldAddress(R1, target::TypedData::data_offset()));
__ tst(R3, Operand(R3));
__ b(&done, EQ);
// R8 = SmiUntag(n), no_op if n == 0
- __ ldr(R8, Address(SP, 0 * kWordSize));
+ __ ldr(R8, Address(SP, 0 * target::kWordSize));
__ Asrs(R8, R8, Operand(kSmiTagSize));
__ b(&done, EQ);
// R4 = mip = &m_digits[i >> 1]
- __ ldrd(R0, R1, SP, 3 * kWordSize); // R0 = i as Smi, R1 = m_digits.
+ __ ldrd(R0, R1, SP, 3 * target::kWordSize); // R0 = i as Smi, R1 = m_digits.
__ add(R1, R1, Operand(R0, LSL, 1));
- __ add(R4, R1, Operand(TypedData::data_offset() - kHeapObjectTag));
+ __ add(R4, R1, Operand(target::TypedData::data_offset() - kHeapObjectTag));
// R9 = ajp = &a_digits[j >> 1]
- __ ldrd(R0, R1, SP, 1 * kWordSize); // R0 = j as Smi, R1 = a_digits.
+ __ ldrd(R0, R1, SP, 1 * target::kWordSize); // R0 = j as Smi, R1 = a_digits.
__ add(R1, R1, Operand(R0, LSL, 1));
- __ add(R9, R1, Operand(TypedData::data_offset() - kHeapObjectTag));
+ __ add(R9, R1, Operand(target::TypedData::data_offset() - kHeapObjectTag));
// R1 = c = 0
__ mov(R1, Operand(0));
@@ -968,11 +978,12 @@
__ b(&propagate_carry_loop, CS);
__ Bind(&done);
- __ mov(R0, Operand(Smi::RawValue(1))); // One digit processed.
+ __ mov(R0, Operand(target::ToRawSmi(1))); // One digit processed.
__ Ret();
}
-void Intrinsifier::Bigint_sqrAdd(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Bigint_sqrAdd(Assembler* assembler,
+ Label* normal_ir_body) {
// Pseudo code:
// static int _sqrAdd(Uint32List x_digits, int i,
// Uint32List a_digits, int used) {
@@ -1000,9 +1011,9 @@
// }
// R4 = xip = &x_digits[i >> 1]
- __ ldrd(R2, R3, SP, 2 * kWordSize); // R2 = i as Smi, R3 = x_digits
+ __ ldrd(R2, R3, SP, 2 * target::kWordSize); // R2 = i as Smi, R3 = x_digits
__ add(R3, R3, Operand(R2, LSL, 1));
- __ add(R4, R3, Operand(TypedData::data_offset() - kHeapObjectTag));
+ __ add(R4, R3, Operand(target::TypedData::data_offset() - kHeapObjectTag));
// R3 = x = *xip++, return if x == 0
Label x_zero;
@@ -1011,9 +1022,9 @@
__ b(&x_zero, EQ);
// NOTFP = ajp = &a_digits[i]
- __ ldr(R1, Address(SP, 1 * kWordSize)); // a_digits
- __ add(R1, R1, Operand(R2, LSL, 2)); // j == 2*i, i is Smi.
- __ add(NOTFP, R1, Operand(TypedData::data_offset() - kHeapObjectTag));
+ __ ldr(R1, Address(SP, 1 * target::kWordSize)); // a_digits
+ __ add(R1, R1, Operand(R2, LSL, 2)); // j == 2*i, i is Smi.
+ __ add(NOTFP, R1, Operand(target::TypedData::data_offset() - kHeapObjectTag));
// R8:R0 = t = x*x + *ajp
__ ldr(R0, Address(NOTFP, 0));
@@ -1028,7 +1039,7 @@
__ mov(R9, Operand(0));
// int n = used - i - 1; while (--n >= 0) ...
- __ ldr(R0, Address(SP, 0 * kWordSize)); // used is Smi
+ __ ldr(R0, Address(SP, 0 * target::kWordSize)); // used is Smi
__ sub(R6, R0, Operand(R2));
__ mov(R0, Operand(2)); // n = used - i - 2; if (n >= 0) ... while (--n >= 0)
__ rsbs(R6, R0, Operand(R6, ASR, kSmiTagSize));
@@ -1081,17 +1092,17 @@
__ strd(R8, R9, NOTFP, 0);
__ Bind(&x_zero);
- __ mov(R0, Operand(Smi::RawValue(1))); // One digit processed.
+ __ mov(R0, Operand(target::ToRawSmi(1))); // One digit processed.
__ Ret();
}
-void Intrinsifier::Bigint_estimateQuotientDigit(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Bigint_estimateQuotientDigit(Assembler* assembler,
+ Label* normal_ir_body) {
// No unsigned 64-bit / 32-bit divide instruction.
}
-void Intrinsifier::Montgomery_mulMod(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Montgomery_mulMod(Assembler* assembler,
+ Label* normal_ir_body) {
// Pseudo code:
// static int _mulMod(Uint32List args, Uint32List digits, int i) {
// uint32_t rho = args[_RHO]; // _RHO == 2.
@@ -1102,25 +1113,25 @@
// }
// R4 = args
- __ ldr(R4, Address(SP, 2 * kWordSize)); // args
+ __ ldr(R4, Address(SP, 2 * target::kWordSize)); // args
// R3 = rho = args[2]
- __ ldr(R3,
- FieldAddress(R4, TypedData::data_offset() + 2 * kBytesPerBigIntDigit));
+ __ ldr(R3, FieldAddress(R4, target::TypedData::data_offset() +
+ 2 * kBytesPerBigIntDigit));
// R2 = digits[i >> 1]
- __ ldrd(R0, R1, SP, 0 * kWordSize); // R0 = i as Smi, R1 = digits
+ __ ldrd(R0, R1, SP, 0 * target::kWordSize); // R0 = i as Smi, R1 = digits
__ add(R1, R1, Operand(R0, LSL, 1));
- __ ldr(R2, FieldAddress(R1, TypedData::data_offset()));
+ __ ldr(R2, FieldAddress(R1, target::TypedData::data_offset()));
// R1:R0 = t = rho*d
__ umull(R0, R1, R2, R3);
// args[4] = t mod DIGIT_BASE = low32(t)
- __ str(R0,
- FieldAddress(R4, TypedData::data_offset() + 4 * kBytesPerBigIntDigit));
+ __ str(R0, FieldAddress(R4, target::TypedData::data_offset() +
+ 4 * kBytesPerBigIntDigit));
- __ mov(R0, Operand(Smi::RawValue(1))); // One digit processed.
+ __ mov(R0, Operand(target::ToRawSmi(1))); // One digit processed.
__ Ret();
}
@@ -1130,7 +1141,7 @@
static void TestLastArgumentIsDouble(Assembler* assembler,
Label* is_smi,
Label* not_double_smi) {
- __ ldr(R0, Address(SP, 0 * kWordSize));
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
__ tst(R0, Operand(kSmiTagMask));
__ b(is_smi, EQ);
__ CompareClassId(R0, kDoubleCid, R1);
@@ -1151,17 +1162,17 @@
TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
// Both arguments are double, right operand is in R0.
- __ LoadDFromOffset(D1, R0, Double::value_offset() - kHeapObjectTag);
+ __ LoadDFromOffset(D1, R0, target::Double::value_offset() - kHeapObjectTag);
__ Bind(&double_op);
- __ ldr(R0, Address(SP, 1 * kWordSize)); // Left argument.
- __ LoadDFromOffset(D0, R0, Double::value_offset() - kHeapObjectTag);
+ __ ldr(R0, Address(SP, 1 * target::kWordSize)); // Left argument.
+ __ LoadDFromOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
__ vcmpd(D0, D1);
__ vmstat();
- __ LoadObject(R0, Bool::False());
+ __ LoadObject(R0, CastHandle<Object>(FalseObject()));
// Return false if D0 or D1 was NaN before checking true condition.
__ bx(LR, VS);
- __ LoadObject(R0, Bool::True(), true_condition);
+ __ LoadObject(R0, CastHandle<Object>(TrueObject()), true_condition);
__ Ret();
__ Bind(&is_smi); // Convert R0 to a double.
@@ -1173,27 +1184,28 @@
}
}
-void Intrinsifier::Double_greaterThan(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Double_greaterThan(Assembler* assembler,
+ Label* normal_ir_body) {
CompareDoubles(assembler, normal_ir_body, HI);
}
-void Intrinsifier::Double_greaterEqualThan(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Double_greaterEqualThan(Assembler* assembler,
+ Label* normal_ir_body) {
CompareDoubles(assembler, normal_ir_body, CS);
}
-void Intrinsifier::Double_lessThan(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Double_lessThan(Assembler* assembler,
+ Label* normal_ir_body) {
CompareDoubles(assembler, normal_ir_body, CC);
}
-void Intrinsifier::Double_equal(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Double_equal(Assembler* assembler,
+ Label* normal_ir_body) {
CompareDoubles(assembler, normal_ir_body, EQ);
}
-void Intrinsifier::Double_lessEqualThan(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Double_lessEqualThan(Assembler* assembler,
+ Label* normal_ir_body) {
CompareDoubles(assembler, normal_ir_body, LS);
}
@@ -1207,10 +1219,10 @@
TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
// Both arguments are double, right operand is in R0.
- __ LoadDFromOffset(D1, R0, Double::value_offset() - kHeapObjectTag);
+ __ LoadDFromOffset(D1, R0, target::Double::value_offset() - kHeapObjectTag);
__ Bind(&double_op);
- __ ldr(R0, Address(SP, 1 * kWordSize)); // Left argument.
- __ LoadDFromOffset(D0, R0, Double::value_offset() - kHeapObjectTag);
+ __ ldr(R0, Address(SP, 1 * target::kWordSize)); // Left argument.
+ __ LoadDFromOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
switch (kind) {
case Token::kADD:
__ vaddd(D0, D0, D1);
@@ -1227,11 +1239,10 @@
default:
UNREACHABLE();
}
- const Class& double_class =
- Class::Handle(Isolate::Current()->object_store()->double_class());
+ const Class& double_class = DoubleClass();
__ TryAllocate(double_class, normal_ir_body, R0,
R1); // Result register.
- __ StoreDToOffset(D0, R0, Double::value_offset() - kHeapObjectTag);
+ __ StoreDToOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
__ Ret();
__ Bind(&is_smi); // Convert R0 to a double.
__ SmiUntag(R0);
@@ -1242,114 +1253,113 @@
}
}
-void Intrinsifier::Double_add(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Double_add(Assembler* assembler, Label* normal_ir_body) {
DoubleArithmeticOperations(assembler, normal_ir_body, Token::kADD);
}
-void Intrinsifier::Double_mul(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Double_mul(Assembler* assembler, Label* normal_ir_body) {
DoubleArithmeticOperations(assembler, normal_ir_body, Token::kMUL);
}
-void Intrinsifier::Double_sub(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Double_sub(Assembler* assembler, Label* normal_ir_body) {
DoubleArithmeticOperations(assembler, normal_ir_body, Token::kSUB);
}
-void Intrinsifier::Double_div(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Double_div(Assembler* assembler, Label* normal_ir_body) {
DoubleArithmeticOperations(assembler, normal_ir_body, Token::kDIV);
}
// Left is double, right is integer (Mint or Smi)
-void Intrinsifier::Double_mulFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Double_mulFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
if (TargetCPUFeatures::vfp_supported()) {
Label fall_through;
// Only smis allowed.
- __ ldr(R0, Address(SP, 0 * kWordSize));
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
__ tst(R0, Operand(kSmiTagMask));
__ b(normal_ir_body, NE);
// Is Smi.
__ SmiUntag(R0);
__ vmovsr(S0, R0);
__ vcvtdi(D1, S0);
- __ ldr(R0, Address(SP, 1 * kWordSize));
- __ LoadDFromOffset(D0, R0, Double::value_offset() - kHeapObjectTag);
+ __ ldr(R0, Address(SP, 1 * target::kWordSize));
+ __ LoadDFromOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
__ vmuld(D0, D0, D1);
- const Class& double_class =
- Class::Handle(Isolate::Current()->object_store()->double_class());
+ const Class& double_class = DoubleClass();
__ TryAllocate(double_class, normal_ir_body, R0,
R1); // Result register.
- __ StoreDToOffset(D0, R0, Double::value_offset() - kHeapObjectTag);
+ __ StoreDToOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
__ Ret();
__ Bind(normal_ir_body);
}
}
-void Intrinsifier::DoubleFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::DoubleFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
if (TargetCPUFeatures::vfp_supported()) {
Label fall_through;
- __ ldr(R0, Address(SP, 0 * kWordSize));
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
__ tst(R0, Operand(kSmiTagMask));
__ b(normal_ir_body, NE);
// Is Smi.
__ SmiUntag(R0);
__ vmovsr(S0, R0);
__ vcvtdi(D0, S0);
- const Class& double_class =
- Class::Handle(Isolate::Current()->object_store()->double_class());
+ const Class& double_class = DoubleClass();
__ TryAllocate(double_class, normal_ir_body, R0,
R1); // Result register.
- __ StoreDToOffset(D0, R0, Double::value_offset() - kHeapObjectTag);
+ __ StoreDToOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
__ Ret();
__ Bind(normal_ir_body);
}
}
-void Intrinsifier::Double_getIsNaN(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Double_getIsNaN(Assembler* assembler,
+ Label* normal_ir_body) {
if (TargetCPUFeatures::vfp_supported()) {
- __ ldr(R0, Address(SP, 0 * kWordSize));
- __ LoadDFromOffset(D0, R0, Double::value_offset() - kHeapObjectTag);
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
+ __ LoadDFromOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
__ vcmpd(D0, D0);
__ vmstat();
- __ LoadObject(R0, Bool::False(), VC);
- __ LoadObject(R0, Bool::True(), VS);
+ __ LoadObject(R0, CastHandle<Object>(FalseObject()), VC);
+ __ LoadObject(R0, CastHandle<Object>(TrueObject()), VS);
__ Ret();
}
}
-void Intrinsifier::Double_getIsInfinite(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Double_getIsInfinite(Assembler* assembler,
+ Label* normal_ir_body) {
if (TargetCPUFeatures::vfp_supported()) {
- __ ldr(R0, Address(SP, 0 * kWordSize));
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
// R1 <- value[0:31], R2 <- value[32:63]
- __ LoadFieldFromOffset(kWord, R1, R0, Double::value_offset());
- __ LoadFieldFromOffset(kWord, R2, R0, Double::value_offset() + kWordSize);
+ __ LoadFieldFromOffset(kWord, R1, R0, target::Double::value_offset());
+ __ LoadFieldFromOffset(kWord, R2, R0,
+ target::Double::value_offset() + target::kWordSize);
// If the low word isn't 0, then it isn't infinity.
__ cmp(R1, Operand(0));
- __ LoadObject(R0, Bool::False(), NE);
+ __ LoadObject(R0, CastHandle<Object>(FalseObject()), NE);
__ bx(LR, NE); // Return if NE.
// Mask off the sign bit.
__ AndImmediate(R2, R2, 0x7FFFFFFF);
// Compare with +infinity.
__ CompareImmediate(R2, 0x7FF00000);
- __ LoadObject(R0, Bool::False(), NE);
+ __ LoadObject(R0, CastHandle<Object>(FalseObject()), NE);
__ bx(LR, NE);
- __ LoadObject(R0, Bool::True());
+ __ LoadObject(R0, CastHandle<Object>(TrueObject()));
__ Ret();
}
}
-void Intrinsifier::Double_getIsNegative(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Double_getIsNegative(Assembler* assembler,
+ Label* normal_ir_body) {
if (TargetCPUFeatures::vfp_supported()) {
Label is_false, is_true, is_zero;
- __ ldr(R0, Address(SP, 0 * kWordSize));
- __ LoadDFromOffset(D0, R0, Double::value_offset() - kHeapObjectTag);
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
+ __ LoadDFromOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
__ vcmpdz(D0);
__ vmstat();
__ b(&is_false, VS); // NaN -> false.
@@ -1357,11 +1367,11 @@
__ b(&is_false, CS); // >= 0 -> false.
__ Bind(&is_true);
- __ LoadObject(R0, Bool::True());
+ __ LoadObject(R0, CastHandle<Object>(TrueObject()));
__ Ret();
__ Bind(&is_false);
- __ LoadObject(R0, Bool::False());
+ __ LoadObject(R0, CastHandle<Object>(FalseObject()));
__ Ret();
__ Bind(&is_zero);
@@ -1374,13 +1384,13 @@
}
}
-void Intrinsifier::DoubleToInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::DoubleToInteger(Assembler* assembler,
+ Label* normal_ir_body) {
if (TargetCPUFeatures::vfp_supported()) {
Label fall_through;
- __ ldr(R0, Address(SP, 0 * kWordSize));
- __ LoadDFromOffset(D0, R0, Double::value_offset() - kHeapObjectTag);
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
+ __ LoadDFromOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
// Explicit NaN check, since ARM gives an FPU exception if you try to
// convert NaN to an int.
@@ -1399,8 +1409,8 @@
}
}
-void Intrinsifier::Double_hashCode(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Double_hashCode(Assembler* assembler,
+ Label* normal_ir_body) {
// TODO(dartbug.com/31174): Convert this to a graph intrinsic.
if (!TargetCPUFeatures::vfp_supported()) return;
@@ -1408,8 +1418,8 @@
// Load double value and check that it isn't NaN, since ARM gives an
// FPU exception if you try to convert NaN to an int.
Label double_hash;
- __ ldr(R1, Address(SP, 0 * kWordSize));
- __ LoadDFromOffset(D0, R1, Double::value_offset() - kHeapObjectTag);
+ __ ldr(R1, Address(SP, 0 * target::kWordSize));
+ __ LoadDFromOffset(D0, R1, target::Double::value_offset() - kHeapObjectTag);
__ vcmpd(D0, D0);
__ vmstat();
__ b(&double_hash, VS);
@@ -1435,8 +1445,8 @@
// Convert the double bits to a hash code that fits in a Smi.
__ Bind(&double_hash);
- __ ldr(R0, FieldAddress(R1, Double::value_offset()));
- __ ldr(R1, FieldAddress(R1, Double::value_offset() + 4));
+ __ ldr(R0, FieldAddress(R1, target::Double::value_offset()));
+ __ ldr(R1, FieldAddress(R1, target::Double::value_offset() + 4));
__ eor(R0, R0, Operand(R1));
__ AndImmediate(R0, R0, kSmiMax);
__ SmiTag(R0);
@@ -1446,19 +1456,18 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::MathSqrt(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::MathSqrt(Assembler* assembler, Label* normal_ir_body) {
if (TargetCPUFeatures::vfp_supported()) {
Label is_smi, double_op;
TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
// Argument is double and is in R0.
- __ LoadDFromOffset(D1, R0, Double::value_offset() - kHeapObjectTag);
+ __ LoadDFromOffset(D1, R0, target::Double::value_offset() - kHeapObjectTag);
__ Bind(&double_op);
__ vsqrtd(D0, D1);
- const Class& double_class =
- Class::Handle(Isolate::Current()->object_store()->double_class());
+ const Class& double_class = DoubleClass();
__ TryAllocate(double_class, normal_ir_body, R0,
R1); // Result register.
- __ StoreDToOffset(D0, R0, Double::value_offset() - kHeapObjectTag);
+ __ StoreDToOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
__ Ret();
__ Bind(&is_smi);
__ SmiUntag(R0);
@@ -1472,30 +1481,25 @@
// var state = ((_A * (_state[kSTATE_LO])) + _state[kSTATE_HI]) & _MASK_64;
// _state[kSTATE_LO] = state & _MASK_32;
// _state[kSTATE_HI] = state >> 32;
-void Intrinsifier::Random_nextState(Assembler* assembler,
- Label* normal_ir_body) {
- const Library& math_lib = Library::Handle(Library::MathLibrary());
- ASSERT(!math_lib.IsNull());
- const Class& random_class =
- Class::Handle(math_lib.LookupClassAllowPrivate(Symbols::_Random()));
- ASSERT(!random_class.IsNull());
- const Field& state_field = Field::ZoneHandle(
- random_class.LookupInstanceFieldAllowPrivate(Symbols::_state()));
- ASSERT(!state_field.IsNull());
- const int64_t a_int_value = Intrinsifier::kRandomAValue;
+void AsmIntrinsifier::Random_nextState(Assembler* assembler,
+ Label* normal_ir_body) {
+ const Field& state_field = LookupMathRandomStateFieldOffset();
+ const int64_t a_int_value = AsmIntrinsifier::kRandomAValue;
+
// 'a_int_value' is a mask.
ASSERT(Utils::IsUint(32, a_int_value));
int32_t a_int32_value = static_cast<int32_t>(a_int_value);
// Receiver.
- __ ldr(R0, Address(SP, 0 * kWordSize));
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
// Field '_state'.
- __ ldr(R1, FieldAddress(R0, state_field.Offset()));
+ __ ldr(R1, FieldAddress(R0, LookupFieldOffsetInBytes(state_field)));
// Addresses of _state[0] and _state[1].
- const int64_t disp_0 = Instance::DataOffsetFor(kTypedDataUint32ArrayCid);
+ const int64_t disp_0 =
+ target::Instance::DataOffsetFor(kTypedDataUint32ArrayCid);
const int64_t disp_1 =
- disp_0 + Instance::ElementSizeFor(kTypedDataUint32ArrayCid);
+ disp_0 + target::Instance::ElementSizeFor(kTypedDataUint32ArrayCid);
__ LoadImmediate(R0, a_int32_value);
__ LoadFromOffset(kWord, R2, R1, disp_0 - kHeapObjectTag);
@@ -1505,17 +1509,18 @@
__ umlal(R3, R8, R0, R2); // R8:R3 <- R8:R3 + R0 * R2.
__ StoreToOffset(kWord, R3, R1, disp_0 - kHeapObjectTag);
__ StoreToOffset(kWord, R8, R1, disp_1 - kHeapObjectTag);
- ASSERT(Smi::RawValue(0) == 0);
+ ASSERT(target::ToRawSmi(0) == 0);
__ eor(R0, R0, Operand(R0));
__ Ret();
}
-void Intrinsifier::ObjectEquals(Assembler* assembler, Label* normal_ir_body) {
- __ ldr(R0, Address(SP, 0 * kWordSize));
- __ ldr(R1, Address(SP, 1 * kWordSize));
+void AsmIntrinsifier::ObjectEquals(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
+ __ ldr(R1, Address(SP, 1 * target::kWordSize));
__ cmp(R0, Operand(R1));
- __ LoadObject(R0, Bool::False(), NE);
- __ LoadObject(R0, Bool::True(), EQ);
+ __ LoadObject(R0, CastHandle<Object>(FalseObject()), NE);
+ __ LoadObject(R0, CastHandle<Object>(TrueObject()), EQ);
__ Ret();
}
@@ -1565,10 +1570,10 @@
}
// Return type quickly for simple types (not parameterized and not signature).
-void Intrinsifier::ObjectRuntimeType(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::ObjectRuntimeType(Assembler* assembler,
+ Label* normal_ir_body) {
Label use_declaration_type, not_double, not_integer;
- __ ldr(R0, Address(SP, 0 * kWordSize));
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
__ LoadClassIdMayBeSmi(R1, R0);
__ CompareImmediate(R1, kClosureCid);
@@ -1581,49 +1586,50 @@
__ b(&not_double, NE);
__ LoadIsolate(R0);
- __ LoadFromOffset(kWord, R0, R0, Isolate::object_store_offset());
- __ LoadFromOffset(kWord, R0, R0, ObjectStore::double_type_offset());
+ __ LoadFromOffset(kWord, R0, R0, target::Isolate::object_store_offset());
+ __ LoadFromOffset(kWord, R0, R0, target::ObjectStore::double_type_offset());
__ Ret();
__ Bind(&not_double);
JumpIfNotInteger(assembler, R1, R0, &not_integer);
__ LoadIsolate(R0);
- __ LoadFromOffset(kWord, R0, R0, Isolate::object_store_offset());
- __ LoadFromOffset(kWord, R0, R0, ObjectStore::int_type_offset());
+ __ LoadFromOffset(kWord, R0, R0, target::Isolate::object_store_offset());
+ __ LoadFromOffset(kWord, R0, R0, target::ObjectStore::int_type_offset());
__ Ret();
__ Bind(&not_integer);
JumpIfNotString(assembler, R1, R0, &use_declaration_type);
__ LoadIsolate(R0);
- __ LoadFromOffset(kWord, R0, R0, Isolate::object_store_offset());
- __ LoadFromOffset(kWord, R0, R0, ObjectStore::string_type_offset());
+ __ LoadFromOffset(kWord, R0, R0, target::Isolate::object_store_offset());
+ __ LoadFromOffset(kWord, R0, R0, target::ObjectStore::string_type_offset());
__ Ret();
__ Bind(&use_declaration_type);
__ LoadClassById(R2, R1); // Overwrites R1.
- __ ldrh(R3, FieldAddress(R2, Class::num_type_arguments_offset()));
+ __ ldrh(R3, FieldAddress(
+ R2, target::Class::num_type_arguments_offset_in_bytes()));
__ CompareImmediate(R3, 0);
__ b(normal_ir_body, NE);
- __ ldr(R0, FieldAddress(R2, Class::declaration_type_offset()));
- __ CompareObject(R0, Object::null_object());
+ __ ldr(R0, FieldAddress(R2, target::Class::declaration_type_offset()));
+ __ CompareObject(R0, NullObject());
__ b(normal_ir_body, EQ);
__ Ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler,
+ Label* normal_ir_body) {
Label different_cids, equal, not_equal, not_integer;
- __ ldr(R0, Address(SP, 0 * kWordSize));
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
__ LoadClassIdMayBeSmi(R1, R0);
// Check if left hand size is a closure. Closures are handled in the runtime.
__ CompareImmediate(R1, kClosureCid);
__ b(normal_ir_body, EQ);
- __ ldr(R0, Address(SP, 1 * kWordSize));
+ __ ldr(R0, Address(SP, 1 * target::kWordSize));
__ LoadClassIdMayBeSmi(R2, R0);
// Check whether class ids match. If class ids don't match objects can still
@@ -1636,12 +1642,13 @@
// Check if there are no type arguments. In this case we can return true.
// Otherwise fall through into the runtime to handle comparison.
__ LoadClassById(R3, R1);
- __ ldrh(R3, FieldAddress(R3, Class::num_type_arguments_offset()));
+ __ ldrh(R3, FieldAddress(
+ R3, target::Class::num_type_arguments_offset_in_bytes()));
__ CompareImmediate(R3, 0);
__ b(normal_ir_body, NE);
__ Bind(&equal);
- __ LoadObject(R0, Bool::True());
+ __ LoadObject(R0, CastHandle<Object>(TrueObject()));
__ Ret();
// Class ids are different. Check if we are comparing runtime types of
@@ -1662,26 +1669,26 @@
// Neither strings nor integers and have different class ids.
__ Bind(&not_equal);
- __ LoadObject(R0, Bool::False());
+ __ LoadObject(R0, CastHandle<Object>(FalseObject()));
__ Ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::String_getHashCode(Assembler* assembler,
- Label* normal_ir_body) {
- __ ldr(R0, Address(SP, 0 * kWordSize));
- __ ldr(R0, FieldAddress(R0, String::hash_offset()));
+void AsmIntrinsifier::String_getHashCode(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
+ __ ldr(R0, FieldAddress(R0, target::String::hash_offset()));
__ cmp(R0, Operand(0));
__ bx(LR, NE);
// Hash not yet computed.
__ Bind(normal_ir_body);
}
-void Intrinsifier::Type_getHashCode(Assembler* assembler,
- Label* normal_ir_body) {
- __ ldr(R0, Address(SP, 0 * kWordSize));
- __ ldr(R0, FieldAddress(R0, Type::hash_offset()));
+void AsmIntrinsifier::Type_getHashCode(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
+ __ ldr(R0, FieldAddress(R0, target::Type::hash_offset()));
__ cmp(R0, Operand(0));
__ bx(LR, NE);
// Hash not yet computed.
@@ -1694,9 +1701,10 @@
Label* return_true,
Label* return_false) {
__ SmiUntag(R1);
- __ ldr(R8, FieldAddress(R0, String::length_offset())); // this.length
+ __ ldr(R8, FieldAddress(R0, target::String::length_offset())); // this.length
__ SmiUntag(R8);
- __ ldr(R9, FieldAddress(R2, String::length_offset())); // other.length
+ __ ldr(R9,
+ FieldAddress(R2, target::String::length_offset())); // other.length
__ SmiUntag(R9);
// if (other.length == 0) return true;
@@ -1713,19 +1721,19 @@
__ b(return_false, GT);
if (receiver_cid == kOneByteStringCid) {
- __ AddImmediate(R0, OneByteString::data_offset() - kHeapObjectTag);
+ __ AddImmediate(R0, target::OneByteString::data_offset() - kHeapObjectTag);
__ add(R0, R0, Operand(R1));
} else {
ASSERT(receiver_cid == kTwoByteStringCid);
- __ AddImmediate(R0, TwoByteString::data_offset() - kHeapObjectTag);
+ __ AddImmediate(R0, target::TwoByteString::data_offset() - kHeapObjectTag);
__ add(R0, R0, Operand(R1));
__ add(R0, R0, Operand(R1));
}
if (other_cid == kOneByteStringCid) {
- __ AddImmediate(R2, OneByteString::data_offset() - kHeapObjectTag);
+ __ AddImmediate(R2, target::OneByteString::data_offset() - kHeapObjectTag);
} else {
ASSERT(other_cid == kTwoByteStringCid);
- __ AddImmediate(R2, TwoByteString::data_offset() - kHeapObjectTag);
+ __ AddImmediate(R2, target::TwoByteString::data_offset() - kHeapObjectTag);
}
// i = 0
@@ -1761,13 +1769,13 @@
// bool _substringMatches(int start, String other)
// This intrinsic handles a OneByteString or TwoByteString receiver with a
// OneByteString other.
-void Intrinsifier::StringBaseSubstringMatches(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::StringBaseSubstringMatches(Assembler* assembler,
+ Label* normal_ir_body) {
Label return_true, return_false, try_two_byte;
- __ ldr(R0, Address(SP, 2 * kWordSize)); // this
- __ ldr(R1, Address(SP, 1 * kWordSize)); // start
- __ ldr(R2, Address(SP, 0 * kWordSize)); // other
- __ Push(R4); // Make ARGS_DESC_REG available.
+ __ ldr(R0, Address(SP, 2 * target::kWordSize)); // this
+ __ ldr(R1, Address(SP, 1 * target::kWordSize)); // start
+ __ ldr(R2, Address(SP, 0 * target::kWordSize)); // other
+ __ Push(R4); // Make ARGS_DESC_REG available.
__ tst(R1, Operand(kSmiTagMask));
__ b(normal_ir_body, NE); // 'start' is not a Smi.
@@ -1792,48 +1800,51 @@
__ Bind(&return_true);
__ Pop(R4);
- __ LoadObject(R0, Bool::True());
+ __ LoadObject(R0, CastHandle<Object>(TrueObject()));
__ Ret();
__ Bind(&return_false);
__ Pop(R4);
- __ LoadObject(R0, Bool::False());
+ __ LoadObject(R0, CastHandle<Object>(FalseObject()));
__ Ret();
__ Bind(normal_ir_body);
__ Pop(R4);
}
-void Intrinsifier::Object_getHash(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Object_getHash(Assembler* assembler,
+ Label* normal_ir_body) {
UNREACHABLE();
}
-void Intrinsifier::Object_setHash(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Object_setHash(Assembler* assembler,
+ Label* normal_ir_body) {
UNREACHABLE();
}
-void Intrinsifier::StringBaseCharAt(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::StringBaseCharAt(Assembler* assembler,
+ Label* normal_ir_body) {
Label try_two_byte_string;
- __ ldr(R1, Address(SP, 0 * kWordSize)); // Index.
- __ ldr(R0, Address(SP, 1 * kWordSize)); // String.
+ __ ldr(R1, Address(SP, 0 * target::kWordSize)); // Index.
+ __ ldr(R0, Address(SP, 1 * target::kWordSize)); // String.
__ tst(R1, Operand(kSmiTagMask));
__ b(normal_ir_body, NE); // Index is not a Smi.
// Range check.
- __ ldr(R2, FieldAddress(R0, String::length_offset()));
+ __ ldr(R2, FieldAddress(R0, target::String::length_offset()));
__ cmp(R1, Operand(R2));
__ b(normal_ir_body, CS); // Runtime throws exception.
__ CompareClassId(R0, kOneByteStringCid, R3);
__ b(&try_two_byte_string, NE);
__ SmiUntag(R1);
- __ AddImmediate(R0, OneByteString::data_offset() - kHeapObjectTag);
+ __ AddImmediate(R0, target::OneByteString::data_offset() - kHeapObjectTag);
__ ldrb(R1, Address(R0, R1));
- __ CompareImmediate(R1, Symbols::kNumberOfOneCharCodeSymbols);
+ __ CompareImmediate(R1, target::Symbols::kNumberOfOneCharCodeSymbols);
__ b(normal_ir_body, GE);
- __ ldr(R0, Address(THR, Thread::predefined_symbols_address_offset()));
- __ AddImmediate(R0, Symbols::kNullCharCodeSymbolOffset * kWordSize);
+ __ ldr(R0, Address(THR, target::Thread::predefined_symbols_address_offset()));
+ __ AddImmediate(
+ R0, target::Symbols::kNullCharCodeSymbolOffset * target::kWordSize);
__ ldr(R0, Address(R0, R1, LSL, 2));
__ Ret();
@@ -1841,45 +1852,47 @@
__ CompareClassId(R0, kTwoByteStringCid, R3);
__ b(normal_ir_body, NE);
ASSERT(kSmiTagShift == 1);
- __ AddImmediate(R0, TwoByteString::data_offset() - kHeapObjectTag);
+ __ AddImmediate(R0, target::TwoByteString::data_offset() - kHeapObjectTag);
__ ldrh(R1, Address(R0, R1));
- __ CompareImmediate(R1, Symbols::kNumberOfOneCharCodeSymbols);
+ __ CompareImmediate(R1, target::Symbols::kNumberOfOneCharCodeSymbols);
__ b(normal_ir_body, GE);
- __ ldr(R0, Address(THR, Thread::predefined_symbols_address_offset()));
- __ AddImmediate(R0, Symbols::kNullCharCodeSymbolOffset * kWordSize);
+ __ ldr(R0, Address(THR, target::Thread::predefined_symbols_address_offset()));
+ __ AddImmediate(
+ R0, target::Symbols::kNullCharCodeSymbolOffset * target::kWordSize);
__ ldr(R0, Address(R0, R1, LSL, 2));
__ Ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::StringBaseIsEmpty(Assembler* assembler,
- Label* normal_ir_body) {
- __ ldr(R0, Address(SP, 0 * kWordSize));
- __ ldr(R0, FieldAddress(R0, String::length_offset()));
- __ cmp(R0, Operand(Smi::RawValue(0)));
- __ LoadObject(R0, Bool::True(), EQ);
- __ LoadObject(R0, Bool::False(), NE);
+void AsmIntrinsifier::StringBaseIsEmpty(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
+ __ ldr(R0, FieldAddress(R0, target::String::length_offset()));
+ __ cmp(R0, Operand(target::ToRawSmi(0)));
+ __ LoadObject(R0, CastHandle<Object>(TrueObject()), EQ);
+ __ LoadObject(R0, CastHandle<Object>(FalseObject()), NE);
__ Ret();
}
-void Intrinsifier::OneByteString_getHashCode(Assembler* assembler,
- Label* normal_ir_body) {
- __ ldr(R1, Address(SP, 0 * kWordSize));
- __ ldr(R0, FieldAddress(R1, String::hash_offset()));
+void AsmIntrinsifier::OneByteString_getHashCode(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ ldr(R1, Address(SP, 0 * target::kWordSize));
+ __ ldr(R0, FieldAddress(R1, target::String::hash_offset()));
__ cmp(R0, Operand(0));
__ bx(LR, NE); // Return if already computed.
- __ ldr(R2, FieldAddress(R1, String::length_offset()));
+ __ ldr(R2, FieldAddress(R1, target::String::length_offset()));
Label done;
// If the string is empty, set the hash to 1, and return.
- __ cmp(R2, Operand(Smi::RawValue(0)));
+ __ cmp(R2, Operand(target::ToRawSmi(0)));
__ b(&done, EQ);
__ SmiUntag(R2);
__ mov(R3, Operand(0));
- __ AddImmediate(R8, R1, OneByteString::data_offset() - kHeapObjectTag);
+ __ AddImmediate(R8, R1,
+ target::OneByteString::data_offset() - kHeapObjectTag);
// R1: Instance of OneByteString.
// R2: String length, untagged integer.
// R3: Loop counter, untagged integer.
@@ -1911,14 +1924,15 @@
__ eor(R0, R0, Operand(R0, LSR, 11));
__ add(R0, R0, Operand(R0, LSL, 15));
// hash_ = hash_ & ((static_cast<intptr_t>(1) << bits) - 1);
- __ LoadImmediate(R2, (static_cast<intptr_t>(1) << String::kHashBits) - 1);
+ __ LoadImmediate(R2,
+ (static_cast<intptr_t>(1) << target::String::kHashBits) - 1);
__ and_(R0, R0, Operand(R2));
__ cmp(R0, Operand(0));
// return hash_ == 0 ? 1 : hash_;
__ Bind(&done);
__ mov(R0, Operand(1), EQ);
__ SmiTag(R0);
- __ StoreIntoSmiField(FieldAddress(R1, String::hash_offset()), R0);
+ __ StoreIntoSmiField(FieldAddress(R1, target::String::hash_offset()), R0);
__ Ret();
}
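
Only the tail of the hash computation is touched by this hunk. As a reference, the finalization the stub performs maps onto roughly this C++, where hash_bits stands in for target::String::kHashBits and a zero hash is reserved to mean "not yet computed":

#include <cstdint>

// The eor/add pair mixes the bits, the mask keeps only hash_bits bits, and a
// resulting 0 is bumped to 1 because 0 is the "hash not computed" sentinel
// stored in the string header.
uint32_t FinalizeStringHash(uint32_t hash, int hash_bits) {
  hash ^= hash >> 11;
  hash += hash << 15;
  hash &= (static_cast<uint32_t>(1) << hash_bits) - 1;
  return (hash == 0) ? 1 : hash;
}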
@@ -1937,12 +1951,14 @@
// TODO(koda): Protect against negative length and overflow here.
__ SmiUntag(length_reg);
const intptr_t fixed_size_plus_alignment_padding =
- sizeof(RawString) + kObjectAlignment - 1;
+ target::String::InstanceSize() +
+ target::ObjectAlignment::kObjectAlignment - 1;
__ AddImmediate(length_reg, fixed_size_plus_alignment_padding);
- __ bic(length_reg, length_reg, Operand(kObjectAlignment - 1));
+ __ bic(length_reg, length_reg,
+ Operand(target::ObjectAlignment::kObjectAlignment - 1));
const intptr_t cid = kOneByteStringCid;
- __ ldr(R0, Address(THR, Thread::top_offset()));
+ __ ldr(R0, Address(THR, target::Thread::top_offset()));
// length_reg: allocation size.
__ adds(R1, R0, Operand(length_reg));
@@ -1952,14 +1968,14 @@
// R0: potential new object start.
// R1: potential next object start.
// R2: allocation size.
- __ ldr(NOTFP, Address(THR, Thread::end_offset()));
+ __ ldr(NOTFP, Address(THR, target::Thread::end_offset()));
__ cmp(R1, Operand(NOTFP));
__ b(&fail, CS);
// Successfully allocated the object(s), now update top to point to
// next object start and initialize the object.
NOT_IN_PRODUCT(__ LoadAllocationStatsAddress(R4, cid));
- __ str(R1, Address(THR, Thread::top_offset()));
+ __ str(R1, Address(THR, target::Thread::top_offset()));
__ AddImmediate(R0, kHeapObjectTag);
// Initialize the tags.
@@ -1968,28 +1984,29 @@
// R2: allocation size.
// R4: allocation stats address.
{
- const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2;
+ const intptr_t shift = target::RawObject::kTagBitsSizeTagPos -
+ target::ObjectAlignment::kObjectAlignmentLog2;
- __ CompareImmediate(R2, RawObject::SizeTag::kMaxSizeTag);
+ __ CompareImmediate(R2, target::RawObject::kSizeTagMaxSizeTag);
__ mov(R3, Operand(R2, LSL, shift), LS);
__ mov(R3, Operand(0), HI);
// Get the class index and insert it into the tags.
// R3: size and bit tags.
- uint32_t tags = 0;
- tags = RawObject::ClassIdTag::update(cid, tags);
- tags = RawObject::NewBit::update(true, tags);
+ const uint32_t tags =
+ target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
__ LoadImmediate(TMP, tags);
__ orr(R3, R3, Operand(TMP));
- __ str(R3, FieldAddress(R0, String::tags_offset())); // Store tags.
+ __ str(R3, FieldAddress(R0, target::Object::tags_offset())); // Store tags.
}
// Set the length field using the saved length (R8).
- __ StoreIntoObjectNoBarrier(R0, FieldAddress(R0, String::length_offset()),
- R8);
+ __ StoreIntoObjectNoBarrier(
+ R0, FieldAddress(R0, target::String::length_offset()), R8);
// Clear hash.
__ LoadImmediate(TMP, 0);
- __ StoreIntoObjectNoBarrier(R0, FieldAddress(R0, String::hash_offset()), TMP);
+ __ StoreIntoObjectNoBarrier(
+ R0, FieldAddress(R0, target::String::hash_offset()), TMP);
NOT_IN_PRODUCT(__ IncrementAllocationStatsWithSize(R4, R2));
__ b(ok);
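
target::MakeTagWordForNewSpaceObject replaces the hand-rolled ClassIdTag/NewBit updates. A rough model of what it contributes here; the bit positions below are made-up placeholders, the real layout is defined behind runtime_api.h:

#include <cstdint>

// Illustrative only: these shifts are assumptions, not the runtime's tag layout.
constexpr int kClassIdShiftSketch = 16;
constexpr int kNewBitShiftSketch = 3;

uint32_t MakeTagWordForNewSpaceObjectSketch(intptr_t cid) {
  uint32_t tags = 0;
  tags |= static_cast<uint32_t>(cid) << kClassIdShiftSketch;  // class id field
  tags |= uint32_t{1} << kNewBitShiftSketch;                  // new-space bit
  return tags;  // size-tag bits stay 0 for instance_size == 0
}

Because the string's size is only known at run time, instance_size is passed as 0 and the shifted allocation size (or 0 when it does not fit the size-tag field) is ORed in separately via R3 above.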
@@ -2002,11 +2019,11 @@
// Arg1: Start index as Smi.
// Arg2: End index as Smi.
// The indexes must be valid.
-void Intrinsifier::OneByteString_substringUnchecked(Assembler* assembler,
- Label* normal_ir_body) {
- const intptr_t kStringOffset = 2 * kWordSize;
- const intptr_t kStartIndexOffset = 1 * kWordSize;
- const intptr_t kEndIndexOffset = 0 * kWordSize;
+void AsmIntrinsifier::OneByteString_substringUnchecked(Assembler* assembler,
+ Label* normal_ir_body) {
+ const intptr_t kStringOffset = 2 * target::kWordSize;
+ const intptr_t kStartIndexOffset = 1 * target::kWordSize;
+ const intptr_t kEndIndexOffset = 0 * target::kWordSize;
Label ok;
__ ldr(R2, Address(SP, kEndIndexOffset));
@@ -2025,7 +2042,7 @@
__ SmiUntag(R1);
__ add(R3, R3, Operand(R1));
// Calculate start address and untag (- 1).
- __ AddImmediate(R3, OneByteString::data_offset() - 1);
+ __ AddImmediate(R3, target::OneByteString::data_offset() - 1);
// R3: Start address to copy from (untagged).
// R1: Untagged start index.
@@ -2049,7 +2066,7 @@
__ AddImmediate(R8, 1);
__ sub(R2, R2, Operand(1));
__ cmp(R2, Operand(0));
- __ strb(R1, FieldAddress(NOTFP, OneByteString::data_offset()));
+ __ strb(R1, FieldAddress(NOTFP, target::OneByteString::data_offset()));
__ AddImmediate(NOTFP, 1);
__ b(&loop, GT);
@@ -2058,21 +2075,22 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::OneByteStringSetAt(Assembler* assembler,
- Label* normal_ir_body) {
- __ ldr(R2, Address(SP, 0 * kWordSize)); // Value.
- __ ldr(R1, Address(SP, 1 * kWordSize)); // Index.
- __ ldr(R0, Address(SP, 2 * kWordSize)); // OneByteString.
+void AsmIntrinsifier::OneByteStringSetAt(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ ldr(R2, Address(SP, 0 * target::kWordSize)); // Value.
+ __ ldr(R1, Address(SP, 1 * target::kWordSize)); // Index.
+ __ ldr(R0, Address(SP, 2 * target::kWordSize)); // OneByteString.
__ SmiUntag(R1);
__ SmiUntag(R2);
- __ AddImmediate(R3, R0, OneByteString::data_offset() - kHeapObjectTag);
+ __ AddImmediate(R3, R0,
+ target::OneByteString::data_offset() - kHeapObjectTag);
__ strb(R2, Address(R3, R1));
__ Ret();
}
-void Intrinsifier::OneByteString_allocate(Assembler* assembler,
- Label* normal_ir_body) {
- __ ldr(R2, Address(SP, 0 * kWordSize)); // Length.
+void AsmIntrinsifier::OneByteString_allocate(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ ldr(R2, Address(SP, 0 * target::kWordSize)); // Length.
Label ok;
TryAllocateOnebyteString(assembler, &ok, normal_ir_body);
@@ -2087,8 +2105,8 @@
Label* normal_ir_body,
intptr_t string_cid) {
Label is_true, is_false, loop;
- __ ldr(R0, Address(SP, 1 * kWordSize)); // This.
- __ ldr(R1, Address(SP, 0 * kWordSize)); // Other.
+ __ ldr(R0, Address(SP, 1 * target::kWordSize)); // This.
+ __ ldr(R1, Address(SP, 0 * target::kWordSize)); // Other.
// Are identical?
__ cmp(R0, Operand(R1));
@@ -2101,8 +2119,8 @@
__ b(normal_ir_body, NE);
// Have same length?
- __ ldr(R2, FieldAddress(R0, String::length_offset()));
- __ ldr(R3, FieldAddress(R1, String::length_offset()));
+ __ ldr(R2, FieldAddress(R0, target::String::length_offset()));
+ __ ldr(R3, FieldAddress(R1, target::String::length_offset()));
__ cmp(R2, Operand(R3));
__ b(&is_false, NE);
@@ -2111,8 +2129,8 @@
ASSERT((string_cid == kOneByteStringCid) ||
(string_cid == kTwoByteStringCid));
const intptr_t offset = (string_cid == kOneByteStringCid)
- ? OneByteString::data_offset()
- : TwoByteString::data_offset();
+ ? target::OneByteString::data_offset()
+ : target::TwoByteString::data_offset();
__ AddImmediate(R0, offset - kHeapObjectTag);
__ AddImmediate(R1, offset - kHeapObjectTag);
__ SmiUntag(R2);
@@ -2138,33 +2156,33 @@
__ b(&loop);
__ Bind(&is_true);
- __ LoadObject(R0, Bool::True());
+ __ LoadObject(R0, CastHandle<Object>(TrueObject()));
__ Ret();
__ Bind(&is_false);
- __ LoadObject(R0, Bool::False());
+ __ LoadObject(R0, CastHandle<Object>(FalseObject()));
__ Ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::OneByteString_equality(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::OneByteString_equality(Assembler* assembler,
+ Label* normal_ir_body) {
StringEquality(assembler, normal_ir_body, kOneByteStringCid);
}
-void Intrinsifier::TwoByteString_equality(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::TwoByteString_equality(Assembler* assembler,
+ Label* normal_ir_body) {
StringEquality(assembler, normal_ir_body, kTwoByteStringCid);
}
-void Intrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler,
- Label* normal_ir_body,
- bool sticky) {
+void AsmIntrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler,
+ Label* normal_ir_body,
+ bool sticky) {
if (FLAG_interpret_irregexp) return;
- static const intptr_t kRegExpParamOffset = 2 * kWordSize;
- static const intptr_t kStringParamOffset = 1 * kWordSize;
+ static const intptr_t kRegExpParamOffset = 2 * target::kWordSize;
+ static const intptr_t kStringParamOffset = 1 * target::kWordSize;
// start_index smi is located at offset 0.
// Incoming registers:
@@ -2178,82 +2196,85 @@
__ ldr(R1, Address(SP, kStringParamOffset));
__ LoadClassId(R1, R1);
__ AddImmediate(R1, -kOneByteStringCid);
- __ add(R1, R2, Operand(R1, LSL, kWordSizeLog2));
- __ ldr(R0,
- FieldAddress(R1, RegExp::function_offset(kOneByteStringCid, sticky)));
+ __ add(R1, R2, Operand(R1, LSL, target::kWordSizeLog2));
+ __ ldr(R0, FieldAddress(R1, target::RegExp::function_offset(kOneByteStringCid,
+ sticky)));
// Registers are now set up for the lazy compile stub. It expects the function
// in R0, the argument descriptor in R4, and IC-Data in R9.
__ eor(R9, R9, Operand(R9));
// Tail-call the function.
- __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
- __ Branch(FieldAddress(R0, Function::entry_point_offset()));
+ __ ldr(CODE_REG, FieldAddress(R0, target::Function::code_offset()));
+ __ Branch(FieldAddress(R0, target::Function::entry_point_offset()));
}
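
The ldr above indexes into a small per-class-id table of specialized match functions stored inside the RegExp object; function_offset(kOneByteStringCid, sticky) is the base of that table for the chosen stickiness. The arithmetic is simply:

#include <cstdint>

// base_offset is a stand-in for
// target::RegExp::function_offset(kOneByteStringCid, sticky); the table holds
// one word-sized field per string class id, laid out consecutively.
intptr_t SpecializedFunctionOffset(intptr_t base_offset, intptr_t string_cid,
                                   intptr_t one_byte_string_cid,
                                   intptr_t word_size) {
  return base_offset + (string_cid - one_byte_string_cid) * word_size;
}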
// On stack: user tag (+0).
-void Intrinsifier::UserTag_makeCurrent(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::UserTag_makeCurrent(Assembler* assembler,
+ Label* normal_ir_body) {
// R1: Isolate.
__ LoadIsolate(R1);
// R0: Current user tag.
- __ ldr(R0, Address(R1, Isolate::current_tag_offset()));
+ __ ldr(R0, Address(R1, target::Isolate::current_tag_offset()));
// R2: UserTag.
- __ ldr(R2, Address(SP, +0 * kWordSize));
- // Set Isolate::current_tag_.
- __ str(R2, Address(R1, Isolate::current_tag_offset()));
+ __ ldr(R2, Address(SP, +0 * target::kWordSize));
+ // Set target::Isolate::current_tag_.
+ __ str(R2, Address(R1, target::Isolate::current_tag_offset()));
// R2: UserTag's tag.
- __ ldr(R2, FieldAddress(R2, UserTag::tag_offset()));
- // Set Isolate::user_tag_.
- __ str(R2, Address(R1, Isolate::user_tag_offset()));
+ __ ldr(R2, FieldAddress(R2, target::UserTag::tag_offset()));
+ // Set target::Isolate::user_tag_.
+ __ str(R2, Address(R1, target::Isolate::user_tag_offset()));
__ Ret();
}
-void Intrinsifier::UserTag_defaultTag(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::UserTag_defaultTag(Assembler* assembler,
+ Label* normal_ir_body) {
__ LoadIsolate(R0);
- __ ldr(R0, Address(R0, Isolate::default_tag_offset()));
+ __ ldr(R0, Address(R0, target::Isolate::default_tag_offset()));
__ Ret();
}
-void Intrinsifier::Profiler_getCurrentTag(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Profiler_getCurrentTag(Assembler* assembler,
+ Label* normal_ir_body) {
__ LoadIsolate(R0);
- __ ldr(R0, Address(R0, Isolate::current_tag_offset()));
+ __ ldr(R0, Address(R0, target::Isolate::current_tag_offset()));
__ Ret();
}
-void Intrinsifier::Timeline_isDartStreamEnabled(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Timeline_isDartStreamEnabled(Assembler* assembler,
+ Label* normal_ir_body) {
#if !defined(SUPPORT_TIMELINE)
- __ LoadObject(R0, Bool::False());
+ __ LoadObject(R0, CastHandle<Object>(FalseObject()));
__ Ret();
#else
// Load TimelineStream*.
- __ ldr(R0, Address(THR, Thread::dart_stream_offset()));
+ __ ldr(R0, Address(THR, target::Thread::dart_stream_offset()));
// Load uintptr_t from TimelineStream*.
- __ ldr(R0, Address(R0, TimelineStream::enabled_offset()));
+ __ ldr(R0, Address(R0, target::TimelineStream::enabled_offset()));
__ cmp(R0, Operand(0));
- __ LoadObject(R0, Bool::True(), NE);
- __ LoadObject(R0, Bool::False(), EQ);
+ __ LoadObject(R0, CastHandle<Object>(TrueObject()), NE);
+ __ LoadObject(R0, CastHandle<Object>(FalseObject()), EQ);
__ Ret();
#endif
}
-void Intrinsifier::ClearAsyncThreadStackTrace(Assembler* assembler,
- Label* normal_ir_body) {
- __ LoadObject(R0, Object::null_object());
- __ str(R0, Address(THR, Thread::async_stack_trace_offset()));
+void AsmIntrinsifier::ClearAsyncThreadStackTrace(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ LoadObject(R0, NullObject());
+ __ str(R0, Address(THR, target::Thread::async_stack_trace_offset()));
__ Ret();
}
-void Intrinsifier::SetAsyncThreadStackTrace(Assembler* assembler,
- Label* normal_ir_body) {
- __ ldr(R0, Address(THR, Thread::async_stack_trace_offset()));
- __ LoadObject(R0, Object::null_object());
+void AsmIntrinsifier::SetAsyncThreadStackTrace(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ ldr(R0, Address(THR, target::Thread::async_stack_trace_offset()));
+ __ LoadObject(R0, NullObject());
__ Ret();
}
+#undef __
+
+} // namespace compiler
} // namespace dart
#endif // defined(TARGET_ARCH_ARM) && !defined(DART_PRECOMPILED_RUNTIME)
diff --git a/runtime/vm/compiler/intrinsifier_arm64.cc b/runtime/vm/compiler/asm_intrinsifier_arm64.cc
similarity index 71%
rename from runtime/vm/compiler/intrinsifier_arm64.cc
rename to runtime/vm/compiler/asm_intrinsifier_arm64.cc
index 79a6cc4..6ba42cd 100644
--- a/runtime/vm/compiler/intrinsifier_arm64.cc
+++ b/runtime/vm/compiler/asm_intrinsifier_arm64.cc
@@ -1,22 +1,18 @@
-// Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
+// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM64.
#if defined(TARGET_ARCH_ARM64) && !defined(DART_PRECOMPILED_RUNTIME)
-#include "vm/compiler/intrinsifier.h"
+#define SHOULD_NOT_INCLUDE_RUNTIME
+#include "vm/class_id.h"
+#include "vm/compiler/asm_intrinsifier.h"
#include "vm/compiler/assembler/assembler.h"
-#include "vm/compiler/backend/flow_graph_compiler.h"
-#include "vm/dart_entry.h"
-#include "vm/object.h"
-#include "vm/object_store.h"
-#include "vm/regexp_assembler.h"
-#include "vm/symbols.h"
-#include "vm/timeline.h"
namespace dart {
+namespace compiler {
// When entering intrinsics code:
// R4: Arguments descriptor
@@ -28,7 +24,7 @@
#define __ assembler->
-intptr_t Intrinsifier::ParameterSlotFromSp() {
+intptr_t AsmIntrinsifier::ParameterSlotFromSp() {
return -1;
}
@@ -36,7 +32,7 @@
return ((1 << reg) & kAbiPreservedCpuRegs) != 0;
}
-void Intrinsifier::IntrinsicCallPrologue(Assembler* assembler) {
+void AsmIntrinsifier::IntrinsicCallPrologue(Assembler* assembler) {
ASSERT(IsABIPreservedRegister(CODE_REG));
ASSERT(!IsABIPreservedRegister(ARGS_DESC_REG));
ASSERT(IsABIPreservedRegister(CALLEE_SAVED_TEMP));
@@ -51,40 +47,41 @@
assembler->mov(CALLEE_SAVED_TEMP2, ARGS_DESC_REG);
}
-void Intrinsifier::IntrinsicCallEpilogue(Assembler* assembler) {
+void AsmIntrinsifier::IntrinsicCallEpilogue(Assembler* assembler) {
assembler->Comment("IntrinsicCallEpilogue");
assembler->mov(LR, CALLEE_SAVED_TEMP);
assembler->mov(ARGS_DESC_REG, CALLEE_SAVED_TEMP2);
}
-// Allocate a GrowableObjectArray using the backing array specified.
+// Allocate a GrowableObjectArray using the backing array specified.
// On stack: type argument (+1), data (+0).
-void Intrinsifier::GrowableArray_Allocate(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::GrowableArray_Allocate(Assembler* assembler,
+ Label* normal_ir_body) {
// The newly allocated object is returned in R0.
- const intptr_t kTypeArgumentsOffset = 1 * kWordSize;
- const intptr_t kArrayOffset = 0 * kWordSize;
+ const intptr_t kTypeArgumentsOffset = 1 * target::kWordSize;
+ const intptr_t kArrayOffset = 0 * target::kWordSize;
// Try allocating in new space.
- const Class& cls = Class::Handle(
- Isolate::Current()->object_store()->growable_object_array_class());
+ const Class& cls = GrowableObjectArrayClass();
__ TryAllocate(cls, normal_ir_body, R0, R1);
// Store backing array object in growable array object.
__ ldr(R1, Address(SP, kArrayOffset)); // Data argument.
// R0 is new, no barrier needed.
__ StoreIntoObjectNoBarrier(
- R0, FieldAddress(R0, GrowableObjectArray::data_offset()), R1);
+ R0, FieldAddress(R0, target::GrowableObjectArray::data_offset()), R1);
// R0: new growable array object start as a tagged pointer.
// Store the type argument field in the growable array object.
__ ldr(R1, Address(SP, kTypeArgumentsOffset)); // Type argument.
__ StoreIntoObjectNoBarrier(
- R0, FieldAddress(R0, GrowableObjectArray::type_arguments_offset()), R1);
+ R0,
+ FieldAddress(R0, target::GrowableObjectArray::type_arguments_offset()),
+ R1);
// Set the length field in the growable array object to 0.
__ LoadImmediate(R1, 0);
- __ str(R1, FieldAddress(R0, GrowableObjectArray::length_offset()));
+ __ str(R1, FieldAddress(R0, target::GrowableObjectArray::length_offset()));
__ ret(); // Returns the newly allocated object in R0.
__ Bind(normal_ir_body);
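
Once TryAllocate has produced the raw instance, the stub only has to fill in three fields. The struct below is a hypothetical stand-in for the layout implied by the target::GrowableObjectArray offsets, not the actual runtime definition:

#include <cstdint>

struct GrowableArraySketch {          // field order/types are assumptions
  void* type_arguments;
  void* data;
  intptr_t length;                    // stored as a Smi, set to 0 by the stub
};

void InitGrowableArraySketch(GrowableArraySketch* arr, void* backing,
                             void* type_args) {
  arr->data = backing;           // no write barrier: arr is freshly allocated
  arr->type_arguments = type_args;
  arr->length = 0;
}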
@@ -107,9 +104,9 @@
return -1;
}
-#define TYPED_ARRAY_ALLOCATION(type_name, cid, max_len, scale_shift) \
+#define TYPED_ARRAY_ALLOCATION(cid, max_len, scale_shift) \
Label fall_through; \
- const intptr_t kArrayLengthStackOffset = 0 * kWordSize; \
+ const intptr_t kArrayLengthStackOffset = 0 * target::kWordSize; \
NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, R2, normal_ir_body)); \
__ ldr(R2, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \
/* Check that length is a positive Smi. */ \
@@ -124,10 +121,12 @@
__ b(normal_ir_body, GT); \
__ LslImmediate(R2, R2, scale_shift); \
const intptr_t fixed_size_plus_alignment_padding = \
- sizeof(Raw##type_name) + kObjectAlignment - 1; \
+ target::TypedData::InstanceSize() + \
+ target::ObjectAlignment::kObjectAlignment - 1; \
__ AddImmediate(R2, fixed_size_plus_alignment_padding); \
- __ andi(R2, R2, Immediate(~(kObjectAlignment - 1))); \
- __ ldr(R0, Address(THR, Thread::top_offset())); \
+ __ andi(R2, R2, \
+ Immediate(~(target::ObjectAlignment::kObjectAlignment - 1))); \
+ __ ldr(R0, Address(THR, target::Thread::top_offset())); \
\
/* R2: allocation size. */ \
__ adds(R1, R0, Operand(R2)); \
@@ -137,13 +136,13 @@
/* R0: potential new object start. */ \
/* R1: potential next object start. */ \
/* R2: allocation size. */ \
- __ ldr(R6, Address(THR, Thread::end_offset())); \
+ __ ldr(R6, Address(THR, target::Thread::end_offset())); \
__ cmp(R1, Operand(R6)); \
__ b(normal_ir_body, CS); \
\
/* Successfully allocated the object(s), now update top to point to */ \
/* next object start and initialize the object. */ \
- __ str(R1, Address(THR, Thread::top_offset())); \
+ __ str(R1, Address(THR, target::Thread::top_offset())); \
__ AddImmediate(R0, kHeapObjectTag); \
NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, R2)); \
/* Initialize the tags. */ \
@@ -151,24 +150,25 @@
/* R1: new object end address. */ \
/* R2: allocation size. */ \
{ \
- __ CompareImmediate(R2, RawObject::SizeTag::kMaxSizeTag); \
- __ LslImmediate(R2, R2, RawObject::kSizeTagPos - kObjectAlignmentLog2); \
+ __ CompareImmediate(R2, target::RawObject::kSizeTagMaxSizeTag); \
+ __ LslImmediate(R2, R2, \
+ target::RawObject::kTagBitsSizeTagPos - \
+ target::ObjectAlignment::kObjectAlignmentLog2); \
__ csel(R2, ZR, R2, HI); \
\
/* Get the class index and insert it into the tags. */ \
- uint32_t tags = 0; \
- tags = RawObject::ClassIdTag::update(cid, tags); \
- tags = RawObject::NewBit::update(true, tags); \
+ uint32_t tags = \
+ target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0); \
__ LoadImmediate(TMP, tags); \
__ orr(R2, R2, Operand(TMP)); \
- __ str(R2, FieldAddress(R0, type_name::tags_offset())); /* Tags. */ \
+ __ str(R2, FieldAddress(R0, target::Object::tags_offset())); /* Tags. */ \
} \
/* Set the length field. */ \
/* R0: new object start as a tagged pointer. */ \
/* R1: new object end address. */ \
__ ldr(R2, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \
__ StoreIntoObjectNoBarrier( \
- R0, FieldAddress(R0, type_name::length_offset()), R2); \
+ R0, FieldAddress(R0, target::TypedData::length_offset()), R2); \
/* Initialize all array elements to 0. */ \
/* R0: new object start as a tagged pointer. */ \
/* R1: new object end address. */ \
@@ -176,13 +176,13 @@
/* R3: scratch register. */ \
/* data area to be initialized. */ \
__ mov(R3, ZR); \
- __ AddImmediate(R2, R0, sizeof(Raw##type_name) - 1); \
+ __ AddImmediate(R2, R0, target::TypedData::InstanceSize() - 1); \
Label init_loop, done; \
__ Bind(&init_loop); \
__ cmp(R2, Operand(R1)); \
__ b(&done, CS); \
__ str(R3, Address(R2, 0)); \
- __ add(R2, R2, Operand(kWordSize)); \
+ __ add(R2, R2, Operand(target::kWordSize)); \
__ b(&init_loop); \
__ Bind(&done); \
\
@@ -190,12 +190,12 @@
__ Bind(normal_ir_body);
#define TYPED_DATA_ALLOCATOR(clazz) \
- void Intrinsifier::TypedData_##clazz##_factory(Assembler* assembler, \
- Label* normal_ir_body) { \
- intptr_t size = TypedData::ElementSizeInBytes(kTypedData##clazz##Cid); \
- intptr_t max_len = TypedData::MaxNewSpaceElements(kTypedData##clazz##Cid); \
+ void AsmIntrinsifier::TypedData_##clazz##_factory(Assembler* assembler, \
+ Label* normal_ir_body) { \
+ intptr_t size = TypedDataElementSizeInBytes(kTypedData##clazz##Cid); \
+ intptr_t max_len = TypedDataMaxNewSpaceElements(kTypedData##clazz##Cid); \
int shift = GetScaleFactor(size); \
- TYPED_ARRAY_ALLOCATION(TypedData, kTypedData##clazz##Cid, max_len, shift); \
+ TYPED_ARRAY_ALLOCATION(kTypedData##clazz##Cid, max_len, shift); \
}
CLASS_LIST_TYPED_DATA(TYPED_DATA_ALLOCATOR)
#undef TYPED_DATA_ALLOCATOR
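
The size computation inside TYPED_ARRAY_ALLOCATION reduces to header-plus-payload rounded up to the object alignment (the andi with ~(alignment - 1) above). A simplified model, ignoring the fact that the stub operates on the still-Smi-tagged length; the alignment constant is a placeholder for target::ObjectAlignment::kObjectAlignment:

#include <cstdint>

constexpr intptr_t kObjectAlignmentSketch = 16;  // assumed value

intptr_t TypedDataAllocationSizeSketch(intptr_t header_size, intptr_t length,
                                       int scale_shift) {
  intptr_t payload = length << scale_shift;  // length * element size in bytes
  intptr_t unrounded = header_size + payload + kObjectAlignmentSketch - 1;
  return unrounded & ~(kObjectAlignmentSketch - 1);  // round to alignment
}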
@@ -203,27 +203,27 @@
// Loads args from stack into R0 and R1
// Tests if they are smis, jumps to label not_smi if not.
static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) {
- __ ldr(R0, Address(SP, +0 * kWordSize));
- __ ldr(R1, Address(SP, +1 * kWordSize));
+ __ ldr(R0, Address(SP, +0 * target::kWordSize));
+ __ ldr(R1, Address(SP, +1 * target::kWordSize));
__ orr(TMP, R0, Operand(R1));
__ BranchIfNotSmi(TMP, not_smi);
}
-void Intrinsifier::Integer_addFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_addFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
TestBothArgumentsSmis(assembler, normal_ir_body); // Checks two smis.
- __ adds(R0, R0, Operand(R1)); // Adds.
- __ b(normal_ir_body, VS); // Fall-through on overflow.
+ __ adds(R0, R0, Operand(R1)); // Adds.
+ __ b(normal_ir_body, VS); // Fall-through on overflow.
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_add(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_add(Assembler* assembler, Label* normal_ir_body) {
Integer_addFromInteger(assembler, normal_ir_body);
}
-void Intrinsifier::Integer_subFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_subFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
TestBothArgumentsSmis(assembler, normal_ir_body);
__ subs(R0, R0, Operand(R1)); // Subtract.
__ b(normal_ir_body, VS); // Fall-through on overflow.
@@ -231,7 +231,7 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_sub(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_sub(Assembler* assembler, Label* normal_ir_body) {
TestBothArgumentsSmis(assembler, normal_ir_body);
__ subs(R0, R1, Operand(R0)); // Subtract.
__ b(normal_ir_body, VS); // Fall-through on overflow.
@@ -239,8 +239,8 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_mulFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_mulFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
TestBothArgumentsSmis(assembler, normal_ir_body); // checks two smis
__ SmiUntag(R0); // Untags R6. We only want result shifted by one.
@@ -254,7 +254,7 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_mul(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_mul(Assembler* assembler, Label* normal_ir_body) {
Integer_mulFromInteger(assembler, normal_ir_body);
}
@@ -315,12 +315,12 @@
// res = res + right;
// }
// }
-void Intrinsifier::Integer_moduloFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_moduloFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
// Check to see if we have integer division
Label neg_remainder, fall_through;
- __ ldr(R1, Address(SP, +0 * kWordSize));
- __ ldr(R0, Address(SP, +1 * kWordSize));
+ __ ldr(R1, Address(SP, +0 * target::kWordSize));
+ __ ldr(R0, Address(SP, +1 * target::kWordSize));
__ orr(TMP, R0, Operand(R1));
__ BranchIfNotSmi(TMP, normal_ir_body);
// R1: Tagged left (dividend).
@@ -348,8 +348,8 @@
__ Bind(normal_ir_body);
}
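
The pseudo code in the comment above is the usual fix-up from the hardware remainder (which takes the sign of the dividend) to Dart's non-negative % result. As plain C++, with right assumed non-zero as guaranteed by the caller:

#include <cstdint>

intptr_t EuclideanModuloSketch(intptr_t left, intptr_t right) {
  intptr_t res = left % right;  // may be negative when left is negative
  if (res < 0) {
    res = (right < 0) ? res - right : res + right;
  }
  return res;
}

For example, EuclideanModuloSketch(-5, 3) and EuclideanModuloSketch(-5, -3) both yield 1.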
-void Intrinsifier::Integer_truncDivide(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_truncDivide(Assembler* assembler,
+ Label* normal_ir_body) {
// Check to see if we have integer division
TestBothArgumentsSmis(assembler, normal_ir_body);
@@ -370,8 +370,9 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_negate(Assembler* assembler, Label* normal_ir_body) {
- __ ldr(R0, Address(SP, +0 * kWordSize)); // Grab first argument.
+void AsmIntrinsifier::Integer_negate(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ ldr(R0, Address(SP, +0 * target::kWordSize)); // Grab first argument.
__ BranchIfNotSmi(R0, normal_ir_body);
__ negs(R0, R0);
__ b(normal_ir_body, VS);
@@ -379,43 +380,46 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_bitAndFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_bitAndFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
TestBothArgumentsSmis(assembler, normal_ir_body); // Checks two smis.
__ and_(R0, R0, Operand(R1));
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_bitAnd(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_bitAnd(Assembler* assembler,
+ Label* normal_ir_body) {
Integer_bitAndFromInteger(assembler, normal_ir_body);
}
-void Intrinsifier::Integer_bitOrFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_bitOrFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
TestBothArgumentsSmis(assembler, normal_ir_body); // Checks two smis.
__ orr(R0, R0, Operand(R1));
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_bitOr(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_bitOr(Assembler* assembler,
+ Label* normal_ir_body) {
Integer_bitOrFromInteger(assembler, normal_ir_body);
}
-void Intrinsifier::Integer_bitXorFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_bitXorFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
TestBothArgumentsSmis(assembler, normal_ir_body); // Checks two smis.
__ eor(R0, R0, Operand(R1));
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_bitXor(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_bitXor(Assembler* assembler,
+ Label* normal_ir_body) {
Integer_bitXorFromInteger(assembler, normal_ir_body);
}
-void Intrinsifier::Integer_shl(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_shl(Assembler* assembler, Label* normal_ir_body) {
ASSERT(kSmiTagShift == 1);
ASSERT(kSmiTag == 0);
const Register right = R0;
@@ -424,7 +428,7 @@
const Register result = R0;
TestBothArgumentsSmis(assembler, normal_ir_body);
- __ CompareImmediate(right, reinterpret_cast<int64_t>(Smi::New(Smi::kBits)));
+ __ CompareImmediate(right, target::ToRawSmi(target::Smi::kBits));
__ b(normal_ir_body, CS);
// Left is not a constant.
@@ -448,46 +452,46 @@
TestBothArgumentsSmis(assembler, normal_ir_body);
// R0 contains the right argument, R1 the left.
__ CompareRegisters(R1, R0);
- __ LoadObject(R0, Bool::False());
- __ LoadObject(TMP, Bool::True());
+ __ LoadObject(R0, CastHandle<Object>(FalseObject()));
+ __ LoadObject(TMP, CastHandle<Object>(TrueObject()));
__ csel(R0, TMP, R0, true_condition);
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_greaterThanFromInt(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_greaterThanFromInt(Assembler* assembler,
+ Label* normal_ir_body) {
CompareIntegers(assembler, normal_ir_body, LT);
}
-void Intrinsifier::Integer_lessThan(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_lessThan(Assembler* assembler,
+ Label* normal_ir_body) {
Integer_greaterThanFromInt(assembler, normal_ir_body);
}
-void Intrinsifier::Integer_greaterThan(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_greaterThan(Assembler* assembler,
+ Label* normal_ir_body) {
CompareIntegers(assembler, normal_ir_body, GT);
}
-void Intrinsifier::Integer_lessEqualThan(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_lessEqualThan(Assembler* assembler,
+ Label* normal_ir_body) {
CompareIntegers(assembler, normal_ir_body, LE);
}
-void Intrinsifier::Integer_greaterEqualThan(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_greaterEqualThan(Assembler* assembler,
+ Label* normal_ir_body) {
CompareIntegers(assembler, normal_ir_body, GE);
}
// This is called for Smi and Mint receivers. The right argument
// can be Smi, Mint or double.
-void Intrinsifier::Integer_equalToInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_equalToInteger(Assembler* assembler,
+ Label* normal_ir_body) {
Label true_label, check_for_mint;
// For integer receiver '===' check first.
- __ ldr(R0, Address(SP, 0 * kWordSize));
- __ ldr(R1, Address(SP, 1 * kWordSize));
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
+ __ ldr(R1, Address(SP, 1 * target::kWordSize));
__ cmp(R0, Operand(R1));
__ b(&true_label, EQ);
@@ -496,10 +500,10 @@
// If R0 or R1 is not a smi do Mint checks.
// Both arguments are smi, '===' is good enough.
- __ LoadObject(R0, Bool::False());
+ __ LoadObject(R0, CastHandle<Object>(FalseObject()));
__ ret();
__ Bind(&true_label);
- __ LoadObject(R0, Bool::True());
+ __ LoadObject(R0, CastHandle<Object>(TrueObject()));
__ ret();
// At least one of the arguments was not Smi.
@@ -514,7 +518,8 @@
__ CompareClassId(R0, kDoubleCid);
__ b(normal_ir_body, EQ);
- __ LoadObject(R0, Bool::False()); // Smi == Mint -> false.
+ __ LoadObject(R0,
+ CastHandle<Object>(FalseObject())); // Smi == Mint -> false.
__ ret();
__ Bind(&receiver_not_smi);
@@ -524,18 +529,19 @@
__ b(normal_ir_body, NE);
// Receiver is Mint, return false if right is Smi.
__ BranchIfNotSmi(R0, normal_ir_body);
- __ LoadObject(R0, Bool::False());
+ __ LoadObject(R0, CastHandle<Object>(FalseObject()));
__ ret();
// TODO(srdjan): Implement Mint == Mint comparison.
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_equal(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_equal(Assembler* assembler,
+ Label* normal_ir_body) {
Integer_equalToInteger(assembler, normal_ir_body);
}
-void Intrinsifier::Integer_sar(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_sar(Assembler* assembler, Label* normal_ir_body) {
TestBothArgumentsSmis(assembler, normal_ir_body);
// Shift amount in R0. Value to shift in R1.
@@ -555,15 +561,17 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::Smi_bitNegate(Assembler* assembler, Label* normal_ir_body) {
- __ ldr(R0, Address(SP, 0 * kWordSize));
+void AsmIntrinsifier::Smi_bitNegate(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
__ mvn(R0, R0);
__ andi(R0, R0, Immediate(~kSmiTagMask)); // Remove inverted smi-tag.
__ ret();
}
-void Intrinsifier::Smi_bitLength(Assembler* assembler, Label* normal_ir_body) {
- __ ldr(R0, Address(SP, 0 * kWordSize));
+void AsmIntrinsifier::Smi_bitLength(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
__ SmiUntag(R0);
// XOR with sign bit to complement bits if value is negative.
__ eor(R0, R0, Operand(R0, ASR, 63));
@@ -574,31 +582,31 @@
__ ret();
}
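
The eor with the arithmetically shifted value complements negative smis so that bitLength can be computed from the highest set bit of a non-negative value (the clz-based tail of the intrinsic sits outside this hunk). A small model:

#include <cstdint>

int SmiBitLengthSketch(int64_t value) {
  // XOR with the sign extension: identity for value >= 0, ~value otherwise.
  uint64_t bits = static_cast<uint64_t>(value ^ (value >> 63));
  int length = 0;
  while (bits != 0) {  // equivalent to 64 - countLeadingZeros(bits)
    bits >>= 1;
    ++length;
  }
  return length;
}

SmiBitLengthSketch(-1) is 0 and SmiBitLengthSketch(5) is 3, matching int.bitLength.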
-void Intrinsifier::Smi_bitAndFromSmi(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Smi_bitAndFromSmi(Assembler* assembler,
+ Label* normal_ir_body) {
Integer_bitAndFromInteger(assembler, normal_ir_body);
}
-void Intrinsifier::Bigint_lsh(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Bigint_lsh(Assembler* assembler, Label* normal_ir_body) {
// static void _lsh(Uint32List x_digits, int x_used, int n,
// Uint32List r_digits)
// R2 = x_used, R3 = x_digits, x_used > 0, x_used is Smi.
- __ ldp(R2, R3, Address(SP, 2 * kWordSize, Address::PairOffset));
+ __ ldp(R2, R3, Address(SP, 2 * target::kWordSize, Address::PairOffset));
__ add(R2, R2, Operand(2)); // x_used > 0, Smi. R2 = x_used + 1, round up.
__ AsrImmediate(R2, R2, 2); // R2 = num of digit pairs to read.
// R4 = r_digits, R5 = n, n is Smi, n % _DIGIT_BITS != 0.
- __ ldp(R4, R5, Address(SP, 0 * kWordSize, Address::PairOffset));
+ __ ldp(R4, R5, Address(SP, 0 * target::kWordSize, Address::PairOffset));
__ SmiUntag(R5);
// R0 = n ~/ (2*_DIGIT_BITS)
__ AsrImmediate(R0, R5, 6);
// R6 = &x_digits[0]
- __ add(R6, R3, Operand(TypedData::data_offset() - kHeapObjectTag));
+ __ add(R6, R3, Operand(target::TypedData::data_offset() - kHeapObjectTag));
// R7 = &x_digits[2*R2]
__ add(R7, R6, Operand(R2, LSL, 3));
// R8 = &r_digits[2*1]
__ add(R8, R4,
- Operand(TypedData::data_offset() - kHeapObjectTag +
+ Operand(target::TypedData::data_offset() - kHeapObjectTag +
2 * kBytesPerBigIntDigit));
// R8 = &r_digits[2*(R2 + n ~/ (2*_DIGIT_BITS) + 1)]
__ add(R0, R0, Operand(R2));
@@ -619,27 +627,27 @@
__ cmp(R7, Operand(R6));
__ b(&loop, NE);
__ str(R1, Address(R8, -2 * kBytesPerBigIntDigit, Address::PreIndex));
- __ LoadObject(R0, Object::null_object());
+ __ LoadObject(R0, NullObject());
__ ret();
}
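
_lsh shifts the 32-bit digit array left by n bits, working on 64-bit digit pairs. A simplified forward-loop model; bit_shift is non-zero here because the Dart caller only invokes the intrinsic when n % _DIGIT_BITS != 0, and r is assumed large enough to hold the shifted result:

#include <cstddef>
#include <cstdint>

void BigintLshSketch(const uint64_t* x, size_t x_pairs, int n, uint64_t* r) {
  const int pair_shift = n / 64;  // whole 64-bit pairs to skip in r
  const int bit_shift = n % 64;   // non-zero, so (64 - bit_shift) is valid
  uint64_t carry = 0;
  for (size_t i = 0; i < x_pairs; ++i) {
    r[i + pair_shift] = (x[i] << bit_shift) | carry;
    carry = x[i] >> (64 - bit_shift);
  }
  r[x_pairs + pair_shift] = carry;  // the final str above
}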
-void Intrinsifier::Bigint_rsh(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Bigint_rsh(Assembler* assembler, Label* normal_ir_body) {
// static void _lsh(Uint32List x_digits, int x_used, int n,
// Uint32List r_digits)
// R2 = x_used, R3 = x_digits, x_used > 0, x_used is Smi.
- __ ldp(R2, R3, Address(SP, 2 * kWordSize, Address::PairOffset));
+ __ ldp(R2, R3, Address(SP, 2 * target::kWordSize, Address::PairOffset));
__ add(R2, R2, Operand(2)); // x_used > 0, Smi. R2 = x_used + 1, round up.
__ AsrImmediate(R2, R2, 2); // R2 = num of digit pairs to read.
// R4 = r_digits, R5 = n, n is Smi, n % _DIGIT_BITS != 0.
- __ ldp(R4, R5, Address(SP, 0 * kWordSize, Address::PairOffset));
+ __ ldp(R4, R5, Address(SP, 0 * target::kWordSize, Address::PairOffset));
__ SmiUntag(R5);
// R0 = n ~/ (2*_DIGIT_BITS)
__ AsrImmediate(R0, R5, 6);
// R8 = &r_digits[0]
- __ add(R8, R4, Operand(TypedData::data_offset() - kHeapObjectTag));
+ __ add(R8, R4, Operand(target::TypedData::data_offset() - kHeapObjectTag));
// R7 = &x_digits[2*(n ~/ (2*_DIGIT_BITS))]
- __ add(R7, R3, Operand(TypedData::data_offset() - kHeapObjectTag));
+ __ add(R7, R3, Operand(target::TypedData::data_offset() - kHeapObjectTag));
__ add(R7, R7, Operand(R0, LSL, 3));
// R6 = &r_digits[2*(R2 - n ~/ (2*_DIGIT_BITS) - 1)]
__ add(R0, R0, Operand(1));
@@ -666,33 +674,34 @@
__ cmp(R8, Operand(R6));
__ b(&loop, NE);
__ str(R1, Address(R8, 0));
- __ LoadObject(R0, Object::null_object());
+ __ LoadObject(R0, NullObject());
__ ret();
}
-void Intrinsifier::Bigint_absAdd(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Bigint_absAdd(Assembler* assembler,
+ Label* normal_ir_body) {
// static void _absAdd(Uint32List digits, int used,
// Uint32List a_digits, int a_used,
// Uint32List r_digits)
// R2 = used, R3 = digits
- __ ldp(R2, R3, Address(SP, 3 * kWordSize, Address::PairOffset));
+ __ ldp(R2, R3, Address(SP, 3 * target::kWordSize, Address::PairOffset));
__ add(R2, R2, Operand(2)); // used > 0, Smi. R2 = used + 1, round up.
__ add(R2, ZR, Operand(R2, ASR, 2)); // R2 = num of digit pairs to process.
// R3 = &digits[0]
- __ add(R3, R3, Operand(TypedData::data_offset() - kHeapObjectTag));
+ __ add(R3, R3, Operand(target::TypedData::data_offset() - kHeapObjectTag));
// R4 = a_used, R5 = a_digits
- __ ldp(R4, R5, Address(SP, 1 * kWordSize, Address::PairOffset));
+ __ ldp(R4, R5, Address(SP, 1 * target::kWordSize, Address::PairOffset));
__ add(R4, R4, Operand(2)); // a_used > 0, Smi. R4 = a_used + 1, round up.
__ add(R4, ZR, Operand(R4, ASR, 2)); // R4 = num of digit pairs to process.
// R5 = &a_digits[0]
- __ add(R5, R5, Operand(TypedData::data_offset() - kHeapObjectTag));
+ __ add(R5, R5, Operand(target::TypedData::data_offset() - kHeapObjectTag));
// R6 = r_digits
- __ ldr(R6, Address(SP, 0 * kWordSize));
+ __ ldr(R6, Address(SP, 0 * target::kWordSize));
// R6 = &r_digits[0]
- __ add(R6, R6, Operand(TypedData::data_offset() - kHeapObjectTag));
+ __ add(R6, R6, Operand(target::TypedData::data_offset() - kHeapObjectTag));
// R7 = &digits[a_used rounded up to even number].
__ add(R7, R3, Operand(R4, LSL, 3));
@@ -731,33 +740,34 @@
__ str(R0, Address(R6, 0));
__ Bind(&done);
- __ LoadObject(R0, Object::null_object());
+ __ LoadObject(R0, NullObject());
__ ret();
}
-void Intrinsifier::Bigint_absSub(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Bigint_absSub(Assembler* assembler,
+ Label* normal_ir_body) {
// static void _absSub(Uint32List digits, int used,
// Uint32List a_digits, int a_used,
// Uint32List r_digits)
// R2 = used, R3 = digits
- __ ldp(R2, R3, Address(SP, 3 * kWordSize, Address::PairOffset));
+ __ ldp(R2, R3, Address(SP, 3 * target::kWordSize, Address::PairOffset));
__ add(R2, R2, Operand(2)); // used > 0, Smi. R2 = used + 1, round up.
__ add(R2, ZR, Operand(R2, ASR, 2)); // R2 = num of digit pairs to process.
// R3 = &digits[0]
- __ add(R3, R3, Operand(TypedData::data_offset() - kHeapObjectTag));
+ __ add(R3, R3, Operand(target::TypedData::data_offset() - kHeapObjectTag));
// R4 = a_used, R5 = a_digits
- __ ldp(R4, R5, Address(SP, 1 * kWordSize, Address::PairOffset));
+ __ ldp(R4, R5, Address(SP, 1 * target::kWordSize, Address::PairOffset));
__ add(R4, R4, Operand(2)); // a_used > 0, Smi. R4 = a_used + 1, round up.
__ add(R4, ZR, Operand(R4, ASR, 2)); // R4 = num of digit pairs to process.
// R5 = &a_digits[0]
- __ add(R5, R5, Operand(TypedData::data_offset() - kHeapObjectTag));
+ __ add(R5, R5, Operand(target::TypedData::data_offset() - kHeapObjectTag));
// R6 = r_digits
- __ ldr(R6, Address(SP, 0 * kWordSize));
+ __ ldr(R6, Address(SP, 0 * target::kWordSize));
// R6 = &r_digits[0]
- __ add(R6, R6, Operand(TypedData::data_offset() - kHeapObjectTag));
+ __ add(R6, R6, Operand(target::TypedData::data_offset() - kHeapObjectTag));
// R7 = &digits[a_used rounded up to even number].
__ add(R7, R3, Operand(R4, LSL, 3));
@@ -790,11 +800,12 @@
__ cbnz(&carry_loop, R9);
__ Bind(&done);
- __ LoadObject(R0, Object::null_object());
+ __ LoadObject(R0, NullObject());
__ ret();
}
-void Intrinsifier::Bigint_mulAdd(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Bigint_mulAdd(Assembler* assembler,
+ Label* normal_ir_body) {
// Pseudo code:
// static int _mulAdd(Uint32List x_digits, int xi,
// Uint32List m_digits, int i,
@@ -826,29 +837,29 @@
Label done;
// R3 = x, no_op if x == 0
// R0 = xi as Smi, R1 = x_digits.
- __ ldp(R0, R1, Address(SP, 5 * kWordSize, Address::PairOffset));
+ __ ldp(R0, R1, Address(SP, 5 * target::kWordSize, Address::PairOffset));
__ add(R1, R1, Operand(R0, LSL, 1));
- __ ldr(R3, FieldAddress(R1, TypedData::data_offset()));
+ __ ldr(R3, FieldAddress(R1, target::TypedData::data_offset()));
__ tst(R3, Operand(R3));
__ b(&done, EQ);
// R6 = (SmiUntag(n) + 1)/2, no_op if n == 0
- __ ldr(R6, Address(SP, 0 * kWordSize));
+ __ ldr(R6, Address(SP, 0 * target::kWordSize));
__ add(R6, R6, Operand(2));
__ adds(R6, ZR, Operand(R6, ASR, 2)); // SmiUntag(R6) and set cc.
__ b(&done, EQ);
// R4 = mip = &m_digits[i >> 1]
// R0 = i as Smi, R1 = m_digits.
- __ ldp(R0, R1, Address(SP, 3 * kWordSize, Address::PairOffset));
+ __ ldp(R0, R1, Address(SP, 3 * target::kWordSize, Address::PairOffset));
__ add(R1, R1, Operand(R0, LSL, 1));
- __ add(R4, R1, Operand(TypedData::data_offset() - kHeapObjectTag));
+ __ add(R4, R1, Operand(target::TypedData::data_offset() - kHeapObjectTag));
// R5 = ajp = &a_digits[j >> 1]
// R0 = j as Smi, R1 = a_digits.
- __ ldp(R0, R1, Address(SP, 1 * kWordSize, Address::PairOffset));
+ __ ldp(R0, R1, Address(SP, 1 * target::kWordSize, Address::PairOffset));
__ add(R1, R1, Operand(R0, LSL, 1));
- __ add(R5, R1, Operand(TypedData::data_offset() - kHeapObjectTag));
+ __ add(R5, R1, Operand(target::TypedData::data_offset() - kHeapObjectTag));
// R1 = c = 0
__ mov(R1, ZR);
@@ -900,11 +911,12 @@
__ b(&propagate_carry_loop, CS);
__ Bind(&done);
- __ LoadImmediate(R0, Smi::RawValue(2)); // Two digits processed.
+ __ LoadImmediate(R0, target::ToRawSmi(2)); // Two digits processed.
__ ret();
}
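
The kernel of _mulAdd is a classic multiply-accumulate with carry over 64-bit digit pairs; the stub uses mul/umulh where the sketch below uses the compiler's 128-bit integer (a GCC/Clang extension). Pointers are assumed to be pre-offset to the relevant digit pairs, and a is assumed to have room for the carry to settle:

#include <cstddef>
#include <cstdint>

void MulAddSketch(uint64_t x, const uint64_t* m, uint64_t* a, size_t n) {
  if (x == 0) return;  // the no_op fast path checked at the top of the stub
  uint64_t carry = 0;
  for (size_t k = 0; k < n; ++k) {
    unsigned __int128 t =
        static_cast<unsigned __int128>(x) * m[k] + a[k] + carry;
    a[k] = static_cast<uint64_t>(t);         // low 64 bits
    carry = static_cast<uint64_t>(t >> 64);  // high 64 bits
  }
  for (size_t k = n; carry != 0; ++k) {  // propagate the final carry
    unsigned __int128 t = static_cast<unsigned __int128>(a[k]) + carry;
    a[k] = static_cast<uint64_t>(t);
    carry = static_cast<uint64_t>(t >> 64);
  }
}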
-void Intrinsifier::Bigint_sqrAdd(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Bigint_sqrAdd(Assembler* assembler,
+ Label* normal_ir_body) {
// Pseudo code:
// static int _sqrAdd(Uint32List x_digits, int i,
// Uint32List a_digits, int used) {
@@ -933,9 +945,9 @@
// R4 = xip = &x_digits[i >> 1]
// R2 = i as Smi, R3 = x_digits
- __ ldp(R2, R3, Address(SP, 2 * kWordSize, Address::PairOffset));
+ __ ldp(R2, R3, Address(SP, 2 * target::kWordSize, Address::PairOffset));
__ add(R3, R3, Operand(R2, LSL, 1));
- __ add(R4, R3, Operand(TypedData::data_offset() - kHeapObjectTag));
+ __ add(R4, R3, Operand(target::TypedData::data_offset() - kHeapObjectTag));
// R3 = x = *xip++, return if x == 0
Label x_zero;
@@ -944,9 +956,9 @@
__ b(&x_zero, EQ);
// R5 = ajp = &a_digits[i]
- __ ldr(R1, Address(SP, 1 * kWordSize)); // a_digits
- __ add(R1, R1, Operand(R2, LSL, 2)); // j == 2*i, i is Smi.
- __ add(R5, R1, Operand(TypedData::data_offset() - kHeapObjectTag));
+ __ ldr(R1, Address(SP, 1 * target::kWordSize)); // a_digits
+ __ add(R1, R1, Operand(R2, LSL, 2)); // j == 2*i, i is Smi.
+ __ add(R5, R1, Operand(target::TypedData::data_offset() - kHeapObjectTag));
// R6:R1 = t = x*x + *ajp
__ ldr(R0, Address(R5, 0));
@@ -960,7 +972,7 @@
__ str(R1, Address(R5, 2 * kBytesPerBigIntDigit, Address::PostIndex));
// int n = (used - i + 1)/2 - 1
- __ ldr(R0, Address(SP, 0 * kWordSize)); // used is Smi
+ __ ldr(R0, Address(SP, 0 * target::kWordSize)); // used is Smi
__ sub(R8, R0, Operand(R2));
__ add(R8, R8, Operand(2));
__ movn(R0, Immediate(1), 0); // R0 = ~1 = -2.
@@ -1014,12 +1026,12 @@
__ stp(R6, R7, Address(R5, 0, Address::PairOffset));
__ Bind(&x_zero);
- __ LoadImmediate(R0, Smi::RawValue(2)); // Two digits processed.
+ __ LoadImmediate(R0, target::ToRawSmi(2)); // Two digits processed.
__ ret();
}
-void Intrinsifier::Bigint_estimateQuotientDigit(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Bigint_estimateQuotientDigit(Assembler* assembler,
+ Label* normal_ir_body) {
// There is no 128-bit by 64-bit division instruction on arm64, so we use two
// 64-bit by 32-bit divisions and two 64-bit by 64-bit multiplications to
// adjust the two 32-bit digits of the estimated quotient.
@@ -1065,16 +1077,17 @@
// }
// R4 = args
- __ ldr(R4, Address(SP, 2 * kWordSize)); // args
+ __ ldr(R4, Address(SP, 2 * target::kWordSize)); // args
// R3 = yt = args[0..1]
- __ ldr(R3, FieldAddress(R4, TypedData::data_offset()));
+ __ ldr(R3, FieldAddress(R4, target::TypedData::data_offset()));
// R2 = dh = digits[(i >> 1) - 1 .. i >> 1]
// R0 = i as Smi, R1 = digits
- __ ldp(R0, R1, Address(SP, 0 * kWordSize, Address::PairOffset));
+ __ ldp(R0, R1, Address(SP, 0 * target::kWordSize, Address::PairOffset));
__ add(R1, R1, Operand(R0, LSL, 1));
- __ ldr(R2, FieldAddress(R1, TypedData::data_offset() - kBytesPerBigIntDigit));
+ __ ldr(R2, FieldAddress(
+ R1, target::TypedData::data_offset() - kBytesPerBigIntDigit));
// R0 = qd = (DIGIT_MASK << 32) | DIGIT_MASK = -1
__ movn(R0, Immediate(0), 0);
@@ -1085,8 +1098,8 @@
__ b(&return_qd, EQ);
// R1 = dl = digits[(i >> 1) - 3 .. (i >> 1) - 2]
- __ ldr(R1,
- FieldAddress(R1, TypedData::data_offset() - 3 * kBytesPerBigIntDigit));
+ __ ldr(R1, FieldAddress(R1, target::TypedData::data_offset() -
+ 3 * kBytesPerBigIntDigit));
// R5 = yth = yt >> 32
__ orr(R5, ZR, Operand(R3, LSR, 32));
@@ -1190,15 +1203,15 @@
__ Bind(&return_qd);
// args[2..3] = qd
- __ str(R0,
- FieldAddress(R4, TypedData::data_offset() + 2 * kBytesPerBigIntDigit));
+ __ str(R0, FieldAddress(R4, target::TypedData::data_offset() +
+ 2 * kBytesPerBigIntDigit));
- __ LoadImmediate(R0, Smi::RawValue(2)); // Two digits processed.
+ __ LoadImmediate(R0, target::ToRawSmi(2)); // Two digits processed.
__ ret();
}
-void Intrinsifier::Montgomery_mulMod(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Montgomery_mulMod(Assembler* assembler,
+ Label* normal_ir_body) {
// Pseudo code:
// static int _mulMod(Uint32List args, Uint32List digits, int i) {
// uint64_t rho = args[_RHO .. _RHO_HI]; // _RHO == 2, _RHO_HI == 3.
@@ -1209,26 +1222,26 @@
// }
// R4 = args
- __ ldr(R4, Address(SP, 2 * kWordSize)); // args
+ __ ldr(R4, Address(SP, 2 * target::kWordSize)); // args
// R3 = rho = args[2..3]
- __ ldr(R3,
- FieldAddress(R4, TypedData::data_offset() + 2 * kBytesPerBigIntDigit));
+ __ ldr(R3, FieldAddress(R4, target::TypedData::data_offset() +
+ 2 * kBytesPerBigIntDigit));
// R2 = digits[i >> 1 .. (i >> 1) + 1]
// R0 = i as Smi, R1 = digits
- __ ldp(R0, R1, Address(SP, 0 * kWordSize, Address::PairOffset));
+ __ ldp(R0, R1, Address(SP, 0 * target::kWordSize, Address::PairOffset));
__ add(R1, R1, Operand(R0, LSL, 1));
- __ ldr(R2, FieldAddress(R1, TypedData::data_offset()));
+ __ ldr(R2, FieldAddress(R1, target::TypedData::data_offset()));
// R0 = rho*d mod DIGIT_BASE
__ mul(R0, R2, R3); // R0 = low64(R2*R3).
// args[4 .. 5] = R0
- __ str(R0,
- FieldAddress(R4, TypedData::data_offset() + 4 * kBytesPerBigIntDigit));
+ __ str(R0, FieldAddress(R4, target::TypedData::data_offset() +
+ 4 * kBytesPerBigIntDigit));
- __ LoadImmediate(R0, Smi::RawValue(2)); // Two digits processed.
+ __ LoadImmediate(R0, target::ToRawSmi(2)); // Two digits processed.
__ ret();
}
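
The whole Montgomery step above boils down to one multiplication reduced mod 2^64, which on a 64-bit target is just the low half of the product, i.e. the single mul instruction:

#include <cstdint>

uint64_t MontgomeryMuSketch(uint64_t rho, uint64_t d) {
  return rho * d;  // unsigned wrap-around == reduction mod 2^64
}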
@@ -1238,7 +1251,7 @@
static void TestLastArgumentIsDouble(Assembler* assembler,
Label* is_smi,
Label* not_double_smi) {
- __ ldr(R0, Address(SP, 0 * kWordSize));
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
__ BranchIfSmi(R0, is_smi);
__ CompareClassId(R0, kDoubleCid);
__ b(not_double_smi, NE);
@@ -1257,18 +1270,18 @@
TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
// Both arguments are double, right operand is in R0.
- __ LoadDFieldFromOffset(V1, R0, Double::value_offset());
+ __ LoadDFieldFromOffset(V1, R0, target::Double::value_offset());
__ Bind(&double_op);
- __ ldr(R0, Address(SP, 1 * kWordSize)); // Left argument.
- __ LoadDFieldFromOffset(V0, R0, Double::value_offset());
+ __ ldr(R0, Address(SP, 1 * target::kWordSize)); // Left argument.
+ __ LoadDFieldFromOffset(V0, R0, target::Double::value_offset());
__ fcmpd(V0, V1);
- __ LoadObject(R0, Bool::False());
+ __ LoadObject(R0, CastHandle<Object>(FalseObject()));
// Return false if D0 or D1 was NaN before checking true condition.
__ b(&not_nan, VC);
__ ret();
__ Bind(&not_nan);
- __ LoadObject(TMP, Bool::True());
+ __ LoadObject(TMP, CastHandle<Object>(TrueObject()));
__ csel(R0, TMP, R0, true_condition);
__ ret();
@@ -1279,27 +1292,28 @@
__ Bind(normal_ir_body);
}
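
The VC branch above encodes the usual IEEE comparison contract: any comparison involving NaN is false, otherwise the requested condition applies. In plain C++ terms, shown for one condition; the other Double_* comparisons only differ in the operator:

#include <cmath>

bool DoubleGreaterThanSketch(double left, double right) {
  if (std::isnan(left) || std::isnan(right)) return false;
  return left > right;
}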
-void Intrinsifier::Double_greaterThan(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Double_greaterThan(Assembler* assembler,
+ Label* normal_ir_body) {
CompareDoubles(assembler, normal_ir_body, HI);
}
-void Intrinsifier::Double_greaterEqualThan(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Double_greaterEqualThan(Assembler* assembler,
+ Label* normal_ir_body) {
CompareDoubles(assembler, normal_ir_body, CS);
}
-void Intrinsifier::Double_lessThan(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Double_lessThan(Assembler* assembler,
+ Label* normal_ir_body) {
CompareDoubles(assembler, normal_ir_body, CC);
}
-void Intrinsifier::Double_equal(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Double_equal(Assembler* assembler,
+ Label* normal_ir_body) {
CompareDoubles(assembler, normal_ir_body, EQ);
}
-void Intrinsifier::Double_lessEqualThan(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Double_lessEqualThan(Assembler* assembler,
+ Label* normal_ir_body) {
CompareDoubles(assembler, normal_ir_body, LS);
}
@@ -1312,10 +1326,10 @@
TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
// Both arguments are double, right operand is in R0.
- __ LoadDFieldFromOffset(V1, R0, Double::value_offset());
+ __ LoadDFieldFromOffset(V1, R0, target::Double::value_offset());
__ Bind(&double_op);
- __ ldr(R0, Address(SP, 1 * kWordSize)); // Left argument.
- __ LoadDFieldFromOffset(V0, R0, Double::value_offset());
+ __ ldr(R0, Address(SP, 1 * target::kWordSize)); // Left argument.
+ __ LoadDFieldFromOffset(V0, R0, target::Double::value_offset());
switch (kind) {
case Token::kADD:
__ faddd(V0, V0, V1);
@@ -1332,10 +1346,9 @@
default:
UNREACHABLE();
}
- const Class& double_class =
- Class::Handle(Isolate::Current()->object_store()->double_class());
+ const Class& double_class = DoubleClass();
__ TryAllocate(double_class, normal_ir_body, R0, R1);
- __ StoreDFieldToOffset(V0, R0, Double::value_offset());
+ __ StoreDFieldToOffset(V0, R0, target::Double::value_offset());
__ ret();
__ Bind(&is_smi); // Convert R0 to a double.
@@ -1346,93 +1359,91 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::Double_add(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Double_add(Assembler* assembler, Label* normal_ir_body) {
DoubleArithmeticOperations(assembler, normal_ir_body, Token::kADD);
}
-void Intrinsifier::Double_mul(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Double_mul(Assembler* assembler, Label* normal_ir_body) {
DoubleArithmeticOperations(assembler, normal_ir_body, Token::kMUL);
}
-void Intrinsifier::Double_sub(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Double_sub(Assembler* assembler, Label* normal_ir_body) {
DoubleArithmeticOperations(assembler, normal_ir_body, Token::kSUB);
}
-void Intrinsifier::Double_div(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Double_div(Assembler* assembler, Label* normal_ir_body) {
DoubleArithmeticOperations(assembler, normal_ir_body, Token::kDIV);
}
// Left is double, right is integer (Mint or Smi)
-void Intrinsifier::Double_mulFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Double_mulFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
// Only smis allowed.
- __ ldr(R0, Address(SP, 0 * kWordSize));
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
__ BranchIfNotSmi(R0, normal_ir_body);
// Is Smi.
__ SmiUntag(R0);
__ scvtfdx(V1, R0);
- __ ldr(R0, Address(SP, 1 * kWordSize));
- __ LoadDFieldFromOffset(V0, R0, Double::value_offset());
+ __ ldr(R0, Address(SP, 1 * target::kWordSize));
+ __ LoadDFieldFromOffset(V0, R0, target::Double::value_offset());
__ fmuld(V0, V0, V1);
- const Class& double_class =
- Class::Handle(Isolate::Current()->object_store()->double_class());
+ const Class& double_class = DoubleClass();
__ TryAllocate(double_class, normal_ir_body, R0, R1);
- __ StoreDFieldToOffset(V0, R0, Double::value_offset());
+ __ StoreDFieldToOffset(V0, R0, target::Double::value_offset());
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::DoubleFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
- __ ldr(R0, Address(SP, 0 * kWordSize));
+void AsmIntrinsifier::DoubleFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
__ BranchIfNotSmi(R0, normal_ir_body);
// Is Smi.
__ SmiUntag(R0);
__ scvtfdx(V0, R0);
- const Class& double_class =
- Class::Handle(Isolate::Current()->object_store()->double_class());
+ const Class& double_class = DoubleClass();
__ TryAllocate(double_class, normal_ir_body, R0, R1);
- __ StoreDFieldToOffset(V0, R0, Double::value_offset());
+ __ StoreDFieldToOffset(V0, R0, target::Double::value_offset());
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::Double_getIsNaN(Assembler* assembler,
- Label* normal_ir_body) {
- __ ldr(R0, Address(SP, 0 * kWordSize));
- __ LoadDFieldFromOffset(V0, R0, Double::value_offset());
+void AsmIntrinsifier::Double_getIsNaN(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
+ __ LoadDFieldFromOffset(V0, R0, target::Double::value_offset());
__ fcmpd(V0, V0);
- __ LoadObject(TMP, Bool::False());
- __ LoadObject(R0, Bool::True());
+ __ LoadObject(TMP, CastHandle<Object>(FalseObject()));
+ __ LoadObject(R0, CastHandle<Object>(TrueObject()));
__ csel(R0, TMP, R0, VC);
__ ret();
}
-void Intrinsifier::Double_getIsInfinite(Assembler* assembler,
- Label* normal_ir_body) {
- __ ldr(R0, Address(SP, 0 * kWordSize));
- __ LoadFieldFromOffset(R0, R0, Double::value_offset());
+void AsmIntrinsifier::Double_getIsInfinite(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
+ __ LoadFieldFromOffset(R0, R0, target::Double::value_offset());
// Mask off the sign.
__ AndImmediate(R0, R0, 0x7FFFFFFFFFFFFFFFLL);
// Compare with +infinity.
__ CompareImmediate(R0, 0x7FF0000000000000LL);
- __ LoadObject(R0, Bool::False());
- __ LoadObject(TMP, Bool::True());
+ __ LoadObject(R0, CastHandle<Object>(FalseObject()));
+ __ LoadObject(TMP, CastHandle<Object>(TrueObject()));
__ csel(R0, TMP, R0, EQ);
__ ret();
}
-void Intrinsifier::Double_getIsNegative(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Double_getIsNegative(Assembler* assembler,
+ Label* normal_ir_body) {
const Register false_reg = R0;
const Register true_reg = R2;
Label is_false, is_true, is_zero;
- __ ldr(R0, Address(SP, 0 * kWordSize));
- __ LoadDFieldFromOffset(V0, R0, Double::value_offset());
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
+ __ LoadDFieldFromOffset(V0, R0, target::Double::value_offset());
__ fcmpdz(V0);
- __ LoadObject(true_reg, Bool::True());
- __ LoadObject(false_reg, Bool::False());
+ __ LoadObject(true_reg, CastHandle<Object>(TrueObject()));
+ __ LoadObject(false_reg, CastHandle<Object>(FalseObject()));
__ b(&is_false, VS); // NaN -> false.
__ b(&is_zero, EQ); // Check for negative zero.
__ b(&is_false, CS); // >= 0 -> false.
@@ -1452,10 +1463,10 @@
__ ret();
}
-void Intrinsifier::DoubleToInteger(Assembler* assembler,
- Label* normal_ir_body) {
- __ ldr(R0, Address(SP, 0 * kWordSize));
- __ LoadDFieldFromOffset(V0, R0, Double::value_offset());
+void AsmIntrinsifier::DoubleToInteger(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
+ __ LoadDFieldFromOffset(V0, R0, target::Double::value_offset());
// Explicit NaN check, since ARM gives an FPU exception if you try to
// convert NaN to an int.
@@ -1472,15 +1483,15 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::Double_hashCode(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Double_hashCode(Assembler* assembler,
+ Label* normal_ir_body) {
// TODO(dartbug.com/31174): Convert this to a graph intrinsic.
// Load double value and check that it isn't NaN, since ARM gives an
// FPU exception if you try to convert NaN to an int.
Label double_hash;
- __ ldr(R1, Address(SP, 0 * kWordSize));
- __ LoadDFieldFromOffset(V0, R1, Double::value_offset());
+ __ ldr(R1, Address(SP, 0 * target::kWordSize));
+ __ LoadDFieldFromOffset(V0, R1, target::Double::value_offset());
__ fcmpd(V0, V0);
__ b(&double_hash, VS);
@@ -1515,17 +1526,16 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::MathSqrt(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::MathSqrt(Assembler* assembler, Label* normal_ir_body) {
Label is_smi, double_op;
TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
// Argument is double and is in R0.
- __ LoadDFieldFromOffset(V1, R0, Double::value_offset());
+ __ LoadDFieldFromOffset(V1, R0, target::Double::value_offset());
__ Bind(&double_op);
__ fsqrtd(V0, V1);
- const Class& double_class =
- Class::Handle(Isolate::Current()->object_store()->double_class());
+ const Class& double_class = DoubleClass();
__ TryAllocate(double_class, normal_ir_body, R0, R1);
- __ StoreDFieldToOffset(V0, R0, Double::value_offset());
+ __ StoreDFieldToOffset(V0, R0, target::Double::value_offset());
__ ret();
__ Bind(&is_smi);
__ SmiUntag(R0);
@@ -1537,26 +1547,20 @@
// var state = ((_A * (_state[kSTATE_LO])) + _state[kSTATE_HI]) & _MASK_64;
// _state[kSTATE_LO] = state & _MASK_32;
// _state[kSTATE_HI] = state >> 32;
-void Intrinsifier::Random_nextState(Assembler* assembler,
- Label* normal_ir_body) {
- const Library& math_lib = Library::Handle(Library::MathLibrary());
- ASSERT(!math_lib.IsNull());
- const Class& random_class =
- Class::Handle(math_lib.LookupClassAllowPrivate(Symbols::_Random()));
- ASSERT(!random_class.IsNull());
- const Field& state_field = Field::ZoneHandle(
- random_class.LookupInstanceFieldAllowPrivate(Symbols::_state()));
- ASSERT(!state_field.IsNull());
- const int64_t a_int_value = Intrinsifier::kRandomAValue;
+void AsmIntrinsifier::Random_nextState(Assembler* assembler,
+ Label* normal_ir_body) {
+ const Field& state_field = LookupMathRandomStateFieldOffset();
+ const int64_t a_int_value = AsmIntrinsifier::kRandomAValue;
// Receiver.
- __ ldr(R0, Address(SP, 0 * kWordSize));
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
// Field '_state'.
- __ ldr(R1, FieldAddress(R0, state_field.Offset()));
+ __ ldr(R1, FieldAddress(R0, LookupFieldOffsetInBytes(state_field)));
// Addresses of _state[0].
const int64_t disp =
- Instance::DataOffsetFor(kTypedDataUint32ArrayCid) - kHeapObjectTag;
+ target::Instance::DataOffsetFor(kTypedDataUint32ArrayCid) -
+ kHeapObjectTag;
__ LoadImmediate(R0, a_int_value);
__ LoadFromOffset(R2, R1, disp);
@@ -1565,17 +1569,18 @@
__ mul(R2, R0, R2);
__ add(R2, R2, Operand(R3));
__ StoreToOffset(R2, R1, disp);
- ASSERT(Smi::RawValue(0) == 0);
+ ASSERT(target::ToRawSmi(0) == 0);
__ eor(R0, R0, Operand(R0));
__ ret();
}
-void Intrinsifier::ObjectEquals(Assembler* assembler, Label* normal_ir_body) {
- __ ldr(R0, Address(SP, 0 * kWordSize));
- __ ldr(R1, Address(SP, 1 * kWordSize));
+void AsmIntrinsifier::ObjectEquals(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
+ __ ldr(R1, Address(SP, 1 * target::kWordSize));
__ cmp(R0, Operand(R1));
- __ LoadObject(R0, Bool::False());
- __ LoadObject(TMP, Bool::True());
+ __ LoadObject(R0, CastHandle<Object>(FalseObject()));
+ __ LoadObject(TMP, CastHandle<Object>(TrueObject()));
__ csel(R0, TMP, R0, EQ);
__ ret();
}
@@ -1626,10 +1631,10 @@
}
// Return type quickly for simple types (not parameterized and not signature).
-void Intrinsifier::ObjectRuntimeType(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::ObjectRuntimeType(Assembler* assembler,
+ Label* normal_ir_body) {
Label use_declaration_type, not_double, not_integer;
- __ ldr(R0, Address(SP, 0 * kWordSize));
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
__ LoadClassIdMayBeSmi(R1, R0);
__ CompareImmediate(R1, kClosureCid);
@@ -1642,49 +1647,51 @@
__ b(&not_double, NE);
__ LoadIsolate(R0);
- __ LoadFromOffset(R0, R0, Isolate::object_store_offset());
- __ LoadFromOffset(R0, R0, ObjectStore::double_type_offset());
+ __ LoadFromOffset(R0, R0, target::Isolate::object_store_offset());
+ __ LoadFromOffset(R0, R0, target::ObjectStore::double_type_offset());
__ ret();
__ Bind(&not_double);
JumpIfNotInteger(assembler, R1, R0, &not_integer);
__ LoadIsolate(R0);
- __ LoadFromOffset(R0, R0, Isolate::object_store_offset());
- __ LoadFromOffset(R0, R0, ObjectStore::int_type_offset());
+ __ LoadFromOffset(R0, R0, target::Isolate::object_store_offset());
+ __ LoadFromOffset(R0, R0, target::ObjectStore::int_type_offset());
__ ret();
__ Bind(&not_integer);
JumpIfNotString(assembler, R1, R0, &use_declaration_type);
__ LoadIsolate(R0);
- __ LoadFromOffset(R0, R0, Isolate::object_store_offset());
- __ LoadFromOffset(R0, R0, ObjectStore::string_type_offset());
+ __ LoadFromOffset(R0, R0, target::Isolate::object_store_offset());
+ __ LoadFromOffset(R0, R0, target::ObjectStore::string_type_offset());
__ ret();
__ Bind(&use_declaration_type);
__ LoadClassById(R2, R1); // Overwrites R1.
- __ ldr(R3, FieldAddress(R2, Class::num_type_arguments_offset()), kHalfword);
+ __ ldr(R3,
+ FieldAddress(R2, target::Class::num_type_arguments_offset_in_bytes()),
+ kHalfword);
__ CompareImmediate(R3, 0);
__ b(normal_ir_body, NE);
- __ ldr(R0, FieldAddress(R2, Class::declaration_type_offset()));
- __ CompareObject(R0, Object::null_object());
+ __ ldr(R0, FieldAddress(R2, target::Class::declaration_type_offset()));
+ __ CompareObject(R0, NullObject());
__ b(normal_ir_body, EQ);
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler,
+ Label* normal_ir_body) {
Label different_cids, equal, not_equal, not_integer;
- __ ldr(R0, Address(SP, 0 * kWordSize));
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
__ LoadClassIdMayBeSmi(R1, R0);
// Check if left hand side is a closure. Closures are handled in the runtime.
__ CompareImmediate(R1, kClosureCid);
__ b(normal_ir_body, EQ);
- __ ldr(R0, Address(SP, 1 * kWordSize));
+ __ ldr(R0, Address(SP, 1 * target::kWordSize));
__ LoadClassIdMayBeSmi(R2, R0);
// Check whether class ids match. If class ids don't match objects can still
@@ -1697,12 +1704,14 @@
// Check if there are no type arguments. In this case we can return true.
// Otherwise fall through into the runtime to handle comparison.
__ LoadClassById(R3, R1); // Overwrites R1.
- __ ldr(R3, FieldAddress(R3, Class::num_type_arguments_offset()), kHalfword);
+ __ ldr(R3,
+ FieldAddress(R3, target::Class::num_type_arguments_offset_in_bytes()),
+ kHalfword);
__ CompareImmediate(R3, 0);
__ b(normal_ir_body, NE);
__ Bind(&equal);
- __ LoadObject(R0, Bool::True());
+ __ LoadObject(R0, CastHandle<Object>(TrueObject()));
__ ret();
// Class ids are different. Check if we are comparing runtime types of
@@ -1723,16 +1732,16 @@
// Neither strings nor integers and have different class ids.
__ Bind(&not_equal);
- __ LoadObject(R0, Bool::False());
+ __ LoadObject(R0, CastHandle<Object>(FalseObject()));
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::String_getHashCode(Assembler* assembler,
- Label* normal_ir_body) {
- __ ldr(R0, Address(SP, 0 * kWordSize));
- __ ldr(R0, FieldAddress(R0, String::hash_offset()), kUnsignedWord);
+void AsmIntrinsifier::String_getHashCode(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
+ __ ldr(R0, FieldAddress(R0, target::String::hash_offset()), kUnsignedWord);
__ adds(R0, R0, Operand(R0)); // Smi tag the hash code, setting Z flag.
__ b(normal_ir_body, EQ);
__ ret();
@@ -1740,28 +1749,30 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::Type_getHashCode(Assembler* assembler,
- Label* normal_ir_body) {
- __ ldr(R0, Address(SP, 0 * kWordSize));
- __ ldr(R0, FieldAddress(R0, Type::hash_offset()));
+void AsmIntrinsifier::Type_getHashCode(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
+ __ ldr(R0, FieldAddress(R0, target::Type::hash_offset()));
__ cbz(normal_ir_body, R0);
__ ret();
// Hash not yet computed.
__ Bind(normal_ir_body);
}
-void Intrinsifier::Object_getHash(Assembler* assembler, Label* normal_ir_body) {
- __ ldr(R0, Address(SP, 0 * kWordSize));
- __ ldr(R0, FieldAddress(R0, String::hash_offset()), kUnsignedWord);
+void AsmIntrinsifier::Object_getHash(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
+ __ ldr(R0, FieldAddress(R0, target::String::hash_offset()), kUnsignedWord);
__ SmiTag(R0);
__ ret();
}
-void Intrinsifier::Object_setHash(Assembler* assembler, Label* normal_ir_body) {
- __ ldr(R0, Address(SP, 1 * kWordSize)); // Object.
- __ ldr(R1, Address(SP, 0 * kWordSize)); // Value.
+void AsmIntrinsifier::Object_setHash(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ ldr(R0, Address(SP, 1 * target::kWordSize)); // Object.
+ __ ldr(R1, Address(SP, 0 * target::kWordSize)); // Value.
__ SmiUntag(R1);
- __ str(R1, FieldAddress(R0, String::hash_offset()), kUnsignedWord);
+ __ str(R1, FieldAddress(R0, target::String::hash_offset()), kUnsignedWord);
__ ret();
}
@@ -1771,9 +1782,10 @@
Label* return_true,
Label* return_false) {
__ SmiUntag(R1);
- __ ldr(R8, FieldAddress(R0, String::length_offset())); // this.length
+ __ ldr(R8, FieldAddress(R0, target::String::length_offset())); // this.length
__ SmiUntag(R8);
- __ ldr(R9, FieldAddress(R2, String::length_offset())); // other.length
+ __ ldr(R9,
+ FieldAddress(R2, target::String::length_offset())); // other.length
__ SmiUntag(R9);
// if (other.length == 0) return true;
@@ -1790,19 +1802,19 @@
__ b(return_false, GT);
if (receiver_cid == kOneByteStringCid) {
- __ AddImmediate(R0, OneByteString::data_offset() - kHeapObjectTag);
+ __ AddImmediate(R0, target::OneByteString::data_offset() - kHeapObjectTag);
__ add(R0, R0, Operand(R1));
} else {
ASSERT(receiver_cid == kTwoByteStringCid);
- __ AddImmediate(R0, TwoByteString::data_offset() - kHeapObjectTag);
+ __ AddImmediate(R0, target::TwoByteString::data_offset() - kHeapObjectTag);
__ add(R0, R0, Operand(R1));
__ add(R0, R0, Operand(R1));
}
if (other_cid == kOneByteStringCid) {
- __ AddImmediate(R2, OneByteString::data_offset() - kHeapObjectTag);
+ __ AddImmediate(R2, target::OneByteString::data_offset() - kHeapObjectTag);
} else {
ASSERT(other_cid == kTwoByteStringCid);
- __ AddImmediate(R2, TwoByteString::data_offset() - kHeapObjectTag);
+ __ AddImmediate(R2, target::TwoByteString::data_offset() - kHeapObjectTag);
}
// i = 0
@@ -1834,12 +1846,12 @@
// bool _substringMatches(int start, String other)
// This intrinsic handles a OneByteString or TwoByteString receiver with a
// OneByteString other.
-void Intrinsifier::StringBaseSubstringMatches(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::StringBaseSubstringMatches(Assembler* assembler,
+ Label* normal_ir_body) {
Label return_true, return_false, try_two_byte;
- __ ldr(R0, Address(SP, 2 * kWordSize)); // this
- __ ldr(R1, Address(SP, 1 * kWordSize)); // start
- __ ldr(R2, Address(SP, 0 * kWordSize)); // other
+ __ ldr(R0, Address(SP, 2 * target::kWordSize)); // this
+ __ ldr(R1, Address(SP, 1 * target::kWordSize)); // start
+ __ ldr(R2, Address(SP, 0 * target::kWordSize)); // other
__ BranchIfNotSmi(R1, normal_ir_body);
@@ -1862,37 +1874,38 @@
&return_false);
__ Bind(&return_true);
- __ LoadObject(R0, Bool::True());
+ __ LoadObject(R0, CastHandle<Object>(TrueObject()));
__ ret();
__ Bind(&return_false);
- __ LoadObject(R0, Bool::False());
+ __ LoadObject(R0, CastHandle<Object>(FalseObject()));
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::StringBaseCharAt(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::StringBaseCharAt(Assembler* assembler,
+ Label* normal_ir_body) {
Label try_two_byte_string;
- __ ldr(R1, Address(SP, 0 * kWordSize)); // Index.
- __ ldr(R0, Address(SP, 1 * kWordSize)); // String.
- __ BranchIfNotSmi(R1, normal_ir_body); // Index is not a Smi.
+ __ ldr(R1, Address(SP, 0 * target::kWordSize)); // Index.
+ __ ldr(R0, Address(SP, 1 * target::kWordSize)); // String.
+ __ BranchIfNotSmi(R1, normal_ir_body); // Index is not a Smi.
// Range check.
- __ ldr(R2, FieldAddress(R0, String::length_offset()));
+ __ ldr(R2, FieldAddress(R0, target::String::length_offset()));
__ cmp(R1, Operand(R2));
__ b(normal_ir_body, CS); // Runtime throws exception.
__ CompareClassId(R0, kOneByteStringCid);
__ b(&try_two_byte_string, NE);
__ SmiUntag(R1);
- __ AddImmediate(R0, OneByteString::data_offset() - kHeapObjectTag);
+ __ AddImmediate(R0, target::OneByteString::data_offset() - kHeapObjectTag);
__ ldr(R1, Address(R0, R1), kUnsignedByte);
- __ CompareImmediate(R1, Symbols::kNumberOfOneCharCodeSymbols);
+ __ CompareImmediate(R1, target::Symbols::kNumberOfOneCharCodeSymbols);
__ b(normal_ir_body, GE);
- __ ldr(R0, Address(THR, Thread::predefined_symbols_address_offset()));
- __ AddImmediate(R0, Symbols::kNullCharCodeSymbolOffset * kWordSize);
+ __ ldr(R0, Address(THR, target::Thread::predefined_symbols_address_offset()));
+ __ AddImmediate(
+ R0, target::Symbols::kNullCharCodeSymbolOffset * target::kWordSize);
__ ldr(R0, Address(R0, R1, UXTX, Address::Scaled));
__ ret();
@@ -1900,40 +1913,41 @@
__ CompareClassId(R0, kTwoByteStringCid);
__ b(normal_ir_body, NE);
ASSERT(kSmiTagShift == 1);
- __ AddImmediate(R0, TwoByteString::data_offset() - kHeapObjectTag);
+ __ AddImmediate(R0, target::TwoByteString::data_offset() - kHeapObjectTag);
__ ldr(R1, Address(R0, R1), kUnsignedHalfword);
- __ CompareImmediate(R1, Symbols::kNumberOfOneCharCodeSymbols);
+ __ CompareImmediate(R1, target::Symbols::kNumberOfOneCharCodeSymbols);
__ b(normal_ir_body, GE);
- __ ldr(R0, Address(THR, Thread::predefined_symbols_address_offset()));
- __ AddImmediate(R0, Symbols::kNullCharCodeSymbolOffset * kWordSize);
+ __ ldr(R0, Address(THR, target::Thread::predefined_symbols_address_offset()));
+ __ AddImmediate(
+ R0, target::Symbols::kNullCharCodeSymbolOffset * target::kWordSize);
__ ldr(R0, Address(R0, R1, UXTX, Address::Scaled));
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::StringBaseIsEmpty(Assembler* assembler,
- Label* normal_ir_body) {
- __ ldr(R0, Address(SP, 0 * kWordSize));
- __ ldr(R0, FieldAddress(R0, String::length_offset()));
- __ cmp(R0, Operand(Smi::RawValue(0)));
- __ LoadObject(R0, Bool::True());
- __ LoadObject(TMP, Bool::False());
+void AsmIntrinsifier::StringBaseIsEmpty(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
+ __ ldr(R0, FieldAddress(R0, target::String::length_offset()));
+ __ cmp(R0, Operand(target::ToRawSmi(0)));
+ __ LoadObject(R0, CastHandle<Object>(TrueObject()));
+ __ LoadObject(TMP, CastHandle<Object>(FalseObject()));
__ csel(R0, TMP, R0, NE);
__ ret();
}
-void Intrinsifier::OneByteString_getHashCode(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::OneByteString_getHashCode(Assembler* assembler,
+ Label* normal_ir_body) {
Label compute_hash;
- __ ldr(R1, Address(SP, 0 * kWordSize)); // OneByteString object.
- __ ldr(R0, FieldAddress(R1, String::hash_offset()), kUnsignedWord);
+ __ ldr(R1, Address(SP, 0 * target::kWordSize)); // OneByteString object.
+ __ ldr(R0, FieldAddress(R1, target::String::hash_offset()), kUnsignedWord);
__ adds(R0, R0, Operand(R0)); // Smi tag the hash code, setting Z flag.
__ b(&compute_hash, EQ);
__ ret(); // Return if already computed.
__ Bind(&compute_hash);
- __ ldr(R2, FieldAddress(R1, String::length_offset()));
+ __ ldr(R2, FieldAddress(R1, target::String::length_offset()));
__ SmiUntag(R2);
Label done;
@@ -1942,7 +1956,8 @@
__ b(&done, EQ);
__ mov(R3, ZR);
- __ AddImmediate(R6, R1, OneByteString::data_offset() - kHeapObjectTag);
+ __ AddImmediate(R6, R1,
+ target::OneByteString::data_offset() - kHeapObjectTag);
// R1: Instance of OneByteString.
// R2: String length, untagged integer.
// R3: Loop counter, untagged integer.
@@ -1973,12 +1988,13 @@
__ eorw(R0, R0, Operand(R0, LSR, 11));
__ addw(R0, R0, Operand(R0, LSL, 15));
// hash_ = hash_ & ((static_cast<intptr_t>(1) << bits) - 1);
- __ AndImmediate(R0, R0, (static_cast<intptr_t>(1) << String::kHashBits) - 1);
+ __ AndImmediate(R0, R0,
+ (static_cast<intptr_t>(1) << target::String::kHashBits) - 1);
__ CompareRegisters(R0, ZR);
// return hash_ == 0 ? 1 : hash_;
__ Bind(&done);
__ csinc(R0, R0, ZR, NE); // R0 <- (R0 != 0) ? R0 : (ZR + 1).
- __ str(R0, FieldAddress(R1, String::hash_offset()), kUnsignedWord);
+ __ str(R0, FieldAddress(R1, target::String::hash_offset()), kUnsignedWord);
__ SmiTag(R0);
__ ret();
}
@@ -2003,12 +2019,14 @@
// length <- (length != 0) ? length : (ZR + 1).
__ csinc(length_reg, length_reg, ZR, NE);
const intptr_t fixed_size_plus_alignment_padding =
- sizeof(RawString) + kObjectAlignment - 1;
+ target::String::InstanceSize() +
+ target::ObjectAlignment::kObjectAlignment - 1;
__ AddImmediate(length_reg, fixed_size_plus_alignment_padding);
- __ andi(length_reg, length_reg, Immediate(~(kObjectAlignment - 1)));
+ __ andi(length_reg, length_reg,
+ Immediate(~(target::ObjectAlignment::kObjectAlignment - 1)));
const intptr_t cid = kOneByteStringCid;
- __ ldr(R0, Address(THR, Thread::top_offset()));
+ __ ldr(R0, Address(THR, target::Thread::top_offset()));
// length_reg: allocation size.
__ adds(R1, R0, Operand(length_reg));
@@ -2018,13 +2036,13 @@
// R0: potential new object start.
// R1: potential next object start.
// R2: allocation size.
- __ ldr(R7, Address(THR, Thread::end_offset()));
+ __ ldr(R7, Address(THR, target::Thread::end_offset()));
__ cmp(R1, Operand(R7));
__ b(&fail, CS);
// Successfully allocated the object(s), now update top to point to
// next object start and initialize the object.
- __ str(R1, Address(THR, Thread::top_offset()));
+ __ str(R1, Address(THR, target::Thread::top_offset()));
__ AddImmediate(R0, kHeapObjectTag);
NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, R2));
@@ -2033,26 +2051,26 @@
// R1: new object end address.
// R2: allocation size.
{
- const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2;
+ const intptr_t shift = target::RawObject::kTagBitsSizeTagPos -
+ target::ObjectAlignment::kObjectAlignmentLog2;
- __ CompareImmediate(R2, RawObject::SizeTag::kMaxSizeTag);
+ __ CompareImmediate(R2, target::RawObject::kSizeTagMaxSizeTag);
__ LslImmediate(R2, R2, shift);
__ csel(R2, R2, ZR, LS);
// Get the class index and insert it into the tags.
// R2: size and bit tags.
// This also clears the hash, which is in the high word of the tags.
- uint32_t tags = 0;
- tags = RawObject::ClassIdTag::update(cid, tags);
- tags = RawObject::NewBit::update(true, tags);
+ const uint32_t tags =
+ target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
__ LoadImmediate(TMP, tags);
__ orr(R2, R2, Operand(TMP));
- __ str(R2, FieldAddress(R0, String::tags_offset())); // Store tags.
+ __ str(R2, FieldAddress(R0, target::Object::tags_offset())); // Store tags.
}
// Set the length field using the saved length (R6).
- __ StoreIntoObjectNoBarrier(R0, FieldAddress(R0, String::length_offset()),
- R6);
+ __ StoreIntoObjectNoBarrier(
+ R0, FieldAddress(R0, target::String::length_offset()), R6);
__ b(ok);
__ Bind(&fail);
@@ -2063,11 +2081,11 @@
// Arg1: Start index as Smi.
// Arg2: End index as Smi.
// The indexes must be valid.
-void Intrinsifier::OneByteString_substringUnchecked(Assembler* assembler,
- Label* normal_ir_body) {
- const intptr_t kStringOffset = 2 * kWordSize;
- const intptr_t kStartIndexOffset = 1 * kWordSize;
- const intptr_t kEndIndexOffset = 0 * kWordSize;
+void AsmIntrinsifier::OneByteString_substringUnchecked(Assembler* assembler,
+ Label* normal_ir_body) {
+ const intptr_t kStringOffset = 2 * target::kWordSize;
+ const intptr_t kStartIndexOffset = 1 * target::kWordSize;
+ const intptr_t kEndIndexOffset = 0 * target::kWordSize;
Label ok;
__ ldr(R2, Address(SP, kEndIndexOffset));
@@ -2085,7 +2103,7 @@
__ SmiUntag(R1);
__ add(R3, R3, Operand(R1));
// Calculate start address and untag (- 1).
- __ AddImmediate(R3, OneByteString::data_offset() - 1);
+ __ AddImmediate(R3, target::OneByteString::data_offset() - 1);
// R3: Start address to copy from (untagged).
// R1: Untagged start index.
@@ -2109,7 +2127,8 @@
__ AddImmediate(R6, 1);
__ sub(R2, R2, Operand(1));
__ cmp(R2, Operand(0));
- __ str(R1, FieldAddress(R7, OneByteString::data_offset()), kUnsignedByte);
+ __ str(R1, FieldAddress(R7, target::OneByteString::data_offset()),
+ kUnsignedByte);
__ AddImmediate(R7, 1);
__ b(&loop, GT);
@@ -2118,23 +2137,24 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::OneByteStringSetAt(Assembler* assembler,
- Label* normal_ir_body) {
- __ ldr(R2, Address(SP, 0 * kWordSize)); // Value.
- __ ldr(R1, Address(SP, 1 * kWordSize)); // Index.
- __ ldr(R0, Address(SP, 2 * kWordSize)); // OneByteString.
+void AsmIntrinsifier::OneByteStringSetAt(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ ldr(R2, Address(SP, 0 * target::kWordSize)); // Value.
+ __ ldr(R1, Address(SP, 1 * target::kWordSize)); // Index.
+ __ ldr(R0, Address(SP, 2 * target::kWordSize)); // OneByteString.
__ SmiUntag(R1);
__ SmiUntag(R2);
- __ AddImmediate(R3, R0, OneByteString::data_offset() - kHeapObjectTag);
+ __ AddImmediate(R3, R0,
+ target::OneByteString::data_offset() - kHeapObjectTag);
__ str(R2, Address(R3, R1), kUnsignedByte);
__ ret();
}
-void Intrinsifier::OneByteString_allocate(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::OneByteString_allocate(Assembler* assembler,
+ Label* normal_ir_body) {
Label ok;
- __ ldr(R2, Address(SP, 0 * kWordSize)); // Length.
+ __ ldr(R2, Address(SP, 0 * target::kWordSize)); // Length.
TryAllocateOnebyteString(assembler, &ok, normal_ir_body);
__ Bind(&ok);
@@ -2148,8 +2168,8 @@
Label* normal_ir_body,
intptr_t string_cid) {
Label is_true, is_false, loop;
- __ ldr(R0, Address(SP, 1 * kWordSize)); // This.
- __ ldr(R1, Address(SP, 0 * kWordSize)); // Other.
+ __ ldr(R0, Address(SP, 1 * target::kWordSize)); // This.
+ __ ldr(R1, Address(SP, 0 * target::kWordSize)); // Other.
// Are identical?
__ cmp(R0, Operand(R1));
@@ -2161,8 +2181,8 @@
__ b(normal_ir_body, NE);
// Have same length?
- __ ldr(R2, FieldAddress(R0, String::length_offset()));
- __ ldr(R3, FieldAddress(R1, String::length_offset()));
+ __ ldr(R2, FieldAddress(R0, target::String::length_offset()));
+ __ ldr(R3, FieldAddress(R1, target::String::length_offset()));
__ cmp(R2, Operand(R3));
__ b(&is_false, NE);
@@ -2171,8 +2191,8 @@
ASSERT((string_cid == kOneByteStringCid) ||
(string_cid == kTwoByteStringCid));
const intptr_t offset = (string_cid == kOneByteStringCid)
- ? OneByteString::data_offset()
- : TwoByteString::data_offset();
+ ? target::OneByteString::data_offset()
+ : target::TwoByteString::data_offset();
__ AddImmediate(R0, offset - kHeapObjectTag);
__ AddImmediate(R1, offset - kHeapObjectTag);
__ SmiUntag(R2);
@@ -2198,33 +2218,33 @@
__ b(&loop);
__ Bind(&is_true);
- __ LoadObject(R0, Bool::True());
+ __ LoadObject(R0, CastHandle<Object>(TrueObject()));
__ ret();
__ Bind(&is_false);
- __ LoadObject(R0, Bool::False());
+ __ LoadObject(R0, CastHandle<Object>(FalseObject()));
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::OneByteString_equality(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::OneByteString_equality(Assembler* assembler,
+ Label* normal_ir_body) {
StringEquality(assembler, normal_ir_body, kOneByteStringCid);
}
-void Intrinsifier::TwoByteString_equality(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::TwoByteString_equality(Assembler* assembler,
+ Label* normal_ir_body) {
StringEquality(assembler, normal_ir_body, kTwoByteStringCid);
}
-void Intrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler,
- Label* normal_ir_body,
- bool sticky) {
+void AsmIntrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler,
+ Label* normal_ir_body,
+ bool sticky) {
if (FLAG_interpret_irregexp) return;
- static const intptr_t kRegExpParamOffset = 2 * kWordSize;
- static const intptr_t kStringParamOffset = 1 * kWordSize;
+ static const intptr_t kRegExpParamOffset = 2 * target::kWordSize;
+ static const intptr_t kStringParamOffset = 1 * target::kWordSize;
// start_index smi is located at offset 0.
// Incoming registers:
@@ -2238,84 +2258,87 @@
__ ldr(R1, Address(SP, kStringParamOffset));
__ LoadClassId(R1, R1);
__ AddImmediate(R1, -kOneByteStringCid);
- __ add(R1, R2, Operand(R1, LSL, kWordSizeLog2));
- __ ldr(R0,
- FieldAddress(R1, RegExp::function_offset(kOneByteStringCid, sticky)));
+ __ add(R1, R2, Operand(R1, LSL, target::kWordSizeLog2));
+ __ ldr(R0, FieldAddress(R1, target::RegExp::function_offset(kOneByteStringCid,
+ sticky)));
// Registers are now set up for the lazy compile stub. It expects the function
// in R0, the argument descriptor in R4, and IC-Data in R5.
__ eor(R5, R5, Operand(R5));
// Tail-call the function.
- __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
- __ ldr(R1, FieldAddress(R0, Function::entry_point_offset()));
+ __ ldr(CODE_REG, FieldAddress(R0, target::Function::code_offset()));
+ __ ldr(R1, FieldAddress(R0, target::Function::entry_point_offset()));
__ br(R1);
}
// On stack: user tag (+0).
-void Intrinsifier::UserTag_makeCurrent(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::UserTag_makeCurrent(Assembler* assembler,
+ Label* normal_ir_body) {
// R1: Isolate.
__ LoadIsolate(R1);
// R0: Current user tag.
- __ ldr(R0, Address(R1, Isolate::current_tag_offset()));
+ __ ldr(R0, Address(R1, target::Isolate::current_tag_offset()));
// R2: UserTag.
- __ ldr(R2, Address(SP, +0 * kWordSize));
- // Set Isolate::current_tag_.
- __ str(R2, Address(R1, Isolate::current_tag_offset()));
+ __ ldr(R2, Address(SP, +0 * target::kWordSize));
+ // Set target::Isolate::current_tag_.
+ __ str(R2, Address(R1, target::Isolate::current_tag_offset()));
// R2: UserTag's tag.
- __ ldr(R2, FieldAddress(R2, UserTag::tag_offset()));
- // Set Isolate::user_tag_.
- __ str(R2, Address(R1, Isolate::user_tag_offset()));
+ __ ldr(R2, FieldAddress(R2, target::UserTag::tag_offset()));
+ // Set target::Isolate::user_tag_.
+ __ str(R2, Address(R1, target::Isolate::user_tag_offset()));
__ ret();
}
-void Intrinsifier::UserTag_defaultTag(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::UserTag_defaultTag(Assembler* assembler,
+ Label* normal_ir_body) {
__ LoadIsolate(R0);
- __ ldr(R0, Address(R0, Isolate::default_tag_offset()));
+ __ ldr(R0, Address(R0, target::Isolate::default_tag_offset()));
__ ret();
}
-void Intrinsifier::Profiler_getCurrentTag(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Profiler_getCurrentTag(Assembler* assembler,
+ Label* normal_ir_body) {
__ LoadIsolate(R0);
- __ ldr(R0, Address(R0, Isolate::current_tag_offset()));
+ __ ldr(R0, Address(R0, target::Isolate::current_tag_offset()));
__ ret();
}
-void Intrinsifier::Timeline_isDartStreamEnabled(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Timeline_isDartStreamEnabled(Assembler* assembler,
+ Label* normal_ir_body) {
#if !defined(SUPPORT_TIMELINE)
- __ LoadObject(R0, Bool::False());
+ __ LoadObject(R0, CastHandle<Object>(FalseObject()));
__ ret();
#else
// Load TimelineStream*.
- __ ldr(R0, Address(THR, Thread::dart_stream_offset()));
+ __ ldr(R0, Address(THR, target::Thread::dart_stream_offset()));
// Load uintptr_t from TimelineStream*.
- __ ldr(R0, Address(R0, TimelineStream::enabled_offset()));
+ __ ldr(R0, Address(R0, target::TimelineStream::enabled_offset()));
__ cmp(R0, Operand(0));
- __ LoadObject(R0, Bool::False());
- __ LoadObject(TMP, Bool::True());
+ __ LoadObject(R0, CastHandle<Object>(FalseObject()));
+ __ LoadObject(TMP, CastHandle<Object>(TrueObject()));
__ csel(R0, TMP, R0, NE);
__ ret();
#endif
}
-void Intrinsifier::ClearAsyncThreadStackTrace(Assembler* assembler,
- Label* normal_ir_body) {
- __ LoadObject(R0, Object::null_object());
- __ str(R0, Address(THR, Thread::async_stack_trace_offset()));
+void AsmIntrinsifier::ClearAsyncThreadStackTrace(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ LoadObject(R0, NullObject());
+ __ str(R0, Address(THR, target::Thread::async_stack_trace_offset()));
__ ret();
}
-void Intrinsifier::SetAsyncThreadStackTrace(Assembler* assembler,
- Label* normal_ir_body) {
- __ ldr(R0, Address(THR, Thread::async_stack_trace_offset()));
- __ LoadObject(R0, Object::null_object());
+void AsmIntrinsifier::SetAsyncThreadStackTrace(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ ldr(R0, Address(THR, target::Thread::async_stack_trace_offset()));
+ __ LoadObject(R0, NullObject());
__ ret();
}
+#undef __
+
+} // namespace compiler
} // namespace dart
#endif // defined(TARGET_ARCH_ARM64) && !defined(DART_PRECOMPILED_RUNTIME)
diff --git a/runtime/vm/compiler/intrinsifier_dbc.cc b/runtime/vm/compiler/asm_intrinsifier_dbc.cc
similarity index 73%
rename from runtime/vm/compiler/intrinsifier_dbc.cc
rename to runtime/vm/compiler/asm_intrinsifier_dbc.cc
index b09a15f..cfb1130 100644
--- a/runtime/vm/compiler/intrinsifier_dbc.cc
+++ b/runtime/vm/compiler/asm_intrinsifier_dbc.cc
@@ -5,28 +5,21 @@
#include "vm/globals.h" // Needed here to get TARGET_ARCH_DBC.
#if defined(TARGET_ARCH_DBC)
+#define SHOULD_NOT_INCLUDE_RUNTIME
+
+#include "vm/class_id.h"
+#include "vm/compiler/asm_intrinsifier.h"
+#include "vm/compiler/assembler/assembler.h"
#include "vm/compiler/intrinsifier.h"
-#include "vm/compiler/assembler/assembler.h"
-#include "vm/compiler/backend/flow_graph_compiler.h"
-#include "vm/cpu.h"
-#include "vm/dart_entry.h"
-#include "vm/object.h"
-#include "vm/object_store.h"
-#include "vm/regexp_assembler.h"
-#include "vm/simulator.h"
-#include "vm/symbols.h"
-
namespace dart {
+namespace compiler {
DECLARE_FLAG(bool, interpret_irregexp);
-intptr_t Intrinsifier::ParameterSlotFromSp() {
- return -1;
-}
-
#define DEFINE_FUNCTION(class_name, test_function_name, enum_name, fp) \
- void Intrinsifier::enum_name(Assembler* assembler, Label* normal_ir_body) { \
+ void AsmIntrinsifier::enum_name(Assembler* assembler, \
+ Label* normal_ir_body) { \
if (Simulator::IsSupportedIntrinsic(Simulator::k##enum_name##Intrinsic)) { \
assembler->Intrinsic(Simulator::k##enum_name##Intrinsic); \
} \
@@ -37,6 +30,7 @@
GRAPH_INTRINSICS_LIST(DEFINE_FUNCTION)
#undef DEFINE_FUNCTION
+} // namespace compiler
} // namespace dart
#endif // defined TARGET_ARCH_DBC
diff --git a/runtime/vm/compiler/intrinsifier_ia32.cc b/runtime/vm/compiler/asm_intrinsifier_ia32.cc
similarity index 67%
rename from runtime/vm/compiler/intrinsifier_ia32.cc
rename to runtime/vm/compiler/asm_intrinsifier_ia32.cc
index 4229e6e..392d129 100644
--- a/runtime/vm/compiler/intrinsifier_ia32.cc
+++ b/runtime/vm/compiler/asm_intrinsifier_ia32.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
+// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
//
@@ -11,19 +11,14 @@
#include "vm/globals.h" // Needed here to get TARGET_ARCH_IA32.
#if defined(TARGET_ARCH_IA32) && !defined(DART_PRECOMPILED_RUNTIME)
-#include "vm/compiler/intrinsifier.h"
+#define SHOULD_NOT_INCLUDE_RUNTIME
+#include "vm/class_id.h"
+#include "vm/compiler/asm_intrinsifier.h"
#include "vm/compiler/assembler/assembler.h"
-#include "vm/compiler/backend/flow_graph_compiler.h"
-#include "vm/dart_entry.h"
-#include "vm/object.h"
-#include "vm/object_store.h"
-#include "vm/os.h"
-#include "vm/regexp_assembler.h"
-#include "vm/symbols.h"
-#include "vm/timeline.h"
namespace dart {
+namespace compiler {
// When entering intrinsics code:
// ECX: IC Data
@@ -36,59 +31,60 @@
#define __ assembler->
-intptr_t Intrinsifier::ParameterSlotFromSp() {
+intptr_t AsmIntrinsifier::ParameterSlotFromSp() {
return 0;
}
-void Intrinsifier::IntrinsicCallPrologue(Assembler* assembler) {
+void AsmIntrinsifier::IntrinsicCallPrologue(Assembler* assembler) {
COMPILE_ASSERT(CALLEE_SAVED_TEMP != ARGS_DESC_REG);
assembler->Comment("IntrinsicCallPrologue");
assembler->movl(CALLEE_SAVED_TEMP, ARGS_DESC_REG);
}
-void Intrinsifier::IntrinsicCallEpilogue(Assembler* assembler) {
+void AsmIntrinsifier::IntrinsicCallEpilogue(Assembler* assembler) {
assembler->Comment("IntrinsicCallEpilogue");
assembler->movl(ARGS_DESC_REG, CALLEE_SAVED_TEMP);
}
-// Allocate a GrowableObjectArray using the backing array specified.
+// Allocate a GrowableObjectArray using the backing array specified.
// On stack: type argument (+2), data (+1), return-address (+0).
-void Intrinsifier::GrowableArray_Allocate(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::GrowableArray_Allocate(Assembler* assembler,
+ Label* normal_ir_body) {
// This snippet of inlined code uses the following registers:
// EAX, EBX
// and the newly allocated object is returned in EAX.
- const intptr_t kTypeArgumentsOffset = 2 * kWordSize;
+ const intptr_t kTypeArgumentsOffset = 2 * target::kWordSize;
- const intptr_t kArrayOffset = 1 * kWordSize;
+ const intptr_t kArrayOffset = 1 * target::kWordSize;
// Try allocating in new space.
- const Class& cls = Class::Handle(
- Isolate::Current()->object_store()->growable_object_array_class());
+ const Class& cls = GrowableObjectArrayClass();
__ TryAllocate(cls, normal_ir_body, Assembler::kNearJump, EAX, EBX);
// Store backing array object in growable array object.
__ movl(EBX, Address(ESP, kArrayOffset)); // data argument.
// EAX is new, no barrier needed.
__ StoreIntoObjectNoBarrier(
- EAX, FieldAddress(EAX, GrowableObjectArray::data_offset()), EBX);
+ EAX, FieldAddress(EAX, target::GrowableObjectArray::data_offset()), EBX);
// EAX: new growable array object start as a tagged pointer.
// Store the type argument field in the growable array object.
__ movl(EBX, Address(ESP, kTypeArgumentsOffset)); // type argument.
__ StoreIntoObjectNoBarrier(
- EAX, FieldAddress(EAX, GrowableObjectArray::type_arguments_offset()),
+ EAX,
+ FieldAddress(EAX, target::GrowableObjectArray::type_arguments_offset()),
EBX);
- __ ZeroInitSmiField(FieldAddress(EAX, GrowableObjectArray::length_offset()));
+ __ ZeroInitSmiField(
+ FieldAddress(EAX, target::GrowableObjectArray::length_offset()));
__ ret(); // returns the newly allocated object in EAX.
__ Bind(normal_ir_body);
}
-#define TYPED_ARRAY_ALLOCATION(type_name, cid, max_len, scale_factor) \
- const intptr_t kArrayLengthStackOffset = 1 * kWordSize; \
+#define TYPED_ARRAY_ALLOCATION(cid, max_len, scale_factor) \
+ const intptr_t kArrayLengthStackOffset = 1 * target::kWordSize; \
NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, EDI, normal_ir_body, false)); \
__ movl(EDI, Address(ESP, kArrayLengthStackOffset)); /* Array length. */ \
/* Check that length is a positive Smi. */ \
@@ -110,10 +106,11 @@
scale_factor = TIMES_8; \
} \
const intptr_t fixed_size_plus_alignment_padding = \
- sizeof(Raw##type_name) + kObjectAlignment - 1; \
+ target::TypedData::InstanceSize() + \
+ target::ObjectAlignment::kObjectAlignment - 1; \
__ leal(EDI, Address(EDI, scale_factor, fixed_size_plus_alignment_padding)); \
- __ andl(EDI, Immediate(-kObjectAlignment)); \
- __ movl(EAX, Address(THR, Thread::top_offset())); \
+ __ andl(EDI, Immediate(-target::ObjectAlignment::kObjectAlignment)); \
+ __ movl(EAX, Address(THR, target::Thread::top_offset())); \
__ movl(EBX, EAX); \
\
/* EDI: allocation size. */ \
@@ -124,12 +121,12 @@
/* EAX: potential new object start. */ \
/* EBX: potential next object start. */ \
/* EDI: allocation size. */ \
- __ cmpl(EBX, Address(THR, Thread::end_offset())); \
+ __ cmpl(EBX, Address(THR, target::Thread::end_offset())); \
__ j(ABOVE_EQUAL, normal_ir_body); \
\
/* Successfully allocated the object(s), now update top to point to */ \
/* next object start and initialize the object. */ \
- __ movl(Address(THR, Thread::top_offset()), EBX); \
+ __ movl(Address(THR, target::Thread::top_offset()), EBX); \
__ addl(EAX, Immediate(kHeapObjectTag)); \
NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, EDI, ECX)); \
\
@@ -139,9 +136,10 @@
/* EDI: allocation size. */ \
{ \
Label size_tag_overflow, done; \
- __ cmpl(EDI, Immediate(RawObject::SizeTag::kMaxSizeTag)); \
+ __ cmpl(EDI, Immediate(target::RawObject::kSizeTagMaxSizeTag)); \
__ j(ABOVE, &size_tag_overflow, Assembler::kNearJump); \
- __ shll(EDI, Immediate(RawObject::kSizeTagPos - kObjectAlignmentLog2)); \
+ __ shll(EDI, Immediate(target::RawObject::kTagBitsSizeTagPos - \
+ target::ObjectAlignment::kObjectAlignmentLog2)); \
__ jmp(&done, Assembler::kNearJump); \
\
__ Bind(&size_tag_overflow); \
@@ -149,18 +147,18 @@
__ Bind(&done); \
\
/* Get the class index and insert it into the tags. */ \
- uint32_t tags = 0; \
- tags = RawObject::ClassIdTag::update(cid, tags); \
- tags = RawObject::NewBit::update(true, tags); \
+ uint32_t tags = \
+ target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0); \
__ orl(EDI, Immediate(tags)); \
- __ movl(FieldAddress(EAX, type_name::tags_offset()), EDI); /* Tags. */ \
+ __ movl(FieldAddress(EAX, target::Object::tags_offset()), \
+ EDI); /* Tags. */ \
} \
/* Set the length field. */ \
/* EAX: new object start as a tagged pointer. */ \
/* EBX: new object end address. */ \
__ movl(EDI, Address(ESP, kArrayLengthStackOffset)); /* Array length. */ \
__ StoreIntoObjectNoBarrier( \
- EAX, FieldAddress(EAX, type_name::length_offset()), EDI); \
+ EAX, FieldAddress(EAX, target::TypedData::length_offset()), EDI); \
/* Initialize all array elements to 0. */ \
/* EAX: new object start as a tagged pointer. */ \
/* EBX: new object end address. */ \
@@ -168,13 +166,13 @@
/* ECX: scratch register. */ \
/* data area to be initialized. */ \
__ xorl(ECX, ECX); /* Zero. */ \
- __ leal(EDI, FieldAddress(EAX, sizeof(Raw##type_name))); \
+ __ leal(EDI, FieldAddress(EAX, target::TypedData::InstanceSize())); \
Label done, init_loop; \
__ Bind(&init_loop); \
__ cmpl(EDI, EBX); \
__ j(ABOVE_EQUAL, &done, Assembler::kNearJump); \
__ movl(Address(EDI, 0), ECX); \
- __ addl(EDI, Immediate(kWordSize)); \
+ __ addl(EDI, Immediate(target::kWordSize)); \
__ jmp(&init_loop, Assembler::kNearJump); \
__ Bind(&done); \
\
@@ -199,12 +197,12 @@
}
#define TYPED_DATA_ALLOCATOR(clazz) \
- void Intrinsifier::TypedData_##clazz##_factory(Assembler* assembler, \
- Label* normal_ir_body) { \
- intptr_t size = TypedData::ElementSizeInBytes(kTypedData##clazz##Cid); \
- intptr_t max_len = TypedData::MaxNewSpaceElements(kTypedData##clazz##Cid); \
+ void AsmIntrinsifier::TypedData_##clazz##_factory(Assembler* assembler, \
+ Label* normal_ir_body) { \
+ intptr_t size = TypedDataElementSizeInBytes(kTypedData##clazz##Cid); \
+ intptr_t max_len = TypedDataMaxNewSpaceElements(kTypedData##clazz##Cid); \
ScaleFactor scale = GetScaleFactor(size); \
- TYPED_ARRAY_ALLOCATION(TypedData, kTypedData##clazz##Cid, max_len, scale); \
+ TYPED_ARRAY_ALLOCATION(kTypedData##clazz##Cid, max_len, scale); \
}
CLASS_LIST_TYPED_DATA(TYPED_DATA_ALLOCATOR)
#undef TYPED_DATA_ALLOCATOR
@@ -212,41 +210,41 @@
// Tests if two top most arguments are smis, jumps to label not_smi if not.
// Topmost argument is in EAX.
static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) {
- __ movl(EAX, Address(ESP, +1 * kWordSize));
- __ movl(EBX, Address(ESP, +2 * kWordSize));
+ __ movl(EAX, Address(ESP, +1 * target::kWordSize));
+ __ movl(EBX, Address(ESP, +2 * target::kWordSize));
__ orl(EBX, EAX);
__ testl(EBX, Immediate(kSmiTagMask));
__ j(NOT_ZERO, not_smi, Assembler::kNearJump);
}
-void Intrinsifier::Integer_addFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_addFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
TestBothArgumentsSmis(assembler, normal_ir_body);
- __ addl(EAX, Address(ESP, +2 * kWordSize));
+ __ addl(EAX, Address(ESP, +2 * target::kWordSize));
__ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
// Result is in EAX.
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_add(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_add(Assembler* assembler, Label* normal_ir_body) {
Integer_addFromInteger(assembler, normal_ir_body);
}
-void Intrinsifier::Integer_subFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_subFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
TestBothArgumentsSmis(assembler, normal_ir_body);
- __ subl(EAX, Address(ESP, +2 * kWordSize));
+ __ subl(EAX, Address(ESP, +2 * target::kWordSize));
__ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
// Result is in EAX.
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_sub(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_sub(Assembler* assembler, Label* normal_ir_body) {
TestBothArgumentsSmis(assembler, normal_ir_body);
__ movl(EBX, EAX);
- __ movl(EAX, Address(ESP, +2 * kWordSize));
+ __ movl(EAX, Address(ESP, +2 * target::kWordSize));
__ subl(EAX, EBX);
__ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
// Result is in EAX.
@@ -254,19 +252,19 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_mulFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_mulFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
TestBothArgumentsSmis(assembler, normal_ir_body);
ASSERT(kSmiTag == 0); // Adjust code below if not the case.
__ SmiUntag(EAX);
- __ imull(EAX, Address(ESP, +2 * kWordSize));
+ __ imull(EAX, Address(ESP, +2 * target::kWordSize));
__ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
// Result is in EAX.
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_mul(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_mul(Assembler* assembler, Label* normal_ir_body) {
Integer_mulFromInteger(assembler, normal_ir_body);
}
@@ -318,11 +316,11 @@
// res = res + right;
// }
// }
-void Intrinsifier::Integer_moduloFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_moduloFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
Label subtract;
TestBothArgumentsSmis(assembler, normal_ir_body);
- __ movl(EBX, Address(ESP, +2 * kWordSize));
+ __ movl(EBX, Address(ESP, +2 * target::kWordSize));
// EAX: Tagged left (dividend).
// EBX: Tagged right (divisor).
// Check if modulo by zero -> exception thrown in main function.
@@ -352,15 +350,16 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_truncDivide(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_truncDivide(Assembler* assembler,
+ Label* normal_ir_body) {
TestBothArgumentsSmis(assembler, normal_ir_body);
// EAX: right argument (divisor)
__ cmpl(EAX, Immediate(0));
__ j(EQUAL, normal_ir_body, Assembler::kNearJump);
__ movl(EBX, EAX);
__ SmiUntag(EBX);
- __ movl(EAX, Address(ESP, +2 * kWordSize)); // Left argument (dividend).
+ __ movl(EAX,
+ Address(ESP, +2 * target::kWordSize)); // Left argument (dividend).
__ SmiUntag(EAX);
__ pushl(EDX); // Preserve EDX in case of 'fall_through'.
__ cdq();
@@ -375,8 +374,9 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_negate(Assembler* assembler, Label* normal_ir_body) {
- __ movl(EAX, Address(ESP, +1 * kWordSize));
+void AsmIntrinsifier::Integer_negate(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ movl(EAX, Address(ESP, +1 * target::kWordSize));
__ testl(EAX, Immediate(kSmiTagMask));
__ j(NOT_ZERO, normal_ir_body, Assembler::kNearJump); // Non-smi value.
__ negl(EAX);
@@ -386,60 +386,63 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_bitAndFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_bitAndFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
TestBothArgumentsSmis(assembler, normal_ir_body);
- __ movl(EBX, Address(ESP, +2 * kWordSize));
+ __ movl(EBX, Address(ESP, +2 * target::kWordSize));
__ andl(EAX, EBX);
// Result is in EAX.
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_bitAnd(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_bitAnd(Assembler* assembler,
+ Label* normal_ir_body) {
Integer_bitAndFromInteger(assembler, normal_ir_body);
}
-void Intrinsifier::Integer_bitOrFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_bitOrFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
TestBothArgumentsSmis(assembler, normal_ir_body);
- __ movl(EBX, Address(ESP, +2 * kWordSize));
+ __ movl(EBX, Address(ESP, +2 * target::kWordSize));
__ orl(EAX, EBX);
// Result is in EAX.
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_bitOr(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_bitOr(Assembler* assembler,
+ Label* normal_ir_body) {
Integer_bitOrFromInteger(assembler, normal_ir_body);
}
-void Intrinsifier::Integer_bitXorFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_bitXorFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
TestBothArgumentsSmis(assembler, normal_ir_body);
- __ movl(EBX, Address(ESP, +2 * kWordSize));
+ __ movl(EBX, Address(ESP, +2 * target::kWordSize));
__ xorl(EAX, EBX);
// Result is in EAX.
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_bitXor(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_bitXor(Assembler* assembler,
+ Label* normal_ir_body) {
Integer_bitXorFromInteger(assembler, normal_ir_body);
}
-void Intrinsifier::Integer_shl(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_shl(Assembler* assembler, Label* normal_ir_body) {
ASSERT(kSmiTagShift == 1);
ASSERT(kSmiTag == 0);
Label overflow;
TestBothArgumentsSmis(assembler, normal_ir_body);
// Shift value is in EAX. Compare with tagged Smi.
- __ cmpl(EAX, Immediate(Smi::RawValue(Smi::kBits)));
+ __ cmpl(EAX, Immediate(target::ToRawSmi(target::Smi::kBits)));
__ j(ABOVE_EQUAL, normal_ir_body, Assembler::kNearJump);
__ SmiUntag(EAX);
- __ movl(ECX, EAX); // Shift amount must be in ECX.
- __ movl(EAX, Address(ESP, +2 * kWordSize)); // Value.
+ __ movl(ECX, EAX); // Shift amount must be in ECX.
+ __ movl(EAX, Address(ESP, +2 * target::kWordSize)); // Value.
// Overflow test - all the shifted-out bits must be same as the sign bit.
__ movl(EBX, EAX);
@@ -464,14 +467,14 @@
__ xorl(EDI, EDI);
__ shldl(EDI, EAX, ECX);
// Result in EDI (high) and EBX (low).
- const Class& mint_class =
- Class::Handle(Isolate::Current()->object_store()->mint_class());
+ const Class& mint_class = MintClass();
__ TryAllocate(mint_class, normal_ir_body, Assembler::kNearJump,
EAX, // Result register.
ECX); // temp
// EBX and EDI are not objects but integer values.
- __ movl(FieldAddress(EAX, Mint::value_offset()), EBX);
- __ movl(FieldAddress(EAX, Mint::value_offset() + kWordSize), EDI);
+ __ movl(FieldAddress(EAX, target::Mint::value_offset()), EBX);
+ __ movl(FieldAddress(EAX, target::Mint::value_offset() + target::kWordSize),
+ EDI);
__ ret();
__ Bind(normal_ir_body);
}
@@ -494,8 +497,8 @@
__ CompareClassId(reg, kMintCid, tmp);
__ j(NOT_EQUAL, not_smi_or_mint);
// Mint.
- __ pushl(FieldAddress(reg, Mint::value_offset() + kWordSize));
- __ pushl(FieldAddress(reg, Mint::value_offset()));
+ __ pushl(FieldAddress(reg, target::Mint::value_offset() + target::kWordSize));
+ __ pushl(FieldAddress(reg, target::Mint::value_offset()));
__ Bind(&done);
}
@@ -505,13 +508,13 @@
Label try_mint_smi, is_true, is_false, drop_two_fall_through, fall_through;
TestBothArgumentsSmis(assembler, &try_mint_smi);
// EAX contains the right argument.
- __ cmpl(Address(ESP, +2 * kWordSize), EAX);
+ __ cmpl(Address(ESP, +2 * target::kWordSize), EAX);
__ j(true_condition, &is_true, Assembler::kNearJump);
__ Bind(&is_false);
- __ LoadObject(EAX, Bool::False());
+ __ LoadObject(EAX, CastHandle<Object>(FalseObject()));
__ ret();
__ Bind(&is_true);
- __ LoadObject(EAX, Bool::True());
+ __ LoadObject(EAX, CastHandle<Object>(TrueObject()));
__ ret();
// 64-bit comparison
@@ -537,7 +540,7 @@
// Note that EDX and ECX must be preserved in case we fall through to main
// method.
// EAX contains the right argument.
- __ movl(EBX, Address(ESP, +2 * kWordSize)); // Left argument.
+ __ movl(EBX, Address(ESP, +2 * target::kWordSize)); // Left argument.
// Push left as 64 bit integer.
Push64SmiOrMint(assembler, EBX, EDI, normal_ir_body);
// Push right as 64 bit integer.
@@ -559,65 +562,66 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_greaterThanFromInt(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_greaterThanFromInt(Assembler* assembler,
+ Label* normal_ir_body) {
CompareIntegers(assembler, normal_ir_body, LESS);
}
-void Intrinsifier::Integer_lessThan(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_lessThan(Assembler* assembler,
+ Label* normal_ir_body) {
Integer_greaterThanFromInt(assembler, normal_ir_body);
}
-void Intrinsifier::Integer_greaterThan(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_greaterThan(Assembler* assembler,
+ Label* normal_ir_body) {
CompareIntegers(assembler, normal_ir_body, GREATER);
}
-void Intrinsifier::Integer_lessEqualThan(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_lessEqualThan(Assembler* assembler,
+ Label* normal_ir_body) {
CompareIntegers(assembler, normal_ir_body, LESS_EQUAL);
}
-void Intrinsifier::Integer_greaterEqualThan(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_greaterEqualThan(Assembler* assembler,
+ Label* normal_ir_body) {
CompareIntegers(assembler, normal_ir_body, GREATER_EQUAL);
}
// This is called for Smi and Mint receivers. The right argument
// can be Smi, Mint or double.
-void Intrinsifier::Integer_equalToInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_equalToInteger(Assembler* assembler,
+ Label* normal_ir_body) {
Label true_label, check_for_mint;
// For integer receiver '===' check first.
- __ movl(EAX, Address(ESP, +1 * kWordSize));
- __ cmpl(EAX, Address(ESP, +2 * kWordSize));
+ __ movl(EAX, Address(ESP, +1 * target::kWordSize));
+ __ cmpl(EAX, Address(ESP, +2 * target::kWordSize));
__ j(EQUAL, &true_label, Assembler::kNearJump);
- __ movl(EBX, Address(ESP, +2 * kWordSize));
+ __ movl(EBX, Address(ESP, +2 * target::kWordSize));
__ orl(EAX, EBX);
__ testl(EAX, Immediate(kSmiTagMask));
__ j(NOT_ZERO, &check_for_mint, Assembler::kNearJump);
// Both arguments are smi, '===' is good enough.
- __ LoadObject(EAX, Bool::False());
+ __ LoadObject(EAX, CastHandle<Object>(FalseObject()));
__ ret();
__ Bind(&true_label);
- __ LoadObject(EAX, Bool::True());
+ __ LoadObject(EAX, CastHandle<Object>(TrueObject()));
__ ret();
// At least one of the arguments was not Smi.
Label receiver_not_smi;
__ Bind(&check_for_mint);
- __ movl(EAX, Address(ESP, +2 * kWordSize)); // Receiver.
+ __ movl(EAX, Address(ESP, +2 * target::kWordSize)); // Receiver.
__ testl(EAX, Immediate(kSmiTagMask));
__ j(NOT_ZERO, &receiver_not_smi);
// Left (receiver) is Smi, return false if right is not Double.
// Note that an instance of Mint never contains a value that can be
// represented by Smi.
- __ movl(EAX, Address(ESP, +1 * kWordSize)); // Right argument.
+ __ movl(EAX, Address(ESP, +1 * target::kWordSize)); // Right argument.
__ CompareClassId(EAX, kDoubleCid, EDI);
__ j(EQUAL, normal_ir_body);
- __ LoadObject(EAX, Bool::False()); // Smi == Mint -> false.
+ __ LoadObject(EAX,
+ CastHandle<Object>(FalseObject())); // Smi == Mint -> false.
__ ret();
__ Bind(&receiver_not_smi);
@@ -625,21 +629,22 @@
__ CompareClassId(EAX, kMintCid, EDI);
__ j(NOT_EQUAL, normal_ir_body);
// Receiver is Mint, return false if right is Smi.
- __ movl(EAX, Address(ESP, +1 * kWordSize)); // Right argument.
+ __ movl(EAX, Address(ESP, +1 * target::kWordSize)); // Right argument.
__ testl(EAX, Immediate(kSmiTagMask));
__ j(NOT_ZERO, normal_ir_body);
- __ LoadObject(EAX, Bool::False());
+ __ LoadObject(EAX, CastHandle<Object>(FalseObject()));
__ ret();
// TODO(srdjan): Implement Mint == Mint comparison.
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_equal(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_equal(Assembler* assembler,
+ Label* normal_ir_body) {
Integer_equalToInteger(assembler, normal_ir_body);
}
-void Intrinsifier::Integer_sar(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_sar(Assembler* assembler, Label* normal_ir_body) {
Label shift_count_ok;
TestBothArgumentsSmis(assembler, normal_ir_body);
// Can destroy ECX since we are not falling through.
@@ -655,9 +660,9 @@
__ j(LESS_EQUAL, &shift_count_ok, Assembler::kNearJump);
__ movl(EAX, count_limit);
__ Bind(&shift_count_ok);
- __ movl(ECX, EAX); // Shift amount must be in ECX.
- __ movl(EAX, Address(ESP, +2 * kWordSize)); // Value.
- __ SmiUntag(EAX); // Value.
+ __ movl(ECX, EAX); // Shift amount must be in ECX.
+ __ movl(EAX, Address(ESP, +2 * target::kWordSize)); // Value.
+ __ SmiUntag(EAX); // Value.
__ sarl(EAX, ECX);
__ SmiTag(EAX);
__ ret();
@@ -665,16 +670,18 @@
}
// Argument is Smi (receiver).
-void Intrinsifier::Smi_bitNegate(Assembler* assembler, Label* normal_ir_body) {
- __ movl(EAX, Address(ESP, +1 * kWordSize)); // Receiver.
+void AsmIntrinsifier::Smi_bitNegate(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ movl(EAX, Address(ESP, +1 * target::kWordSize)); // Receiver.
__ notl(EAX);
__ andl(EAX, Immediate(~kSmiTagMask)); // Remove inverted smi-tag.
__ ret();
}
-void Intrinsifier::Smi_bitLength(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Smi_bitLength(Assembler* assembler,
+ Label* normal_ir_body) {
ASSERT(kSmiTagShift == 1);
- __ movl(EAX, Address(ESP, +1 * kWordSize)); // Receiver.
+ __ movl(EAX, Address(ESP, +1 * target::kWordSize)); // Receiver.
// XOR with sign bit to complement bits if value is negative.
__ movl(ECX, EAX);
__ sarl(ECX, Immediate(31)); // All 0 or all 1.
@@ -687,12 +694,12 @@
__ ret();
}
-void Intrinsifier::Smi_bitAndFromSmi(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Smi_bitAndFromSmi(Assembler* assembler,
+ Label* normal_ir_body) {
Integer_bitAndFromInteger(assembler, normal_ir_body);
}
-void Intrinsifier::Bigint_lsh(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Bigint_lsh(Assembler* assembler, Label* normal_ir_body) {
// static void _lsh(Uint32List x_digits, int x_used, int n,
// Uint32List r_digits)
@@ -700,18 +707,20 @@
__ pushl(THR);
ASSERT(THR == ESI);
- __ movl(EDI, Address(ESP, 5 * kWordSize)); // x_digits
- __ movl(ECX, Address(ESP, 3 * kWordSize)); // n is Smi
+ __ movl(EDI, Address(ESP, 5 * target::kWordSize)); // x_digits
+ __ movl(ECX, Address(ESP, 3 * target::kWordSize)); // n is Smi
__ SmiUntag(ECX);
- __ movl(EBX, Address(ESP, 2 * kWordSize)); // r_digits
+ __ movl(EBX, Address(ESP, 2 * target::kWordSize)); // r_digits
__ movl(ESI, ECX);
__ sarl(ESI, Immediate(5)); // ESI = n ~/ _DIGIT_BITS.
- __ leal(EBX, FieldAddress(EBX, ESI, TIMES_4, TypedData::data_offset()));
- __ movl(ESI, Address(ESP, 4 * kWordSize)); // x_used > 0, Smi.
+ __ leal(EBX,
+ FieldAddress(EBX, ESI, TIMES_4, target::TypedData::data_offset()));
+ __ movl(ESI, Address(ESP, 4 * target::kWordSize)); // x_used > 0, Smi.
__ SmiUntag(ESI);
__ decl(ESI);
__ xorl(EAX, EAX); // EAX = 0.
- __ movl(EDX, FieldAddress(EDI, ESI, TIMES_4, TypedData::data_offset()));
+ __ movl(EDX,
+ FieldAddress(EDI, ESI, TIMES_4, target::TypedData::data_offset()));
__ shldl(EAX, EDX, ECX);
__ movl(Address(EBX, ESI, TIMES_4, kBytesPerBigIntDigit), EAX);
Label last;
@@ -720,8 +729,9 @@
Label loop;
__ Bind(&loop);
__ movl(EAX, EDX);
- __ movl(EDX, FieldAddress(EDI, ESI, TIMES_4,
- TypedData::data_offset() - kBytesPerBigIntDigit));
+ __ movl(EDX, FieldAddress(
+ EDI, ESI, TIMES_4,
+ target::TypedData::data_offset() - kBytesPerBigIntDigit));
__ shldl(EAX, EDX, ECX);
__ movl(Address(EBX, ESI, TIMES_4, 0), EAX);
__ decl(ESI);
@@ -732,11 +742,11 @@
// Restore THR and return.
__ popl(THR);
- __ LoadObject(EAX, Object::null_object());
+ __ LoadObject(EAX, NullObject());
__ ret();
}
-void Intrinsifier::Bigint_rsh(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Bigint_rsh(Assembler* assembler, Label* normal_ir_body) {
// static void _rsh(Uint32List x_digits, int x_used, int n,
// Uint32List r_digits)
@@ -744,20 +754,22 @@
__ pushl(THR);
ASSERT(THR == ESI);
- __ movl(EDI, Address(ESP, 5 * kWordSize)); // x_digits
- __ movl(ECX, Address(ESP, 3 * kWordSize)); // n is Smi
+ __ movl(EDI, Address(ESP, 5 * target::kWordSize)); // x_digits
+ __ movl(ECX, Address(ESP, 3 * target::kWordSize)); // n is Smi
__ SmiUntag(ECX);
- __ movl(EBX, Address(ESP, 2 * kWordSize)); // r_digits
+ __ movl(EBX, Address(ESP, 2 * target::kWordSize)); // r_digits
__ movl(EDX, ECX);
- __ sarl(EDX, Immediate(5)); // EDX = n ~/ _DIGIT_BITS.
- __ movl(ESI, Address(ESP, 4 * kWordSize)); // x_used > 0, Smi.
+ __ sarl(EDX, Immediate(5)); // EDX = n ~/ _DIGIT_BITS.
+ __ movl(ESI, Address(ESP, 4 * target::kWordSize)); // x_used > 0, Smi.
__ SmiUntag(ESI);
__ decl(ESI);
// EDI = &x_digits[x_used - 1].
- __ leal(EDI, FieldAddress(EDI, ESI, TIMES_4, TypedData::data_offset()));
+ __ leal(EDI,
+ FieldAddress(EDI, ESI, TIMES_4, target::TypedData::data_offset()));
__ subl(ESI, EDX);
// EBX = &r_digits[x_used - 1 - (n ~/ 32)].
- __ leal(EBX, FieldAddress(EBX, ESI, TIMES_4, TypedData::data_offset()));
+ __ leal(EBX,
+ FieldAddress(EBX, ESI, TIMES_4, target::TypedData::data_offset()));
__ negl(ESI);
__ movl(EDX, Address(EDI, ESI, TIMES_4, 0));
Label last;
@@ -777,11 +789,12 @@
// Restore THR and return.
__ popl(THR);
- __ LoadObject(EAX, Object::null_object());
+ __ LoadObject(EAX, NullObject());
__ ret();
}
-void Intrinsifier::Bigint_absAdd(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Bigint_absAdd(Assembler* assembler,
+ Label* normal_ir_body) {
// static void _absAdd(Uint32List digits, int used,
// Uint32List a_digits, int a_used,
// Uint32List r_digits)
@@ -790,13 +803,13 @@
__ pushl(THR);
ASSERT(THR == ESI);
- __ movl(EDI, Address(ESP, 6 * kWordSize)); // digits
- __ movl(EAX, Address(ESP, 5 * kWordSize)); // used is Smi
- __ SmiUntag(EAX); // used > 0.
- __ movl(ESI, Address(ESP, 4 * kWordSize)); // a_digits
- __ movl(ECX, Address(ESP, 3 * kWordSize)); // a_used is Smi
- __ SmiUntag(ECX); // a_used > 0.
- __ movl(EBX, Address(ESP, 2 * kWordSize)); // r_digits
+ __ movl(EDI, Address(ESP, 6 * target::kWordSize)); // digits
+ __ movl(EAX, Address(ESP, 5 * target::kWordSize)); // used is Smi
+ __ SmiUntag(EAX); // used > 0.
+ __ movl(ESI, Address(ESP, 4 * target::kWordSize)); // a_digits
+ __ movl(ECX, Address(ESP, 3 * target::kWordSize)); // a_used is Smi
+ __ SmiUntag(ECX); // a_used > 0.
+ __ movl(EBX, Address(ESP, 2 * target::kWordSize)); // r_digits
// Precompute 'used - a_used' now so that carry flag is not lost later.
__ subl(EAX, ECX);
@@ -807,9 +820,12 @@
Label add_loop;
__ Bind(&add_loop);
// Loop a_used times, ECX = a_used, ECX > 0.
- __ movl(EAX, FieldAddress(EDI, EDX, TIMES_4, TypedData::data_offset()));
- __ adcl(EAX, FieldAddress(ESI, EDX, TIMES_4, TypedData::data_offset()));
- __ movl(FieldAddress(EBX, EDX, TIMES_4, TypedData::data_offset()), EAX);
+ __ movl(EAX,
+ FieldAddress(EDI, EDX, TIMES_4, target::TypedData::data_offset()));
+ __ adcl(EAX,
+ FieldAddress(ESI, EDX, TIMES_4, target::TypedData::data_offset()));
+ __ movl(FieldAddress(EBX, EDX, TIMES_4, target::TypedData::data_offset()),
+ EAX);
__ incl(EDX); // Does not affect carry flag.
__ decl(ECX); // Does not affect carry flag.
__ j(NOT_ZERO, &add_loop, Assembler::kNearJump);
@@ -822,9 +838,11 @@
Label carry_loop;
__ Bind(&carry_loop);
// Loop used - a_used times, ECX = used - a_used, ECX > 0.
- __ movl(EAX, FieldAddress(EDI, EDX, TIMES_4, TypedData::data_offset()));
+ __ movl(EAX,
+ FieldAddress(EDI, EDX, TIMES_4, target::TypedData::data_offset()));
__ adcl(EAX, Immediate(0));
- __ movl(FieldAddress(EBX, EDX, TIMES_4, TypedData::data_offset()), EAX);
+ __ movl(FieldAddress(EBX, EDX, TIMES_4, target::TypedData::data_offset()),
+ EAX);
__ incl(EDX); // Does not affect carry flag.
__ decl(ECX); // Does not affect carry flag.
__ j(NOT_ZERO, &carry_loop, Assembler::kNearJump);
@@ -832,15 +850,17 @@
__ Bind(&last_carry);
__ movl(EAX, Immediate(0));
__ adcl(EAX, Immediate(0));
- __ movl(FieldAddress(EBX, EDX, TIMES_4, TypedData::data_offset()), EAX);
+ __ movl(FieldAddress(EBX, EDX, TIMES_4, target::TypedData::data_offset()),
+ EAX);
// Restore THR and return.
__ popl(THR);
- __ LoadObject(EAX, Object::null_object());
+ __ LoadObject(EAX, NullObject());
__ ret();
}
-void Intrinsifier::Bigint_absSub(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Bigint_absSub(Assembler* assembler,
+ Label* normal_ir_body) {
// static void _absSub(Uint32List digits, int used,
// Uint32List a_digits, int a_used,
// Uint32List r_digits)
@@ -849,13 +869,13 @@
__ pushl(THR);
ASSERT(THR == ESI);
- __ movl(EDI, Address(ESP, 6 * kWordSize)); // digits
- __ movl(EAX, Address(ESP, 5 * kWordSize)); // used is Smi
- __ SmiUntag(EAX); // used > 0.
- __ movl(ESI, Address(ESP, 4 * kWordSize)); // a_digits
- __ movl(ECX, Address(ESP, 3 * kWordSize)); // a_used is Smi
- __ SmiUntag(ECX); // a_used > 0.
- __ movl(EBX, Address(ESP, 2 * kWordSize)); // r_digits
+ __ movl(EDI, Address(ESP, 6 * target::kWordSize)); // digits
+ __ movl(EAX, Address(ESP, 5 * target::kWordSize)); // used is Smi
+ __ SmiUntag(EAX); // used > 0.
+ __ movl(ESI, Address(ESP, 4 * target::kWordSize)); // a_digits
+ __ movl(ECX, Address(ESP, 3 * target::kWordSize)); // a_used is Smi
+ __ SmiUntag(ECX); // a_used > 0.
+ __ movl(EBX, Address(ESP, 2 * target::kWordSize)); // r_digits
// Precompute 'used - a_used' now so that carry flag is not lost later.
__ subl(EAX, ECX);
@@ -866,9 +886,12 @@
Label sub_loop;
__ Bind(&sub_loop);
// Loop a_used times, ECX = a_used, ECX > 0.
- __ movl(EAX, FieldAddress(EDI, EDX, TIMES_4, TypedData::data_offset()));
- __ sbbl(EAX, FieldAddress(ESI, EDX, TIMES_4, TypedData::data_offset()));
- __ movl(FieldAddress(EBX, EDX, TIMES_4, TypedData::data_offset()), EAX);
+ __ movl(EAX,
+ FieldAddress(EDI, EDX, TIMES_4, target::TypedData::data_offset()));
+ __ sbbl(EAX,
+ FieldAddress(ESI, EDX, TIMES_4, target::TypedData::data_offset()));
+ __ movl(FieldAddress(EBX, EDX, TIMES_4, target::TypedData::data_offset()),
+ EAX);
__ incl(EDX); // Does not affect carry flag.
__ decl(ECX); // Does not affect carry flag.
__ j(NOT_ZERO, &sub_loop, Assembler::kNearJump);
@@ -881,9 +904,11 @@
Label carry_loop;
__ Bind(&carry_loop);
// Loop used - a_used times, ECX = used - a_used, ECX > 0.
- __ movl(EAX, FieldAddress(EDI, EDX, TIMES_4, TypedData::data_offset()));
+ __ movl(EAX,
+ FieldAddress(EDI, EDX, TIMES_4, target::TypedData::data_offset()));
__ sbbl(EAX, Immediate(0));
- __ movl(FieldAddress(EBX, EDX, TIMES_4, TypedData::data_offset()), EAX);
+ __ movl(FieldAddress(EBX, EDX, TIMES_4, target::TypedData::data_offset()),
+ EAX);
__ incl(EDX); // Does not affect carry flag.
__ decl(ECX); // Does not affect carry flag.
__ j(NOT_ZERO, &carry_loop, Assembler::kNearJump);
@@ -891,11 +916,12 @@
__ Bind(&done);
// Restore THR and return.
__ popl(THR);
- __ LoadObject(EAX, Object::null_object());
+ __ LoadObject(EAX, NullObject());
__ ret();
}
-void Intrinsifier::Bigint_mulAdd(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Bigint_mulAdd(Assembler* assembler,
+ Label* normal_ir_body) {
// Pseudo code:
// static int _mulAdd(Uint32List x_digits, int xi,
// Uint32List m_digits, int i,
@@ -925,14 +951,15 @@
Label no_op;
// EBX = x, no_op if x == 0
- __ movl(ECX, Address(ESP, 7 * kWordSize)); // x_digits
- __ movl(EAX, Address(ESP, 6 * kWordSize)); // xi is Smi
- __ movl(EBX, FieldAddress(ECX, EAX, TIMES_2, TypedData::data_offset()));
+ __ movl(ECX, Address(ESP, 7 * target::kWordSize)); // x_digits
+ __ movl(EAX, Address(ESP, 6 * target::kWordSize)); // xi is Smi
+ __ movl(EBX,
+ FieldAddress(ECX, EAX, TIMES_2, target::TypedData::data_offset()));
__ testl(EBX, EBX);
__ j(ZERO, &no_op, Assembler::kNearJump);
// EDX = SmiUntag(n), no_op if n == 0
- __ movl(EDX, Address(ESP, 1 * kWordSize));
+ __ movl(EDX, Address(ESP, 1 * target::kWordSize));
__ SmiUntag(EDX);
__ j(ZERO, &no_op, Assembler::kNearJump);
@@ -941,18 +968,20 @@
ASSERT(THR == ESI);
// EDI = mip = &m_digits[i >> 1]
- __ movl(EDI, Address(ESP, 6 * kWordSize)); // m_digits
- __ movl(EAX, Address(ESP, 5 * kWordSize)); // i is Smi
- __ leal(EDI, FieldAddress(EDI, EAX, TIMES_2, TypedData::data_offset()));
+ __ movl(EDI, Address(ESP, 6 * target::kWordSize)); // m_digits
+ __ movl(EAX, Address(ESP, 5 * target::kWordSize)); // i is Smi
+ __ leal(EDI,
+ FieldAddress(EDI, EAX, TIMES_2, target::TypedData::data_offset()));
// ESI = ajp = &a_digits[j >> 1]
- __ movl(ESI, Address(ESP, 4 * kWordSize)); // a_digits
- __ movl(EAX, Address(ESP, 3 * kWordSize)); // j is Smi
- __ leal(ESI, FieldAddress(ESI, EAX, TIMES_2, TypedData::data_offset()));
+ __ movl(ESI, Address(ESP, 4 * target::kWordSize)); // a_digits
+ __ movl(EAX, Address(ESP, 3 * target::kWordSize)); // j is Smi
+ __ leal(ESI,
+ FieldAddress(ESI, EAX, TIMES_2, target::TypedData::data_offset()));
// Save n
__ pushl(EDX);
- Address n_addr = Address(ESP, 0 * kWordSize);
+ Address n_addr = Address(ESP, 0 * target::kWordSize);
// ECX = c = 0
__ xorl(ECX, ECX);
@@ -1010,11 +1039,12 @@
__ popl(THR);
__ Bind(&no_op);
- __ movl(EAX, Immediate(Smi::RawValue(1))); // One digit processed.
+ __ movl(EAX, Immediate(target::ToRawSmi(1))); // One digit processed.
__ ret();
}
-void Intrinsifier::Bigint_sqrAdd(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Bigint_sqrAdd(Assembler* assembler,
+ Label* normal_ir_body) {
// Pseudo code:
// static int _sqrAdd(Uint32List x_digits, int i,
// Uint32List a_digits, int used) {
@@ -1042,9 +1072,10 @@
// }
// EDI = xip = &x_digits[i >> 1]
- __ movl(EDI, Address(ESP, 4 * kWordSize)); // x_digits
- __ movl(EAX, Address(ESP, 3 * kWordSize)); // i is Smi
- __ leal(EDI, FieldAddress(EDI, EAX, TIMES_2, TypedData::data_offset()));
+ __ movl(EDI, Address(ESP, 4 * target::kWordSize)); // x_digits
+ __ movl(EAX, Address(ESP, 3 * target::kWordSize)); // i is Smi
+ __ leal(EDI,
+ FieldAddress(EDI, EAX, TIMES_2, target::TypedData::data_offset()));
// EBX = x = *xip++, return if x == 0
Label x_zero;
@@ -1058,8 +1089,9 @@
ASSERT(THR == ESI);
// ESI = ajp = &a_digits[i]
- __ movl(ESI, Address(ESP, 3 * kWordSize)); // a_digits
- __ leal(ESI, FieldAddress(ESI, EAX, TIMES_4, TypedData::data_offset()));
+ __ movl(ESI, Address(ESP, 3 * target::kWordSize)); // a_digits
+ __ leal(ESI,
+ FieldAddress(ESI, EAX, TIMES_4, target::TypedData::data_offset()));
// EDX:EAX = t = x*x + *ajp
__ movl(EAX, EBX);
@@ -1072,8 +1104,8 @@
__ addl(ESI, Immediate(kBytesPerBigIntDigit));
// int n = used - i - 1
- __ movl(EAX, Address(ESP, 2 * kWordSize)); // used is Smi
- __ subl(EAX, Address(ESP, 4 * kWordSize)); // i is Smi
+ __ movl(EAX, Address(ESP, 2 * target::kWordSize)); // used is Smi
+ __ subl(EAX, Address(ESP, 4 * target::kWordSize)); // i is Smi
__ SmiUntag(EAX);
__ decl(EAX);
__ pushl(EAX); // Save n on stack.
@@ -1082,9 +1114,9 @@
__ pushl(Immediate(0)); // push high32(c) == 0
__ pushl(EDX); // push low32(c) == high32(t)
- Address n_addr = Address(ESP, 2 * kWordSize);
- Address ch_addr = Address(ESP, 1 * kWordSize);
- Address cl_addr = Address(ESP, 0 * kWordSize);
+ Address n_addr = Address(ESP, 2 * target::kWordSize);
+ Address ch_addr = Address(ESP, 1 * target::kWordSize);
+ Address cl_addr = Address(ESP, 0 * target::kWordSize);
Label loop, done;
__ Bind(&loop);
@@ -1096,7 +1128,7 @@
// n: ESP[2]
// while (--n >= 0)
- __ decl(Address(ESP, 2 * kWordSize)); // --n
+ __ decl(Address(ESP, 2 * target::kWordSize)); // --n
__ j(NEGATIVE, &done, Assembler::kNearJump);
// uint32_t xi = *xip++
@@ -1142,12 +1174,12 @@
__ Drop(3);
__ popl(THR);
__ Bind(&x_zero);
- __ movl(EAX, Immediate(Smi::RawValue(1))); // One digit processed.
+ __ movl(EAX, Immediate(target::ToRawSmi(1))); // One digit processed.
__ ret();
}
-void Intrinsifier::Bigint_estimateQuotientDigit(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Bigint_estimateQuotientDigit(Assembler* assembler,
+ Label* normal_ir_body) {
// Pseudo code:
// static int _estQuotientDigit(Uint32List args, Uint32List digits, int i) {
// uint32_t yt = args[_YT]; // _YT == 1.
@@ -1165,16 +1197,17 @@
// }
// EDI = args
- __ movl(EDI, Address(ESP, 3 * kWordSize)); // args
+ __ movl(EDI, Address(ESP, 3 * target::kWordSize)); // args
// ECX = yt = args[1]
- __ movl(ECX,
- FieldAddress(EDI, TypedData::data_offset() + kBytesPerBigIntDigit));
+ __ movl(ECX, FieldAddress(EDI, target::TypedData::data_offset() +
+ kBytesPerBigIntDigit));
// EBX = dp = &digits[i >> 1]
- __ movl(EBX, Address(ESP, 2 * kWordSize)); // digits
- __ movl(EAX, Address(ESP, 1 * kWordSize)); // i is Smi
- __ leal(EBX, FieldAddress(EBX, EAX, TIMES_2, TypedData::data_offset()));
+ __ movl(EBX, Address(ESP, 2 * target::kWordSize)); // digits
+ __ movl(EAX, Address(ESP, 1 * target::kWordSize)); // i is Smi
+ __ leal(EBX,
+ FieldAddress(EBX, EAX, TIMES_2, target::TypedData::data_offset()));
// EDX = dh = dp[0]
__ movl(EDX, Address(EBX, 0));
@@ -1195,16 +1228,16 @@
__ Bind(&return_qd);
// args[2] = qd
- __ movl(
- FieldAddress(EDI, TypedData::data_offset() + 2 * kBytesPerBigIntDigit),
- EAX);
+ __ movl(FieldAddress(
+ EDI, target::TypedData::data_offset() + 2 * kBytesPerBigIntDigit),
+ EAX);
- __ movl(EAX, Immediate(Smi::RawValue(1))); // One digit processed.
+ __ movl(EAX, Immediate(target::ToRawSmi(1))); // One digit processed.
__ ret();
}
-void Intrinsifier::Montgomery_mulMod(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Montgomery_mulMod(Assembler* assembler,
+ Label* normal_ir_body) {
// Pseudo code:
// static int _mulMod(Uint32List args, Uint32List digits, int i) {
// uint32_t rho = args[_RHO]; // _RHO == 2.
@@ -1215,26 +1248,27 @@
// }
// EDI = args
- __ movl(EDI, Address(ESP, 3 * kWordSize)); // args
+ __ movl(EDI, Address(ESP, 3 * target::kWordSize)); // args
// ECX = rho = args[2]
- __ movl(ECX, FieldAddress(
- EDI, TypedData::data_offset() + 2 * kBytesPerBigIntDigit));
+ __ movl(ECX, FieldAddress(EDI, target::TypedData::data_offset() +
+ 2 * kBytesPerBigIntDigit));
// EAX = digits[i >> 1]
- __ movl(EBX, Address(ESP, 2 * kWordSize)); // digits
- __ movl(EAX, Address(ESP, 1 * kWordSize)); // i is Smi
- __ movl(EAX, FieldAddress(EBX, EAX, TIMES_2, TypedData::data_offset()));
+ __ movl(EBX, Address(ESP, 2 * target::kWordSize)); // digits
+ __ movl(EAX, Address(ESP, 1 * target::kWordSize)); // i is Smi
+ __ movl(EAX,
+ FieldAddress(EBX, EAX, TIMES_2, target::TypedData::data_offset()));
// EDX:EAX = t = rho*d
__ mull(ECX);
// args[4] = t mod DIGIT_BASE = low32(t)
- __ movl(
- FieldAddress(EDI, TypedData::data_offset() + 4 * kBytesPerBigIntDigit),
- EAX);
+ __ movl(FieldAddress(
+ EDI, target::TypedData::data_offset() + 4 * kBytesPerBigIntDigit),
+ EAX);
- __ movl(EAX, Immediate(Smi::RawValue(1))); // One digit processed.
+ __ movl(EAX, Immediate(target::ToRawSmi(1))); // One digit processed.
__ ret();
}
@@ -1244,7 +1278,7 @@
static void TestLastArgumentIsDouble(Assembler* assembler,
Label* is_smi,
Label* not_double_smi) {
- __ movl(EAX, Address(ESP, +1 * kWordSize));
+ __ movl(EAX, Address(ESP, +1 * target::kWordSize));
__ testl(EAX, Immediate(kSmiTagMask));
__ j(ZERO, is_smi, Assembler::kNearJump); // Jump if Smi.
__ CompareClassId(EAX, kDoubleCid, EBX);
@@ -1262,19 +1296,19 @@
Label is_false, is_true, is_smi, double_op;
TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
// Both arguments are double, right operand is in EAX.
- __ movsd(XMM1, FieldAddress(EAX, Double::value_offset()));
+ __ movsd(XMM1, FieldAddress(EAX, target::Double::value_offset()));
__ Bind(&double_op);
- __ movl(EAX, Address(ESP, +2 * kWordSize)); // Left argument.
- __ movsd(XMM0, FieldAddress(EAX, Double::value_offset()));
+ __ movl(EAX, Address(ESP, +2 * target::kWordSize)); // Left argument.
+ __ movsd(XMM0, FieldAddress(EAX, target::Double::value_offset()));
__ comisd(XMM0, XMM1);
__ j(PARITY_EVEN, &is_false, Assembler::kNearJump); // NaN -> false;
__ j(true_condition, &is_true, Assembler::kNearJump);
// Fall through false.
__ Bind(&is_false);
- __ LoadObject(EAX, Bool::False());
+ __ LoadObject(EAX, CastHandle<Object>(FalseObject()));
__ ret();
__ Bind(&is_true);
- __ LoadObject(EAX, Bool::True());
+ __ LoadObject(EAX, CastHandle<Object>(TrueObject()));
__ ret();
__ Bind(&is_smi);
__ SmiUntag(EAX);
@@ -1284,31 +1318,32 @@
}
// arg0 is Double, arg1 is unknown.
-void Intrinsifier::Double_greaterThan(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Double_greaterThan(Assembler* assembler,
+ Label* normal_ir_body) {
CompareDoubles(assembler, normal_ir_body, ABOVE);
}
// arg0 is Double, arg1 is unknown.
-void Intrinsifier::Double_greaterEqualThan(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Double_greaterEqualThan(Assembler* assembler,
+ Label* normal_ir_body) {
CompareDoubles(assembler, normal_ir_body, ABOVE_EQUAL);
}
// arg0 is Double, arg1 is unknown.
-void Intrinsifier::Double_lessThan(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Double_lessThan(Assembler* assembler,
+ Label* normal_ir_body) {
CompareDoubles(assembler, normal_ir_body, BELOW);
}
// arg0 is Double, arg1 is unknown.
-void Intrinsifier::Double_equal(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Double_equal(Assembler* assembler,
+ Label* normal_ir_body) {
CompareDoubles(assembler, normal_ir_body, EQUAL);
}
// arg0 is Double, arg1 is unknown.
-void Intrinsifier::Double_lessEqualThan(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Double_lessEqualThan(Assembler* assembler,
+ Label* normal_ir_body) {
CompareDoubles(assembler, normal_ir_body, BELOW_EQUAL);
}
@@ -1320,10 +1355,10 @@
Label is_smi, double_op;
TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
// Both arguments are double, right operand is in EAX.
- __ movsd(XMM1, FieldAddress(EAX, Double::value_offset()));
+ __ movsd(XMM1, FieldAddress(EAX, target::Double::value_offset()));
__ Bind(&double_op);
- __ movl(EAX, Address(ESP, +2 * kWordSize)); // Left argument.
- __ movsd(XMM0, FieldAddress(EAX, Double::value_offset()));
+ __ movl(EAX, Address(ESP, +2 * target::kWordSize)); // Left argument.
+ __ movsd(XMM0, FieldAddress(EAX, target::Double::value_offset()));
switch (kind) {
case Token::kADD:
__ addsd(XMM0, XMM1);
@@ -1340,12 +1375,11 @@
default:
UNREACHABLE();
}
- const Class& double_class =
- Class::Handle(Isolate::Current()->object_store()->double_class());
+ const Class& double_class = DoubleClass();
__ TryAllocate(double_class, normal_ir_body, Assembler::kNearJump,
EAX, // Result register.
EBX);
- __ movsd(FieldAddress(EAX, Double::value_offset()), XMM0);
+ __ movsd(FieldAddress(EAX, target::Double::value_offset()), XMM0);
__ ret();
__ Bind(&is_smi);
__ SmiUntag(EAX);
@@ -1354,116 +1388,115 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::Double_add(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Double_add(Assembler* assembler, Label* normal_ir_body) {
DoubleArithmeticOperations(assembler, normal_ir_body, Token::kADD);
}
-void Intrinsifier::Double_mul(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Double_mul(Assembler* assembler, Label* normal_ir_body) {
DoubleArithmeticOperations(assembler, normal_ir_body, Token::kMUL);
}
-void Intrinsifier::Double_sub(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Double_sub(Assembler* assembler, Label* normal_ir_body) {
DoubleArithmeticOperations(assembler, normal_ir_body, Token::kSUB);
}
-void Intrinsifier::Double_div(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Double_div(Assembler* assembler, Label* normal_ir_body) {
DoubleArithmeticOperations(assembler, normal_ir_body, Token::kDIV);
}
// Left is double, right is integer (Mint or Smi)
-void Intrinsifier::Double_mulFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Double_mulFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
// Only smis allowed.
- __ movl(EAX, Address(ESP, +1 * kWordSize));
+ __ movl(EAX, Address(ESP, +1 * target::kWordSize));
__ testl(EAX, Immediate(kSmiTagMask));
__ j(NOT_ZERO, normal_ir_body, Assembler::kNearJump);
// Is Smi.
__ SmiUntag(EAX);
__ cvtsi2sd(XMM1, EAX);
- __ movl(EAX, Address(ESP, +2 * kWordSize));
- __ movsd(XMM0, FieldAddress(EAX, Double::value_offset()));
+ __ movl(EAX, Address(ESP, +2 * target::kWordSize));
+ __ movsd(XMM0, FieldAddress(EAX, target::Double::value_offset()));
__ mulsd(XMM0, XMM1);
- const Class& double_class =
- Class::Handle(Isolate::Current()->object_store()->double_class());
+ const Class& double_class = DoubleClass();
__ TryAllocate(double_class, normal_ir_body, Assembler::kNearJump,
EAX, // Result register.
EBX);
- __ movsd(FieldAddress(EAX, Double::value_offset()), XMM0);
+ __ movsd(FieldAddress(EAX, target::Double::value_offset()), XMM0);
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::DoubleFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
- __ movl(EAX, Address(ESP, +1 * kWordSize));
+void AsmIntrinsifier::DoubleFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ movl(EAX, Address(ESP, +1 * target::kWordSize));
__ testl(EAX, Immediate(kSmiTagMask));
__ j(NOT_ZERO, normal_ir_body, Assembler::kNearJump);
// Is Smi.
__ SmiUntag(EAX);
__ cvtsi2sd(XMM0, EAX);
- const Class& double_class =
- Class::Handle(Isolate::Current()->object_store()->double_class());
+ const Class& double_class = DoubleClass();
__ TryAllocate(double_class, normal_ir_body, Assembler::kNearJump,
EAX, // Result register.
EBX);
- __ movsd(FieldAddress(EAX, Double::value_offset()), XMM0);
+ __ movsd(FieldAddress(EAX, target::Double::value_offset()), XMM0);
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::Double_getIsNaN(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Double_getIsNaN(Assembler* assembler,
+ Label* normal_ir_body) {
Label is_true;
- __ movl(EAX, Address(ESP, +1 * kWordSize));
- __ movsd(XMM0, FieldAddress(EAX, Double::value_offset()));
+ __ movl(EAX, Address(ESP, +1 * target::kWordSize));
+ __ movsd(XMM0, FieldAddress(EAX, target::Double::value_offset()));
__ comisd(XMM0, XMM0);
__ j(PARITY_EVEN, &is_true, Assembler::kNearJump); // NaN -> true;
- __ LoadObject(EAX, Bool::False());
+ __ LoadObject(EAX, CastHandle<Object>(FalseObject()));
__ ret();
__ Bind(&is_true);
- __ LoadObject(EAX, Bool::True());
+ __ LoadObject(EAX, CastHandle<Object>(TrueObject()));
__ ret();
}
-void Intrinsifier::Double_getIsInfinite(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Double_getIsInfinite(Assembler* assembler,
+ Label* normal_ir_body) {
Label not_inf;
- __ movl(EAX, Address(ESP, +1 * kWordSize));
- __ movl(EBX, FieldAddress(EAX, Double::value_offset()));
+ __ movl(EAX, Address(ESP, +1 * target::kWordSize));
+ __ movl(EBX, FieldAddress(EAX, target::Double::value_offset()));
// If the low word isn't zero, then it isn't infinity.
__ cmpl(EBX, Immediate(0));
__ j(NOT_EQUAL, &not_inf, Assembler::kNearJump);
// Check the high word.
- __ movl(EBX, FieldAddress(EAX, Double::value_offset() + kWordSize));
+ __ movl(EBX, FieldAddress(
+ EAX, target::Double::value_offset() + target::kWordSize));
// Mask off sign bit.
__ andl(EBX, Immediate(0x7FFFFFFF));
// Compare with +infinity.
__ cmpl(EBX, Immediate(0x7FF00000));
__ j(NOT_EQUAL, &not_inf, Assembler::kNearJump);
- __ LoadObject(EAX, Bool::True());
+ __ LoadObject(EAX, CastHandle<Object>(TrueObject()));
__ ret();
__ Bind(&not_inf);
- __ LoadObject(EAX, Bool::False());
+ __ LoadObject(EAX, CastHandle<Object>(FalseObject()));
__ ret();
}
-void Intrinsifier::Double_getIsNegative(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Double_getIsNegative(Assembler* assembler,
+ Label* normal_ir_body) {
Label is_false, is_true, is_zero;
- __ movl(EAX, Address(ESP, +1 * kWordSize));
- __ movsd(XMM0, FieldAddress(EAX, Double::value_offset()));
+ __ movl(EAX, Address(ESP, +1 * target::kWordSize));
+ __ movsd(XMM0, FieldAddress(EAX, target::Double::value_offset()));
__ xorpd(XMM1, XMM1); // 0.0 -> XMM1.
__ comisd(XMM0, XMM1);
__ j(PARITY_EVEN, &is_false, Assembler::kNearJump); // NaN -> false.
__ j(EQUAL, &is_zero, Assembler::kNearJump); // Check for negative zero.
__ j(ABOVE_EQUAL, &is_false, Assembler::kNearJump); // >= 0 -> false.
__ Bind(&is_true);
- __ LoadObject(EAX, Bool::True());
+ __ LoadObject(EAX, CastHandle<Object>(TrueObject()));
__ ret();
__ Bind(&is_false);
- __ LoadObject(EAX, Bool::False());
+ __ LoadObject(EAX, CastHandle<Object>(FalseObject()));
__ ret();
__ Bind(&is_zero);
// Check for negative zero (get the sign bit).
@@ -1473,10 +1506,10 @@
__ jmp(&is_false, Assembler::kNearJump);
}
-void Intrinsifier::DoubleToInteger(Assembler* assembler,
- Label* normal_ir_body) {
- __ movl(EAX, Address(ESP, +1 * kWordSize));
- __ movsd(XMM0, FieldAddress(EAX, Double::value_offset()));
+void AsmIntrinsifier::DoubleToInteger(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ movl(EAX, Address(ESP, +1 * target::kWordSize));
+ __ movsd(XMM0, FieldAddress(EAX, target::Double::value_offset()));
__ cvttsd2si(EAX, XMM0);
// Overflow is signalled with minint.
// Check for overflow and that it fits into Smi.
@@ -1487,14 +1520,14 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::Double_hashCode(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Double_hashCode(Assembler* assembler,
+ Label* normal_ir_body) {
// TODO(dartbug.com/31174): Convert this to a graph intrinsic.
// Convert double value to signed 32-bit int in EAX and
// back to a double in XMM1.
- __ movl(ECX, Address(ESP, +1 * kWordSize));
- __ movsd(XMM0, FieldAddress(ECX, Double::value_offset()));
+ __ movl(ECX, Address(ESP, +1 * target::kWordSize));
+ __ movsd(XMM0, FieldAddress(ECX, target::Double::value_offset()));
__ cvttsd2si(EAX, XMM0);
__ cvtsi2sd(XMM1, EAX);
@@ -1514,8 +1547,8 @@
// Convert the double bits to a hash code that fits in a Smi.
__ Bind(&double_hash);
- __ movl(EAX, FieldAddress(ECX, Double::value_offset()));
- __ movl(ECX, FieldAddress(ECX, Double::value_offset() + 4));
+ __ movl(EAX, FieldAddress(ECX, target::Double::value_offset()));
+ __ movl(ECX, FieldAddress(ECX, target::Double::value_offset() + 4));
__ xorl(EAX, ECX);
__ andl(EAX, Immediate(kSmiMax));
__ SmiTag(EAX);
@@ -1526,19 +1559,18 @@
}
// Argument type is not known
-void Intrinsifier::MathSqrt(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::MathSqrt(Assembler* assembler, Label* normal_ir_body) {
Label is_smi, double_op;
TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
// Argument is double and is in EAX.
- __ movsd(XMM1, FieldAddress(EAX, Double::value_offset()));
+ __ movsd(XMM1, FieldAddress(EAX, target::Double::value_offset()));
__ Bind(&double_op);
__ sqrtsd(XMM0, XMM1);
- const Class& double_class =
- Class::Handle(Isolate::Current()->object_store()->double_class());
+ const Class& double_class = DoubleClass();
__ TryAllocate(double_class, normal_ir_body, Assembler::kNearJump,
EAX, // Result register.
EBX);
- __ movsd(FieldAddress(EAX, Double::value_offset()), XMM0);
+ __ movsd(FieldAddress(EAX, target::Double::value_offset()), XMM0);
__ ret();
__ Bind(&is_smi);
__ SmiUntag(EAX);
@@ -1550,28 +1582,24 @@
// var state = ((_A * (_state[kSTATE_LO])) + _state[kSTATE_HI]) & _MASK_64;
// _state[kSTATE_LO] = state & _MASK_32;
// _state[kSTATE_HI] = state >> 32;
-void Intrinsifier::Random_nextState(Assembler* assembler,
- Label* normal_ir_body) {
- const Library& math_lib = Library::Handle(Library::MathLibrary());
- ASSERT(!math_lib.IsNull());
- const Class& random_class =
- Class::Handle(math_lib.LookupClassAllowPrivate(Symbols::_Random()));
- ASSERT(!random_class.IsNull());
- const Field& state_field = Field::ZoneHandle(
- random_class.LookupInstanceFieldAllowPrivate(Symbols::_state()));
- ASSERT(!state_field.IsNull());
- const int64_t a_int_value = Intrinsifier::kRandomAValue;
+void AsmIntrinsifier::Random_nextState(Assembler* assembler,
+ Label* normal_ir_body) {
+ const Field& state_field = LookupMathRandomStateFieldOffset();
+ const int64_t a_int_value = AsmIntrinsifier::kRandomAValue;
+
// 'a_int_value' is a mask.
ASSERT(Utils::IsUint(32, a_int_value));
int32_t a_int32_value = static_cast<int32_t>(a_int_value);
// Receiver.
- __ movl(EAX, Address(ESP, +1 * kWordSize));
+ __ movl(EAX, Address(ESP, +1 * target::kWordSize));
// Field '_state'.
- __ movl(EBX, FieldAddress(EAX, state_field.Offset()));
+ __ movl(EBX, FieldAddress(EAX, LookupFieldOffsetInBytes(state_field)));
// Addresses of _state[0] and _state[1].
- const intptr_t scale = Instance::ElementSizeFor(kTypedDataUint32ArrayCid);
- const intptr_t offset = Instance::DataOffsetFor(kTypedDataUint32ArrayCid);
+ const intptr_t scale =
+ target::Instance::ElementSizeFor(kTypedDataUint32ArrayCid);
+ const intptr_t offset =
+ target::Instance::DataOffsetFor(kTypedDataUint32ArrayCid);
Address addr_0 = FieldAddress(EBX, 0 * scale + offset);
Address addr_1 = FieldAddress(EBX, 1 * scale + offset);
__ movl(EAX, Immediate(a_int32_value));
@@ -1581,21 +1609,22 @@
__ adcl(EDX, Immediate(0));
__ movl(addr_1, EDX);
__ movl(addr_0, EAX);
- ASSERT(Smi::RawValue(0) == 0);
+ ASSERT(target::ToRawSmi(0) == 0);
__ xorl(EAX, EAX);
__ ret();
}
// Identity comparison.
-void Intrinsifier::ObjectEquals(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::ObjectEquals(Assembler* assembler,
+ Label* normal_ir_body) {
Label is_true;
- __ movl(EAX, Address(ESP, +1 * kWordSize));
- __ cmpl(EAX, Address(ESP, +2 * kWordSize));
+ __ movl(EAX, Address(ESP, +1 * target::kWordSize));
+ __ cmpl(EAX, Address(ESP, +2 * target::kWordSize));
__ j(EQUAL, &is_true, Assembler::kNearJump);
- __ LoadObject(EAX, Bool::False());
+ __ LoadObject(EAX, CastHandle<Object>(FalseObject()));
__ ret();
__ Bind(&is_true);
- __ LoadObject(EAX, Bool::True());
+ __ LoadObject(EAX, CastHandle<Object>(TrueObject()));
__ ret();
}
@@ -1634,10 +1663,10 @@
}
// Return type quickly for simple types (not parameterized and not signature).
-void Intrinsifier::ObjectRuntimeType(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::ObjectRuntimeType(Assembler* assembler,
+ Label* normal_ir_body) {
Label use_declaration_type, not_double, not_integer;
- __ movl(EAX, Address(ESP, +1 * kWordSize));
+ __ movl(EAX, Address(ESP, +1 * target::kWordSize));
__ LoadClassIdMayBeSmi(EDI, EAX);
__ cmpl(EDI, Immediate(kClosureCid));
@@ -1651,8 +1680,8 @@
__ j(NOT_EQUAL, &not_double);
__ LoadIsolate(EAX);
- __ movl(EAX, Address(EAX, Isolate::object_store_offset()));
- __ movl(EAX, Address(EAX, ObjectStore::double_type_offset()));
+ __ movl(EAX, Address(EAX, target::Isolate::object_store_offset()));
+ __ movl(EAX, Address(EAX, target::ObjectStore::double_type_offset()));
__ ret();
__ Bind(&not_double);
@@ -1661,8 +1690,8 @@
JumpIfNotInteger(assembler, EAX, &not_integer);
__ LoadIsolate(EAX);
- __ movl(EAX, Address(EAX, Isolate::object_store_offset()));
- __ movl(EAX, Address(EAX, ObjectStore::int_type_offset()));
+ __ movl(EAX, Address(EAX, target::Isolate::object_store_offset()));
+ __ movl(EAX, Address(EAX, target::ObjectStore::int_type_offset()));
__ ret();
__ Bind(&not_integer);
@@ -1672,36 +1701,37 @@
JumpIfNotString(assembler, EAX, &use_declaration_type);
__ LoadIsolate(EAX);
- __ movl(EAX, Address(EAX, Isolate::object_store_offset()));
- __ movl(EAX, Address(EAX, ObjectStore::string_type_offset()));
+ __ movl(EAX, Address(EAX, target::Isolate::object_store_offset()));
+ __ movl(EAX, Address(EAX, target::ObjectStore::string_type_offset()));
__ ret();
// Object is neither double, nor integer, nor string.
__ Bind(&use_declaration_type);
__ LoadClassById(EBX, EDI);
- __ movzxw(EDI, FieldAddress(EBX, Class::num_type_arguments_offset()));
+ __ movzxw(EDI, FieldAddress(
+ EBX, target::Class::num_type_arguments_offset_in_bytes()));
__ cmpl(EDI, Immediate(0));
__ j(NOT_EQUAL, normal_ir_body, Assembler::kNearJump);
- __ movl(EAX, FieldAddress(EBX, Class::declaration_type_offset()));
- __ CompareObject(EAX, Object::null_object());
+ __ movl(EAX, FieldAddress(EBX, target::Class::declaration_type_offset()));
+ __ CompareObject(EAX, NullObject());
__ j(EQUAL, normal_ir_body, Assembler::kNearJump); // Not yet set.
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler,
+ Label* normal_ir_body) {
Label different_cids, equal, not_equal, not_integer;
- __ movl(EAX, Address(ESP, +1 * kWordSize));
+ __ movl(EAX, Address(ESP, +1 * target::kWordSize));
__ LoadClassIdMayBeSmi(EDI, EAX);
// Check if left hand side is a closure. Closures are handled in the runtime.
__ cmpl(EDI, Immediate(kClosureCid));
__ j(EQUAL, normal_ir_body);
- __ movl(EAX, Address(ESP, +2 * kWordSize));
+ __ movl(EAX, Address(ESP, +2 * target::kWordSize));
__ LoadClassIdMayBeSmi(EBX, EAX);
// Check whether class ids match. If class ids don't match objects can still
@@ -1714,12 +1744,13 @@
// Check if there are no type arguments. In this case we can return true.
// Otherwise fall through into the runtime to handle comparison.
__ LoadClassById(EBX, EDI);
- __ movzxw(EBX, FieldAddress(EBX, Class::num_type_arguments_offset()));
+ __ movzxw(EBX, FieldAddress(
+ EBX, target::Class::num_type_arguments_offset_in_bytes()));
__ cmpl(EBX, Immediate(0));
__ j(NOT_EQUAL, normal_ir_body, Assembler::kNearJump);
__ Bind(&equal);
- __ LoadObject(EAX, Bool::True());
+ __ LoadObject(EAX, CastHandle<Object>(TrueObject()));
__ ret();
// Class ids are different. Check if we are comparing runtime types of
@@ -1748,16 +1779,16 @@
// Fall-through to the not equal case.
__ Bind(&not_equal);
- __ LoadObject(EAX, Bool::False());
+ __ LoadObject(EAX, CastHandle<Object>(FalseObject()));
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::String_getHashCode(Assembler* assembler,
- Label* normal_ir_body) {
- __ movl(EAX, Address(ESP, +1 * kWordSize)); // String object.
- __ movl(EAX, FieldAddress(EAX, String::hash_offset()));
+void AsmIntrinsifier::String_getHashCode(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ movl(EAX, Address(ESP, +1 * target::kWordSize)); // String object.
+ __ movl(EAX, FieldAddress(EAX, target::String::hash_offset()));
__ cmpl(EAX, Immediate(0));
__ j(EQUAL, normal_ir_body, Assembler::kNearJump);
__ ret();
@@ -1765,10 +1796,10 @@
// Hash not yet computed.
}
-void Intrinsifier::Type_getHashCode(Assembler* assembler,
- Label* normal_ir_body) {
- __ movl(EAX, Address(ESP, +1 * kWordSize)); // Type object.
- __ movl(EAX, FieldAddress(EAX, Type::hash_offset()));
+void AsmIntrinsifier::Type_getHashCode(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ movl(EAX, Address(ESP, +1 * target::kWordSize)); // Type object.
+ __ movl(EAX, FieldAddress(EAX, target::Type::hash_offset()));
__ testl(EAX, EAX);
__ j(EQUAL, normal_ir_body, Assembler::kNearJump);
__ ret();
@@ -1777,85 +1808,89 @@
}
// bool _substringMatches(int start, String other)
-void Intrinsifier::StringBaseSubstringMatches(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::StringBaseSubstringMatches(Assembler* assembler,
+ Label* normal_ir_body) {
// For precompilation, not implemented on IA32.
}
-void Intrinsifier::Object_getHash(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Object_getHash(Assembler* assembler,
+ Label* normal_ir_body) {
UNREACHABLE();
}
-void Intrinsifier::Object_setHash(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Object_setHash(Assembler* assembler,
+ Label* normal_ir_body) {
UNREACHABLE();
}
-void Intrinsifier::StringBaseCharAt(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::StringBaseCharAt(Assembler* assembler,
+ Label* normal_ir_body) {
Label try_two_byte_string;
- __ movl(EBX, Address(ESP, +1 * kWordSize)); // Index.
- __ movl(EAX, Address(ESP, +2 * kWordSize)); // String.
+ __ movl(EBX, Address(ESP, +1 * target::kWordSize)); // Index.
+ __ movl(EAX, Address(ESP, +2 * target::kWordSize)); // String.
__ testl(EBX, Immediate(kSmiTagMask));
__ j(NOT_ZERO, normal_ir_body, Assembler::kNearJump); // Non-smi index.
// Range check.
- __ cmpl(EBX, FieldAddress(EAX, String::length_offset()));
+ __ cmpl(EBX, FieldAddress(EAX, target::String::length_offset()));
// Runtime throws exception.
__ j(ABOVE_EQUAL, normal_ir_body, Assembler::kNearJump);
__ CompareClassId(EAX, kOneByteStringCid, EDI);
__ j(NOT_EQUAL, &try_two_byte_string, Assembler::kNearJump);
__ SmiUntag(EBX);
- __ movzxb(EBX, FieldAddress(EAX, EBX, TIMES_1, OneByteString::data_offset()));
- __ cmpl(EBX, Immediate(Symbols::kNumberOfOneCharCodeSymbols));
+ __ movzxb(EBX, FieldAddress(EAX, EBX, TIMES_1,
+ target::OneByteString::data_offset()));
+ __ cmpl(EBX, Immediate(target::Symbols::kNumberOfOneCharCodeSymbols));
__ j(GREATER_EQUAL, normal_ir_body);
- __ movl(EAX,
- Immediate(reinterpret_cast<uword>(Symbols::PredefinedAddress())));
+ __ movl(EAX, Immediate(SymbolsPredefinedAddress()));
__ movl(EAX, Address(EAX, EBX, TIMES_4,
- Symbols::kNullCharCodeSymbolOffset * kWordSize));
+ target::Symbols::kNullCharCodeSymbolOffset *
+ target::kWordSize));
__ ret();
__ Bind(&try_two_byte_string);
__ CompareClassId(EAX, kTwoByteStringCid, EDI);
__ j(NOT_EQUAL, normal_ir_body, Assembler::kNearJump);
ASSERT(kSmiTagShift == 1);
- __ movzxw(EBX, FieldAddress(EAX, EBX, TIMES_1, TwoByteString::data_offset()));
- __ cmpl(EBX, Immediate(Symbols::kNumberOfOneCharCodeSymbols));
+ __ movzxw(EBX, FieldAddress(EAX, EBX, TIMES_1,
+ target::TwoByteString::data_offset()));
+ __ cmpl(EBX, Immediate(target::Symbols::kNumberOfOneCharCodeSymbols));
__ j(GREATER_EQUAL, normal_ir_body);
- __ movl(EAX,
- Immediate(reinterpret_cast<uword>(Symbols::PredefinedAddress())));
+ __ movl(EAX, Immediate(SymbolsPredefinedAddress()));
__ movl(EAX, Address(EAX, EBX, TIMES_4,
- Symbols::kNullCharCodeSymbolOffset * kWordSize));
+ target::Symbols::kNullCharCodeSymbolOffset *
+ target::kWordSize));
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::StringBaseIsEmpty(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::StringBaseIsEmpty(Assembler* assembler,
+ Label* normal_ir_body) {
Label is_true;
// Get length.
- __ movl(EAX, Address(ESP, +1 * kWordSize)); // String object.
- __ movl(EAX, FieldAddress(EAX, String::length_offset()));
- __ cmpl(EAX, Immediate(Smi::RawValue(0)));
+ __ movl(EAX, Address(ESP, +1 * target::kWordSize)); // String object.
+ __ movl(EAX, FieldAddress(EAX, target::String::length_offset()));
+ __ cmpl(EAX, Immediate(target::ToRawSmi(0)));
__ j(EQUAL, &is_true, Assembler::kNearJump);
- __ LoadObject(EAX, Bool::False());
+ __ LoadObject(EAX, CastHandle<Object>(FalseObject()));
__ ret();
__ Bind(&is_true);
- __ LoadObject(EAX, Bool::True());
+ __ LoadObject(EAX, CastHandle<Object>(TrueObject()));
__ ret();
}
-void Intrinsifier::OneByteString_getHashCode(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::OneByteString_getHashCode(Assembler* assembler,
+ Label* normal_ir_body) {
Label compute_hash;
- __ movl(EBX, Address(ESP, +1 * kWordSize)); // OneByteString object.
- __ movl(EAX, FieldAddress(EBX, String::hash_offset()));
+ __ movl(EBX, Address(ESP, +1 * target::kWordSize)); // OneByteString object.
+ __ movl(EAX, FieldAddress(EBX, target::String::hash_offset()));
__ cmpl(EAX, Immediate(0));
__ j(EQUAL, &compute_hash, Assembler::kNearJump);
__ ret();
__ Bind(&compute_hash);
// Hash not yet computed, use algorithm of class StringHasher.
- __ movl(ECX, FieldAddress(EBX, String::length_offset()));
+ __ movl(ECX, FieldAddress(EBX, target::String::length_offset()));
__ SmiUntag(ECX);
__ xorl(EAX, EAX);
__ xorl(EDI, EDI);
@@ -1872,7 +1907,8 @@
// hash_ += hash_ << 10;
// hash_ ^= hash_ >> 6;
// Get one character (ch).
- __ movzxb(EDX, FieldAddress(EBX, EDI, TIMES_1, OneByteString::data_offset()));
+ __ movzxb(EDX, FieldAddress(EBX, EDI, TIMES_1,
+ target::OneByteString::data_offset()));
// EDX: ch and temporary.
__ addl(EAX, EDX);
__ movl(EDX, EAX);
@@ -1900,8 +1936,9 @@
__ shll(EDX, Immediate(15));
__ addl(EAX, EDX);
// hash_ = hash_ & ((static_cast<intptr_t>(1) << bits) - 1);
- __ andl(EAX,
- Immediate(((static_cast<intptr_t>(1) << String::kHashBits) - 1)));
+ __ andl(
+ EAX,
+ Immediate(((static_cast<intptr_t>(1) << target::String::kHashBits) - 1)));
// return hash_ == 0 ? 1 : hash_;
__ cmpl(EAX, Immediate(0));
@@ -1909,7 +1946,7 @@
__ incl(EAX);
__ Bind(&set_hash_code);
__ SmiTag(EAX);
- __ StoreIntoSmiField(FieldAddress(EBX, String::hash_offset()), EAX);
+ __ StoreIntoSmiField(FieldAddress(EBX, target::String::hash_offset()), EAX);
__ ret();
}
@@ -1929,13 +1966,14 @@
__ pushl(EDI); // Preserve length.
__ SmiUntag(EDI);
const intptr_t fixed_size_plus_alignment_padding =
- sizeof(RawString) + kObjectAlignment - 1;
+ target::String::InstanceSize() +
+ target::ObjectAlignment::kObjectAlignment - 1;
__ leal(EDI, Address(EDI, TIMES_1,
fixed_size_plus_alignment_padding)); // EDI is untagged.
- __ andl(EDI, Immediate(-kObjectAlignment));
+ __ andl(EDI, Immediate(-target::ObjectAlignment::kObjectAlignment));
const intptr_t cid = kOneByteStringCid;
- __ movl(EAX, Address(THR, Thread::top_offset()));
+ __ movl(EAX, Address(THR, target::Thread::top_offset()));
__ movl(EBX, EAX);
// EDI: allocation size.
@@ -1946,12 +1984,12 @@
// EAX: potential new object start.
// EBX: potential next object start.
// EDI: allocation size.
- __ cmpl(EBX, Address(THR, Thread::end_offset()));
+ __ cmpl(EBX, Address(THR, target::Thread::end_offset()));
__ j(ABOVE_EQUAL, &pop_and_fail);
// Successfully allocated the object(s), now update top to point to
// next object start and initialize the object.
- __ movl(Address(THR, Thread::top_offset()), EBX);
+ __ movl(Address(THR, target::Thread::top_offset()), EBX);
__ addl(EAX, Immediate(kHeapObjectTag));
NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, EDI, ECX));
@@ -1962,9 +2000,10 @@
// EDI: allocation size.
{
Label size_tag_overflow, done;
- __ cmpl(EDI, Immediate(RawObject::SizeTag::kMaxSizeTag));
+ __ cmpl(EDI, Immediate(target::RawObject::kSizeTagMaxSizeTag));
__ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
- __ shll(EDI, Immediate(RawObject::kSizeTagPos - kObjectAlignmentLog2));
+ __ shll(EDI, Immediate(target::RawObject::kTagBitsSizeTagPos -
+ target::ObjectAlignment::kObjectAlignmentLog2));
__ jmp(&done, Assembler::kNearJump);
__ Bind(&size_tag_overflow);
@@ -1972,19 +2011,18 @@
__ Bind(&done);
// Get the class index and insert it into the tags.
- uint32_t tags = 0;
- tags = RawObject::ClassIdTag::update(cid, tags);
- tags = RawObject::NewBit::update(true, tags);
+ const uint32_t tags =
+ target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
__ orl(EDI, Immediate(tags));
- __ movl(FieldAddress(EAX, String::tags_offset()), EDI); // Tags.
+ __ movl(FieldAddress(EAX, target::Object::tags_offset()), EDI); // Tags.
}
// Set the length field.
__ popl(EDI);
- __ StoreIntoObjectNoBarrier(EAX, FieldAddress(EAX, String::length_offset()),
- EDI);
+ __ StoreIntoObjectNoBarrier(
+ EAX, FieldAddress(EAX, target::String::length_offset()), EDI);
// Clear hash.
- __ ZeroInitSmiField(FieldAddress(EAX, String::hash_offset()));
+ __ ZeroInitSmiField(FieldAddress(EAX, target::String::hash_offset()));
__ jmp(ok, Assembler::kNearJump);
__ Bind(&pop_and_fail);
@@ -1996,11 +2034,11 @@
// Arg1: Start index as Smi.
// Arg2: End index as Smi.
// The indexes must be valid.
-void Intrinsifier::OneByteString_substringUnchecked(Assembler* assembler,
- Label* normal_ir_body) {
- const intptr_t kStringOffset = 3 * kWordSize;
- const intptr_t kStartIndexOffset = 2 * kWordSize;
- const intptr_t kEndIndexOffset = 1 * kWordSize;
+void AsmIntrinsifier::OneByteString_substringUnchecked(Assembler* assembler,
+ Label* normal_ir_body) {
+ const intptr_t kStringOffset = 3 * target::kWordSize;
+ const intptr_t kStartIndexOffset = 2 * target::kWordSize;
+ const intptr_t kEndIndexOffset = 1 * target::kWordSize;
Label ok;
__ movl(EAX, Address(ESP, +kStartIndexOffset));
__ movl(EDI, Address(ESP, +kEndIndexOffset));
@@ -2016,7 +2054,8 @@
__ movl(EDI, Address(ESP, +kStringOffset));
__ movl(EBX, Address(ESP, +kStartIndexOffset));
__ SmiUntag(EBX);
- __ leal(EDI, FieldAddress(EDI, EBX, TIMES_1, OneByteString::data_offset()));
+ __ leal(EDI, FieldAddress(EDI, EBX, TIMES_1,
+ target::OneByteString::data_offset()));
// EDI: Start address to copy from (untagged).
// EBX: Untagged start index.
__ movl(ECX, Address(ESP, +kEndIndexOffset));
@@ -2032,7 +2071,8 @@
__ jmp(&check, Assembler::kNearJump);
__ Bind(&loop);
__ movzxb(EBX, Address(EDI, EDX, TIMES_1, 0));
- __ movb(FieldAddress(EAX, EDX, TIMES_1, OneByteString::data_offset()), BL);
+ __ movb(FieldAddress(EAX, EDX, TIMES_1, target::OneByteString::data_offset()),
+ BL);
__ incl(EDX);
__ Bind(&check);
__ cmpl(EDX, ECX);
@@ -2041,20 +2081,21 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::OneByteStringSetAt(Assembler* assembler,
- Label* normal_ir_body) {
- __ movl(ECX, Address(ESP, +1 * kWordSize)); // Value.
- __ movl(EBX, Address(ESP, +2 * kWordSize)); // Index.
- __ movl(EAX, Address(ESP, +3 * kWordSize)); // OneByteString.
+void AsmIntrinsifier::OneByteStringSetAt(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ movl(ECX, Address(ESP, +1 * target::kWordSize)); // Value.
+ __ movl(EBX, Address(ESP, +2 * target::kWordSize)); // Index.
+ __ movl(EAX, Address(ESP, +3 * target::kWordSize)); // OneByteString.
__ SmiUntag(EBX);
__ SmiUntag(ECX);
- __ movb(FieldAddress(EAX, EBX, TIMES_1, OneByteString::data_offset()), CL);
+ __ movb(FieldAddress(EAX, EBX, TIMES_1, target::OneByteString::data_offset()),
+ CL);
__ ret();
}
-void Intrinsifier::OneByteString_allocate(Assembler* assembler,
- Label* normal_ir_body) {
- __ movl(EDI, Address(ESP, +1 * kWordSize)); // Length.
+void AsmIntrinsifier::OneByteString_allocate(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ movl(EDI, Address(ESP, +1 * target::kWordSize)); // Length.
Label ok;
TryAllocateOnebyteString(assembler, &ok, normal_ir_body, EDI);
// EDI: Start address to copy from (untagged).
@@ -2070,8 +2111,8 @@
Label* normal_ir_body,
intptr_t string_cid) {
Label is_true, is_false, loop;
- __ movl(EAX, Address(ESP, +2 * kWordSize)); // This.
- __ movl(EBX, Address(ESP, +1 * kWordSize)); // Other.
+ __ movl(EAX, Address(ESP, +2 * target::kWordSize)); // This.
+ __ movl(EBX, Address(ESP, +1 * target::kWordSize)); // Other.
// Are identical?
__ cmpl(EAX, EBX);
@@ -2084,8 +2125,8 @@
__ j(NOT_EQUAL, normal_ir_body, Assembler::kNearJump);
// Have same length?
- __ movl(EDI, FieldAddress(EAX, String::length_offset()));
- __ cmpl(EDI, FieldAddress(EBX, String::length_offset()));
+ __ movl(EDI, FieldAddress(EAX, target::String::length_offset()));
+ __ cmpl(EDI, FieldAddress(EBX, target::String::length_offset()));
__ j(NOT_EQUAL, &is_false, Assembler::kNearJump);
// Check contents, no fall-through possible.
@@ -2096,15 +2137,15 @@
__ cmpl(EDI, Immediate(0));
__ j(LESS, &is_true, Assembler::kNearJump);
if (string_cid == kOneByteStringCid) {
- __ movzxb(ECX,
- FieldAddress(EAX, EDI, TIMES_1, OneByteString::data_offset()));
- __ movzxb(EDX,
- FieldAddress(EBX, EDI, TIMES_1, OneByteString::data_offset()));
+ __ movzxb(ECX, FieldAddress(EAX, EDI, TIMES_1,
+ target::OneByteString::data_offset()));
+ __ movzxb(EDX, FieldAddress(EBX, EDI, TIMES_1,
+ target::OneByteString::data_offset()));
} else if (string_cid == kTwoByteStringCid) {
- __ movzxw(ECX,
- FieldAddress(EAX, EDI, TIMES_2, TwoByteString::data_offset()));
- __ movzxw(EDX,
- FieldAddress(EBX, EDI, TIMES_2, TwoByteString::data_offset()));
+ __ movzxw(ECX, FieldAddress(EAX, EDI, TIMES_2,
+ target::TwoByteString::data_offset()));
+ __ movzxw(EDX, FieldAddress(EBX, EDI, TIMES_2,
+ target::TwoByteString::data_offset()));
} else {
UNIMPLEMENTED();
}
@@ -2113,33 +2154,33 @@
__ jmp(&loop, Assembler::kNearJump);
__ Bind(&is_true);
- __ LoadObject(EAX, Bool::True());
+ __ LoadObject(EAX, CastHandle<Object>(TrueObject()));
__ ret();
__ Bind(&is_false);
- __ LoadObject(EAX, Bool::False());
+ __ LoadObject(EAX, CastHandle<Object>(FalseObject()));
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::OneByteString_equality(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::OneByteString_equality(Assembler* assembler,
+ Label* normal_ir_body) {
StringEquality(assembler, normal_ir_body, kOneByteStringCid);
}
-void Intrinsifier::TwoByteString_equality(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::TwoByteString_equality(Assembler* assembler,
+ Label* normal_ir_body) {
StringEquality(assembler, normal_ir_body, kTwoByteStringCid);
}
-void Intrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler,
- Label* normal_ir_body,
- bool sticky) {
+void AsmIntrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler,
+ Label* normal_ir_body,
+ bool sticky) {
if (FLAG_interpret_irregexp) return;
- static const intptr_t kRegExpParamOffset = 3 * kWordSize;
- static const intptr_t kStringParamOffset = 2 * kWordSize;
+ static const intptr_t kRegExpParamOffset = 3 * target::kWordSize;
+ static const intptr_t kStringParamOffset = 2 * target::kWordSize;
// start_index smi is located at offset 1.
// Incoming registers:
@@ -2153,90 +2194,91 @@
__ movl(EDI, Address(ESP, kStringParamOffset));
__ LoadClassId(EDI, EDI);
__ SubImmediate(EDI, Immediate(kOneByteStringCid));
- __ movl(EAX,
- FieldAddress(EBX, EDI, TIMES_4,
- RegExp::function_offset(kOneByteStringCid, sticky)));
+ __ movl(EAX, FieldAddress(
+ EBX, EDI, TIMES_4,
+ target::RegExp::function_offset(kOneByteStringCid, sticky)));
// Registers are now set up for the lazy compile stub. It expects the function
// in EAX, the argument descriptor in EDX, and IC-Data in ECX.
__ xorl(ECX, ECX);
// Tail-call the function.
- __ movl(EDI, FieldAddress(EAX, Function::entry_point_offset()));
+ __ movl(EDI, FieldAddress(EAX, target::Function::entry_point_offset()));
__ jmp(EDI);
}
// On stack: user tag (+1), return-address (+0).
-void Intrinsifier::UserTag_makeCurrent(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::UserTag_makeCurrent(Assembler* assembler,
+ Label* normal_ir_body) {
// RDI: Isolate.
__ LoadIsolate(EDI);
// EAX: Current user tag.
- __ movl(EAX, Address(EDI, Isolate::current_tag_offset()));
+ __ movl(EAX, Address(EDI, target::Isolate::current_tag_offset()));
// EAX: UserTag.
- __ movl(EBX, Address(ESP, +1 * kWordSize));
- // Set Isolate::current_tag_.
- __ movl(Address(EDI, Isolate::current_tag_offset()), EBX);
+ __ movl(EBX, Address(ESP, +1 * target::kWordSize));
+ // Set target::Isolate::current_tag_.
+ __ movl(Address(EDI, target::Isolate::current_tag_offset()), EBX);
// EAX: UserTag's tag.
- __ movl(EBX, FieldAddress(EBX, UserTag::tag_offset()));
- // Set Isolate::user_tag_.
- __ movl(Address(EDI, Isolate::user_tag_offset()), EBX);
+ __ movl(EBX, FieldAddress(EBX, target::UserTag::tag_offset()));
+ // Set target::Isolate::user_tag_.
+ __ movl(Address(EDI, target::Isolate::user_tag_offset()), EBX);
__ ret();
}
-void Intrinsifier::UserTag_defaultTag(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::UserTag_defaultTag(Assembler* assembler,
+ Label* normal_ir_body) {
__ LoadIsolate(EAX);
- __ movl(EAX, Address(EAX, Isolate::default_tag_offset()));
+ __ movl(EAX, Address(EAX, target::Isolate::default_tag_offset()));
__ ret();
}
-void Intrinsifier::Profiler_getCurrentTag(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Profiler_getCurrentTag(Assembler* assembler,
+ Label* normal_ir_body) {
__ LoadIsolate(EAX);
- __ movl(EAX, Address(EAX, Isolate::current_tag_offset()));
+ __ movl(EAX, Address(EAX, target::Isolate::current_tag_offset()));
__ ret();
}
-void Intrinsifier::Timeline_isDartStreamEnabled(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Timeline_isDartStreamEnabled(Assembler* assembler,
+ Label* normal_ir_body) {
#if !defined(SUPPORT_TIMELINE)
- __ LoadObject(EAX, Bool::False());
+ __ LoadObject(EAX, CastHandle<Object>(FalseObject()));
__ ret();
#else
Label true_label;
// Load TimelineStream*.
- __ movl(EAX, Address(THR, Thread::dart_stream_offset()));
+ __ movl(EAX, Address(THR, target::Thread::dart_stream_offset()));
// Load uintptr_t from TimelineStream*.
- __ movl(EAX, Address(EAX, TimelineStream::enabled_offset()));
+ __ movl(EAX, Address(EAX, target::TimelineStream::enabled_offset()));
__ cmpl(EAX, Immediate(0));
__ j(NOT_ZERO, &true_label, Assembler::kNearJump);
// Not enabled.
- __ LoadObject(EAX, Bool::False());
+ __ LoadObject(EAX, CastHandle<Object>(FalseObject()));
__ ret();
// Enabled.
__ Bind(&true_label);
- __ LoadObject(EAX, Bool::True());
+ __ LoadObject(EAX, CastHandle<Object>(TrueObject()));
__ ret();
#endif
}
-void Intrinsifier::ClearAsyncThreadStackTrace(Assembler* assembler,
- Label* normal_ir_body) {
- __ LoadObject(EAX, Object::null_object());
- __ movl(Address(THR, Thread::async_stack_trace_offset()), EAX);
+void AsmIntrinsifier::ClearAsyncThreadStackTrace(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ LoadObject(EAX, NullObject());
+ __ movl(Address(THR, target::Thread::async_stack_trace_offset()), EAX);
__ ret();
}
-void Intrinsifier::SetAsyncThreadStackTrace(Assembler* assembler,
- Label* normal_ir_body) {
- __ movl(Address(THR, Thread::async_stack_trace_offset()), EAX);
- __ LoadObject(EAX, Object::null_object());
+void AsmIntrinsifier::SetAsyncThreadStackTrace(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ movl(Address(THR, target::Thread::async_stack_trace_offset()), EAX);
+ __ LoadObject(EAX, NullObject());
__ ret();
}
#undef __
+} // namespace compiler
} // namespace dart
#endif // defined(TARGET_ARCH_IA32) && !defined(DART_PRECOMPILED_RUNTIME)
diff --git a/runtime/vm/compiler/intrinsifier_x64.cc b/runtime/vm/compiler/asm_intrinsifier_x64.cc
similarity index 67%
rename from runtime/vm/compiler/intrinsifier_x64.cc
rename to runtime/vm/compiler/asm_intrinsifier_x64.cc
index e313634..1c34df5 100644
--- a/runtime/vm/compiler/intrinsifier_x64.cc
+++ b/runtime/vm/compiler/asm_intrinsifier_x64.cc
@@ -1,22 +1,18 @@
-// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
+// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#include "vm/globals.h" // Needed here to get TARGET_ARCH_X64.
#if defined(TARGET_ARCH_X64) && !defined(DART_PRECOMPILED_RUNTIME)
-#include "vm/compiler/intrinsifier.h"
+#define SHOULD_NOT_INCLUDE_RUNTIME
+#include "vm/class_id.h"
+#include "vm/compiler/asm_intrinsifier.h"
#include "vm/compiler/assembler/assembler.h"
-#include "vm/compiler/backend/flow_graph_compiler.h"
-#include "vm/dart_entry.h"
-#include "vm/instructions.h"
-#include "vm/object_store.h"
-#include "vm/regexp_assembler.h"
-#include "vm/symbols.h"
-#include "vm/timeline.h"
namespace dart {
+namespace compiler {
// When entering intrinsics code:
// R10: Arguments descriptor
@@ -28,7 +24,7 @@
#define __ assembler->
-intptr_t Intrinsifier::ParameterSlotFromSp() {
+intptr_t AsmIntrinsifier::ParameterSlotFromSp() {
return 0;
}
@@ -36,7 +32,7 @@
return ((1 << reg) & CallingConventions::kCalleeSaveCpuRegisters) != 0;
}
-void Intrinsifier::IntrinsicCallPrologue(Assembler* assembler) {
+void AsmIntrinsifier::IntrinsicCallPrologue(Assembler* assembler) {
ASSERT(IsABIPreservedRegister(CODE_REG));
ASSERT(!IsABIPreservedRegister(ARGS_DESC_REG));
ASSERT(IsABIPreservedRegister(CALLEE_SAVED_TEMP));
@@ -47,49 +43,50 @@
assembler->movq(CALLEE_SAVED_TEMP, ARGS_DESC_REG);
}
-void Intrinsifier::IntrinsicCallEpilogue(Assembler* assembler) {
+void AsmIntrinsifier::IntrinsicCallEpilogue(Assembler* assembler) {
assembler->Comment("IntrinsicCallEpilogue");
assembler->movq(ARGS_DESC_REG, CALLEE_SAVED_TEMP);
}
// Allocate a GrowableObjectArray using the backing array specified.
// On stack: type argument (+2), data (+1), return-address (+0).
-void Intrinsifier::GrowableArray_Allocate(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::GrowableArray_Allocate(Assembler* assembler,
+ Label* normal_ir_body) {
// This snippet of inlined code uses the following registers:
// RAX, RCX, R13
// and the newly allocated object is returned in RAX.
- const intptr_t kTypeArgumentsOffset = 2 * kWordSize;
- const intptr_t kArrayOffset = 1 * kWordSize;
+ const intptr_t kTypeArgumentsOffset = 2 * target::kWordSize;
+ const intptr_t kArrayOffset = 1 * target::kWordSize;
// Try allocating in new space.
- const Class& cls = Class::Handle(
- Isolate::Current()->object_store()->growable_object_array_class());
+ const Class& cls = GrowableObjectArrayClass();
__ TryAllocate(cls, normal_ir_body, Assembler::kFarJump, RAX, R13);
// Store backing array object in growable array object.
__ movq(RCX, Address(RSP, kArrayOffset)); // data argument.
// RAX is new, no barrier needed.
__ StoreIntoObjectNoBarrier(
- RAX, FieldAddress(RAX, GrowableObjectArray::data_offset()), RCX);
+ RAX, FieldAddress(RAX, target::GrowableObjectArray::data_offset()), RCX);
// RAX: new growable array object start as a tagged pointer.
// Store the type argument field in the growable array object.
__ movq(RCX, Address(RSP, kTypeArgumentsOffset)); // type argument.
__ StoreIntoObjectNoBarrier(
- RAX, FieldAddress(RAX, GrowableObjectArray::type_arguments_offset()),
+ RAX,
+ FieldAddress(RAX, target::GrowableObjectArray::type_arguments_offset()),
RCX);
// Set the length field in the growable array object to 0.
- __ ZeroInitSmiField(FieldAddress(RAX, GrowableObjectArray::length_offset()));
+ __ ZeroInitSmiField(
+ FieldAddress(RAX, target::GrowableObjectArray::length_offset()));
__ ret(); // returns the newly allocated object in RAX.
__ Bind(normal_ir_body);
}
-#define TYPED_ARRAY_ALLOCATION(type_name, cid, max_len, scale_factor) \
+#define TYPED_ARRAY_ALLOCATION(cid, max_len, scale_factor) \
Label fall_through; \
- const intptr_t kArrayLengthStackOffset = 1 * kWordSize; \
+ const intptr_t kArrayLengthStackOffset = 1 * target::kWordSize; \
NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, normal_ir_body, false)); \
__ movq(RDI, Address(RSP, kArrayLengthStackOffset)); /* Array length. */ \
/* Check that length is a positive Smi. */ \
@@ -111,10 +108,11 @@
scale_factor = TIMES_8; \
} \
const intptr_t fixed_size_plus_alignment_padding = \
- sizeof(Raw##type_name) + kObjectAlignment - 1; \
+ target::TypedData::InstanceSize() + \
+ target::ObjectAlignment::kObjectAlignment - 1; \
__ leaq(RDI, Address(RDI, scale_factor, fixed_size_plus_alignment_padding)); \
- __ andq(RDI, Immediate(-kObjectAlignment)); \
- __ movq(RAX, Address(THR, Thread::top_offset())); \
+ __ andq(RDI, Immediate(-target::ObjectAlignment::kObjectAlignment)); \
+ __ movq(RAX, Address(THR, target::Thread::top_offset())); \
__ movq(RCX, RAX); \
\
/* RDI: allocation size. */ \
@@ -125,12 +123,12 @@
/* RAX: potential new object start. */ \
/* RCX: potential next object start. */ \
/* RDI: allocation size. */ \
- __ cmpq(RCX, Address(THR, Thread::end_offset())); \
+ __ cmpq(RCX, Address(THR, target::Thread::end_offset())); \
__ j(ABOVE_EQUAL, normal_ir_body); \
\
/* Successfully allocated the object(s), now update top to point to */ \
/* next object start and initialize the object. */ \
- __ movq(Address(THR, Thread::top_offset()), RCX); \
+ __ movq(Address(THR, target::Thread::top_offset()), RCX); \
__ addq(RAX, Immediate(kHeapObjectTag)); \
NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, RDI)); \
/* Initialize the tags. */ \
@@ -140,9 +138,10 @@
/* R13: scratch register. */ \
{ \
Label size_tag_overflow, done; \
- __ cmpq(RDI, Immediate(RawObject::SizeTag::kMaxSizeTag)); \
+ __ cmpq(RDI, Immediate(target::RawObject::kSizeTagMaxSizeTag)); \
__ j(ABOVE, &size_tag_overflow, Assembler::kNearJump); \
- __ shlq(RDI, Immediate(RawObject::kSizeTagPos - kObjectAlignmentLog2)); \
+ __ shlq(RDI, Immediate(target::RawObject::kTagBitsSizeTagPos - \
+ target::ObjectAlignment::kObjectAlignmentLog2)); \
__ jmp(&done, Assembler::kNearJump); \
\
__ Bind(&size_tag_overflow); \
@@ -150,18 +149,18 @@
__ Bind(&done); \
\
/* Get the class index and insert it into the tags. */ \
- uint32_t tags = 0; \
- tags = RawObject::ClassIdTag::update(cid, tags); \
- tags = RawObject::NewBit::update(true, tags); \
+ uint32_t tags = \
+ target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0); \
__ orq(RDI, Immediate(tags)); \
- __ movq(FieldAddress(RAX, type_name::tags_offset()), RDI); /* Tags. */ \
+ __ movq(FieldAddress(RAX, target::Object::tags_offset()), \
+ RDI); /* Tags. */ \
} \
/* Set the length field. */ \
/* RAX: new object start as a tagged pointer. */ \
/* RCX: new object end address. */ \
__ movq(RDI, Address(RSP, kArrayLengthStackOffset)); /* Array length. */ \
__ StoreIntoObjectNoBarrier( \
- RAX, FieldAddress(RAX, type_name::length_offset()), RDI); \
+ RAX, FieldAddress(RAX, target::TypedData::length_offset()), RDI); \
/* Initialize all array elements to 0. */ \
/* RAX: new object start as a tagged pointer. */ \
/* RCX: new object end address. */ \
@@ -169,13 +168,13 @@
/* RBX: scratch register. */ \
/* data area to be initialized. */ \
__ xorq(RBX, RBX); /* Zero. */ \
- __ leaq(RDI, FieldAddress(RAX, sizeof(Raw##type_name))); \
+ __ leaq(RDI, FieldAddress(RAX, target::TypedData::InstanceSize())); \
Label done, init_loop; \
__ Bind(&init_loop); \
__ cmpq(RDI, RCX); \
__ j(ABOVE_EQUAL, &done, Assembler::kNearJump); \
__ movq(Address(RDI, 0), RBX); \
- __ addq(RDI, Immediate(kWordSize)); \
+ __ addq(RDI, Immediate(target::kWordSize)); \
__ jmp(&init_loop, Assembler::kNearJump); \
__ Bind(&done); \
\
@@ -200,12 +199,12 @@
}
#define TYPED_DATA_ALLOCATOR(clazz) \
- void Intrinsifier::TypedData_##clazz##_factory(Assembler* assembler, \
- Label* normal_ir_body) { \
- intptr_t size = TypedData::ElementSizeInBytes(kTypedData##clazz##Cid); \
- intptr_t max_len = TypedData::MaxNewSpaceElements(kTypedData##clazz##Cid); \
+ void AsmIntrinsifier::TypedData_##clazz##_factory(Assembler* assembler, \
+ Label* normal_ir_body) { \
+ intptr_t size = TypedDataElementSizeInBytes(kTypedData##clazz##Cid); \
+ intptr_t max_len = TypedDataMaxNewSpaceElements(kTypedData##clazz##Cid); \
ScaleFactor scale = GetScaleFactor(size); \
- TYPED_ARRAY_ALLOCATION(TypedData, kTypedData##clazz##Cid, max_len, scale); \
+ TYPED_ARRAY_ALLOCATION(kTypedData##clazz##Cid, max_len, scale); \
}
CLASS_LIST_TYPED_DATA(TYPED_DATA_ALLOCATOR)
#undef TYPED_DATA_ALLOCATOR
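// As a concrete illustration of the expansion above (a sketch; Uint8Array is
// assumed here to be one of the CLASS_LIST_TYPED_DATA entries): the macro
// emits AsmIntrinsifier::TypedData_Uint8Array_factory, which computes
// size = 1 byte per element, looks up the new-space element limit for
// kTypedDataUint8ArrayCid, selects TIMES_1 as the scale factor, and expands
// into TYPED_ARRAY_ALLOCATION(kTypedDataUint8ArrayCid, max_len, TIMES_1).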
@@ -213,44 +212,44 @@
// Tests if the two topmost arguments are Smis; jumps to label not_smi if not.
// Topmost argument is in RAX.
static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) {
- __ movq(RAX, Address(RSP, +1 * kWordSize));
- __ movq(RCX, Address(RSP, +2 * kWordSize));
+ __ movq(RAX, Address(RSP, +1 * target::kWordSize));
+ __ movq(RCX, Address(RSP, +2 * target::kWordSize));
__ orq(RCX, RAX);
__ testq(RCX, Immediate(kSmiTagMask));
__ j(NOT_ZERO, not_smi);
}
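// A quick worked example (sketch, assuming the usual Smi encoding with
// kSmiTag == 0, kSmiTagShift == 1 and kSmiTagMask == 1): the Smi values 3
// and 5 are tagged as 6 (0b0110) and 10 (0b1010); 6 | 10 = 14 and
// 14 & 1 == 0, so the branch is not taken. A heap-object pointer has bit 0
// set, so OR-ing one in makes the masked test non-zero and the NOT_ZERO jump
// to not_smi is taken.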
-void Intrinsifier::Integer_addFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_addFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
TestBothArgumentsSmis(assembler, normal_ir_body);
// RAX contains right argument.
- __ addq(RAX, Address(RSP, +2 * kWordSize));
+ __ addq(RAX, Address(RSP, +2 * target::kWordSize));
__ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
// Result is in RAX.
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_add(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_add(Assembler* assembler, Label* normal_ir_body) {
Integer_addFromInteger(assembler, normal_ir_body);
}
-void Intrinsifier::Integer_subFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_subFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
TestBothArgumentsSmis(assembler, normal_ir_body);
// RAX contains right argument, which is the actual minuend of subtraction.
- __ subq(RAX, Address(RSP, +2 * kWordSize));
+ __ subq(RAX, Address(RSP, +2 * target::kWordSize));
__ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
// Result is in RAX.
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_sub(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_sub(Assembler* assembler, Label* normal_ir_body) {
TestBothArgumentsSmis(assembler, normal_ir_body);
// RAX contains right argument, which is the actual subtrahend of subtraction.
__ movq(RCX, RAX);
- __ movq(RAX, Address(RSP, +2 * kWordSize));
+ __ movq(RAX, Address(RSP, +2 * target::kWordSize));
__ subq(RAX, RCX);
__ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
// Result is in RAX.
@@ -258,20 +257,20 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_mulFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_mulFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
TestBothArgumentsSmis(assembler, normal_ir_body);
// RAX is the right argument.
ASSERT(kSmiTag == 0); // Adjust code below if not the case.
__ SmiUntag(RAX);
- __ imulq(RAX, Address(RSP, +2 * kWordSize));
+ __ imulq(RAX, Address(RSP, +2 * target::kWordSize));
__ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
// Result is in RAX.
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_mul(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_mul(Assembler* assembler, Label* normal_ir_body) {
Integer_mulFromInteger(assembler, normal_ir_body);
}
@@ -347,11 +346,11 @@
// res = res + right;
// }
// }
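// Worked example of the fix-up above (sketch): for left = -7 and right = 3
// the truncating (C-style) remainder is -1; since it is negative and
// right > 0, right is added, giving 2 == -7 % 3 in Dart. For right = -3 the
// remainder is again -1 and right is subtracted, also giving 2.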
-void Intrinsifier::Integer_moduloFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_moduloFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
Label negative_result;
TestBothArgumentsSmis(assembler, normal_ir_body);
- __ movq(RCX, Address(RSP, +2 * kWordSize));
+ __ movq(RCX, Address(RSP, +2 * target::kWordSize));
// RAX: Tagged left (dividend).
// RCX: Tagged right (divisor).
__ cmpq(RCX, Immediate(0));
@@ -381,15 +380,16 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_truncDivide(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_truncDivide(Assembler* assembler,
+ Label* normal_ir_body) {
Label not_32bit;
TestBothArgumentsSmis(assembler, normal_ir_body);
// RAX: right argument (divisor)
__ cmpq(RAX, Immediate(0));
__ j(EQUAL, normal_ir_body, Assembler::kNearJump);
__ movq(RCX, RAX);
- __ movq(RAX, Address(RSP, +2 * kWordSize)); // Left argument (dividend).
+ __ movq(RAX,
+ Address(RSP, +2 * target::kWordSize)); // Left argument (dividend).
// Check if both operands fit into 32bits as idiv with 64bit operands
// requires twice as many cycles and has much higher latency. We are checking
@@ -428,8 +428,9 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_negate(Assembler* assembler, Label* normal_ir_body) {
- __ movq(RAX, Address(RSP, +1 * kWordSize));
+void AsmIntrinsifier::Integer_negate(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ movq(RAX, Address(RSP, +1 * target::kWordSize));
__ testq(RAX, Immediate(kSmiTagMask));
__ j(NOT_ZERO, normal_ir_body, Assembler::kNearJump); // Non-smi value.
__ negq(RAX);
@@ -439,60 +440,63 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_bitAndFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_bitAndFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
TestBothArgumentsSmis(assembler, normal_ir_body);
// RAX is the right argument.
- __ andq(RAX, Address(RSP, +2 * kWordSize));
+ __ andq(RAX, Address(RSP, +2 * target::kWordSize));
// Result is in RAX.
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_bitAnd(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_bitAnd(Assembler* assembler,
+ Label* normal_ir_body) {
Integer_bitAndFromInteger(assembler, normal_ir_body);
}
-void Intrinsifier::Integer_bitOrFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_bitOrFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
TestBothArgumentsSmis(assembler, normal_ir_body);
// RAX is the right argument.
- __ orq(RAX, Address(RSP, +2 * kWordSize));
+ __ orq(RAX, Address(RSP, +2 * target::kWordSize));
// Result is in RAX.
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_bitOr(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_bitOr(Assembler* assembler,
+ Label* normal_ir_body) {
Integer_bitOrFromInteger(assembler, normal_ir_body);
}
-void Intrinsifier::Integer_bitXorFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_bitXorFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
TestBothArgumentsSmis(assembler, normal_ir_body);
// RAX is the right argument.
- __ xorq(RAX, Address(RSP, +2 * kWordSize));
+ __ xorq(RAX, Address(RSP, +2 * target::kWordSize));
// Result is in RAX.
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_bitXor(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_bitXor(Assembler* assembler,
+ Label* normal_ir_body) {
Integer_bitXorFromInteger(assembler, normal_ir_body);
}
-void Intrinsifier::Integer_shl(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_shl(Assembler* assembler, Label* normal_ir_body) {
ASSERT(kSmiTagShift == 1);
ASSERT(kSmiTag == 0);
Label overflow;
TestBothArgumentsSmis(assembler, normal_ir_body);
// Shift value is in RAX. Compare with tagged Smi.
- __ cmpq(RAX, Immediate(Smi::RawValue(Smi::kBits)));
+ __ cmpq(RAX, Immediate(target::ToRawSmi(target::Smi::kBits)));
__ j(ABOVE_EQUAL, normal_ir_body, Assembler::kNearJump);
__ SmiUntag(RAX);
- __ movq(RCX, RAX); // Shift amount must be in RCX.
- __ movq(RAX, Address(RSP, +2 * kWordSize)); // Value.
+ __ movq(RCX, RAX); // Shift amount must be in RCX.
+ __ movq(RAX, Address(RSP, +2 * target::kWordSize)); // Value.
// Overflow test - all the shifted-out bits must be same as the sign bit.
__ movq(RDI, RAX);
@@ -518,78 +522,78 @@
Label true_label;
TestBothArgumentsSmis(assembler, normal_ir_body);
// RAX contains the right argument.
- __ cmpq(Address(RSP, +2 * kWordSize), RAX);
+ __ cmpq(Address(RSP, +2 * target::kWordSize), RAX);
__ j(true_condition, &true_label, Assembler::kNearJump);
- __ LoadObject(RAX, Bool::False());
+ __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
__ ret();
__ Bind(&true_label);
- __ LoadObject(RAX, Bool::True());
+ __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_lessThan(Assembler* assembler,
- Label* normal_ir_body) {
- CompareIntegers(assembler, normal_ir_body, LESS);
-}
-
-void Intrinsifier::Integer_greaterThanFromInt(Assembler* assembler,
- Label* normal_ir_body) {
- CompareIntegers(assembler, normal_ir_body, LESS);
-}
-
-void Intrinsifier::Integer_greaterThan(Assembler* assembler,
+void AsmIntrinsifier::Integer_lessThan(Assembler* assembler,
Label* normal_ir_body) {
+ CompareIntegers(assembler, normal_ir_body, LESS);
+}
+
+void AsmIntrinsifier::Integer_greaterThanFromInt(Assembler* assembler,
+ Label* normal_ir_body) {
+ CompareIntegers(assembler, normal_ir_body, LESS);
+}
+
+void AsmIntrinsifier::Integer_greaterThan(Assembler* assembler,
+ Label* normal_ir_body) {
CompareIntegers(assembler, normal_ir_body, GREATER);
}
-void Intrinsifier::Integer_lessEqualThan(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_lessEqualThan(Assembler* assembler,
+ Label* normal_ir_body) {
CompareIntegers(assembler, normal_ir_body, LESS_EQUAL);
}
-void Intrinsifier::Integer_greaterEqualThan(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_greaterEqualThan(Assembler* assembler,
+ Label* normal_ir_body) {
CompareIntegers(assembler, normal_ir_body, GREATER_EQUAL);
}
// This is called for Smi and Mint receivers. The right argument
// can be Smi, Mint or double.
-void Intrinsifier::Integer_equalToInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_equalToInteger(Assembler* assembler,
+ Label* normal_ir_body) {
Label true_label, check_for_mint;
const intptr_t kReceiverOffset = 2;
const intptr_t kArgumentOffset = 1;
// For integer receiver '===' check first.
- __ movq(RAX, Address(RSP, +kArgumentOffset * kWordSize));
- __ movq(RCX, Address(RSP, +kReceiverOffset * kWordSize));
+ __ movq(RAX, Address(RSP, +kArgumentOffset * target::kWordSize));
+ __ movq(RCX, Address(RSP, +kReceiverOffset * target::kWordSize));
__ cmpq(RAX, RCX);
__ j(EQUAL, &true_label, Assembler::kNearJump);
__ orq(RAX, RCX);
__ testq(RAX, Immediate(kSmiTagMask));
__ j(NOT_ZERO, &check_for_mint, Assembler::kNearJump);
// Both arguments are smi, '===' is good enough.
- __ LoadObject(RAX, Bool::False());
+ __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
__ ret();
__ Bind(&true_label);
- __ LoadObject(RAX, Bool::True());
+ __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
__ ret();
// At least one of the arguments was not Smi.
Label receiver_not_smi;
__ Bind(&check_for_mint);
- __ movq(RAX, Address(RSP, +kReceiverOffset * kWordSize));
+ __ movq(RAX, Address(RSP, +kReceiverOffset * target::kWordSize));
__ testq(RAX, Immediate(kSmiTagMask));
__ j(NOT_ZERO, &receiver_not_smi);
// Left (receiver) is Smi, return false if right is not Double.
// Note that an instance of Mint never contains a value that can be
// represented by Smi.
- __ movq(RAX, Address(RSP, +kArgumentOffset * kWordSize));
+ __ movq(RAX, Address(RSP, +kArgumentOffset * target::kWordSize));
__ CompareClassId(RAX, kDoubleCid);
__ j(EQUAL, normal_ir_body);
- __ LoadObject(RAX, Bool::False());
+ __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
__ ret();
__ Bind(&receiver_not_smi);
@@ -597,22 +601,23 @@
__ CompareClassId(RAX, kMintCid);
__ j(NOT_EQUAL, normal_ir_body);
// Receiver is Mint, return false if right is Smi.
- __ movq(RAX, Address(RSP, +kArgumentOffset * kWordSize));
+ __ movq(RAX, Address(RSP, +kArgumentOffset * target::kWordSize));
__ testq(RAX, Immediate(kSmiTagMask));
__ j(NOT_ZERO, normal_ir_body);
// Smi == Mint -> false.
- __ LoadObject(RAX, Bool::False());
+ __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
__ ret();
// TODO(srdjan): Implement Mint == Mint comparison.
__ Bind(normal_ir_body);
}
-void Intrinsifier::Integer_equal(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_equal(Assembler* assembler,
+ Label* normal_ir_body) {
Integer_equalToInteger(assembler, normal_ir_body);
}
-void Intrinsifier::Integer_sar(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Integer_sar(Assembler* assembler, Label* normal_ir_body) {
Label shift_count_ok;
TestBothArgumentsSmis(assembler, normal_ir_body);
const Immediate& count_limit = Immediate(0x3F);
@@ -627,9 +632,9 @@
__ j(LESS_EQUAL, &shift_count_ok, Assembler::kNearJump);
__ movq(RAX, count_limit);
__ Bind(&shift_count_ok);
- __ movq(RCX, RAX); // Shift amount must be in RCX.
- __ movq(RAX, Address(RSP, +2 * kWordSize)); // Value.
- __ SmiUntag(RAX); // Value.
+ __ movq(RCX, RAX); // Shift amount must be in RCX.
+ __ movq(RAX, Address(RSP, +2 * target::kWordSize)); // Value.
+ __ SmiUntag(RAX); // Value.
__ sarq(RAX, RCX);
__ SmiTag(RAX);
__ ret();
@@ -637,16 +642,18 @@
}
// Argument is Smi (receiver).
-void Intrinsifier::Smi_bitNegate(Assembler* assembler, Label* normal_ir_body) {
- __ movq(RAX, Address(RSP, +1 * kWordSize)); // Index.
+void AsmIntrinsifier::Smi_bitNegate(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ movq(RAX, Address(RSP, +1 * target::kWordSize)); // Receiver.
__ notq(RAX);
__ andq(RAX, Immediate(~kSmiTagMask)); // Remove inverted smi-tag.
__ ret();
}
-void Intrinsifier::Smi_bitLength(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Smi_bitLength(Assembler* assembler,
+ Label* normal_ir_body) {
ASSERT(kSmiTagShift == 1);
- __ movq(RAX, Address(RSP, +1 * kWordSize)); // Index.
+ __ movq(RAX, Address(RSP, +1 * target::kWordSize)); // Receiver.
// XOR with sign bit to complement bits if value is negative.
__ movq(RCX, RAX);
__ sarq(RCX, Immediate(63)); // All 0 or all 1.
@@ -659,27 +666,29 @@
__ ret();
}
-void Intrinsifier::Smi_bitAndFromSmi(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Smi_bitAndFromSmi(Assembler* assembler,
+ Label* normal_ir_body) {
Integer_bitAndFromInteger(assembler, normal_ir_body);
}
-void Intrinsifier::Bigint_lsh(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Bigint_lsh(Assembler* assembler, Label* normal_ir_body) {
// static void _lsh(Uint32List x_digits, int x_used, int n,
// Uint32List r_digits)
- __ movq(RDI, Address(RSP, 4 * kWordSize)); // x_digits
- __ movq(R8, Address(RSP, 3 * kWordSize)); // x_used is Smi
+ __ movq(RDI, Address(RSP, 4 * target::kWordSize)); // x_digits
+ __ movq(R8, Address(RSP, 3 * target::kWordSize)); // x_used is Smi
__ subq(R8, Immediate(2)); // x_used > 0, Smi. R8 = x_used - 1, round up.
__ sarq(R8, Immediate(2)); // R8 + 1 = number of digit pairs to read.
- __ movq(RCX, Address(RSP, 2 * kWordSize)); // n is Smi
+ __ movq(RCX, Address(RSP, 2 * target::kWordSize)); // n is Smi
__ SmiUntag(RCX);
- __ movq(RBX, Address(RSP, 1 * kWordSize)); // r_digits
+ __ movq(RBX, Address(RSP, 1 * target::kWordSize)); // r_digits
__ movq(RSI, RCX);
__ sarq(RSI, Immediate(6)); // RSI = n ~/ (2*_DIGIT_BITS).
- __ leaq(RBX, FieldAddress(RBX, RSI, TIMES_8, TypedData::data_offset()));
+ __ leaq(RBX,
+ FieldAddress(RBX, RSI, TIMES_8, target::TypedData::data_offset()));
__ xorq(RAX, RAX); // RAX = 0.
- __ movq(RDX, FieldAddress(RDI, R8, TIMES_8, TypedData::data_offset()));
+ __ movq(RDX,
+ FieldAddress(RDI, R8, TIMES_8, target::TypedData::data_offset()));
__ shldq(RAX, RDX, RCX);
__ movq(Address(RBX, R8, TIMES_8, 2 * kBytesPerBigIntDigit), RAX);
Label last;
@@ -688,9 +697,9 @@
Label loop;
__ Bind(&loop);
__ movq(RAX, RDX);
- __ movq(RDX,
- FieldAddress(RDI, R8, TIMES_8,
- TypedData::data_offset() - 2 * kBytesPerBigIntDigit));
+ __ movq(RDX, FieldAddress(RDI, R8, TIMES_8,
+ target::TypedData::data_offset() -
+ 2 * kBytesPerBigIntDigit));
__ shldq(RAX, RDX, RCX);
__ movq(Address(RBX, R8, TIMES_8, 0), RAX);
__ decq(R8);
@@ -698,26 +707,28 @@
__ Bind(&last);
__ shldq(RDX, R8, RCX); // R8 == 0.
__ movq(Address(RBX, 0), RDX);
- __ LoadObject(RAX, Object::null_object());
+ __ LoadObject(RAX, NullObject());
__ ret();
}
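// A small worked example for the shift above (sketch): the 32-bit digits are
// processed as 64-bit pairs, so for n = 100 the pair offset is n ~/ 64 = 1
// (results land one digit pair higher in r_digits) and shldq shifts by
// 100 & 63 = 36 bits within each pair, since the hardware masks a 64-bit
// shift count to its low 6 bits.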
-void Intrinsifier::Bigint_rsh(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Bigint_rsh(Assembler* assembler, Label* normal_ir_body) {
// static void _rsh(Uint32List x_digits, int x_used, int n,
// Uint32List r_digits)
- __ movq(RDI, Address(RSP, 4 * kWordSize)); // x_digits
- __ movq(RCX, Address(RSP, 2 * kWordSize)); // n is Smi
+ __ movq(RDI, Address(RSP, 4 * target::kWordSize)); // x_digits
+ __ movq(RCX, Address(RSP, 2 * target::kWordSize)); // n is Smi
__ SmiUntag(RCX);
- __ movq(RBX, Address(RSP, 1 * kWordSize)); // r_digits
+ __ movq(RBX, Address(RSP, 1 * target::kWordSize)); // r_digits
__ movq(RDX, RCX);
- __ sarq(RDX, Immediate(6)); // RDX = n ~/ (2*_DIGIT_BITS).
- __ movq(RSI, Address(RSP, 3 * kWordSize)); // x_used is Smi
+ __ sarq(RDX, Immediate(6)); // RDX = n ~/ (2*_DIGIT_BITS).
+ __ movq(RSI, Address(RSP, 3 * target::kWordSize)); // x_used is Smi
__ subq(RSI, Immediate(2)); // x_used > 0, Smi. RSI = x_used - 1, round up.
__ sarq(RSI, Immediate(2));
- __ leaq(RDI, FieldAddress(RDI, RSI, TIMES_8, TypedData::data_offset()));
+ __ leaq(RDI,
+ FieldAddress(RDI, RSI, TIMES_8, target::TypedData::data_offset()));
__ subq(RSI, RDX); // RSI + 1 = number of digit pairs to read.
- __ leaq(RBX, FieldAddress(RBX, RSI, TIMES_8, TypedData::data_offset()));
+ __ leaq(RBX,
+ FieldAddress(RBX, RSI, TIMES_8, target::TypedData::data_offset()));
__ negq(RSI);
__ movq(RDX, Address(RDI, RSI, TIMES_8, 0));
Label last;
@@ -734,24 +745,25 @@
__ Bind(&last);
__ shrdq(RDX, RSI, RCX); // RSI == 0.
__ movq(Address(RBX, 0), RDX);
- __ LoadObject(RAX, Object::null_object());
+ __ LoadObject(RAX, NullObject());
__ ret();
}
-void Intrinsifier::Bigint_absAdd(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Bigint_absAdd(Assembler* assembler,
+ Label* normal_ir_body) {
// static void _absAdd(Uint32List digits, int used,
// Uint32List a_digits, int a_used,
// Uint32List r_digits)
- __ movq(RDI, Address(RSP, 5 * kWordSize)); // digits
- __ movq(R8, Address(RSP, 4 * kWordSize)); // used is Smi
+ __ movq(RDI, Address(RSP, 5 * target::kWordSize)); // digits
+ __ movq(R8, Address(RSP, 4 * target::kWordSize)); // used is Smi
__ addq(R8, Immediate(2)); // used > 0, Smi. R8 = used + 1, round up.
__ sarq(R8, Immediate(2)); // R8 = number of digit pairs to process.
- __ movq(RSI, Address(RSP, 3 * kWordSize)); // a_digits
- __ movq(RCX, Address(RSP, 2 * kWordSize)); // a_used is Smi
+ __ movq(RSI, Address(RSP, 3 * target::kWordSize)); // a_digits
+ __ movq(RCX, Address(RSP, 2 * target::kWordSize)); // a_used is Smi
__ addq(RCX, Immediate(2)); // a_used > 0, Smi. RCX = a_used + 1, round up.
__ sarq(RCX, Immediate(2)); // RCX = number of digit pairs to process.
- __ movq(RBX, Address(RSP, 1 * kWordSize)); // r_digits
+ __ movq(RBX, Address(RSP, 1 * target::kWordSize)); // r_digits
// Precompute 'used - a_used' now so that carry flag is not lost later.
__ subq(R8, RCX);
@@ -761,9 +773,12 @@
Label add_loop;
__ Bind(&add_loop);
// Loop (a_used+1)/2 times, RCX > 0.
- __ movq(RAX, FieldAddress(RDI, RDX, TIMES_8, TypedData::data_offset()));
- __ adcq(RAX, FieldAddress(RSI, RDX, TIMES_8, TypedData::data_offset()));
- __ movq(FieldAddress(RBX, RDX, TIMES_8, TypedData::data_offset()), RAX);
+ __ movq(RAX,
+ FieldAddress(RDI, RDX, TIMES_8, target::TypedData::data_offset()));
+ __ adcq(RAX,
+ FieldAddress(RSI, RDX, TIMES_8, target::TypedData::data_offset()));
+ __ movq(FieldAddress(RBX, RDX, TIMES_8, target::TypedData::data_offset()),
+ RAX);
__ incq(RDX); // Does not affect carry flag.
__ decq(RCX); // Does not affect carry flag.
__ j(NOT_ZERO, &add_loop, Assembler::kNearJump);
@@ -775,9 +790,11 @@
Label carry_loop;
__ Bind(&carry_loop);
// Loop (used+1)/2 - (a_used+1)/2 times, R8 > 0.
- __ movq(RAX, FieldAddress(RDI, RDX, TIMES_8, TypedData::data_offset()));
+ __ movq(RAX,
+ FieldAddress(RDI, RDX, TIMES_8, target::TypedData::data_offset()));
__ adcq(RAX, Immediate(0));
- __ movq(FieldAddress(RBX, RDX, TIMES_8, TypedData::data_offset()), RAX);
+ __ movq(FieldAddress(RBX, RDX, TIMES_8, target::TypedData::data_offset()),
+ RAX);
__ incq(RDX); // Does not affect carry flag.
__ decq(R8); // Does not affect carry flag.
__ j(NOT_ZERO, &carry_loop, Assembler::kNearJump);
@@ -785,28 +802,29 @@
__ Bind(&last_carry);
Label done;
__ j(NOT_CARRY, &done);
- __ movq(FieldAddress(RBX, RDX, TIMES_8, TypedData::data_offset()),
+ __ movq(FieldAddress(RBX, RDX, TIMES_8, target::TypedData::data_offset()),
Immediate(1));
__ Bind(&done);
- __ LoadObject(RAX, Object::null_object());
+ __ LoadObject(RAX, NullObject());
__ ret();
}
-void Intrinsifier::Bigint_absSub(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Bigint_absSub(Assembler* assembler,
+ Label* normal_ir_body) {
// static void _absSub(Uint32List digits, int used,
// Uint32List a_digits, int a_used,
// Uint32List r_digits)
- __ movq(RDI, Address(RSP, 5 * kWordSize)); // digits
- __ movq(R8, Address(RSP, 4 * kWordSize)); // used is Smi
+ __ movq(RDI, Address(RSP, 5 * target::kWordSize)); // digits
+ __ movq(R8, Address(RSP, 4 * target::kWordSize)); // used is Smi
__ addq(R8, Immediate(2)); // used > 0, Smi. R8 = used + 1, round up.
__ sarq(R8, Immediate(2)); // R8 = number of digit pairs to process.
- __ movq(RSI, Address(RSP, 3 * kWordSize)); // a_digits
- __ movq(RCX, Address(RSP, 2 * kWordSize)); // a_used is Smi
+ __ movq(RSI, Address(RSP, 3 * target::kWordSize)); // a_digits
+ __ movq(RCX, Address(RSP, 2 * target::kWordSize)); // a_used is Smi
__ addq(RCX, Immediate(2)); // a_used > 0, Smi. RCX = a_used + 1, round up.
__ sarq(RCX, Immediate(2)); // RCX = number of digit pairs to process.
- __ movq(RBX, Address(RSP, 1 * kWordSize)); // r_digits
+ __ movq(RBX, Address(RSP, 1 * target::kWordSize)); // r_digits
// Precompute 'used - a_used' now so that carry flag is not lost later.
__ subq(R8, RCX);
@@ -816,9 +834,12 @@
Label sub_loop;
__ Bind(&sub_loop);
// Loop (a_used+1)/2 times, RCX > 0.
- __ movq(RAX, FieldAddress(RDI, RDX, TIMES_8, TypedData::data_offset()));
- __ sbbq(RAX, FieldAddress(RSI, RDX, TIMES_8, TypedData::data_offset()));
- __ movq(FieldAddress(RBX, RDX, TIMES_8, TypedData::data_offset()), RAX);
+ __ movq(RAX,
+ FieldAddress(RDI, RDX, TIMES_8, target::TypedData::data_offset()));
+ __ sbbq(RAX,
+ FieldAddress(RSI, RDX, TIMES_8, target::TypedData::data_offset()));
+ __ movq(FieldAddress(RBX, RDX, TIMES_8, target::TypedData::data_offset()),
+ RAX);
__ incq(RDX); // Does not affect carry flag.
__ decq(RCX); // Does not affect carry flag.
__ j(NOT_ZERO, &sub_loop, Assembler::kNearJump);
@@ -830,19 +851,22 @@
Label carry_loop;
__ Bind(&carry_loop);
// Loop (used+1)/2 - (a_used+1)/2 times, R8 > 0.
- __ movq(RAX, FieldAddress(RDI, RDX, TIMES_8, TypedData::data_offset()));
+ __ movq(RAX,
+ FieldAddress(RDI, RDX, TIMES_8, target::TypedData::data_offset()));
__ sbbq(RAX, Immediate(0));
- __ movq(FieldAddress(RBX, RDX, TIMES_8, TypedData::data_offset()), RAX);
+ __ movq(FieldAddress(RBX, RDX, TIMES_8, target::TypedData::data_offset()),
+ RAX);
__ incq(RDX); // Does not affect carry flag.
__ decq(R8); // Does not affect carry flag.
__ j(NOT_ZERO, &carry_loop, Assembler::kNearJump);
__ Bind(&done);
- __ LoadObject(RAX, Object::null_object());
+ __ LoadObject(RAX, NullObject());
__ ret();
}
-void Intrinsifier::Bigint_mulAdd(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Bigint_mulAdd(Assembler* assembler,
+ Label* normal_ir_body) {
// Pseudo code:
// static int _mulAdd(Uint32List x_digits, int xi,
// Uint32List m_digits, int i,
@@ -873,27 +897,30 @@
Label done;
// RBX = x, done if x == 0
- __ movq(RCX, Address(RSP, 7 * kWordSize)); // x_digits
- __ movq(RAX, Address(RSP, 6 * kWordSize)); // xi is Smi
- __ movq(RBX, FieldAddress(RCX, RAX, TIMES_2, TypedData::data_offset()));
+ __ movq(RCX, Address(RSP, 7 * target::kWordSize)); // x_digits
+ __ movq(RAX, Address(RSP, 6 * target::kWordSize)); // xi is Smi
+ __ movq(RBX,
+ FieldAddress(RCX, RAX, TIMES_2, target::TypedData::data_offset()));
__ testq(RBX, RBX);
__ j(ZERO, &done, Assembler::kNearJump);
// R8 = (SmiUntag(n) + 1)/2, no_op if n == 0
- __ movq(R8, Address(RSP, 1 * kWordSize));
+ __ movq(R8, Address(RSP, 1 * target::kWordSize));
__ addq(R8, Immediate(2));
__ sarq(R8, Immediate(2)); // R8 = number of digit pairs to process.
__ j(ZERO, &done, Assembler::kNearJump);
// RDI = mip = &m_digits[i >> 1]
- __ movq(RDI, Address(RSP, 5 * kWordSize)); // m_digits
- __ movq(RAX, Address(RSP, 4 * kWordSize)); // i is Smi
- __ leaq(RDI, FieldAddress(RDI, RAX, TIMES_2, TypedData::data_offset()));
+ __ movq(RDI, Address(RSP, 5 * target::kWordSize)); // m_digits
+ __ movq(RAX, Address(RSP, 4 * target::kWordSize)); // i is Smi
+ __ leaq(RDI,
+ FieldAddress(RDI, RAX, TIMES_2, target::TypedData::data_offset()));
// RSI = ajp = &a_digits[j >> 1]
- __ movq(RSI, Address(RSP, 3 * kWordSize)); // a_digits
- __ movq(RAX, Address(RSP, 2 * kWordSize)); // j is Smi
- __ leaq(RSI, FieldAddress(RSI, RAX, TIMES_2, TypedData::data_offset()));
+ __ movq(RSI, Address(RSP, 3 * target::kWordSize)); // a_digits
+ __ movq(RAX, Address(RSP, 2 * target::kWordSize)); // j is Smi
+ __ leaq(RSI,
+ FieldAddress(RSI, RAX, TIMES_2, target::TypedData::data_offset()));
// RCX = c = 0
__ xorq(RCX, RCX);
@@ -945,11 +972,12 @@
__ j(CARRY, &propagate_carry_loop, Assembler::kNearJump);
__ Bind(&done);
- __ movq(RAX, Immediate(Smi::RawValue(2))); // Two digits processed.
+ __ movq(RAX, Immediate(target::ToRawSmi(2))); // Two digits processed.
__ ret();
}
-void Intrinsifier::Bigint_sqrAdd(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Bigint_sqrAdd(Assembler* assembler,
+ Label* normal_ir_body) {
// Pseudo code:
// static int _sqrAdd(Uint32List x_digits, int i,
// Uint32List a_digits, int used) {
@@ -977,9 +1005,10 @@
// }
// RDI = xip = &x_digits[i >> 1]
- __ movq(RDI, Address(RSP, 4 * kWordSize)); // x_digits
- __ movq(RAX, Address(RSP, 3 * kWordSize)); // i is Smi
- __ leaq(RDI, FieldAddress(RDI, RAX, TIMES_2, TypedData::data_offset()));
+ __ movq(RDI, Address(RSP, 4 * target::kWordSize)); // x_digits
+ __ movq(RAX, Address(RSP, 3 * target::kWordSize)); // i is Smi
+ __ leaq(RDI,
+ FieldAddress(RDI, RAX, TIMES_2, target::TypedData::data_offset()));
// RBX = x = *xip++, return if x == 0
Label x_zero;
@@ -989,8 +1018,9 @@
__ addq(RDI, Immediate(2 * kBytesPerBigIntDigit));
// RSI = ajp = &a_digits[i]
- __ movq(RSI, Address(RSP, 2 * kWordSize)); // a_digits
- __ leaq(RSI, FieldAddress(RSI, RAX, TIMES_4, TypedData::data_offset()));
+ __ movq(RSI, Address(RSP, 2 * target::kWordSize)); // a_digits
+ __ leaq(RSI,
+ FieldAddress(RSI, RAX, TIMES_4, target::TypedData::data_offset()));
// RDX:RAX = t = x*x + *ajp
__ movq(RAX, RBX);
@@ -1003,8 +1033,8 @@
__ addq(RSI, Immediate(2 * kBytesPerBigIntDigit));
// int n = (used - i + 1)/2 - 1
- __ movq(R8, Address(RSP, 1 * kWordSize)); // used is Smi
- __ subq(R8, Address(RSP, 3 * kWordSize)); // i is Smi
+ __ movq(R8, Address(RSP, 1 * target::kWordSize)); // used is Smi
+ __ subq(R8, Address(RSP, 3 * target::kWordSize)); // i is Smi
__ addq(R8, Immediate(2));
__ sarq(R8, Immediate(2));
__ decq(R8); // R8 = number of digit pairs to process.
@@ -1064,12 +1094,12 @@
__ movq(Address(RSI, 2 * kBytesPerBigIntDigit), R13);
__ Bind(&x_zero);
- __ movq(RAX, Immediate(Smi::RawValue(2))); // Two digits processed.
+ __ movq(RAX, Immediate(target::ToRawSmi(2))); // Two digits processed.
__ ret();
}
-void Intrinsifier::Bigint_estimateQuotientDigit(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Bigint_estimateQuotientDigit(Assembler* assembler,
+ Label* normal_ir_body) {
// Pseudo code:
// static int _estQuotientDigit(Uint32List args, Uint32List digits, int i) {
// uint64_t yt = args[_YT_LO .. _YT]; // _YT_LO == 0, _YT == 1.
@@ -1087,16 +1117,17 @@
// }
// RDI = args
- __ movq(RDI, Address(RSP, 3 * kWordSize)); // args
+ __ movq(RDI, Address(RSP, 3 * target::kWordSize)); // args
// RCX = yt = args[0..1]
- __ movq(RCX, FieldAddress(RDI, TypedData::data_offset()));
+ __ movq(RCX, FieldAddress(RDI, target::TypedData::data_offset()));
// RBX = dp = &digits[(i >> 1) - 1]
- __ movq(RBX, Address(RSP, 2 * kWordSize)); // digits
- __ movq(RAX, Address(RSP, 1 * kWordSize)); // i is Smi and odd.
- __ leaq(RBX, FieldAddress(RBX, RAX, TIMES_2,
- TypedData::data_offset() - kBytesPerBigIntDigit));
+ __ movq(RBX, Address(RSP, 2 * target::kWordSize)); // digits
+ __ movq(RAX, Address(RSP, 1 * target::kWordSize)); // i is Smi and odd.
+ __ leaq(RBX, FieldAddress(
+ RBX, RAX, TIMES_2,
+ target::TypedData::data_offset() - kBytesPerBigIntDigit));
// RDX = dh = dp[0]
__ movq(RDX, Address(RBX, 0));
@@ -1117,16 +1148,16 @@
__ Bind(&return_qd);
// args[2..3] = qd
- __ movq(
- FieldAddress(RDI, TypedData::data_offset() + 2 * kBytesPerBigIntDigit),
- RAX);
+ __ movq(FieldAddress(
+ RDI, target::TypedData::data_offset() + 2 * kBytesPerBigIntDigit),
+ RAX);
- __ movq(RAX, Immediate(Smi::RawValue(2))); // Two digits processed.
+ __ movq(RAX, Immediate(target::ToRawSmi(2))); // Two digits processed.
__ ret();
}
-void Intrinsifier::Montgomery_mulMod(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Montgomery_mulMod(Assembler* assembler,
+ Label* normal_ir_body) {
// Pseudo code:
// static int _mulMod(Uint32List args, Uint32List digits, int i) {
// uint64_t rho = args[_RHO .. _RHO_HI]; // _RHO == 2, _RHO_HI == 3.
@@ -1137,26 +1168,27 @@
// }
// RDI = args
- __ movq(RDI, Address(RSP, 3 * kWordSize)); // args
+ __ movq(RDI, Address(RSP, 3 * target::kWordSize)); // args
// RCX = rho = args[2 .. 3]
- __ movq(RCX, FieldAddress(
- RDI, TypedData::data_offset() + 2 * kBytesPerBigIntDigit));
+ __ movq(RCX, FieldAddress(RDI, target::TypedData::data_offset() +
+ 2 * kBytesPerBigIntDigit));
// RAX = digits[i >> 1 .. (i >> 1) + 1]
- __ movq(RBX, Address(RSP, 2 * kWordSize)); // digits
- __ movq(RAX, Address(RSP, 1 * kWordSize)); // i is Smi
- __ movq(RAX, FieldAddress(RBX, RAX, TIMES_2, TypedData::data_offset()));
+ __ movq(RBX, Address(RSP, 2 * target::kWordSize)); // digits
+ __ movq(RAX, Address(RSP, 1 * target::kWordSize)); // i is Smi
+ __ movq(RAX,
+ FieldAddress(RBX, RAX, TIMES_2, target::TypedData::data_offset()));
// RDX:RAX = t = rho*d
__ mulq(RCX);
// args[4 .. 5] = t mod DIGIT_BASE^2 = low64(t)
- __ movq(
- FieldAddress(RDI, TypedData::data_offset() + 4 * kBytesPerBigIntDigit),
- RAX);
+ __ movq(FieldAddress(
+ RDI, target::TypedData::data_offset() + 4 * kBytesPerBigIntDigit),
+ RAX);
- __ movq(RAX, Immediate(Smi::RawValue(2))); // Two digits processed.
+ __ movq(RAX, Immediate(target::ToRawSmi(2))); // Two digits processed.
__ ret();
}
@@ -1166,7 +1198,7 @@
static void TestLastArgumentIsDouble(Assembler* assembler,
Label* is_smi,
Label* not_double_smi) {
- __ movq(RAX, Address(RSP, +1 * kWordSize));
+ __ movq(RAX, Address(RSP, +1 * target::kWordSize));
__ testq(RAX, Immediate(kSmiTagMask));
__ j(ZERO, is_smi); // Jump if Smi.
__ CompareClassId(RAX, kDoubleCid);
@@ -1184,19 +1216,19 @@
Label is_false, is_true, is_smi, double_op;
TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
// Both arguments are double, right operand is in RAX.
- __ movsd(XMM1, FieldAddress(RAX, Double::value_offset()));
+ __ movsd(XMM1, FieldAddress(RAX, target::Double::value_offset()));
__ Bind(&double_op);
- __ movq(RAX, Address(RSP, +2 * kWordSize)); // Left argument.
- __ movsd(XMM0, FieldAddress(RAX, Double::value_offset()));
+ __ movq(RAX, Address(RSP, +2 * target::kWordSize)); // Left argument.
+ __ movsd(XMM0, FieldAddress(RAX, target::Double::value_offset()));
__ comisd(XMM0, XMM1);
__ j(PARITY_EVEN, &is_false, Assembler::kNearJump); // NaN -> false;
__ j(true_condition, &is_true, Assembler::kNearJump);
// Fall through false.
__ Bind(&is_false);
- __ LoadObject(RAX, Bool::False());
+ __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
__ ret();
__ Bind(&is_true);
- __ LoadObject(RAX, Bool::True());
+ __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
__ ret();
__ Bind(&is_smi);
__ SmiUntag(RAX);
@@ -1205,27 +1237,28 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::Double_greaterThan(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Double_greaterThan(Assembler* assembler,
+ Label* normal_ir_body) {
CompareDoubles(assembler, normal_ir_body, ABOVE);
}
-void Intrinsifier::Double_greaterEqualThan(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Double_greaterEqualThan(Assembler* assembler,
+ Label* normal_ir_body) {
CompareDoubles(assembler, normal_ir_body, ABOVE_EQUAL);
}
-void Intrinsifier::Double_lessThan(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Double_lessThan(Assembler* assembler,
+ Label* normal_ir_body) {
CompareDoubles(assembler, normal_ir_body, BELOW);
}
-void Intrinsifier::Double_equal(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Double_equal(Assembler* assembler,
+ Label* normal_ir_body) {
CompareDoubles(assembler, normal_ir_body, EQUAL);
}
-void Intrinsifier::Double_lessEqualThan(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Double_lessEqualThan(Assembler* assembler,
+ Label* normal_ir_body) {
CompareDoubles(assembler, normal_ir_body, BELOW_EQUAL);
}
@@ -1237,10 +1270,10 @@
Label is_smi, double_op;
TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
// Both arguments are double, right operand is in RAX.
- __ movsd(XMM1, FieldAddress(RAX, Double::value_offset()));
+ __ movsd(XMM1, FieldAddress(RAX, target::Double::value_offset()));
__ Bind(&double_op);
- __ movq(RAX, Address(RSP, +2 * kWordSize)); // Left argument.
- __ movsd(XMM0, FieldAddress(RAX, Double::value_offset()));
+ __ movq(RAX, Address(RSP, +2 * target::kWordSize)); // Left argument.
+ __ movsd(XMM0, FieldAddress(RAX, target::Double::value_offset()));
switch (kind) {
case Token::kADD:
__ addsd(XMM0, XMM1);
@@ -1257,12 +1290,11 @@
default:
UNREACHABLE();
}
- const Class& double_class =
- Class::Handle(Isolate::Current()->object_store()->double_class());
+ const Class& double_class = DoubleClass();
__ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump,
RAX, // Result register.
R13);
- __ movsd(FieldAddress(RAX, Double::value_offset()), XMM0);
+ __ movsd(FieldAddress(RAX, target::Double::value_offset()), XMM0);
__ ret();
__ Bind(&is_smi);
__ SmiUntag(RAX);
@@ -1271,112 +1303,110 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::Double_add(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Double_add(Assembler* assembler, Label* normal_ir_body) {
DoubleArithmeticOperations(assembler, normal_ir_body, Token::kADD);
}
-void Intrinsifier::Double_mul(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Double_mul(Assembler* assembler, Label* normal_ir_body) {
DoubleArithmeticOperations(assembler, normal_ir_body, Token::kMUL);
}
-void Intrinsifier::Double_sub(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Double_sub(Assembler* assembler, Label* normal_ir_body) {
DoubleArithmeticOperations(assembler, normal_ir_body, Token::kSUB);
}
-void Intrinsifier::Double_div(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::Double_div(Assembler* assembler, Label* normal_ir_body) {
DoubleArithmeticOperations(assembler, normal_ir_body, Token::kDIV);
}
-void Intrinsifier::Double_mulFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Double_mulFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
// Only smis allowed.
- __ movq(RAX, Address(RSP, +1 * kWordSize));
+ __ movq(RAX, Address(RSP, +1 * target::kWordSize));
__ testq(RAX, Immediate(kSmiTagMask));
__ j(NOT_ZERO, normal_ir_body);
// Is Smi.
__ SmiUntag(RAX);
__ cvtsi2sdq(XMM1, RAX);
- __ movq(RAX, Address(RSP, +2 * kWordSize));
- __ movsd(XMM0, FieldAddress(RAX, Double::value_offset()));
+ __ movq(RAX, Address(RSP, +2 * target::kWordSize));
+ __ movsd(XMM0, FieldAddress(RAX, target::Double::value_offset()));
__ mulsd(XMM0, XMM1);
- const Class& double_class =
- Class::Handle(Isolate::Current()->object_store()->double_class());
+ const Class& double_class = DoubleClass();
__ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump,
RAX, // Result register.
R13);
- __ movsd(FieldAddress(RAX, Double::value_offset()), XMM0);
+ __ movsd(FieldAddress(RAX, target::Double::value_offset()), XMM0);
__ ret();
__ Bind(normal_ir_body);
}
// Left is double, right is integer (Mint or Smi)
-void Intrinsifier::DoubleFromInteger(Assembler* assembler,
- Label* normal_ir_body) {
- __ movq(RAX, Address(RSP, +1 * kWordSize));
+void AsmIntrinsifier::DoubleFromInteger(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ movq(RAX, Address(RSP, +1 * target::kWordSize));
__ testq(RAX, Immediate(kSmiTagMask));
__ j(NOT_ZERO, normal_ir_body);
// Is Smi.
__ SmiUntag(RAX);
__ cvtsi2sdq(XMM0, RAX);
- const Class& double_class =
- Class::Handle(Isolate::Current()->object_store()->double_class());
+ const Class& double_class = DoubleClass();
__ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump,
RAX, // Result register.
R13);
- __ movsd(FieldAddress(RAX, Double::value_offset()), XMM0);
+ __ movsd(FieldAddress(RAX, target::Double::value_offset()), XMM0);
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::Double_getIsNaN(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Double_getIsNaN(Assembler* assembler,
+ Label* normal_ir_body) {
Label is_true;
- __ movq(RAX, Address(RSP, +1 * kWordSize));
- __ movsd(XMM0, FieldAddress(RAX, Double::value_offset()));
+ __ movq(RAX, Address(RSP, +1 * target::kWordSize));
+ __ movsd(XMM0, FieldAddress(RAX, target::Double::value_offset()));
__ comisd(XMM0, XMM0);
__ j(PARITY_EVEN, &is_true, Assembler::kNearJump); // NaN -> true;
- __ LoadObject(RAX, Bool::False());
+ __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
__ ret();
__ Bind(&is_true);
- __ LoadObject(RAX, Bool::True());
+ __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
__ ret();
}
-void Intrinsifier::Double_getIsInfinite(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Double_getIsInfinite(Assembler* assembler,
+ Label* normal_ir_body) {
Label is_inf, done;
- __ movq(RAX, Address(RSP, +1 * kWordSize));
- __ movq(RAX, FieldAddress(RAX, Double::value_offset()));
+ __ movq(RAX, Address(RSP, +1 * target::kWordSize));
+ __ movq(RAX, FieldAddress(RAX, target::Double::value_offset()));
// Mask off the sign.
__ AndImmediate(RAX, Immediate(0x7FFFFFFFFFFFFFFFLL));
// Compare with +infinity.
__ CompareImmediate(RAX, Immediate(0x7FF0000000000000LL));
__ j(EQUAL, &is_inf, Assembler::kNearJump);
- __ LoadObject(RAX, Bool::False());
+ __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
__ jmp(&done);
__ Bind(&is_inf);
- __ LoadObject(RAX, Bool::True());
+ __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
__ Bind(&done);
__ ret();
}
-void Intrinsifier::Double_getIsNegative(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Double_getIsNegative(Assembler* assembler,
+ Label* normal_ir_body) {
Label is_false, is_true, is_zero;
- __ movq(RAX, Address(RSP, +1 * kWordSize));
- __ movsd(XMM0, FieldAddress(RAX, Double::value_offset()));
+ __ movq(RAX, Address(RSP, +1 * target::kWordSize));
+ __ movsd(XMM0, FieldAddress(RAX, target::Double::value_offset()));
__ xorpd(XMM1, XMM1); // 0.0 -> XMM1.
__ comisd(XMM0, XMM1);
__ j(PARITY_EVEN, &is_false, Assembler::kNearJump); // NaN -> false.
__ j(EQUAL, &is_zero, Assembler::kNearJump); // Check for negative zero.
__ j(ABOVE_EQUAL, &is_false, Assembler::kNearJump); // >= 0 -> false.
__ Bind(&is_true);
- __ LoadObject(RAX, Bool::True());
+ __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
__ ret();
__ Bind(&is_false);
- __ LoadObject(RAX, Bool::False());
+ __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
__ ret();
__ Bind(&is_zero);
// Check for negative zero (get the sign bit).
@@ -1386,10 +1416,10 @@
__ jmp(&is_false, Assembler::kNearJump);
}
-void Intrinsifier::DoubleToInteger(Assembler* assembler,
- Label* normal_ir_body) {
- __ movq(RAX, Address(RSP, +1 * kWordSize));
- __ movsd(XMM0, FieldAddress(RAX, Double::value_offset()));
+void AsmIntrinsifier::DoubleToInteger(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ movq(RAX, Address(RSP, +1 * target::kWordSize));
+ __ movsd(XMM0, FieldAddress(RAX, target::Double::value_offset()));
__ cvttsd2siq(RAX, XMM0);
// Overflow is signalled with minint.
// Check for overflow and that it fits into Smi.
@@ -1401,14 +1431,14 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::Double_hashCode(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Double_hashCode(Assembler* assembler,
+ Label* normal_ir_body) {
// TODO(dartbug.com/31174): Convert this to a graph intrinsic.
// Convert double value to signed 64-bit int in RAX and
// back to a double in XMM1.
- __ movq(RCX, Address(RSP, +1 * kWordSize));
- __ movsd(XMM0, FieldAddress(RCX, Double::value_offset()));
+ __ movq(RCX, Address(RSP, +1 * target::kWordSize));
+ __ movsd(XMM0, FieldAddress(RCX, target::Double::value_offset()));
__ cvttsd2siq(RAX, XMM0);
__ cvtsi2sdq(XMM1, RAX);
@@ -1428,7 +1458,7 @@
// Convert the double bits to a hash code that fits in a Smi.
__ Bind(&double_hash);
- __ movq(RAX, FieldAddress(RCX, Double::value_offset()));
+ __ movq(RAX, FieldAddress(RCX, target::Double::value_offset()));
__ movq(RCX, RAX);
__ shrq(RCX, Immediate(32));
__ xorq(RAX, RCX);
@@ -1440,19 +1470,18 @@
__ Bind(normal_ir_body);
}
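// For reference, the double_hash path above mixes the raw bit pattern of the
// value: the upper 32 bits are folded into the lower 32 with an XOR before
// the result is reduced to Smi range (the masking step lies outside this
// hunk). Sketch: bits 0xAAAAAAAA55555555 yield a low word of
// 0x55555555 ^ 0xAAAAAAAA = 0xFFFFFFFF.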
-void Intrinsifier::MathSqrt(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::MathSqrt(Assembler* assembler, Label* normal_ir_body) {
Label is_smi, double_op;
TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
// Argument is double and is in RAX.
- __ movsd(XMM1, FieldAddress(RAX, Double::value_offset()));
+ __ movsd(XMM1, FieldAddress(RAX, target::Double::value_offset()));
__ Bind(&double_op);
__ sqrtsd(XMM0, XMM1);
- const Class& double_class =
- Class::Handle(Isolate::Current()->object_store()->double_class());
+ const Class& double_class = DoubleClass();
__ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump,
RAX, // Result register.
R13);
- __ movsd(FieldAddress(RAX, Double::value_offset()), XMM0);
+ __ movsd(FieldAddress(RAX, target::Double::value_offset()), XMM0);
__ ret();
__ Bind(&is_smi);
__ SmiUntag(RAX);
@@ -1464,25 +1493,20 @@
// var state = ((_A * (_state[kSTATE_LO])) + _state[kSTATE_HI]) & _MASK_64;
// _state[kSTATE_LO] = state & _MASK_32;
// _state[kSTATE_HI] = state >> 32;
-void Intrinsifier::Random_nextState(Assembler* assembler,
- Label* normal_ir_body) {
- const Library& math_lib = Library::Handle(Library::MathLibrary());
- ASSERT(!math_lib.IsNull());
- const Class& random_class =
- Class::Handle(math_lib.LookupClassAllowPrivate(Symbols::_Random()));
- ASSERT(!random_class.IsNull());
- const Field& state_field = Field::ZoneHandle(
- random_class.LookupInstanceFieldAllowPrivate(Symbols::_state()));
- ASSERT(!state_field.IsNull());
- const int64_t a_int_value = Intrinsifier::kRandomAValue;
+void AsmIntrinsifier::Random_nextState(Assembler* assembler,
+ Label* normal_ir_body) {
+ const Field& state_field = LookupMathRandomStateFieldOffset();
+ const int64_t a_int_value = AsmIntrinsifier::kRandomAValue;
// Receiver.
- __ movq(RAX, Address(RSP, +1 * kWordSize));
+ __ movq(RAX, Address(RSP, +1 * target::kWordSize));
// Field '_state'.
- __ movq(RBX, FieldAddress(RAX, state_field.Offset()));
+ __ movq(RBX, FieldAddress(RAX, LookupFieldOffsetInBytes(state_field)));
// Addresses of _state[0] and _state[1].
- const intptr_t scale = Instance::ElementSizeFor(kTypedDataUint32ArrayCid);
- const intptr_t offset = Instance::DataOffsetFor(kTypedDataUint32ArrayCid);
+ const intptr_t scale =
+ target::Instance::ElementSizeFor(kTypedDataUint32ArrayCid);
+ const intptr_t offset =
+ target::Instance::DataOffsetFor(kTypedDataUint32ArrayCid);
Address addr_0 = FieldAddress(RBX, 0 * scale + offset);
Address addr_1 = FieldAddress(RBX, 1 * scale + offset);
__ movq(RAX, Immediate(a_int_value));
@@ -1493,24 +1517,25 @@
__ movl(addr_0, RDX);
__ shrq(RDX, Immediate(32));
__ movl(addr_1, RDX);
- ASSERT(Smi::RawValue(0) == 0);
+ ASSERT(target::ToRawSmi(0) == 0);
__ xorq(RAX, RAX);
__ ret();
}
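// In effect the update above is one 64-bit multiply-with-carry step (sketch):
// with the two 32-bit state words lo and hi, it computes
// state = kRandomAValue * lo + hi, stores state & 0xFFFFFFFF back as the new
// lo and state >> 32 as the new hi, and leaves Smi 0 in RAX as the result.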
// Identity comparison.
-void Intrinsifier::ObjectEquals(Assembler* assembler, Label* normal_ir_body) {
+void AsmIntrinsifier::ObjectEquals(Assembler* assembler,
+ Label* normal_ir_body) {
Label is_true;
const intptr_t kReceiverOffset = 2;
const intptr_t kArgumentOffset = 1;
- __ movq(RAX, Address(RSP, +kArgumentOffset * kWordSize));
- __ cmpq(RAX, Address(RSP, +kReceiverOffset * kWordSize));
+ __ movq(RAX, Address(RSP, +kArgumentOffset * target::kWordSize));
+ __ cmpq(RAX, Address(RSP, +kReceiverOffset * target::kWordSize));
__ j(EQUAL, &is_true, Assembler::kNearJump);
- __ LoadObject(RAX, Bool::False());
+ __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
__ ret();
__ Bind(&is_true);
- __ LoadObject(RAX, Bool::True());
+ __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
__ ret();
}
@@ -1549,10 +1574,10 @@
}
// Return type quickly for simple types (not parameterized and not signature).
-void Intrinsifier::ObjectRuntimeType(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::ObjectRuntimeType(Assembler* assembler,
+ Label* normal_ir_body) {
Label use_declaration_type, not_integer, not_double;
- __ movq(RAX, Address(RSP, +1 * kWordSize));
+ __ movq(RAX, Address(RSP, +1 * target::kWordSize));
__ LoadClassIdMayBeSmi(RCX, RAX);
// RCX: untagged cid of instance (RAX).
@@ -1567,8 +1592,8 @@
__ j(NOT_EQUAL, &not_double);
__ LoadIsolate(RAX);
- __ movq(RAX, Address(RAX, Isolate::object_store_offset()));
- __ movq(RAX, Address(RAX, ObjectStore::double_type_offset()));
+ __ movq(RAX, Address(RAX, target::Isolate::object_store_offset()));
+ __ movq(RAX, Address(RAX, target::ObjectStore::double_type_offset()));
__ ret();
__ Bind(&not_double);
@@ -1577,8 +1602,8 @@
JumpIfNotInteger(assembler, RAX, &not_integer);
__ LoadIsolate(RAX);
- __ movq(RAX, Address(RAX, Isolate::object_store_offset()));
- __ movq(RAX, Address(RAX, ObjectStore::int_type_offset()));
+ __ movq(RAX, Address(RAX, target::Isolate::object_store_offset()));
+ __ movq(RAX, Address(RAX, target::ObjectStore::int_type_offset()));
__ ret();
__ Bind(&not_integer);
@@ -1588,36 +1613,37 @@
JumpIfNotString(assembler, RAX, &use_declaration_type);
__ LoadIsolate(RAX);
- __ movq(RAX, Address(RAX, Isolate::object_store_offset()));
- __ movq(RAX, Address(RAX, ObjectStore::string_type_offset()));
+ __ movq(RAX, Address(RAX, target::Isolate::object_store_offset()));
+ __ movq(RAX, Address(RAX, target::ObjectStore::string_type_offset()));
__ ret();
// Object is neither double, nor integer, nor string.
__ Bind(&use_declaration_type);
__ LoadClassById(RDI, RCX);
- __ movzxw(RCX, FieldAddress(RDI, Class::num_type_arguments_offset()));
+ __ movzxw(RCX, FieldAddress(
+ RDI, target::Class::num_type_arguments_offset_in_bytes()));
__ cmpq(RCX, Immediate(0));
__ j(NOT_EQUAL, normal_ir_body, Assembler::kNearJump);
- __ movq(RAX, FieldAddress(RDI, Class::declaration_type_offset()));
- __ CompareObject(RAX, Object::null_object());
+ __ movq(RAX, FieldAddress(RDI, target::Class::declaration_type_offset()));
+ __ CompareObject(RAX, NullObject());
__ j(EQUAL, normal_ir_body, Assembler::kNearJump); // Not yet set.
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler,
+ Label* normal_ir_body) {
Label different_cids, equal, not_equal, not_integer;
- __ movq(RAX, Address(RSP, +1 * kWordSize));
+ __ movq(RAX, Address(RSP, +1 * target::kWordSize));
__ LoadClassIdMayBeSmi(RCX, RAX);
// Check if left hand side is a closure. Closures are handled in the runtime.
__ cmpq(RCX, Immediate(kClosureCid));
__ j(EQUAL, normal_ir_body);
- __ movq(RAX, Address(RSP, +2 * kWordSize));
+ __ movq(RAX, Address(RSP, +2 * target::kWordSize));
__ LoadClassIdMayBeSmi(RDX, RAX);
// Check whether class ids match. If class ids don't match objects can still
@@ -1630,12 +1656,13 @@
// Check if there are no type arguments. In this case we can return true.
// Otherwise fall through into the runtime to handle comparison.
__ LoadClassById(RDI, RCX);
- __ movzxw(RCX, FieldAddress(RDI, Class::num_type_arguments_offset()));
+ __ movzxw(RCX, FieldAddress(
+ RDI, target::Class::num_type_arguments_offset_in_bytes()));
__ cmpq(RCX, Immediate(0));
__ j(NOT_EQUAL, normal_ir_body, Assembler::kNearJump);
__ Bind(&equal);
- __ LoadObject(RAX, Bool::True());
+ __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
__ ret();
// Class ids are different. Check if we are comparing runtime types of
@@ -1664,16 +1691,16 @@
// Fall-through to the not equal case.
__ Bind(&not_equal);
- __ LoadObject(RAX, Bool::False());
+ __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::String_getHashCode(Assembler* assembler,
- Label* normal_ir_body) {
- __ movq(RAX, Address(RSP, +1 * kWordSize)); // String object.
- __ movl(RAX, FieldAddress(RAX, String::hash_offset()));
+void AsmIntrinsifier::String_getHashCode(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ movq(RAX, Address(RSP, +1 * target::kWordSize)); // String object.
+ __ movl(RAX, FieldAddress(RAX, target::String::hash_offset()));
ASSERT(kSmiTag == 0);
ASSERT(kSmiTagShift == 1);
__ addq(RAX, RAX); // Smi tag RAX, setting Z flag.
@@ -1683,10 +1710,10 @@
// Hash not yet computed.
}
-void Intrinsifier::Type_getHashCode(Assembler* assembler,
- Label* normal_ir_body) {
- __ movq(RAX, Address(RSP, +1 * kWordSize)); // Type object.
- __ movq(RAX, FieldAddress(RAX, Type::hash_offset()));
+void AsmIntrinsifier::Type_getHashCode(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ movq(RAX, Address(RSP, +1 * target::kWordSize)); // Type object.
+ __ movq(RAX, FieldAddress(RAX, target::Type::hash_offset()));
ASSERT(kSmiTag == 0);
ASSERT(kSmiTagShift == 1);
__ testq(RAX, RAX);
@@ -1696,18 +1723,20 @@
// Hash not yet computed.
}
-void Intrinsifier::Object_getHash(Assembler* assembler, Label* normal_ir_body) {
- __ movq(RAX, Address(RSP, +1 * kWordSize)); // Object.
- __ movl(RAX, FieldAddress(RAX, String::hash_offset()));
+void AsmIntrinsifier::Object_getHash(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ movq(RAX, Address(RSP, +1 * target::kWordSize)); // Object.
+ __ movl(RAX, FieldAddress(RAX, target::String::hash_offset()));
__ SmiTag(RAX);
__ ret();
}
-void Intrinsifier::Object_setHash(Assembler* assembler, Label* normal_ir_body) {
- __ movq(RAX, Address(RSP, +2 * kWordSize)); // Object.
- __ movq(RDX, Address(RSP, +1 * kWordSize)); // Value.
+void AsmIntrinsifier::Object_setHash(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ movq(RAX, Address(RSP, +2 * target::kWordSize)); // Object.
+ __ movq(RDX, Address(RSP, +1 * target::kWordSize)); // Value.
__ SmiUntag(RDX);
- __ movl(FieldAddress(RAX, String::hash_offset()), RDX);
+ __ movl(FieldAddress(RAX, target::String::hash_offset()), RDX);
__ ret();
}
@@ -1716,8 +1745,8 @@
intptr_t other_cid,
Label* return_true,
Label* return_false) {
- __ movq(R8, FieldAddress(RAX, String::length_offset()));
- __ movq(R9, FieldAddress(RCX, String::length_offset()));
+ __ movq(R8, FieldAddress(RAX, target::String::length_offset()));
+ __ movq(R9, FieldAddress(RCX, target::String::length_offset()));
// if (other.length == 0) return true;
__ testq(R9, R9);
@@ -1733,8 +1762,8 @@
__ cmpq(R11, R8);
__ j(GREATER, return_false);
- __ SmiUntag(RBX); // start
- __ SmiUntag(R9); // other.length
+ __ SmiUntag(RBX); // start
+ __ SmiUntag(R9); // other.length
__ LoadImmediate(R11, Immediate(0)); // i = 0
// do
@@ -1746,21 +1775,21 @@
__ movq(R8, R11);
__ addq(R8, RBX);
if (receiver_cid == kOneByteStringCid) {
- __ movzxb(R12,
- FieldAddress(RAX, R8, TIMES_1, OneByteString::data_offset()));
+ __ movzxb(R12, FieldAddress(RAX, R8, TIMES_1,
+ target::OneByteString::data_offset()));
} else {
ASSERT(receiver_cid == kTwoByteStringCid);
- __ movzxw(R12,
- FieldAddress(RAX, R8, TIMES_2, TwoByteString::data_offset()));
+ __ movzxw(R12, FieldAddress(RAX, R8, TIMES_2,
+ target::TwoByteString::data_offset()));
}
// other.codeUnitAt(i)
if (other_cid == kOneByteStringCid) {
- __ movzxb(R13,
- FieldAddress(RCX, R11, TIMES_1, OneByteString::data_offset()));
+ __ movzxb(R13, FieldAddress(RCX, R11, TIMES_1,
+ target::OneByteString::data_offset()));
} else {
ASSERT(other_cid == kTwoByteStringCid);
- __ movzxw(R13,
- FieldAddress(RCX, R11, TIMES_2, TwoByteString::data_offset()));
+ __ movzxw(R13, FieldAddress(RCX, R11, TIMES_2,
+ target::TwoByteString::data_offset()));
}
__ cmpq(R12, R13);
__ j(NOT_EQUAL, return_false);
@@ -1776,12 +1805,12 @@
// bool _substringMatches(int start, String other)
// This intrinsic handles a OneByteString or TwoByteString receiver with a
// OneByteString other.
-void Intrinsifier::StringBaseSubstringMatches(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::StringBaseSubstringMatches(Assembler* assembler,
+ Label* normal_ir_body) {
Label return_true, return_false, try_two_byte;
- __ movq(RAX, Address(RSP, +3 * kWordSize)); // receiver
- __ movq(RBX, Address(RSP, +2 * kWordSize)); // start
- __ movq(RCX, Address(RSP, +1 * kWordSize)); // other
+ __ movq(RAX, Address(RSP, +3 * target::kWordSize)); // receiver
+ __ movq(RBX, Address(RSP, +2 * target::kWordSize)); // start
+ __ movq(RCX, Address(RSP, +1 * target::kWordSize)); // other
__ testq(RBX, Immediate(kSmiTagMask));
__ j(NOT_ZERO, normal_ir_body); // 'start' is not Smi.
@@ -1805,73 +1834,81 @@
&return_false);
__ Bind(&return_true);
- __ LoadObject(RAX, Bool::True());
+ __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
__ ret();
__ Bind(&return_false);
- __ LoadObject(RAX, Bool::False());
+ __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::StringBaseCharAt(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::StringBaseCharAt(Assembler* assembler,
+ Label* normal_ir_body) {
Label try_two_byte_string;
- __ movq(RCX, Address(RSP, +1 * kWordSize)); // Index.
- __ movq(RAX, Address(RSP, +2 * kWordSize)); // String.
+ __ movq(RCX, Address(RSP, +1 * target::kWordSize)); // Index.
+ __ movq(RAX, Address(RSP, +2 * target::kWordSize)); // String.
__ testq(RCX, Immediate(kSmiTagMask));
__ j(NOT_ZERO, normal_ir_body); // Non-smi index.
// Range check.
- __ cmpq(RCX, FieldAddress(RAX, String::length_offset()));
+ __ cmpq(RCX, FieldAddress(RAX, target::String::length_offset()));
// Runtime throws exception.
__ j(ABOVE_EQUAL, normal_ir_body);
__ CompareClassId(RAX, kOneByteStringCid);
__ j(NOT_EQUAL, &try_two_byte_string, Assembler::kNearJump);
__ SmiUntag(RCX);
- __ movzxb(RCX, FieldAddress(RAX, RCX, TIMES_1, OneByteString::data_offset()));
- __ cmpq(RCX, Immediate(Symbols::kNumberOfOneCharCodeSymbols));
+ __ movzxb(RCX, FieldAddress(RAX, RCX, TIMES_1,
+ target::OneByteString::data_offset()));
+ __ cmpq(RCX, Immediate(target::Symbols::kNumberOfOneCharCodeSymbols));
__ j(GREATER_EQUAL, normal_ir_body);
- __ movq(RAX, Address(THR, Thread::predefined_symbols_address_offset()));
+ __ movq(RAX,
+ Address(THR, target::Thread::predefined_symbols_address_offset()));
__ movq(RAX, Address(RAX, RCX, TIMES_8,
- Symbols::kNullCharCodeSymbolOffset * kWordSize));
+ target::Symbols::kNullCharCodeSymbolOffset *
+ target::kWordSize));
__ ret();
__ Bind(&try_two_byte_string);
__ CompareClassId(RAX, kTwoByteStringCid);
__ j(NOT_EQUAL, normal_ir_body);
ASSERT(kSmiTagShift == 1);
- __ movzxw(RCX, FieldAddress(RAX, RCX, TIMES_1, OneByteString::data_offset()));
- __ cmpq(RCX, Immediate(Symbols::kNumberOfOneCharCodeSymbols));
+ __ movzxw(RCX, FieldAddress(RAX, RCX, TIMES_1,
+ target::OneByteString::data_offset()));
+ __ cmpq(RCX, Immediate(target::Symbols::kNumberOfOneCharCodeSymbols));
__ j(GREATER_EQUAL, normal_ir_body);
- __ movq(RAX, Address(THR, Thread::predefined_symbols_address_offset()));
+ __ movq(RAX,
+ Address(THR, target::Thread::predefined_symbols_address_offset()));
__ movq(RAX, Address(RAX, RCX, TIMES_8,
- Symbols::kNullCharCodeSymbolOffset * kWordSize));
+ target::Symbols::kNullCharCodeSymbolOffset *
+ target::kWordSize));
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::StringBaseIsEmpty(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::StringBaseIsEmpty(Assembler* assembler,
+ Label* normal_ir_body) {
Label is_true;
// Get length.
- __ movq(RAX, Address(RSP, +1 * kWordSize)); // String object.
- __ movq(RAX, FieldAddress(RAX, String::length_offset()));
- __ cmpq(RAX, Immediate(Smi::RawValue(0)));
+ __ movq(RAX, Address(RSP, +1 * target::kWordSize)); // String object.
+ __ movq(RAX, FieldAddress(RAX, target::String::length_offset()));
+ __ cmpq(RAX, Immediate(target::ToRawSmi(0)));
__ j(EQUAL, &is_true, Assembler::kNearJump);
- __ LoadObject(RAX, Bool::False());
+ __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
__ ret();
__ Bind(&is_true);
- __ LoadObject(RAX, Bool::True());
+ __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
__ ret();
}
-void Intrinsifier::OneByteString_getHashCode(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::OneByteString_getHashCode(Assembler* assembler,
+ Label* normal_ir_body) {
Label compute_hash;
- __ movq(RBX, Address(RSP, +1 * kWordSize)); // OneByteString object.
- __ movl(RAX, FieldAddress(RBX, String::hash_offset()));
+ __ movq(
+ RBX,
+ Address(RSP, +1 * target::kWordSize)); // target::OneByteString object.
+ __ movl(RAX, FieldAddress(RBX, target::String::hash_offset()));
__ cmpq(RAX, Immediate(0));
__ j(EQUAL, &compute_hash, Assembler::kNearJump);
__ SmiTag(RAX);
@@ -1879,11 +1916,11 @@
__ Bind(&compute_hash);
// Hash not yet computed, use algorithm of class StringHasher.
- __ movq(RCX, FieldAddress(RBX, String::length_offset()));
+ __ movq(RCX, FieldAddress(RBX, target::String::length_offset()));
__ SmiUntag(RCX);
__ xorq(RAX, RAX);
__ xorq(RDI, RDI);
- // RBX: Instance of OneByteString.
+ // RBX: Instance of target::OneByteString.
// RCX: String length, untagged integer.
// RDI: Loop counter, untagged integer.
// RAX: Hash code, untagged integer.
@@ -1896,7 +1933,8 @@
// hash_ += hash_ << 10;
// hash_ ^= hash_ >> 6;
// Get one character (ch).
- __ movzxb(RDX, FieldAddress(RBX, RDI, TIMES_1, OneByteString::data_offset()));
+ __ movzxb(RDX, FieldAddress(RBX, RDI, TIMES_1,
+ target::OneByteString::data_offset()));
// RDX: ch and temporary.
__ addl(RAX, RDX);
__ movq(RDX, RAX);
@@ -1924,15 +1962,16 @@
__ shll(RDX, Immediate(15));
__ addl(RAX, RDX);
// hash_ = hash_ & ((static_cast<intptr_t>(1) << bits) - 1);
- __ andl(RAX,
- Immediate(((static_cast<intptr_t>(1) << String::kHashBits) - 1)));
+ __ andl(
+ RAX,
+ Immediate(((static_cast<intptr_t>(1) << target::String::kHashBits) - 1)));
// return hash_ == 0 ? 1 : hash_;
__ cmpq(RAX, Immediate(0));
__ j(NOT_EQUAL, &set_hash_code, Assembler::kNearJump);
__ incq(RAX);
__ Bind(&set_hash_code);
- __ movl(FieldAddress(RBX, String::hash_offset()), RAX);
+ __ movl(FieldAddress(RBX, target::String::hash_offset()), RAX);
__ SmiTag(RAX);
__ ret();
}
@@ -1959,12 +1998,13 @@
__ addq(RDI, Immediate(1));
__ Bind(&not_zero_length);
const intptr_t fixed_size_plus_alignment_padding =
- sizeof(RawString) + kObjectAlignment - 1;
+ target::String::InstanceSize() +
+ target::ObjectAlignment::kObjectAlignment - 1;
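+ // The add/and pair below rounds the allocation size up to the next
+ // object-alignment boundary.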
__ addq(RDI, Immediate(fixed_size_plus_alignment_padding));
- __ andq(RDI, Immediate(-kObjectAlignment));
+ __ andq(RDI, Immediate(-target::ObjectAlignment::kObjectAlignment));
const intptr_t cid = kOneByteStringCid;
- __ movq(RAX, Address(THR, Thread::top_offset()));
+ __ movq(RAX, Address(THR, target::Thread::top_offset()));
// RDI: allocation size.
__ movq(RCX, RAX);
@@ -1975,12 +2015,12 @@
// RAX: potential new object start.
// RCX: potential next object start.
// RDI: allocation size.
- __ cmpq(RCX, Address(THR, Thread::end_offset()));
+ __ cmpq(RCX, Address(THR, target::Thread::end_offset()));
__ j(ABOVE_EQUAL, &pop_and_fail);
// Successfully allocated the object(s), now update top to point to
// next object start and initialize the object.
- __ movq(Address(THR, Thread::top_offset()), RCX);
+ __ movq(Address(THR, target::Thread::top_offset()), RCX);
__ addq(RAX, Immediate(kHeapObjectTag));
NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, RDI));
@@ -1989,9 +2029,10 @@
// RDI: allocation size.
{
Label size_tag_overflow, done;
- __ cmpq(RDI, Immediate(RawObject::SizeTag::kMaxSizeTag));
+ __ cmpq(RDI, Immediate(target::RawObject::kSizeTagMaxSizeTag));
__ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
- __ shlq(RDI, Immediate(RawObject::kSizeTagPos - kObjectAlignmentLog2));
+ __ shlq(RDI, Immediate(target::RawObject::kTagBitsSizeTagPos -
+ target::ObjectAlignment::kObjectAlignmentLog2));
__ jmp(&done, Assembler::kNearJump);
__ Bind(&size_tag_overflow);
@@ -2000,17 +2041,16 @@
// Get the class index and insert it into the tags.
// This also clears the hash, which is in the high bits of the tags.
- uint32_t tags = 0;
- tags = RawObject::ClassIdTag::update(cid, tags);
- tags = RawObject::NewBit::update(true, tags);
+ const uint32_t tags =
+ target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
__ orq(RDI, Immediate(tags));
- __ movq(FieldAddress(RAX, String::tags_offset()), RDI); // Tags.
+ __ movq(FieldAddress(RAX, target::Object::tags_offset()), RDI); // Tags.
}
// Set the length field.
__ popq(RDI);
- __ StoreIntoObjectNoBarrier(RAX, FieldAddress(RAX, String::length_offset()),
- RDI);
+ __ StoreIntoObjectNoBarrier(
+ RAX, FieldAddress(RAX, target::String::length_offset()), RDI);
__ jmp(ok, Assembler::kNearJump);
__ Bind(&pop_and_fail);
@@ -2018,15 +2058,15 @@
__ jmp(failure);
}
-// Arg0: OneByteString (receiver).
+// Arg0: target::OneByteString (receiver).
// Arg1: Start index as Smi.
// Arg2: End index as Smi.
// The indexes must be valid.
-void Intrinsifier::OneByteString_substringUnchecked(Assembler* assembler,
- Label* normal_ir_body) {
- const intptr_t kStringOffset = 3 * kWordSize;
- const intptr_t kStartIndexOffset = 2 * kWordSize;
- const intptr_t kEndIndexOffset = 1 * kWordSize;
+void AsmIntrinsifier::OneByteString_substringUnchecked(Assembler* assembler,
+ Label* normal_ir_body) {
+ const intptr_t kStringOffset = 3 * target::kWordSize;
+ const intptr_t kStartIndexOffset = 2 * target::kWordSize;
+ const intptr_t kEndIndexOffset = 1 * target::kWordSize;
Label ok;
__ movq(RSI, Address(RSP, +kStartIndexOffset));
__ movq(RDI, Address(RSP, +kEndIndexOffset));
@@ -2042,7 +2082,8 @@
__ movq(RSI, Address(RSP, +kStringOffset));
__ movq(RBX, Address(RSP, +kStartIndexOffset));
__ SmiUntag(RBX);
- __ leaq(RSI, FieldAddress(RSI, RBX, TIMES_1, OneByteString::data_offset()));
+ __ leaq(RSI, FieldAddress(RSI, RBX, TIMES_1,
+ target::OneByteString::data_offset()));
// RSI: Start address to copy from (untagged).
// RBX: Untagged start index.
__ movq(RCX, Address(RSP, +kEndIndexOffset));
@@ -2058,7 +2099,8 @@
__ jmp(&check, Assembler::kNearJump);
__ Bind(&loop);
__ movzxb(RBX, Address(RSI, RDX, TIMES_1, 0));
- __ movb(FieldAddress(RAX, RDX, TIMES_1, OneByteString::data_offset()), RBX);
+ __ movb(FieldAddress(RAX, RDX, TIMES_1, target::OneByteString::data_offset()),
+ RBX);
__ incq(RDX);
__ Bind(&check);
__ cmpq(RDX, RCX);
@@ -2067,20 +2109,21 @@
__ Bind(normal_ir_body);
}
-void Intrinsifier::OneByteStringSetAt(Assembler* assembler,
- Label* normal_ir_body) {
- __ movq(RCX, Address(RSP, +1 * kWordSize)); // Value.
- __ movq(RBX, Address(RSP, +2 * kWordSize)); // Index.
- __ movq(RAX, Address(RSP, +3 * kWordSize)); // OneByteString.
+void AsmIntrinsifier::OneByteStringSetAt(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ movq(RCX, Address(RSP, +1 * target::kWordSize)); // Value.
+ __ movq(RBX, Address(RSP, +2 * target::kWordSize)); // Index.
+ __ movq(RAX, Address(RSP, +3 * target::kWordSize)); // target::OneByteString.
__ SmiUntag(RBX);
__ SmiUntag(RCX);
- __ movb(FieldAddress(RAX, RBX, TIMES_1, OneByteString::data_offset()), RCX);
+ __ movb(FieldAddress(RAX, RBX, TIMES_1, target::OneByteString::data_offset()),
+ RCX);
__ ret();
}
-void Intrinsifier::OneByteString_allocate(Assembler* assembler,
- Label* normal_ir_body) {
+ __ movq(RDI, Address(RSP, +1 * target::kWordSize)); // Length.
+void AsmIntrinsifier::OneByteString_allocate(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ movq(RDI, Address(RSP, +1 * target::kWordSize)); // Length.v=
Label ok;
TryAllocateOnebyteString(assembler, &ok, normal_ir_body, RDI);
// RDI: Start address to copy from (untagged).
@@ -2096,22 +2139,22 @@
Label* normal_ir_body,
intptr_t string_cid) {
Label is_true, is_false, loop;
- __ movq(RAX, Address(RSP, +2 * kWordSize)); // This.
- __ movq(RCX, Address(RSP, +1 * kWordSize)); // Other.
+ __ movq(RAX, Address(RSP, +2 * target::kWordSize)); // This.
+ __ movq(RCX, Address(RSP, +1 * target::kWordSize)); // Other.
// Are identical?
__ cmpq(RAX, RCX);
__ j(EQUAL, &is_true, Assembler::kNearJump);
- // Is other OneByteString?
+ // Is other target::OneByteString?
__ testq(RCX, Immediate(kSmiTagMask));
__ j(ZERO, &is_false); // Smi
__ CompareClassId(RCX, string_cid);
__ j(NOT_EQUAL, normal_ir_body, Assembler::kNearJump);
// Have same length?
- __ movq(RDI, FieldAddress(RAX, String::length_offset()));
- __ cmpq(RDI, FieldAddress(RCX, String::length_offset()));
+ __ movq(RDI, FieldAddress(RAX, target::String::length_offset()));
+ __ cmpq(RDI, FieldAddress(RCX, target::String::length_offset()));
__ j(NOT_EQUAL, &is_false, Assembler::kNearJump);
// Check contents, no fall-through possible.
@@ -2122,15 +2165,15 @@
__ cmpq(RDI, Immediate(0));
__ j(LESS, &is_true, Assembler::kNearJump);
if (string_cid == kOneByteStringCid) {
- __ movzxb(RBX,
- FieldAddress(RAX, RDI, TIMES_1, OneByteString::data_offset()));
- __ movzxb(RDX,
- FieldAddress(RCX, RDI, TIMES_1, OneByteString::data_offset()));
+ __ movzxb(RBX, FieldAddress(RAX, RDI, TIMES_1,
+ target::OneByteString::data_offset()));
+ __ movzxb(RDX, FieldAddress(RCX, RDI, TIMES_1,
+ target::OneByteString::data_offset()));
} else if (string_cid == kTwoByteStringCid) {
- __ movzxw(RBX,
- FieldAddress(RAX, RDI, TIMES_2, TwoByteString::data_offset()));
- __ movzxw(RDX,
- FieldAddress(RCX, RDI, TIMES_2, TwoByteString::data_offset()));
+ __ movzxw(RBX, FieldAddress(RAX, RDI, TIMES_2,
+ target::TwoByteString::data_offset()));
+ __ movzxw(RDX, FieldAddress(RCX, RDI, TIMES_2,
+ target::TwoByteString::data_offset()));
} else {
UNIMPLEMENTED();
}
@@ -2139,33 +2182,33 @@
__ jmp(&loop, Assembler::kNearJump);
__ Bind(&is_true);
- __ LoadObject(RAX, Bool::True());
+ __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
__ ret();
__ Bind(&is_false);
- __ LoadObject(RAX, Bool::False());
+ __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
__ ret();
__ Bind(normal_ir_body);
}
-void Intrinsifier::OneByteString_equality(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::OneByteString_equality(Assembler* assembler,
+ Label* normal_ir_body) {
StringEquality(assembler, normal_ir_body, kOneByteStringCid);
}
-void Intrinsifier::TwoByteString_equality(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::TwoByteString_equality(Assembler* assembler,
+ Label* normal_ir_body) {
StringEquality(assembler, normal_ir_body, kTwoByteStringCid);
}
-void Intrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler,
- Label* normal_ir_body,
- bool sticky) {
+void AsmIntrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler,
+ Label* normal_ir_body,
+ bool sticky) {
if (FLAG_interpret_irregexp) return;
- static const intptr_t kRegExpParamOffset = 3 * kWordSize;
- static const intptr_t kStringParamOffset = 2 * kWordSize;
+ static const intptr_t kRegExpParamOffset = 3 * target::kWordSize;
+ static const intptr_t kStringParamOffset = 2 * target::kWordSize;
// start_index smi is located at offset 1.
// Incoming registers:
@@ -2179,91 +2222,92 @@
__ movq(RDI, Address(RSP, kStringParamOffset));
__ LoadClassId(RDI, RDI);
__ SubImmediate(RDI, Immediate(kOneByteStringCid));
- __ movq(RAX,
- FieldAddress(RBX, RDI, TIMES_8,
- RegExp::function_offset(kOneByteStringCid, sticky)));
+ __ movq(RAX, FieldAddress(
+ RBX, RDI, TIMES_8,
+ target::RegExp::function_offset(kOneByteStringCid, sticky)));
// Registers are now set up for the lazy compile stub. It expects the function
// in RAX, the argument descriptor in R10, and IC-Data in RCX.
__ xorq(RCX, RCX);
// Tail-call the function.
- __ movq(CODE_REG, FieldAddress(RAX, Function::code_offset()));
- __ movq(RDI, FieldAddress(RAX, Function::entry_point_offset()));
+ __ movq(CODE_REG, FieldAddress(RAX, target::Function::code_offset()));
+ __ movq(RDI, FieldAddress(RAX, target::Function::entry_point_offset()));
__ jmp(RDI);
}
// On stack: user tag (+1), return-address (+0).
-void Intrinsifier::UserTag_makeCurrent(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::UserTag_makeCurrent(Assembler* assembler,
+ Label* normal_ir_body) {
// RBX: Isolate.
__ LoadIsolate(RBX);
// RAX: Current user tag.
- __ movq(RAX, Address(RBX, Isolate::current_tag_offset()));
+ __ movq(RAX, Address(RBX, target::Isolate::current_tag_offset()));
// R10: UserTag.
- __ movq(R10, Address(RSP, +1 * kWordSize));
+ __ movq(R10, Address(RSP, +1 * target::kWordSize));
// Set Isolate::current_tag_.
- __ movq(Address(RBX, Isolate::current_tag_offset()), R10);
+ __ movq(Address(RBX, target::Isolate::current_tag_offset()), R10);
// R10: UserTag's tag.
- __ movq(R10, FieldAddress(R10, UserTag::tag_offset()));
+ __ movq(R10, FieldAddress(R10, target::UserTag::tag_offset()));
// Set Isolate::user_tag_.
- __ movq(Address(RBX, Isolate::user_tag_offset()), R10);
+ __ movq(Address(RBX, target::Isolate::user_tag_offset()), R10);
__ ret();
}
-void Intrinsifier::UserTag_defaultTag(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::UserTag_defaultTag(Assembler* assembler,
+ Label* normal_ir_body) {
__ LoadIsolate(RAX);
- __ movq(RAX, Address(RAX, Isolate::default_tag_offset()));
+ __ movq(RAX, Address(RAX, target::Isolate::default_tag_offset()));
__ ret();
}
-void Intrinsifier::Profiler_getCurrentTag(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Profiler_getCurrentTag(Assembler* assembler,
+ Label* normal_ir_body) {
__ LoadIsolate(RAX);
- __ movq(RAX, Address(RAX, Isolate::current_tag_offset()));
+ __ movq(RAX, Address(RAX, target::Isolate::current_tag_offset()));
__ ret();
}
-void Intrinsifier::Timeline_isDartStreamEnabled(Assembler* assembler,
- Label* normal_ir_body) {
+void AsmIntrinsifier::Timeline_isDartStreamEnabled(Assembler* assembler,
+ Label* normal_ir_body) {
#if !defined(SUPPORT_TIMELINE)
- __ LoadObject(RAX, Bool::False());
+ __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
__ ret();
#else
Label true_label;
// Load TimelineStream*.
- __ movq(RAX, Address(THR, Thread::dart_stream_offset()));
+ __ movq(RAX, Address(THR, target::Thread::dart_stream_offset()));
// Load uintptr_t from TimelineStream*.
- __ movq(RAX, Address(RAX, TimelineStream::enabled_offset()));
+ __ movq(RAX, Address(RAX, target::TimelineStream::enabled_offset()));
__ cmpq(RAX, Immediate(0));
__ j(NOT_ZERO, &true_label, Assembler::kNearJump);
// Not enabled.
- __ LoadObject(RAX, Bool::False());
+ __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
__ ret();
// Enabled.
__ Bind(&true_label);
- __ LoadObject(RAX, Bool::True());
+ __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
__ ret();
#endif
}
-void Intrinsifier::ClearAsyncThreadStackTrace(Assembler* assembler,
- Label* normal_ir_body) {
- __ LoadObject(RAX, Object::null_object());
- __ movq(Address(THR, Thread::async_stack_trace_offset()), RAX);
+void AsmIntrinsifier::ClearAsyncThreadStackTrace(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ LoadObject(RAX, NullObject());
+ __ movq(Address(THR, target::Thread::async_stack_trace_offset()), RAX);
__ ret();
}
-void Intrinsifier::SetAsyncThreadStackTrace(Assembler* assembler,
- Label* normal_ir_body) {
- __ movq(Address(THR, Thread::async_stack_trace_offset()), RAX);
- __ LoadObject(RAX, Object::null_object());
+void AsmIntrinsifier::SetAsyncThreadStackTrace(Assembler* assembler,
+ Label* normal_ir_body) {
+ __ movq(Address(THR, target::Thread::async_stack_trace_offset()), RAX);
+ __ LoadObject(RAX, NullObject());
__ ret();
}
#undef __
+} // namespace compiler
} // namespace dart
#endif // defined(TARGET_ARCH_X64) && !defined(DART_PRECOMPILED_RUNTIME)
diff --git a/runtime/vm/compiler/backend/flow_graph.h b/runtime/vm/compiler/backend/flow_graph.h
index 0e3673f..0012962 100644
--- a/runtime/vm/compiler/backend/flow_graph.h
+++ b/runtime/vm/compiler/backend/flow_graph.h
@@ -17,6 +17,10 @@
class LoopHierarchy;
class VariableLivenessAnalysis;
+namespace compiler {
+class GraphIntrinsifier;
+}
+
class BlockIterator : public ValueObject {
public:
explicit BlockIterator(const GrowableArray<BlockEntryInstr*>& block_order)
@@ -415,7 +419,7 @@
friend class BranchSimplifier;
friend class ConstantPropagator;
friend class DeadCodeElimination;
- friend class Intrinsifier;
+ friend class compiler::GraphIntrinsifier;
// SSA transformation methods and fields.
void ComputeDominators(GrowableArray<BitVector*>* dominance_frontier);
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler.cc b/runtime/vm/compiler/backend/flow_graph_compiler.cc
index d051c21..cd503e0 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler.cc
@@ -1220,7 +1220,7 @@
EnterIntrinsicMode();
SpecialStatsBegin(CombinedCodeStatistics::kTagIntrinsics);
- bool complete = Intrinsifier::Intrinsify(parsed_function(), this);
+ bool complete = compiler::Intrinsifier::Intrinsify(parsed_function(), this);
SpecialStatsEnd(CombinedCodeStatistics::kTagIntrinsics);
ExitIntrinsicMode();
diff --git a/runtime/vm/compiler/backend/il.h b/runtime/vm/compiler/backend/il.h
index 373a5b2..6241f4b 100644
--- a/runtime/vm/compiler/backend/il.h
+++ b/runtime/vm/compiler/backend/il.h
@@ -45,6 +45,10 @@
class UnboxIntegerInstr;
class TypeUsageInfo;
+namespace compiler {
+class BlockBuilder;
+}
+
class Value : public ZoneAllocated {
public:
// A forward iterator that allows removing the current value from the
@@ -7834,7 +7838,7 @@
private:
friend class ShallowIterator;
- friend class BlockBuilder; // For Environment constructor.
+ friend class compiler::BlockBuilder; // For Environment constructor.
Environment(intptr_t length,
intptr_t fixed_parameter_count,
diff --git a/runtime/vm/compiler/compiler_sources.gni b/runtime/vm/compiler/compiler_sources.gni
index d41a5a8..24c985a 100644
--- a/runtime/vm/compiler/compiler_sources.gni
+++ b/runtime/vm/compiler/compiler_sources.gni
@@ -9,6 +9,13 @@
"aot/aot_call_specializer.h",
"aot/precompiler.cc",
"aot/precompiler.h",
+ "asm_intrinsifier.cc",
+ "asm_intrinsifier.h",
+ "asm_intrinsifier_arm.cc",
+ "asm_intrinsifier_arm64.cc",
+ "asm_intrinsifier_dbc.cc",
+ "asm_intrinsifier_ia32.cc",
+ "asm_intrinsifier_x64.cc",
"assembler/assembler.cc",
"assembler/assembler.h",
"assembler/assembler_arm.cc",
@@ -36,9 +43,9 @@
"backend/branch_optimizer.h",
"backend/code_statistics.cc",
"backend/code_statistics.h",
+ "backend/compile_type.h",
"backend/constant_propagator.cc",
"backend/constant_propagator.h",
- "backend/compile_type.h",
"backend/flow_graph.cc",
"backend/flow_graph.h",
"backend/flow_graph_checker.cc",
@@ -107,29 +114,31 @@
"frontend/prologue_builder.h",
"frontend/scope_builder.cc",
"frontend/scope_builder.h",
+ "graph_intrinsifier.cc",
+ "graph_intrinsifier.h",
+ "graph_intrinsifier_arm.cc",
+ "graph_intrinsifier_arm64.cc",
+ "graph_intrinsifier_ia32.cc",
+ "graph_intrinsifier_x64.cc",
"intrinsifier.cc",
"intrinsifier.h",
- "intrinsifier_arm.cc",
- "intrinsifier_arm64.cc",
- "intrinsifier_dbc.cc",
- "intrinsifier_ia32.cc",
- "intrinsifier_x64.cc",
"jit/compiler.cc",
"jit/compiler.h",
"jit/jit_call_specializer.cc",
"jit/jit_call_specializer.h",
"method_recognizer.cc",
"method_recognizer.h",
+ "recognized_methods_list.h",
+ "relocation.cc",
+ "relocation.h",
+ "runtime_api.cc",
+ "runtime_api.h",
"stub_code_compiler.h",
"stub_code_compiler_arm.cc",
"stub_code_compiler_arm64.cc",
"stub_code_compiler_dbc.cc",
"stub_code_compiler_ia32.cc",
"stub_code_compiler_x64.cc",
- "relocation.cc",
- "relocation.h",
- "runtime_api.cc",
- "runtime_api.h",
]
compiler_sources_tests = [
diff --git a/runtime/vm/compiler/graph_intrinsifier.cc b/runtime/vm/compiler/graph_intrinsifier.cc
new file mode 100644
index 0000000..d537054
--- /dev/null
+++ b/runtime/vm/compiler/graph_intrinsifier.cc
@@ -0,0 +1,1039 @@
+// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+// Class for intrinsifying functions.
+
+// DBC does not use graph intrinsics.
+#if !defined(DART_PRECOMPILED_RUNTIME) && !defined(TARGET_ARCH_DBC)
+
+#include "vm/compiler/graph_intrinsifier.h"
+#include "vm/compiler/backend/flow_graph.h"
+#include "vm/compiler/backend/flow_graph_compiler.h"
+#include "vm/compiler/backend/il.h"
+#include "vm/compiler/backend/il_printer.h"
+#include "vm/compiler/backend/linearscan.h"
+#include "vm/compiler/jit/compiler.h"
+#include "vm/cpu.h"
+
+namespace dart {
+
+DECLARE_FLAG(bool, code_comments);
+DECLARE_FLAG(bool, print_flow_graph);
+DECLARE_FLAG(bool, print_flow_graph_optimized);
+
+namespace compiler {
+
+static void EmitCodeFor(FlowGraphCompiler* compiler, FlowGraph* graph) {
+ // The FlowGraph here is constructed by the intrinsics builder methods, and
+ // is different from compiler->flow_graph(), the original method's flow graph.
+ compiler->assembler()->Comment("Graph intrinsic begin");
+ for (intptr_t i = 0; i < graph->reverse_postorder().length(); i++) {
+ BlockEntryInstr* block = graph->reverse_postorder()[i];
+ if (block->IsGraphEntry()) continue; // No code for graph entry needed.
+
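+ // Emit the parallel moves inserted by the register allocator at block
+ // entry before the block's own instructions.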
+ if (block->HasParallelMove()) {
+ compiler->parallel_move_resolver()->EmitNativeCode(
+ block->parallel_move());
+ }
+
+ for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
+ Instruction* instr = it.Current();
+ if (FLAG_code_comments) compiler->EmitComment(instr);
+ if (instr->IsParallelMove()) {
+ compiler->parallel_move_resolver()->EmitNativeCode(
+ instr->AsParallelMove());
+ } else if (instr->IsInvokeMathCFunction()) {
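+ // InvokeMathCFunction is the only call-like instruction allowed here;
+ // it is bracketed by the intrinsic call prologue/epilogue. Every other
+ // instruction must not call (see the ASSERT in the else branch).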
+ ASSERT(instr->locs() != NULL);
+ GraphIntrinsifier::IntrinsicCallPrologue(compiler->assembler());
+ instr->EmitNativeCode(compiler);
+ GraphIntrinsifier::IntrinsicCallEpilogue(compiler->assembler());
+ } else {
+ ASSERT(instr->locs() != NULL);
+ // Calls are not supported in intrinsics code.
+ ASSERT(!instr->locs()->always_calls());
+ instr->EmitNativeCode(compiler);
+ }
+ }
+ }
+ compiler->assembler()->Comment("Graph intrinsic end");
+}
+
+bool GraphIntrinsifier::GraphIntrinsify(const ParsedFunction& parsed_function,
+ FlowGraphCompiler* compiler) {
+ ASSERT(!parsed_function.function().HasOptionalParameters());
+ PrologueInfo prologue_info(-1, -1);
+
+ auto graph_entry =
+ new GraphEntryInstr(parsed_function, Compiler::kNoOSRDeoptId);
+
+ intptr_t block_id = 1; // 0 is GraphEntry.
+ graph_entry->set_normal_entry(
+ new FunctionEntryInstr(graph_entry, block_id, kInvalidTryIndex,
+ CompilerState::Current().GetNextDeoptId()));
+
+ FlowGraph* graph =
+ new FlowGraph(parsed_function, graph_entry, block_id, prologue_info);
+ const Function& function = parsed_function.function();
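+ // Dispatch on the recognized method: each Build_* helper appends the
+ // fast-path IR to the graph, or returns false to fall back to normal
+ // compilation.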
+ switch (function.recognized_kind()) {
+#define EMIT_CASE(class_name, function_name, enum_name, fp) \
+ case MethodRecognizer::k##enum_name: \
+ if (!Build_##enum_name(graph)) return false; \
+ break;
+
+ GRAPH_INTRINSICS_LIST(EMIT_CASE);
+ default:
+ return false;
+#undef EMIT_CASE
+ }
+
+ if (FLAG_support_il_printer && FLAG_print_flow_graph &&
+ FlowGraphPrinter::ShouldPrint(function)) {
+ THR_Print("Intrinsic graph before\n");
+ FlowGraphPrinter printer(*graph);
+ printer.PrintBlocks();
+ }
+
+ // Prepare for register allocation (cf. FinalizeGraph).
+ graph->RemoveRedefinitions();
+
+ // Ensure loop hierarchy has been computed.
+ GrowableArray<BitVector*> dominance_frontier;
+ graph->ComputeDominators(&dominance_frontier);
+ graph->GetLoopHierarchy();
+
+ // Perform register allocation on the SSA graph.
+ FlowGraphAllocator allocator(*graph, true); // Intrinsic mode.
+ allocator.AllocateRegisters();
+
+ if (FLAG_support_il_printer && FLAG_print_flow_graph &&
+ FlowGraphPrinter::ShouldPrint(function)) {
+ THR_Print("Intrinsic graph after\n");
+ FlowGraphPrinter printer(*graph);
+ printer.PrintBlocks();
+ }
+ EmitCodeFor(compiler, graph);
+ return true;
+}
+
+static intptr_t CidForRepresentation(Representation rep) {
+ switch (rep) {
+ case kUnboxedDouble:
+ return kDoubleCid;
+ case kUnboxedFloat32x4:
+ return kFloat32x4Cid;
+ case kUnboxedInt32x4:
+ return kInt32x4Cid;
+ case kUnboxedFloat64x2:
+ return kFloat64x2Cid;
+ case kUnboxedUint32:
+ return kDynamicCid; // smi or mint.
+ default:
+ UNREACHABLE();
+ return kIllegalCid;
+ }
+}
+
+static Representation RepresentationForCid(intptr_t cid) {
+ switch (cid) {
+ case kDoubleCid:
+ return kUnboxedDouble;
+ case kFloat32x4Cid:
+ return kUnboxedFloat32x4;
+ case kInt32x4Cid:
+ return kUnboxedInt32x4;
+ case kFloat64x2Cid:
+ return kUnboxedFloat64x2;
+ default:
+ UNREACHABLE();
+ return kNoRepresentation;
+ }
+}
+
+// Notes about the graph intrinsics:
+//
+// IR instructions which would jump to a deoptimization sequence on failure
+// instead branch to the intrinsic slow path.
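+// For graph intrinsics the slow path is simply the regularly compiled
+// method body that follows the intrinsic code.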
+//
+class BlockBuilder : public ValueObject {
+ public:
+ BlockBuilder(FlowGraph* flow_graph, BlockEntryInstr* entry)
+ : flow_graph_(flow_graph),
+ entry_(entry),
+ current_(entry),
+ fall_through_env_(new Environment(0,
+ 0,
+ DeoptId::kNone,
+ flow_graph->parsed_function(),
+ NULL)) {}
+
+ Definition* AddToInitialDefinitions(Definition* def) {
+ def->set_ssa_temp_index(flow_graph_->alloc_ssa_temp_index());
+ auto normal_entry = flow_graph_->graph_entry()->normal_entry();
+ flow_graph_->AddToInitialDefinitions(normal_entry, def);
+ return def;
+ }
+
+ Definition* AddDefinition(Definition* def) {
+ def->set_ssa_temp_index(flow_graph_->alloc_ssa_temp_index());
+ AddInstruction(def);
+ return def;
+ }
+
+ Instruction* AddInstruction(Instruction* instr) {
+ if (instr->ComputeCanDeoptimize()) {
+ // Since we use the presence of an environment to determine if an
+ // instruction can deoptimize, we need an empty environment for
+ // instructions that "deoptimize" to the intrinsic fall-through code.
+ instr->SetEnvironment(fall_through_env_);
+ }
+ current_ = current_->AppendInstruction(instr);
+ return instr;
+ }
+
+ void AddIntrinsicReturn(Value* value) {
+ ReturnInstr* instr = new ReturnInstr(
+ TokenPos(), value, CompilerState::Current().GetNextDeoptId());
+ AddInstruction(instr);
+ entry_->set_last_instruction(instr);
+ }
+
+ Definition* AddParameter(intptr_t index) {
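+ // Intrinsic code runs before a frame is set up, so parameters are
+ // addressed relative to SP; ParameterSlotFromSp() supplies the
+ // target-specific base offset.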
+ intptr_t adjustment = GraphIntrinsifier::ParameterSlotFromSp();
+ return AddToInitialDefinitions(new ParameterInstr(
+ adjustment + index, flow_graph_->graph_entry(), SPREG));
+ }
+
+ TokenPosition TokenPos() { return flow_graph_->function().token_pos(); }
+
+ Definition* AddNullDefinition() {
+ return AddDefinition(new ConstantInstr(Object::ZoneHandle(Object::null())));
+ }
+
+ Definition* AddUnboxInstr(Representation rep, Value* value, bool is_checked) {
+ Definition* unboxed_value =
+ AddDefinition(UnboxInstr::Create(rep, value, DeoptId::kNone));
+ if (is_checked) {
+ // The type of |value| has already been checked and it is safe to
+ // adjust reaching type. This is done manually because there is no type
+ // propagation when building intrinsics.
+ unboxed_value->AsUnbox()->value()->SetReachingType(
+ new CompileType(CompileType::FromCid(CidForRepresentation(rep))));
+ }
+ return unboxed_value;
+ }
+
+ Definition* AddUnboxInstr(Representation rep,
+ Definition* boxed,
+ bool is_checked) {
+ return AddUnboxInstr(rep, new Value(boxed), is_checked);
+ }
+
+ Definition* InvokeMathCFunction(MethodRecognizer::Kind recognized_kind,
+ ZoneGrowableArray<Value*>* args) {
+ return InvokeMathCFunctionHelper(recognized_kind, args);
+ }
+
+ private:
+ Definition* InvokeMathCFunctionHelper(MethodRecognizer::Kind recognized_kind,
+ ZoneGrowableArray<Value*>* args) {
+ InvokeMathCFunctionInstr* invoke_math_c_function =
+ new InvokeMathCFunctionInstr(args, DeoptId::kNone, recognized_kind,
+ TokenPos());
+ AddDefinition(invoke_math_c_function);
+ return invoke_math_c_function;
+ }
+
+ FlowGraph* flow_graph_;
+ BlockEntryInstr* entry_;
+ Instruction* current_;
+ Environment* fall_through_env_;
+};
+
+static Definition* PrepareIndexedOp(FlowGraph* flow_graph,
+ BlockBuilder* builder,
+ Definition* array,
+ Definition* index,
+ const Slot& length_field) {
+ Definition* length = builder->AddDefinition(new LoadFieldInstr(
+ new Value(array), length_field, TokenPosition::kNoSource));
+ // Note that the intrinsifier must always use deopting array bound
+ // checks, because intrinsics currently don't support calls.
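+ // Callers use the returned definition as the index so that subsequent
+ // loads and stores consume the bounds-checked value.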
+ Definition* safe_index = new CheckArrayBoundInstr(
+ new Value(length), new Value(index), DeoptId::kNone);
+ builder->AddDefinition(safe_index);
+ return safe_index;
+}
+
+static bool IntrinsifyArrayGetIndexed(FlowGraph* flow_graph,
+ intptr_t array_cid) {
+ GraphEntryInstr* graph_entry = flow_graph->graph_entry();
+ auto normal_entry = graph_entry->normal_entry();
+ BlockBuilder builder(flow_graph, normal_entry);
+
+ Definition* index = builder.AddParameter(1);
+ Definition* array = builder.AddParameter(2);
+
+ index = PrepareIndexedOp(flow_graph, &builder, array, index,
+ Slot::GetLengthFieldForArrayCid(array_cid));
+
+ if (RawObject::IsExternalTypedDataClassId(array_cid)) {
+ array = builder.AddDefinition(new LoadUntaggedInstr(
+ new Value(array), ExternalTypedData::data_offset()));
+ }
+
+ Definition* result = builder.AddDefinition(new LoadIndexedInstr(
+ new Value(array), new Value(index),
+ Instance::ElementSizeFor(array_cid), // index scale
+ array_cid, kAlignedAccess, DeoptId::kNone, builder.TokenPos()));
+ // Box and/or convert result if necessary.
+ switch (array_cid) {
+ case kTypedDataInt32ArrayCid:
+ case kExternalTypedDataInt32ArrayCid:
+ result = builder.AddDefinition(
+ BoxInstr::Create(kUnboxedInt32, new Value(result)));
+ break;
+ case kTypedDataUint32ArrayCid:
+ case kExternalTypedDataUint32ArrayCid:
+ result = builder.AddDefinition(
+ BoxInstr::Create(kUnboxedUint32, new Value(result)));
+ break;
+ case kTypedDataFloat32ArrayCid:
+ result = builder.AddDefinition(
+ new FloatToDoubleInstr(new Value(result), DeoptId::kNone));
+ // Fall through.
+ case kTypedDataFloat64ArrayCid:
+ result = builder.AddDefinition(
+ BoxInstr::Create(kUnboxedDouble, new Value(result)));
+ break;
+ case kTypedDataFloat32x4ArrayCid:
+ result = builder.AddDefinition(
+ BoxInstr::Create(kUnboxedFloat32x4, new Value(result)));
+ break;
+ case kTypedDataInt32x4ArrayCid:
+ result = builder.AddDefinition(
+ BoxInstr::Create(kUnboxedInt32x4, new Value(result)));
+ break;
+ case kTypedDataFloat64x2ArrayCid:
+ result = builder.AddDefinition(
+ BoxInstr::Create(kUnboxedFloat64x2, new Value(result)));
+ break;
+ case kArrayCid:
+ case kImmutableArrayCid:
+ case kTypedDataInt8ArrayCid:
+ case kTypedDataUint8ArrayCid:
+ case kExternalTypedDataUint8ArrayCid:
+ case kTypedDataUint8ClampedArrayCid:
+ case kExternalTypedDataUint8ClampedArrayCid:
+ case kTypedDataInt16ArrayCid:
+ case kTypedDataUint16ArrayCid:
+ // Nothing to do.
+ break;
+ case kTypedDataInt64ArrayCid:
+ case kTypedDataUint64ArrayCid:
+ result = builder.AddDefinition(
+ BoxInstr::Create(kUnboxedInt64, new Value(result)));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ builder.AddIntrinsicReturn(new Value(result));
+ return true;
+}
+
+static bool IntrinsifyArraySetIndexed(FlowGraph* flow_graph,
+ intptr_t array_cid) {
+ GraphEntryInstr* graph_entry = flow_graph->graph_entry();
+ auto normal_entry = graph_entry->normal_entry();
+ BlockBuilder builder(flow_graph, normal_entry);
+
+ Definition* value = builder.AddParameter(1);
+ Definition* index = builder.AddParameter(2);
+ Definition* array = builder.AddParameter(3);
+
+ index = PrepareIndexedOp(flow_graph, &builder, array, index,
+ Slot::GetLengthFieldForArrayCid(array_cid));
+
+ // Value check/conversion.
+ switch (array_cid) {
+ case kTypedDataInt8ArrayCid:
+ case kTypedDataUint8ArrayCid:
+ case kExternalTypedDataUint8ArrayCid:
+ case kTypedDataUint8ClampedArrayCid:
+ case kExternalTypedDataUint8ClampedArrayCid:
+ case kTypedDataInt16ArrayCid:
+ case kTypedDataUint16ArrayCid:
+ builder.AddInstruction(new CheckSmiInstr(new Value(value), DeoptId::kNone,
+ builder.TokenPos()));
+ break;
+ case kTypedDataInt32ArrayCid:
+ case kExternalTypedDataInt32ArrayCid:
+ // Use same truncating unbox-instruction for int32 and uint32.
+ // Fall-through.
+ case kTypedDataUint32ArrayCid:
+ case kExternalTypedDataUint32ArrayCid:
+ // Supports smi and mint, slow-case for bigints.
+ value = builder.AddUnboxInstr(kUnboxedUint32, new Value(value),
+ /* is_checked = */ false);
+ break;
+ case kTypedDataInt64ArrayCid:
+ case kTypedDataUint64ArrayCid:
+ value = builder.AddUnboxInstr(kUnboxedInt64, new Value(value),
+ /* is_checked = */ false);
+ break;
+
+ case kTypedDataFloat32ArrayCid:
+ case kTypedDataFloat64ArrayCid:
+ case kTypedDataFloat32x4ArrayCid:
+ case kTypedDataInt32x4ArrayCid:
+ case kTypedDataFloat64x2ArrayCid: {
+ intptr_t value_check_cid = kDoubleCid;
+ Representation rep = kUnboxedDouble;
+ switch (array_cid) {
+ case kTypedDataFloat32x4ArrayCid:
+ value_check_cid = kFloat32x4Cid;
+ rep = kUnboxedFloat32x4;
+ break;
+ case kTypedDataInt32x4ArrayCid:
+ value_check_cid = kInt32x4Cid;
+ rep = kUnboxedInt32x4;
+ break;
+ case kTypedDataFloat64x2ArrayCid:
+ value_check_cid = kFloat64x2Cid;
+ rep = kUnboxedFloat64x2;
+ break;
+ default:
+ // Float32/Float64 case already handled.
+ break;
+ }
+ Zone* zone = flow_graph->zone();
+ Cids* value_check = Cids::CreateMonomorphic(zone, value_check_cid);
+ builder.AddInstruction(new CheckClassInstr(
+ new Value(value), DeoptId::kNone, *value_check, builder.TokenPos()));
+ value = builder.AddUnboxInstr(rep, new Value(value),
+ /* is_checked = */ true);
+ if (array_cid == kTypedDataFloat32ArrayCid) {
+ value = builder.AddDefinition(
+ new DoubleToFloatInstr(new Value(value), DeoptId::kNone));
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+
+ if (RawObject::IsExternalTypedDataClassId(array_cid)) {
+ array = builder.AddDefinition(new LoadUntaggedInstr(
+ new Value(array), ExternalTypedData::data_offset()));
+ }
+ // No store barrier.
+ ASSERT(RawObject::IsExternalTypedDataClassId(array_cid) ||
+ RawObject::IsTypedDataClassId(array_cid));
+ builder.AddInstruction(new StoreIndexedInstr(
+ new Value(array), new Value(index), new Value(value), kNoStoreBarrier,
+ Instance::ElementSizeFor(array_cid), // index scale
+ array_cid, kAlignedAccess, DeoptId::kNone, builder.TokenPos()));
+ // Return null.
+ Definition* null_def = builder.AddNullDefinition();
+ builder.AddIntrinsicReturn(new Value(null_def));
+ return true;
+}
+
+#define DEFINE_ARRAY_GETTER_INTRINSIC(enum_name) \
+ bool GraphIntrinsifier::Build_##enum_name##GetIndexed( \
+ FlowGraph* flow_graph) { \
+ return IntrinsifyArrayGetIndexed( \
+ flow_graph, MethodRecognizer::MethodKindToReceiverCid( \
+ MethodRecognizer::k##enum_name##GetIndexed)); \
+ }
+
+#define DEFINE_ARRAY_SETTER_INTRINSIC(enum_name) \
+ bool GraphIntrinsifier::Build_##enum_name##SetIndexed( \
+ FlowGraph* flow_graph) { \
+ return IntrinsifyArraySetIndexed( \
+ flow_graph, MethodRecognizer::MethodKindToReceiverCid( \
+ MethodRecognizer::k##enum_name##SetIndexed)); \
+ }
+
+DEFINE_ARRAY_GETTER_INTRINSIC(ObjectArray)
+DEFINE_ARRAY_GETTER_INTRINSIC(ImmutableArray)
+
+#define DEFINE_ARRAY_GETTER_SETTER_INTRINSICS(enum_name) \
+ DEFINE_ARRAY_GETTER_INTRINSIC(enum_name) \
+ DEFINE_ARRAY_SETTER_INTRINSIC(enum_name)
+
+DEFINE_ARRAY_GETTER_SETTER_INTRINSICS(Int8Array)
+DEFINE_ARRAY_GETTER_SETTER_INTRINSICS(Uint8Array)
+DEFINE_ARRAY_GETTER_SETTER_INTRINSICS(ExternalUint8Array)
+DEFINE_ARRAY_GETTER_SETTER_INTRINSICS(Uint8ClampedArray)
+DEFINE_ARRAY_GETTER_SETTER_INTRINSICS(ExternalUint8ClampedArray)
+DEFINE_ARRAY_GETTER_SETTER_INTRINSICS(Int16Array)
+DEFINE_ARRAY_GETTER_SETTER_INTRINSICS(Uint16Array)
+DEFINE_ARRAY_GETTER_SETTER_INTRINSICS(Int32Array)
+DEFINE_ARRAY_GETTER_SETTER_INTRINSICS(Uint32Array)
+DEFINE_ARRAY_GETTER_SETTER_INTRINSICS(Int64Array)
+DEFINE_ARRAY_GETTER_SETTER_INTRINSICS(Uint64Array)
+
+#undef DEFINE_ARRAY_GETTER_SETTER_INTRINSICS
+#undef DEFINE_ARRAY_GETTER_INTRINSIC
+#undef DEFINE_ARRAY_SETTER_INTRINSIC
+
+#define DEFINE_FLOAT_ARRAY_GETTER_INTRINSIC(enum_name) \
+ bool GraphIntrinsifier::Build_##enum_name##GetIndexed( \
+ FlowGraph* flow_graph) { \
+ if (!FlowGraphCompiler::SupportsUnboxedDoubles()) { \
+ return false; \
+ } \
+ return IntrinsifyArrayGetIndexed( \
+ flow_graph, MethodRecognizer::MethodKindToReceiverCid( \
+ MethodRecognizer::k##enum_name##GetIndexed)); \
+ }
+
+#define DEFINE_FLOAT_ARRAY_SETTER_INTRINSIC(enum_name) \
+ bool GraphIntrinsifier::Build_##enum_name##SetIndexed( \
+ FlowGraph* flow_graph) { \
+ if (!FlowGraphCompiler::SupportsUnboxedDoubles()) { \
+ return false; \
+ } \
+ return IntrinsifyArraySetIndexed( \
+ flow_graph, MethodRecognizer::MethodKindToReceiverCid( \
+ MethodRecognizer::k##enum_name##SetIndexed)); \
+ }
+
+#define DEFINE_FLOAT_ARRAY_GETTER_SETTER_INTRINSICS(enum_name) \
+ DEFINE_FLOAT_ARRAY_GETTER_INTRINSIC(enum_name) \
+ DEFINE_FLOAT_ARRAY_SETTER_INTRINSIC(enum_name)
+
+DEFINE_FLOAT_ARRAY_GETTER_SETTER_INTRINSICS(Float64Array)
+DEFINE_FLOAT_ARRAY_GETTER_SETTER_INTRINSICS(Float32Array)
+
+#undef DEFINE_FLOAT_ARRAY_GETTER_SETTER_INTRINSICS
+#undef DEFINE_FLOAT_ARRAY_GETTER_INTRINSIC
+#undef DEFINE_FLOAT_ARRAY_SETTER_INTRINSIC
+
+#define DEFINE_SIMD_ARRAY_GETTER_INTRINSIC(enum_name) \
+ bool GraphIntrinsifier::Build_##enum_name##GetIndexed( \
+ FlowGraph* flow_graph) { \
+ if (!FlowGraphCompiler::SupportsUnboxedSimd128()) { \
+ return false; \
+ } \
+ return IntrinsifyArrayGetIndexed( \
+ flow_graph, MethodRecognizer::MethodKindToReceiverCid( \
+ MethodRecognizer::k##enum_name##GetIndexed)); \
+ }
+
+#define DEFINE_SIMD_ARRAY_SETTER_INTRINSIC(enum_name) \
+ bool GraphIntrinsifier::Build_##enum_name##SetIndexed( \
+ FlowGraph* flow_graph) { \
+ if (!FlowGraphCompiler::SupportsUnboxedSimd128()) { \
+ return false; \
+ } \
+ return IntrinsifyArraySetIndexed( \
+ flow_graph, MethodRecognizer::MethodKindToReceiverCid( \
+ MethodRecognizer::k##enum_name##SetIndexed)); \
+ }
+
+#define DEFINE_SIMD_ARRAY_GETTER_SETTER_INTRINSICS(enum_name) \
+ DEFINE_SIMD_ARRAY_GETTER_INTRINSIC(enum_name) \
+ DEFINE_SIMD_ARRAY_SETTER_INTRINSIC(enum_name)
+
+DEFINE_SIMD_ARRAY_GETTER_SETTER_INTRINSICS(Float32x4Array)
+DEFINE_SIMD_ARRAY_GETTER_SETTER_INTRINSICS(Int32x4Array)
+DEFINE_SIMD_ARRAY_GETTER_SETTER_INTRINSICS(Float64x2Array)
+
+#undef DEFINE_SIMD_ARRAY_GETTER_SETTER_INTRINSICS
+#undef DEFINE_SIMD_ARRAY_GETTER_INTRINSIC
+#undef DEFINE_SIMD_ARRAY_SETTER_INTRINSIC
+
+static bool BuildCodeUnitAt(FlowGraph* flow_graph, intptr_t cid) {
+ GraphEntryInstr* graph_entry = flow_graph->graph_entry();
+ auto normal_entry = graph_entry->normal_entry();
+ BlockBuilder builder(flow_graph, normal_entry);
+
+ Definition* index = builder.AddParameter(1);
+ Definition* str = builder.AddParameter(2);
+
+ index =
+ PrepareIndexedOp(flow_graph, &builder, str, index, Slot::String_length());
+
+ // For external strings: Load external data.
+ if (cid == kExternalOneByteStringCid) {
+ str = builder.AddDefinition(new LoadUntaggedInstr(
+ new Value(str), ExternalOneByteString::external_data_offset()));
+ } else if (cid == kExternalTwoByteStringCid) {
+ str = builder.AddDefinition(new LoadUntaggedInstr(
+ new Value(str), ExternalTwoByteString::external_data_offset()));
+ }
+
+ Definition* result = builder.AddDefinition(new LoadIndexedInstr(
+ new Value(str), new Value(index), Instance::ElementSizeFor(cid), cid,
+ kAlignedAccess, DeoptId::kNone, builder.TokenPos()));
+ builder.AddIntrinsicReturn(new Value(result));
+ return true;
+}
+
+bool GraphIntrinsifier::Build_OneByteStringCodeUnitAt(FlowGraph* flow_graph) {
+ return BuildCodeUnitAt(flow_graph, kOneByteStringCid);
+}
+
+bool GraphIntrinsifier::Build_TwoByteStringCodeUnitAt(FlowGraph* flow_graph) {
+ return BuildCodeUnitAt(flow_graph, kTwoByteStringCid);
+}
+
+bool GraphIntrinsifier::Build_ExternalOneByteStringCodeUnitAt(
+ FlowGraph* flow_graph) {
+ return BuildCodeUnitAt(flow_graph, kExternalOneByteStringCid);
+}
+
+bool GraphIntrinsifier::Build_ExternalTwoByteStringCodeUnitAt(
+ FlowGraph* flow_graph) {
+ return BuildCodeUnitAt(flow_graph, kExternalTwoByteStringCid);
+}
+
+static bool BuildSimdOp(FlowGraph* flow_graph, intptr_t cid, Token::Kind kind) {
+ if (!FlowGraphCompiler::SupportsUnboxedSimd128()) return false;
+
+ const Representation rep = RepresentationForCid(cid);
+
+ Zone* zone = flow_graph->zone();
+ GraphEntryInstr* graph_entry = flow_graph->graph_entry();
+ auto normal_entry = graph_entry->normal_entry();
+ BlockBuilder builder(flow_graph, normal_entry);
+
+ Definition* right = builder.AddParameter(1);
+ Definition* left = builder.AddParameter(2);
+
+ Cids* value_check = Cids::CreateMonomorphic(zone, cid);
+ // Check argument. Receiver (left) is known to have the cid being intrinsified.
+ builder.AddInstruction(new CheckClassInstr(new Value(right), DeoptId::kNone,
+ *value_check, builder.TokenPos()));
+ Definition* left_simd = builder.AddUnboxInstr(rep, new Value(left),
+ /* is_checked = */ true);
+
+ Definition* right_simd = builder.AddUnboxInstr(rep, new Value(right),
+ /* is_checked = */ true);
+
+ Definition* unboxed_result = builder.AddDefinition(SimdOpInstr::Create(
+ SimdOpInstr::KindForOperator(cid, kind), new Value(left_simd),
+ new Value(right_simd), DeoptId::kNone));
+ Definition* result =
+ builder.AddDefinition(BoxInstr::Create(rep, new Value(unboxed_result)));
+ builder.AddIntrinsicReturn(new Value(result));
+ return true;
+}
+
+bool GraphIntrinsifier::Build_Float32x4Mul(FlowGraph* flow_graph) {
+ return BuildSimdOp(flow_graph, kFloat32x4Cid, Token::kMUL);
+}
+
+bool GraphIntrinsifier::Build_Float32x4Sub(FlowGraph* flow_graph) {
+ return BuildSimdOp(flow_graph, kFloat32x4Cid, Token::kSUB);
+}
+
+bool GraphIntrinsifier::Build_Float32x4Add(FlowGraph* flow_graph) {
+ return BuildSimdOp(flow_graph, kFloat32x4Cid, Token::kADD);
+}
+
+static bool BuildFloat32x4Shuffle(FlowGraph* flow_graph,
+ MethodRecognizer::Kind kind) {
+ if (!FlowGraphCompiler::SupportsUnboxedDoubles() ||
+ !FlowGraphCompiler::SupportsUnboxedSimd128()) {
+ return false;
+ }
+ GraphEntryInstr* graph_entry = flow_graph->graph_entry();
+ auto normal_entry = graph_entry->normal_entry();
+ BlockBuilder builder(flow_graph, normal_entry);
+
+ Definition* receiver = builder.AddParameter(1);
+
+ Definition* unboxed_receiver =
+ builder.AddUnboxInstr(kUnboxedFloat32x4, new Value(receiver),
+ /* is_checked = */ true);
+
+ Definition* unboxed_result = builder.AddDefinition(
+ SimdOpInstr::Create(kind, new Value(unboxed_receiver), DeoptId::kNone));
+
+ Definition* result = builder.AddDefinition(
+ BoxInstr::Create(kUnboxedDouble, new Value(unboxed_result)));
+ builder.AddIntrinsicReturn(new Value(result));
+ return true;
+}
+
+bool GraphIntrinsifier::Build_Float32x4ShuffleX(FlowGraph* flow_graph) {
+ return BuildFloat32x4Shuffle(flow_graph,
+ MethodRecognizer::kFloat32x4ShuffleX);
+}
+
+bool GraphIntrinsifier::Build_Float32x4ShuffleY(FlowGraph* flow_graph) {
+ return BuildFloat32x4Shuffle(flow_graph,
+ MethodRecognizer::kFloat32x4ShuffleY);
+}
+
+bool GraphIntrinsifier::Build_Float32x4ShuffleZ(FlowGraph* flow_graph) {
+ return BuildFloat32x4Shuffle(flow_graph,
+ MethodRecognizer::kFloat32x4ShuffleZ);
+}
+
+bool GraphIntrinsifier::Build_Float32x4ShuffleW(FlowGraph* flow_graph) {
+ return BuildFloat32x4Shuffle(flow_graph,
+ MethodRecognizer::kFloat32x4ShuffleW);
+}
+
+static bool BuildLoadField(FlowGraph* flow_graph, const Slot& field) {
+ GraphEntryInstr* graph_entry = flow_graph->graph_entry();
+ auto normal_entry = graph_entry->normal_entry();
+ BlockBuilder builder(flow_graph, normal_entry);
+
+ Definition* array = builder.AddParameter(1);
+
+ Definition* length = builder.AddDefinition(
+ new LoadFieldInstr(new Value(array), field, builder.TokenPos()));
+ builder.AddIntrinsicReturn(new Value(length));
+ return true;
+}
+
+bool GraphIntrinsifier::Build_ObjectArrayLength(FlowGraph* flow_graph) {
+ return BuildLoadField(flow_graph, Slot::Array_length());
+}
+
+bool GraphIntrinsifier::Build_ImmutableArrayLength(FlowGraph* flow_graph) {
+ return BuildLoadField(flow_graph, Slot::Array_length());
+}
+
+bool GraphIntrinsifier::Build_GrowableArrayLength(FlowGraph* flow_graph) {
+ return BuildLoadField(flow_graph, Slot::GrowableObjectArray_length());
+}
+
+bool GraphIntrinsifier::Build_StringBaseLength(FlowGraph* flow_graph) {
+ return BuildLoadField(flow_graph, Slot::String_length());
+}
+
+bool GraphIntrinsifier::Build_TypedDataLength(FlowGraph* flow_graph) {
+ return BuildLoadField(flow_graph, Slot::TypedData_length());
+}
+
+bool GraphIntrinsifier::Build_GrowableArrayCapacity(FlowGraph* flow_graph) {
+ GraphEntryInstr* graph_entry = flow_graph->graph_entry();
+ auto normal_entry = graph_entry->normal_entry();
+ BlockBuilder builder(flow_graph, normal_entry);
+
+ Definition* array = builder.AddParameter(1);
+
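+ // The capacity of a growable array is the length of its backing store,
+ // obtained via two dependent field loads.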
+ Definition* backing_store = builder.AddDefinition(new LoadFieldInstr(
+ new Value(array), Slot::GrowableObjectArray_data(), builder.TokenPos()));
+ Definition* capacity = builder.AddDefinition(new LoadFieldInstr(
+ new Value(backing_store), Slot::Array_length(), builder.TokenPos()));
+ builder.AddIntrinsicReturn(new Value(capacity));
+ return true;
+}
+
+bool GraphIntrinsifier::Build_GrowableArrayGetIndexed(FlowGraph* flow_graph) {
+ GraphEntryInstr* graph_entry = flow_graph->graph_entry();
+ auto normal_entry = graph_entry->normal_entry();
+ BlockBuilder builder(flow_graph, normal_entry);
+
+ Definition* index = builder.AddParameter(1);
+ Definition* growable_array = builder.AddParameter(2);
+
+ index = PrepareIndexedOp(flow_graph, &builder, growable_array, index,
+ Slot::GrowableObjectArray_length());
+
+ Definition* backing_store = builder.AddDefinition(
+ new LoadFieldInstr(new Value(growable_array),
+ Slot::GrowableObjectArray_data(), builder.TokenPos()));
+ Definition* result = builder.AddDefinition(new LoadIndexedInstr(
+ new Value(backing_store), new Value(index),
+ Instance::ElementSizeFor(kArrayCid), // index scale
+ kArrayCid, kAlignedAccess, DeoptId::kNone, builder.TokenPos()));
+ builder.AddIntrinsicReturn(new Value(result));
+ return true;
+}
+
+bool GraphIntrinsifier::Build_ObjectArraySetIndexed(FlowGraph* flow_graph) {
+ if (Isolate::Current()->argument_type_checks()) {
+ return false;
+ }
+
+ return Build_ObjectArraySetIndexedUnchecked(flow_graph);
+}
+
+bool GraphIntrinsifier::Build_ObjectArraySetIndexedUnchecked(
+ FlowGraph* flow_graph) {
+ GraphEntryInstr* graph_entry = flow_graph->graph_entry();
+ auto normal_entry = graph_entry->normal_entry();
+ BlockBuilder builder(flow_graph, normal_entry);
+
+ Definition* value = builder.AddParameter(1);
+ Definition* index = builder.AddParameter(2);
+ Definition* array = builder.AddParameter(3);
+
+ index = PrepareIndexedOp(flow_graph, &builder, array, index,
+ Slot::Array_length());
+
+ builder.AddInstruction(new StoreIndexedInstr(
+ new Value(array), new Value(index), new Value(value), kEmitStoreBarrier,
+ Instance::ElementSizeFor(kArrayCid), // index scale
+ kArrayCid, kAlignedAccess, DeoptId::kNone, builder.TokenPos()));
+ // Return null.
+ Definition* null_def = builder.AddNullDefinition();
+ builder.AddIntrinsicReturn(new Value(null_def));
+ return true;
+}
+
+bool GraphIntrinsifier::Build_GrowableArraySetIndexed(FlowGraph* flow_graph) {
+ if (Isolate::Current()->argument_type_checks()) {
+ return false;
+ }
+
+ return Build_GrowableArraySetIndexedUnchecked(flow_graph);
+}
+
+bool GraphIntrinsifier::Build_GrowableArraySetIndexedUnchecked(
+ FlowGraph* flow_graph) {
+ GraphEntryInstr* graph_entry = flow_graph->graph_entry();
+ auto normal_entry = graph_entry->normal_entry();
+ BlockBuilder builder(flow_graph, normal_entry);
+
+ Definition* value = builder.AddParameter(1);
+ Definition* index = builder.AddParameter(2);
+ Definition* array = builder.AddParameter(3);
+
+ index = PrepareIndexedOp(flow_graph, &builder, array, index,
+ Slot::GrowableObjectArray_length());
+
+ Definition* backing_store = builder.AddDefinition(new LoadFieldInstr(
+ new Value(array), Slot::GrowableObjectArray_data(), builder.TokenPos()));
+
+ builder.AddInstruction(new StoreIndexedInstr(
+ new Value(backing_store), new Value(index), new Value(value),
+ kEmitStoreBarrier,
+ Instance::ElementSizeFor(kArrayCid), // index scale
+ kArrayCid, kAlignedAccess, DeoptId::kNone, builder.TokenPos()));
+ // Return null.
+ Definition* null_def = builder.AddNullDefinition();
+ builder.AddIntrinsicReturn(new Value(null_def));
+ return true;
+}
+
+bool GraphIntrinsifier::Build_GrowableArraySetData(FlowGraph* flow_graph) {
+ GraphEntryInstr* graph_entry = flow_graph->graph_entry();
+ auto normal_entry = graph_entry->normal_entry();
+ BlockBuilder builder(flow_graph, normal_entry);
+
+ Definition* data = builder.AddParameter(1);
+ Definition* growable_array = builder.AddParameter(2);
+ Zone* zone = flow_graph->zone();
+
+ Cids* value_check = Cids::CreateMonomorphic(zone, kArrayCid);
+ builder.AddInstruction(new CheckClassInstr(new Value(data), DeoptId::kNone,
+ *value_check, builder.TokenPos()));
+
+ builder.AddInstruction(new StoreInstanceFieldInstr(
+ Slot::GrowableObjectArray_data(), new Value(growable_array),
+ new Value(data), kEmitStoreBarrier, builder.TokenPos()));
+ // Return null.
+ Definition* null_def = builder.AddNullDefinition();
+ builder.AddIntrinsicReturn(new Value(null_def));
+ return true;
+}
+
+bool GraphIntrinsifier::Build_GrowableArraySetLength(FlowGraph* flow_graph) {
+ GraphEntryInstr* graph_entry = flow_graph->graph_entry();
+ auto normal_entry = graph_entry->normal_entry();
+ BlockBuilder builder(flow_graph, normal_entry);
+
+ Definition* length = builder.AddParameter(1);
+ Definition* growable_array = builder.AddParameter(2);
+
+ builder.AddInstruction(
+ new CheckSmiInstr(new Value(length), DeoptId::kNone, builder.TokenPos()));
+ builder.AddInstruction(new StoreInstanceFieldInstr(
+ Slot::GrowableObjectArray_length(), new Value(growable_array),
+ new Value(length), kNoStoreBarrier, builder.TokenPos()));
+ Definition* null_def = builder.AddNullDefinition();
+ builder.AddIntrinsicReturn(new Value(null_def));
+ return true;
+}
+
+bool GraphIntrinsifier::Build_DoubleFlipSignBit(FlowGraph* flow_graph) {
+ if (!FlowGraphCompiler::SupportsUnboxedDoubles()) {
+ return false;
+ }
+ GraphEntryInstr* graph_entry = flow_graph->graph_entry();
+ auto normal_entry = graph_entry->normal_entry();
+ BlockBuilder builder(flow_graph, normal_entry);
+
+ Definition* receiver = builder.AddParameter(1);
+ Definition* unboxed_value =
+ builder.AddUnboxInstr(kUnboxedDouble, new Value(receiver),
+ /* is_checked = */ true);
+ Definition* unboxed_result = builder.AddDefinition(new UnaryDoubleOpInstr(
+ Token::kNEGATE, new Value(unboxed_value), DeoptId::kNone));
+ Definition* result = builder.AddDefinition(
+ BoxInstr::Create(kUnboxedDouble, new Value(unboxed_result)));
+ builder.AddIntrinsicReturn(new Value(result));
+ return true;
+}
+
+static bool BuildInvokeMathCFunction(BlockBuilder* builder,
+ MethodRecognizer::Kind kind,
+ intptr_t num_parameters = 1) {
+ if (!FlowGraphCompiler::SupportsUnboxedDoubles()) {
+ return false;
+ }
+ ZoneGrowableArray<Value*>* args =
+ new ZoneGrowableArray<Value*>(num_parameters);
+
+ for (intptr_t i = 0; i < num_parameters; i++) {
+ const intptr_t parameter_index = (num_parameters - i);
+ Definition* value = builder->AddParameter(parameter_index);
+ Definition* unboxed_value =
+ builder->AddUnboxInstr(kUnboxedDouble, value, /* is_checked = */ false);
+ args->Add(new Value(unboxed_value));
+ }
+
+ Definition* unboxed_result = builder->InvokeMathCFunction(kind, args);
+
+ Definition* result = builder->AddDefinition(
+ BoxInstr::Create(kUnboxedDouble, new Value(unboxed_result)));
+
+ builder->AddIntrinsicReturn(new Value(result));
+
+ return true;
+}
+
+bool GraphIntrinsifier::Build_MathSin(FlowGraph* flow_graph) {
+ if (!FlowGraphCompiler::SupportsUnboxedDoubles()) return false;
+
+ GraphEntryInstr* graph_entry = flow_graph->graph_entry();
+ auto normal_entry = graph_entry->normal_entry();
+ BlockBuilder builder(flow_graph, normal_entry);
+
+ return BuildInvokeMathCFunction(&builder, MethodRecognizer::kMathSin);
+}
+
+bool GraphIntrinsifier::Build_MathCos(FlowGraph* flow_graph) {
+ if (!FlowGraphCompiler::SupportsUnboxedDoubles()) return false;
+
+ GraphEntryInstr* graph_entry = flow_graph->graph_entry();
+ auto normal_entry = graph_entry->normal_entry();
+ BlockBuilder builder(flow_graph, normal_entry);
+
+ return BuildInvokeMathCFunction(&builder, MethodRecognizer::kMathCos);
+}
+
+bool GraphIntrinsifier::Build_MathTan(FlowGraph* flow_graph) {
+ if (!FlowGraphCompiler::SupportsUnboxedDoubles()) return false;
+
+ GraphEntryInstr* graph_entry = flow_graph->graph_entry();
+ auto normal_entry = graph_entry->normal_entry();
+ BlockBuilder builder(flow_graph, normal_entry);
+
+ return BuildInvokeMathCFunction(&builder, MethodRecognizer::kMathTan);
+}
+
+bool GraphIntrinsifier::Build_MathAsin(FlowGraph* flow_graph) {
+ if (!FlowGraphCompiler::SupportsUnboxedDoubles()) return false;
+
+ GraphEntryInstr* graph_entry = flow_graph->graph_entry();
+ auto normal_entry = graph_entry->normal_entry();
+ BlockBuilder builder(flow_graph, normal_entry);
+
+ return BuildInvokeMathCFunction(&builder, MethodRecognizer::kMathAsin);
+}
+
+bool GraphIntrinsifier::Build_MathAcos(FlowGraph* flow_graph) {
+ if (!FlowGraphCompiler::SupportsUnboxedDoubles()) return false;
+
+ GraphEntryInstr* graph_entry = flow_graph->graph_entry();
+ auto normal_entry = graph_entry->normal_entry();
+ BlockBuilder builder(flow_graph, normal_entry);
+
+ return BuildInvokeMathCFunction(&builder, MethodRecognizer::kMathAcos);
+}
+
+bool GraphIntrinsifier::Build_MathAtan(FlowGraph* flow_graph) {
+ if (!FlowGraphCompiler::SupportsUnboxedDoubles()) return false;
+
+ GraphEntryInstr* graph_entry = flow_graph->graph_entry();
+ auto normal_entry = graph_entry->normal_entry();
+ BlockBuilder builder(flow_graph, normal_entry);
+
+ return BuildInvokeMathCFunction(&builder, MethodRecognizer::kMathAtan);
+}
+
+bool GraphIntrinsifier::Build_MathAtan2(FlowGraph* flow_graph) {
+ if (!FlowGraphCompiler::SupportsUnboxedDoubles()) return false;
+
+ GraphEntryInstr* graph_entry = flow_graph->graph_entry();
+ auto normal_entry = graph_entry->normal_entry();
+ BlockBuilder builder(flow_graph, normal_entry);
+
+ return BuildInvokeMathCFunction(&builder, MethodRecognizer::kMathAtan2,
+ /* num_parameters = */ 2);
+}
+
+bool GraphIntrinsifier::Build_DoubleMod(FlowGraph* flow_graph) {
+ if (!FlowGraphCompiler::SupportsUnboxedDoubles()) return false;
+
+ GraphEntryInstr* graph_entry = flow_graph->graph_entry();
+ auto normal_entry = graph_entry->normal_entry();
+ BlockBuilder builder(flow_graph, normal_entry);
+
+ return BuildInvokeMathCFunction(&builder, MethodRecognizer::kDoubleMod,
+ /* num_parameters = */ 2);
+}
+
+bool GraphIntrinsifier::Build_DoubleCeil(FlowGraph* flow_graph) {
+ if (!FlowGraphCompiler::SupportsUnboxedDoubles()) return false;
+ // TODO(johnmccutchan): On X86 this intrinsic can be written in a different
+ // way.
+ if (TargetCPUFeatures::double_truncate_round_supported()) return false;
+
+ GraphEntryInstr* graph_entry = flow_graph->graph_entry();
+ auto normal_entry = graph_entry->normal_entry();
+ BlockBuilder builder(flow_graph, normal_entry);
+
+ return BuildInvokeMathCFunction(&builder, MethodRecognizer::kDoubleCeil);
+}
+
+bool GraphIntrinsifier::Build_DoubleFloor(FlowGraph* flow_graph) {
+ if (!FlowGraphCompiler::SupportsUnboxedDoubles()) return false;
+ // TODO(johnmccutchan): On X86 this intrinsic can be written in a different
+ // way.
+ if (TargetCPUFeatures::double_truncate_round_supported()) return false;
+
+ GraphEntryInstr* graph_entry = flow_graph->graph_entry();
+ auto normal_entry = graph_entry->normal_entry();
+ BlockBuilder builder(flow_graph, normal_entry);
+
+ return BuildInvokeMathCFunction(&builder, MethodRecognizer::kDoubleFloor);
+}
+
+bool GraphIntrinsifier::Build_DoubleTruncate(FlowGraph* flow_graph) {
+ if (!FlowGraphCompiler::SupportsUnboxedDoubles()) return false;
+ // TODO(johnmccutchan): On X86 this intrinsic can be written in a different
+ // way.
+ if (TargetCPUFeatures::double_truncate_round_supported()) return false;
+
+ GraphEntryInstr* graph_entry = flow_graph->graph_entry();
+ auto normal_entry = graph_entry->normal_entry();
+ BlockBuilder builder(flow_graph, normal_entry);
+
+ return BuildInvokeMathCFunction(&builder, MethodRecognizer::kDoubleTruncate);
+}
+
+bool GraphIntrinsifier::Build_DoubleRound(FlowGraph* flow_graph) {
+ if (!FlowGraphCompiler::SupportsUnboxedDoubles()) return false;
+
+ GraphEntryInstr* graph_entry = flow_graph->graph_entry();
+ auto normal_entry = graph_entry->normal_entry();
+ BlockBuilder builder(flow_graph, normal_entry);
+
+ return BuildInvokeMathCFunction(&builder, MethodRecognizer::kDoubleRound);
+}
+
+} // namespace compiler
+} // namespace dart
+
+#endif // !defined(DART_PRECOMPILED_RUNTIME) && !defined(TARGET_ARCH_DBC)
diff --git a/runtime/vm/compiler/graph_intrinsifier.h b/runtime/vm/compiler/graph_intrinsifier.h
new file mode 100644
index 0000000..6282b58
--- /dev/null
+++ b/runtime/vm/compiler/graph_intrinsifier.h
@@ -0,0 +1,67 @@
+// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+// Class for intrinsifying functions.
+
+#ifndef RUNTIME_VM_COMPILER_GRAPH_INTRINSIFIER_H_
+#define RUNTIME_VM_COMPILER_GRAPH_INTRINSIFIER_H_
+
+#include "vm/allocation.h"
+#include "vm/compiler/recognized_methods_list.h"
+
+namespace dart {
+
+// Forward declarations.
+class FlowGraphCompiler;
+class ParsedFunction;
+class FlowGraph;
+
+namespace compiler {
+class Assembler;
+class Label;
+
+#if !defined(TARGET_ARCH_DBC)
+
+class GraphIntrinsifier : public AllStatic {
+ public:
+ static intptr_t ParameterSlotFromSp();
+
+ static bool GraphIntrinsify(const ParsedFunction& parsed_function,
+ FlowGraphCompiler* compiler);
+
+ static void IntrinsicCallPrologue(Assembler* assembler);
+ static void IntrinsicCallEpilogue(Assembler* assembler);
+
+ private:
+#define DECLARE_FUNCTION(class_name, function_name, enum_name, fp) \
+ static void enum_name(Assembler* assembler, Label* normal_ir_body);
+
+ // On DBC graph intrinsics are handled in the same way as non-graph ones.
+ GRAPH_INTRINSICS_LIST(DECLARE_FUNCTION)
+
+#undef DECLARE_FUNCTION
+
+#define DECLARE_FUNCTION(class_name, function_name, enum_name, fp) \
+ static bool Build_##enum_name(FlowGraph* flow_graph);
+
+ GRAPH_INTRINSICS_LIST(DECLARE_FUNCTION)
+
+#undef DECLARE_FUNCTION
+};
+
+#else // !defined(TARGET_ARCH_DBC)
+
+class GraphIntrinsifier : public AllStatic {
+ public:
+ static bool GraphIntrinsify(const ParsedFunction& parsed_function,
+ FlowGraphCompiler* compiler) {
+ return false;
+ }
+};
+
+#endif // !defined(TARGET_ARCH_DBC)
+
+} // namespace compiler
+} // namespace dart
+
+#endif // RUNTIME_VM_COMPILER_GRAPH_INTRINSIFIER_H_
diff --git a/runtime/vm/compiler/graph_intrinsifier_arm.cc b/runtime/vm/compiler/graph_intrinsifier_arm.cc
new file mode 100644
index 0000000..39b30c2
--- /dev/null
+++ b/runtime/vm/compiler/graph_intrinsifier_arm.cc
@@ -0,0 +1,45 @@
+// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM.
+#if defined(TARGET_ARCH_ARM) && !defined(DART_PRECOMPILED_RUNTIME)
+
+#include "vm/compiler/assembler/assembler.h"
+#include "vm/compiler/graph_intrinsifier.h"
+
+namespace dart {
+namespace compiler {
+
+#define __ assembler->
+
+intptr_t GraphIntrinsifier::ParameterSlotFromSp() {
+ return -1;
+}
+
+static bool IsABIPreservedRegister(Register reg) {
+ return ((1 << reg) & kAbiPreservedCpuRegs) != 0;
+}
+
+void GraphIntrinsifier::IntrinsicCallPrologue(Assembler* assembler) {
+ ASSERT(IsABIPreservedRegister(CODE_REG));
+ ASSERT(IsABIPreservedRegister(ARGS_DESC_REG));
+ ASSERT(IsABIPreservedRegister(CALLEE_SAVED_TEMP));
+
+ // Save LR by moving it to a callee saved temporary register.
+ assembler->Comment("IntrinsicCallPrologue");
+ assembler->mov(CALLEE_SAVED_TEMP, Operand(LR));
+}
+
+void GraphIntrinsifier::IntrinsicCallEpilogue(Assembler* assembler) {
+ // Restore LR.
+ assembler->Comment("IntrinsicCallEpilogue");
+ assembler->mov(LR, Operand(CALLEE_SAVED_TEMP));
+}
+
+#undef __
+
+} // namespace compiler
+} // namespace dart
+
+#endif // defined(TARGET_ARCH_ARM) && !defined(DART_PRECOMPILED_RUNTIME)
diff --git a/runtime/vm/compiler/graph_intrinsifier_arm64.cc b/runtime/vm/compiler/graph_intrinsifier_arm64.cc
new file mode 100644
index 0000000..0dc98a7
--- /dev/null
+++ b/runtime/vm/compiler/graph_intrinsifier_arm64.cc
@@ -0,0 +1,50 @@
+// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM64.
+#if defined(TARGET_ARCH_ARM64) && !defined(DART_PRECOMPILED_RUNTIME)
+
+#include "vm/compiler/assembler/assembler.h"
+#include "vm/compiler/graph_intrinsifier.h"
+
+namespace dart {
+namespace compiler {
+
+#define __ assembler->
+
+intptr_t GraphIntrinsifier::ParameterSlotFromSp() {
+ return -1;
+}
+
+static bool IsABIPreservedRegister(Register reg) {
+ return ((1 << reg) & kAbiPreservedCpuRegs) != 0;
+}
+
+void GraphIntrinsifier::IntrinsicCallPrologue(Assembler* assembler) {
+ ASSERT(IsABIPreservedRegister(CODE_REG));
+ ASSERT(!IsABIPreservedRegister(ARGS_DESC_REG));
+ ASSERT(IsABIPreservedRegister(CALLEE_SAVED_TEMP));
+ ASSERT(IsABIPreservedRegister(CALLEE_SAVED_TEMP2));
+ ASSERT(CALLEE_SAVED_TEMP != CODE_REG);
+ ASSERT(CALLEE_SAVED_TEMP != ARGS_DESC_REG);
+ ASSERT(CALLEE_SAVED_TEMP2 != CODE_REG);
+ ASSERT(CALLEE_SAVED_TEMP2 != ARGS_DESC_REG);
+
+ assembler->Comment("IntrinsicCallPrologue");
+ assembler->mov(CALLEE_SAVED_TEMP, LR);
+ assembler->mov(CALLEE_SAVED_TEMP2, ARGS_DESC_REG);
+}
+
+void GraphIntrinsifier::IntrinsicCallEpilogue(Assembler* assembler) {
+ assembler->Comment("IntrinsicCallEpilogue");
+ assembler->mov(LR, CALLEE_SAVED_TEMP);
+ assembler->mov(ARGS_DESC_REG, CALLEE_SAVED_TEMP2);
+}
+
+#undef __
+
+} // namespace compiler
+} // namespace dart
+
+#endif // defined(TARGET_ARCH_ARM64) && !defined(DART_PRECOMPILED_RUNTIME)
diff --git a/runtime/vm/compiler/graph_intrinsifier_ia32.cc b/runtime/vm/compiler/graph_intrinsifier_ia32.cc
new file mode 100644
index 0000000..e89c176
--- /dev/null
+++ b/runtime/vm/compiler/graph_intrinsifier_ia32.cc
@@ -0,0 +1,37 @@
+// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/globals.h" // Needed here to get TARGET_ARCH_IA32.
+#if defined(TARGET_ARCH_IA32) && !defined(DART_PRECOMPILED_RUNTIME)
+
+#include "vm/compiler/assembler/assembler.h"
+#include "vm/compiler/graph_intrinsifier.h"
+
+namespace dart {
+namespace compiler {
+
+#define __ assembler->
+
+intptr_t GraphIntrinsifier::ParameterSlotFromSp() {
+ return 0;
+}
+
+void GraphIntrinsifier::IntrinsicCallPrologue(Assembler* assembler) {
+ COMPILE_ASSERT(CALLEE_SAVED_TEMP != ARGS_DESC_REG);
+
+ assembler->Comment("IntrinsicCallPrologue");
+ assembler->movl(CALLEE_SAVED_TEMP, ARGS_DESC_REG);
+}
+
+void GraphIntrinsifier::IntrinsicCallEpilogue(Assembler* assembler) {
+ assembler->Comment("IntrinsicCallEpilogue");
+ assembler->movl(ARGS_DESC_REG, CALLEE_SAVED_TEMP);
+}
+
+#undef __
+
+} // namespace compiler
+} // namespace dart
+
+#endif // defined(TARGET_ARCH_IA32) && !defined(DART_PRECOMPILED_RUNTIME)
diff --git a/runtime/vm/compiler/graph_intrinsifier_x64.cc b/runtime/vm/compiler/graph_intrinsifier_x64.cc
new file mode 100644
index 0000000..46a8004
--- /dev/null
+++ b/runtime/vm/compiler/graph_intrinsifier_x64.cc
@@ -0,0 +1,45 @@
+// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/globals.h" // Needed here to get TARGET_ARCH_X64.
+#if defined(TARGET_ARCH_X64) && !defined(DART_PRECOMPILED_RUNTIME)
+
+#include "vm/compiler/assembler/assembler.h"
+#include "vm/compiler/graph_intrinsifier.h"
+
+namespace dart {
+namespace compiler {
+
+#define __ assembler->
+
+intptr_t GraphIntrinsifier::ParameterSlotFromSp() {
+ return 0;
+}
+
+static bool IsABIPreservedRegister(Register reg) {
+ return ((1 << reg) & CallingConventions::kCalleeSaveCpuRegisters) != 0;
+}
+
+void GraphIntrinsifier::IntrinsicCallPrologue(Assembler* assembler) {
+ ASSERT(IsABIPreservedRegister(CODE_REG));
+ ASSERT(!IsABIPreservedRegister(ARGS_DESC_REG));
+ ASSERT(IsABIPreservedRegister(CALLEE_SAVED_TEMP));
+ ASSERT(CALLEE_SAVED_TEMP != CODE_REG);
+ ASSERT(CALLEE_SAVED_TEMP != ARGS_DESC_REG);
+
+ assembler->Comment("IntrinsicCallPrologue");
+ assembler->movq(CALLEE_SAVED_TEMP, ARGS_DESC_REG);
+}
+
+void GraphIntrinsifier::IntrinsicCallEpilogue(Assembler* assembler) {
+ assembler->Comment("IntrinsicCallEpilogue");
+ assembler->movq(ARGS_DESC_REG, CALLEE_SAVED_TEMP);
+}
+
+#undef __
+
+} // namespace compiler
+} // namespace dart
+
+#endif // defined(TARGET_ARCH_X64) && !defined(DART_PRECOMPILED_RUNTIME)
diff --git a/runtime/vm/compiler/intrinsifier.cc b/runtime/vm/compiler/intrinsifier.cc
index 3902dac..ee1649f 100644
--- a/runtime/vm/compiler/intrinsifier.cc
+++ b/runtime/vm/compiler/intrinsifier.cc
@@ -24,9 +24,8 @@
DEFINE_FLAG(bool, intrinsify, true, "Intrinsify when possible");
DEFINE_FLAG(bool, trace_intrinsifier, false, "Trace intrinsifier");
-DECLARE_FLAG(bool, code_comments);
-DECLARE_FLAG(bool, print_flow_graph);
-DECLARE_FLAG(bool, print_flow_graph_optimized);
+
+namespace compiler {
bool Intrinsifier::CanIntrinsify(const Function& function) {
if (FLAG_trace_intrinsifier) {
@@ -174,105 +173,6 @@
}
#endif // !defined(DART_PRECOMPILED_RUNTIME)
-// DBC does not use graph intrinsics.
-#if !defined(TARGET_ARCH_DBC)
-static void EmitCodeFor(FlowGraphCompiler* compiler, FlowGraph* graph) {
- // The FlowGraph here is constructed by the intrinsics builder methods, and
- // is different from compiler->flow_graph(), the original method's flow graph.
- compiler->assembler()->Comment("Graph intrinsic begin");
- for (intptr_t i = 0; i < graph->reverse_postorder().length(); i++) {
- BlockEntryInstr* block = graph->reverse_postorder()[i];
- if (block->IsGraphEntry()) continue; // No code for graph entry needed.
-
- if (block->HasParallelMove()) {
- compiler->parallel_move_resolver()->EmitNativeCode(
- block->parallel_move());
- }
-
- for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
- Instruction* instr = it.Current();
- if (FLAG_code_comments) compiler->EmitComment(instr);
- if (instr->IsParallelMove()) {
- compiler->parallel_move_resolver()->EmitNativeCode(
- instr->AsParallelMove());
- } else if (instr->IsInvokeMathCFunction()) {
- ASSERT(instr->locs() != NULL);
- Intrinsifier::IntrinsicCallPrologue(compiler->assembler());
- instr->EmitNativeCode(compiler);
- Intrinsifier::IntrinsicCallEpilogue(compiler->assembler());
- } else {
- ASSERT(instr->locs() != NULL);
- // Calls are not supported in intrinsics code.
- ASSERT(!instr->locs()->always_calls());
- instr->EmitNativeCode(compiler);
- }
- }
- }
- compiler->assembler()->Comment("Graph intrinsic end");
-}
-#endif
-
-bool Intrinsifier::GraphIntrinsify(const ParsedFunction& parsed_function,
- FlowGraphCompiler* compiler) {
-#if !defined(TARGET_ARCH_DBC)
- ASSERT(!parsed_function.function().HasOptionalParameters());
- PrologueInfo prologue_info(-1, -1);
-
- auto graph_entry =
- new GraphEntryInstr(parsed_function, Compiler::kNoOSRDeoptId);
-
- intptr_t block_id = 1; // 0 is GraphEntry.
- graph_entry->set_normal_entry(
- new FunctionEntryInstr(graph_entry, block_id, kInvalidTryIndex,
- CompilerState::Current().GetNextDeoptId()));
-
- FlowGraph* graph =
- new FlowGraph(parsed_function, graph_entry, block_id, prologue_info);
- const Function& function = parsed_function.function();
- switch (function.recognized_kind()) {
-#define EMIT_CASE(class_name, function_name, enum_name, fp) \
- case MethodRecognizer::k##enum_name: \
- if (!Build_##enum_name(graph)) return false; \
- break;
-
- GRAPH_INTRINSICS_LIST(EMIT_CASE);
- default:
- return false;
-#undef EMIT_CASE
- }
-
- if (FLAG_support_il_printer && FLAG_print_flow_graph &&
- FlowGraphPrinter::ShouldPrint(function)) {
- THR_Print("Intrinsic graph before\n");
- FlowGraphPrinter printer(*graph);
- printer.PrintBlocks();
- }
-
- // Prepare for register allocation (cf. FinalizeGraph).
- graph->RemoveRedefinitions();
-
- // Ensure loop hierarchy has been computed.
- GrowableArray<BitVector*> dominance_frontier;
- graph->ComputeDominators(&dominance_frontier);
- graph->GetLoopHierarchy();
-
- // Perform register allocation on the SSA graph.
- FlowGraphAllocator allocator(*graph, true); // Intrinsic mode.
- allocator.AllocateRegisters();
-
- if (FLAG_support_il_printer && FLAG_print_flow_graph &&
- FlowGraphPrinter::ShouldPrint(function)) {
- THR_Print("Intrinsic graph after\n");
- FlowGraphPrinter printer(*graph);
- printer.PrintBlocks();
- }
- EmitCodeFor(compiler, graph);
- return true;
-#else
- return false;
-#endif // !defined(TARGET_ARCH_DBC)
-}
-
// Returns true if fall-through code can be omitted.
bool Intrinsifier::Intrinsify(const ParsedFunction& parsed_function,
FlowGraphCompiler* compiler) {
@@ -282,7 +182,7 @@
}
ASSERT(!compiler->flow_graph().IsCompiledForOsr());
- if (GraphIntrinsify(parsed_function, compiler)) {
+ if (GraphIntrinsifier::GraphIntrinsify(parsed_function, compiler)) {
return compiler->intrinsic_slow_path_label()->IsUnused();
}
@@ -308,7 +208,7 @@
compiler->assembler()->Comment("Intrinsic"); \
Label normal_ir_body; \
const auto size_before = compiler->assembler()->CodeSize(); \
- enum_name(compiler->assembler(), &normal_ir_body); \
+ AsmIntrinsifier::enum_name(compiler->assembler(), &normal_ir_body); \
const auto size_after = compiler->assembler()->CodeSize(); \
if (size_before == size_after) return false; \
if (!normal_ir_body.IsBound()) { \
@@ -345,939 +245,7 @@
return false;
}
-#if !defined(TARGET_ARCH_DBC)
-static intptr_t CidForRepresentation(Representation rep) {
- switch (rep) {
- case kUnboxedDouble:
- return kDoubleCid;
- case kUnboxedFloat32x4:
- return kFloat32x4Cid;
- case kUnboxedInt32x4:
- return kInt32x4Cid;
- case kUnboxedFloat64x2:
- return kFloat64x2Cid;
- case kUnboxedUint32:
- return kDynamicCid; // smi or mint.
- default:
- UNREACHABLE();
- return kIllegalCid;
- }
-}
-
-static Representation RepresentationForCid(intptr_t cid) {
- switch (cid) {
- case kDoubleCid:
- return kUnboxedDouble;
- case kFloat32x4Cid:
- return kUnboxedFloat32x4;
- case kInt32x4Cid:
- return kUnboxedInt32x4;
- case kFloat64x2Cid:
- return kUnboxedFloat64x2;
- default:
- UNREACHABLE();
- return kNoRepresentation;
- }
-}
-
-// Notes about the graph intrinsics:
-//
-// IR instructions which would jump to a deoptimization sequence on failure
-// instead branch to the intrinsic slow path.
-//
-class BlockBuilder : public ValueObject {
- public:
- BlockBuilder(FlowGraph* flow_graph, BlockEntryInstr* entry)
- : flow_graph_(flow_graph),
- entry_(entry),
- current_(entry),
- fall_through_env_(new Environment(0,
- 0,
- DeoptId::kNone,
- flow_graph->parsed_function(),
- NULL)) {}
-
- Definition* AddToInitialDefinitions(Definition* def) {
- def->set_ssa_temp_index(flow_graph_->alloc_ssa_temp_index());
- auto normal_entry = flow_graph_->graph_entry()->normal_entry();
- flow_graph_->AddToInitialDefinitions(normal_entry, def);
- return def;
- }
-
- Definition* AddDefinition(Definition* def) {
- def->set_ssa_temp_index(flow_graph_->alloc_ssa_temp_index());
- AddInstruction(def);
- return def;
- }
-
- Instruction* AddInstruction(Instruction* instr) {
- if (instr->ComputeCanDeoptimize()) {
- // Since we use the presence of an environment to determine if an
- // instructions can deoptimize, we need an empty environment for
- // instructions that "deoptimize" to the intrinsic fall-through code.
- instr->SetEnvironment(fall_through_env_);
- }
- current_ = current_->AppendInstruction(instr);
- return instr;
- }
-
- void AddIntrinsicReturn(Value* value) {
- ReturnInstr* instr = new ReturnInstr(
- TokenPos(), value, CompilerState::Current().GetNextDeoptId());
- AddInstruction(instr);
- entry_->set_last_instruction(instr);
- }
-
- Definition* AddParameter(intptr_t index) {
- intptr_t adjustment = Intrinsifier::ParameterSlotFromSp();
- return AddToInitialDefinitions(new ParameterInstr(
- adjustment + index, flow_graph_->graph_entry(), SPREG));
- }
-
- TokenPosition TokenPos() { return flow_graph_->function().token_pos(); }
-
- Definition* AddNullDefinition() {
- return AddDefinition(new ConstantInstr(Object::ZoneHandle(Object::null())));
- }
-
- Definition* AddUnboxInstr(Representation rep, Value* value, bool is_checked) {
- Definition* unboxed_value =
- AddDefinition(UnboxInstr::Create(rep, value, DeoptId::kNone));
- if (is_checked) {
- // The type of |value| has already been checked and it is safe to
- // adjust reaching type. This is done manually because there is no type
- // propagation when building intrinsics.
- unboxed_value->AsUnbox()->value()->SetReachingType(
- new CompileType(CompileType::FromCid(CidForRepresentation(rep))));
- }
- return unboxed_value;
- }
-
- Definition* AddUnboxInstr(Representation rep,
- Definition* boxed,
- bool is_checked) {
- return AddUnboxInstr(rep, new Value(boxed), is_checked);
- }
-
- Definition* InvokeMathCFunction(MethodRecognizer::Kind recognized_kind,
- ZoneGrowableArray<Value*>* args) {
- return InvokeMathCFunctionHelper(recognized_kind, args);
- }
-
- private:
- Definition* InvokeMathCFunctionHelper(MethodRecognizer::Kind recognized_kind,
- ZoneGrowableArray<Value*>* args) {
- InvokeMathCFunctionInstr* invoke_math_c_function =
- new InvokeMathCFunctionInstr(args, DeoptId::kNone, recognized_kind,
- TokenPos());
- AddDefinition(invoke_math_c_function);
- return invoke_math_c_function;
- }
-
- FlowGraph* flow_graph_;
- BlockEntryInstr* entry_;
- Instruction* current_;
- Environment* fall_through_env_;
-};
-
-static Definition* PrepareIndexedOp(FlowGraph* flow_graph,
- BlockBuilder* builder,
- Definition* array,
- Definition* index,
- const Slot& length_field) {
- Definition* length = builder->AddDefinition(new LoadFieldInstr(
- new Value(array), length_field, TokenPosition::kNoSource));
- // Note that the intrinsifier must always use deopting array bound
- // checks, because intrinsics currently don't support calls.
- Definition* safe_index = new CheckArrayBoundInstr(
- new Value(length), new Value(index), DeoptId::kNone);
- builder->AddDefinition(safe_index);
- return safe_index;
-}
-
-static bool IntrinsifyArrayGetIndexed(FlowGraph* flow_graph,
- intptr_t array_cid) {
- GraphEntryInstr* graph_entry = flow_graph->graph_entry();
- auto normal_entry = graph_entry->normal_entry();
- BlockBuilder builder(flow_graph, normal_entry);
-
- Definition* index = builder.AddParameter(1);
- Definition* array = builder.AddParameter(2);
-
- index = PrepareIndexedOp(flow_graph, &builder, array, index,
- Slot::GetLengthFieldForArrayCid(array_cid));
-
- if (RawObject::IsExternalTypedDataClassId(array_cid)) {
- array = builder.AddDefinition(new LoadUntaggedInstr(
- new Value(array), ExternalTypedData::data_offset()));
- }
-
- Definition* result = builder.AddDefinition(new LoadIndexedInstr(
- new Value(array), new Value(index),
- Instance::ElementSizeFor(array_cid), // index scale
- array_cid, kAlignedAccess, DeoptId::kNone, builder.TokenPos()));
- // Box and/or convert result if necessary.
- switch (array_cid) {
- case kTypedDataInt32ArrayCid:
- case kExternalTypedDataInt32ArrayCid:
- result = builder.AddDefinition(
- BoxInstr::Create(kUnboxedInt32, new Value(result)));
- break;
- case kTypedDataUint32ArrayCid:
- case kExternalTypedDataUint32ArrayCid:
- result = builder.AddDefinition(
- BoxInstr::Create(kUnboxedUint32, new Value(result)));
- break;
- case kTypedDataFloat32ArrayCid:
- result = builder.AddDefinition(
- new FloatToDoubleInstr(new Value(result), DeoptId::kNone));
- // Fall through.
- case kTypedDataFloat64ArrayCid:
- result = builder.AddDefinition(
- BoxInstr::Create(kUnboxedDouble, new Value(result)));
- break;
- case kTypedDataFloat32x4ArrayCid:
- result = builder.AddDefinition(
- BoxInstr::Create(kUnboxedFloat32x4, new Value(result)));
- break;
- case kTypedDataInt32x4ArrayCid:
- result = builder.AddDefinition(
- BoxInstr::Create(kUnboxedInt32x4, new Value(result)));
- break;
- case kTypedDataFloat64x2ArrayCid:
- result = builder.AddDefinition(
- BoxInstr::Create(kUnboxedFloat64x2, new Value(result)));
- break;
- case kArrayCid:
- case kImmutableArrayCid:
- case kTypedDataInt8ArrayCid:
- case kTypedDataUint8ArrayCid:
- case kExternalTypedDataUint8ArrayCid:
- case kTypedDataUint8ClampedArrayCid:
- case kExternalTypedDataUint8ClampedArrayCid:
- case kTypedDataInt16ArrayCid:
- case kTypedDataUint16ArrayCid:
- // Nothing to do.
- break;
- case kTypedDataInt64ArrayCid:
- case kTypedDataUint64ArrayCid:
- result = builder.AddDefinition(
- BoxInstr::Create(kUnboxedInt64, new Value(result)));
- break;
- default:
- UNREACHABLE();
- break;
- }
- builder.AddIntrinsicReturn(new Value(result));
- return true;
-}
-
-static bool IntrinsifyArraySetIndexed(FlowGraph* flow_graph,
- intptr_t array_cid) {
- GraphEntryInstr* graph_entry = flow_graph->graph_entry();
- auto normal_entry = graph_entry->normal_entry();
- BlockBuilder builder(flow_graph, normal_entry);
-
- Definition* value = builder.AddParameter(1);
- Definition* index = builder.AddParameter(2);
- Definition* array = builder.AddParameter(3);
-
- index = PrepareIndexedOp(flow_graph, &builder, array, index,
- Slot::GetLengthFieldForArrayCid(array_cid));
-
- // Value check/conversion.
- switch (array_cid) {
- case kTypedDataInt8ArrayCid:
- case kTypedDataUint8ArrayCid:
- case kExternalTypedDataUint8ArrayCid:
- case kTypedDataUint8ClampedArrayCid:
- case kExternalTypedDataUint8ClampedArrayCid:
- case kTypedDataInt16ArrayCid:
- case kTypedDataUint16ArrayCid:
- builder.AddInstruction(new CheckSmiInstr(new Value(value), DeoptId::kNone,
- builder.TokenPos()));
- break;
- case kTypedDataInt32ArrayCid:
- case kExternalTypedDataInt32ArrayCid:
- // Use same truncating unbox-instruction for int32 and uint32.
- // Fall-through.
- case kTypedDataUint32ArrayCid:
- case kExternalTypedDataUint32ArrayCid:
- // Supports smi and mint, slow-case for bigints.
- value = builder.AddUnboxInstr(kUnboxedUint32, new Value(value),
- /* is_checked = */ false);
- break;
- case kTypedDataInt64ArrayCid:
- case kTypedDataUint64ArrayCid:
- value = builder.AddUnboxInstr(kUnboxedInt64, new Value(value),
- /* is_checked = */ false);
- break;
-
- case kTypedDataFloat32ArrayCid:
- case kTypedDataFloat64ArrayCid:
- case kTypedDataFloat32x4ArrayCid:
- case kTypedDataInt32x4ArrayCid:
- case kTypedDataFloat64x2ArrayCid: {
- intptr_t value_check_cid = kDoubleCid;
- Representation rep = kUnboxedDouble;
- switch (array_cid) {
- case kTypedDataFloat32x4ArrayCid:
- value_check_cid = kFloat32x4Cid;
- rep = kUnboxedFloat32x4;
- break;
- case kTypedDataInt32x4ArrayCid:
- value_check_cid = kInt32x4Cid;
- rep = kUnboxedInt32x4;
- break;
- case kTypedDataFloat64x2ArrayCid:
- value_check_cid = kFloat64x2Cid;
- rep = kUnboxedFloat64x2;
- break;
- default:
- // Float32/Float64 case already handled.
- break;
- }
- Zone* zone = flow_graph->zone();
- Cids* value_check = Cids::CreateMonomorphic(zone, value_check_cid);
- builder.AddInstruction(new CheckClassInstr(
- new Value(value), DeoptId::kNone, *value_check, builder.TokenPos()));
- value = builder.AddUnboxInstr(rep, new Value(value),
- /* is_checked = */ true);
- if (array_cid == kTypedDataFloat32ArrayCid) {
- value = builder.AddDefinition(
- new DoubleToFloatInstr(new Value(value), DeoptId::kNone));
- }
- break;
- }
- default:
- UNREACHABLE();
- }
-
- if (RawObject::IsExternalTypedDataClassId(array_cid)) {
- array = builder.AddDefinition(new LoadUntaggedInstr(
- new Value(array), ExternalTypedData::data_offset()));
- }
- // No store barrier.
- ASSERT(RawObject::IsExternalTypedDataClassId(array_cid) ||
- RawObject::IsTypedDataClassId(array_cid));
- builder.AddInstruction(new StoreIndexedInstr(
- new Value(array), new Value(index), new Value(value), kNoStoreBarrier,
- Instance::ElementSizeFor(array_cid), // index scale
- array_cid, kAlignedAccess, DeoptId::kNone, builder.TokenPos()));
- // Return null.
- Definition* null_def = builder.AddNullDefinition();
- builder.AddIntrinsicReturn(new Value(null_def));
- return true;
-}
-
-#define DEFINE_ARRAY_GETTER_INTRINSIC(enum_name) \
- bool Intrinsifier::Build_##enum_name##GetIndexed(FlowGraph* flow_graph) { \
- return IntrinsifyArrayGetIndexed( \
- flow_graph, MethodRecognizer::MethodKindToReceiverCid( \
- MethodRecognizer::k##enum_name##GetIndexed)); \
- }
-
-#define DEFINE_ARRAY_SETTER_INTRINSIC(enum_name) \
- bool Intrinsifier::Build_##enum_name##SetIndexed(FlowGraph* flow_graph) { \
- return IntrinsifyArraySetIndexed( \
- flow_graph, MethodRecognizer::MethodKindToReceiverCid( \
- MethodRecognizer::k##enum_name##SetIndexed)); \
- }
-
-DEFINE_ARRAY_GETTER_INTRINSIC(ObjectArray)
-DEFINE_ARRAY_GETTER_INTRINSIC(ImmutableArray)
-
-#define DEFINE_ARRAY_GETTER_SETTER_INTRINSICS(enum_name) \
- DEFINE_ARRAY_GETTER_INTRINSIC(enum_name) \
- DEFINE_ARRAY_SETTER_INTRINSIC(enum_name)
-
-DEFINE_ARRAY_GETTER_SETTER_INTRINSICS(Int8Array)
-DEFINE_ARRAY_GETTER_SETTER_INTRINSICS(Uint8Array)
-DEFINE_ARRAY_GETTER_SETTER_INTRINSICS(ExternalUint8Array)
-DEFINE_ARRAY_GETTER_SETTER_INTRINSICS(Uint8ClampedArray)
-DEFINE_ARRAY_GETTER_SETTER_INTRINSICS(ExternalUint8ClampedArray)
-DEFINE_ARRAY_GETTER_SETTER_INTRINSICS(Int16Array)
-DEFINE_ARRAY_GETTER_SETTER_INTRINSICS(Uint16Array)
-DEFINE_ARRAY_GETTER_SETTER_INTRINSICS(Int32Array)
-DEFINE_ARRAY_GETTER_SETTER_INTRINSICS(Uint32Array)
-DEFINE_ARRAY_GETTER_SETTER_INTRINSICS(Int64Array)
-DEFINE_ARRAY_GETTER_SETTER_INTRINSICS(Uint64Array)
-
-#undef DEFINE_ARRAY_GETTER_SETTER_INTRINSICS
-#undef DEFINE_ARRAY_GETTER_INTRINSIC
-#undef DEFINE_ARRAY_SETTER_INTRINSIC
-
-#define DEFINE_FLOAT_ARRAY_GETTER_INTRINSIC(enum_name) \
- bool Intrinsifier::Build_##enum_name##GetIndexed(FlowGraph* flow_graph) { \
- if (!FlowGraphCompiler::SupportsUnboxedDoubles()) { \
- return false; \
- } \
- return IntrinsifyArrayGetIndexed( \
- flow_graph, MethodRecognizer::MethodKindToReceiverCid( \
- MethodRecognizer::k##enum_name##GetIndexed)); \
- }
-
-#define DEFINE_FLOAT_ARRAY_SETTER_INTRINSIC(enum_name) \
- bool Intrinsifier::Build_##enum_name##SetIndexed(FlowGraph* flow_graph) { \
- if (!FlowGraphCompiler::SupportsUnboxedDoubles()) { \
- return false; \
- } \
- return IntrinsifyArraySetIndexed( \
- flow_graph, MethodRecognizer::MethodKindToReceiverCid( \
- MethodRecognizer::k##enum_name##SetIndexed)); \
- }
-
-#define DEFINE_FLOAT_ARRAY_GETTER_SETTER_INTRINSICS(enum_name) \
- DEFINE_FLOAT_ARRAY_GETTER_INTRINSIC(enum_name) \
- DEFINE_FLOAT_ARRAY_SETTER_INTRINSIC(enum_name)
-
-DEFINE_FLOAT_ARRAY_GETTER_SETTER_INTRINSICS(Float64Array)
-DEFINE_FLOAT_ARRAY_GETTER_SETTER_INTRINSICS(Float32Array)
-
-#undef DEFINE_FLOAT_ARRAY_GETTER_SETTER_INTRINSICS
-#undef DEFINE_FLOAT_ARRAY_GETTER_INTRINSIC
-#undef DEFINE_FLOAT_ARRAY_SETTER_INTRINSIC
-
-#define DEFINE_SIMD_ARRAY_GETTER_INTRINSIC(enum_name) \
- bool Intrinsifier::Build_##enum_name##GetIndexed(FlowGraph* flow_graph) { \
- if (!FlowGraphCompiler::SupportsUnboxedSimd128()) { \
- return false; \
- } \
- return IntrinsifyArrayGetIndexed( \
- flow_graph, MethodRecognizer::MethodKindToReceiverCid( \
- MethodRecognizer::k##enum_name##GetIndexed)); \
- }
-
-#define DEFINE_SIMD_ARRAY_SETTER_INTRINSIC(enum_name) \
- bool Intrinsifier::Build_##enum_name##SetIndexed(FlowGraph* flow_graph) { \
- if (!FlowGraphCompiler::SupportsUnboxedSimd128()) { \
- return false; \
- } \
- return IntrinsifyArraySetIndexed( \
- flow_graph, MethodRecognizer::MethodKindToReceiverCid( \
- MethodRecognizer::k##enum_name##SetIndexed)); \
- }
-
-#define DEFINE_SIMD_ARRAY_GETTER_SETTER_INTRINSICS(enum_name) \
- DEFINE_SIMD_ARRAY_GETTER_INTRINSIC(enum_name) \
- DEFINE_SIMD_ARRAY_SETTER_INTRINSIC(enum_name)
-
-DEFINE_SIMD_ARRAY_GETTER_SETTER_INTRINSICS(Float32x4Array)
-DEFINE_SIMD_ARRAY_GETTER_SETTER_INTRINSICS(Int32x4Array)
-DEFINE_SIMD_ARRAY_GETTER_SETTER_INTRINSICS(Float64x2Array)
-
-#undef DEFINE_SIMD_ARRAY_GETTER_SETTER_INTRINSICS
-#undef DEFINE_SIMD_ARRAY_GETTER_INTRINSIC
-#undef DEFINE_SIMD_ARRAY_SETTER_INTRINSIC
-
-static bool BuildCodeUnitAt(FlowGraph* flow_graph, intptr_t cid) {
- GraphEntryInstr* graph_entry = flow_graph->graph_entry();
- auto normal_entry = graph_entry->normal_entry();
- BlockBuilder builder(flow_graph, normal_entry);
-
- Definition* index = builder.AddParameter(1);
- Definition* str = builder.AddParameter(2);
-
- index =
- PrepareIndexedOp(flow_graph, &builder, str, index, Slot::String_length());
-
- // For external strings: Load external data.
- if (cid == kExternalOneByteStringCid) {
- str = builder.AddDefinition(new LoadUntaggedInstr(
- new Value(str), ExternalOneByteString::external_data_offset()));
- } else if (cid == kExternalTwoByteStringCid) {
- str = builder.AddDefinition(new LoadUntaggedInstr(
- new Value(str), ExternalTwoByteString::external_data_offset()));
- }
-
- Definition* result = builder.AddDefinition(new LoadIndexedInstr(
- new Value(str), new Value(index), Instance::ElementSizeFor(cid), cid,
- kAlignedAccess, DeoptId::kNone, builder.TokenPos()));
- builder.AddIntrinsicReturn(new Value(result));
- return true;
-}
-
-bool Intrinsifier::Build_OneByteStringCodeUnitAt(FlowGraph* flow_graph) {
- return BuildCodeUnitAt(flow_graph, kOneByteStringCid);
-}
-
-bool Intrinsifier::Build_TwoByteStringCodeUnitAt(FlowGraph* flow_graph) {
- return BuildCodeUnitAt(flow_graph, kTwoByteStringCid);
-}
-
-bool Intrinsifier::Build_ExternalOneByteStringCodeUnitAt(
- FlowGraph* flow_graph) {
- return BuildCodeUnitAt(flow_graph, kExternalOneByteStringCid);
-}
-
-bool Intrinsifier::Build_ExternalTwoByteStringCodeUnitAt(
- FlowGraph* flow_graph) {
- return BuildCodeUnitAt(flow_graph, kExternalTwoByteStringCid);
-}
-
-static bool BuildSimdOp(FlowGraph* flow_graph, intptr_t cid, Token::Kind kind) {
- if (!FlowGraphCompiler::SupportsUnboxedSimd128()) return false;
-
- const Representation rep = RepresentationForCid(cid);
-
- Zone* zone = flow_graph->zone();
- GraphEntryInstr* graph_entry = flow_graph->graph_entry();
- auto normal_entry = graph_entry->normal_entry();
- BlockBuilder builder(flow_graph, normal_entry);
-
- Definition* right = builder.AddParameter(1);
- Definition* left = builder.AddParameter(2);
-
- Cids* value_check = Cids::CreateMonomorphic(zone, cid);
- // Check argument. Receiver (left) is known to be a Float32x4.
- builder.AddInstruction(new CheckClassInstr(new Value(right), DeoptId::kNone,
- *value_check, builder.TokenPos()));
- Definition* left_simd = builder.AddUnboxInstr(rep, new Value(left),
- /* is_checked = */ true);
-
- Definition* right_simd = builder.AddUnboxInstr(rep, new Value(right),
- /* is_checked = */ true);
-
- Definition* unboxed_result = builder.AddDefinition(SimdOpInstr::Create(
- SimdOpInstr::KindForOperator(cid, kind), new Value(left_simd),
- new Value(right_simd), DeoptId::kNone));
- Definition* result =
- builder.AddDefinition(BoxInstr::Create(rep, new Value(unboxed_result)));
- builder.AddIntrinsicReturn(new Value(result));
- return true;
-}
-
-bool Intrinsifier::Build_Float32x4Mul(FlowGraph* flow_graph) {
- return BuildSimdOp(flow_graph, kFloat32x4Cid, Token::kMUL);
-}
-
-bool Intrinsifier::Build_Float32x4Sub(FlowGraph* flow_graph) {
- return BuildSimdOp(flow_graph, kFloat32x4Cid, Token::kSUB);
-}
-
-bool Intrinsifier::Build_Float32x4Add(FlowGraph* flow_graph) {
- return BuildSimdOp(flow_graph, kFloat32x4Cid, Token::kADD);
-}
-
-static bool BuildFloat32x4Shuffle(FlowGraph* flow_graph,
- MethodRecognizer::Kind kind) {
- if (!FlowGraphCompiler::SupportsUnboxedDoubles() ||
- !FlowGraphCompiler::SupportsUnboxedSimd128()) {
- return false;
- }
- GraphEntryInstr* graph_entry = flow_graph->graph_entry();
- auto normal_entry = graph_entry->normal_entry();
- BlockBuilder builder(flow_graph, normal_entry);
-
- Definition* receiver = builder.AddParameter(1);
-
- Definition* unboxed_receiver =
- builder.AddUnboxInstr(kUnboxedFloat32x4, new Value(receiver),
- /* is_checked = */ true);
-
- Definition* unboxed_result = builder.AddDefinition(
- SimdOpInstr::Create(kind, new Value(unboxed_receiver), DeoptId::kNone));
-
- Definition* result = builder.AddDefinition(
- BoxInstr::Create(kUnboxedDouble, new Value(unboxed_result)));
- builder.AddIntrinsicReturn(new Value(result));
- return true;
-}
-
-bool Intrinsifier::Build_Float32x4ShuffleX(FlowGraph* flow_graph) {
- return BuildFloat32x4Shuffle(flow_graph,
- MethodRecognizer::kFloat32x4ShuffleX);
-}
-
-bool Intrinsifier::Build_Float32x4ShuffleY(FlowGraph* flow_graph) {
- return BuildFloat32x4Shuffle(flow_graph,
- MethodRecognizer::kFloat32x4ShuffleY);
-}
-
-bool Intrinsifier::Build_Float32x4ShuffleZ(FlowGraph* flow_graph) {
- return BuildFloat32x4Shuffle(flow_graph,
- MethodRecognizer::kFloat32x4ShuffleZ);
-}
-
-bool Intrinsifier::Build_Float32x4ShuffleW(FlowGraph* flow_graph) {
- return BuildFloat32x4Shuffle(flow_graph,
- MethodRecognizer::kFloat32x4ShuffleW);
-}
-
-static bool BuildLoadField(FlowGraph* flow_graph, const Slot& field) {
- GraphEntryInstr* graph_entry = flow_graph->graph_entry();
- auto normal_entry = graph_entry->normal_entry();
- BlockBuilder builder(flow_graph, normal_entry);
-
- Definition* array = builder.AddParameter(1);
-
- Definition* length = builder.AddDefinition(
- new LoadFieldInstr(new Value(array), field, builder.TokenPos()));
- builder.AddIntrinsicReturn(new Value(length));
- return true;
-}
-
-bool Intrinsifier::Build_ObjectArrayLength(FlowGraph* flow_graph) {
- return BuildLoadField(flow_graph, Slot::Array_length());
-}
-
-bool Intrinsifier::Build_ImmutableArrayLength(FlowGraph* flow_graph) {
- return BuildLoadField(flow_graph, Slot::Array_length());
-}
-
-bool Intrinsifier::Build_GrowableArrayLength(FlowGraph* flow_graph) {
- return BuildLoadField(flow_graph, Slot::GrowableObjectArray_length());
-}
-
-bool Intrinsifier::Build_StringBaseLength(FlowGraph* flow_graph) {
- return BuildLoadField(flow_graph, Slot::String_length());
-}
-
-bool Intrinsifier::Build_TypedDataLength(FlowGraph* flow_graph) {
- return BuildLoadField(flow_graph, Slot::TypedData_length());
-}
-
-bool Intrinsifier::Build_GrowableArrayCapacity(FlowGraph* flow_graph) {
- GraphEntryInstr* graph_entry = flow_graph->graph_entry();
- auto normal_entry = graph_entry->normal_entry();
- BlockBuilder builder(flow_graph, normal_entry);
-
- Definition* array = builder.AddParameter(1);
-
- Definition* backing_store = builder.AddDefinition(new LoadFieldInstr(
- new Value(array), Slot::GrowableObjectArray_data(), builder.TokenPos()));
- Definition* capacity = builder.AddDefinition(new LoadFieldInstr(
- new Value(backing_store), Slot::Array_length(), builder.TokenPos()));
- builder.AddIntrinsicReturn(new Value(capacity));
- return true;
-}
-
-bool Intrinsifier::Build_GrowableArrayGetIndexed(FlowGraph* flow_graph) {
- GraphEntryInstr* graph_entry = flow_graph->graph_entry();
- auto normal_entry = graph_entry->normal_entry();
- BlockBuilder builder(flow_graph, normal_entry);
-
- Definition* index = builder.AddParameter(1);
- Definition* growable_array = builder.AddParameter(2);
-
- index = PrepareIndexedOp(flow_graph, &builder, growable_array, index,
- Slot::GrowableObjectArray_length());
-
- Definition* backing_store = builder.AddDefinition(
- new LoadFieldInstr(new Value(growable_array),
- Slot::GrowableObjectArray_data(), builder.TokenPos()));
- Definition* result = builder.AddDefinition(new LoadIndexedInstr(
- new Value(backing_store), new Value(index),
- Instance::ElementSizeFor(kArrayCid), // index scale
- kArrayCid, kAlignedAccess, DeoptId::kNone, builder.TokenPos()));
- builder.AddIntrinsicReturn(new Value(result));
- return true;
-}
-
-bool Intrinsifier::Build_ObjectArraySetIndexed(FlowGraph* flow_graph) {
- if (Isolate::Current()->argument_type_checks()) {
- return false;
- }
-
- return Build_ObjectArraySetIndexedUnchecked(flow_graph);
-}
-
-bool Intrinsifier::Build_ObjectArraySetIndexedUnchecked(FlowGraph* flow_graph) {
- GraphEntryInstr* graph_entry = flow_graph->graph_entry();
- auto normal_entry = graph_entry->normal_entry();
- BlockBuilder builder(flow_graph, normal_entry);
-
- Definition* value = builder.AddParameter(1);
- Definition* index = builder.AddParameter(2);
- Definition* array = builder.AddParameter(3);
-
- index = PrepareIndexedOp(flow_graph, &builder, array, index,
- Slot::Array_length());
-
- builder.AddInstruction(new StoreIndexedInstr(
- new Value(array), new Value(index), new Value(value), kEmitStoreBarrier,
- Instance::ElementSizeFor(kArrayCid), // index scale
- kArrayCid, kAlignedAccess, DeoptId::kNone, builder.TokenPos()));
- // Return null.
- Definition* null_def = builder.AddNullDefinition();
- builder.AddIntrinsicReturn(new Value(null_def));
- return true;
-}
-
-bool Intrinsifier::Build_GrowableArraySetIndexed(FlowGraph* flow_graph) {
- if (Isolate::Current()->argument_type_checks()) {
- return false;
- }
-
- return Build_GrowableArraySetIndexedUnchecked(flow_graph);
-}
-
-bool Intrinsifier::Build_GrowableArraySetIndexedUnchecked(
- FlowGraph* flow_graph) {
- GraphEntryInstr* graph_entry = flow_graph->graph_entry();
- auto normal_entry = graph_entry->normal_entry();
- BlockBuilder builder(flow_graph, normal_entry);
-
- Definition* value = builder.AddParameter(1);
- Definition* index = builder.AddParameter(2);
- Definition* array = builder.AddParameter(3);
-
- index = PrepareIndexedOp(flow_graph, &builder, array, index,
- Slot::GrowableObjectArray_length());
-
- Definition* backing_store = builder.AddDefinition(new LoadFieldInstr(
- new Value(array), Slot::GrowableObjectArray_data(), builder.TokenPos()));
-
- builder.AddInstruction(new StoreIndexedInstr(
- new Value(backing_store), new Value(index), new Value(value),
- kEmitStoreBarrier,
- Instance::ElementSizeFor(kArrayCid), // index scale
- kArrayCid, kAlignedAccess, DeoptId::kNone, builder.TokenPos()));
- // Return null.
- Definition* null_def = builder.AddNullDefinition();
- builder.AddIntrinsicReturn(new Value(null_def));
- return true;
-}
-
-bool Intrinsifier::Build_GrowableArraySetData(FlowGraph* flow_graph) {
- GraphEntryInstr* graph_entry = flow_graph->graph_entry();
- auto normal_entry = graph_entry->normal_entry();
- BlockBuilder builder(flow_graph, normal_entry);
-
- Definition* data = builder.AddParameter(1);
- Definition* growable_array = builder.AddParameter(2);
- Zone* zone = flow_graph->zone();
-
- Cids* value_check = Cids::CreateMonomorphic(zone, kArrayCid);
- builder.AddInstruction(new CheckClassInstr(new Value(data), DeoptId::kNone,
- *value_check, builder.TokenPos()));
-
- builder.AddInstruction(new StoreInstanceFieldInstr(
- Slot::GrowableObjectArray_data(), new Value(growable_array),
- new Value(data), kEmitStoreBarrier, builder.TokenPos()));
- // Return null.
- Definition* null_def = builder.AddNullDefinition();
- builder.AddIntrinsicReturn(new Value(null_def));
- return true;
-}
-
-bool Intrinsifier::Build_GrowableArraySetLength(FlowGraph* flow_graph) {
- GraphEntryInstr* graph_entry = flow_graph->graph_entry();
- auto normal_entry = graph_entry->normal_entry();
- BlockBuilder builder(flow_graph, normal_entry);
-
- Definition* length = builder.AddParameter(1);
- Definition* growable_array = builder.AddParameter(2);
-
- builder.AddInstruction(
- new CheckSmiInstr(new Value(length), DeoptId::kNone, builder.TokenPos()));
- builder.AddInstruction(new StoreInstanceFieldInstr(
- Slot::GrowableObjectArray_length(), new Value(growable_array),
- new Value(length), kNoStoreBarrier, builder.TokenPos()));
- Definition* null_def = builder.AddNullDefinition();
- builder.AddIntrinsicReturn(new Value(null_def));
- return true;
-}
-
-bool Intrinsifier::Build_DoubleFlipSignBit(FlowGraph* flow_graph) {
- if (!FlowGraphCompiler::SupportsUnboxedDoubles()) {
- return false;
- }
- GraphEntryInstr* graph_entry = flow_graph->graph_entry();
- auto normal_entry = graph_entry->normal_entry();
- BlockBuilder builder(flow_graph, normal_entry);
-
- Definition* receiver = builder.AddParameter(1);
- Definition* unboxed_value =
- builder.AddUnboxInstr(kUnboxedDouble, new Value(receiver),
- /* is_checked = */ true);
- Definition* unboxed_result = builder.AddDefinition(new UnaryDoubleOpInstr(
- Token::kNEGATE, new Value(unboxed_value), DeoptId::kNone));
- Definition* result = builder.AddDefinition(
- BoxInstr::Create(kUnboxedDouble, new Value(unboxed_result)));
- builder.AddIntrinsicReturn(new Value(result));
- return true;
-}
-
-static bool BuildInvokeMathCFunction(BlockBuilder* builder,
- MethodRecognizer::Kind kind,
- intptr_t num_parameters = 1) {
- if (!FlowGraphCompiler::SupportsUnboxedDoubles()) {
- return false;
- }
- ZoneGrowableArray<Value*>* args =
- new ZoneGrowableArray<Value*>(num_parameters);
-
- for (intptr_t i = 0; i < num_parameters; i++) {
- const intptr_t parameter_index = (num_parameters - i);
- Definition* value = builder->AddParameter(parameter_index);
- Definition* unboxed_value =
- builder->AddUnboxInstr(kUnboxedDouble, value, /* is_checked = */ false);
- args->Add(new Value(unboxed_value));
- }
-
- Definition* unboxed_result = builder->InvokeMathCFunction(kind, args);
-
- Definition* result = builder->AddDefinition(
- BoxInstr::Create(kUnboxedDouble, new Value(unboxed_result)));
-
- builder->AddIntrinsicReturn(new Value(result));
-
- return true;
-}
-
-bool Intrinsifier::Build_MathSin(FlowGraph* flow_graph) {
- if (!FlowGraphCompiler::SupportsUnboxedDoubles()) return false;
-
- GraphEntryInstr* graph_entry = flow_graph->graph_entry();
- auto normal_entry = graph_entry->normal_entry();
- BlockBuilder builder(flow_graph, normal_entry);
-
- return BuildInvokeMathCFunction(&builder, MethodRecognizer::kMathSin);
-}
-
-bool Intrinsifier::Build_MathCos(FlowGraph* flow_graph) {
- if (!FlowGraphCompiler::SupportsUnboxedDoubles()) return false;
-
- GraphEntryInstr* graph_entry = flow_graph->graph_entry();
- auto normal_entry = graph_entry->normal_entry();
- BlockBuilder builder(flow_graph, normal_entry);
-
- return BuildInvokeMathCFunction(&builder, MethodRecognizer::kMathCos);
-}
-
-bool Intrinsifier::Build_MathTan(FlowGraph* flow_graph) {
- if (!FlowGraphCompiler::SupportsUnboxedDoubles()) return false;
-
- GraphEntryInstr* graph_entry = flow_graph->graph_entry();
- auto normal_entry = graph_entry->normal_entry();
- BlockBuilder builder(flow_graph, normal_entry);
-
- return BuildInvokeMathCFunction(&builder, MethodRecognizer::kMathTan);
-}
-
-bool Intrinsifier::Build_MathAsin(FlowGraph* flow_graph) {
- if (!FlowGraphCompiler::SupportsUnboxedDoubles()) return false;
-
- GraphEntryInstr* graph_entry = flow_graph->graph_entry();
- auto normal_entry = graph_entry->normal_entry();
- BlockBuilder builder(flow_graph, normal_entry);
-
- return BuildInvokeMathCFunction(&builder, MethodRecognizer::kMathAsin);
-}
-
-bool Intrinsifier::Build_MathAcos(FlowGraph* flow_graph) {
- if (!FlowGraphCompiler::SupportsUnboxedDoubles()) return false;
-
- GraphEntryInstr* graph_entry = flow_graph->graph_entry();
- auto normal_entry = graph_entry->normal_entry();
- BlockBuilder builder(flow_graph, normal_entry);
-
- return BuildInvokeMathCFunction(&builder, MethodRecognizer::kMathAcos);
-}
-
-bool Intrinsifier::Build_MathAtan(FlowGraph* flow_graph) {
- if (!FlowGraphCompiler::SupportsUnboxedDoubles()) return false;
-
- GraphEntryInstr* graph_entry = flow_graph->graph_entry();
- auto normal_entry = graph_entry->normal_entry();
- BlockBuilder builder(flow_graph, normal_entry);
-
- return BuildInvokeMathCFunction(&builder, MethodRecognizer::kMathAtan);
-}
-
-bool Intrinsifier::Build_MathAtan2(FlowGraph* flow_graph) {
- if (!FlowGraphCompiler::SupportsUnboxedDoubles()) return false;
-
- GraphEntryInstr* graph_entry = flow_graph->graph_entry();
- auto normal_entry = graph_entry->normal_entry();
- BlockBuilder builder(flow_graph, normal_entry);
-
- return BuildInvokeMathCFunction(&builder, MethodRecognizer::kMathAtan2,
- /* num_parameters = */ 2);
-}
-
-bool Intrinsifier::Build_DoubleMod(FlowGraph* flow_graph) {
- if (!FlowGraphCompiler::SupportsUnboxedDoubles()) return false;
-
- GraphEntryInstr* graph_entry = flow_graph->graph_entry();
- auto normal_entry = graph_entry->normal_entry();
- BlockBuilder builder(flow_graph, normal_entry);
-
- return BuildInvokeMathCFunction(&builder, MethodRecognizer::kDoubleMod,
- /* num_parameters = */ 2);
-}
-
-bool Intrinsifier::Build_DoubleCeil(FlowGraph* flow_graph) {
- if (!FlowGraphCompiler::SupportsUnboxedDoubles()) return false;
- // TODO(johnmccutchan): On X86 this intrinsic can be written in a different
- // way.
- if (TargetCPUFeatures::double_truncate_round_supported()) return false;
-
- GraphEntryInstr* graph_entry = flow_graph->graph_entry();
- auto normal_entry = graph_entry->normal_entry();
- BlockBuilder builder(flow_graph, normal_entry);
-
- return BuildInvokeMathCFunction(&builder, MethodRecognizer::kDoubleCeil);
-}
-
-bool Intrinsifier::Build_DoubleFloor(FlowGraph* flow_graph) {
- if (!FlowGraphCompiler::SupportsUnboxedDoubles()) return false;
- // TODO(johnmccutchan): On X86 this intrinsic can be written in a different
- // way.
- if (TargetCPUFeatures::double_truncate_round_supported()) return false;
-
- GraphEntryInstr* graph_entry = flow_graph->graph_entry();
- auto normal_entry = graph_entry->normal_entry();
- BlockBuilder builder(flow_graph, normal_entry);
-
- return BuildInvokeMathCFunction(&builder, MethodRecognizer::kDoubleFloor);
-}
-
-bool Intrinsifier::Build_DoubleTruncate(FlowGraph* flow_graph) {
- if (!FlowGraphCompiler::SupportsUnboxedDoubles()) return false;
- // TODO(johnmccutchan): On X86 this intrinsic can be written in a different
- // way.
- if (TargetCPUFeatures::double_truncate_round_supported()) return false;
-
- GraphEntryInstr* graph_entry = flow_graph->graph_entry();
- auto normal_entry = graph_entry->normal_entry();
- BlockBuilder builder(flow_graph, normal_entry);
-
- return BuildInvokeMathCFunction(&builder, MethodRecognizer::kDoubleTruncate);
-}
-
-bool Intrinsifier::Build_DoubleRound(FlowGraph* flow_graph) {
- if (!FlowGraphCompiler::SupportsUnboxedDoubles()) return false;
-
- GraphEntryInstr* graph_entry = flow_graph->graph_entry();
- auto normal_entry = graph_entry->normal_entry();
- BlockBuilder builder(flow_graph, normal_entry);
-
- return BuildInvokeMathCFunction(&builder, MethodRecognizer::kDoubleRound);
-}
-
-void Intrinsifier::String_identityHash(Assembler* assembler,
- Label* normal_ir_body) {
- String_getHashCode(assembler, normal_ir_body);
-}
-
-void Intrinsifier::Double_identityHash(Assembler* assembler,
- Label* normal_ir_body) {
- Double_hashCode(assembler, normal_ir_body);
-}
-
-void Intrinsifier::RegExp_ExecuteMatch(Assembler* assembler,
- Label* normal_ir_body) {
- IntrinsifyRegExpExecuteMatch(assembler, normal_ir_body, /*sticky=*/false);
-}
-
-void Intrinsifier::RegExp_ExecuteMatchSticky(Assembler* assembler,
- Label* normal_ir_body) {
- IntrinsifyRegExpExecuteMatch(assembler, normal_ir_body, /*sticky=*/true);
-}
-#endif // !defined(TARGET_ARCH_DBC)
-
+} // namespace compiler
} // namespace dart
#endif // !defined(DART_PRECOMPILED_RUNTIME)
diff --git a/runtime/vm/compiler/intrinsifier.h b/runtime/vm/compiler/intrinsifier.h
index 694518e..3aa9148 100644
--- a/runtime/vm/compiler/intrinsifier.h
+++ b/runtime/vm/compiler/intrinsifier.h
@@ -7,23 +7,20 @@
#define RUNTIME_VM_COMPILER_INTRINSIFIER_H_
#include "vm/allocation.h"
+#include "vm/compiler/asm_intrinsifier.h"
+#include "vm/compiler/graph_intrinsifier.h"
#include "vm/compiler/method_recognizer.h"
namespace dart {
// Forward declarations.
+class FlowGraphCompiler;
+class Function;
+class ParsedFunction;
+
namespace compiler {
class Assembler;
class Label;
-} // namespace compiler
-class FlowGraphCompiler;
-class Function;
-class TargetEntryInstr;
-class ParsedFunction;
-class FlowGraph;
-
-using compiler::Assembler;
-using compiler::Label;
class Intrinsifier : public AllStatic {
public:
@@ -33,46 +30,11 @@
static void InitializeState();
#endif
- static bool GraphIntrinsify(const ParsedFunction& parsed_function,
- FlowGraphCompiler* compiler);
-
- static intptr_t ParameterSlotFromSp();
-
- static void IntrinsicCallPrologue(Assembler* assembler);
- static void IntrinsicCallEpilogue(Assembler* assembler);
-
private:
- // The "_A" value used in the intrinsification of
- // `runtime/lib/math_patch.dart:_Random._nextState()`
- static const int64_t kRandomAValue = 0xffffda61;
-
static bool CanIntrinsify(const Function& function);
-
-#define DECLARE_FUNCTION(class_name, function_name, enum_name, fp) \
- static void enum_name(Assembler* assembler, Label* normal_ir_body);
-
- ALL_INTRINSICS_LIST(DECLARE_FUNCTION)
-#if defined(TARGET_ARCH_DBC)
- // On DBC graph intrinsics are handled in the same way as non-graph ones.
- GRAPH_INTRINSICS_LIST(DECLARE_FUNCTION)
-#endif
-
-#undef DECLARE_FUNCTION
-
-#if !defined(TARGET_ARCH_DBC)
-#define DECLARE_FUNCTION(class_name, function_name, enum_name, fp) \
- static bool Build_##enum_name(FlowGraph* flow_graph);
-
- GRAPH_INTRINSICS_LIST(DECLARE_FUNCTION)
-
-#undef DECLARE_FUNCTION
-
- static void IntrinsifyRegExpExecuteMatch(Assembler* assembler,
- Label* normal_ir_body,
- bool sticky);
-#endif
};
+} // namespace compiler
} // namespace dart
#endif // RUNTIME_VM_COMPILER_INTRINSIFIER_H_
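The DECLARE_FUNCTION blocks removed from intrinsifier.h above are ordinary X-macro consumers of the *_LIST(V) tables: each list invocation expands every (class, function, enum, fingerprint) entry through whatever one-line macro it is handed, which is how one table can produce both the per-method `enum_name(Assembler*, Label*)` declarations and the `Build_##enum_name(FlowGraph*)` declarations for the graph intrinsics. As a rough, self-contained sketch of that mechanism (a made-up two-entry list and toy consumers, not the SDK's actual macros):

#include <cstdio>

// Toy list in the same shape as the SDK tables:
// V(class_name, function_name, enum_name, fingerprint)
#define TOY_INTRINSICS_LIST(V)                                                 \
  V(_Smi, bitNegate, Smi_bitNegate, 0x67299f4f)                                \
  V(_Double, hashCode, Double_hashCode, 0x702b77b7)

// Consumer 1: generate one enum constant per entry.
#define DEFINE_ENUM(class_name, function_name, enum_name, fp) k##enum_name,
enum RecognizedKind { TOY_INTRINSICS_LIST(DEFINE_ENUM) kNumRecognized };
#undef DEFINE_ENUM

// Consumer 2: generate one stub function per entry (the role the
// DECLARE_FUNCTION macros play for the assembler intrinsics).
#define DEFINE_STUB(class_name, function_name, enum_name, fp)                  \
  void enum_name() { std::printf(#class_name "." #function_name "\n"); }
TOY_INTRINSICS_LIST(DEFINE_STUB)
#undef DEFINE_STUB

int main() {
  Smi_bitNegate();    // prints "_Smi.bitNegate"
  Double_hashCode();  // prints "_Double.hashCode"
  return kNumRecognized == 2 ? 0 : 1;
}

Compiling and running the sketch prints the two qualified names and exits 0; the tables in recognized_methods_list.h below are consumed the same way, only at much larger scale.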
diff --git a/runtime/vm/compiler/method_recognizer.h b/runtime/vm/compiler/method_recognizer.h
index 75ceb07..dbde9de 100644
--- a/runtime/vm/compiler/method_recognizer.h
+++ b/runtime/vm/compiler/method_recognizer.h
@@ -6,519 +6,12 @@
#define RUNTIME_VM_COMPILER_METHOD_RECOGNIZER_H_
#include "vm/allocation.h"
+#include "vm/compiler/recognized_methods_list.h"
#include "vm/growable_array.h"
#include "vm/token.h"
namespace dart {
-// clang-format off
-// (class-name, function-name, recognized enum, result type, fingerprint).
-// When adding a new function add a 0 as fingerprint, build and run to get the
-// correct fingerprint from the mismatch error (or use Library::GetFunction()
-// and print func.SourceFingerprint()).
-#define OTHER_RECOGNIZED_LIST(V) \
- V(::, identical, ObjectIdentical, 0x49c6e96a) \
- V(ClassID, getID, ClassIDgetID, 0x7b18b257) \
- V(Object, Object., ObjectConstructor, 0x681617fe) \
- V(List, ., ListFactory, 0x629f8324) \
- V(_List, ., ObjectArrayAllocate, 0x2121902f) \
- V(_TypedList, _getInt8, ByteArrayBaseGetInt8, 0x7041895a) \
- V(_TypedList, _getUint8, ByteArrayBaseGetUint8, 0x336fa3ea) \
- V(_TypedList, _getInt16, ByteArrayBaseGetInt16, 0x231bbe2e) \
- V(_TypedList, _getUint16, ByteArrayBaseGetUint16, 0x0371785f) \
- V(_TypedList, _getInt32, ByteArrayBaseGetInt32, 0x65ab3a20) \
- V(_TypedList, _getUint32, ByteArrayBaseGetUint32, 0x0cb0fcf6) \
- V(_TypedList, _getInt64, ByteArrayBaseGetInt64, 0x7db75d78) \
- V(_TypedList, _getUint64, ByteArrayBaseGetUint64, 0x1487cfc6) \
- V(_TypedList, _getFloat32, ByteArrayBaseGetFloat32, 0x6674ea6f) \
- V(_TypedList, _getFloat64, ByteArrayBaseGetFloat64, 0x236c6e7a) \
- V(_TypedList, _getFloat32x4, ByteArrayBaseGetFloat32x4, 0x5c367ffb) \
- V(_TypedList, _getInt32x4, ByteArrayBaseGetInt32x4, 0x772d1c0f) \
- V(_TypedList, _setInt8, ByteArrayBaseSetInt8, 0x12bae36a) \
- V(_TypedList, _setUint8, ByteArrayBaseSetUint8, 0x15821cc9) \
- V(_TypedList, _setInt16, ByteArrayBaseSetInt16, 0x1f8237fa) \
- V(_TypedList, _setUint16, ByteArrayBaseSetUint16, 0x181e5d16) \
- V(_TypedList, _setInt32, ByteArrayBaseSetInt32, 0x7ddb9f87) \
- V(_TypedList, _setUint32, ByteArrayBaseSetUint32, 0x74094f8d) \
- V(_TypedList, _setInt64, ByteArrayBaseSetInt64, 0x4741396e) \
- V(_TypedList, _setUint64, ByteArrayBaseSetUint64, 0x3b398ae4) \
- V(_TypedList, _setFloat32, ByteArrayBaseSetFloat32, 0x03db087b) \
- V(_TypedList, _setFloat64, ByteArrayBaseSetFloat64, 0x38a80b0d) \
- V(_TypedList, _setFloat32x4, ByteArrayBaseSetFloat32x4, 0x40052c4e) \
- V(_TypedList, _setInt32x4, ByteArrayBaseSetInt32x4, 0x07b89f54) \
- V(::, _toClampedUint8, ConvertIntToClampedUint8, 0x564b0435) \
- V(_StringBase, _interpolate, StringBaseInterpolate, 0x01ecb15a) \
- V(_IntegerImplementation, toDouble, IntegerToDouble, 0x05da96ed) \
- V(_Double, _add, DoubleAdd, 0x2a38277b) \
- V(_Double, _sub, DoubleSub, 0x4f466391) \
- V(_Double, _mul, DoubleMul, 0x175e4f66) \
- V(_Double, _div, DoubleDiv, 0x0854181b) \
- V(::, min, MathMin, 0x32ebc57d) \
- V(::, max, MathMax, 0x377e8889) \
- V(::, _doublePow, MathDoublePow, 0x5add0ec1) \
- V(::, _intPow, MathIntPow, 0x11b45569) \
- V(Float32x4, Float32x4., Float32x4Constructor, 0x26ea459b) \
- V(Float32x4, Float32x4.zero, Float32x4Zero, 0x16eca604) \
- V(Float32x4, Float32x4.splat, Float32x4Splat, 0x694e83e3) \
- V(Float32x4, Float32x4.fromInt32x4Bits, Int32x4ToFloat32x4, 0x2f62ebd3) \
- V(Float32x4, Float32x4.fromFloat64x2, Float64x2ToFloat32x4, 0x50ed6910) \
- V(_Float32x4, shuffle, Float32x4Shuffle, 0x7829101f) \
- V(_Float32x4, shuffleMix, Float32x4ShuffleMix, 0x4182c06b) \
- V(_Float32x4, get:signMask, Float32x4GetSignMask, 0x1d08b351) \
- V(_Float32x4, equal, Float32x4Equal, 0x11adb239) \
- V(_Float32x4, greaterThan, Float32x4GreaterThan, 0x48adaf58) \
- V(_Float32x4, greaterThanOrEqual, Float32x4GreaterThanOrEqual, 0x32db94ca) \
- V(_Float32x4, lessThan, Float32x4LessThan, 0x425b000c) \
- V(_Float32x4, lessThanOrEqual, Float32x4LessThanOrEqual, 0x0278c2f8) \
- V(_Float32x4, notEqual, Float32x4NotEqual, 0x2987cd26) \
- V(_Float32x4, min, Float32x4Min, 0x5ed74b6f) \
- V(_Float32x4, max, Float32x4Max, 0x68696442) \
- V(_Float32x4, scale, Float32x4Scale, 0x704e4122) \
- V(_Float32x4, sqrt, Float32x4Sqrt, 0x2c967a6f) \
- V(_Float32x4, reciprocalSqrt, Float32x4ReciprocalSqrt, 0x6264bfe8) \
- V(_Float32x4, reciprocal, Float32x4Reciprocal, 0x3cd7e819) \
- V(_Float32x4, unary-, Float32x4Negate, 0x37accb52) \
- V(_Float32x4, abs, Float32x4Abs, 0x471cdd87) \
- V(_Float32x4, clamp, Float32x4Clamp, 0x2cb30492) \
- V(_Float32x4, withX, Float32x4WithX, 0x4e336aff) \
- V(_Float32x4, withY, Float32x4WithY, 0x0a72b910) \
- V(_Float32x4, withZ, Float32x4WithZ, 0x31e93658) \
- V(_Float32x4, withW, Float32x4WithW, 0x60ddc105) \
- V(Float64x2, Float64x2., Float64x2Constructor, 0x43054b9f) \
- V(Float64x2, Float64x2.zero, Float64x2Zero, 0x4af12f9d) \
- V(Float64x2, Float64x2.splat, Float64x2Splat, 0x134edef0) \
- V(Float64x2, Float64x2.fromFloat32x4, Float32x4ToFloat64x2, 0x17d6b5e4) \
- V(_Float64x2, get:x, Float64x2GetX, 0x58c09c58) \
- V(_Float64x2, get:y, Float64x2GetY, 0x3cf5e5b8) \
- V(_Float64x2, unary-, Float64x2Negate, 0x415ca009) \
- V(_Float64x2, abs, Float64x2Abs, 0x031f9e47) \
- V(_Float64x2, sqrt, Float64x2Sqrt, 0x77f711dd) \
- V(_Float64x2, get:signMask, Float64x2GetSignMask, 0x27deda4b) \
- V(_Float64x2, scale, Float64x2Scale, 0x26830a61) \
- V(_Float64x2, withX, Float64x2WithX, 0x1d2bcaf5) \
- V(_Float64x2, withY, Float64x2WithY, 0x383ed6ac) \
- V(_Float64x2, min, Float64x2Min, 0x28d7ddf6) \
- V(_Float64x2, max, Float64x2Max, 0x0bd74e5b) \
- V(Int32x4, Int32x4., Int32x4Constructor, 0x480555a9) \
- V(Int32x4, Int32x4.bool, Int32x4BoolConstructor, 0x36aa6963) \
- V(Int32x4, Int32x4.fromFloat32x4Bits, Float32x4ToInt32x4, 0x6715388a) \
- V(_Int32x4, get:flagX, Int32x4GetFlagX, 0x56396c82) \
- V(_Int32x4, get:flagY, Int32x4GetFlagY, 0x44704738) \
- V(_Int32x4, get:flagZ, Int32x4GetFlagZ, 0x20d6ff37) \
- V(_Int32x4, get:flagW, Int32x4GetFlagW, 0x5045616a) \
- V(_Int32x4, get:signMask, Int32x4GetSignMask, 0x2c1fb2a3) \
- V(_Int32x4, shuffle, Int32x4Shuffle, 0x20bc0b16) \
- V(_Int32x4, shuffleMix, Int32x4ShuffleMix, 0x5c7056e1) \
- V(_Int32x4, select, Int32x4Select, 0x6b49654f) \
- V(_Int32x4, withFlagX, Int32x4WithFlagX, 0x0ef58fcf) \
- V(_Int32x4, withFlagY, Int32x4WithFlagY, 0x6485a9c4) \
- V(_Int32x4, withFlagZ, Int32x4WithFlagZ, 0x267acdfa) \
- V(_Int32x4, withFlagW, Int32x4WithFlagW, 0x345ac675) \
- V(_HashVMBase, get:_index, LinkedHashMap_getIndex, 0x02477157) \
- V(_HashVMBase, set:_index, LinkedHashMap_setIndex, 0x4fc8d5e0) \
- V(_HashVMBase, get:_data, LinkedHashMap_getData, 0x2d7a70ac) \
- V(_HashVMBase, set:_data, LinkedHashMap_setData, 0x0ec032e8) \
- V(_HashVMBase, get:_usedData, LinkedHashMap_getUsedData, 0x088599ed) \
- V(_HashVMBase, set:_usedData, LinkedHashMap_setUsedData, 0x5f42ca86) \
- V(_HashVMBase, get:_hashMask, LinkedHashMap_getHashMask, 0x32f3b13b) \
- V(_HashVMBase, set:_hashMask, LinkedHashMap_setHashMask, 0x7219c45b) \
- V(_HashVMBase, get:_deletedKeys, LinkedHashMap_getDeletedKeys, 0x558481c2) \
- V(_HashVMBase, set:_deletedKeys, LinkedHashMap_setDeletedKeys, 0x5aa9888d) \
- V(::, _classRangeCheck, ClassRangeCheck, 0x2ae76b84) \
- V(::, _classRangeCheckNegative, ClassRangeCheckNegated, 0x5acdfb75) \
-
-
-// List of intrinsics:
-// (class-name, function-name, intrinsification method, fingerprint).
-#define CORE_LIB_INTRINSIC_LIST(V) \
- V(_Smi, ~, Smi_bitNegate, 0x67299f4f) \
- V(_Smi, get:bitLength, Smi_bitLength, 0x25b3cb0a) \
- V(_Smi, _bitAndFromSmi, Smi_bitAndFromSmi, 0x562d5047) \
- V(_BigIntImpl, _lsh, Bigint_lsh, 0x5b6cfc8b) \
- V(_BigIntImpl, _rsh, Bigint_rsh, 0x6ff14a49) \
- V(_BigIntImpl, _absAdd, Bigint_absAdd, 0x5bf14238) \
- V(_BigIntImpl, _absSub, Bigint_absSub, 0x1de5bd32) \
- V(_BigIntImpl, _mulAdd, Bigint_mulAdd, 0x6f277966) \
- V(_BigIntImpl, _sqrAdd, Bigint_sqrAdd, 0x68e4c8ea) \
- V(_BigIntImpl, _estimateQuotientDigit, Bigint_estimateQuotientDigit, \
- 0x35456d91) \
- V(_BigIntMontgomeryReduction, _mulMod, Montgomery_mulMod, 0x0f7b0375) \
- V(_Double, >, Double_greaterThan, 0x4f1375a3) \
- V(_Double, >=, Double_greaterEqualThan, 0x4260c184) \
- V(_Double, <, Double_lessThan, 0x365d1eba) \
- V(_Double, <=, Double_lessEqualThan, 0x74b5eb64) \
- V(_Double, ==, Double_equal, 0x613492fc) \
- V(_Double, +, Double_add, 0x53994370) \
- V(_Double, -, Double_sub, 0x3b69d466) \
- V(_Double, *, Double_mul, 0x2bb9bd5d) \
- V(_Double, /, Double_div, 0x483eee28) \
- V(_Double, get:hashCode, Double_hashCode, 0x702b77b7) \
- V(_Double, get:_identityHashCode, Double_identityHash, 0x7bda5549) \
- V(_Double, get:isNaN, Double_getIsNaN, 0x0af9d4a9) \
- V(_Double, get:isInfinite, Double_getIsInfinite, 0x0f7acb47) \
- V(_Double, get:isNegative, Double_getIsNegative, 0x3a59e7f4) \
- V(_Double, _mulFromInteger, Double_mulFromInteger, 0x2017fcf6) \
- V(_Double, .fromInteger, DoubleFromInteger, 0x6d234f4b) \
- V(_GrowableList, .withData, GrowableArray_Allocate, 0x28b2138e) \
- V(_RegExp, _ExecuteMatch, RegExp_ExecuteMatch, 0x380184b1) \
- V(_RegExp, _ExecuteMatchSticky, RegExp_ExecuteMatchSticky, 0x79b8f955) \
- V(Object, ==, ObjectEquals, 0x7b32a55a) \
- V(Object, get:runtimeType, ObjectRuntimeType, 0x00e8ab29) \
- V(Object, _haveSameRuntimeType, ObjectHaveSameRuntimeType, 0x4dc50799) \
- V(_StringBase, get:hashCode, String_getHashCode, 0x78c3d446) \
- V(_StringBase, get:_identityHashCode, String_identityHash, 0x0472b1d8) \
- V(_StringBase, get:isEmpty, StringBaseIsEmpty, 0x4a8b29c8) \
- V(_StringBase, _substringMatches, StringBaseSubstringMatches, 0x46de4f10) \
- V(_StringBase, [], StringBaseCharAt, 0x7cbb8603) \
- V(_OneByteString, get:hashCode, OneByteString_getHashCode, 0x78c3d446) \
- V(_OneByteString, _substringUncheckedNative, \
- OneByteString_substringUnchecked, 0x3538ad86) \
- V(_OneByteString, _setAt, OneByteStringSetAt, 0x11ffddd1) \
- V(_OneByteString, _allocate, OneByteString_allocate, 0x74933376) \
- V(_OneByteString, ==, OneByteString_equality, 0x4eda197e) \
- V(_TwoByteString, ==, TwoByteString_equality, 0x4eda197e) \
- V(_Type, get:hashCode, Type_getHashCode, 0x18d1523f) \
- V(::, _getHash, Object_getHash, 0x2827856d) \
- V(::, _setHash, Object_setHash, 0x690faebd) \
-
-
-#define CORE_INTEGER_LIB_INTRINSIC_LIST(V) \
- V(_IntegerImplementation, _addFromInteger, Integer_addFromInteger, \
- 0x6a10c54a) \
- V(_IntegerImplementation, +, Integer_add, 0x43d53af7) \
- V(_IntegerImplementation, _subFromInteger, Integer_subFromInteger, \
- 0x3fa4b1ed) \
- V(_IntegerImplementation, -, Integer_sub, 0x2dc22e03) \
- V(_IntegerImplementation, _mulFromInteger, Integer_mulFromInteger, \
- 0x3216e299) \
- V(_IntegerImplementation, *, Integer_mul, 0x4e7a1c24) \
- V(_IntegerImplementation, _moduloFromInteger, Integer_moduloFromInteger, \
- 0x6348b974) \
- V(_IntegerImplementation, ~/, Integer_truncDivide, 0x4efb2d39) \
- V(_IntegerImplementation, unary-, Integer_negate, 0x428bf6fa) \
- V(_IntegerImplementation, _bitAndFromInteger, Integer_bitAndFromInteger, \
- 0x395b1678) \
- V(_IntegerImplementation, &, Integer_bitAnd, 0x5ab35f30) \
- V(_IntegerImplementation, _bitOrFromInteger, Integer_bitOrFromInteger, \
- 0x6a36b395) \
- V(_IntegerImplementation, |, Integer_bitOr, 0x267fa107) \
- V(_IntegerImplementation, _bitXorFromInteger, Integer_bitXorFromInteger, \
- 0x72da93f0) \
- V(_IntegerImplementation, ^, Integer_bitXor, 0x0c7b0230) \
- V(_IntegerImplementation, _greaterThanFromInteger, \
- Integer_greaterThanFromInt, 0x4a50ed58) \
- V(_IntegerImplementation, >, Integer_greaterThan, 0x6599a6e1) \
- V(_IntegerImplementation, ==, Integer_equal, 0x58abc487) \
- V(_IntegerImplementation, _equalToInteger, Integer_equalToInteger, \
- 0x063be842) \
- V(_IntegerImplementation, <, Integer_lessThan, 0x365d1eba) \
- V(_IntegerImplementation, <=, Integer_lessEqualThan, 0x74b5eb64) \
- V(_IntegerImplementation, >=, Integer_greaterEqualThan, 0x4260c184) \
- V(_IntegerImplementation, <<, Integer_shl, 0x371c45fa) \
- V(_IntegerImplementation, >>, Integer_sar, 0x2b630578) \
- V(_Double, toInt, DoubleToInteger, 0x26ef344b) \
-
-#define MATH_LIB_INTRINSIC_LIST(V) \
- V(::, sqrt, MathSqrt, 0x70482cf3) \
- V(_Random, _nextState, Random_nextState, 0x2842c4d5) \
-
-#define GRAPH_MATH_LIB_INTRINSIC_LIST(V) \
- V(::, sin, MathSin, 0x6b7bd98c) \
- V(::, cos, MathCos, 0x459bf5fe) \
- V(::, tan, MathTan, 0x3bcd772a) \
- V(::, asin, MathAsin, 0x2ecc2fcd) \
- V(::, acos, MathAcos, 0x08cf2212) \
- V(::, atan, MathAtan, 0x1e2731d5) \
- V(::, atan2, MathAtan2, 0x39f1fa41) \
-
-#define TYPED_DATA_LIB_INTRINSIC_LIST(V) \
- V(Int8List, ., TypedData_Int8Array_factory, 0x7e39a3a1) \
- V(Uint8List, ., TypedData_Uint8Array_factory, 0x3a79adf7) \
- V(Uint8ClampedList, ., TypedData_Uint8ClampedArray_factory, 0x67f38395) \
- V(Int16List, ., TypedData_Int16Array_factory, 0x6477bda8) \
- V(Uint16List, ., TypedData_Uint16Array_factory, 0x5707c5a2) \
- V(Int32List, ., TypedData_Int32Array_factory, 0x2b96ec0e) \
- V(Uint32List, ., TypedData_Uint32Array_factory, 0x0c1c0d62) \
- V(Int64List, ., TypedData_Int64Array_factory, 0x279ab485) \
- V(Uint64List, ., TypedData_Uint64Array_factory, 0x7bcb89c2) \
- V(Float32List, ., TypedData_Float32Array_factory, 0x43506c09) \
- V(Float64List, ., TypedData_Float64Array_factory, 0x1fde3eaf) \
- V(Float32x4List, ., TypedData_Float32x4Array_factory, 0x4a4030d6) \
- V(Int32x4List, ., TypedData_Int32x4Array_factory, 0x6dd02406) \
- V(Float64x2List, ., TypedData_Float64x2Array_factory, 0x688e4e97) \
-
-#define GRAPH_TYPED_DATA_INTRINSICS_LIST(V) \
- V(_Int8List, [], Int8ArrayGetIndexed, 0x49767a2c) \
- V(_Int8List, []=, Int8ArraySetIndexed, 0x24f42cd0) \
- V(_Uint8List, [], Uint8ArrayGetIndexed, 0x088d86d4) \
- V(_Uint8List, []=, Uint8ArraySetIndexed, 0x12639541) \
- V(_ExternalUint8Array, [], ExternalUint8ArrayGetIndexed, 0x088d86d4) \
- V(_ExternalUint8Array, []=, ExternalUint8ArraySetIndexed, 0x12639541) \
- V(_Uint8ClampedList, [], Uint8ClampedArrayGetIndexed, 0x088d86d4) \
- V(_Uint8ClampedList, []=, Uint8ClampedArraySetIndexed, 0x6790dba1) \
- V(_ExternalUint8ClampedArray, [], ExternalUint8ClampedArrayGetIndexed, \
- 0x088d86d4) \
- V(_ExternalUint8ClampedArray, []=, ExternalUint8ClampedArraySetIndexed, \
- 0x6790dba1) \
- V(_Int16List, [], Int16ArrayGetIndexed, 0x5ec64948) \
- V(_Int16List, []=, Int16ArraySetIndexed, 0x0e4e8221) \
- V(_Uint16List, [], Uint16ArrayGetIndexed, 0x5f49d093) \
- V(_Uint16List, []=, Uint16ArraySetIndexed, 0x2efbc90f) \
- V(_Int32List, [], Int32ArrayGetIndexed, 0x4bc0d3dd) \
- V(_Int32List, []=, Int32ArraySetIndexed, 0x1adf9823) \
- V(_Uint32List, [], Uint32ArrayGetIndexed, 0x188658ce) \
- V(_Uint32List, []=, Uint32ArraySetIndexed, 0x01f51a79) \
- V(_Int64List, [], Int64ArrayGetIndexed, 0x51eafb97) \
- V(_Int64List, []=, Int64ArraySetIndexed, 0x376181fb) \
- V(_Uint64List, [], Uint64ArrayGetIndexed, 0x4b2a1ba2) \
- V(_Uint64List, []=, Uint64ArraySetIndexed, 0x5f881bd4) \
- V(_Float64List, [], Float64ArrayGetIndexed, 0x0a714486) \
- V(_Float64List, []=, Float64ArraySetIndexed, 0x04937367) \
- V(_Float32List, [], Float32ArrayGetIndexed, 0x5ade301f) \
- V(_Float32List, []=, Float32ArraySetIndexed, 0x0d5c2e2b) \
- V(_Float32x4List, [], Float32x4ArrayGetIndexed, 0x128cddeb) \
- V(_Float32x4List, []=, Float32x4ArraySetIndexed, 0x7ad55c72) \
- V(_Int32x4List, [], Int32x4ArrayGetIndexed, 0x4b78af9c) \
- V(_Int32x4List, []=, Int32x4ArraySetIndexed, 0x31453dab) \
- V(_Float64x2List, [], Float64x2ArrayGetIndexed, 0x644a0be1) \
- V(_Float64x2List, []=, Float64x2ArraySetIndexed, 0x6b836b0b) \
- V(_TypedList, get:length, TypedDataLength, 0x2091c4d8) \
- V(_Float32x4, get:x, Float32x4ShuffleX, 0x63d1a9fd) \
- V(_Float32x4, get:y, Float32x4ShuffleY, 0x203523d9) \
- V(_Float32x4, get:z, Float32x4ShuffleZ, 0x13190678) \
- V(_Float32x4, get:w, Float32x4ShuffleW, 0x698a38de) \
- V(_Float32x4, *, Float32x4Mul, 0x5dec68b2) \
- V(_Float32x4, -, Float32x4Sub, 0x3ea14461) \
- V(_Float32x4, +, Float32x4Add, 0x7ffcf301) \
-
-#define GRAPH_CORE_INTRINSICS_LIST(V) \
- V(_List, get:length, ObjectArrayLength, 0x25952390) \
- V(_List, [], ObjectArrayGetIndexed, 0x653da02e) \
- V(_List, []=, ObjectArraySetIndexed, 0x16b3d2b0) \
- V(_List, _setIndexed, ObjectArraySetIndexedUnchecked, 0x50d64c75) \
- V(_ImmutableList, get:length, ImmutableArrayLength, 0x25952390) \
- V(_ImmutableList, [], ImmutableArrayGetIndexed, 0x653da02e) \
- V(_GrowableList, get:length, GrowableArrayLength, 0x18dd86b4) \
- V(_GrowableList, get:_capacity, GrowableArrayCapacity, 0x2e04be60) \
- V(_GrowableList, _setData, GrowableArraySetData, 0x3dbea348) \
- V(_GrowableList, _setLength, GrowableArraySetLength, 0x753e55da) \
- V(_GrowableList, [], GrowableArrayGetIndexed, 0x446fe1f0) \
- V(_GrowableList, []=, GrowableArraySetIndexed, 0x40a462ec) \
- V(_GrowableList, _setIndexed, GrowableArraySetIndexedUnchecked, 0x297083df) \
- V(_StringBase, get:length, StringBaseLength, 0x2a2d03d1) \
- V(_OneByteString, codeUnitAt, OneByteStringCodeUnitAt, 0x55a0a1f3) \
- V(_TwoByteString, codeUnitAt, TwoByteStringCodeUnitAt, 0x55a0a1f3) \
- V(_ExternalOneByteString, codeUnitAt, ExternalOneByteStringCodeUnitAt, \
- 0x55a0a1f3) \
- V(_ExternalTwoByteString, codeUnitAt, ExternalTwoByteStringCodeUnitAt, \
- 0x55a0a1f3) \
- V(_Double, unary-, DoubleFlipSignBit, 0x6db4674f) \
- V(_Double, truncateToDouble, DoubleTruncate, 0x2f27e5d3) \
- V(_Double, roundToDouble, DoubleRound, 0x2f89c512) \
- V(_Double, floorToDouble, DoubleFloor, 0x6aa87a5f) \
- V(_Double, ceilToDouble, DoubleCeil, 0x1b045e9e) \
- V(_Double, _modulo, DoubleMod, 0x5b8ceed7)
-
-
-#define GRAPH_INTRINSICS_LIST(V) \
- GRAPH_CORE_INTRINSICS_LIST(V) \
- GRAPH_TYPED_DATA_INTRINSICS_LIST(V) \
- GRAPH_MATH_LIB_INTRINSIC_LIST(V) \
-
-#define DEVELOPER_LIB_INTRINSIC_LIST(V) \
- V(_UserTag, makeCurrent, UserTag_makeCurrent, 0x0b3066fd) \
- V(::, _getDefaultTag, UserTag_defaultTag, 0x69f3f1ad) \
- V(::, _getCurrentTag, Profiler_getCurrentTag, 0x05fa99d2) \
- V(::, _isDartStreamEnabled, Timeline_isDartStreamEnabled, 0x72f13f7a) \
-
-#define ASYNC_LIB_INTRINSIC_LIST(V) \
- V(::, _clearAsyncThreadStackTrace, ClearAsyncThreadStackTrace, 0x2edd4b25) \
- V(::, _setAsyncThreadStackTrace, SetAsyncThreadStackTrace, 0x04f429a7) \
-
-#define ALL_INTRINSICS_NO_INTEGER_LIB_LIST(V) \
- ASYNC_LIB_INTRINSIC_LIST(V) \
- CORE_LIB_INTRINSIC_LIST(V) \
- DEVELOPER_LIB_INTRINSIC_LIST(V) \
- MATH_LIB_INTRINSIC_LIST(V) \
- TYPED_DATA_LIB_INTRINSIC_LIST(V) \
-
-#define ALL_INTRINSICS_LIST(V) \
- ALL_INTRINSICS_NO_INTEGER_LIB_LIST(V) \
- CORE_INTEGER_LIB_INTRINSIC_LIST(V)
-
-#define RECOGNIZED_LIST(V) \
- OTHER_RECOGNIZED_LIST(V) \
- ALL_INTRINSICS_LIST(V) \
- GRAPH_INTRINSICS_LIST(V)
-
-// A list of core function that should always be inlined.
-#define INLINE_WHITE_LIST(V) \
- V(Object, ==, ObjectEquals, 0x7b32a55a) \
- V(_List, get:length, ObjectArrayLength, 0x25952390) \
- V(_ImmutableList, get:length, ImmutableArrayLength, 0x25952390) \
- V(_TypedList, get:length, TypedDataLength, 0x2091c4d8) \
- V(_GrowableList, get:length, GrowableArrayLength, 0x18dd86b4) \
- V(_GrowableList, get:_capacity, GrowableArrayCapacity, 0x2e04be60) \
- V(_GrowableList, add, GrowableListAdd, 0x40b490b8) \
- V(_GrowableList, removeLast, GrowableListRemoveLast, 0x007855e5) \
- V(_StringBase, get:length, StringBaseLength, 0x2a2d03d1) \
- V(ListIterator, moveNext, ListIteratorMoveNext, 0x2dca30ce) \
- V(_FixedSizeArrayIterator, moveNext, FixedListIteratorMoveNext, 0x324eb20b) \
- V(_GrowableList, get:iterator, GrowableArrayIterator, 0x5bd2ef37) \
- V(_GrowableList, forEach, GrowableArrayForEach, 0x74900bb8) \
- V(_List, ., ObjectArrayAllocate, 0x2121902f) \
- V(ListMixin, get:isEmpty, ListMixinIsEmpty, 0x7be74d04) \
- V(_List, get:iterator, ObjectArrayIterator, 0x6c851c55) \
- V(_List, forEach, ObjectArrayForEach, 0x11406b13) \
- V(_List, _slice, ObjectArraySlice, 0x4c865d1d) \
- V(_ImmutableList, get:iterator, ImmutableArrayIterator, 0x6c851c55) \
- V(_ImmutableList, forEach, ImmutableArrayForEach, 0x11406b13) \
- V(_Int8ArrayView, [], Int8ArrayViewGetIndexed, 0x7e5a8458) \
- V(_Int8ArrayView, []=, Int8ArrayViewSetIndexed, 0x62f615e4) \
- V(_Uint8ArrayView, [], Uint8ArrayViewGetIndexed, 0x7d308247) \
- V(_Uint8ArrayView, []=, Uint8ArrayViewSetIndexed, 0x65ba546e) \
- V(_Uint8ClampedArrayView, [], Uint8ClampedArrayViewGetIndexed, 0x7d308247) \
- V(_Uint8ClampedArrayView, []=, Uint8ClampedArrayViewSetIndexed, 0x65ba546e) \
- V(_Uint16ArrayView, [], Uint16ArrayViewGetIndexed, 0xe96836dd) \
- V(_Uint16ArrayView, []=, Uint16ArrayViewSetIndexed, 0x15b02947) \
- V(_Int16ArrayView, [], Int16ArrayViewGetIndexed, 0x1b24a48b) \
- V(_Int16ArrayView, []=, Int16ArrayViewSetIndexed, 0xb91ec2e6) \
- V(_Uint32ArrayView, [], Uint32ArrayViewGetIndexed, 0x8a4f93b3) \
- V(_Uint32ArrayView, []=, Uint32ArrayViewSetIndexed, 0xf54918b5) \
- V(_Int32ArrayView, [], Int32ArrayViewGetIndexed, 0x85040819) \
- V(_Int32ArrayView, []=, Int32ArrayViewSetIndexed, 0xaec8c6f5) \
- V(_Uint64ArrayView, [], Uint64ArrayViewGetIndexed, 0xd0c44fe7) \
- V(_Uint64ArrayView, []=, Uint64ArrayViewSetIndexed, 0x402712b7) \
- V(_Int64ArrayView, [], Int64ArrayViewGetIndexed, 0xf3090b95) \
- V(_Int64ArrayView, []=, Int64ArrayViewSetIndexed, 0xca07e497) \
- V(_Float32ArrayView, [], Float32ArrayViewGetIndexed, 0xef967533) \
- V(_Float32ArrayView, []=, Float32ArrayViewSetIndexed, 0xc9b691bd) \
- V(_Float64ArrayView, [], Float64ArrayViewGetIndexed, 0x9d83f585) \
- V(_Float64ArrayView, []=, Float64ArrayViewSetIndexed, 0x3c1adabd) \
- V(_ByteDataView, setInt8, ByteDataViewSetInt8, 0x6395293e) \
- V(_ByteDataView, setUint8, ByteDataViewSetUint8, 0x79979d1f) \
- V(_ByteDataView, setInt16, ByteDataViewSetInt16, 0x525ec534) \
- V(_ByteDataView, setUint16, ByteDataViewSetUint16, 0x48eda263) \
- V(_ByteDataView, setInt32, ByteDataViewSetInt32, 0x523666fa) \
- V(_ByteDataView, setUint32, ByteDataViewSetUint32, 0x5a4683da) \
- V(_ByteDataView, setInt64, ByteDataViewSetInt64, 0x4283a650) \
- V(_ByteDataView, setUint64, ByteDataViewSetUint64, 0x687a1892) \
- V(_ByteDataView, setFloat32, ByteDataViewSetFloat32, 0x7d5784fd) \
- V(_ByteDataView, setFloat64, ByteDataViewSetFloat64, 0x00101e3f) \
- V(_ByteDataView, getInt8, ByteDataViewGetInt8, 0x68448b4d) \
- V(_ByteDataView, getUint8, ByteDataViewGetUint8, 0x5d68cbf2) \
- V(_ByteDataView, getInt16, ByteDataViewGetInt16, 0x691b5ead) \
- V(_ByteDataView, getUint16, ByteDataViewGetUint16, 0x78b744d8) \
- V(_ByteDataView, getInt32, ByteDataViewGetInt32, 0x3a0f4efa) \
- V(_ByteDataView, getUint32, ByteDataViewGetUint32, 0x583261be) \
- V(_ByteDataView, getInt64, ByteDataViewGetInt64, 0x77de471c) \
- V(_ByteDataView, getUint64, ByteDataViewGetUint64, 0x0ffadc4b) \
- V(_ByteDataView, getFloat32, ByteDataViewGetFloat32, 0x6a205749) \
- V(_ByteDataView, getFloat64, ByteDataViewGetFloat64, 0x69f58d27) \
- V(::, exp, MathExp, 0x32ab9efa) \
- V(::, log, MathLog, 0x1ee8f9fc) \
- V(::, max, MathMax, 0x377e8889) \
- V(::, min, MathMin, 0x32ebc57d) \
- V(::, pow, MathPow, 0x79efc5a2) \
- V(::, _classRangeCheck, ClassRangeCheck, 0x2ae76b84) \
- V(::, _classRangeCheckNegative, ClassRangeCheckNegated, 0x5acdfb75) \
- V(::, _toInt, ConvertMaskedInt, 0x713908fd) \
- V(::, _toInt8, ConvertIntToInt8, 0x7484a780) \
- V(::, _toUint8, ConvertIntToUint8, 0x0a15b522) \
- V(::, _toInt16, ConvertIntToInt16, 0x0a83fcc6) \
- V(::, _toUint16, ConvertIntToUint16, 0x6087d1af) \
- V(::, _toInt32, ConvertIntToInt32, 0x62b451b9) \
- V(::, _toUint32, ConvertIntToUint32, 0x17a8e085) \
- V(::, _byteSwap16, ByteSwap16, 0x44f173be) \
- V(::, _byteSwap32, ByteSwap32, 0x6219333b) \
- V(::, _byteSwap64, ByteSwap64, 0x9abe57e0) \
- V(Lists, copy, ListsCopy, 0x40e974f6) \
- V(_HashVMBase, get:_index, LinkedHashMap_getIndex, 0x02477157) \
- V(_HashVMBase, set:_index, LinkedHashMap_setIndex, 0x4fc8d5e0) \
- V(_HashVMBase, get:_data, LinkedHashMap_getData, 0x2d7a70ac) \
- V(_HashVMBase, set:_data, LinkedHashMap_setData, 0x0ec032e8) \
- V(_HashVMBase, get:_usedData, LinkedHashMap_getUsedData, 0x088599ed) \
- V(_HashVMBase, set:_usedData, LinkedHashMap_setUsedData, 0x5f42ca86) \
- V(_HashVMBase, get:_hashMask, LinkedHashMap_getHashMask, 0x32f3b13b) \
- V(_HashVMBase, set:_hashMask, LinkedHashMap_setHashMask, 0x7219c45b) \
- V(_HashVMBase, get:_deletedKeys, LinkedHashMap_getDeletedKeys, 0x558481c2) \
- V(_HashVMBase, set:_deletedKeys, LinkedHashMap_setDeletedKeys, 0x5aa9888d) \
-
-// A list of core function that should never be inlined.
-#define INLINE_BLACK_LIST(V) \
- V(::, asin, MathAsin, 0x2ecc2fcd) \
- V(::, acos, MathAcos, 0x08cf2212) \
- V(::, atan, MathAtan, 0x1e2731d5) \
- V(::, atan2, MathAtan2, 0x39f1fa41) \
- V(::, cos, MathCos, 0x459bf5fe) \
- V(::, sin, MathSin, 0x6b7bd98c) \
- V(::, sqrt, MathSqrt, 0x70482cf3) \
- V(::, tan, MathTan, 0x3bcd772a) \
- V(_BigIntImpl, _lsh, Bigint_lsh, 0x5b6cfc8b) \
- V(_BigIntImpl, _rsh, Bigint_rsh, 0x6ff14a49) \
- V(_BigIntImpl, _absAdd, Bigint_absAdd, 0x5bf14238) \
- V(_BigIntImpl, _absSub, Bigint_absSub, 0x1de5bd32) \
- V(_BigIntImpl, _mulAdd, Bigint_mulAdd, 0x6f277966) \
- V(_BigIntImpl, _sqrAdd, Bigint_sqrAdd, 0x68e4c8ea) \
- V(_BigIntImpl, _estimateQuotientDigit, Bigint_estimateQuotientDigit, \
- 0x35456d91) \
- V(_BigIntMontgomeryReduction, _mulMod, Montgomery_mulMod, 0x0f7b0375) \
- V(_Double, >, Double_greaterThan, 0x4f1375a3) \
- V(_Double, >=, Double_greaterEqualThan, 0x4260c184) \
- V(_Double, <, Double_lessThan, 0x365d1eba) \
- V(_Double, <=, Double_lessEqualThan, 0x74b5eb64) \
- V(_Double, ==, Double_equal, 0x613492fc) \
- V(_Double, +, Double_add, 0x53994370) \
- V(_Double, -, Double_sub, 0x3b69d466) \
- V(_Double, *, Double_mul, 0x2bb9bd5d) \
- V(_Double, /, Double_div, 0x483eee28) \
- V(_IntegerImplementation, +, Integer_add, 0x43d53af7) \
- V(_IntegerImplementation, -, Integer_sub, 0x2dc22e03) \
- V(_IntegerImplementation, *, Integer_mul, 0x4e7a1c24) \
- V(_IntegerImplementation, ~/, Integer_truncDivide, 0x4efb2d39) \
- V(_IntegerImplementation, unary-, Integer_negate, 0x428bf6fa) \
- V(_IntegerImplementation, &, Integer_bitAnd, 0x5ab35f30) \
- V(_IntegerImplementation, |, Integer_bitOr, 0x267fa107) \
- V(_IntegerImplementation, ^, Integer_bitXor, 0x0c7b0230) \
- V(_IntegerImplementation, >, Integer_greaterThan, 0x6599a6e1) \
- V(_IntegerImplementation, ==, Integer_equal, 0x58abc487) \
- V(_IntegerImplementation, <, Integer_lessThan, 0x365d1eba) \
- V(_IntegerImplementation, <=, Integer_lessEqualThan, 0x74b5eb64) \
- V(_IntegerImplementation, >=, Integer_greaterEqualThan, 0x4260c184) \
- V(_IntegerImplementation, <<, Integer_shl, 0x371c45fa) \
- V(_IntegerImplementation, >>, Integer_sar, 0x2b630578) \
-
-// A list of core functions that internally dispatch based on received id.
-#define POLYMORPHIC_TARGET_LIST(V) \
- V(_StringBase, [], StringBaseCharAt, 0x7cbb8603) \
- V(_TypedList, _getInt8, ByteArrayBaseGetInt8, 0x7041895a) \
- V(_TypedList, _getUint8, ByteArrayBaseGetUint8, 0x336fa3ea) \
- V(_TypedList, _getInt16, ByteArrayBaseGetInt16, 0x231bbe2e) \
- V(_TypedList, _getUint16, ByteArrayBaseGetUint16, 0x0371785f) \
- V(_TypedList, _getInt32, ByteArrayBaseGetInt32, 0x65ab3a20) \
- V(_TypedList, _getUint32, ByteArrayBaseGetUint32, 0x0cb0fcf6) \
- V(_TypedList, _getInt64, ByteArrayBaseGetInt64, 0x7db75d78) \
- V(_TypedList, _getUint64, ByteArrayBaseGetUint64, 0x1487cfc6) \
- V(_TypedList, _getFloat32, ByteArrayBaseGetFloat32, 0x6674ea6f) \
- V(_TypedList, _getFloat64, ByteArrayBaseGetFloat64, 0x236c6e7a) \
- V(_TypedList, _getFloat32x4, ByteArrayBaseGetFloat32x4, 0x5c367ffb) \
- V(_TypedList, _getInt32x4, ByteArrayBaseGetInt32x4, 0x772d1c0f) \
- V(_TypedList, _setInt8, ByteArrayBaseSetInt8, 0x12bae36a) \
- V(_TypedList, _setUint8, ByteArrayBaseSetInt8, 0x15821cc9) \
- V(_TypedList, _setInt16, ByteArrayBaseSetInt16, 0x1f8237fa) \
- V(_TypedList, _setUint16, ByteArrayBaseSetInt16, 0x181e5d16) \
- V(_TypedList, _setInt32, ByteArrayBaseSetInt32, 0x7ddb9f87) \
- V(_TypedList, _setUint32, ByteArrayBaseSetUint32, 0x74094f8d) \
- V(_TypedList, _setInt64, ByteArrayBaseSetInt64, 0x4741396e) \
- V(_TypedList, _setUint64, ByteArrayBaseSetUint64, 0x3b398ae4) \
- V(_TypedList, _setFloat32, ByteArrayBaseSetFloat32, 0x03db087b) \
- V(_TypedList, _setFloat64, ByteArrayBaseSetFloat64, 0x38a80b0d) \
- V(_TypedList, _setFloat32x4, ByteArrayBaseSetFloat32x4, 0x40052c4e) \
- V(_TypedList, _setInt32x4, ByteArrayBaseSetInt32x4, 0x07b89f54) \
- V(Object, get:runtimeType, ObjectRuntimeType, 0x00e8ab29)
-
-// clang-format on
-
// Forward declarations.
class Function;
class Library;
@@ -588,35 +81,6 @@
ASSERT(f.CheckSourceFingerprint(#p0 ", " #p1 ", " #p2, fp))
#endif // !defined(DART_PRECOMPILED_RUNTIME)
-// clang-format off
-// List of recognized list factories:
-// (factory-name-symbol, class-name-string, constructor-name-string,
-// result-cid, fingerprint).
-#define RECOGNIZED_LIST_FACTORY_LIST(V) \
- V(_ListFactory, _List, ., kArrayCid, 0x2121902f) \
- V(_GrowableListWithData, _GrowableList, .withData, kGrowableObjectArrayCid, \
- 0x28b2138e) \
- V(_GrowableListFactory, _GrowableList, ., kGrowableObjectArrayCid, \
- 0x3eed680b) \
- V(_Int8ArrayFactory, Int8List, ., kTypedDataInt8ArrayCid, 0x7e39a3a1) \
- V(_Uint8ArrayFactory, Uint8List, ., kTypedDataUint8ArrayCid, 0x3a79adf7) \
- V(_Uint8ClampedArrayFactory, Uint8ClampedList, ., \
- kTypedDataUint8ClampedArrayCid, 0x67f38395) \
- V(_Int16ArrayFactory, Int16List, ., kTypedDataInt16ArrayCid, 0x6477bda8) \
- V(_Uint16ArrayFactory, Uint16List, ., kTypedDataUint16ArrayCid, 0x5707c5a2) \
- V(_Int32ArrayFactory, Int32List, ., kTypedDataInt32ArrayCid, 0x2b96ec0e) \
- V(_Uint32ArrayFactory, Uint32List, ., kTypedDataUint32ArrayCid, 0x0c1c0d62) \
- V(_Int64ArrayFactory, Int64List, ., kTypedDataInt64ArrayCid, 0x279ab485) \
- V(_Uint64ArrayFactory, Uint64List, ., kTypedDataUint64ArrayCid, 0x7bcb89c2) \
- V(_Float64ArrayFactory, Float64List, ., kTypedDataFloat64ArrayCid, \
- 0x1fde3eaf) \
- V(_Float32ArrayFactory, Float32List, ., kTypedDataFloat32ArrayCid, \
- 0x43506c09) \
- V(_Float32x4ArrayFactory, Float32x4List, ., kTypedDataFloat32x4ArrayCid, \
- 0x4a4030d6)
-
-// clang-format on
-
// Class that recognizes factories and returns corresponding result cid.
class FactoryRecognizer : public AllStatic {
public:
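The fingerprint column in these tables is what the CheckSourceFingerprint assertion (visible in the remaining method_recognizer.h context above) validates, and the list's own comment describes the workflow: put 0 in a new entry, build, and copy the correct value out of the mismatch error. A hypothetical, self-contained sketch of that shape follows; ComputeFingerprint, the two-entry list, and the reporting loop are stand-ins invented for illustration rather than the VM's implementation, so both entries will intentionally report a mismatch when run, which is exactly the report you would read the real value from.

#include <cstdio>

// Toy stand-in for the (class, function, enum, fingerprint) tuples; the two
// entries are copied from the list above, everything else is illustrative.
#define TOY_RECOGNIZED_LIST(V)                                                 \
  V(_Smi, bitNegate, Smi_bitNegate, 0x67299f4f)                                \
  V(_Double, hashCode, Double_hashCode, 0x702b77b7)

// Stand-in for Function::SourceFingerprint(): any deterministic hash works
// for the illustration (FNV-1a here, assuming 32-bit unsigned).
static unsigned ComputeFingerprint(const char* name) {
  unsigned h = 2166136261u;
  for (const char* p = name; *p != '\0'; ++p) {
    h = (h ^ static_cast<unsigned char>(*p)) * 16777619u;
  }
  return h & 0x7fffffffu;
}

struct RecognizedEntry {
  const char* name;
  unsigned expected_fp;
};

// Expand the list into a table of expected fingerprints.
#define DEFINE_ENTRY(class_name, function_name, enum_name, fp)                 \
  {#class_name "." #function_name, fp},
static const RecognizedEntry kEntries[] = {TOY_RECOGNIZED_LIST(DEFINE_ENTRY)};
#undef DEFINE_ENTRY

int main() {
  // On a mismatch, report the value the source currently produces, the same
  // information a fingerprint-mismatch error gives you when you start from 0.
  for (const RecognizedEntry& e : kEntries) {
    const unsigned actual = ComputeFingerprint(e.name);
    if (actual != e.expected_fp) {
      std::printf("%s: list has 0x%08x, source has 0x%08x\n", e.name,
                  e.expected_fp, actual);
    }
  }
  return 0;
}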
diff --git a/runtime/vm/compiler/recognized_methods_list.h b/runtime/vm/compiler/recognized_methods_list.h
new file mode 100644
index 0000000..fa35d7c
--- /dev/null
+++ b/runtime/vm/compiler/recognized_methods_list.h
@@ -0,0 +1,545 @@
+// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#ifndef RUNTIME_VM_COMPILER_RECOGNIZED_METHODS_LIST_H_
+#define RUNTIME_VM_COMPILER_RECOGNIZED_METHODS_LIST_H_
+
+namespace dart {
+
+// clang-format off
+// (class-name, function-name, recognized enum, result type, fingerprint).
+// When adding a new function add a 0 as fingerprint, build and run to get the
+// correct fingerprint from the mismatch error (or use Library::GetFunction()
+// and print func.SourceFingerprint()).
+#define OTHER_RECOGNIZED_LIST(V) \
+ V(::, identical, ObjectIdentical, 0x49c6e96a) \
+ V(ClassID, getID, ClassIDgetID, 0x7b18b257) \
+ V(Object, Object., ObjectConstructor, 0x681617fe) \
+ V(List, ., ListFactory, 0x629f8324) \
+ V(_List, ., ObjectArrayAllocate, 0x2121902f) \
+ V(_TypedList, _getInt8, ByteArrayBaseGetInt8, 0x7041895a) \
+ V(_TypedList, _getUint8, ByteArrayBaseGetUint8, 0x336fa3ea) \
+ V(_TypedList, _getInt16, ByteArrayBaseGetInt16, 0x231bbe2e) \
+ V(_TypedList, _getUint16, ByteArrayBaseGetUint16, 0x0371785f) \
+ V(_TypedList, _getInt32, ByteArrayBaseGetInt32, 0x65ab3a20) \
+ V(_TypedList, _getUint32, ByteArrayBaseGetUint32, 0x0cb0fcf6) \
+ V(_TypedList, _getInt64, ByteArrayBaseGetInt64, 0x7db75d78) \
+ V(_TypedList, _getUint64, ByteArrayBaseGetUint64, 0x1487cfc6) \
+ V(_TypedList, _getFloat32, ByteArrayBaseGetFloat32, 0x6674ea6f) \
+ V(_TypedList, _getFloat64, ByteArrayBaseGetFloat64, 0x236c6e7a) \
+ V(_TypedList, _getFloat32x4, ByteArrayBaseGetFloat32x4, 0x5c367ffb) \
+ V(_TypedList, _getInt32x4, ByteArrayBaseGetInt32x4, 0x772d1c0f) \
+ V(_TypedList, _setInt8, ByteArrayBaseSetInt8, 0x12bae36a) \
+ V(_TypedList, _setUint8, ByteArrayBaseSetUint8, 0x15821cc9) \
+ V(_TypedList, _setInt16, ByteArrayBaseSetInt16, 0x1f8237fa) \
+ V(_TypedList, _setUint16, ByteArrayBaseSetUint16, 0x181e5d16) \
+ V(_TypedList, _setInt32, ByteArrayBaseSetInt32, 0x7ddb9f87) \
+ V(_TypedList, _setUint32, ByteArrayBaseSetUint32, 0x74094f8d) \
+ V(_TypedList, _setInt64, ByteArrayBaseSetInt64, 0x4741396e) \
+ V(_TypedList, _setUint64, ByteArrayBaseSetUint64, 0x3b398ae4) \
+ V(_TypedList, _setFloat32, ByteArrayBaseSetFloat32, 0x03db087b) \
+ V(_TypedList, _setFloat64, ByteArrayBaseSetFloat64, 0x38a80b0d) \
+ V(_TypedList, _setFloat32x4, ByteArrayBaseSetFloat32x4, 0x40052c4e) \
+ V(_TypedList, _setInt32x4, ByteArrayBaseSetInt32x4, 0x07b89f54) \
+ V(::, _toClampedUint8, ConvertIntToClampedUint8, 0x564b0435) \
+ V(_StringBase, _interpolate, StringBaseInterpolate, 0x01ecb15a) \
+ V(_IntegerImplementation, toDouble, IntegerToDouble, 0x05da96ed) \
+ V(_Double, _add, DoubleAdd, 0x2a38277b) \
+ V(_Double, _sub, DoubleSub, 0x4f466391) \
+ V(_Double, _mul, DoubleMul, 0x175e4f66) \
+ V(_Double, _div, DoubleDiv, 0x0854181b) \
+ V(::, min, MathMin, 0x32ebc57d) \
+ V(::, max, MathMax, 0x377e8889) \
+ V(::, _doublePow, MathDoublePow, 0x5add0ec1) \
+ V(::, _intPow, MathIntPow, 0x11b45569) \
+ V(Float32x4, Float32x4., Float32x4Constructor, 0x26ea459b) \
+ V(Float32x4, Float32x4.zero, Float32x4Zero, 0x16eca604) \
+ V(Float32x4, Float32x4.splat, Float32x4Splat, 0x694e83e3) \
+ V(Float32x4, Float32x4.fromInt32x4Bits, Int32x4ToFloat32x4, 0x2f62ebd3) \
+ V(Float32x4, Float32x4.fromFloat64x2, Float64x2ToFloat32x4, 0x50ed6910) \
+ V(_Float32x4, shuffle, Float32x4Shuffle, 0x7829101f) \
+ V(_Float32x4, shuffleMix, Float32x4ShuffleMix, 0x4182c06b) \
+ V(_Float32x4, get:signMask, Float32x4GetSignMask, 0x1d08b351) \
+ V(_Float32x4, equal, Float32x4Equal, 0x11adb239) \
+ V(_Float32x4, greaterThan, Float32x4GreaterThan, 0x48adaf58) \
+ V(_Float32x4, greaterThanOrEqual, Float32x4GreaterThanOrEqual, 0x32db94ca) \
+ V(_Float32x4, lessThan, Float32x4LessThan, 0x425b000c) \
+ V(_Float32x4, lessThanOrEqual, Float32x4LessThanOrEqual, 0x0278c2f8) \
+ V(_Float32x4, notEqual, Float32x4NotEqual, 0x2987cd26) \
+ V(_Float32x4, min, Float32x4Min, 0x5ed74b6f) \
+ V(_Float32x4, max, Float32x4Max, 0x68696442) \
+ V(_Float32x4, scale, Float32x4Scale, 0x704e4122) \
+ V(_Float32x4, sqrt, Float32x4Sqrt, 0x2c967a6f) \
+ V(_Float32x4, reciprocalSqrt, Float32x4ReciprocalSqrt, 0x6264bfe8) \
+ V(_Float32x4, reciprocal, Float32x4Reciprocal, 0x3cd7e819) \
+ V(_Float32x4, unary-, Float32x4Negate, 0x37accb52) \
+ V(_Float32x4, abs, Float32x4Abs, 0x471cdd87) \
+ V(_Float32x4, clamp, Float32x4Clamp, 0x2cb30492) \
+ V(_Float32x4, withX, Float32x4WithX, 0x4e336aff) \
+ V(_Float32x4, withY, Float32x4WithY, 0x0a72b910) \
+ V(_Float32x4, withZ, Float32x4WithZ, 0x31e93658) \
+ V(_Float32x4, withW, Float32x4WithW, 0x60ddc105) \
+ V(Float64x2, Float64x2., Float64x2Constructor, 0x43054b9f) \
+ V(Float64x2, Float64x2.zero, Float64x2Zero, 0x4af12f9d) \
+ V(Float64x2, Float64x2.splat, Float64x2Splat, 0x134edef0) \
+ V(Float64x2, Float64x2.fromFloat32x4, Float32x4ToFloat64x2, 0x17d6b5e4) \
+ V(_Float64x2, get:x, Float64x2GetX, 0x58c09c58) \
+ V(_Float64x2, get:y, Float64x2GetY, 0x3cf5e5b8) \
+ V(_Float64x2, unary-, Float64x2Negate, 0x415ca009) \
+ V(_Float64x2, abs, Float64x2Abs, 0x031f9e47) \
+ V(_Float64x2, sqrt, Float64x2Sqrt, 0x77f711dd) \
+ V(_Float64x2, get:signMask, Float64x2GetSignMask, 0x27deda4b) \
+ V(_Float64x2, scale, Float64x2Scale, 0x26830a61) \
+ V(_Float64x2, withX, Float64x2WithX, 0x1d2bcaf5) \
+ V(_Float64x2, withY, Float64x2WithY, 0x383ed6ac) \
+ V(_Float64x2, min, Float64x2Min, 0x28d7ddf6) \
+ V(_Float64x2, max, Float64x2Max, 0x0bd74e5b) \
+ V(Int32x4, Int32x4., Int32x4Constructor, 0x480555a9) \
+ V(Int32x4, Int32x4.bool, Int32x4BoolConstructor, 0x36aa6963) \
+ V(Int32x4, Int32x4.fromFloat32x4Bits, Float32x4ToInt32x4, 0x6715388a) \
+ V(_Int32x4, get:flagX, Int32x4GetFlagX, 0x56396c82) \
+ V(_Int32x4, get:flagY, Int32x4GetFlagY, 0x44704738) \
+ V(_Int32x4, get:flagZ, Int32x4GetFlagZ, 0x20d6ff37) \
+ V(_Int32x4, get:flagW, Int32x4GetFlagW, 0x5045616a) \
+ V(_Int32x4, get:signMask, Int32x4GetSignMask, 0x2c1fb2a3) \
+ V(_Int32x4, shuffle, Int32x4Shuffle, 0x20bc0b16) \
+ V(_Int32x4, shuffleMix, Int32x4ShuffleMix, 0x5c7056e1) \
+ V(_Int32x4, select, Int32x4Select, 0x6b49654f) \
+ V(_Int32x4, withFlagX, Int32x4WithFlagX, 0x0ef58fcf) \
+ V(_Int32x4, withFlagY, Int32x4WithFlagY, 0x6485a9c4) \
+ V(_Int32x4, withFlagZ, Int32x4WithFlagZ, 0x267acdfa) \
+ V(_Int32x4, withFlagW, Int32x4WithFlagW, 0x345ac675) \
+ V(_HashVMBase, get:_index, LinkedHashMap_getIndex, 0x02477157) \
+ V(_HashVMBase, set:_index, LinkedHashMap_setIndex, 0x4fc8d5e0) \
+ V(_HashVMBase, get:_data, LinkedHashMap_getData, 0x2d7a70ac) \
+ V(_HashVMBase, set:_data, LinkedHashMap_setData, 0x0ec032e8) \
+ V(_HashVMBase, get:_usedData, LinkedHashMap_getUsedData, 0x088599ed) \
+ V(_HashVMBase, set:_usedData, LinkedHashMap_setUsedData, 0x5f42ca86) \
+ V(_HashVMBase, get:_hashMask, LinkedHashMap_getHashMask, 0x32f3b13b) \
+ V(_HashVMBase, set:_hashMask, LinkedHashMap_setHashMask, 0x7219c45b) \
+ V(_HashVMBase, get:_deletedKeys, LinkedHashMap_getDeletedKeys, 0x558481c2) \
+ V(_HashVMBase, set:_deletedKeys, LinkedHashMap_setDeletedKeys, 0x5aa9888d) \
+ V(::, _classRangeCheck, ClassRangeCheck, 0x2ae76b84) \
+ V(::, _classRangeCheckNegative, ClassRangeCheckNegated, 0x5acdfb75) \
+
+// List of intrinsics:
+// (class-name, function-name, intrinsification method, fingerprint).
+#define CORE_LIB_INTRINSIC_LIST(V) \
+ V(_Smi, ~, Smi_bitNegate, 0x67299f4f) \
+ V(_Smi, get:bitLength, Smi_bitLength, 0x25b3cb0a) \
+ V(_Smi, _bitAndFromSmi, Smi_bitAndFromSmi, 0x562d5047) \
+ V(_BigIntImpl, _lsh, Bigint_lsh, 0x5b6cfc8b) \
+ V(_BigIntImpl, _rsh, Bigint_rsh, 0x6ff14a49) \
+ V(_BigIntImpl, _absAdd, Bigint_absAdd, 0x5bf14238) \
+ V(_BigIntImpl, _absSub, Bigint_absSub, 0x1de5bd32) \
+ V(_BigIntImpl, _mulAdd, Bigint_mulAdd, 0x6f277966) \
+ V(_BigIntImpl, _sqrAdd, Bigint_sqrAdd, 0x68e4c8ea) \
+ V(_BigIntImpl, _estimateQuotientDigit, Bigint_estimateQuotientDigit, \
+ 0x35456d91) \
+ V(_BigIntMontgomeryReduction, _mulMod, Montgomery_mulMod, 0x0f7b0375) \
+ V(_Double, >, Double_greaterThan, 0x4f1375a3) \
+ V(_Double, >=, Double_greaterEqualThan, 0x4260c184) \
+ V(_Double, <, Double_lessThan, 0x365d1eba) \
+ V(_Double, <=, Double_lessEqualThan, 0x74b5eb64) \
+ V(_Double, ==, Double_equal, 0x613492fc) \
+ V(_Double, +, Double_add, 0x53994370) \
+ V(_Double, -, Double_sub, 0x3b69d466) \
+ V(_Double, *, Double_mul, 0x2bb9bd5d) \
+ V(_Double, /, Double_div, 0x483eee28) \
+ V(_Double, get:hashCode, Double_hashCode, 0x702b77b7) \
+ V(_Double, get:_identityHashCode, Double_identityHash, 0x7bda5549) \
+ V(_Double, get:isNaN, Double_getIsNaN, 0x0af9d4a9) \
+ V(_Double, get:isInfinite, Double_getIsInfinite, 0x0f7acb47) \
+ V(_Double, get:isNegative, Double_getIsNegative, 0x3a59e7f4) \
+ V(_Double, _mulFromInteger, Double_mulFromInteger, 0x2017fcf6) \
+ V(_Double, .fromInteger, DoubleFromInteger, 0x6d234f4b) \
+ V(_GrowableList, .withData, GrowableArray_Allocate, 0x28b2138e) \
+ V(_RegExp, _ExecuteMatch, RegExp_ExecuteMatch, 0x380184b1) \
+ V(_RegExp, _ExecuteMatchSticky, RegExp_ExecuteMatchSticky, 0x79b8f955) \
+ V(Object, ==, ObjectEquals, 0x7b32a55a) \
+ V(Object, get:runtimeType, ObjectRuntimeType, 0x00e8ab29) \
+ V(Object, _haveSameRuntimeType, ObjectHaveSameRuntimeType, 0x4dc50799) \
+ V(_StringBase, get:hashCode, String_getHashCode, 0x78c3d446) \
+ V(_StringBase, get:_identityHashCode, String_identityHash, 0x0472b1d8) \
+ V(_StringBase, get:isEmpty, StringBaseIsEmpty, 0x4a8b29c8) \
+ V(_StringBase, _substringMatches, StringBaseSubstringMatches, 0x46de4f10) \
+ V(_StringBase, [], StringBaseCharAt, 0x7cbb8603) \
+ V(_OneByteString, get:hashCode, OneByteString_getHashCode, 0x78c3d446) \
+ V(_OneByteString, _substringUncheckedNative, \
+ OneByteString_substringUnchecked, 0x3538ad86) \
+ V(_OneByteString, _setAt, OneByteStringSetAt, 0x11ffddd1) \
+ V(_OneByteString, _allocate, OneByteString_allocate, 0x74933376) \
+ V(_OneByteString, ==, OneByteString_equality, 0x4eda197e) \
+ V(_TwoByteString, ==, TwoByteString_equality, 0x4eda197e) \
+ V(_Type, get:hashCode, Type_getHashCode, 0x18d1523f) \
+ V(::, _getHash, Object_getHash, 0x2827856d) \
+ V(::, _setHash, Object_setHash, 0x690faebd) \
+
+
+#define CORE_INTEGER_LIB_INTRINSIC_LIST(V) \
+ V(_IntegerImplementation, _addFromInteger, Integer_addFromInteger, \
+ 0x6a10c54a) \
+ V(_IntegerImplementation, +, Integer_add, 0x43d53af7) \
+ V(_IntegerImplementation, _subFromInteger, Integer_subFromInteger, \
+ 0x3fa4b1ed) \
+ V(_IntegerImplementation, -, Integer_sub, 0x2dc22e03) \
+ V(_IntegerImplementation, _mulFromInteger, Integer_mulFromInteger, \
+ 0x3216e299) \
+ V(_IntegerImplementation, *, Integer_mul, 0x4e7a1c24) \
+ V(_IntegerImplementation, _moduloFromInteger, Integer_moduloFromInteger, \
+ 0x6348b974) \
+ V(_IntegerImplementation, ~/, Integer_truncDivide, 0x4efb2d39) \
+ V(_IntegerImplementation, unary-, Integer_negate, 0x428bf6fa) \
+ V(_IntegerImplementation, _bitAndFromInteger, Integer_bitAndFromInteger, \
+ 0x395b1678) \
+ V(_IntegerImplementation, &, Integer_bitAnd, 0x5ab35f30) \
+ V(_IntegerImplementation, _bitOrFromInteger, Integer_bitOrFromInteger, \
+ 0x6a36b395) \
+ V(_IntegerImplementation, |, Integer_bitOr, 0x267fa107) \
+ V(_IntegerImplementation, _bitXorFromInteger, Integer_bitXorFromInteger, \
+ 0x72da93f0) \
+ V(_IntegerImplementation, ^, Integer_bitXor, 0x0c7b0230) \
+ V(_IntegerImplementation, _greaterThanFromInteger, \
+ Integer_greaterThanFromInt, 0x4a50ed58) \
+ V(_IntegerImplementation, >, Integer_greaterThan, 0x6599a6e1) \
+ V(_IntegerImplementation, ==, Integer_equal, 0x58abc487) \
+ V(_IntegerImplementation, _equalToInteger, Integer_equalToInteger, \
+ 0x063be842) \
+ V(_IntegerImplementation, <, Integer_lessThan, 0x365d1eba) \
+ V(_IntegerImplementation, <=, Integer_lessEqualThan, 0x74b5eb64) \
+ V(_IntegerImplementation, >=, Integer_greaterEqualThan, 0x4260c184) \
+ V(_IntegerImplementation, <<, Integer_shl, 0x371c45fa) \
+ V(_IntegerImplementation, >>, Integer_sar, 0x2b630578) \
+ V(_Double, toInt, DoubleToInteger, 0x26ef344b) \
+
+#define MATH_LIB_INTRINSIC_LIST(V) \
+ V(::, sqrt, MathSqrt, 0x70482cf3) \
+ V(_Random, _nextState, Random_nextState, 0x2842c4d5) \
+
+#define GRAPH_MATH_LIB_INTRINSIC_LIST(V) \
+ V(::, sin, MathSin, 0x6b7bd98c) \
+ V(::, cos, MathCos, 0x459bf5fe) \
+ V(::, tan, MathTan, 0x3bcd772a) \
+ V(::, asin, MathAsin, 0x2ecc2fcd) \
+ V(::, acos, MathAcos, 0x08cf2212) \
+ V(::, atan, MathAtan, 0x1e2731d5) \
+ V(::, atan2, MathAtan2, 0x39f1fa41) \
+
+#define TYPED_DATA_LIB_INTRINSIC_LIST(V) \
+ V(Int8List, ., TypedData_Int8Array_factory, 0x7e39a3a1) \
+ V(Uint8List, ., TypedData_Uint8Array_factory, 0x3a79adf7) \
+ V(Uint8ClampedList, ., TypedData_Uint8ClampedArray_factory, 0x67f38395) \
+ V(Int16List, ., TypedData_Int16Array_factory, 0x6477bda8) \
+ V(Uint16List, ., TypedData_Uint16Array_factory, 0x5707c5a2) \
+ V(Int32List, ., TypedData_Int32Array_factory, 0x2b96ec0e) \
+ V(Uint32List, ., TypedData_Uint32Array_factory, 0x0c1c0d62) \
+ V(Int64List, ., TypedData_Int64Array_factory, 0x279ab485) \
+ V(Uint64List, ., TypedData_Uint64Array_factory, 0x7bcb89c2) \
+ V(Float32List, ., TypedData_Float32Array_factory, 0x43506c09) \
+ V(Float64List, ., TypedData_Float64Array_factory, 0x1fde3eaf) \
+ V(Float32x4List, ., TypedData_Float32x4Array_factory, 0x4a4030d6) \
+ V(Int32x4List, ., TypedData_Int32x4Array_factory, 0x6dd02406) \
+ V(Float64x2List, ., TypedData_Float64x2Array_factory, 0x688e4e97) \
+
+#define GRAPH_TYPED_DATA_INTRINSICS_LIST(V) \
+ V(_Int8List, [], Int8ArrayGetIndexed, 0x49767a2c) \
+ V(_Int8List, []=, Int8ArraySetIndexed, 0x24f42cd0) \
+ V(_Uint8List, [], Uint8ArrayGetIndexed, 0x088d86d4) \
+ V(_Uint8List, []=, Uint8ArraySetIndexed, 0x12639541) \
+ V(_ExternalUint8Array, [], ExternalUint8ArrayGetIndexed, 0x088d86d4) \
+ V(_ExternalUint8Array, []=, ExternalUint8ArraySetIndexed, 0x12639541) \
+ V(_Uint8ClampedList, [], Uint8ClampedArrayGetIndexed, 0x088d86d4) \
+ V(_Uint8ClampedList, []=, Uint8ClampedArraySetIndexed, 0x6790dba1) \
+ V(_ExternalUint8ClampedArray, [], ExternalUint8ClampedArrayGetIndexed, \
+ 0x088d86d4) \
+ V(_ExternalUint8ClampedArray, []=, ExternalUint8ClampedArraySetIndexed, \
+ 0x6790dba1) \
+ V(_Int16List, [], Int16ArrayGetIndexed, 0x5ec64948) \
+ V(_Int16List, []=, Int16ArraySetIndexed, 0x0e4e8221) \
+ V(_Uint16List, [], Uint16ArrayGetIndexed, 0x5f49d093) \
+ V(_Uint16List, []=, Uint16ArraySetIndexed, 0x2efbc90f) \
+ V(_Int32List, [], Int32ArrayGetIndexed, 0x4bc0d3dd) \
+ V(_Int32List, []=, Int32ArraySetIndexed, 0x1adf9823) \
+ V(_Uint32List, [], Uint32ArrayGetIndexed, 0x188658ce) \
+ V(_Uint32List, []=, Uint32ArraySetIndexed, 0x01f51a79) \
+ V(_Int64List, [], Int64ArrayGetIndexed, 0x51eafb97) \
+ V(_Int64List, []=, Int64ArraySetIndexed, 0x376181fb) \
+ V(_Uint64List, [], Uint64ArrayGetIndexed, 0x4b2a1ba2) \
+ V(_Uint64List, []=, Uint64ArraySetIndexed, 0x5f881bd4) \
+ V(_Float64List, [], Float64ArrayGetIndexed, 0x0a714486) \
+ V(_Float64List, []=, Float64ArraySetIndexed, 0x04937367) \
+ V(_Float32List, [], Float32ArrayGetIndexed, 0x5ade301f) \
+ V(_Float32List, []=, Float32ArraySetIndexed, 0x0d5c2e2b) \
+ V(_Float32x4List, [], Float32x4ArrayGetIndexed, 0x128cddeb) \
+ V(_Float32x4List, []=, Float32x4ArraySetIndexed, 0x7ad55c72) \
+ V(_Int32x4List, [], Int32x4ArrayGetIndexed, 0x4b78af9c) \
+ V(_Int32x4List, []=, Int32x4ArraySetIndexed, 0x31453dab) \
+ V(_Float64x2List, [], Float64x2ArrayGetIndexed, 0x644a0be1) \
+ V(_Float64x2List, []=, Float64x2ArraySetIndexed, 0x6b836b0b) \
+ V(_TypedList, get:length, TypedDataLength, 0x2091c4d8) \
+ V(_Float32x4, get:x, Float32x4ShuffleX, 0x63d1a9fd) \
+ V(_Float32x4, get:y, Float32x4ShuffleY, 0x203523d9) \
+ V(_Float32x4, get:z, Float32x4ShuffleZ, 0x13190678) \
+ V(_Float32x4, get:w, Float32x4ShuffleW, 0x698a38de) \
+ V(_Float32x4, *, Float32x4Mul, 0x5dec68b2) \
+ V(_Float32x4, -, Float32x4Sub, 0x3ea14461) \
+ V(_Float32x4, +, Float32x4Add, 0x7ffcf301) \
+
+#define GRAPH_CORE_INTRINSICS_LIST(V) \
+ V(_List, get:length, ObjectArrayLength, 0x25952390) \
+ V(_List, [], ObjectArrayGetIndexed, 0x653da02e) \
+ V(_List, []=, ObjectArraySetIndexed, 0x16b3d2b0) \
+ V(_List, _setIndexed, ObjectArraySetIndexedUnchecked, 0x50d64c75) \
+ V(_ImmutableList, get:length, ImmutableArrayLength, 0x25952390) \
+ V(_ImmutableList, [], ImmutableArrayGetIndexed, 0x653da02e) \
+ V(_GrowableList, get:length, GrowableArrayLength, 0x18dd86b4) \
+ V(_GrowableList, get:_capacity, GrowableArrayCapacity, 0x2e04be60) \
+ V(_GrowableList, _setData, GrowableArraySetData, 0x3dbea348) \
+ V(_GrowableList, _setLength, GrowableArraySetLength, 0x753e55da) \
+ V(_GrowableList, [], GrowableArrayGetIndexed, 0x446fe1f0) \
+ V(_GrowableList, []=, GrowableArraySetIndexed, 0x40a462ec) \
+ V(_GrowableList, _setIndexed, GrowableArraySetIndexedUnchecked, 0x297083df) \
+ V(_StringBase, get:length, StringBaseLength, 0x2a2d03d1) \
+ V(_OneByteString, codeUnitAt, OneByteStringCodeUnitAt, 0x55a0a1f3) \
+ V(_TwoByteString, codeUnitAt, TwoByteStringCodeUnitAt, 0x55a0a1f3) \
+ V(_ExternalOneByteString, codeUnitAt, ExternalOneByteStringCodeUnitAt, \
+ 0x55a0a1f3) \
+ V(_ExternalTwoByteString, codeUnitAt, ExternalTwoByteStringCodeUnitAt, \
+ 0x55a0a1f3) \
+ V(_Double, unary-, DoubleFlipSignBit, 0x6db4674f) \
+ V(_Double, truncateToDouble, DoubleTruncate, 0x2f27e5d3) \
+ V(_Double, roundToDouble, DoubleRound, 0x2f89c512) \
+ V(_Double, floorToDouble, DoubleFloor, 0x6aa87a5f) \
+ V(_Double, ceilToDouble, DoubleCeil, 0x1b045e9e) \
+ V(_Double, _modulo, DoubleMod, 0x5b8ceed7)
+
+
+#define GRAPH_INTRINSICS_LIST(V) \
+ GRAPH_CORE_INTRINSICS_LIST(V) \
+ GRAPH_TYPED_DATA_INTRINSICS_LIST(V) \
+ GRAPH_MATH_LIB_INTRINSIC_LIST(V) \
+
+#define DEVELOPER_LIB_INTRINSIC_LIST(V) \
+ V(_UserTag, makeCurrent, UserTag_makeCurrent, 0x0b3066fd) \
+ V(::, _getDefaultTag, UserTag_defaultTag, 0x69f3f1ad) \
+ V(::, _getCurrentTag, Profiler_getCurrentTag, 0x05fa99d2) \
+ V(::, _isDartStreamEnabled, Timeline_isDartStreamEnabled, 0x72f13f7a) \
+
+#define ASYNC_LIB_INTRINSIC_LIST(V) \
+ V(::, _clearAsyncThreadStackTrace, ClearAsyncThreadStackTrace, 0x2edd4b25) \
+ V(::, _setAsyncThreadStackTrace, SetAsyncThreadStackTrace, 0x04f429a7) \
+
+#define ALL_INTRINSICS_NO_INTEGER_LIB_LIST(V) \
+ ASYNC_LIB_INTRINSIC_LIST(V) \
+ CORE_LIB_INTRINSIC_LIST(V) \
+ DEVELOPER_LIB_INTRINSIC_LIST(V) \
+ MATH_LIB_INTRINSIC_LIST(V) \
+ TYPED_DATA_LIB_INTRINSIC_LIST(V) \
+
+#define ALL_INTRINSICS_LIST(V) \
+ ALL_INTRINSICS_NO_INTEGER_LIB_LIST(V) \
+ CORE_INTEGER_LIB_INTRINSIC_LIST(V)
+
+#define RECOGNIZED_LIST(V) \
+ OTHER_RECOGNIZED_LIST(V) \
+ ALL_INTRINSICS_LIST(V) \
+ GRAPH_INTRINSICS_LIST(V)
+
+// A list of core functions that should always be inlined.
+#define INLINE_WHITE_LIST(V) \
+ V(Object, ==, ObjectEquals, 0x7b32a55a) \
+ V(_List, get:length, ObjectArrayLength, 0x25952390) \
+ V(_ImmutableList, get:length, ImmutableArrayLength, 0x25952390) \
+ V(_TypedList, get:length, TypedDataLength, 0x2091c4d8) \
+ V(_GrowableList, get:length, GrowableArrayLength, 0x18dd86b4) \
+ V(_GrowableList, get:_capacity, GrowableArrayCapacity, 0x2e04be60) \
+ V(_GrowableList, add, GrowableListAdd, 0x40b490b8) \
+ V(_GrowableList, removeLast, GrowableListRemoveLast, 0x007855e5) \
+ V(_StringBase, get:length, StringBaseLength, 0x2a2d03d1) \
+ V(ListIterator, moveNext, ListIteratorMoveNext, 0x2dca30ce) \
+ V(_FixedSizeArrayIterator, moveNext, FixedListIteratorMoveNext, 0x324eb20b) \
+ V(_GrowableList, get:iterator, GrowableArrayIterator, 0x5bd2ef37) \
+ V(_GrowableList, forEach, GrowableArrayForEach, 0x74900bb8) \
+ V(_List, ., ObjectArrayAllocate, 0x2121902f) \
+ V(ListMixin, get:isEmpty, ListMixinIsEmpty, 0x7be74d04) \
+ V(_List, get:iterator, ObjectArrayIterator, 0x6c851c55) \
+ V(_List, forEach, ObjectArrayForEach, 0x11406b13) \
+ V(_List, _slice, ObjectArraySlice, 0x4c865d1d) \
+ V(_ImmutableList, get:iterator, ImmutableArrayIterator, 0x6c851c55) \
+ V(_ImmutableList, forEach, ImmutableArrayForEach, 0x11406b13) \
+ V(_Int8ArrayView, [], Int8ArrayViewGetIndexed, 0x7e5a8458) \
+ V(_Int8ArrayView, []=, Int8ArrayViewSetIndexed, 0x62f615e4) \
+ V(_Uint8ArrayView, [], Uint8ArrayViewGetIndexed, 0x7d308247) \
+ V(_Uint8ArrayView, []=, Uint8ArrayViewSetIndexed, 0x65ba546e) \
+ V(_Uint8ClampedArrayView, [], Uint8ClampedArrayViewGetIndexed, 0x7d308247) \
+ V(_Uint8ClampedArrayView, []=, Uint8ClampedArrayViewSetIndexed, 0x65ba546e) \
+ V(_Uint16ArrayView, [], Uint16ArrayViewGetIndexed, 0xe96836dd) \
+ V(_Uint16ArrayView, []=, Uint16ArrayViewSetIndexed, 0x15b02947) \
+ V(_Int16ArrayView, [], Int16ArrayViewGetIndexed, 0x1b24a48b) \
+ V(_Int16ArrayView, []=, Int16ArrayViewSetIndexed, 0xb91ec2e6) \
+ V(_Uint32ArrayView, [], Uint32ArrayViewGetIndexed, 0x8a4f93b3) \
+ V(_Uint32ArrayView, []=, Uint32ArrayViewSetIndexed, 0xf54918b5) \
+ V(_Int32ArrayView, [], Int32ArrayViewGetIndexed, 0x85040819) \
+ V(_Int32ArrayView, []=, Int32ArrayViewSetIndexed, 0xaec8c6f5) \
+ V(_Uint64ArrayView, [], Uint64ArrayViewGetIndexed, 0xd0c44fe7) \
+ V(_Uint64ArrayView, []=, Uint64ArrayViewSetIndexed, 0x402712b7) \
+ V(_Int64ArrayView, [], Int64ArrayViewGetIndexed, 0xf3090b95) \
+ V(_Int64ArrayView, []=, Int64ArrayViewSetIndexed, 0xca07e497) \
+ V(_Float32ArrayView, [], Float32ArrayViewGetIndexed, 0xef967533) \
+ V(_Float32ArrayView, []=, Float32ArrayViewSetIndexed, 0xc9b691bd) \
+ V(_Float64ArrayView, [], Float64ArrayViewGetIndexed, 0x9d83f585) \
+ V(_Float64ArrayView, []=, Float64ArrayViewSetIndexed, 0x3c1adabd) \
+ V(_ByteDataView, setInt8, ByteDataViewSetInt8, 0x6395293e) \
+ V(_ByteDataView, setUint8, ByteDataViewSetUint8, 0x79979d1f) \
+ V(_ByteDataView, setInt16, ByteDataViewSetInt16, 0x525ec534) \
+ V(_ByteDataView, setUint16, ByteDataViewSetUint16, 0x48eda263) \
+ V(_ByteDataView, setInt32, ByteDataViewSetInt32, 0x523666fa) \
+ V(_ByteDataView, setUint32, ByteDataViewSetUint32, 0x5a4683da) \
+ V(_ByteDataView, setInt64, ByteDataViewSetInt64, 0x4283a650) \
+ V(_ByteDataView, setUint64, ByteDataViewSetUint64, 0x687a1892) \
+ V(_ByteDataView, setFloat32, ByteDataViewSetFloat32, 0x7d5784fd) \
+ V(_ByteDataView, setFloat64, ByteDataViewSetFloat64, 0x00101e3f) \
+ V(_ByteDataView, getInt8, ByteDataViewGetInt8, 0x68448b4d) \
+ V(_ByteDataView, getUint8, ByteDataViewGetUint8, 0x5d68cbf2) \
+ V(_ByteDataView, getInt16, ByteDataViewGetInt16, 0x691b5ead) \
+ V(_ByteDataView, getUint16, ByteDataViewGetUint16, 0x78b744d8) \
+ V(_ByteDataView, getInt32, ByteDataViewGetInt32, 0x3a0f4efa) \
+ V(_ByteDataView, getUint32, ByteDataViewGetUint32, 0x583261be) \
+ V(_ByteDataView, getInt64, ByteDataViewGetInt64, 0x77de471c) \
+ V(_ByteDataView, getUint64, ByteDataViewGetUint64, 0x0ffadc4b) \
+ V(_ByteDataView, getFloat32, ByteDataViewGetFloat32, 0x6a205749) \
+ V(_ByteDataView, getFloat64, ByteDataViewGetFloat64, 0x69f58d27) \
+ V(::, exp, MathExp, 0x32ab9efa) \
+ V(::, log, MathLog, 0x1ee8f9fc) \
+ V(::, max, MathMax, 0x377e8889) \
+ V(::, min, MathMin, 0x32ebc57d) \
+ V(::, pow, MathPow, 0x79efc5a2) \
+ V(::, _classRangeCheck, ClassRangeCheck, 0x2ae76b84) \
+ V(::, _classRangeCheckNegative, ClassRangeCheckNegated, 0x5acdfb75) \
+ V(::, _toInt, ConvertMaskedInt, 0x713908fd) \
+ V(::, _toInt8, ConvertIntToInt8, 0x7484a780) \
+ V(::, _toUint8, ConvertIntToUint8, 0x0a15b522) \
+ V(::, _toInt16, ConvertIntToInt16, 0x0a83fcc6) \
+ V(::, _toUint16, ConvertIntToUint16, 0x6087d1af) \
+ V(::, _toInt32, ConvertIntToInt32, 0x62b451b9) \
+ V(::, _toUint32, ConvertIntToUint32, 0x17a8e085) \
+ V(::, _byteSwap16, ByteSwap16, 0x44f173be) \
+ V(::, _byteSwap32, ByteSwap32, 0x6219333b) \
+ V(::, _byteSwap64, ByteSwap64, 0x9abe57e0) \
+ V(Lists, copy, ListsCopy, 0x40e974f6) \
+ V(_HashVMBase, get:_index, LinkedHashMap_getIndex, 0x02477157) \
+ V(_HashVMBase, set:_index, LinkedHashMap_setIndex, 0x4fc8d5e0) \
+ V(_HashVMBase, get:_data, LinkedHashMap_getData, 0x2d7a70ac) \
+ V(_HashVMBase, set:_data, LinkedHashMap_setData, 0x0ec032e8) \
+ V(_HashVMBase, get:_usedData, LinkedHashMap_getUsedData, 0x088599ed) \
+ V(_HashVMBase, set:_usedData, LinkedHashMap_setUsedData, 0x5f42ca86) \
+ V(_HashVMBase, get:_hashMask, LinkedHashMap_getHashMask, 0x32f3b13b) \
+ V(_HashVMBase, set:_hashMask, LinkedHashMap_setHashMask, 0x7219c45b) \
+ V(_HashVMBase, get:_deletedKeys, LinkedHashMap_getDeletedKeys, 0x558481c2) \
+ V(_HashVMBase, set:_deletedKeys, LinkedHashMap_setDeletedKeys, 0x5aa9888d) \
+
+// A list of core functions that should never be inlined.
+#define INLINE_BLACK_LIST(V) \
+ V(::, asin, MathAsin, 0x2ecc2fcd) \
+ V(::, acos, MathAcos, 0x08cf2212) \
+ V(::, atan, MathAtan, 0x1e2731d5) \
+ V(::, atan2, MathAtan2, 0x39f1fa41) \
+ V(::, cos, MathCos, 0x459bf5fe) \
+ V(::, sin, MathSin, 0x6b7bd98c) \
+ V(::, sqrt, MathSqrt, 0x70482cf3) \
+ V(::, tan, MathTan, 0x3bcd772a) \
+ V(_BigIntImpl, _lsh, Bigint_lsh, 0x5b6cfc8b) \
+ V(_BigIntImpl, _rsh, Bigint_rsh, 0x6ff14a49) \
+ V(_BigIntImpl, _absAdd, Bigint_absAdd, 0x5bf14238) \
+ V(_BigIntImpl, _absSub, Bigint_absSub, 0x1de5bd32) \
+ V(_BigIntImpl, _mulAdd, Bigint_mulAdd, 0x6f277966) \
+ V(_BigIntImpl, _sqrAdd, Bigint_sqrAdd, 0x68e4c8ea) \
+ V(_BigIntImpl, _estimateQuotientDigit, Bigint_estimateQuotientDigit, \
+ 0x35456d91) \
+ V(_BigIntMontgomeryReduction, _mulMod, Montgomery_mulMod, 0x0f7b0375) \
+ V(_Double, >, Double_greaterThan, 0x4f1375a3) \
+ V(_Double, >=, Double_greaterEqualThan, 0x4260c184) \
+ V(_Double, <, Double_lessThan, 0x365d1eba) \
+ V(_Double, <=, Double_lessEqualThan, 0x74b5eb64) \
+ V(_Double, ==, Double_equal, 0x613492fc) \
+ V(_Double, +, Double_add, 0x53994370) \
+ V(_Double, -, Double_sub, 0x3b69d466) \
+ V(_Double, *, Double_mul, 0x2bb9bd5d) \
+ V(_Double, /, Double_div, 0x483eee28) \
+ V(_IntegerImplementation, +, Integer_add, 0x43d53af7) \
+ V(_IntegerImplementation, -, Integer_sub, 0x2dc22e03) \
+ V(_IntegerImplementation, *, Integer_mul, 0x4e7a1c24) \
+ V(_IntegerImplementation, ~/, Integer_truncDivide, 0x4efb2d39) \
+ V(_IntegerImplementation, unary-, Integer_negate, 0x428bf6fa) \
+ V(_IntegerImplementation, &, Integer_bitAnd, 0x5ab35f30) \
+ V(_IntegerImplementation, |, Integer_bitOr, 0x267fa107) \
+ V(_IntegerImplementation, ^, Integer_bitXor, 0x0c7b0230) \
+ V(_IntegerImplementation, >, Integer_greaterThan, 0x6599a6e1) \
+ V(_IntegerImplementation, ==, Integer_equal, 0x58abc487) \
+ V(_IntegerImplementation, <, Integer_lessThan, 0x365d1eba) \
+ V(_IntegerImplementation, <=, Integer_lessEqualThan, 0x74b5eb64) \
+ V(_IntegerImplementation, >=, Integer_greaterEqualThan, 0x4260c184) \
+ V(_IntegerImplementation, <<, Integer_shl, 0x371c45fa) \
+ V(_IntegerImplementation, >>, Integer_sar, 0x2b630578) \
+
+// A list of core functions that internally dispatch based on the receiver's class id.
+#define POLYMORPHIC_TARGET_LIST(V) \
+ V(_StringBase, [], StringBaseCharAt, 0x7cbb8603) \
+ V(_TypedList, _getInt8, ByteArrayBaseGetInt8, 0x7041895a) \
+ V(_TypedList, _getUint8, ByteArrayBaseGetUint8, 0x336fa3ea) \
+ V(_TypedList, _getInt16, ByteArrayBaseGetInt16, 0x231bbe2e) \
+ V(_TypedList, _getUint16, ByteArrayBaseGetUint16, 0x0371785f) \
+ V(_TypedList, _getInt32, ByteArrayBaseGetInt32, 0x65ab3a20) \
+ V(_TypedList, _getUint32, ByteArrayBaseGetUint32, 0x0cb0fcf6) \
+ V(_TypedList, _getInt64, ByteArrayBaseGetInt64, 0x7db75d78) \
+ V(_TypedList, _getUint64, ByteArrayBaseGetUint64, 0x1487cfc6) \
+ V(_TypedList, _getFloat32, ByteArrayBaseGetFloat32, 0x6674ea6f) \
+ V(_TypedList, _getFloat64, ByteArrayBaseGetFloat64, 0x236c6e7a) \
+ V(_TypedList, _getFloat32x4, ByteArrayBaseGetFloat32x4, 0x5c367ffb) \
+ V(_TypedList, _getInt32x4, ByteArrayBaseGetInt32x4, 0x772d1c0f) \
+ V(_TypedList, _setInt8, ByteArrayBaseSetInt8, 0x12bae36a) \
+ V(_TypedList, _setUint8, ByteArrayBaseSetInt8, 0x15821cc9) \
+ V(_TypedList, _setInt16, ByteArrayBaseSetInt16, 0x1f8237fa) \
+ V(_TypedList, _setUint16, ByteArrayBaseSetInt16, 0x181e5d16) \
+ V(_TypedList, _setInt32, ByteArrayBaseSetInt32, 0x7ddb9f87) \
+ V(_TypedList, _setUint32, ByteArrayBaseSetUint32, 0x74094f8d) \
+ V(_TypedList, _setInt64, ByteArrayBaseSetInt64, 0x4741396e) \
+ V(_TypedList, _setUint64, ByteArrayBaseSetUint64, 0x3b398ae4) \
+ V(_TypedList, _setFloat32, ByteArrayBaseSetFloat32, 0x03db087b) \
+ V(_TypedList, _setFloat64, ByteArrayBaseSetFloat64, 0x38a80b0d) \
+ V(_TypedList, _setFloat32x4, ByteArrayBaseSetFloat32x4, 0x40052c4e) \
+ V(_TypedList, _setInt32x4, ByteArrayBaseSetInt32x4, 0x07b89f54) \
+ V(Object, get:runtimeType, ObjectRuntimeType, 0x00e8ab29)
+
+// List of recognized list factories:
+// (factory-name-symbol, class-name-string, constructor-name-string,
+// result-cid, fingerprint).
+#define RECOGNIZED_LIST_FACTORY_LIST(V) \
+ V(_ListFactory, _List, ., kArrayCid, 0x2121902f) \
+ V(_GrowableListWithData, _GrowableList, .withData, kGrowableObjectArrayCid, \
+ 0x28b2138e) \
+ V(_GrowableListFactory, _GrowableList, ., kGrowableObjectArrayCid, \
+ 0x3eed680b) \
+ V(_Int8ArrayFactory, Int8List, ., kTypedDataInt8ArrayCid, 0x7e39a3a1) \
+ V(_Uint8ArrayFactory, Uint8List, ., kTypedDataUint8ArrayCid, 0x3a79adf7) \
+ V(_Uint8ClampedArrayFactory, Uint8ClampedList, ., \
+ kTypedDataUint8ClampedArrayCid, 0x67f38395) \
+ V(_Int16ArrayFactory, Int16List, ., kTypedDataInt16ArrayCid, 0x6477bda8) \
+ V(_Uint16ArrayFactory, Uint16List, ., kTypedDataUint16ArrayCid, 0x5707c5a2) \
+ V(_Int32ArrayFactory, Int32List, ., kTypedDataInt32ArrayCid, 0x2b96ec0e) \
+ V(_Uint32ArrayFactory, Uint32List, ., kTypedDataUint32ArrayCid, 0x0c1c0d62) \
+ V(_Int64ArrayFactory, Int64List, ., kTypedDataInt64ArrayCid, 0x279ab485) \
+ V(_Uint64ArrayFactory, Uint64List, ., kTypedDataUint64ArrayCid, 0x7bcb89c2) \
+ V(_Float64ArrayFactory, Float64List, ., kTypedDataFloat64ArrayCid, \
+ 0x1fde3eaf) \
+ V(_Float32ArrayFactory, Float32List, ., kTypedDataFloat32ArrayCid, \
+ 0x43506c09) \
+ V(_Float32x4ArrayFactory, Float32x4List, ., kTypedDataFloat32x4ArrayCid, \
+ 0x4a4030d6)
+
+// clang-format on
+
+} // namespace dart
+
+#endif // RUNTIME_VM_COMPILER_RECOGNIZED_METHODS_LIST_H_
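Note: each of the X-macro lists in this header is consumed by defining V before expanding the list. A minimal sketch of a consumer, assuming recognized_methods_list.h is on the include path; the RECOGNIZE macro and the enum names below are made up for illustration and are not part of this CL (the real plumbing lives in the MethodRecognizer):

  #include "vm/compiler/recognized_methods_list.h"

  // Expands each (class, function, enum-name, fingerprint) entry into one
  // enum constant named after the entry's enum-name.
  #define RECOGNIZE(class_name, function_name, enum_name, fp) k##enum_name,
  enum SketchRecognizedKind {
    POLYMORPHIC_TARGET_LIST(RECOGNIZE)
    kNumSketchRecognizedKinds,
  };
  #undef RECOGNIZE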
diff --git a/runtime/vm/compiler/runtime_api.cc b/runtime/vm/compiler/runtime_api.cc
index 17c667b..aca31eb 100644
--- a/runtime/vm/compiler/runtime_api.cc
+++ b/runtime/vm/compiler/runtime_api.cc
@@ -11,7 +11,10 @@
#include "vm/native_arguments.h"
#include "vm/native_entry.h"
#include "vm/object.h"
+#include "vm/object_store.h"
#include "vm/runtime_entry.h"
+#include "vm/symbols.h"
+#include "vm/timeline.h"
namespace dart {
namespace compiler {
@@ -24,6 +27,11 @@
return obj.IsNotTemporaryScopedHandle();
}
+#define DO(clazz) \
+ bool Is##clazz##Handle(const Object& obj) { return obj.Is##clazz(); }
+CLASS_LIST_FOR_HANDLES(DO)
+#undef DO
+
bool IsInOldSpace(const Object& obj) {
return obj.IsOld();
}
@@ -93,6 +101,21 @@
return Type::Handle(dart::Type::IntType());
}
+const Class& GrowableObjectArrayClass() {
+ auto object_store = Isolate::Current()->object_store();
+ return Class::Handle(object_store->growable_object_array_class());
+}
+
+const Class& MintClass() {
+ auto object_store = Isolate::Current()->object_store();
+ return Class::Handle(object_store->mint_class());
+}
+
+const Class& DoubleClass() {
+ auto object_store = Isolate::Current()->object_store();
+ return Class::Handle(object_store->double_class());
+}
+
bool IsOriginalObject(const Object& object) {
if (object.IsICData()) {
return ICData::Cast(object).IsOriginal();
@@ -118,6 +141,35 @@
return static_cast<int32_t>(Isolate::Current()->random()->NextUInt32());
}
+word TypedDataElementSizeInBytes(classid_t cid) {
+ return dart::TypedData::ElementSizeInBytes(cid);
+}
+
+word TypedDataMaxNewSpaceElements(classid_t cid) {
+ return dart::TypedData::MaxNewSpaceElements(cid);
+}
+
+const Field& LookupMathRandomStateFieldOffset() {
+ const auto& math_lib = dart::Library::Handle(dart::Library::MathLibrary());
+ ASSERT(!math_lib.IsNull());
+ const auto& random_class = dart::Class::Handle(
+ math_lib.LookupClassAllowPrivate(dart::Symbols::_Random()));
+ ASSERT(!random_class.IsNull());
+ const auto& state_field = dart::Field::ZoneHandle(
+ random_class.LookupInstanceFieldAllowPrivate(dart::Symbols::_state()));
+ return state_field;
+}
+
+word LookupFieldOffsetInBytes(const Field& field) {
+ return field.Offset();
+}
+
+#if defined(TARGET_ARCH_IA32)
+uword SymbolsPredefinedAddress() {
+ return reinterpret_cast<uword>(dart::Symbols::PredefinedAddress());
+}
+#endif
+
#if !defined(TARGET_ARCH_DBC)
const Code& StubCodeAllocateArray() {
return dart::StubCode::AllocateArray();
@@ -194,6 +246,14 @@
return dart::Class::type_arguments_field_offset_in_words_offset();
}
+word Class::declaration_type_offset() {
+ return dart::Class::declaration_type_offset();
+}
+
+word Class::num_type_arguments_offset_in_bytes() {
+ return dart::Class::num_type_arguments_offset();
+}
+
const word Class::kNoTypeArguments = dart::Class::kNoTypeArguments;
classid_t Class::GetId(const dart::Class& handle) {
@@ -232,6 +292,10 @@
return dart::Instance::DataOffsetFor(cid);
}
+word Instance::ElementSizeFor(intptr_t cid) {
+ return dart::Instance::ElementSizeFor(cid);
+}
+
word Function::code_offset() {
return dart::Function::code_offset();
}
@@ -326,122 +390,120 @@
return dart::SingleTargetCache::target_offset();
}
+const word Array::kMaxNewSpaceElements = dart::Array::kMaxNewSpaceElements;
+
+word Context::InstanceSize(word n) {
+ return dart::Context::InstanceSize(n);
+}
+
+word Context::variable_offset(word n) {
+ return dart::Context::variable_offset(n);
+}
+
+word TypedData::InstanceSize() {
+ return sizeof(RawTypedData);
+}
+
word Array::header_size() {
return sizeof(dart::RawArray);
}
-word Array::tags_offset() {
- return dart::Array::tags_offset();
-}
+#define CLASS_NAME_LIST(V) \
+ V(AbstractType, type_test_stub_entry_point_offset) \
+ V(ArgumentsDescriptor, count_offset) \
+ V(ArgumentsDescriptor, type_args_len_offset) \
+ V(Array, data_offset) \
+ V(Array, length_offset) \
+ V(Array, tags_offset) \
+ V(Array, type_arguments_offset) \
+ V(ClassTable, table_offset) \
+ V(Closure, context_offset) \
+ V(Closure, delayed_type_arguments_offset) \
+ V(Closure, function_offset) \
+ V(Closure, function_type_arguments_offset) \
+ V(Closure, instantiator_type_arguments_offset) \
+ V(Code, object_pool_offset) \
+ V(Code, saved_instructions_offset) \
+ V(Context, num_variables_offset) \
+ V(Context, parent_offset) \
+ V(Double, value_offset) \
+ V(Float32x4, value_offset) \
+ V(Float64x2, value_offset) \
+ V(GrowableObjectArray, data_offset) \
+ V(GrowableObjectArray, length_offset) \
+ V(GrowableObjectArray, type_arguments_offset) \
+ V(HeapPage, card_table_offset) \
+ V(Isolate, class_table_offset) \
+ V(Isolate, current_tag_offset) \
+ V(Isolate, default_tag_offset) \
+ V(Isolate, ic_miss_code_offset) \
+ V(Isolate, object_store_offset) \
+ V(Isolate, user_tag_offset) \
+ V(MarkingStackBlock, pointers_offset) \
+ V(MarkingStackBlock, top_offset) \
+ V(Mint, value_offset) \
+ V(NativeArguments, argc_tag_offset) \
+ V(NativeArguments, argv_offset) \
+ V(NativeArguments, retval_offset) \
+ V(NativeArguments, thread_offset) \
+ V(ObjectStore, double_type_offset) \
+ V(ObjectStore, int_type_offset) \
+ V(ObjectStore, string_type_offset) \
+ V(OneByteString, data_offset) \
+ V(StoreBufferBlock, pointers_offset) \
+ V(StoreBufferBlock, top_offset) \
+ V(String, hash_offset) \
+ V(String, length_offset) \
+ V(SubtypeTestCache, cache_offset) \
+ V(Thread, active_exception_offset) \
+ V(Thread, active_stacktrace_offset) \
+ V(Thread, async_stack_trace_offset) \
+ V(Thread, auto_scope_native_wrapper_entry_point_offset) \
+ V(Thread, bool_false_offset) \
+ V(Thread, bool_true_offset) \
+ V(Thread, dart_stream_offset) \
+ V(Thread, end_offset) \
+ V(Thread, global_object_pool_offset) \
+ V(Thread, isolate_offset) \
+ V(Thread, marking_stack_block_offset) \
+ V(Thread, no_scope_native_wrapper_entry_point_offset) \
+ V(Thread, object_null_offset) \
+ V(Thread, predefined_symbols_address_offset) \
+ V(Thread, resume_pc_offset) \
+ V(Thread, store_buffer_block_offset) \
+ V(Thread, top_exit_frame_info_offset) \
+ V(Thread, top_offset) \
+ V(Thread, top_resource_offset) \
+ V(Thread, vm_tag_offset) \
+ V(TimelineStream, enabled_offset) \
+ V(TwoByteString, data_offset) \
+ V(Type, arguments_offset) \
+ V(TypedData, data_offset) \
+ V(TypedData, length_offset) \
+ V(Type, hash_offset) \
+ V(TypeRef, type_offset) \
+ V(Type, signature_offset) \
+ V(Type, type_state_offset) \
+ V(UserTag, tag_offset)
-word Array::data_offset() {
- return dart::Array::data_offset();
-}
+#define DEFINE_FORWARDER(clazz, name) \
+ word clazz::name() { return dart::clazz::name(); }
-word Array::type_arguments_offset() {
- return dart::Array::type_arguments_offset();
-}
-
-word Array::length_offset() {
- return dart::Array::length_offset();
-}
-
-const word Array::kMaxNewSpaceElements = dart::Array::kMaxNewSpaceElements;
-
-word ArgumentsDescriptor::count_offset() {
- return dart::ArgumentsDescriptor::count_offset();
-}
-
-word ArgumentsDescriptor::type_args_len_offset() {
- return dart::ArgumentsDescriptor::type_args_len_offset();
-}
-
-word AbstractType::type_test_stub_entry_point_offset() {
- return dart::AbstractType::type_test_stub_entry_point_offset();
-}
-
-word Type::type_state_offset() {
- return dart::Type::type_state_offset();
-}
-
-word Type::arguments_offset() {
- return dart::Type::arguments_offset();
-}
-
-word Type::signature_offset() {
- return dart::Type::signature_offset();
-}
-
-word TypeRef::type_offset() {
- return dart::TypeRef::type_offset();
-}
+CLASS_NAME_LIST(DEFINE_FORWARDER)
+#undef DEFINE_FORWARDER
const word HeapPage::kBytesPerCardLog2 = dart::HeapPage::kBytesPerCardLog2;
-word HeapPage::card_table_offset() {
- return dart::HeapPage::card_table_offset();
+const word String::kHashBits = dart::String::kHashBits;
+
+word String::InstanceSize() {
+ return sizeof(dart::RawString);
}
bool Heap::IsAllocatableInNewSpace(intptr_t instance_size) {
return dart::Heap::IsAllocatableInNewSpace(instance_size);
}
-word Thread::active_exception_offset() {
- return dart::Thread::active_exception_offset();
-}
-
-word Thread::active_stacktrace_offset() {
- return dart::Thread::active_stacktrace_offset();
-}
-
-word Thread::resume_pc_offset() {
- return dart::Thread::resume_pc_offset();
-}
-
-word Thread::marking_stack_block_offset() {
- return dart::Thread::marking_stack_block_offset();
-}
-
-word Thread::top_exit_frame_info_offset() {
- return dart::Thread::top_exit_frame_info_offset();
-}
-
-word Thread::top_resource_offset() {
- return dart::Thread::top_resource_offset();
-}
-
-word Thread::global_object_pool_offset() {
- return dart::Thread::global_object_pool_offset();
-}
-
-word Thread::object_null_offset() {
- return dart::Thread::object_null_offset();
-}
-
-word Thread::bool_true_offset() {
- return dart::Thread::bool_true_offset();
-}
-
-word Thread::bool_false_offset() {
- return dart::Thread::bool_false_offset();
-}
-
-word Thread::top_offset() {
- return dart::Thread::top_offset();
-}
-
-word Thread::end_offset() {
- return dart::Thread::end_offset();
-}
-
-word Thread::isolate_offset() {
- return dart::Thread::isolate_offset();
-}
-
-word Thread::store_buffer_block_offset() {
- return dart::Thread::store_buffer_block_offset();
-}
-
#if !defined(TARGET_ARCH_DBC)
word Thread::write_barrier_code_offset() {
return dart::Thread::write_barrier_code_offset();
@@ -496,10 +558,6 @@
}
#endif
-word Thread::vm_tag_offset() {
- return dart::Thread::vm_tag_offset();
-}
-
#if !defined(TARGET_ARCH_DBC)
word Thread::monomorphic_miss_stub_offset() {
@@ -564,14 +622,6 @@
#endif // !defined(TARGET_ARCH_DBC)
-word Thread::no_scope_native_wrapper_entry_point_offset() {
- return dart::Thread::no_scope_native_wrapper_entry_point_offset();
-}
-
-word Thread::auto_scope_native_wrapper_entry_point_offset() {
- return dart::Thread::auto_scope_native_wrapper_entry_point_offset();
-}
-
#define DECLARE_CONSTANT_OFFSET_GETTER(name) \
word Thread::name##_address_offset() { \
return dart::Thread::name##_address_offset(); \
@@ -583,40 +633,16 @@
return dart::Thread::OffsetFromThread(object);
}
-uword StoreBufferBlock::top_offset() {
- return dart::StoreBufferBlock::top_offset();
-}
-uword StoreBufferBlock::pointers_offset() {
- return dart::StoreBufferBlock::pointers_offset();
-}
const word StoreBufferBlock::kSize = dart::StoreBufferBlock::kSize;
-uword MarkingStackBlock::top_offset() {
- return dart::MarkingStackBlock::top_offset();
-}
-uword MarkingStackBlock::pointers_offset() {
- return dart::MarkingStackBlock::pointers_offset();
-}
const word MarkingStackBlock::kSize = dart::MarkingStackBlock::kSize;
-word Isolate::class_table_offset() {
- return dart::Isolate::class_table_offset();
-}
-
-word Isolate::ic_miss_code_offset() {
- return dart::Isolate::ic_miss_code_offset();
-}
-
#if !defined(PRODUCT)
word Isolate::single_step_offset() {
return dart::Isolate::single_step_offset();
}
#endif // !defined(PRODUCT)
-word ClassTable::table_offset() {
- return dart::ClassTable::table_offset();
-}
-
#if !defined(PRODUCT)
word ClassTable::ClassOffsetFor(intptr_t cid) {
return dart::ClassTable::ClassOffsetFor(cid);
@@ -651,22 +677,10 @@
return dart::Instructions::HeaderSize();
}
-intptr_t Code::object_pool_offset() {
- return dart::Code::object_pool_offset();
-}
-
-intptr_t Code::saved_instructions_offset() {
- return dart::Code::saved_instructions_offset();
-}
-
intptr_t Code::entry_point_offset(CodeEntryKind kind) {
return dart::Code::entry_point_offset(kind);
}
-word SubtypeTestCache::cache_offset() {
- return dart::SubtypeTestCache::cache_offset();
-}
-
const word SubtypeTestCache::kTestEntryLength =
dart::SubtypeTestCache::kTestEntryLength;
const word SubtypeTestCache::kInstanceClassIdOrFunction =
@@ -687,42 +701,6 @@
return sizeof(dart::RawContext);
}
-word Context::parent_offset() {
- return dart::Context::parent_offset();
-}
-
-word Context::num_variables_offset() {
- return dart::Context::num_variables_offset();
-}
-
-word Context::variable_offset(word i) {
- return dart::Context::variable_offset(i);
-}
-
-word Context::InstanceSize(word n) {
- return dart::Context::InstanceSize(n);
-}
-
-word Closure::context_offset() {
- return dart::Closure::context_offset();
-}
-
-word Closure::delayed_type_arguments_offset() {
- return dart::Closure::delayed_type_arguments_offset();
-}
-
-word Closure::function_offset() {
- return dart::Closure::function_offset();
-}
-
-word Closure::function_type_arguments_offset() {
- return dart::Closure::function_type_arguments_offset();
-}
-
-word Closure::instantiator_type_arguments_offset() {
- return dart::Closure::instantiator_type_arguments_offset();
-}
-
#if !defined(PRODUCT)
word ClassHeapStats::TraceAllocationMask() {
return dart::ClassHeapStats::TraceAllocationMask();
@@ -741,22 +719,7 @@
}
#endif // !defined(PRODUCT)
-word Double::value_offset() {
- return dart::Double::value_offset();
-}
-
-word Mint::value_offset() {
- return dart::Mint::value_offset();
-}
-
-word Float32x4::value_offset() {
- return dart::Float32x4::value_offset();
-}
-
-word Float64x2::value_offset() {
- return dart::Float64x2::value_offset();
-}
-
+const word Smi::kBits = dart::Smi::kBits;
bool IsSmi(const dart::Object& a) {
return a.IsSmi();
}
@@ -804,26 +767,19 @@
const word NativeEntry::kNumCallWrapperArguments =
dart::NativeEntry::kNumCallWrapperArguments;
-word NativeArguments::thread_offset() {
- return dart::NativeArguments::thread_offset();
-}
-
-word NativeArguments::argc_tag_offset() {
- return dart::NativeArguments::argc_tag_offset();
-}
-
-word NativeArguments::argv_offset() {
- return dart::NativeArguments::argv_offset();
-}
-
-word NativeArguments::retval_offset() {
- return dart::NativeArguments::retval_offset();
-}
-
word NativeArguments::StructSize() {
return sizeof(dart::NativeArguments);
}
+word RegExp::function_offset(classid_t cid, bool sticky) {
+ return dart::RegExp::function_offset(cid, sticky);
+}
+
+const word Symbols::kNumberOfOneCharCodeSymbols =
+ dart::Symbols::kNumberOfOneCharCodeSymbols;
+const word Symbols::kNullCharCodeSymbolOffset =
+ dart::Symbols::kNullCharCodeSymbolOffset;
+
} // namespace target
} // namespace compiler
} // namespace dart
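The CLASS_NAME_LIST/DEFINE_FORWARDER pair introduced above replaces the long run of hand-written one-line getters deleted in this file; each expansion is equivalent to one of the removed definitions. For readability, a sketch of a single expansion (the (Array, data_offset) entry) inside dart::compiler::target:

  word Array::data_offset() {
    return dart::Array::data_offset();
  }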
diff --git a/runtime/vm/compiler/runtime_api.h b/runtime/vm/compiler/runtime_api.h
index 5029896..853fac0 100644
--- a/runtime/vm/compiler/runtime_api.h
+++ b/runtime/vm/compiler/runtime_api.h
@@ -21,6 +21,7 @@
#include "platform/globals.h"
#include "vm/allocation.h"
#include "vm/bitfield.h"
+#include "vm/class_id.h"
#include "vm/code_entry_kind.h"
#include "vm/frame_layout.h"
#include "vm/pointer_tagging.h"
@@ -30,19 +31,15 @@
namespace dart {
// Forward declarations.
-class Bool;
-class Class;
-class Code;
-class Code;
-class Function;
class LocalVariable;
class Object;
class RuntimeEntry;
-class String;
-class Type;
-class TypeArguments;
class Zone;
+#define DO(clazz) class clazz;
+CLASS_LIST_FOR_HANDLES(DO)
+#undef DO
+
namespace compiler {
class Assembler;
}
@@ -97,6 +94,9 @@
const Type& ObjectType();
const Type& VoidType();
const Type& IntType();
+const Class& GrowableObjectArrayClass();
+const Class& MintClass();
+const Class& DoubleClass();
template <typename To, typename From>
const To& CastHandle(const From& from) {
@@ -149,6 +149,22 @@
// generated code.
int32_t CreateJitCookie();
+// Returns the element size in bytes for the given typed data class id.
+word TypedDataElementSizeInBytes(classid_t cid);
+
+// Returns the maximum number of new-space elements for the given class id.
+word TypedDataMaxNewSpaceElements(classid_t cid);
+
+// Looks up dart:math's _Random._state field.
+const Field& LookupMathRandomStateFieldOffset();
+
+// Returns the offset in bytes of [field].
+word LookupFieldOffsetInBytes(const Field& field);
+
+#if defined(TARGET_ARCH_IA32)
+uword SymbolsPredefinedAddress();
+#endif
+
typedef void (*RuntimeEntryCallInternal)(const dart::RuntimeEntry*,
compiler::Assembler*,
intptr_t);
@@ -328,6 +344,11 @@
public:
static word type_arguments_field_offset_in_words_offset();
+ static word declaration_type_offset();
+
+  // The offset of the RawClass::num_type_arguments_ field in bytes.
+ static word num_type_arguments_offset_in_bytes();
+
// The value used if no type arguments vector is present.
static const word kNoTypeArguments;
@@ -358,6 +379,7 @@
// Returns the offset to the first field of [RawInstance].
static word first_field_offset();
static word DataOffsetFor(intptr_t cid);
+ static word ElementSizeFor(intptr_t cid);
};
class Function : public AllStatic {
@@ -413,6 +435,20 @@
static const word kMaxNewSpaceElements;
};
+class GrowableObjectArray : public AllStatic {
+ public:
+ static word data_offset();
+ static word type_arguments_offset();
+ static word length_offset();
+};
+
+class TypedData : public AllStatic {
+ public:
+ static word data_offset();
+ static word length_offset();
+ static word InstanceSize();
+};
+
class ArgumentsDescriptor : public AllStatic {
public:
static word count_offset();
@@ -426,6 +462,7 @@
class Type : public AllStatic {
public:
+ static word hash_offset();
static word type_state_offset();
static word arguments_offset();
static word signature_offset();
@@ -441,11 +478,34 @@
static word value_offset();
};
+class Smi : public AllStatic {
+ public:
+ static const word kBits;
+};
+
class Mint : public AllStatic {
public:
static word value_offset();
};
+class String : public AllStatic {
+ public:
+ static const word kHashBits;
+ static word hash_offset();
+ static word length_offset();
+ static word InstanceSize();
+};
+
+class OneByteString : public AllStatic {
+ public:
+ static word data_offset();
+};
+
+class TwoByteString : public AllStatic {
+ public:
+ static word data_offset();
+};
+
class Float32x4 : public AllStatic {
public:
static word value_offset();
@@ -456,8 +516,17 @@
static word value_offset();
};
+class TimelineStream : public AllStatic {
+ public:
+ static word enabled_offset();
+};
+
class Thread : public AllStatic {
public:
+ static word dart_stream_offset();
+ static word async_stack_trace_offset();
+ static word predefined_symbols_address_offset();
+
static word active_exception_offset();
static word active_stacktrace_offset();
static word resume_pc_offset();
@@ -526,20 +595,31 @@
class StoreBufferBlock : public AllStatic {
public:
- static uword top_offset();
- static uword pointers_offset();
+ static word top_offset();
+ static word pointers_offset();
static const word kSize;
};
class MarkingStackBlock : public AllStatic {
public:
- static uword top_offset();
- static uword pointers_offset();
+ static word top_offset();
+ static word pointers_offset();
static const word kSize;
};
+class ObjectStore : public AllStatic {
+ public:
+ static word double_type_offset();
+ static word int_type_offset();
+ static word string_type_offset();
+};
+
class Isolate : public AllStatic {
public:
+ static word object_store_offset();
+ static word default_tag_offset();
+ static word current_tag_offset();
+ static word user_tag_offset();
static word class_table_offset();
static word ic_miss_code_offset();
#if !defined(PRODUCT)
@@ -650,6 +730,22 @@
static const word kNumCallWrapperArguments;
};
+class RegExp : public AllStatic {
+ public:
+ static word function_offset(classid_t cid, bool sticky);
+};
+
+class UserTag : public AllStatic {
+ public:
+ static word tag_offset();
+};
+
+class Symbols : public AllStatic {
+ public:
+ static const word kNumberOfOneCharCodeSymbols;
+ static const word kNullCharCodeSymbolOffset;
+};
+
} // namespace target
} // namespace compiler
} // namespace dart
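Compiler-side code obtains these layout constants through the compiler::target wrappers declared above rather than by calling the dart:: classes directly. An illustrative use site, assuming runtime_api.h is included and the code lives inside dart::compiler (the helper name is hypothetical, not part of this CL):

  // Hypothetical helper: reads the field offset via the target:: interface
  // instead of via dart::String directly.
  word ExampleStringHashOffset() {
    return target::String::hash_offset();
  }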
diff --git a/runtime/vm/object.cc b/runtime/vm/object.cc
index 758a006..318379e 100644
--- a/runtime/vm/object.cc
+++ b/runtime/vm/object.cc
@@ -1879,7 +1879,7 @@
ClassFinalizer::VerifyBootstrapClasses();
// Set up the intrinsic state of all functions (core, math and typed data).
- Intrinsifier::InitializeState();
+ compiler::Intrinsifier::InitializeState();
// Set up recognized state of all functions (core, math and typed data).
MethodRecognizer::InitializeState();
diff --git a/runtime/vm/object.h b/runtime/vm/object.h
index 80f5e1c..e426c9d0 100644
--- a/runtime/vm/object.h
+++ b/runtime/vm/object.h
@@ -843,6 +843,9 @@
StoreNonPointer(&raw_ptr()->id_, value);
}
static intptr_t id_offset() { return OFFSET_OF(RawClass, id_); }
+ static intptr_t num_type_arguments_offset() {
+ return OFFSET_OF(RawClass, num_type_arguments_);
+ }
RawString* Name() const;
RawString* ScrubbedName() const;
@@ -1397,9 +1400,6 @@
int16_t num_type_arguments() const { return raw_ptr()->num_type_arguments_; }
void set_num_type_arguments(intptr_t value) const;
- static intptr_t num_type_arguments_offset() {
- return OFFSET_OF(RawClass, num_type_arguments_);
- }
public:
bool has_pragma() const {