Reland "[vm] Decouple stub code from runtime"
This is the next step towards preventing the compiler from peeking
directly into the runtime, and instead having it interact with the
runtime through a well-defined surface.
This CL decouples the hand-written stub code from the runtime. The
target-architecture-dependent stubs are moved to
dart::compiler::StubCodeCompiler, which uses dart::compiler::target::*
for accessing any runtime-related code.
The generation of type testing stubs is moved to separate files for the
time being.
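
As an illustration, a condensed sketch of the pattern this CL
introduces (not the literal source): stubs no longer reach into
dart::Thread and friends directly, but go through thin wrappers in
dart::compiler::target that forward to the runtime.

  // runtime_api.h -- the compiler only sees this surface.
  namespace dart {
  namespace compiler {
  namespace target {
  class Thread : public AllStatic {
   public:
    static word top_offset();  // offset of the top_ field on the target
  };
  }  // namespace target
  }  // namespace compiler
  }  // namespace dart

  // runtime_api.cc -- wrappers forward to the real runtime classes.
  word Thread::top_offset() {
    return dart::Thread::top_offset();
  }

  // stub_code_compiler_arm.cc -- stub code uses only the target:: API.
  __ ldr(R0, Address(THR, target::Thread::top_offset()));
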
Issue https://github.com/dart-lang/sdk/issues/31709
Change-Id: Icd0995b18a7bac496b1e12231cf437943f5c94f1
Reviewed-on: https://dart-review.googlesource.com/c/92720
Reviewed-by: Martin Kustermann <kustermann@google.com>
Commit-Queue: Martin Kustermann <kustermann@google.com>
Auto-Submit: Martin Kustermann <kustermann@google.com>
diff --git a/runtime/vm/compiler/assembler/assembler_arm.h b/runtime/vm/compiler/assembler/assembler_arm.h
index 3f8820e..6b79492 100644
--- a/runtime/vm/compiler/assembler/assembler_arm.h
+++ b/runtime/vm/compiler/assembler/assembler_arm.h
@@ -763,6 +763,10 @@
ObjectPoolBuilderEntry::Patchability patchable,
Condition cond = AL);
void PushObject(const Object& object);
+ void PushImmediate(int32_t immediate) {
+ LoadImmediate(TMP, immediate);
+ Push(TMP);
+ }
void CompareObject(Register rn, const Object& object);
enum CanBeSmi {
diff --git a/runtime/vm/compiler/assembler/assembler_arm64.h b/runtime/vm/compiler/assembler/assembler_arm64.h
index ed2c5a5..da5466b 100644
--- a/runtime/vm/compiler/assembler/assembler_arm64.h
+++ b/runtime/vm/compiler/assembler/assembler_arm64.h
@@ -1506,6 +1506,10 @@
LoadObject(TMP, object);
Push(TMP);
}
+ void PushImmediate(int64_t immediate) {
+ LoadImmediate(TMP, immediate);
+ Push(TMP);
+ }
void CompareObject(Register reg, const Object& object);
void LoadClassId(Register result, Register object);
diff --git a/runtime/vm/compiler/backend/il.h b/runtime/vm/compiler/backend/il.h
index 2a632c2..373a5b2 100644
--- a/runtime/vm/compiler/backend/il.h
+++ b/runtime/vm/compiler/backend/il.h
@@ -17,6 +17,7 @@
#include "vm/native_entry.h"
#include "vm/object.h"
#include "vm/parser.h"
+#include "vm/static_type_exactness_state.h"
#include "vm/token_position.h"
namespace dart {
diff --git a/runtime/vm/compiler/backend/locations.cc b/runtime/vm/compiler/backend/locations.cc
index a06b4b8..e549879 100644
--- a/runtime/vm/compiler/backend/locations.cc
+++ b/runtime/vm/compiler/backend/locations.cc
@@ -23,6 +23,23 @@
return count;
}
+void RegisterSet::DebugPrint() {
+ for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
+ Register r = static_cast<Register>(i);
+ if (ContainsRegister(r)) {
+ THR_Print("%s %s\n", Assembler::RegisterName(r),
+ IsTagged(r) ? "tagged" : "untagged");
+ }
+ }
+
+ for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
+ FpuRegister r = static_cast<FpuRegister>(i);
+ if (ContainsFpuRegister(r)) {
+ THR_Print("%s\n", Assembler::FpuRegisterName(r));
+ }
+ }
+}
+
LocationSummary::LocationSummary(Zone* zone,
intptr_t input_count,
intptr_t temp_count,
diff --git a/runtime/vm/compiler/backend/locations.h b/runtime/vm/compiler/backend/locations.h
index 8be7779..9c76dd6 100644
--- a/runtime/vm/compiler/backend/locations.h
+++ b/runtime/vm/compiler/backend/locations.h
@@ -9,7 +9,6 @@
#include "vm/bitfield.h"
#include "vm/bitmap.h"
#include "vm/compiler/assembler/assembler.h"
-#include "vm/log.h"
namespace dart {
@@ -561,22 +560,7 @@
}
}
- void DebugPrint() {
- for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
- Register r = static_cast<Register>(i);
- if (ContainsRegister(r)) {
- THR_Print("%s %s\n", Assembler::RegisterName(r),
- IsTagged(r) ? "tagged" : "untagged");
- }
- }
-
- for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
- FpuRegister r = static_cast<FpuRegister>(i);
- if (ContainsFpuRegister(r)) {
- THR_Print("%s\n", Assembler::FpuRegisterName(r));
- }
- }
- }
+ void DebugPrint();
void MarkUntagged(Location loc) {
ASSERT(loc.IsRegister());
diff --git a/runtime/vm/compiler/compiler_sources.gni b/runtime/vm/compiler/compiler_sources.gni
index afb3abb..d41a5a8 100644
--- a/runtime/vm/compiler/compiler_sources.gni
+++ b/runtime/vm/compiler/compiler_sources.gni
@@ -120,6 +120,12 @@
"jit/jit_call_specializer.h",
"method_recognizer.cc",
"method_recognizer.h",
+ "stub_code_compiler.h",
+ "stub_code_compiler_arm.cc",
+ "stub_code_compiler_arm64.cc",
+ "stub_code_compiler_dbc.cc",
+ "stub_code_compiler_ia32.cc",
+ "stub_code_compiler_x64.cc",
"relocation.cc",
"relocation.h",
"runtime_api.cc",
diff --git a/runtime/vm/compiler/runtime_api.cc b/runtime/vm/compiler/runtime_api.cc
index 9431ff2..564c439 100644
--- a/runtime/vm/compiler/runtime_api.cc
+++ b/runtime/vm/compiler/runtime_api.cc
@@ -6,8 +6,12 @@
#if !defined(DART_PRECOMPILED_RUNTIME)
+#include "vm/dart_entry.h"
#include "vm/longjump.h"
+#include "vm/native_arguments.h"
+#include "vm/native_entry.h"
#include "vm/object.h"
+#include "vm/runtime_entry.h"
namespace dart {
namespace compiler {
@@ -57,6 +61,38 @@
return Object::ZoneHandle(zone, obj.raw());
}
+const Object& NullObject() {
+ return Object::null_object();
+}
+
+const Bool& TrueObject() {
+ return dart::Bool::True();
+}
+
+const Bool& FalseObject() {
+ return dart::Bool::False();
+}
+
+const Object& EmptyTypeArguments() {
+ return Object::empty_type_arguments();
+}
+
+const Type& DynamicType() {
+ return dart::Type::dynamic_type();
+}
+
+const Type& ObjectType() {
+ return Type::Handle(dart::Type::ObjectType());
+}
+
+const Type& VoidType() {
+ return dart::Type::void_type();
+}
+
+const Type& IntType() {
+ return Type::Handle(dart::Type::IntType());
+}
+
bool IsOriginalObject(const Object& object) {
if (object.IsICData()) {
return ICData::Cast(object).IsOriginal();
@@ -82,12 +118,40 @@
return static_cast<int32_t>(Isolate::Current()->random()->NextUInt32());
}
+#if !defined(TARGET_ARCH_DBC)
+const Code& StubCodeAllocateArray() {
+ return dart::StubCode::AllocateArray();
+}
+
+const Code& StubCodeSubtype2TestCache() {
+ return dart::StubCode::Subtype2TestCache();
+}
+
+const Code& StubCodeSubtype6TestCache() {
+ return dart::StubCode::Subtype6TestCache();
+}
+#endif // !defined(TARGET_ARCH_DBC)
+
+#define DEFINE_ALIAS(name) \
+ const RuntimeEntry& k##name##RuntimeEntry(dart::k##name##RuntimeEntry);
+RUNTIME_ENTRY_LIST(DEFINE_ALIAS)
+#undef DEFINE_ALIAS
+
+#define DEFINE_ALIAS(type, name, ...) \
+ const RuntimeEntry& k##name##RuntimeEntry(dart::k##name##RuntimeEntry);
+LEAF_RUNTIME_ENTRY_LIST(DEFINE_ALIAS)
+#undef DEFINE_ALIAS
+
void BailoutWithBranchOffsetError() {
Thread::Current()->long_jump_base()->Jump(1, Object::branch_offset_error());
}
namespace target {
+const word kPageSize = dart::kPageSize;
+const word kPageSizeInWords = dart::kPageSizeInWords;
+const word kPageMask = dart::kPageMask;
+
uint32_t MakeTagWordForNewSpaceObject(classid_t cid, uword instance_size) {
return dart::RawObject::SizeTag::encode(instance_size) |
dart::RawObject::ClassIdTag::encode(cid) |
@@ -98,10 +162,27 @@
return dart::Object::tags_offset();
}
+const word RawObject::kCardRememberedBit = dart::RawObject::kCardRememberedBit;
+
+const word RawObject::kOldAndNotRememberedBit =
+ dart::RawObject::kOldAndNotRememberedBit;
+
+const word RawObject::kOldAndNotMarkedBit =
+ dart::RawObject::kOldAndNotMarkedBit;
+
const word RawObject::kClassIdTagPos = dart::RawObject::kClassIdTagPos;
const word RawObject::kClassIdTagSize = dart::RawObject::kClassIdTagSize;
+const word RawObject::kSizeTagMaxSizeTag =
+ dart::RawObject::SizeTag::kMaxSizeTag;
+
+const word RawObject::kTagBitsSizeTagPos =
+ dart::RawObject::TagBits::kSizeTagPos;
+
+const word RawAbstractType::kTypeStateFinalizedInstantiated =
+ dart::RawAbstractType::kFinalizedInstantiated;
+
const word RawObject::kBarrierOverlapShift =
dart::RawObject::kBarrierOverlapShift;
@@ -109,6 +190,12 @@
return dart::ObjectPool::element_offset(index);
}
+word Class::type_arguments_field_offset_in_words_offset() {
+ return dart::Class::type_arguments_field_offset_in_words_offset();
+}
+
+const word Class::kNoTypeArguments = dart::Class::kNoTypeArguments;
+
classid_t Class::GetId(const dart::Class& handle) {
return handle.id();
}
@@ -117,14 +204,228 @@
return handle.instance_size();
}
+intptr_t Class::NumTypeArguments(const dart::Class& klass) {
+  return klass.NumTypeArguments();
+}
+
+bool Class::HasTypeArgumentsField(const dart::Class& klass) {
+ return klass.type_arguments_field_offset() != dart::Class::kNoTypeArguments;
+}
+
+intptr_t Class::TypeArgumentsFieldOffset(const dart::Class& klass) {
+ return klass.type_arguments_field_offset();
+}
+
+intptr_t Class::InstanceSize(const dart::Class& klass) {
+ return klass.instance_size();
+}
+
+bool Class::TraceAllocation(const dart::Class& klass) {
+ return klass.TraceAllocation(dart::Isolate::Current());
+}
+
+word Instance::first_field_offset() {
+ return dart::Instance::NextFieldOffset();
+}
+
word Instance::DataOffsetFor(intptr_t cid) {
return dart::Instance::DataOffsetFor(cid);
}
+word Function::code_offset() {
+ return dart::Function::code_offset();
+}
+
+word Function::entry_point_offset() {
+ return dart::Function::entry_point_offset();
+}
+
+word Function::usage_counter_offset() {
+ return dart::Function::usage_counter_offset();
+}
+
+word Function::unchecked_entry_point_offset() {
+ return dart::Function::unchecked_entry_point_offset();
+}
+
+word ICData::CodeIndexFor(word num_args) {
+ return dart::ICData::CodeIndexFor(num_args);
+}
+
+word ICData::owner_offset() {
+ return dart::ICData::owner_offset();
+}
+
+word ICData::arguments_descriptor_offset() {
+ return dart::ICData::arguments_descriptor_offset();
+}
+
+word ICData::ic_data_offset() {
+ return dart::ICData::ic_data_offset();
+}
+
+word ICData::static_receiver_type_offset() {
+ return dart::ICData::static_receiver_type_offset();
+}
+
+word ICData::state_bits_offset() {
+ return dart::ICData::state_bits_offset();
+}
+
+word ICData::CountIndexFor(word num_args) {
+ return dart::ICData::CountIndexFor(num_args);
+}
+
+word ICData::TargetIndexFor(word num_args) {
+ return dart::ICData::TargetIndexFor(num_args);
+}
+
+word ICData::ExactnessOffsetFor(word num_args) {
+ return dart::ICData::ExactnessOffsetFor(num_args);
+}
+
+word ICData::TestEntryLengthFor(word num_args, bool exactness_check) {
+ return dart::ICData::TestEntryLengthFor(num_args, exactness_check);
+}
+
+word ICData::EntryPointIndexFor(word num_args) {
+ return dart::ICData::EntryPointIndexFor(num_args);
+}
+
+word ICData::NumArgsTestedShift() {
+ return dart::ICData::NumArgsTestedShift();
+}
+
+word ICData::NumArgsTestedMask() {
+ return dart::ICData::NumArgsTestedMask();
+}
+
+const word MegamorphicCache::kSpreadFactor =
+ dart::MegamorphicCache::kSpreadFactor;
+
+word MegamorphicCache::mask_offset() {
+ return dart::MegamorphicCache::mask_offset();
+}
+word MegamorphicCache::buckets_offset() {
+ return dart::MegamorphicCache::buckets_offset();
+}
+word MegamorphicCache::arguments_descriptor_offset() {
+ return dart::MegamorphicCache::arguments_descriptor_offset();
+}
+
+word SingleTargetCache::lower_limit_offset() {
+ return dart::SingleTargetCache::lower_limit_offset();
+}
+word SingleTargetCache::upper_limit_offset() {
+ return dart::SingleTargetCache::upper_limit_offset();
+}
+word SingleTargetCache::entry_point_offset() {
+ return dart::SingleTargetCache::entry_point_offset();
+}
+word SingleTargetCache::target_offset() {
+ return dart::SingleTargetCache::target_offset();
+}
+
+word Array::header_size() {
+ return sizeof(dart::RawArray);
+}
+
+word Array::tags_offset() {
+ return dart::Array::tags_offset();
+}
+
+word Array::data_offset() {
+ return dart::Array::data_offset();
+}
+
+word Array::type_arguments_offset() {
+ return dart::Array::type_arguments_offset();
+}
+
+word Array::length_offset() {
+ return dart::Array::length_offset();
+}
+
+const word Array::kMaxNewSpaceElements = dart::Array::kMaxNewSpaceElements;
+
+word ArgumentsDescriptor::count_offset() {
+ return dart::ArgumentsDescriptor::count_offset();
+}
+
+word ArgumentsDescriptor::type_args_len_offset() {
+ return dart::ArgumentsDescriptor::type_args_len_offset();
+}
+
+word AbstractType::type_test_stub_entry_point_offset() {
+ return dart::AbstractType::type_test_stub_entry_point_offset();
+}
+
+word Type::type_state_offset() {
+ return dart::Type::type_state_offset();
+}
+
+word Type::arguments_offset() {
+ return dart::Type::arguments_offset();
+}
+
+word Type::signature_offset() {
+ return dart::Type::signature_offset();
+}
+
+word TypeRef::type_offset() {
+ return dart::TypeRef::type_offset();
+}
+
+const word HeapPage::kBytesPerCardLog2 = dart::HeapPage::kBytesPerCardLog2;
+
+word HeapPage::card_table_offset() {
+ return dart::HeapPage::card_table_offset();
+}
+
bool Heap::IsAllocatableInNewSpace(intptr_t instance_size) {
return dart::Heap::IsAllocatableInNewSpace(instance_size);
}
+word Thread::active_exception_offset() {
+ return dart::Thread::active_exception_offset();
+}
+
+word Thread::active_stacktrace_offset() {
+ return dart::Thread::active_stacktrace_offset();
+}
+
+word Thread::resume_pc_offset() {
+ return dart::Thread::resume_pc_offset();
+}
+
+word Thread::marking_stack_block_offset() {
+ return dart::Thread::marking_stack_block_offset();
+}
+
+word Thread::top_exit_frame_info_offset() {
+ return dart::Thread::top_exit_frame_info_offset();
+}
+
+word Thread::top_resource_offset() {
+ return dart::Thread::top_resource_offset();
+}
+
+word Thread::global_object_pool_offset() {
+ return dart::Thread::global_object_pool_offset();
+}
+
+word Thread::object_null_offset() {
+ return dart::Thread::object_null_offset();
+}
+
+word Thread::bool_true_offset() {
+ return dart::Thread::bool_true_offset();
+}
+
+word Thread::bool_false_offset() {
+ return dart::Thread::bool_false_offset();
+}
+
word Thread::top_offset() {
return dart::Thread::top_offset();
}
@@ -137,7 +438,27 @@
return dart::Thread::isolate_offset();
}
+word Thread::store_buffer_block_offset() {
+ return dart::Thread::store_buffer_block_offset();
+}
+
#if !defined(TARGET_ARCH_DBC)
+word Thread::write_barrier_code_offset() {
+ return dart::Thread::write_barrier_code_offset();
+}
+
+word Thread::array_write_barrier_code_offset() {
+ return dart::Thread::array_write_barrier_code_offset();
+}
+
+word Thread::fix_callers_target_code_offset() {
+ return dart::Thread::fix_callers_target_code_offset();
+}
+
+word Thread::fix_allocation_stub_code_offset() {
+ return dart::Thread::fix_allocation_stub_code_offset();
+}
+
word Thread::call_to_runtime_entry_point_offset() {
return dart::Thread::call_to_runtime_entry_point_offset();
}
@@ -179,6 +500,78 @@
return dart::Thread::vm_tag_offset();
}
+#if !defined(TARGET_ARCH_DBC)
+
+word Thread::monomorphic_miss_stub_offset() {
+ return dart::Thread::monomorphic_miss_stub_offset();
+}
+
+word Thread::ic_lookup_through_code_stub_offset() {
+ return dart::Thread::ic_lookup_through_code_stub_offset();
+}
+
+word Thread::lazy_specialize_type_test_stub_offset() {
+ return dart::Thread::lazy_specialize_type_test_stub_offset();
+}
+
+word Thread::slow_type_test_stub_offset() {
+ return dart::Thread::slow_type_test_stub_offset();
+}
+
+word Thread::call_to_runtime_stub_offset() {
+ return dart::Thread::call_to_runtime_stub_offset();
+}
+
+word Thread::invoke_dart_code_stub_offset() {
+ return dart::Thread::invoke_dart_code_stub_offset();
+}
+
+word Thread::interpret_call_entry_point_offset() {
+ return dart::Thread::interpret_call_entry_point_offset();
+}
+
+word Thread::invoke_dart_code_from_bytecode_stub_offset() {
+ return dart::Thread::invoke_dart_code_from_bytecode_stub_offset();
+}
+
+word Thread::null_error_shared_without_fpu_regs_stub_offset() {
+ return dart::Thread::null_error_shared_without_fpu_regs_stub_offset();
+}
+
+word Thread::null_error_shared_with_fpu_regs_stub_offset() {
+ return dart::Thread::null_error_shared_with_fpu_regs_stub_offset();
+}
+
+word Thread::stack_overflow_shared_without_fpu_regs_stub_offset() {
+ return dart::Thread::stack_overflow_shared_without_fpu_regs_stub_offset();
+}
+
+word Thread::stack_overflow_shared_with_fpu_regs_stub_offset() {
+ return dart::Thread::stack_overflow_shared_with_fpu_regs_stub_offset();
+}
+
+word Thread::lazy_deopt_from_return_stub_offset() {
+ return dart::Thread::lazy_deopt_from_return_stub_offset();
+}
+
+word Thread::lazy_deopt_from_throw_stub_offset() {
+ return dart::Thread::lazy_deopt_from_throw_stub_offset();
+}
+
+word Thread::deoptimize_stub_offset() {
+ return dart::Thread::deoptimize_stub_offset();
+}
+
+#endif // !defined(TARGET_ARCH_DBC)
+
+word Thread::no_scope_native_wrapper_entry_point_offset() {
+ return dart::Thread::no_scope_native_wrapper_entry_point_offset();
+}
+
+word Thread::auto_scope_native_wrapper_entry_point_offset() {
+ return dart::Thread::auto_scope_native_wrapper_entry_point_offset();
+}
+
#define DECLARE_CONSTANT_OFFSET_GETTER(name) \
word Thread::name##_address_offset() { \
return dart::Thread::name##_address_offset(); \
@@ -186,10 +579,40 @@
THREAD_XMM_CONSTANT_LIST(DECLARE_CONSTANT_OFFSET_GETTER)
#undef DECLARE_CONSTANT_OFFSET_GETTER
+word Thread::OffsetFromThread(const dart::Object& object) {
+ return dart::Thread::OffsetFromThread(object);
+}
+
+uword StoreBufferBlock::top_offset() {
+ return dart::StoreBufferBlock::top_offset();
+}
+uword StoreBufferBlock::pointers_offset() {
+ return dart::StoreBufferBlock::pointers_offset();
+}
+const word StoreBufferBlock::kSize = dart::StoreBufferBlock::kSize;
+
+uword MarkingStackBlock::top_offset() {
+ return dart::MarkingStackBlock::top_offset();
+}
+uword MarkingStackBlock::pointers_offset() {
+ return dart::MarkingStackBlock::pointers_offset();
+}
+const word MarkingStackBlock::kSize = dart::MarkingStackBlock::kSize;
+
word Isolate::class_table_offset() {
return dart::Isolate::class_table_offset();
}
+word Isolate::ic_miss_code_offset() {
+ return dart::Isolate::ic_miss_code_offset();
+}
+
+#if !defined(PRODUCT)
+word Isolate::single_step_offset() {
+ return dart::Isolate::single_step_offset();
+}
+#endif // !defined(PRODUCT)
+
word ClassTable::table_offset() {
return dart::ClassTable::table_offset();
}
@@ -240,6 +663,66 @@
return dart::Code::entry_point_offset(kind);
}
+word SubtypeTestCache::cache_offset() {
+ return dart::SubtypeTestCache::cache_offset();
+}
+
+const word SubtypeTestCache::kTestEntryLength =
+ dart::SubtypeTestCache::kTestEntryLength;
+const word SubtypeTestCache::kInstanceClassIdOrFunction =
+ dart::SubtypeTestCache::kInstanceClassIdOrFunction;
+const word SubtypeTestCache::kInstanceTypeArguments =
+ dart::SubtypeTestCache::kInstanceTypeArguments;
+const word SubtypeTestCache::kInstantiatorTypeArguments =
+ dart::SubtypeTestCache::kInstantiatorTypeArguments;
+const word SubtypeTestCache::kFunctionTypeArguments =
+ dart::SubtypeTestCache::kFunctionTypeArguments;
+const word SubtypeTestCache::kInstanceParentFunctionTypeArguments =
+ dart::SubtypeTestCache::kInstanceParentFunctionTypeArguments;
+const word SubtypeTestCache::kInstanceDelayedFunctionTypeArguments =
+ dart::SubtypeTestCache::kInstanceDelayedFunctionTypeArguments;
+const word SubtypeTestCache::kTestResult = dart::SubtypeTestCache::kTestResult;
+
+word Context::header_size() {
+ return sizeof(dart::RawContext);
+}
+
+word Context::parent_offset() {
+ return dart::Context::parent_offset();
+}
+
+word Context::num_variables_offset() {
+ return dart::Context::num_variables_offset();
+}
+
+word Context::variable_offset(word i) {
+ return dart::Context::variable_offset(i);
+}
+
+word Context::InstanceSize(word n) {
+ return dart::Context::InstanceSize(n);
+}
+
+word Closure::context_offset() {
+ return dart::Closure::context_offset();
+}
+
+word Closure::delayed_type_arguments_offset() {
+ return dart::Closure::delayed_type_arguments_offset();
+}
+
+word Closure::function_offset() {
+ return dart::Closure::function_offset();
+}
+
+word Closure::function_type_arguments_offset() {
+ return dart::Closure::function_type_arguments_offset();
+}
+
+word Closure::instantiator_type_arguments_offset() {
+ return dart::Closure::instantiator_type_arguments_offset();
+}
+
#if !defined(PRODUCT)
word ClassHeapStats::TraceAllocationMask() {
return dart::ClassHeapStats::TraceAllocationMask();
@@ -262,6 +745,10 @@
return dart::Double::value_offset();
}
+word Mint::value_offset() {
+ return dart::Mint::value_offset();
+}
+
word Float32x4::value_offset() {
return dart::Float32x4::value_offset();
}
@@ -314,6 +801,29 @@
}
#endif // defined(TARGET_ARCH_IA32)
+const word NativeEntry::kNumCallWrapperArguments =
+ dart::NativeEntry::kNumCallWrapperArguments;
+
+word NativeArguments::thread_offset() {
+ return dart::NativeArguments::thread_offset();
+}
+
+word NativeArguments::argc_tag_offset() {
+ return dart::NativeArguments::argc_tag_offset();
+}
+
+word NativeArguments::argv_offset() {
+ return dart::NativeArguments::argv_offset();
+}
+
+word NativeArguments::retval_offset() {
+ return dart::NativeArguments::retval_offset();
+}
+
+word NativeArguments::StructSize() {
+ return sizeof(dart::NativeArguments);
+}
+
} // namespace target
} // namespace compiler
} // namespace dart
diff --git a/runtime/vm/compiler/runtime_api.h b/runtime/vm/compiler/runtime_api.h
index 5ca1b6f..6af5760 100644
--- a/runtime/vm/compiler/runtime_api.h
+++ b/runtime/vm/compiler/runtime_api.h
@@ -24,18 +24,24 @@
#include "vm/code_entry_kind.h"
#include "vm/frame_layout.h"
#include "vm/pointer_tagging.h"
+#include "vm/runtime_entry_list.h"
+#include "vm/token.h"
namespace dart {
// Forward declarations.
+class Bool;
class Class;
class Code;
class Function;
class LocalVariable;
class Object;
class RuntimeEntry;
class String;
+class Type;
+class TypeArguments;
class Zone;
+
namespace compiler {
class Assembler;
}
@@ -78,6 +85,24 @@
// Clone the given handle.
Object& NewZoneHandle(Zone* zone, const Object&);
+//
+// Constant objects.
+//
+
+const Object& NullObject();
+const Bool& TrueObject();
+const Bool& FalseObject();
+const Object& EmptyTypeArguments();
+const Type& DynamicType();
+const Type& ObjectType();
+const Type& VoidType();
+const Type& IntType();
+
+template <typename To, typename From>
+const To& CastHandle(const From& from) {
+ return reinterpret_cast<const To&>(from);
+}
+
// Returns true if [a] and [b] are the same object.
bool IsSameObject(const Object& a, const Object& b);
@@ -128,6 +153,12 @@
compiler::Assembler*,
intptr_t);
+#if !defined(TARGET_ARCH_DBC)
+const Code& StubCodeAllocateArray();
+const Code& StubCodeSubtype2TestCache();
+const Code& StubCodeSubtype6TestCache();
+#endif // !defined(TARGET_ARCH_DBC)
+
class RuntimeEntry : public ValueObject {
public:
virtual ~RuntimeEntry() {}
@@ -167,6 +198,16 @@
RuntimeEntryCallInternal call_;
};
+#define DECLARE_RUNTIME_ENTRY(name) \
+ extern const RuntimeEntry& k##name##RuntimeEntry;
+RUNTIME_ENTRY_LIST(DECLARE_RUNTIME_ENTRY)
+#undef DECLARE_RUNTIME_ENTRY
+
+#define DECLARE_RUNTIME_ENTRY(type, name, ...) \
+ extern const RuntimeEntry& k##name##RuntimeEntry;
+LEAF_RUNTIME_ENTRY_LIST(DECLARE_RUNTIME_ENTRY)
+#undef DECLARE_RUNTIME_ENTRY
+
// Allocate a string object with the given content in the runtime heap.
const String& AllocateString(const char* buffer);
@@ -194,6 +235,11 @@
using ObjectAlignment = dart::ObjectAlignment<kWordSize, kWordSizeLog2>;
+// Information about heap pages.
+extern const word kPageSize;
+extern const word kPageSizeInWords;
+extern const word kPageMask;
+
// Information about frame_layout that compiler should be targeting.
extern FrameLayout frame_layout;
@@ -251,11 +297,21 @@
class RawObject : public AllStatic {
public:
+ static const word kCardRememberedBit;
+ static const word kOldAndNotRememberedBit;
+ static const word kOldAndNotMarkedBit;
static const word kClassIdTagPos;
static const word kClassIdTagSize;
+ static const word kSizeTagMaxSizeTag;
+ static const word kTagBitsSizeTagPos;
static const word kBarrierOverlapShift;
};
+class RawAbstractType : public AllStatic {
+ public:
+ static const word kTypeStateFinalizedInstantiated;
+};
+
class Object : public AllStatic {
public:
// Offset of the tags word.
@@ -270,23 +326,126 @@
class Class : public AllStatic {
public:
+ static word type_arguments_field_offset_in_words_offset();
+
+ // The value used if no type arguments vector is present.
+ static const word kNoTypeArguments;
+
// Return class id of the given class on the target.
static classid_t GetId(const dart::Class& handle);
// Return instance size for the given class on the target.
static uword GetInstanceSize(const dart::Class& handle);
+
+ // Returns the number of type arguments.
+ static intptr_t NumTypeArguments(const dart::Class& klass);
+
+ // Whether [klass] has a type arguments vector field.
+ static bool HasTypeArgumentsField(const dart::Class& klass);
+
+ // Returns the offset (in bytes) of the type arguments vector.
+ static intptr_t TypeArgumentsFieldOffset(const dart::Class& klass);
+
+ // Returns the instance size (in bytes).
+ static intptr_t InstanceSize(const dart::Class& klass);
+
+ // Whether to trace allocation for this klass.
+ static bool TraceAllocation(const dart::Class& klass);
};
class Instance : public AllStatic {
public:
+ // Returns the offset to the first field of [RawInstance].
+ static word first_field_offset();
static word DataOffsetFor(intptr_t cid);
};
+class Function : public AllStatic {
+ public:
+ static word code_offset();
+ static word entry_point_offset();
+ static word usage_counter_offset();
+ static word unchecked_entry_point_offset();
+};
+
+class ICData : public AllStatic {
+ public:
+ static word owner_offset();
+ static word arguments_descriptor_offset();
+ static word ic_data_offset();
+ static word static_receiver_type_offset();
+ static word state_bits_offset();
+
+ static word CodeIndexFor(word num_args);
+ static word CountIndexFor(word num_args);
+ static word TargetIndexFor(word num_args);
+ static word ExactnessOffsetFor(word num_args);
+ static word TestEntryLengthFor(word num_args, bool exactness_check);
+ static word EntryPointIndexFor(word num_args);
+ static word NumArgsTestedShift();
+ static word NumArgsTestedMask();
+};
+
+class MegamorphicCache : public AllStatic {
+ public:
+ static const word kSpreadFactor;
+ static word mask_offset();
+ static word buckets_offset();
+ static word arguments_descriptor_offset();
+};
+
+class SingleTargetCache : public AllStatic {
+ public:
+ static word lower_limit_offset();
+ static word upper_limit_offset();
+ static word entry_point_offset();
+ static word target_offset();
+};
+
+class Array : public AllStatic {
+ public:
+ static word header_size();
+ static word tags_offset();
+ static word data_offset();
+ static word type_arguments_offset();
+ static word length_offset();
+
+ static const word kMaxNewSpaceElements;
+};
+
+class ArgumentsDescriptor : public AllStatic {
+ public:
+ static word count_offset();
+ static word type_args_len_offset();
+};
+
+class AbstractType : public AllStatic {
+ public:
+ static word type_test_stub_entry_point_offset();
+};
+
+class Type : public AllStatic {
+ public:
+ static word type_state_offset();
+ static word arguments_offset();
+ static word signature_offset();
+};
+
+class TypeRef : public AllStatic {
+ public:
+ static word type_offset();
+};
+
class Double : public AllStatic {
public:
static word value_offset();
};
+class Mint : public AllStatic {
+ public:
+ static word value_offset();
+};
+
class Float32x4 : public AllStatic {
public:
static word value_offset();
@@ -299,9 +458,20 @@
class Thread : public AllStatic {
public:
+ static word active_exception_offset();
+ static word active_stacktrace_offset();
+ static word resume_pc_offset();
+ static word marking_stack_block_offset();
+ static word top_exit_frame_info_offset();
+ static word top_resource_offset();
+ static word global_object_pool_offset();
+ static word object_null_offset();
+ static word bool_true_offset();
+ static word bool_false_offset();
static word top_offset();
static word end_offset();
static word isolate_offset();
+ static word store_buffer_block_offset();
static word call_to_runtime_entry_point_offset();
static word null_error_shared_with_fpu_regs_entry_point_offset();
static word null_error_shared_without_fpu_regs_entry_point_offset();
@@ -312,6 +482,32 @@
static word write_barrier_entry_point_offset();
static word vm_tag_offset();
+#if !defined(TARGET_ARCH_DBC)
+ static word write_barrier_code_offset();
+ static word array_write_barrier_code_offset();
+ static word fix_callers_target_code_offset();
+ static word fix_allocation_stub_code_offset();
+
+ static word monomorphic_miss_stub_offset();
+ static word ic_lookup_through_code_stub_offset();
+ static word lazy_specialize_type_test_stub_offset();
+ static word slow_type_test_stub_offset();
+ static word call_to_runtime_stub_offset();
+ static word invoke_dart_code_stub_offset();
+ static word interpret_call_entry_point_offset();
+ static word invoke_dart_code_from_bytecode_stub_offset();
+ static word null_error_shared_without_fpu_regs_stub_offset();
+ static word null_error_shared_with_fpu_regs_stub_offset();
+ static word stack_overflow_shared_without_fpu_regs_stub_offset();
+ static word stack_overflow_shared_with_fpu_regs_stub_offset();
+ static word lazy_deopt_from_return_stub_offset();
+ static word lazy_deopt_from_throw_stub_offset();
+ static word deoptimize_stub_offset();
+#endif // !defined(TARGET_ARCH_DBC)
+
+ static word no_scope_native_wrapper_entry_point_offset();
+ static word auto_scope_native_wrapper_entry_point_offset();
+
#define THREAD_XMM_CONSTANT_LIST(V) \
V(float_not) \
V(float_negate) \
@@ -324,11 +520,31 @@
static word name##_address_offset();
THREAD_XMM_CONSTANT_LIST(DECLARE_CONSTANT_OFFSET_GETTER)
#undef DECLARE_CONSTANT_OFFSET_GETTER
+
+ static word OffsetFromThread(const dart::Object& object);
+};
+
+class StoreBufferBlock : public AllStatic {
+ public:
+ static uword top_offset();
+ static uword pointers_offset();
+ static const word kSize;
+};
+
+class MarkingStackBlock : public AllStatic {
+ public:
+ static uword top_offset();
+ static uword pointers_offset();
+ static const word kSize;
};
class Isolate : public AllStatic {
public:
static word class_table_offset();
+ static word ic_miss_code_offset();
+#if !defined(PRODUCT)
+ static word single_step_offset();
+#endif // !defined(PRODUCT)
};
class ClassTable : public AllStatic {
@@ -373,6 +589,45 @@
static intptr_t saved_instructions_offset();
};
+class SubtypeTestCache : public AllStatic {
+ public:
+ static word cache_offset();
+
+ static const word kTestEntryLength;
+ static const word kInstanceClassIdOrFunction;
+ static const word kInstanceTypeArguments;
+ static const word kInstantiatorTypeArguments;
+ static const word kFunctionTypeArguments;
+ static const word kInstanceParentFunctionTypeArguments;
+ static const word kInstanceDelayedFunctionTypeArguments;
+ static const word kTestResult;
+};
+
+class Context : public AllStatic {
+ public:
+ static word header_size();
+ static word parent_offset();
+ static word num_variables_offset();
+ static word variable_offset(word i);
+ static word InstanceSize(word n);
+};
+
+class Closure : public AllStatic {
+ public:
+ static word context_offset();
+ static word delayed_type_arguments_offset();
+ static word function_offset();
+ static word function_type_arguments_offset();
+ static word instantiator_type_arguments_offset();
+};
+
+class HeapPage : public AllStatic {
+ public:
+ static const word kBytesPerCardLog2;
+
+ static word card_table_offset();
+};
+
class Heap : public AllStatic {
public:
// Return true if an object with the given instance size is allocatable
@@ -380,6 +635,21 @@
static bool IsAllocatableInNewSpace(intptr_t instance_size);
};
+class NativeArguments {
+ public:
+ static word thread_offset();
+ static word argc_tag_offset();
+ static word argv_offset();
+ static word retval_offset();
+
+ static word StructSize();
+};
+
+class NativeEntry {
+ public:
+ static const word kNumCallWrapperArguments;
+};
+
} // namespace target
} // namespace compiler
} // namespace dart
diff --git a/runtime/vm/compiler/stub_code_compiler.h b/runtime/vm/compiler/stub_code_compiler.h
new file mode 100644
index 0000000..c227889
--- /dev/null
+++ b/runtime/vm/compiler/stub_code_compiler.h
@@ -0,0 +1,86 @@
+// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#ifndef RUNTIME_VM_COMPILER_STUB_CODE_COMPILER_H_
+#define RUNTIME_VM_COMPILER_STUB_CODE_COMPILER_H_
+
+#include "vm/allocation.h"
+#include "vm/compiler/runtime_api.h"
+#include "vm/constants.h"
+#include "vm/stub_code_list.h"
+
+namespace dart {
+
+namespace compiler {
+
+// Forward declarations.
+class Assembler;
+
+class StubCodeCompiler : public AllStatic {
+ public:
+#if !defined(TARGET_ARCH_DBC) && !defined(TARGET_ARCH_IA32)
+ static void GenerateBuildMethodExtractorStub(
+ Assembler* assembler,
+ const Object& closure_allocation_stub,
+ const Object& context_allocation_stub);
+#endif
+
+#if !defined(DART_PRECOMPILED_RUNTIME)
+#define STUB_CODE_GENERATE(name) \
+ static void Generate##name##Stub(Assembler* assembler);
+ VM_STUB_CODE_LIST(STUB_CODE_GENERATE)
+#undef STUB_CODE_GENERATE
+
+ static void GenerateSharedStub(Assembler* assembler,
+ bool save_fpu_registers,
+ const RuntimeEntry* target,
+ intptr_t self_code_stub_offset_from_thread,
+ bool allow_return);
+
+ static void GenerateMegamorphicMissStub(Assembler* assembler);
+ static void GenerateAllocationStubForClass(Assembler* assembler,
+ const Class& cls);
+ static void GenerateNArgsCheckInlineCacheStub(
+ Assembler* assembler,
+ intptr_t num_args,
+ const RuntimeEntry& handle_ic_miss,
+ Token::Kind kind,
+ bool optimized = false,
+ bool exactness_check = false);
+ static void GenerateUsageCounterIncrement(Assembler* assembler,
+ Register temp_reg);
+ static void GenerateOptimizedUsageCounterIncrement(Assembler* assembler);
+#endif // !defined(DART_PRECOMPILED_RUNTIME)
+};
+
+} // namespace compiler
+
+enum DeoptStubKind { kLazyDeoptFromReturn, kLazyDeoptFromThrow, kEagerDeopt };
+
+// Invocation mode for TypeCheck runtime entry that describes
+// where we are calling it from.
+enum TypeCheckMode {
+ // TypeCheck is invoked from LazySpecializeTypeTest stub.
+ // It should replace stub on the type with a specialized version.
+ kTypeCheckFromLazySpecializeStub,
+
+ // TypeCheck is invoked from the SlowTypeTest stub.
+ // This means that cache can be lazily created (if needed)
+ // and dst_name can be fetched from the pool.
+ kTypeCheckFromSlowStub,
+
+ // TypeCheck is invoked from normal inline AssertAssignable.
+ // Both cache and dst_name must be already populated.
+ kTypeCheckFromInline
+};
+
+// Zap value used to indicate unused CODE_REG in deopt.
+static const uword kZapCodeReg = 0xf1f1f1f1;
+
+// Zap value used to indicate unused return address in deopt.
+static const uword kZapReturnAddress = 0xe1e1e1e1;
+
+} // namespace dart
+
+#endif // RUNTIME_VM_COMPILER_STUB_CODE_COMPILER_H_
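
Aside: the STUB_CODE_GENERATE block above is the standard X-macro
pattern. A minimal sketch with a hypothetical two-entry list (the real
VM_STUB_CODE_LIST lives in vm/stub_code_list.h and is much longer):

  // Hypothetical list, for illustration only.
  #define EXAMPLE_STUB_LIST(V)                                             \
    V(CallToRuntime)                                                       \
    V(PrintStopMessage)

  #define STUB_CODE_GENERATE(name)                                         \
    static void Generate##name##Stub(Assembler* assembler);
  EXAMPLE_STUB_LIST(STUB_CODE_GENERATE)
  #undef STUB_CODE_GENERATE

  // The list expands to one declaration per entry:
  //   static void GenerateCallToRuntimeStub(Assembler* assembler);
  //   static void GeneratePrintStopMessageStub(Assembler* assembler);

Each architecture's stub_code_compiler_<arch>.cc then defines one
Generate<Name>Stub body per list entry.
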
diff --git a/runtime/vm/stub_code_arm.cc b/runtime/vm/compiler/stub_code_compiler_arm.cc
similarity index 65%
rename from runtime/vm/stub_code_arm.cc
rename to runtime/vm/compiler/stub_code_compiler_arm.cc
index 55a4223..8fa9142 100644
--- a/runtime/vm/stub_code_arm.cc
+++ b/runtime/vm/compiler/stub_code_compiler_arm.cc
@@ -1,24 +1,23 @@
-// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
+// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#include "vm/globals.h"
+#define SHOULD_NOT_INCLUDE_RUNTIME
+
+#include "vm/compiler/stub_code_compiler.h"
+
#if defined(TARGET_ARCH_ARM) && !defined(DART_PRECOMPILED_RUNTIME)
+#include "vm/class_id.h"
+#include "vm/code_entry_kind.h"
#include "vm/compiler/assembler/assembler.h"
-#include "vm/compiler/backend/flow_graph_compiler.h"
-#include "vm/compiler/jit/compiler.h"
-#include "vm/cpu.h"
-#include "vm/dart_entry.h"
-#include "vm/heap/heap.h"
+#include "vm/compiler/backend/locations.h"
+#include "vm/constants_arm.h"
#include "vm/instructions.h"
-#include "vm/isolate.h"
-#include "vm/object_store.h"
-#include "vm/runtime_entry.h"
-#include "vm/stack_frame.h"
+#include "vm/static_type_exactness_state.h"
#include "vm/tags.h"
-#include "vm/type_testing_stubs.h"
#define __ assembler->
@@ -31,6 +30,8 @@
"Set to true for debugging & verifying the slow paths.");
DECLARE_FLAG(bool, precompiled_mode);
+namespace compiler {
+
// Input parameters:
// LR : return address.
// SP : address of last argument in argument array.
@@ -38,24 +39,25 @@
// SP + 4*R4 : address of return value.
// R9 : address of the runtime function to call.
// R4 : number of arguments to the call.
-void StubCode::GenerateCallToRuntimeStub(Assembler* assembler) {
- const intptr_t thread_offset = NativeArguments::thread_offset();
- const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset();
- const intptr_t argv_offset = NativeArguments::argv_offset();
- const intptr_t retval_offset = NativeArguments::retval_offset();
+void StubCodeCompiler::GenerateCallToRuntimeStub(Assembler* assembler) {
+ const intptr_t thread_offset = target::NativeArguments::thread_offset();
+ const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
+ const intptr_t argv_offset = target::NativeArguments::argv_offset();
+ const intptr_t retval_offset = target::NativeArguments::retval_offset();
- __ ldr(CODE_REG, Address(THR, Thread::call_to_runtime_stub_offset()));
+ __ ldr(CODE_REG, Address(THR, target::Thread::call_to_runtime_stub_offset()));
__ EnterStubFrame();
// Save exit frame information to enable stack walking as we are about
// to transition to Dart VM C++ code.
- __ StoreToOffset(kWord, FP, THR, Thread::top_exit_frame_info_offset());
+ __ StoreToOffset(kWord, FP, THR,
+ target::Thread::top_exit_frame_info_offset());
#if defined(DEBUG)
{
Label ok;
// Check that we are always entering from Dart code.
- __ LoadFromOffset(kWord, R8, THR, Thread::vm_tag_offset());
+ __ LoadFromOffset(kWord, R8, THR, target::Thread::vm_tag_offset());
__ CompareImmediate(R8, VMTag::kDartCompiledTagId);
__ b(&ok, EQ);
__ Stop("Not coming from Dart code.");
@@ -64,48 +66,51 @@
#endif
// Mark that the thread is executing VM code.
- __ StoreToOffset(kWord, R9, THR, Thread::vm_tag_offset());
+ __ StoreToOffset(kWord, R9, THR, target::Thread::vm_tag_offset());
// Reserve space for arguments and align frame before entering C++ world.
- // NativeArguments are passed in registers.
- ASSERT(sizeof(NativeArguments) == 4 * kWordSize);
+ // target::NativeArguments are passed in registers.
+ ASSERT(target::NativeArguments::StructSize() == 4 * target::kWordSize);
__ ReserveAlignedFrameSpace(0);
- // Pass NativeArguments structure by value and call runtime.
+ // Pass target::NativeArguments structure by value and call runtime.
// Registers R0, R1, R2, and R3 are used.
- ASSERT(thread_offset == 0 * kWordSize);
+ ASSERT(thread_offset == 0 * target::kWordSize);
// Set thread in NativeArgs.
__ mov(R0, Operand(THR));
// There are no runtime calls to closures, so we do not need to set the tag
// bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_.
- ASSERT(argc_tag_offset == 1 * kWordSize);
- __ mov(R1, Operand(R4)); // Set argc in NativeArguments.
+ ASSERT(argc_tag_offset == 1 * target::kWordSize);
+ __ mov(R1, Operand(R4)); // Set argc in target::NativeArguments.
- ASSERT(argv_offset == 2 * kWordSize);
+ ASSERT(argv_offset == 2 * target::kWordSize);
__ add(R2, FP, Operand(R4, LSL, 2)); // Compute argv.
- // Set argv in NativeArguments.
- __ AddImmediate(R2, kParamEndSlotFromFp * kWordSize);
+ // Set argv in target::NativeArguments.
+ __ AddImmediate(R2,
+ target::frame_layout.param_end_from_fp * target::kWordSize);
- ASSERT(retval_offset == 3 * kWordSize);
- __ add(R3, R2, Operand(kWordSize)); // Retval is next to 1st argument.
+ ASSERT(retval_offset == 3 * target::kWordSize);
+ __ add(R3, R2,
+ Operand(target::kWordSize)); // Retval is next to 1st argument.
// Call runtime or redirection via simulator.
__ blx(R9);
// Mark that the thread is executing Dart code.
__ LoadImmediate(R2, VMTag::kDartCompiledTagId);
- __ StoreToOffset(kWord, R2, THR, Thread::vm_tag_offset());
+ __ StoreToOffset(kWord, R2, THR, target::Thread::vm_tag_offset());
// Reset exit frame information in Isolate structure.
__ LoadImmediate(R2, 0);
- __ StoreToOffset(kWord, R2, THR, Thread::top_exit_frame_info_offset());
+ __ StoreToOffset(kWord, R2, THR,
+ target::Thread::top_exit_frame_info_offset());
// Restore the global object pool after returning from runtime (old space is
// moving, so the GOP could have been relocated).
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
- __ ldr(PP, Address(THR, Thread::global_object_pool_offset()));
+ __ ldr(PP, Address(THR, target::Thread::global_object_pool_offset()));
}
__ LeaveStubFrame();
@@ -119,11 +124,12 @@
__ Ret();
}
-void StubCode::GenerateSharedStub(Assembler* assembler,
- bool save_fpu_registers,
- const RuntimeEntry* target,
- intptr_t self_code_stub_offset_from_thread,
- bool allow_return) {
+void StubCodeCompiler::GenerateSharedStub(
+ Assembler* assembler,
+ bool save_fpu_registers,
+ const RuntimeEntry* target,
+ intptr_t self_code_stub_offset_from_thread,
+ bool allow_return) {
// If the target CPU does not support VFP the caller should always use the
// non-FPU stub.
if (save_fpu_registers && !TargetCPUFeatures::vfp_supported()) {
@@ -157,27 +163,19 @@
// R1: The extracted method.
// R4: The type_arguments_field_offset (or 0)
// SP+0: The object from which we are tearing a method off.
-void StubCode::GenerateBuildMethodExtractorStub(Assembler* assembler) {
- Thread* thread = Thread::Current();
- Zone* Z = thread->zone();
- ObjectStore* object_store = thread->isolate()->object_store();
-
- const auto& closure_class =
- Class::ZoneHandle(Z, object_store->closure_class());
- const auto& closure_allocation_stub =
- Code::ZoneHandle(Z, StubCode::GetAllocationStubForClass(closure_class));
-
+void StubCodeCompiler::GenerateBuildMethodExtractorStub(
+ Assembler* assembler,
+ const Object& closure_allocation_stub,
+ const Object& context_allocation_stub) {
const intptr_t kReceiverOffset =
compiler::target::frame_layout.param_end_from_fp + 1;
- const auto& context_allocation_stub = StubCode::AllocateContext();
-
__ EnterStubFrame();
// Build type_arguments vector (or null)
__ cmp(R4, Operand(0));
- __ ldr(R3, Address(THR, Thread::object_null_offset()), EQ);
- __ ldr(R0, Address(FP, kReceiverOffset * kWordSize), NE);
+ __ ldr(R3, Address(THR, target::Thread::object_null_offset()), EQ);
+ __ ldr(R0, Address(FP, kReceiverOffset * target::kWordSize), NE);
__ ldr(R3, Address(R0, R4), NE);
// Push type arguments & extracted method.
@@ -186,89 +184,97 @@
// Allocate context.
{
Label done, slow_path;
- __ TryAllocateArray(kContextCid, Context::InstanceSize(1), &slow_path,
+ __ TryAllocateArray(kContextCid, target::Context::InstanceSize(1),
+ &slow_path,
R0, // instance
R1, // end address
R2, R3);
- __ ldr(R1, Address(THR, Thread::object_null_offset()));
- __ str(R1, FieldAddress(R0, Context::parent_offset()));
+ __ ldr(R1, Address(THR, target::Thread::object_null_offset()));
+ __ str(R1, FieldAddress(R0, target::Context::parent_offset()));
__ LoadImmediate(R1, 1);
- __ str(R1, FieldAddress(R0, Context::num_variables_offset()));
+ __ str(R1, FieldAddress(R0, target::Context::num_variables_offset()));
__ b(&done);
__ Bind(&slow_path);
__ LoadImmediate(/*num_vars=*/R1, 1);
__ LoadObject(CODE_REG, context_allocation_stub);
- __ ldr(R0, FieldAddress(CODE_REG, Code::entry_point_offset()));
+ __ ldr(R0, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
__ blx(R0);
__ Bind(&done);
}
// Store receiver in context
- __ ldr(R1, Address(FP, kWordSize * kReceiverOffset));
- __ StoreIntoObject(R0, FieldAddress(R0, Context::variable_offset(0)), R1);
+ __ ldr(R1, Address(FP, target::kWordSize * kReceiverOffset));
+ __ StoreIntoObject(R0, FieldAddress(R0, target::Context::variable_offset(0)),
+ R1);
// Push context.
__ Push(R0);
// Allocate closure.
__ LoadObject(CODE_REG, closure_allocation_stub);
- __ ldr(R1, FieldAddress(CODE_REG, Code::entry_point_offset(
- Code::EntryKind::kUnchecked)));
+ __ ldr(R1, FieldAddress(CODE_REG, target::Code::entry_point_offset(
+ CodeEntryKind::kUnchecked)));
__ blx(R1);
// Populate closure object.
__ Pop(R1); // Pop context.
- __ StoreIntoObject(R0, FieldAddress(R0, Closure::context_offset()), R1);
+ __ StoreIntoObject(R0, FieldAddress(R0, target::Closure::context_offset()),
+ R1);
__ PopList(1 << R3 | 1 << R1); // Pop type arguments & extracted method.
- __ StoreIntoObjectNoBarrier(R0, FieldAddress(R0, Closure::function_offset()),
- R1);
__ StoreIntoObjectNoBarrier(
- R0, FieldAddress(R0, Closure::instantiator_type_arguments_offset()), R3);
- __ LoadObject(R1, Object::empty_type_arguments());
+ R0, FieldAddress(R0, target::Closure::function_offset()), R1);
__ StoreIntoObjectNoBarrier(
- R0, FieldAddress(R0, Closure::delayed_type_arguments_offset()), R1);
+ R0,
+ FieldAddress(R0, target::Closure::instantiator_type_arguments_offset()),
+ R3);
+ __ LoadObject(R1, EmptyTypeArguments());
+ __ StoreIntoObjectNoBarrier(
+ R0, FieldAddress(R0, target::Closure::delayed_type_arguments_offset()),
+ R1);
__ LeaveStubFrame();
__ Ret();
}
-void StubCode::GenerateNullErrorSharedWithoutFPURegsStub(Assembler* assembler) {
- GenerateSharedStub(assembler, /*save_fpu_registers=*/false,
- &kNullErrorRuntimeEntry,
- Thread::null_error_shared_without_fpu_regs_stub_offset(),
- /*allow_return=*/false);
+void StubCodeCompiler::GenerateNullErrorSharedWithoutFPURegsStub(
+ Assembler* assembler) {
+ GenerateSharedStub(
+ assembler, /*save_fpu_registers=*/false, &kNullErrorRuntimeEntry,
+ target::Thread::null_error_shared_without_fpu_regs_stub_offset(),
+ /*allow_return=*/false);
}
-void StubCode::GenerateNullErrorSharedWithFPURegsStub(Assembler* assembler) {
- GenerateSharedStub(assembler, /*save_fpu_registers=*/true,
- &kNullErrorRuntimeEntry,
- Thread::null_error_shared_with_fpu_regs_stub_offset(),
- /*allow_return=*/false);
+void StubCodeCompiler::GenerateNullErrorSharedWithFPURegsStub(
+ Assembler* assembler) {
+ GenerateSharedStub(
+ assembler, /*save_fpu_registers=*/true, &kNullErrorRuntimeEntry,
+ target::Thread::null_error_shared_with_fpu_regs_stub_offset(),
+ /*allow_return=*/false);
}
-void StubCode::GenerateStackOverflowSharedWithoutFPURegsStub(
+void StubCodeCompiler::GenerateStackOverflowSharedWithoutFPURegsStub(
Assembler* assembler) {
GenerateSharedStub(
assembler, /*save_fpu_registers=*/false, &kStackOverflowRuntimeEntry,
- Thread::stack_overflow_shared_without_fpu_regs_stub_offset(),
+ target::Thread::stack_overflow_shared_without_fpu_regs_stub_offset(),
/*allow_return=*/true);
}
-void StubCode::GenerateStackOverflowSharedWithFPURegsStub(
+void StubCodeCompiler::GenerateStackOverflowSharedWithFPURegsStub(
Assembler* assembler) {
- GenerateSharedStub(assembler, /*save_fpu_registers=*/true,
- &kStackOverflowRuntimeEntry,
- Thread::stack_overflow_shared_with_fpu_regs_stub_offset(),
- /*allow_return=*/true);
+ GenerateSharedStub(
+ assembler, /*save_fpu_registers=*/true, &kStackOverflowRuntimeEntry,
+ target::Thread::stack_overflow_shared_with_fpu_regs_stub_offset(),
+ /*allow_return=*/true);
}
// Input parameters:
// R0 : stop message (const char*).
// Must preserve all registers.
-void StubCode::GeneratePrintStopMessageStub(Assembler* assembler) {
+void StubCodeCompiler::GeneratePrintStopMessageStub(Assembler* assembler) {
__ EnterCallRuntimeFrame(0);
// Call the runtime leaf function. R0 already contains the parameter.
__ CallRuntime(kPrintStopMessageRuntimeEntry, 1);
@@ -284,22 +290,23 @@
// R1 : argc_tag including number of arguments and function kind.
static void GenerateCallNativeWithWrapperStub(Assembler* assembler,
Address wrapper) {
- const intptr_t thread_offset = NativeArguments::thread_offset();
- const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset();
- const intptr_t argv_offset = NativeArguments::argv_offset();
- const intptr_t retval_offset = NativeArguments::retval_offset();
+ const intptr_t thread_offset = target::NativeArguments::thread_offset();
+ const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
+ const intptr_t argv_offset = target::NativeArguments::argv_offset();
+ const intptr_t retval_offset = target::NativeArguments::retval_offset();
__ EnterStubFrame();
// Save exit frame information to enable stack walking as we are about
// to transition to native code.
- __ StoreToOffset(kWord, FP, THR, Thread::top_exit_frame_info_offset());
+ __ StoreToOffset(kWord, FP, THR,
+ target::Thread::top_exit_frame_info_offset());
#if defined(DEBUG)
{
Label ok;
// Check that we are always entering from Dart code.
- __ LoadFromOffset(kWord, R8, THR, Thread::vm_tag_offset());
+ __ LoadFromOffset(kWord, R8, THR, target::Thread::vm_tag_offset());
__ CompareImmediate(R8, VMTag::kDartCompiledTagId);
__ b(&ok, EQ);
__ Stop("Not coming from Dart code.");
@@ -308,37 +315,37 @@
#endif
// Mark that the thread is executing native code.
- __ StoreToOffset(kWord, R9, THR, Thread::vm_tag_offset());
+ __ StoreToOffset(kWord, R9, THR, target::Thread::vm_tag_offset());
// Reserve space for the native arguments structure passed on the stack (the
// outgoing pointer parameter to the native arguments structure is passed in
// R0) and align frame before entering the C++ world.
- __ ReserveAlignedFrameSpace(sizeof(NativeArguments));
+ __ ReserveAlignedFrameSpace(target::NativeArguments::StructSize());
- // Initialize NativeArguments structure and call native function.
+ // Initialize target::NativeArguments structure and call native function.
// Registers R0, R1, R2, and R3 are used.
- ASSERT(thread_offset == 0 * kWordSize);
+ ASSERT(thread_offset == 0 * target::kWordSize);
// Set thread in NativeArgs.
__ mov(R0, Operand(THR));
// There are no native calls to closures, so we do not need to set the tag
// bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_.
- ASSERT(argc_tag_offset == 1 * kWordSize);
- // Set argc in NativeArguments: R1 already contains argc.
+ ASSERT(argc_tag_offset == 1 * target::kWordSize);
+ // Set argc in target::NativeArguments: R1 already contains argc.
- ASSERT(argv_offset == 2 * kWordSize);
- // Set argv in NativeArguments: R2 already contains argv.
+ ASSERT(argv_offset == 2 * target::kWordSize);
+ // Set argv in target::NativeArguments: R2 already contains argv.
- ASSERT(retval_offset == 3 * kWordSize);
// Set retval in NativeArgs.
- __ add(R3, FP, Operand(kCallerSpSlotFromFp * kWordSize));
+ ASSERT(retval_offset == 3 * target::kWordSize);
+ __ add(R3, FP, Operand(2 * target::kWordSize));
// Passing the structure by value as in runtime calls would require changing
// Dart API for native functions.
// For now, space is reserved on the stack and we pass a pointer to it.
__ stm(IA, SP, (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3));
- __ mov(R0, Operand(SP)); // Pass the pointer to the NativeArguments.
+ __ mov(R0, Operand(SP)); // Pass the pointer to the target::NativeArguments.
__ mov(R1, Operand(R9)); // Pass the function entrypoint to call.
@@ -348,26 +355,29 @@
// Mark that the thread is executing Dart code.
__ LoadImmediate(R2, VMTag::kDartCompiledTagId);
- __ StoreToOffset(kWord, R2, THR, Thread::vm_tag_offset());
+ __ StoreToOffset(kWord, R2, THR, target::Thread::vm_tag_offset());
// Reset exit frame information in Isolate structure.
__ LoadImmediate(R2, 0);
- __ StoreToOffset(kWord, R2, THR, Thread::top_exit_frame_info_offset());
+ __ StoreToOffset(kWord, R2, THR,
+ target::Thread::top_exit_frame_info_offset());
__ LeaveStubFrame();
__ Ret();
}
-void StubCode::GenerateCallNoScopeNativeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallNoScopeNativeStub(Assembler* assembler) {
GenerateCallNativeWithWrapperStub(
assembler,
- Address(THR, Thread::no_scope_native_wrapper_entry_point_offset()));
+ Address(THR,
+ target::Thread::no_scope_native_wrapper_entry_point_offset()));
}
-void StubCode::GenerateCallAutoScopeNativeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallAutoScopeNativeStub(Assembler* assembler) {
GenerateCallNativeWithWrapperStub(
assembler,
- Address(THR, Thread::auto_scope_native_wrapper_entry_point_offset()));
+ Address(THR,
+ target::Thread::auto_scope_native_wrapper_entry_point_offset()));
}
// Input parameters:
@@ -376,23 +386,24 @@
// R9 : address of the native function to call.
// R2 : address of first argument in argument array.
// R1 : argc_tag including number of arguments and function kind.
-void StubCode::GenerateCallBootstrapNativeStub(Assembler* assembler) {
- const intptr_t thread_offset = NativeArguments::thread_offset();
- const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset();
- const intptr_t argv_offset = NativeArguments::argv_offset();
- const intptr_t retval_offset = NativeArguments::retval_offset();
+void StubCodeCompiler::GenerateCallBootstrapNativeStub(Assembler* assembler) {
+ const intptr_t thread_offset = target::NativeArguments::thread_offset();
+ const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
+ const intptr_t argv_offset = target::NativeArguments::argv_offset();
+ const intptr_t retval_offset = target::NativeArguments::retval_offset();
__ EnterStubFrame();
// Save exit frame information to enable stack walking as we are about
// to transition to native code.
- __ StoreToOffset(kWord, FP, THR, Thread::top_exit_frame_info_offset());
+ __ StoreToOffset(kWord, FP, THR,
+ target::Thread::top_exit_frame_info_offset());
#if defined(DEBUG)
{
Label ok;
// Check that we are always entering from Dart code.
- __ LoadFromOffset(kWord, R8, THR, Thread::vm_tag_offset());
+ __ LoadFromOffset(kWord, R8, THR, target::Thread::vm_tag_offset());
__ CompareImmediate(R8, VMTag::kDartCompiledTagId);
__ b(&ok, EQ);
__ Stop("Not coming from Dart code.");
@@ -401,48 +412,49 @@
#endif
// Mark that the thread is executing native code.
- __ StoreToOffset(kWord, R9, THR, Thread::vm_tag_offset());
+ __ StoreToOffset(kWord, R9, THR, target::Thread::vm_tag_offset());
// Reserve space for the native arguments structure passed on the stack (the
// outgoing pointer parameter to the native arguments structure is passed in
// R0) and align frame before entering the C++ world.
- __ ReserveAlignedFrameSpace(sizeof(NativeArguments));
+ __ ReserveAlignedFrameSpace(target::NativeArguments::StructSize());
- // Initialize NativeArguments structure and call native function.
+ // Initialize target::NativeArguments structure and call native function.
// Registers R0, R1, R2, and R3 are used.
- ASSERT(thread_offset == 0 * kWordSize);
+ ASSERT(thread_offset == 0 * target::kWordSize);
// Set thread in NativeArgs.
__ mov(R0, Operand(THR));
// There are no native calls to closures, so we do not need to set the tag
// bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_.
- ASSERT(argc_tag_offset == 1 * kWordSize);
- // Set argc in NativeArguments: R1 already contains argc.
+ ASSERT(argc_tag_offset == 1 * target::kWordSize);
+ // Set argc in target::NativeArguments: R1 already contains argc.
- ASSERT(argv_offset == 2 * kWordSize);
- // Set argv in NativeArguments: R2 already contains argv.
+ ASSERT(argv_offset == 2 * target::kWordSize);
+ // Set argv in target::NativeArguments: R2 already contains argv.
- ASSERT(retval_offset == 3 * kWordSize);
// Set retval in NativeArgs.
- __ add(R3, FP, Operand(kCallerSpSlotFromFp * kWordSize));
+ ASSERT(retval_offset == 3 * target::kWordSize);
+ __ add(R3, FP, Operand(2 * target::kWordSize));
// Passing the structure by value as in runtime calls would require changing
// Dart API for native functions.
// For now, space is reserved on the stack and we pass a pointer to it.
__ stm(IA, SP, (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3));
- __ mov(R0, Operand(SP)); // Pass the pointer to the NativeArguments.
+ __ mov(R0, Operand(SP)); // Pass the pointer to the target::NativeArguments.
// Call native function or redirection via simulator.
__ blx(R9);
// Mark that the thread is executing Dart code.
__ LoadImmediate(R2, VMTag::kDartCompiledTagId);
- __ StoreToOffset(kWord, R2, THR, Thread::vm_tag_offset());
+ __ StoreToOffset(kWord, R2, THR, target::Thread::vm_tag_offset());
// Reset exit frame information in Isolate structure.
__ LoadImmediate(R2, 0);
- __ StoreToOffset(kWord, R2, THR, Thread::top_exit_frame_info_offset());
+ __ StoreToOffset(kWord, R2, THR,
+ target::Thread::top_exit_frame_info_offset());
__ LeaveStubFrame();
__ Ret();
@@ -450,7 +462,7 @@
// Input parameters:
// R4: arguments descriptor array.
-void StubCode::GenerateCallStaticFunctionStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
// Create a stub frame as we are pushing some objects on the stack before
// calling into the runtime.
__ EnterStubFrame();
@@ -464,17 +476,18 @@
__ LeaveStubFrame();
// Jump to the dart function.
__ mov(CODE_REG, Operand(R0));
- __ Branch(FieldAddress(R0, Code::entry_point_offset()));
+ __ Branch(FieldAddress(R0, target::Code::entry_point_offset()));
}
// Called from a static call only when an invalid code has been entered
// (invalid because its function was optimized or deoptimized).
// R4: arguments descriptor array.
-void StubCode::GenerateFixCallersTargetStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
// Load code pointer to this stub from the thread:
// The one that is passed in is not correct - it points to the code object
// that needs to be replaced.
- __ ldr(CODE_REG, Address(THR, Thread::fix_callers_target_code_offset()));
+ __ ldr(CODE_REG,
+ Address(THR, target::Thread::fix_callers_target_code_offset()));
// Create a stub frame as we are pushing some objects on the stack before
// calling into the runtime.
__ EnterStubFrame();
@@ -488,16 +501,18 @@
__ LeaveStubFrame();
// Jump to the dart function.
__ mov(CODE_REG, Operand(R0));
- __ Branch(FieldAddress(R0, Code::entry_point_offset()));
+ __ Branch(FieldAddress(R0, target::Code::entry_point_offset()));
}
// Called from object allocate instruction when the allocation stub has been
// disabled.
-void StubCode::GenerateFixAllocationStubTargetStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateFixAllocationStubTargetStub(
+ Assembler* assembler) {
// Load code pointer to this stub from the thread:
// The one that is passed in is not correct - it points to the code object
// that needs to be replaced.
- __ ldr(CODE_REG, Address(THR, Thread::fix_allocation_stub_code_offset()));
+ __ ldr(CODE_REG,
+ Address(THR, target::Thread::fix_allocation_stub_code_offset()));
__ EnterStubFrame();
// Setup space on stack for return value.
__ LoadImmediate(R0, 0);
@@ -509,23 +524,24 @@
__ LeaveStubFrame();
// Jump to the dart function.
__ mov(CODE_REG, Operand(R0));
- __ Branch(FieldAddress(R0, Code::entry_point_offset()));
+ __ Branch(FieldAddress(R0, target::Code::entry_point_offset()));
}
// Input parameters:
// R2: smi-tagged argument count, may be zero.
-// FP[kParamEndSlotFromFp + 1]: last argument.
+// FP[target::frame_layout.param_end_from_fp + 1]: last argument.
static void PushArrayOfArguments(Assembler* assembler) {
// Allocate array to store arguments of caller.
- __ LoadObject(R1, Object::null_object());
+ __ LoadObject(R1, NullObject());
// R1: null element type for raw Array.
// R2: smi-tagged argument count, may be zero.
- __ BranchLink(StubCode::AllocateArray());
+ __ BranchLink(StubCodeAllocateArray());
// R0: newly allocated array.
// R2: smi-tagged argument count, may be zero (was preserved by the stub).
__ Push(R0); // Array is in R0 and on top of stack.
- __ AddImmediate(R1, FP, kParamEndSlotFromFp * kWordSize);
- __ AddImmediate(R3, R0, Array::data_offset() - kHeapObjectTag);
+ __ AddImmediate(R1, FP,
+ target::frame_layout.param_end_from_fp * target::kWordSize);
+ __ AddImmediate(R3, R0, target::Array::data_offset() - kHeapObjectTag);
// Copy arguments from stack to array (starting at the end).
// R1: address just beyond last argument on stack.
// R3: address of first argument in array.
@@ -533,11 +549,11 @@
__ b(&enter);
Label loop;
__ Bind(&loop);
- __ ldr(R8, Address(R1, kWordSize, Address::PreIndex));
+ __ ldr(R8, Address(R1, target::kWordSize, Address::PreIndex));
// Generational barrier is needed, array is not necessarily in new space.
__ StoreIntoObject(R0, Address(R3, R2, LSL, 1), R8);
__ Bind(&enter);
- __ subs(R2, R2, Operand(Smi::RawValue(1))); // R2 is Smi.
+ __ subs(R2, R2, Operand(target::ToRawSmi(1))); // R2 is Smi.
__ b(&loop, PL);
}
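
In C-like terms, the copy loop above does roughly the following (a sketch only; the variable names are invented, and the stub operates on raw stack slots with a smi-tagged count in R2):

    // Fill the array from its last element down to index 0.
    while (--argc >= 0) {                  // subs R2, R2, Smi(1); b PL
      RawObject* arg = *(++stack_cursor);  // ldr R8, [R1, #kWordSize]!
      array_data[argc] = arg;              // StoreIntoObject via [R3+R2<<1]
    }

StoreIntoObject is used rather than a plain store because, as the comment notes, the freshly allocated array is not necessarily in new space, so the generational write barrier may have to record the stores.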
@@ -593,7 +609,7 @@
if (i == CODE_REG) {
// Save the original value of CODE_REG pushed before invoking this stub
// instead of the value used to call this stub.
- __ ldr(IP, Address(FP, kCallerSpSlotFromFp * kWordSize));
+ __ ldr(IP, Address(FP, 2 * target::kWordSize));
__ Push(IP);
} else if (i == SP) {
// Push(SP) has unpredictable behavior.
@@ -605,7 +621,7 @@
}
if (TargetCPUFeatures::vfp_supported()) {
- ASSERT(kFpuRegisterSize == 4 * kWordSize);
+ ASSERT(kFpuRegisterSize == 4 * target::kWordSize);
if (kNumberOfDRegisters > 16) {
__ vstmd(DB_W, SP, D16, kNumberOfDRegisters - 16);
__ vstmd(DB_W, SP, D0, 16);
@@ -626,11 +642,11 @@
if (kind == kLazyDeoptFromReturn) {
// Restore result into R1 temporarily.
- __ ldr(R1, Address(FP, saved_result_slot_from_fp * kWordSize));
+ __ ldr(R1, Address(FP, saved_result_slot_from_fp * target::kWordSize));
} else if (kind == kLazyDeoptFromThrow) {
// Restore result into R1 temporarily.
- __ ldr(R1, Address(FP, saved_exception_slot_from_fp * kWordSize));
- __ ldr(R2, Address(FP, saved_stacktrace_slot_from_fp * kWordSize));
+ __ ldr(R1, Address(FP, saved_exception_slot_from_fp * target::kWordSize));
+ __ ldr(R2, Address(FP, saved_stacktrace_slot_from_fp * target::kWordSize));
}
__ RestoreCodePointer();
@@ -652,14 +668,14 @@
if (kind == kLazyDeoptFromReturn) {
// Restore result into R1.
__ ldr(R1, Address(FP, compiler::target::frame_layout.first_local_from_fp *
- kWordSize));
+ target::kWordSize));
} else if (kind == kLazyDeoptFromThrow) {
// Restore result into R1.
__ ldr(R1, Address(FP, compiler::target::frame_layout.first_local_from_fp *
- kWordSize));
+ target::kWordSize));
__ ldr(R2, Address(FP, (compiler::target::frame_layout.first_local_from_fp -
1) *
- kWordSize));
+ target::kWordSize));
}
// Code above cannot cause GC.
__ RestoreCodePointer();
@@ -676,7 +692,7 @@
__ Push(R1); // Preserve exception, it will be GC-d here.
__ Push(R2); // Preserve stacktrace, it will be GC-d here.
}
- __ PushObject(Smi::ZoneHandle()); // Space for the result.
+ __ PushObject(NullObject()); // Space for the result.
__ CallRuntime(kDeoptimizeMaterializeRuntimeEntry, 0);
// Result tells stub how many bytes to remove from the expression stack
// of the bottom-most frame. They were used as materialization arguments.
@@ -694,33 +710,37 @@
}
// R0: result, must be preserved
-void StubCode::GenerateDeoptimizeLazyFromReturnStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub(
+ Assembler* assembler) {
// Push zap value instead of CODE_REG for lazy deopt.
__ LoadImmediate(IP, kZapCodeReg);
__ Push(IP);
// Return address for "call" to deopt stub.
__ LoadImmediate(LR, kZapReturnAddress);
- __ ldr(CODE_REG, Address(THR, Thread::lazy_deopt_from_return_stub_offset()));
+ __ ldr(CODE_REG,
+ Address(THR, target::Thread::lazy_deopt_from_return_stub_offset()));
GenerateDeoptimizationSequence(assembler, kLazyDeoptFromReturn);
__ Ret();
}
// R0: exception, must be preserved
// R1: stacktrace, must be preserved
-void StubCode::GenerateDeoptimizeLazyFromThrowStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub(
+ Assembler* assembler) {
// Push zap value instead of CODE_REG for lazy deopt.
__ LoadImmediate(IP, kZapCodeReg);
__ Push(IP);
// Return address for "call" to deopt stub.
__ LoadImmediate(LR, kZapReturnAddress);
- __ ldr(CODE_REG, Address(THR, Thread::lazy_deopt_from_throw_stub_offset()));
+ __ ldr(CODE_REG,
+ Address(THR, target::Thread::lazy_deopt_from_throw_stub_offset()));
GenerateDeoptimizationSequence(assembler, kLazyDeoptFromThrow);
__ Ret();
}
-void StubCode::GenerateDeoptimizeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptimizeStub(Assembler* assembler) {
__ Push(CODE_REG);
- __ ldr(CODE_REG, Address(THR, Thread::deoptimize_stub_offset()));
+ __ ldr(CODE_REG, Address(THR, target::Thread::deoptimize_stub_offset()));
GenerateDeoptimizationSequence(assembler, kEagerDeopt);
__ Ret();
}
@@ -730,13 +750,14 @@
__ Comment("NoSuchMethodDispatch");
// When lazily generated invocation dispatchers are disabled, the
// miss-handler may return null.
- __ CompareObject(R0, Object::null_object());
+ __ CompareObject(R0, NullObject());
__ b(call_target_function, NE);
__ EnterStubFrame();
// Load the receiver.
- __ ldr(R2, FieldAddress(R4, ArgumentsDescriptor::count_offset()));
+ __ ldr(R2, FieldAddress(R4, target::ArgumentsDescriptor::count_offset()));
__ add(IP, FP, Operand(R2, LSL, 1)); // R2 is Smi.
- __ ldr(R8, Address(IP, kParamEndSlotFromFp * kWordSize));
+ __ ldr(R8, Address(IP, target::frame_layout.param_end_from_fp *
+ target::kWordSize));
__ LoadImmediate(IP, 0);
__ Push(IP); // Result slot.
__ Push(R8); // Receiver.
@@ -744,9 +765,11 @@
__ Push(R4); // Arguments descriptor.
// Adjust arguments count.
- __ ldr(R3, FieldAddress(R4, ArgumentsDescriptor::type_args_len_offset()));
+ __ ldr(R3,
+ FieldAddress(R4, target::ArgumentsDescriptor::type_args_len_offset()));
__ cmp(R3, Operand(0));
- __ AddImmediate(R2, R2, Smi::RawValue(1), NE); // Include the type arguments.
+ __ AddImmediate(R2, R2, target::ToRawSmi(1),
+ NE); // Include the type arguments.
// R2: Smi-tagged arguments array length.
PushArrayOfArguments(assembler);
@@ -758,14 +781,14 @@
__ Ret();
}
-void StubCode::GenerateMegamorphicMissStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateMegamorphicMissStub(Assembler* assembler) {
__ EnterStubFrame();
// Load the receiver.
- __ ldr(R2, FieldAddress(R4, ArgumentsDescriptor::count_offset()));
+ __ ldr(R2, FieldAddress(R4, target::ArgumentsDescriptor::count_offset()));
__ add(IP, FP, Operand(R2, LSL, 1)); // R2 is Smi.
__ ldr(R8, Address(IP, compiler::target::frame_layout.param_end_from_fp *
- kWordSize));
+ target::kWordSize));
// Preserve IC data and arguments descriptor.
__ PushList((1 << R4) | (1 << R9));
@@ -793,8 +816,8 @@
}
// Tail-call to target function.
- __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
- __ Branch(FieldAddress(R0, Function::entry_point_offset()));
+ __ ldr(CODE_REG, FieldAddress(R0, target::Function::code_offset()));
+ __ Branch(FieldAddress(R0, target::Function::entry_point_offset()));
}
// Called for inline allocation of arrays.
@@ -803,11 +826,12 @@
// R1: array element type (either NULL or an instantiated type).
// R2: array length as Smi (must be preserved).
// The newly allocated object is returned in R0.
-void StubCode::GenerateAllocateArrayStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
Label slow_case;
// Compute the size to be allocated, it is based on the array length
// and is computed as:
- // RoundedAllocationSize((array_length * kwordSize) + sizeof(RawArray)).
+ // RoundedAllocationSize(
+  //     (array_length * target::kWordSize) + target::Array::header_size()).
__ mov(R3, Operand(R2)); // Array length.
// Check that length is a positive Smi.
__ tst(R3, Operand(kSmiTagMask));
@@ -821,7 +845,7 @@
// Check for maximum allowed length.
const intptr_t max_len =
- reinterpret_cast<int32_t>(Smi::New(Array::kMaxNewSpaceElements));
+ target::ToRawSmi(target::Array::kMaxNewSpaceElements);
__ CompareImmediate(R3, max_len);
__ b(&slow_case, GT);
@@ -830,15 +854,16 @@
NOT_IN_PRODUCT(__ MaybeTraceAllocation(R4, &slow_case));
const intptr_t fixed_size_plus_alignment_padding =
- sizeof(RawArray) + kObjectAlignment - 1;
+ target::Array::header_size() + target::ObjectAlignment::kObjectAlignment -
+ 1;
__ LoadImmediate(R9, fixed_size_plus_alignment_padding);
__ add(R9, R9, Operand(R3, LSL, 1)); // R3 is a Smi.
ASSERT(kSmiTagShift == 1);
- __ bic(R9, R9, Operand(kObjectAlignment - 1));
+ __ bic(R9, R9, Operand(target::ObjectAlignment::kObjectAlignment - 1));
// R9: Allocation size.
// Potential new object start.
- __ ldr(R0, Address(THR, Thread::top_offset()));
+ __ ldr(R0, Address(THR, target::Thread::top_offset()));
__ adds(NOTFP, R0, Operand(R9)); // Potential next object start.
__ b(&slow_case, CS); // Branch if unsigned overflow.
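
The add/bic pair above is the standard align-up idiom; a sketch of the same computation in C++ (the AllocateContext stub below repeats the pattern with target::Context::header_size()):

    // Round the raw size up to kObjectAlignment (a power of two).
    intptr_t AllocationSize(intptr_t length) {
      const intptr_t align = target::ObjectAlignment::kObjectAlignment;
      intptr_t size = target::Array::header_size() +
                      length * target::kWordSize;  // add R9, R9, R3, LSL 1
      return (size + align - 1) & ~(align - 1);    // bic R9, R9, align - 1
    }

R3 holds a Smi, so LSL 1 turns the tagged value (length << 1) into length * target::kWordSize on this 32-bit target.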
@@ -846,14 +871,14 @@
// R0: potential new object start.
// NOTFP: potential next object start.
// R9: allocation size.
- __ ldr(R3, Address(THR, Thread::end_offset()));
+ __ ldr(R3, Address(THR, target::Thread::end_offset()));
__ cmp(NOTFP, Operand(R3));
__ b(&slow_case, CS);
// Successfully allocated the object(s), now update top to point to
// next object start and initialize the object.
NOT_IN_PRODUCT(__ LoadAllocationStatsAddress(R3, cid));
- __ str(NOTFP, Address(THR, Thread::top_offset()));
+ __ str(NOTFP, Address(THR, target::Thread::top_offset()));
__ add(R0, R0, Operand(kHeapObjectTag));
// Initialize the tags.
@@ -862,30 +887,31 @@
// NOTFP: new object end address.
// R9: allocation size.
{
- const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2;
+ const intptr_t shift = target::RawObject::kTagBitsSizeTagPos -
+ target::ObjectAlignment::kObjectAlignmentLog2;
- __ CompareImmediate(R9, RawObject::SizeTag::kMaxSizeTag);
+ __ CompareImmediate(R9, target::RawObject::kSizeTagMaxSizeTag);
__ mov(R8, Operand(R9, LSL, shift), LS);
__ mov(R8, Operand(0), HI);
// Get the class index and insert it into the tags.
// R8: size and bit tags.
- uint32_t tags = 0;
- tags = RawObject::ClassIdTag::update(cid, tags);
- tags = RawObject::NewBit::update(true, tags);
+ const uint32_t tags =
+ target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
__ LoadImmediate(TMP, tags);
__ orr(R8, R8, Operand(TMP));
- __ str(R8, FieldAddress(R0, Array::tags_offset())); // Store tags.
+ __ str(R8, FieldAddress(R0, target::Array::tags_offset())); // Store tags.
}
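
MakeTagWordForNewSpaceObject replaces the open-coded bit updates deleted in this hunk. A sketch of what it packs, inferred from the removed code (the helper names here are invented; the exact bit positions belong to the runtime):

    uint32_t MakeTagWord(intptr_t cid, intptr_t instance_size) {
      uint32_t tags = 0;
      tags |= SizeTagFor(instance_size);  // RawObject::SizeTag
      tags |= ClassIdTagFor(cid);         // RawObject::ClassIdTag
      tags |= kNewBit;                    // RawObject::NewBit (new space)
      return tags;
    }

Passing instance_size == 0 leaves the size-tag bits clear, which is why the stub still ORs in the size tag it computed at runtime in R8.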
// R0: new object start as a tagged pointer.
// NOTFP: new object end address.
// Store the type argument field.
__ StoreIntoObjectNoBarrier(
- R0, FieldAddress(R0, Array::type_arguments_offset()), R1);
+ R0, FieldAddress(R0, target::Array::type_arguments_offset()), R1);
// Set the length field.
- __ StoreIntoObjectNoBarrier(R0, FieldAddress(R0, Array::length_offset()), R2);
+ __ StoreIntoObjectNoBarrier(
+ R0, FieldAddress(R0, target::Array::length_offset()), R2);
// Initialize all array elements to raw_null.
// R0: new object start as a tagged pointer.
@@ -897,9 +923,9 @@
// R9: allocation size.
NOT_IN_PRODUCT(__ IncrementAllocationStatsWithSize(R3, R9));
- __ LoadObject(R8, Object::null_object());
+ __ LoadObject(R8, NullObject());
__ mov(R9, Operand(R8));
- __ AddImmediate(R4, R0, sizeof(RawArray) - kHeapObjectTag);
+ __ AddImmediate(R4, R0, target::Array::header_size() - kHeapObjectTag);
__ InitializeFieldsNoBarrier(R0, R4, NOTFP, R8, R9);
__ Ret(); // Returns the newly allocated object in R0.
// Unable to allocate the array using the fast inline code, just call
@@ -928,12 +954,12 @@
// R1 : arguments descriptor array.
// R2 : arguments array.
// R3 : current thread.
-void StubCode::GenerateInvokeDartCodeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateInvokeDartCodeStub(Assembler* assembler) {
__ Push(LR); // Marker for the profiler.
__ EnterFrame((1 << FP) | (1 << LR), 0);
// Push code object to PC marker slot.
- __ ldr(IP, Address(R3, Thread::invoke_dart_code_stub_offset()));
+ __ ldr(IP, Address(R3, target::Thread::invoke_dart_code_stub_offset()));
__ Push(IP);
// Save new context and C++ ABI callee-saved registers.
@@ -954,44 +980,49 @@
}
// Save the current VMTag on the stack.
- __ LoadFromOffset(kWord, R9, THR, Thread::vm_tag_offset());
+ __ LoadFromOffset(kWord, R9, THR, target::Thread::vm_tag_offset());
__ Push(R9);
// Save top resource and top exit frame info. Use R4-6 as temporary registers.
// StackFrameIterator reads the top exit frame info saved in this frame.
- __ LoadFromOffset(kWord, R9, THR, Thread::top_exit_frame_info_offset());
- __ LoadFromOffset(kWord, R4, THR, Thread::top_resource_offset());
+ __ LoadFromOffset(kWord, R9, THR,
+ target::Thread::top_exit_frame_info_offset());
+ __ LoadFromOffset(kWord, R4, THR, target::Thread::top_resource_offset());
__ LoadImmediate(R8, 0);
- __ StoreToOffset(kWord, R8, THR, Thread::top_resource_offset());
- __ StoreToOffset(kWord, R8, THR, Thread::top_exit_frame_info_offset());
+ __ StoreToOffset(kWord, R8, THR, target::Thread::top_resource_offset());
+ __ StoreToOffset(kWord, R8, THR,
+ target::Thread::top_exit_frame_info_offset());
- // kExitLinkSlotFromEntryFp must be kept in sync with the code below.
+ // target::frame_layout.exit_link_slot_from_entry_fp must be kept in sync
+ // with the code below.
__ Push(R4);
#if defined(TARGET_OS_MACOS) || defined(TARGET_OS_MACOS_IOS)
- ASSERT(kExitLinkSlotFromEntryFp == -26);
+ ASSERT(target::frame_layout.exit_link_slot_from_entry_fp == -26);
#else
- ASSERT(kExitLinkSlotFromEntryFp == -27);
+ ASSERT(target::frame_layout.exit_link_slot_from_entry_fp == -27);
#endif
__ Push(R9);
// Mark that the thread is executing Dart code. Do this after initializing the
// exit link for the profiler.
__ LoadImmediate(R9, VMTag::kDartCompiledTagId);
- __ StoreToOffset(kWord, R9, THR, Thread::vm_tag_offset());
+ __ StoreToOffset(kWord, R9, THR, target::Thread::vm_tag_offset());
// Load arguments descriptor array into R4, which is passed to Dart code.
__ ldr(R4, Address(R1, VMHandles::kOffsetOfRawPtrInHandle));
// Load number of arguments into R9 and adjust count for type arguments.
- __ ldr(R3, FieldAddress(R4, ArgumentsDescriptor::type_args_len_offset()));
- __ ldr(R9, FieldAddress(R4, ArgumentsDescriptor::count_offset()));
+ __ ldr(R3,
+ FieldAddress(R4, target::ArgumentsDescriptor::type_args_len_offset()));
+ __ ldr(R9, FieldAddress(R4, target::ArgumentsDescriptor::count_offset()));
__ cmp(R3, Operand(0));
- __ AddImmediate(R9, R9, Smi::RawValue(1), NE); // Include the type arguments.
+ __ AddImmediate(R9, R9, target::ToRawSmi(1),
+ NE); // Include the type arguments.
__ SmiUntag(R9);
// Compute address of 'arguments array' data area into R2.
__ ldr(R2, Address(R2, VMHandles::kOffsetOfRawPtrInHandle));
- __ AddImmediate(R2, Array::data_offset() - kHeapObjectTag);
+ __ AddImmediate(R2, target::Array::data_offset() - kHeapObjectTag);
// Set up arguments for the Dart call.
Label push_arguments;
@@ -1002,7 +1033,7 @@
__ Bind(&push_arguments);
__ ldr(R3, Address(R2));
__ Push(R3);
- __ AddImmediate(R2, kWordSize);
+ __ AddImmediate(R2, target::kWordSize);
__ AddImmediate(R1, 1);
__ cmp(R1, Operand(R9));
__ b(&push_arguments, LT);
@@ -1010,27 +1041,30 @@
// Call the Dart code entrypoint.
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
- __ ldr(PP, Address(THR, Thread::global_object_pool_offset()));
+ __ ldr(PP, Address(THR, target::Thread::global_object_pool_offset()));
} else {
__ LoadImmediate(PP, 0); // GC safe value into PP.
}
__ ldr(CODE_REG, Address(R0, VMHandles::kOffsetOfRawPtrInHandle));
- __ ldr(R0, FieldAddress(CODE_REG, Code::entry_point_offset()));
+ __ ldr(R0, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
__ blx(R0); // R4 is the arguments descriptor array.
// Get rid of arguments pushed on the stack.
- __ AddImmediate(SP, FP, kExitLinkSlotFromEntryFp * kWordSize);
+ __ AddImmediate(
+ SP, FP,
+ target::frame_layout.exit_link_slot_from_entry_fp * target::kWordSize);
// Restore the saved top exit frame info and top resource back into the
// Isolate structure. Uses R9 as a temporary register for this.
__ Pop(R9);
- __ StoreToOffset(kWord, R9, THR, Thread::top_exit_frame_info_offset());
+ __ StoreToOffset(kWord, R9, THR,
+ target::Thread::top_exit_frame_info_offset());
__ Pop(R9);
- __ StoreToOffset(kWord, R9, THR, Thread::top_resource_offset());
+ __ StoreToOffset(kWord, R9, THR, target::Thread::top_resource_offset());
// Restore the current VMTag from the stack.
__ Pop(R4);
- __ StoreToOffset(kWord, R4, THR, Thread::vm_tag_offset());
+ __ StoreToOffset(kWord, R4, THR, target::Thread::vm_tag_offset());
// Restore C++ ABI callee-saved registers.
if (TargetCPUFeatures::vfp_supported()) {
@@ -1049,7 +1083,8 @@
__ Ret();
}
-void StubCode::GenerateInvokeDartCodeFromBytecodeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateInvokeDartCodeFromBytecodeStub(
+ Assembler* assembler) {
__ Unimplemented("Interpreter not yet supported");
}
@@ -1058,17 +1093,18 @@
// R1: number of context variables.
// Output:
// R0: newly allocated RawContext object.
-void StubCode::GenerateAllocateContextStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
if (FLAG_inline_alloc) {
Label slow_case;
// First compute the rounded instance size.
// R1: number of context variables.
intptr_t fixed_size_plus_alignment_padding =
- sizeof(RawContext) + kObjectAlignment - 1;
+ target::Context::header_size() +
+ target::ObjectAlignment::kObjectAlignment - 1;
__ LoadImmediate(R2, fixed_size_plus_alignment_padding);
__ add(R2, R2, Operand(R1, LSL, 2));
ASSERT(kSmiTagShift == 1);
- __ bic(R2, R2, Operand(kObjectAlignment - 1));
+ __ bic(R2, R2, Operand(target::ObjectAlignment::kObjectAlignment - 1));
NOT_IN_PRODUCT(__ LoadAllocationStatsAddress(R8, kContextCid));
NOT_IN_PRODUCT(__ MaybeTraceAllocation(R8, &slow_case));
@@ -1076,14 +1112,14 @@
// R1: number of context variables.
// R2: object size.
const intptr_t cid = kContextCid;
- __ ldr(R0, Address(THR, Thread::top_offset()));
+ __ ldr(R0, Address(THR, target::Thread::top_offset()));
__ add(R3, R2, Operand(R0));
// Check if the allocation fits into the remaining space.
// R0: potential new object.
// R1: number of context variables.
// R2: object size.
// R3: potential next object start.
- __ ldr(IP, Address(THR, Thread::end_offset()));
+ __ ldr(IP, Address(THR, target::Thread::end_offset()));
__ cmp(R3, Operand(IP));
if (FLAG_use_slow_path) {
__ b(&slow_case);
@@ -1098,7 +1134,7 @@
// R2: object size.
// R3: next object start.
NOT_IN_PRODUCT(__ LoadAllocationStatsAddress(R4, cid));
- __ str(R3, Address(THR, Thread::top_offset()));
+ __ str(R3, Address(THR, target::Thread::top_offset()));
__ add(R0, R0, Operand(kHeapObjectTag));
// Calculate the size tag.
@@ -1107,20 +1143,21 @@
// R2: object size.
// R3: next object start.
// R4: allocation stats address.
- const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2;
- __ CompareImmediate(R2, RawObject::SizeTag::kMaxSizeTag);
+ const intptr_t shift = target::RawObject::kTagBitsSizeTagPos -
+ target::ObjectAlignment::kObjectAlignmentLog2;
+ __ CompareImmediate(R2, target::RawObject::kSizeTagMaxSizeTag);
// If no size tag overflow, shift R2 left, else set R2 to zero.
__ mov(R9, Operand(R2, LSL, shift), LS);
__ mov(R9, Operand(0), HI);
// Get the class index and insert it into the tags.
// R9: size and bit tags.
- uint32_t tags = 0;
- tags = RawObject::ClassIdTag::update(cid, tags);
- tags = RawObject::NewBit::update(true, tags);
+ const uint32_t tags =
+ target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
+
__ LoadImmediate(IP, tags);
__ orr(R9, R9, Operand(IP));
- __ str(R9, FieldAddress(R0, Context::tags_offset()));
+ __ str(R9, FieldAddress(R0, target::Object::tags_offset()));
// Setup up number of context variables field.
// R0: new object.
@@ -1128,7 +1165,7 @@
// R2: object size.
// R3: next object start.
// R4: allocation stats address.
- __ str(R1, FieldAddress(R0, Context::num_variables_offset()));
+ __ str(R1, FieldAddress(R0, target::Context::num_variables_offset()));
// Setup the parent field.
// R0: new object.
@@ -1136,9 +1173,9 @@
// R2: object size.
// R3: next object start.
// R4: allocation stats address.
- __ LoadObject(R8, Object::null_object());
- __ StoreIntoObjectNoBarrier(R0, FieldAddress(R0, Context::parent_offset()),
- R8);
+ __ LoadObject(R8, NullObject());
+ __ StoreIntoObjectNoBarrier(
+ R0, FieldAddress(R0, target::Context::parent_offset()), R8);
// Initialize the context variables.
// R0: new object.
@@ -1148,7 +1185,8 @@
// R8, R9: raw null.
// R4: allocation stats address.
Label loop;
- __ AddImmediate(NOTFP, R0, Context::variable_offset(0) - kHeapObjectTag);
+ __ AddImmediate(NOTFP, R0,
+ target::Context::variable_offset(0) - kHeapObjectTag);
__ InitializeFieldsNoBarrier(R0, NOTFP, R3, R8, R9);
NOT_IN_PRODUCT(__ IncrementAllocationStatsWithSize(R4, R2));
@@ -1174,7 +1212,7 @@
__ Ret();
}
-void StubCode::GenerateWriteBarrierWrappersStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateWriteBarrierWrappersStub(Assembler* assembler) {
RegList saved = (1 << LR) | (1 << kWriteBarrierObjectReg);
for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
@@ -1183,7 +1221,8 @@
intptr_t start = __ CodeSize();
__ PushList(saved);
__ mov(kWriteBarrierObjectReg, Operand(reg));
- __ ldr(LR, Address(THR, Thread::write_barrier_entry_point_offset()));
+ __ ldr(LR,
+ Address(THR, target::Thread::write_barrier_entry_point_offset()));
__ blx(LR);
__ PopList(saved);
__ bx(LR);
@@ -1207,18 +1246,18 @@
Address stub_code,
bool cards) {
Label add_to_mark_stack, remember_card;
- __ tst(R0, Operand(1 << kNewObjectBitPosition));
+ __ tst(R0, Operand(1 << target::ObjectAlignment::kNewObjectBitPosition));
__ b(&add_to_mark_stack, ZERO);
if (cards) {
- __ ldr(TMP, FieldAddress(R1, Object::tags_offset()));
- __ tst(TMP, Operand(1 << RawObject::kCardRememberedBit));
+ __ ldr(TMP, FieldAddress(R1, target::Object::tags_offset()));
+ __ tst(TMP, Operand(1 << target::RawObject::kCardRememberedBit));
__ b(&remember_card, NOT_ZERO);
} else {
#if defined(DEBUG)
Label ok;
- __ ldr(TMP, FieldAddress(R1, Object::tags_offset()));
- __ tst(TMP, Operand(1 << RawObject::kCardRememberedBit));
+ __ ldr(TMP, FieldAddress(R1, target::Object::tags_offset()));
+ __ tst(TMP, Operand(1 << target::RawObject::kCardRememberedBit));
__ b(&ok, ZERO);
__ Stop("Wrong barrier");
__ Bind(&ok);
@@ -1233,18 +1272,18 @@
#if !defined(USING_SIMULATOR)
ASSERT(OS::NumberOfAvailableProcessors() <= 1);
#endif
- __ ldr(R2, FieldAddress(R1, Object::tags_offset()));
- __ bic(R2, R2, Operand(1 << RawObject::kOldAndNotRememberedBit));
- __ str(R2, FieldAddress(R1, Object::tags_offset()));
+ __ ldr(R2, FieldAddress(R1, target::Object::tags_offset()));
+ __ bic(R2, R2, Operand(1 << target::RawObject::kOldAndNotRememberedBit));
+ __ str(R2, FieldAddress(R1, target::Object::tags_offset()));
} else {
// Atomically set the remembered bit of the object header.
- ASSERT(Object::tags_offset() == 0);
+ ASSERT(target::Object::tags_offset() == 0);
__ sub(R3, R1, Operand(kHeapObjectTag));
// R3: Untagged address of header word (ldrex/strex do not support offsets).
Label retry;
__ Bind(&retry);
__ ldrex(R2, R3);
- __ bic(R2, R2, Operand(1 << RawObject::kOldAndNotRememberedBit));
+ __ bic(R2, R2, Operand(1 << target::RawObject::kOldAndNotRememberedBit));
__ strex(R4, R2, R3);
__ cmp(R4, Operand(1));
__ b(&retry, EQ);
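
The ldrex/strex pair is ARM's load-linked/store-conditional; in C++ atomics terms the retry loop is roughly (a sketch, ignoring memory-order details):

    // header is a std::atomic<uint32_t>* at the untagged object address.
    uint32_t old_tags, new_tags;
    do {
      old_tags = header->load();  // ldrex R2, [R3]
      new_tags = old_tags &
                 ~(1u << target::RawObject::kOldAndNotRememberedBit);
    } while (!header->compare_exchange_weak(old_tags, new_tags));  // strex

strex writes 1 to R4 when the exclusive store fails, hence the cmp against 1 and the branch back to retry.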
@@ -1252,18 +1291,18 @@
// Load the StoreBuffer block out of the thread. Then load top_ out of the
// StoreBufferBlock and add the address to the pointers_.
- __ ldr(R4, Address(THR, Thread::store_buffer_block_offset()));
- __ ldr(R2, Address(R4, StoreBufferBlock::top_offset()));
- __ add(R3, R4, Operand(R2, LSL, kWordSizeLog2));
- __ str(R1, Address(R3, StoreBufferBlock::pointers_offset()));
+ __ ldr(R4, Address(THR, target::Thread::store_buffer_block_offset()));
+ __ ldr(R2, Address(R4, target::StoreBufferBlock::top_offset()));
+ __ add(R3, R4, Operand(R2, LSL, target::kWordSizeLog2));
+ __ str(R1, Address(R3, target::StoreBufferBlock::pointers_offset()));
// Increment top_ and check for overflow.
// R2: top_.
// R4: StoreBufferBlock.
Label overflow;
__ add(R2, R2, Operand(1));
- __ str(R2, Address(R4, StoreBufferBlock::top_offset()));
- __ CompareImmediate(R2, StoreBufferBlock::kSize);
+ __ str(R2, Address(R4, target::StoreBufferBlock::top_offset()));
+ __ CompareImmediate(R2, target::StoreBufferBlock::kSize);
// Restore values.
__ PopList((1 << R2) | (1 << R3) | (1 << R4));
__ b(&overflow, EQ);
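
Expressed as a C++ sketch (field names follow the offsets the stub uses), the block above appends the object to the thread-local store buffer:

    StoreBufferBlock* block = thread->store_buffer_block();
    block->pointers[block->top] = object;  // remember the stored-into object
    block->top++;
    if (block->top == target::StoreBufferBlock::kSize) {
      // Block full: the overflow path below hands it to the runtime via
      // kStoreBufferBlockProcessRuntimeEntry.
    }

Note that the spilled registers are popped before the overflow branch, so both the fast path and the slow path resume with the caller's values restored.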
@@ -1275,7 +1314,7 @@
__ Push(CODE_REG);
__ ldr(CODE_REG, stub_code);
- __ EnterCallRuntimeFrame(0 * kWordSize);
+ __ EnterCallRuntimeFrame(0 * target::kWordSize);
__ mov(R0, Operand(THR));
__ CallRuntime(kStoreBufferBlockProcessRuntimeEntry, 1);
// Restore callee-saved registers, tear down frame.
@@ -1292,31 +1331,31 @@
#if !defined(USING_SIMULATOR)
ASSERT(OS::NumberOfAvailableProcessors() <= 1);
#endif
- __ ldr(R2, FieldAddress(R0, Object::tags_offset()));
- __ bic(R2, R2, Operand(1 << RawObject::kOldAndNotMarkedBit));
- __ str(R2, FieldAddress(R0, Object::tags_offset()));
+ __ ldr(R2, FieldAddress(R0, target::Object::tags_offset()));
+ __ bic(R2, R2, Operand(1 << target::RawObject::kOldAndNotMarkedBit));
+ __ str(R2, FieldAddress(R0, target::Object::tags_offset()));
} else {
// Atomically clear kOldAndNotMarkedBit.
- ASSERT(Object::tags_offset() == 0);
+ ASSERT(target::Object::tags_offset() == 0);
__ sub(R3, R0, Operand(kHeapObjectTag));
// R3: Untagged address of header word (ldrex/strex do not support offsets).
__ Bind(&marking_retry);
__ ldrex(R2, R3);
- __ tst(R2, Operand(1 << RawObject::kOldAndNotMarkedBit));
+ __ tst(R2, Operand(1 << target::RawObject::kOldAndNotMarkedBit));
__ b(&lost_race, ZERO);
- __ bic(R2, R2, Operand(1 << RawObject::kOldAndNotMarkedBit));
+ __ bic(R2, R2, Operand(1 << target::RawObject::kOldAndNotMarkedBit));
__ strex(R4, R2, R3);
__ cmp(R4, Operand(1));
__ b(&marking_retry, EQ);
}
- __ ldr(R4, Address(THR, Thread::marking_stack_block_offset()));
- __ ldr(R2, Address(R4, MarkingStackBlock::top_offset()));
- __ add(R3, R4, Operand(R2, LSL, kWordSizeLog2));
- __ str(R0, Address(R3, MarkingStackBlock::pointers_offset()));
+ __ ldr(R4, Address(THR, target::Thread::marking_stack_block_offset()));
+ __ ldr(R2, Address(R4, target::MarkingStackBlock::top_offset()));
+ __ add(R3, R4, Operand(R2, LSL, target::kWordSizeLog2));
+ __ str(R0, Address(R3, target::MarkingStackBlock::pointers_offset()));
__ add(R2, R2, Operand(1));
- __ str(R2, Address(R4, MarkingStackBlock::top_offset()));
- __ CompareImmediate(R2, MarkingStackBlock::kSize);
+ __ str(R2, Address(R4, target::MarkingStackBlock::top_offset()));
+ __ CompareImmediate(R2, target::MarkingStackBlock::kSize);
__ PopList((1 << R4) | (1 << R2) | (1 << R3)); // Unspill.
__ b(&marking_overflow, EQ);
__ Ret();
@@ -1324,7 +1363,7 @@
__ Bind(&marking_overflow);
__ Push(CODE_REG);
__ ldr(CODE_REG, stub_code);
- __ EnterCallRuntimeFrame(0 * kWordSize);
+ __ EnterCallRuntimeFrame(0 * target::kWordSize);
__ mov(R0, Operand(THR));
__ CallRuntime(kMarkingStackBlockProcessRuntimeEntry, 1);
__ LeaveCallRuntimeFrame();
@@ -1340,17 +1379,20 @@
// Get card table.
__ Bind(&remember_card);
- __ AndImmediate(TMP, R1, kPageMask); // HeapPage.
- __ ldr(TMP, Address(TMP, HeapPage::card_table_offset())); // Card table.
+ __ AndImmediate(TMP, R1, target::kPageMask); // HeapPage.
+ __ ldr(TMP,
+ Address(TMP, target::HeapPage::card_table_offset())); // Card table.
__ cmp(TMP, Operand(0));
__ b(&remember_card_slow, EQ);
// Dirty the card.
- __ AndImmediate(TMP, R1, kPageMask); // HeapPage.
- __ sub(R9, R9, Operand(TMP)); // Offset in page.
- __ ldr(TMP, Address(TMP, HeapPage::card_table_offset())); // Card table.
+ __ AndImmediate(TMP, R1, target::kPageMask); // HeapPage.
+ __ sub(R9, R9, Operand(TMP)); // Offset in page.
+ __ ldr(TMP,
+ Address(TMP, target::HeapPage::card_table_offset())); // Card table.
__ add(TMP, TMP,
- Operand(R9, LSR, HeapPage::kBytesPerCardLog2)); // Card address.
+ Operand(R9, LSR,
+ target::HeapPage::kBytesPerCardLog2)); // Card address.
__ strb(R1,
          Address(TMP, 0));  // Low byte of R1 is non-zero from object tag.
__ Ret();
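
The card-marking fast path above amounts to the following sketch (the types and the CardTableOf accessor are invented for illustration):

    uword page = object & target::kPageMask;  // start of the HeapPage
    uint8_t* table = CardTableOf(page);       // null until first use
    intptr_t index =
        (slot_address - page) >> target::HeapPage::kBytesPerCardLog2;
    table[index] = 1;  // any non-zero byte marks the card dirty

The stub gets its non-zero byte for free: it stores the low byte of the object register, which the heap-object tag guarantees is non-zero.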
@@ -1373,26 +1415,28 @@
}
}
-void StubCode::GenerateWriteBarrierStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateWriteBarrierStub(Assembler* assembler) {
GenerateWriteBarrierStubHelper(
- assembler, Address(THR, Thread::write_barrier_code_offset()), false);
+ assembler, Address(THR, target::Thread::write_barrier_code_offset()),
+ false);
}
-void StubCode::GenerateArrayWriteBarrierStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateArrayWriteBarrierStub(Assembler* assembler) {
GenerateWriteBarrierStubHelper(
- assembler, Address(THR, Thread::array_write_barrier_code_offset()), true);
+ assembler,
+ Address(THR, target::Thread::array_write_barrier_code_offset()), true);
}
// Called for inline allocation of objects.
// Input parameters:
// LR : return address.
// SP + 0 : type arguments object (only if class is parameterized).
-void StubCode::GenerateAllocationStubForClass(Assembler* assembler,
- const Class& cls) {
+void StubCodeCompiler::GenerateAllocationStubForClass(Assembler* assembler,
+ const Class& cls) {
// The generated code is different if the class is parameterized.
- const bool is_cls_parameterized = cls.NumTypeArguments() > 0;
- ASSERT(!is_cls_parameterized ||
- (cls.type_arguments_field_offset() != Class::kNoTypeArguments));
+ const bool is_cls_parameterized = target::Class::NumTypeArguments(cls) > 0;
+ ASSERT(!is_cls_parameterized || target::Class::TypeArgumentsFieldOffset(
+ cls) != target::Class::kNoTypeArguments);
const Register kNullReg = R8;
const Register kOtherNullReg = R9;
@@ -1405,24 +1449,25 @@
// when the object initialization should be done as a loop or as
// straight line code.
const int kInlineInstanceSize = 12;
- const intptr_t instance_size = cls.instance_size();
+ const intptr_t instance_size = target::Class::InstanceSize(cls);
ASSERT(instance_size > 0);
- ASSERT(instance_size % kObjectAlignment == 0);
+ ASSERT(instance_size % target::ObjectAlignment::kObjectAlignment == 0);
if (is_cls_parameterized) {
__ ldr(kTypeArgumentsReg, Address(SP, 0));
}
- Isolate* isolate = Isolate::Current();
- __ LoadObject(kNullReg, Object::null_object());
- if (FLAG_inline_alloc && Heap::IsAllocatableInNewSpace(instance_size) &&
- !cls.TraceAllocation(isolate)) {
+ __ LoadObject(kNullReg, NullObject());
+ if (FLAG_inline_alloc &&
+ target::Heap::IsAllocatableInNewSpace(instance_size) &&
+ !target::Class::TraceAllocation(cls)) {
Label slow_case;
// Allocate the object and update top to point to
// next object start and initialize the allocated object.
- RELEASE_ASSERT((Thread::top_offset() + kWordSize) == Thread::end_offset());
- __ ldrd(kInstanceReg, kEndReg, THR, Thread::top_offset());
+ RELEASE_ASSERT((target::Thread::top_offset() + target::kWordSize) ==
+ target::Thread::end_offset());
+ __ ldrd(kInstanceReg, kEndReg, THR, target::Thread::top_offset());
__ AddImmediate(kEndOfInstanceReg, kInstanceReg, instance_size);
__ cmp(kEndOfInstanceReg, Operand(kEndReg));
if (FLAG_use_slow_path) {
@@ -1430,29 +1475,29 @@
} else {
__ b(&slow_case, CS); // Unsigned higher or equal.
}
- __ str(kEndOfInstanceReg, Address(THR, Thread::top_offset()));
+ __ str(kEndOfInstanceReg, Address(THR, target::Thread::top_offset()));
// Load the address of the allocation stats table. We split up the load
// and the increment so that the dependent load is not too nearby.
NOT_IN_PRODUCT(static Register kAllocationStatsReg = R4);
- NOT_IN_PRODUCT(
- __ LoadAllocationStatsAddress(kAllocationStatsReg, cls.id()));
+ NOT_IN_PRODUCT(__ LoadAllocationStatsAddress(kAllocationStatsReg,
+ target::Class::GetId(cls)));
// Set the tags.
- uint32_t tags = 0;
- tags = RawObject::SizeTag::update(instance_size, tags);
- ASSERT(cls.id() != kIllegalCid);
- tags = RawObject::ClassIdTag::update(cls.id(), tags);
- tags = RawObject::NewBit::update(true, tags);
+ ASSERT(target::Class::GetId(cls) != kIllegalCid);
+ const uint32_t tags = target::MakeTagWordForNewSpaceObject(
+ target::Class::GetId(cls), instance_size);
+
__ LoadImmediate(R1, tags);
- __ str(R1, Address(kInstanceReg, Instance::tags_offset()));
+ __ str(R1, Address(kInstanceReg, target::Object::tags_offset()));
__ add(kInstanceReg, kInstanceReg, Operand(kHeapObjectTag));
// First try inlining the initialization without a loop.
- if (instance_size < (kInlineInstanceSize * kWordSize)) {
- intptr_t begin_offset = Instance::NextFieldOffset() - kHeapObjectTag;
+ if (instance_size < (kInlineInstanceSize * target::kWordSize)) {
+ intptr_t begin_offset =
+ target::Instance::first_field_offset() - kHeapObjectTag;
intptr_t end_offset = instance_size - kHeapObjectTag;
- if ((end_offset - begin_offset) >= (2 * kWordSize)) {
+ if ((end_offset - begin_offset) >= (2 * target::kWordSize)) {
__ mov(kOtherNullReg, Operand(kNullReg));
}
__ InitializeFieldsNoBarrierUnrolled(kInstanceReg, kInstanceReg,
@@ -1460,20 +1505,20 @@
kOtherNullReg);
} else {
__ add(R1, kInstanceReg,
- Operand(Instance::NextFieldOffset() - kHeapObjectTag));
+ Operand(target::Instance::first_field_offset() - kHeapObjectTag));
__ mov(kOtherNullReg, Operand(kNullReg));
__ InitializeFieldsNoBarrier(kInstanceReg, R1, kEndOfInstanceReg,
kNullReg, kOtherNullReg);
}
if (is_cls_parameterized) {
+ const intptr_t offset = target::Class::TypeArgumentsFieldOffset(cls);
__ StoreIntoObjectNoBarrier(
- kInstanceReg,
- FieldAddress(kInstanceReg, cls.type_arguments_field_offset()),
- kTypeArgumentsReg);
+ kInstanceReg, FieldAddress(kInstanceReg, offset), kTypeArgumentsReg);
}
// Update allocation stats.
- NOT_IN_PRODUCT(__ IncrementAllocationStats(kAllocationStatsReg, cls.id()));
+ NOT_IN_PRODUCT(__ IncrementAllocationStats(kAllocationStatsReg,
+ target::Class::GetId(cls)));
__ Ret();
__ Bind(&slow_case);
@@ -1481,13 +1526,15 @@
// Create a stub frame as we are pushing some objects on the stack before
// calling into the runtime.
__ EnterStubFrame(); // Uses pool pointer to pass cls to runtime.
- __ LoadObject(R1, cls);
+ __ LoadObject(R1, CastHandle<Object>(cls));
__ PushList(1 << kNullReg | 1 << R1); // Pushes cls, result slot.
__ Push(is_cls_parameterized ? kTypeArgumentsReg : kNullReg);
__ CallRuntime(kAllocateObjectRuntimeEntry, 2); // Allocate object.
- __ ldr(kInstanceReg,
- Address(SP, 2 * kWordSize)); // Pop result (newly allocated object).
- __ LeaveDartFrameAndReturn(); // Restores correct SP.
+ __ ldr(
+ kInstanceReg,
+ Address(SP,
+ 2 * target::kWordSize)); // Pop result (newly allocated object).
+ __ LeaveDartFrameAndReturn(); // Restores correct SP.
}
// Called for invoking "dynamic noSuchMethod(Invocation invocation)" function
@@ -1497,13 +1544,15 @@
// LR : return address.
// SP : address of last argument.
// R4: arguments descriptor array.
-void StubCode::GenerateCallClosureNoSuchMethodStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub(
+ Assembler* assembler) {
__ EnterStubFrame();
// Load the receiver.
- __ ldr(R2, FieldAddress(R4, ArgumentsDescriptor::count_offset()));
+ __ ldr(R2, FieldAddress(R4, target::ArgumentsDescriptor::count_offset()));
__ add(IP, FP, Operand(R2, LSL, 1)); // R2 is Smi.
- __ ldr(R8, Address(IP, kParamEndSlotFromFp * kWordSize));
+ __ ldr(R8, Address(IP, target::frame_layout.param_end_from_fp *
+ target::kWordSize));
// Push space for the return value.
// Push the receiver.
@@ -1512,9 +1561,11 @@
__ PushList((1 << R4) | (1 << R8) | (1 << IP));
// Adjust arguments count.
- __ ldr(R3, FieldAddress(R4, ArgumentsDescriptor::type_args_len_offset()));
+ __ ldr(R3,
+ FieldAddress(R4, target::ArgumentsDescriptor::type_args_len_offset()));
__ cmp(R3, Operand(0));
- __ AddImmediate(R2, R2, Smi::RawValue(1), NE); // Include the type arguments.
+ __ AddImmediate(R2, R2, target::ToRawSmi(1),
+ NE); // Include the type arguments.
// R2: Smi-tagged arguments array length.
PushArrayOfArguments(assembler);
@@ -1529,7 +1580,8 @@
// R9: inline cache data object.
// Cannot use function object from ICData as it may be the inlined
// function and not the top-scope function.
-void StubCode::GenerateOptimizedUsageCounterIncrement(Assembler* assembler) {
+void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement(
+ Assembler* assembler) {
Register ic_reg = R9;
Register func_reg = R8;
if (FLAG_trace_optimized_ic_calls) {
@@ -1542,23 +1594,27 @@
__ PopList((1 << R9) | (1 << R8)); // Restore.
__ LeaveStubFrame();
}
- __ ldr(NOTFP, FieldAddress(func_reg, Function::usage_counter_offset()));
+ __ ldr(NOTFP,
+ FieldAddress(func_reg, target::Function::usage_counter_offset()));
__ add(NOTFP, NOTFP, Operand(1));
- __ str(NOTFP, FieldAddress(func_reg, Function::usage_counter_offset()));
+ __ str(NOTFP,
+ FieldAddress(func_reg, target::Function::usage_counter_offset()));
}
// Loads function into 'temp_reg'.
-void StubCode::GenerateUsageCounterIncrement(Assembler* assembler,
- Register temp_reg) {
+void StubCodeCompiler::GenerateUsageCounterIncrement(Assembler* assembler,
+ Register temp_reg) {
if (FLAG_optimization_counter_threshold >= 0) {
Register ic_reg = R9;
Register func_reg = temp_reg;
ASSERT(temp_reg == R8);
__ Comment("Increment function counter");
- __ ldr(func_reg, FieldAddress(ic_reg, ICData::owner_offset()));
- __ ldr(NOTFP, FieldAddress(func_reg, Function::usage_counter_offset()));
+ __ ldr(func_reg, FieldAddress(ic_reg, target::ICData::owner_offset()));
+ __ ldr(NOTFP,
+ FieldAddress(func_reg, target::Function::usage_counter_offset()));
__ add(NOTFP, NOTFP, Operand(1));
- __ str(NOTFP, FieldAddress(func_reg, Function::usage_counter_offset()));
+ __ str(NOTFP,
+ FieldAddress(func_reg, target::Function::usage_counter_offset()));
}
}
@@ -1571,8 +1627,8 @@
intptr_t num_args,
Label* not_smi_or_overflow) {
__ Comment("Fast Smi op");
- __ ldr(R0, Address(SP, 0 * kWordSize));
- __ ldr(R1, Address(SP, 1 * kWordSize));
+ __ ldr(R0, Address(SP, 0 * target::kWordSize));
+ __ ldr(R1, Address(SP, 1 * target::kWordSize));
__ orr(TMP, R0, Operand(R1));
__ tst(TMP, Operand(kSmiTagMask));
__ b(not_smi_or_overflow, NE);
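
OR-ing the two operands before a single tag test is a standard trick: a Smi has a clear low bit, so if either value has the bit set, the OR does too, and one tst rejects both cases at once. As a sketch:

    // kSmiTag == 0: a set low bit means "heap object", not a Smi.
    if (((a | b) & kSmiTagMask) != 0) goto not_smi_or_overflow;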
@@ -1589,26 +1645,26 @@
}
case Token::kEQ: {
__ cmp(R0, Operand(R1));
- __ LoadObject(R0, Bool::True(), EQ);
- __ LoadObject(R0, Bool::False(), NE);
+ __ LoadObject(R0, CastHandle<Object>(TrueObject()), EQ);
+ __ LoadObject(R0, CastHandle<Object>(FalseObject()), NE);
break;
}
default:
UNIMPLEMENTED();
}
// R9: IC data object (preserved).
- __ ldr(R8, FieldAddress(R9, ICData::ic_data_offset()));
+ __ ldr(R8, FieldAddress(R9, target::ICData::ic_data_offset()));
// R8: ic_data_array with check entries: classes and target functions.
- __ AddImmediate(R8, Array::data_offset() - kHeapObjectTag);
+ __ AddImmediate(R8, target::Array::data_offset() - kHeapObjectTag);
// R8: points directly to the first ic data array element.
#if defined(DEBUG)
// Check that first entry is for Smi/Smi.
Label error, ok;
- const intptr_t imm_smi_cid = reinterpret_cast<intptr_t>(Smi::New(kSmiCid));
+ const intptr_t imm_smi_cid = target::ToRawSmi(kSmiCid);
__ ldr(R1, Address(R8, 0));
__ CompareImmediate(R1, imm_smi_cid);
__ b(&error, NE);
- __ ldr(R1, Address(R8, kWordSize));
+ __ ldr(R1, Address(R8, target::kWordSize));
__ CompareImmediate(R1, imm_smi_cid);
__ b(&ok, EQ);
__ Bind(&error);
@@ -1617,9 +1673,10 @@
#endif
if (FLAG_optimization_counter_threshold >= 0) {
// Update counter, ignore overflow.
- const intptr_t count_offset = ICData::CountIndexFor(num_args) * kWordSize;
+ const intptr_t count_offset =
+ target::ICData::CountIndexFor(num_args) * target::kWordSize;
__ LoadFromOffset(kWord, R1, R8, count_offset);
- __ adds(R1, R1, Operand(Smi::RawValue(1)));
+ __ adds(R1, R1, Operand(target::ToRawSmi(1)));
__ StoreIntoSmiField(Address(R8, count_offset), R1);
}
__ Ret();
@@ -1635,7 +1692,7 @@
// - Check if 'num_args' (including receiver) match any IC data group.
// - Match found -> jump to target.
// - Match not found -> jump to IC miss.
-void StubCode::GenerateNArgsCheckInlineCacheStub(
+void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
Assembler* assembler,
intptr_t num_args,
const RuntimeEntry& handle_ic_miss,
@@ -1650,9 +1707,9 @@
Label ok;
// Check that the IC data array has NumArgsTested() == num_args.
// 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
- __ ldr(R8, FieldAddress(R9, ICData::state_bits_offset()));
- ASSERT(ICData::NumArgsTestedShift() == 0); // No shift needed.
- __ and_(R8, R8, Operand(ICData::NumArgsTestedMask()));
+ __ ldr(R8, FieldAddress(R9, target::ICData::state_bits_offset()));
+ ASSERT(target::ICData::NumArgsTestedShift() == 0); // No shift needed.
+ __ and_(R8, R8, Operand(target::ICData::NumArgsTestedMask()));
__ CompareImmediate(R8, num_args);
__ b(&ok, EQ);
__ Stop("Incorrect stub for IC data");
@@ -1665,7 +1722,7 @@
if (!optimized) {
__ Comment("Check single stepping");
__ LoadIsolate(R8);
- __ ldrb(R8, Address(R8, Isolate::single_step_offset()));
+ __ ldrb(R8, Address(R8, target::Isolate::single_step_offset()));
__ CompareImmediate(R8, 0);
__ b(&stepping, NE);
__ Bind(&done_stepping);
@@ -1680,19 +1737,19 @@
__ Comment("Extract ICData initial values and receiver cid");
// Load arguments descriptor into R4.
- __ ldr(R4, FieldAddress(R9, ICData::arguments_descriptor_offset()));
+ __ ldr(R4, FieldAddress(R9, target::ICData::arguments_descriptor_offset()));
// Loop that checks if there is an IC data match.
Label loop, found, miss;
// R9: IC data object (preserved).
- __ ldr(R8, FieldAddress(R9, ICData::ic_data_offset()));
+ __ ldr(R8, FieldAddress(R9, target::ICData::ic_data_offset()));
// R8: ic_data_array with check entries: classes and target functions.
- const int kIcDataOffset = Array::data_offset() - kHeapObjectTag;
+ const int kIcDataOffset = target::Array::data_offset() - kHeapObjectTag;
// R8: points at the IC data array.
// Get the receiver's class ID (first read number of arguments from
// arguments descriptor array and then access the receiver from the stack).
- __ ldr(NOTFP, FieldAddress(R4, ArgumentsDescriptor::count_offset()));
- __ sub(NOTFP, NOTFP, Operand(Smi::RawValue(1)));
+ __ ldr(NOTFP, FieldAddress(R4, target::ArgumentsDescriptor::count_offset()));
+ __ sub(NOTFP, NOTFP, Operand(target::ToRawSmi(1)));
// NOTFP: argument_count - 1 (smi).
__ Comment("ICData loop");
@@ -1700,7 +1757,7 @@
__ ldr(R0, Address(SP, NOTFP, LSL, 1)); // NOTFP (argument_count - 1) is Smi.
__ LoadTaggedClassIdMayBeSmi(R0, R0);
if (num_args == 2) {
- __ sub(R1, NOTFP, Operand(Smi::RawValue(1)));
+ __ sub(R1, NOTFP, Operand(target::ToRawSmi(1)));
__ ldr(R1, Address(SP, R1, LSL, 1)); // R1 (argument_count - 2) is Smi.
__ LoadTaggedClassIdMayBeSmi(R1, R1);
}
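
The LSL 1 stack indexing used here (and in the receiver loads of the other call stubs) leans on the Smi representation: on this 32-bit target a Smi stores value << 1, so one more left shift yields value << 2, i.e. value * target::kWordSize. In effect:

    // NOTFP holds Smi(argument_count - 1) == (argument_count - 1) << 1.
    // SP + (NOTFP << 1) == SP + (argument_count - 1) * target::kWordSize,
    // which is the address of the receiver slot.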
@@ -1716,7 +1773,7 @@
__ cmp(R0, Operand(R2)); // Class id match?
if (num_args == 2) {
__ b(&update, NE); // Continue.
- __ ldr(R2, Address(R8, kIcDataOffset + kWordSize));
+ __ ldr(R2, Address(R8, kIcDataOffset + target::kWordSize));
__ cmp(R1, Operand(R2)); // Class id match?
}
__ b(&found, EQ); // Break.
@@ -1724,10 +1781,11 @@
__ Bind(&update);
const intptr_t entry_size =
- ICData::TestEntryLengthFor(num_args, exactness_check) * kWordSize;
+ target::ICData::TestEntryLengthFor(num_args, exactness_check) *
+ target::kWordSize;
__ AddImmediate(R8, entry_size); // Next entry.
- __ CompareImmediate(R2, Smi::RawValue(kIllegalCid)); // Done?
+ __ CompareImmediate(R2, target::ToRawSmi(kIllegalCid)); // Done?
if (unroll == 0) {
__ b(&loop, NE);
} else {
@@ -1750,7 +1808,7 @@
__ PushList((1 << R0) | (1 << R4) | (1 << R9));
// Push call arguments.
for (intptr_t i = 0; i < num_args; i++) {
- __ LoadFromOffset(kWord, IP, NOTFP, -i * kWordSize);
+ __ LoadFromOffset(kWord, IP, NOTFP, -i * target::kWordSize);
__ Push(IP);
}
// Pass IC data object.
@@ -1772,23 +1830,25 @@
__ Bind(&found);
// R8: pointer to an IC data check group.
- const intptr_t target_offset = ICData::TargetIndexFor(num_args) * kWordSize;
- const intptr_t count_offset = ICData::CountIndexFor(num_args) * kWordSize;
+ const intptr_t target_offset =
+ target::ICData::TargetIndexFor(num_args) * target::kWordSize;
+ const intptr_t count_offset =
+ target::ICData::CountIndexFor(num_args) * target::kWordSize;
__ LoadFromOffset(kWord, R0, R8, kIcDataOffset + target_offset);
if (FLAG_optimization_counter_threshold >= 0) {
__ Comment("Update caller's counter");
__ LoadFromOffset(kWord, R1, R8, kIcDataOffset + count_offset);
// Ignore overflow.
- __ adds(R1, R1, Operand(Smi::RawValue(1)));
+ __ adds(R1, R1, Operand(target::ToRawSmi(1)));
__ StoreIntoSmiField(Address(R8, kIcDataOffset + count_offset), R1);
}
__ Comment("Call target");
__ Bind(&call_target_function);
// R0: target function.
- __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
- __ Branch(FieldAddress(R0, Function::entry_point_offset()));
+ __ ldr(CODE_REG, FieldAddress(R0, target::Function::code_offset()));
+ __ Branch(FieldAddress(R0, target::Function::entry_point_offset()));
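
Taken together, the loop and the found/miss paths implement the following lookup (a sketch; the slot indices come from the TargetIndexFor, CountIndexFor, and TestEntryLengthFor helpers used above):

    intptr_t* entry = ic_data_entries;  // past Array::data_offset()
    while (entry[0] != target::ToRawSmi(kIllegalCid)) {  // sentinel
      if (entry[0] == cid0 && (num_args < 2 || entry[1] == cid1)) {
        entry[CountIndexFor(num_args)] += target::ToRawSmi(1);
        return entry[TargetIndexFor(num_args)];  // cached target Function
      }
      entry += TestEntryLengthFor(num_args, exactness_check);
    }
    // Not found: fall through to the IC miss handler.

As in the Smi fast path, the count update deliberately ignores overflow; the adds has no overflow branch.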
#if !defined(PRODUCT)
if (!optimized) {
@@ -1814,43 +1874,45 @@
// 2 .. (length - 1): group of checks, each check containing:
// - N classes.
// - 1 target function.
-void StubCode::GenerateOneArgCheckInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub(
+ Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, R8);
GenerateNArgsCheckInlineCacheStub(
assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL);
}
-void StubCode::GenerateOneArgCheckInlineCacheWithExactnessCheckStub(
+void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub(
Assembler* assembler) {
__ Stop("Unimplemented");
}
-void StubCode::GenerateTwoArgsCheckInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub(
+ Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, R8);
GenerateNArgsCheckInlineCacheStub(assembler, 2,
kInlineCacheMissHandlerTwoArgsRuntimeEntry,
Token::kILLEGAL);
}
-void StubCode::GenerateSmiAddInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSmiAddInlineCacheStub(Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, R8);
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD);
}
-void StubCode::GenerateSmiSubInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSmiSubInlineCacheStub(Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, R8);
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kSUB);
}
-void StubCode::GenerateSmiEqualInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSmiEqualInlineCacheStub(Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, R8);
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ);
}
-void StubCode::GenerateOneArgOptimizedCheckInlineCacheStub(
+void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub(
Assembler* assembler) {
GenerateOptimizedUsageCounterIncrement(assembler);
GenerateNArgsCheckInlineCacheStub(assembler, 1,
@@ -1858,12 +1920,13 @@
Token::kILLEGAL, true /* optimized */);
}
-void StubCode::GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub(
- Assembler* assembler) {
+void StubCodeCompiler::
+ GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub(
+ Assembler* assembler) {
__ Stop("Unimplemented");
}
-void StubCode::GenerateTwoArgsOptimizedCheckInlineCacheStub(
+void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub(
Assembler* assembler) {
GenerateOptimizedUsageCounterIncrement(assembler);
GenerateNArgsCheckInlineCacheStub(assembler, 2,
@@ -1874,16 +1937,17 @@
// Intermediary stub between a static call and its target. ICData contains
// the target function and the call count.
// R9: ICData
-void StubCode::GenerateZeroArgsUnoptimizedStaticCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub(
+ Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, R8);
#if defined(DEBUG)
{
Label ok;
// Check that the IC data array has NumArgsTested() == 0.
// 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
- __ ldr(R8, FieldAddress(R9, ICData::state_bits_offset()));
- ASSERT(ICData::NumArgsTestedShift() == 0); // No shift needed.
- __ and_(R8, R8, Operand(ICData::NumArgsTestedMask()));
+ __ ldr(R8, FieldAddress(R9, target::ICData::state_bits_offset()));
+ ASSERT(target::ICData::NumArgsTestedShift() == 0); // No shift needed.
+ __ and_(R8, R8, Operand(target::ICData::NumArgsTestedMask()));
__ CompareImmediate(R8, 0);
__ b(&ok, EQ);
__ Stop("Incorrect IC data for unoptimized static call");
@@ -1895,34 +1959,36 @@
// Check single stepping.
Label stepping, done_stepping;
__ LoadIsolate(R8);
- __ ldrb(R8, Address(R8, Isolate::single_step_offset()));
+ __ ldrb(R8, Address(R8, target::Isolate::single_step_offset()));
__ CompareImmediate(R8, 0);
__ b(&stepping, NE);
__ Bind(&done_stepping);
#endif
// R9: IC data object (preserved).
- __ ldr(R8, FieldAddress(R9, ICData::ic_data_offset()));
+ __ ldr(R8, FieldAddress(R9, target::ICData::ic_data_offset()));
// R8: ic_data_array with entries: target functions and count.
- __ AddImmediate(R8, Array::data_offset() - kHeapObjectTag);
+ __ AddImmediate(R8, target::Array::data_offset() - kHeapObjectTag);
// R8: points directly to the first ic data array element.
- const intptr_t target_offset = ICData::TargetIndexFor(0) * kWordSize;
- const intptr_t count_offset = ICData::CountIndexFor(0) * kWordSize;
+ const intptr_t target_offset =
+ target::ICData::TargetIndexFor(0) * target::kWordSize;
+ const intptr_t count_offset =
+ target::ICData::CountIndexFor(0) * target::kWordSize;
if (FLAG_optimization_counter_threshold >= 0) {
// Increment count for this call, ignore overflow.
__ LoadFromOffset(kWord, R1, R8, count_offset);
- __ adds(R1, R1, Operand(Smi::RawValue(1)));
+ __ adds(R1, R1, Operand(target::ToRawSmi(1)));
__ StoreIntoSmiField(Address(R8, count_offset), R1);
}
// Load arguments descriptor into R4.
- __ ldr(R4, FieldAddress(R9, ICData::arguments_descriptor_offset()));
+ __ ldr(R4, FieldAddress(R9, target::ICData::arguments_descriptor_offset()));
// Get function and call it, if possible.
__ LoadFromOffset(kWord, R0, R8, target_offset);
- __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
- __ Branch(FieldAddress(R0, Function::entry_point_offset()));
+ __ ldr(CODE_REG, FieldAddress(R0, target::Function::code_offset()));
+ __ Branch(FieldAddress(R0, target::Function::entry_point_offset()));
#if !defined(PRODUCT)
__ Bind(&stepping);
@@ -1936,13 +2002,15 @@
#endif
}
-void StubCode::GenerateOneArgUnoptimizedStaticCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub(
+ Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, R8);
GenerateNArgsCheckInlineCacheStub(
assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL);
}
-void StubCode::GenerateTwoArgsUnoptimizedStaticCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub(
+ Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, R8);
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL);
@@ -1951,7 +2019,7 @@
// Stub for compiling a function and jumping to the compiled code.
// R4: Arguments descriptor.
// R0: Function.
-void StubCode::GenerateLazyCompileStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
__ EnterStubFrame();
__ PushList((1 << R0) | (1 << R4)); // Preserve arg desc, pass function.
__ CallRuntime(kCompileFunctionRuntimeEntry, 1);
@@ -1960,16 +2028,16 @@
// When using the interpreter, the function's code may now point to the
// InterpretCall stub. Make sure R0, R4, and R9 are preserved.
- __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
- __ Branch(FieldAddress(R0, Function::entry_point_offset()));
+ __ ldr(CODE_REG, FieldAddress(R0, target::Function::code_offset()));
+ __ Branch(FieldAddress(R0, target::Function::entry_point_offset()));
}
-void StubCode::GenerateInterpretCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateInterpretCallStub(Assembler* assembler) {
__ Unimplemented("Interpreter not yet supported");
}
// R9: Contains an ICData.
-void StubCode::GenerateICCallBreakpointStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateICCallBreakpointStub(Assembler* assembler) {
__ EnterStubFrame();
__ LoadImmediate(R0, 0);
// Preserve arguments descriptor and make room for result.
@@ -1978,10 +2046,10 @@
__ PopList((1 << R0) | (1 << R9));
__ LeaveStubFrame();
__ mov(CODE_REG, Operand(R0));
- __ Branch(FieldAddress(CODE_REG, Code::entry_point_offset()));
+ __ Branch(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
}
-void StubCode::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
__ EnterStubFrame();
__ LoadImmediate(R0, 0);
// Make room for result.
@@ -1989,18 +2057,18 @@
__ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
__ PopList((1 << CODE_REG));
__ LeaveStubFrame();
- __ Branch(FieldAddress(CODE_REG, Code::entry_point_offset()));
+ __ Branch(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
}
// Called only from unoptimized code. All relevant registers have been saved.
-void StubCode::GenerateDebugStepCheckStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDebugStepCheckStub(Assembler* assembler) {
#if defined(PRODUCT)
__ Ret();
#else
// Check single stepping.
Label stepping, done_stepping;
__ LoadIsolate(R1);
- __ ldrb(R1, Address(R1, Isolate::single_step_offset()));
+ __ ldrb(R1, Address(R1, target::Isolate::single_step_offset()));
__ CompareImmediate(R1, 0);
__ b(&stepping, NE);
__ Bind(&done_stepping);
@@ -2019,7 +2087,7 @@
// R0: instance (must be preserved).
// R2: instantiator type arguments (only if n >= 4, can be raw_null).
// R1: function type arguments (only if n >= 4, can be raw_null).
-// R3: SubtypeTestCache.
+// R3: target::SubtypeTestCache.
//
// Preserves R0/R2
//
@@ -2039,7 +2107,7 @@
const Register kNullReg = NOTFP;
- __ LoadObject(kNullReg, Object::null_object());
+ __ LoadObject(kNullReg, NullObject());
// Free up these two registers to be used for the 6-value test.
if (n >= 6) {
@@ -2049,8 +2117,9 @@
// Loop initialization (moved up here so that the dependent loads are not
// issued back-to-back).
- __ ldr(kCacheReg, FieldAddress(kCacheReg, SubtypeTestCache::cache_offset()));
- __ AddImmediate(kCacheReg, Array::data_offset() - kHeapObjectTag);
+ __ ldr(kCacheReg,
+ FieldAddress(kCacheReg, target::SubtypeTestCache::cache_offset()));
+ __ AddImmediate(kCacheReg, target::Array::data_offset() - kHeapObjectTag);
Label loop, not_closure;
if (n >= 4) {
@@ -2064,19 +2133,20 @@
// Closure handling.
{
__ ldr(kInstanceCidOrFunction,
- FieldAddress(kInstanceReg, Closure::function_offset()));
+ FieldAddress(kInstanceReg, target::Closure::function_offset()));
if (n >= 2) {
- __ ldr(kInstanceInstantiatorTypeArgumentsReg,
- FieldAddress(kInstanceReg,
- Closure::instantiator_type_arguments_offset()));
+ __ ldr(
+ kInstanceInstantiatorTypeArgumentsReg,
+ FieldAddress(kInstanceReg,
+ target::Closure::instantiator_type_arguments_offset()));
if (n >= 6) {
ASSERT(n == 6);
__ ldr(kInstanceParentFunctionTypeArgumentsReg,
FieldAddress(kInstanceReg,
- Closure::function_type_arguments_offset()));
+ target::Closure::function_type_arguments_offset()));
__ ldr(kInstanceDelayedFunctionTypeArgumentsReg,
FieldAddress(kInstanceReg,
- Closure::delayed_type_arguments_offset()));
+ target::Closure::delayed_type_arguments_offset()));
}
}
__ b(&loop);
@@ -2089,9 +2159,11 @@
Label has_no_type_arguments;
__ LoadClassById(R9, kInstanceCidOrFunction);
__ mov(kInstanceInstantiatorTypeArgumentsReg, Operand(kNullReg));
- __ ldr(R9, FieldAddress(
- R9, Class::type_arguments_field_offset_in_words_offset()));
- __ CompareImmediate(R9, Class::kNoTypeArguments);
+ __ ldr(R9,
+ FieldAddress(
+ R9,
+ target::Class::type_arguments_field_offset_in_words_offset()));
+ __ CompareImmediate(R9, target::Class::kNoTypeArguments);
__ b(&has_no_type_arguments, EQ);
__ add(R9, kInstanceReg, Operand(R9, LSL, 2));
__ ldr(kInstanceInstantiatorTypeArgumentsReg, FieldAddress(R9, 0));
@@ -2110,7 +2182,8 @@
// Loop header.
__ Bind(&loop);
__ ldr(R9, Address(kCacheReg,
- kWordSize * SubtypeTestCache::kInstanceClassIdOrFunction));
+ target::kWordSize *
+ target::SubtypeTestCache::kInstanceClassIdOrFunction));
__ cmp(R9, Operand(kNullReg));
__ b(&not_found, EQ);
__ cmp(R9, Operand(kInstanceCidOrFunction));
@@ -2119,7 +2192,8 @@
} else {
__ b(&next_iteration, NE);
__ ldr(R9, Address(kCacheReg,
- kWordSize * SubtypeTestCache::kInstanceTypeArguments));
+ target::kWordSize *
+ target::SubtypeTestCache::kInstanceTypeArguments));
__ cmp(R9, Operand(kInstanceInstantiatorTypeArgumentsReg));
if (n == 2) {
__ b(&found, EQ);
@@ -2127,11 +2201,13 @@
__ b(&next_iteration, NE);
__ ldr(R9,
Address(kCacheReg,
- kWordSize * SubtypeTestCache::kInstantiatorTypeArguments));
+ target::kWordSize *
+ target::SubtypeTestCache::kInstantiatorTypeArguments));
__ cmp(R9, Operand(kInstantiatorTypeArgumentsReg));
__ b(&next_iteration, NE);
__ ldr(R9, Address(kCacheReg,
- kWordSize * SubtypeTestCache::kFunctionTypeArguments));
+ target::kWordSize *
+ target::SubtypeTestCache::kFunctionTypeArguments));
__ cmp(R9, Operand(kFunctionTypeArgumentsReg));
if (n == 4) {
__ b(&found, EQ);
@@ -2139,31 +2215,30 @@
ASSERT(n == 6);
__ b(&next_iteration, NE);
- __ ldr(R9,
- Address(
- kCacheReg,
- kWordSize *
- SubtypeTestCache::kInstanceParentFunctionTypeArguments));
+ __ ldr(R9, Address(kCacheReg,
+ target::kWordSize *
+ target::SubtypeTestCache::
+ kInstanceParentFunctionTypeArguments));
__ cmp(R9, Operand(kInstanceParentFunctionTypeArgumentsReg));
__ b(&next_iteration, NE);
- __ ldr(
- R9,
- Address(
- kCacheReg,
- kWordSize *
- SubtypeTestCache::kInstanceDelayedFunctionTypeArguments));
+ __ ldr(R9, Address(kCacheReg,
+ target::kWordSize *
+ target::SubtypeTestCache::
+ kInstanceDelayedFunctionTypeArguments));
__ cmp(R9, Operand(kInstanceDelayedFunctionTypeArgumentsReg));
__ b(&found, EQ);
}
}
}
__ Bind(&next_iteration);
- __ AddImmediate(kCacheReg, kWordSize * SubtypeTestCache::kTestEntryLength);
+ __ AddImmediate(kCacheReg, target::kWordSize *
+ target::SubtypeTestCache::kTestEntryLength);
__ b(&loop);
__ Bind(&found);
- __ ldr(R1, Address(kCacheReg, kWordSize * SubtypeTestCache::kTestResult));
+ __ ldr(R1, Address(kCacheReg, target::kWordSize *
+ target::SubtypeTestCache::kTestResult));
if (n >= 6) {
__ PopList(1 << kInstanceParentFunctionTypeArgumentsReg |
1 << kInstanceDelayedFunctionTypeArgumentsReg);
@@ -2180,22 +2255,22 @@
}
// See comment on [GenerateSubtypeNTestCacheStub].
-void StubCode::GenerateSubtype1TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype1TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 1);
}
// See comment on [GenerateSubtypeNTestCacheStub].
-void StubCode::GenerateSubtype2TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype2TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 2);
}
// See comment on [GenerateSubtypeNTestCacheStub].
-void StubCode::GenerateSubtype4TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype4TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 4);
}
// See comment on [GenerateSubtypeNTestCacheStub].
-void StubCode::GenerateSubtype6TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype6TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 6);
}
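For orientation before the type-testing-stub hunks: the loop above is a linear scan over rows of target::SubtypeTestCache::kTestEntryLength words, comparing up to n leading slots against the values loaded from the instance and, on a full match, returning the kTestResult slot. A minimal C++ sketch of that walk; the slot layout and the concrete constants are illustrative stand-ins, not VM source:

  #include <cstdint>

  // Illustrative stand-ins for the target::SubtypeTestCache constants.
  static const intptr_t kTestEntryLength = 7;
  static const intptr_t kTestResult = 6;

  // Rows are laid out back-to-back; a null in slot 0 terminates the cache.
  // 'probe' holds the n values loaded from the instance, in slot order.
  intptr_t Lookup(const intptr_t* rows, const intptr_t* probe, int n,
                  intptr_t null_value) {
    for (;; rows += kTestEntryLength) {
      if (rows[0] == null_value) return null_value;  // not found
      bool match = true;
      for (int i = 0; i < n && match; i++) match = (rows[i] == probe[i]);
      if (match) return rows[kTestResult];  // the cached Bool answer
    }
  }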
@@ -2216,85 +2291,38 @@
//
// Note of warning: The caller will not populate CODE_REG, so we have no
// access to the pool.
-void StubCode::GenerateDefaultTypeTestStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDefaultTypeTestStub(Assembler* assembler) {
Label done;
const Register kInstanceReg = R0;
// Fast case for 'null'.
- __ CompareObject(kInstanceReg, Object::null_object());
+ __ CompareObject(kInstanceReg, NullObject());
__ BranchIf(EQUAL, &done);
- __ ldr(CODE_REG, Address(THR, Thread::slow_type_test_stub_offset()));
- __ Branch(FieldAddress(CODE_REG, Code::entry_point_offset()));
+ __ ldr(CODE_REG, Address(THR, target::Thread::slow_type_test_stub_offset()));
+ __ Branch(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
__ Bind(&done);
__ Ret();
}
-void StubCode::GenerateTopTypeTypeTestStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateTopTypeTypeTestStub(Assembler* assembler) {
__ Ret();
}
-void StubCode::GenerateTypeRefTypeTestStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateTypeRefTypeTestStub(Assembler* assembler) {
const Register kTypeRefReg = R8;
// We dereference the TypeRef and tail-call to its type testing stub.
- __ ldr(kTypeRefReg, FieldAddress(kTypeRefReg, TypeRef::type_offset()));
- __ ldr(R9, FieldAddress(kTypeRefReg,
- AbstractType::type_test_stub_entry_point_offset()));
+ __ ldr(kTypeRefReg,
+ FieldAddress(kTypeRefReg, target::TypeRef::type_offset()));
+ __ ldr(R9, FieldAddress(
+ kTypeRefReg,
+ target::AbstractType::type_test_stub_entry_point_offset()));
__ bx(R9);
}
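The TypeRef stub illustrates the type-testing-stub calling convention: every type caches the entry point of its own stub, so testing against a TypeRef is just a forward to the stub of the wrapped type. A rough C++ model, with illustrative field names:

  #include <cstdint>

  // Toy model, not VM source.
  struct AbstractType {
    uintptr_t type_test_stub_entry_point;  // cached TTS entry
  };
  struct TypeRef {
    AbstractType* type;  // the type this TypeRef wraps
  };

  // What GenerateTypeRefTypeTestStub computes before its tail jump (bx R9).
  uintptr_t ResolveEntryPoint(const TypeRef* ref) {
    return ref->type->type_test_stub_entry_point;
  }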
-void TypeTestingStubGenerator::BuildOptimizedTypeTestStub(
- Assembler* assembler,
- HierarchyInfo* hi,
- const Type& type,
- const Class& type_class) {
- const Register kInstanceReg = R0;
- const Register kClassIdReg = R9;
-
- BuildOptimizedTypeTestStubFastCases(assembler, hi, type, type_class,
- kInstanceReg, kClassIdReg);
-
- __ ldr(CODE_REG, Address(THR, Thread::slow_type_test_stub_offset()));
- __ Branch(FieldAddress(CODE_REG, Code::entry_point_offset()));
-}
-
-void TypeTestingStubGenerator::
- BuildOptimizedSubclassRangeCheckWithTypeArguments(Assembler* assembler,
- HierarchyInfo* hi,
- const Class& type_class,
- const TypeArguments& tp,
- const TypeArguments& ta) {
- const Register kInstanceReg = R0;
- const Register kInstanceTypeArguments = NOTFP;
- const Register kClassIdReg = R9;
-
- BuildOptimizedSubclassRangeCheckWithTypeArguments(
- assembler, hi, type_class, tp, ta, kClassIdReg, kInstanceReg,
- kInstanceTypeArguments);
-}
-
-void TypeTestingStubGenerator::BuildOptimizedTypeArgumentValueCheck(
- Assembler* assembler,
- HierarchyInfo* hi,
- const AbstractType& type_arg,
- intptr_t type_param_value_offset_i,
- Label* check_failed) {
- const Register kInstantiatorTypeArgumentsReg = R2;
- const Register kFunctionTypeArgumentsReg = R1;
- const Register kInstanceTypeArguments = NOTFP;
-
- const Register kClassIdReg = R9;
- const Register kOwnTypeArgumentValue = TMP;
-
- BuildOptimizedTypeArgumentValueCheck(
- assembler, hi, type_arg, type_param_value_offset_i, kClassIdReg,
- kInstanceTypeArguments, kInstantiatorTypeArgumentsReg,
- kFunctionTypeArgumentsReg, kOwnTypeArgumentValue, check_failed);
-}
-
-void StubCode::GenerateUnreachableTypeTestStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateUnreachableTypeTestStub(Assembler* assembler) {
__ Breakpoint();
}
@@ -2306,14 +2334,14 @@
const Register kDstTypeReg = R8;
const Register kSubtypeTestCacheReg = R3;
- __ PushObject(Object::null_object()); // Make room for result.
+ __ PushObject(NullObject()); // Make room for result.
__ Push(kInstanceReg);
__ Push(kDstTypeReg);
__ Push(kInstantiatorTypeArgumentsReg);
__ Push(kFunctionTypeArgumentsReg);
- __ PushObject(Object::null_object());
+ __ PushObject(NullObject());
__ Push(kSubtypeTestCacheReg);
- __ PushObject(Smi::ZoneHandle(Smi::New(mode)));
+ __ PushImmediate(target::ToRawSmi(mode));
__ CallRuntime(kTypeCheckRuntimeEntry, 7);
__ Drop(1); // mode
__ Pop(kSubtypeTestCacheReg);
@@ -2325,15 +2353,16 @@
__ Drop(1); // Discard return value.
}
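The change from PushObject(Smi::ZoneHandle(Smi::New(mode))) to PushImmediate(target::ToRawSmi(mode)) works because a Smi is not a heap object: its raw bits are the value shifted into tagged form, so no handle is needed. A sketch of the encoding, assuming the usual one-bit, zero-valued Smi tag:

  #include <cstdint>

  static const intptr_t kSmiTagSize = 1;  // low bit 0 marks a Smi

  intptr_t ToRawSmi(intptr_t value) { return value << kSmiTagSize; }
  intptr_t SmiValue(intptr_t raw) { return raw >> kSmiTagSize; }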
-void StubCode::GenerateLazySpecializeTypeTestStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateLazySpecializeTypeTestStub(
+ Assembler* assembler) {
const Register kInstanceReg = R0;
Label done;
- __ CompareObject(kInstanceReg, Object::null_object());
+ __ CompareObject(kInstanceReg, NullObject());
__ BranchIf(EQUAL, &done);
__ ldr(CODE_REG,
- Address(THR, Thread::lazy_specialize_type_test_stub_offset()));
+ Address(THR, target::Thread::lazy_specialize_type_test_stub_offset()));
__ EnterStubFrame();
InvokeTypeCheckFromTypeTestStub(assembler, kTypeCheckFromLazySpecializeStub);
__ LeaveStubFrame();
@@ -2342,7 +2371,7 @@
__ Ret();
}
-void StubCode::GenerateSlowTypeTestStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSlowTypeTestStub(Assembler* assembler) {
Label done, call_runtime;
const Register kInstanceReg = R0;
@@ -2355,14 +2384,14 @@
#ifdef DEBUG
// Guaranteed by caller.
Label no_error;
- __ CompareObject(kInstanceReg, Object::null_object());
+ __ CompareObject(kInstanceReg, NullObject());
__ BranchIf(NOT_EQUAL, &no_error);
__ Breakpoint();
__ Bind(&no_error);
#endif
// If the subtype-cache is null, it needs to be lazily created by the runtime.
- __ CompareObject(kSubtypeTestCacheReg, Object::null_object());
+ __ CompareObject(kSubtypeTestCacheReg, NullObject());
__ BranchIf(EQUAL, &call_runtime);
const Register kTmp = NOTFP;
@@ -2374,13 +2403,14 @@
__ BranchIf(NOT_EQUAL, &is_complex_case);
// Check whether this [Type] is instantiated/uninstantiated.
- __ ldrb(kTmp, FieldAddress(kDstTypeReg, Type::type_state_offset()));
- __ cmp(kTmp, Operand(RawType::kFinalizedInstantiated));
+ __ ldrb(kTmp, FieldAddress(kDstTypeReg, target::Type::type_state_offset()));
+ __ cmp(kTmp,
+ Operand(target::RawAbstractType::kTypeStateFinalizedInstantiated));
__ BranchIf(NOT_EQUAL, &is_complex_case);
// Check whether this [Type] is a function type.
- __ ldr(kTmp, FieldAddress(kDstTypeReg, Type::signature_offset()));
- __ CompareObject(kTmp, Object::null_object());
+ __ ldr(kTmp, FieldAddress(kDstTypeReg, target::Type::signature_offset()));
+ __ CompareObject(kTmp, NullObject());
__ BranchIf(NOT_EQUAL, &is_complex_case);
// This [Type] could be a FutureOr. Subtype2TestCache does not support Smi.
@@ -2395,8 +2425,8 @@
__ Bind(&is_simple_case);
{
__ PushList(kRegsToSave);
- __ BranchLink(StubCode::Subtype2TestCache());
- __ CompareObject(R1, Bool::True());
+ __ BranchLink(StubCodeSubtype2TestCache());
+ __ CompareObject(R1, CastHandle<Object>(TrueObject()));
__ PopList(kRegsToSave);
__ BranchIf(EQUAL, &done); // Cache said: yes.
__ Jump(&call_runtime);
@@ -2405,8 +2435,8 @@
__ Bind(&is_complex_case);
{
__ PushList(kRegsToSave);
- __ BranchLink(StubCode::Subtype6TestCache());
- __ CompareObject(R1, Bool::True());
+ __ BranchLink(StubCodeSubtype6TestCache());
+ __ CompareObject(R1, CastHandle<Object>(TrueObject()));
__ PopList(kRegsToSave);
__ BranchIf(EQUAL, &done); // Cache said: yes.
// Fall through to runtime_call
@@ -2419,11 +2449,11 @@
// because we do constant evaluation with default stubs and only install
// optimized versions before writing out the AOT snapshot.
// So dynamic/Object/void will run with the default stub in constant evaluation.
- __ CompareObject(kDstTypeReg, Type::dynamic_type());
+ __ CompareObject(kDstTypeReg, CastHandle<Object>(DynamicType()));
__ BranchIf(EQUAL, &done);
- __ CompareObject(kDstTypeReg, Type::Handle(Type::ObjectType()));
+ __ CompareObject(kDstTypeReg, CastHandle<Object>(ObjectType()));
__ BranchIf(EQUAL, &done);
- __ CompareObject(kDstTypeReg, Type::void_type());
+ __ CompareObject(kDstTypeReg, CastHandle<Object>(VoidType()));
__ BranchIf(EQUAL, &done);
InvokeTypeCheckFromTypeTestStub(assembler, kTypeCheckFromSlowStub);
@@ -2434,7 +2464,7 @@
}
// Return the current stack pointer address, used to do stack alignment checks.
-void StubCode::GenerateGetCStackPointerStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateGetCStackPointerStub(Assembler* assembler) {
__ mov(R0, Operand(SP));
__ Ret();
}
@@ -2446,7 +2476,7 @@
// R2: frame_pointer.
// R3: thread.
// Does not return.
-void StubCode::GenerateJumpToFrameStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateJumpToFrameStub(Assembler* assembler) {
ASSERT(kExceptionObjectReg == R0);
ASSERT(kStackTraceObjectReg == R1);
__ mov(IP, Operand(R1)); // Copy the stack pointer into IP.
@@ -2456,14 +2486,15 @@
__ mov(SP, Operand(IP)); // Set the stack pointer.
// Set the tag.
__ LoadImmediate(R2, VMTag::kDartCompiledTagId);
- __ StoreToOffset(kWord, R2, THR, Thread::vm_tag_offset());
+ __ StoreToOffset(kWord, R2, THR, target::Thread::vm_tag_offset());
// Clear top exit frame.
__ LoadImmediate(R2, 0);
- __ StoreToOffset(kWord, R2, THR, Thread::top_exit_frame_info_offset());
+ __ StoreToOffset(kWord, R2, THR,
+ target::Thread::top_exit_frame_info_offset());
// Restore the pool pointer.
__ RestoreCodePointer();
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
- __ ldr(PP, Address(THR, Thread::global_object_pool_offset()));
+ __ ldr(PP, Address(THR, target::Thread::global_object_pool_offset()));
__ set_constant_pool_allowed(true);
} else {
__ LoadPoolPointer();
@@ -2476,20 +2507,21 @@
//
// The arguments are stored in the Thread object.
// Does not return.
-void StubCode::GenerateRunExceptionHandlerStub(Assembler* assembler) {
- __ LoadFromOffset(kWord, LR, THR, Thread::resume_pc_offset());
+void StubCodeCompiler::GenerateRunExceptionHandlerStub(Assembler* assembler) {
+ __ LoadFromOffset(kWord, LR, THR, target::Thread::resume_pc_offset());
- ASSERT(Thread::CanLoadFromThread(Object::null_object()));
- __ LoadFromOffset(kWord, R2, THR,
- Thread::OffsetFromThread(Object::null_object()));
+ word offset_from_thread = 0;
+ bool ok = target::CanLoadFromThread(NullObject(), &offset_from_thread);
+ ASSERT(ok);
+ __ LoadFromOffset(kWord, R2, THR, offset_from_thread);
// Exception object.
- __ LoadFromOffset(kWord, R0, THR, Thread::active_exception_offset());
- __ StoreToOffset(kWord, R2, THR, Thread::active_exception_offset());
+ __ LoadFromOffset(kWord, R0, THR, target::Thread::active_exception_offset());
+ __ StoreToOffset(kWord, R2, THR, target::Thread::active_exception_offset());
// StackTrace object.
- __ LoadFromOffset(kWord, R1, THR, Thread::active_stacktrace_offset());
- __ StoreToOffset(kWord, R2, THR, Thread::active_stacktrace_offset());
+ __ LoadFromOffset(kWord, R1, THR, target::Thread::active_stacktrace_offset());
+ __ StoreToOffset(kWord, R2, THR, target::Thread::active_stacktrace_offset());
__ bx(LR); // Jump to the exception handler code.
}
@@ -2497,13 +2529,13 @@
// Deoptimize a frame on the call stack before rewinding.
// The arguments are stored in the Thread object.
// No result.
-void StubCode::GenerateDeoptForRewindStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) {
// Push zap value instead of CODE_REG.
__ LoadImmediate(IP, kZapCodeReg);
__ Push(IP);
// Load the deopt pc into LR.
- __ LoadFromOffset(kWord, LR, THR, Thread::resume_pc_offset());
+ __ LoadFromOffset(kWord, LR, THR, target::Thread::resume_pc_offset());
GenerateDeoptimizationSequence(assembler, kEagerDeopt);
// After we have deoptimized, jump to the correct frame.
@@ -2516,7 +2548,7 @@
// Calls to the runtime to optimize the given function.
// R8: function to be reoptimized.
// R4: argument descriptor (preserved).
-void StubCode::GenerateOptimizeFunctionStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) {
__ EnterStubFrame();
__ Push(R4);
__ LoadImmediate(IP, 0);
@@ -2527,8 +2559,8 @@
__ Pop(R0); // Get Function object
__ Pop(R4); // Restore argument descriptor.
__ LeaveStubFrame();
- __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
- __ Branch(FieldAddress(R0, Function::entry_point_offset()));
+ __ ldr(CODE_REG, FieldAddress(R0, target::Function::code_offset()));
+ __ Branch(FieldAddress(R0, target::Function::entry_point_offset()));
__ bkpt(0);
}
@@ -2555,12 +2587,16 @@
__ b(&done, NE);
// Compare double values bitwise.
- __ ldr(temp, FieldAddress(left, Double::value_offset() + 0 * kWordSize));
- __ ldr(IP, FieldAddress(right, Double::value_offset() + 0 * kWordSize));
+ __ ldr(temp, FieldAddress(left, target::Double::value_offset() +
+ 0 * target::kWordSize));
+ __ ldr(IP, FieldAddress(right, target::Double::value_offset() +
+ 0 * target::kWordSize));
__ cmp(temp, Operand(IP));
__ b(&done, NE);
- __ ldr(temp, FieldAddress(left, Double::value_offset() + 1 * kWordSize));
- __ ldr(IP, FieldAddress(right, Double::value_offset() + 1 * kWordSize));
+ __ ldr(temp, FieldAddress(left, target::Double::value_offset() +
+ 1 * target::kWordSize));
+ __ ldr(IP, FieldAddress(right, target::Double::value_offset() +
+ 1 * target::kWordSize));
__ cmp(temp, Operand(IP));
__ b(&done);
@@ -2569,12 +2605,16 @@
__ b(&reference_compare, NE);
__ CompareClassId(right, kMintCid, temp);
__ b(&done, NE);
- __ ldr(temp, FieldAddress(left, Mint::value_offset() + 0 * kWordSize));
- __ ldr(IP, FieldAddress(right, Mint::value_offset() + 0 * kWordSize));
+ __ ldr(temp, FieldAddress(
+ left, target::Mint::value_offset() + 0 * target::kWordSize));
+ __ ldr(IP, FieldAddress(
+ right, target::Mint::value_offset() + 0 * target::kWordSize));
__ cmp(temp, Operand(IP));
__ b(&done, NE);
- __ ldr(temp, FieldAddress(left, Mint::value_offset() + 1 * kWordSize));
- __ ldr(IP, FieldAddress(right, Mint::value_offset() + 1 * kWordSize));
+ __ ldr(temp, FieldAddress(
+ left, target::Mint::value_offset() + 1 * target::kWordSize));
+ __ ldr(IP, FieldAddress(
+ right, target::Mint::value_offset() + 1 * target::kWordSize));
__ cmp(temp, Operand(IP));
__ b(&done);
@@ -2588,13 +2628,13 @@
// SP + 4: left operand.
// SP + 0: right operand.
// Return Zero condition flag set if equal.
-void StubCode::GenerateUnoptimizedIdenticalWithNumberCheckStub(
+void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub(
Assembler* assembler) {
#if !defined(PRODUCT)
// Check single stepping.
Label stepping, done_stepping;
__ LoadIsolate(R1);
- __ ldrb(R1, Address(R1, Isolate::single_step_offset()));
+ __ ldrb(R1, Address(R1, target::Isolate::single_step_offset()));
__ CompareImmediate(R1, 0);
__ b(&stepping, NE);
__ Bind(&done_stepping);
@@ -2603,8 +2643,8 @@
const Register temp = R2;
const Register left = R1;
const Register right = R0;
- __ ldr(left, Address(SP, 1 * kWordSize));
- __ ldr(right, Address(SP, 0 * kWordSize));
+ __ ldr(left, Address(SP, 1 * target::kWordSize));
+ __ ldr(right, Address(SP, 0 * target::kWordSize));
GenerateIdenticalWithNumberCheckStub(assembler, left, right, temp);
__ Ret();
@@ -2623,13 +2663,13 @@
// SP + 4: left operand.
// SP + 0: right operand.
// Return Zero condition flag set if equal.
-void StubCode::GenerateOptimizedIdenticalWithNumberCheckStub(
+void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub(
Assembler* assembler) {
const Register temp = R2;
const Register left = R1;
const Register right = R0;
- __ ldr(left, Address(SP, 1 * kWordSize));
- __ ldr(right, Address(SP, 0 * kWordSize));
+ __ ldr(left, Address(SP, 1 * target::kWordSize));
+ __ ldr(right, Address(SP, 0 * target::kWordSize));
GenerateIdenticalWithNumberCheckStub(assembler, left, right, temp);
__ Ret();
}
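On 32-bit ARM the Double and Mint payloads are 64 bits wide, so the sequences above compare them one word at a time (value_offset + 0/1 * target::kWordSize). In C++ terms the check is roughly:

  #include <cstdint>
  #include <cstring>

  // Sketch: bitwise identity of two 64-bit payloads, compared as two
  // 32-bit words, mirroring the ldr/cmp pairs above.
  bool BitwiseIdentical64(const void* left, const void* right) {
    uint32_t l[2], r[2];
    std::memcpy(l, left, sizeof(l));
    std::memcpy(r, right, sizeof(r));
    return l[0] == r[0] && l[1] == r[1];
  }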
@@ -2640,16 +2680,16 @@
// Passed to target:
// CODE_REG: target Code
// R4: arguments descriptor
-void StubCode::GenerateMegamorphicCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
__ LoadTaggedClassIdMayBeSmi(R0, R0);
// R0: receiver cid as Smi.
- __ ldr(R2, FieldAddress(R9, MegamorphicCache::buckets_offset()));
- __ ldr(R1, FieldAddress(R9, MegamorphicCache::mask_offset()));
+ __ ldr(R2, FieldAddress(R9, target::MegamorphicCache::buckets_offset()));
+ __ ldr(R1, FieldAddress(R9, target::MegamorphicCache::mask_offset()));
// R2: cache buckets array.
// R1: mask as a smi.
// Compute the table index.
- ASSERT(MegamorphicCache::kSpreadFactor == 7);
+ ASSERT(target::MegamorphicCache::kSpreadFactor == 7);
// Use reverse subtract to multiply by 7 == 8 - 1.
__ rsb(R3, R0, Operand(R0, LSL, 3));
// R3: probe.
@@ -2657,7 +2697,7 @@
__ Bind(&loop);
__ and_(R3, R3, Operand(R1));
- const intptr_t base = Array::data_offset();
+ const intptr_t base = target::Array::data_offset();
// R3 is smi tagged, but table entries are two words, so LSL 2.
Label probe_failed;
__ add(IP, R2, Operand(R3, LSL, 2));
@@ -2671,17 +2711,19 @@
// proper target for the given name and arguments descriptor. If the
// illegal class id was found, the target is a cache miss handler that can
// be invoked as a normal Dart function.
- const auto target_address = FieldAddress(IP, base + kWordSize);
+ const auto target_address = FieldAddress(IP, base + target::kWordSize);
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
__ ldr(ARGS_DESC_REG,
- FieldAddress(R9, MegamorphicCache::arguments_descriptor_offset()));
+ FieldAddress(
+ R9, target::MegamorphicCache::arguments_descriptor_offset()));
__ Branch(target_address);
} else {
__ ldr(R0, target_address);
- __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
+ __ ldr(CODE_REG, FieldAddress(R0, target::Function::code_offset()));
__ ldr(ARGS_DESC_REG,
- FieldAddress(R9, MegamorphicCache::arguments_descriptor_offset()));
- __ Branch(FieldAddress(R0, Function::entry_point_offset()));
+ FieldAddress(
+ R9, target::MegamorphicCache::arguments_descriptor_offset()));
+ __ Branch(FieldAddress(R0, target::Function::entry_point_offset()));
}
// Probe failed, check if it is a miss.
@@ -2691,7 +2733,7 @@
__ b(&load_target, EQ); // branch if miss.
// Try next entry in the table.
- __ AddImmediate(R3, Smi::RawValue(1));
+ __ AddImmediate(R3, target::ToRawSmi(1));
__ b(&loop);
}
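Two details of the probe sequence are easy to miss: rsb computes cid * 7 as (cid << 3) - cid, and the probe index and mask stay Smi-tagged throughout (the final LSL 2 turns the tagged index into a byte offset for the two-word entries). A hedged C++ model of the loop, written with untagged values for clarity:

  #include <cstdint>

  struct Entry {
    intptr_t cid;     // receiver class id, or the illegal cid sentinel
    intptr_t target;  // Function to call, or the cache-miss handler
  };

  // Illustrative only; the stub performs the same arithmetic on Smi-tagged
  // values. kSpreadFactor == 7, as asserted above.
  intptr_t Probe(const Entry* buckets, intptr_t mask, intptr_t cid,
                 intptr_t illegal_cid) {
    intptr_t probe = (cid << 3) - cid;  // cid * 7 via reverse subtract
    for (;;) {
      probe &= mask;
      if (buckets[probe].cid == cid || buckets[probe].cid == illegal_cid) {
        return buckets[probe].target;  // hit, or the miss handler
      }
      probe += 1;  // linear step on collision
    }
  }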
@@ -2701,12 +2743,12 @@
// Passed to target:
// CODE_REG: target Code object
// R4: arguments descriptor
-void StubCode::GenerateICCallThroughFunctionStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateICCallThroughFunctionStub(Assembler* assembler) {
Label loop, found, miss;
__ ldr(ARGS_DESC_REG,
- FieldAddress(R9, ICData::arguments_descriptor_offset()));
- __ ldr(R8, FieldAddress(R9, ICData::ic_data_offset()));
- __ AddImmediate(R8, Array::data_offset() - kHeapObjectTag);
+ FieldAddress(R9, target::ICData::arguments_descriptor_offset()));
+ __ ldr(R8, FieldAddress(R9, target::ICData::ic_data_offset()));
+ __ AddImmediate(R8, target::Array::data_offset() - kHeapObjectTag);
// R8: first IC entry
__ LoadTaggedClassIdMayBeSmi(R1, R0);
// R1: receiver cid as Smi
@@ -2715,31 +2757,33 @@
__ ldr(R2, Address(R8, 0));
__ cmp(R1, Operand(R2));
__ b(&found, EQ);
- __ CompareImmediate(R2, Smi::RawValue(kIllegalCid));
+ __ CompareImmediate(R2, target::ToRawSmi(kIllegalCid));
__ b(&miss, EQ);
const intptr_t entry_length =
- ICData::TestEntryLengthFor(1, /*tracking_exactness=*/false) * kWordSize;
+ target::ICData::TestEntryLengthFor(1, /*tracking_exactness=*/false) *
+ target::kWordSize;
__ AddImmediate(R8, entry_length); // Next entry.
__ b(&loop);
__ Bind(&found);
- const intptr_t target_offset = ICData::TargetIndexFor(1) * kWordSize;
+ const intptr_t target_offset =
+ target::ICData::TargetIndexFor(1) * target::kWordSize;
__ LoadFromOffset(kWord, R0, R8, target_offset);
- __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
- __ Branch(FieldAddress(R0, Function::entry_point_offset()));
+ __ ldr(CODE_REG, FieldAddress(R0, target::Function::code_offset()));
+ __ Branch(FieldAddress(R0, target::Function::entry_point_offset()));
__ Bind(&miss);
__ LoadIsolate(R2);
- __ ldr(CODE_REG, Address(R2, Isolate::ic_miss_code_offset()));
- __ Branch(FieldAddress(CODE_REG, Code::entry_point_offset()));
+ __ ldr(CODE_REG, Address(R2, target::Isolate::ic_miss_code_offset()));
+ __ Branch(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
}
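Both IC-call stubs share one shape: scan fixed-length entries whose first slot is a Smi-tagged receiver cid, stop at the kIllegalCid sentinel, and on a hit read the target (or code/entry-point) slots of the same entry. A compact sketch; the entry length and slot indices are computed via target::ICData::* above, so the parameters here are illustrative:

  #include <cstdint>

  // Returns the matching entry, or nullptr to signal a cache miss.
  const intptr_t* FindICEntry(const intptr_t* entries, intptr_t entry_length,
                              intptr_t cid_smi, intptr_t illegal_cid_smi) {
    for (const intptr_t* e = entries;; e += entry_length) {
      if (e[0] == cid_smi) return e;                // found
      if (e[0] == illegal_cid_smi) return nullptr;  // sentinel: miss
    }
  }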
-void StubCode::GenerateICCallThroughCodeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
Label loop, found, miss;
- __ ldr(R8, FieldAddress(R9, ICData::ic_data_offset()));
- __ ldr(R4, FieldAddress(R9, ICData::arguments_descriptor_offset()));
- __ AddImmediate(R8, Array::data_offset() - kHeapObjectTag);
+ __ ldr(R8, FieldAddress(R9, target::ICData::ic_data_offset()));
+ __ ldr(R4, FieldAddress(R9, target::ICData::arguments_descriptor_offset()));
+ __ AddImmediate(R8, target::Array::data_offset() - kHeapObjectTag);
// R8: first IC entry
__ LoadTaggedClassIdMayBeSmi(R1, R0);
// R1: receiver cid as Smi
@@ -2748,17 +2792,20 @@
__ ldr(R2, Address(R8, 0));
__ cmp(R1, Operand(R2));
__ b(&found, EQ);
- __ CompareImmediate(R2, Smi::RawValue(kIllegalCid));
+ __ CompareImmediate(R2, target::ToRawSmi(kIllegalCid));
__ b(&miss, EQ);
const intptr_t entry_length =
- ICData::TestEntryLengthFor(1, /*tracking_exactness=*/false) * kWordSize;
+ target::ICData::TestEntryLengthFor(1, /*tracking_exactness=*/false) *
+ target::kWordSize;
__ AddImmediate(R8, entry_length); // Next entry.
__ b(&loop);
__ Bind(&found);
- const intptr_t code_offset = ICData::CodeIndexFor(1) * kWordSize;
- const intptr_t entry_offset = ICData::EntryPointIndexFor(1) * kWordSize;
+ const intptr_t code_offset =
+ target::ICData::CodeIndexFor(1) * target::kWordSize;
+ const intptr_t entry_offset =
+ target::ICData::EntryPointIndexFor(1) * target::kWordSize;
if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
__ ldr(CODE_REG, Address(R8, code_offset));
}
@@ -2766,14 +2813,14 @@
__ Bind(&miss);
__ LoadIsolate(R2);
- __ ldr(CODE_REG, Address(R2, Isolate::ic_miss_code_offset()));
- __ Branch(FieldAddress(CODE_REG, Code::entry_point_offset()));
+ __ ldr(CODE_REG, Address(R2, target::Isolate::ic_miss_code_offset()));
+ __ Branch(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
}
// Called from switchable IC calls.
// R0: receiver
// R9: UnlinkedCall
-void StubCode::GenerateUnlinkedCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateUnlinkedCallStub(Assembler* assembler) {
__ EnterStubFrame();
__ Push(R0); // Preserve receiver.
@@ -2788,9 +2835,10 @@
__ Pop(R0); // Restore receiver.
__ LeaveStubFrame();
- __ ldr(CODE_REG, Address(THR, Thread::ic_lookup_through_code_stub_offset()));
+ __ ldr(CODE_REG,
+ Address(THR, target::Thread::ic_lookup_through_code_stub_offset()));
__ Branch(FieldAddress(
- CODE_REG, Code::entry_point_offset(Code::EntryKind::kMonomorphic)));
+ CODE_REG, target::Code::entry_point_offset(CodeEntryKind::kMonomorphic)));
}
// Called from switchable IC calls.
@@ -2798,19 +2846,22 @@
// R9: SingleTargetCache
// Passed to target:
// CODE_REG: target Code object
-void StubCode::GenerateSingleTargetCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSingleTargetCallStub(Assembler* assembler) {
Label miss;
__ LoadClassIdMayBeSmi(R1, R0);
- __ ldrh(R2, FieldAddress(R9, SingleTargetCache::lower_limit_offset()));
- __ ldrh(R3, FieldAddress(R9, SingleTargetCache::upper_limit_offset()));
+ __ ldrh(R2,
+ FieldAddress(R9, target::SingleTargetCache::lower_limit_offset()));
+ __ ldrh(R3,
+ FieldAddress(R9, target::SingleTargetCache::upper_limit_offset()));
__ cmp(R1, Operand(R2));
__ b(&miss, LT);
__ cmp(R1, Operand(R3));
__ b(&miss, GT);
- __ ldr(CODE_REG, FieldAddress(R9, SingleTargetCache::target_offset()));
- __ Branch(FieldAddress(R9, SingleTargetCache::entry_point_offset()));
+ __ ldr(CODE_REG,
+ FieldAddress(R9, target::SingleTargetCache::target_offset()));
+ __ Branch(FieldAddress(R9, target::SingleTargetCache::entry_point_offset()));
__ Bind(&miss);
__ EnterStubFrame();
@@ -2826,15 +2877,17 @@
__ Pop(R0); // Restore receiver.
__ LeaveStubFrame();
- __ ldr(CODE_REG, Address(THR, Thread::ic_lookup_through_code_stub_offset()));
+ __ ldr(CODE_REG,
+ Address(THR, target::Thread::ic_lookup_through_code_stub_offset()));
__ Branch(FieldAddress(
- CODE_REG, Code::entry_point_offset(Code::EntryKind::kMonomorphic)));
+ CODE_REG, target::Code::entry_point_offset(CodeEntryKind::kMonomorphic)));
}
// Called from the monomorphic checked entry.
// R0: receiver
-void StubCode::GenerateMonomorphicMissStub(Assembler* assembler) {
- __ ldr(CODE_REG, Address(THR, Thread::monomorphic_miss_stub_offset()));
+void StubCodeCompiler::GenerateMonomorphicMissStub(Assembler* assembler) {
+ __ ldr(CODE_REG,
+ Address(THR, target::Thread::monomorphic_miss_stub_offset()));
__ EnterStubFrame();
__ Push(R0); // Preserve receiver.
@@ -2848,19 +2901,23 @@
__ Pop(R0); // Restore receiver.
__ LeaveStubFrame();
- __ ldr(CODE_REG, Address(THR, Thread::ic_lookup_through_code_stub_offset()));
+ __ ldr(CODE_REG,
+ Address(THR, target::Thread::ic_lookup_through_code_stub_offset()));
__ Branch(FieldAddress(
- CODE_REG, Code::entry_point_offset(Code::EntryKind::kMonomorphic)));
+ CODE_REG, target::Code::entry_point_offset(CodeEntryKind::kMonomorphic)));
}
-void StubCode::GenerateFrameAwaitingMaterializationStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateFrameAwaitingMaterializationStub(
+ Assembler* assembler) {
__ bkpt(0);
}
-void StubCode::GenerateAsynchronousGapMarkerStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAsynchronousGapMarkerStub(Assembler* assembler) {
__ bkpt(0);
}
+} // namespace compiler
+
} // namespace dart
#endif // defined(TARGET_ARCH_ARM) && !defined(DART_PRECOMPILED_RUNTIME)
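The arm64 file that follows applies the same pattern as the ARM one above: stub generators live in dart::compiler::StubCodeCompiler and read every offset and size through dart::compiler::target, so they no longer need runtime headers (enforced by the SHOULD_NOT_INCLUDE_RUNTIME define below). An illustrative skeleton of the layering; the member lists are examples, not the full interface:

  #include <cstdint>

  namespace dart {
  namespace compiler {

  class Assembler;  // the per-ISA assembler, as before

  namespace target {
  // Computed for the *target* architecture, so the same host binary can
  // cross-compile stubs.
  extern const intptr_t kWordSize;
  class Thread {
   public:
    static intptr_t vm_tag_offset();
    static intptr_t top_exit_frame_info_offset();
  };
  }  // namespace target

  class StubCodeCompiler {
   public:
    // One Generate* entry point per stub, as renamed throughout this diff.
    static void GenerateCallToRuntimeStub(Assembler* assembler);
  };

  }  // namespace compiler
  }  // namespace dart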
diff --git a/runtime/vm/stub_code_arm64.cc b/runtime/vm/compiler/stub_code_compiler_arm64.cc
similarity index 66%
rename from runtime/vm/stub_code_arm64.cc
rename to runtime/vm/compiler/stub_code_compiler_arm64.cc
index 6305578..58f370c 100644
--- a/runtime/vm/stub_code_arm64.cc
+++ b/runtime/vm/compiler/stub_code_compiler_arm64.cc
@@ -1,22 +1,23 @@
-// Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
+// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#include "vm/globals.h"
+
+#define SHOULD_NOT_INCLUDE_RUNTIME
+
+#include "vm/compiler/stub_code_compiler.h"
+
#if defined(TARGET_ARCH_ARM64) && !defined(DART_PRECOMPILED_RUNTIME)
+#include "vm/class_id.h"
+#include "vm/code_entry_kind.h"
#include "vm/compiler/assembler/assembler.h"
-#include "vm/compiler/backend/flow_graph_compiler.h"
-#include "vm/compiler/jit/compiler.h"
-#include "vm/dart_entry.h"
-#include "vm/heap/heap.h"
+#include "vm/compiler/backend/locations.h"
+#include "vm/constants_arm64.h"
#include "vm/instructions.h"
-#include "vm/object_store.h"
-#include "vm/runtime_entry.h"
-#include "vm/stack_frame.h"
-#include "vm/stub_code.h"
+#include "vm/static_type_exactness_state.h"
#include "vm/tags.h"
-#include "vm/type_testing_stubs.h"
#define __ assembler->
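As in the ARM file, the __ shorthand routes each pseudo-assembly line through the Assembler parameter of the enclosing Generate* function, for example:

  __ EnterStubFrame();  // expands to assembler->EnterStubFrame();
  __ ret();             // expands to assembler->ret();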
@@ -30,6 +31,8 @@
DECLARE_FLAG(bool, enable_interpreter);
DECLARE_FLAG(bool, precompiled_mode);
+namespace compiler {
+
// Input parameters:
// LR : return address.
// SP : address of last argument in argument array.
@@ -37,26 +40,26 @@
// SP + 8*R4 : address of return value.
// R5 : address of the runtime function to call.
// R4 : number of arguments to the call.
-void StubCode::GenerateCallToRuntimeStub(Assembler* assembler) {
- const intptr_t thread_offset = NativeArguments::thread_offset();
- const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset();
- const intptr_t argv_offset = NativeArguments::argv_offset();
- const intptr_t retval_offset = NativeArguments::retval_offset();
+void StubCodeCompiler::GenerateCallToRuntimeStub(Assembler* assembler) {
+ const intptr_t thread_offset = target::NativeArguments::thread_offset();
+ const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
+ const intptr_t argv_offset = target::NativeArguments::argv_offset();
+ const intptr_t retval_offset = target::NativeArguments::retval_offset();
__ Comment("CallToRuntimeStub");
- __ ldr(CODE_REG, Address(THR, Thread::call_to_runtime_stub_offset()));
+ __ ldr(CODE_REG, Address(THR, target::Thread::call_to_runtime_stub_offset()));
__ SetPrologueOffset();
__ EnterStubFrame();
// Save exit frame information to enable stack walking as we are about
// to transition to Dart VM C++ code.
- __ StoreToOffset(FP, THR, Thread::top_exit_frame_info_offset());
+ __ StoreToOffset(FP, THR, target::Thread::top_exit_frame_info_offset());
#if defined(DEBUG)
{
Label ok;
// Check that we are always entering from Dart code.
- __ LoadFromOffset(R8, THR, Thread::vm_tag_offset());
+ __ LoadFromOffset(R8, THR, target::Thread::vm_tag_offset());
__ CompareImmediate(R8, VMTag::kDartCompiledTagId);
__ b(&ok, EQ);
__ Stop("Not coming from Dart code.");
@@ -65,41 +68,42 @@
#endif
// Mark that the thread is executing VM code.
- __ StoreToOffset(R5, THR, Thread::vm_tag_offset());
+ __ StoreToOffset(R5, THR, target::Thread::vm_tag_offset());
// Reserve space for arguments and align frame before entering C++ world.
- // NativeArguments are passed in registers.
+ // target::NativeArguments are passed in registers.
__ Comment("align stack");
// Reserve space for arguments.
- ASSERT(sizeof(NativeArguments) == 4 * kWordSize);
- __ ReserveAlignedFrameSpace(sizeof(NativeArguments));
+ ASSERT(target::NativeArguments::StructSize() == 4 * target::kWordSize);
+ __ ReserveAlignedFrameSpace(target::NativeArguments::StructSize());
- // Pass NativeArguments structure by value and call runtime.
+ // Pass target::NativeArguments structure by value and call runtime.
// Registers R0, R1, R2, and R3 are used.
- ASSERT(thread_offset == 0 * kWordSize);
+ ASSERT(thread_offset == 0 * target::kWordSize);
// Set thread in NativeArgs.
__ mov(R0, THR);
// There are no runtime calls to closures, so we do not need to set the tag
// bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_.
- ASSERT(argc_tag_offset == 1 * kWordSize);
- __ mov(R1, R4); // Set argc in NativeArguments.
+ ASSERT(argc_tag_offset == 1 * target::kWordSize);
+ __ mov(R1, R4); // Set argc in target::NativeArguments.
- ASSERT(argv_offset == 2 * kWordSize);
+ ASSERT(argv_offset == 2 * target::kWordSize);
__ add(R2, ZR, Operand(R4, LSL, 3));
__ add(R2, FP, Operand(R2)); // Compute argv.
- // Set argv in NativeArguments.
- __ AddImmediate(R2, kParamEndSlotFromFp * kWordSize);
+ // Set argv in target::NativeArguments.
+ __ AddImmediate(R2,
+ target::frame_layout.param_end_from_fp * target::kWordSize);
- ASSERT(retval_offset == 3 * kWordSize);
- __ AddImmediate(R3, R2, kWordSize);
+ ASSERT(retval_offset == 3 * target::kWordSize);
+ __ AddImmediate(R3, R2, target::kWordSize);
__ StoreToOffset(R0, SP, thread_offset);
__ StoreToOffset(R1, SP, argc_tag_offset);
__ StoreToOffset(R2, SP, argv_offset);
__ StoreToOffset(R3, SP, retval_offset);
- __ mov(R0, SP); // Pass the pointer to the NativeArguments.
+ __ mov(R0, SP); // Pass the pointer to the target::NativeArguments.
// We are entering runtime code, so the C stack pointer must be restored from
// the stack limit to the top of the stack. We cache the stack limit address
@@ -115,20 +119,21 @@
__ mov(CSP, R25);
// Refresh write barrier mask.
- __ ldr(BARRIER_MASK, Address(THR, Thread::write_barrier_mask_offset()));
+ __ ldr(BARRIER_MASK,
+ Address(THR, target::Thread::write_barrier_mask_offset()));
// Retval is next to 1st argument.
// Mark that the thread is executing Dart code.
__ LoadImmediate(R2, VMTag::kDartCompiledTagId);
- __ StoreToOffset(R2, THR, Thread::vm_tag_offset());
+ __ StoreToOffset(R2, THR, target::Thread::vm_tag_offset());
// Reset exit frame information in Isolate structure.
- __ StoreToOffset(ZR, THR, Thread::top_exit_frame_info_offset());
+ __ StoreToOffset(ZR, THR, target::Thread::top_exit_frame_info_offset());
// Restore the global object pool after returning from runtime (old space is
// moving, so the GOP could have been relocated).
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
- __ ldr(PP, Address(THR, Thread::global_object_pool_offset()));
+ __ ldr(PP, Address(THR, target::Thread::global_object_pool_offset()));
__ sub(PP, PP, Operand(kHeapObjectTag)); // Pool in PP is untagged!
}
@@ -143,11 +148,12 @@
__ ret();
}
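The four ASSERTs above pin down the target::NativeArguments layout the stub fills in on the C stack: four word-sized fields in a fixed order. A plain C++ mirror of that layout (illustrative; the real offsets come from target::NativeArguments):

  #include <cstdint>

  struct NativeArgumentsMirror {
    intptr_t thread;    // thread_offset   == 0 * target::kWordSize
    intptr_t argc_tag;  // argc_tag_offset == 1 * target::kWordSize
    intptr_t argv;      // argv_offset     == 2 * target::kWordSize
    intptr_t retval;    // retval_offset   == 3 * target::kWordSize
  };
  static_assert(sizeof(NativeArgumentsMirror) == 4 * sizeof(intptr_t),
                "four word-sized fields, mirroring StructSize()");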
-void StubCode::GenerateSharedStub(Assembler* assembler,
- bool save_fpu_registers,
- const RuntimeEntry* target,
- intptr_t self_code_stub_offset_from_thread,
- bool allow_return) {
+void StubCodeCompiler::GenerateSharedStub(
+ Assembler* assembler,
+ bool save_fpu_registers,
+ const RuntimeEntry* target,
+ intptr_t self_code_stub_offset_from_thread,
+ bool allow_return) {
// We want the saved registers to appear to be part of the caller's frame, so
// we push them before calling EnterStubFrame.
RegisterSet all_registers;
@@ -172,29 +178,21 @@
// R1: The extracted method.
// R4: The type_arguments_field_offset (or 0)
-void StubCode::GenerateBuildMethodExtractorStub(Assembler* assembler) {
- Thread* thread = Thread::Current();
- Zone* Z = thread->zone();
- ObjectStore* object_store = thread->isolate()->object_store();
-
- const auto& closure_class =
- Class::ZoneHandle(Z, object_store->closure_class());
- const auto& closure_allocation_stub =
- Code::ZoneHandle(Z, StubCode::GetAllocationStubForClass(closure_class));
-
+void StubCodeCompiler::GenerateBuildMethodExtractorStub(
+ Assembler* assembler,
+ const Object& closure_allocation_stub,
+ const Object& context_allocation_stub) {
const intptr_t kReceiverOffset =
compiler::target::frame_layout.param_end_from_fp + 1;
- const auto& context_allocation_stub = StubCode::AllocateContext();
-
__ EnterStubFrame();
// Build type_arguments vector (or null)
Label no_type_args;
- __ ldr(R3, Address(THR, Thread::object_null_offset()), kDoubleWord);
+ __ ldr(R3, Address(THR, target::Thread::object_null_offset()), kDoubleWord);
__ cmp(R4, Operand(0));
__ b(&no_type_args, EQ);
- __ ldr(R0, Address(FP, kReceiverOffset * kWordSize));
+ __ ldr(R0, Address(FP, kReceiverOffset * target::kWordSize));
__ ldr(R3, Address(R0, R4));
__ Bind(&no_type_args);
@@ -204,86 +202,94 @@
// Allocate context.
{
Label done, slow_path;
- __ TryAllocateArray(kContextCid, Context::InstanceSize(1), &slow_path,
+ __ TryAllocateArray(kContextCid, target::Context::InstanceSize(1),
+ &slow_path,
R0, // instance
R1, // end address
R2, R3);
- __ ldr(R1, Address(THR, Thread::object_null_offset()));
- __ str(R1, FieldAddress(R0, Context::parent_offset()));
+ __ ldr(R1, Address(THR, target::Thread::object_null_offset()));
+ __ str(R1, FieldAddress(R0, target::Context::parent_offset()));
__ LoadImmediate(R1, 1);
- __ str(R1, FieldAddress(R0, Context::num_variables_offset()));
+ __ str(R1, FieldAddress(R0, target::Context::num_variables_offset()));
__ b(&done);
__ Bind(&slow_path);
__ LoadImmediate(/*num_vars=*/R1, 1);
__ LoadObject(CODE_REG, context_allocation_stub);
- __ ldr(R0, FieldAddress(CODE_REG, Code::entry_point_offset()));
+ __ ldr(R0, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
__ blr(R0);
__ Bind(&done);
}
// Store receiver in context
- __ ldr(R1, Address(FP, kWordSize * kReceiverOffset));
- __ StoreIntoObject(R0, FieldAddress(R0, Context::variable_offset(0)), R1);
+ __ ldr(R1, Address(FP, target::kWordSize * kReceiverOffset));
+ __ StoreIntoObject(R0, FieldAddress(R0, target::Context::variable_offset(0)),
+ R1);
// Push context.
__ Push(R0);
// Allocate closure.
__ LoadObject(CODE_REG, closure_allocation_stub);
- __ ldr(R1, FieldAddress(CODE_REG, Code::entry_point_offset(
- Code::EntryKind::kUnchecked)));
+ __ ldr(R1, FieldAddress(CODE_REG, target::Code::entry_point_offset(
+ CodeEntryKind::kUnchecked)));
__ blr(R1);
// Populate closure object.
__ Pop(R1); // Pop context.
- __ StoreIntoObject(R0, FieldAddress(R0, Closure::context_offset()), R1);
+ __ StoreIntoObject(R0, FieldAddress(R0, target::Closure::context_offset()),
+ R1);
__ PopPair(R3, R1); // Pop type arguments & extracted method.
- __ StoreIntoObjectNoBarrier(R0, FieldAddress(R0, Closure::function_offset()),
- R1);
__ StoreIntoObjectNoBarrier(
- R0, FieldAddress(R0, Closure::instantiator_type_arguments_offset()), R3);
- __ LoadObject(R1, Object::empty_type_arguments());
+ R0, FieldAddress(R0, target::Closure::function_offset()), R1);
__ StoreIntoObjectNoBarrier(
- R0, FieldAddress(R0, Closure::delayed_type_arguments_offset()), R1);
+ R0,
+ FieldAddress(R0, target::Closure::instantiator_type_arguments_offset()),
+ R3);
+ __ LoadObject(R1, EmptyTypeArguments());
+ __ StoreIntoObjectNoBarrier(
+ R0, FieldAddress(R0, target::Closure::delayed_type_arguments_offset()),
+ R1);
__ LeaveStubFrame();
__ Ret();
}
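In outline, the extractor stub allocates a one-slot Context to capture the receiver and wires a fresh Closure to the extracted method. A toy C++ model of the stores performed above; field names mirror the target::Context/target::Closure offsets and are not VM code:

  #include <cstdint>

  struct Object;
  struct Context {
    Context* parent;         // set to null
    intptr_t num_variables;  // set to 1
    Object* variable0;       // the receiver, from FP[kReceiverOffset]
  };
  struct Closure {
    Object* function;                     // the extracted method (R1)
    Context* context;                     // captures the receiver
    Object* instantiator_type_arguments;  // R3, or null
    Object* delayed_type_arguments;       // EmptyTypeArguments()
  };

  void WireUp(Closure* c, Context* ctx, Object* receiver, Object* method,
              Object* type_args, Object* empty_type_args) {
    ctx->parent = nullptr;
    ctx->num_variables = 1;
    ctx->variable0 = receiver;
    c->context = ctx;
    c->function = method;
    c->instantiator_type_arguments = type_args;
    c->delayed_type_arguments = empty_type_args;
  }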
-void StubCode::GenerateNullErrorSharedWithoutFPURegsStub(Assembler* assembler) {
- GenerateSharedStub(assembler, /*save_fpu_registers=*/false,
- &kNullErrorRuntimeEntry,
- Thread::null_error_shared_without_fpu_regs_stub_offset(),
- /*allow_return=*/false);
+void StubCodeCompiler::GenerateNullErrorSharedWithoutFPURegsStub(
+ Assembler* assembler) {
+ GenerateSharedStub(
+ assembler, /*save_fpu_registers=*/false, &kNullErrorRuntimeEntry,
+ target::Thread::null_error_shared_without_fpu_regs_stub_offset(),
+ /*allow_return=*/false);
}
-void StubCode::GenerateNullErrorSharedWithFPURegsStub(Assembler* assembler) {
- GenerateSharedStub(assembler, /*save_fpu_registers=*/true,
- &kNullErrorRuntimeEntry,
- Thread::null_error_shared_with_fpu_regs_stub_offset(),
- /*allow_return=*/false);
+void StubCodeCompiler::GenerateNullErrorSharedWithFPURegsStub(
+ Assembler* assembler) {
+ GenerateSharedStub(
+ assembler, /*save_fpu_registers=*/true, &kNullErrorRuntimeEntry,
+ target::Thread::null_error_shared_with_fpu_regs_stub_offset(),
+ /*allow_return=*/false);
}
-void StubCode::GenerateStackOverflowSharedWithoutFPURegsStub(
+void StubCodeCompiler::GenerateStackOverflowSharedWithoutFPURegsStub(
Assembler* assembler) {
GenerateSharedStub(
assembler, /*save_fpu_registers=*/false, &kStackOverflowRuntimeEntry,
- Thread::stack_overflow_shared_without_fpu_regs_stub_offset(),
+ target::Thread::stack_overflow_shared_without_fpu_regs_stub_offset(),
/*allow_return=*/true);
}
-void StubCode::GenerateStackOverflowSharedWithFPURegsStub(
+void StubCodeCompiler::GenerateStackOverflowSharedWithFPURegsStub(
Assembler* assembler) {
- GenerateSharedStub(assembler, /*save_fpu_registers=*/true,
- &kStackOverflowRuntimeEntry,
- Thread::stack_overflow_shared_with_fpu_regs_stub_offset(),
- /*allow_return=*/true);
+ GenerateSharedStub(
+ assembler, /*save_fpu_registers=*/true, &kStackOverflowRuntimeEntry,
+ target::Thread::stack_overflow_shared_with_fpu_regs_stub_offset(),
+ /*allow_return=*/true);
}
-void StubCode::GeneratePrintStopMessageStub(Assembler* assembler) {
+void StubCodeCompiler::GeneratePrintStopMessageStub(Assembler* assembler) {
__ Stop("GeneratePrintStopMessageStub");
}
@@ -295,22 +301,22 @@
// R1 : argc_tag including number of arguments and function kind.
static void GenerateCallNativeWithWrapperStub(Assembler* assembler,
Address wrapper) {
- const intptr_t thread_offset = NativeArguments::thread_offset();
- const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset();
- const intptr_t argv_offset = NativeArguments::argv_offset();
- const intptr_t retval_offset = NativeArguments::retval_offset();
+ const intptr_t thread_offset = target::NativeArguments::thread_offset();
+ const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
+ const intptr_t argv_offset = target::NativeArguments::argv_offset();
+ const intptr_t retval_offset = target::NativeArguments::retval_offset();
__ EnterStubFrame();
// Save exit frame information to enable stack walking as we are about
// to transition to native code.
- __ StoreToOffset(FP, THR, Thread::top_exit_frame_info_offset());
+ __ StoreToOffset(FP, THR, target::Thread::top_exit_frame_info_offset());
#if defined(DEBUG)
{
Label ok;
// Check that we are always entering from Dart code.
- __ LoadFromOffset(R6, THR, Thread::vm_tag_offset());
+ __ LoadFromOffset(R6, THR, target::Thread::vm_tag_offset());
__ CompareImmediate(R6, VMTag::kDartCompiledTagId);
__ b(&ok, EQ);
__ Stop("Not coming from Dart code.");
@@ -319,31 +325,31 @@
#endif
// Mark that the thread is executing native code.
- __ StoreToOffset(R5, THR, Thread::vm_tag_offset());
+ __ StoreToOffset(R5, THR, target::Thread::vm_tag_offset());
// Reserve space for the native arguments structure passed on the stack (the
// outgoing pointer parameter to the native arguments structure is passed in
// R0) and align frame before entering the C++ world.
- __ ReserveAlignedFrameSpace(sizeof(NativeArguments));
+ __ ReserveAlignedFrameSpace(target::NativeArguments::StructSize());
- // Initialize NativeArguments structure and call native function.
+ // Initialize target::NativeArguments structure and call native function.
// Registers R0, R1, R2, and R3 are used.
- ASSERT(thread_offset == 0 * kWordSize);
+ ASSERT(thread_offset == 0 * target::kWordSize);
// Set thread in NativeArgs.
__ mov(R0, THR);
// There are no native calls to closures, so we do not need to set the tag
// bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_.
- ASSERT(argc_tag_offset == 1 * kWordSize);
- // Set argc in NativeArguments: R1 already contains argc.
+ ASSERT(argc_tag_offset == 1 * target::kWordSize);
+ // Set argc in target::NativeArguments: R1 already contains argc.
- ASSERT(argv_offset == 2 * kWordSize);
- // Set argv in NativeArguments: R2 already contains argv.
+ ASSERT(argv_offset == 2 * target::kWordSize);
+ // Set argv in target::NativeArguments: R2 already contains argv.
// Set retval in NativeArgs.
- ASSERT(retval_offset == 3 * kWordSize);
- __ AddImmediate(R3, FP, 2 * kWordSize);
+ ASSERT(retval_offset == 3 * target::kWordSize);
+ __ AddImmediate(R3, FP, 2 * target::kWordSize);
// Passing the structure by value as in runtime calls would require changing
// the Dart API for native functions.
@@ -352,7 +358,7 @@
__ StoreToOffset(R1, SP, argc_tag_offset);
__ StoreToOffset(R2, SP, argv_offset);
__ StoreToOffset(R3, SP, retval_offset);
- __ mov(R0, SP); // Pass the pointer to the NativeArguments.
+ __ mov(R0, SP); // Pass the pointer to the target::NativeArguments.
// We are entering runtime code, so the C stack pointer must be restored from
// the stack limit to the top of the stack. We cache the stack limit address
@@ -371,29 +377,32 @@
__ mov(CSP, R25);
// Refresh write barrier mask.
- __ ldr(BARRIER_MASK, Address(THR, Thread::write_barrier_mask_offset()));
+ __ ldr(BARRIER_MASK,
+ Address(THR, target::Thread::write_barrier_mask_offset()));
// Mark that the thread is executing Dart code.
__ LoadImmediate(R2, VMTag::kDartCompiledTagId);
- __ StoreToOffset(R2, THR, Thread::vm_tag_offset());
+ __ StoreToOffset(R2, THR, target::Thread::vm_tag_offset());
// Reset exit frame information in Isolate structure.
- __ StoreToOffset(ZR, THR, Thread::top_exit_frame_info_offset());
+ __ StoreToOffset(ZR, THR, target::Thread::top_exit_frame_info_offset());
__ LeaveStubFrame();
__ ret();
}
-void StubCode::GenerateCallNoScopeNativeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallNoScopeNativeStub(Assembler* assembler) {
GenerateCallNativeWithWrapperStub(
assembler,
- Address(THR, Thread::no_scope_native_wrapper_entry_point_offset()));
+ Address(THR,
+ target::Thread::no_scope_native_wrapper_entry_point_offset()));
}
-void StubCode::GenerateCallAutoScopeNativeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallAutoScopeNativeStub(Assembler* assembler) {
GenerateCallNativeWithWrapperStub(
assembler,
- Address(THR, Thread::auto_scope_native_wrapper_entry_point_offset()));
+ Address(THR,
+ target::Thread::auto_scope_native_wrapper_entry_point_offset()));
}
// Input parameters:
@@ -402,23 +411,23 @@
// R5 : address of the native function to call.
// R2 : address of first argument in argument array.
// R1 : argc_tag including number of arguments and function kind.
-void StubCode::GenerateCallBootstrapNativeStub(Assembler* assembler) {
- const intptr_t thread_offset = NativeArguments::thread_offset();
- const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset();
- const intptr_t argv_offset = NativeArguments::argv_offset();
- const intptr_t retval_offset = NativeArguments::retval_offset();
+void StubCodeCompiler::GenerateCallBootstrapNativeStub(Assembler* assembler) {
+ const intptr_t thread_offset = target::NativeArguments::thread_offset();
+ const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
+ const intptr_t argv_offset = target::NativeArguments::argv_offset();
+ const intptr_t retval_offset = target::NativeArguments::retval_offset();
__ EnterStubFrame();
// Save exit frame information to enable stack walking as we are about
// to transition to native code.
- __ StoreToOffset(FP, THR, Thread::top_exit_frame_info_offset());
+ __ StoreToOffset(FP, THR, target::Thread::top_exit_frame_info_offset());
#if defined(DEBUG)
{
Label ok;
// Check that we are always entering from Dart code.
- __ LoadFromOffset(R6, THR, Thread::vm_tag_offset());
+ __ LoadFromOffset(R6, THR, target::Thread::vm_tag_offset());
__ CompareImmediate(R6, VMTag::kDartCompiledTagId);
__ b(&ok, EQ);
__ Stop("Not coming from Dart code.");
@@ -427,31 +436,31 @@
#endif
// Mark that the thread is executing native code.
- __ StoreToOffset(R5, THR, Thread::vm_tag_offset());
+ __ StoreToOffset(R5, THR, target::Thread::vm_tag_offset());
// Reserve space for the native arguments structure passed on the stack (the
// outgoing pointer parameter to the native arguments structure is passed in
// R0) and align frame before entering the C++ world.
- __ ReserveAlignedFrameSpace(sizeof(NativeArguments));
+ __ ReserveAlignedFrameSpace(target::NativeArguments::StructSize());
- // Initialize NativeArguments structure and call native function.
+ // Initialize target::NativeArguments structure and call native function.
// Registers R0, R1, R2, and R3 are used.
- ASSERT(thread_offset == 0 * kWordSize);
+ ASSERT(thread_offset == 0 * target::kWordSize);
// Set thread in NativeArgs.
__ mov(R0, THR);
// There are no native calls to closures, so we do not need to set the tag
// bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_.
- ASSERT(argc_tag_offset == 1 * kWordSize);
- // Set argc in NativeArguments: R1 already contains argc.
+ ASSERT(argc_tag_offset == 1 * target::kWordSize);
+ // Set argc in target::NativeArguments: R1 already contains argc.
- ASSERT(argv_offset == 2 * kWordSize);
- // Set argv in NativeArguments: R2 already contains argv.
+ ASSERT(argv_offset == 2 * target::kWordSize);
+ // Set argv in target::NativeArguments: R2 already contains argv.
// Set retval in NativeArgs.
- ASSERT(retval_offset == 3 * kWordSize);
- __ AddImmediate(R3, FP, 2 * kWordSize);
+ ASSERT(retval_offset == 3 * target::kWordSize);
+ __ AddImmediate(R3, FP, 2 * target::kWordSize);
// Passing the structure by value as in runtime calls would require changing
// the Dart API for native functions.
@@ -460,7 +469,7 @@
__ StoreToOffset(R1, SP, argc_tag_offset);
__ StoreToOffset(R2, SP, argv_offset);
__ StoreToOffset(R3, SP, retval_offset);
- __ mov(R0, SP); // Pass the pointer to the NativeArguments.
+ __ mov(R0, SP); // Pass the pointer to the target::NativeArguments.
// We are entering runtime code, so the C stack pointer must be restored from
// the stack limit to the top of the stack. We cache the stack limit address
@@ -476,14 +485,15 @@
__ mov(CSP, R25);
// Refresh write barrier mask.
- __ ldr(BARRIER_MASK, Address(THR, Thread::write_barrier_mask_offset()));
+ __ ldr(BARRIER_MASK,
+ Address(THR, target::Thread::write_barrier_mask_offset()));
// Mark that the thread is executing Dart code.
__ LoadImmediate(R2, VMTag::kDartCompiledTagId);
- __ StoreToOffset(R2, THR, Thread::vm_tag_offset());
+ __ StoreToOffset(R2, THR, target::Thread::vm_tag_offset());
// Reset exit frame information in Isolate structure.
- __ StoreToOffset(ZR, THR, Thread::top_exit_frame_info_offset());
+ __ StoreToOffset(ZR, THR, target::Thread::top_exit_frame_info_offset());
__ LeaveStubFrame();
__ ret();
@@ -491,7 +501,7 @@
// Input parameters:
// R4: arguments descriptor array.
-void StubCode::GenerateCallStaticFunctionStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
// Create a stub frame as we are pushing some objects on the stack before
// calling into the runtime.
__ EnterStubFrame();
@@ -505,18 +515,19 @@
// Remove the stub frame.
__ LeaveStubFrame();
// Jump to the dart function.
- __ LoadFieldFromOffset(R0, CODE_REG, Code::entry_point_offset());
+ __ LoadFieldFromOffset(R0, CODE_REG, target::Code::entry_point_offset());
__ br(R0);
}
// Called from a static call only when an invalid code has been entered
// (invalid because its function was optimized or deoptimized).
// R4: arguments descriptor array.
-void StubCode::GenerateFixCallersTargetStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
// Load code pointer to this stub from the thread:
// The one that is passed in is not correct - it points to the code object
// that needs to be replaced.
- __ ldr(CODE_REG, Address(THR, Thread::fix_callers_target_code_offset()));
+ __ ldr(CODE_REG,
+ Address(THR, target::Thread::fix_callers_target_code_offset()));
// Create a stub frame as we are pushing some objects on the stack before
// calling into the runtime.
__ EnterStubFrame();
@@ -530,17 +541,19 @@
// Remove the stub frame.
__ LeaveStubFrame();
// Jump to the dart function.
- __ LoadFieldFromOffset(R0, CODE_REG, Code::entry_point_offset());
+ __ LoadFieldFromOffset(R0, CODE_REG, target::Code::entry_point_offset());
__ br(R0);
}
// Called from object allocate instruction when the allocation stub has been
// disabled.
-void StubCode::GenerateFixAllocationStubTargetStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateFixAllocationStubTargetStub(
+ Assembler* assembler) {
// Load code pointer to this stub from the thread:
// The one that is passed in is not correct - it points to the code object
// that needs to be replaced.
- __ ldr(CODE_REG, Address(THR, Thread::fix_allocation_stub_code_offset()));
+ __ ldr(CODE_REG,
+ Address(THR, target::Thread::fix_allocation_stub_code_offset()));
__ EnterStubFrame();
// Set up space on the stack for the return value.
__ Push(ZR);
@@ -550,25 +563,26 @@
// Remove the stub frame.
__ LeaveStubFrame();
// Jump to the dart function.
- __ LoadFieldFromOffset(R0, CODE_REG, Code::entry_point_offset());
+ __ LoadFieldFromOffset(R0, CODE_REG, target::Code::entry_point_offset());
__ br(R0);
}
// Input parameters:
// R2: smi-tagged argument count, may be zero.
-// FP[kParamEndSlotFromFp + 1]: last argument.
+// FP[target::frame_layout.param_end_from_fp + 1]: last argument.
static void PushArrayOfArguments(Assembler* assembler) {
// Allocate array to store arguments of caller.
- __ LoadObject(R1, Object::null_object());
+ __ LoadObject(R1, NullObject());
// R1: null element type for raw Array.
// R2: smi-tagged argument count, may be zero.
- __ BranchLink(StubCode::AllocateArray());
+ __ BranchLink(StubCodeAllocateArray());
// R0: newly allocated array.
// R2: smi-tagged argument count, may be zero (was preserved by the stub).
__ Push(R0); // Array is in R0 and on top of stack.
__ add(R1, FP, Operand(R2, LSL, 2));
- __ AddImmediate(R1, kParamEndSlotFromFp * kWordSize);
- __ AddImmediate(R3, R0, Array::data_offset() - kHeapObjectTag);
+ __ AddImmediate(R1,
+ target::frame_layout.param_end_from_fp * target::kWordSize);
+ __ AddImmediate(R3, R0, target::Array::data_offset() - kHeapObjectTag);
// R1: address of first argument on stack.
// R3: address of first argument in array.
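// The copy loop below walks the stack downward through R1 and the array
// upward through R3, decrementing the Smi count in R2 until it goes negative.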
@@ -577,10 +591,10 @@
__ b(&loop_exit, LE);
__ Bind(&loop);
__ ldr(R7, Address(R1));
- __ AddImmediate(R1, -kWordSize);
- __ AddImmediate(R3, kWordSize);
- __ AddImmediateSetFlags(R2, R2, -Smi::RawValue(1));
- __ str(R7, Address(R3, -kWordSize));
+ __ AddImmediate(R1, -target::kWordSize);
+ __ AddImmediate(R3, target::kWordSize);
+ __ AddImmediateSetFlags(R2, R2, -target::ToRawSmi(1));
+ __ str(R7, Address(R3, -target::kWordSize));
__ b(&loop, GE);
__ Bind(&loop_exit);
}
@@ -638,10 +652,10 @@
// Save the original value of CODE_REG pushed before invoking this stub
// instead of the value used to call this stub.
COMPILE_ASSERT(R25 > CODE_REG);
- __ ldr(R25, Address(FP, 2 * kWordSize));
- __ str(R25, Address(SP, -1 * kWordSize, Address::PreIndex));
+ __ ldr(R25, Address(FP, 2 * target::kWordSize));
+ __ str(R25, Address(SP, -1 * target::kWordSize, Address::PreIndex));
} else {
- __ str(r, Address(SP, -1 * kWordSize, Address::PreIndex));
+ __ str(r, Address(SP, -1 * target::kWordSize, Address::PreIndex));
}
}
@@ -660,11 +674,12 @@
if (kind == kLazyDeoptFromReturn) {
// Restore result into R1 temporarily.
- __ LoadFromOffset(R1, FP, saved_result_slot_from_fp * kWordSize);
+ __ LoadFromOffset(R1, FP, saved_result_slot_from_fp * target::kWordSize);
} else if (kind == kLazyDeoptFromThrow) {
// Restore exception into R1 and stacktrace into R2 temporarily.
- __ LoadFromOffset(R1, FP, saved_exception_slot_from_fp * kWordSize);
- __ LoadFromOffset(R2, FP, saved_stacktrace_slot_from_fp * kWordSize);
+ __ LoadFromOffset(R1, FP, saved_exception_slot_from_fp * target::kWordSize);
+ __ LoadFromOffset(R2, FP,
+ saved_stacktrace_slot_from_fp * target::kWordSize);
}
// There is a Dart Frame on the stack. We must restore PP and leave frame.
@@ -688,14 +703,16 @@
if (kind == kLazyDeoptFromReturn) {
// Restore result into R1.
__ LoadFromOffset(
- R1, FP, compiler::target::frame_layout.first_local_from_fp * kWordSize);
+ R1, FP,
+ compiler::target::frame_layout.first_local_from_fp * target::kWordSize);
} else if (kind == kLazyDeoptFromThrow) {
// Restore exception into R1 and stacktrace into R2.
__ LoadFromOffset(
- R1, FP, compiler::target::frame_layout.first_local_from_fp * kWordSize);
- __ LoadFromOffset(
- R2, FP,
- (compiler::target::frame_layout.first_local_from_fp - 1) * kWordSize);
+ R1, FP,
+ compiler::target::frame_layout.first_local_from_fp * target::kWordSize);
+ __ LoadFromOffset(R2, FP,
+ (compiler::target::frame_layout.first_local_from_fp - 1) *
+ target::kWordSize);
}
// Code above cannot cause GC.
// There is a Dart Frame on the stack. We must restore PP and leave frame.
@@ -733,33 +750,37 @@
}
// R0: result, must be preserved
-void StubCode::GenerateDeoptimizeLazyFromReturnStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub(
+ Assembler* assembler) {
// Push zap value instead of CODE_REG for lazy deopt.
__ LoadImmediate(TMP, kZapCodeReg);
__ Push(TMP);
// Return address for "call" to deopt stub.
__ LoadImmediate(LR, kZapReturnAddress);
- __ ldr(CODE_REG, Address(THR, Thread::lazy_deopt_from_return_stub_offset()));
+ __ ldr(CODE_REG,
+ Address(THR, target::Thread::lazy_deopt_from_return_stub_offset()));
GenerateDeoptimizationSequence(assembler, kLazyDeoptFromReturn);
__ ret();
}
// R0: exception, must be preserved
// R1: stacktrace, must be preserved
-void StubCode::GenerateDeoptimizeLazyFromThrowStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub(
+ Assembler* assembler) {
// Push zap value instead of CODE_REG for lazy deopt.
__ LoadImmediate(TMP, kZapCodeReg);
__ Push(TMP);
// Return address for "call" to deopt stub.
__ LoadImmediate(LR, kZapReturnAddress);
- __ ldr(CODE_REG, Address(THR, Thread::lazy_deopt_from_throw_stub_offset()));
+ __ ldr(CODE_REG,
+ Address(THR, target::Thread::lazy_deopt_from_throw_stub_offset()));
GenerateDeoptimizationSequence(assembler, kLazyDeoptFromThrow);
__ ret();
}
-void StubCode::GenerateDeoptimizeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptimizeStub(Assembler* assembler) {
__ Push(CODE_REG);
- __ ldr(CODE_REG, Address(THR, Thread::deoptimize_stub_offset()));
+ __ ldr(CODE_REG, Address(THR, target::Thread::deoptimize_stub_offset()));
GenerateDeoptimizationSequence(assembler, kEagerDeopt);
__ ret();
}
@@ -769,21 +790,23 @@
__ Comment("NoSuchMethodDispatch");
// When lazily generated invocation dispatchers are disabled, the
// miss-handler may return null.
- __ CompareObject(R0, Object::null_object());
+ __ CompareObject(R0, NullObject());
__ b(call_target_function, NE);
__ EnterStubFrame();
// Load the receiver.
- __ LoadFieldFromOffset(R2, R4, ArgumentsDescriptor::count_offset());
+ __ LoadFieldFromOffset(R2, R4, target::ArgumentsDescriptor::count_offset());
__ add(TMP, FP, Operand(R2, LSL, 2)); // R2 is Smi.
- __ LoadFromOffset(R6, TMP, kParamEndSlotFromFp * kWordSize);
+ __ LoadFromOffset(R6, TMP,
+ target::frame_layout.param_end_from_fp * target::kWordSize);
__ Push(ZR); // Result slot.
__ Push(R6); // Receiver.
__ Push(R5); // ICData/MegamorphicCache.
__ Push(R4); // Arguments descriptor.
// Adjust arguments count.
- __ LoadFieldFromOffset(R3, R4, ArgumentsDescriptor::type_args_len_offset());
+ __ LoadFieldFromOffset(R3, R4,
+ target::ArgumentsDescriptor::type_args_len_offset());
__ AddImmediate(TMP, R2, 1); // Include the type arguments.
__ cmp(R3, Operand(0));
__ csinc(R2, R2, TMP, EQ); // R2 <- (R3 == 0) ? R2 : TMP + 1 (R2 : R2 + 2).
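// R2 holds a Smi, so the raw +1 into TMP plus csinc's own +1 add a raw 2,
// i.e. exactly one Smi increment (the slot for the type-arguments vector).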
@@ -798,14 +821,15 @@
__ ret();
}
-void StubCode::GenerateMegamorphicMissStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateMegamorphicMissStub(Assembler* assembler) {
__ EnterStubFrame();
// Load the receiver.
- __ LoadFieldFromOffset(R2, R4, ArgumentsDescriptor::count_offset());
+ __ LoadFieldFromOffset(R2, R4, target::ArgumentsDescriptor::count_offset());
__ add(TMP, FP, Operand(R2, LSL, 2)); // R2 is Smi.
__ LoadFromOffset(
- R6, TMP, compiler::target::frame_layout.param_end_from_fp * kWordSize);
+ R6, TMP,
+ compiler::target::frame_layout.param_end_from_fp * target::kWordSize);
// Preserve IC data and arguments descriptor.
__ Push(R5);
@@ -838,8 +862,8 @@
}
// Tail-call to target function.
- __ LoadFieldFromOffset(CODE_REG, R0, Function::code_offset());
- __ LoadFieldFromOffset(R2, R0, Function::entry_point_offset());
+ __ LoadFieldFromOffset(CODE_REG, R0, target::Function::code_offset());
+ __ LoadFieldFromOffset(R2, R0, target::Function::entry_point_offset());
__ br(R2);
}
@@ -850,11 +874,12 @@
// R1: array element type (either NULL or an instantiated type).
// NOTE: R2 cannot be clobbered here as the caller relies on it being saved.
// The newly allocated object is returned in R0.
-void StubCode::GenerateAllocateArrayStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
Label slow_case;
// Compute the size to be allocated; it is based on the array length
// and is computed as:
- // RoundedAllocationSize((array_length * kwordSize) + sizeof(RawArray)).
+ // RoundedAllocationSize(
+ //     (array_length * target::kWordSize) + target::Array::header_size()).
// Assert that length is a Smi.
__ tsti(R2, Immediate(kSmiTagMask));
if (FLAG_use_slow_path) {
@@ -867,7 +892,7 @@
// Check for maximum allowed length.
const intptr_t max_len =
- reinterpret_cast<intptr_t>(Smi::New(Array::kMaxNewSpaceElements));
+ target::ToRawSmi(target::Array::kMaxNewSpaceElements);
__ CompareImmediate(R2, max_len);
__ b(&slow_case, GT);
@@ -878,13 +903,14 @@
// Load new object start and calculate next object start.
// R1: array element type.
// R2: array length as Smi.
- __ ldr(R0, Address(THR, Thread::top_offset()));
+ __ ldr(R0, Address(THR, target::Thread::top_offset()));
intptr_t fixed_size_plus_alignment_padding =
- sizeof(RawArray) + kObjectAlignment - 1;
+ target::Array::header_size() + target::ObjectAlignment::kObjectAlignment -
+ 1;
__ LoadImmediate(R3, fixed_size_plus_alignment_padding);
__ add(R3, R3, Operand(R2, LSL, 2)); // R2 is Smi.
ASSERT(kSmiTagShift == 1);
- __ andi(R3, R3, Immediate(~(kObjectAlignment - 1)));
+ __ andi(R3, R3, Immediate(~(target::ObjectAlignment::kObjectAlignment - 1)));
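+ // R2 is Smi-tagged (length << 1), so the LSL 2 above scales it to
+ // length * 8 bytes; adding kObjectAlignment - 1 and masking here rounds
+ // the size up: round_up(x, a) == (x + a - 1) & ~(a - 1).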
// R0: potential new object start.
// R3: object size in bytes.
__ adds(R7, R3, Operand(R0));
@@ -896,7 +922,7 @@
// R2: array length as Smi.
// R3: array size.
// R7: potential next object start.
- __ LoadFromOffset(TMP, THR, Thread::end_offset());
+ __ LoadFromOffset(TMP, THR, target::Thread::end_offset());
__ CompareRegisters(R7, TMP);
__ b(&slow_case, CS); // Branch if unsigned higher or equal.
@@ -905,7 +931,7 @@
// R0: potential new object start.
// R3: array size.
// R7: potential next object start.
- __ str(R7, Address(THR, Thread::top_offset()));
+ __ str(R7, Address(THR, target::Thread::top_offset()));
__ add(R0, R0, Operand(kHeapObjectTag));
NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, R3));
@@ -916,46 +942,48 @@
// R7: new object end address.
// Store the type argument field.
- __ StoreIntoObjectOffsetNoBarrier(R0, Array::type_arguments_offset(), R1);
+ __ StoreIntoObjectOffsetNoBarrier(R0, target::Array::type_arguments_offset(),
+ R1);
// Set the length field.
- __ StoreIntoObjectOffsetNoBarrier(R0, Array::length_offset(), R2);
+ __ StoreIntoObjectOffsetNoBarrier(R0, target::Array::length_offset(), R2);
// Calculate the size tag.
// R0: new object start as a tagged pointer.
// R2: array length as Smi.
// R3: array size.
// R7: new object end address.
- const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2;
- __ CompareImmediate(R3, RawObject::SizeTag::kMaxSizeTag);
+ const intptr_t shift = target::RawObject::kTagBitsSizeTagPos -
+ target::ObjectAlignment::kObjectAlignmentLog2;
+ __ CompareImmediate(R3, target::RawObject::kSizeTagMaxSizeTag);
// If no size tag overflow, shift R1 left, else set R1 to zero.
__ LslImmediate(TMP, R3, shift);
__ csel(R1, TMP, R1, LS);
__ csel(R1, ZR, R1, HI);
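// A zero size tag tells the GC that the object is too large for the header
// encoding, so its size must be read from the class instead.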
// Get the class index and insert it into the tags.
- uint32_t tags = 0;
- tags = RawObject::ClassIdTag::update(cid, tags);
- tags = RawObject::NewBit::update(true, tags);
+ const uint32_t tags =
+ target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
+
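+ // instance_size is 0 here because the variable size tag computed in R1
+ // above is OR'd into the tag word below.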
__ LoadImmediate(TMP, tags);
__ orr(R1, R1, Operand(TMP));
- __ StoreFieldToOffset(R1, R0, Array::tags_offset());
+ __ StoreFieldToOffset(R1, R0, target::Array::tags_offset());
// Initialize all array elements to raw_null.
// R0: new object start as a tagged pointer.
// R7: new object end address.
// R2: array length as Smi.
- __ AddImmediate(R1, R0, Array::data_offset() - kHeapObjectTag);
+ __ AddImmediate(R1, R0, target::Array::data_offset() - kHeapObjectTag);
// R1: iterator which initially points to the start of the variable
// data area to be initialized.
- __ LoadObject(TMP, Object::null_object());
+ __ LoadObject(TMP, NullObject());
Label loop, done;
__ Bind(&loop);
// TODO(cshapiro): StoreIntoObjectNoBarrier
__ CompareRegisters(R1, R7);
__ b(&done, CS);
__ str(TMP, Address(R1)); // Store if unsigned lower.
- __ AddImmediate(R1, kWordSize);
+ __ AddImmediate(R1, target::kWordSize);
__ b(&loop); // Loop until R1 == R7.
__ Bind(&done);
@@ -991,7 +1019,7 @@
// R1 : arguments descriptor array.
// R2 : arguments array.
// R3 : current thread.
-void StubCode::GenerateInvokeDartCodeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateInvokeDartCodeStub(Assembler* assembler) {
__ Comment("InvokeDartCodeStub");
// Copy the C stack pointer (R31) into the stack pointer we'll actually use
@@ -1001,7 +1029,7 @@
__ EnterFrame(0);
// Push code object to PC marker slot.
- __ ldr(TMP, Address(R3, Thread::invoke_dart_code_stub_offset()));
+ __ ldr(TMP, Address(R3, target::Thread::invoke_dart_code_stub_offset()));
__ Push(TMP);
// Save the callee-saved registers.
@@ -1010,7 +1038,7 @@
// We use str instead of the Push macro because we will be pushing the PP
// register when it is not holding a pool-pointer since we are coming from
// C++ code.
- __ str(r, Address(SP, -1 * kWordSize, Address::PreIndex));
+ __ str(r, Address(SP, -1 * target::kWordSize, Address::PreIndex));
}
// Save the bottom 64-bits of callee-saved V registers.
@@ -1024,34 +1052,37 @@
__ mov(THR, R3);
}
// Refresh write barrier mask.
- __ ldr(BARRIER_MASK, Address(THR, Thread::write_barrier_mask_offset()));
+ __ ldr(BARRIER_MASK,
+ Address(THR, target::Thread::write_barrier_mask_offset()));
// Save the current VMTag on the stack.
- __ LoadFromOffset(R4, THR, Thread::vm_tag_offset());
+ __ LoadFromOffset(R4, THR, target::Thread::vm_tag_offset());
__ Push(R4);
// Save top resource and top exit frame info. Use R6 as a temporary register.
// StackFrameIterator reads the top exit frame info saved in this frame.
- __ LoadFromOffset(R6, THR, Thread::top_resource_offset());
- __ StoreToOffset(ZR, THR, Thread::top_resource_offset());
+ __ LoadFromOffset(R6, THR, target::Thread::top_resource_offset());
+ __ StoreToOffset(ZR, THR, target::Thread::top_resource_offset());
__ Push(R6);
- __ LoadFromOffset(R6, THR, Thread::top_exit_frame_info_offset());
- __ StoreToOffset(ZR, THR, Thread::top_exit_frame_info_offset());
- // kExitLinkSlotFromEntryFp must be kept in sync with the code below.
- ASSERT(kExitLinkSlotFromEntryFp == -22);
+ __ LoadFromOffset(R6, THR, target::Thread::top_exit_frame_info_offset());
+ __ StoreToOffset(ZR, THR, target::Thread::top_exit_frame_info_offset());
+ // target::frame_layout.exit_link_slot_from_entry_fp must be kept in sync
+ // with the code below.
+ ASSERT(target::frame_layout.exit_link_slot_from_entry_fp == -22);
__ Push(R6);
// Mark that the thread is executing Dart code. Do this after initializing the
// exit link for the profiler.
__ LoadImmediate(R6, VMTag::kDartCompiledTagId);
- __ StoreToOffset(R6, THR, Thread::vm_tag_offset());
+ __ StoreToOffset(R6, THR, target::Thread::vm_tag_offset());
// Load arguments descriptor array into R4, which is passed to Dart code.
__ LoadFromOffset(R4, R1, VMHandles::kOffsetOfRawPtrInHandle);
// Load number of arguments into R5 and adjust count for type arguments.
- __ LoadFieldFromOffset(R5, R4, ArgumentsDescriptor::count_offset());
- __ LoadFieldFromOffset(R3, R4, ArgumentsDescriptor::type_args_len_offset());
+ __ LoadFieldFromOffset(R5, R4, target::ArgumentsDescriptor::count_offset());
+ __ LoadFieldFromOffset(R3, R4,
+ target::ArgumentsDescriptor::type_args_len_offset());
__ AddImmediate(TMP, R5, 1); // Include the type arguments.
__ cmp(R3, Operand(0));
__ csinc(R5, R5, TMP, EQ); // R5 <- (R3 == 0) ? R5 : TMP + 1 (R5 : R5 + 2).
@@ -1059,7 +1090,7 @@
// Compute address of 'arguments array' data area into R2.
__ LoadFromOffset(R2, R2, VMHandles::kOffsetOfRawPtrInHandle);
- __ AddImmediate(R2, Array::data_offset() - kHeapObjectTag);
+ __ AddImmediate(R2, target::Array::data_offset() - kHeapObjectTag);
// Set up arguments for the Dart call.
Label push_arguments;
@@ -1071,40 +1102,42 @@
__ ldr(R3, Address(R2));
__ Push(R3);
__ add(R1, R1, Operand(1));
- __ add(R2, R2, Operand(kWordSize));
+ __ add(R2, R2, Operand(target::kWordSize));
__ cmp(R1, Operand(R5));
__ b(&push_arguments, LT);
__ Bind(&done_push_arguments);
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
- __ ldr(PP, Address(THR, Thread::global_object_pool_offset()));
+ __ ldr(PP, Address(THR, target::Thread::global_object_pool_offset()));
__ sub(PP, PP, Operand(kHeapObjectTag)); // Pool in PP is untagged!
} else {
// We now load the pool pointer (PP) with a GC-safe value as we are about to
// invoke dart code. We don't need a real object pool here.
// Smi zero does not work because ARM64 assumes PP to be untagged.
- __ LoadObject(PP, Object::null_object());
+ __ LoadObject(PP, NullObject());
}
// Call the Dart code entrypoint.
__ ldr(CODE_REG, Address(R0, VMHandles::kOffsetOfRawPtrInHandle));
- __ ldr(R0, FieldAddress(CODE_REG, Code::entry_point_offset()));
+ __ ldr(R0, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
__ blr(R0); // R4 is the arguments descriptor array.
__ Comment("InvokeDartCodeStub return");
// Get rid of arguments pushed on the stack.
- __ AddImmediate(SP, FP, kExitLinkSlotFromEntryFp * kWordSize);
+ __ AddImmediate(
+ SP, FP,
+ target::frame_layout.exit_link_slot_from_entry_fp * target::kWordSize);
// Restore the saved top exit frame info and top resource back into the
// Isolate structure. Uses R6 as a temporary register for this.
__ Pop(R6);
- __ StoreToOffset(R6, THR, Thread::top_exit_frame_info_offset());
+ __ StoreToOffset(R6, THR, target::Thread::top_exit_frame_info_offset());
__ Pop(R6);
- __ StoreToOffset(R6, THR, Thread::top_resource_offset());
+ __ StoreToOffset(R6, THR, target::Thread::top_resource_offset());
// Restore the current VMTag from the stack.
__ Pop(R4);
- __ StoreToOffset(R4, THR, Thread::vm_tag_offset());
+ __ StoreToOffset(R4, THR, target::Thread::vm_tag_offset());
// Restore the bottom 64-bits of callee-saved V registers.
for (int i = kAbiLastPreservedFpuReg; i >= kAbiFirstPreservedFpuReg; i--) {
@@ -1119,7 +1152,7 @@
// register when it is not holding a pool-pointer since we are returning to
// C++ code. We also skip the dart stack pointer SP, since we are still
// using it as the stack pointer.
- __ ldr(r, Address(SP, 1 * kWordSize, Address::PostIndex));
+ __ ldr(r, Address(SP, 1 * target::kWordSize, Address::PostIndex));
}
// Restore the frame pointer and C stack pointer and return.
@@ -1136,7 +1169,8 @@
// R1 : arguments raw descriptor array.
// R2 : address of first argument.
// R3 : current thread.
-void StubCode::GenerateInvokeDartCodeFromBytecodeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateInvokeDartCodeFromBytecodeStub(
+ Assembler* assembler) {
#if defined(DART_PRECOMPILED_RUNTIME)
__ Stop("Not using interpreter");
#else
@@ -1148,7 +1182,8 @@
// Push code object to PC marker slot.
__ ldr(TMP,
- Address(R3, Thread::invoke_dart_code_from_bytecode_stub_offset()));
+ Address(R3,
+ target::Thread::invoke_dart_code_from_bytecode_stub_offset()));
__ Push(TMP);
// Save the callee-saved registers.
@@ -1157,7 +1192,7 @@
// We use str instead of the Push macro because we will be pushing the PP
// register when it is not holding a pool-pointer since we are coming from
// C++ code.
- __ str(r, Address(SP, -1 * kWordSize, Address::PreIndex));
+ __ str(r, Address(SP, -1 * target::kWordSize, Address::PreIndex));
}
// Save the bottom 64-bits of callee-saved V registers.
@@ -1171,34 +1206,37 @@
__ mov(THR, R3);
}
// Refresh write barrier mask.
- __ ldr(BARRIER_MASK, Address(THR, Thread::write_barrier_mask_offset()));
+ __ ldr(BARRIER_MASK,
+ Address(THR, target::Thread::write_barrier_mask_offset()));
// Save the current VMTag on the stack.
- __ LoadFromOffset(R4, THR, Thread::vm_tag_offset());
+ __ LoadFromOffset(R4, THR, target::Thread::vm_tag_offset());
__ Push(R4);
// Save top resource and top exit frame info. Use R6 as a temporary register.
// StackFrameIterator reads the top exit frame info saved in this frame.
- __ LoadFromOffset(R6, THR, Thread::top_resource_offset());
- __ StoreToOffset(ZR, THR, Thread::top_resource_offset());
+ __ LoadFromOffset(R6, THR, target::Thread::top_resource_offset());
+ __ StoreToOffset(ZR, THR, target::Thread::top_resource_offset());
__ Push(R6);
- __ LoadFromOffset(R6, THR, Thread::top_exit_frame_info_offset());
- __ StoreToOffset(ZR, THR, Thread::top_exit_frame_info_offset());
- // kExitLinkSlotFromEntryFp must be kept in sync with the code below.
- ASSERT(kExitLinkSlotFromEntryFp == -22);
+ __ LoadFromOffset(R6, THR, target::Thread::top_exit_frame_info_offset());
+ __ StoreToOffset(ZR, THR, target::Thread::top_exit_frame_info_offset());
+ // target::frame_layout.exit_link_slot_from_entry_fp must be kept in sync
+ // with the code below.
+ ASSERT(target::frame_layout.exit_link_slot_from_entry_fp == -22);
__ Push(R6);
// Mark that the thread is executing Dart code. Do this after initializing the
// exit link for the profiler.
__ LoadImmediate(R6, VMTag::kDartCompiledTagId);
- __ StoreToOffset(R6, THR, Thread::vm_tag_offset());
+ __ StoreToOffset(R6, THR, target::Thread::vm_tag_offset());
// Load arguments descriptor array into R4, which is passed to Dart code.
__ mov(R4, R1);
// Load number of arguments into R5 and adjust count for type arguments.
- __ LoadFieldFromOffset(R5, R4, ArgumentsDescriptor::count_offset());
- __ LoadFieldFromOffset(R3, R4, ArgumentsDescriptor::type_args_len_offset());
+ __ LoadFieldFromOffset(R5, R4, target::ArgumentsDescriptor::count_offset());
+ __ LoadFieldFromOffset(R3, R4,
+ target::ArgumentsDescriptor::type_args_len_offset());
__ AddImmediate(TMP, R5, 1); // Include the type arguments.
__ cmp(R3, Operand(0));
__ csinc(R5, R5, TMP, EQ); // R5 <- (R3 == 0) ? R5 : TMP + 1 (R5 : R5 + 2).
@@ -1215,7 +1253,7 @@
__ ldr(R3, Address(R2));
__ Push(R3);
__ add(R1, R1, Operand(1));
- __ add(R2, R2, Operand(kWordSize));
+ __ add(R2, R2, Operand(target::kWordSize));
__ cmp(R1, Operand(R5));
__ b(&push_arguments, LT);
__ Bind(&done_push_arguments);
@@ -1223,26 +1261,28 @@
// We now load the pool pointer (PP) with a GC-safe value as we are about to
// invoke dart code. We don't need a real object pool here.
// Smi zero does not work because ARM64 assumes PP to be untagged.
- __ LoadObject(PP, Object::null_object());
+ __ LoadObject(PP, NullObject());
// Call the Dart code entrypoint.
__ mov(CODE_REG, R0);
- __ ldr(R0, FieldAddress(CODE_REG, Code::entry_point_offset()));
+ __ ldr(R0, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
__ blr(R0); // R4 is the arguments descriptor array.
// Get rid of arguments pushed on the stack.
- __ AddImmediate(SP, FP, kExitLinkSlotFromEntryFp * kWordSize);
+ __ AddImmediate(
+ SP, FP,
+ target::frame_layout.exit_link_slot_from_entry_fp * target::kWordSize);
// Restore the saved top exit frame info and top resource back into the
// Isolate structure. Uses R6 as a temporary register for this.
__ Pop(R6);
- __ StoreToOffset(R6, THR, Thread::top_exit_frame_info_offset());
+ __ StoreToOffset(R6, THR, target::Thread::top_exit_frame_info_offset());
__ Pop(R6);
- __ StoreToOffset(R6, THR, Thread::top_resource_offset());
+ __ StoreToOffset(R6, THR, target::Thread::top_resource_offset());
// Restore the current VMTag from the stack.
__ Pop(R4);
- __ StoreToOffset(R4, THR, Thread::vm_tag_offset());
+ __ StoreToOffset(R4, THR, target::Thread::vm_tag_offset());
// Restore the bottom 64-bits of callee-saved V registers.
for (int i = kAbiLastPreservedFpuReg; i >= kAbiFirstPreservedFpuReg; i--) {
@@ -1257,7 +1297,7 @@
// register when it is not holding a pool-pointer since we are returning to
// C++ code. We also skip the dart stack pointer SP, since we are still
// using it as the stack pointer.
- __ ldr(r, Address(SP, 1 * kWordSize, Address::PostIndex));
+ __ ldr(r, Address(SP, 1 * target::kWordSize, Address::PostIndex));
}
// Restore the frame pointer and C stack pointer and return.
@@ -1273,31 +1313,33 @@
// R1: number of context variables.
// Output:
// R0: new allocated RawContext object.
-void StubCode::GenerateAllocateContextStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
if (FLAG_inline_alloc) {
Label slow_case;
// First compute the rounded instance size.
// R1: number of context variables.
intptr_t fixed_size_plus_alignment_padding =
- sizeof(RawContext) + kObjectAlignment - 1;
+ target::Context::header_size() +
+ target::ObjectAlignment::kObjectAlignment - 1;
__ LoadImmediate(R2, fixed_size_plus_alignment_padding);
__ add(R2, R2, Operand(R1, LSL, 3));
ASSERT(kSmiTagShift == 1);
- __ andi(R2, R2, Immediate(~(kObjectAlignment - 1)));
+ __ andi(R2, R2,
+ Immediate(~(target::ObjectAlignment::kObjectAlignment - 1)));
NOT_IN_PRODUCT(__ MaybeTraceAllocation(kContextCid, R4, &slow_case));
// Now allocate the object.
// R1: number of context variables.
// R2: object size.
const intptr_t cid = kContextCid;
- __ ldr(R0, Address(THR, Thread::top_offset()));
+ __ ldr(R0, Address(THR, target::Thread::top_offset()));
__ add(R3, R2, Operand(R0));
// Check if the allocation fits into the remaining space.
// R0: potential new object.
// R1: number of context variables.
// R2: object size.
// R3: potential next object start.
- __ ldr(TMP, Address(THR, Thread::end_offset()));
+ __ ldr(TMP, Address(THR, target::Thread::end_offset()));
__ CompareRegisters(R3, TMP);
if (FLAG_use_slow_path) {
__ b(&slow_case);
@@ -1311,7 +1353,7 @@
// R1: number of context variables.
// R2: object size.
// R3: next object start.
- __ str(R3, Address(THR, Thread::top_offset()));
+ __ str(R3, Address(THR, target::Thread::top_offset()));
__ add(R0, R0, Operand(kHeapObjectTag));
NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, R2));
@@ -1319,8 +1361,9 @@
// R0: new object.
// R1: number of context variables.
// R2: object size.
- const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2;
- __ CompareImmediate(R2, RawObject::SizeTag::kMaxSizeTag);
+ const intptr_t shift = target::RawObject::kTagBitsSizeTagPos -
+ target::ObjectAlignment::kObjectAlignmentLog2;
+ __ CompareImmediate(R2, target::RawObject::kSizeTagMaxSizeTag);
// If no size tag overflow, shift R2 left, else set R2 to zero.
__ LslImmediate(TMP, R2, shift);
__ csel(R2, TMP, R2, LS);
@@ -1328,30 +1371,31 @@
// Get the class index and insert it into the tags.
// R2: size and bit tags.
- uint32_t tags = 0;
- tags = RawObject::ClassIdTag::update(cid, tags);
- tags = RawObject::NewBit::update(true, tags);
+ const uint32_t tags =
+ target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
+
__ LoadImmediate(TMP, tags);
__ orr(R2, R2, Operand(TMP));
- __ StoreFieldToOffset(R2, R0, Context::tags_offset());
+ __ StoreFieldToOffset(R2, R0, target::Object::tags_offset());
// Set up the number of context variables field.
// R0: new object.
// R1: number of context variables as integer value (not object).
- __ StoreFieldToOffset(R1, R0, Context::num_variables_offset());
+ __ StoreFieldToOffset(R1, R0, target::Context::num_variables_offset());
// Set up the parent field.
// R0: new object.
// R1: number of context variables.
- __ LoadObject(R2, Object::null_object());
- __ StoreFieldToOffset(R2, R0, Context::parent_offset());
+ __ LoadObject(R2, NullObject());
+ __ StoreFieldToOffset(R2, R0, target::Context::parent_offset());
// Initialize the context variables.
// R0: new object.
// R1: number of context variables.
// R2: raw null.
Label loop, done;
- __ AddImmediate(R3, R0, Context::variable_offset(0) - kHeapObjectTag);
+ __ AddImmediate(R3, R0,
+ target::Context::variable_offset(0) - kHeapObjectTag);
__ Bind(&loop);
__ subs(R1, R1, Operand(1));
__ b(&done, MI);
@@ -1370,7 +1414,7 @@
__ EnterStubFrame();
// Set up space on stack for return value.
__ SmiTag(R1);
- __ PushObject(Object::null_object());
+ __ PushObject(NullObject());
__ Push(R1);
__ CallRuntime(kAllocateContextRuntimeEntry, 1); // Allocate context.
__ Drop(1); // Pop number of context variables argument.
@@ -1381,7 +1425,7 @@
__ ret();
}
-void StubCode::GenerateWriteBarrierWrappersStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateWriteBarrierWrappersStub(Assembler* assembler) {
for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
@@ -1390,7 +1434,8 @@
__ Push(LR);
__ Push(kWriteBarrierObjectReg);
__ mov(kWriteBarrierObjectReg, reg);
- __ ldr(LR, Address(THR, Thread::write_barrier_entry_point_offset()));
+ __ ldr(LR,
+ Address(THR, target::Thread::write_barrier_entry_point_offset()));
__ blr(LR);
__ Pop(kWriteBarrierObjectReg);
__ Pop(LR);
@@ -1415,16 +1460,17 @@
Address stub_code,
bool cards) {
Label add_to_mark_stack, remember_card;
- __ tbz(&add_to_mark_stack, R0, kNewObjectBitPosition);
+ __ tbz(&add_to_mark_stack, R0,
+ target::ObjectAlignment::kNewObjectBitPosition);
if (cards) {
- __ LoadFieldFromOffset(TMP, R1, Object::tags_offset(), kWord);
- __ tbnz(&remember_card, TMP, RawObject::kCardRememberedBit);
+ __ LoadFieldFromOffset(TMP, R1, target::Object::tags_offset(), kWord);
+ __ tbnz(&remember_card, TMP, target::RawObject::kCardRememberedBit);
} else {
#if defined(DEBUG)
Label ok;
- __ LoadFieldFromOffset(TMP, R1, Object::tags_offset(), kWord);
- __ tbz(&ok, TMP, RawObject::kCardRememberedBit);
+ __ LoadFieldFromOffset(TMP, R1, target::Object::tags_offset(), kWord);
+ __ tbz(&ok, TMP, target::RawObject::kCardRememberedBit);
__ Stop("Wrong barrier");
__ Bind(&ok);
#endif
@@ -1436,7 +1482,7 @@
__ Push(R4);
// Atomically set the remembered bit of the object header.
- ASSERT(Object::tags_offset() == 0);
+ ASSERT(target::Object::tags_offset() == 0);
__ sub(R3, R1, Operand(kHeapObjectTag));
// R3: Untagged address of header word (ldxr/stxr do not support offsets).
// Note that we use 32 bit operations here to match the size of the
@@ -1444,24 +1490,26 @@
Label retry;
__ Bind(&retry);
__ ldxr(R2, R3, kWord);
- __ AndImmediate(R2, R2, ~(1 << RawObject::kOldAndNotRememberedBit));
+ __ AndImmediate(R2, R2, ~(1 << target::RawObject::kOldAndNotRememberedBit));
__ stxr(R4, R2, R3, kWord);
__ cbnz(&retry, R4);
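// ldxr/stxr form the atomic read-modify-write: stxr writes 0 to R4 on
// success and non-zero if the exclusive reservation was lost, in which case
// the update is retried.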
// Load the StoreBuffer block out of the thread. Then load top_ out of the
// StoreBufferBlock and add the address to the pointers_.
- __ LoadFromOffset(R4, THR, Thread::store_buffer_block_offset());
- __ LoadFromOffset(R2, R4, StoreBufferBlock::top_offset(), kUnsignedWord);
- __ add(R3, R4, Operand(R2, LSL, kWordSizeLog2));
- __ StoreToOffset(R1, R3, StoreBufferBlock::pointers_offset());
+ __ LoadFromOffset(R4, THR, target::Thread::store_buffer_block_offset());
+ __ LoadFromOffset(R2, R4, target::StoreBufferBlock::top_offset(),
+ kUnsignedWord);
+ __ add(R3, R4, Operand(R2, LSL, target::kWordSizeLog2));
+ __ StoreToOffset(R1, R3, target::StoreBufferBlock::pointers_offset());
// Increment top_ and check for overflow.
// R2: top_.
// R4: StoreBufferBlock.
Label overflow;
__ add(R2, R2, Operand(1));
- __ StoreToOffset(R2, R4, StoreBufferBlock::top_offset(), kUnsignedWord);
- __ CompareImmediate(R2, StoreBufferBlock::kSize);
+ __ StoreToOffset(R2, R4, target::StoreBufferBlock::top_offset(),
+ kUnsignedWord);
+ __ CompareImmediate(R2, target::StoreBufferBlock::kSize);
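+ // Once top_ reaches kSize the block is full; the overflow path below hands
+ // it to the runtime (StoreBufferBlockProcess), which gives the thread a
+ // fresh block.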
// Restore values.
__ Pop(R4);
__ Pop(R3);
@@ -1475,7 +1523,7 @@
__ Push(CODE_REG);
__ ldr(CODE_REG, stub_code);
- __ EnterCallRuntimeFrame(0 * kWordSize);
+ __ EnterCallRuntimeFrame(0 * target::kWordSize);
__ mov(R0, THR);
__ CallRuntime(kStoreBufferBlockProcessRuntimeEntry, 1);
// Restore callee-saved registers, tear down frame.
@@ -1492,23 +1540,25 @@
// Note that we use 32 bit operations here to match the size of the
// background sweeper which is also manipulating this 32 bit word.
Label marking_retry, lost_race, marking_overflow;
- ASSERT(Object::tags_offset() == 0);
+ ASSERT(target::Object::tags_offset() == 0);
__ sub(R3, R0, Operand(kHeapObjectTag));
// R3: Untagged address of header word (ldxr/stxr do not support offsets).
__ Bind(&marking_retry);
__ ldxr(R2, R3, kWord);
- __ tbz(&lost_race, R2, RawObject::kOldAndNotMarkedBit);
- __ AndImmediate(R2, R2, ~(1 << RawObject::kOldAndNotMarkedBit));
+ __ tbz(&lost_race, R2, target::RawObject::kOldAndNotMarkedBit);
+ __ AndImmediate(R2, R2, ~(1 << target::RawObject::kOldAndNotMarkedBit));
__ stxr(R4, R2, R3, kWord);
__ cbnz(&marking_retry, R4);
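// If the not-marked bit was already clear, another thread marked the object
// first; the lost_race path drops the attempt instead of queueing the object
// twice.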
- __ LoadFromOffset(R4, THR, Thread::marking_stack_block_offset());
- __ LoadFromOffset(R2, R4, MarkingStackBlock::top_offset(), kUnsignedWord);
- __ add(R3, R4, Operand(R2, LSL, kWordSizeLog2));
- __ StoreToOffset(R0, R3, MarkingStackBlock::pointers_offset());
+ __ LoadFromOffset(R4, THR, target::Thread::marking_stack_block_offset());
+ __ LoadFromOffset(R2, R4, target::MarkingStackBlock::top_offset(),
+ kUnsignedWord);
+ __ add(R3, R4, Operand(R2, LSL, target::kWordSizeLog2));
+ __ StoreToOffset(R0, R3, target::MarkingStackBlock::pointers_offset());
__ add(R2, R2, Operand(1));
- __ StoreToOffset(R2, R4, MarkingStackBlock::top_offset(), kUnsignedWord);
- __ CompareImmediate(R2, MarkingStackBlock::kSize);
+ __ StoreToOffset(R2, R4, target::MarkingStackBlock::top_offset(),
+ kUnsignedWord);
+ __ CompareImmediate(R2, target::MarkingStackBlock::kSize);
__ Pop(R4); // Unspill.
__ Pop(R3); // Unspill.
__ Pop(R2); // Unspill.
@@ -1518,7 +1568,7 @@
__ Bind(&marking_overflow);
__ Push(CODE_REG);
__ ldr(CODE_REG, stub_code);
- __ EnterCallRuntimeFrame(0 * kWordSize);
+ __ EnterCallRuntimeFrame(0 * target::kWordSize);
__ mov(R0, THR);
__ CallRuntime(kMarkingStackBlockProcessRuntimeEntry, 1);
__ LeaveCallRuntimeFrame();
@@ -1536,16 +1586,19 @@
// Get card table.
__ Bind(&remember_card);
- __ AndImmediate(TMP, R1, kPageMask); // HeapPage.
- __ ldr(TMP, Address(TMP, HeapPage::card_table_offset())); // Card table.
+ __ AndImmediate(TMP, R1, target::kPageMask); // HeapPage.
+ __ ldr(TMP,
+ Address(TMP, target::HeapPage::card_table_offset())); // Card table.
__ cbz(&remember_card_slow, TMP);
// Dirty the card.
- __ AndImmediate(TMP, R1, kPageMask); // HeapPage.
- __ sub(R25, R25, Operand(TMP)); // Offset in page.
- __ ldr(TMP, Address(TMP, HeapPage::card_table_offset())); // Card table.
+ __ AndImmediate(TMP, R1, target::kPageMask); // HeapPage.
+ __ sub(R25, R25, Operand(TMP)); // Offset in page.
+ __ ldr(TMP,
+ Address(TMP, target::HeapPage::card_table_offset())); // Card table.
__ add(TMP, TMP,
- Operand(R25, LSR, HeapPage::kBytesPerCardLog2)); // Card address.
+ Operand(R25, LSR,
+ target::HeapPage::kBytesPerCardLog2)); // Card address.
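+ // Each card covers 2^kBytesPerCardLog2 bytes of the page; dirtying just
+ // this card lets the GC rescan a slice of a large array rather than the
+ // whole object.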
__ str(R1, Address(TMP, 0),
kUnsignedByte); // Low byte of R1 is non-zero from object tag.
__ ret();
@@ -1566,26 +1619,28 @@
}
}
-void StubCode::GenerateWriteBarrierStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateWriteBarrierStub(Assembler* assembler) {
GenerateWriteBarrierStubHelper(
- assembler, Address(THR, Thread::write_barrier_code_offset()), false);
+ assembler, Address(THR, target::Thread::write_barrier_code_offset()),
+ false);
}
-void StubCode::GenerateArrayWriteBarrierStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateArrayWriteBarrierStub(Assembler* assembler) {
GenerateWriteBarrierStubHelper(
- assembler, Address(THR, Thread::array_write_barrier_code_offset()), true);
+ assembler,
+ Address(THR, target::Thread::array_write_barrier_code_offset()), true);
}
// Called for inline allocation of objects.
// Input parameters:
// LR : return address.
// SP + 0 : type arguments object (only if class is parameterized).
-void StubCode::GenerateAllocationStubForClass(Assembler* assembler,
- const Class& cls) {
+void StubCodeCompiler::GenerateAllocationStubForClass(Assembler* assembler,
+ const Class& cls) {
// The generated code is different if the class is parameterized.
- const bool is_cls_parameterized = cls.NumTypeArguments() > 0;
- ASSERT(!is_cls_parameterized ||
- (cls.type_arguments_field_offset() != Class::kNoTypeArguments));
+ const bool is_cls_parameterized = target::Class::NumTypeArguments(cls) > 0;
+ ASSERT(!is_cls_parameterized || target::Class::TypeArgumentsFieldOffset(
+ cls) != target::Class::kNoTypeArguments);
const Register kTypeArgumentsReg = R1;
const Register kInstanceReg = R0;
@@ -1597,47 +1652,49 @@
// when the object initialization should be done as a loop or as
// straight-line code.
const int kInlineInstanceSize = 12;
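// That is, objects smaller than 12 words are initialized with straight-line
// (paired) stores; larger objects fall back to the init_loop below.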
- const intptr_t instance_size = cls.instance_size();
+ const intptr_t instance_size = target::Class::InstanceSize(cls);
ASSERT(instance_size > 0);
if (is_cls_parameterized) {
__ ldr(kTypeArgumentsReg, Address(SP));
}
- Isolate* isolate = Isolate::Current();
- __ LoadObject(kNullReg, Object::null_object());
- if (FLAG_inline_alloc && Heap::IsAllocatableInNewSpace(instance_size) &&
- !cls.TraceAllocation(isolate)) {
+ __ LoadObject(kNullReg, NullObject());
+ if (FLAG_inline_alloc &&
+ target::Heap::IsAllocatableInNewSpace(instance_size) &&
+ !target::Class::TraceAllocation(cls)) {
Label slow_case;
// Allocate the object & initialize header word.
__ TryAllocate(cls, &slow_case, kInstanceReg, kTopReg,
/*tag_result=*/false);
// Initialize the remaining words of the object.
- if (instance_size < (kInlineInstanceSize * kWordSize)) {
- intptr_t current_offset = Instance::NextFieldOffset();
- while ((current_offset + kWordSize) < instance_size) {
+ if (instance_size < (kInlineInstanceSize * target::kWordSize)) {
+ intptr_t current_offset = target::Instance::first_field_offset();
+ while ((current_offset + target::kWordSize) < instance_size) {
__ stp(kNullReg, kNullReg,
Address(kInstanceReg, current_offset, Address::PairOffset));
- current_offset += 2 * kWordSize;
+ current_offset += 2 * target::kWordSize;
}
while (current_offset < instance_size) {
__ str(kNullReg, Address(kInstanceReg, current_offset));
- current_offset += kWordSize;
+ current_offset += target::kWordSize;
}
} else {
- __ AddImmediate(kTempReg, kInstanceReg, Instance::NextFieldOffset());
+ __ AddImmediate(kTempReg, kInstanceReg,
+ target::Instance::first_field_offset());
Label done, init_loop;
__ Bind(&init_loop);
__ CompareRegisters(kTempReg, kTopReg);
__ b(&done, CS);
- __ str(kNullReg, Address(kTempReg, kWordSize, Address::PostIndex));
+ __ str(kNullReg,
+ Address(kTempReg, target::kWordSize, Address::PostIndex));
__ b(&init_loop);
__ Bind(&done);
}
if (is_cls_parameterized) {
- __ StoreToOffset(kTypeArgumentsReg, kInstanceReg,
- cls.type_arguments_field_offset());
+ const intptr_t offset = target::Class::TypeArgumentsFieldOffset(cls);
+ __ StoreToOffset(kTypeArgumentsReg, kInstanceReg, offset);
}
__ add(kInstanceReg, kInstanceReg, Operand(kHeapObjectTag));
__ ret();
@@ -1649,13 +1706,15 @@
// Create a stub frame as we are pushing some objects on the stack before
// calling into the runtime.
__ EnterStubFrame(); // Uses pool pointer to pass cls to runtime.
- __ LoadObject(R0, cls);
+ __ LoadObject(R0, CastHandle<Object>(cls));
__ PushPair(R0, kNullReg); // Pushes cls, result slot.
__ Push(is_cls_parameterized ? kTypeArgumentsReg : kNullReg);
__ CallRuntime(kAllocateObjectRuntimeEntry, 2); // Allocate object.
- __ ldr(kInstanceReg,
- Address(SP, 2 * kWordSize)); // Pop result (newly allocated object).
- __ LeaveStubFrame(); // Restores correct SP.
+ __ ldr(
+ kInstanceReg,
+ Address(SP,
+ 2 * target::kWordSize)); // Pop result (newly allocated object).
+ __ LeaveStubFrame(); // Restores correct SP.
__ ret();
}
@@ -1666,13 +1725,15 @@
// LR : return address.
// SP : address of last argument.
// R4: arguments descriptor array.
-void StubCode::GenerateCallClosureNoSuchMethodStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub(
+ Assembler* assembler) {
__ EnterStubFrame();
// Load the receiver.
- __ LoadFieldFromOffset(R2, R4, ArgumentsDescriptor::count_offset());
+ __ LoadFieldFromOffset(R2, R4, target::ArgumentsDescriptor::count_offset());
__ add(TMP, FP, Operand(R2, LSL, 2)); // R2 is Smi.
- __ LoadFromOffset(R6, TMP, kParamEndSlotFromFp * kWordSize);
+ __ LoadFromOffset(R6, TMP,
+ target::frame_layout.param_end_from_fp * target::kWordSize);
// Push space for the return value.
// Push the receiver.
@@ -1682,7 +1743,8 @@
__ Push(R4);
// Adjust arguments count.
- __ LoadFieldFromOffset(R3, R4, ArgumentsDescriptor::type_args_len_offset());
+ __ LoadFieldFromOffset(R3, R4,
+ target::ArgumentsDescriptor::type_args_len_offset());
__ AddImmediate(TMP, R2, 1); // Include the type arguments.
__ cmp(R3, Operand(0));
__ csinc(R2, R2, TMP, EQ); // R2 <- (R3 == 0) ? R2 : TMP + 1 (R2 : R2 + 2).
@@ -1700,7 +1762,8 @@
// R5: inline cache data object.
// Cannot use function object from ICData as it may be the inlined
// function and not the top-scope function.
-void StubCode::GenerateOptimizedUsageCounterIncrement(Assembler* assembler) {
+void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement(
+ Assembler* assembler) {
Register ic_reg = R5;
Register func_reg = R6;
if (FLAG_trace_optimized_ic_calls) {
@@ -1715,25 +1778,27 @@
__ Pop(R6); // Restore.
__ LeaveStubFrame();
}
- __ LoadFieldFromOffset(R7, func_reg, Function::usage_counter_offset(), kWord);
+ __ LoadFieldFromOffset(R7, func_reg, target::Function::usage_counter_offset(),
+ kWord);
__ add(R7, R7, Operand(1));
- __ StoreFieldToOffset(R7, func_reg, Function::usage_counter_offset(), kWord);
+ __ StoreFieldToOffset(R7, func_reg, target::Function::usage_counter_offset(),
+ kWord);
}
// Loads function into 'temp_reg'.
-void StubCode::GenerateUsageCounterIncrement(Assembler* assembler,
- Register temp_reg) {
+void StubCodeCompiler::GenerateUsageCounterIncrement(Assembler* assembler,
+ Register temp_reg) {
if (FLAG_optimization_counter_threshold >= 0) {
Register ic_reg = R5;
Register func_reg = temp_reg;
ASSERT(temp_reg == R6);
__ Comment("Increment function counter");
- __ LoadFieldFromOffset(func_reg, ic_reg, ICData::owner_offset());
- __ LoadFieldFromOffset(R7, func_reg, Function::usage_counter_offset(),
- kWord);
+ __ LoadFieldFromOffset(func_reg, ic_reg, target::ICData::owner_offset());
+ __ LoadFieldFromOffset(R7, func_reg,
+ target::Function::usage_counter_offset(), kWord);
__ AddImmediate(R7, 1);
- __ StoreFieldToOffset(R7, func_reg, Function::usage_counter_offset(),
- kWord);
+ __ StoreFieldToOffset(R7, func_reg,
+ target::Function::usage_counter_offset(), kWord);
}
}
@@ -1746,8 +1811,8 @@
intptr_t num_args,
Label* not_smi_or_overflow) {
__ Comment("Fast Smi op");
- __ ldr(R0, Address(SP, +0 * kWordSize)); // Right.
- __ ldr(R1, Address(SP, +1 * kWordSize)); // Left.
+ __ ldr(R0, Address(SP, +0 * target::kWordSize)); // Right.
+ __ ldr(R1, Address(SP, +1 * target::kWordSize)); // Left.
__ orr(TMP, R0, Operand(R1));
__ BranchIfNotSmi(TMP, not_smi_or_overflow);
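// OR-ing both operands and testing once checks the two Smi tags with a
// single branch: the low bit of TMP is clear only if both values are Smis.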
switch (kind) {
@@ -1763,8 +1828,8 @@
}
case Token::kEQ: {
__ CompareRegisters(R0, R1);
- __ LoadObject(R0, Bool::True());
- __ LoadObject(R1, Bool::False());
+ __ LoadObject(R0, CastHandle<Object>(TrueObject()));
+ __ LoadObject(R1, CastHandle<Object>(FalseObject()));
__ csel(R0, R1, R0, NE);
break;
}
@@ -1773,18 +1838,18 @@
}
// R5: IC data object (preserved).
- __ LoadFieldFromOffset(R6, R5, ICData::ic_data_offset());
+ __ LoadFieldFromOffset(R6, R5, target::ICData::ic_data_offset());
// R6: ic_data_array with check entries: classes and target functions.
- __ AddImmediate(R6, Array::data_offset() - kHeapObjectTag);
+ __ AddImmediate(R6, target::Array::data_offset() - kHeapObjectTag);
// R6: points directly to the first ic data array element.
#if defined(DEBUG)
// Check that first entry is for Smi/Smi.
Label error, ok;
- const intptr_t imm_smi_cid = reinterpret_cast<intptr_t>(Smi::New(kSmiCid));
+ const intptr_t imm_smi_cid = target::ToRawSmi(kSmiCid);
__ ldr(R1, Address(R6, 0));
__ CompareImmediate(R1, imm_smi_cid);
__ b(&error, NE);
- __ ldr(R1, Address(R6, kWordSize));
+ __ ldr(R1, Address(R6, target::kWordSize));
__ CompareImmediate(R1, imm_smi_cid);
__ b(&ok, EQ);
__ Bind(&error);
@@ -1792,10 +1857,11 @@
__ Bind(&ok);
#endif
if (FLAG_optimization_counter_threshold >= 0) {
- const intptr_t count_offset = ICData::CountIndexFor(num_args) * kWordSize;
+ const intptr_t count_offset =
+ target::ICData::CountIndexFor(num_args) * target::kWordSize;
// Update counter, ignore overflow.
__ LoadFromOffset(R1, R6, count_offset);
- __ adds(R1, R1, Operand(Smi::RawValue(1)));
+ __ adds(R1, R1, Operand(target::ToRawSmi(1)));
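+ // ToRawSmi(1) is the raw Smi bit pattern (1 << kSmiTagShift), so this
+ // bumps the tagged count by one without untagging.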
__ StoreToOffset(R1, R6, count_offset);
}
@@ -1812,7 +1878,7 @@
// - Check if 'num_args' (including receiver) match any IC data group.
// - Match found -> jump to target.
// - Match not found -> jump to IC miss.
-void StubCode::GenerateNArgsCheckInlineCacheStub(
+void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
Assembler* assembler,
intptr_t num_args,
const RuntimeEntry& handle_ic_miss,
@@ -1826,10 +1892,11 @@
Label ok;
// Check that the IC data array has NumArgsTested() == num_args.
// 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
- __ LoadFromOffset(R6, R5, ICData::state_bits_offset() - kHeapObjectTag,
+ __ LoadFromOffset(R6, R5,
+ target::ICData::state_bits_offset() - kHeapObjectTag,
kUnsignedWord);
- ASSERT(ICData::NumArgsTestedShift() == 0); // No shift needed.
- __ andi(R6, R6, Immediate(ICData::NumArgsTestedMask()));
+ ASSERT(target::ICData::NumArgsTestedShift() == 0); // No shift needed.
+ __ andi(R6, R6, Immediate(target::ICData::NumArgsTestedMask()));
__ CompareImmediate(R6, num_args);
__ b(&ok, EQ);
__ Stop("Incorrect stub for IC data");
@@ -1842,7 +1909,8 @@
if (!optimized) {
__ Comment("Check single stepping");
__ LoadIsolate(R6);
- __ LoadFromOffset(R6, R6, Isolate::single_step_offset(), kUnsignedByte);
+ __ LoadFromOffset(R6, R6, target::Isolate::single_step_offset(),
+ kUnsignedByte);
__ CompareRegisters(R6, ZR);
__ b(&stepping, NE);
__ Bind(&done_stepping);
@@ -1857,18 +1925,18 @@
__ Comment("Extract ICData initial values and receiver cid");
// Load arguments descriptor into R4.
- __ LoadFieldFromOffset(R4, R5, ICData::arguments_descriptor_offset());
+ __ LoadFieldFromOffset(R4, R5, target::ICData::arguments_descriptor_offset());
// Loop that checks if there is an IC data match.
Label loop, found, miss;
// R5: IC data object (preserved).
- __ LoadFieldFromOffset(R6, R5, ICData::ic_data_offset());
+ __ LoadFieldFromOffset(R6, R5, target::ICData::ic_data_offset());
// R6: ic_data_array with check entries: classes and target functions.
- __ AddImmediate(R6, Array::data_offset() - kHeapObjectTag);
+ __ AddImmediate(R6, target::Array::data_offset() - kHeapObjectTag);
// R6: points directly to the first ic data array element.
// Get the receiver's class ID (first read number of arguments from
// arguments descriptor array and then access the receiver from the stack).
- __ LoadFieldFromOffset(R7, R4, ArgumentsDescriptor::count_offset());
+ __ LoadFieldFromOffset(R7, R4, target::ArgumentsDescriptor::count_offset());
__ SmiUntag(R7); // Untag so we can use the LSL 3 addressing mode.
__ sub(R7, R7, Operand(1));
@@ -1895,7 +1963,7 @@
__ CompareRegisters(R0, R2); // Class id match?
if (num_args == 2) {
__ b(&update, NE); // Continue.
- __ LoadFromOffset(R2, R6, kWordSize);
+ __ LoadFromOffset(R2, R6, target::kWordSize);
__ CompareRegisters(R1, R2); // Class id match?
}
__ b(&found, EQ); // Break.
@@ -1903,10 +1971,11 @@
__ Bind(&update);
const intptr_t entry_size =
- ICData::TestEntryLengthFor(num_args, exactness_check) * kWordSize;
+ target::ICData::TestEntryLengthFor(num_args, exactness_check) *
+ target::kWordSize;
__ AddImmediate(R6, entry_size); // Next entry.
- __ CompareImmediate(R2, Smi::RawValue(kIllegalCid)); // Done?
+ __ CompareImmediate(R2, target::ToRawSmi(kIllegalCid)); // Done?
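+ // Entries are laid out flat in the ic_data array: num_args class ids, then
+ // the target and count (plus an exactness slot when checked); a kIllegalCid
+ // class id marks the end of the list.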
if (unroll == 0) {
__ b(&loop, NE);
} else {
@@ -1932,7 +2001,7 @@
__ Push(ZR);
// Push call arguments.
for (intptr_t i = 0; i < num_args; i++) {
- __ LoadFromOffset(TMP, R7, -i * kWordSize);
+ __ LoadFromOffset(TMP, R7, -i * target::kWordSize);
__ Push(TMP);
}
// Pass IC data object.
@@ -1957,22 +2026,24 @@
__ Bind(&found);
__ Comment("Update caller's counter");
// R6: pointer to an IC data check group.
- const intptr_t target_offset = ICData::TargetIndexFor(num_args) * kWordSize;
- const intptr_t count_offset = ICData::CountIndexFor(num_args) * kWordSize;
+ const intptr_t target_offset =
+ target::ICData::TargetIndexFor(num_args) * target::kWordSize;
+ const intptr_t count_offset =
+ target::ICData::CountIndexFor(num_args) * target::kWordSize;
__ LoadFromOffset(R0, R6, target_offset);
if (FLAG_optimization_counter_threshold >= 0) {
// Update counter, ignore overflow.
__ LoadFromOffset(R1, R6, count_offset);
- __ adds(R1, R1, Operand(Smi::RawValue(1)));
+ __ adds(R1, R1, Operand(target::ToRawSmi(1)));
__ StoreToOffset(R1, R6, count_offset);
}
__ Comment("Call target");
__ Bind(&call_target_function);
// R0: target function.
- __ LoadFieldFromOffset(CODE_REG, R0, Function::code_offset());
- __ LoadFieldFromOffset(R2, R0, Function::entry_point_offset());
+ __ LoadFieldFromOffset(CODE_REG, R0, target::Function::code_offset());
+ __ LoadFieldFromOffset(R2, R0, target::Function::entry_point_offset());
__ br(R2);
#if !defined(PRODUCT)
@@ -1999,43 +2070,45 @@
// 2 .. (length - 1): group of checks, each check containing:
// - N classes.
// - 1 target function.
-void StubCode::GenerateOneArgCheckInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub(
+ Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, R6);
GenerateNArgsCheckInlineCacheStub(
assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL);
}
-void StubCode::GenerateOneArgCheckInlineCacheWithExactnessCheckStub(
+void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub(
Assembler* assembler) {
__ Stop("Unimplemented");
}
-void StubCode::GenerateTwoArgsCheckInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub(
+ Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, R6);
GenerateNArgsCheckInlineCacheStub(assembler, 2,
kInlineCacheMissHandlerTwoArgsRuntimeEntry,
Token::kILLEGAL);
}
-void StubCode::GenerateSmiAddInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSmiAddInlineCacheStub(Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, R6);
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD);
}
-void StubCode::GenerateSmiSubInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSmiSubInlineCacheStub(Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, R6);
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kSUB);
}
-void StubCode::GenerateSmiEqualInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSmiEqualInlineCacheStub(Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, R6);
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ);
}
-void StubCode::GenerateOneArgOptimizedCheckInlineCacheStub(
+void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub(
Assembler* assembler) {
GenerateOptimizedUsageCounterIncrement(assembler);
GenerateNArgsCheckInlineCacheStub(assembler, 1,
@@ -2043,12 +2116,13 @@
Token::kILLEGAL, true /* optimized */);
}
-void StubCode::GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub(
- Assembler* assembler) {
+void StubCodeCompiler::
+ GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub(
+ Assembler* assembler) {
__ Stop("Unimplemented");
}
-void StubCode::GenerateTwoArgsOptimizedCheckInlineCacheStub(
+void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub(
Assembler* assembler) {
GenerateOptimizedUsageCounterIncrement(assembler);
GenerateNArgsCheckInlineCacheStub(assembler, 2,
@@ -2056,17 +2130,19 @@
Token::kILLEGAL, true /* optimized */);
}
-void StubCode::GenerateZeroArgsUnoptimizedStaticCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub(
+ Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, R6);
#if defined(DEBUG)
{
Label ok;
// Check that the IC data array has NumArgsTested() == 0.
// 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
- __ LoadFromOffset(R6, R5, ICData::state_bits_offset() - kHeapObjectTag,
+ __ LoadFromOffset(R6, R5,
+ target::ICData::state_bits_offset() - kHeapObjectTag,
kUnsignedWord);
- ASSERT(ICData::NumArgsTestedShift() == 0); // No shift needed.
- __ andi(R6, R6, Immediate(ICData::NumArgsTestedMask()));
+ ASSERT(target::ICData::NumArgsTestedShift() == 0); // No shift needed.
+ __ andi(R6, R6, Immediate(target::ICData::NumArgsTestedMask()));
__ CompareImmediate(R6, 0);
__ b(&ok, EQ);
__ Stop("Incorrect IC data for unoptimized static call");
@@ -2078,34 +2154,37 @@
#if !defined(PRODUCT)
Label stepping, done_stepping;
__ LoadIsolate(R6);
- __ LoadFromOffset(R6, R6, Isolate::single_step_offset(), kUnsignedByte);
+ __ LoadFromOffset(R6, R6, target::Isolate::single_step_offset(),
+ kUnsignedByte);
__ CompareImmediate(R6, 0);
__ b(&stepping, NE);
__ Bind(&done_stepping);
#endif
// R5: IC data object (preserved).
- __ LoadFieldFromOffset(R6, R5, ICData::ic_data_offset());
+ __ LoadFieldFromOffset(R6, R5, target::ICData::ic_data_offset());
// R6: ic_data_array with entries: target functions and count.
- __ AddImmediate(R6, Array::data_offset() - kHeapObjectTag);
+ __ AddImmediate(R6, target::Array::data_offset() - kHeapObjectTag);
// R6: points directly to the first ic data array element.
- const intptr_t target_offset = ICData::TargetIndexFor(0) * kWordSize;
- const intptr_t count_offset = ICData::CountIndexFor(0) * kWordSize;
+ const intptr_t target_offset =
+ target::ICData::TargetIndexFor(0) * target::kWordSize;
+ const intptr_t count_offset =
+ target::ICData::CountIndexFor(0) * target::kWordSize;
if (FLAG_optimization_counter_threshold >= 0) {
// Increment count for this call, ignore overflow.
__ LoadFromOffset(R1, R6, count_offset);
- __ adds(R1, R1, Operand(Smi::RawValue(1)));
+ __ adds(R1, R1, Operand(target::ToRawSmi(1)));
__ StoreToOffset(R1, R6, count_offset);
}
// Load arguments descriptor into R4.
- __ LoadFieldFromOffset(R4, R5, ICData::arguments_descriptor_offset());
+ __ LoadFieldFromOffset(R4, R5, target::ICData::arguments_descriptor_offset());
// Get function and call it, if possible.
__ LoadFromOffset(R0, R6, target_offset);
- __ LoadFieldFromOffset(CODE_REG, R0, Function::code_offset());
- __ LoadFieldFromOffset(R2, R0, Function::entry_point_offset());
+ __ LoadFieldFromOffset(CODE_REG, R0, target::Function::code_offset());
+ __ LoadFieldFromOffset(R2, R0, target::Function::entry_point_offset());
__ br(R2);
#if !defined(PRODUCT)
@@ -2120,13 +2199,15 @@
#endif
}
-void StubCode::GenerateOneArgUnoptimizedStaticCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub(
+ Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, R6);
GenerateNArgsCheckInlineCacheStub(
assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL);
}
-void StubCode::GenerateTwoArgsUnoptimizedStaticCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub(
+ Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, R6);
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL);
@@ -2135,7 +2216,7 @@
// Stub for compiling a function and jumping to the compiled code.
// R4: Arguments descriptor.
// R0: Function.
-void StubCode::GenerateLazyCompileStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
// Preserve arg desc.
__ EnterStubFrame();
__ Push(R4); // Save arg. desc.
@@ -2147,15 +2228,15 @@
// When using the interpreter, the function's code may now point to the
// InterpretCall stub. Make sure R0, R4, and R5 are preserved.
- __ LoadFieldFromOffset(CODE_REG, R0, Function::code_offset());
- __ LoadFieldFromOffset(R2, R0, Function::entry_point_offset());
+ __ LoadFieldFromOffset(CODE_REG, R0, target::Function::code_offset());
+ __ LoadFieldFromOffset(R2, R0, target::Function::entry_point_offset());
__ br(R2);
}
// Stub for interpreting a function call.
// R4: Arguments descriptor.
// R0: Function.
-void StubCode::GenerateInterpretCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateInterpretCallStub(Assembler* assembler) {
#if defined(DART_PRECOMPILED_RUNTIME)
__ Stop("Not using interpreter")
#else
@@ -2166,7 +2247,7 @@
{
Label ok;
// Check that we are always entering from Dart code.
- __ LoadFromOffset(R8, THR, Thread::vm_tag_offset());
+ __ LoadFromOffset(R8, THR, target::Thread::vm_tag_offset());
__ CompareImmediate(R8, VMTag::kDartCompiledTagId);
__ b(&ok, EQ);
__ Stop("Not coming from Dart code.");
@@ -2175,22 +2256,24 @@
#endif
// Adjust arguments count for type arguments vector.
- __ LoadFieldFromOffset(R2, R4, ArgumentsDescriptor::count_offset());
+ __ LoadFieldFromOffset(R2, R4, target::ArgumentsDescriptor::count_offset());
__ SmiUntag(R2);
- __ LoadFieldFromOffset(R1, R4, ArgumentsDescriptor::type_args_len_offset());
+ __ LoadFieldFromOffset(R1, R4,
+ target::ArgumentsDescriptor::type_args_len_offset());
__ cmp(R1, Operand(0));
__ csinc(R2, R2, R2, EQ); // R2 <- (R1 == 0) ? R2 : R2 + 1.
// Compute argv.
__ add(R3, ZR, Operand(R2, LSL, 3));
__ add(R3, FP, Operand(R3));
- __ AddImmediate(R3, kParamEndSlotFromFp * kWordSize);
+ __ AddImmediate(R3,
+ target::frame_layout.param_end_from_fp * target::kWordSize);
// Indicate decreasing memory addresses of arguments with negative argc.
__ neg(R2, R2);
// Align frame before entering C++ world. No shadow stack space required.
- __ ReserveAlignedFrameSpace(0 * kWordSize);
+ __ ReserveAlignedFrameSpace(0 * target::kWordSize);
// Pass arguments in registers.
// R0: Function.
@@ -2201,11 +2284,12 @@
// Save exit frame information to enable stack walking as we are about
// to transition to Dart VM C++ code.
- __ StoreToOffset(FP, THR, Thread::top_exit_frame_info_offset());
+ __ StoreToOffset(FP, THR, target::Thread::top_exit_frame_info_offset());
// Mark that the thread is executing VM code.
- __ LoadFromOffset(R5, THR, Thread::interpret_call_entry_point_offset());
- __ StoreToOffset(R5, THR, Thread::vm_tag_offset());
+ __ LoadFromOffset(R5, THR,
+ target::Thread::interpret_call_entry_point_offset());
+ __ StoreToOffset(R5, THR, target::Thread::vm_tag_offset());
// We are entering runtime code, so the C stack pointer must be restored from
// the stack limit to the top of the stack. We cache the stack limit address
@@ -2220,14 +2304,15 @@
__ mov(CSP, R25);
// Refresh write barrier mask.
- __ ldr(BARRIER_MASK, Address(THR, Thread::write_barrier_mask_offset()));
+ __ ldr(BARRIER_MASK,
+ Address(THR, target::Thread::write_barrier_mask_offset()));
// Mark that the thread is executing Dart code.
__ LoadImmediate(R2, VMTag::kDartCompiledTagId);
- __ StoreToOffset(R2, THR, Thread::vm_tag_offset());
+ __ StoreToOffset(R2, THR, target::Thread::vm_tag_offset());
// Reset exit frame information in Isolate structure.
- __ StoreToOffset(ZR, THR, Thread::top_exit_frame_info_offset());
+ __ StoreToOffset(ZR, THR, target::Thread::top_exit_frame_info_offset());
__ LeaveStubFrame();
__ ret();
@@ -2235,7 +2320,7 @@
}
// R5: Contains an ICData.
-void StubCode::GenerateICCallBreakpointStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateICCallBreakpointStub(Assembler* assembler) {
__ EnterStubFrame();
__ Push(R5);
__ Push(ZR); // Space for result.
@@ -2243,29 +2328,30 @@
__ Pop(CODE_REG);
__ Pop(R5);
__ LeaveStubFrame();
- __ LoadFieldFromOffset(R0, CODE_REG, Code::entry_point_offset());
+ __ LoadFieldFromOffset(R0, CODE_REG, target::Code::entry_point_offset());
__ br(R0);
}
-void StubCode::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
__ EnterStubFrame();
__ Push(ZR); // Space for result.
__ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
__ Pop(CODE_REG);
__ LeaveStubFrame();
- __ LoadFieldFromOffset(R0, CODE_REG, Code::entry_point_offset());
+ __ LoadFieldFromOffset(R0, CODE_REG, target::Code::entry_point_offset());
__ br(R0);
}
// Called only from unoptimized code. All relevant registers have been saved.
-void StubCode::GenerateDebugStepCheckStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDebugStepCheckStub(Assembler* assembler) {
#if defined(PRODUCT)
__ ret();
#else
// Check single stepping.
Label stepping, done_stepping;
__ LoadIsolate(R1);
- __ LoadFromOffset(R1, R1, Isolate::single_step_offset(), kUnsignedByte);
+ __ LoadFromOffset(R1, R1, target::Isolate::single_step_offset(),
+ kUnsignedByte);
__ CompareImmediate(R1, 0);
__ b(&stepping, NE);
__ Bind(&done_stepping);
@@ -2284,7 +2370,7 @@
// R0: instance (must be preserved).
// R1: instantiator type arguments (only if n == 4, can be raw_null).
// R2: function type arguments (only if n == 4, can be raw_null).
-// R3: SubtypeTestCache.
+// R3: target::SubtypeTestCache.
//
// Preserves R0/R2/R8.
//
@@ -2304,12 +2390,13 @@
const Register kNullReg = R7;
- __ LoadObject(kNullReg, Object::null_object());
+ __ LoadObject(kNullReg, NullObject());
// Loop initialization (moved up here to avoid having all dependent loads
// after each other).
- __ ldr(kCacheReg, FieldAddress(kCacheReg, SubtypeTestCache::cache_offset()));
- __ AddImmediate(kCacheReg, Array::data_offset() - kHeapObjectTag);
+ __ ldr(kCacheReg,
+ FieldAddress(kCacheReg, target::SubtypeTestCache::cache_offset()));
+ __ AddImmediate(kCacheReg, target::Array::data_offset() - kHeapObjectTag);
Label loop, not_closure;
if (n >= 4) {
@@ -2323,19 +2410,20 @@
// Closure handling.
{
__ ldr(kInstanceCidOrFunction,
- FieldAddress(kInstanceReg, Closure::function_offset()));
+ FieldAddress(kInstanceReg, target::Closure::function_offset()));
if (n >= 2) {
- __ ldr(kInstanceInstantiatorTypeArgumentsReg,
- FieldAddress(kInstanceReg,
- Closure::instantiator_type_arguments_offset()));
+ __ ldr(
+ kInstanceInstantiatorTypeArgumentsReg,
+ FieldAddress(kInstanceReg,
+ target::Closure::instantiator_type_arguments_offset()));
if (n >= 6) {
ASSERT(n == 6);
__ ldr(kInstanceParentFunctionTypeArgumentsReg,
FieldAddress(kInstanceReg,
- Closure::function_type_arguments_offset()));
+ target::Closure::function_type_arguments_offset()));
__ ldr(kInstanceDelayedFunctionTypeArgumentsReg,
FieldAddress(kInstanceReg,
- Closure::delayed_type_arguments_offset()));
+ target::Closure::delayed_type_arguments_offset()));
}
}
__ b(&loop);
@@ -2353,8 +2441,9 @@
__ LoadClassById(R5, kInstanceCidOrFunction);
__ mov(kInstanceInstantiatorTypeArgumentsReg, kNullReg);
__ LoadFieldFromOffset(
- R5, R5, Class::type_arguments_field_offset_in_words_offset(), kWord);
- __ CompareImmediate(R5, Class::kNoTypeArguments);
+ R5, R5, target::Class::type_arguments_field_offset_in_words_offset(),
+ kWord);
+ __ CompareImmediate(R5, target::Class::kNoTypeArguments);
__ b(&has_no_type_arguments, EQ);
__ add(R5, kInstanceReg, Operand(R5, LSL, 3));
__ ldr(kInstanceInstantiatorTypeArgumentsReg, FieldAddress(R5, 0));
@@ -2372,7 +2461,8 @@
// Loop header
__ Bind(&loop);
__ ldr(R5, Address(kCacheReg,
- kWordSize * SubtypeTestCache::kInstanceClassIdOrFunction));
+ target::kWordSize *
+ target::SubtypeTestCache::kInstanceClassIdOrFunction));
__ cmp(R5, Operand(kNullReg));
__ b(&not_found, EQ);
__ cmp(R5, Operand(kInstanceCidOrFunction));
@@ -2381,7 +2471,8 @@
} else {
__ b(&next_iteration, NE);
__ ldr(R5, Address(kCacheReg,
- kWordSize * SubtypeTestCache::kInstanceTypeArguments));
+ target::kWordSize *
+ target::SubtypeTestCache::kInstanceTypeArguments));
__ cmp(R5, Operand(kInstanceInstantiatorTypeArgumentsReg));
if (n == 2) {
__ b(&found, EQ);
@@ -2389,11 +2480,13 @@
__ b(&next_iteration, NE);
__ ldr(R5,
Address(kCacheReg,
- kWordSize * SubtypeTestCache::kInstantiatorTypeArguments));
+ target::kWordSize *
+ target::SubtypeTestCache::kInstantiatorTypeArguments));
__ cmp(R5, Operand(kInstantiatorTypeArgumentsReg));
__ b(&next_iteration, NE);
__ ldr(R5, Address(kCacheReg,
- kWordSize * SubtypeTestCache::kFunctionTypeArguments));
+ target::kWordSize *
+ target::SubtypeTestCache::kFunctionTypeArguments));
__ cmp(R5, Operand(kFunctionTypeArgumentsReg));
if (n == 4) {
__ b(&found, EQ);
@@ -2401,31 +2494,30 @@
ASSERT(n == 6);
__ b(&next_iteration, NE);
- __ ldr(R5,
- Address(
- kCacheReg,
- kWordSize *
- SubtypeTestCache::kInstanceParentFunctionTypeArguments));
+ __ ldr(R5, Address(kCacheReg,
+ target::kWordSize *
+ target::SubtypeTestCache::
+ kInstanceParentFunctionTypeArguments));
__ cmp(R5, Operand(kInstanceParentFunctionTypeArgumentsReg));
__ b(&next_iteration, NE);
- __ ldr(
- R5,
- Address(
- kCacheReg,
- kWordSize *
- SubtypeTestCache::kInstanceDelayedFunctionTypeArguments));
+ __ ldr(R5, Address(kCacheReg,
+ target::kWordSize *
+ target::SubtypeTestCache::
+ kInstanceDelayedFunctionTypeArguments));
__ cmp(R5, Operand(kInstanceDelayedFunctionTypeArgumentsReg));
__ b(&found, EQ);
}
}
}
__ Bind(&next_iteration);
- __ AddImmediate(kCacheReg, kWordSize * SubtypeTestCache::kTestEntryLength);
+ __ AddImmediate(kCacheReg, target::kWordSize *
+ target::SubtypeTestCache::kTestEntryLength);
__ b(&loop);
__ Bind(&found);
- __ ldr(R1, Address(kCacheReg, kWordSize * SubtypeTestCache::kTestResult));
+ __ ldr(R1, Address(kCacheReg, target::kWordSize *
+ target::SubtypeTestCache::kTestResult));
__ ret();
__ Bind(&not_found);
@@ -2434,22 +2526,22 @@
}
// See comment on [GenerateSubtypeNTestCacheStub].
-void StubCode::GenerateSubtype1TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype1TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 1);
}
// See comment on [GenerateSubtypeNTestCacheStub].
-void StubCode::GenerateSubtype2TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype2TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 2);
}
// See comment on [GenerateSubtypeNTestCacheStub].
-void StubCode::GenerateSubtype4TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype4TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 4);
}
// See comment on [GenerateSubtypeNTestCacheStub].
-void StubCode::GenerateSubtype6TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype6TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 6);
}
@@ -2470,100 +2562,52 @@
//
// Note of warning: The caller will not populate CODE_REG and we have therefore
// no access to the pool.
-void StubCode::GenerateDefaultTypeTestStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDefaultTypeTestStub(Assembler* assembler) {
Label done;
const Register kInstanceReg = R0;
const Register kDstTypeReg = R8;
// Fast case for 'null'.
- __ CompareObject(kInstanceReg, Object::null_object());
+ __ CompareObject(kInstanceReg, NullObject());
__ BranchIf(EQUAL, &done);
// Fast case for 'int'.
Label not_smi;
__ BranchIfNotSmi(kInstanceReg, &not_smi);
- __ CompareObject(kDstTypeReg, Object::ZoneHandle(Type::IntType()));
+ __ CompareObject(kDstTypeReg, CastHandle<Object>(IntType()));
__ BranchIf(EQUAL, &done);
__ Bind(&not_smi);
// Tail call the [SubtypeTestCache]-based implementation.
- __ ldr(CODE_REG, Address(THR, Thread::slow_type_test_stub_offset()));
- __ ldr(R9, FieldAddress(CODE_REG, Code::entry_point_offset()));
+ __ ldr(CODE_REG, Address(THR, target::Thread::slow_type_test_stub_offset()));
+ __ ldr(R9, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
__ br(R9);
__ Bind(&done);
__ Ret();
}
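Reduced to pseudocode, the default type-testing stub above is two fast
paths followed by a tail call into the shared slow stub. A minimal sketch
under that reading (the types and helper here are hypothetical stand-ins,
not VM declarations):

    // Hypothetical control-flow sketch of the default TTS.
    enum class DstType { kInt, kOther };

    struct Instance {
      bool is_null;
      bool is_smi;
    };

    // Stands in for the tail call through
    // target::Thread::slow_type_test_stub_offset().
    bool SlowTypeTest(const Instance&, DstType) { return false; }

    bool DefaultTypeTest(const Instance& obj, DstType dst) {
      if (obj.is_null) return true;                 // fast case for 'null'
      if (obj.is_smi && dst == DstType::kInt) {
        return true;                                // fast case for 'int'
      }
      return SlowTypeTest(obj, dst);  // [SubtypeTestCache]-based slow path
    }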
-void StubCode::GenerateTopTypeTypeTestStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateTopTypeTypeTestStub(Assembler* assembler) {
__ Ret();
}
-void StubCode::GenerateTypeRefTypeTestStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateTypeRefTypeTestStub(Assembler* assembler) {
const Register kTypeRefReg = R8;
// We dereference the TypeRef and tail-call to its type testing stub.
- __ ldr(kTypeRefReg, FieldAddress(kTypeRefReg, TypeRef::type_offset()));
- __ ldr(R9, FieldAddress(kTypeRefReg,
- AbstractType::type_test_stub_entry_point_offset()));
+ __ ldr(kTypeRefReg,
+ FieldAddress(kTypeRefReg, target::TypeRef::type_offset()));
+ __ ldr(R9, FieldAddress(
+ kTypeRefReg,
+ target::AbstractType::type_test_stub_entry_point_offset()));
__ br(R9);
}
-void StubCode::GenerateUnreachableTypeTestStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateUnreachableTypeTestStub(Assembler* assembler) {
__ Breakpoint();
}
-void TypeTestingStubGenerator::BuildOptimizedTypeTestStub(
- Assembler* assembler,
- HierarchyInfo* hi,
- const Type& type,
- const Class& type_class) {
- const Register kInstanceReg = R0;
- const Register kClassIdReg = R9;
-
- BuildOptimizedTypeTestStubFastCases(assembler, hi, type, type_class,
- kInstanceReg, kClassIdReg);
-
- __ ldr(CODE_REG, Address(THR, Thread::slow_type_test_stub_offset()));
- __ ldr(R9, FieldAddress(CODE_REG, Code::entry_point_offset()));
- __ br(R9);
-}
-
-void TypeTestingStubGenerator::
- BuildOptimizedSubclassRangeCheckWithTypeArguments(Assembler* assembler,
- HierarchyInfo* hi,
- const Class& type_class,
- const TypeArguments& tp,
- const TypeArguments& ta) {
- const Register kInstanceReg = R0;
- const Register kInstanceTypeArguments = R7;
- const Register kClassIdReg = R9;
-
- BuildOptimizedSubclassRangeCheckWithTypeArguments(
- assembler, hi, type_class, tp, ta, kClassIdReg, kInstanceReg,
- kInstanceTypeArguments);
-}
-
-void TypeTestingStubGenerator::BuildOptimizedTypeArgumentValueCheck(
- Assembler* assembler,
- HierarchyInfo* hi,
- const AbstractType& type_arg,
- intptr_t type_param_value_offset_i,
- Label* check_failed) {
- const Register kInstantiatorTypeArgumentsReg = R1;
- const Register kFunctionTypeArgumentsReg = R2;
- const Register kInstanceTypeArguments = R7;
-
- const Register kClassIdReg = R9;
- const Register kOwnTypeArgumentValue = TMP;
-
- BuildOptimizedTypeArgumentValueCheck(
- assembler, hi, type_arg, type_param_value_offset_i, kClassIdReg,
- kInstanceTypeArguments, kInstantiatorTypeArgumentsReg,
- kFunctionTypeArgumentsReg, kOwnTypeArgumentValue, check_failed);
-}
-
static void InvokeTypeCheckFromTypeTestStub(Assembler* assembler,
TypeCheckMode mode) {
const Register kInstanceReg = R0;
@@ -2573,14 +2617,14 @@
const Register kSubtypeTestCacheReg = R3;
const Register kDstTypeReg = R8;
- __ PushObject(Object::null_object()); // Make room for result.
+ __ PushObject(NullObject()); // Make room for result.
__ Push(kInstanceReg);
__ Push(kDstTypeReg);
__ Push(kInstantiatorTypeArgumentsReg);
__ Push(kFunctionTypeArgumentsReg);
- __ PushObject(Object::null_object());
+ __ PushObject(NullObject());
__ Push(kSubtypeTestCacheReg);
- __ PushObject(Smi::ZoneHandle(Smi::New(mode)));
+ __ PushImmediate(target::ToRawSmi(mode));
__ CallRuntime(kTypeCheckRuntimeEntry, 7);
__ Drop(1); // mode
__ Pop(kSubtypeTestCacheReg);
@@ -2592,15 +2636,16 @@
__ Drop(1); // Discard return value.
}
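InvokeTypeCheckFromTypeTestStub builds a fixed eight-slot frame for the
seven-argument TypeCheck runtime entry. The slot order, as pushed above
(a descriptive summary, not VM code):

    // Pushed oldest-first before CallRuntime(kTypeCheckRuntimeEntry, 7):
    //   [0] null                    -- room for the runtime result
    //   [1] instance                -- R0
    //   [2] destination type        -- R8
    //   [3] instantiator type args  -- R1
    //   [4] function type args      -- R2
    //   [5] null                    -- placeholder argument
    //   [6] SubtypeTestCache        -- R3
    //   [7] mode, as a raw Smi      -- via PushImmediate
    // On return the stub drops the mode, pops the cache and the remaining
    // arguments back into their registers, and discards the result slot.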
-void StubCode::GenerateLazySpecializeTypeTestStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateLazySpecializeTypeTestStub(
+ Assembler* assembler) {
const Register kInstanceReg = R0;
Label done;
- __ CompareObject(kInstanceReg, Object::null_object());
+ __ CompareObject(kInstanceReg, NullObject());
__ BranchIf(EQUAL, &done);
__ ldr(CODE_REG,
- Address(THR, Thread::lazy_specialize_type_test_stub_offset()));
+ Address(THR, target::Thread::lazy_specialize_type_test_stub_offset()));
__ EnterStubFrame();
InvokeTypeCheckFromTypeTestStub(assembler, kTypeCheckFromLazySpecializeStub);
__ LeaveStubFrame();
@@ -2609,7 +2654,7 @@
__ Ret();
}
-void StubCode::GenerateSlowTypeTestStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSlowTypeTestStub(Assembler* assembler) {
Label done, call_runtime;
const Register kInstanceReg = R0;
@@ -2622,14 +2667,14 @@
#ifdef DEBUG
// Guaranteed by caller.
Label no_error;
- __ CompareObject(kInstanceReg, Object::null_object());
+ __ CompareObject(kInstanceReg, NullObject());
__ BranchIf(NOT_EQUAL, &no_error);
__ Breakpoint();
__ Bind(&no_error);
#endif
// If the subtype-cache is null, it needs to be lazily-created by the runtime.
- __ CompareObject(kSubtypeTestCacheReg, Object::null_object());
+ __ CompareObject(kSubtypeTestCacheReg, NullObject());
__ BranchIf(EQUAL, &call_runtime);
const Register kTmp = R9;
@@ -2641,13 +2686,15 @@
__ BranchIf(NOT_EQUAL, &is_complex_case);
// Check whether this [Type] is instantiated/uninstantiated.
- __ ldr(kTmp, FieldAddress(kDstTypeReg, Type::type_state_offset()), kByte);
- __ cmp(kTmp, Operand(RawType::kFinalizedInstantiated));
+ __ ldr(kTmp, FieldAddress(kDstTypeReg, target::Type::type_state_offset()),
+ kByte);
+ __ cmp(kTmp,
+ Operand(target::RawAbstractType::kTypeStateFinalizedInstantiated));
__ BranchIf(NOT_EQUAL, &is_complex_case);
// Check whether this [Type] is a function type.
- __ ldr(kTmp, FieldAddress(kDstTypeReg, Type::signature_offset()));
- __ CompareObject(kTmp, Object::null_object());
+ __ ldr(kTmp, FieldAddress(kDstTypeReg, target::Type::signature_offset()));
+ __ CompareObject(kTmp, NullObject());
__ BranchIf(NOT_EQUAL, &is_complex_case);
// This [Type] could be a FutureOr. Subtype2TestCache does not support Smi.
@@ -2658,8 +2705,8 @@
__ Bind(&is_simple_case);
{
__ PushPair(kInstantiatorTypeArgumentsReg, kSubtypeTestCacheReg);
- __ BranchLink(StubCode::Subtype2TestCache());
- __ CompareObject(R1, Bool::True());
+ __ BranchLink(StubCodeSubtype2TestCache());
+ __ CompareObject(R1, CastHandle<Object>(TrueObject()));
__ PopPair(kInstantiatorTypeArgumentsReg, kSubtypeTestCacheReg);
__ BranchIf(EQUAL, &done); // Cache said: yes.
__ Jump(&call_runtime);
@@ -2668,8 +2715,8 @@
__ Bind(&is_complex_case);
{
__ PushPair(kInstantiatorTypeArgumentsReg, kSubtypeTestCacheReg);
- __ BranchLink(StubCode::Subtype6TestCache());
- __ CompareObject(R1, Bool::True());
+ __ BranchLink(StubCodeSubtype6TestCache());
+ __ CompareObject(R1, CastHandle<Object>(TrueObject()));
__ PopPair(kInstantiatorTypeArgumentsReg, kSubtypeTestCacheReg);
__ BranchIf(EQUAL, &done); // Cache said: yes.
// Fall through to runtime_call
@@ -2682,11 +2729,11 @@
// because we do constant evaluation with default stubs and only install
// optimized versions before writing out the AOT snapshot.
// So dynamic/Object/void will run with the default stub in constant evaluation.
- __ CompareObject(kDstTypeReg, Type::dynamic_type());
+ __ CompareObject(kDstTypeReg, CastHandle<Object>(DynamicType()));
__ BranchIf(EQUAL, &done);
- __ CompareObject(kDstTypeReg, Type::Handle(Type::ObjectType()));
+ __ CompareObject(kDstTypeReg, CastHandle<Object>(ObjectType()));
__ BranchIf(EQUAL, &done);
- __ CompareObject(kDstTypeReg, Type::void_type());
+ __ CompareObject(kDstTypeReg, CastHandle<Object>(VoidType()));
__ BranchIf(EQUAL, &done);
InvokeTypeCheckFromTypeTestStub(assembler, kTypeCheckFromSlowStub);
@@ -2696,7 +2743,7 @@
__ Ret();
}
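The "simple" versus "complex" split above decides which cache stub the
slow TTS may use: only an instantiated, finalized, non-function class type
that cannot be a FutureOr takes the two-key Subtype2TestCache; everything
else needs the six-key cache, and a miss in either falls through to the
TypeCheck runtime entry. A one-line restatement of that predicate
(hypothetical parameter names):

    // Hypothetical restatement of the is_simple_case checks.
    bool UseSimpleCache(bool is_type_cid, bool finalized_instantiated,
                        bool has_signature, bool maybe_future_or) {
      return is_type_cid && finalized_instantiated && !has_signature &&
             !maybe_future_or;
    }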
-void StubCode::GenerateGetCStackPointerStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateGetCStackPointerStub(Assembler* assembler) {
__ mov(R0, CSP);
__ ret();
}
@@ -2708,23 +2755,24 @@
// R2: frame_pointer.
// R3: thread.
// Does not return.
-void StubCode::GenerateJumpToFrameStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateJumpToFrameStub(Assembler* assembler) {
ASSERT(kExceptionObjectReg == R0);
ASSERT(kStackTraceObjectReg == R1);
__ mov(LR, R0); // Program counter.
__ mov(SP, R1); // Stack pointer.
__ mov(FP, R2); // Frame_pointer.
__ mov(THR, R3);
- __ ldr(BARRIER_MASK, Address(THR, Thread::write_barrier_mask_offset()));
+ __ ldr(BARRIER_MASK,
+ Address(THR, target::Thread::write_barrier_mask_offset()));
// Set the tag.
__ LoadImmediate(R2, VMTag::kDartCompiledTagId);
- __ StoreToOffset(R2, THR, Thread::vm_tag_offset());
+ __ StoreToOffset(R2, THR, target::Thread::vm_tag_offset());
// Clear top exit frame.
- __ StoreToOffset(ZR, THR, Thread::top_exit_frame_info_offset());
+ __ StoreToOffset(ZR, THR, target::Thread::top_exit_frame_info_offset());
// Restore the pool pointer.
__ RestoreCodePointer();
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
- __ ldr(PP, Address(THR, Thread::global_object_pool_offset()));
+ __ ldr(PP, Address(THR, target::Thread::global_object_pool_offset()));
__ sub(PP, PP, Operand(kHeapObjectTag)); // Pool in PP is untagged!
} else {
__ LoadPoolPointer();
@@ -2737,19 +2785,21 @@
//
// The arguments are stored in the Thread object.
// Does not return.
-void StubCode::GenerateRunExceptionHandlerStub(Assembler* assembler) {
- __ LoadFromOffset(LR, THR, Thread::resume_pc_offset());
+void StubCodeCompiler::GenerateRunExceptionHandlerStub(Assembler* assembler) {
+ __ LoadFromOffset(LR, THR, target::Thread::resume_pc_offset());
- ASSERT(Thread::CanLoadFromThread(Object::null_object()));
- __ LoadFromOffset(R2, THR, Thread::OffsetFromThread(Object::null_object()));
+ word offset_from_thread = 0;
+ bool ok = target::CanLoadFromThread(NullObject(), &offset_from_thread);
+ ASSERT(ok);
+ __ LoadFromOffset(R2, THR, offset_from_thread);
// Exception object.
- __ LoadFromOffset(R0, THR, Thread::active_exception_offset());
- __ StoreToOffset(R2, THR, Thread::active_exception_offset());
+ __ LoadFromOffset(R0, THR, target::Thread::active_exception_offset());
+ __ StoreToOffset(R2, THR, target::Thread::active_exception_offset());
// StackTrace object.
- __ LoadFromOffset(R1, THR, Thread::active_stacktrace_offset());
- __ StoreToOffset(R2, THR, Thread::active_stacktrace_offset());
+ __ LoadFromOffset(R1, THR, target::Thread::active_stacktrace_offset());
+ __ StoreToOffset(R2, THR, target::Thread::active_stacktrace_offset());
__ ret(); // Jump to the exception handler code.
}
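The stub above hands the active exception over to the handler: it moves
the resume pc into LR, the exception and stack trace into their fixed
registers, and resets both Thread slots to null. A minimal C++ sketch of
that hand-off (ThreadState and the function are illustrative only):

    #include <cstdint>

    struct ThreadState {
      uintptr_t resume_pc;
      uintptr_t active_exception;
      uintptr_t active_stacktrace;
    };

    // Returns the pc to jump to (LR); exception_reg/stacktrace_reg model
    // R0 and R1. The thread slots are cleared back to null, as above.
    uintptr_t RunExceptionHandler(ThreadState* thr, uintptr_t null_value,
                                  uintptr_t* exception_reg,
                                  uintptr_t* stacktrace_reg) {
      *exception_reg = thr->active_exception;
      thr->active_exception = null_value;
      *stacktrace_reg = thr->active_stacktrace;
      thr->active_stacktrace = null_value;
      return thr->resume_pc;
    }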
@@ -2757,13 +2807,13 @@
// Deoptimize a frame on the call stack before rewinding.
// The arguments are stored in the Thread object.
// No result.
-void StubCode::GenerateDeoptForRewindStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) {
// Push zap value instead of CODE_REG.
__ LoadImmediate(TMP, kZapCodeReg);
__ Push(TMP);
// Load the deopt pc into LR.
- __ LoadFromOffset(LR, THR, Thread::resume_pc_offset());
+ __ LoadFromOffset(LR, THR, target::Thread::resume_pc_offset());
GenerateDeoptimizationSequence(assembler, kEagerDeopt);
// After we have deoptimized, jump to the correct frame.
@@ -2776,7 +2826,7 @@
// Calls to the runtime to optimize the given function.
// R6: function to be re-optimized.
// R4: argument descriptor (preserved).
-void StubCode::GenerateOptimizeFunctionStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) {
__ EnterStubFrame();
__ Push(R4);
// Setup space on stack for the return value.
@@ -2786,8 +2836,8 @@
__ Pop(R0); // Discard argument.
__ Pop(R0); // Get Function object
__ Pop(R4); // Restore argument descriptor.
- __ LoadFieldFromOffset(CODE_REG, R0, Function::code_offset());
- __ LoadFieldFromOffset(R1, R0, Function::entry_point_offset());
+ __ LoadFieldFromOffset(CODE_REG, R0, target::Function::code_offset());
+ __ LoadFieldFromOffset(R1, R0, target::Function::entry_point_offset());
__ LeaveStubFrame();
__ br(R1);
__ brk(0);
@@ -2813,8 +2863,8 @@
__ b(&done, NE);
// Double values bitwise compare.
- __ LoadFieldFromOffset(left, left, Double::value_offset());
- __ LoadFieldFromOffset(right, right, Double::value_offset());
+ __ LoadFieldFromOffset(left, left, target::Double::value_offset());
+ __ LoadFieldFromOffset(right, right, target::Double::value_offset());
__ b(&reference_compare);
__ Bind(&check_mint);
@@ -2822,8 +2872,8 @@
__ b(&reference_compare, NE);
__ CompareClassId(right, kMintCid);
__ b(&done, NE);
- __ LoadFieldFromOffset(left, left, Mint::value_offset());
- __ LoadFieldFromOffset(right, right, Mint::value_offset());
+ __ LoadFieldFromOffset(left, left, target::Mint::value_offset());
+ __ LoadFieldFromOffset(right, right, target::Mint::value_offset());
__ Bind(&reference_compare);
__ CompareRegisters(left, right);
@@ -2835,13 +2885,14 @@
// SP + 4: left operand.
// SP + 0: right operand.
// Return Zero condition flag set if equal.
-void StubCode::GenerateUnoptimizedIdenticalWithNumberCheckStub(
+void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub(
Assembler* assembler) {
#if !defined(PRODUCT)
// Check single stepping.
Label stepping, done_stepping;
__ LoadIsolate(R1);
- __ LoadFromOffset(R1, R1, Isolate::single_step_offset(), kUnsignedByte);
+ __ LoadFromOffset(R1, R1, target::Isolate::single_step_offset(),
+ kUnsignedByte);
__ CompareImmediate(R1, 0);
__ b(&stepping, NE);
__ Bind(&done_stepping);
@@ -2849,8 +2900,8 @@
const Register left = R1;
const Register right = R0;
- __ LoadFromOffset(left, SP, 1 * kWordSize);
- __ LoadFromOffset(right, SP, 0 * kWordSize);
+ __ LoadFromOffset(left, SP, 1 * target::kWordSize);
+ __ LoadFromOffset(right, SP, 0 * target::kWordSize);
GenerateIdenticalWithNumberCheckStub(assembler, left, right);
__ ret();
@@ -2869,12 +2920,12 @@
// SP + 4: left operand.
// SP + 0: right operand.
// Return Zero condition flag set if equal.
-void StubCode::GenerateOptimizedIdenticalWithNumberCheckStub(
+void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub(
Assembler* assembler) {
const Register left = R1;
const Register right = R0;
- __ LoadFromOffset(left, SP, 1 * kWordSize);
- __ LoadFromOffset(right, SP, 0 * kWordSize);
+ __ LoadFromOffset(left, SP, 1 * target::kWordSize);
+ __ LoadFromOffset(right, SP, 0 * target::kWordSize);
GenerateIdenticalWithNumberCheckStub(assembler, left, right);
__ ret();
}
@@ -2885,7 +2936,7 @@
// Passed to target:
// CODE_REG: target Code
// R4: arguments descriptor
-void StubCode::GenerateMegamorphicCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
// Jump if receiver is a smi.
Label smi_case;
__ BranchIfSmi(R0, &smi_case);
@@ -2895,8 +2946,8 @@
Label cid_loaded;
__ Bind(&cid_loaded);
- __ ldr(R2, FieldAddress(R5, MegamorphicCache::buckets_offset()));
- __ ldr(R1, FieldAddress(R5, MegamorphicCache::mask_offset()));
+ __ ldr(R2, FieldAddress(R5, target::MegamorphicCache::buckets_offset()));
+ __ ldr(R1, FieldAddress(R5, target::MegamorphicCache::mask_offset()));
// R2: cache buckets array.
// R1: mask as a smi.
@@ -2905,7 +2956,7 @@
// R0: class ID of the receiver (smi).
// Compute the table index.
- ASSERT(MegamorphicCache::kSpreadFactor == 7);
+ ASSERT(target::MegamorphicCache::kSpreadFactor == 7);
// Use lsl and sub to multiply with 7 == 8 - 1.
__ LslImmediate(R3, R0, 3);
__ sub(R3, R3, Operand(R0));
@@ -2914,7 +2965,7 @@
__ Bind(&loop);
__ and_(R3, R3, Operand(R1));
- const intptr_t base = Array::data_offset();
+ const intptr_t base = target::Array::data_offset();
// R3 is smi tagged, but table entries are 16 bytes, so LSL 3.
__ add(TMP, R2, Operand(R3, LSL, 3));
__ ldr(R6, FieldAddress(TMP, base));
@@ -2928,17 +2979,19 @@
// proper target for the given name and arguments descriptor. If the
// illegal class id was found, the target is a cache miss handler that can
// be invoked as a normal Dart function.
- const auto target_address = FieldAddress(TMP, base + kWordSize);
+ const auto target_address = FieldAddress(TMP, base + target::kWordSize);
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
__ ldr(R1, target_address);
__ ldr(ARGS_DESC_REG,
- FieldAddress(R5, MegamorphicCache::arguments_descriptor_offset()));
+ FieldAddress(
+ R5, target::MegamorphicCache::arguments_descriptor_offset()));
} else {
__ ldr(R0, target_address);
- __ ldr(R1, FieldAddress(R0, Function::entry_point_offset()));
+ __ ldr(R1, FieldAddress(R0, target::Function::entry_point_offset()));
__ ldr(ARGS_DESC_REG,
- FieldAddress(R5, MegamorphicCache::arguments_descriptor_offset()));
- __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
+ FieldAddress(
+ R5, target::MegamorphicCache::arguments_descriptor_offset()));
+ __ ldr(CODE_REG, FieldAddress(R0, target::Function::code_offset()));
}
__ br(R1);
@@ -2949,7 +3002,7 @@
__ b(&load_target, EQ); // branch if miss.
// Try the next entry in the table.
- __ AddImmediate(R3, Smi::RawValue(1));
+ __ AddImmediate(R3, target::ToRawSmi(1));
__ b(&loop);
// Load cid for the Smi case.
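The probe above is the usual megamorphic-cache lookup: spread the class id
by a factor of 7 (computed as cid*8 - cid with lsl/sub), mask it into a
power-of-two table of (cid, target) pairs, and probe linearly; slots
holding kIllegalCid yield the miss handler, which is itself callable. A
minimal sketch with untagged values (names hypothetical):

    #include <cstdint>

    struct Entry {
      uintptr_t cid;     // kIllegalCid marks a miss slot
      uintptr_t target;
    };

    uintptr_t Probe(const Entry* buckets, uintptr_t mask, uintptr_t cid,
                    uintptr_t illegal_cid) {
      uintptr_t index = ((cid << 3) - cid) & mask;  // kSpreadFactor == 7
      while (true) {
        uintptr_t entry_cid = buckets[index].cid;
        if (entry_cid == cid || entry_cid == illegal_cid) {
          return buckets[index].target;  // hit, or the miss handler
        }
        index = (index + 1) & mask;  // next entry, re-masked
      }
    }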
@@ -2964,12 +3017,12 @@
// Passed to target:
// CODE_REG: target Code object
// R4: arguments descriptor
-void StubCode::GenerateICCallThroughFunctionStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateICCallThroughFunctionStub(Assembler* assembler) {
Label loop, found, miss;
__ ldr(ARGS_DESC_REG,
- FieldAddress(R5, ICData::arguments_descriptor_offset()));
- __ ldr(R8, FieldAddress(R5, ICData::ic_data_offset()));
- __ AddImmediate(R8, Array::data_offset() - kHeapObjectTag);
+ FieldAddress(R5, target::ICData::arguments_descriptor_offset()));
+ __ ldr(R8, FieldAddress(R5, target::ICData::ic_data_offset()));
+ __ AddImmediate(R8, target::Array::data_offset() - kHeapObjectTag);
// R8: first IC entry
__ LoadTaggedClassIdMayBeSmi(R1, R0);
// R1: receiver cid as Smi
@@ -2978,33 +3031,35 @@
__ ldr(R2, Address(R8, 0));
__ cmp(R1, Operand(R2));
__ b(&found, EQ);
- __ CompareImmediate(R2, Smi::RawValue(kIllegalCid));
+ __ CompareImmediate(R2, target::ToRawSmi(kIllegalCid));
__ b(&miss, EQ);
const intptr_t entry_length =
- ICData::TestEntryLengthFor(1, /*tracking_exactness=*/false) * kWordSize;
+ target::ICData::TestEntryLengthFor(1, /*tracking_exactness=*/false) *
+ target::kWordSize;
__ AddImmediate(R8, entry_length); // Next entry.
__ b(&loop);
__ Bind(&found);
- const intptr_t target_offset = ICData::TargetIndexFor(1) * kWordSize;
+ const intptr_t target_offset =
+ target::ICData::TargetIndexFor(1) * target::kWordSize;
__ ldr(R0, Address(R8, target_offset));
- __ ldr(R1, FieldAddress(R0, Function::entry_point_offset()));
- __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
+ __ ldr(R1, FieldAddress(R0, target::Function::entry_point_offset()));
+ __ ldr(CODE_REG, FieldAddress(R0, target::Function::code_offset()));
__ br(R1);
__ Bind(&miss);
__ LoadIsolate(R2);
- __ ldr(CODE_REG, Address(R2, Isolate::ic_miss_code_offset()));
- __ ldr(R1, FieldAddress(CODE_REG, Code::entry_point_offset()));
+ __ ldr(CODE_REG, Address(R2, target::Isolate::ic_miss_code_offset()));
+ __ ldr(R1, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
__ br(R1);
}
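Both IC-call stubs above and below share the same scan: entries of
TestEntryLengthFor(1) words, keyed by the receiver cid as a raw Smi, with
kIllegalCid as the terminator. A minimal sketch of the lookup (names
hypothetical, not VM declarations):

    #include <cstddef>
    #include <cstdint>

    // Returns a pointer to the matching entry on a hit (the stub then
    // reads the target/code/entry-point words at fixed indices within
    // it), or null on a miss.
    const uintptr_t* FindICEntry(const uintptr_t* entries,
                                 ptrdiff_t entry_len, uintptr_t cid_smi,
                                 uintptr_t illegal_cid_smi) {
      for (const uintptr_t* e = entries;; e += entry_len) {
        if (e[0] == cid_smi) return e;                // found
        if (e[0] == illegal_cid_smi) return nullptr;  // miss
      }
    }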
-void StubCode::GenerateICCallThroughCodeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
Label loop, found, miss;
- __ ldr(R8, FieldAddress(R5, ICData::ic_data_offset()));
- __ ldr(R4, FieldAddress(R5, ICData::arguments_descriptor_offset()));
- __ AddImmediate(R8, Array::data_offset() - kHeapObjectTag);
+ __ ldr(R8, FieldAddress(R5, target::ICData::ic_data_offset()));
+ __ ldr(R4, FieldAddress(R5, target::ICData::arguments_descriptor_offset()));
+ __ AddImmediate(R8, target::Array::data_offset() - kHeapObjectTag);
// R8: first IC entry
__ LoadTaggedClassIdMayBeSmi(R1, R0);
// R1: receiver cid as Smi
@@ -3013,17 +3068,20 @@
__ ldr(R2, Address(R8, 0));
__ cmp(R1, Operand(R2));
__ b(&found, EQ);
- __ CompareImmediate(R2, Smi::RawValue(kIllegalCid));
+ __ CompareImmediate(R2, target::ToRawSmi(kIllegalCid));
__ b(&miss, EQ);
const intptr_t entry_length =
- ICData::TestEntryLengthFor(1, /*tracking_exactness=*/false) * kWordSize;
+ target::ICData::TestEntryLengthFor(1, /*tracking_exactness=*/false) *
+ target::kWordSize;
__ AddImmediate(R8, entry_length); // Next entry.
__ b(&loop);
__ Bind(&found);
- const intptr_t code_offset = ICData::CodeIndexFor(1) * kWordSize;
- const intptr_t entry_offset = ICData::EntryPointIndexFor(1) * kWordSize;
+ const intptr_t code_offset =
+ target::ICData::CodeIndexFor(1) * target::kWordSize;
+ const intptr_t entry_offset =
+ target::ICData::EntryPointIndexFor(1) * target::kWordSize;
__ ldr(R1, Address(R8, entry_offset));
if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
__ ldr(CODE_REG, Address(R8, code_offset));
@@ -3032,15 +3090,15 @@
__ Bind(&miss);
__ LoadIsolate(R2);
- __ ldr(CODE_REG, Address(R2, Isolate::ic_miss_code_offset()));
- __ ldr(R1, FieldAddress(CODE_REG, Code::entry_point_offset()));
+ __ ldr(CODE_REG, Address(R2, target::Isolate::ic_miss_code_offset()));
+ __ ldr(R1, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
__ br(R1);
}
// Called from switchable IC calls.
// R0: receiver
// R5: SingleTargetCache
-void StubCode::GenerateUnlinkedCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateUnlinkedCallStub(Assembler* assembler) {
__ EnterStubFrame();
__ Push(R0); // Preserve receiver.
@@ -3054,9 +3112,10 @@
__ Pop(R0); // Restore receiver.
__ LeaveStubFrame();
- __ ldr(CODE_REG, Address(THR, Thread::ic_lookup_through_code_stub_offset()));
- __ ldr(R1, FieldAddress(CODE_REG, Code::entry_point_offset(
- Code::EntryKind::kMonomorphic)));
+ __ ldr(CODE_REG,
+ Address(THR, target::Thread::ic_lookup_through_code_stub_offset()));
+ __ ldr(R1, FieldAddress(CODE_REG, target::Code::entry_point_offset(
+ CodeEntryKind::kMonomorphic)));
__ br(R1);
}
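This miss path is the template that the single-target and monomorphic
miss stubs below also follow (a descriptive summary, not VM code):

    // 1. EnterStubFrame; push the receiver and a slot for the result.
    // 2. Call the runtime to (re)compute the call-site data.
    // 3. Pop the new data into R5 and the receiver back into R0.
    // 4. Tail-call the ICLookupThroughCode stub through its
    //    kMonomorphic entry point.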
@@ -3065,12 +3124,12 @@
// R5: SingleTargetCache
// Passed to target:
// CODE_REG: target Code object
-void StubCode::GenerateSingleTargetCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSingleTargetCallStub(Assembler* assembler) {
Label miss;
__ LoadClassIdMayBeSmi(R1, R0);
- __ ldr(R2, FieldAddress(R5, SingleTargetCache::lower_limit_offset()),
+ __ ldr(R2, FieldAddress(R5, target::SingleTargetCache::lower_limit_offset()),
kUnsignedHalfword);
- __ ldr(R3, FieldAddress(R5, SingleTargetCache::upper_limit_offset()),
+ __ ldr(R3, FieldAddress(R5, target::SingleTargetCache::upper_limit_offset()),
kUnsignedHalfword);
__ cmp(R1, Operand(R2));
@@ -3078,8 +3137,9 @@
__ cmp(R1, Operand(R3));
__ b(&miss, GT);
- __ ldr(R1, FieldAddress(R5, SingleTargetCache::entry_point_offset()));
- __ ldr(CODE_REG, FieldAddress(R5, SingleTargetCache::target_offset()));
+ __ ldr(R1, FieldAddress(R5, target::SingleTargetCache::entry_point_offset()));
+ __ ldr(CODE_REG,
+ FieldAddress(R5, target::SingleTargetCache::target_offset()));
__ br(R1);
__ Bind(&miss);
@@ -3095,16 +3155,18 @@
__ Pop(R0); // Restore receiver.
__ LeaveStubFrame();
- __ ldr(CODE_REG, Address(THR, Thread::ic_lookup_through_code_stub_offset()));
- __ ldr(R1, FieldAddress(CODE_REG, Code::entry_point_offset(
- Code::EntryKind::kMonomorphic)));
+ __ ldr(CODE_REG,
+ Address(THR, target::Thread::ic_lookup_through_code_stub_offset()));
+ __ ldr(R1, FieldAddress(CODE_REG, target::Code::entry_point_offset(
+ CodeEntryKind::kMonomorphic)));
__ br(R1);
}
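The fast path above is just an inclusive class-id range test against the
SingleTargetCache before jumping to its cached entry point; only cids
outside [lower, upper] take the miss path. As a one-liner (hypothetical
names):

    #include <cstdint>

    // Hypothetical restatement of the single-target range check.
    bool SingleTargetHit(uint32_t cid, uint32_t lower, uint32_t upper) {
      return lower <= cid && cid <= upper;
    }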
// Called from the monomorphic checked entry.
// R0: receiver
-void StubCode::GenerateMonomorphicMissStub(Assembler* assembler) {
- __ ldr(CODE_REG, Address(THR, Thread::monomorphic_miss_stub_offset()));
+void StubCodeCompiler::GenerateMonomorphicMissStub(Assembler* assembler) {
+ __ ldr(CODE_REG,
+ Address(THR, target::Thread::monomorphic_miss_stub_offset()));
__ EnterStubFrame();
__ Push(R0); // Preserve receiver.
@@ -3117,20 +3179,24 @@
__ Pop(R0); // Restore receiver.
__ LeaveStubFrame();
- __ ldr(CODE_REG, Address(THR, Thread::ic_lookup_through_code_stub_offset()));
- __ ldr(R1, FieldAddress(CODE_REG, Code::entry_point_offset(
- Code::EntryKind::kMonomorphic)));
+ __ ldr(CODE_REG,
+ Address(THR, target::Thread::ic_lookup_through_code_stub_offset()));
+ __ ldr(R1, FieldAddress(CODE_REG, target::Code::entry_point_offset(
+ CodeEntryKind::kMonomorphic)));
__ br(R1);
}
-void StubCode::GenerateFrameAwaitingMaterializationStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateFrameAwaitingMaterializationStub(
+ Assembler* assembler) {
__ brk(0);
}
-void StubCode::GenerateAsynchronousGapMarkerStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAsynchronousGapMarkerStub(Assembler* assembler) {
__ brk(0);
}
+} // namespace compiler
+
} // namespace dart
#endif // defined(TARGET_ARCH_ARM64) && !defined(DART_PRECOMPILED_RUNTIME)
diff --git a/runtime/vm/compiler/stub_code_compiler_dbc.cc b/runtime/vm/compiler/stub_code_compiler_dbc.cc
new file mode 100644
index 0000000..f303ad4
--- /dev/null
+++ b/runtime/vm/compiler/stub_code_compiler_dbc.cc
@@ -0,0 +1,143 @@
+// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/globals.h"
+#if defined(TARGET_ARCH_DBC)
+
+#include "vm/compiler/assembler/assembler.h"
+#include "vm/compiler/backend/flow_graph_compiler.h"
+#include "vm/compiler/jit/compiler.h"
+#include "vm/cpu.h"
+#include "vm/dart_entry.h"
+#include "vm/heap/heap.h"
+#include "vm/instructions.h"
+#include "vm/object_store.h"
+#include "vm/runtime_entry.h"
+#include "vm/stack_frame.h"
+#include "vm/stub_code.h"
+#include "vm/tags.h"
+
+#define __ assembler->
+
+namespace dart {
+
+DEFINE_FLAG(bool, inline_alloc, true, "Inline allocation of objects.");
+DEFINE_FLAG(bool,
+ use_slow_path,
+ false,
+ "Set to true for debugging & verifying the slow paths.");
+
+namespace compiler {
+
+void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
+ __ Compile();
+}
+
+void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub(
+ Assembler* assembler) {
+ __ NoSuchMethod();
+}
+
+// Not executed, but used as a stack marker when calling
+// DRT_OptimizeInvokedFunction.
+void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) {
+ __ Trap();
+}
+
+// Not executed, but used as a sentinel in Simulator::JumpToFrame.
+void StubCodeCompiler::GenerateRunExceptionHandlerStub(Assembler* assembler) {
+ __ Trap();
+}
+
+void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) {
+ __ DeoptRewind();
+}
+
+// TODO(vegorov) Don't generate this stub.
+void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
+ __ Trap();
+}
+
+// TODO(vegorov) Don't generate these stubs.
+void StubCodeCompiler::GenerateAllocationStubForClass(Assembler* assembler,
+ const Class& cls) {
+ __ Trap();
+}
+
+// TODO(vegorov) Don't generate this stub.
+void StubCodeCompiler::GenerateMegamorphicMissStub(Assembler* assembler) {
+ __ Trap();
+}
+
+// These deoptimization stubs are only used to populate stack frames
+// with something meaningful to make sure GC can scan the stack during
+// the last phase of deoptimization which materializes objects.
+void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub(
+ Assembler* assembler) {
+ __ Trap();
+}
+
+void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub(
+ Assembler* assembler) {
+ __ Trap();
+}
+
+void StubCodeCompiler::GenerateDeoptimizeStub(Assembler* assembler) {
+ __ Trap();
+}
+
+// TODO(kustermann): Don't generate this stub.
+void StubCodeCompiler::GenerateDefaultTypeTestStub(Assembler* assembler) {
+ __ Trap();
+}
+
+// TODO(kustermann): Don't generate this stub.
+void StubCodeCompiler::GenerateTopTypeTypeTestStub(Assembler* assembler) {
+ __ Trap();
+}
+
+// TODO(kustermann): Don't generate this stub.
+void StubCodeCompiler::GenerateTypeRefTypeTestStub(Assembler* assembler) {
+ __ Trap();
+}
+
+// TODO(kustermann): Don't generate this stub.
+void StubCodeCompiler::GenerateUnreachableTypeTestStub(Assembler* assembler) {
+ __ Trap();
+}
+
+// TODO(kustermann): Don't generate this stub.
+void StubCodeCompiler::GenerateLazySpecializeTypeTestStub(
+ Assembler* assembler) {
+ __ Trap();
+}
+
+// TODO(kustermann): Don't generate this stub.
+void StubCodeCompiler::GenerateSlowTypeTestStub(Assembler* assembler) {
+ __ Trap();
+}
+
+void StubCodeCompiler::GenerateFrameAwaitingMaterializationStub(
+ Assembler* assembler) {
+ __ Trap();
+}
+
+void StubCodeCompiler::GenerateAsynchronousGapMarkerStub(Assembler* assembler) {
+ __ Trap();
+}
+
+void StubCodeCompiler::GenerateInterpretCallStub(Assembler* assembler) {
+ __ Trap();
+}
+
+void StubCodeCompiler::GenerateInvokeDartCodeFromBytecodeStub(
+ Assembler* assembler) {
+ __ Trap();
+}
+
+} // namespace compiler
+
+} // namespace dart
+
+#endif  // defined(TARGET_ARCH_DBC)
diff --git a/runtime/vm/stub_code_ia32.cc b/runtime/vm/compiler/stub_code_compiler_ia32.cc
similarity index 67%
rename from runtime/vm/stub_code_ia32.cc
rename to runtime/vm/compiler/stub_code_compiler_ia32.cc
index 46a8aeb..94bafe4 100644
--- a/runtime/vm/stub_code_ia32.cc
+++ b/runtime/vm/compiler/stub_code_compiler_ia32.cc
@@ -1,21 +1,22 @@
-// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
+// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#include "vm/globals.h"
+
+#define SHOULD_NOT_INCLUDE_RUNTIME
+
+#include "vm/compiler/stub_code_compiler.h"
+
#if defined(TARGET_ARCH_IA32) && !defined(DART_PRECOMPILED_RUNTIME)
+#include "vm/class_id.h"
+#include "vm/code_entry_kind.h"
#include "vm/compiler/assembler/assembler.h"
-#include "vm/compiler/backend/flow_graph_compiler.h"
-#include "vm/compiler/jit/compiler.h"
-#include "vm/dart_entry.h"
-#include "vm/heap/heap.h"
-#include "vm/heap/scavenger.h"
+#include "vm/compiler/backend/locations.h"
+#include "vm/constants_ia32.h"
#include "vm/instructions.h"
-#include "vm/object_store.h"
-#include "vm/resolver.h"
-#include "vm/stack_frame.h"
-#include "vm/stub_code.h"
+#include "vm/static_type_exactness_state.h"
#include "vm/tags.h"
#define __ assembler->
@@ -28,7 +29,7 @@
false,
"Set to true for debugging & verifying the slow paths.");
-#define INT32_SIZEOF(x) static_cast<int32_t>(sizeof(x))
+namespace compiler {
// Input parameters:
// ESP : points to return address.
@@ -38,18 +39,19 @@
// ECX : address of the runtime function to call.
// EDX : number of arguments to the call.
// Must preserve callee saved registers EDI and EBX.
-void StubCode::GenerateCallToRuntimeStub(Assembler* assembler) {
- const intptr_t thread_offset = NativeArguments::thread_offset();
- const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset();
- const intptr_t argv_offset = NativeArguments::argv_offset();
- const intptr_t retval_offset = NativeArguments::retval_offset();
+void StubCodeCompiler::GenerateCallToRuntimeStub(Assembler* assembler) {
+ const intptr_t thread_offset = target::NativeArguments::thread_offset();
+ const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
+ const intptr_t argv_offset = target::NativeArguments::argv_offset();
+ const intptr_t retval_offset = target::NativeArguments::retval_offset();
- __ movl(CODE_REG, Address(THR, Thread::call_to_runtime_stub_offset()));
+ __ movl(CODE_REG,
+ Address(THR, target::Thread::call_to_runtime_stub_offset()));
__ EnterStubFrame();
// Save exit frame information to enable stack walking as we are about
// to transition to Dart VM C++ code.
- __ movl(Address(THR, Thread::top_exit_frame_info_offset()), EBP);
+ __ movl(Address(THR, target::Thread::top_exit_frame_info_offset()), EBP);
#if defined(DEBUG)
{
@@ -66,7 +68,9 @@
__ movl(Assembler::VMTagAddress(), ECX);
// Reserve space for arguments and align frame before entering C++ world.
- __ AddImmediate(ESP, Immediate(-INT32_SIZEOF(NativeArguments)));
+ __ AddImmediate(
+ ESP,
+ Immediate(-static_cast<int32_t>(target::NativeArguments::StructSize())));
if (OS::ActivationFrameAlignment() > 1) {
__ andl(ESP, Immediate(~(OS::ActivationFrameAlignment() - 1)));
}
@@ -77,16 +81,20 @@
// bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_.
__ movl(Address(ESP, argc_tag_offset), EDX); // Set argc in NativeArguments.
// Compute argv.
- __ leal(EAX, Address(EBP, EDX, TIMES_4, kParamEndSlotFromFp * kWordSize));
- __ movl(Address(ESP, argv_offset), EAX); // Set argv in NativeArguments.
- __ addl(EAX, Immediate(1 * kWordSize)); // Retval is next to 1st argument.
+ __ leal(EAX,
+ Address(EBP, EDX, TIMES_4,
+ target::frame_layout.param_end_from_fp * target::kWordSize));
+ __ movl(Address(ESP, argv_offset), EAX); // Set argv in NativeArguments.
+ __ addl(EAX,
+ Immediate(1 * target::kWordSize)); // Retval is next to 1st argument.
__ movl(Address(ESP, retval_offset), EAX); // Set retval in NativeArguments.
__ call(ECX);
__ movl(Assembler::VMTagAddress(), Immediate(VMTag::kDartCompiledTagId));
// Reset exit frame information in Isolate structure.
- __ movl(Address(THR, Thread::top_exit_frame_info_offset()), Immediate(0));
+ __ movl(Address(THR, target::Thread::top_exit_frame_info_offset()),
+ Immediate(0));
__ LeaveFrame();
@@ -99,21 +107,23 @@
__ ret();
}
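The four stores above fill a NativeArguments block that the stub reserves
on the C stack before calling through ECX. A hypothetical mirror of that
block, matching the thread/argc_tag/argv/retval offsets used here (an
illustration, not the VM's declaration):

    #include <cstdint>

    struct NativeArgumentsMirror {
      void* thread;        // current Thread (THR)
      uintptr_t argc_tag;  // argument count plus function-kind bits
      void** argv;         // first argument in the caller's frame
      void** retval;       // slot the runtime writes the result into
    };
    // target::NativeArguments::StructSize() is what the stub subtracts
    // from ESP (before alignment) to make room for this block.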
-void StubCode::GenerateNullErrorSharedWithoutFPURegsStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateNullErrorSharedWithoutFPURegsStub(
+ Assembler* assembler) {
__ Breakpoint();
}
-void StubCode::GenerateNullErrorSharedWithFPURegsStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateNullErrorSharedWithFPURegsStub(
+ Assembler* assembler) {
__ Breakpoint();
}
-void StubCode::GenerateStackOverflowSharedWithoutFPURegsStub(
+void StubCodeCompiler::GenerateStackOverflowSharedWithoutFPURegsStub(
Assembler* assembler) {
// TODO(sjindel): implement.
__ Breakpoint();
}
-void StubCode::GenerateStackOverflowSharedWithFPURegsStub(
+void StubCodeCompiler::GenerateStackOverflowSharedWithFPURegsStub(
Assembler* assembler) {
// TODO(sjindel): implement.
__ Breakpoint();
@@ -123,8 +133,8 @@
// ESP : points to return address.
// EAX : stop message (const char*).
// Must preserve all registers, except EAX.
-void StubCode::GeneratePrintStopMessageStub(Assembler* assembler) {
- __ EnterCallRuntimeFrame(1 * kWordSize);
+void StubCodeCompiler::GeneratePrintStopMessageStub(Assembler* assembler) {
+ __ EnterCallRuntimeFrame(1 * target::kWordSize);
__ movl(Address(ESP, 0), EAX);
__ CallRuntime(kPrintStopMessageRuntimeEntry, 1);
__ LeaveCallRuntimeFrame();
@@ -138,23 +148,23 @@
// ECX : address of the native function to call.
// EDX : argc_tag including number of arguments and function kind.
static void GenerateCallNativeWithWrapperStub(Assembler* assembler,
- ExternalLabel* wrapper) {
+ Address wrapper_address) {
const intptr_t native_args_struct_offset =
- NativeEntry::kNumCallWrapperArguments * kWordSize;
+ target::NativeEntry::kNumCallWrapperArguments * target::kWordSize;
const intptr_t thread_offset =
- NativeArguments::thread_offset() + native_args_struct_offset;
+ target::NativeArguments::thread_offset() + native_args_struct_offset;
const intptr_t argc_tag_offset =
- NativeArguments::argc_tag_offset() + native_args_struct_offset;
+ target::NativeArguments::argc_tag_offset() + native_args_struct_offset;
const intptr_t argv_offset =
- NativeArguments::argv_offset() + native_args_struct_offset;
+ target::NativeArguments::argv_offset() + native_args_struct_offset;
const intptr_t retval_offset =
- NativeArguments::retval_offset() + native_args_struct_offset;
+ target::NativeArguments::retval_offset() + native_args_struct_offset;
__ EnterStubFrame();
// Save exit frame information to enable stack walking as we are about
// to transition to dart VM code.
- __ movl(Address(THR, Thread::top_exit_frame_info_offset()), EBP);
+ __ movl(Address(THR, target::Thread::top_exit_frame_info_offset()), EBP);
#if defined(DEBUG)
{
@@ -173,8 +183,10 @@
// Reserve space for the native arguments structure, the outgoing parameters
// (pointer to the native arguments structure, the C function entry point)
// and align frame before entering the C++ world.
- __ AddImmediate(ESP,
- Immediate(-INT32_SIZEOF(NativeArguments) - (2 * kWordSize)));
+ __ AddImmediate(
+ ESP,
+ Immediate(-static_cast<int32_t>(target::NativeArguments::StructSize()) -
+ (2 * target::kWordSize)));
if (OS::ActivationFrameAlignment() > 1) {
__ andl(ESP, Immediate(~(OS::ActivationFrameAlignment() - 1)));
}
@@ -183,31 +195,39 @@
__ movl(Address(ESP, thread_offset), THR); // Set thread in NativeArgs.
__ movl(Address(ESP, argc_tag_offset), EDX); // Set argc in NativeArguments.
__ movl(Address(ESP, argv_offset), EAX); // Set argv in NativeArguments.
- __ leal(EAX, Address(EBP, 2 * kWordSize)); // Compute return value addr.
+ __ leal(EAX,
+ Address(EBP, 2 * target::kWordSize)); // Compute return value addr.
__ movl(Address(ESP, retval_offset), EAX); // Set retval in NativeArguments.
- __ leal(EAX, Address(ESP, 2 * kWordSize)); // Pointer to the NativeArguments.
+ __ leal(
+ EAX,
+ Address(ESP, 2 * target::kWordSize)); // Pointer to the NativeArguments.
__ movl(Address(ESP, 0), EAX); // Pass the pointer to the NativeArguments.
- __ movl(Address(ESP, kWordSize), ECX); // Function to call.
- __ call(wrapper);
+ __ movl(Address(ESP, target::kWordSize), ECX); // Function to call.
+ __ call(wrapper_address);
__ movl(Assembler::VMTagAddress(), Immediate(VMTag::kDartCompiledTagId));
// Reset exit frame information in Isolate structure.
- __ movl(Address(THR, Thread::top_exit_frame_info_offset()), Immediate(0));
+ __ movl(Address(THR, target::Thread::top_exit_frame_info_offset()),
+ Immediate(0));
__ LeaveFrame();
__ ret();
}
-void StubCode::GenerateCallNoScopeNativeStub(Assembler* assembler) {
- ExternalLabel wrapper(NativeEntry::NoScopeNativeCallWrapperEntry());
- GenerateCallNativeWithWrapperStub(assembler, &wrapper);
+void StubCodeCompiler::GenerateCallNoScopeNativeStub(Assembler* assembler) {
+ GenerateCallNativeWithWrapperStub(
+ assembler,
+ Address(THR,
+ target::Thread::no_scope_native_wrapper_entry_point_offset()));
}
-void StubCode::GenerateCallAutoScopeNativeStub(Assembler* assembler) {
- ExternalLabel wrapper(NativeEntry::AutoScopeNativeCallWrapperEntry());
- GenerateCallNativeWithWrapperStub(assembler, &wrapper);
+void StubCodeCompiler::GenerateCallAutoScopeNativeStub(Assembler* assembler) {
+ GenerateCallNativeWithWrapperStub(
+ assembler,
+ Address(THR,
+ target::Thread::auto_scope_native_wrapper_entry_point_offset()));
}
// Input parameters:
@@ -216,22 +236,22 @@
// EAX : address of first argument in argument array.
// ECX : address of the native function to call.
// EDX : argc_tag including number of arguments and function kind.
-void StubCode::GenerateCallBootstrapNativeStub(Assembler* assembler) {
- const intptr_t native_args_struct_offset = kWordSize;
+void StubCodeCompiler::GenerateCallBootstrapNativeStub(Assembler* assembler) {
+ const intptr_t native_args_struct_offset = target::kWordSize;
const intptr_t thread_offset =
- NativeArguments::thread_offset() + native_args_struct_offset;
+ target::NativeArguments::thread_offset() + native_args_struct_offset;
const intptr_t argc_tag_offset =
- NativeArguments::argc_tag_offset() + native_args_struct_offset;
+ target::NativeArguments::argc_tag_offset() + native_args_struct_offset;
const intptr_t argv_offset =
- NativeArguments::argv_offset() + native_args_struct_offset;
+ target::NativeArguments::argv_offset() + native_args_struct_offset;
const intptr_t retval_offset =
- NativeArguments::retval_offset() + native_args_struct_offset;
+ target::NativeArguments::retval_offset() + native_args_struct_offset;
__ EnterStubFrame();
// Save exit frame information to enable stack walking as we are about
// to transition to dart VM code.
- __ movl(Address(THR, Thread::top_exit_frame_info_offset()), EBP);
+ __ movl(Address(THR, target::Thread::top_exit_frame_info_offset()), EBP);
#if defined(DEBUG)
{
@@ -250,7 +270,10 @@
// Reserve space for the native arguments structure, the outgoing parameter
// (pointer to the native arguments structure) and align frame before
// entering the C++ world.
- __ AddImmediate(ESP, Immediate(-INT32_SIZEOF(NativeArguments) - kWordSize));
+ __ AddImmediate(
+ ESP,
+ Immediate(-static_cast<int32_t>(target::NativeArguments::StructSize()) -
+ target::kWordSize));
if (OS::ActivationFrameAlignment() > 1) {
__ andl(ESP, Immediate(~(OS::ActivationFrameAlignment() - 1)));
}
@@ -259,16 +282,19 @@
__ movl(Address(ESP, thread_offset), THR); // Set thread in NativeArgs.
__ movl(Address(ESP, argc_tag_offset), EDX); // Set argc in NativeArguments.
__ movl(Address(ESP, argv_offset), EAX); // Set argv in NativeArguments.
- __ leal(EAX, Address(EBP, 2 * kWordSize)); // Compute return value addr.
+ __ leal(EAX,
+ Address(EBP, 2 * target::kWordSize)); // Compute return value addr.
__ movl(Address(ESP, retval_offset), EAX); // Set retval in NativeArguments.
- __ leal(EAX, Address(ESP, kWordSize)); // Pointer to the NativeArguments.
+ __ leal(EAX,
+ Address(ESP, target::kWordSize)); // Pointer to the NativeArguments.
__ movl(Address(ESP, 0), EAX); // Pass the pointer to the NativeArguments.
__ call(ECX);
__ movl(Assembler::VMTagAddress(), Immediate(VMTag::kDartCompiledTagId));
// Reset exit frame information in Isolate structure.
- __ movl(Address(THR, Thread::top_exit_frame_info_offset()), Immediate(0));
+ __ movl(Address(THR, target::Thread::top_exit_frame_info_offset()),
+ Immediate(0));
__ LeaveFrame();
__ ret();
@@ -276,7 +302,7 @@
// Input parameters:
// EDX: arguments descriptor array.
-void StubCode::GenerateCallStaticFunctionStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
__ EnterStubFrame();
__ pushl(EDX); // Preserve arguments descriptor array.
__ pushl(Immediate(0)); // Setup space on stack for return value.
@@ -286,14 +312,14 @@
// Remove the stub frame as we are about to jump to the dart function.
__ LeaveFrame();
- __ movl(ECX, FieldAddress(EAX, Code::entry_point_offset()));
+ __ movl(ECX, FieldAddress(EAX, target::Code::entry_point_offset()));
__ jmp(ECX);
}
// Called from a static call only when an invalid code has been entered
// (invalid because its function was optimized or deoptimized).
// EDX: arguments descriptor array.
-void StubCode::GenerateFixCallersTargetStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
// Create a stub frame as we are pushing some objects on the stack before
// calling into the runtime.
__ EnterStubFrame();
@@ -302,7 +328,7 @@
__ CallRuntime(kFixCallersTargetRuntimeEntry, 0);
__ popl(EAX); // Get Code object.
__ popl(EDX); // Restore arguments descriptor array.
- __ movl(EAX, FieldAddress(EAX, Code::entry_point_offset()));
+ __ movl(EAX, FieldAddress(EAX, target::Code::entry_point_offset()));
__ LeaveFrame();
__ jmp(EAX);
__ int3();
@@ -310,12 +336,13 @@
// Called from object allocate instruction when the allocation stub has been
// disabled.
-void StubCode::GenerateFixAllocationStubTargetStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateFixAllocationStubTargetStub(
+ Assembler* assembler) {
__ EnterStubFrame();
__ pushl(Immediate(0)); // Setup space on stack for return value.
__ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0);
__ popl(EAX); // Get Code object.
- __ movl(EAX, FieldAddress(EAX, Code::entry_point_offset()));
+ __ movl(EAX, FieldAddress(EAX, target::Code::entry_point_offset()));
__ LeaveFrame();
__ jmp(EAX);
__ int3();
@@ -323,20 +350,21 @@
// Input parameters:
// EDX: smi-tagged argument count, may be zero.
-// EBP[kParamEndSlotFromFp + 1]: last argument.
+// EBP[target::frame_layout.param_end_from_fp + 1]: last argument.
// Uses EAX, EBX, ECX, EDX, EDI.
static void PushArrayOfArguments(Assembler* assembler) {
// Allocate array to store arguments of caller.
- const Immediate& raw_null =
- Immediate(reinterpret_cast<intptr_t>(Object::null()));
+ const Immediate& raw_null = Immediate(target::ToRawPointer(NullObject()));
__ movl(ECX, raw_null); // Null element type for raw Array.
- __ Call(StubCode::AllocateArray());
+ __ Call(StubCodeAllocateArray());
__ SmiUntag(EDX);
// EAX: newly allocated array.
// EDX: length of the array (was preserved by the stub).
__ pushl(EAX); // Array is in EAX and on top of stack.
- __ leal(EBX, Address(EBP, EDX, TIMES_4, kParamEndSlotFromFp * kWordSize));
- __ leal(ECX, FieldAddress(EAX, Array::data_offset()));
+ __ leal(EBX,
+ Address(EBP, EDX, TIMES_4,
+ target::frame_layout.param_end_from_fp * target::kWordSize));
+ __ leal(ECX, FieldAddress(EAX, target::Array::data_offset()));
// EBX: address of first argument on stack.
// ECX: address of first argument in array.
Label loop, loop_condition;
@@ -345,8 +373,8 @@
__ movl(EDI, Address(EBX, 0));
// Generational barrier is needed, array is not necessarily in new space.
__ StoreIntoObject(EAX, Address(ECX, 0), EDI);
- __ AddImmediate(ECX, Immediate(kWordSize));
- __ AddImmediate(EBX, Immediate(-kWordSize));
+ __ AddImmediate(ECX, Immediate(target::kWordSize));
+ __ AddImmediate(EBX, Immediate(-target::kWordSize));
__ Bind(&loop_condition);
__ decl(EDX);
__ j(POSITIVE, &loop, Assembler::kNearJump);
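The loop above copies the caller's arguments into the freshly allocated
array: EBX walks the stack arguments downward from the first one while ECX
fills the array upward, with StoreIntoObject supplying the generational
write barrier. The same walk in plain C++ (barrier reduced to an ordinary
store; names hypothetical):

    #include <cstdint>

    // first_arg points at the highest-addressed (first) argument slot;
    // successive arguments live one word lower on ia32.
    void CopyArguments(uintptr_t* data, const uintptr_t* first_arg,
                       intptr_t count) {
      for (intptr_t i = 0; i < count; ++i) {
        data[i] = first_arg[-i];
      }
    }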
@@ -397,7 +425,7 @@
if (i == CODE_REG) {
// Save the original value of CODE_REG pushed before invoking this stub
// instead of the value used to call this stub.
- __ pushl(Address(EBP, 2 * kWordSize));
+ __ pushl(Address(EBP, 2 * target::kWordSize));
} else {
__ pushl(static_cast<Register>(i));
}
@@ -411,21 +439,24 @@
}
__ movl(ECX, ESP); // Preserve saved registers block.
- __ ReserveAlignedFrameSpace(2 * kWordSize);
- __ movl(Address(ESP, 0 * kWordSize), ECX); // Start of register block.
+ __ ReserveAlignedFrameSpace(2 * target::kWordSize);
+ __ movl(Address(ESP, 0 * target::kWordSize),
+ ECX); // Start of register block.
bool is_lazy =
(kind == kLazyDeoptFromReturn) || (kind == kLazyDeoptFromThrow);
- __ movl(Address(ESP, 1 * kWordSize), Immediate(is_lazy ? 1 : 0));
+ __ movl(Address(ESP, 1 * target::kWordSize), Immediate(is_lazy ? 1 : 0));
__ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 2);
// Result (EAX) is stack-size (FP - SP) in bytes.
if (kind == kLazyDeoptFromReturn) {
// Restore result into EBX temporarily.
- __ movl(EBX, Address(EBP, saved_result_slot_from_fp * kWordSize));
+ __ movl(EBX, Address(EBP, saved_result_slot_from_fp * target::kWordSize));
} else if (kind == kLazyDeoptFromThrow) {
// Restore result into EBX temporarily.
- __ movl(EBX, Address(EBP, saved_exception_slot_from_fp * kWordSize));
- __ movl(ECX, Address(EBP, saved_stacktrace_slot_from_fp * kWordSize));
+ __ movl(EBX,
+ Address(EBP, saved_exception_slot_from_fp * target::kWordSize));
+ __ movl(ECX,
+ Address(EBP, saved_stacktrace_slot_from_fp * target::kWordSize));
}
__ LeaveFrame();
@@ -442,23 +473,23 @@
__ pushl(EBX); // Preserve exception as first local.
__ pushl(ECX); // Preserve stacktrace as first local.
}
- __ ReserveAlignedFrameSpace(1 * kWordSize);
+ __ ReserveAlignedFrameSpace(1 * target::kWordSize);
__ movl(Address(ESP, 0), EBP); // Pass last FP as parameter on stack.
__ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1);
if (kind == kLazyDeoptFromReturn) {
// Restore result into EBX.
__ movl(EBX,
Address(EBP, compiler::target::frame_layout.first_local_from_fp *
- kWordSize));
+ target::kWordSize));
} else if (kind == kLazyDeoptFromThrow) {
// Restore result into EBX.
__ movl(EBX,
Address(EBP, compiler::target::frame_layout.first_local_from_fp *
- kWordSize));
+ target::kWordSize));
__ movl(
ECX,
Address(EBP, (compiler::target::frame_layout.first_local_from_fp - 1) *
- kWordSize));
+ target::kWordSize));
}
// Code above cannot cause GC.
__ LeaveFrame();
@@ -473,7 +504,7 @@
__ pushl(EBX); // Preserve exception, it will be GC-d here.
__ pushl(ECX); // Preserve stacktrace, it will be GC-d here.
}
- __ pushl(Immediate(Smi::RawValue(0))); // Space for the result.
+ __ pushl(Immediate(target::ToRawSmi(0))); // Space for the result.
__ CallRuntime(kDeoptimizeMaterializeRuntimeEntry, 0);
// Result tells stub how many bytes to remove from the expression stack
// of the bottom-most frame. They were used as materialization arguments.
@@ -494,7 +525,8 @@
}
// EAX: result, must be preserved
-void StubCode::GenerateDeoptimizeLazyFromReturnStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub(
+ Assembler* assembler) {
// Return address for "call" to deopt stub.
__ pushl(Immediate(kZapReturnAddress));
GenerateDeoptimizationSequence(assembler, kLazyDeoptFromReturn);
@@ -503,14 +535,15 @@
// EAX: exception, must be preserved
// EDX: stacktrace, must be preserved
-void StubCode::GenerateDeoptimizeLazyFromThrowStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub(
+ Assembler* assembler) {
// Return address for "call" to deopt stub.
__ pushl(Immediate(kZapReturnAddress));
GenerateDeoptimizationSequence(assembler, kLazyDeoptFromThrow);
__ ret();
}
-void StubCode::GenerateDeoptimizeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptimizeStub(Assembler* assembler) {
GenerateDeoptimizationSequence(assembler, kEagerDeopt);
__ ret();
}
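
The deopt stubs above now push target::ToRawSmi(0) instead of Smi::RawValue(0); the encoding itself is unchanged. A sketch of the Smi encoding on ia32, consistent with the ASSERT(kSmiTagShift == 1) and the kSmiTagMask tests later in this file:

    #include <cstdint>

    constexpr int32_t kSmiTagShift = 1;
    constexpr int32_t kSmiTagMask = 1;

    // A Smi is the value shifted left by one; the cleared low bit is the tag.
    int32_t ToRawSmi(int32_t value) {
      return static_cast<int32_t>(static_cast<uint32_t>(value) << kSmiTagShift);
    }

    bool IsRawSmi(int32_t raw) {
      return (raw & kSmiTagMask) == 0;  // testl(reg, Immediate(kSmiTagMask))
    }
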
@@ -520,27 +553,28 @@
__ Comment("NoSuchMethodDispatch");
// When lazily generated invocation dispatchers are disabled, the
// miss-handler may return null.
- const Immediate& raw_null =
- Immediate(reinterpret_cast<intptr_t>(Object::null()));
+ const Immediate& raw_null = Immediate(target::ToRawPointer(NullObject()));
__ cmpl(EAX, raw_null);
__ j(NOT_EQUAL, call_target_function);
__ EnterStubFrame();
// Load the receiver.
- __ movl(EDI, FieldAddress(EDX, ArgumentsDescriptor::count_offset()));
- __ movl(EAX, Address(EBP, EDI, TIMES_HALF_WORD_SIZE,
- kParamEndSlotFromFp * kWordSize));
+ __ movl(EDI, FieldAddress(EDX, target::ArgumentsDescriptor::count_offset()));
+ __ movl(EAX,
+ Address(EBP, EDI, TIMES_HALF_WORD_SIZE,
+ target::frame_layout.param_end_from_fp * target::kWordSize));
__ pushl(Immediate(0)); // Setup space on stack for result.
__ pushl(EAX); // Receiver.
__ pushl(ECX); // ICData/MegamorphicCache.
__ pushl(EDX); // Arguments descriptor array.
// Adjust arguments count.
- __ cmpl(FieldAddress(EDX, ArgumentsDescriptor::type_args_len_offset()),
- Immediate(0));
+ __ cmpl(
+ FieldAddress(EDX, target::ArgumentsDescriptor::type_args_len_offset()),
+ Immediate(0));
__ movl(EDX, EDI);
Label args_count_ok;
__ j(EQUAL, &args_count_ok, Assembler::kNearJump);
- __ addl(EDX, Immediate(Smi::RawValue(1))); // Include the type arguments.
+ __ addl(EDX, Immediate(target::ToRawSmi(1))); // Include the type arguments.
__ Bind(&args_count_ok);
// EDX: Smi-tagged arguments array length.
@@ -553,14 +587,14 @@
__ ret();
}
-void StubCode::GenerateMegamorphicMissStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateMegamorphicMissStub(Assembler* assembler) {
__ EnterStubFrame();
// Load the receiver into EAX. The argument count in the arguments
// descriptor in EDX is a smi.
- __ movl(EAX, FieldAddress(EDX, ArgumentsDescriptor::count_offset()));
+ __ movl(EAX, FieldAddress(EDX, target::ArgumentsDescriptor::count_offset()));
// Two words (saved fp, stub's pc marker) in the stack above the return
// address.
- __ movl(EAX, Address(ESP, EAX, TIMES_2, 2 * kWordSize));
+ __ movl(EAX, Address(ESP, EAX, TIMES_2, 2 * target::kWordSize));
// Preserve IC data and arguments descriptor.
__ pushl(ECX);
__ pushl(EDX);
@@ -585,7 +619,7 @@
__ Bind(&call_target_function);
}
- __ movl(EBX, FieldAddress(EAX, Function::entry_point_offset()));
+ __ movl(EBX, FieldAddress(EAX, target::Function::entry_point_offset()));
__ jmp(EBX);
}
@@ -595,11 +629,12 @@
// ECX : array element type (either NULL or an instantiated type).
// Uses EAX, EBX, ECX, EDI as temporary registers.
// The newly allocated object is returned in EAX.
-void StubCode::GenerateAllocateArrayStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
Label slow_case;
// Compute the size to be allocated: it is based on the array length
// and is computed as:
- // RoundedAllocationSize((array_length * kwordSize) + sizeof(RawArray)).
+ // RoundedAllocationSize(
+ // (array_length * target::kWordSize) + target::Array::header_size()).
// Assert that length is a Smi.
__ testl(EDX, Immediate(kSmiTagMask));
@@ -612,8 +647,8 @@
__ j(LESS, &slow_case);
// Check for maximum allowed length.
- const Immediate& max_len = Immediate(
- reinterpret_cast<int32_t>(Smi::New(Array::kMaxNewSpaceElements)));
+ const Immediate& max_len =
+ Immediate(target::ToRawSmi(target::Array::kMaxNewSpaceElements));
__ cmpl(EDX, max_len);
__ j(GREATER, &slow_case);
@@ -621,18 +656,19 @@
__ MaybeTraceAllocation(kArrayCid, EAX, &slow_case, Assembler::kFarJump));
const intptr_t fixed_size_plus_alignment_padding =
- sizeof(RawArray) + kObjectAlignment - 1;
+ target::Array::header_size() + target::ObjectAlignment::kObjectAlignment -
+ 1;
// EDX is Smi.
__ leal(EBX, Address(EDX, TIMES_2, fixed_size_plus_alignment_padding));
ASSERT(kSmiTagShift == 1);
- __ andl(EBX, Immediate(-kObjectAlignment));
+ __ andl(EBX, Immediate(-target::ObjectAlignment::kObjectAlignment));
// ECX: array element type.
// EDX: array length as Smi.
// EBX: allocation size.
const intptr_t cid = kArrayCid;
- __ movl(EAX, Address(THR, Thread::top_offset()));
+ __ movl(EAX, Address(THR, target::Thread::top_offset()));
__ addl(EBX, EAX);
__ j(CARRY, &slow_case);
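
The leal/andl pair above implements the RoundedAllocationSize formula from the comment. As a sketch (kObjectAlignment == 8 is an assumption; the code only requires a power of two):

    #include <cstdint>

    constexpr intptr_t kWordSize = 4;
    constexpr intptr_t kObjectAlignment = 8;  // assumed

    intptr_t RoundedAllocationSize(intptr_t header_size, intptr_t length) {
      // leal: the Smi-tagged length scaled by TIMES_2 is length * kWordSize;
      // the header plus worst-case padding is folded into the displacement.
      const intptr_t size =
          length * kWordSize + header_size + kObjectAlignment - 1;
      // andl(EBX, Immediate(-kObjectAlignment)) clears the low bits.
      return size & -kObjectAlignment;
    }
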
@@ -641,12 +677,12 @@
// EBX: potential next object start.
// ECX: array element type.
// EDX: array length as Smi.
- __ cmpl(EBX, Address(THR, Thread::end_offset()));
+ __ cmpl(EBX, Address(THR, target::Thread::end_offset()));
__ j(ABOVE_EQUAL, &slow_case);
// Successfully allocated the object(s), now update top to point to
// next object start and initialize the object.
- __ movl(Address(THR, Thread::top_offset()), EBX);
+ __ movl(Address(THR, target::Thread::top_offset()), EBX);
__ subl(EBX, EAX);
__ addl(EAX, Immediate(kHeapObjectTag));
NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, EBX, EDI));
@@ -659,9 +695,10 @@
{
Label size_tag_overflow, done;
__ movl(EDI, EBX);
- __ cmpl(EDI, Immediate(RawObject::SizeTag::kMaxSizeTag));
+ __ cmpl(EDI, Immediate(target::RawObject::kSizeTagMaxSizeTag));
__ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
- __ shll(EDI, Immediate(RawObject::kSizeTagPos - kObjectAlignmentLog2));
+ __ shll(EDI, Immediate(target::RawObject::kTagBitsSizeTagPos -
+ target::ObjectAlignment::kObjectAlignmentLog2));
__ jmp(&done, Assembler::kNearJump);
__ Bind(&size_tag_overflow);
@@ -669,11 +706,9 @@
__ Bind(&done);
// Get the class index and insert it into the tags.
- uint32_t tags = 0;
- tags = RawObject::ClassIdTag::update(cid, tags);
- tags = RawObject::NewBit::update(true, tags);
+ uint32_t tags = target::MakeTagWordForNewSpaceObject(cid, 0);
__ orl(EDI, Immediate(tags));
- __ movl(FieldAddress(EAX, Array::tags_offset()), EDI); // Tags.
+ __ movl(FieldAddress(EAX, target::Object::tags_offset()), EDI); // Tags.
}
// EAX: new object start as a tagged pointer.
// EBX: allocation size.
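
target::MakeTagWordForNewSpaceObject replaces the removed ClassIdTag/NewBit updates. A sketch of what it plausibly computes; every bit position below is a placeholder, not the VM's actual tag layout:

    #include <cstdint>

    constexpr uint32_t kNewBit = 1u << 3;         // placeholder position
    constexpr uint32_t kSizeTagPos = 8;           // placeholder position
    constexpr uint32_t kClassIdTagPos = 16;       // placeholder position
    constexpr uint32_t kObjectAlignmentLog2 = 3;  // assumed: alignment == 8

    uint32_t MakeTagWordForNewSpaceObject(uint32_t cid, uint32_t size_in_bytes) {
      uint32_t tags = cid << kClassIdTagPos;
      tags |= kNewBit;  // freshly allocated objects start in new space
      // Passing size 0 (as the array stub does) leaves the size tag empty;
      // the stub then ORs in the size tag it computed at run time above.
      tags |= (size_in_bytes >> kObjectAlignmentLog2) << kSizeTagPos;
      return tags;
    }
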
@@ -682,11 +717,11 @@
// Store the type argument field.
// No generational barrier needed, since we store into a new object.
__ StoreIntoObjectNoBarrier(
- EAX, FieldAddress(EAX, Array::type_arguments_offset()), ECX);
+ EAX, FieldAddress(EAX, target::Array::type_arguments_offset()), ECX);
// Set the length field.
- __ StoreIntoObjectNoBarrier(EAX, FieldAddress(EAX, Array::length_offset()),
- EDX);
+ __ StoreIntoObjectNoBarrier(
+ EAX, FieldAddress(EAX, target::Array::length_offset()), EDX);
// Initialize all array elements to raw_null.
// EAX: new object start as a tagged pointer.
@@ -696,15 +731,15 @@
// ECX: array element type.
// EDX: array length as Smi.
__ leal(EBX, FieldAddress(EAX, EBX, TIMES_1, 0));
- __ leal(EDI, FieldAddress(EAX, sizeof(RawArray)));
+ __ leal(EDI, FieldAddress(EAX, target::Array::header_size()));
Label done;
Label init_loop;
__ Bind(&init_loop);
__ cmpl(EDI, EBX);
__ j(ABOVE_EQUAL, &done, Assembler::kNearJump);
// No generational barrier needed, since we are storing null.
- __ StoreIntoObjectNoBarrier(EAX, Address(EDI, 0), Object::null_object());
- __ addl(EDI, Immediate(kWordSize));
+ __ StoreIntoObjectNoBarrier(EAX, Address(EDI, 0), NullObject());
+ __ addl(EDI, Immediate(target::kWordSize));
__ jmp(&init_loop, Assembler::kNearJump);
__ Bind(&done);
__ ret(); // returns the newly allocated object in EAX.
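
Every allocation fast path in this file follows the same bump-pointer shape against target::Thread::top/end. Restated as a single-threaded sketch (struct layout illustrative):

    #include <cstdint>

    struct ThreadSketch {
      uintptr_t top;  // target::Thread::top_offset()
      uintptr_t end;  // target::Thread::end_offset()
    };

    // Returns the untagged object start, or 0 to signal the slow path.
    uintptr_t TryBumpAllocate(ThreadSketch* thread, uintptr_t size) {
      const uintptr_t start = thread->top;  // movl(EAX, Address(THR, top))
      const uintptr_t next = start + size;  // addl(EBX, EAX)
      if (next < start) return 0;           // j(CARRY, &slow_case)
      if (next >= thread->end) return 0;    // cmpl(end); j(ABOVE_EQUAL, ...)
      thread->top = next;                   // movl(Address(THR, top), EBX)
      return start;  // the stub then adds kHeapObjectTag to form EAX
    }
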
@@ -734,18 +769,18 @@
// ESP + 12 : arguments array.
// ESP + 16 : current thread.
// Uses EAX, EDX, ECX, EDI as temporary registers.
-void StubCode::GenerateInvokeDartCodeStub(Assembler* assembler) {
- const intptr_t kTargetCodeOffset = 3 * kWordSize;
- const intptr_t kArgumentsDescOffset = 4 * kWordSize;
- const intptr_t kArgumentsOffset = 5 * kWordSize;
- const intptr_t kThreadOffset = 6 * kWordSize;
+void StubCodeCompiler::GenerateInvokeDartCodeStub(Assembler* assembler) {
+ const intptr_t kTargetCodeOffset = 3 * target::kWordSize;
+ const intptr_t kArgumentsDescOffset = 4 * target::kWordSize;
+ const intptr_t kArgumentsOffset = 5 * target::kWordSize;
+ const intptr_t kThreadOffset = 6 * target::kWordSize;
__ pushl(Address(ESP, 0)); // Marker for the profiler.
__ EnterFrame(0);
// Push code object to PC marker slot.
__ movl(EAX, Address(EBP, kThreadOffset));
- __ pushl(Address(EAX, Thread::invoke_dart_code_stub_offset()));
+ __ pushl(Address(EAX, target::Thread::invoke_dart_code_stub_offset()));
// Save C++ ABI callee-saved registers.
__ pushl(EBX);
@@ -761,15 +796,16 @@
// Save top resource and top exit frame info. Use EDX as a temporary register.
// StackFrameIterator reads the top exit frame info saved in this frame.
- __ movl(EDX, Address(THR, Thread::top_resource_offset()));
+ __ movl(EDX, Address(THR, target::Thread::top_resource_offset()));
__ pushl(EDX);
- __ movl(Address(THR, Thread::top_resource_offset()), Immediate(0));
- // The constant kExitLinkSlotFromEntryFp must be kept in sync with the
- // code below.
- ASSERT(kExitLinkSlotFromEntryFp == -7);
- __ movl(EDX, Address(THR, Thread::top_exit_frame_info_offset()));
+ __ movl(Address(THR, target::Thread::top_resource_offset()), Immediate(0));
+ // The constant target::frame_layout.exit_link_slot_from_entry_fp must be
+ // kept in sync with the code below.
+ ASSERT(target::frame_layout.exit_link_slot_from_entry_fp == -7);
+ __ movl(EDX, Address(THR, target::Thread::top_exit_frame_info_offset()));
__ pushl(EDX);
- __ movl(Address(THR, Thread::top_exit_frame_info_offset()), Immediate(0));
+ __ movl(Address(THR, target::Thread::top_exit_frame_info_offset()),
+ Immediate(0));
// Mark that the thread is executing Dart code. Do this after initializing the
// exit link for the profiler.
@@ -780,12 +816,13 @@
__ movl(EDX, Address(EDX, VMHandles::kOffsetOfRawPtrInHandle));
// Load number of arguments into EBX and adjust count for type arguments.
- __ movl(EBX, FieldAddress(EDX, ArgumentsDescriptor::count_offset()));
- __ cmpl(FieldAddress(EDX, ArgumentsDescriptor::type_args_len_offset()),
- Immediate(0));
+ __ movl(EBX, FieldAddress(EDX, target::ArgumentsDescriptor::count_offset()));
+ __ cmpl(
+ FieldAddress(EDX, target::ArgumentsDescriptor::type_args_len_offset()),
+ Immediate(0));
Label args_count_ok;
__ j(EQUAL, &args_count_ok, Assembler::kNearJump);
- __ addl(EBX, Immediate(Smi::RawValue(1))); // Include the type arguments.
+ __ addl(EBX, Immediate(target::ToRawSmi(1))); // Include the type arguments.
__ Bind(&args_count_ok);
// Save number of arguments as Smi on stack, replacing ArgumentsDesc.
__ movl(Address(EBP, kArgumentsDescOffset), EBX);
@@ -801,7 +838,7 @@
// Compute address of 'arguments array' data area into EDI.
__ movl(EDI, Address(EBP, kArgumentsOffset));
__ movl(EDI, Address(EDI, VMHandles::kOffsetOfRawPtrInHandle));
- __ leal(EDI, FieldAddress(EDI, Array::data_offset()));
+ __ leal(EDI, FieldAddress(EDI, target::Array::data_offset()));
__ Bind(&push_arguments);
__ movl(ECX, Address(EDI, EAX, TIMES_4, 0));
@@ -814,7 +851,7 @@
// Call the dart code entrypoint.
__ movl(EAX, Address(EBP, kTargetCodeOffset));
__ movl(EAX, Address(EAX, VMHandles::kOffsetOfRawPtrInHandle));
- __ call(FieldAddress(EAX, Code::entry_point_offset()));
+ __ call(FieldAddress(EAX, target::Code::entry_point_offset()));
// Read the saved number of passed arguments as Smi.
__ movl(EDX, Address(EBP, kArgumentsDescOffset));
@@ -823,8 +860,8 @@
// Restore the saved top exit frame info and top resource back into the
// Isolate structure.
- __ popl(Address(THR, Thread::top_exit_frame_info_offset()));
- __ popl(Address(THR, Thread::top_resource_offset()));
+ __ popl(Address(THR, target::Thread::top_exit_frame_info_offset()));
+ __ popl(Address(THR, target::Thread::top_resource_offset()));
// Restore the current VMTag from the stack.
__ popl(Assembler::VMTagAddress());
@@ -841,7 +878,8 @@
__ ret();
}
-void StubCode::GenerateInvokeDartCodeFromBytecodeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateInvokeDartCodeFromBytecodeStub(
+ Assembler* assembler) {
__ Unimplemented("Interpreter not yet supported");
}
@@ -851,15 +889,16 @@
// Output:
// EAX: new allocated RawContext object.
// EBX and EDX are destroyed.
-void StubCode::GenerateAllocateContextStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
if (FLAG_inline_alloc) {
Label slow_case;
// First compute the rounded instance size.
// EDX: number of context variables.
intptr_t fixed_size_plus_alignment_padding =
- (sizeof(RawContext) + kObjectAlignment - 1);
+ (target::Context::header_size() +
+ target::ObjectAlignment::kObjectAlignment - 1);
__ leal(EBX, Address(EDX, TIMES_4, fixed_size_plus_alignment_padding));
- __ andl(EBX, Immediate(-kObjectAlignment));
+ __ andl(EBX, Immediate(-target::ObjectAlignment::kObjectAlignment));
NOT_IN_PRODUCT(__ MaybeTraceAllocation(kContextCid, EAX, &slow_case,
Assembler::kFarJump));
@@ -867,13 +906,13 @@
// Now allocate the object.
// EDX: number of context variables.
const intptr_t cid = kContextCid;
- __ movl(EAX, Address(THR, Thread::top_offset()));
+ __ movl(EAX, Address(THR, target::Thread::top_offset()));
__ addl(EBX, EAX);
// Check if the allocation fits into the remaining space.
// EAX: potential new object.
// EBX: potential next object start.
// EDX: number of context variables.
- __ cmpl(EBX, Address(THR, Thread::end_offset()));
+ __ cmpl(EBX, Address(THR, target::Thread::end_offset()));
if (FLAG_use_slow_path) {
__ jmp(&slow_case);
} else {
@@ -890,7 +929,7 @@
// EAX: new object.
// EBX: next object start.
// EDX: number of context variables.
- __ movl(Address(THR, Thread::top_offset()), EBX);
+ __ movl(Address(THR, target::Thread::top_offset()), EBX);
// EBX: Size of allocation in bytes.
__ subl(EBX, EAX);
__ addl(EAX, Immediate(kHeapObjectTag));
@@ -903,10 +942,11 @@
{
Label size_tag_overflow, done;
__ leal(EBX, Address(EDX, TIMES_4, fixed_size_plus_alignment_padding));
- __ andl(EBX, Immediate(-kObjectAlignment));
- __ cmpl(EBX, Immediate(RawObject::SizeTag::kMaxSizeTag));
+ __ andl(EBX, Immediate(-target::ObjectAlignment::kObjectAlignment));
+ __ cmpl(EBX, Immediate(target::RawObject::kSizeTagMaxSizeTag));
__ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
- __ shll(EBX, Immediate(RawObject::kSizeTagPos - kObjectAlignmentLog2));
+ __ shll(EBX, Immediate(target::RawObject::kTagBitsSizeTagPos -
+ target::ObjectAlignment::kObjectAlignmentLog2));
__ jmp(&done);
__ Bind(&size_tag_overflow);
@@ -917,39 +957,36 @@
// EAX: new object.
// EDX: number of context variables.
// EBX: size and bit tags.
- uint32_t tags = 0;
- tags = RawObject::ClassIdTag::update(cid, tags);
- tags = RawObject::NewBit::update(true, tags);
+ uint32_t tags = target::MakeTagWordForNewSpaceObject(cid, 0);
__ orl(EBX, Immediate(tags));
- __ movl(FieldAddress(EAX, Context::tags_offset()), EBX); // Tags.
+ __ movl(FieldAddress(EAX, target::Object::tags_offset()), EBX); // Tags.
}
// Set up the number of context variables field.
// EAX: new object.
// EDX: number of context variables as integer value (not object).
- __ movl(FieldAddress(EAX, Context::num_variables_offset()), EDX);
+ __ movl(FieldAddress(EAX, target::Context::num_variables_offset()), EDX);
// Setup the parent field.
// EAX: new object.
// EDX: number of context variables.
// No generational barrier needed, since we are storing null.
- __ StoreIntoObjectNoBarrier(EAX,
- FieldAddress(EAX, Context::parent_offset()),
- Object::null_object());
+ __ StoreIntoObjectNoBarrier(
+ EAX, FieldAddress(EAX, target::Context::parent_offset()), NullObject());
// Initialize the context variables.
// EAX: new object.
// EDX: number of context variables.
{
Label loop, entry;
- __ leal(EBX, FieldAddress(EAX, Context::variable_offset(0)));
+ __ leal(EBX, FieldAddress(EAX, target::Context::variable_offset(0)));
__ jmp(&entry, Assembler::kNearJump);
__ Bind(&loop);
__ decl(EDX);
// No generational barrier needed, since we are storing null.
__ StoreIntoObjectNoBarrier(EAX, Address(EBX, EDX, TIMES_4, 0),
- Object::null_object());
+ NullObject());
__ Bind(&entry);
__ cmpl(EDX, Immediate(0));
__ j(NOT_EQUAL, &loop, Assembler::kNearJump);
@@ -976,7 +1013,7 @@
__ ret();
}
-void StubCode::GenerateWriteBarrierWrappersStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateWriteBarrierWrappersStub(Assembler* assembler) {
// Not used on IA32.
__ Breakpoint();
}
@@ -1003,8 +1040,8 @@
// store buffer if the object is in the store buffer already.
// Spilled: EAX, ECX
// EDX: Address being stored
- __ movl(EAX, FieldAddress(EDX, Object::tags_offset()));
- __ testl(EAX, Immediate(1 << RawObject::kOldAndNotRememberedBit));
+ __ movl(EAX, FieldAddress(EDX, target::Object::tags_offset()));
+ __ testl(EAX, Immediate(1 << target::RawObject::kOldAndNotRememberedBit));
__ j(NOT_EQUAL, &add_to_buffer, Assembler::kNearJump);
__ popl(ECX);
__ popl(EAX);
@@ -1017,12 +1054,12 @@
if (cards) {
// Check if this object is using remembered cards.
- __ testl(EAX, Immediate(1 << RawObject::kCardRememberedBit));
+ __ testl(EAX, Immediate(1 << target::RawObject::kCardRememberedBit));
__ j(NOT_EQUAL, &remember_card, Assembler::kFarJump); // Unlikely.
} else {
#if defined(DEBUG)
Label ok;
- __ testl(EAX, Immediate(1 << RawObject::kCardRememberedBit));
+ __ testl(EAX, Immediate(1 << target::RawObject::kCardRememberedBit));
__ j(ZERO, &ok, Assembler::kFarJump); // Unlikely.
__ Stop("Wrong barrier");
__ Bind(&ok);
@@ -1031,16 +1068,18 @@
// lock+andl is an atomic read-modify-write.
__ lock();
- __ andl(FieldAddress(EDX, Object::tags_offset()),
- Immediate(~(1 << RawObject::kOldAndNotRememberedBit)));
+ __ andl(FieldAddress(EDX, target::Object::tags_offset()),
+ Immediate(~(1 << target::RawObject::kOldAndNotRememberedBit)));
// Load the StoreBuffer block out of the thread. Then load top_ out of the
// StoreBufferBlock and add the address to the pointers_.
// Spilled: EAX, ECX
// EDX: Address being stored
- __ movl(EAX, Address(THR, Thread::store_buffer_block_offset()));
- __ movl(ECX, Address(EAX, StoreBufferBlock::top_offset()));
- __ movl(Address(EAX, ECX, TIMES_4, StoreBufferBlock::pointers_offset()), EDX);
+ __ movl(EAX, Address(THR, target::Thread::store_buffer_block_offset()));
+ __ movl(ECX, Address(EAX, target::StoreBufferBlock::top_offset()));
+ __ movl(
+ Address(EAX, ECX, TIMES_4, target::StoreBufferBlock::pointers_offset()),
+ EDX);
// Increment top_ and check for overflow.
// Spilled: EAX, ECX
@@ -1048,8 +1087,8 @@
// EAX: StoreBufferBlock
Label overflow;
__ incl(ECX);
- __ movl(Address(EAX, StoreBufferBlock::top_offset()), ECX);
- __ cmpl(ECX, Immediate(StoreBufferBlock::kSize));
+ __ movl(Address(EAX, target::StoreBufferBlock::top_offset()), ECX);
+ __ cmpl(ECX, Immediate(target::StoreBufferBlock::kSize));
// Restore values.
// Spilled: EAX, ECX
__ popl(ECX);
@@ -1061,7 +1100,7 @@
__ Bind(&overflow);
// Setup frame, push callee-saved registers.
- __ EnterCallRuntimeFrame(1 * kWordSize);
+ __ EnterCallRuntimeFrame(1 * target::kWordSize);
__ movl(Address(ESP, 0), THR); // Push the thread as the only argument.
__ CallRuntime(kStoreBufferBlockProcessRuntimeEntry, 1);
// Restore callee-saved registers, tear down frame.
@@ -1073,16 +1112,19 @@
// Get card table.
__ Bind(&remember_card);
- __ movl(EAX, EDX); // Object.
- __ andl(EAX, Immediate(kPageMask)); // HeapPage.
- __ cmpl(Address(EAX, HeapPage::card_table_offset()), Immediate(0));
+ __ movl(EAX, EDX); // Object.
+ __ andl(EAX, Immediate(target::kPageMask)); // HeapPage.
+ __ cmpl(Address(EAX, target::HeapPage::card_table_offset()), Immediate(0));
__ j(EQUAL, &remember_card_slow, Assembler::kNearJump);
// Dirty the card.
__ subl(EDI, EAX); // Offset in page.
- __ movl(EAX, Address(EAX, HeapPage::card_table_offset())); // Card table.
+ __ movl(
+ EAX,
+ Address(EAX, target::HeapPage::card_table_offset())); // Card table.
__ shrl(EDI,
- Immediate(HeapPage::kBytesPerCardLog2)); // Index in card table.
+ Immediate(
+ target::HeapPage::kBytesPerCardLog2)); // Index in card table.
__ movb(Address(EAX, EDI, TIMES_1, 0), Immediate(1));
__ popl(ECX);
__ popl(EAX);
@@ -1090,9 +1132,9 @@
// Card table not yet allocated.
__ Bind(&remember_card_slow);
- __ EnterCallRuntimeFrame(2 * kWordSize);
- __ movl(Address(ESP, 0 * kWordSize), EDX); // Object
- __ movl(Address(ESP, 1 * kWordSize), EDI); // Slot
+ __ EnterCallRuntimeFrame(2 * target::kWordSize);
+ __ movl(Address(ESP, 0 * target::kWordSize), EDX); // Object
+ __ movl(Address(ESP, 1 * target::kWordSize), EDI); // Slot
__ CallRuntime(kRememberCardRuntimeEntry, 2);
__ LeaveCallRuntimeFrame();
__ popl(ECX);
@@ -1101,14 +1143,16 @@
}
}
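
The barrier's fast path, restated: clear the old-and-not-remembered bit, append the object to the thread's current StoreBufferBlock, and only enter the runtime when the block fills. A sketch with an assumed capacity and layout:

    #include <cstdint>

    struct StoreBufferBlockSketch {
      static constexpr intptr_t kSize = 1024;  // assumed capacity
      intptr_t top;                            // top_offset()
      uintptr_t pointers[kSize];               // pointers_offset()
    };

    // Returns true when kStoreBufferBlockProcessRuntimeEntry must run.
    bool Remember(StoreBufferBlockSketch* block, uintptr_t object) {
      block->pointers[block->top] = object;  // movl(Address(EAX, ECX, ...), EDX)
      block->top += 1;                       // incl(ECX); movl(top_offset, ECX)
      return block->top == StoreBufferBlockSketch::kSize;  // cmpl(kSize)
    }
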
-void StubCode::GenerateWriteBarrierStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateWriteBarrierStub(Assembler* assembler) {
GenerateWriteBarrierStubHelper(
- assembler, Address(THR, Thread::write_barrier_code_offset()), false);
+ assembler, Address(THR, target::Thread::write_barrier_code_offset()),
+ false);
}
-void StubCode::GenerateArrayWriteBarrierStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateArrayWriteBarrierStub(Assembler* assembler) {
GenerateWriteBarrierStubHelper(
- assembler, Address(THR, Thread::array_write_barrier_code_offset()), true);
+ assembler,
+ Address(THR, target::Thread::array_write_barrier_code_offset()), true);
}
// Called for inline allocation of objects.
@@ -1118,56 +1162,53 @@
// Uses EAX, EBX, ECX, EDX, EDI as temporary registers.
// Returns patch_code_pc offset where patching code for disabling the stub
// has been generated (similar to regularly generated Dart code).
-void StubCode::GenerateAllocationStubForClass(Assembler* assembler,
- const Class& cls) {
- const intptr_t kObjectTypeArgumentsOffset = 1 * kWordSize;
- const Immediate& raw_null =
- Immediate(reinterpret_cast<intptr_t>(Object::null()));
+void StubCodeCompiler::GenerateAllocationStubForClass(Assembler* assembler,
+ const Class& cls) {
+ const intptr_t kObjectTypeArgumentsOffset = 1 * target::kWordSize;
+ const Immediate& raw_null = Immediate(target::ToRawPointer(NullObject()));
// The generated code is different if the class is parameterized.
- const bool is_cls_parameterized = cls.NumTypeArguments() > 0;
- ASSERT(!is_cls_parameterized ||
- (cls.type_arguments_field_offset() != Class::kNoTypeArguments));
+ const bool is_cls_parameterized = target::Class::NumTypeArguments(cls) > 0;
+ ASSERT(!is_cls_parameterized || target::Class::TypeArgumentsFieldOffset(
+ cls) != target::Class::kNoTypeArguments);
// kInlineInstanceSize is a constant used as a threshold for determining
// when the object initialization should be done as a loop or as
// straight line code.
const int kInlineInstanceSize = 12; // In words.
- const intptr_t instance_size = cls.instance_size();
+ const intptr_t instance_size = target::Class::InstanceSize(cls);
ASSERT(instance_size > 0);
if (is_cls_parameterized) {
__ movl(EDX, Address(ESP, kObjectTypeArgumentsOffset));
// EDX: instantiated type arguments.
}
- Isolate* isolate = Isolate::Current();
- if (FLAG_inline_alloc && Heap::IsAllocatableInNewSpace(instance_size) &&
- !cls.TraceAllocation(isolate)) {
+ if (FLAG_inline_alloc &&
+ target::Heap::IsAllocatableInNewSpace(instance_size) &&
+ !target::Class::TraceAllocation(cls)) {
Label slow_case;
// Allocate the object and update top to point to
// next object start and initialize the allocated object.
// EDX: instantiated type arguments (if is_cls_parameterized).
- __ movl(EAX, Address(THR, Thread::top_offset()));
+ __ movl(EAX, Address(THR, target::Thread::top_offset()));
__ leal(EBX, Address(EAX, instance_size));
// Check if the allocation fits into the remaining space.
// EAX: potential new object start.
// EBX: potential next object start.
- __ cmpl(EBX, Address(THR, Thread::end_offset()));
+ __ cmpl(EBX, Address(THR, target::Thread::end_offset()));
if (FLAG_use_slow_path) {
__ jmp(&slow_case);
} else {
__ j(ABOVE_EQUAL, &slow_case);
}
- __ movl(Address(THR, Thread::top_offset()), EBX);
- NOT_IN_PRODUCT(__ UpdateAllocationStats(cls.id(), ECX));
+ __ movl(Address(THR, target::Thread::top_offset()), EBX);
+ NOT_IN_PRODUCT(__ UpdateAllocationStats(target::Class::GetId(cls), ECX));
// EAX: new object start (untagged).
// EBX: next object start.
// EDX: new object type arguments (if is_cls_parameterized).
// Set the tags.
- uint32_t tags = 0;
- tags = RawObject::SizeTag::update(instance_size, tags);
- ASSERT(cls.id() != kIllegalCid);
- tags = RawObject::ClassIdTag::update(cls.id(), tags);
- tags = RawObject::NewBit::update(true, tags);
- __ movl(Address(EAX, Instance::tags_offset()), Immediate(tags));
+ ASSERT(target::Class::GetId(cls) != kIllegalCid);
+ uint32_t tags = target::MakeTagWordForNewSpaceObject(
+ target::Class::GetId(cls), instance_size);
+ __ movl(Address(EAX, target::Object::tags_offset()), Immediate(tags));
__ addl(EAX, Immediate(kHeapObjectTag));
// Initialize the remaining words of the object.
@@ -1176,16 +1217,17 @@
// EBX: next object start.
// EDX: new object type arguments (if is_cls_parameterized).
// First try inlining the initialization without a loop.
- if (instance_size < (kInlineInstanceSize * kWordSize)) {
+ if (instance_size < (kInlineInstanceSize * target::kWordSize)) {
// Check if the object contains any non-header fields.
// Small objects are initialized using a consecutive set of writes.
- for (intptr_t current_offset = Instance::NextFieldOffset();
- current_offset < instance_size; current_offset += kWordSize) {
+ for (intptr_t current_offset = target::Instance::first_field_offset();
+ current_offset < instance_size;
+ current_offset += target::kWordSize) {
__ StoreIntoObjectNoBarrier(EAX, FieldAddress(EAX, current_offset),
- Object::null_object());
+ NullObject());
}
} else {
- __ leal(ECX, FieldAddress(EAX, Instance::NextFieldOffset()));
+ __ leal(ECX, FieldAddress(EAX, target::Instance::first_field_offset()));
// Loop until the whole object is initialized.
// EAX: new object (tagged).
// EBX: next object start.
@@ -1196,8 +1238,8 @@
__ Bind(&init_loop);
__ cmpl(ECX, EBX);
__ j(ABOVE_EQUAL, &done, Assembler::kNearJump);
- __ StoreIntoObjectNoBarrier(EAX, Address(ECX, 0), Object::null_object());
- __ addl(ECX, Immediate(kWordSize));
+ __ StoreIntoObjectNoBarrier(EAX, Address(ECX, 0), NullObject());
+ __ addl(ECX, Immediate(target::kWordSize));
__ jmp(&init_loop, Assembler::kNearJump);
__ Bind(&done);
}
@@ -1205,7 +1247,7 @@
// EAX: new object (tagged).
// EDX: new object type arguments.
// Set the type arguments in the new object.
- intptr_t offset = cls.type_arguments_field_offset();
+ const intptr_t offset = target::Class::TypeArgumentsFieldOffset(cls);
__ StoreIntoObjectNoBarrier(EAX, FieldAddress(EAX, offset), EDX);
}
// Done allocating and initializing the instance.
@@ -1220,7 +1262,8 @@
// calling into the runtime.
__ EnterStubFrame();
__ pushl(raw_null); // Setup space on stack for return value.
- __ PushObject(cls); // Push class of object to be allocated.
+ __ PushObject(
+ CastHandle<Object>(cls)); // Push class of object to be allocated.
if (is_cls_parameterized) {
__ pushl(EDX); // Push type arguments of object to be allocated.
} else {
@@ -1244,24 +1287,28 @@
// ESP + 4 : address of last argument.
// EDX : arguments descriptor array.
// Uses EAX, EBX, EDI as temporary registers.
-void StubCode::GenerateCallClosureNoSuchMethodStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub(
+ Assembler* assembler) {
__ EnterStubFrame();
// Load the receiver.
- __ movl(EDI, FieldAddress(EDX, ArgumentsDescriptor::count_offset()));
- __ movl(EAX, Address(EBP, EDI, TIMES_2, kParamEndSlotFromFp * kWordSize));
+ __ movl(EDI, FieldAddress(EDX, target::ArgumentsDescriptor::count_offset()));
+ __ movl(EAX,
+ Address(EBP, EDI, TIMES_2,
+ target::frame_layout.param_end_from_fp * target::kWordSize));
__ pushl(Immediate(0)); // Setup space on stack for result from noSuchMethod.
__ pushl(EAX); // Receiver.
__ pushl(EDX); // Arguments descriptor array.
// Adjust arguments count.
- __ cmpl(FieldAddress(EDX, ArgumentsDescriptor::type_args_len_offset()),
- Immediate(0));
+ __ cmpl(
+ FieldAddress(EDX, target::ArgumentsDescriptor::type_args_len_offset()),
+ Immediate(0));
__ movl(EDX, EDI);
Label args_count_ok;
__ j(EQUAL, &args_count_ok, Assembler::kNearJump);
- __ addl(EDX, Immediate(Smi::RawValue(1))); // Include the type arguments.
+ __ addl(EDX, Immediate(target::ToRawSmi(1))); // Include the type arguments.
__ Bind(&args_count_ok);
// EDX: Smi-tagged arguments array length.
@@ -1275,7 +1322,8 @@
// Cannot use function object from ICData as it may be the inlined
// function and not the top-scope function.
-void StubCode::GenerateOptimizedUsageCounterIncrement(Assembler* assembler) {
+void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement(
+ Assembler* assembler) {
Register ic_reg = ECX;
Register func_reg = EBX;
if (FLAG_trace_optimized_ic_calls) {
@@ -1291,19 +1339,19 @@
__ popl(func_reg); // Restore.
__ LeaveFrame();
}
- __ incl(FieldAddress(func_reg, Function::usage_counter_offset()));
+ __ incl(FieldAddress(func_reg, target::Function::usage_counter_offset()));
}
// Loads function into 'temp_reg'.
-void StubCode::GenerateUsageCounterIncrement(Assembler* assembler,
- Register temp_reg) {
+void StubCodeCompiler::GenerateUsageCounterIncrement(Assembler* assembler,
+ Register temp_reg) {
if (FLAG_optimization_counter_threshold >= 0) {
Register ic_reg = ECX;
Register func_reg = temp_reg;
ASSERT(ic_reg != func_reg);
__ Comment("Increment function counter");
- __ movl(func_reg, FieldAddress(ic_reg, ICData::owner_offset()));
- __ incl(FieldAddress(func_reg, Function::usage_counter_offset()));
+ __ movl(func_reg, FieldAddress(ic_reg, target::ICData::owner_offset()));
+ __ incl(FieldAddress(func_reg, target::Function::usage_counter_offset()));
}
}
@@ -1317,8 +1365,8 @@
Label* not_smi_or_overflow) {
__ Comment("Fast Smi op");
ASSERT(num_args == 2);
- __ movl(EDI, Address(ESP, +1 * kWordSize)); // Right
- __ movl(EAX, Address(ESP, +2 * kWordSize)); // Left
+ __ movl(EDI, Address(ESP, +1 * target::kWordSize)); // Right
+ __ movl(EAX, Address(ESP, +2 * target::kWordSize)); // Left
__ movl(EBX, EDI);
__ orl(EBX, EAX);
__ testl(EBX, Immediate(kSmiTagMask));
@@ -1344,10 +1392,10 @@
Label done, is_true;
__ cmpl(EAX, EDI);
__ j(EQUAL, &is_true, Assembler::kNearJump);
- __ LoadObject(EAX, Bool::False());
+ __ LoadObject(EAX, CastHandle<Object>(FalseObject()));
__ jmp(&done, Assembler::kNearJump);
__ Bind(&is_true);
- __ LoadObject(EAX, Bool::True());
+ __ LoadObject(EAX, CastHandle<Object>(TrueObject()));
__ Bind(&done);
break;
}
@@ -1356,26 +1404,26 @@
}
// ECX: IC data object.
- __ movl(EBX, FieldAddress(ECX, ICData::ic_data_offset()));
+ __ movl(EBX, FieldAddress(ECX, target::ICData::ic_data_offset()));
// EBX: ic_data_array with check entries: classes and target functions.
- __ leal(EBX, FieldAddress(EBX, Array::data_offset()));
+ __ leal(EBX, FieldAddress(EBX, target::Array::data_offset()));
#if defined(DEBUG)
// Check that first entry is for Smi/Smi.
Label error, ok;
- const Immediate& imm_smi_cid =
- Immediate(reinterpret_cast<intptr_t>(Smi::New(kSmiCid)));
- __ cmpl(Address(EBX, 0 * kWordSize), imm_smi_cid);
+ const Immediate& imm_smi_cid = Immediate(target::ToRawSmi(kSmiCid));
+ __ cmpl(Address(EBX, 0 * target::kWordSize), imm_smi_cid);
__ j(NOT_EQUAL, &error, Assembler::kNearJump);
- __ cmpl(Address(EBX, 1 * kWordSize), imm_smi_cid);
+ __ cmpl(Address(EBX, 1 * target::kWordSize), imm_smi_cid);
__ j(EQUAL, &ok, Assembler::kNearJump);
__ Bind(&error);
__ Stop("Incorrect IC data");
__ Bind(&ok);
#endif
if (FLAG_optimization_counter_threshold >= 0) {
- const intptr_t count_offset = ICData::CountIndexFor(num_args) * kWordSize;
+ const intptr_t count_offset =
+ target::ICData::CountIndexFor(num_args) * target::kWordSize;
// Update counter, ignore overflow.
- __ addl(Address(EBX, count_offset), Immediate(Smi::RawValue(1)));
+ __ addl(Address(EBX, count_offset), Immediate(target::ToRawSmi(1)));
}
__ ret();
}
@@ -1390,7 +1438,7 @@
// - Check if 'num_args' (including receiver) match any IC data group.
// - Match found -> jump to target.
// - Match not found -> jump to IC miss.
-void StubCode::GenerateNArgsCheckInlineCacheStub(
+void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
Assembler* assembler,
intptr_t num_args,
const RuntimeEntry& handle_ic_miss,
@@ -1404,9 +1452,9 @@
Label ok;
// Check that the IC data array has NumArgsTested() == num_args.
// 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
- __ movl(EBX, FieldAddress(ECX, ICData::state_bits_offset()));
- ASSERT(ICData::NumArgsTestedShift() == 0); // No shift needed.
- __ andl(EBX, Immediate(ICData::NumArgsTestedMask()));
+ __ movl(EBX, FieldAddress(ECX, target::ICData::state_bits_offset()));
+ ASSERT(target::ICData::NumArgsTestedShift() == 0); // No shift needed.
+ __ andl(EBX, Immediate(target::ICData::NumArgsTestedMask()));
__ cmpl(EBX, Immediate(num_args));
__ j(EQUAL, &ok, Assembler::kNearJump);
__ Stop("Incorrect stub for IC data");
@@ -1419,7 +1467,7 @@
if (!optimized) {
__ Comment("Check single stepping");
__ LoadIsolate(EAX);
- __ cmpb(Address(EAX, Isolate::single_step_offset()), Immediate(0));
+ __ cmpb(Address(EAX, target::Isolate::single_step_offset()), Immediate(0));
__ j(NOT_EQUAL, &stepping);
__ Bind(&done_stepping);
}
@@ -1433,20 +1481,21 @@
__ Comment("Extract ICData initial values and receiver cid");
// ECX: IC data object (preserved).
// Load arguments descriptor into EDX.
- __ movl(EDX, FieldAddress(ECX, ICData::arguments_descriptor_offset()));
+ __ movl(EDX,
+ FieldAddress(ECX, target::ICData::arguments_descriptor_offset()));
// Loop that checks if there is an IC data match.
Label loop, found, miss;
// ECX: IC data object (preserved).
- __ movl(EBX, FieldAddress(ECX, ICData::ic_data_offset()));
+ __ movl(EBX, FieldAddress(ECX, target::ICData::ic_data_offset()));
// EBX: ic_data_array with check entries: classes and target functions.
- __ leal(EBX, FieldAddress(EBX, Array::data_offset()));
+ __ leal(EBX, FieldAddress(EBX, target::Array::data_offset()));
// EBX: points directly to the first ic data array element.
// Get argument descriptor into EAX. In the 1-argument case this is the
// last time we need the argument descriptor, and we reuse EAX for the
// class IDs from the IC descriptor. In the 2-argument case we preserve
// the argument descriptor in EAX.
- __ movl(EAX, FieldAddress(EDX, ArgumentsDescriptor::count_offset()));
+ __ movl(EAX, FieldAddress(EDX, target::ArgumentsDescriptor::count_offset()));
if (num_args == 1) {
// Load receiver into EDI.
__ movl(EDI,
@@ -1459,20 +1508,23 @@
// We unroll the generic one that is generated once more than the others.
bool optimize = kind == Token::kILLEGAL;
- const intptr_t target_offset = ICData::TargetIndexFor(num_args) * kWordSize;
- const intptr_t count_offset = ICData::CountIndexFor(num_args) * kWordSize;
+ const intptr_t target_offset =
+ target::ICData::TargetIndexFor(num_args) * target::kWordSize;
+ const intptr_t count_offset =
+ target::ICData::CountIndexFor(num_args) * target::kWordSize;
const intptr_t entry_size =
- ICData::TestEntryLengthFor(num_args, exactness_check) * kWordSize;
+ target::ICData::TestEntryLengthFor(num_args, exactness_check) *
+ target::kWordSize;
__ Bind(&loop);
for (int unroll = optimize ? 4 : 2; unroll >= 0; unroll--) {
Label update;
if (num_args == 1) {
__ movl(EDI, Address(EBX, 0));
- __ cmpl(EDI, EAX); // Class id match?
- __ j(EQUAL, &found); // Break.
- __ addl(EBX, Immediate(entry_size)); // Next entry.
- __ cmpl(EDI, Immediate(Smi::RawValue(kIllegalCid))); // Done?
+ __ cmpl(EDI, EAX); // Class id match?
+ __ j(EQUAL, &found); // Break.
+ __ addl(EBX, Immediate(entry_size)); // Next entry.
+ __ cmpl(EDI, Immediate(target::ToRawSmi(kIllegalCid))); // Done?
} else {
ASSERT(num_args == 2);
// Load receiver into EDI.
@@ -1482,15 +1534,15 @@
__ j(NOT_EQUAL, &update); // Continue.
// Load second argument into EDI.
- __ movl(EDI, Address(ESP, EAX, TIMES_2, -kWordSize));
+ __ movl(EDI, Address(ESP, EAX, TIMES_2, -target::kWordSize));
__ LoadTaggedClassIdMayBeSmi(EDI, EDI);
- __ cmpl(EDI, Address(EBX, kWordSize)); // Class id match?
- __ j(EQUAL, &found); // Break.
+ __ cmpl(EDI, Address(EBX, target::kWordSize)); // Class id match?
+ __ j(EQUAL, &found); // Break.
__ Bind(&update);
__ addl(EBX, Immediate(entry_size)); // Next entry.
__ cmpl(Address(EBX, -entry_size),
- Immediate(Smi::RawValue(kIllegalCid))); // Done?
+ Immediate(target::ToRawSmi(kIllegalCid))); // Done?
}
if (unroll == 0) {
@@ -1504,7 +1556,7 @@
__ Comment("IC miss");
// Compute address of arguments (first read number of arguments from
// arguments descriptor array and then compute address on the stack).
- __ movl(EAX, FieldAddress(EDX, ArgumentsDescriptor::count_offset()));
+ __ movl(EAX, FieldAddress(EDX, target::ArgumentsDescriptor::count_offset()));
__ leal(EAX, Address(ESP, EAX, TIMES_2, 0)); // EAX is Smi.
// Create a stub frame as we are pushing some objects on the stack before
// calling into the runtime.
@@ -1514,7 +1566,7 @@
__ pushl(Immediate(0)); // Result slot.
// Push call arguments.
for (intptr_t i = 0; i < num_args; i++) {
- __ movl(EBX, Address(EAX, -kWordSize * i));
+ __ movl(EBX, Address(EAX, -target::kWordSize * i));
__ pushl(EBX);
}
__ pushl(ECX); // Pass IC data object.
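
The unrolled loop above is a linear scan of the ic_data array: flat groups of class ids, target, and count, terminated by a kIllegalCid sentinel. The one-argument case as a sketch (entry width and sentinel are illustrative stand-ins for TestEntryLengthFor and kIllegalCid):

    #include <cstdint>

    // Returns the entry index on a hit, or -1 for an IC miss.
    intptr_t FindICDataEntry(const intptr_t* entries,
                             intptr_t entry_length,
                             intptr_t sentinel_cid,
                             intptr_t receiver_cid) {
      for (intptr_t i = 0; entries[i] != sentinel_cid; i += entry_length) {
        if (entries[i] == receiver_cid) {
          return i;  // &found: target and count sit at fixed offsets from here
        }
      }
      return -1;  // &miss: fall through to the runtime miss handler
    }
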
@@ -1540,14 +1592,14 @@
if (FLAG_optimization_counter_threshold >= 0) {
__ Comment("Update caller's counter");
// Ignore overflow.
- __ addl(Address(EBX, count_offset), Immediate(Smi::RawValue(1)));
+ __ addl(Address(EBX, count_offset), Immediate(target::ToRawSmi(1)));
}
__ movl(EAX, Address(EBX, target_offset));
__ Bind(&call_target_function);
__ Comment("Call target");
// EAX: Target function.
- __ movl(EBX, FieldAddress(EAX, Function::entry_point_offset()));
+ __ movl(EBX, FieldAddress(EAX, target::Function::entry_point_offset()));
__ jmp(EBX);
#if !defined(PRODUCT)
@@ -1573,37 +1625,39 @@
// 2 .. (length - 1): group of checks, each check containing:
// - N classes.
// - 1 target function.
-void StubCode::GenerateOneArgCheckInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub(
+ Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, EBX);
GenerateNArgsCheckInlineCacheStub(
assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL);
}
-void StubCode::GenerateOneArgCheckInlineCacheWithExactnessCheckStub(
+void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub(
Assembler* assembler) {
__ Stop("Unimplemented");
}
-void StubCode::GenerateTwoArgsCheckInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub(
+ Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, EBX);
GenerateNArgsCheckInlineCacheStub(assembler, 2,
kInlineCacheMissHandlerTwoArgsRuntimeEntry,
Token::kILLEGAL);
}
-void StubCode::GenerateSmiAddInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSmiAddInlineCacheStub(Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, EBX);
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD);
}
-void StubCode::GenerateSmiSubInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSmiSubInlineCacheStub(Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, EBX);
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kSUB);
}
-void StubCode::GenerateSmiEqualInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSmiEqualInlineCacheStub(Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, EBX);
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ);
@@ -1620,7 +1674,7 @@
// 2 .. (length - 1): group of checks, each check containing:
// - N classes.
// - 1 target function.
-void StubCode::GenerateOneArgOptimizedCheckInlineCacheStub(
+void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub(
Assembler* assembler) {
GenerateOptimizedUsageCounterIncrement(assembler);
GenerateNArgsCheckInlineCacheStub(assembler, 1,
@@ -1628,12 +1682,13 @@
Token::kILLEGAL, true /* optimized */);
}
-void StubCode::GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub(
- Assembler* assembler) {
+void StubCodeCompiler::
+ GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub(
+ Assembler* assembler) {
__ Stop("Unimplemented");
}
-void StubCode::GenerateTwoArgsOptimizedCheckInlineCacheStub(
+void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub(
Assembler* assembler) {
GenerateOptimizedUsageCounterIncrement(assembler);
GenerateNArgsCheckInlineCacheStub(assembler, 2,
@@ -1644,7 +1699,8 @@
// Intermediary stub between a static call and its target. ICData contains
// the target function and the call count.
// ECX: ICData
-void StubCode::GenerateZeroArgsUnoptimizedStaticCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub(
+ Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, EBX);
#if defined(DEBUG)
@@ -1652,9 +1708,9 @@
Label ok;
// Check that the IC data array has NumArgsTested() == num_args.
// 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
- __ movl(EBX, FieldAddress(ECX, ICData::state_bits_offset()));
- ASSERT(ICData::NumArgsTestedShift() == 0); // No shift needed.
- __ andl(EBX, Immediate(ICData::NumArgsTestedMask()));
+ __ movl(EBX, FieldAddress(ECX, target::ICData::state_bits_offset()));
+ ASSERT(target::ICData::NumArgsTestedShift() == 0); // No shift needed.
+ __ andl(EBX, Immediate(target::ICData::NumArgsTestedMask()));
__ cmpl(EBX, Immediate(0));
__ j(EQUAL, &ok, Assembler::kNearJump);
__ Stop("Incorrect IC data for unoptimized static call");
@@ -1666,30 +1722,33 @@
// Check single stepping.
Label stepping, done_stepping;
__ LoadIsolate(EAX);
- __ cmpb(Address(EAX, Isolate::single_step_offset()), Immediate(0));
+ __ cmpb(Address(EAX, target::Isolate::single_step_offset()), Immediate(0));
__ j(NOT_EQUAL, &stepping, Assembler::kNearJump);
__ Bind(&done_stepping);
#endif
// ECX: IC data object (preserved).
- __ movl(EBX, FieldAddress(ECX, ICData::ic_data_offset()));
+ __ movl(EBX, FieldAddress(ECX, target::ICData::ic_data_offset()));
// EBX: ic_data_array with entries: target functions and count.
- __ leal(EBX, FieldAddress(EBX, Array::data_offset()));
+ __ leal(EBX, FieldAddress(EBX, target::Array::data_offset()));
// EBX: points directly to the first ic data array element.
- const intptr_t target_offset = ICData::TargetIndexFor(0) * kWordSize;
- const intptr_t count_offset = ICData::CountIndexFor(0) * kWordSize;
+ const intptr_t target_offset =
+ target::ICData::TargetIndexFor(0) * target::kWordSize;
+ const intptr_t count_offset =
+ target::ICData::CountIndexFor(0) * target::kWordSize;
if (FLAG_optimization_counter_threshold >= 0) {
// Increment count for this call, ignore overflow.
- __ addl(Address(EBX, count_offset), Immediate(Smi::RawValue(1)));
+ __ addl(Address(EBX, count_offset), Immediate(target::ToRawSmi(1)));
}
// Load arguments descriptor into EDX.
- __ movl(EDX, FieldAddress(ECX, ICData::arguments_descriptor_offset()));
+ __ movl(EDX,
+ FieldAddress(ECX, target::ICData::arguments_descriptor_offset()));
// Get function and call it, if possible.
__ movl(EAX, Address(EBX, target_offset));
- __ movl(EBX, FieldAddress(EAX, Function::entry_point_offset()));
+ __ movl(EBX, FieldAddress(EAX, target::Function::entry_point_offset()));
__ jmp(EBX);
#if !defined(PRODUCT)
@@ -1703,13 +1762,15 @@
#endif
}
-void StubCode::GenerateOneArgUnoptimizedStaticCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub(
+ Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, EBX);
GenerateNArgsCheckInlineCacheStub(
assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL);
}
-void StubCode::GenerateTwoArgsUnoptimizedStaticCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub(
+ Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, EBX);
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL);
@@ -1718,7 +1779,7 @@
// Stub for compiling a function and jumping to the compiled code.
// EDX: Arguments descriptor.
// EAX: Function.
-void StubCode::GenerateLazyCompileStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
__ EnterStubFrame();
__ pushl(EDX); // Preserve arguments descriptor array.
__ pushl(EAX); // Pass function.
@@ -1729,16 +1790,16 @@
// When using the interpreter, the function's code may now point to the
// InterpretCall stub. Make sure EAX, ECX, and EDX are preserved.
- __ movl(EBX, FieldAddress(EAX, Function::entry_point_offset()));
+ __ movl(EBX, FieldAddress(EAX, target::Function::entry_point_offset()));
__ jmp(EBX);
}
-void StubCode::GenerateInterpretCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateInterpretCallStub(Assembler* assembler) {
__ Unimplemented("Interpreter not yet supported");
}
// ECX: Contains an ICData.
-void StubCode::GenerateICCallBreakpointStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateICCallBreakpointStub(Assembler* assembler) {
__ EnterStubFrame();
// Save IC data.
__ pushl(ECX);
@@ -1750,11 +1811,11 @@
__ popl(ECX); // Restore IC data.
__ LeaveFrame();
// Jump to original stub.
- __ movl(EAX, FieldAddress(EAX, Code::entry_point_offset()));
+ __ movl(EAX, FieldAddress(EAX, target::Code::entry_point_offset()));
__ jmp(EAX);
}
-void StubCode::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
__ EnterStubFrame();
// Room for result. Debugger stub returns address of the
// unpatched runtime stub.
@@ -1763,19 +1824,19 @@
__ popl(EAX); // Code of the original stub
__ LeaveFrame();
// Jump to original stub.
- __ movl(EAX, FieldAddress(EAX, Code::entry_point_offset()));
+ __ movl(EAX, FieldAddress(EAX, target::Code::entry_point_offset()));
__ jmp(EAX);
}
// Called only from unoptimized code.
-void StubCode::GenerateDebugStepCheckStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDebugStepCheckStub(Assembler* assembler) {
#if defined(PRODUCT)
__ ret();
#else
// Check single stepping.
Label stepping, done_stepping;
__ LoadIsolate(EAX);
- __ movzxb(EAX, Address(EAX, Isolate::single_step_offset()));
+ __ movzxb(EAX, Address(EAX, target::Isolate::single_step_offset()));
__ cmpl(EAX, Immediate(0));
__ j(NOT_EQUAL, &stepping, Assembler::kNearJump);
__ Bind(&done_stepping);
@@ -1799,26 +1860,25 @@
static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
ASSERT(n == 1 || n == 2 || n == 4 || n == 6);
- static intptr_t kFunctionTypeArgumentsInBytes = 1 * kWordSize;
- static intptr_t kInstantiatorTypeArgumentsInBytes = 2 * kWordSize;
- static intptr_t kInstanceOffsetInBytes = 3 * kWordSize;
- static intptr_t kCacheOffsetInBytes = 4 * kWordSize;
+ static intptr_t kFunctionTypeArgumentsInBytes = 1 * target::kWordSize;
+ static intptr_t kInstantiatorTypeArgumentsInBytes = 2 * target::kWordSize;
+ static intptr_t kInstanceOffsetInBytes = 3 * target::kWordSize;
+ static intptr_t kCacheOffsetInBytes = 4 * target::kWordSize;
const Register kInstanceReg = EAX;
const Register kInstanceCidOrFunction = ECX;
const Register kInstanceInstantiatorTypeArgumentsReg = EBX;
- const Immediate& raw_null =
- Immediate(reinterpret_cast<intptr_t>(Object::null()));
+ const auto& raw_null = Immediate(target::ToRawPointer(NullObject()));
__ movl(kInstanceReg, Address(ESP, kInstanceOffsetInBytes));
// Loop initialization (moved up here to avoid having all dependent loads
// after each other)
__ movl(EDX, Address(ESP, kCacheOffsetInBytes));
- __ movl(EDX, FieldAddress(EDX, SubtypeTestCache::cache_offset()));
- __ addl(EDX, Immediate(Array::data_offset() - kHeapObjectTag));
+ __ movl(EDX, FieldAddress(EDX, target::SubtypeTestCache::cache_offset()));
+ __ addl(EDX, Immediate(target::Array::data_offset() - kHeapObjectTag));
Label loop, not_closure;
if (n >= 4) {
@@ -1832,16 +1892,17 @@
// Closure handling.
{
__ movl(kInstanceCidOrFunction,
- FieldAddress(kInstanceReg, Closure::function_offset()));
+ FieldAddress(kInstanceReg, target::Closure::function_offset()));
if (n >= 2) {
- __ movl(kInstanceInstantiatorTypeArgumentsReg,
- FieldAddress(kInstanceReg,
- Closure::instantiator_type_arguments_offset()));
+ __ movl(
+ kInstanceInstantiatorTypeArgumentsReg,
+ FieldAddress(kInstanceReg,
+ target::Closure::instantiator_type_arguments_offset()));
if (n >= 6) {
- __ pushl(FieldAddress(kInstanceReg,
- Closure::delayed_type_arguments_offset()));
- __ pushl(FieldAddress(kInstanceReg,
- Closure::function_type_arguments_offset()));
+ __ pushl(FieldAddress(
+ kInstanceReg, target::Closure::delayed_type_arguments_offset()));
+ __ pushl(FieldAddress(
+ kInstanceReg, target::Closure::function_type_arguments_offset()));
}
}
__ jmp(&loop, Assembler::kNearJump);
@@ -1854,10 +1915,12 @@
Label has_no_type_arguments;
__ LoadClassById(EDI, kInstanceCidOrFunction);
__ movl(kInstanceInstantiatorTypeArgumentsReg, raw_null);
- __ movl(EDI,
- FieldAddress(
- EDI, Class::type_arguments_field_offset_in_words_offset()));
- __ cmpl(EDI, Immediate(Class::kNoTypeArguments));
+ __ movl(
+ EDI,
+ FieldAddress(
+ EDI,
+ target::Class::type_arguments_field_offset_in_words_offset()));
+ __ cmpl(EDI, Immediate(target::Class::kNoTypeArguments));
__ j(EQUAL, &has_no_type_arguments, Assembler::kNearJump);
__ movl(kInstanceInstantiatorTypeArgumentsReg,
FieldAddress(kInstanceReg, EDI, TIMES_4, 0));
@@ -1872,15 +1935,18 @@
}
const intptr_t kInstanceParentFunctionTypeArgumentsFromSp = 0;
- const intptr_t kInstanceDelayedFunctionTypeArgumentsFromSp = kWordSize;
- const intptr_t args_offset = n >= 6 ? 2 * kWordSize : 0;
+ const intptr_t kInstanceDelayedFunctionTypeArgumentsFromSp =
+ target::kWordSize;
+ const intptr_t args_offset = n >= 6 ? 2 * target::kWordSize : 0;
Label found, not_found, next_iteration;
// Loop header.
__ Bind(&loop);
- __ movl(EDI, Address(EDX, kWordSize *
- SubtypeTestCache::kInstanceClassIdOrFunction));
+ __ movl(
+ EDI,
+ Address(EDX, target::kWordSize *
+ target::SubtypeTestCache::kInstanceClassIdOrFunction));
__ cmpl(EDI, raw_null);
__ j(EQUAL, &not_found, Assembler::kNearJump);
__ cmpl(EDI, kInstanceCidOrFunction);
@@ -1889,19 +1955,24 @@
} else {
__ j(NOT_EQUAL, &next_iteration, Assembler::kNearJump);
__ cmpl(kInstanceInstantiatorTypeArgumentsReg,
- Address(EDX, kWordSize * SubtypeTestCache::kInstanceTypeArguments));
+ Address(EDX, target::kWordSize *
+ target::SubtypeTestCache::kInstanceTypeArguments));
if (n == 2) {
__ j(EQUAL, &found, Assembler::kNearJump);
} else {
__ j(NOT_EQUAL, &next_iteration, Assembler::kNearJump);
- __ movl(EDI,
- Address(EDX, kWordSize *
- SubtypeTestCache::kInstantiatorTypeArguments));
+ __ movl(
+ EDI,
+ Address(EDX,
+ target::kWordSize *
+ target::SubtypeTestCache::kInstantiatorTypeArguments));
__ cmpl(EDI,
Address(ESP, args_offset + kInstantiatorTypeArgumentsInBytes));
__ j(NOT_EQUAL, &next_iteration, Assembler::kNearJump);
- __ movl(EDI, Address(EDX, kWordSize *
- SubtypeTestCache::kFunctionTypeArguments));
+ __ movl(
+ EDI,
+ Address(EDX, target::kWordSize *
+ target::SubtypeTestCache::kFunctionTypeArguments));
__ cmpl(EDI, Address(ESP, args_offset + kFunctionTypeArgumentsInBytes));
if (n == 4) {
__ j(EQUAL, &found, Assembler::kNearJump);
@@ -1909,31 +1980,29 @@
ASSERT(n == 6);
__ j(NOT_EQUAL, &next_iteration, Assembler::kNearJump);
- __ movl(
- EDI,
- Address(
- EDX,
- kWordSize *
- SubtypeTestCache::kInstanceParentFunctionTypeArguments));
+ __ movl(EDI,
+ Address(EDX, target::kWordSize *
+ target::SubtypeTestCache::
+ kInstanceParentFunctionTypeArguments));
__ cmpl(EDI, Address(ESP, kInstanceParentFunctionTypeArgumentsFromSp));
__ j(NOT_EQUAL, &next_iteration, Assembler::kNearJump);
- __ movl(
- EDI,
- Address(
- EDX,
- kWordSize *
- SubtypeTestCache::kInstanceDelayedFunctionTypeArguments));
+ __ movl(EDI,
+ Address(EDX, target::kWordSize *
+ target::SubtypeTestCache::
+ kInstanceDelayedFunctionTypeArguments));
__ cmpl(EDI, Address(ESP, kInstanceDelayedFunctionTypeArgumentsFromSp));
__ j(EQUAL, &found, Assembler::kNearJump);
}
}
}
__ Bind(&next_iteration);
- __ addl(EDX, Immediate(kWordSize * SubtypeTestCache::kTestEntryLength));
+ __ addl(EDX, Immediate(target::kWordSize *
+ target::SubtypeTestCache::kTestEntryLength));
__ jmp(&loop, Assembler::kNearJump);
__ Bind(&found);
- __ movl(ECX, Address(EDX, kWordSize * SubtypeTestCache::kTestResult));
+ __ movl(ECX, Address(EDX, target::kWordSize *
+ target::SubtypeTestCache::kTestResult));
if (n == 6) {
__ Drop(2);
}
@@ -1948,51 +2017,52 @@
}
// See comment on [GenerateSubtypeNTestCacheStub].
-void StubCode::GenerateSubtype1TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype1TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 1);
}
// See comment on [GenerateSubtypeNTestCacheStub].
-void StubCode::GenerateSubtype2TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype2TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 2);
}
// See comment on [GenerateSubtypeNTestCacheStub].
-void StubCode::GenerateSubtype4TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype4TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 4);
}
// See comment on [GenerateSubtypeNTestCacheStub].
-void StubCode::GenerateSubtype6TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype6TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 6);
}
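
All four Subtype*TestCacheStub entry points share the probe loop above: rows of kTestEntryLength words scanned until a null key. The one-argument case as a sketch (row width and slot indices stand in for the target::SubtypeTestCache constants):

    #include <cstdint>

    constexpr intptr_t kTestEntryLength = 7;            // assumed row width
    constexpr intptr_t kInstanceClassIdOrFunction = 0;  // assumed slot index
    constexpr intptr_t kTestResult = 6;                 // assumed slot index

    // Returns the cached result word, or 0 (raw null) on a miss.
    uintptr_t ProbeSubtypeTestCache(const uintptr_t* rows,
                                    uintptr_t cid_or_function) {
      for (intptr_t i = 0;; i += kTestEntryLength) {
        const uintptr_t key = rows[i + kInstanceClassIdOrFunction];
        if (key == 0) return 0;  // raw_null key terminates the cache
        if (key == cid_or_function) return rows[i + kTestResult];
      }
    }
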
-void StubCode::GenerateDefaultTypeTestStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDefaultTypeTestStub(Assembler* assembler) {
// Not implemented on ia32.
__ Breakpoint();
}
-void StubCode::GenerateTopTypeTypeTestStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateTopTypeTypeTestStub(Assembler* assembler) {
// Not implemented on ia32.
__ Breakpoint();
}
-void StubCode::GenerateTypeRefTypeTestStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateTypeRefTypeTestStub(Assembler* assembler) {
// Not implemented on ia32.
__ Breakpoint();
}
-void StubCode::GenerateUnreachableTypeTestStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateUnreachableTypeTestStub(Assembler* assembler) {
// Not implemented on ia32.
__ Breakpoint();
}
-void StubCode::GenerateLazySpecializeTypeTestStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateLazySpecializeTypeTestStub(
+ Assembler* assembler) {
// Not implemented on ia32.
__ Breakpoint();
}
-void StubCode::GenerateSlowTypeTestStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSlowTypeTestStub(Assembler* assembler) {
// Not implemented on ia32.
__ Breakpoint();
}
@@ -2000,8 +2070,8 @@
// Return the current stack pointer address, used to do stack alignment checks.
// TOS + 0: return address
// Result in EAX.
-void StubCode::GenerateGetCStackPointerStub(Assembler* assembler) {
- __ leal(EAX, Address(ESP, kWordSize));
+void StubCodeCompiler::GenerateGetCStackPointerStub(Assembler* assembler) {
+ __ leal(EAX, Address(ESP, target::kWordSize));
__ ret();
}
@@ -2012,15 +2082,19 @@
// TOS + 3: frame_pointer
// TOS + 4: thread
// No Result.
-void StubCode::GenerateJumpToFrameStub(Assembler* assembler) {
- __ movl(THR, Address(ESP, 4 * kWordSize)); // Load target thread.
- __ movl(EBP, Address(ESP, 3 * kWordSize)); // Load target frame_pointer.
- __ movl(EBX, Address(ESP, 1 * kWordSize)); // Load target PC into EBX.
- __ movl(ESP, Address(ESP, 2 * kWordSize)); // Load target stack_pointer.
+void StubCodeCompiler::GenerateJumpToFrameStub(Assembler* assembler) {
+ __ movl(THR, Address(ESP, 4 * target::kWordSize)); // Load target thread.
+ __ movl(EBP,
+ Address(ESP, 3 * target::kWordSize)); // Load target frame_pointer.
+ __ movl(EBX,
+ Address(ESP, 1 * target::kWordSize)); // Load target PC into EBX.
+ __ movl(ESP,
+ Address(ESP, 2 * target::kWordSize)); // Load target stack_pointer.
// Set tag.
__ movl(Assembler::VMTagAddress(), Immediate(VMTag::kDartCompiledTagId));
// Clear top exit frame.
- __ movl(Address(THR, Thread::top_exit_frame_info_offset()), Immediate(0));
+ __ movl(Address(THR, target::Thread::top_exit_frame_info_offset()),
+ Immediate(0));
__ jmp(EBX); // Jump to the exception handler code.
}
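
A hedged sketch of the C++ side of this handshake, with the typedef and entry-point names assumed for illustration; only the argument order (pc, sp, fp, thread) is taken from the TOS + 1..4 comment above. The runtime pushes those four words and calls the stub like an ordinary function that never returns:

  typedef void (*JumpToFrameFn)(uword pc, uword sp, uword fp, Thread* thread);

  void EnterHandler(uword pc, uword sp, uword fp, Thread* thread) {
    auto jump = reinterpret_cast<JumpToFrameFn>(jump_to_frame_entry_point);
    jump(pc, sp, fp, thread);  // does not return; jumps to the handler code
  }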
@@ -2028,21 +2102,21 @@
//
// The arguments are stored in the Thread object.
// No result.
-void StubCode::GenerateRunExceptionHandlerStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateRunExceptionHandlerStub(Assembler* assembler) {
ASSERT(kExceptionObjectReg == EAX);
ASSERT(kStackTraceObjectReg == EDX);
- __ movl(EBX, Address(THR, Thread::resume_pc_offset()));
+ __ movl(EBX, Address(THR, target::Thread::resume_pc_offset()));
- ASSERT(Thread::CanLoadFromThread(Object::null_object()));
- __ movl(ECX, Address(THR, Thread::OffsetFromThread(Object::null_object())));
+ ASSERT(target::CanLoadFromThread(NullObject()));
+ __ movl(ECX, Address(THR, target::Thread::OffsetFromThread(NullObject())));
// Load the exception from the current thread.
- Address exception_addr(THR, Thread::active_exception_offset());
+ Address exception_addr(THR, target::Thread::active_exception_offset());
__ movl(kExceptionObjectReg, exception_addr);
__ movl(exception_addr, ECX);
// Load the stacktrace from the current thread.
- Address stacktrace_addr(THR, Thread::active_stacktrace_offset());
+ Address stacktrace_addr(THR, target::Thread::active_stacktrace_offset());
__ movl(kStackTraceObjectReg, stacktrace_addr);
__ movl(stacktrace_addr, ECX);
@@ -2052,9 +2126,9 @@
// Deoptimize a frame on the call stack before rewinding.
// The arguments are stored in the Thread object.
// No result.
-void StubCode::GenerateDeoptForRewindStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) {
// Push the deopt pc.
- __ pushl(Address(THR, Thread::resume_pc_offset()));
+ __ pushl(Address(THR, target::Thread::resume_pc_offset()));
GenerateDeoptimizationSequence(assembler, kEagerDeopt);
// After we have deoptimized, jump to the correct frame.
@@ -2067,7 +2141,7 @@
// Calls to the runtime to optimize the given function.
// EBX: function to be reoptimized.
// EDX: argument descriptor (preserved).
-void StubCode::GenerateOptimizeFunctionStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) {
__ EnterStubFrame();
__ pushl(EDX);
__ pushl(Immediate(0)); // Setup space on stack for return value.
@@ -2077,8 +2151,8 @@
__ popl(EAX); // Get Function object
__ popl(EDX); // Restore argument descriptor.
__ LeaveFrame();
- __ movl(CODE_REG, FieldAddress(EAX, Function::code_offset()));
- __ movl(EAX, FieldAddress(EAX, Function::entry_point_offset()));
+ __ movl(CODE_REG, FieldAddress(EAX, target::Function::code_offset()));
+ __ movl(EAX, FieldAddress(EAX, target::Function::entry_point_offset()));
__ jmp(EAX);
__ int3();
}
@@ -2105,11 +2179,15 @@
__ j(NOT_EQUAL, &done, Assembler::kNearJump);
// Double values bitwise compare.
- __ movl(temp, FieldAddress(left, Double::value_offset() + 0 * kWordSize));
- __ cmpl(temp, FieldAddress(right, Double::value_offset() + 0 * kWordSize));
+ __ movl(temp, FieldAddress(left, target::Double::value_offset() +
+ 0 * target::kWordSize));
+ __ cmpl(temp, FieldAddress(right, target::Double::value_offset() +
+ 0 * target::kWordSize));
__ j(NOT_EQUAL, &done, Assembler::kNearJump);
- __ movl(temp, FieldAddress(left, Double::value_offset() + 1 * kWordSize));
- __ cmpl(temp, FieldAddress(right, Double::value_offset() + 1 * kWordSize));
+ __ movl(temp, FieldAddress(left, target::Double::value_offset() +
+ 1 * target::kWordSize));
+ __ cmpl(temp, FieldAddress(right, target::Double::value_offset() +
+ 1 * target::kWordSize));
__ jmp(&done, Assembler::kNearJump);
__ Bind(&check_mint);
@@ -2117,11 +2195,15 @@
__ j(NOT_EQUAL, &reference_compare, Assembler::kNearJump);
__ CompareClassId(right, kMintCid, temp);
__ j(NOT_EQUAL, &done, Assembler::kNearJump);
- __ movl(temp, FieldAddress(left, Mint::value_offset() + 0 * kWordSize));
- __ cmpl(temp, FieldAddress(right, Mint::value_offset() + 0 * kWordSize));
+ __ movl(temp, FieldAddress(left, target::Mint::value_offset() +
+ 0 * target::kWordSize));
+ __ cmpl(temp, FieldAddress(right, target::Mint::value_offset() +
+ 0 * target::kWordSize));
__ j(NOT_EQUAL, &done, Assembler::kNearJump);
- __ movl(temp, FieldAddress(left, Mint::value_offset() + 1 * kWordSize));
- __ cmpl(temp, FieldAddress(right, Mint::value_offset() + 1 * kWordSize));
+ __ movl(temp, FieldAddress(left, target::Mint::value_offset() +
+ 1 * target::kWordSize));
+ __ cmpl(temp, FieldAddress(right, target::Mint::value_offset() +
+ 1 * target::kWordSize));
__ jmp(&done, Assembler::kNearJump);
__ Bind(&reference_compare);
@@ -2134,13 +2216,13 @@
// TOS + 1: right argument.
// TOS + 2: left argument.
// Returns ZF set.
-void StubCode::GenerateUnoptimizedIdenticalWithNumberCheckStub(
+void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub(
Assembler* assembler) {
#if !defined(PRODUCT)
// Check single stepping.
Label stepping, done_stepping;
__ LoadIsolate(EAX);
- __ movzxb(EAX, Address(EAX, Isolate::single_step_offset()));
+ __ movzxb(EAX, Address(EAX, target::Isolate::single_step_offset()));
__ cmpl(EAX, Immediate(0));
__ j(NOT_EQUAL, &stepping);
__ Bind(&done_stepping);
@@ -2149,8 +2231,8 @@
const Register left = EAX;
const Register right = EDX;
const Register temp = ECX;
- __ movl(left, Address(ESP, 2 * kWordSize));
- __ movl(right, Address(ESP, 1 * kWordSize));
+ __ movl(left, Address(ESP, 2 * target::kWordSize));
+ __ movl(right, Address(ESP, 1 * target::kWordSize));
GenerateIdenticalWithNumberCheckStub(assembler, left, right, temp);
__ ret();
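
What the double and mint arms of the helper reduce to, in C terms: identity for boxed numbers is bitwise equality of the 64-bit payload, compared one 32-bit half at a time on ia32. A minimal sketch:

  // Sketch: `left`/`right` point at the 8-byte value field of a boxed
  // Double or Mint; identity holds iff both halves match bit-for-bit.
  bool PayloadIdentical(const uint32_t* left, const uint32_t* right) {
    return left[0] == right[0] && left[1] == right[1];
  }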
@@ -2168,24 +2250,24 @@
// TOS + 1: right argument.
// TOS + 2: left argument.
// Returns ZF set.
-void StubCode::GenerateOptimizedIdenticalWithNumberCheckStub(
+void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub(
Assembler* assembler) {
const Register left = EAX;
const Register right = EDX;
const Register temp = ECX;
- __ movl(left, Address(ESP, 2 * kWordSize));
- __ movl(right, Address(ESP, 1 * kWordSize));
+ __ movl(left, Address(ESP, 2 * target::kWordSize));
+ __ movl(right, Address(ESP, 1 * target::kWordSize));
GenerateIdenticalWithNumberCheckStub(assembler, left, right, temp);
__ ret();
}
// Called from megamorphic calls.
// EBX: receiver
-// ECX: MegamorphicCache (preserved)
+// ECX: target::MegamorphicCache (preserved)
// Passed to target:
// EBX: target entry point
// EDX: argument descriptor
-void StubCode::GenerateMegamorphicCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
// Jump if receiver is a smi.
Label smi_case;
// Check if object (in tmp) is a Smi.
@@ -2198,8 +2280,8 @@
Label cid_loaded;
__ Bind(&cid_loaded);
- __ movl(EBX, FieldAddress(ECX, MegamorphicCache::mask_offset()));
- __ movl(EDI, FieldAddress(ECX, MegamorphicCache::buckets_offset()));
+ __ movl(EBX, FieldAddress(ECX, target::MegamorphicCache::mask_offset()));
+ __ movl(EDI, FieldAddress(ECX, target::MegamorphicCache::buckets_offset()));
// EDI: cache buckets array.
// EBX: mask as a smi.
@@ -2207,7 +2289,7 @@
__ addl(EAX, EAX);
// Compute the table index.
- ASSERT(MegamorphicCache::kSpreadFactor == 7);
+ ASSERT(target::MegamorphicCache::kSpreadFactor == 7);
// Use leal and subl to multiply by 7 == 8 - 1.
__ leal(EDX, Address(EAX, TIMES_8, 0));
__ subl(EDX, EAX);
@@ -2216,7 +2298,7 @@
__ Bind(&loop);
__ andl(EDX, EBX);
- const intptr_t base = Array::data_offset();
+ const intptr_t base = target::Array::data_offset();
Label probe_failed;
// EDX is smi tagged, but table entries are two words, so TIMES_4.
__ cmpl(EAX, FieldAddress(EDI, EDX, TIMES_4, base));
@@ -2228,20 +2310,21 @@
// proper target for the given name and arguments descriptor. If the
// illegal class id was found, the target is a cache miss handler that can
// be invoked as a normal Dart function.
- __ movl(EAX, FieldAddress(EDI, EDX, TIMES_4, base + kWordSize));
+ __ movl(EAX, FieldAddress(EDI, EDX, TIMES_4, base + target::kWordSize));
__ movl(EDX,
- FieldAddress(ECX, MegamorphicCache::arguments_descriptor_offset()));
- __ movl(EBX, FieldAddress(EAX, Function::entry_point_offset()));
+ FieldAddress(
+ ECX, target::MegamorphicCache::arguments_descriptor_offset()));
+ __ movl(EBX, FieldAddress(EAX, target::Function::entry_point_offset()));
__ ret();
__ Bind(&probe_failed);
// Probe failed, check if it is a miss.
__ cmpl(FieldAddress(EDI, EDX, TIMES_4, base),
- Immediate(Smi::RawValue(kIllegalCid)));
+ Immediate(target::ToRawSmi(kIllegalCid)));
__ j(ZERO, &load_target, Assembler::kNearJump);
// Try next entry in the table.
- __ AddImmediate(EDX, Immediate(Smi::RawValue(1)));
+ __ AddImmediate(EDX, Immediate(target::ToRawSmi(1)));
__ jmp(&loop);
// Load cid for the Smi case.
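
The index computation above (the leal/subl pair multiplies by kSpreadFactor == 7, then the mask wraps into the table) plus the step-by-one retry amounts to the following probe loop, sketched in C++ with assumed helpers; the real stub does all of this on Smi-tagged values:

  // Two words per bucket: [0] holds the smi class id, [1] the target.
  // The kIllegalCid entry maps to the miss handler, so the scan terminates.
  intptr_t Probe(RawObject* const* buckets, intptr_t mask, intptr_t cid) {
    intptr_t i = (cid * 7) & mask;  // kSpreadFactor == 7 spreads collisions
    while (ClassIdOf(buckets[2 * i]) != cid &&
           ClassIdOf(buckets[2 * i]) != kIllegalCid) {
      i = (i + 1) & mask;  // linear probe, step 1
    }
    return i;  // caller loads the target (or miss handler) at [2 * i + 1]
  }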
@@ -2255,34 +2338,37 @@
// ECX: ICData (preserved)
// Passed to target:
// EDX: arguments descriptor
-void StubCode::GenerateICCallThroughFunctionStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateICCallThroughFunctionStub(Assembler* assembler) {
__ int3();
}
-void StubCode::GenerateICCallThroughCodeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
__ int3();
}
-void StubCode::GenerateUnlinkedCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateUnlinkedCallStub(Assembler* assembler) {
__ int3();
}
-void StubCode::GenerateSingleTargetCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSingleTargetCallStub(Assembler* assembler) {
__ int3();
}
-void StubCode::GenerateMonomorphicMissStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateMonomorphicMissStub(Assembler* assembler) {
__ int3();
}
-void StubCode::GenerateFrameAwaitingMaterializationStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateFrameAwaitingMaterializationStub(
+ Assembler* assembler) {
__ int3();
}
-void StubCode::GenerateAsynchronousGapMarkerStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAsynchronousGapMarkerStub(Assembler* assembler) {
__ int3();
}
+} // namespace compiler
+
} // namespace dart
#endif // defined(TARGET_ARCH_IA32) && !defined(DART_PRECOMPILED_RUNTIME)
diff --git a/runtime/vm/stub_code_x64.cc b/runtime/vm/compiler/stub_code_compiler_x64.cc
similarity index 68%
rename from runtime/vm/stub_code_x64.cc
rename to runtime/vm/compiler/stub_code_compiler_x64.cc
index 2d4bf0d..5997a40 100644
--- a/runtime/vm/stub_code_x64.cc
+++ b/runtime/vm/compiler/stub_code_compiler_x64.cc
@@ -1,27 +1,22 @@
-// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
+// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#include "vm/globals.h"
-#include "vm/stub_code.h"
+#define SHOULD_NOT_INCLUDE_RUNTIME
+
+#include "vm/compiler/stub_code_compiler.h"
#if defined(TARGET_ARCH_X64) && !defined(DART_PRECOMPILED_RUNTIME)
+#include "vm/class_id.h"
+#include "vm/code_entry_kind.h"
#include "vm/compiler/assembler/assembler.h"
-#include "vm/compiler/assembler/disassembler.h"
-#include "vm/compiler/backend/flow_graph_compiler.h"
-#include "vm/compiler/jit/compiler.h"
#include "vm/constants_x64.h"
-#include "vm/dart_entry.h"
-#include "vm/heap/heap.h"
-#include "vm/heap/scavenger.h"
#include "vm/instructions.h"
-#include "vm/object_store.h"
-#include "vm/resolver.h"
-#include "vm/stack_frame.h"
+#include "vm/static_type_exactness_state.h"
#include "vm/tags.h"
-#include "vm/type_testing_stubs.h"
#define __ assembler->
@@ -35,6 +30,8 @@
DECLARE_FLAG(bool, enable_interpreter);
DECLARE_FLAG(bool, precompiled_mode);
+namespace compiler {
+
// Input parameters:
// RSP : points to return address.
// RSP + 8 : address of last argument in argument array.
@@ -43,18 +40,19 @@
// RBX : address of the runtime function to call.
// R10 : number of arguments to the call.
// Must preserve callee saved registers R12 and R13.
-void StubCode::GenerateCallToRuntimeStub(Assembler* assembler) {
- const intptr_t thread_offset = NativeArguments::thread_offset();
- const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset();
- const intptr_t argv_offset = NativeArguments::argv_offset();
- const intptr_t retval_offset = NativeArguments::retval_offset();
+void StubCodeCompiler::GenerateCallToRuntimeStub(Assembler* assembler) {
+ const intptr_t thread_offset = target::NativeArguments::thread_offset();
+ const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
+ const intptr_t argv_offset = target::NativeArguments::argv_offset();
+ const intptr_t retval_offset = target::NativeArguments::retval_offset();
- __ movq(CODE_REG, Address(THR, Thread::call_to_runtime_stub_offset()));
+ __ movq(CODE_REG,
+ Address(THR, target::Thread::call_to_runtime_stub_offset()));
__ EnterStubFrame();
// Save exit frame information to enable stack walking as we are about
// to transition to Dart VM C++ code.
- __ movq(Address(THR, Thread::top_exit_frame_info_offset()), RBP);
+ __ movq(Address(THR, target::Thread::top_exit_frame_info_offset()), RBP);
#if defined(DEBUG)
{
@@ -72,23 +70,30 @@
__ movq(Assembler::VMTagAddress(), RBX);
// Reserve space for arguments and align frame before entering C++ world.
- __ subq(RSP, Immediate(sizeof(NativeArguments)));
+ __ subq(RSP, Immediate(target::NativeArguments::StructSize()));
if (OS::ActivationFrameAlignment() > 1) {
__ andq(RSP, Immediate(~(OS::ActivationFrameAlignment() - 1)));
}
- // Pass NativeArguments structure by value and call runtime.
+ // Pass target::NativeArguments structure by value and call runtime.
__ movq(Address(RSP, thread_offset), THR); // Set thread in NativeArgs.
// There are no runtime calls to closures, so we do not need to set the tag
// bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_.
- __ movq(Address(RSP, argc_tag_offset), R10); // Set argc in NativeArguments.
+ __ movq(Address(RSP, argc_tag_offset),
+ R10); // Set argc in target::NativeArguments.
// Compute argv.
- __ leaq(RAX, Address(RBP, R10, TIMES_8, kParamEndSlotFromFp * kWordSize));
- __ movq(Address(RSP, argv_offset), RAX); // Set argv in NativeArguments.
- __ addq(RAX, Immediate(1 * kWordSize)); // Retval is next to 1st argument.
- __ movq(Address(RSP, retval_offset), RAX); // Set retval in NativeArguments.
+ __ leaq(RAX,
+ Address(RBP, R10, TIMES_8,
+ target::frame_layout.param_end_from_fp * target::kWordSize));
+ __ movq(Address(RSP, argv_offset),
+ RAX); // Set argv in target::NativeArguments.
+ __ addq(RAX,
+ Immediate(1 * target::kWordSize)); // Retval is next to 1st argument.
+ __ movq(Address(RSP, retval_offset),
+ RAX); // Set retval in target::NativeArguments.
#if defined(_WIN64)
- ASSERT(sizeof(NativeArguments) > CallingConventions::kRegisterTransferLimit);
+ ASSERT(target::NativeArguments::StructSize() >
+ CallingConventions::kRegisterTransferLimit);
__ movq(CallingConventions::kArg1Reg, RSP);
#endif
__ CallCFunction(RBX);
@@ -97,12 +102,13 @@
__ movq(Assembler::VMTagAddress(), Immediate(VMTag::kDartCompiledTagId));
// Reset exit frame information in Thread structure.
- __ movq(Address(THR, Thread::top_exit_frame_info_offset()), Immediate(0));
+ __ movq(Address(THR, target::Thread::top_exit_frame_info_offset()),
+ Immediate(0));
// Restore the global object pool after returning from runtime (old space is
// moving, so the GOP could have been relocated).
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
- __ movq(PP, Address(THR, Thread::global_object_pool_offset()));
+ __ movq(PP, Address(THR, target::Thread::global_object_pool_offset()));
}
__ LeaveStubFrame();
@@ -116,11 +122,12 @@
__ ret();
}
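
For orientation, the four *_offset() constants used above describe a small struct the stub lays out by value at the bottom of the C frame before CallCFunction. Its shape, inferred from those accessors (the authoritative definition lives on the runtime side, which this file can no longer include):

  // Field order assumed from the offsets written above; retval sits one
  // word above argv, matching the "Retval is next to 1st argument" comment.
  struct NativeArguments {
    Thread* thread;      // target::NativeArguments::thread_offset()
    intptr_t argc_tag;   // argument count plus function-kind tag bits
    RawObject** argv;    // address of the last argument in the caller frame
    RawObject** retval;  // slot the callee stores the result into
  };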
-void StubCode::GenerateSharedStub(Assembler* assembler,
- bool save_fpu_registers,
- const RuntimeEntry* target,
- intptr_t self_code_stub_offset_from_thread,
- bool allow_return) {
+void StubCodeCompiler::GenerateSharedStub(
+ Assembler* assembler,
+ bool save_fpu_registers,
+ const RuntimeEntry* target,
+ intptr_t self_code_stub_offset_from_thread,
+ bool allow_return) {
// We want the saved registers to appear like part of the caller's frame, so
// we push them before calling EnterStubFrame.
__ PushRegisters(kDartAvailableCpuRegs,
@@ -130,14 +137,15 @@
Utils::CountOneBitsWord(kDartAvailableCpuRegs);
const intptr_t kSavedFpuRegisterSlots =
- save_fpu_registers ? kNumberOfFpuRegisters * kFpuRegisterSize / kWordSize
- : 0;
+ save_fpu_registers
+ ? kNumberOfFpuRegisters * kFpuRegisterSize / target::kWordSize
+ : 0;
const intptr_t kAllSavedRegistersSlots =
kSavedCpuRegisterSlots + kSavedFpuRegisterSlots;
// Copy down the return address so the stack layout is correct.
- __ pushq(Address(RSP, kAllSavedRegistersSlots * kWordSize));
+ __ pushq(Address(RSP, kAllSavedRegistersSlots * target::kWordSize));
__ movq(CODE_REG, Address(THR, self_code_stub_offset_from_thread));
@@ -161,26 +169,18 @@
// RBX: The extracted method.
// RDX: The type_arguments_field_offset (or 0)
-void StubCode::GenerateBuildMethodExtractorStub(Assembler* assembler) {
- Thread* thread = Thread::Current();
- Zone* Z = thread->zone();
- ObjectStore* object_store = thread->isolate()->object_store();
-
- const auto& closure_class =
- Class::ZoneHandle(Z, object_store->closure_class());
- const auto& closure_allocation_stub =
- Code::ZoneHandle(Z, StubCode::GetAllocationStubForClass(closure_class));
-
+void StubCodeCompiler::GenerateBuildMethodExtractorStub(
+ Assembler* assembler,
+ const Object& closure_allocation_stub,
+ const Object& context_allocation_stub) {
const intptr_t kReceiverOffsetInWords =
compiler::target::frame_layout.param_end_from_fp + 1;
- const auto& context_allocation_stub = StubCode::AllocateContext();
-
__ EnterStubFrame();
// Push type_arguments vector (or null)
Label no_type_args;
- __ movq(RCX, Address(THR, Thread::object_null_offset()));
+ __ movq(RCX, Address(THR, target::Thread::object_null_offset()));
__ cmpq(RDX, Immediate(0));
__ j(EQUAL, &no_type_args, Assembler::kNearJump);
__ movq(RAX,
@@ -195,21 +195,22 @@
// Allocate context.
{
Label done, slow_path;
- __ TryAllocateArray(kContextCid, Context::InstanceSize(1), &slow_path,
- Assembler::kFarJump,
+ __ TryAllocateArray(kContextCid, target::Context::InstanceSize(1),
+ &slow_path, Assembler::kFarJump,
RAX, // instance
RSI, // end address
RDI);
- __ movq(RSI, Address(THR, Thread::object_null_offset()));
- __ movq(FieldAddress(RAX, Context::parent_offset()), RSI);
- __ movq(FieldAddress(RAX, Context::num_variables_offset()), Immediate(1));
+ __ movq(RSI, Address(THR, target::Thread::object_null_offset()));
+ __ movq(FieldAddress(RAX, target::Context::parent_offset()), RSI);
+ __ movq(FieldAddress(RAX, target::Context::num_variables_offset()),
+ Immediate(1));
__ jmp(&done);
__ Bind(&slow_path);
__ LoadImmediate(/*num_vars=*/R10, Immediate(1));
__ LoadObject(CODE_REG, context_allocation_stub);
- __ call(FieldAddress(CODE_REG, Code::entry_point_offset()));
+ __ call(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
__ Bind(&done);
}
@@ -217,69 +218,75 @@
// Store receiver in context
__ movq(RSI,
Address(RBP, compiler::target::kWordSize * kReceiverOffsetInWords));
- __ StoreIntoObject(RAX, FieldAddress(RAX, Context::variable_offset(0)), RSI);
+ __ StoreIntoObject(
+ RAX, FieldAddress(RAX, target::Context::variable_offset(0)), RSI);
// Push context.
__ pushq(RAX);
// Allocate closure.
__ LoadObject(CODE_REG, closure_allocation_stub);
- __ call(FieldAddress(CODE_REG,
- Code::entry_point_offset(Code::EntryKind::kUnchecked)));
+ __ call(FieldAddress(
+ CODE_REG, target::Code::entry_point_offset(CodeEntryKind::kUnchecked)));
// Populate closure object.
__ popq(RCX); // Pop context.
- __ StoreIntoObject(RAX, FieldAddress(RAX, Closure::context_offset()), RCX);
+ __ StoreIntoObject(RAX, FieldAddress(RAX, target::Closure::context_offset()),
+ RCX);
__ popq(RCX); // Pop extracted method.
__ StoreIntoObjectNoBarrier(
- RAX, FieldAddress(RAX, Closure::function_offset()), RCX);
+ RAX, FieldAddress(RAX, target::Closure::function_offset()), RCX);
__ popq(RCX); // Pop type argument vector.
__ StoreIntoObjectNoBarrier(
- RAX, FieldAddress(RAX, Closure::instantiator_type_arguments_offset()),
+ RAX,
+ FieldAddress(RAX, target::Closure::instantiator_type_arguments_offset()),
RCX);
- __ LoadObject(RCX, Object::empty_type_arguments());
+ __ LoadObject(RCX, EmptyTypeArguments());
__ StoreIntoObjectNoBarrier(
- RAX, FieldAddress(RAX, Closure::delayed_type_arguments_offset()), RCX);
+ RAX, FieldAddress(RAX, target::Closure::delayed_type_arguments_offset()),
+ RCX);
__ LeaveStubFrame();
__ Ret();
}
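
Because the generator can no longer call into the runtime, the stub lookup that the deleted lines used to perform now has to happen on the caller's side. Reassembled from those removed lines plus the new two-stub signature, a caller would look roughly like this:

  // Sketch only; mirrors the deleted lookup, now done outside the compiler.
  Thread* thread = Thread::Current();
  Zone* zone = thread->zone();
  ObjectStore* object_store = thread->isolate()->object_store();
  const auto& closure_class =
      Class::ZoneHandle(zone, object_store->closure_class());
  const auto& closure_allocation_stub = Code::ZoneHandle(
      zone, StubCode::GetAllocationStubForClass(closure_class));
  const auto& context_allocation_stub = StubCode::AllocateContext();
  compiler::StubCodeCompiler::GenerateBuildMethodExtractorStub(
      assembler, closure_allocation_stub, context_allocation_stub);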
-void StubCode::GenerateNullErrorSharedWithoutFPURegsStub(Assembler* assembler) {
- GenerateSharedStub(assembler, /*save_fpu_registers=*/false,
- &kNullErrorRuntimeEntry,
- Thread::null_error_shared_without_fpu_regs_stub_offset(),
- /*allow_return=*/false);
+void StubCodeCompiler::GenerateNullErrorSharedWithoutFPURegsStub(
+ Assembler* assembler) {
+ GenerateSharedStub(
+ assembler, /*save_fpu_registers=*/false, &kNullErrorRuntimeEntry,
+ target::Thread::null_error_shared_without_fpu_regs_stub_offset(),
+ /*allow_return=*/false);
}
-void StubCode::GenerateNullErrorSharedWithFPURegsStub(Assembler* assembler) {
- GenerateSharedStub(assembler, /*save_fpu_registers=*/true,
- &kNullErrorRuntimeEntry,
- Thread::null_error_shared_with_fpu_regs_stub_offset(),
- /*allow_return=*/false);
+void StubCodeCompiler::GenerateNullErrorSharedWithFPURegsStub(
+ Assembler* assembler) {
+ GenerateSharedStub(
+ assembler, /*save_fpu_registers=*/true, &kNullErrorRuntimeEntry,
+ target::Thread::null_error_shared_with_fpu_regs_stub_offset(),
+ /*allow_return=*/false);
}
-void StubCode::GenerateStackOverflowSharedWithoutFPURegsStub(
+void StubCodeCompiler::GenerateStackOverflowSharedWithoutFPURegsStub(
Assembler* assembler) {
GenerateSharedStub(
assembler, /*save_fpu_registers=*/false, &kStackOverflowRuntimeEntry,
- Thread::stack_overflow_shared_without_fpu_regs_stub_offset(),
+ target::Thread::stack_overflow_shared_without_fpu_regs_stub_offset(),
/*allow_return=*/true);
}
-void StubCode::GenerateStackOverflowSharedWithFPURegsStub(
+void StubCodeCompiler::GenerateStackOverflowSharedWithFPURegsStub(
Assembler* assembler) {
- GenerateSharedStub(assembler, /*save_fpu_registers=*/true,
- &kStackOverflowRuntimeEntry,
- Thread::stack_overflow_shared_with_fpu_regs_stub_offset(),
- /*allow_return=*/true);
+ GenerateSharedStub(
+ assembler, /*save_fpu_registers=*/true, &kStackOverflowRuntimeEntry,
+ target::Thread::stack_overflow_shared_with_fpu_regs_stub_offset(),
+ /*allow_return=*/true);
}
// Input parameters:
// RSP : points to return address.
// RDI : stop message (const char*).
// Must preserve all registers.
-void StubCode::GeneratePrintStopMessageStub(Assembler* assembler) {
+void StubCodeCompiler::GeneratePrintStopMessageStub(Assembler* assembler) {
__ EnterCallRuntimeFrame(0);
// Call the runtime leaf function. RDI already contains the parameter.
#if defined(_WIN64)
@@ -300,19 +307,19 @@
Address wrapper_address) {
const intptr_t native_args_struct_offset = 0;
const intptr_t thread_offset =
- NativeArguments::thread_offset() + native_args_struct_offset;
+ target::NativeArguments::thread_offset() + native_args_struct_offset;
const intptr_t argc_tag_offset =
- NativeArguments::argc_tag_offset() + native_args_struct_offset;
+ target::NativeArguments::argc_tag_offset() + native_args_struct_offset;
const intptr_t argv_offset =
- NativeArguments::argv_offset() + native_args_struct_offset;
+ target::NativeArguments::argv_offset() + native_args_struct_offset;
const intptr_t retval_offset =
- NativeArguments::retval_offset() + native_args_struct_offset;
+ target::NativeArguments::retval_offset() + native_args_struct_offset;
__ EnterStubFrame();
// Save exit frame information to enable stack walking as we are about
// to transition to native code.
- __ movq(Address(THR, Thread::top_exit_frame_info_offset()), RBP);
+ __ movq(Address(THR, target::Thread::top_exit_frame_info_offset()), RBP);
#if defined(DEBUG)
{
@@ -332,19 +339,23 @@
// Reserve space for the native arguments structure passed on the stack (the
// outgoing pointer parameter to the native arguments structure is passed in
// RDI) and align frame before entering the C++ world.
- __ subq(RSP, Immediate(sizeof(NativeArguments)));
+ __ subq(RSP, Immediate(target::NativeArguments::StructSize()));
if (OS::ActivationFrameAlignment() > 1) {
__ andq(RSP, Immediate(~(OS::ActivationFrameAlignment() - 1)));
}
- // Pass NativeArguments structure by value and call native function.
- __ movq(Address(RSP, thread_offset), THR); // Set thread in NativeArgs.
- __ movq(Address(RSP, argc_tag_offset), R10); // Set argc in NativeArguments.
- __ movq(Address(RSP, argv_offset), RAX); // Set argv in NativeArguments.
- __ leaq(RAX, Address(RBP, 2 * kWordSize)); // Compute return value addr.
- __ movq(Address(RSP, retval_offset), RAX); // Set retval in NativeArguments.
+ // Pass target::NativeArguments structure by value and call native function.
+ __ movq(Address(RSP, thread_offset), THR); // Set thread in NativeArgs.
+ __ movq(Address(RSP, argc_tag_offset),
+ R10); // Set argc in target::NativeArguments.
+ __ movq(Address(RSP, argv_offset),
+ RAX); // Set argv in target::NativeArguments.
+ __ leaq(RAX,
+ Address(RBP, 2 * target::kWordSize)); // Compute return value addr.
+ __ movq(Address(RSP, retval_offset),
+ RAX); // Set retval in target::NativeArguments.
- // Pass the pointer to the NativeArguments.
+ // Pass the pointer to the target::NativeArguments.
__ movq(CallingConventions::kArg1Reg, RSP);
// Pass pointer to function entrypoint.
__ movq(CallingConventions::kArg2Reg, RBX);
@@ -356,22 +367,25 @@
__ movq(Assembler::VMTagAddress(), Immediate(VMTag::kDartCompiledTagId));
// Reset exit frame information in Thread structure.
- __ movq(Address(THR, Thread::top_exit_frame_info_offset()), Immediate(0));
+ __ movq(Address(THR, target::Thread::top_exit_frame_info_offset()),
+ Immediate(0));
__ LeaveStubFrame();
__ ret();
}
-void StubCode::GenerateCallNoScopeNativeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallNoScopeNativeStub(Assembler* assembler) {
GenerateCallNativeWithWrapperStub(
assembler,
- Address(THR, Thread::no_scope_native_wrapper_entry_point_offset()));
+ Address(THR,
+ target::Thread::no_scope_native_wrapper_entry_point_offset()));
}
-void StubCode::GenerateCallAutoScopeNativeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallAutoScopeNativeStub(Assembler* assembler) {
GenerateCallNativeWithWrapperStub(
assembler,
- Address(THR, Thread::auto_scope_native_wrapper_entry_point_offset()));
+ Address(THR,
+ target::Thread::auto_scope_native_wrapper_entry_point_offset()));
}
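
Both variants funnel into the same generator and differ only in which wrapper entry point they read off the Thread. The wrapper's shape, sketched with the public embedding types (the signature is assumed from the two registers the stub loads):

  // kArg1Reg receives &native_arguments (RSP), kArg2Reg the resolved native
  // function (RBX); the auto-scope flavor wraps the call in an API scope,
  // the no-scope flavor calls the function directly.
  typedef void (*NativeCallWrapper)(Dart_NativeArguments arguments,
                                    Dart_NativeFunction function);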
// Input parameters:
@@ -380,22 +394,22 @@
// RAX : address of first argument in argument array.
// RBX : address of the native function to call.
// R10 : argc_tag including number of arguments and function kind.
-void StubCode::GenerateCallBootstrapNativeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallBootstrapNativeStub(Assembler* assembler) {
const intptr_t native_args_struct_offset = 0;
const intptr_t thread_offset =
- NativeArguments::thread_offset() + native_args_struct_offset;
+ target::NativeArguments::thread_offset() + native_args_struct_offset;
const intptr_t argc_tag_offset =
- NativeArguments::argc_tag_offset() + native_args_struct_offset;
+ target::NativeArguments::argc_tag_offset() + native_args_struct_offset;
const intptr_t argv_offset =
- NativeArguments::argv_offset() + native_args_struct_offset;
+ target::NativeArguments::argv_offset() + native_args_struct_offset;
const intptr_t retval_offset =
- NativeArguments::retval_offset() + native_args_struct_offset;
+ target::NativeArguments::retval_offset() + native_args_struct_offset;
__ EnterStubFrame();
// Save exit frame information to enable stack walking as we are about
// to transition to native code.
- __ movq(Address(THR, Thread::top_exit_frame_info_offset()), RBP);
+ __ movq(Address(THR, target::Thread::top_exit_frame_info_offset()), RBP);
#if defined(DEBUG)
{
@@ -415,19 +429,23 @@
// Reserve space for the native arguments structure passed on the stack (the
// outgoing pointer parameter to the native arguments structure is passed in
// RDI) and align frame before entering the C++ world.
- __ subq(RSP, Immediate(sizeof(NativeArguments)));
+ __ subq(RSP, Immediate(target::NativeArguments::StructSize()));
if (OS::ActivationFrameAlignment() > 1) {
__ andq(RSP, Immediate(~(OS::ActivationFrameAlignment() - 1)));
}
- // Pass NativeArguments structure by value and call native function.
- __ movq(Address(RSP, thread_offset), THR); // Set thread in NativeArgs.
- __ movq(Address(RSP, argc_tag_offset), R10); // Set argc in NativeArguments.
- __ movq(Address(RSP, argv_offset), RAX); // Set argv in NativeArguments.
- __ leaq(RAX, Address(RBP, 2 * kWordSize)); // Compute return value addr.
- __ movq(Address(RSP, retval_offset), RAX); // Set retval in NativeArguments.
+ // Pass target::NativeArguments structure by value and call native function.
+ __ movq(Address(RSP, thread_offset), THR); // Set thread in NativeArgs.
+ __ movq(Address(RSP, argc_tag_offset),
+ R10); // Set argc in target::NativeArguments.
+ __ movq(Address(RSP, argv_offset),
+ RAX); // Set argv in target::NativeArguments.
+ __ leaq(RAX,
+ Address(RBP, 2 * target::kWordSize)); // Compute return value addr.
+ __ movq(Address(RSP, retval_offset),
+ RAX); // Set retval in target::NativeArguments.
- // Pass the pointer to the NativeArguments.
+ // Pass the pointer to the target::NativeArguments.
__ movq(CallingConventions::kArg1Reg, RSP);
__ CallCFunction(RBX);
@@ -435,7 +453,8 @@
__ movq(Assembler::VMTagAddress(), Immediate(VMTag::kDartCompiledTagId));
// Reset exit frame information in Thread structure.
- __ movq(Address(THR, Thread::top_exit_frame_info_offset()), Immediate(0));
+ __ movq(Address(THR, target::Thread::top_exit_frame_info_offset()),
+ Immediate(0));
__ LeaveStubFrame();
__ ret();
@@ -443,7 +462,7 @@
// Input parameters:
// R10: arguments descriptor array.
-void StubCode::GenerateCallStaticFunctionStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
__ EnterStubFrame();
__ pushq(R10); // Preserve arguments descriptor array.
// Setup space on stack for return value.
@@ -454,18 +473,19 @@
// Remove the stub frame as we are about to jump to the Dart function.
__ LeaveStubFrame();
- __ movq(RBX, FieldAddress(CODE_REG, Code::entry_point_offset()));
+ __ movq(RBX, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
__ jmp(RBX);
}
// Called from a static call only when an invalid code has been entered
// (invalid because its function was optimized or deoptimized).
// R10: arguments descriptor array.
-void StubCode::GenerateFixCallersTargetStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
// Load code pointer to this stub from the thread:
// The one that is passed in, is not correct - it points to the code object
// that needs to be replaced.
- __ movq(CODE_REG, Address(THR, Thread::fix_callers_target_code_offset()));
+ __ movq(CODE_REG,
+ Address(THR, target::Thread::fix_callers_target_code_offset()));
__ EnterStubFrame();
__ pushq(R10); // Preserve arguments descriptor array.
// Setup space on stack for return value.
@@ -473,7 +493,7 @@
__ CallRuntime(kFixCallersTargetRuntimeEntry, 0);
__ popq(CODE_REG); // Get Code object.
__ popq(R10); // Restore arguments descriptor array.
- __ movq(RAX, FieldAddress(CODE_REG, Code::entry_point_offset()));
+ __ movq(RAX, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
__ LeaveStubFrame();
__ jmp(RAX);
__ int3();
@@ -481,17 +501,19 @@
// Called from object allocate instruction when the allocation stub has been
// disabled.
-void StubCode::GenerateFixAllocationStubTargetStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateFixAllocationStubTargetStub(
+ Assembler* assembler) {
// Load code pointer to this stub from the thread:
// The one that is passed in, is not correct - it points to the code object
// that needs to be replaced.
- __ movq(CODE_REG, Address(THR, Thread::fix_allocation_stub_code_offset()));
+ __ movq(CODE_REG,
+ Address(THR, target::Thread::fix_allocation_stub_code_offset()));
__ EnterStubFrame();
// Setup space on stack for return value.
__ pushq(Immediate(0));
__ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0);
__ popq(CODE_REG); // Get Code object.
- __ movq(RAX, FieldAddress(CODE_REG, Code::entry_point_offset()));
+ __ movq(RAX, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
__ LeaveStubFrame();
__ jmp(RAX);
__ int3();
@@ -499,18 +521,20 @@
// Input parameters:
// R10: smi-tagged argument count, may be zero.
-// RBP[kParamEndSlotFromFp + 1]: last argument.
+// RBP[target::frame_layout.param_end_from_fp + 1]: last argument.
static void PushArrayOfArguments(Assembler* assembler) {
- __ LoadObject(R12, Object::null_object());
+ __ LoadObject(R12, NullObject());
// Allocate array to store arguments of caller.
__ movq(RBX, R12); // Null element type for raw Array.
- __ Call(StubCode::AllocateArray());
+ __ Call(StubCodeAllocateArray());
__ SmiUntag(R10);
// RAX: newly allocated array.
// R10: length of the array (was preserved by the stub).
__ pushq(RAX); // Array is in RAX and on top of stack.
- __ leaq(R12, Address(RBP, R10, TIMES_8, kParamEndSlotFromFp * kWordSize));
- __ leaq(RBX, FieldAddress(RAX, Array::data_offset()));
+ __ leaq(R12,
+ Address(RBP, R10, TIMES_8,
+ target::frame_layout.param_end_from_fp * target::kWordSize));
+ __ leaq(RBX, FieldAddress(RAX, target::Array::data_offset()));
// R12: address of first argument on stack.
// RBX: address of first argument in array.
Label loop, loop_condition;
@@ -524,8 +548,8 @@
__ movq(RDI, Address(R12, 0));
// Generational barrier is needed, array is not necessarily in new space.
__ StoreIntoObject(RAX, Address(RBX, 0), RDI);
- __ addq(RBX, Immediate(kWordSize));
- __ subq(R12, Immediate(kWordSize));
+ __ addq(RBX, Immediate(target::kWordSize));
+ __ subq(R12, Immediate(target::kWordSize));
__ Bind(&loop_condition);
__ decq(R10);
__ j(POSITIVE, &loop, Assembler::kNearJump);
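
In C terms the copy loop above walks the caller's argument slots downward while filling the new array upward, one word per iteration (index math assumed; fp and the slot counts are in words):

  // Sketch of the loop: src starts at the first (deepest) argument,
  // dst at the array's data payload; R10 plays the role of n.
  RawObject** src = fp + param_end_from_fp + argc;
  RawObject** dst = array_data;
  for (intptr_t n = argc; n > 0; --n) {
    *dst++ = *src--;  // the real stub uses a generational store barrier
  }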
@@ -582,7 +606,7 @@
if (i == CODE_REG) {
// Save the original value of CODE_REG pushed before invoking this stub
// instead of the value used to call this stub.
- __ pushq(Address(RBP, 2 * kWordSize));
+ __ pushq(Address(RBP, 2 * target::kWordSize));
} else {
__ pushq(static_cast<Register>(i));
}
@@ -606,11 +630,13 @@
if (kind == kLazyDeoptFromReturn) {
// Restore result into RBX temporarily.
- __ movq(RBX, Address(RBP, saved_result_slot_from_fp * kWordSize));
+ __ movq(RBX, Address(RBP, saved_result_slot_from_fp * target::kWordSize));
} else if (kind == kLazyDeoptFromThrow) {
// Restore result into RBX temporarily.
- __ movq(RBX, Address(RBP, saved_exception_slot_from_fp * kWordSize));
- __ movq(RDX, Address(RBP, saved_stacktrace_slot_from_fp * kWordSize));
+ __ movq(RBX,
+ Address(RBP, saved_exception_slot_from_fp * target::kWordSize));
+ __ movq(RDX,
+ Address(RBP, saved_stacktrace_slot_from_fp * target::kWordSize));
}
// There is a Dart Frame on the stack. We must restore PP and leave frame.
@@ -640,17 +666,17 @@
// Restore result into RBX.
__ movq(RBX,
Address(RBP, compiler::target::frame_layout.first_local_from_fp *
- kWordSize));
+ target::kWordSize));
} else if (kind == kLazyDeoptFromThrow) {
// Restore exception into RBX.
__ movq(RBX,
Address(RBP, compiler::target::frame_layout.first_local_from_fp *
- kWordSize));
+ target::kWordSize));
// Restore stacktrace into RDX.
__ movq(
RDX,
Address(RBP, (compiler::target::frame_layout.first_local_from_fp - 1) *
- kWordSize));
+ target::kWordSize));
}
// Code above cannot cause GC.
// There is a Dart Frame on the stack. We must restore PP and leave frame.
@@ -668,7 +694,7 @@
__ pushq(RBX); // Preserve exception.
__ pushq(RDX); // Preserve stacktrace.
}
- __ pushq(Immediate(Smi::RawValue(0))); // Space for the result.
+ __ pushq(Immediate(target::ToRawSmi(0))); // Space for the result.
__ CallRuntime(kDeoptimizeMaterializeRuntimeEntry, 0);
// Result tells stub how many bytes to remove from the expression stack
// of the bottom-most frame. They were used as materialization arguments.
@@ -689,33 +715,37 @@
}
// RAX: result, must be preserved
-void StubCode::GenerateDeoptimizeLazyFromReturnStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub(
+ Assembler* assembler) {
// Push zap value instead of CODE_REG for lazy deopt.
__ pushq(Immediate(kZapCodeReg));
// Return address for "call" to deopt stub.
__ pushq(Immediate(kZapReturnAddress));
- __ movq(CODE_REG, Address(THR, Thread::lazy_deopt_from_return_stub_offset()));
+ __ movq(CODE_REG,
+ Address(THR, target::Thread::lazy_deopt_from_return_stub_offset()));
GenerateDeoptimizationSequence(assembler, kLazyDeoptFromReturn);
__ ret();
}
// RAX: exception, must be preserved
// RDX: stacktrace, must be preserved
-void StubCode::GenerateDeoptimizeLazyFromThrowStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub(
+ Assembler* assembler) {
// Push zap value instead of CODE_REG for lazy deopt.
__ pushq(Immediate(kZapCodeReg));
// Return address for "call" to deopt stub.
__ pushq(Immediate(kZapReturnAddress));
- __ movq(CODE_REG, Address(THR, Thread::lazy_deopt_from_throw_stub_offset()));
+ __ movq(CODE_REG,
+ Address(THR, target::Thread::lazy_deopt_from_throw_stub_offset()));
GenerateDeoptimizationSequence(assembler, kLazyDeoptFromThrow);
__ ret();
}
-void StubCode::GenerateDeoptimizeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptimizeStub(Assembler* assembler) {
__ popq(TMP);
__ pushq(CODE_REG);
__ pushq(TMP);
- __ movq(CODE_REG, Address(THR, Thread::deoptimize_stub_offset()));
+ __ movq(CODE_REG, Address(THR, target::Thread::deoptimize_stub_offset()));
GenerateDeoptimizationSequence(assembler, kEagerDeopt);
__ ret();
}
@@ -725,25 +755,27 @@
__ Comment("NoSuchMethodDispatch");
// When lazily generated invocation dispatchers are disabled, the
// miss-handler may return null.
- __ CompareObject(RAX, Object::null_object());
+ __ CompareObject(RAX, NullObject());
__ j(NOT_EQUAL, call_target_function);
__ EnterStubFrame();
// Load the receiver.
- __ movq(RDI, FieldAddress(R10, ArgumentsDescriptor::count_offset()));
- __ movq(RAX, Address(RBP, RDI, TIMES_HALF_WORD_SIZE,
- kParamEndSlotFromFp * kWordSize));
+ __ movq(RDI, FieldAddress(R10, target::ArgumentsDescriptor::count_offset()));
+ __ movq(RAX,
+ Address(RBP, RDI, TIMES_HALF_WORD_SIZE,
+ target::frame_layout.param_end_from_fp * target::kWordSize));
__ pushq(Immediate(0)); // Setup space on stack for result.
__ pushq(RAX); // Receiver.
__ pushq(RBX); // ICData/MegamorphicCache.
__ pushq(R10); // Arguments descriptor array.
// Adjust arguments count.
- __ cmpq(FieldAddress(R10, ArgumentsDescriptor::type_args_len_offset()),
- Immediate(0));
+ __ cmpq(
+ FieldAddress(R10, target::ArgumentsDescriptor::type_args_len_offset()),
+ Immediate(0));
__ movq(R10, RDI);
Label args_count_ok;
__ j(EQUAL, &args_count_ok, Assembler::kNearJump);
- __ addq(R10, Immediate(Smi::RawValue(1))); // Include the type arguments.
+ __ addq(R10, Immediate(target::ToRawSmi(1))); // Include the type arguments.
__ Bind(&args_count_ok);
// R10: Smi-tagged arguments array length.
@@ -756,16 +788,16 @@
__ ret();
}
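
The count fixup in the dispatcher above mirrors the calling convention: when a call site passes type arguments, the vector occupies one extra leading slot in the arguments array, so the Smi count is bumped by one. In untagged arithmetic (accessor names as in the runtime's ArgumentsDescriptor, used here only as a sketch):

  intptr_t total_slots = args_desc.Count();
  if (args_desc.TypeArgsLen() != 0) {
    total_slots += 1;  // leading type-argument vector
  }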
-void StubCode::GenerateMegamorphicMissStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateMegamorphicMissStub(Assembler* assembler) {
__ EnterStubFrame();
// Load the receiver into RAX. The argument count in the arguments
// descriptor in R10 is a smi.
- __ movq(RAX, FieldAddress(R10, ArgumentsDescriptor::count_offset()));
+ __ movq(RAX, FieldAddress(R10, target::ArgumentsDescriptor::count_offset()));
// Three words (saved pp, saved fp, stub's pc marker)
// in the stack above the return address.
- __ movq(RAX,
- Address(RSP, RAX, TIMES_4,
- compiler::target::frame_layout.saved_below_pc() * kWordSize));
+ __ movq(RAX, Address(RSP, RAX, TIMES_4,
+ compiler::target::frame_layout.saved_below_pc() *
+ target::kWordSize));
// Preserve IC data and arguments descriptor.
__ pushq(RBX);
__ pushq(R10);
@@ -790,8 +822,8 @@
GenerateDispatcherCode(assembler, &call_target_function);
__ Bind(&call_target_function);
}
- __ movq(CODE_REG, FieldAddress(RAX, Function::code_offset()));
- __ movq(RCX, FieldAddress(RAX, Function::entry_point_offset()));
+ __ movq(CODE_REG, FieldAddress(RAX, target::Function::code_offset()));
+ __ movq(RCX, FieldAddress(RAX, target::Function::entry_point_offset()));
__ jmp(RCX);
}
@@ -801,11 +833,12 @@
// RBX : array element type (either NULL or an instantiated type).
// NOTE: R10 cannot be clobbered here as the caller relies on it being saved.
// The newly allocated object is returned in RAX.
-void StubCode::GenerateAllocateArrayStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
Label slow_case;
// Compute the size to be allocated; it is based on the array length
// and is computed as:
- // RoundedAllocationSize((array_length * kwordSize) + sizeof(RawArray)).
+ // RoundedAllocationSize(
+ // (array_length * target::kWordSize) + target::Array::header_size()).
__ movq(RDI, R10); // Array Length.
// Check that length is a positive Smi.
__ testq(RDI, Immediate(kSmiTagMask));
@@ -817,8 +850,8 @@
__ cmpq(RDI, Immediate(0));
__ j(LESS, &slow_case);
// Check for maximum allowed length.
- const Immediate& max_len = Immediate(
- reinterpret_cast<int64_t>(Smi::New(Array::kMaxNewSpaceElements)));
+ const Immediate& max_len =
+ Immediate(target::ToRawSmi(target::Array::kMaxNewSpaceElements));
__ cmpq(RDI, max_len);
__ j(GREATER, &slow_case);
@@ -827,14 +860,15 @@
__ MaybeTraceAllocation(kArrayCid, &slow_case, Assembler::kFarJump));
const intptr_t fixed_size_plus_alignment_padding =
- sizeof(RawArray) + kObjectAlignment - 1;
+ target::Array::header_size() + target::ObjectAlignment::kObjectAlignment -
+ 1;
// RDI is a Smi.
__ leaq(RDI, Address(RDI, TIMES_4, fixed_size_plus_alignment_padding));
ASSERT(kSmiTagShift == 1);
- __ andq(RDI, Immediate(-kObjectAlignment));
+ __ andq(RDI, Immediate(-target::ObjectAlignment::kObjectAlignment));
const intptr_t cid = kArrayCid;
- __ movq(RAX, Address(THR, Thread::top_offset()));
+ __ movq(RAX, Address(THR, target::Thread::top_offset()));
// RDI: allocation size.
__ movq(RCX, RAX);
@@ -845,12 +879,12 @@
// RAX: potential new object start.
// RCX: potential next object start.
// RDI: allocation size.
- __ cmpq(RCX, Address(THR, Thread::end_offset()));
+ __ cmpq(RCX, Address(THR, target::Thread::end_offset()));
__ j(ABOVE_EQUAL, &slow_case);
// Successfully allocated the object(s), now update top to point to
// next object start and initialize the object.
- __ movq(Address(THR, Thread::top_offset()), RCX);
+ __ movq(Address(THR, target::Thread::top_offset()), RCX);
__ addq(RAX, Immediate(kHeapObjectTag));
NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, RDI));
// Initialize the tags.
@@ -858,9 +892,10 @@
// RDI: allocation size.
{
Label size_tag_overflow, done;
- __ cmpq(RDI, Immediate(RawObject::SizeTag::kMaxSizeTag));
+ __ cmpq(RDI, Immediate(target::RawObject::kSizeTagMaxSizeTag));
__ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
- __ shlq(RDI, Immediate(RawObject::kSizeTagPos - kObjectAlignmentLog2));
+ __ shlq(RDI, Immediate(target::RawObject::kTagBitsSizeTagPos -
+ target::ObjectAlignment::kObjectAlignmentLog2));
__ jmp(&done, Assembler::kNearJump);
__ Bind(&size_tag_overflow);
@@ -868,30 +903,28 @@
__ Bind(&done);
// Get the class index and insert it into the tags.
- uint32_t tags = 0;
- tags = RawObject::ClassIdTag::update(cid, tags);
- tags = RawObject::NewBit::update(true, tags);
+ uint32_t tags = target::MakeTagWordForNewSpaceObject(cid, 0);
__ orq(RDI, Immediate(tags));
- __ movq(FieldAddress(RAX, Array::tags_offset()), RDI); // Tags.
+ __ movq(FieldAddress(RAX, target::Array::tags_offset()), RDI); // Tags.
}
// RAX: new object start as a tagged pointer.
// Store the type argument field.
// No generational barrier needed, since we store into a new object.
__ StoreIntoObjectNoBarrier(
- RAX, FieldAddress(RAX, Array::type_arguments_offset()), RBX);
+ RAX, FieldAddress(RAX, target::Array::type_arguments_offset()), RBX);
// Set the length field.
- __ StoreIntoObjectNoBarrier(RAX, FieldAddress(RAX, Array::length_offset()),
- R10);
+ __ StoreIntoObjectNoBarrier(
+ RAX, FieldAddress(RAX, target::Array::length_offset()), R10);
// Initialize all array elements to raw_null.
// RAX: new object start as a tagged pointer.
// RCX: new object end address.
// RDI: iterator which initially points to the start of the variable
// data area to be initialized.
- __ LoadObject(R12, Object::null_object());
- __ leaq(RDI, FieldAddress(RAX, sizeof(RawArray)));
+ __ LoadObject(R12, NullObject());
+ __ leaq(RDI, FieldAddress(RAX, target::Array::header_size()));
Label done;
Label init_loop;
__ Bind(&init_loop);
@@ -904,7 +937,7 @@
__ j(ABOVE_EQUAL, &done, kJumpLength);
// No generational barrier needed, since we are storing null.
__ StoreIntoObjectNoBarrier(RAX, Address(RDI, 0), R12);
- __ addq(RDI, Immediate(kWordSize));
+ __ addq(RDI, Immediate(target::kWordSize));
__ jmp(&init_loop, kJumpLength);
__ Bind(&done);
__ ret(); // returns the newly allocated object in RAX.
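
Two details in the fast path above are worth spelling out. The length register still holds a Smi (the value shifted left by one), which is why the leaq scales it with TIMES_4 rather than TIMES_8 to get bytes on x64, and the final size is rounded up to the object alignment. The same computation in plain C++:

  // Matches the leaq/andq pair above; `length` is the untagged array length.
  intptr_t AllocationSize(intptr_t length) {
    const intptr_t fixed = target::Array::header_size() +
                           target::ObjectAlignment::kObjectAlignment - 1;
    const intptr_t size = length * target::kWordSize + fixed;
    return size & ~(target::ObjectAlignment::kObjectAlignment - 1);
  }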
@@ -934,7 +967,7 @@
// RSI : arguments descriptor array.
// RDX : arguments array.
// RCX : current thread.
-void StubCode::GenerateInvokeDartCodeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateInvokeDartCodeStub(Assembler* assembler) {
__ pushq(Address(RSP, 0)); // Marker for the profiler.
__ EnterFrame(0);
@@ -944,7 +977,7 @@
const Register kThreadReg = CallingConventions::kArg4Reg;
// Push code object to PC marker slot.
- __ pushq(Address(kThreadReg, Thread::invoke_dart_code_stub_offset()));
+ __ pushq(Address(kThreadReg, target::Thread::invoke_dart_code_stub_offset()));
// At this point, the stack looks like:
// | stub code object
@@ -953,7 +986,7 @@
const intptr_t kInitialOffset = 2;
// Save arguments descriptor array, later replaced by Smi argument count.
- const intptr_t kArgumentsDescOffset = -(kInitialOffset)*kWordSize;
+ const intptr_t kArgumentsDescOffset = -(kInitialOffset)*target::kWordSize;
__ pushq(kArgDescReg);
// Save C++ ABI callee-saved registers.
@@ -961,7 +994,7 @@
CallingConventions::kCalleeSaveXmmRegisters);
// If any additional (or fewer) values are pushed, the offsets in
- // kExitLinkSlotFromEntryFp will need to be changed.
+ // target::frame_layout.exit_link_slot_from_entry_fp will need to be changed.
// Set up THR, which caches the current thread in Dart code.
if (THR != kThreadReg) {
@@ -974,26 +1007,29 @@
// Save top resource and top exit frame info. Use RAX as a temporary register.
// StackFrameIterator reads the top exit frame info saved in this frame.
- __ movq(RAX, Address(THR, Thread::top_resource_offset()));
+ __ movq(RAX, Address(THR, target::Thread::top_resource_offset()));
__ pushq(RAX);
- __ movq(Address(THR, Thread::top_resource_offset()), Immediate(0));
- __ movq(RAX, Address(THR, Thread::top_exit_frame_info_offset()));
+ __ movq(Address(THR, target::Thread::top_resource_offset()), Immediate(0));
+ __ movq(RAX, Address(THR, target::Thread::top_exit_frame_info_offset()));
__ pushq(RAX);
-// The constant kExitLinkSlotFromEntryFp must be kept in sync with the
-// code below.
+ // The constant target::frame_layout.exit_link_slot_from_entry_fp must be kept
+ // in sync with the code below.
#if defined(DEBUG)
{
Label ok;
- __ leaq(RAX, Address(RBP, kExitLinkSlotFromEntryFp * kWordSize));
+ __ leaq(RAX,
+ Address(RBP, target::frame_layout.exit_link_slot_from_entry_fp *
+ target::kWordSize));
__ cmpq(RAX, RSP);
__ j(EQUAL, &ok);
- __ Stop("kExitLinkSlotFromEntryFp mismatch");
+ __ Stop("target::frame_layout.exit_link_slot_from_entry_fp mismatch");
__ Bind(&ok);
}
#endif
- __ movq(Address(THR, Thread::top_exit_frame_info_offset()), Immediate(0));
+ __ movq(Address(THR, target::Thread::top_exit_frame_info_offset()),
+ Immediate(0));
// Mark that the thread is executing Dart code. Do this after initializing the
// exit link for the profiler.
@@ -1006,12 +1042,13 @@
ASSERT(kTargetCodeReg != RDX);
// Load number of arguments into RBX and adjust count for type arguments.
- __ movq(RBX, FieldAddress(R10, ArgumentsDescriptor::count_offset()));
- __ cmpq(FieldAddress(R10, ArgumentsDescriptor::type_args_len_offset()),
- Immediate(0));
+ __ movq(RBX, FieldAddress(R10, target::ArgumentsDescriptor::count_offset()));
+ __ cmpq(
+ FieldAddress(R10, target::ArgumentsDescriptor::type_args_len_offset()),
+ Immediate(0));
Label args_count_ok;
__ j(EQUAL, &args_count_ok, Assembler::kNearJump);
- __ addq(RBX, Immediate(Smi::RawValue(1))); // Include the type arguments.
+ __ addq(RBX, Immediate(target::ToRawSmi(1))); // Include the type arguments.
__ Bind(&args_count_ok);
// Save number of arguments as Smi on stack, replacing saved ArgumentsDesc.
__ movq(Address(RBP, kArgumentsDescOffset), RBX);
@@ -1019,7 +1056,7 @@
// Compute address of 'arguments array' data area into RDX.
__ movq(RDX, Address(kArgsReg, VMHandles::kOffsetOfRawPtrInHandle));
- __ leaq(RDX, FieldAddress(RDX, Array::data_offset()));
+ __ leaq(RDX, FieldAddress(RDX, target::Array::data_offset()));
// Set up arguments for the Dart call.
Label push_arguments;
@@ -1035,13 +1072,14 @@
// Call the Dart code entrypoint.
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
- __ movq(PP, Address(THR, Thread::global_object_pool_offset()));
+ __ movq(PP, Address(THR, target::Thread::global_object_pool_offset()));
} else {
__ xorq(PP, PP); // GC-safe value into PP.
}
__ movq(CODE_REG,
Address(kTargetCodeReg, VMHandles::kOffsetOfRawPtrInHandle));
- __ movq(kTargetCodeReg, FieldAddress(CODE_REG, Code::entry_point_offset()));
+ __ movq(kTargetCodeReg,
+ FieldAddress(CODE_REG, target::Code::entry_point_offset()));
__ call(kTargetCodeReg); // R10 is the arguments descriptor array.
// Read the saved number of passed arguments as Smi.
@@ -1052,8 +1090,8 @@
// Restore the saved top exit frame info and top resource back into the
// Thread structure.
- __ popq(Address(THR, Thread::top_exit_frame_info_offset()));
- __ popq(Address(THR, Thread::top_resource_offset()));
+ __ popq(Address(THR, target::Thread::top_exit_frame_info_offset()));
+ __ popq(Address(THR, target::Thread::top_resource_offset()));
// Restore the current VMTag from the stack.
__ popq(Assembler::VMTagAddress());
@@ -1077,7 +1115,8 @@
// RSI : arguments raw descriptor array.
// RDX : address of first argument.
// RCX : current thread.
-void StubCode::GenerateInvokeDartCodeFromBytecodeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateInvokeDartCodeFromBytecodeStub(
+ Assembler* assembler) {
#if defined(DART_PRECOMPILED_RUNTIME)
__ Stop("Not using interpreter");
#else
@@ -1090,8 +1129,9 @@
const Register kThreadReg = CallingConventions::kArg4Reg;
// Push code object to PC marker slot.
- __ pushq(Address(kThreadReg,
- Thread::invoke_dart_code_from_bytecode_stub_offset()));
+ __ pushq(
+ Address(kThreadReg,
+ target::Thread::invoke_dart_code_from_bytecode_stub_offset()));
// At this point, the stack looks like:
// | stub code object
@@ -1100,7 +1140,7 @@
const intptr_t kInitialOffset = 2;
// Save arguments descriptor array, later replaced by Smi argument count.
- const intptr_t kArgumentsDescOffset = -(kInitialOffset)*kWordSize;
+ const intptr_t kArgumentsDescOffset = -(kInitialOffset)*target::kWordSize;
__ pushq(kArgDescReg);
// Save C++ ABI callee-saved registers.
@@ -1108,7 +1148,7 @@
CallingConventions::kCalleeSaveXmmRegisters);
// If any additional (or fewer) values are pushed, the offsets in
- // kExitLinkSlotFromEntryFp will need to be changed.
+ // target::frame_layout.exit_link_slot_from_entry_fp will need to be changed.
// Set up THR, which caches the current thread in Dart code.
if (THR != kThreadReg) {
@@ -1121,22 +1161,25 @@
// Save top resource and top exit frame info. Use RAX as a temporary register.
// StackFrameIterator reads the top exit frame info saved in this frame.
- __ movq(RAX, Address(THR, Thread::top_resource_offset()));
+ __ movq(RAX, Address(THR, target::Thread::top_resource_offset()));
__ pushq(RAX);
- __ movq(Address(THR, Thread::top_resource_offset()), Immediate(0));
- __ movq(RAX, Address(THR, Thread::top_exit_frame_info_offset()));
+ __ movq(Address(THR, target::Thread::top_resource_offset()), Immediate(0));
+ __ movq(RAX, Address(THR, target::Thread::top_exit_frame_info_offset()));
__ pushq(RAX);
- __ movq(Address(THR, Thread::top_exit_frame_info_offset()), Immediate(0));
+ __ movq(Address(THR, target::Thread::top_exit_frame_info_offset()),
+ Immediate(0));
-// The constant kExitLinkSlotFromEntryFp must be kept in sync with the
-// code below.
+ // The constant target::frame_layout.exit_link_slot_from_entry_fp must be kept
+ // in sync with the code below.
#if defined(DEBUG)
{
Label ok;
- __ leaq(RAX, Address(RBP, kExitLinkSlotFromEntryFp * kWordSize));
+ __ leaq(RAX,
+ Address(RBP, target::frame_layout.exit_link_slot_from_entry_fp *
+ target::kWordSize));
__ cmpq(RAX, RSP);
__ j(EQUAL, &ok);
- __ Stop("kExitLinkSlotFromEntryFp mismatch");
+ __ Stop("target::frame_layout.exit_link_slot_from_entry_fp mismatch");
__ Bind(&ok);
}
#endif
@@ -1152,12 +1195,13 @@
ASSERT(kTargetCodeReg != RDX);
// Load number of arguments into RBX and adjust count for type arguments.
- __ movq(RBX, FieldAddress(R10, ArgumentsDescriptor::count_offset()));
- __ cmpq(FieldAddress(R10, ArgumentsDescriptor::type_args_len_offset()),
- Immediate(0));
+ __ movq(RBX, FieldAddress(R10, target::ArgumentsDescriptor::count_offset()));
+ __ cmpq(
+ FieldAddress(R10, target::ArgumentsDescriptor::type_args_len_offset()),
+ Immediate(0));
Label args_count_ok;
__ j(EQUAL, &args_count_ok, Assembler::kNearJump);
- __ addq(RBX, Immediate(Smi::RawValue(1))); // Include the type arguments.
+ __ addq(RBX, Immediate(target::ToRawSmi(1))); // Include the type arguments.
__ Bind(&args_count_ok);
// Save number of arguments as Smi on stack, replacing saved ArgumentsDesc.
__ movq(Address(RBP, kArgumentsDescOffset), RBX);
@@ -1183,7 +1227,8 @@
// Call the Dart code entrypoint.
__ xorq(PP, PP); // GC-safe value into PP.
__ movq(CODE_REG, kTargetCodeReg);
- __ movq(kTargetCodeReg, FieldAddress(CODE_REG, Code::entry_point_offset()));
+ __ movq(kTargetCodeReg,
+ FieldAddress(CODE_REG, target::Code::entry_point_offset()));
__ call(kTargetCodeReg); // R10 is the arguments descriptor array.
// Read the saved number of passed arguments as Smi.
@@ -1194,8 +1239,8 @@
// Restore the saved top exit frame info and top resource back into the
// Thread structure.
- __ popq(Address(THR, Thread::top_exit_frame_info_offset()));
- __ popq(Address(THR, Thread::top_resource_offset()));
+ __ popq(Address(THR, target::Thread::top_exit_frame_info_offset()));
+ __ popq(Address(THR, target::Thread::top_resource_offset()));
// Restore the current VMTag from the stack.
__ popq(Assembler::VMTagAddress());
@@ -1218,16 +1263,17 @@
// R10: number of context variables.
// Output:
// RAX: new allocated RawContext object.
-void StubCode::GenerateAllocateContextStub(Assembler* assembler) {
- __ LoadObject(R9, Object::null_object());
+void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
+ __ LoadObject(R9, NullObject());
if (FLAG_inline_alloc) {
Label slow_case;
// First compute the rounded instance size.
// R10: number of context variables.
intptr_t fixed_size_plus_alignment_padding =
- (sizeof(RawContext) + kObjectAlignment - 1);
+ (target::Context::header_size() +
+ target::ObjectAlignment::kObjectAlignment - 1);
__ leaq(R13, Address(R10, TIMES_8, fixed_size_plus_alignment_padding));
- __ andq(R13, Immediate(-kObjectAlignment));
+ __ andq(R13, Immediate(-target::ObjectAlignment::kObjectAlignment));
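+ // Illustrative arithmetic, assuming 16-byte object alignment: for three
+ // context variables this computes (header_size + 3 * 8 + 15) & ~15, i.e.
+ // the instance size rounded up to the next alignment boundary.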
// Check for allocation tracing.
NOT_IN_PRODUCT(
@@ -1236,13 +1282,13 @@
// Now allocate the object.
// R10: number of context variables.
const intptr_t cid = kContextCid;
- __ movq(RAX, Address(THR, Thread::top_offset()));
+ __ movq(RAX, Address(THR, target::Thread::top_offset()));
__ addq(R13, RAX);
// Check if the allocation fits into the remaining space.
// RAX: potential new object.
// R13: potential next object start.
// R10: number of context variables.
- __ cmpq(R13, Address(THR, Thread::end_offset()));
+ __ cmpq(R13, Address(THR, target::Thread::end_offset()));
if (FLAG_use_slow_path) {
__ jmp(&slow_case);
} else {
@@ -1254,7 +1300,7 @@
// RAX: new object.
// R13: next object start.
// R10: number of context variables.
- __ movq(Address(THR, Thread::top_offset()), R13);
+ __ movq(Address(THR, target::Thread::top_offset()), R13);
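+ // Roughly, the fast path above is (illustrative sketch only):
+ //   new_top = thread->top_ + rounded_size;
+ //   if (new_top exceeds thread->end_) goto slow_case;
+ //   object = thread->top_; thread->top_ = new_top;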
// R13: Size of allocation in bytes.
__ subq(R13, RAX);
__ addq(RAX, Immediate(kHeapObjectTag));
@@ -1267,10 +1313,11 @@
{
Label size_tag_overflow, done;
__ leaq(R13, Address(R10, TIMES_8, fixed_size_plus_alignment_padding));
- __ andq(R13, Immediate(-kObjectAlignment));
- __ cmpq(R13, Immediate(RawObject::SizeTag::kMaxSizeTag));
+ __ andq(R13, Immediate(-target::ObjectAlignment::kObjectAlignment));
+ __ cmpq(R13, Immediate(target::RawObject::kSizeTagMaxSizeTag));
__ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
- __ shlq(R13, Immediate(RawObject::kSizeTagPos - kObjectAlignmentLog2));
+ __ shlq(R13, Immediate(target::RawObject::kTagBitsSizeTagPos -
+ target::ObjectAlignment::kObjectAlignmentLog2));
__ jmp(&done);
__ Bind(&size_tag_overflow);
@@ -1281,31 +1328,29 @@
// RAX: new object.
// R10: number of context variables.
// R13: size and bit tags.
- uint32_t tags = 0;
- tags = RawObject::ClassIdTag::update(cid, tags);
- tags = RawObject::NewBit::update(true, tags);
+ uint32_t tags = target::MakeTagWordForNewSpaceObject(cid, 0);
__ orq(R13, Immediate(tags));
- __ movq(FieldAddress(RAX, Context::tags_offset()), R13); // Tags.
+ __ movq(FieldAddress(RAX, target::Object::tags_offset()), R13); // Tags.
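+ // MakeTagWordForNewSpaceObject(cid, 0) packs the class id and the
+ // new-space GC bits with a zero size tag; the orq above then merges in the
+ // size tag that was computed into R13.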
}
// Set up the number of context variables field.
// RAX: new object.
// R10: number of context variables as integer value (not object).
- __ movq(FieldAddress(RAX, Context::num_variables_offset()), R10);
+ __ movq(FieldAddress(RAX, target::Context::num_variables_offset()), R10);
// Set up the parent field.
// RAX: new object.
// R10: number of context variables.
// No generational barrier needed, since we are storing null.
__ StoreIntoObjectNoBarrier(
- RAX, FieldAddress(RAX, Context::parent_offset()), R9);
+ RAX, FieldAddress(RAX, target::Context::parent_offset()), R9);
// Initialize the context variables.
// RAX: new object.
// R10: number of context variables.
{
Label loop, entry;
- __ leaq(R13, FieldAddress(RAX, Context::variable_offset(0)));
+ __ leaq(R13, FieldAddress(RAX, target::Context::variable_offset(0)));
#if defined(DEBUG)
static const bool kJumpLength = Assembler::kFarJump;
#else
@@ -1341,7 +1386,7 @@
__ ret();
}
-void StubCode::GenerateWriteBarrierWrappersStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateWriteBarrierWrappersStub(Assembler* assembler) {
for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
@@ -1349,7 +1394,7 @@
intptr_t start = __ CodeSize();
__ pushq(kWriteBarrierObjectReg);
__ movq(kWriteBarrierObjectReg, reg);
- __ call(Address(THR, Thread::write_barrier_entry_point_offset()));
+ __ call(Address(THR, target::Thread::write_barrier_entry_point_offset()));
__ popq(kWriteBarrierObjectReg);
__ ret();
intptr_t end = __ CodeSize();
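// Each wrapper just shuffles its register into kWriteBarrierObjectReg and
// calls the shared barrier through THR, so the barrier can be invoked with
// the object in any allocatable register; start/end are presumably recorded
// so that all wrappers can be checked to have the same fixed stride.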
@@ -1372,18 +1417,18 @@
Address stub_code,
bool cards) {
Label add_to_mark_stack, remember_card;
- __ testq(RAX, Immediate(1 << kNewObjectBitPosition));
+ __ testq(RAX, Immediate(1 << target::ObjectAlignment::kNewObjectBitPosition));
__ j(ZERO, &add_to_mark_stack);
if (cards) {
- __ movl(TMP, FieldAddress(RDX, Object::tags_offset()));
- __ testl(TMP, Immediate(1 << RawObject::kCardRememberedBit));
+ __ movl(TMP, FieldAddress(RDX, target::Object::tags_offset()));
+ __ testl(TMP, Immediate(1 << target::RawObject::kCardRememberedBit));
__ j(NOT_ZERO, &remember_card, Assembler::kFarJump);
} else {
#if defined(DEBUG)
Label ok;
- __ movl(TMP, FieldAddress(RDX, Object::tags_offset()));
- __ testl(TMP, Immediate(1 << RawObject::kCardRememberedBit));
+ __ movl(TMP, FieldAddress(RDX, target::Object::tags_offset()));
+ __ testl(TMP, Immediate(1 << target::RawObject::kCardRememberedBit));
__ j(ZERO, &ok, Assembler::kFarJump);
__ Stop("Wrong barrier");
__ Bind(&ok);
@@ -1397,8 +1442,8 @@
// RAX: Current tag value
// lock+andl is an atomic read-modify-write.
__ lock();
- __ andl(FieldAddress(RDX, Object::tags_offset()),
- Immediate(~(1 << RawObject::kOldAndNotRememberedBit)));
+ __ andl(FieldAddress(RDX, target::Object::tags_offset()),
+ Immediate(~(1 << target::RawObject::kOldAndNotRememberedBit)));
// Save registers being destroyed.
__ pushq(RAX);
@@ -1407,17 +1452,19 @@
// Load the StoreBuffer block out of the thread. Then load top_ out of the
// StoreBufferBlock and add the address to the pointers_.
// RDX: Address being stored
- __ movq(RAX, Address(THR, Thread::store_buffer_block_offset()));
- __ movl(RCX, Address(RAX, StoreBufferBlock::top_offset()));
- __ movq(Address(RAX, RCX, TIMES_8, StoreBufferBlock::pointers_offset()), RDX);
+ __ movq(RAX, Address(THR, target::Thread::store_buffer_block_offset()));
+ __ movl(RCX, Address(RAX, target::StoreBufferBlock::top_offset()));
+ __ movq(
+ Address(RAX, RCX, TIMES_8, target::StoreBufferBlock::pointers_offset()),
+ RDX);
// Increment top_ and check for overflow.
// RCX: top_
// RAX: StoreBufferBlock
Label overflow;
__ incq(RCX);
- __ movl(Address(RAX, StoreBufferBlock::top_offset()), RCX);
- __ cmpl(RCX, Immediate(StoreBufferBlock::kSize));
+ __ movl(Address(RAX, target::StoreBufferBlock::top_offset()), RCX);
+ __ cmpl(RCX, Immediate(target::StoreBufferBlock::kSize));
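+ // C-style sketch of this fast path (illustrative only):
+ //   block = thread->store_buffer_block_;
+ //   block->pointers_[block->top_++] = object;  // object is in RDX.
+ //   if (block->top_ == StoreBufferBlock::kSize) goto overflow;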
// Restore values.
__ popq(RCX);
__ popq(RAX);
@@ -1437,30 +1484,31 @@
__ ret();
__ Bind(&add_to_mark_stack);
- __ pushq(RAX); // Spill.
- __ pushq(RCX); // Spill.
+ __ pushq(RAX); // Spill.
+ __ pushq(RCX); // Spill.
__ movq(TMP, RAX); // RAX is fixed implicit operand of CAS.
// Atomically clear kOldAndNotMarkedBit.
// Note that we use 32-bit operations here to match the size of the
// background marker, which is also manipulating this 32-bit word.
Label retry, lost_race, marking_overflow;
- __ movl(RAX, FieldAddress(TMP, Object::tags_offset()));
+ __ movl(RAX, FieldAddress(TMP, target::Object::tags_offset()));
__ Bind(&retry);
__ movl(RCX, RAX);
- __ testl(RCX, Immediate(1 << RawObject::kOldAndNotMarkedBit));
+ __ testl(RCX, Immediate(1 << target::RawObject::kOldAndNotMarkedBit));
__ j(ZERO, &lost_race); // Marked by another thread.
- __ andl(RCX, Immediate(~(1 << RawObject::kOldAndNotMarkedBit)));
- __ LockCmpxchgl(FieldAddress(TMP, Object::tags_offset()), RCX);
+ __ andl(RCX, Immediate(~(1 << target::RawObject::kOldAndNotMarkedBit)));
+ __ LockCmpxchgl(FieldAddress(TMP, target::Object::tags_offset()), RCX);
__ j(NOT_EQUAL, &retry, Assembler::kNearJump);
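// Equivalent compare-and-swap loop (illustrative only; cmpxchg refreshes
// RAX with the current tags on failure):
//   do {
//     if ((tags & kOldAndNotMarkedBit) == 0) goto lost_race;
//   } while (!CAS32(&header->tags_, tags, tags & ~kOldAndNotMarkedBit));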
- __ movq(RAX, Address(THR, Thread::marking_stack_block_offset()));
- __ movl(RCX, Address(RAX, MarkingStackBlock::top_offset()));
- __ movq(Address(RAX, RCX, TIMES_8, MarkingStackBlock::pointers_offset()),
- TMP);
+ __ movq(RAX, Address(THR, target::Thread::marking_stack_block_offset()));
+ __ movl(RCX, Address(RAX, target::MarkingStackBlock::top_offset()));
+ __ movq(
+ Address(RAX, RCX, TIMES_8, target::MarkingStackBlock::pointers_offset()),
+ TMP);
__ incq(RCX);
- __ movl(Address(RAX, MarkingStackBlock::top_offset()), RCX);
- __ cmpl(RCX, Immediate(MarkingStackBlock::kSize));
+ __ movl(Address(RAX, target::MarkingStackBlock::top_offset()), RCX);
+ __ cmpl(RCX, Immediate(target::MarkingStackBlock::kSize));
__ popq(RCX); // Unspill.
__ popq(RAX); // Unspill.
__ j(EQUAL, &marking_overflow, Assembler::kNearJump);
@@ -1486,16 +1534,19 @@
// Get card table.
__ Bind(&remember_card);
- __ movq(TMP, RDX); // Object.
- __ andq(TMP, Immediate(kPageMask)); // HeapPage.
- __ cmpq(Address(TMP, HeapPage::card_table_offset()), Immediate(0));
+ __ movq(TMP, RDX); // Object.
+ __ andq(TMP, Immediate(target::kPageMask)); // HeapPage.
+ __ cmpq(Address(TMP, target::HeapPage::card_table_offset()), Immediate(0));
__ j(EQUAL, &remember_card_slow, Assembler::kNearJump);
// Dirty the card.
__ subq(R13, TMP); // Offset in page.
- __ movq(TMP, Address(TMP, HeapPage::card_table_offset())); // Card table.
+ __ movq(
+ TMP,
+ Address(TMP, target::HeapPage::card_table_offset())); // Card table.
__ shrq(R13,
- Immediate(HeapPage::kBytesPerCardLog2)); // Index in card table.
+ Immediate(
+ target::HeapPage::kBytesPerCardLog2)); // Index in card table.
__ movb(Address(TMP, R13, TIMES_1, 0), Immediate(1));
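// C-style sketch (illustrative only):
//   page = object & kPageMask;                   // object is in RDX.
//   index = (slot - page) >> kBytesPerCardLog2;  // slot is in R13.
//   page->card_table_[index] = 1;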
__ ret();
@@ -1513,70 +1564,70 @@
}
}
-void StubCode::GenerateWriteBarrierStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateWriteBarrierStub(Assembler* assembler) {
GenerateWriteBarrierStubHelper(
- assembler, Address(THR, Thread::write_barrier_code_offset()), false);
+ assembler, Address(THR, target::Thread::write_barrier_code_offset()),
+ false);
}
-void StubCode::GenerateArrayWriteBarrierStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateArrayWriteBarrierStub(Assembler* assembler) {
GenerateWriteBarrierStubHelper(
- assembler, Address(THR, Thread::array_write_barrier_code_offset()), true);
+ assembler,
+ Address(THR, target::Thread::array_write_barrier_code_offset()), true);
}
// Called for inline allocation of objects.
// Input parameters:
// RSP + 8 : type arguments object (only if class is parameterized).
// RSP : points to return address.
-void StubCode::GenerateAllocationStubForClass(Assembler* assembler,
- const Class& cls) {
- const intptr_t kObjectTypeArgumentsOffset = 1 * kWordSize;
+void StubCodeCompiler::GenerateAllocationStubForClass(Assembler* assembler,
+ const Class& cls) {
+ const intptr_t kObjectTypeArgumentsOffset = 1 * target::kWordSize;
// The generated code is different if the class is parameterized.
- const bool is_cls_parameterized = cls.NumTypeArguments() > 0;
- ASSERT(!is_cls_parameterized ||
- (cls.type_arguments_field_offset() != Class::kNoTypeArguments));
+ const bool is_cls_parameterized = target::Class::NumTypeArguments(cls) > 0;
+ ASSERT(!is_cls_parameterized || target::Class::TypeArgumentsFieldOffset(
+ cls) != target::Class::kNoTypeArguments);
// kInlineInstanceSize is a constant used as a threshold for determining
// when the object initialization should be done as a loop or as
// straight line code.
const int kInlineInstanceSize = 12; // In words.
- const intptr_t instance_size = cls.instance_size();
+ const intptr_t instance_size = target::Class::InstanceSize(cls);
ASSERT(instance_size > 0);
- __ LoadObject(R9, Object::null_object());
+ __ LoadObject(R9, NullObject());
if (is_cls_parameterized) {
__ movq(RDX, Address(RSP, kObjectTypeArgumentsOffset));
// RDX: instantiated type arguments.
}
- Isolate* isolate = Isolate::Current();
- if (FLAG_inline_alloc && Heap::IsAllocatableInNewSpace(instance_size) &&
- !cls.TraceAllocation(isolate)) {
+ if (FLAG_inline_alloc &&
+ target::Heap::IsAllocatableInNewSpace(instance_size) &&
+ !target::Class::TraceAllocation(cls)) {
Label slow_case;
// Allocate the object and update top to point to
// next object start and initialize the allocated object.
// RDX: instantiated type arguments (if is_cls_parameterized).
- __ movq(RAX, Address(THR, Thread::top_offset()));
+ __ movq(RAX, Address(THR, target::Thread::top_offset()));
__ leaq(RBX, Address(RAX, instance_size));
// Check if the allocation fits into the remaining space.
// RAX: potential new object start.
// RBX: potential next object start.
- __ cmpq(RBX, Address(THR, Thread::end_offset()));
+ __ cmpq(RBX, Address(THR, target::Thread::end_offset()));
if (FLAG_use_slow_path) {
__ jmp(&slow_case);
} else {
__ j(ABOVE_EQUAL, &slow_case);
}
- __ movq(Address(THR, Thread::top_offset()), RBX);
- NOT_IN_PRODUCT(__ UpdateAllocationStats(cls.id()));
+ __ movq(Address(THR, target::Thread::top_offset()), RBX);
+ NOT_IN_PRODUCT(__ UpdateAllocationStats(target::Class::GetId(cls)));
// RAX: new object start (untagged).
// RBX: next object start.
// RDX: new object type arguments (if is_cls_parameterized).
// Set the tags.
- uint32_t tags = 0;
- tags = RawObject::SizeTag::update(instance_size, tags);
- ASSERT(cls.id() != kIllegalCid);
- tags = RawObject::ClassIdTag::update(cls.id(), tags);
- tags = RawObject::NewBit::update(true, tags);
+ ASSERT(target::Class::GetId(cls) != kIllegalCid);
+ const uint32_t tags = target::MakeTagWordForNewSpaceObject(
+ target::Class::GetId(cls), instance_size);
// 64 bit store also zeros the identity hash field.
- __ movq(Address(RAX, Instance::tags_offset()), Immediate(tags));
+ __ movq(Address(RAX, target::Object::tags_offset()), Immediate(tags));
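+ // On x64 the header is 64 bits: tags in the low 32 bits and the identity
+ // hash in the high 32. Since the tag word built here has its top bit
+ // clear, the sign-extending 32-bit immediate store writes zeros over the
+ // hash field as well.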
__ addq(RAX, Immediate(kHeapObjectTag));
// Initialize the remaining words of the object.
@@ -1585,15 +1636,16 @@
// RDX: new object type arguments (if is_cls_parameterized).
// R9: raw null.
// First try inlining the initialization without a loop.
- if (instance_size < (kInlineInstanceSize * kWordSize)) {
+ if (instance_size < (kInlineInstanceSize * target::kWordSize)) {
// Check if the object contains any non-header fields.
// Small objects are initialized using a consecutive set of writes.
- for (intptr_t current_offset = Instance::NextFieldOffset();
- current_offset < instance_size; current_offset += kWordSize) {
+ for (intptr_t current_offset = target::Instance::first_field_offset();
+ current_offset < instance_size;
+ current_offset += target::kWordSize) {
__ StoreIntoObjectNoBarrier(RAX, FieldAddress(RAX, current_offset), R9);
}
} else {
- __ leaq(RCX, FieldAddress(RAX, Instance::NextFieldOffset()));
+ __ leaq(RCX, FieldAddress(RAX, target::Instance::first_field_offset()));
// Loop until the whole object is initialized.
// RAX: new object (tagged).
// RBX: next object start.
@@ -1610,7 +1662,7 @@
#endif // DEBUG
__ j(ABOVE_EQUAL, &done, kJumpLength);
__ StoreIntoObjectNoBarrier(RAX, Address(RCX, 0), R9);
- __ addq(RCX, Immediate(kWordSize));
+ __ addq(RCX, Immediate(target::kWordSize));
__ jmp(&init_loop, Assembler::kNearJump);
__ Bind(&done);
}
@@ -1618,7 +1670,7 @@
// RAX: new object (tagged).
// RDX: new object type arguments.
// Set the type arguments in the new object.
- intptr_t offset = cls.type_arguments_field_offset();
+ const intptr_t offset = target::Class::TypeArgumentsFieldOffset(cls);
__ StoreIntoObjectNoBarrier(RAX, FieldAddress(RAX, offset), RDX);
}
// Done allocating and initializing the instance.
@@ -1632,7 +1684,8 @@
// Create a stub frame.
__ EnterStubFrame(); // Uses PP to access class object.
__ pushq(R9); // Set up space on stack for return value.
- __ PushObject(cls); // Push class of object to be allocated.
+ __ PushObject(
+ CastHandle<Object>(cls)); // Push class of object to be allocated.
if (is_cls_parameterized) {
__ pushq(RDX); // Push type arguments of object to be allocated.
} else {
@@ -1655,24 +1708,28 @@
// RSP : points to return address.
// RSP + 8 : address of last argument.
// R10 : arguments descriptor array.
-void StubCode::GenerateCallClosureNoSuchMethodStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub(
+ Assembler* assembler) {
__ EnterStubFrame();
// Load the receiver.
- __ movq(R13, FieldAddress(R10, ArgumentsDescriptor::count_offset()));
- __ movq(RAX, Address(RBP, R13, TIMES_4, kParamEndSlotFromFp * kWordSize));
+ __ movq(R13, FieldAddress(R10, target::ArgumentsDescriptor::count_offset()));
+ __ movq(RAX,
+ Address(RBP, R13, TIMES_4,
+ target::frame_layout.param_end_from_fp * target::kWordSize));
__ pushq(Immediate(0)); // Result slot.
__ pushq(RAX); // Receiver.
__ pushq(R10); // Arguments descriptor array.
// Adjust arguments count.
- __ cmpq(FieldAddress(R10, ArgumentsDescriptor::type_args_len_offset()),
- Immediate(0));
+ __ cmpq(
+ FieldAddress(R10, target::ArgumentsDescriptor::type_args_len_offset()),
+ Immediate(0));
__ movq(R10, R13);
Label args_count_ok;
__ j(EQUAL, &args_count_ok, Assembler::kNearJump);
- __ addq(R10, Immediate(Smi::RawValue(1))); // Include the type arguments.
+ __ addq(R10, Immediate(target::ToRawSmi(1))); // Include the type arguments.
__ Bind(&args_count_ok);
// R10: Smi-tagged arguments array length.
@@ -1686,7 +1743,8 @@
// Cannot use function object from ICData as it may be the inlined
// function and not the top-scope function.
-void StubCode::GenerateOptimizedUsageCounterIncrement(Assembler* assembler) {
+void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement(
+ Assembler* assembler) {
Register ic_reg = RBX;
Register func_reg = RDI;
if (FLAG_trace_optimized_ic_calls) {
@@ -1702,19 +1760,19 @@
__ popq(func_reg); // Restore.
__ LeaveStubFrame();
}
- __ incl(FieldAddress(func_reg, Function::usage_counter_offset()));
+ __ incl(FieldAddress(func_reg, target::Function::usage_counter_offset()));
}
// Loads function into 'temp_reg', preserves 'ic_reg'.
-void StubCode::GenerateUsageCounterIncrement(Assembler* assembler,
- Register temp_reg) {
+void StubCodeCompiler::GenerateUsageCounterIncrement(Assembler* assembler,
+ Register temp_reg) {
if (FLAG_optimization_counter_threshold >= 0) {
Register ic_reg = RBX;
Register func_reg = temp_reg;
ASSERT(ic_reg != func_reg);
__ Comment("Increment function counter");
- __ movq(func_reg, FieldAddress(ic_reg, ICData::owner_offset()));
- __ incl(FieldAddress(func_reg, Function::usage_counter_offset()));
+ __ movq(func_reg, FieldAddress(ic_reg, target::ICData::owner_offset()));
+ __ incl(FieldAddress(func_reg, target::Function::usage_counter_offset()));
}
}
@@ -1728,8 +1786,8 @@
Label* not_smi_or_overflow) {
__ Comment("Fast Smi op");
ASSERT(num_args == 2);
- __ movq(RCX, Address(RSP, +1 * kWordSize)); // Right
- __ movq(RAX, Address(RSP, +2 * kWordSize)); // Left.
+ __ movq(RCX, Address(RSP, +1 * target::kWordSize)); // Right.
+ __ movq(RAX, Address(RSP, +2 * target::kWordSize)); // Left.
__ movq(R13, RCX);
__ orq(R13, RAX);
__ testq(R13, Immediate(kSmiTagMask));
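// The Smi tag bit is zero, so OR-ing both operands and testing kSmiTagMask
// verifies that left and right are both Smis with a single branch.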
@@ -1749,10 +1807,10 @@
Label done, is_true;
__ cmpq(RAX, RCX);
__ j(EQUAL, &is_true, Assembler::kNearJump);
- __ LoadObject(RAX, Bool::False());
+ __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
__ jmp(&done, Assembler::kNearJump);
__ Bind(&is_true);
- __ LoadObject(RAX, Bool::True());
+ __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
__ Bind(&done);
break;
}
@@ -1761,18 +1819,17 @@
}
// RBX: IC data object (preserved).
- __ movq(R13, FieldAddress(RBX, ICData::ic_data_offset()));
+ __ movq(R13, FieldAddress(RBX, target::ICData::ic_data_offset()));
// R13: ic_data_array with check entries: classes and target functions.
- __ leaq(R13, FieldAddress(R13, Array::data_offset()));
+ __ leaq(R13, FieldAddress(R13, target::Array::data_offset()));
// R13: points directly to the first ic data array element.
#if defined(DEBUG)
// Check that first entry is for Smi/Smi.
Label error, ok;
- const Immediate& imm_smi_cid =
- Immediate(reinterpret_cast<intptr_t>(Smi::New(kSmiCid)));
- __ cmpq(Address(R13, 0 * kWordSize), imm_smi_cid);
+ const Immediate& imm_smi_cid = Immediate(target::ToRawSmi(kSmiCid));
+ __ cmpq(Address(R13, 0 * target::kWordSize), imm_smi_cid);
__ j(NOT_EQUAL, &error, Assembler::kNearJump);
- __ cmpq(Address(R13, 1 * kWordSize), imm_smi_cid);
+ __ cmpq(Address(R13, 1 * target::kWordSize), imm_smi_cid);
__ j(EQUAL, &ok, Assembler::kNearJump);
__ Bind(&error);
__ Stop("Incorrect IC data");
@@ -1780,9 +1837,10 @@
#endif
if (FLAG_optimization_counter_threshold >= 0) {
- const intptr_t count_offset = ICData::CountIndexFor(num_args) * kWordSize;
+ const intptr_t count_offset =
+ target::ICData::CountIndexFor(num_args) * target::kWordSize;
// Update counter, ignore overflow.
- __ addq(Address(R13, count_offset), Immediate(Smi::RawValue(1)));
+ __ addq(Address(R13, count_offset), Immediate(target::ToRawSmi(1)));
}
__ ret();
@@ -1798,7 +1856,7 @@
// - Check if 'num_args' (including receiver) matches any IC data group.
// - Match found -> jump to target.
// - Match not found -> jump to IC miss.
-void StubCode::GenerateNArgsCheckInlineCacheStub(
+void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
Assembler* assembler,
intptr_t num_args,
const RuntimeEntry& handle_ic_miss,
@@ -1811,9 +1869,9 @@
Label ok;
// Check that the IC data array has NumArgsTested() == num_args.
// 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
- __ movl(RCX, FieldAddress(RBX, ICData::state_bits_offset()));
- ASSERT(ICData::NumArgsTestedShift() == 0); // No shift needed.
- __ andq(RCX, Immediate(ICData::NumArgsTestedMask()));
+ __ movl(RCX, FieldAddress(RBX, target::ICData::state_bits_offset()));
+ ASSERT(target::ICData::NumArgsTestedShift() == 0); // No shift needed.
+ __ andq(RCX, Immediate(target::ICData::NumArgsTestedMask()));
__ cmpq(RCX, Immediate(num_args));
__ j(EQUAL, &ok, Assembler::kNearJump);
__ Stop("Incorrect stub for IC data");
@@ -1826,7 +1884,7 @@
if (!optimized) {
__ Comment("Check single stepping");
__ LoadIsolate(RAX);
- __ cmpb(Address(RAX, Isolate::single_step_offset()), Immediate(0));
+ __ cmpb(Address(RAX, target::Isolate::single_step_offset()), Immediate(0));
__ j(NOT_EQUAL, &stepping);
__ Bind(&done_stepping);
}
@@ -1840,24 +1898,25 @@
__ Comment("Extract ICData initial values and receiver cid");
// Load arguments descriptor into R10.
- __ movq(R10, FieldAddress(RBX, ICData::arguments_descriptor_offset()));
+ __ movq(R10,
+ FieldAddress(RBX, target::ICData::arguments_descriptor_offset()));
// Loop that checks if there is an IC data match.
Label loop, found, miss;
// RBX: IC data object (preserved).
- __ movq(R13, FieldAddress(RBX, ICData::ic_data_offset()));
+ __ movq(R13, FieldAddress(RBX, target::ICData::ic_data_offset()));
// R13: ic_data_array with check entries: classes and target functions.
- __ leaq(R13, FieldAddress(R13, Array::data_offset()));
+ __ leaq(R13, FieldAddress(R13, target::Array::data_offset()));
// R13: points directly to the first ic data array element.
// Get argument count as Smi into RCX.
- __ movq(RCX, FieldAddress(R10, ArgumentsDescriptor::count_offset()));
+ __ movq(RCX, FieldAddress(R10, target::ArgumentsDescriptor::count_offset()));
// Load first argument into RDX.
__ movq(RDX, Address(RSP, RCX, TIMES_4, 0));
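// RCX is a Smi (already shifted left by the tag bit), so TIMES_4 scaling
// yields count * 8 bytes, i.e. 'count' words up the stack.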
__ LoadTaggedClassIdMayBeSmi(RAX, RDX);
// RAX: first argument class ID as Smi.
if (num_args == 2) {
// Load second argument into R9.
- __ movq(R9, Address(RSP, RCX, TIMES_4, -kWordSize));
+ __ movq(R9, Address(RSP, RCX, TIMES_4, -target::kWordSize));
__ LoadTaggedClassIdMayBeSmi(RCX, R9);
// RCX: second argument class ID (smi).
}
@@ -1866,10 +1925,12 @@
// We unroll the generic one that is generated once more than the others.
const bool optimize = kind == Token::kILLEGAL;
- const intptr_t target_offset = ICData::TargetIndexFor(num_args) * kWordSize;
- const intptr_t count_offset = ICData::CountIndexFor(num_args) * kWordSize;
+ const intptr_t target_offset =
+ target::ICData::TargetIndexFor(num_args) * target::kWordSize;
+ const intptr_t count_offset =
+ target::ICData::CountIndexFor(num_args) * target::kWordSize;
const intptr_t exactness_offset =
- ICData::ExactnessOffsetFor(num_args) * kWordSize;
+ target::ICData::ExactnessOffsetFor(num_args) * target::kWordSize;
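+ // Judging by the offset helpers, each test entry is laid out as
+ //   [cid_0 .. cid_{num_args-1}, target, count, exactness (if tracked)]
+ // so these offsets address the slots following the entry's class ids.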
__ Bind(&loop);
for (int unroll = optimize ? 4 : 2; unroll >= 0; unroll--) {
@@ -1878,7 +1939,7 @@
__ cmpq(RAX, R9); // Class id match?
if (num_args == 2) {
__ j(NOT_EQUAL, &update); // Continue.
- __ movq(R9, Address(R13, kWordSize));
+ __ movq(R9, Address(R13, target::kWordSize));
// R9: next class ID to check (smi).
__ cmpq(RCX, R9); // Class id match?
}
@@ -1887,10 +1948,11 @@
__ Bind(&update);
const intptr_t entry_size =
- ICData::TestEntryLengthFor(num_args, exactness_check) * kWordSize;
+ target::ICData::TestEntryLengthFor(num_args, exactness_check) *
+ target::kWordSize;
__ addq(R13, Immediate(entry_size)); // Next entry.
- __ cmpq(R9, Immediate(Smi::RawValue(kIllegalCid))); // Done?
+ __ cmpq(R9, Immediate(target::ToRawSmi(kIllegalCid))); // Done?
if (unroll == 0) {
__ j(NOT_EQUAL, &loop);
} else {
@@ -1902,7 +1964,7 @@
__ Comment("IC miss");
// Compute address of arguments (first read number of arguments from
// arguments descriptor array and then compute address on the stack).
- __ movq(RAX, FieldAddress(R10, ArgumentsDescriptor::count_offset()));
+ __ movq(RAX, FieldAddress(R10, target::ArgumentsDescriptor::count_offset()));
__ leaq(RAX, Address(RSP, RAX, TIMES_4, 0)); // RAX is Smi.
__ EnterStubFrame();
__ pushq(R10); // Preserve arguments descriptor array.
@@ -1910,7 +1972,7 @@
__ pushq(Immediate(0)); // Result slot.
// Push call arguments.
for (intptr_t i = 0; i < num_args; i++) {
- __ movq(RCX, Address(RAX, -kWordSize * i));
+ __ movq(RCX, Address(RAX, -target::kWordSize * i));
__ pushq(RCX);
}
__ pushq(RBX); // Pass IC data object.
@@ -1938,7 +2000,7 @@
Label exactness_ok;
ASSERT(num_args == 1);
__ movq(RAX, Address(R13, exactness_offset));
- __ cmpq(RAX, Immediate(Smi::RawValue(
+ __ cmpq(RAX, Immediate(target::ToRawSmi(
StaticTypeExactnessState::HasExactSuperType().Encode())));
__ j(LESS, &exactness_ok);
__ j(EQUAL, &call_target_function_through_unchecked_entry);
@@ -1946,8 +2008,9 @@
// Check trivial exactness.
// Note: RawICData::static_receiver_type_ is guaranteed not to be null
// because we only emit calls to this stub when it is not null.
- __ movq(RCX, FieldAddress(RBX, ICData::static_receiver_type_offset()));
- __ movq(RCX, FieldAddress(RCX, Type::arguments_offset()));
+ __ movq(RCX,
+ FieldAddress(RBX, target::ICData::static_receiver_type_offset()));
+ __ movq(RCX, FieldAddress(RCX, target::Type::arguments_offset()));
// RAX contains an offset to type arguments in words as a smi,
// hence TIMES_4. RDX is guaranteed to be non-smi because it is expected to
// have type arguments.
@@ -1956,8 +2019,8 @@
// Update exactness state (not-exact anymore).
__ movq(Address(R13, exactness_offset),
- Immediate(
- Smi::RawValue(StaticTypeExactnessState::NotExact().Encode())));
+ Immediate(target::ToRawSmi(
+ StaticTypeExactnessState::NotExact().Encode())));
__ Bind(&exactness_ok);
}
__ movq(RAX, Address(R13, target_offset));
@@ -1965,14 +2028,14 @@
if (FLAG_optimization_counter_threshold >= 0) {
__ Comment("Update ICData counter");
// Ignore overflow.
- __ addq(Address(R13, count_offset), Immediate(Smi::RawValue(1)));
+ __ addq(Address(R13, count_offset), Immediate(target::ToRawSmi(1)));
}
__ Comment("Call target (via checked entry point)");
__ Bind(&call_target_function);
// RAX: Target function.
- __ movq(CODE_REG, FieldAddress(RAX, Function::code_offset()));
- __ movq(RCX, FieldAddress(RAX, Function::entry_point_offset()));
+ __ movq(CODE_REG, FieldAddress(RAX, target::Function::code_offset()));
+ __ movq(RCX, FieldAddress(RAX, target::Function::entry_point_offset()));
__ jmp(RCX);
if (exactness_check) {
@@ -1980,12 +2043,13 @@
if (FLAG_optimization_counter_threshold >= 0) {
__ Comment("Update ICData counter");
// Ignore overflow.
- __ addq(Address(R13, count_offset), Immediate(Smi::RawValue(1)));
+ __ addq(Address(R13, count_offset), Immediate(target::ToRawSmi(1)));
}
__ Comment("Call target (via unchecked entry point)");
__ movq(RAX, Address(R13, target_offset));
- __ movq(CODE_REG, FieldAddress(RAX, Function::code_offset()));
- __ movq(RCX, FieldAddress(RAX, Function::unchecked_entry_point_offset()));
+ __ movq(CODE_REG, FieldAddress(RAX, target::Function::code_offset()));
+ __ movq(RCX, FieldAddress(
+ RAX, target::Function::unchecked_entry_point_offset()));
__ jmp(RCX);
}
@@ -2013,13 +2077,14 @@
// 2 .. (length - 1): group of checks, each check containing:
// - N classes.
// - 1 target function.
-void StubCode::GenerateOneArgCheckInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub(
+ Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, RCX);
GenerateNArgsCheckInlineCacheStub(
assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL);
}
-void StubCode::GenerateOneArgCheckInlineCacheWithExactnessCheckStub(
+void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub(
Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, RCX);
GenerateNArgsCheckInlineCacheStub(
@@ -2027,26 +2092,27 @@
/*optimized=*/false, /*exactness_check=*/true);
}
-void StubCode::GenerateTwoArgsCheckInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub(
+ Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, RCX);
GenerateNArgsCheckInlineCacheStub(assembler, 2,
kInlineCacheMissHandlerTwoArgsRuntimeEntry,
Token::kILLEGAL);
}
-void StubCode::GenerateSmiAddInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSmiAddInlineCacheStub(Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, RCX);
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD);
}
-void StubCode::GenerateSmiSubInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSmiSubInlineCacheStub(Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, RCX);
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kSUB);
}
-void StubCode::GenerateSmiEqualInlineCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSmiEqualInlineCacheStub(Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, RCX);
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ);
@@ -2063,7 +2129,7 @@
// 2 .. (length - 1): group of checks, each check containing:
// - N classes.
// - 1 target function.
-void StubCode::GenerateOneArgOptimizedCheckInlineCacheStub(
+void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub(
Assembler* assembler) {
GenerateOptimizedUsageCounterIncrement(assembler);
GenerateNArgsCheckInlineCacheStub(assembler, 1,
@@ -2071,8 +2137,9 @@
Token::kILLEGAL, /*optimized=*/true);
}
-void StubCode::GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub(
- Assembler* assembler) {
+void StubCodeCompiler::
+ GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub(
+ Assembler* assembler) {
GenerateOptimizedUsageCounterIncrement(assembler);
GenerateNArgsCheckInlineCacheStub(assembler, 1,
kInlineCacheMissHandlerOneArgRuntimeEntry,
@@ -2080,7 +2147,7 @@
/*exactness_check=*/true);
}
-void StubCode::GenerateTwoArgsOptimizedCheckInlineCacheStub(
+void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub(
Assembler* assembler) {
GenerateOptimizedUsageCounterIncrement(assembler);
GenerateNArgsCheckInlineCacheStub(assembler, 2,
@@ -2091,16 +2158,17 @@
// Intermediary stub between a static call and its target. ICData contains
// the target function and the call count.
// RBX: ICData
-void StubCode::GenerateZeroArgsUnoptimizedStaticCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub(
+ Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, RCX);
#if defined(DEBUG)
{
Label ok;
// Check that the IC data array has NumArgsTested() == 0.
// 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
- __ movl(RCX, FieldAddress(RBX, ICData::state_bits_offset()));
- ASSERT(ICData::NumArgsTestedShift() == 0); // No shift needed.
- __ andq(RCX, Immediate(ICData::NumArgsTestedMask()));
+ __ movl(RCX, FieldAddress(RBX, target::ICData::state_bits_offset()));
+ ASSERT(target::ICData::NumArgsTestedShift() == 0); // No shift needed.
+ __ andq(RCX, Immediate(target::ICData::NumArgsTestedMask()));
__ cmpq(RCX, Immediate(0));
__ j(EQUAL, &ok, Assembler::kNearJump);
__ Stop("Incorrect IC data for unoptimized static call");
@@ -2112,7 +2180,7 @@
// Check single stepping.
Label stepping, done_stepping;
__ LoadIsolate(RAX);
- __ movzxb(RAX, Address(RAX, Isolate::single_step_offset()));
+ __ movzxb(RAX, Address(RAX, target::Isolate::single_step_offset()));
__ cmpq(RAX, Immediate(0));
#if defined(DEBUG)
static const bool kJumpLength = Assembler::kFarJump;
@@ -2124,25 +2192,28 @@
#endif
// RBX: IC data object (preserved).
- __ movq(R12, FieldAddress(RBX, ICData::ic_data_offset()));
+ __ movq(R12, FieldAddress(RBX, target::ICData::ic_data_offset()));
// R12: ic_data_array with entries: target functions and count.
- __ leaq(R12, FieldAddress(R12, Array::data_offset()));
+ __ leaq(R12, FieldAddress(R12, target::Array::data_offset()));
// R12: points directly to the first ic data array element.
- const intptr_t target_offset = ICData::TargetIndexFor(0) * kWordSize;
- const intptr_t count_offset = ICData::CountIndexFor(0) * kWordSize;
+ const intptr_t target_offset =
+ target::ICData::TargetIndexFor(0) * target::kWordSize;
+ const intptr_t count_offset =
+ target::ICData::CountIndexFor(0) * target::kWordSize;
if (FLAG_optimization_counter_threshold >= 0) {
// Increment count for this call, ignore overflow.
- __ addq(Address(R12, count_offset), Immediate(Smi::RawValue(1)));
+ __ addq(Address(R12, count_offset), Immediate(target::ToRawSmi(1)));
}
// Load arguments descriptor into R10.
- __ movq(R10, FieldAddress(RBX, ICData::arguments_descriptor_offset()));
+ __ movq(R10,
+ FieldAddress(RBX, target::ICData::arguments_descriptor_offset()));
// Get function and call it, if possible.
__ movq(RAX, Address(R12, target_offset));
- __ movq(CODE_REG, FieldAddress(RAX, Function::code_offset()));
- __ movq(RCX, FieldAddress(RAX, Function::entry_point_offset()));
+ __ movq(CODE_REG, FieldAddress(RAX, target::Function::code_offset()));
+ __ movq(RCX, FieldAddress(RAX, target::Function::entry_point_offset()));
__ jmp(RCX);
#if !defined(PRODUCT)
@@ -2157,13 +2228,15 @@
#endif
}
-void StubCode::GenerateOneArgUnoptimizedStaticCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub(
+ Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, RCX);
GenerateNArgsCheckInlineCacheStub(
assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL);
}
-void StubCode::GenerateTwoArgsUnoptimizedStaticCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub(
+ Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, RCX);
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL);
@@ -2172,7 +2245,7 @@
// Stub for compiling a function and jumping to the compiled code.
// R10: Arguments descriptor.
// RAX: Function.
-void StubCode::GenerateLazyCompileStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
__ EnterStubFrame();
__ pushq(R10); // Preserve arguments descriptor array.
__ pushq(RAX); // Pass function.
@@ -2183,15 +2256,15 @@
// When using the interpreter, the function's code may now point to the
// InterpretCall stub. Make sure RAX, R10, and RBX are preserved.
- __ movq(CODE_REG, FieldAddress(RAX, Function::code_offset()));
- __ movq(RCX, FieldAddress(RAX, Function::entry_point_offset()));
+ __ movq(CODE_REG, FieldAddress(RAX, target::Function::code_offset()));
+ __ movq(RCX, FieldAddress(RAX, target::Function::entry_point_offset()));
__ jmp(RCX);
}
// Stub for interpreting a function call.
// R10: Arguments descriptor.
// RAX: Function.
-void StubCode::GenerateInterpretCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateInterpretCallStub(Assembler* assembler) {
#if defined(DART_PRECOMPILED_RUNTIME)
__ Stop("Not using interpreter");
#else
@@ -2210,43 +2283,47 @@
#endif
// Adjust arguments count for type arguments vector.
- __ movq(R11, FieldAddress(R10, ArgumentsDescriptor::count_offset()));
+ __ movq(R11, FieldAddress(R10, target::ArgumentsDescriptor::count_offset()));
__ SmiUntag(R11);
- __ cmpq(FieldAddress(R10, ArgumentsDescriptor::type_args_len_offset()),
- Immediate(0));
+ __ cmpq(
+ FieldAddress(R10, target::ArgumentsDescriptor::type_args_len_offset()),
+ Immediate(0));
Label args_count_ok;
__ j(EQUAL, &args_count_ok, Assembler::kNearJump);
__ incq(R11);
__ Bind(&args_count_ok);
// Compute argv.
- __ leaq(R12, Address(RBP, R11, TIMES_8, kParamEndSlotFromFp * kWordSize));
+ __ leaq(R12,
+ Address(RBP, R11, TIMES_8,
+ target::frame_layout.param_end_from_fp * target::kWordSize));
// Indicate decreasing memory addresses of arguments with negative argc.
__ negq(R11);
// Reserve shadow space for args and align frame before entering C++ world.
- __ subq(RSP, Immediate(5 * kWordSize));
+ __ subq(RSP, Immediate(5 * target::kWordSize));
if (OS::ActivationFrameAlignment() > 1) {
__ andq(RSP, Immediate(~(OS::ActivationFrameAlignment() - 1)));
}
- __ movq(CallingConventions::kArg1Reg, RAX); // Function.
- __ movq(CallingConventions::kArg2Reg, R10); // Arguments descriptor.
- __ movq(CallingConventions::kArg3Reg, R11); // Negative argc.
- __ movq(CallingConventions::kArg4Reg, R12); // Argv.
+ __ movq(CallingConventions::kArg1Reg, RAX); // Function.
+ __ movq(CallingConventions::kArg2Reg, R10); // Arguments descriptor.
+ __ movq(CallingConventions::kArg3Reg, R11); // Negative argc.
+ __ movq(CallingConventions::kArg4Reg, R12); // Argv.
#if defined(_WIN64)
- __ movq(Address(RSP, 0 * kWordSize), THR); // Thread.
+ __ movq(Address(RSP, 0 * target::kWordSize), THR); // Thread.
#else
__ movq(CallingConventions::kArg5Reg, THR); // Thread.
#endif
// Save exit frame information to enable stack walking as we are about
// to transition to Dart VM C++ code.
- __ movq(Address(THR, Thread::top_exit_frame_info_offset()), RBP);
+ __ movq(Address(THR, target::Thread::top_exit_frame_info_offset()), RBP);
// Mark that the thread is executing VM code.
- __ movq(RAX, Address(THR, Thread::interpret_call_entry_point_offset()));
+ __ movq(RAX,
+ Address(THR, target::Thread::interpret_call_entry_point_offset()));
__ movq(Assembler::VMTagAddress(), RAX);
__ call(RAX);
@@ -2255,7 +2332,8 @@
__ movq(Assembler::VMTagAddress(), Immediate(VMTag::kDartCompiledTagId));
// Reset exit frame information in Isolate structure.
- __ movq(Address(THR, Thread::top_exit_frame_info_offset()), Immediate(0));
+ __ movq(Address(THR, target::Thread::top_exit_frame_info_offset()),
+ Immediate(0));
__ LeaveStubFrame();
__ ret();
@@ -2264,7 +2342,7 @@
// RBX: Contains an ICData.
// TOS(0): return address (Dart code).
-void StubCode::GenerateICCallBreakpointStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateICCallBreakpointStub(Assembler* assembler) {
__ EnterStubFrame();
__ pushq(RBX); // Preserve IC data.
__ pushq(Immediate(0)); // Result slot.
@@ -2273,31 +2351,31 @@
__ popq(RBX); // Restore IC data.
__ LeaveStubFrame();
- __ movq(RAX, FieldAddress(CODE_REG, Code::entry_point_offset()));
+ __ movq(RAX, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
__ jmp(RAX); // Jump to original stub.
}
// TOS(0): return address (Dart code).
-void StubCode::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
__ EnterStubFrame();
__ pushq(Immediate(0)); // Result slot.
__ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
__ popq(CODE_REG); // Original stub.
__ LeaveStubFrame();
- __ movq(RAX, FieldAddress(CODE_REG, Code::entry_point_offset()));
+ __ movq(RAX, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
__ jmp(RAX); // Jump to original stub.
}
// Called only from unoptimized code.
-void StubCode::GenerateDebugStepCheckStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDebugStepCheckStub(Assembler* assembler) {
#if defined(PRODUCT)
- __ ret();
+ __ Ret();
#else
// Check single stepping.
Label stepping, done_stepping;
__ LoadIsolate(RAX);
- __ movzxb(RAX, Address(RAX, Isolate::single_step_offset()));
+ __ movzxb(RAX, Address(RAX, target::Isolate::single_step_offset()));
__ cmpq(RAX, Immediate(0));
__ j(NOT_EQUAL, &stepping, Assembler::kNearJump);
__ Bind(&done_stepping);
@@ -2339,7 +2417,7 @@
const Register kNullReg = R8;
- __ LoadObject(kNullReg, Object::null_object());
+ __ LoadObject(kNullReg, NullObject());
// Free up these 2 registers to be used for 6-value test.
if (n >= 6) {
@@ -2349,8 +2427,9 @@
// Loop initialization (hoisted up here so that dependent loads are not
// issued back to back).
- __ movq(RSI, FieldAddress(kCacheReg, SubtypeTestCache::cache_offset()));
- __ addq(RSI, Immediate(Array::data_offset() - kHeapObjectTag));
+ __ movq(RSI,
+ FieldAddress(kCacheReg, target::SubtypeTestCache::cache_offset()));
+ __ addq(RSI, Immediate(target::Array::data_offset() - kHeapObjectTag));
Label loop, not_closure;
if (n >= 4) {
@@ -2364,19 +2443,21 @@
// Closure handling.
{
__ movq(kInstanceCidOrFunction,
- FieldAddress(kInstanceReg, Closure::function_offset()));
+ FieldAddress(kInstanceReg, target::Closure::function_offset()));
if (n >= 2) {
- __ movq(kInstanceInstantiatorTypeArgumentsReg,
- FieldAddress(kInstanceReg,
- Closure::instantiator_type_arguments_offset()));
+ __ movq(
+ kInstanceInstantiatorTypeArgumentsReg,
+ FieldAddress(kInstanceReg,
+ target::Closure::instantiator_type_arguments_offset()));
if (n >= 6) {
ASSERT(n == 6);
- __ movq(kInstanceParentFunctionTypeArgumentsReg,
- FieldAddress(kInstanceReg,
- Closure::function_type_arguments_offset()));
+ __ movq(
+ kInstanceParentFunctionTypeArgumentsReg,
+ FieldAddress(kInstanceReg,
+ target::Closure::function_type_arguments_offset()));
__ movq(kInstanceDelayedFunctionTypeArgumentsReg,
FieldAddress(kInstanceReg,
- Closure::delayed_type_arguments_offset()));
+ target::Closure::delayed_type_arguments_offset()));
}
}
__ jmp(&loop, Assembler::kNearJump);
@@ -2393,10 +2474,12 @@
// [LoadClassById] also tags [kInstanceCidOrFunction] as a side-effect.
__ LoadClassById(RDI, kInstanceCidOrFunction);
__ movq(kInstanceInstantiatorTypeArgumentsReg, kNullReg);
- __ movl(RDI,
- FieldAddress(
- RDI, Class::type_arguments_field_offset_in_words_offset()));
- __ cmpl(RDI, Immediate(Class::kNoTypeArguments));
+ __ movl(
+ RDI,
+ FieldAddress(
+ RDI,
+ target::Class::type_arguments_field_offset_in_words_offset()));
+ __ cmpl(RDI, Immediate(target::Class::kNoTypeArguments));
__ j(EQUAL, &has_no_type_arguments, Assembler::kNearJump);
__ movq(kInstanceInstantiatorTypeArgumentsReg,
FieldAddress(kInstanceReg, RDI, TIMES_8, 0));
@@ -2413,8 +2496,10 @@
// Loop header.
__ Bind(&loop);
- __ movq(RDI, Address(RSI, kWordSize *
- SubtypeTestCache::kInstanceClassIdOrFunction));
+ __ movq(
+ RDI,
+ Address(RSI, target::kWordSize *
+ target::SubtypeTestCache::kInstanceClassIdOrFunction));
__ cmpq(RDI, kNullReg);
__ j(EQUAL, &not_found, Assembler::kNearJump);
__ cmpq(RDI, kInstanceCidOrFunction);
@@ -2423,18 +2508,22 @@
} else {
__ j(NOT_EQUAL, &next_iteration, Assembler::kNearJump);
__ cmpq(kInstanceInstantiatorTypeArgumentsReg,
- Address(RSI, kWordSize * SubtypeTestCache::kInstanceTypeArguments));
+ Address(RSI, target::kWordSize *
+ target::SubtypeTestCache::kInstanceTypeArguments));
if (n == 2) {
__ j(EQUAL, &found, Assembler::kNearJump);
} else {
__ j(NOT_EQUAL, &next_iteration, Assembler::kNearJump);
- __ cmpq(kInstantiatorTypeArgumentsReg,
- Address(RSI, kWordSize *
- SubtypeTestCache::kInstantiatorTypeArguments));
+ __ cmpq(
+ kInstantiatorTypeArgumentsReg,
+ Address(RSI,
+ target::kWordSize *
+ target::SubtypeTestCache::kInstantiatorTypeArguments));
__ j(NOT_EQUAL, &next_iteration, Assembler::kNearJump);
__ cmpq(
kFunctionTypeArgumentsReg,
- Address(RSI, kWordSize * SubtypeTestCache::kFunctionTypeArguments));
+ Address(RSI, target::kWordSize *
+ target::SubtypeTestCache::kFunctionTypeArguments));
if (n == 4) {
__ j(EQUAL, &found, Assembler::kNearJump);
@@ -2442,30 +2531,28 @@
ASSERT(n == 6);
__ j(NOT_EQUAL, &next_iteration, Assembler::kNearJump);
- __ cmpq(
- kInstanceParentFunctionTypeArgumentsReg,
- Address(
- RSI,
- kWordSize *
- SubtypeTestCache::kInstanceParentFunctionTypeArguments));
+ __ cmpq(kInstanceParentFunctionTypeArgumentsReg,
+ Address(RSI, target::kWordSize *
+ target::SubtypeTestCache::
+ kInstanceParentFunctionTypeArguments));
__ j(NOT_EQUAL, &next_iteration, Assembler::kNearJump);
- __ cmpq(
- kInstanceDelayedFunctionTypeArgumentsReg,
- Address(
- RSI,
- kWordSize *
- SubtypeTestCache::kInstanceDelayedFunctionTypeArguments));
+ __ cmpq(kInstanceDelayedFunctionTypeArgumentsReg,
+ Address(RSI, target::kWordSize *
+ target::SubtypeTestCache::
+ kInstanceDelayedFunctionTypeArguments));
__ j(EQUAL, &found, Assembler::kNearJump);
}
}
}
__ Bind(&next_iteration);
- __ addq(RSI, Immediate(kWordSize * SubtypeTestCache::kTestEntryLength));
+ __ addq(RSI, Immediate(target::kWordSize *
+ target::SubtypeTestCache::kTestEntryLength));
__ jmp(&loop, Assembler::kNearJump);
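// Each cache entry spans kTestEntryLength words; judging by the slot
// constants used above, the layout is [cid-or-function, instance type args,
// instantiator type args, function type args, parent-function type args,
// delayed-function type args, test result], and a null first slot (checked
// at the loop header) terminates the cache.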
__ Bind(&found);
- __ movq(R8, Address(RSI, kWordSize * SubtypeTestCache::kTestResult));
+ __ movq(R8, Address(RSI, target::kWordSize *
+ target::SubtypeTestCache::kTestResult));
if (n >= 6) {
__ popq(kInstanceDelayedFunctionTypeArgumentsReg);
__ popq(kInstanceParentFunctionTypeArgumentsReg);
@@ -2481,22 +2568,22 @@
}
// See comment on [GenerateSubtypeNTestCacheStub].
-void StubCode::GenerateSubtype1TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype1TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 1);
}
// See comment on [GenerateSubtypeNTestCacheStub].
-void StubCode::GenerateSubtype2TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype2TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 2);
}
// See comment on [GenerateSubtypeNTestCacheStub].
-void StubCode::GenerateSubtype4TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype4TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 4);
}
// See comment on [GenerateSubtypeNTestCacheStub].
-void StubCode::GenerateSubtype6TestCacheStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSubtype6TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 6);
}
@@ -2516,88 +2603,40 @@
//
// Warning: the caller will not populate CODE_REG, so we have no access to
// the pool.
-void StubCode::GenerateDefaultTypeTestStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDefaultTypeTestStub(Assembler* assembler) {
Label done;
const Register kInstanceReg = RAX;
// Fast case for 'null'.
- __ CompareObject(kInstanceReg, Object::null_object());
+ __ CompareObject(kInstanceReg, NullObject());
__ BranchIf(EQUAL, &done);
- __ movq(CODE_REG, Address(THR, Thread::slow_type_test_stub_offset()));
- __ jmp(FieldAddress(CODE_REG, Code::entry_point_offset()));
+ __ movq(CODE_REG, Address(THR, target::Thread::slow_type_test_stub_offset()));
+ __ jmp(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
__ Bind(&done);
__ Ret();
}
-void StubCode::GenerateTopTypeTypeTestStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateTopTypeTypeTestStub(Assembler* assembler) {
__ Ret();
}
-void StubCode::GenerateTypeRefTypeTestStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateTypeRefTypeTestStub(Assembler* assembler) {
const Register kTypeRefReg = RBX;
// We dereference the TypeRef and tail-call its type testing stub.
- __ movq(kTypeRefReg, FieldAddress(kTypeRefReg, TypeRef::type_offset()));
- __ jmp(FieldAddress(kTypeRefReg,
- AbstractType::type_test_stub_entry_point_offset()));
+ __ movq(kTypeRefReg,
+ FieldAddress(kTypeRefReg, target::TypeRef::type_offset()));
+ __ jmp(FieldAddress(
+ kTypeRefReg, target::AbstractType::type_test_stub_entry_point_offset()));
}
-void StubCode::GenerateUnreachableTypeTestStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateUnreachableTypeTestStub(Assembler* assembler) {
__ Breakpoint();
}
-void TypeTestingStubGenerator::BuildOptimizedTypeTestStub(
- Assembler* assembler,
- HierarchyInfo* hi,
- const Type& type,
- const Class& type_class) {
- const Register kInstanceReg = RAX;
- const Register kClassIdReg = TMP;
-
- BuildOptimizedTypeTestStubFastCases(assembler, hi, type, type_class,
- kInstanceReg, kClassIdReg);
-
- __ movq(CODE_REG, Address(THR, Thread::slow_type_test_stub_offset()));
- __ jmp(FieldAddress(CODE_REG, Code::entry_point_offset()));
-}
-
-void TypeTestingStubGenerator::
- BuildOptimizedSubclassRangeCheckWithTypeArguments(Assembler* assembler,
- HierarchyInfo* hi,
- const Class& type_class,
- const TypeArguments& tp,
- const TypeArguments& ta) {
- const Register kInstanceReg = RAX;
- const Register kInstanceTypeArguments = RSI;
- const Register kClassIdReg = TMP;
-
- BuildOptimizedSubclassRangeCheckWithTypeArguments(
- assembler, hi, type_class, tp, ta, kClassIdReg, kInstanceReg,
- kInstanceTypeArguments);
-}
-
-void TypeTestingStubGenerator::BuildOptimizedTypeArgumentValueCheck(
- Assembler* assembler,
- HierarchyInfo* hi,
- const AbstractType& type_arg,
- intptr_t type_param_value_offset_i,
- Label* check_failed) {
- const Register kInstanceTypeArguments = RSI;
- const Register kInstantiatorTypeArgumentsReg = RDX;
- const Register kFunctionTypeArgumentsReg = RCX;
-
- const Register kClassIdReg = TMP;
- const Register kOwnTypeArgumentValue = RDI;
-
- BuildOptimizedTypeArgumentValueCheck(
- assembler, hi, type_arg, type_param_value_offset_i, kClassIdReg,
- kInstanceTypeArguments, kInstantiatorTypeArgumentsReg,
- kFunctionTypeArgumentsReg, kOwnTypeArgumentValue, check_failed);
-}
-
static void InvokeTypeCheckFromTypeTestStub(Assembler* assembler,
TypeCheckMode mode) {
const Register kInstanceReg = RAX;
@@ -2606,14 +2645,14 @@
const Register kDstTypeReg = RBX;
const Register kSubtypeTestCacheReg = R9;
- __ PushObject(Object::null_object()); // Make room for result.
+ __ PushObject(NullObject()); // Make room for result.
__ pushq(kInstanceReg);
__ pushq(kDstTypeReg);
__ pushq(kInstantiatorTypeArgumentsReg);
__ pushq(kFunctionTypeArgumentsReg);
- __ PushObject(Object::null_object());
+ __ PushObject(NullObject());
__ pushq(kSubtypeTestCacheReg);
- __ PushObject(Smi::ZoneHandle(Smi::New(mode)));
+ __ PushImmediate(Immediate(target::ToRawSmi(mode)));
__ CallRuntime(kTypeCheckRuntimeEntry, 7);
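// Seven arguments were pushed above (instance, destination type,
// instantiator type args, function type args, a null placeholder, the
// subtype-test cache, and the Smi-encoded mode), preceded by one slot for
// the result; pushing the mode as a raw Smi immediate avoids allocating a
// handle while the stub is being built.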
__ Drop(1);
__ popq(kSubtypeTestCacheReg);
@@ -2625,17 +2664,19 @@
__ Drop(1); // Discard return value.
}
-void StubCode::GenerateLazySpecializeTypeTestStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateLazySpecializeTypeTestStub(
+ Assembler* assembler) {
const Register kInstanceReg = RAX;
Label done;
// Fast case for 'null'.
- __ CompareObject(kInstanceReg, Object::null_object());
+ __ CompareObject(kInstanceReg, NullObject());
__ BranchIf(EQUAL, &done);
- __ movq(CODE_REG,
- Address(THR, Thread::lazy_specialize_type_test_stub_offset()));
+ __ movq(
+ CODE_REG,
+ Address(THR, target::Thread::lazy_specialize_type_test_stub_offset()));
__ EnterStubFrame();
InvokeTypeCheckFromTypeTestStub(assembler, kTypeCheckFromLazySpecializeStub);
__ LeaveStubFrame();
@@ -2644,7 +2685,7 @@
__ Ret();
}
-void StubCode::GenerateSlowTypeTestStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSlowTypeTestStub(Assembler* assembler) {
Label done, call_runtime;
const Register kInstanceReg = RAX;
@@ -2656,14 +2697,14 @@
#ifdef DEBUG
// Guaranteed by caller.
Label no_error;
- __ CompareObject(kInstanceReg, Object::null_object());
+ __ CompareObject(kInstanceReg, NullObject());
__ BranchIf(NOT_EQUAL, &no_error);
__ Breakpoint();
__ Bind(&no_error);
#endif
// If the subtype-cache is null, it needs to be lazily created by the runtime.
- __ CompareObject(kSubtypeTestCacheReg, Object::null_object());
+ __ CompareObject(kSubtypeTestCacheReg, NullObject());
__ BranchIf(EQUAL, &call_runtime);
const Register kTmp = RDI;
@@ -2675,13 +2716,13 @@
__ BranchIf(NOT_EQUAL, &is_complex_case);
// Check whether this [Type] is instantiated/uninstantiated.
- __ cmpb(FieldAddress(kDstTypeReg, Type::type_state_offset()),
- Immediate(RawType::kFinalizedInstantiated));
+ __ cmpb(FieldAddress(kDstTypeReg, target::Type::type_state_offset()),
+ Immediate(target::RawAbstractType::kTypeStateFinalizedInstantiated));
__ BranchIf(NOT_EQUAL, &is_complex_case);
// Check whether this [Type] is a function type.
- __ movq(kTmp, FieldAddress(kDstTypeReg, Type::signature_offset()));
- __ CompareObject(kTmp, Object::null_object());
+ __ movq(kTmp, FieldAddress(kDstTypeReg, target::Type::signature_offset()));
+ __ CompareObject(kTmp, NullObject());
__ BranchIf(NOT_EQUAL, &is_complex_case);
// This [Type] could be a FutureOr. Subtype2TestCache does not support Smi.
@@ -2691,16 +2732,16 @@
__ Bind(&is_simple_case);
{
- __ Call(StubCode::Subtype2TestCache());
- __ CompareObject(R8, Bool::True());
+ __ Call(StubCodeSubtype2TestCache());
+ __ CompareObject(R8, CastHandle<Object>(TrueObject()));
__ BranchIf(EQUAL, &done); // Cache said: yes.
__ Jump(&call_runtime);
}
__ Bind(&is_complex_case);
{
- __ Call(StubCode::Subtype6TestCache());
- __ CompareObject(R8, Bool::True());
+ __ Call(StubCodeSubtype6TestCache());
+ __ CompareObject(R8, CastHandle<Object>(TrueObject()));
__ BranchIf(EQUAL, &done); // Cache said: yes.
// Fall through to runtime_call
}
@@ -2712,11 +2753,11 @@
// because we do constant evaluation with default stubs and only install
// optimized versions before writing out the AOT snapshot.
// So dynamic/Object/void will run with default stub in constant evaluation.
- __ CompareObject(kDstTypeReg, Type::dynamic_type());
+ __ CompareObject(kDstTypeReg, CastHandle<Object>(DynamicType()));
__ BranchIf(EQUAL, &done);
- __ CompareObject(kDstTypeReg, Type::Handle(Type::ObjectType()));
+ __ CompareObject(kDstTypeReg, CastHandle<Object>(ObjectType()));
__ BranchIf(EQUAL, &done);
- __ CompareObject(kDstTypeReg, Type::void_type());
+ __ CompareObject(kDstTypeReg, CastHandle<Object>(VoidType()));
__ BranchIf(EQUAL, &done);
InvokeTypeCheckFromTypeTestStub(assembler, kTypeCheckFromSlowStub);
@@ -2730,8 +2771,8 @@
// checks.
// TOS + 0: return address
// Result in RAX.
-void StubCode::GenerateGetCStackPointerStub(Assembler* assembler) {
- __ leaq(RAX, Address(RSP, kWordSize));
+void StubCodeCompiler::GenerateGetCStackPointerStub(Assembler* assembler) {
+ __ leaq(RAX, Address(RSP, target::kWordSize));
__ ret();
}
@@ -2742,18 +2783,19 @@
// Arg3: frame_pointer
// Arg4: thread
// No Result.
-void StubCode::GenerateJumpToFrameStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateJumpToFrameStub(Assembler* assembler) {
__ movq(THR, CallingConventions::kArg4Reg);
__ movq(RBP, CallingConventions::kArg3Reg);
__ movq(RSP, CallingConventions::kArg2Reg);
// Set the tag.
__ movq(Assembler::VMTagAddress(), Immediate(VMTag::kDartCompiledTagId));
// Clear top exit frame.
- __ movq(Address(THR, Thread::top_exit_frame_info_offset()), Immediate(0));
+ __ movq(Address(THR, target::Thread::top_exit_frame_info_offset()),
+ Immediate(0));
// Restore the pool pointer.
__ RestoreCodePointer();
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
- __ movq(PP, Address(THR, Thread::global_object_pool_offset()));
+ __ movq(PP, Address(THR, target::Thread::global_object_pool_offset()));
} else {
__ LoadPoolPointer(PP);
}
@@ -2764,22 +2806,24 @@
//
// The arguments are stored in the Thread object.
// No result.
-void StubCode::GenerateRunExceptionHandlerStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateRunExceptionHandlerStub(Assembler* assembler) {
ASSERT(kExceptionObjectReg == RAX);
ASSERT(kStackTraceObjectReg == RDX);
__ movq(CallingConventions::kArg1Reg,
- Address(THR, Thread::resume_pc_offset()));
+ Address(THR, target::Thread::resume_pc_offset()));
- ASSERT(Thread::CanLoadFromThread(Object::null_object()));
- __ movq(TMP, Address(THR, Thread::OffsetFromThread(Object::null_object())));
+ word offset_from_thread = 0;
+ bool ok = target::CanLoadFromThread(NullObject(), &offset_from_thread);
+ ASSERT(ok);
+ __ movq(TMP, Address(THR, offset_from_thread));
// Load the exception from the current thread.
- Address exception_addr(THR, Thread::active_exception_offset());
+ Address exception_addr(THR, target::Thread::active_exception_offset());
__ movq(kExceptionObjectReg, exception_addr);
__ movq(exception_addr, TMP);
// Load the stacktrace from the current thread.
- Address stacktrace_addr(THR, Thread::active_stacktrace_offset());
+ Address stacktrace_addr(THR, target::Thread::active_stacktrace_offset());
__ movq(kStackTraceObjectReg, stacktrace_addr);
__ movq(stacktrace_addr, TMP);
@@ -2789,12 +2833,12 @@
// Deoptimize a frame on the call stack before rewinding.
// The arguments are stored in the Thread object.
// No result.
-void StubCode::GenerateDeoptForRewindStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) {
// Push zap value instead of CODE_REG.
__ pushq(Immediate(kZapCodeReg));
// Push the deopt pc.
- __ pushq(Address(THR, Thread::resume_pc_offset()));
+ __ pushq(Address(THR, target::Thread::resume_pc_offset()));
GenerateDeoptimizationSequence(assembler, kEagerDeopt);
// After we have deoptimized, jump to the correct frame.
@@ -2807,7 +2851,7 @@
// Calls to the runtime to optimize the given function.
// RDI: function to be reoptimized.
// R10: argument descriptor (preserved).
-void StubCode::GenerateOptimizeFunctionStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) {
__ EnterStubFrame();
__ pushq(R10); // Preserve args descriptor.
__ pushq(Immediate(0)); // Result slot.
@@ -2817,8 +2861,8 @@
__ popq(RAX); // Get Code object.
__ popq(R10); // Restore argument descriptor.
__ LeaveStubFrame();
- __ movq(CODE_REG, FieldAddress(RAX, Function::code_offset()));
- __ movq(RCX, FieldAddress(RAX, Function::entry_point_offset()));
+ __ movq(CODE_REG, FieldAddress(RAX, target::Function::code_offset()));
+ __ movq(RCX, FieldAddress(RAX, target::Function::entry_point_offset()));
__ jmp(RCX);
__ int3();
}
@@ -2845,8 +2889,8 @@
__ j(NOT_EQUAL, &done, Assembler::kFarJump);
// Double values bitwise compare.
- __ movq(left, FieldAddress(left, Double::value_offset()));
- __ cmpq(left, FieldAddress(right, Double::value_offset()));
+ __ movq(left, FieldAddress(left, target::Double::value_offset()));
+ __ cmpq(left, FieldAddress(right, target::Double::value_offset()));
__ jmp(&done, Assembler::kFarJump);
__ Bind(&check_mint);
@@ -2854,8 +2898,8 @@
__ j(NOT_EQUAL, &reference_compare, Assembler::kNearJump);
__ CompareClassId(right, kMintCid);
__ j(NOT_EQUAL, &done, Assembler::kFarJump);
- __ movq(left, FieldAddress(left, Mint::value_offset()));
- __ cmpq(left, FieldAddress(right, Mint::value_offset()));
+ __ movq(left, FieldAddress(left, target::Mint::value_offset()));
+ __ cmpq(left, FieldAddress(right, target::Mint::value_offset()));
__ jmp(&done, Assembler::kFarJump);
__ Bind(&reference_compare);
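
The two hunks above implement the number check of Dart's identical(): doubles compare bitwise, mints compare by value, and anything else falls through to a plain reference comparison. A rough C++ model of that decision tree (the boxed representation is hypothetical):

  #include <cstdint>
  #include <cstring>

  enum Cid { kDoubleCid, kMintCid, kOtherCid };
  struct Box {
    Cid cid;
    double d;   // Valid when cid == kDoubleCid.
    int64_t i;  // Valid when cid == kMintCid.
  };

  bool IdenticalWithNumberCheck(const Box* left, const Box* right) {
    if (left == right) return true;  // Reference equality.
    if (left->cid != right->cid) return false;
    if (left->cid == kDoubleCid) {   // Bitwise compare, as in the
      uint64_t lb, rb;               // movq/cmpq pair above.
      std::memcpy(&lb, &left->d, sizeof(lb));
      std::memcpy(&rb, &right->d, sizeof(rb));
      return lb == rb;
    }
    if (left->cid == kMintCid) return left->i == right->i;
    return false;  // Distinct references.
  }
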
@@ -2868,13 +2912,13 @@
// TOS + 1: right argument.
// TOS + 2: left argument.
// Returns ZF set.
-void StubCode::GenerateUnoptimizedIdenticalWithNumberCheckStub(
+void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub(
Assembler* assembler) {
#if !defined(PRODUCT)
// Check single stepping.
Label stepping, done_stepping;
__ LoadIsolate(RAX);
- __ movzxb(RAX, Address(RAX, Isolate::single_step_offset()));
+ __ movzxb(RAX, Address(RAX, target::Isolate::single_step_offset()));
__ cmpq(RAX, Immediate(0));
__ j(NOT_EQUAL, &stepping);
__ Bind(&done_stepping);
@@ -2883,8 +2927,8 @@
const Register left = RAX;
const Register right = RDX;
- __ movq(left, Address(RSP, 2 * kWordSize));
- __ movq(right, Address(RSP, 1 * kWordSize));
+ __ movq(left, Address(RSP, 2 * target::kWordSize));
+ __ movq(right, Address(RSP, 1 * target::kWordSize));
GenerateIdenticalWithNumberCheckStub(assembler, left, right);
__ ret();
@@ -2903,24 +2947,24 @@
// TOS + 1: right argument.
// TOS + 2: left argument.
// Returns ZF set.
-void StubCode::GenerateOptimizedIdenticalWithNumberCheckStub(
+void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub(
Assembler* assembler) {
const Register left = RAX;
const Register right = RDX;
- __ movq(left, Address(RSP, 2 * kWordSize));
- __ movq(right, Address(RSP, 1 * kWordSize));
+ __ movq(left, Address(RSP, 2 * target::kWordSize));
+ __ movq(right, Address(RSP, 1 * target::kWordSize));
GenerateIdenticalWithNumberCheckStub(assembler, left, right);
__ ret();
}
// Called from megamorphic calls.
// RDI: receiver
-// RBX: MegamorphicCache (preserved)
+// RBX: target::MegamorphicCache (preserved)
// Passed to target:
// CODE_REG: target Code
// R10: arguments descriptor
-void StubCode::GenerateMegamorphicCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
// Jump if receiver is a smi.
Label smi_case;
__ testq(RDI, Immediate(kSmiTagMask));
@@ -2932,8 +2976,8 @@
Label cid_loaded;
__ Bind(&cid_loaded);
- __ movq(R9, FieldAddress(RBX, MegamorphicCache::mask_offset()));
- __ movq(RDI, FieldAddress(RBX, MegamorphicCache::buckets_offset()));
+ __ movq(R9, FieldAddress(RBX, target::MegamorphicCache::mask_offset()));
+ __ movq(RDI, FieldAddress(RBX, target::MegamorphicCache::buckets_offset()));
// R9: mask as a smi.
// RDI: cache buckets array.
@@ -2941,7 +2985,7 @@
__ addq(RAX, RAX);
// Compute the table index.
- ASSERT(MegamorphicCache::kSpreadFactor == 7);
+ ASSERT(target::MegamorphicCache::kSpreadFactor == 7);
// Use leaq and subq to multiply by 7 == 8 - 1.
__ leaq(RCX, Address(RAX, TIMES_8, 0));
__ subq(RCX, RAX);
@@ -2950,7 +2994,7 @@
__ Bind(&loop);
__ andq(RCX, R9);
- const intptr_t base = Array::data_offset();
+ const intptr_t base = target::Array::data_offset();
// RCX is smi tagged, but table entries are two words, so TIMES_8.
Label probe_failed;
__ cmpq(RAX, FieldAddress(RDI, RCX, TIMES_8, base));
@@ -2962,28 +3006,31 @@
// proper target for the given name and arguments descriptor. If the
// illegal class id was found, the target is a cache miss handler that can
// be invoked as a normal Dart function.
- const auto target_address = FieldAddress(RDI, RCX, TIMES_8, base + kWordSize);
+ const auto target_address =
+ FieldAddress(RDI, RCX, TIMES_8, base + target::kWordSize);
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
__ movq(R10,
- FieldAddress(RBX, MegamorphicCache::arguments_descriptor_offset()));
+ FieldAddress(
+ RBX, target::MegamorphicCache::arguments_descriptor_offset()));
__ jmp(target_address);
} else {
__ movq(RAX, target_address);
__ movq(R10,
- FieldAddress(RBX, MegamorphicCache::arguments_descriptor_offset()));
- __ movq(RCX, FieldAddress(RAX, Function::entry_point_offset()));
- __ movq(CODE_REG, FieldAddress(RAX, Function::code_offset()));
+ FieldAddress(
+ RBX, target::MegamorphicCache::arguments_descriptor_offset()));
+ __ movq(RCX, FieldAddress(RAX, target::Function::entry_point_offset()));
+ __ movq(CODE_REG, FieldAddress(RAX, target::Function::code_offset()));
__ jmp(RCX);
}
// Probe failed, check if it is a miss.
__ Bind(&probe_failed);
__ cmpq(FieldAddress(RDI, RCX, TIMES_8, base),
- Immediate(Smi::RawValue(kIllegalCid)));
+ Immediate(target::ToRawSmi(kIllegalCid)));
__ j(ZERO, &load_target, Assembler::kNearJump);
// Try next entry in the table.
- __ AddImmediate(RCX, Immediate(Smi::RawValue(1)));
+ __ AddImmediate(RCX, Immediate(target::ToRawSmi(1)));
__ jmp(&loop);
// Load cid for the Smi case.
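
For reference, the index computation and probe loop above reduce to the following: multiply the Smi-tagged cid by the spread factor 7 via leaq/subq, mask it into the table, and advance one entry at a time until either the cid or the kIllegalCid sentinel (raw Smi value 0) is found. A compact sketch with the data layout simplified:

  #include <cstdint>
  #include <vector>

  // Each bucket is a (cid, target) pair; cid 0 marks an empty slot, mirroring
  // target::ToRawSmi(kIllegalCid) == 0 in the stub.
  struct Bucket {
    intptr_t cid;
    intptr_t target;
  };

  intptr_t Probe(const std::vector<Bucket>& buckets, intptr_t mask,
                 intptr_t cid) {
    intptr_t i = (cid * 7) & mask;  // kSpreadFactor == 7, asserted above.
    while (true) {
      if (buckets[i].cid == cid) return buckets[i].target;  // Hit.
      if (buckets[i].cid == 0) return -1;  // Sentinel: run the miss handler.
      i = (i + 1) & mask;                  // Try next entry in the table.
    }
  }
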
@@ -2998,11 +3045,12 @@
// Passed to target:
// CODE_REG: target Code object
// R10: arguments descriptor
-void StubCode::GenerateICCallThroughFunctionStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateICCallThroughFunctionStub(Assembler* assembler) {
Label loop, found, miss;
- __ movq(R13, FieldAddress(RBX, ICData::ic_data_offset()));
- __ movq(R10, FieldAddress(RBX, ICData::arguments_descriptor_offset()));
- __ leaq(R13, FieldAddress(R13, Array::data_offset()));
+ __ movq(R13, FieldAddress(RBX, target::ICData::ic_data_offset()));
+ __ movq(R10,
+ FieldAddress(RBX, target::ICData::arguments_descriptor_offset()));
+ __ leaq(R13, FieldAddress(R13, target::Array::data_offset()));
// R13: first IC entry
__ LoadTaggedClassIdMayBeSmi(RAX, RDI);
// RAX: receiver cid as Smi
@@ -3012,34 +3060,37 @@
__ cmpq(RAX, R9);
__ j(EQUAL, &found, Assembler::kNearJump);
- ASSERT(Smi::RawValue(kIllegalCid) == 0);
+ ASSERT(target::ToRawSmi(kIllegalCid) == 0);
__ testq(R9, R9);
__ j(ZERO, &miss, Assembler::kNearJump);
const intptr_t entry_length =
- ICData::TestEntryLengthFor(1, /*tracking_exactness=*/false) * kWordSize;
+ target::ICData::TestEntryLengthFor(1, /*tracking_exactness=*/false) *
+ target::kWordSize;
__ addq(R13, Immediate(entry_length)); // Next entry.
__ jmp(&loop);
__ Bind(&found);
- const intptr_t target_offset = ICData::TargetIndexFor(1) * kWordSize;
+ const intptr_t target_offset =
+ target::ICData::TargetIndexFor(1) * target::kWordSize;
__ movq(RAX, Address(R13, target_offset));
- __ movq(RCX, FieldAddress(RAX, Function::entry_point_offset()));
- __ movq(CODE_REG, FieldAddress(RAX, Function::code_offset()));
+ __ movq(RCX, FieldAddress(RAX, target::Function::entry_point_offset()));
+ __ movq(CODE_REG, FieldAddress(RAX, target::Function::code_offset()));
__ jmp(RCX);
__ Bind(&miss);
__ LoadIsolate(RAX);
- __ movq(CODE_REG, Address(RAX, Isolate::ic_miss_code_offset()));
- __ movq(RCX, FieldAddress(CODE_REG, Code::entry_point_offset()));
+ __ movq(CODE_REG, Address(RAX, target::Isolate::ic_miss_code_offset()));
+ __ movq(RCX, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
__ jmp(RCX);
}
-void StubCode::GenerateICCallThroughCodeStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
Label loop, found, miss;
- __ movq(R13, FieldAddress(RBX, ICData::ic_data_offset()));
- __ movq(R10, FieldAddress(RBX, ICData::arguments_descriptor_offset()));
- __ leaq(R13, FieldAddress(R13, Array::data_offset()));
+ __ movq(R13, FieldAddress(RBX, target::ICData::ic_data_offset()));
+ __ movq(R10,
+ FieldAddress(RBX, target::ICData::arguments_descriptor_offset()));
+ __ leaq(R13, FieldAddress(R13, target::Array::data_offset()));
// R13: first IC entry
__ LoadTaggedClassIdMayBeSmi(RAX, RDI);
// RAX: receiver cid as Smi
@@ -3049,18 +3100,21 @@
__ cmpq(RAX, R9);
__ j(EQUAL, &found, Assembler::kNearJump);
- ASSERT(Smi::RawValue(kIllegalCid) == 0);
+ ASSERT(target::ToRawSmi(kIllegalCid) == 0);
__ testq(R9, R9);
__ j(ZERO, &miss, Assembler::kNearJump);
const intptr_t entry_length =
- ICData::TestEntryLengthFor(1, /*tracking_exactness=*/false) * kWordSize;
+ target::ICData::TestEntryLengthFor(1, /*tracking_exactness=*/false) *
+ target::kWordSize;
__ addq(R13, Immediate(entry_length)); // Next entry.
__ jmp(&loop);
__ Bind(&found);
- const intptr_t code_offset = ICData::CodeIndexFor(1) * kWordSize;
- const intptr_t entry_offset = ICData::EntryPointIndexFor(1) * kWordSize;
+ const intptr_t code_offset =
+ target::ICData::CodeIndexFor(1) * target::kWordSize;
+ const intptr_t entry_offset =
+ target::ICData::EntryPointIndexFor(1) * target::kWordSize;
if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
__ movq(CODE_REG, Address(R13, code_offset));
}
@@ -3068,14 +3122,14 @@
__ Bind(&miss);
__ LoadIsolate(RAX);
- __ movq(CODE_REG, Address(RAX, Isolate::ic_miss_code_offset()));
- __ movq(RCX, FieldAddress(CODE_REG, Code::entry_point_offset()));
+ __ movq(CODE_REG, Address(RAX, target::Isolate::ic_miss_code_offset()));
+ __ movq(RCX, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
__ jmp(RCX);
}
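
Both IC-call stubs above perform the same linear scan over the ICData entries array: each entry occupies TestEntryLengthFor(1) words, the first word is the receiver cid as a Smi, and a zero cid (kIllegalCid) terminates the table. A simplified sketch of the lookup:

  #include <cstdint>

  // One check entry of a one-argument IC; the real layout also carries a
  // count and (in JIT mode) a Code object.
  struct ICEntry {
    intptr_t cid;  // Receiver cid; 0 (kIllegalCid) terminates the table.
    void* target;
  };

  void* LookupIC(const ICEntry* entries, intptr_t receiver_cid) {
    for (const ICEntry* e = entries;; ++e) {
      if (e->cid == receiver_cid) return e->target;  // Found.
      if (e->cid == 0) return nullptr;               // Miss.
    }
  }
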
// RDI: receiver
// RBX: UnlinkedCall
-void StubCode::GenerateUnlinkedCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateUnlinkedCallStub(Assembler* assembler) {
__ EnterStubFrame();
__ pushq(RDI); // Preserve receiver.
@@ -3090,9 +3144,10 @@
__ popq(RDI); // Restore receiver.
__ LeaveStubFrame();
- __ movq(CODE_REG, Address(THR, Thread::ic_lookup_through_code_stub_offset()));
- __ movq(RCX, FieldAddress(CODE_REG, Code::entry_point_offset(
- Code::EntryKind::kMonomorphic)));
+ __ movq(CODE_REG,
+ Address(THR, target::Thread::ic_lookup_through_code_stub_offset()));
+ __ movq(RCX, FieldAddress(CODE_REG, target::Code::entry_point_offset(
+ CodeEntryKind::kMonomorphic)));
__ jmp(RCX);
}
@@ -3101,17 +3156,21 @@
// RBX: SingleTargetCache
// Passed to target:
// CODE_REG: target Code object
-void StubCode::GenerateSingleTargetCallStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateSingleTargetCallStub(Assembler* assembler) {
Label miss;
__ LoadClassIdMayBeSmi(RAX, RDI);
- __ movzxw(R9, FieldAddress(RBX, SingleTargetCache::lower_limit_offset()));
- __ movzxw(R10, FieldAddress(RBX, SingleTargetCache::upper_limit_offset()));
+ __ movzxw(R9,
+ FieldAddress(RBX, target::SingleTargetCache::lower_limit_offset()));
+ __ movzxw(R10,
+ FieldAddress(RBX, target::SingleTargetCache::upper_limit_offset()));
__ cmpq(RAX, R9);
__ j(LESS, &miss, Assembler::kNearJump);
__ cmpq(RAX, R10);
__ j(GREATER, &miss, Assembler::kNearJump);
- __ movq(RCX, FieldAddress(RBX, SingleTargetCache::entry_point_offset()));
- __ movq(CODE_REG, FieldAddress(RBX, SingleTargetCache::target_offset()));
+ __ movq(RCX,
+ FieldAddress(RBX, target::SingleTargetCache::entry_point_offset()));
+ __ movq(CODE_REG,
+ FieldAddress(RBX, target::SingleTargetCache::target_offset()));
__ jmp(RCX);
__ Bind(&miss);
@@ -3127,16 +3186,18 @@
__ popq(RDI); // Restore receiver.
__ LeaveStubFrame();
- __ movq(CODE_REG, Address(THR, Thread::ic_lookup_through_code_stub_offset()));
- __ movq(RCX, FieldAddress(CODE_REG, Code::entry_point_offset(
- Code::EntryKind::kMonomorphic)));
+ __ movq(CODE_REG,
+ Address(THR, target::Thread::ic_lookup_through_code_stub_offset()));
+ __ movq(RCX, FieldAddress(CODE_REG, target::Code::entry_point_offset(
+ CodeEntryKind::kMonomorphic)));
__ jmp(RCX);
}
// Called from the monomorphic checked entry.
// RDI: receiver
-void StubCode::GenerateMonomorphicMissStub(Assembler* assembler) {
- __ movq(CODE_REG, Address(THR, Thread::monomorphic_miss_stub_offset()));
+void StubCodeCompiler::GenerateMonomorphicMissStub(Assembler* assembler) {
+ __ movq(CODE_REG,
+ Address(THR, target::Thread::monomorphic_miss_stub_offset()));
__ EnterStubFrame();
__ pushq(RDI); // Preserve receiver.
@@ -3149,20 +3210,24 @@
__ popq(RDI); // Restore receiver.
__ LeaveStubFrame();
- __ movq(CODE_REG, Address(THR, Thread::ic_lookup_through_code_stub_offset()));
- __ movq(RCX, FieldAddress(CODE_REG, Code::entry_point_offset(
- Code::EntryKind::kMonomorphic)));
+ __ movq(CODE_REG,
+ Address(THR, target::Thread::ic_lookup_through_code_stub_offset()));
+ __ movq(RCX, FieldAddress(CODE_REG, target::Code::entry_point_offset(
+ CodeEntryKind::kMonomorphic)));
__ jmp(RCX);
}
-void StubCode::GenerateFrameAwaitingMaterializationStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateFrameAwaitingMaterializationStub(
+ Assembler* assembler) {
__ int3();
}
-void StubCode::GenerateAsynchronousGapMarkerStub(Assembler* assembler) {
+void StubCodeCompiler::GenerateAsynchronousGapMarkerStub(Assembler* assembler) {
__ int3();
}
+} // namespace compiler
+
} // namespace dart
#endif // defined(TARGET_ARCH_X64) && !defined(DART_PRECOMPILED_RUNTIME)
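
The recurring kWordSize -> target::kWordSize substitution in this file matters for cross-compilation: stack-slot and field arithmetic must use the word size of the architecture being compiled for, which can differ from the host's. A minimal illustration of keeping the two constants separate (values shown assume an x64 target):

  #include <cstdint>

  namespace host {
  constexpr intptr_t kWordSize = sizeof(void*);  // Whatever the host is.
  }
  namespace target {
  constexpr intptr_t kWordSize = 8;  // Fixed by the target architecture.
  }

  // Offsets in generated code must scale by the target word size, never the
  // host's, e.g. Address(RSP, 2 * target::kWordSize) above.
  constexpr intptr_t StackSlotOffset(intptr_t slot) {
    return slot * target::kWordSize;
  }

  static_assert(StackSlotOffset(2) == 16, "two slots above TOS on x64");
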
diff --git a/runtime/vm/frame_layout.h b/runtime/vm/frame_layout.h
index f77da40..53ddcc7 100644
--- a/runtime/vm/frame_layout.h
+++ b/runtime/vm/frame_layout.h
@@ -38,6 +38,9 @@
// The offset (in words) from FP to the code object (if applicable).
int code_from_fp;
+ // Entry and exit frame layout.
+ int exit_link_slot_from_entry_fp;
+
// The number of fixed slots below the saved PC.
int saved_below_pc() const { return -first_local_from_fp; }
diff --git a/runtime/vm/megamorphic_cache_table.cc b/runtime/vm/megamorphic_cache_table.cc
index 4595b9c..c07452f 100644
--- a/runtime/vm/megamorphic_cache_table.cc
+++ b/runtime/vm/megamorphic_cache_table.cc
@@ -53,9 +53,9 @@
// The miss handler for a class ID not found in the table is invoked as a
// normal Dart function.
ObjectPoolBuilder object_pool_builder;
- const Code& code = Code::Handle(
- StubCode::Generate("_stub_MegamorphicMiss", &object_pool_builder,
- StubCode::GenerateMegamorphicMissStub));
+ const Code& code = Code::Handle(StubCode::Generate(
+ "_stub_MegamorphicMiss", &object_pool_builder,
+ compiler::StubCodeCompiler::GenerateMegamorphicMissStub));
const auto& object_pool =
ObjectPool::Handle(ObjectPool::NewFromBuilder(object_pool_builder));
@@ -92,7 +92,8 @@
ASSERT(FLAG_precompiled_mode && FLAG_use_bare_instructions);
const Code& code = Code::Handle(StubCode::Generate(
- "_stub_MegamorphicMiss", wrapper, StubCode::GenerateMegamorphicMissStub));
+ "_stub_MegamorphicMiss", wrapper,
+ compiler::StubCodeCompiler::GenerateMegamorphicMissStub));
code.set_exception_handlers(Object::empty_exception_handlers());
auto object_store = isolate->object_store();
diff --git a/runtime/vm/object.cc b/runtime/vm/object.cc
index a195232..53e3ea3 100644
--- a/runtime/vm/object.cc
+++ b/runtime/vm/object.cc
@@ -8700,8 +8700,9 @@
const intptr_t type_arguments_offset = cls.type_arguments_field_offset();
ASSERT(type_arguments_offset != Class::kNoTypeArguments);
if (StaticTypeExactnessState::CanRepresentAsTriviallyExact(
- type_arguments_offset)) {
- return StaticTypeExactnessState::TriviallyExact(type_arguments_offset);
+ type_arguments_offset / kWordSize)) {
+ return StaticTypeExactnessState::TriviallyExact(type_arguments_offset /
+ kWordSize);
} else {
return StaticTypeExactnessState::NotExact();
}
@@ -8839,7 +8840,7 @@
return "not-exact";
} else if (IsTriviallyExact()) {
return Thread::Current()->zone()->PrintToString(
- "trivially-exact(%" Pd ")", GetTypeArgumentsOffsetInWords());
+ "trivially-exact(%hhu)", GetTypeArgumentsOffsetInWords());
} else if (IsHasExactSuperType()) {
return "has-exact-super-type";
} else if (IsHasExactSuperClass()) {
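
The new division by kWordSize is what makes the int8_t encoding of StaticTypeExactnessState work: a positive value now stores the type-arguments offset in words, so a byte offset of, say, 960 on x64 encodes as 120 and still fits in the 8-bit payload, while the raw byte offset would overflow it. A quick check of the range (word size of 8 assumed):

  #include <cassert>
  #include <cstdint>

  constexpr intptr_t kWordSize = 8;  // x64 for this sketch.

  // Positive int8_t payload = offset in words, as in TriviallyExact().
  bool CanRepresent(intptr_t offset_in_bytes) {
    const intptr_t words = offset_in_bytes / kWordSize;
    return words > 0 && words <= INT8_MAX;
  }

  int main() {
    assert(CanRepresent(960));    // 120 words: fits.
    assert(!CanRepresent(2048));  // 256 words: would overflow int8_t.
    return 0;
  }
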
diff --git a/runtime/vm/object.h b/runtime/vm/object.h
index ab91004..e37a4d8 100644
--- a/runtime/vm/object.h
+++ b/runtime/vm/object.h
@@ -29,6 +29,7 @@
#include "vm/os.h"
#include "vm/raw_object.h"
#include "vm/report.h"
+#include "vm/static_type_exactness_state.h"
#include "vm/tags.h"
#include "vm/thread.h"
#include "vm/token_position.h"
@@ -1541,134 +1542,6 @@
friend class Class;
};
-// Representation of a state of runtime tracking of static type exactness for
-// a particular location in the program (e.g. exactness of type annotation
-// on a field).
-//
-// Given the static type G<T0, ..., Tn> we say that it is exact iff any
-// values that can be observed at this location has runtime type T such that
-// type arguments of T at G are exactly <T0, ..., Tn>.
-//
-// Currently we only support tracking for locations that are also known
-// to be monomorphic with respect to the actual class of the values it contains.
-//
-// Important: locations should never switch from tracked (kIsTriviallyExact,
-// kHasExactSuperType, kHasExactSuperClass, kNotExact) to not tracked
-// (kNotTracking) or the other way around because that would affect unoptimized
-// graphs generated by graph builder and skew deopt ids.
-class StaticTypeExactnessState final {
- public:
- // Values stored in the location with static type G<T0, ..., Tn> are all
- // instances of C<T0, ..., Tn> and C<U0, ..., Un> at G has type parameters
- // <U0, ..., Un>.
- //
- // For trivially exact types we can simply compare type argument
- // vectors as pointers to check exactness. That's why we represent
- // trivially exact locations as offset in words to the type arguments of
- // class C. All other states are represented as non-positive values.
- //
- // Note: we are ignoring the type argument vector sharing optimization for
- // now.
- static inline StaticTypeExactnessState TriviallyExact(
- intptr_t type_arguments_offset) {
- ASSERT((type_arguments_offset > 0) &&
- Utils::IsAligned(type_arguments_offset, kWordSize) &&
- Utils::IsInt(8, type_arguments_offset / kWordSize));
- return StaticTypeExactnessState(type_arguments_offset / kWordSize);
- }
-
- static inline bool CanRepresentAsTriviallyExact(
- intptr_t type_arguments_offset) {
- return Utils::IsInt(8, type_arguments_offset / kWordSize);
- }
-
- // Values stored in the location with static type G<T0, ..., Tn> are all
- // instances of class C<...> and C<U0, ..., Un> at G has type
- // parameters <T0, ..., Tn> for any <U0, ..., Un> - that is C<...> has a
- // supertype G<T0, ..., Tn>.
- //
- // For such locations we can simply check if the value stored
- // is an instance of an expected class and we don't have to look at
- // type arguments carried by the instance.
- //
- // We distinguish situations where we know that G is a superclass of C from
- // situations where G might be superinterface of C - because in the first
- // type arguments of G give us constant prefix of type arguments of C.
- static inline StaticTypeExactnessState HasExactSuperType() {
- return StaticTypeExactnessState(kHasExactSuperType);
- }
-
- static inline StaticTypeExactnessState HasExactSuperClass() {
- return StaticTypeExactnessState(kHasExactSuperClass);
- }
-
- // Values stored in the location don't fall under either kIsTriviallyExact
- // or kHasExactSuperType categories.
- //
- // Note: that does not imply that static type annotation is not exact
- // according to a broader definition, e.g. location might simply be
- // polymorphic and store instances of multiple different types.
- // However for simplicity we don't track such cases yet.
- static inline StaticTypeExactnessState NotExact() {
- return StaticTypeExactnessState(kNotExact);
- }
-
- // The location does not track exactness of its static type at runtime.
- static inline StaticTypeExactnessState NotTracking() {
- return StaticTypeExactnessState(kNotTracking);
- }
-
- static inline StaticTypeExactnessState Unitialized() {
- return StaticTypeExactnessState(kUninitialized);
- }
-
- static StaticTypeExactnessState Compute(const Type& static_type,
- const Instance& value,
- bool print_trace = false);
-
- bool IsTracking() const { return value_ != kNotTracking; }
- bool IsUninitialized() const { return value_ == kUninitialized; }
- bool IsHasExactSuperClass() const { return value_ == kHasExactSuperClass; }
- bool IsHasExactSuperType() const { return value_ == kHasExactSuperType; }
- bool IsTriviallyExact() const { return value_ > kUninitialized; }
- bool NeedsFieldGuard() const { return value_ >= kUninitialized; }
- bool IsExactOrUninitialized() const { return value_ > kNotExact; }
- bool IsExact() const {
- return IsTriviallyExact() || IsHasExactSuperType() ||
- IsHasExactSuperClass();
- }
-
- const char* ToCString() const;
-
- StaticTypeExactnessState CollapseSuperTypeExactness() const {
- return IsHasExactSuperClass() ? HasExactSuperType() : *this;
- }
-
- static inline StaticTypeExactnessState Decode(int8_t value) {
- return StaticTypeExactnessState(value);
- }
-
- int8_t Encode() const { return value_; }
- intptr_t GetTypeArgumentsOffsetInWords() const {
- ASSERT(IsTriviallyExact());
- return value_;
- }
-
- static constexpr int8_t kUninitialized = 0;
-
- private:
- static constexpr int8_t kNotTracking = -4;
- static constexpr int8_t kNotExact = -3;
- static constexpr int8_t kHasExactSuperType = -2;
- static constexpr int8_t kHasExactSuperClass = -1;
-
- explicit StaticTypeExactnessState(int8_t value) : value_(value) {}
-
- int8_t value_;
-
- DISALLOW_ALLOCATION();
-};
-
// Object holding information about an IC: test classes and their
// corresponding targets. The owner of the ICData can be either the function
// or the original ICData object. In case of background compilation we
@@ -5862,6 +5735,8 @@
static intptr_t DataOffsetFor(intptr_t cid);
static intptr_t ElementSizeFor(intptr_t cid);
+ static intptr_t NextFieldOffset() { return sizeof(RawInstance); }
+
protected:
#ifndef PRODUCT
virtual void PrintSharedInstanceJSON(JSONObject* jsobj, bool ref) const;
@@ -5883,8 +5758,6 @@
}
bool IsValidFieldOffset(intptr_t offset) const;
- static intptr_t NextFieldOffset() { return sizeof(RawInstance); }
-
// The following raw methods are used for morphing.
// They are needed due to the extraction of the class in IsValidFieldOffset.
RawObject** RawFieldAddrAtOffset(intptr_t offset) const {
diff --git a/runtime/vm/raw_object.h b/runtime/vm/raw_object.h
index c2a5ec0..68f90fd 100644
--- a/runtime/vm/raw_object.h
+++ b/runtime/vm/raw_object.h
@@ -1834,7 +1834,7 @@
};
class RawAbstractType : public RawInstance {
- protected:
+ public:
enum TypeState {
kAllocated, // Initial state.
kBeingFinalized, // In the process of being finalized.
@@ -1842,6 +1842,7 @@
kFinalizedUninstantiated, // Uninstantiated type ready for use.
};
+ protected:
uword type_test_stub_entry_point_; // Accessed from generated code.
RawCode* type_test_stub_; // Must be the last field, since subclasses use it
// in their VISIT_FROM.
diff --git a/runtime/vm/stack_frame.cc b/runtime/vm/stack_frame.cc
index 900061b..0e98fc7 100644
--- a/runtime/vm/stack_frame.cc
+++ b/runtime/vm/stack_frame.cc
@@ -33,6 +33,7 @@
/*.dart_fixed_frame_size = */ -1,
/*.saved_caller_pp_from_fp = */ -1,
/*.code_from_fp = */ -1,
+ /*.exit_link_slot_from_entry_fp = */ -1,
};
const FrameLayout default_frame_layout = {
@@ -43,6 +44,7 @@
/*.dart_fixed_frame_size = */ kDartFrameFixedSize,
/*.saved_caller_pp_from_fp = */ kSavedCallerPpSlotFromFp,
/*.code_from_fp = */ kPcMarkerSlotFromFp,
+ /*.exit_link_slot_from_entry_fp = */ kExitLinkSlotFromEntryFp,
};
const FrameLayout bare_instructions_frame_layout = {
/*.first_object_from_pc =*/kFirstObjectSlotFromFp, // No saved PP slot.
@@ -55,6 +57,7 @@
2, // No saved CODE, PP slots.
/*.saved_caller_pp_from_fp = */ 0, // No saved PP slot.
/*.code_from_fp = */ 0, // No saved CODE
+ /*.exit_link_slot_from_entry_fp = */ kExitLinkSlotFromEntryFp,
};
namespace compiler {
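
With exit_link_slot_from_entry_fp in FrameLayout, code that walks entry frames can read the slot index from the layout selected at startup instead of baking in the compile-time kExitLinkSlotFromEntryFp constant, which lets the default and bare-instructions layouts coexist in one binary. A sketch of that indirection (struct trimmed, slot value made up):

  #include <cstdint>

  struct FrameLayout {
    int exit_link_slot_from_entry_fp;  // The field added above.
  };

  // One of several layouts, selected once at startup; the invalid layout
  // uses -1 so that premature use is easy to spot.
  static FrameLayout runtime_frame_layout = {
      /*.exit_link_slot_from_entry_fp = */ -10,  // Hypothetical slot index.
  };

  intptr_t* ExitLinkSlot(intptr_t* entry_fp) {
    return entry_fp + runtime_frame_layout.exit_link_slot_from_entry_fp;
  }
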
diff --git a/runtime/vm/static_type_exactness_state.h b/runtime/vm/static_type_exactness_state.h
new file mode 100644
index 0000000..1660eba
--- /dev/null
+++ b/runtime/vm/static_type_exactness_state.h
@@ -0,0 +1,149 @@
+// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#ifndef RUNTIME_VM_STATIC_TYPE_EXACTNESS_STATE_H_
+#define RUNTIME_VM_STATIC_TYPE_EXACTNESS_STATE_H_
+
+#include "platform/allocation.h"
+
+// This header defines StaticTypeExactnessState, the representation used for
+// runtime tracking of static type exactness at a particular location. It is
+// kept in its own header so that it can be included from both runtime and
+// compiler code.
+
+namespace dart {
+
+class Instance;
+class Type;
+
+// Representation of a state of runtime tracking of static type exactness for
+// a particular location in the program (e.g. exactness of type annotation
+// on a field).
+//
+// Given the static type G<T0, ..., Tn> we say that it is exact iff all
+// values that can be observed at this location have runtime type T such that
+// the type arguments of T at G are exactly <T0, ..., Tn>.
+//
+// Currently we only support tracking for locations that are also known to be
+// monomorphic with respect to the actual class of the values they contain.
+//
+// Important: locations should never switch from tracked (kIsTriviallyExact,
+// kHasExactSuperType, kHasExactSuperClass, kNotExact) to not tracked
+// (kNotTracking) or the other way around because that would affect unoptimized
+// graphs generated by graph builder and skew deopt ids.
+class StaticTypeExactnessState final {
+ public:
+ // Values stored in the location with static type G<T0, ..., Tn> are all
+ // instances of C<T0, ..., Tn> and C<U0, ..., Un> at G has type parameters
+ // <U0, ..., Un>.
+ //
+ // For trivially exact types we can simply compare type argument
+ // vectors as pointers to check exactness. That's why we represent
+ // trivially exact locations as offset in words to the type arguments of
+ // class C. All other states are represented as non-positive values.
+ //
+ // Note: we are ignoring the type argument vector sharing optimization for
+ // now.
+ static inline StaticTypeExactnessState TriviallyExact(
+ intptr_t type_arguments_offset_in_words) {
+ ASSERT((type_arguments_offset_in_words > 0) &&
+ Utils::IsInt(8, type_arguments_offset_in_words));
+ return StaticTypeExactnessState(type_arguments_offset_in_words);
+ }
+
+ static inline bool CanRepresentAsTriviallyExact(
+ intptr_t type_arguments_offset_in_words) {
+ return Utils::IsInt(8, type_arguments_offset_in_words);
+ }
+
+ // Values stored in the location with static type G<T0, ..., Tn> are all
+ // instances of class C<...> and C<U0, ..., Un> at G has type
+ // parameters <T0, ..., Tn> for any <U0, ..., Un> - that is C<...> has a
+ // supertype G<T0, ..., Tn>.
+ //
+ // For such locations we can simply check if the value stored
+ // is an instance of an expected class and we don't have to look at
+ // type arguments carried by the instance.
+ //
+ // We distinguish situations where we know that G is a superclass of C from
+ // situations where G might be a superinterface of C, because in the first
+ // case the type arguments of G give us a constant prefix of the type
+ // arguments of C.
+ static inline StaticTypeExactnessState HasExactSuperType() {
+ return StaticTypeExactnessState(kHasExactSuperType);
+ }
+
+ static inline StaticTypeExactnessState HasExactSuperClass() {
+ return StaticTypeExactnessState(kHasExactSuperClass);
+ }
+
+ // Values stored in the location don't fall under either kIsTriviallyExact
+ // or kHasExactSuperType categories.
+ //
+ // Note: that does not imply that the static type annotation is not exact
+ // according to a broader definition, e.g. the location might simply be
+ // polymorphic and store instances of multiple different types.
+ // However for simplicity we don't track such cases yet.
+ static inline StaticTypeExactnessState NotExact() {
+ return StaticTypeExactnessState(kNotExact);
+ }
+
+ // The location does not track exactness of its static type at runtime.
+ static inline StaticTypeExactnessState NotTracking() {
+ return StaticTypeExactnessState(kNotTracking);
+ }
+
+ static inline StaticTypeExactnessState Unitialized() {
+ return StaticTypeExactnessState(kUninitialized);
+ }
+
+ static StaticTypeExactnessState Compute(const Type& static_type,
+ const Instance& value,
+ bool print_trace = false);
+
+ bool IsTracking() const { return value_ != kNotTracking; }
+ bool IsUninitialized() const { return value_ == kUninitialized; }
+ bool IsHasExactSuperClass() const { return value_ == kHasExactSuperClass; }
+ bool IsHasExactSuperType() const { return value_ == kHasExactSuperType; }
+ bool IsTriviallyExact() const { return value_ > kUninitialized; }
+ bool NeedsFieldGuard() const { return value_ >= kUninitialized; }
+ bool IsExactOrUninitialized() const { return value_ > kNotExact; }
+ bool IsExact() const {
+ return IsTriviallyExact() || IsHasExactSuperType() ||
+ IsHasExactSuperClass();
+ }
+
+ const char* ToCString() const;
+
+ StaticTypeExactnessState CollapseSuperTypeExactness() const {
+ return IsHasExactSuperClass() ? HasExactSuperType() : *this;
+ }
+
+ static inline StaticTypeExactnessState Decode(int8_t value) {
+ return StaticTypeExactnessState(value);
+ }
+
+ int8_t Encode() const { return value_; }
+ int8_t GetTypeArgumentsOffsetInWords() const {
+ ASSERT(IsTriviallyExact());
+ return value_;
+ }
+
+ static constexpr int8_t kUninitialized = 0;
+
+ private:
+ static constexpr int8_t kNotTracking = -4;
+ static constexpr int8_t kNotExact = -3;
+ static constexpr int8_t kHasExactSuperType = -2;
+ static constexpr int8_t kHasExactSuperClass = -1;
+
+ explicit StaticTypeExactnessState(int8_t value) : value_(value) {}
+
+ int8_t value_;
+
+ DISALLOW_ALLOCATION();
+};
+
+} // namespace dart
+
+#endif // RUNTIME_VM_STATIC_TYPE_EXACTNESS_STATE_H_
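
As the comments above spell out, the payoff of the trivially-exact state is that an exactness check degenerates to a single pointer comparison against the type-arguments vector stored at the recorded word offset. A hedged sketch of a consumer of the state (object model entirely hypothetical):

  #include <cstdint>

  struct TypeArguments;  // Opaque for this sketch.

  struct Instance {
    TypeArguments* fields[16];  // Word-indexed view, as generated code sees it.
  };

  // Fast path enabled by IsTriviallyExact(): no subtype walk, just compare
  // the vector at the recorded offset with the expected vector.
  bool IsTriviallyExactMatch(const Instance& obj, int8_t offset_in_words,
                             const TypeArguments* expected) {
    return obj.fields[offset_in_words] == expected;
  }
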
diff --git a/runtime/vm/stub_code.cc b/runtime/vm/stub_code.cc
index 6e90207..9171436 100644
--- a/runtime/vm/stub_code.cc
+++ b/runtime/vm/stub_code.cc
@@ -47,8 +47,9 @@
#define STUB_CODE_GENERATE(name) \
entries_[k##name##Index] = Code::ReadOnlyHandle(); \
- *entries_[k##name##Index] = Generate("_stub_" #name, &object_pool_builder, \
- StubCode::Generate##name##Stub);
+ *entries_[k##name##Index] = \
+ Generate("_stub_" #name, &object_pool_builder, \
+ compiler::StubCodeCompiler::Generate##name##Stub);
#define STUB_CODE_SET_OBJECT_POOL(name) \
entries_[k##name##Index]->set_object_pool(object_pool.raw());
@@ -181,7 +182,7 @@
Assembler assembler(wrapper);
const char* name = cls.ToCString();
- StubCode::GenerateAllocationStubForClass(&assembler, cls);
+ compiler::StubCodeCompiler::GenerateAllocationStubForClass(&assembler, cls);
if (thread->IsMutatorThread()) {
stub ^= Code::FinalizeCode(name, nullptr, &assembler, pool_attachment,
@@ -243,9 +244,20 @@
#if !defined(TARGET_ARCH_DBC) && !defined(TARGET_ARCH_IA32)
RawCode* StubCode::GetBuildMethodExtractorStub(ObjectPoolBuilder* pool) {
#if !defined(DART_PRECOMPILED_RUNTIME)
+ auto thread = Thread::Current();
+ auto Z = thread->zone();
+ auto object_store = thread->isolate()->object_store();
+
+ const auto& closure_class =
+ Class::ZoneHandle(Z, object_store->closure_class());
+ const auto& closure_allocation_stub =
+ Code::ZoneHandle(Z, StubCode::GetAllocationStubForClass(closure_class));
+ const auto& context_allocation_stub = StubCode::AllocateContext();
+
ObjectPoolBuilder object_pool_builder;
Assembler assembler(pool != nullptr ? pool : &object_pool_builder);
- StubCode::GenerateBuildMethodExtractorStub(&assembler);
+ compiler::StubCodeCompiler::GenerateBuildMethodExtractorStub(
+ &assembler, closure_allocation_stub, context_allocation_stub);
const char* name = "BuildMethodExtractor";
const Code& stub = Code::Handle(Code::FinalizeCode(
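
Note the new calling convention for GenerateBuildMethodExtractorStub: the runtime resolves the closure and context allocation stubs itself and passes them in, so the stub compiler no longer reaches into the object store. A minimal sketch of this inversion (types and signatures hypothetical):

  struct Code {};

  namespace compiler {
  struct Assembler {};
  // The generator consumes only what the runtime injected; it has no way to
  // reach back into the runtime's object store.
  void GenerateBuildMethodExtractorStub(Assembler* assembler,
                                        const Code& closure_allocation_stub,
                                        const Code& context_allocation_stub) {
    (void)assembler;
    (void)closure_allocation_stub;
    (void)context_allocation_stub;
  }
  }  // namespace compiler
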
diff --git a/runtime/vm/stub_code.h b/runtime/vm/stub_code.h
index aa148e3..2593ca00 100644
--- a/runtime/vm/stub_code.h
+++ b/runtime/vm/stub_code.h
@@ -7,7 +7,10 @@
#include "vm/allocation.h"
#include "vm/compiler/assembler/assembler.h"
+#include "vm/compiler/runtime_api.h"
+#include "vm/compiler/stub_code_compiler.h"
#include "vm/object.h"
+#include "vm/stub_code_list.h"
namespace dart {
@@ -19,100 +22,6 @@
class SnapshotReader;
class SnapshotWriter;
-// List of stubs created in the VM isolate, these stubs are shared by different
-// isolates running in this dart process.
-#if !defined(TARGET_ARCH_DBC)
-#define VM_STUB_CODE_LIST(V) \
- V(GetCStackPointer) \
- V(JumpToFrame) \
- V(RunExceptionHandler) \
- V(DeoptForRewind) \
- V(WriteBarrier) \
- V(WriteBarrierWrappers) \
- V(ArrayWriteBarrier) \
- V(PrintStopMessage) \
- V(AllocateArray) \
- V(AllocateContext) \
- V(CallToRuntime) \
- V(LazyCompile) \
- V(InterpretCall) \
- V(CallBootstrapNative) \
- V(CallNoScopeNative) \
- V(CallAutoScopeNative) \
- V(FixCallersTarget) \
- V(CallStaticFunction) \
- V(OptimizeFunction) \
- V(InvokeDartCode) \
- V(InvokeDartCodeFromBytecode) \
- V(DebugStepCheck) \
- V(UnlinkedCall) \
- V(MonomorphicMiss) \
- V(SingleTargetCall) \
- V(ICCallThroughFunction) \
- V(ICCallThroughCode) \
- V(MegamorphicCall) \
- V(FixAllocationStubTarget) \
- V(Deoptimize) \
- V(DeoptimizeLazyFromReturn) \
- V(DeoptimizeLazyFromThrow) \
- V(UnoptimizedIdenticalWithNumberCheck) \
- V(OptimizedIdenticalWithNumberCheck) \
- V(ICCallBreakpoint) \
- V(RuntimeCallBreakpoint) \
- V(OneArgCheckInlineCache) \
- V(TwoArgsCheckInlineCache) \
- V(SmiAddInlineCache) \
- V(SmiSubInlineCache) \
- V(SmiEqualInlineCache) \
- V(OneArgOptimizedCheckInlineCache) \
- V(TwoArgsOptimizedCheckInlineCache) \
- V(ZeroArgsUnoptimizedStaticCall) \
- V(OneArgUnoptimizedStaticCall) \
- V(TwoArgsUnoptimizedStaticCall) \
- V(Subtype1TestCache) \
- V(Subtype2TestCache) \
- V(Subtype4TestCache) \
- V(Subtype6TestCache) \
- V(DefaultTypeTest) \
- V(TopTypeTypeTest) \
- V(TypeRefTypeTest) \
- V(UnreachableTypeTest) \
- V(SlowTypeTest) \
- V(LazySpecializeTypeTest) \
- V(CallClosureNoSuchMethod) \
- V(FrameAwaitingMaterialization) \
- V(AsynchronousGapMarker) \
- V(NullErrorSharedWithFPURegs) \
- V(NullErrorSharedWithoutFPURegs) \
- V(StackOverflowSharedWithFPURegs) \
- V(StackOverflowSharedWithoutFPURegs) \
- V(OneArgCheckInlineCacheWithExactnessCheck) \
- V(OneArgOptimizedCheckInlineCacheWithExactnessCheck)
-
-#else
-#define VM_STUB_CODE_LIST(V) \
- V(LazyCompile) \
- V(OptimizeFunction) \
- V(CallClosureNoSuchMethod) \
- V(RunExceptionHandler) \
- V(DeoptForRewind) \
- V(FixCallersTarget) \
- V(Deoptimize) \
- V(DeoptimizeLazyFromReturn) \
- V(DeoptimizeLazyFromThrow) \
- V(DefaultTypeTest) \
- V(TopTypeTypeTest) \
- V(TypeRefTypeTest) \
- V(UnreachableTypeTest) \
- V(SlowTypeTest) \
- V(LazySpecializeTypeTest) \
- V(FrameAwaitingMaterialization) \
- V(AsynchronousGapMarker) \
- V(InvokeDartCodeFromBytecode) \
- V(InterpretCall)
-
-#endif // !defined(TARGET_ARCH_DBC)
-
// It is permitted for the stubs above to refer to Object::null(), which is
// allocated in the VM isolate and shared across all isolates.
// However, in cases where a simple GC-safe placeholder is needed on the stack,
@@ -153,9 +62,15 @@
#if !defined(TARGET_ARCH_DBC) && !defined(TARGET_ARCH_IA32)
static RawCode* GetBuildMethodExtractorStub(ObjectPoolBuilder* pool);
- static void GenerateBuildMethodExtractorStub(compiler::Assembler* assembler);
#endif
+ // Generate the stub and finalize the generated code into the stub
+ // code executable area.
+ static RawCode* Generate(
+ const char* name,
+ ObjectPoolBuilder* object_pool_builder,
+ void (*GenerateStub)(compiler::Assembler* assembler));
+
static const Code& UnoptimizedStaticCallEntry(intptr_t num_args_tested);
static const intptr_t kNoInstantiator = 0;
@@ -172,8 +87,9 @@
#if !defined(DART_PRECOMPILED_RUNTIME)
#define GENERATE_STUB(name) \
static RawCode* BuildIsolateSpecific##name##Stub(ObjectPoolBuilder* opw) { \
- return StubCode::Generate("_iso_stub_" #name, opw, \
- StubCode::Generate##name##Stub); \
+ return StubCode::Generate( \
+ "_iso_stub_" #name, opw, \
+ compiler::StubCodeCompiler::Generate##name##Stub); \
}
VM_STUB_CODE_LIST(GENERATE_STUB);
#undef GENERATE_STUB
@@ -190,68 +106,8 @@
};
static Code* entries_[kNumStubEntries];
-
-#if !defined(DART_PRECOMPILED_RUNTIME)
-#define STUB_CODE_GENERATE(name) \
- static void Generate##name##Stub(compiler::Assembler* assembler);
- VM_STUB_CODE_LIST(STUB_CODE_GENERATE)
-#undef STUB_CODE_GENERATE
-
- // Generate the stub and finalize the generated code into the stub
- // code executable area.
- static RawCode* Generate(
- const char* name,
- ObjectPoolBuilder* object_pool_builder,
- void (*GenerateStub)(compiler::Assembler* assembler));
-
- static void GenerateSharedStub(compiler::Assembler* assembler,
- bool save_fpu_registers,
- const RuntimeEntry* target,
- intptr_t self_code_stub_offset_from_thread,
- bool allow_return);
-
- static void GenerateMegamorphicMissStub(compiler::Assembler* assembler);
- static void GenerateAllocationStubForClass(compiler::Assembler* assembler,
- const Class& cls);
- static void GenerateNArgsCheckInlineCacheStub(
- compiler::Assembler* assembler,
- intptr_t num_args,
- const RuntimeEntry& handle_ic_miss,
- Token::Kind kind,
- bool optimized = false,
- bool exactness_check = false);
- static void GenerateUsageCounterIncrement(compiler::Assembler* assembler,
- Register temp_reg);
- static void GenerateOptimizedUsageCounterIncrement(
- compiler::Assembler* assembler);
-#endif // !defined(DART_PRECOMPILED_RUNTIME)
};
-enum DeoptStubKind { kLazyDeoptFromReturn, kLazyDeoptFromThrow, kEagerDeopt };
-
-// Invocation mode for TypeCheck runtime entry that describes
-// where we are calling it from.
-enum TypeCheckMode {
- // TypeCheck is invoked from LazySpecializeTypeTest stub.
- // It should replace stub on the type with a specialized version.
- kTypeCheckFromLazySpecializeStub,
-
- // TypeCheck is invoked from the SlowTypeTest stub.
- // This means that cache can be lazily created (if needed)
- // and dst_name can be fetched from the pool.
- kTypeCheckFromSlowStub,
-
- // TypeCheck is invoked from normal inline AssertAssignable.
- // Both cache and dst_name must be already populated.
- kTypeCheckFromInline
-};
-
-// Zap value used to indicate unused CODE_REG in deopt.
-static const uword kZapCodeReg = 0xf1f1f1f1;
-
-// Zap value used to indicate unused return address in deopt.
-static const uword kZapReturnAddress = 0xe1e1e1e1;
-
} // namespace dart
#endif // RUNTIME_VM_STUB_CODE_H_
diff --git a/runtime/vm/stub_code_dbc.cc b/runtime/vm/stub_code_dbc.cc
deleted file mode 100644
index fa35e41..0000000
--- a/runtime/vm/stub_code_dbc.cc
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright (c) 2016, the Dart project authors. Please see the AUTHORS file
-// for details. All rights reserved. Use of this source code is governed by a
-// BSD-style license that can be found in the LICENSE file.
-
-#include "vm/globals.h"
-#if defined(TARGET_ARCH_DBC)
-
-#include "vm/compiler/assembler/assembler.h"
-#include "vm/compiler/backend/flow_graph_compiler.h"
-#include "vm/compiler/jit/compiler.h"
-#include "vm/cpu.h"
-#include "vm/dart_entry.h"
-#include "vm/heap/heap.h"
-#include "vm/instructions.h"
-#include "vm/object_store.h"
-#include "vm/runtime_entry.h"
-#include "vm/stack_frame.h"
-#include "vm/stub_code.h"
-#include "vm/tags.h"
-
-#define __ assembler->
-
-namespace dart {
-
-DEFINE_FLAG(bool, inline_alloc, true, "Inline allocation of objects.");
-DEFINE_FLAG(bool,
- use_slow_path,
- false,
- "Set to true for debugging & verifying the slow paths.");
-
-void StubCode::GenerateLazyCompileStub(Assembler* assembler) {
- __ Compile();
-}
-
-void StubCode::GenerateCallClosureNoSuchMethodStub(Assembler* assembler) {
- __ NoSuchMethod();
-}
-
-// Not executed, but used as a stack marker when calling
-// DRT_OptimizeInvokedFunction.
-void StubCode::GenerateOptimizeFunctionStub(Assembler* assembler) {
- __ Trap();
-}
-
-// Not executed, but used as a sentinel in Simulator::JumpToFrame.
-void StubCode::GenerateRunExceptionHandlerStub(Assembler* assembler) {
- __ Trap();
-}
-
-void StubCode::GenerateDeoptForRewindStub(Assembler* assembler) {
- __ DeoptRewind();
-}
-
-// TODO(vegorov) Don't generate this stub.
-void StubCode::GenerateFixCallersTargetStub(Assembler* assembler) {
- __ Trap();
-}
-
-// TODO(vegorov) Don't generate these stubs.
-void StubCode::GenerateAllocationStubForClass(Assembler* assembler,
- const Class& cls) {
- __ Trap();
-}
-
-// TODO(vegorov) Don't generate this stub.
-void StubCode::GenerateMegamorphicMissStub(Assembler* assembler) {
- __ Trap();
-}
-
-// These deoptimization stubs are only used to populate stack frames
-// with something meaningful to make sure GC can scan the stack during
-// the last phase of deoptimization which materializes objects.
-void StubCode::GenerateDeoptimizeLazyFromReturnStub(Assembler* assembler) {
- __ Trap();
-}
-
-void StubCode::GenerateDeoptimizeLazyFromThrowStub(Assembler* assembler) {
- __ Trap();
-}
-
-void StubCode::GenerateDeoptimizeStub(Assembler* assembler) {
- __ Trap();
-}
-
-// TODO(kustermann): Don't generate this stub.
-void StubCode::GenerateDefaultTypeTestStub(Assembler* assembler) {
- __ Trap();
-}
-
-// TODO(kustermann): Don't generate this stub.
-void StubCode::GenerateTopTypeTypeTestStub(Assembler* assembler) {
- __ Trap();
-}
-
-// TODO(kustermann): Don't generate this stub.
-void StubCode::GenerateTypeRefTypeTestStub(Assembler* assembler) {
- __ Trap();
-}
-
-// TODO(kustermann): Don't generate this stub.
-void StubCode::GenerateUnreachableTypeTestStub(Assembler* assembler) {
- __ Trap();
-}
-
-// TODO(kustermann): Don't generate this stub.
-void StubCode::GenerateLazySpecializeTypeTestStub(Assembler* assembler) {
- __ Trap();
-}
-
-// TODO(kustermann): Don't generate this stub.
-void StubCode::GenerateSlowTypeTestStub(Assembler* assembler) {
- __ Trap();
-}
-
-void StubCode::GenerateFrameAwaitingMaterializationStub(Assembler* assembler) {
- __ Trap();
-}
-
-void StubCode::GenerateAsynchronousGapMarkerStub(Assembler* assembler) {
- __ Trap();
-}
-
-void StubCode::GenerateInterpretCallStub(Assembler* assembler) {
- __ Trap();
-}
-
-void StubCode::GenerateInvokeDartCodeFromBytecodeStub(Assembler* assembler) {
- __ Trap();
-}
-
-} // namespace dart
-
-#endif // defined TARGET_ARCH_DBC
diff --git a/runtime/vm/stub_code_list.h b/runtime/vm/stub_code_list.h
new file mode 100644
index 0000000..ce9d729
--- /dev/null
+++ b/runtime/vm/stub_code_list.h
@@ -0,0 +1,106 @@
+// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#ifndef RUNTIME_VM_STUB_CODE_LIST_H_
+#define RUNTIME_VM_STUB_CODE_LIST_H_
+
+namespace dart {
+
+// List of stubs created in the VM isolate; these stubs are shared by the
+// different isolates running in this Dart process.
+#if !defined(TARGET_ARCH_DBC)
+#define VM_STUB_CODE_LIST(V) \
+ V(GetCStackPointer) \
+ V(JumpToFrame) \
+ V(RunExceptionHandler) \
+ V(DeoptForRewind) \
+ V(WriteBarrier) \
+ V(WriteBarrierWrappers) \
+ V(ArrayWriteBarrier) \
+ V(PrintStopMessage) \
+ V(AllocateArray) \
+ V(AllocateContext) \
+ V(CallToRuntime) \
+ V(LazyCompile) \
+ V(InterpretCall) \
+ V(CallBootstrapNative) \
+ V(CallNoScopeNative) \
+ V(CallAutoScopeNative) \
+ V(FixCallersTarget) \
+ V(CallStaticFunction) \
+ V(OptimizeFunction) \
+ V(InvokeDartCode) \
+ V(InvokeDartCodeFromBytecode) \
+ V(DebugStepCheck) \
+ V(UnlinkedCall) \
+ V(MonomorphicMiss) \
+ V(SingleTargetCall) \
+ V(ICCallThroughFunction) \
+ V(ICCallThroughCode) \
+ V(MegamorphicCall) \
+ V(FixAllocationStubTarget) \
+ V(Deoptimize) \
+ V(DeoptimizeLazyFromReturn) \
+ V(DeoptimizeLazyFromThrow) \
+ V(UnoptimizedIdenticalWithNumberCheck) \
+ V(OptimizedIdenticalWithNumberCheck) \
+ V(ICCallBreakpoint) \
+ V(RuntimeCallBreakpoint) \
+ V(OneArgCheckInlineCache) \
+ V(TwoArgsCheckInlineCache) \
+ V(SmiAddInlineCache) \
+ V(SmiSubInlineCache) \
+ V(SmiEqualInlineCache) \
+ V(OneArgOptimizedCheckInlineCache) \
+ V(TwoArgsOptimizedCheckInlineCache) \
+ V(ZeroArgsUnoptimizedStaticCall) \
+ V(OneArgUnoptimizedStaticCall) \
+ V(TwoArgsUnoptimizedStaticCall) \
+ V(Subtype1TestCache) \
+ V(Subtype2TestCache) \
+ V(Subtype4TestCache) \
+ V(Subtype6TestCache) \
+ V(DefaultTypeTest) \
+ V(TopTypeTypeTest) \
+ V(TypeRefTypeTest) \
+ V(UnreachableTypeTest) \
+ V(SlowTypeTest) \
+ V(LazySpecializeTypeTest) \
+ V(CallClosureNoSuchMethod) \
+ V(FrameAwaitingMaterialization) \
+ V(AsynchronousGapMarker) \
+ V(NullErrorSharedWithFPURegs) \
+ V(NullErrorSharedWithoutFPURegs) \
+ V(StackOverflowSharedWithFPURegs) \
+ V(StackOverflowSharedWithoutFPURegs) \
+ V(OneArgCheckInlineCacheWithExactnessCheck) \
+ V(OneArgOptimizedCheckInlineCacheWithExactnessCheck)
+
+#else
+#define VM_STUB_CODE_LIST(V) \
+ V(LazyCompile) \
+ V(OptimizeFunction) \
+ V(CallClosureNoSuchMethod) \
+ V(RunExceptionHandler) \
+ V(DeoptForRewind) \
+ V(FixCallersTarget) \
+ V(Deoptimize) \
+ V(DeoptimizeLazyFromReturn) \
+ V(DeoptimizeLazyFromThrow) \
+ V(DefaultTypeTest) \
+ V(TopTypeTypeTest) \
+ V(TypeRefTypeTest) \
+ V(UnreachableTypeTest) \
+ V(SlowTypeTest) \
+ V(LazySpecializeTypeTest) \
+ V(FrameAwaitingMaterialization) \
+ V(AsynchronousGapMarker) \
+ V(InvokeDartCodeFromBytecode) \
+ V(InterpretCall)
+
+#endif // !defined(TARGET_ARCH_DBC)
+
+} // namespace dart
+
+#endif // RUNTIME_VM_STUB_CODE_LIST_H_
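
VM_STUB_CODE_LIST is an X-macro: each consumer supplies its own V to stamp out one declaration, table entry, or generator call per stub name, exactly as the STUB_CODE_GENERATE and GENERATE_STUB macros in stub_code.cc/stub_code.h do. A toy version of the pattern:

  #include <cstdio>

  #define DEMO_STUB_LIST(V)                                                  \
    V(LazyCompile)                                                           \
    V(OptimizeFunction)

  // First expansion: an enum entry per stub.
  enum StubId {
  #define DECLARE_ID(name) k##name##Id,
    DEMO_STUB_LIST(DECLARE_ID)
  #undef DECLARE_ID
    kNumStubs
  };

  // Second expansion: a parallel table of stub names.
  static const char* kStubNames[] = {
  #define DECLARE_NAME(name) "_stub_" #name,
    DEMO_STUB_LIST(DECLARE_NAME)
  #undef DECLARE_NAME
  };

  int main() {
    for (int i = 0; i < kNumStubs; ++i) std::printf("%s\n", kStubNames[i]);
    return 0;
  }
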
diff --git a/runtime/vm/type_testing_stubs_arm.cc b/runtime/vm/type_testing_stubs_arm.cc
new file mode 100644
index 0000000..30f1de07
--- /dev/null
+++ b/runtime/vm/type_testing_stubs_arm.cc
@@ -0,0 +1,66 @@
+// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/globals.h"
+
+#if defined(TARGET_ARCH_ARM) && !defined(DART_PRECOMPILED_RUNTIME)
+
+#include "vm/type_testing_stubs.h"
+
+#define __ assembler->
+
+namespace dart {
+
+void TypeTestingStubGenerator::BuildOptimizedTypeTestStub(
+ Assembler* assembler,
+ HierarchyInfo* hi,
+ const Type& type,
+ const Class& type_class) {
+ const Register kInstanceReg = R0;
+ const Register kClassIdReg = R9;
+
+ BuildOptimizedTypeTestStubFastCases(assembler, hi, type, type_class,
+ kInstanceReg, kClassIdReg);
+
+ __ ldr(CODE_REG, Address(THR, Thread::slow_type_test_stub_offset()));
+ __ Branch(FieldAddress(CODE_REG, Code::entry_point_offset()));
+}
+
+void TypeTestingStubGenerator::
+ BuildOptimizedSubclassRangeCheckWithTypeArguments(Assembler* assembler,
+ HierarchyInfo* hi,
+ const Class& type_class,
+ const TypeArguments& tp,
+ const TypeArguments& ta) {
+ const Register kInstanceReg = R0;
+ const Register kInstanceTypeArguments = NOTFP;
+ const Register kClassIdReg = R9;
+
+ BuildOptimizedSubclassRangeCheckWithTypeArguments(
+ assembler, hi, type_class, tp, ta, kClassIdReg, kInstanceReg,
+ kInstanceTypeArguments);
+}
+
+void TypeTestingStubGenerator::BuildOptimizedTypeArgumentValueCheck(
+ Assembler* assembler,
+ HierarchyInfo* hi,
+ const AbstractType& type_arg,
+ intptr_t type_param_value_offset_i,
+ Label* check_failed) {
+ const Register kInstantiatorTypeArgumentsReg = R2;
+ const Register kFunctionTypeArgumentsReg = R1;
+ const Register kInstanceTypeArguments = NOTFP;
+
+ const Register kClassIdReg = R9;
+ const Register kOwnTypeArgumentValue = TMP;
+
+ BuildOptimizedTypeArgumentValueCheck(
+ assembler, hi, type_arg, type_param_value_offset_i, kClassIdReg,
+ kInstanceTypeArguments, kInstantiatorTypeArgumentsReg,
+ kFunctionTypeArgumentsReg, kOwnTypeArgumentValue, check_failed);
+}
+
+} // namespace dart
+
+#endif // defined(TARGET_ARCH_ARM) && !defined(DART_PRECOMPILED_RUNTIME)
diff --git a/runtime/vm/type_testing_stubs_arm64.cc b/runtime/vm/type_testing_stubs_arm64.cc
new file mode 100644
index 0000000..0f2981e
--- /dev/null
+++ b/runtime/vm/type_testing_stubs_arm64.cc
@@ -0,0 +1,67 @@
+// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/globals.h"
+
+#if defined(TARGET_ARCH_ARM64) && !defined(DART_PRECOMPILED_RUNTIME)
+
+#include "vm/type_testing_stubs.h"
+
+#define __ assembler->
+
+namespace dart {
+
+void TypeTestingStubGenerator::BuildOptimizedTypeTestStub(
+ Assembler* assembler,
+ HierarchyInfo* hi,
+ const Type& type,
+ const Class& type_class) {
+ const Register kInstanceReg = R0;
+ const Register kClassIdReg = R9;
+
+ BuildOptimizedTypeTestStubFastCases(assembler, hi, type, type_class,
+ kInstanceReg, kClassIdReg);
+
+ __ ldr(CODE_REG, Address(THR, Thread::slow_type_test_stub_offset()));
+ __ ldr(R9, FieldAddress(CODE_REG, Code::entry_point_offset()));
+ __ br(R9);
+}
+
+void TypeTestingStubGenerator::
+ BuildOptimizedSubclassRangeCheckWithTypeArguments(Assembler* assembler,
+ HierarchyInfo* hi,
+ const Class& type_class,
+ const TypeArguments& tp,
+ const TypeArguments& ta) {
+ const Register kInstanceReg = R0;
+ const Register kInstanceTypeArguments = R7;
+ const Register kClassIdReg = R9;
+
+ BuildOptimizedSubclassRangeCheckWithTypeArguments(
+ assembler, hi, type_class, tp, ta, kClassIdReg, kInstanceReg,
+ kInstanceTypeArguments);
+}
+
+void TypeTestingStubGenerator::BuildOptimizedTypeArgumentValueCheck(
+ Assembler* assembler,
+ HierarchyInfo* hi,
+ const AbstractType& type_arg,
+ intptr_t type_param_value_offset_i,
+ Label* check_failed) {
+ const Register kInstantiatorTypeArgumentsReg = R1;
+ const Register kFunctionTypeArgumentsReg = R2;
+ const Register kInstanceTypeArguments = R7;
+
+ const Register kClassIdReg = R9;
+ const Register kOwnTypeArgumentValue = TMP;
+
+ BuildOptimizedTypeArgumentValueCheck(
+ assembler, hi, type_arg, type_param_value_offset_i, kClassIdReg,
+ kInstanceTypeArguments, kInstantiatorTypeArgumentsReg,
+ kFunctionTypeArgumentsReg, kOwnTypeArgumentValue, check_failed);
+}
+
+} // namespace dart
+
+#endif // defined(TARGET_ARCH_ARM64) && !defined(DART_PRECOMPILED_RUNTIME)
diff --git a/runtime/vm/type_testing_stubs_x64.cc b/runtime/vm/type_testing_stubs_x64.cc
new file mode 100644
index 0000000..69991de
--- /dev/null
+++ b/runtime/vm/type_testing_stubs_x64.cc
@@ -0,0 +1,66 @@
+// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/globals.h"
+
+#if defined(TARGET_ARCH_X64) && !defined(DART_PRECOMPILED_RUNTIME)
+
+#include "vm/type_testing_stubs.h"
+
+#define __ assembler->
+
+namespace dart {
+
+void TypeTestingStubGenerator::BuildOptimizedTypeTestStub(
+ Assembler* assembler,
+ HierarchyInfo* hi,
+ const Type& type,
+ const Class& type_class) {
+ const Register kInstanceReg = RAX;
+ const Register kClassIdReg = TMP;
+
+ BuildOptimizedTypeTestStubFastCases(assembler, hi, type, type_class,
+ kInstanceReg, kClassIdReg);
+
+ __ movq(CODE_REG, Address(THR, Thread::slow_type_test_stub_offset()));
+ __ jmp(FieldAddress(CODE_REG, Code::entry_point_offset()));
+}
+
+void TypeTestingStubGenerator::
+ BuildOptimizedSubclassRangeCheckWithTypeArguments(Assembler* assembler,
+ HierarchyInfo* hi,
+ const Class& type_class,
+ const TypeArguments& tp,
+ const TypeArguments& ta) {
+ const Register kInstanceReg = RAX;
+ const Register kInstanceTypeArguments = RSI;
+ const Register kClassIdReg = TMP;
+
+ BuildOptimizedSubclassRangeCheckWithTypeArguments(
+ assembler, hi, type_class, tp, ta, kClassIdReg, kInstanceReg,
+ kInstanceTypeArguments);
+}
+
+void TypeTestingStubGenerator::BuildOptimizedTypeArgumentValueCheck(
+ Assembler* assembler,
+ HierarchyInfo* hi,
+ const AbstractType& type_arg,
+ intptr_t type_param_value_offset_i,
+ Label* check_failed) {
+ const Register kInstanceTypeArguments = RSI;
+ const Register kInstantiatorTypeArgumentsReg = RDX;
+ const Register kFunctionTypeArgumentsReg = RCX;
+
+ const Register kClassIdReg = TMP;
+ const Register kOwnTypeArgumentValue = RDI;
+
+ BuildOptimizedTypeArgumentValueCheck(
+ assembler, hi, type_arg, type_param_value_offset_i, kClassIdReg,
+ kInstanceTypeArguments, kInstantiatorTypeArgumentsReg,
+ kFunctionTypeArgumentsReg, kOwnTypeArgumentValue, check_failed);
+}
+
+} // namespace dart
+
+#endif // defined(TARGET_ARCH_X64) && !defined(DART_PRECOMPILED_RUNTIME)
diff --git a/runtime/vm/vm_sources.gni b/runtime/vm/vm_sources.gni
index 9732770..961a8e0 100644
--- a/runtime/vm/vm_sources.gni
+++ b/runtime/vm/vm_sources.gni
@@ -293,13 +293,10 @@
"stack_frame_x64.h",
"stack_trace.cc",
"stack_trace.h",
+ "static_type_exactness_state.h",
"stub_code.cc",
"stub_code.h",
- "stub_code_arm.cc",
- "stub_code_arm64.cc",
- "stub_code_dbc.cc",
- "stub_code_ia32.cc",
- "stub_code_x64.cc",
+ "stub_code_list.h",
"symbols.cc",
"symbols.h",
"tags.cc",
@@ -337,6 +334,9 @@
"token_position.h",
"type_table.h",
"type_testing_stubs.cc",
+ "type_testing_stubs_arm.cc",
+ "type_testing_stubs_arm64.cc",
+ "type_testing_stubs_x64.cc",
"type_testing_stubs.h",
"unibrow-inl.h",
"unibrow.cc",