| // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
| // for details. All rights reserved. Use of this source code is governed by a |
| // BSD-style license that can be found in the LICENSE file. |
| |
| #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM. |
| #if defined(TARGET_ARCH_ARM) |
| |
| #include "vm/flow_graph_compiler.h" |
| |
| #include "vm/ast_printer.h" |
| #include "vm/dart_entry.h" |
| #include "vm/deopt_instructions.h" |
| #include "vm/il_printer.h" |
| #include "vm/locations.h" |
| #include "vm/object_store.h" |
| #include "vm/parser.h" |
| #include "vm/stack_frame.h" |
| #include "vm/stub_code.h" |
| #include "vm/symbols.h" |
| |
| namespace dart { |
| |
| DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization."); |
| DECLARE_FLAG(int, optimization_counter_threshold); |
| DECLARE_FLAG(int, reoptimization_counter_threshold); |
| DECLARE_FLAG(bool, print_ast); |
| DECLARE_FLAG(bool, print_scopes); |
| DECLARE_FLAG(bool, enable_type_checks); |
| DECLARE_FLAG(bool, eliminate_type_checks); |
| |
| |
| FlowGraphCompiler::~FlowGraphCompiler() { |
| // BlockInfos are zone-allocated, so their destructors are not called. |
| // Verify the labels explicitly here. |
| for (int i = 0; i < block_info_.length(); ++i) { |
| ASSERT(!block_info_[i]->jump_label()->IsLinked()); |
| } |
| } |
| |
| |
| bool FlowGraphCompiler::SupportsUnboxedMints() { |
| return false; |
| } |
| |
| |
| // TODO(srdjan): Enable by calling C-functions. |
| bool FlowGraphCompiler::SupportsInlinedTrigonometrics() { |
| return false; |
| } |
| |
| |
| RawDeoptInfo* CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler, |
| DeoptInfoBuilder* builder) { |
| if (deopt_env_ == NULL) return DeoptInfo::null(); |
| |
| intptr_t stack_height = compiler->StackSize(); |
| AllocateIncomingParametersRecursive(deopt_env_, &stack_height); |
| |
| intptr_t slot_ix = 0; |
| Environment* current = deopt_env_; |
| |
| // Emit all kMaterializeObject instructions describing objects to be |
| // materialized during deoptimization, as a prefix to the deoptimization |
| // info. |
| EmitMaterializations(deopt_env_, builder); |
| |
| // The real frame starts here. |
| builder->MarkFrameStart(); |
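| |
| // Roughly, the slots emitted below describe the target frames from the |
| // innermost frame out. Each frame contributes: |
| // |
| // [PP] [caller FP] [return address] [PC marker] [stack slots / locals] |
| // |
| // and the outermost frame additionally records the caller's PP, FP, and |
| // PC, followed by the incoming arguments. |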
| |
| // Current PP, FP, and PC. |
| builder->AddPp(current->function(), slot_ix++); |
| builder->AddCallerFp(slot_ix++); |
| builder->AddReturnAddress(current->function(), deopt_id(), slot_ix++); |
| |
| // The callee's PC marker is not used anymore. Pass a null function handle |
| // to set it to 0. |
| builder->AddPcMarker(Function::Handle(), slot_ix++); |
| |
| // Emit all values that are needed for materialization as a part of the |
| // expression stack for the bottom-most frame. This guarantees that GC |
| // will be able to find them during materialization. |
| slot_ix = builder->EmitMaterializationArguments(slot_ix); |
| |
| // For the innermost environment, set outgoing arguments and the locals. |
| for (intptr_t i = current->Length() - 1; |
| i >= current->fixed_parameter_count(); |
| i--) { |
| builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++); |
| } |
| |
| Environment* previous = current; |
| current = current->outer(); |
| while (current != NULL) { |
| // PP, FP, and PC. |
| builder->AddPp(current->function(), slot_ix++); |
| builder->AddCallerFp(slot_ix++); |
| |
| // For any outer environment, the deopt id is that of the call instruction |
| // recorded in that outer environment. |
| builder->AddReturnAddress(current->function(), |
| Isolate::ToDeoptAfter(current->deopt_id()), |
| slot_ix++); |
| |
| // PC marker. |
| builder->AddPcMarker(previous->function(), slot_ix++); |
| |
| // The values of outgoing arguments can be changed by the inlined call, so |
| // we must read them from the previous environment. |
| for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) { |
| builder->AddCopy(previous->ValueAt(i), |
| previous->LocationAt(i), |
| slot_ix++); |
| } |
| |
| // Set the locals; note that outgoing arguments are not in the environment. |
| for (intptr_t i = current->Length() - 1; |
| i >= current->fixed_parameter_count(); |
| i--) { |
| builder->AddCopy(current->ValueAt(i), |
| current->LocationAt(i), |
| slot_ix++); |
| } |
| |
| // Iterate on the outer environment. |
| previous = current; |
| current = current->outer(); |
| } |
| // The previous pointer is now the outermost environment. |
| ASSERT(previous != NULL); |
| |
| // For the outermost environment, set caller PC, caller PP, and caller FP. |
| builder->AddCallerPp(slot_ix++); |
| builder->AddCallerFp(slot_ix++); |
| builder->AddCallerPc(slot_ix++); |
| |
| // PC marker. |
| builder->AddPcMarker(previous->function(), slot_ix++); |
| |
| // For the outermost environment, set the incoming arguments. |
| for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) { |
| builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++); |
| } |
| |
| const DeoptInfo& deopt_info = DeoptInfo::Handle(builder->CreateDeoptInfo()); |
| return deopt_info.raw(); |
| } |
| |
| |
| void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler, |
| intptr_t stub_ix) { |
| // Calls do not need stubs; they share a deoptimization trampoline. |
| ASSERT(reason() != kDeoptAtCall); |
| Assembler* assem = compiler->assembler(); |
| #define __ assem-> |
| __ Comment("Deopt stub for id %" Pd "", deopt_id()); |
| __ Bind(entry_label()); |
| if (FLAG_trap_on_deoptimization) __ bkpt(0); |
| |
| ASSERT(deopt_env() != NULL); |
| |
| __ BranchLink(&StubCode::DeoptimizeLabel()); |
| set_pc_offset(assem->CodeSize()); |
| __ bkpt(0); // TODO(regis): Remove breakpoint to save space. |
| #undef __ |
| } |
| |
| |
| #define __ assembler()-> |
| |
| |
| // Falls through if bool_register contains null; otherwise jumps to is_true |
| // if it contains Bool::True(), or to is_false for any other value. |
| void FlowGraphCompiler::GenerateBoolToJump(Register bool_register, |
| Label* is_true, |
| Label* is_false) { |
| Label fall_through; |
| __ CompareImmediate(bool_register, |
| reinterpret_cast<intptr_t>(Object::null())); |
| __ b(&fall_through, EQ); |
| __ CompareObject(bool_register, Bool::True()); |
| __ b(is_true, EQ); |
| __ b(is_false); |
| __ Bind(&fall_through); |
| } |
| |
| |
| // R0: instance (must be preserved). |
| // R1: instantiator type arguments (if used). |
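| // The test kind selects how many inputs the SubtypeTestCache lookup uses; |
| // roughly: one (the instance's class), two (plus the instance's type |
| // arguments), or three (plus the instantiator type arguments in R1). |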
| RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub( |
| TypeTestStubKind test_kind, |
| Register instance_reg, |
| Register type_arguments_reg, |
| Register temp_reg, |
| Label* is_instance_lbl, |
| Label* is_not_instance_lbl) { |
| ASSERT(instance_reg == R0); |
| ASSERT(temp_reg == kNoRegister); // Unused on ARM. |
| const SubtypeTestCache& type_test_cache = |
| SubtypeTestCache::ZoneHandle(SubtypeTestCache::New()); |
| __ LoadObject(R2, type_test_cache); |
| if (test_kind == kTestTypeOneArg) { |
| ASSERT(type_arguments_reg == kNoRegister); |
| __ LoadImmediate(R1, reinterpret_cast<intptr_t>(Object::null())); |
| __ BranchLink(&StubCode::Subtype1TestCacheLabel()); |
| } else if (test_kind == kTestTypeTwoArgs) { |
| ASSERT(type_arguments_reg == kNoRegister); |
| __ LoadImmediate(R1, reinterpret_cast<intptr_t>(Object::null())); |
| __ BranchLink(&StubCode::Subtype2TestCacheLabel()); |
| } else if (test_kind == kTestTypeThreeArgs) { |
| ASSERT(type_arguments_reg == R1); |
| __ BranchLink(&StubCode::Subtype3TestCacheLabel()); |
| } else { |
| UNREACHABLE(); |
| } |
| // Result is in R1: null -> not found, otherwise Bool::True or Bool::False. |
| GenerateBoolToJump(R1, is_instance_lbl, is_not_instance_lbl); |
| return type_test_cache.raw(); |
| } |
| |
| |
| // Jumps to the label 'is_instance' or 'is_not_instance', respectively, if |
| // the type test is conclusive; otherwise falls through because the type |
| // test could not be completed inline. |
| // R0: instance being type checked (preserved). |
| // Clobbers R2. |
| RawSubtypeTestCache* |
| FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest( |
| intptr_t token_pos, |
| const AbstractType& type, |
| Label* is_instance_lbl, |
| Label* is_not_instance_lbl) { |
| __ Comment("InstantiatedTypeWithArgumentsTest"); |
| ASSERT(type.IsInstantiated()); |
| const Class& type_class = Class::ZoneHandle(type.type_class()); |
| ASSERT(type_class.HasTypeArguments() || type_class.IsSignatureClass()); |
| const Register kInstanceReg = R0; |
| Error& malformed_error = Error::Handle(); |
| const Type& int_type = Type::Handle(Type::IntType()); |
| const bool smi_is_ok = int_type.IsSubtypeOf(type, &malformed_error); |
| // Malformed type should have been handled at graph construction time. |
| ASSERT(smi_is_ok || malformed_error.IsNull()); |
| __ tst(kInstanceReg, ShifterOperand(kSmiTagMask)); |
| if (smi_is_ok) { |
| __ b(is_instance_lbl, EQ); |
| } else { |
| __ b(is_not_instance_lbl, EQ); |
| } |
| const AbstractTypeArguments& type_arguments = |
| AbstractTypeArguments::ZoneHandle(type.arguments()); |
| const bool is_raw_type = type_arguments.IsNull() || |
| type_arguments.IsRaw(type_arguments.Length()); |
| // Signature class is an instantiated parameterized type. |
| if (!type_class.IsSignatureClass()) { |
| if (is_raw_type) { |
| const Register kClassIdReg = R2; |
| // Dynamic type argument; check only classes. |
| __ LoadClassId(kClassIdReg, kInstanceReg); |
| __ CompareImmediate(kClassIdReg, type_class.id()); |
| __ b(is_instance_lbl, EQ); |
| // List is a very common case. |
| if (IsListClass(type_class)) { |
| GenerateListTypeCheck(kClassIdReg, is_instance_lbl); |
| } |
| return GenerateSubtype1TestCacheLookup( |
| token_pos, type_class, is_instance_lbl, is_not_instance_lbl); |
| } |
| // If there is only one type argument, check if it is Object or dynamic. |
| if (type_arguments.Length() == 1) { |
| const AbstractType& tp_argument = AbstractType::ZoneHandle( |
| type_arguments.TypeAt(0)); |
| ASSERT(!tp_argument.IsMalformed()); |
| if (tp_argument.IsType()) { |
| ASSERT(tp_argument.HasResolvedTypeClass()); |
| // Check if type argument is dynamic or Object. |
| const Type& object_type = Type::Handle(Type::ObjectType()); |
| if (object_type.IsSubtypeOf(tp_argument, NULL)) { |
| // Only an instance class test is necessary. |
| return GenerateSubtype1TestCacheLookup( |
| token_pos, type_class, is_instance_lbl, is_not_instance_lbl); |
| } |
| } |
| } |
| } |
| // Regular subtype test cache involving instance's type arguments. |
| const Register kTypeArgumentsReg = kNoRegister; |
| const Register kTempReg = kNoRegister; |
| // R0: instance (must be preserved). |
| return GenerateCallSubtypeTestStub(kTestTypeTwoArgs, |
| kInstanceReg, |
| kTypeArgumentsReg, |
| kTempReg, |
| is_instance_lbl, |
| is_not_instance_lbl); |
| } |
| |
| |
| void FlowGraphCompiler::CheckClassIds(Register class_id_reg, |
| const GrowableArray<intptr_t>& class_ids, |
| Label* is_equal_lbl, |
| Label* is_not_equal_lbl) { |
| for (intptr_t i = 0; i < class_ids.length(); i++) { |
| __ CompareImmediate(class_id_reg, class_ids[i]); |
| __ b(is_equal_lbl, EQ); |
| } |
| __ b(is_not_equal_lbl); |
| } |
| |
| |
| // Testing against an instantiated type with no arguments, without |
| // SubtypeTestCache. |
| // R0: instance being type checked (preserved). |
| // Clobbers R2, R3. |
| // Returns true if there is a fallthrough. |
| bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest( |
| intptr_t token_pos, |
| const AbstractType& type, |
| Label* is_instance_lbl, |
| Label* is_not_instance_lbl) { |
| __ Comment("InstantiatedTypeNoArgumentsTest"); |
| ASSERT(type.IsInstantiated()); |
| const Class& type_class = Class::Handle(type.type_class()); |
| ASSERT(!type_class.HasTypeArguments()); |
| |
| const Register kInstanceReg = R0; |
| __ tst(kInstanceReg, ShifterOperand(kSmiTagMask)); |
| // If instance is Smi, check directly. |
| const Class& smi_class = Class::Handle(Smi::Class()); |
| if (smi_class.IsSubtypeOf(TypeArguments::Handle(), |
| type_class, |
| TypeArguments::Handle(), |
| NULL)) { |
| __ b(is_instance_lbl, EQ); |
| } else { |
| __ b(is_not_instance_lbl, EQ); |
| } |
| // Compare if the classes are equal. |
| const Register kClassIdReg = R2; |
| __ LoadClassId(kClassIdReg, kInstanceReg); |
| __ CompareImmediate(kClassIdReg, type_class.id()); |
| __ b(is_instance_lbl, EQ); |
| // See ClassFinalizer::ResolveSuperTypeAndInterfaces for list of restricted |
| // interfaces. |
| // The Bool interface can be implemented only by the core class Bool. |
| if (type.IsBoolType()) { |
| __ CompareImmediate(kClassIdReg, kBoolCid); |
| __ b(is_instance_lbl, EQ); |
| __ b(is_not_instance_lbl); |
| return false; |
| } |
| if (type.IsFunctionType()) { |
| // Check if instance is a closure. |
| __ LoadClassById(R3, kClassIdReg); |
| __ ldr(R3, FieldAddress(R3, Class::signature_function_offset())); |
| __ CompareImmediate(R3, reinterpret_cast<int32_t>(Object::null())); |
| __ b(is_instance_lbl, NE); |
| } |
| // Custom checking for numbers (Smi, Mint, Bigint and Double). |
| // Note that instance is not Smi (checked above). |
| if (type.IsSubtypeOf(Type::Handle(Type::Number()), NULL)) { |
| GenerateNumberTypeCheck( |
| kClassIdReg, type, is_instance_lbl, is_not_instance_lbl); |
| return false; |
| } |
| if (type.IsStringType()) { |
| GenerateStringTypeCheck(kClassIdReg, is_instance_lbl, is_not_instance_lbl); |
| return false; |
| } |
| // Otherwise fallthrough. |
| return true; |
| } |
| |
| |
| // Uses SubtypeTestCache to store instance class and result. |
| // R0: instance to test. |
| // Clobbers R1-R5. |
| // Immediate class test already done. |
| // TODO(srdjan): Implement a quicker subtype check, as type test |
| // arrays can grow too large, but they may be useful when optimizing |
| // code (type feedback). |
| RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup( |
| intptr_t token_pos, |
| const Class& type_class, |
| Label* is_instance_lbl, |
| Label* is_not_instance_lbl) { |
| __ Comment("Subtype1TestCacheLookup"); |
| const Register kInstanceReg = R0; |
| __ LoadClass(R1, kInstanceReg, R2); |
| // R1: instance class. |
| // Check immediate superclass equality. |
| __ ldr(R2, FieldAddress(R1, Class::super_type_offset())); |
| __ ldr(R2, FieldAddress(R2, Type::type_class_offset())); |
| __ CompareObject(R2, type_class); |
| __ b(is_instance_lbl, EQ); |
| |
| const Register kTypeArgumentsReg = kNoRegister; |
| const Register kTempReg = kNoRegister; |
| return GenerateCallSubtypeTestStub(kTestTypeOneArg, |
| kInstanceReg, |
| kTypeArgumentsReg, |
| kTempReg, |
| is_instance_lbl, |
| is_not_instance_lbl); |
| } |
| |
| |
| // Generates an inlined check if 'type' is a type parameter or the type |
| // itself. |
| // R0: instance (preserved). |
| RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest( |
| intptr_t token_pos, |
| const AbstractType& type, |
| Label* is_instance_lbl, |
| Label* is_not_instance_lbl) { |
| __ Comment("UninstantiatedTypeTest"); |
| ASSERT(!type.IsInstantiated()); |
| // Skip check if destination is a dynamic type. |
| if (type.IsTypeParameter()) { |
| const TypeParameter& type_param = TypeParameter::Cast(type); |
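| // A type parameter is handled by loading the type at its index from the |
| // instantiator type arguments and comparing it inline against the common |
| // cases (dynamic, null, Object, and, for a Smi instance, int and num) |
| // before falling back to the three-argument subtype test cache. |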
| // Load instantiator (or null) and instantiator type arguments on stack. |
| __ ldr(R1, Address(SP, 0)); // Get instantiator type arguments. |
| // R1: instantiator type arguments. |
| // A null instantiator type argument vector instantiates the type |
| // parameter to dynamic. |
| __ CompareImmediate(R1, reinterpret_cast<intptr_t>(Object::null())); |
| __ b(is_instance_lbl, EQ); |
| // Can handle only type arguments that are instances of TypeArguments |
| // (runtime checks canonicalize type arguments). |
| Label fall_through; |
| __ CompareClassId(R1, kTypeArgumentsCid, R2); |
| __ b(&fall_through, NE); |
| __ ldr(R2, |
| FieldAddress(R1, TypeArguments::type_at_offset(type_param.index()))); |
| // R2: concrete type of the type parameter. |
| // Check if type argument is dynamic. |
| __ CompareObject(R2, Type::ZoneHandle(Type::DynamicType())); |
| __ b(is_instance_lbl, EQ); |
| __ CompareImmediate(R2, reinterpret_cast<intptr_t>(Object::null())); |
| __ b(is_instance_lbl, EQ); |
| const Type& object_type = Type::ZoneHandle(Type::ObjectType()); |
| __ CompareObject(R2, object_type); |
| __ b(is_instance_lbl, EQ); |
| |
| // For a Smi, check quickly against the int and num interfaces. |
| Label not_smi; |
| __ tst(R0, ShifterOperand(kSmiTagMask)); // Value is Smi? |
| __ b(¬_smi, NE); |
| __ CompareObject(R2, Type::ZoneHandle(Type::IntType())); |
| __ b(is_instance_lbl, EQ); |
| __ CompareObject(R2, Type::ZoneHandle(Type::Number())); |
| __ b(is_instance_lbl, EQ); |
| // A Smi must be handled in the runtime. |
| __ b(&fall_through); |
| |
| __ Bind(¬_smi); |
| // R1: instantiator type arguments. |
| // R0: instance. |
| const Register kInstanceReg = R0; |
| const Register kTypeArgumentsReg = R1; |
| const Register kTempReg = kNoRegister; |
| const SubtypeTestCache& type_test_cache = |
| SubtypeTestCache::ZoneHandle( |
| GenerateCallSubtypeTestStub(kTestTypeThreeArgs, |
| kInstanceReg, |
| kTypeArgumentsReg, |
| kTempReg, |
| is_instance_lbl, |
| is_not_instance_lbl)); |
| __ Bind(&fall_through); |
| return type_test_cache.raw(); |
| } |
| if (type.IsType()) { |
| const Register kInstanceReg = R0; |
| const Register kTypeArgumentsReg = R1; |
| __ tst(kInstanceReg, ShifterOperand(kSmiTagMask)); // Is instance Smi? |
| __ b(is_not_instance_lbl, EQ); |
| __ ldr(kTypeArgumentsReg, Address(SP, 0)); // Instantiator type args. |
| // Uninstantiated type class is known at compile time, but the type |
| // arguments are determined at runtime by the instantiator. |
| const Register kTempReg = kNoRegister; |
| return GenerateCallSubtypeTestStub(kTestTypeThreeArgs, |
| kInstanceReg, |
| kTypeArgumentsReg, |
| kTempReg, |
| is_instance_lbl, |
| is_not_instance_lbl); |
| } |
| return SubtypeTestCache::null(); |
| } |
| |
| |
| // Inputs: |
| // - R0: instance being type checked (preserved). |
| // - R1: optional instantiator type arguments (preserved). |
| // Clobbers R2, R3. |
| // Returns: |
| // - preserved instance in R0 and optional instantiator type arguments in R1. |
| // Note that this inlined code must be followed by the runtime_call code, as it |
| // may fall through to it. Otherwise, this inline code will jump to the label |
| // is_instance or to the label is_not_instance. |
| RawSubtypeTestCache* FlowGraphCompiler::GenerateInlineInstanceof( |
| intptr_t token_pos, |
| const AbstractType& type, |
| Label* is_instance_lbl, |
| Label* is_not_instance_lbl) { |
| __ Comment("InlineInstanceof"); |
| if (type.IsVoidType()) { |
| // A non-null value is returned from a void function, which will result in a |
| // type error. A null value is handled prior to executing this inline code. |
| return SubtypeTestCache::null(); |
| } |
| if (TypeCheckAsClassEquality(type)) { |
| const intptr_t type_cid = Class::Handle(type.type_class()).id(); |
| const Register kInstanceReg = R0; |
| __ tst(kInstanceReg, ShifterOperand(kSmiTagMask)); |
| if (type_cid == kSmiCid) { |
| __ b(is_instance_lbl, EQ); |
| } else { |
| __ b(is_not_instance_lbl, EQ); |
| __ CompareClassId(kInstanceReg, type_cid, R3); |
| __ b(is_instance_lbl, EQ); |
| } |
| __ b(is_not_instance_lbl); |
| return SubtypeTestCache::null(); |
| } |
| if (type.IsInstantiated()) { |
| const Class& type_class = Class::ZoneHandle(type.type_class()); |
| // A class equality check is only applicable with a dst type that is a |
| // non-parameterized, non-signature class, or with a raw dst type of a |
| // parameterized class. |
| if (type_class.IsSignatureClass() || type_class.HasTypeArguments()) { |
| return GenerateInstantiatedTypeWithArgumentsTest(token_pos, |
| type, |
| is_instance_lbl, |
| is_not_instance_lbl); |
| // Fall through to runtime call. |
| } |
| const bool has_fall_through = |
| GenerateInstantiatedTypeNoArgumentsTest(token_pos, |
| type, |
| is_instance_lbl, |
| is_not_instance_lbl); |
| if (has_fall_through) { |
| // If the test is non-conclusive so far, try the inlined type-test cache. |
| // 'type' is known at compile time. |
| return GenerateSubtype1TestCacheLookup( |
| token_pos, type_class, is_instance_lbl, is_not_instance_lbl); |
| } else { |
| return SubtypeTestCache::null(); |
| } |
| } |
| return GenerateUninstantiatedTypeTest(token_pos, |
| type, |
| is_instance_lbl, |
| is_not_instance_lbl); |
| } |
| |
| |
| // If the instanceof type test cannot be performed successfully at compile |
| // time and therefore eliminated, optimize it by adding inlined tests for: |
| // - NULL -> return false. |
| // - Smi -> compile time subtype check (only if dst class is not parameterized). |
| // - Class equality (only if class is not parameterized). |
| // Inputs: |
| // - R0: object. |
| // - R1: instantiator type arguments or raw_null. |
| // - R2: instantiator or raw_null. |
| // Returns: |
| // - true or false in R0. |
| void FlowGraphCompiler::GenerateInstanceOf(intptr_t token_pos, |
| intptr_t deopt_id, |
| const AbstractType& type, |
| bool negate_result, |
| LocationSummary* locs) { |
| ASSERT(type.IsFinalized() && !type.IsMalformed()); |
| |
| // Preserve instantiator (R2) and its type arguments (R1). |
| __ PushList((1 << R1) | (1 << R2)); |
| |
| Label is_instance, is_not_instance; |
| // If type is instantiated and non-parameterized, we can inline code |
| // checking whether the tested instance is a Smi. |
| if (type.IsInstantiated()) { |
| // A null object is only an instance of Object and dynamic, which has |
| // already been checked above (if the type is instantiated). So we can |
| // return false here if the instance is null (and if the type is |
| // instantiated). |
| // We can only inline this null check if the type is instantiated at compile |
| // time, since an uninstantiated type at compile time could be Object or |
| // dynamic at run time. |
| __ CompareImmediate(R0, reinterpret_cast<int32_t>(Object::null())); |
| __ b(&is_not_instance, EQ); |
| } |
| |
| // Generate inline instanceof test. |
| SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle(); |
| test_cache = GenerateInlineInstanceof(token_pos, type, |
| &is_instance, &is_not_instance); |
| |
| // test_cache is null if there is no fall-through. |
| Label done; |
| if (!test_cache.IsNull()) { |
| // Generate runtime call. |
| // Load instantiator (R2) and its type arguments (R1). |
| __ ldm(IA, SP, (1 << R1) | (1 << R2)); |
| __ PushObject(Object::ZoneHandle()); // Make room for the result. |
| __ Push(R0); // Push the instance. |
| __ PushObject(type); // Push the type. |
| // Push instantiator (R2) and its type arguments (R1). |
| __ PushList((1 << R1) | (1 << R2)); |
| __ LoadObject(R0, test_cache); |
| __ Push(R0); |
| GenerateCallRuntime(token_pos, deopt_id, kInstanceofRuntimeEntry, locs); |
| // Pop the parameters supplied to the runtime entry. The result of the |
| // instanceof runtime call will be left as the result of the operation. |
| __ Drop(5); |
| if (negate_result) { |
| __ Pop(R1); |
| __ LoadObject(R0, Bool::True()); |
| __ cmp(R1, ShifterOperand(R0)); |
| __ b(&done, NE); |
| __ LoadObject(R0, Bool::False()); |
| } else { |
| __ Pop(R0); |
| } |
| __ b(&done); |
| } |
| __ Bind(&is_not_instance); |
| __ LoadObject(R0, negate_result ? Bool::True() : Bool::False()); |
| __ b(&done); |
| |
| __ Bind(&is_instance); |
| __ LoadObject(R0, negate_result ? Bool::False() : Bool::True()); |
| __ Bind(&done); |
| // Remove instantiator (R2) and its type arguments (R1). |
| __ Drop(2); |
| } |
| |
| |
| // Optimize assignable type check by adding inlined tests for: |
| // - NULL -> return NULL. |
| // - Smi -> compile time subtype check (only if dst class is not parameterized). |
| // - Class equality (only if class is not parameterized). |
| // Inputs: |
| // - R0: instance being type checked. |
| // - R1: instantiator type arguments or raw_null. |
| // - R2: instantiator or raw_null. |
| // Returns: |
| // - object in R0 for successful assignable check (or throws TypeError). |
| // Performance notes: positive checks must be quick, negative checks can be slow |
| // as they throw an exception. |
| void FlowGraphCompiler::GenerateAssertAssignable(intptr_t token_pos, |
| intptr_t deopt_id, |
| const AbstractType& dst_type, |
| const String& dst_name, |
| LocationSummary* locs) { |
| ASSERT(token_pos >= 0); |
| ASSERT(!dst_type.IsNull()); |
| ASSERT(dst_type.IsFinalized()); |
| // Assignable check is skipped in FlowGraphBuilder, not here. |
| ASSERT(dst_type.IsMalformed() || |
| (!dst_type.IsDynamicType() && !dst_type.IsObjectType())); |
| // Preserve instantiator (R2) and its type arguments (R1). |
| __ PushList((1 << R1) | (1 << R2)); |
| // A null object is always assignable and is returned as result. |
| Label is_assignable, runtime_call; |
| __ CompareImmediate(R0, reinterpret_cast<int32_t>(Object::null())); |
| __ b(&is_assignable, EQ); |
| |
| if (!FLAG_eliminate_type_checks || dst_type.IsMalformed()) { |
| // If type checks are not eliminated during graph building, then a |
| // transition sentinel can be seen here. |
| __ CompareObject(R0, Object::transition_sentinel()); |
| __ b(&is_assignable, EQ); |
| } |
| |
| // Generate throw new TypeError() if the type is malformed. |
| if (dst_type.IsMalformed()) { |
| const Error& error = Error::Handle(dst_type.malformed_error()); |
| const String& error_message = String::ZoneHandle( |
| Symbols::New(error.ToErrorCString())); |
| __ PushObject(Object::ZoneHandle()); // Make room for the result. |
| __ Push(R0); // Push the source object. |
| __ PushObject(dst_name); // Push the name of the destination. |
| __ PushObject(error_message); |
| GenerateCallRuntime(token_pos, |
| deopt_id, |
| kMalformedTypeErrorRuntimeEntry, |
| locs); |
| // We should never return here. |
| __ bkpt(0); |
| |
| __ Bind(&is_assignable); // For a null object. |
| // Restore instantiator (R2) and its type arguments (R1). |
| __ PopList((1 << R1) | (1 << R2)); |
| return; |
| } |
| |
| // Generate inline type check, linking to runtime call if not assignable. |
| SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle(); |
| test_cache = GenerateInlineInstanceof(token_pos, dst_type, |
| &is_assignable, &runtime_call); |
| |
| __ Bind(&runtime_call); |
| // Load instantiator (R2) and its type arguments (R1). |
| __ ldm(IA, SP, (1 << R1) | (1 << R2)); |
| __ PushObject(Object::ZoneHandle()); // Make room for the result. |
| __ Push(R0); // Push the source object. |
| __ PushObject(dst_type); // Push the type of the destination. |
| // Push instantiator (R2) and its type arguments (R1). |
| __ PushList((1 << R1) | (1 << R2)); |
| __ PushObject(dst_name); // Push the name of the destination. |
| __ LoadObject(R0, test_cache); |
| __ Push(R0); |
| GenerateCallRuntime(token_pos, deopt_id, kTypeCheckRuntimeEntry, locs); |
| // Pop the parameters supplied to the runtime entry. The result of the |
| // type check runtime call is the checked value. |
| __ Drop(6); |
| __ Pop(R0); |
| |
| __ Bind(&is_assignable); |
| // Restore instantiator (R2) and its type arguments (R1). |
| __ PopList((1 << R1) | (1 << R2)); |
| } |
| |
| |
| void FlowGraphCompiler::EmitInstructionPrologue(Instruction* instr) { |
| if (!is_optimizing()) { |
| if (FLAG_enable_type_checks && instr->IsAssertAssignable()) { |
| AssertAssignableInstr* assert = instr->AsAssertAssignable(); |
| AddCurrentDescriptor(PcDescriptors::kDeopt, |
| assert->deopt_id(), |
| assert->token_pos()); |
| } else if (instr->IsGuardField() || |
| instr->CanBecomeDeoptimizationTarget()) { |
| AddCurrentDescriptor(PcDescriptors::kDeopt, |
| instr->deopt_id(), |
| Scanner::kDummyTokenIndex); |
| } |
| AllocateRegistersLocally(instr); |
| } else if (instr->MayThrow() && |
| (CurrentTryIndex() != CatchClauseNode::kInvalidTryIndex)) { |
| // Optimized try-block: Sync locals to fixed stack locations. |
| EmitTrySync(instr, CurrentTryIndex()); |
| } |
| } |
| |
| |
| void FlowGraphCompiler::EmitTrySyncMove(intptr_t dest_offset, |
| Location loc, |
| bool* push_emitted) { |
| if (loc.IsConstant()) { |
| if (!*push_emitted) { |
| __ Push(R0); |
| *push_emitted = true; |
| } |
| __ LoadObject(R0, loc.constant()); |
| __ StoreToOffset(kWord, R0, FP, dest_offset); |
| } else if (loc.IsRegister()) { |
| if (*push_emitted && (loc.reg() == R0)) { |
| __ ldr(R0, Address(SP, 0)); |
| __ StoreToOffset(kWord, R0, FP, dest_offset); |
| } else { |
| __ StoreToOffset(kWord, loc.reg(), FP, dest_offset); |
| } |
| } else { |
| const intptr_t src_offset = loc.ToStackSlotOffset(); |
| if (src_offset != dest_offset) { |
| if (!*push_emitted) { |
| __ Push(R0); |
| *push_emitted = true; |
| } |
| __ LoadFromOffset(kWord, R0, FP, src_offset); |
| __ StoreToOffset(kWord, R0, FP, dest_offset); |
| } |
| } |
| } |
| |
| |
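| // Copies the current values of the parameters and locals described by the |
| // instruction's environment into the fixed stack slots expected by the |
| // corresponding catch entry, so that an exception thrown by 'instr' finds |
| // each variable in its home location. |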
| void FlowGraphCompiler::EmitTrySync(Instruction* instr, intptr_t try_index) { |
| ASSERT(is_optimizing()); |
| Environment* env = instr->env(); |
| CatchBlockEntryInstr* catch_block = |
| flow_graph().graph_entry()->GetCatchEntry(try_index); |
| const GrowableArray<Definition*>* idefs = catch_block->initial_definitions(); |
| // Parameters. |
| intptr_t i = 0; |
| bool push_emitted = false; |
| const intptr_t num_non_copied_params = flow_graph().num_non_copied_params(); |
| const intptr_t param_base = |
| kParamEndSlotFromFp + num_non_copied_params; |
| for (; i < num_non_copied_params; ++i) { |
| if ((*idefs)[i]->IsConstant()) continue; // Common constants. |
| Location loc = env->LocationAt(i); |
| EmitTrySyncMove((param_base - i) * kWordSize, loc, &push_emitted); |
| } |
| |
| // Process locals. Skip exception_var and stacktrace_var. |
| intptr_t local_base = kFirstLocalSlotFromFp + num_non_copied_params; |
| intptr_t ex_idx = local_base - catch_block->exception_var().index(); |
| intptr_t st_idx = local_base - catch_block->stacktrace_var().index(); |
| for (; i < flow_graph().variable_count(); ++i) { |
| if (i == ex_idx || i == st_idx) continue; |
| if ((*idefs)[i]->IsConstant()) continue; |
| Location loc = env->LocationAt(i); |
| EmitTrySyncMove((local_base - i) * kWordSize, loc, &push_emitted); |
| // Update safepoint bitmap to indicate that the target location |
| // now contains a pointer. |
| instr->locs()->stack_bitmap()->Set(i - num_non_copied_params, true); |
| } |
| if (push_emitted) { |
| __ Pop(R0); |
| } |
| } |
| |
| |
| void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) { |
| if (is_optimizing()) return; |
| Definition* defn = instr->AsDefinition(); |
| if ((defn != NULL) && defn->is_used()) { |
| __ Push(defn->locs()->out().reg()); |
| } |
| } |
| |
| |
| // Input parameters: |
| // R4: arguments descriptor array. |
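| // Copies the passed-in positional arguments into their stack slots and |
| // initializes any remaining optional (positional or named) parameters |
| // from their default values, driven by the arguments descriptor in R4. |
| // On an argument mismatch, a closure call falls back to noSuchMethod. |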
| void FlowGraphCompiler::CopyParameters() { |
| __ Comment("Copy parameters"); |
| const Function& function = parsed_function().function(); |
| LocalScope* scope = parsed_function().node_sequence()->scope(); |
| const int num_fixed_params = function.num_fixed_parameters(); |
| const int num_opt_pos_params = function.NumOptionalPositionalParameters(); |
| const int num_opt_named_params = function.NumOptionalNamedParameters(); |
| const int num_params = |
| num_fixed_params + num_opt_pos_params + num_opt_named_params; |
| ASSERT(function.NumParameters() == num_params); |
| ASSERT(parsed_function().first_parameter_index() == kFirstLocalSlotFromFp); |
| |
| // Check that min_num_pos_args <= num_pos_args <= max_num_pos_args, |
| // where num_pos_args is the number of positional arguments passed in. |
| const int min_num_pos_args = num_fixed_params; |
| const int max_num_pos_args = num_fixed_params + num_opt_pos_params; |
| |
| __ ldr(R8, FieldAddress(R4, ArgumentsDescriptor::positional_count_offset())); |
| // Check that min_num_pos_args <= num_pos_args. |
| Label wrong_num_arguments; |
| __ CompareImmediate(R8, Smi::RawValue(min_num_pos_args)); |
| __ b(&wrong_num_arguments, LT); |
| // Check that num_pos_args <= max_num_pos_args. |
| __ CompareImmediate(R8, Smi::RawValue(max_num_pos_args)); |
| __ b(&wrong_num_arguments, GT); |
| |
| // Copy positional arguments. |
| // Argument i passed at fp[kParamEndSlotFromFp + num_args - i] is copied |
| // to fp[kFirstLocalSlotFromFp - i]. |
| |
| __ ldr(R7, FieldAddress(R4, ArgumentsDescriptor::count_offset())); |
| // Since R7 and R8 are Smi, use LSL 1 instead of LSL 2. |
| // Let R7 point to the last passed positional argument, i.e. to |
| // fp[kParamEndSlotFromFp + num_args - (num_pos_args - 1)]. |
| __ sub(R7, R7, ShifterOperand(R8)); |
| __ add(R7, FP, ShifterOperand(R7, LSL, 1)); |
| __ add(R7, R7, ShifterOperand((kParamEndSlotFromFp + 1) * kWordSize)); |
| |
| // Let R6 point to the last copied positional argument, i.e. to |
| // fp[kFirstLocalSlotFromFp - (num_pos_args - 1)]. |
| __ AddImmediate(R6, FP, (kFirstLocalSlotFromFp + 1) * kWordSize); |
| __ sub(R6, R6, ShifterOperand(R8, LSL, 1)); // R8 is a Smi. |
| __ SmiUntag(R8); |
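| // Copy the positional arguments one word at a time, counting the untagged |
| // argument count in R8 down through zero (the PL branch below keeps |
| // looping while R8 is still non-negative). |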
| Label loop, loop_condition; |
| __ b(&loop_condition); |
| // We do not use the final allocation index of the variable here, i.e. |
| // scope->VariableAt(i)->index(), because captured variables still need |
| // to be copied to the context that is not yet allocated. |
| const Address argument_addr(R7, R8, LSL, 2); |
| const Address copy_addr(R6, R8, LSL, 2); |
| __ Bind(&loop); |
| __ ldr(IP, argument_addr); |
| __ str(IP, copy_addr); |
| __ Bind(&loop_condition); |
| __ subs(R8, R8, ShifterOperand(1)); |
| __ b(&loop, PL); |
| |
| // Copy or initialize optional named arguments. |
| Label all_arguments_processed; |
| #ifdef DEBUG |
| const bool check_correct_named_args = true; |
| #else |
| const bool check_correct_named_args = function.IsClosureFunction(); |
| #endif |
| if (num_opt_named_params > 0) { |
| // Start by alphabetically sorting the names of the optional parameters. |
| LocalVariable** opt_param = new LocalVariable*[num_opt_named_params]; |
| int* opt_param_position = new int[num_opt_named_params]; |
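| // Insertion sort: each new parameter name is slid backwards past any |
| // previously inserted names that compare greater, keeping opt_param (and |
| // the parallel opt_param_position array) sorted by name. |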
| for (int pos = num_fixed_params; pos < num_params; pos++) { |
| LocalVariable* parameter = scope->VariableAt(pos); |
| const String& opt_param_name = parameter->name(); |
| int i = pos - num_fixed_params; |
| while (--i >= 0) { |
| LocalVariable* param_i = opt_param[i]; |
| const intptr_t result = opt_param_name.CompareTo(param_i->name()); |
| ASSERT(result != 0); |
| if (result > 0) break; |
| opt_param[i + 1] = opt_param[i]; |
| opt_param_position[i + 1] = opt_param_position[i]; |
| } |
| opt_param[i + 1] = parameter; |
| opt_param_position[i + 1] = pos; |
| } |
| // Generate code handling each optional parameter in alphabetical order. |
| __ ldr(R7, FieldAddress(R4, ArgumentsDescriptor::count_offset())); |
| __ ldr(R8, |
| FieldAddress(R4, ArgumentsDescriptor::positional_count_offset())); |
| __ SmiUntag(R8); |
| // Let R7 point to the first passed argument, i.e. to |
| // fp[kParamEndSlotFromFp + num_args - 0]; num_args (R7) is Smi. |
| __ add(R7, FP, ShifterOperand(R7, LSL, 1)); |
| __ AddImmediate(R7, R7, kParamEndSlotFromFp * kWordSize); |
| // Let R6 point to the entry of the first named argument. |
| __ add(R6, R4, ShifterOperand( |
| ArgumentsDescriptor::first_named_entry_offset() - kHeapObjectTag)); |
| for (int i = 0; i < num_opt_named_params; i++) { |
| Label load_default_value, assign_optional_parameter; |
| const int param_pos = opt_param_position[i]; |
| // Check if this named parameter was passed in. |
| // Load R5 with the name of the argument. |
| __ ldr(R5, Address(R6, ArgumentsDescriptor::name_offset())); |
| ASSERT(opt_param[i]->name().IsSymbol()); |
| __ CompareObject(R5, opt_param[i]->name()); |
| __ b(&load_default_value, NE); |
| // Load R5 with passed-in argument at provided arg_pos, i.e. at |
| // fp[kParamEndSlotFromFp + num_args - arg_pos]. |
| __ ldr(R5, Address(R6, ArgumentsDescriptor::position_offset())); |
| // R5 is arg_pos as Smi. |
| // Point to next named entry. |
| __ add(R6, R6, ShifterOperand(ArgumentsDescriptor::named_entry_size())); |
| __ rsb(R5, R5, ShifterOperand(0)); |
| Address argument_addr(R7, R5, LSL, 1); // R5 is a negative Smi. |
| __ ldr(R5, argument_addr); |
| __ b(&assign_optional_parameter); |
| __ Bind(&load_default_value); |
| // Load R5 with default argument. |
| const Object& value = Object::ZoneHandle( |
| parsed_function().default_parameter_values().At( |
| param_pos - num_fixed_params)); |
| __ LoadObject(R5, value); |
| __ Bind(&assign_optional_parameter); |
| // Assign R5 to fp[kFirstLocalSlotFromFp - param_pos]. |
| // We do not use the final allocation index of the variable here, i.e. |
| // scope->VariableAt(i)->index(), because captured variables still need |
| // to be copied to the context that is not yet allocated. |
| const intptr_t computed_param_pos = kFirstLocalSlotFromFp - param_pos; |
| const Address param_addr(FP, computed_param_pos * kWordSize); |
| __ str(R5, param_addr); |
| } |
| delete[] opt_param; |
| delete[] opt_param_position; |
| if (check_correct_named_args) { |
| // Check that R6 now points to the null terminator in the arguments |
| // descriptor. |
| __ ldr(R5, Address(R6, 0)); |
| __ CompareImmediate(R5, reinterpret_cast<int32_t>(Object::null())); |
| __ b(&all_arguments_processed, EQ); |
| } |
| } else { |
| ASSERT(num_opt_pos_params > 0); |
| __ ldr(R8, |
| FieldAddress(R4, ArgumentsDescriptor::positional_count_offset())); |
| __ SmiUntag(R8); |
| for (int i = 0; i < num_opt_pos_params; i++) { |
| Label next_parameter; |
| // Handle this optional positional parameter only if k or fewer positional |
| // arguments have been passed, where k is param_pos, the position of this |
| // optional parameter in the formal parameter list. |
| const int param_pos = num_fixed_params + i; |
| __ CompareImmediate(R8, param_pos); |
| __ b(&next_parameter, GT); |
| // Load R5 with default argument. |
| const Object& value = Object::ZoneHandle( |
| parsed_function().default_parameter_values().At(i)); |
| __ LoadObject(R5, value); |
| // Assign R5 to fp[kFirstLocalSlotFromFp - param_pos]. |
| // We do not use the final allocation index of the variable here, i.e. |
| // scope->VariableAt(i)->index(), because captured variables still need |
| // to be copied to the context that is not yet allocated. |
| const intptr_t computed_param_pos = kFirstLocalSlotFromFp - param_pos; |
| const Address param_addr(FP, computed_param_pos * kWordSize); |
| __ str(R5, param_addr); |
| __ Bind(&next_parameter); |
| } |
| if (check_correct_named_args) { |
| __ ldr(R7, FieldAddress(R4, ArgumentsDescriptor::count_offset())); |
| __ SmiUntag(R7); |
| // Check that R8 equals R7, i.e. no named arguments passed. |
| __ cmp(R8, ShifterOperand(R7)); |
| __ b(&all_arguments_processed, EQ); |
| } |
| } |
| |
| __ Bind(&wrong_num_arguments); |
| if (function.IsClosureFunction()) { |
| // Invoke noSuchMethod function passing "call" as the original name. |
| const int kNumArgsChecked = 1; |
| const ICData& ic_data = ICData::ZoneHandle( |
| ICData::New(function, Symbols::Call(), Object::empty_array(), |
| Isolate::kNoDeoptId, kNumArgsChecked)); |
| __ LoadObject(R5, ic_data); |
| __ LeaveDartFrame(); // The arguments are still on the stack. |
| __ Branch(&StubCode::CallNoSuchMethodFunctionLabel()); |
| // The noSuchMethod call may return to the caller, but not here. |
| __ bkpt(0); |
| } else if (check_correct_named_args) { |
| __ Stop("Wrong arguments"); |
| } |
| |
| __ Bind(&all_arguments_processed); |
| // Nullify originally passed arguments only after they have been copied and |
| // checked, otherwise noSuchMethod would not see their original values. |
| // This step can be skipped in case we decide that formal parameters are |
| // implicitly final, since garbage collecting the unmodified value is not |
| // an issue anymore. |
| |
| // R4 : arguments descriptor array. |
| __ ldr(R8, FieldAddress(R4, ArgumentsDescriptor::count_offset())); |
| __ SmiUntag(R8); |
| __ add(R7, FP, ShifterOperand((kParamEndSlotFromFp + 1) * kWordSize)); |
| const Address original_argument_addr(R7, R8, LSL, 2); |
| __ LoadImmediate(IP, reinterpret_cast<intptr_t>(Object::null())); |
| Label null_args_loop, null_args_loop_condition; |
| __ b(&null_args_loop_condition); |
| __ Bind(&null_args_loop); |
| __ str(IP, original_argument_addr); |
| __ Bind(&null_args_loop_condition); |
| __ subs(R8, R8, ShifterOperand(1)); |
| __ b(&null_args_loop, PL); |
| } |
| |
| |
| void FlowGraphCompiler::GenerateInlinedGetter(intptr_t offset) { |
| // LR: return address. |
| // SP: receiver. |
| // The sequence node has one return node whose input is a load field node. |
| __ ldr(R0, Address(SP, 0 * kWordSize)); |
| __ LoadFromOffset(kWord, R0, R0, offset - kHeapObjectTag); |
| __ Ret(); |
| } |
| |
| |
| void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) { |
| // LR: return address. |
| // SP+1: receiver. |
| // SP+0: value. |
| // The sequence node has one store node and one return-null node. |
| __ ldr(R0, Address(SP, 1 * kWordSize)); // Receiver. |
| __ ldr(R1, Address(SP, 0 * kWordSize)); // Value. |
| __ StoreIntoObject(R0, FieldAddress(R0, offset), R1); |
| __ LoadImmediate(R0, reinterpret_cast<intptr_t>(Object::null())); |
| __ Ret(); |
| } |
| |
| |
| void FlowGraphCompiler::EmitFrameEntry() { |
| const Function& function = parsed_function().function(); |
| if (CanOptimizeFunction() && |
| function.is_optimizable() && |
| (!is_optimizing() || may_reoptimize())) { |
| const Register function_reg = R6; |
| |
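| // In unoptimized code, the usage counter is incremented on every entry; |
| // once it reaches the (re)optimization threshold, control branches to the |
| // OptimizeFunction stub below. (For optimized code, counting happens in |
| // the IC stubs instead.) |
| |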
| // The pool pointer is not set up before entering the Dart frame. |
| // Preserve PP of caller. |
| __ mov(R7, ShifterOperand(PP)); |
| // Temporarily set up the pool pointer for this Dart function. |
| __ LoadPoolPointer(); |
| // Load function object from object pool. |
| __ LoadObject(function_reg, function); // Uses PP. |
| // Restore PP of caller. |
| __ mov(PP, ShifterOperand(R7)); |
| |
| // Patch point is after the eventually inlined function object. |
| AddCurrentDescriptor(PcDescriptors::kEntryPatch, |
| Isolate::kNoDeoptId, |
| 0); // No token position. |
| intptr_t threshold = FLAG_optimization_counter_threshold; |
| __ ldr(R7, FieldAddress(function_reg, |
| Function::usage_counter_offset())); |
| if (is_optimizing()) { |
| // Reoptimization of an optimized function is triggered by counting in |
| // IC stubs, but not at the entry of the function. |
| threshold = FLAG_reoptimization_counter_threshold; |
| } else { |
| __ add(R7, R7, ShifterOperand(1)); |
| __ str(R7, FieldAddress(function_reg, |
| Function::usage_counter_offset())); |
| } |
| __ CompareImmediate(R7, threshold); |
| ASSERT(function_reg == R6); |
| __ Branch(&StubCode::OptimizeFunctionLabel(), GE); |
| } else if (!flow_graph().IsCompiledForOsr()) { |
| AddCurrentDescriptor(PcDescriptors::kEntryPatch, |
| Isolate::kNoDeoptId, |
| 0); // No token position. |
| } |
| __ Comment("Enter frame"); |
| if (flow_graph().IsCompiledForOsr()) { |
| intptr_t extra_slots = StackSize() |
| - flow_graph().num_stack_locals() |
| - flow_graph().num_copied_params(); |
| ASSERT(extra_slots >= 0); |
| __ EnterOsrFrame(extra_slots * kWordSize); |
| } else { |
| ASSERT(StackSize() >= 0); |
| __ EnterDartFrame(StackSize() * kWordSize); |
| } |
| } |
| |
| |
| // Input parameters: |
| // LR: return address. |
| // SP: address of last argument. |
| // FP: caller's frame pointer. |
| // PP: caller's pool pointer. |
| // R5: ic-data. |
| // R4: arguments descriptor array. |
| void FlowGraphCompiler::CompileGraph() { |
| InitCompiler(); |
| if (TryIntrinsify()) { |
| // Although this intrinsified code will never be patched, it must satisfy |
| // CodePatcher::CodeIsPatchable, which verifies that this code has a minimum |
| // code size. |
| __ bkpt(0); |
| __ Branch(&StubCode::FixCallersTargetLabel()); |
| return; |
| } |
| |
| EmitFrameEntry(); |
| |
| const Function& function = parsed_function().function(); |
| |
| const int num_fixed_params = function.num_fixed_parameters(); |
| const int num_copied_params = parsed_function().num_copied_params(); |
| const int num_locals = parsed_function().num_stack_locals(); |
| |
| // We check the number of passed arguments when we have to copy them due to |
| // the presence of optional parameters. |
| // No such checking code is generated if only fixed parameters are declared, |
| // unless we are in debug mode or unless we are compiling a closure. |
| if (num_copied_params == 0) { |
| #ifdef DEBUG |
| ASSERT(!parsed_function().function().HasOptionalParameters()); |
| const bool check_arguments = !flow_graph().IsCompiledForOsr(); |
| #else |
| const bool check_arguments = |
| function.IsClosureFunction() && !flow_graph().IsCompiledForOsr(); |
| #endif |
| if (check_arguments) { |
| __ Comment("Check argument count"); |
| // Check that exactly num_fixed arguments are passed in. |
| Label correct_num_arguments, wrong_num_arguments; |
| __ ldr(R0, FieldAddress(R4, ArgumentsDescriptor::count_offset())); |
| __ CompareImmediate(R0, Smi::RawValue(num_fixed_params)); |
| __ b(&wrong_num_arguments, NE); |
| __ ldr(R1, FieldAddress(R4, |
| ArgumentsDescriptor::positional_count_offset())); |
| __ cmp(R0, ShifterOperand(R1)); |
| __ b(&correct_num_arguments, EQ); |
| __ Bind(&wrong_num_arguments); |
| if (function.IsClosureFunction()) { |
| // Invoke noSuchMethod function passing the original function name. |
| // For closure functions, use "call" as the original name. |
| const String& name = |
| String::Handle(function.IsClosureFunction() |
| ? Symbols::Call().raw() |
| : function.name()); |
| const int kNumArgsChecked = 1; |
| const ICData& ic_data = ICData::ZoneHandle( |
| ICData::New(function, name, Object::empty_array(), |
| Isolate::kNoDeoptId, kNumArgsChecked)); |
| __ LoadObject(R5, ic_data); |
| __ LeaveDartFrame(); // The arguments are still on the stack. |
| __ Branch(&StubCode::CallNoSuchMethodFunctionLabel()); |
| // The noSuchMethod call may return to the caller, but not here. |
| __ bkpt(0); |
| } else { |
| __ Stop("Wrong number of arguments"); |
| } |
| __ Bind(&correct_num_arguments); |
| } |
| } else if (!flow_graph().IsCompiledForOsr()) { |
| CopyParameters(); |
| } |
| |
| // In unoptimized code, initialize (non-argument) stack allocated slots to |
| // null. |
| if (!is_optimizing() && (num_locals > 0)) { |
| __ Comment("Initialize spill slots"); |
| const intptr_t slot_base = parsed_function().first_stack_local_index(); |
| __ LoadImmediate(R0, reinterpret_cast<intptr_t>(Object::null())); |
| for (intptr_t i = 0; i < num_locals; ++i) { |
| // Subtract index i (locals lie at lower addresses than FP). |
| __ str(R0, Address(FP, (slot_base - i) * kWordSize)); |
| } |
| } |
| |
| if (FLAG_print_scopes) { |
| // Print the function scope (again) after generating the prologue in order |
| // to see annotations such as allocation indices of locals. |
| if (FLAG_print_ast) { |
| // Second printing. |
| OS::Print("Annotated "); |
| } |
| AstPrinter::PrintFunctionScope(parsed_function()); |
| } |
| |
| VisitBlocks(); |
| |
| __ bkpt(0); |
| GenerateDeferredCode(); |
| // Emit function patching code. This will be swapped with the first 3 |
| // instructions at the entry point. |
| AddCurrentDescriptor(PcDescriptors::kPatchCode, |
| Isolate::kNoDeoptId, |
| 0); // No token position. |
| __ BranchPatchable(&StubCode::FixCallersTargetLabel()); |
| AddCurrentDescriptor(PcDescriptors::kLazyDeoptJump, |
| Isolate::kNoDeoptId, |
| 0); // No token position. |
| __ Branch(&StubCode::DeoptimizeLazyLabel()); |
| } |
| |
| |
| void FlowGraphCompiler::GenerateCall(intptr_t token_pos, |
| const ExternalLabel* label, |
| PcDescriptors::Kind kind, |
| LocationSummary* locs) { |
| __ BranchLinkPatchable(label); |
| AddCurrentDescriptor(kind, Isolate::kNoDeoptId, token_pos); |
| RecordSafepoint(locs); |
| } |
| |
| |
| void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id, |
| intptr_t token_pos, |
| const ExternalLabel* label, |
| PcDescriptors::Kind kind, |
| LocationSummary* locs) { |
| __ BranchLinkPatchable(label); |
| AddCurrentDescriptor(kind, deopt_id, token_pos); |
| RecordSafepoint(locs); |
| // Marks either the continuation point in unoptimized code or the |
| // deoptimization point in optimized code, after the call. |
| const intptr_t deopt_id_after = Isolate::ToDeoptAfter(deopt_id); |
| if (is_optimizing()) { |
| AddDeoptIndexAtCall(deopt_id_after, token_pos); |
| } else { |
| // Add deoptimization continuation point after the call and before the |
| // arguments are removed. |
| AddCurrentDescriptor(PcDescriptors::kDeopt, deopt_id_after, token_pos); |
| } |
| } |
| |
| |
| void FlowGraphCompiler::GenerateCallRuntime(intptr_t token_pos, |
| intptr_t deopt_id, |
| const RuntimeEntry& entry, |
| LocationSummary* locs) { |
| __ CallRuntime(entry); |
| AddCurrentDescriptor(PcDescriptors::kOther, deopt_id, token_pos); |
| RecordSafepoint(locs); |
| if (deopt_id != Isolate::kNoDeoptId) { |
| // Marks either the continuation point in unoptimized code or the |
| // deoptimization point in optimized code, after the call. |
| const intptr_t deopt_id_after = Isolate::ToDeoptAfter(deopt_id); |
| if (is_optimizing()) { |
| AddDeoptIndexAtCall(deopt_id_after, token_pos); |
| } else { |
| // Add deoptimization continuation point after the call and before the |
| // arguments are removed. |
| AddCurrentDescriptor(PcDescriptors::kDeopt, |
| deopt_id_after, |
| token_pos); |
| } |
| } |
| } |
| |
| |
| void FlowGraphCompiler::EmitOptimizedInstanceCall( |
| ExternalLabel* target_label, |
| const ICData& ic_data, |
| intptr_t argument_count, |
| intptr_t deopt_id, |
| intptr_t token_pos, |
| LocationSummary* locs) { |
| // Each ICData propagated from unoptimized to optimized code contains the |
| // function that corresponds to the Dart function of that IC call. Due |
| // to inlining in optimized code, that function may not correspond to the |
| // top-level function (parsed_function().function()), which could be |
| // reoptimized and whose counter needs to be incremented. |
| // Pass the function explicitly; it is used in the IC stub. |
| |
| __ LoadObject(R6, parsed_function().function()); |
| __ LoadObject(R5, ic_data); |
| GenerateDartCall(deopt_id, |
| token_pos, |
| target_label, |
| PcDescriptors::kIcCall, |
| locs); |
| __ Drop(argument_count); |
| } |
| |
| |
| void FlowGraphCompiler::EmitInstanceCall(ExternalLabel* target_label, |
| const ICData& ic_data, |
| intptr_t argument_count, |
| intptr_t deopt_id, |
| intptr_t token_pos, |
| LocationSummary* locs) { |
| __ LoadObject(R5, ic_data); |
| GenerateDartCall(deopt_id, |
| token_pos, |
| target_label, |
| PcDescriptors::kIcCall, |
| locs); |
| __ Drop(argument_count); |
| } |
| |
| |
| void FlowGraphCompiler::EmitMegamorphicInstanceCall( |
| const ICData& ic_data, |
| intptr_t argument_count, |
| intptr_t deopt_id, |
| intptr_t token_pos, |
| LocationSummary* locs) { |
| MegamorphicCacheTable* table = Isolate::Current()->megamorphic_cache_table(); |
| const String& name = String::Handle(ic_data.target_name()); |
| const Array& arguments_descriptor = |
| Array::ZoneHandle(ic_data.arguments_descriptor()); |
| ASSERT(!arguments_descriptor.IsNull()); |
| const MegamorphicCache& cache = |
| MegamorphicCache::ZoneHandle(table->Lookup(name, arguments_descriptor)); |
| Label not_smi, load_cache; |
| __ LoadFromOffset(kWord, R0, SP, (argument_count - 1) * kWordSize); |
| __ tst(R0, ShifterOperand(kSmiTagMask)); |
| __ b(¬_smi, NE); |
| __ mov(R0, ShifterOperand(Smi::RawValue(kSmiCid))); |
| __ b(&load_cache); |
| |
| __ Bind(¬_smi); |
| __ LoadClassId(R0, R0); |
| __ SmiTag(R0); |
| |
| // R0: class ID of the receiver (smi). |
| __ Bind(&load_cache); |
| __ LoadObject(R1, cache); |
| __ ldr(R2, FieldAddress(R1, MegamorphicCache::buckets_offset())); |
| __ ldr(R1, FieldAddress(R1, MegamorphicCache::mask_offset())); |
| // R2: cache buckets array. |
| // R1: mask. |
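| // Probe the cache: start at the receiver's class id and advance linearly |
| // (wrapping via the mask) until an entry's class id matches, or an empty |
| // entry (kIllegalCid == 0) is found, which routes to the miss handler. |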
| __ mov(R3, ShifterOperand(R0)); |
| |
| Label loop, update, call_target_function; |
| __ b(&loop); |
| |
| __ Bind(&update); |
| __ add(R3, R3, ShifterOperand(Smi::RawValue(1))); |
| __ Bind(&loop); |
| __ and_(R3, R3, ShifterOperand(R1)); |
| const intptr_t base = Array::data_offset(); |
| // R3 is smi tagged, but table entries are two words, so LSL 2. |
| __ add(IP, R2, ShifterOperand(R3, LSL, 2)); |
| __ ldr(R4, FieldAddress(IP, base)); |
| |
| ASSERT(kIllegalCid == 0); |
| __ tst(R4, ShifterOperand(R4)); |
| __ b(&call_target_function, EQ); |
| __ cmp(R4, ShifterOperand(R0)); |
| __ b(&update, NE); |
| |
| __ Bind(&call_target_function); |
| // Call the target found in the cache. For a class id match, this is a |
| // proper target for the given name and arguments descriptor. If the |
| // illegal class id was found, the target is a cache miss handler that can |
| // be invoked as a normal Dart function. |
| __ add(IP, R2, ShifterOperand(R3, LSL, 2)); |
| __ ldr(R0, FieldAddress(IP, base + kWordSize)); |
| __ ldr(R0, FieldAddress(R0, Function::code_offset())); |
| __ ldr(R0, FieldAddress(R0, Code::instructions_offset())); |
| __ LoadObject(R5, ic_data); |
| __ LoadObject(R4, arguments_descriptor); |
| __ AddImmediate(R0, Instructions::HeaderSize() - kHeapObjectTag); |
| __ blx(R0); |
| AddCurrentDescriptor(PcDescriptors::kOther, Isolate::kNoDeoptId, token_pos); |
| RecordSafepoint(locs); |
| AddDeoptIndexAtCall(Isolate::ToDeoptAfter(deopt_id), token_pos); |
| __ Drop(argument_count); |
| } |
| |
| |
| void FlowGraphCompiler::EmitOptimizedStaticCall( |
| const Function& function, |
| const Array& arguments_descriptor, |
| intptr_t argument_count, |
| intptr_t deopt_id, |
| intptr_t token_pos, |
| LocationSummary* locs) { |
| __ LoadObject(R4, arguments_descriptor); |
| // Do not use the code from the function, but let the code be patched so that |
| // we can record the outgoing edges to other code. |
| GenerateDartCall(deopt_id, |
| token_pos, |
| &StubCode::CallStaticFunctionLabel(), |
| PcDescriptors::kOptStaticCall, |
| locs); |
| AddStaticCallTarget(function); |
| __ Drop(argument_count); |
| } |
| |
| |
| void FlowGraphCompiler::EmitEqualityRegConstCompare(Register reg, |
| const Object& obj, |
| bool needs_number_check, |
| intptr_t token_pos) { |
| if (needs_number_check && |
| (obj.IsMint() || obj.IsDouble() || obj.IsBigint())) { |
| __ Push(reg); |
| __ PushObject(obj); |
| if (is_optimizing()) { |
| __ BranchLink(&StubCode::OptimizedIdenticalWithNumberCheckLabel()); |
| } else { |
| __ BranchLink(&StubCode::UnoptimizedIdenticalWithNumberCheckLabel()); |
| } |
| AddCurrentDescriptor(PcDescriptors::kRuntimeCall, |
| Isolate::kNoDeoptId, |
| token_pos); |
| __ Drop(1); // Discard constant. |
| __ Pop(reg); // Restore 'reg'. |
| return; |
| } |
| |
| __ CompareObject(reg, obj); |
| } |
| |
| |
| void FlowGraphCompiler::EmitEqualityRegRegCompare(Register left, |
| Register right, |
| bool needs_number_check, |
| intptr_t token_pos) { |
| if (needs_number_check) { |
| __ Push(left); |
| __ Push(right); |
| if (is_optimizing()) { |
| __ BranchLink(&StubCode::OptimizedIdenticalWithNumberCheckLabel()); |
| } else { |
| __ BranchLink(&StubCode::UnoptimizedIdenticalWithNumberCheckLabel()); |
| } |
| AddCurrentDescriptor(PcDescriptors::kRuntimeCall, |
| Isolate::kNoDeoptId, |
| token_pos); |
| // Stub returns result in flags (result of a cmp; we need the Z flag). |
| __ Pop(right); |
| __ Pop(left); |
| } else { |
| __ cmp(left, ShifterOperand(right)); |
| } |
| } |
| |
| |
| // Implements the equality spec: if any of the arguments is null, do an |
| // identity check. Fall-through calls super equality. |
| void FlowGraphCompiler::EmitSuperEqualityCallPrologue(Register result, |
| Label* skip_call) { |
| Label check_identity, fall_through; |
| __ LoadImmediate(IP, reinterpret_cast<intptr_t>(Object::null())); |
| __ ldr(result, Address(SP, 0 * kWordSize)); // Load right operand. |
| __ cmp(result, ShifterOperand(IP)); // Is right null? |
| __ ldr(result, Address(SP, 1 * kWordSize)); // Load left operand. |
| __ b(&check_identity, EQ); // Branch if right (IP) is null; left in result. |
| __ cmp(result, ShifterOperand(IP)); // Right is non-null; is left null? |
| __ b(&fall_through, NE); |
| // Right is non-null, left is null. We could return false, but we save code |
| // by falling through with an IP different from null. |
| __ mov(IP, ShifterOperand(0)); |
| __ Bind(&check_identity); |
| __ cmp(result, ShifterOperand(IP)); |
| __ LoadObject(result, Bool::True(), EQ); |
| __ LoadObject(result, Bool::False(), NE); |
| __ Drop(2); |
| __ b(skip_call); |
| __ Bind(&fall_through); |
| } |
| |
| |
| void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) { |
| // TODO(vegorov): consider saving only caller-saved (volatile) registers. |
| const intptr_t fpu_regs_count = locs->live_registers()->fpu_regs_count(); |
| if (fpu_regs_count > 0) { |
| __ AddImmediate(SP, -(fpu_regs_count * kFpuRegisterSize)); |
| // Store fpu registers with the lowest register number at the lowest |
| // address. |
| intptr_t offset = 0; |
| for (intptr_t reg_idx = 0; reg_idx < kNumberOfFpuRegisters; ++reg_idx) { |
| QRegister fpu_reg = static_cast<QRegister>(reg_idx); |
| if (locs->live_registers()->ContainsFpuRegister(fpu_reg)) { |
| DRegister d1 = EvenDRegisterOf(fpu_reg); |
| DRegister d2 = OddDRegisterOf(fpu_reg); |
        // TODO(regis): merge stores using vstmd instruction.
| __ vstrd(d1, Address(SP, offset)); |
| __ vstrd(d2, Address(SP, offset + 2 * kWordSize)); |
| offset += kFpuRegisterSize; |
| } |
| } |
| ASSERT(offset == (fpu_regs_count * kFpuRegisterSize)); |
| } |
| |
| // Store general purpose registers with the lowest register number at the |
| // lowest address. |
| const intptr_t cpu_registers = locs->live_registers()->cpu_registers(); |
| ASSERT((cpu_registers & ~kAllCpuRegistersList) == 0); |
| if (cpu_registers != 0) { |
| __ PushList(cpu_registers); |
| } |
| } |
| |
| |
| void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) { |
| // General purpose registers have the lowest register number at the |
| // lowest address. |
| const intptr_t cpu_registers = locs->live_registers()->cpu_registers(); |
| ASSERT((cpu_registers & ~kAllCpuRegistersList) == 0); |
| if (cpu_registers != 0) { |
| __ PopList(cpu_registers); |
| } |
| |
| const intptr_t fpu_regs_count = locs->live_registers()->fpu_regs_count(); |
| if (fpu_regs_count > 0) { |
| // Fpu registers have the lowest register number at the lowest address. |
| intptr_t offset = 0; |
| for (intptr_t reg_idx = 0; reg_idx < kNumberOfFpuRegisters; ++reg_idx) { |
| QRegister fpu_reg = static_cast<QRegister>(reg_idx); |
| if (locs->live_registers()->ContainsFpuRegister(fpu_reg)) { |
| DRegister d1 = EvenDRegisterOf(fpu_reg); |
| DRegister d2 = OddDRegisterOf(fpu_reg); |
        // TODO(regis): merge loads using vldmd instruction.
| __ vldrd(d1, Address(SP, offset)); |
| __ vldrd(d2, Address(SP, offset + 2 * kWordSize)); |
| offset += kFpuRegisterSize; |
| } |
| } |
| ASSERT(offset == (fpu_regs_count * kFpuRegisterSize)); |
| __ AddImmediate(SP, offset); |
| } |
| } |
| |
| |
| void FlowGraphCompiler::EmitTestAndCall(const ICData& ic_data, |
| Register class_id_reg, |
| intptr_t argument_count, |
| const Array& argument_names, |
| Label* deopt, |
| intptr_t deopt_id, |
| intptr_t token_index, |
| LocationSummary* locs) { |
| ASSERT(is_optimizing()); |
| ASSERT(!ic_data.IsNull() && (ic_data.NumberOfChecks() > 0)); |
| Label match_found; |
| const intptr_t len = ic_data.NumberOfChecks(); |
| GrowableArray<CidTarget> sorted(len); |
| SortICDataByCount(ic_data, &sorted); |
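  // The checks are sorted by call count so that the most frequently
  // occurring class ids are tested first.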
| ASSERT(class_id_reg != R4); |
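  // R4 is loaded with the arguments descriptor below, so it cannot also
  // hold the class id.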
| ASSERT(len > 0); // Why bother otherwise. |
| const Array& arguments_descriptor = |
| Array::ZoneHandle(ArgumentsDescriptor::New(argument_count, |
| argument_names)); |
| __ LoadObject(R4, arguments_descriptor); |
| for (intptr_t i = 0; i < len; i++) { |
| const bool is_last_check = (i == (len - 1)); |
| Label next_test; |
| assembler()->CompareImmediate(class_id_reg, sorted[i].cid); |
| if (is_last_check) { |
| assembler()->b(deopt, NE); |
| } else { |
| assembler()->b(&next_test, NE); |
| } |
| // Do not use the code from the function, but let the code be patched so |
| // that we can record the outgoing edges to other code. |
| GenerateDartCall(deopt_id, |
| token_index, |
| &StubCode::CallStaticFunctionLabel(), |
| PcDescriptors::kOptStaticCall, |
| locs); |
| const Function& function = *sorted[i].target; |
| AddStaticCallTarget(function); |
| __ Drop(argument_count); |
| if (!is_last_check) { |
| assembler()->b(&match_found); |
| } |
| assembler()->Bind(&next_test); |
| } |
| assembler()->Bind(&match_found); |
| } |
| |
| |
| void FlowGraphCompiler::EmitDoubleCompareBranch(Condition true_condition, |
| FpuRegister left, |
| FpuRegister right, |
| BranchInstr* branch) { |
| ASSERT(branch != NULL); |
| DRegister dleft = EvenDRegisterOf(left); |
| DRegister dright = EvenDRegisterOf(right); |
| assembler()->vcmpd(dleft, dright); |
| assembler()->vmstat(); |
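  // vmstat copies the FPSCR condition flags into the APSR, so the branches
  // below see the result of vcmpd. VS (V flag set) indicates an unordered
  // comparison, i.e. at least one operand was NaN. For a != comparison NaN
  // operands make the condition true, so unordered results are routed to
  // the true successor; otherwise to the false successor.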
| BlockEntryInstr* nan_result = (true_condition == NE) ? |
| branch->true_successor() : branch->false_successor(); |
| assembler()->b(GetJumpLabel(nan_result), VS); |
| branch->EmitBranchOnCondition(this, true_condition); |
| } |
| |
| |
| void FlowGraphCompiler::EmitDoubleCompareBool(Condition true_condition, |
| FpuRegister left, |
| FpuRegister right, |
| Register result) { |
| DRegister dleft = EvenDRegisterOf(left); |
| DRegister dright = EvenDRegisterOf(right); |
| assembler()->vcmpd(dleft, dright); |
| assembler()->vmstat(); |
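  // As in EmitDoubleCompareBranch, VS after vcmpd signals an unordered
  // (NaN) comparison.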
| assembler()->LoadObject(result, Bool::False()); |
| Label done; |
| assembler()->b(&done, VS); // NaN -> false. |
| assembler()->LoadObject(result, Bool::True(), true_condition); |
| assembler()->Bind(&done); |
| } |
| |
| |
| // Do not implement or use this function. |
| FieldAddress FlowGraphCompiler::ElementAddressForIntIndex(intptr_t cid, |
| intptr_t index_scale, |
| Register array, |
| intptr_t index) { |
| UNREACHABLE(); |
| return FieldAddress(array, index); |
| } |
| |
| |
| // Do not implement or use this function. |
| FieldAddress FlowGraphCompiler::ElementAddressForRegIndex(intptr_t cid, |
| intptr_t index_scale, |
| Register array, |
| Register index) { |
| UNREACHABLE(); // No register indexed with offset addressing mode on ARM. |
| return FieldAddress(array, index); |
| } |
| |
| |
| Address FlowGraphCompiler::ExternalElementAddressForIntIndex( |
| intptr_t index_scale, |
| Register array, |
| intptr_t index) { |
| UNREACHABLE(); |
| return FieldAddress(array, index); |
| } |
| |
| |
| Address FlowGraphCompiler::ExternalElementAddressForRegIndex( |
| intptr_t index_scale, |
| Register array, |
| Register index) { |
| UNREACHABLE(); |
| return FieldAddress(array, index); |
| } |
| |
| |
| #undef __ |
| #define __ compiler_->assembler()-> |
| |
| |
| void ParallelMoveResolver::EmitMove(int index) { |
| MoveOperands* move = moves_[index]; |
| const Location source = move->src(); |
| const Location destination = move->dest(); |
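  // Dispatch on the kinds of the source and destination locations and emit
  // the corresponding register, stack-slot, or constant move.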
| |
| if (source.IsRegister()) { |
| if (destination.IsRegister()) { |
| __ mov(destination.reg(), ShifterOperand(source.reg())); |
| } else { |
| ASSERT(destination.IsStackSlot()); |
| const intptr_t dest_offset = destination.ToStackSlotOffset(); |
| __ StoreToOffset(kWord, source.reg(), FP, dest_offset); |
| } |
| } else if (source.IsStackSlot()) { |
| if (destination.IsRegister()) { |
| const intptr_t source_offset = source.ToStackSlotOffset(); |
| __ LoadFromOffset(kWord, destination.reg(), FP, source_offset); |
| } else { |
| ASSERT(destination.IsStackSlot()); |
| const intptr_t source_offset = source.ToStackSlotOffset(); |
| const intptr_t dest_offset = destination.ToStackSlotOffset(); |
| __ LoadFromOffset(kWord, TMP, FP, source_offset); |
| __ StoreToOffset(kWord, TMP, FP, dest_offset); |
| } |
| } else if (source.IsFpuRegister()) { |
| if (destination.IsFpuRegister()) { |
| __ vmovq(destination.fpu_reg(), source.fpu_reg()); |
| } else { |
| if (destination.IsDoubleStackSlot()) { |
| const intptr_t dest_offset = destination.ToStackSlotOffset(); |
| DRegister src = EvenDRegisterOf(source.fpu_reg()); |
| __ StoreDToOffset(src, FP, dest_offset); |
| } else { |
| ASSERT(destination.IsQuadStackSlot()); |
| const intptr_t dest_offset = destination.ToStackSlotOffset(); |
| DRegister dsrc0 = EvenDRegisterOf(source.fpu_reg()); |
| DRegister dsrc1 = OddDRegisterOf(source.fpu_reg()); |
| // TODO(zra): Write and use {Load,Store}Q{From,To}Offset(), which can |
| // use a single vld1/vst1 instruction. |
| __ StoreDToOffset(dsrc0, FP, dest_offset); |
        __ StoreDToOffset(dsrc1, FP, dest_offset + 2 * kWordSize);
| } |
| } |
| } else if (source.IsDoubleStackSlot()) { |
| if (destination.IsFpuRegister()) { |
      const intptr_t source_offset = source.ToStackSlotOffset();
      DRegister dst = EvenDRegisterOf(destination.fpu_reg());
      __ LoadDFromOffset(dst, FP, source_offset);
| } else { |
| ASSERT(destination.IsDoubleStackSlot()); |
| const intptr_t source_offset = source.ToStackSlotOffset(); |
| const intptr_t dest_offset = destination.ToStackSlotOffset(); |
| __ LoadDFromOffset(DTMP, FP, source_offset); |
| __ StoreDToOffset(DTMP, FP, dest_offset); |
| } |
| } else if (source.IsQuadStackSlot()) { |
| if (destination.IsFpuRegister()) { |
      const intptr_t source_offset = source.ToStackSlotOffset();
      DRegister dst0 = EvenDRegisterOf(destination.fpu_reg());
      DRegister dst1 = OddDRegisterOf(destination.fpu_reg());
      __ LoadDFromOffset(dst0, FP, source_offset);
      __ LoadDFromOffset(dst1, FP, source_offset + 2 * kWordSize);
| } else { |
| ASSERT(destination.IsQuadStackSlot()); |
| const intptr_t source_offset = source.ToStackSlotOffset(); |
| const intptr_t dest_offset = destination.ToStackSlotOffset(); |
| DRegister dtmp0 = DTMP; |
| DRegister dtmp1 = OddDRegisterOf(QTMP); |
| __ LoadDFromOffset(dtmp0, FP, source_offset); |
      __ LoadDFromOffset(dtmp1, FP, source_offset + 2 * kWordSize);
      __ StoreDToOffset(dtmp0, FP, dest_offset);
      __ StoreDToOffset(dtmp1, FP, dest_offset + 2 * kWordSize);
| } |
| } else { |
| ASSERT(source.IsConstant()); |
| if (destination.IsRegister()) { |
| const Object& constant = source.constant(); |
| __ LoadObject(destination.reg(), constant); |
| } else { |
| ASSERT(destination.IsStackSlot()); |
| const intptr_t dest_offset = destination.ToStackSlotOffset(); |
| __ LoadObject(TMP, source.constant()); |
| __ StoreToOffset(kWord, TMP, FP, dest_offset); |
| } |
| } |
| |
| move->Eliminate(); |
| } |
| |
| |
| void ParallelMoveResolver::EmitSwap(int index) { |
| MoveOperands* move = moves_[index]; |
| const Location source = move->src(); |
| const Location destination = move->dest(); |
| |
| if (source.IsRegister() && destination.IsRegister()) { |
| ASSERT(source.reg() != IP); |
| ASSERT(destination.reg() != IP); |
| __ mov(IP, ShifterOperand(source.reg())); |
| __ mov(source.reg(), ShifterOperand(destination.reg())); |
| __ mov(destination.reg(), ShifterOperand(IP)); |
| } else if (source.IsRegister() && destination.IsStackSlot()) { |
| Exchange(source.reg(), destination.ToStackSlotOffset()); |
| } else if (source.IsStackSlot() && destination.IsRegister()) { |
| Exchange(destination.reg(), source.ToStackSlotOffset()); |
| } else if (source.IsStackSlot() && destination.IsStackSlot()) { |
| Exchange(source.ToStackSlotOffset(), destination.ToStackSlotOffset()); |
| } else if (source.IsFpuRegister() && destination.IsFpuRegister()) { |
| DRegister dst = EvenDRegisterOf(destination.fpu_reg()); |
| DRegister src = EvenDRegisterOf(source.fpu_reg()); |
| __ vmovd(DTMP, src); |
| __ vmovd(src, dst); |
| __ vmovd(dst, DTMP); |
| } else if (source.IsFpuRegister() || destination.IsFpuRegister()) { |
| ASSERT(destination.IsDoubleStackSlot() || |
| destination.IsQuadStackSlot() || |
| source.IsDoubleStackSlot() || |
| source.IsQuadStackSlot()); |
| bool double_width = destination.IsDoubleStackSlot() || |
| source.IsDoubleStackSlot(); |
| QRegister qreg = source.IsFpuRegister() ? source.fpu_reg() |
| : destination.fpu_reg(); |
| DRegister reg = EvenDRegisterOf(qreg); |
| const intptr_t slot_offset = source.IsFpuRegister() |
| ? destination.ToStackSlotOffset() |
| : source.ToStackSlotOffset(); |
| |
| if (double_width) { |
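      // Swap the FPU register with the stack slot via DTMP: load the old
      // slot contents into DTMP, store the register into the slot, then
      // move DTMP into the register.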
| __ LoadDFromOffset(DTMP, FP, slot_offset); |
| __ StoreDToOffset(reg, FP, slot_offset); |
| __ vmovd(reg, DTMP); |
| } else { |
| UNIMPLEMENTED(); |
| } |
| } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) { |
| const intptr_t source_offset = source.ToStackSlotOffset(); |
| const intptr_t dest_offset = destination.ToStackSlotOffset(); |
| |
| ScratchFpuRegisterScope ensure_scratch(this, QTMP); |
| DRegister scratch = EvenDRegisterOf(ensure_scratch.reg()); |
| __ LoadDFromOffset(DTMP, FP, source_offset); |
| __ LoadDFromOffset(scratch, FP, dest_offset); |
| __ StoreDToOffset(DTMP, FP, dest_offset); |
| __ StoreDToOffset(scratch, FP, source_offset); |
| } else if (source.IsQuadStackSlot() && destination.IsQuadStackSlot()) { |
| UNIMPLEMENTED(); |
| } else { |
| UNREACHABLE(); |
| } |
| |
| // The swap of source and destination has executed a move from source to |
| // destination. |
| move->Eliminate(); |
| |
  // Any unperformed (including pending) move with a source equal to either
  // this move's source or destination needs to have its source changed to
  // reflect the state of affairs after the swap.
| for (int i = 0; i < moves_.length(); ++i) { |
| const MoveOperands& other_move = *moves_[i]; |
| if (other_move.Blocks(source)) { |
| moves_[i]->set_src(destination); |
| } else if (other_move.Blocks(destination)) { |
| moves_[i]->set_src(source); |
| } |
| } |
| } |
| |
| |
| void ParallelMoveResolver::MoveMemoryToMemory(const Address& dst, |
| const Address& src) { |
| __ ldr(IP, src); |
| __ str(IP, dst); |
| } |
| |
| |
| void ParallelMoveResolver::StoreObject(const Address& dst, const Object& obj) { |
| __ LoadObject(IP, obj); |
| __ str(IP, dst); |
| } |
| |
| |
| // Do not call or implement this function. Instead, use the form below that |
| // uses an offset from the frame pointer instead of an Address. |
| void ParallelMoveResolver::Exchange(Register reg, const Address& mem) { |
| UNREACHABLE(); |
| } |
| |
| |
| // Do not call or implement this function. Instead, use the form below that |
| // uses offsets from the frame pointer instead of Addresses. |
| void ParallelMoveResolver::Exchange(const Address& mem1, const Address& mem2) { |
| UNREACHABLE(); |
| } |
| |
| |
| void ParallelMoveResolver::Exchange(Register reg, intptr_t stack_offset) { |
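  // Swap 'reg' with the FP-relative stack slot, using TMP as scratch.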
| __ mov(TMP, ShifterOperand(reg)); |
| __ LoadFromOffset(kWord, reg, FP, stack_offset); |
| __ StoreToOffset(kWord, TMP, FP, stack_offset); |
| } |
| |
| |
| void ParallelMoveResolver::Exchange(intptr_t stack_offset1, |
| intptr_t stack_offset2) { |
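  // Exchanging two stack slots needs two scratch registers: TMP is used
  // directly, and the scratch scope allocates another register (IP is
  // excluded from selection).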
| ScratchRegisterScope ensure_scratch(this, IP); |
| __ LoadFromOffset(kWord, ensure_scratch.reg(), FP, stack_offset1); |
| __ LoadFromOffset(kWord, TMP, FP, stack_offset2); |
| __ StoreToOffset(kWord, ensure_scratch.reg(), FP, stack_offset2); |
| __ StoreToOffset(kWord, TMP, FP, stack_offset1); |
| } |
| |
| |
| void ParallelMoveResolver::SpillScratch(Register reg) { |
| __ Push(reg); |
| } |
| |
| |
| void ParallelMoveResolver::RestoreScratch(Register reg) { |
| __ Pop(reg); |
| } |
| |
| |
| void ParallelMoveResolver::SpillFpuScratch(FpuRegister reg) { |
| DRegister dreg = EvenDRegisterOf(reg); |
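  // Only the low 64 bits (the even D half) of the Q register are spilled,
  // using a pre-indexed store that also adjusts SP.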
| __ vstrd(dreg, Address(SP, -kDoubleSize, Address::PreIndex)); |
| } |
| |
| |
| void ParallelMoveResolver::RestoreFpuScratch(FpuRegister reg) { |
| DRegister dreg = EvenDRegisterOf(reg); |
| __ vldrd(dreg, Address(SP, kDoubleSize, Address::PostIndex)); |
| } |
| |
| |
| #undef __ |
| |
| } // namespace dart |
| |
#endif // defined(TARGET_ARCH_ARM)