| // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file |
| // for details. All rights reserved. Use of this source code is governed by a |
| // BSD-style license that can be found in the LICENSE file. |
| |
| #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM64. |
| #if defined(TARGET_ARCH_ARM64) |
| |
| #include "vm/intermediate_language.h" |
| |
| #include "vm/dart_entry.h" |
| #include "vm/flow_graph.h" |
| #include "vm/flow_graph_compiler.h" |
| #include "vm/flow_graph_range_analysis.h" |
| #include "vm/locations.h" |
| #include "vm/object_store.h" |
| #include "vm/parser.h" |
| #include "vm/simulator.h" |
| #include "vm/stack_frame.h" |
| #include "vm/stub_code.h" |
| #include "vm/symbols.h" |
| |
| #define __ compiler->assembler()-> |
| |
| namespace dart { |
| |
| DECLARE_FLAG(bool, allow_absolute_addresses); |
| DECLARE_FLAG(bool, emit_edge_counters); |
| DECLARE_FLAG(int, optimization_counter_threshold); |
| DECLARE_FLAG(bool, use_osr); |
| |
| // Generic summary for call instructions that have all arguments pushed |
| // on the stack and return the result in the fixed register R0. |
| LocationSummary* Instruction::MakeCallSummary(Zone* zone) { |
| LocationSummary* result = new(zone) LocationSummary( |
| zone, 0, 0, LocationSummary::kCall); |
| result->set_out(0, Location::RegisterLocation(R0)); |
| return result; |
| } |
| |
| |
| LocationSummary* PushArgumentInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| const intptr_t kNumInputs = 1; |
| const intptr_t kNumTemps = 0; |
| LocationSummary* locs = new(zone) LocationSummary( |
| zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
| locs->set_in(0, Location::AnyOrConstant(value())); |
| return locs; |
| } |
| |
| |
| void PushArgumentInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| // In SSA mode, we need an explicit push. Nothing to do in non-SSA mode |
| // where PushArgument is handled by BindInstr::EmitNativeCode. |
| if (compiler->is_optimizing()) { |
| Location value = locs()->in(0); |
| if (value.IsRegister()) { |
| __ Push(value.reg()); |
| } else if (value.IsConstant()) { |
| __ PushObject(value.constant()); |
| } else { |
| ASSERT(value.IsStackSlot()); |
| const intptr_t value_offset = value.ToStackSlotOffset(); |
| __ LoadFromOffset(TMP, value.base_reg(), value_offset); |
| __ Push(TMP); |
| } |
| } |
| } |
| |
| |
| LocationSummary* ReturnInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| const intptr_t kNumInputs = 1; |
| const intptr_t kNumTemps = 0; |
| LocationSummary* locs = new(zone) LocationSummary( |
| zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
| locs->set_in(0, Location::RegisterLocation(R0)); |
| return locs; |
| } |
| |
| |
| // Attempt optimized compilation at the return instruction instead of at the |
| // entry. The entry needs to be patchable: no inlined objects are allowed in |
| // the area that will be overwritten by the patch instructions (a branch |
| // macro sequence). |
| void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| const Register result = locs()->in(0).reg(); |
| ASSERT(result == R0); |
| |
| if (compiler->intrinsic_mode()) { |
| // Intrinsics don't have a frame. |
| __ ret(); |
| return; |
| } |
| |
| #if defined(DEBUG) |
| Label stack_ok; |
| __ Comment("Stack Check"); |
| const intptr_t fp_sp_dist = |
| (kFirstLocalSlotFromFp + 1 - compiler->StackSize()) * kWordSize; |
| ASSERT(fp_sp_dist <= 0); |
| __ sub(R2, SP, Operand(FP)); |
| __ CompareImmediate(R2, fp_sp_dist); |
| __ b(&stack_ok, EQ); |
| __ brk(0); |
| __ Bind(&stack_ok); |
| #endif |
| ASSERT(__ constant_pool_allowed()); |
| __ LeaveDartFrame(); // Disallows constant pool use. |
| __ ret(); |
| // This ReturnInstr may be emitted out of order by the optimizer. The next |
| // block may be a target expecting a properly set constant pool pointer. |
| __ set_constant_pool_allowed(true); |
| } |
| |
| |
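| // Returns the condition that is true exactly when 'condition' is false. |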
| static Condition NegateCondition(Condition condition) { |
| switch (condition) { |
| case EQ: return NE; |
| case NE: return EQ; |
| case LT: return GE; |
| case LE: return GT; |
| case GT: return LE; |
| case GE: return LT; |
| case CC: return CS; |
| case LS: return HI; |
| case HI: return LS; |
| case CS: return CC; |
| default: |
| UNREACHABLE(); |
| return EQ; |
| } |
| } |
| |
| |
| // Detects the pattern where one value is zero and the other is a power of 2. |
| static bool IsPowerOfTwoKind(intptr_t v1, intptr_t v2) { |
| return (Utils::IsPowerOfTwo(v1) && (v2 == 0)) || |
| (Utils::IsPowerOfTwo(v2) && (v1 == 0)); |
| } |
| |
| |
| LocationSummary* IfThenElseInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| comparison()->InitializeLocationSummary(zone, opt); |
| return comparison()->locs(); |
| } |
| |
| |
| void IfThenElseInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| const Register result = locs()->out(0).reg(); |
| |
| Location left = locs()->in(0); |
| Location right = locs()->in(1); |
| ASSERT(!left.IsConstant() || !right.IsConstant()); |
| |
| // Emit comparison code. This must not overwrite the result register. |
| BranchLabels labels = { NULL, NULL, NULL }; |
| Condition true_condition = comparison()->EmitComparisonCode(compiler, labels); |
| |
| const bool is_power_of_two_kind = IsPowerOfTwoKind(if_true_, if_false_); |
| |
| intptr_t true_value = if_true_; |
| intptr_t false_value = if_false_; |
| |
| if (is_power_of_two_kind) { |
| if (true_value == 0) { |
| // We need to have zero in result on true_condition. |
| true_condition = NegateCondition(true_condition); |
| } |
| } else { |
| if (true_value == 0) { |
| // Swap values so that false_value is zero. |
| intptr_t temp = true_value; |
| true_value = false_value; |
| false_value = temp; |
| } else { |
| true_condition = NegateCondition(true_condition); |
| } |
| } |
| |
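| // Materialize the result without branching: cset sets the result register to |
| // 1 when true_condition holds and to 0 otherwise. The condition and values |
| // were adjusted above so that either a shift (power-of-two case) or a |
| // sub/and/add sequence on Smi-tagged values turns this 0/1 into the correct |
| // Smi result. |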
| __ cset(result, true_condition); |
| |
| if (is_power_of_two_kind) { |
| const intptr_t shift = |
| Utils::ShiftForPowerOfTwo(Utils::Maximum(true_value, false_value)); |
| __ LslImmediate(result, result, shift + kSmiTagSize); |
| } else { |
| __ sub(result, result, Operand(1)); |
| const int64_t val = |
| Smi::RawValue(true_value) - Smi::RawValue(false_value); |
| __ AndImmediate(result, result, val); |
| if (false_value != 0) { |
| __ AddImmediate(result, result, Smi::RawValue(false_value)); |
| } |
| } |
| } |
| |
| |
| LocationSummary* ClosureCallInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| const intptr_t kNumInputs = 1; |
| const intptr_t kNumTemps = 0; |
| LocationSummary* summary = new(zone) LocationSummary( |
| zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
| summary->set_in(0, Location::RegisterLocation(R0)); // Function. |
| summary->set_out(0, Location::RegisterLocation(R0)); |
| return summary; |
| } |
| |
| |
| void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| // Load the arguments descriptor into R4. |
| int argument_count = ArgumentCount(); |
| const Array& arguments_descriptor = |
| Array::ZoneHandle(ArgumentsDescriptor::New(argument_count, |
| argument_names())); |
| __ LoadObject(R4, arguments_descriptor); |
| |
| // R4: Arguments descriptor. |
| // R0: Function. |
| ASSERT(locs()->in(0).reg() == R0); |
| __ LoadFieldFromOffset(CODE_REG, R0, Function::code_offset()); |
| __ LoadFieldFromOffset(R2, R0, Function::entry_point_offset()); |
| |
| // R2: instructions. |
| // R5: Smi 0 (no IC data; the lazy-compile stub expects a GC-safe value). |
| __ LoadImmediate(R5, 0); |
| __ blr(R2); |
| compiler->RecordSafepoint(locs()); |
| // Marks either the continuation point in unoptimized code or the |
| // deoptimization point in optimized code, after the call. |
| const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id()); |
| if (compiler->is_optimizing()) { |
| compiler->AddDeoptIndexAtCall(deopt_id_after, token_pos()); |
| } |
| // Add deoptimization continuation point after the call and before the |
| // arguments are removed. |
| // In optimized code this descriptor is needed for exception handling. |
| compiler->AddCurrentDescriptor(RawPcDescriptors::kDeopt, |
| deopt_id_after, |
| token_pos()); |
| __ Drop(argument_count); |
| } |
| |
| |
| LocationSummary* LoadLocalInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| return LocationSummary::Make(zone, |
| 0, |
| Location::RequiresRegister(), |
| LocationSummary::kNoCall); |
| } |
| |
| |
| void LoadLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| const Register result = locs()->out(0).reg(); |
| __ LoadFromOffset(result, FP, local().index() * kWordSize); |
| } |
| |
| |
| LocationSummary* StoreLocalInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| return LocationSummary::Make(zone, |
| 1, |
| Location::SameAsFirstInput(), |
| LocationSummary::kNoCall); |
| } |
| |
| |
| void StoreLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| const Register value = locs()->in(0).reg(); |
| const Register result = locs()->out(0).reg(); |
| ASSERT(result == value); // Assert that register assignment is correct. |
| __ StoreToOffset(value, FP, local().index() * kWordSize); |
| } |
| |
| |
| LocationSummary* ConstantInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| return LocationSummary::Make(zone, |
| 0, |
| Location::RequiresRegister(), |
| LocationSummary::kNoCall); |
| } |
| |
| |
| void ConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| // The register allocator drops constant definitions that have no uses. |
| if (!locs()->out(0).IsInvalid()) { |
| const Register result = locs()->out(0).reg(); |
| __ LoadObject(result, value()); |
| } |
| } |
| |
| |
| LocationSummary* UnboxedConstantInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| const intptr_t kNumInputs = 0; |
| const Location out = (representation_ == kUnboxedInt32) ? |
| Location::RequiresRegister() : Location::RequiresFpuRegister(); |
| return LocationSummary::Make(zone, |
| kNumInputs, |
| out, |
| LocationSummary::kNoCall); |
| } |
| |
| |
| void UnboxedConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| if (!locs()->out(0).IsInvalid()) { |
| switch (representation_) { |
| case kUnboxedDouble: |
| if (Utils::DoublesBitEqual(Double::Cast(value()).value(), 0.0)) { |
| const VRegister dst = locs()->out(0).fpu_reg(); |
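| // XOR the register with itself to produce +0.0 without a constant load. |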
| __ veor(dst, dst, dst); |
| } else { |
| const VRegister dst = locs()->out(0).fpu_reg(); |
| __ LoadDImmediate(dst, Double::Cast(value()).value()); |
| } |
| break; |
| case kUnboxedInt32: |
| __ LoadImmediate(locs()->out(0).reg(), |
| static_cast<int32_t>(Smi::Cast(value()).Value())); |
| break; |
| default: |
| UNREACHABLE(); |
| break; |
| } |
| } |
| } |
| |
| |
| LocationSummary* AssertAssignableInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| const intptr_t kNumInputs = 3; |
| const intptr_t kNumTemps = 0; |
| LocationSummary* summary = new(zone) LocationSummary( |
| zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
| summary->set_in(0, Location::RegisterLocation(R0)); // Value. |
| summary->set_in(1, Location::RegisterLocation(R2)); // Instantiator. |
| summary->set_in(2, Location::RegisterLocation(R1)); // Type arguments. |
| summary->set_out(0, Location::RegisterLocation(R0)); |
| return summary; |
| } |
| |
| |
| LocationSummary* AssertBooleanInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| const intptr_t kNumInputs = 1; |
| const intptr_t kNumTemps = 0; |
| LocationSummary* locs = new(zone) LocationSummary( |
| zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
| locs->set_in(0, Location::RegisterLocation(R0)); |
| locs->set_out(0, Location::RegisterLocation(R0)); |
| return locs; |
| } |
| |
| |
| static void EmitAssertBoolean(Register reg, |
| intptr_t token_pos, |
| intptr_t deopt_id, |
| LocationSummary* locs, |
| FlowGraphCompiler* compiler) { |
| // Check that the type of the value is allowed in conditional context. |
| // Call the runtime if the object is not bool::true or bool::false. |
| ASSERT(locs->always_calls()); |
| Label done; |
| |
| if (Isolate::Current()->flags().type_checks()) { |
| __ CompareObject(reg, Bool::True()); |
| __ b(&done, EQ); |
| __ CompareObject(reg, Bool::False()); |
| __ b(&done, EQ); |
| } else { |
| ASSERT(Isolate::Current()->flags().asserts()); |
| __ CompareObject(reg, Object::null_instance()); |
| __ b(&done, NE); |
| } |
| |
| __ Push(reg); // Push the source object. |
| compiler->GenerateRuntimeCall(token_pos, |
| deopt_id, |
| kNonBoolTypeErrorRuntimeEntry, |
| 1, |
| locs); |
| // We should never return here. |
| __ brk(0); |
| __ Bind(&done); |
| } |
| |
| |
| void AssertBooleanInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| const Register obj = locs()->in(0).reg(); |
| const Register result = locs()->out(0).reg(); |
| |
| EmitAssertBoolean(obj, token_pos(), deopt_id(), locs(), compiler); |
| ASSERT(obj == result); |
| } |
| |
| |
| static Condition TokenKindToSmiCondition(Token::Kind kind) { |
| switch (kind) { |
| case Token::kEQ: return EQ; |
| case Token::kNE: return NE; |
| case Token::kLT: return LT; |
| case Token::kGT: return GT; |
| case Token::kLTE: return LE; |
| case Token::kGTE: return GE; |
| default: |
| UNREACHABLE(); |
| return VS; |
| } |
| } |
| |
| |
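| // Returns the condition to use when the operands of a comparison are swapped |
| // (e.g. a < b becomes b > a). |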
| static Condition FlipCondition(Condition condition) { |
| switch (condition) { |
| case EQ: return EQ; |
| case NE: return NE; |
| case LT: return GT; |
| case LE: return GE; |
| case GT: return LT; |
| case GE: return LE; |
| case CC: return HI; |
| case LS: return CS; |
| case HI: return CC; |
| case CS: return LS; |
| default: |
| UNREACHABLE(); |
| return EQ; |
| } |
| } |
| |
| |
| static void EmitBranchOnCondition(FlowGraphCompiler* compiler, |
| Condition true_condition, |
| BranchLabels labels) { |
| if (labels.fall_through == labels.false_label) { |
| // If the next block is the false successor we will fall through to it. |
| __ b(labels.true_label, true_condition); |
| } else { |
| // If the next block is not the false successor we will branch to it. |
| Condition false_condition = NegateCondition(true_condition); |
| __ b(labels.false_label, false_condition); |
| |
| // Fall through or jump to the true successor. |
| if (labels.fall_through != labels.true_label) { |
| __ b(labels.true_label); |
| } |
| } |
| } |
| |
| |
| static Condition EmitSmiComparisonOp(FlowGraphCompiler* compiler, |
| LocationSummary* locs, |
| Token::Kind kind) { |
| Location left = locs->in(0); |
| Location right = locs->in(1); |
| ASSERT(!left.IsConstant() || !right.IsConstant()); |
| |
| Condition true_condition = TokenKindToSmiCondition(kind); |
| |
| if (left.IsConstant()) { |
| __ CompareObject(right.reg(), left.constant()); |
| true_condition = FlipCondition(true_condition); |
| } else if (right.IsConstant()) { |
| __ CompareObject(left.reg(), right.constant()); |
| } else { |
| __ CompareRegisters(left.reg(), right.reg()); |
| } |
| return true_condition; |
| } |
| |
| |
| LocationSummary* EqualityCompareInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| const intptr_t kNumInputs = 2; |
| if (operation_cid() == kDoubleCid) { |
| const intptr_t kNumTemps = 0; |
| LocationSummary* locs = new(zone) LocationSummary( |
| zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
| locs->set_in(0, Location::RequiresFpuRegister()); |
| locs->set_in(1, Location::RequiresFpuRegister()); |
| locs->set_out(0, Location::RequiresRegister()); |
| return locs; |
| } |
| if (operation_cid() == kSmiCid) { |
| const intptr_t kNumTemps = 0; |
| LocationSummary* locs = new(zone) LocationSummary( |
| zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
| locs->set_in(0, Location::RegisterOrConstant(left())); |
| // Only one input can be a constant operand. The case of two constant |
| // operands should be handled by constant propagation. |
| // Only right can be a stack slot. |
| locs->set_in(1, locs->in(0).IsConstant() |
| ? Location::RequiresRegister() |
| : Location::RegisterOrConstant(right())); |
| locs->set_out(0, Location::RequiresRegister()); |
| return locs; |
| } |
| UNREACHABLE(); |
| return NULL; |
| } |
| |
| |
| static Condition TokenKindToDoubleCondition(Token::Kind kind) { |
| switch (kind) { |
| case Token::kEQ: return EQ; |
| case Token::kNE: return NE; |
| case Token::kLT: return LT; |
| case Token::kGT: return GT; |
| case Token::kLTE: return LE; |
| case Token::kGTE: return GE; |
| default: |
| UNREACHABLE(); |
| return VS; |
| } |
| } |
| |
| |
| static Condition EmitDoubleComparisonOp(FlowGraphCompiler* compiler, |
| LocationSummary* locs, |
| Token::Kind kind) { |
| const VRegister left = locs->in(0).fpu_reg(); |
| const VRegister right = locs->in(1).fpu_reg(); |
| __ fcmpd(left, right); |
| Condition true_condition = TokenKindToDoubleCondition(kind); |
| return true_condition; |
| } |
| |
| |
| Condition EqualityCompareInstr::EmitComparisonCode(FlowGraphCompiler* compiler, |
| BranchLabels labels) { |
| if (operation_cid() == kSmiCid) { |
| return EmitSmiComparisonOp(compiler, locs(), kind()); |
| } else { |
| ASSERT(operation_cid() == kDoubleCid); |
| return EmitDoubleComparisonOp(compiler, locs(), kind()); |
| } |
| } |
| |
| |
| void EqualityCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| ASSERT((kind() == Token::kEQ) || (kind() == Token::kNE)); |
| Label is_true, is_false; |
| BranchLabels labels = { &is_true, &is_false, &is_false }; |
| Condition true_condition = EmitComparisonCode(compiler, labels); |
| if ((operation_cid() == kDoubleCid) && (true_condition != NE)) { |
| // Special case for NaN comparison. Result is always false unless |
| // relational operator is !=. |
| __ b(&is_false, VS); |
| } |
| EmitBranchOnCondition(compiler, true_condition, labels); |
| // TODO(zra): instead of branching, use the csel instruction to get |
| // True or False into result. |
| const Register result = locs()->out(0).reg(); |
| Label done; |
| __ Bind(&is_false); |
| __ LoadObject(result, Bool::False()); |
| __ b(&done); |
| __ Bind(&is_true); |
| __ LoadObject(result, Bool::True()); |
| __ Bind(&done); |
| } |
| |
| |
| void EqualityCompareInstr::EmitBranchCode(FlowGraphCompiler* compiler, |
| BranchInstr* branch) { |
| ASSERT((kind() == Token::kNE) || (kind() == Token::kEQ)); |
| |
| BranchLabels labels = compiler->CreateBranchLabels(branch); |
| Condition true_condition = EmitComparisonCode(compiler, labels); |
| if ((operation_cid() == kDoubleCid) && (true_condition != NE)) { |
| // Special case for NaN comparison. Result is always false unless |
| // relational operator is !=. |
| __ b(labels.false_label, VS); |
| } |
| EmitBranchOnCondition(compiler, true_condition, labels); |
| } |
| |
| |
| LocationSummary* TestSmiInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| const intptr_t kNumInputs = 2; |
| const intptr_t kNumTemps = 0; |
| LocationSummary* locs = new(zone) LocationSummary( |
| zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
| locs->set_in(0, Location::RequiresRegister()); |
| // Only one input can be a constant operand. The case of two constant |
| // operands should be handled by constant propagation. |
| locs->set_in(1, Location::RegisterOrConstant(right())); |
| return locs; |
| } |
| |
| |
| Condition TestSmiInstr::EmitComparisonCode(FlowGraphCompiler* compiler, |
| BranchLabels labels) { |
| const Register left = locs()->in(0).reg(); |
| Location right = locs()->in(1); |
| if (right.IsConstant()) { |
| ASSERT(right.constant().IsSmi()); |
| const int64_t imm = |
| reinterpret_cast<int64_t>(right.constant().raw()); |
| __ TestImmediate(left, imm); |
| } else { |
| __ tst(left, Operand(right.reg())); |
| } |
| Condition true_condition = (kind() == Token::kNE) ? NE : EQ; |
| return true_condition; |
| } |
| |
| |
| void TestSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| // Never emitted outside of the BranchInstr. |
| UNREACHABLE(); |
| } |
| |
| |
| void TestSmiInstr::EmitBranchCode(FlowGraphCompiler* compiler, |
| BranchInstr* branch) { |
| BranchLabels labels = compiler->CreateBranchLabels(branch); |
| Condition true_condition = EmitComparisonCode(compiler, labels); |
| EmitBranchOnCondition(compiler, true_condition, labels); |
| } |
| |
| |
| LocationSummary* TestCidsInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| const intptr_t kNumInputs = 1; |
| const intptr_t kNumTemps = 1; |
| LocationSummary* locs = new(zone) LocationSummary( |
| zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
| locs->set_in(0, Location::RequiresRegister()); |
| locs->set_temp(0, Location::RequiresRegister()); |
| locs->set_out(0, Location::RequiresRegister()); |
| return locs; |
| } |
| |
| |
| Condition TestCidsInstr::EmitComparisonCode(FlowGraphCompiler* compiler, |
| BranchLabels labels) { |
| ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT)); |
| const Register val_reg = locs()->in(0).reg(); |
| const Register cid_reg = locs()->temp(0).reg(); |
| |
| Label* deopt = CanDeoptimize() ? |
| compiler->AddDeoptStub(deopt_id(), ICData::kDeoptTestCids) : NULL; |
| |
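| // cid_results() is a flat list of (class id, result) pairs. The first pair |
| // is always for kSmiCid, which is tested via the tag bit rather than by |
| // loading the class id. |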
| const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0; |
| const ZoneGrowableArray<intptr_t>& data = cid_results(); |
| ASSERT(data[0] == kSmiCid); |
| bool result = data[1] == true_result; |
| __ tsti(val_reg, Immediate(kSmiTagMask)); |
| __ b(result ? labels.true_label : labels.false_label, EQ); |
| __ LoadClassId(cid_reg, val_reg); |
| |
| for (intptr_t i = 2; i < data.length(); i += 2) { |
| const intptr_t test_cid = data[i]; |
| ASSERT(test_cid != kSmiCid); |
| result = data[i + 1] == true_result; |
| __ CompareImmediate(cid_reg, test_cid); |
| __ b(result ? labels.true_label : labels.false_label, EQ); |
| } |
| // No match found: deoptimize if possible, otherwise jump to the label for |
| // the default (no-match) result. |
| if (deopt == NULL) { |
| Label* target = result ? labels.false_label : labels.true_label; |
| if (target != labels.fall_through) { |
| __ b(target); |
| } |
| } else { |
| __ b(deopt); |
| } |
| // Dummy result: the last instruction is a jump, so any conditional branch |
| // using the result will be skipped. |
| return EQ; |
| } |
| |
| |
| void TestCidsInstr::EmitBranchCode(FlowGraphCompiler* compiler, |
| BranchInstr* branch) { |
| BranchLabels labels = compiler->CreateBranchLabels(branch); |
| EmitComparisonCode(compiler, labels); |
| } |
| |
| |
| void TestCidsInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| const Register result_reg = locs()->out(0).reg(); |
| Label is_true, is_false, done; |
| BranchLabels labels = { &is_true, &is_false, &is_false }; |
| EmitComparisonCode(compiler, labels); |
| // TODO(zra): instead of branching, use the csel instruction to get |
| // True or False into result. |
| __ Bind(&is_false); |
| __ LoadObject(result_reg, Bool::False()); |
| __ b(&done); |
| __ Bind(&is_true); |
| __ LoadObject(result_reg, Bool::True()); |
| __ Bind(&done); |
| } |
| |
| |
| LocationSummary* RelationalOpInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| const intptr_t kNumInputs = 2; |
| const intptr_t kNumTemps = 0; |
| if (operation_cid() == kDoubleCid) { |
| LocationSummary* summary = new(zone) LocationSummary( |
| zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
| summary->set_in(0, Location::RequiresFpuRegister()); |
| summary->set_in(1, Location::RequiresFpuRegister()); |
| summary->set_out(0, Location::RequiresRegister()); |
| return summary; |
| } |
| ASSERT(operation_cid() == kSmiCid); |
| LocationSummary* summary = new(zone) LocationSummary( |
| zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
| summary->set_in(0, Location::RegisterOrConstant(left())); |
| // Only one input can be a constant operand. The case of two constant |
| // operands should be handled by constant propagation. |
| summary->set_in(1, summary->in(0).IsConstant() |
| ? Location::RequiresRegister() |
| : Location::RegisterOrConstant(right())); |
| summary->set_out(0, Location::RequiresRegister()); |
| return summary; |
| } |
| |
| |
| Condition RelationalOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler, |
| BranchLabels labels) { |
| if (operation_cid() == kSmiCid) { |
| return EmitSmiComparisonOp(compiler, locs(), kind()); |
| } else { |
| ASSERT(operation_cid() == kDoubleCid); |
| return EmitDoubleComparisonOp(compiler, locs(), kind()); |
| } |
| } |
| |
| |
| void RelationalOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| Label is_true, is_false; |
| BranchLabels labels = { &is_true, &is_false, &is_false }; |
| Condition true_condition = EmitComparisonCode(compiler, labels); |
| if ((operation_cid() == kDoubleCid) && (true_condition != NE)) { |
| // Special case for NaN comparison. Result is always false unless |
| // relational operator is !=. |
| __ b(&is_false, VS); |
| } |
| EmitBranchOnCondition(compiler, true_condition, labels); |
| // TODO(zra): instead of branching, use the csel instruction to get |
| // True or False into result. |
| const Register result = locs()->out(0).reg(); |
| Label done; |
| __ Bind(&is_false); |
| __ LoadObject(result, Bool::False()); |
| __ b(&done); |
| __ Bind(&is_true); |
| __ LoadObject(result, Bool::True()); |
| __ Bind(&done); |
| } |
| |
| |
| void RelationalOpInstr::EmitBranchCode(FlowGraphCompiler* compiler, |
| BranchInstr* branch) { |
| BranchLabels labels = compiler->CreateBranchLabels(branch); |
| Condition true_condition = EmitComparisonCode(compiler, labels); |
| if ((operation_cid() == kDoubleCid) && (true_condition != NE)) { |
| // Special case for NaN comparison. Result is always false unless |
| // relational operator is !=. |
| __ b(labels.false_label, VS); |
| } |
| EmitBranchOnCondition(compiler, true_condition, labels); |
| } |
| |
| |
| LocationSummary* NativeCallInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| return MakeCallSummary(zone); |
| } |
| |
| |
| void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| const Register result = locs()->out(0).reg(); |
| |
| // Push the result placeholder initialized to NULL. |
| __ PushObject(Object::null_object()); |
| // Pass a pointer to the first argument in R2. |
| if (!function().HasOptionalParameters()) { |
| __ AddImmediate(R2, FP, (kParamEndSlotFromFp + |
| function().NumParameters()) * kWordSize); |
| } else { |
| __ AddImmediate(R2, FP, kFirstLocalSlotFromFp * kWordSize); |
| } |
| // Compute the effective address. When running under the simulator, |
| // this is a redirection address that forces the simulator to call |
| // into the runtime system. |
| uword entry; |
| const intptr_t argc_tag = NativeArguments::ComputeArgcTag(function()); |
| const bool is_leaf_call = |
| (argc_tag & NativeArguments::AutoSetupScopeMask()) == 0; |
| const StubEntry* stub_entry; |
| if (link_lazily()) { |
| stub_entry = StubCode::CallBootstrapCFunction_entry(); |
| entry = NativeEntry::LinkNativeCallEntry(); |
| } else { |
| entry = reinterpret_cast<uword>(native_c_function()); |
| if (is_bootstrap_native() || is_leaf_call) { |
| stub_entry = StubCode::CallBootstrapCFunction_entry(); |
| #if defined(USING_SIMULATOR) |
| entry = Simulator::RedirectExternalReference( |
| entry, Simulator::kBootstrapNativeCall, NativeEntry::kNumArguments); |
| #endif |
| } else { |
| // For non-bootstrap native methods, the CallNativeCFunction stub generates |
| // the redirection address when running under the simulator, so we do not |
| // change 'entry' here. |
| stub_entry = StubCode::CallNativeCFunction_entry(); |
| #if defined(USING_SIMULATOR) |
| if (!function().IsNativeAutoSetupScope()) { |
| entry = Simulator::RedirectExternalReference( |
| entry, Simulator::kBootstrapNativeCall, NativeEntry::kNumArguments); |
| } |
| #endif |
| } |
| } |
| __ LoadImmediate(R1, argc_tag); |
| ExternalLabel label(entry); |
| __ LoadNativeEntry(R5, &label); |
| compiler->GenerateCall(token_pos(), |
| *stub_entry, |
| RawPcDescriptors::kOther, |
| locs()); |
| __ Pop(result); |
| } |
| |
| |
| LocationSummary* StringFromCharCodeInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| const intptr_t kNumInputs = 1; |
| // TODO(fschneider): Allow immediate operands for the char code. |
| return LocationSummary::Make(zone, |
| kNumInputs, |
| Location::RequiresRegister(), |
| LocationSummary::kNoCall); |
| } |
| |
| |
| void StringFromCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| ASSERT(compiler->is_optimizing()); |
| const Register char_code = locs()->in(0).reg(); |
| const Register result = locs()->out(0).reg(); |
| |
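| // Look the character up in the VM's table of predefined one-character |
| // symbols: result = symbols[kNullCharCodeSymbolOffset + char_code]. |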
| __ ldr(result, Address(THR, Thread::predefined_symbols_address_offset())); |
| __ AddImmediate( |
| result, result, Symbols::kNullCharCodeSymbolOffset * kWordSize); |
| __ SmiUntag(TMP, char_code); // Untag to use scaled address mode. |
| __ ldr(result, Address(result, TMP, UXTX, Address::Scaled)); |
| } |
| |
| |
| LocationSummary* StringToCharCodeInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| const intptr_t kNumInputs = 1; |
| return LocationSummary::Make(zone, |
| kNumInputs, |
| Location::RequiresRegister(), |
| LocationSummary::kNoCall); |
| } |
| |
| |
| void StringToCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| ASSERT(cid_ == kOneByteStringCid); |
| const Register str = locs()->in(0).reg(); |
| const Register result = locs()->out(0).reg(); |
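| // Returns the first character code as a Smi if the string has length 1, |
| // and Smi -1 otherwise: csel picks between the loaded byte and -1 based on |
| // the length comparison, and the result is Smi-tagged at the end. |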
| __ LoadFieldFromOffset(result, str, String::length_offset()); |
| __ ldr(TMP, FieldAddress(str, OneByteString::data_offset()), kUnsignedByte); |
| __ CompareImmediate(result, Smi::RawValue(1)); |
| __ LoadImmediate(result, -1); |
| __ csel(result, TMP, result, EQ); |
| __ SmiTag(result); |
| } |
| |
| |
| LocationSummary* StringInterpolateInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| const intptr_t kNumInputs = 1; |
| const intptr_t kNumTemps = 0; |
| LocationSummary* summary = new(zone) LocationSummary( |
| zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
| summary->set_in(0, Location::RegisterLocation(R0)); |
| summary->set_out(0, Location::RegisterLocation(R0)); |
| return summary; |
| } |
| |
| |
| void StringInterpolateInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| const Register array = locs()->in(0).reg(); |
| __ Push(array); |
| const int kNumberOfArguments = 1; |
| const Array& kNoArgumentNames = Object::null_array(); |
| compiler->GenerateStaticCall(deopt_id(), |
| token_pos(), |
| CallFunction(), |
| kNumberOfArguments, |
| kNoArgumentNames, |
| locs(), |
| ICData::Handle()); |
| ASSERT(locs()->out(0).reg() == R0); |
| } |
| |
| |
| LocationSummary* LoadUntaggedInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| const intptr_t kNumInputs = 1; |
| return LocationSummary::Make(zone, |
| kNumInputs, |
| Location::RequiresRegister(), |
| LocationSummary::kNoCall); |
| } |
| |
| |
| void LoadUntaggedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| const Register obj = locs()->in(0).reg(); |
| const Register result = locs()->out(0).reg(); |
| if (object()->definition()->representation() == kUntagged) { |
| __ LoadFromOffset(result, obj, offset()); |
| } else { |
| ASSERT(object()->definition()->representation() == kTagged); |
| __ LoadFieldFromOffset(result, obj, offset()); |
| } |
| } |
| |
| |
| LocationSummary* LoadClassIdInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| const intptr_t kNumInputs = 1; |
| return LocationSummary::Make(zone, |
| kNumInputs, |
| Location::RequiresRegister(), |
| LocationSummary::kNoCall); |
| } |
| |
| |
| void LoadClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| const Register object = locs()->in(0).reg(); |
| const Register result = locs()->out(0).reg(); |
| |
| __ LoadTaggedClassIdMayBeSmi(result, object); |
| } |
| |
| |
| CompileType LoadIndexedInstr::ComputeType() const { |
| switch (class_id_) { |
| case kArrayCid: |
| case kImmutableArrayCid: |
| return CompileType::Dynamic(); |
| |
| case kTypedDataFloat32ArrayCid: |
| case kTypedDataFloat64ArrayCid: |
| return CompileType::FromCid(kDoubleCid); |
| case kTypedDataFloat32x4ArrayCid: |
| return CompileType::FromCid(kFloat32x4Cid); |
| case kTypedDataInt32x4ArrayCid: |
| return CompileType::FromCid(kInt32x4Cid); |
| case kTypedDataFloat64x2ArrayCid: |
| return CompileType::FromCid(kFloat64x2Cid); |
| |
| case kTypedDataInt8ArrayCid: |
| case kTypedDataUint8ArrayCid: |
| case kTypedDataUint8ClampedArrayCid: |
| case kExternalTypedDataUint8ArrayCid: |
| case kExternalTypedDataUint8ClampedArrayCid: |
| case kTypedDataInt16ArrayCid: |
| case kTypedDataUint16ArrayCid: |
| case kOneByteStringCid: |
| case kTwoByteStringCid: |
| case kTypedDataInt32ArrayCid: |
| case kTypedDataUint32ArrayCid: |
| return CompileType::FromCid(kSmiCid); |
| |
| default: |
| UNIMPLEMENTED(); |
| return CompileType::Dynamic(); |
| } |
| } |
| |
| |
| Representation LoadIndexedInstr::representation() const { |
| switch (class_id_) { |
| case kArrayCid: |
| case kImmutableArrayCid: |
| case kTypedDataInt8ArrayCid: |
| case kTypedDataUint8ArrayCid: |
| case kTypedDataUint8ClampedArrayCid: |
| case kExternalTypedDataUint8ArrayCid: |
| case kExternalTypedDataUint8ClampedArrayCid: |
| case kTypedDataInt16ArrayCid: |
| case kTypedDataUint16ArrayCid: |
| case kOneByteStringCid: |
| case kTwoByteStringCid: |
| return kTagged; |
| case kTypedDataInt32ArrayCid: |
| return kUnboxedInt32; |
| case kTypedDataUint32ArrayCid: |
| return kUnboxedUint32; |
| case kTypedDataFloat32ArrayCid: |
| case kTypedDataFloat64ArrayCid: |
| return kUnboxedDouble; |
| case kTypedDataInt32x4ArrayCid: |
| return kUnboxedInt32x4; |
| case kTypedDataFloat32x4ArrayCid: |
| return kUnboxedFloat32x4; |
| case kTypedDataFloat64x2ArrayCid: |
| return kUnboxedFloat64x2; |
| default: |
| UNIMPLEMENTED(); |
| return kTagged; |
| } |
| } |
| |
| |
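| // Returns true if 'value' is a Smi constant index whose scaled element |
| // offset (including the data offset for internal typed data) fits the |
| // immediate-offset addressing mode for elements of class 'cid'. |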
| static bool CanBeImmediateIndex(Value* value, intptr_t cid, bool is_external) { |
| ConstantInstr* constant = value->definition()->AsConstant(); |
| if ((constant == NULL) || !constant->value().IsSmi()) { |
| return false; |
| } |
| const int64_t index = Smi::Cast(constant->value()).AsInt64Value(); |
| const intptr_t scale = Instance::ElementSizeFor(cid); |
| const int64_t offset = index * scale + |
| (is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag)); |
| if (!Utils::IsInt(32, offset)) { |
| return false; |
| } |
| return Address::CanHoldOffset(static_cast<int32_t>(offset), |
| Address::Offset, |
| Address::OperandSizeFor(cid)); |
| } |
| |
| |
| LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| const intptr_t kNumInputs = 2; |
| const intptr_t kNumTemps = 0; |
| LocationSummary* locs = new(zone) LocationSummary( |
| zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
| locs->set_in(0, Location::RequiresRegister()); |
| if (CanBeImmediateIndex(index(), class_id(), IsExternal())) { |
| locs->set_in(1, Location::Constant(index()->definition()->AsConstant())); |
| } else { |
| locs->set_in(1, Location::RequiresRegister()); |
| } |
| if ((representation() == kUnboxedDouble) || |
| (representation() == kUnboxedFloat32x4) || |
| (representation() == kUnboxedInt32x4) || |
| (representation() == kUnboxedFloat64x2)) { |
| locs->set_out(0, Location::RequiresFpuRegister()); |
| } else { |
| locs->set_out(0, Location::RequiresRegister()); |
| } |
| return locs; |
| } |
| |
| |
| void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| // The array register points to the backing store for external arrays. |
| const Register array = locs()->in(0).reg(); |
| const Location index = locs()->in(1); |
| |
| Address element_address = index.IsRegister() |
| ? __ ElementAddressForRegIndex(true, // Load. |
| IsExternal(), class_id(), index_scale(), |
| array, index.reg()) |
| : __ ElementAddressForIntIndex( |
| IsExternal(), class_id(), index_scale(), |
| array, Smi::Cast(index.constant()).Value()); |
| // Warning: element_address may use register TMP as base. |
| |
| if ((representation() == kUnboxedDouble) || |
| (representation() == kUnboxedFloat32x4) || |
| (representation() == kUnboxedInt32x4) || |
| (representation() == kUnboxedFloat64x2)) { |
| const VRegister result = locs()->out(0).fpu_reg(); |
| switch (class_id()) { |
| case kTypedDataFloat32ArrayCid: |
| // Load single precision float. |
| __ fldrs(result, element_address); |
| break; |
| case kTypedDataFloat64ArrayCid: |
| // Load double precision float. |
| __ fldrd(result, element_address); |
| break; |
| case kTypedDataFloat64x2ArrayCid: |
| case kTypedDataInt32x4ArrayCid: |
| case kTypedDataFloat32x4ArrayCid: |
| __ fldrq(result, element_address); |
| break; |
| default: |
| UNREACHABLE(); |
| } |
| return; |
| } |
| |
| if ((representation() == kUnboxedInt32) || |
| (representation() == kUnboxedUint32)) { |
| const Register result = locs()->out(0).reg(); |
| switch (class_id()) { |
| case kTypedDataInt32ArrayCid: |
| ASSERT(representation() == kUnboxedInt32); |
| __ ldr(result, element_address, kWord); |
| break; |
| case kTypedDataUint32ArrayCid: |
| ASSERT(representation() == kUnboxedUint32); |
| __ ldr(result, element_address, kUnsignedWord); |
| break; |
| default: |
| UNREACHABLE(); |
| } |
| return; |
| } |
| |
| ASSERT(representation() == kTagged); |
| const Register result = locs()->out(0).reg(); |
| switch (class_id()) { |
| case kTypedDataInt8ArrayCid: |
| ASSERT(index_scale() == 1); |
| __ ldr(result, element_address, kByte); |
| __ SmiTag(result); |
| break; |
| case kTypedDataUint8ArrayCid: |
| case kTypedDataUint8ClampedArrayCid: |
| case kExternalTypedDataUint8ArrayCid: |
| case kExternalTypedDataUint8ClampedArrayCid: |
| case kOneByteStringCid: |
| ASSERT(index_scale() == 1); |
| __ ldr(result, element_address, kUnsignedByte); |
| __ SmiTag(result); |
| break; |
| case kTypedDataInt16ArrayCid: |
| __ ldr(result, element_address, kHalfword); |
| __ SmiTag(result); |
| break; |
| case kTypedDataUint16ArrayCid: |
| case kTwoByteStringCid: |
| __ ldr(result, element_address, kUnsignedHalfword); |
| __ SmiTag(result); |
| break; |
| default: |
| ASSERT((class_id() == kArrayCid) || (class_id() == kImmutableArrayCid)); |
| __ ldr(result, element_address); |
| break; |
| } |
| } |
| |
| |
| LocationSummary* LoadCodeUnitsInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| const intptr_t kNumInputs = 2; |
| const intptr_t kNumTemps = 0; |
| LocationSummary* summary = new(zone) LocationSummary( |
| zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
| summary->set_in(0, Location::RequiresRegister()); |
| summary->set_in(1, Location::RequiresRegister()); |
| summary->set_out(0, Location::RequiresRegister()); |
| return summary; |
| } |
| |
| |
| void LoadCodeUnitsInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| // The string register points to the backing store for external strings. |
| const Register str = locs()->in(0).reg(); |
| const Location index = locs()->in(1); |
| |
| Address element_address = __ ElementAddressForRegIndex( |
| true, IsExternal(), class_id(), index_scale(), str, index.reg()); |
| // Warning: element_address may use register TMP as base. |
| |
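| // element_count() adjacent code units are loaded with a single memory access |
| // and returned packed into one Smi. |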
| Register result = locs()->out(0).reg(); |
| switch (class_id()) { |
| case kOneByteStringCid: |
| case kExternalOneByteStringCid: |
| switch (element_count()) { |
| case 1: __ ldr(result, element_address, kUnsignedByte); break; |
| case 2: __ ldr(result, element_address, kUnsignedHalfword); break; |
| case 4: __ ldr(result, element_address, kUnsignedWord); break; |
| default: UNREACHABLE(); |
| } |
| __ SmiTag(result); |
| break; |
| case kTwoByteStringCid: |
| case kExternalTwoByteStringCid: |
| switch (element_count()) { |
| case 1: __ ldr(result, element_address, kUnsignedHalfword); break; |
| case 2: __ ldr(result, element_address, kUnsignedWord); break; |
| default: UNREACHABLE(); |
| } |
| __ SmiTag(result); |
| break; |
| default: |
| UNREACHABLE(); |
| break; |
| } |
| } |
| |
| |
| Representation StoreIndexedInstr::RequiredInputRepresentation( |
| intptr_t idx) const { |
| // Array can be a Dart object or a pointer to external data. |
| if (idx == 0) return kNoRepresentation; // Flexible input representation. |
| if (idx == 1) return kTagged; // Index is a smi. |
| ASSERT(idx == 2); |
| switch (class_id_) { |
| case kArrayCid: |
| case kOneByteStringCid: |
| case kTypedDataInt8ArrayCid: |
| case kTypedDataUint8ArrayCid: |
| case kExternalTypedDataUint8ArrayCid: |
| case kTypedDataUint8ClampedArrayCid: |
| case kExternalTypedDataUint8ClampedArrayCid: |
| case kTypedDataInt16ArrayCid: |
| case kTypedDataUint16ArrayCid: |
| return kTagged; |
| case kTypedDataInt32ArrayCid: |
| return kUnboxedInt32; |
| case kTypedDataUint32ArrayCid: |
| return kUnboxedUint32; |
| case kTypedDataFloat32ArrayCid: |
| case kTypedDataFloat64ArrayCid: |
| return kUnboxedDouble; |
| case kTypedDataFloat32x4ArrayCid: |
| return kUnboxedFloat32x4; |
| case kTypedDataInt32x4ArrayCid: |
| return kUnboxedInt32x4; |
| case kTypedDataFloat64x2ArrayCid: |
| return kUnboxedFloat64x2; |
| default: |
| UNREACHABLE(); |
| return kTagged; |
| } |
| } |
| |
| |
| LocationSummary* StoreIndexedInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| const intptr_t kNumInputs = 3; |
| const intptr_t kNumTemps = 0; |
| LocationSummary* locs = new(zone) LocationSummary( |
| zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
| locs->set_in(0, Location::RequiresRegister()); |
| if (CanBeImmediateIndex(index(), class_id(), IsExternal())) { |
| locs->set_in(1, Location::Constant(index()->definition()->AsConstant())); |
| } else { |
| locs->set_in(1, Location::WritableRegister()); |
| } |
| switch (class_id()) { |
| case kArrayCid: |
| locs->set_in(2, ShouldEmitStoreBarrier() |
| ? Location::WritableRegister() |
| : Location::RegisterOrConstant(value())); |
| break; |
| case kExternalTypedDataUint8ArrayCid: |
| case kExternalTypedDataUint8ClampedArrayCid: |
| case kTypedDataInt8ArrayCid: |
| case kTypedDataUint8ArrayCid: |
| case kTypedDataUint8ClampedArrayCid: |
| case kOneByteStringCid: |
| case kTypedDataInt16ArrayCid: |
| case kTypedDataUint16ArrayCid: |
| case kTypedDataInt32ArrayCid: |
| case kTypedDataUint32ArrayCid: |
| locs->set_in(2, Location::RequiresRegister()); |
| break; |
| case kTypedDataFloat32ArrayCid: |
| case kTypedDataFloat64ArrayCid: // TODO(srdjan): Support Float64 constants. |
| locs->set_in(2, Location::RequiresFpuRegister()); |
| break; |
| case kTypedDataInt32x4ArrayCid: |
| case kTypedDataFloat32x4ArrayCid: |
| case kTypedDataFloat64x2ArrayCid: |
| locs->set_in(2, Location::RequiresFpuRegister()); |
| break; |
| default: |
| UNREACHABLE(); |
| return NULL; |
| } |
| return locs; |
| } |
| |
| |
| void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| // The array register points to the backing store for external arrays. |
| const Register array = locs()->in(0).reg(); |
| const Location index = locs()->in(1); |
| |
| Address element_address = index.IsRegister() |
| ? __ ElementAddressForRegIndex(false, // Store. |
| IsExternal(), class_id(), index_scale(), |
| array, index.reg()) |
| : __ ElementAddressForIntIndex( |
| IsExternal(), class_id(), index_scale(), |
| array, Smi::Cast(index.constant()).Value()); |
| |
| switch (class_id()) { |
| case kArrayCid: |
| if (ShouldEmitStoreBarrier()) { |
| const Register value = locs()->in(2).reg(); |
| __ StoreIntoObject(array, element_address, value); |
| } else if (locs()->in(2).IsConstant()) { |
| const Object& constant = locs()->in(2).constant(); |
| __ StoreIntoObjectNoBarrier(array, element_address, constant); |
| } else { |
| const Register value = locs()->in(2).reg(); |
| __ StoreIntoObjectNoBarrier(array, element_address, value); |
| } |
| break; |
| case kTypedDataInt8ArrayCid: |
| case kTypedDataUint8ArrayCid: |
| case kExternalTypedDataUint8ArrayCid: |
| case kOneByteStringCid: { |
| if (locs()->in(2).IsConstant()) { |
| const Smi& constant = Smi::Cast(locs()->in(2).constant()); |
| __ LoadImmediate(TMP, static_cast<int8_t>(constant.Value())); |
| __ str(TMP, element_address, kUnsignedByte); |
| } else { |
| const Register value = locs()->in(2).reg(); |
| __ SmiUntag(TMP, value); |
| __ str(TMP, element_address, kUnsignedByte); |
| } |
| break; |
| } |
| case kTypedDataUint8ClampedArrayCid: |
| case kExternalTypedDataUint8ClampedArrayCid: { |
| if (locs()->in(2).IsConstant()) { |
| const Smi& constant = Smi::Cast(locs()->in(2).constant()); |
| intptr_t value = constant.Value(); |
| // Clamp to 0x0 or 0xFF respectively. |
| if (value > 0xFF) { |
| value = 0xFF; |
| } else if (value < 0) { |
| value = 0; |
| } |
| __ LoadImmediate(TMP, static_cast<int8_t>(value)); |
| __ str(TMP, element_address, kUnsignedByte); |
| } else { |
| const Register value = locs()->in(2).reg(); |
| __ CompareImmediate(value, 0x1FE); // Compare with Smi(0xFF) == 0x1FE. |
| // Clamp to 0x00 or 0xFF respectively. |
| __ csetm(TMP, GT); // TMP = value > 0x1FE ? -1 : 0. |
| __ csel(TMP, value, TMP, LS); // TMP = value in range ? value : TMP. |
| __ SmiUntag(TMP); |
| __ str(TMP, element_address, kUnsignedByte); |
| } |
| break; |
| } |
| case kTypedDataInt16ArrayCid: |
| case kTypedDataUint16ArrayCid: { |
| const Register value = locs()->in(2).reg(); |
| __ SmiUntag(TMP, value); |
| __ str(TMP, element_address, kUnsignedHalfword); |
| break; |
| } |
| case kTypedDataInt32ArrayCid: |
| case kTypedDataUint32ArrayCid: { |
| const Register value = locs()->in(2).reg(); |
| __ str(value, element_address, kUnsignedWord); |
| break; |
| } |
| case kTypedDataFloat32ArrayCid: { |
| const VRegister value_reg = locs()->in(2).fpu_reg(); |
| __ fstrs(value_reg, element_address); |
| break; |
| } |
| case kTypedDataFloat64ArrayCid: { |
| const VRegister value_reg = locs()->in(2).fpu_reg(); |
| __ fstrd(value_reg, element_address); |
| break; |
| } |
| case kTypedDataFloat64x2ArrayCid: |
| case kTypedDataInt32x4ArrayCid: |
| case kTypedDataFloat32x4ArrayCid: { |
| const VRegister value_reg = locs()->in(2).fpu_reg(); |
| __ fstrq(value_reg, element_address); |
| break; |
| } |
| default: |
| UNREACHABLE(); |
| } |
| } |
| |
| |
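| // Loads the class id of 'value_reg' into 'value_cid_reg'. If the value is a |
| // Smi, either loads kSmiCid or jumps to 'value_is_smi' when that label is |
| // provided. |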
| static void LoadValueCid(FlowGraphCompiler* compiler, |
| Register value_cid_reg, |
| Register value_reg, |
| Label* value_is_smi = NULL) { |
| Label done; |
| if (value_is_smi == NULL) { |
| __ LoadImmediate(value_cid_reg, kSmiCid); |
| } |
| __ tsti(value_reg, Immediate(kSmiTagMask)); |
| if (value_is_smi == NULL) { |
| __ b(&done, EQ); |
| } else { |
| __ b(value_is_smi, EQ); |
| } |
| __ LoadClassId(value_cid_reg, value_reg); |
| __ Bind(&done); |
| } |
| |
| |
| LocationSummary* GuardFieldClassInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| const intptr_t kNumInputs = 1; |
| |
| const intptr_t value_cid = value()->Type()->ToCid(); |
| const intptr_t field_cid = field().guarded_cid(); |
| |
| const bool emit_full_guard = |
| !opt || (field_cid == kIllegalCid); |
| |
| const bool needs_value_cid_temp_reg = emit_full_guard || |
| ((value_cid == kDynamicCid) && (field_cid != kSmiCid)); |
| |
| const bool needs_field_temp_reg = emit_full_guard; |
| |
| intptr_t num_temps = 0; |
| if (needs_value_cid_temp_reg) { |
| num_temps++; |
| } |
| if (needs_field_temp_reg) { |
| num_temps++; |
| } |
| |
| LocationSummary* summary = new(zone) LocationSummary( |
| zone, kNumInputs, num_temps, LocationSummary::kNoCall); |
| summary->set_in(0, Location::RequiresRegister()); |
| |
| for (intptr_t i = 0; i < num_temps; i++) { |
| summary->set_temp(i, Location::RequiresRegister()); |
| } |
| |
| return summary; |
| } |
| |
| |
| void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| ASSERT(sizeof(classid_t) == kInt32Size); |
| const intptr_t value_cid = value()->Type()->ToCid(); |
| const intptr_t field_cid = field().guarded_cid(); |
| const intptr_t nullability = field().is_nullable() ? kNullCid : kIllegalCid; |
| |
| if (field_cid == kDynamicCid) { |
| ASSERT(!compiler->is_optimizing()); |
| return; // Nothing to emit. |
| } |
| |
| const bool emit_full_guard = |
| !compiler->is_optimizing() || (field_cid == kIllegalCid); |
| |
| const bool needs_value_cid_temp_reg = emit_full_guard || |
| ((value_cid == kDynamicCid) && (field_cid != kSmiCid)); |
| |
| const bool needs_field_temp_reg = emit_full_guard; |
| |
| const Register value_reg = locs()->in(0).reg(); |
| |
| const Register value_cid_reg = needs_value_cid_temp_reg ? |
| locs()->temp(0).reg() : kNoRegister; |
| |
| const Register field_reg = needs_field_temp_reg ? |
| locs()->temp(locs()->temp_count() - 1).reg() : kNoRegister; |
| |
| Label ok, fail_label; |
| |
| Label* deopt = compiler->is_optimizing() ? |
| compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField) : NULL; |
| |
| Label* fail = (deopt != NULL) ? deopt : &fail_label; |
| |
| if (emit_full_guard) { |
| __ LoadObject(field_reg, Field::ZoneHandle(field().raw())); |
| |
| FieldAddress field_cid_operand( |
| field_reg, Field::guarded_cid_offset(), kUnsignedWord); |
| FieldAddress field_nullability_operand( |
| field_reg, Field::is_nullable_offset(), kUnsignedWord); |
| |
| if (value_cid == kDynamicCid) { |
| LoadValueCid(compiler, value_cid_reg, value_reg); |
| Label skip_length_check; |
| __ ldr(TMP, field_cid_operand, kUnsignedWord); |
| __ CompareRegisters(value_cid_reg, TMP); |
| __ b(&ok, EQ); |
| __ ldr(TMP, field_nullability_operand, kUnsignedWord); |
| __ CompareRegisters(value_cid_reg, TMP); |
| } else if (value_cid == kNullCid) { |
| __ ldr(value_cid_reg, field_nullability_operand, kUnsignedWord); |
| __ CompareImmediate(value_cid_reg, value_cid); |
| } else { |
| Label skip_length_check; |
| __ ldr(value_cid_reg, field_cid_operand, kUnsignedWord); |
| __ CompareImmediate(value_cid_reg, value_cid); |
| } |
| __ b(&ok, EQ); |
| |
| // Check if the tracked state of the guarded field can be initialized |
| // inline. If the field needs a length check, we fall through to the runtime, |
| // which is responsible for computing the offset of the length field |
| // based on the class id. |
| // The length guard will be emitted separately when needed, via the |
| // GuardFieldLength instruction after GuardFieldClass. |
| if (!field().needs_length_check()) { |
| // An uninitialized field can be handled inline. Check if the |
| // field is still uninitialized. |
| __ ldr(TMP, field_cid_operand, kUnsignedWord); |
| __ CompareImmediate(TMP, kIllegalCid); |
| __ b(fail, NE); |
| |
| if (value_cid == kDynamicCid) { |
| __ str(value_cid_reg, field_cid_operand, kUnsignedWord); |
| __ str(value_cid_reg, field_nullability_operand, kUnsignedWord); |
| } else { |
| __ LoadImmediate(TMP, value_cid); |
| __ str(TMP, field_cid_operand, kUnsignedWord); |
| __ str(TMP, field_nullability_operand, kUnsignedWord); |
| } |
| |
| if (deopt == NULL) { |
| ASSERT(!compiler->is_optimizing()); |
| __ b(&ok); |
| } |
| } |
| |
| if (deopt == NULL) { |
| ASSERT(!compiler->is_optimizing()); |
| __ Bind(fail); |
| |
| __ LoadFieldFromOffset( |
| TMP, field_reg, Field::guarded_cid_offset(), kUnsignedWord); |
| __ CompareImmediate(TMP, kDynamicCid); |
| __ b(&ok, EQ); |
| |
| __ Push(field_reg); |
| __ Push(value_reg); |
| __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2); |
| __ Drop(2); // Drop the field and the value. |
| } |
| } else { |
| ASSERT(compiler->is_optimizing()); |
| ASSERT(deopt != NULL); |
| |
| // The field's guarded class id has been initialized and is known. |
| if (value_cid == kDynamicCid) { |
| // Value's class id is not known. |
| __ tsti(value_reg, Immediate(kSmiTagMask)); |
| |
| if (field_cid != kSmiCid) { |
| __ b(fail, EQ); |
| __ LoadClassId(value_cid_reg, value_reg); |
| __ CompareImmediate(value_cid_reg, field_cid); |
| } |
| |
| if (field().is_nullable() && (field_cid != kNullCid)) { |
| __ b(&ok, EQ); |
| __ CompareObject(value_reg, Object::null_object()); |
| } |
| |
| __ b(fail, NE); |
| } else { |
| // Both the value's and the field's class ids are known. |
| ASSERT((value_cid != field_cid) && (value_cid != nullability)); |
| __ b(fail); |
| } |
| } |
| __ Bind(&ok); |
| } |
| |
| |
| LocationSummary* GuardFieldLengthInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| const intptr_t kNumInputs = 1; |
| if (!opt || (field().guarded_list_length() == Field::kUnknownFixedLength)) { |
| const intptr_t kNumTemps = 3; |
| LocationSummary* summary = new(zone) LocationSummary( |
| zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
| summary->set_in(0, Location::RequiresRegister()); |
| // We need temporaries for the field object, the length offset, and the |
| // expected length. |
| summary->set_temp(0, Location::RequiresRegister()); |
| summary->set_temp(1, Location::RequiresRegister()); |
| summary->set_temp(2, Location::RequiresRegister()); |
| return summary; |
| } else { |
| LocationSummary* summary = new(zone) LocationSummary( |
| zone, kNumInputs, 0, LocationSummary::kNoCall); |
| summary->set_in(0, Location::RequiresRegister()); |
| return summary; |
| } |
| UNREACHABLE(); |
| } |
| |
| |
| void GuardFieldLengthInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| if (field().guarded_list_length() == Field::kNoFixedLength) { |
| ASSERT(!compiler->is_optimizing()); |
| return; // Nothing to emit. |
| } |
| |
| Label* deopt = compiler->is_optimizing() ? |
| compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField) : NULL; |
| |
| const Register value_reg = locs()->in(0).reg(); |
| |
| if (!compiler->is_optimizing() || |
| (field().guarded_list_length() == Field::kUnknownFixedLength)) { |
| const Register field_reg = locs()->temp(0).reg(); |
| const Register offset_reg = locs()->temp(1).reg(); |
| const Register length_reg = locs()->temp(2).reg(); |
| |
| Label ok; |
| |
| __ LoadObject(field_reg, Field::ZoneHandle(field().raw())); |
| |
| __ ldr(offset_reg, |
| FieldAddress(field_reg, |
| Field::guarded_list_length_in_object_offset_offset()), |
| kByte); |
| __ ldr(length_reg, FieldAddress(field_reg, |
| Field::guarded_list_length_offset())); |
| |
| __ tst(offset_reg, Operand(offset_reg)); |
| __ b(&ok, MI); |
| |
| // Load the length from the value. GuardFieldClass already verified that |
| // the value's class matches the guarded class id of the field. |
| // offset_reg contains the offset already corrected by -kHeapObjectTag, |
| // which is why we use Address instead of FieldAddress. |
| __ ldr(TMP, Address(value_reg, offset_reg)); |
| __ CompareRegisters(length_reg, TMP); |
| |
| if (deopt == NULL) { |
| __ b(&ok, EQ); |
| |
| __ Push(field_reg); |
| __ Push(value_reg); |
| __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2); |
| __ Drop(2); // Drop the field and the value. |
| } else { |
| __ b(deopt, NE); |
| } |
| |
| __ Bind(&ok); |
| } else { |
| ASSERT(compiler->is_optimizing()); |
| ASSERT(field().guarded_list_length() >= 0); |
| ASSERT(field().guarded_list_length_in_object_offset() != |
| Field::kUnknownLengthOffset); |
| |
| __ ldr(TMP, FieldAddress(value_reg, |
| field().guarded_list_length_in_object_offset())); |
| __ CompareImmediate(TMP, Smi::RawValue(field().guarded_list_length())); |
| __ b(deopt, NE); |
| } |
| } |
| |
| |
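| // Slow path that calls the allocation stub for 'cls' when inline allocation |
| // fails, leaving the newly allocated object in 'result'. |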
| class BoxAllocationSlowPath : public SlowPathCode { |
| public: |
| BoxAllocationSlowPath(Instruction* instruction, |
| const Class& cls, |
| Register result) |
| : instruction_(instruction), |
| cls_(cls), |
| result_(result) { } |
| |
| virtual void EmitNativeCode(FlowGraphCompiler* compiler) { |
| if (Assembler::EmittingComments()) { |
| __ Comment("%s slow path allocation of %s", |
| instruction_->DebugName(), |
| String::Handle(cls_.PrettyName()).ToCString()); |
| } |
| __ Bind(entry_label()); |
| const Code& stub = Code::ZoneHandle(compiler->zone(), |
| StubCode::GetAllocationStubForClass(cls_)); |
| const StubEntry stub_entry(stub); |
| |
| LocationSummary* locs = instruction_->locs(); |
| |
| locs->live_registers()->Remove(Location::RegisterLocation(result_)); |
| |
| compiler->SaveLiveRegisters(locs); |
| compiler->GenerateCall(Scanner::kNoSourcePos, // No token position. |
| stub_entry, |
| RawPcDescriptors::kOther, |
| locs); |
| compiler->AddStubCallTarget(stub); |
| __ mov(result_, R0); |
| compiler->RestoreLiveRegisters(locs); |
| __ b(exit_label()); |
| } |
| |
| static void Allocate(FlowGraphCompiler* compiler, |
| Instruction* instruction, |
| const Class& cls, |
| Register result, |
| Register temp) { |
| if (compiler->intrinsic_mode()) { |
| __ TryAllocate(cls, compiler->intrinsic_slow_path_label(), result, temp); |
| } else { |
| BoxAllocationSlowPath* slow_path = |
| new BoxAllocationSlowPath(instruction, cls, result); |
| compiler->AddSlowPathCode(slow_path); |
| |
| __ TryAllocate(cls, slow_path->entry_label(), result, temp); |
| __ Bind(slow_path->exit_label()); |
| } |
| } |
| |
| private: |
| Instruction* instruction_; |
| const Class& cls_; |
| const Register result_; |
| }; |
| |
| |
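| // Loads the mutable box stored at 'offset' in 'instance_reg' into 'box_reg'. |
| // If the field is still null, allocates a new box of class 'cls' and stores |
| // it into the field (with a write barrier). |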
| static void EnsureMutableBox(FlowGraphCompiler* compiler, |
| StoreInstanceFieldInstr* instruction, |
| Register box_reg, |
| const Class& cls, |
| Register instance_reg, |
| intptr_t offset, |
| Register temp) { |
| Label done; |
| __ LoadFieldFromOffset(box_reg, instance_reg, offset); |
| __ CompareObject(box_reg, Object::null_object()); |
| __ b(&done, NE); |
| BoxAllocationSlowPath::Allocate( |
| compiler, instruction, cls, box_reg, temp); |
| __ mov(temp, box_reg); |
| __ StoreIntoObjectOffset(instance_reg, offset, temp); |
| __ Bind(&done); |
| } |
| |
| |
| LocationSummary* StoreInstanceFieldInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| const intptr_t kNumInputs = 2; |
| const intptr_t kNumTemps = |
| (IsUnboxedStore() && opt) ? 2 : |
| ((IsPotentialUnboxedStore()) ? 2 : 0); |
| LocationSummary* summary = new(zone) LocationSummary( |
| zone, kNumInputs, kNumTemps, |
| ((IsUnboxedStore() && opt && is_potential_unboxed_initialization_) || |
| IsPotentialUnboxedStore()) |
| ? LocationSummary::kCallOnSlowPath |
| : LocationSummary::kNoCall); |
| |
| summary->set_in(0, Location::RequiresRegister()); |
| if (IsUnboxedStore() && opt) { |
| summary->set_in(1, Location::RequiresFpuRegister()); |
| summary->set_temp(0, Location::RequiresRegister()); |
| summary->set_temp(1, Location::RequiresRegister()); |
| } else if (IsPotentialUnboxedStore()) { |
| summary->set_in(1, ShouldEmitStoreBarrier() |
| ? Location::WritableRegister() |
| : Location::RequiresRegister()); |
| summary->set_temp(0, Location::RequiresRegister()); |
| summary->set_temp(1, Location::RequiresRegister()); |
| } else { |
| summary->set_in(1, ShouldEmitStoreBarrier() |
| ? Location::WritableRegister() |
| : Location::RegisterOrConstant(value())); |
| } |
| return summary; |
| } |
| |
| |
| void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| ASSERT(sizeof(classid_t) == kInt32Size); |
| Label skip_store; |
| |
| const Register instance_reg = locs()->in(0).reg(); |
| |
| if (IsUnboxedStore() && compiler->is_optimizing()) { |
| const VRegister value = locs()->in(1).fpu_reg(); |
| const Register temp = locs()->temp(0).reg(); |
| const Register temp2 = locs()->temp(1).reg(); |
| const intptr_t cid = field().UnboxedFieldCid(); |
| |
| if (is_potential_unboxed_initialization_) { |
| const Class* cls = NULL; |
| switch (cid) { |
| case kDoubleCid: |
| cls = &compiler->double_class(); |
| break; |
| case kFloat32x4Cid: |
| cls = &compiler->float32x4_class(); |
| break; |
| case kFloat64x2Cid: |
| cls = &compiler->float64x2_class(); |
| break; |
| default: |
| UNREACHABLE(); |
| } |
| |
| BoxAllocationSlowPath::Allocate(compiler, this, *cls, temp, temp2); |
| __ mov(temp2, temp); |
| __ StoreIntoObjectOffset(instance_reg, offset_in_bytes_, temp2); |
| } else { |
| __ LoadFieldFromOffset(temp, instance_reg, offset_in_bytes_); |
| } |
| switch (cid) { |
| case kDoubleCid: |
| __ Comment("UnboxedDoubleStoreInstanceFieldInstr"); |
| __ StoreDFieldToOffset(value, temp, Double::value_offset()); |
| break; |
| case kFloat32x4Cid: |
| __ Comment("UnboxedFloat32x4StoreInstanceFieldInstr"); |
| __ StoreQFieldToOffset(value, temp, Float32x4::value_offset()); |
| break; |
| case kFloat64x2Cid: |
| __ Comment("UnboxedFloat64x2StoreInstanceFieldInstr"); |
| __ StoreQFieldToOffset(value, temp, Float64x2::value_offset()); |
| break; |
| default: |
| UNREACHABLE(); |
| } |
| |
| return; |
| } |
| |
| if (IsPotentialUnboxedStore()) { |
| const Register value_reg = locs()->in(1).reg(); |
| const Register temp = locs()->temp(0).reg(); |
| const Register temp2 = locs()->temp(1).reg(); |
| |
| if (ShouldEmitStoreBarrier()) { |
| // Value input is a writable register and should be manually preserved |
| // across allocation slow-path. |
| locs()->live_registers()->Add(locs()->in(1), kTagged); |
| } |
| |
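| // The unboxing state of the field is only known at run time here: read the |
| // Field object's nullability, unboxing-candidate bit, and guarded cid, and |
| // dispatch to an unboxed double/float32x4/float64x2 store, falling back to |
| // a plain pointer store in every other case. |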
| Label store_pointer; |
| Label store_double; |
| Label store_float32x4; |
| Label store_float64x2; |
| |
| __ LoadObject(temp, Field::ZoneHandle(field().raw())); |
| |
| __ LoadFieldFromOffset(temp2, temp, Field::is_nullable_offset(), |
| kUnsignedWord); |
| __ CompareImmediate(temp2, kNullCid); |
| __ b(&store_pointer, EQ); |
| |
| __ LoadFromOffset( |
| temp2, temp, Field::kind_bits_offset() - kHeapObjectTag, |
| kUnsignedByte); |
| __ tsti(temp2, Immediate(1 << Field::kUnboxingCandidateBit)); |
| __ b(&store_pointer, EQ); |
| |
| __ LoadFieldFromOffset(temp2, temp, Field::guarded_cid_offset(), |
| kUnsignedWord); |
| __ CompareImmediate(temp2, kDoubleCid); |
| __ b(&store_double, EQ); |
| |
| __ LoadFieldFromOffset(temp2, temp, Field::guarded_cid_offset(), |
| kUnsignedWord); |
| __ CompareImmediate(temp2, kFloat32x4Cid); |
| __ b(&store_float32x4, EQ); |
| |
| __ LoadFieldFromOffset(temp2, temp, Field::guarded_cid_offset(), |
| kUnsignedWord); |
| __ CompareImmediate(temp2, kFloat64x2Cid); |
| __ b(&store_float64x2, EQ); |
| |
| // Default case: none of the unboxed cids matched, store as a pointer. |
| __ b(&store_pointer); |
| |
| if (!compiler->is_optimizing()) { |
| locs()->live_registers()->Add(locs()->in(0)); |
| locs()->live_registers()->Add(locs()->in(1)); |
| } |
| |
| { |
| __ Bind(&store_double); |
| EnsureMutableBox(compiler, |
| this, |
| temp, |
| compiler->double_class(), |
| instance_reg, |
| offset_in_bytes_, |
| temp2); |
| __ LoadDFieldFromOffset(VTMP, value_reg, Double::value_offset()); |
| __ StoreDFieldToOffset(VTMP, temp, Double::value_offset()); |
| __ b(&skip_store); |
| } |
| |
| { |
| __ Bind(&store_float32x4); |
| EnsureMutableBox(compiler, |
| this, |
| temp, |
| compiler->float32x4_class(), |
| instance_reg, |
| offset_in_bytes_, |
| temp2); |
| __ LoadQFieldFromOffset(VTMP, value_reg, Float32x4::value_offset()); |
| __ StoreQFieldToOffset(VTMP, temp, Float32x4::value_offset()); |
| __ b(&skip_store); |
| } |
| |
| { |
| __ Bind(&store_float64x2); |
| EnsureMutableBox(compiler, |
| this, |
| temp, |
| compiler->float64x2_class(), |
| instance_reg, |
| offset_in_bytes_, |
| temp2); |
| __ LoadQFieldFromOffset(VTMP, value_reg, Float64x2::value_offset()); |
| __ StoreQFieldToOffset(VTMP, temp, Float64x2::value_offset()); |
| __ b(&skip_store); |
| } |
| |
| __ Bind(&store_pointer); |
| } |
| |
| if (ShouldEmitStoreBarrier()) { |
| const Register value_reg = locs()->in(1).reg(); |
| __ StoreIntoObjectOffset( |
| instance_reg, offset_in_bytes_, value_reg, CanValueBeSmi()); |
| } else { |
| if (locs()->in(1).IsConstant()) { |
| __ StoreIntoObjectOffsetNoBarrier( |
| instance_reg, offset_in_bytes_, locs()->in(1).constant()); |
| } else { |
| const Register value_reg = locs()->in(1).reg(); |
| __ StoreIntoObjectOffsetNoBarrier( |
| instance_reg, offset_in_bytes_, value_reg); |
| } |
| } |
| __ Bind(&skip_store); |
| } |
| |
| |
| LocationSummary* LoadStaticFieldInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| const intptr_t kNumInputs = 1; |
| const intptr_t kNumTemps = 0; |
| LocationSummary* summary = new(zone) LocationSummary( |
| zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
| summary->set_in(0, Location::RequiresRegister()); |
| summary->set_out(0, Location::RequiresRegister()); |
| return summary; |
| } |
| |
| |
| // When the parser is building an implicit static getter for optimization, |
| // it can generate a function body where deoptimization ids do not line up |
| // with the unoptimized code. |
| // |
| // This is safe only so long as LoadStaticFieldInstr cannot deoptimize. |
| void LoadStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| const Register field = locs()->in(0).reg(); |
| const Register result = locs()->out(0).reg(); |
| __ LoadFieldFromOffset(result, field, Field::static_value_offset()); |
| } |
| |
| |
| LocationSummary* StoreStaticFieldInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| LocationSummary* locs = new(zone) LocationSummary( |
| zone, 1, 1, LocationSummary::kNoCall); |
| locs->set_in(0, value()->NeedsStoreBuffer() ? Location::WritableRegister() |
| : Location::RequiresRegister()); |
| locs->set_temp(0, Location::RequiresRegister()); |
| return locs; |
| } |
| |
| |
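| // Stores 'value' into the static_value slot of the Field object, emitting |
| // a write barrier only when the value may require the store buffer. |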
| void StoreStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| const Register value = locs()->in(0).reg(); |
| const Register temp = locs()->temp(0).reg(); |
| |
| __ LoadObject(temp, field()); |
| if (this->value()->NeedsStoreBuffer()) { |
| __ StoreIntoObjectOffset( |
| temp, Field::static_value_offset(), value, CanValueBeSmi()); |
| } else { |
| __ StoreIntoObjectOffsetNoBarrier(temp, |
| Field::static_value_offset(), |
| value); |
| } |
| } |
| |
| |
| LocationSummary* InstanceOfInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| const intptr_t kNumInputs = 3; |
| const intptr_t kNumTemps = 0; |
| LocationSummary* summary = new(zone) LocationSummary( |
| zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
| summary->set_in(0, Location::RegisterLocation(R0)); |
| summary->set_in(1, Location::RegisterLocation(R2)); |
| summary->set_in(2, Location::RegisterLocation(R1)); |
| summary->set_out(0, Location::RegisterLocation(R0)); |
| return summary; |
| } |
| |
| |
| void InstanceOfInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| ASSERT(locs()->in(0).reg() == R0); // Value. |
| ASSERT(locs()->in(1).reg() == R2); // Instantiator. |
| ASSERT(locs()->in(2).reg() == R1); // Instantiator type arguments. |
| |
| compiler->GenerateInstanceOf(token_pos(), |
| deopt_id(), |
| type(), |
| negate_result(), |
| locs()); |
| ASSERT(locs()->out(0).reg() == R0); |
| } |
| |
| |
| LocationSummary* CreateArrayInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| const intptr_t kNumInputs = 2; |
| const intptr_t kNumTemps = 0; |
| LocationSummary* locs = new(zone) LocationSummary( |
| zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
| locs->set_in(kElementTypePos, Location::RegisterLocation(R1)); |
| locs->set_in(kLengthPos, Location::RegisterLocation(R2)); |
| locs->set_out(0, Location::RegisterLocation(R0)); |
| return locs; |
| } |
| |
| |
| // Inlines array allocation when the number of elements is a known constant. |
| static void InlineArrayAllocation(FlowGraphCompiler* compiler, |
| intptr_t num_elements, |
| Label* slow_path, |
| Label* done) { |
| const int kInlineArraySize = 12; // Same as kInlineInstanceSize. |
| const Register kLengthReg = R2; |
| const Register kElemTypeReg = R1; |
| const intptr_t instance_size = Array::InstanceSize(num_elements); |
| |
| __ TryAllocateArray(kArrayCid, instance_size, slow_path, |
| R0, // instance |
| R3, // end address |
| R6, |
| R8); |
| // R0: new object start as a tagged pointer. |
| // R3: new object end address. |
| |
| // Store the type argument field. |
| __ StoreIntoObjectNoBarrier(R0, |
| FieldAddress(R0, Array::type_arguments_offset()), |
| kElemTypeReg); |
| |
| // Set the length field. |
| __ StoreIntoObjectNoBarrier(R0, |
| FieldAddress(R0, Array::length_offset()), |
| kLengthReg); |
| |
| // TODO(zra): Use stp once added. |
| // Initialize all array elements to raw_null. |
| // R0: new object start as a tagged pointer. |
| // R3: new object end address. |
| // R8: iterator which initially points to the start of the variable |
| // data area to be initialized. |
| // R6: null |
| if (num_elements > 0) { |
| const intptr_t array_size = instance_size - sizeof(RawArray); |
| __ LoadObject(R6, Object::null_object()); |
| __ AddImmediate(R8, R0, sizeof(RawArray) - kHeapObjectTag); |
| if (array_size < (kInlineArraySize * kWordSize)) { |
| intptr_t current_offset = 0; |
| while (current_offset < array_size) { |
| __ str(R6, Address(R8, current_offset)); |
| current_offset += kWordSize; |
| } |
| } else { |
| Label end_loop, init_loop; |
| __ Bind(&init_loop); |
| __ CompareRegisters(R8, R3); |
| __ b(&end_loop, CS); |
| __ str(R6, Address(R8)); |
| __ AddImmediate(R8, R8, kWordSize); |
| __ b(&init_loop); |
| __ Bind(&end_loop); |
| } |
| } |
| __ b(done); |
| } |
| |
| |
| void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| const Register kLengthReg = R2; |
| const Register kElemTypeReg = R1; |
| const Register kResultReg = R0; |
| |
| ASSERT(locs()->in(kElementTypePos).reg() == kElemTypeReg); |
| ASSERT(locs()->in(kLengthPos).reg() == kLengthReg); |
| |
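| // When optimizing and the length is a constant Smi within the allowed |
| // range, allocate the array inline and fall back to the AllocateArray |
| // runtime entry on the slow path; otherwise call the generic |
| // AllocateArray stub below. |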
| if (compiler->is_optimizing() && |
| num_elements()->BindsToConstant() && |
| num_elements()->BoundConstant().IsSmi()) { |
| const intptr_t length = Smi::Cast(num_elements()->BoundConstant()).Value(); |
| if ((length >= 0) && (length <= Array::kMaxElements)) { |
| Label slow_path, done; |
| InlineArrayAllocation(compiler, length, &slow_path, &done); |
| __ Bind(&slow_path); |
| __ PushObject(Object::null_object()); // Make room for the result. |
| __ Push(kLengthReg); // length. |
| __ Push(kElemTypeReg); // element type. |
| compiler->GenerateRuntimeCall(token_pos(), |
| deopt_id(), |
| kAllocateArrayRuntimeEntry, |
| 2, |
| locs()); |
| __ Drop(2); |
| __ Pop(kResultReg); |
| __ Bind(&done); |
| return; |
| } |
| } |
| const Code& stub = Code::ZoneHandle(compiler->zone(), |
| StubCode::AllocateArray_entry()->code()); |
| compiler->AddStubCallTarget(stub); |
| compiler->GenerateCall(token_pos(), |
| *StubCode::AllocateArray_entry(), |
| RawPcDescriptors::kOther, |
| locs()); |
| ASSERT(locs()->out(0).reg() == kResultReg); |
| } |
| |
| |
| LocationSummary* LoadFieldInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| const intptr_t kNumInputs = 1; |
| const intptr_t kNumTemps = |
| (IsUnboxedLoad() && opt) ? 1 : |
| ((IsPotentialUnboxedLoad()) ? 1 : 0); |
| LocationSummary* locs = new(zone) LocationSummary( |
| zone, kNumInputs, kNumTemps, |
| (opt && !IsPotentialUnboxedLoad()) |
| ? LocationSummary::kNoCall |
| : LocationSummary::kCallOnSlowPath); |
| |
| locs->set_in(0, Location::RequiresRegister()); |
| |
| if (IsUnboxedLoad() && opt) { |
| locs->set_temp(0, Location::RequiresRegister()); |
| } else if (IsPotentialUnboxedLoad()) { |
| locs->set_temp(0, Location::RequiresRegister()); |
| } |
| locs->set_out(0, Location::RequiresRegister()); |
| return locs; |
| } |
| |
| |
| void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| ASSERT(sizeof(classid_t) == kInt32Size); |
| const Register instance_reg = locs()->in(0).reg(); |
| if (IsUnboxedLoad() && compiler->is_optimizing()) { |
| const VRegister result = locs()->out(0).fpu_reg(); |
| const Register temp = locs()->temp(0).reg(); |
| __ LoadFieldFromOffset(temp, instance_reg, offset_in_bytes()); |
| const intptr_t cid = field()->UnboxedFieldCid(); |
| switch (cid) { |
| case kDoubleCid: |
| __ Comment("UnboxedDoubleLoadFieldInstr"); |
| __ LoadDFieldFromOffset(result, temp, Double::value_offset()); |
| break; |
| case kFloat32x4Cid: |
| __ LoadQFieldFromOffset(result, temp, Float32x4::value_offset()); |
| break; |
| case kFloat64x2Cid: |
| __ LoadQFieldFromOffset(result, temp, Float64x2::value_offset()); |
| break; |
| default: |
| UNREACHABLE(); |
| } |
| return; |
| } |
| |
| Label done; |
| const Register result_reg = locs()->out(0).reg(); |
| if (IsPotentialUnboxedLoad()) { |
| const Register temp = locs()->temp(0).reg(); |
| |
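| // Mirror of the store case: the field's nullability and guarded cid are |
| // read from the Field object at run time, and the load dispatches to a |
| // boxing path for double/float32x4/float64x2 values, falling back to a |
| // plain pointer load otherwise. |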
| Label load_pointer; |
| Label load_double; |
| Label load_float32x4; |
| Label load_float64x2; |
| |
| __ LoadObject(result_reg, Field::ZoneHandle(field()->raw())); |
| |
| FieldAddress field_cid_operand( |
| result_reg, Field::guarded_cid_offset(), kUnsignedWord); |
| FieldAddress field_nullability_operand( |
| result_reg, Field::is_nullable_offset(), kUnsignedWord); |
| |
| __ ldr(temp, field_nullability_operand, kUnsignedWord); |
| __ CompareImmediate(temp, kNullCid); |
| __ b(&load_pointer, EQ); |
| |
| __ ldr(temp, field_cid_operand, kUnsignedWord); |
| __ CompareImmediate(temp, kDoubleCid); |
| __ b(&load_double, EQ); |
| |
| __ ldr(temp, field_cid_operand, kUnsignedWord); |
| __ CompareImmediate(temp, kFloat32x4Cid); |
| __ b(&load_float32x4, EQ); |
| |
| __ ldr(temp, field_cid_operand, kUnsignedWord); |
| __ CompareImmediate(temp, kFloat64x2Cid); |
| __ b(&load_float64x2, EQ); |
| |
| // Default case: none of the unboxed cids matched, load as a pointer. |
| __ b(&load_pointer); |
| |
| if (!compiler->is_optimizing()) { |
| locs()->live_registers()->Add(locs()->in(0)); |
| } |
| |
| { |
| __ Bind(&load_double); |
| BoxAllocationSlowPath::Allocate(compiler, |
| this, |
| compiler->double_class(), |
| result_reg, |
| temp); |
| __ LoadFieldFromOffset(temp, instance_reg, offset_in_bytes()); |
| __ LoadDFieldFromOffset(VTMP, temp, Double::value_offset()); |
| __ StoreDFieldToOffset(VTMP, result_reg, Double::value_offset()); |
| __ b(&done); |
| } |
| |
| { |
| __ Bind(&load_float32x4); |
| BoxAllocationSlowPath::Allocate(compiler, |
| this, |
| compiler->float32x4_class(), |
| result_reg, |
| temp); |
| __ LoadFieldFromOffset(temp, instance_reg, offset_in_bytes()); |
| __ LoadQFieldFromOffset(VTMP, temp, Float32x4::value_offset()); |
| __ StoreQFieldToOffset(VTMP, result_reg, Float32x4::value_offset()); |
| __ b(&done); |
| } |
| |
| { |
| __ Bind(&load_float64x2); |
| BoxAllocationSlowPath::Allocate(compiler, |
| this, |
| compiler->float64x2_class(), |
| result_reg, |
| temp); |
| __ LoadFieldFromOffset(temp, instance_reg, offset_in_bytes()); |
| __ LoadQFieldFromOffset(VTMP, temp, Float64x2::value_offset()); |
| __ StoreQFieldToOffset(VTMP, result_reg, Float64x2::value_offset()); |
| __ b(&done); |
| } |
| |
| __ Bind(&load_pointer); |
| } |
| __ LoadFieldFromOffset(result_reg, instance_reg, offset_in_bytes()); |
| __ Bind(&done); |
| } |
| |
| |
| LocationSummary* InstantiateTypeInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| const intptr_t kNumInputs = 1; |
| const intptr_t kNumTemps = 0; |
| LocationSummary* locs = new(zone) LocationSummary( |
| zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
| locs->set_in(0, Location::RegisterLocation(R0)); |
| locs->set_out(0, Location::RegisterLocation(R0)); |
| return locs; |
| } |
| |
| |
| void InstantiateTypeInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| const Register instantiator_reg = locs()->in(0).reg(); |
| const Register result_reg = locs()->out(0).reg(); |
| |
| // 'instantiator_reg' is the instantiator TypeArguments object (or null). |
| // A runtime call to instantiate the type is required. |
| __ PushObject(Object::null_object()); // Make room for the result. |
| __ PushObject(type()); |
| __ Push(instantiator_reg); // Push instantiator type arguments. |
| compiler->GenerateRuntimeCall(token_pos(), |
| deopt_id(), |
| kInstantiateTypeRuntimeEntry, |
| 2, |
| locs()); |
| __ Drop(2); // Drop instantiator and uninstantiated type. |
| __ Pop(result_reg); // Pop instantiated type. |
| ASSERT(instantiator_reg == result_reg); |
| } |
| |
| |
| LocationSummary* InstantiateTypeArgumentsInstr::MakeLocationSummary( |
| Zone* zone, bool opt) const { |
| const intptr_t kNumInputs = 1; |
| const intptr_t kNumTemps = 0; |
| LocationSummary* locs = new(zone) LocationSummary( |
| zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
| locs->set_in(0, Location::RegisterLocation(R0)); |
| locs->set_out(0, Location::RegisterLocation(R0)); |
| return locs; |
| } |
| |
| |
| void InstantiateTypeArgumentsInstr::EmitNativeCode( |
| FlowGraphCompiler* compiler) { |
| const Register instantiator_reg = locs()->in(0).reg(); |
| const Register result_reg = locs()->out(0).reg(); |
| ASSERT(instantiator_reg == R0); |
| ASSERT(instantiator_reg == result_reg); |
| |
| // 'instantiator_reg' is the instantiator TypeArguments object (or null). |
| ASSERT(!type_arguments().IsUninstantiatedIdentity() && |
| !type_arguments().CanShareInstantiatorTypeArguments( |
| instantiator_class())); |
| // If the instantiator is null and the type argument vector instantiated |
| // from null would be a vector of dynamic, then use null as the type |
| // arguments. |
| Label type_arguments_instantiated; |
| const intptr_t len = type_arguments().Length(); |
| if (type_arguments().IsRawInstantiatedRaw(len)) { |
| __ CompareObject(instantiator_reg, Object::null_object()); |
| __ b(&type_arguments_instantiated, EQ); |
| } |
| |
| __ LoadObject(R2, type_arguments()); |
| __ LoadFieldFromOffset(R2, R2, TypeArguments::instantiations_offset()); |
| __ AddImmediate(R2, R2, Array::data_offset() - kHeapObjectTag); |
| // The instantiations cache is initialized with Object::zero_array() and is |
| // therefore guaranteed to contain kNoInstantiator. No length check needed. |
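| // Each cache entry occupies two words: the instantiator type arguments |
| // followed by the corresponding instantiated type arguments, which is why |
| // the loop below advances R2 by 2 * kWordSize and, on a hit, reads the |
| // result from offset 1 * kWordSize. |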
| Label loop, found, slow_case; |
| __ Bind(&loop); |
| __ LoadFromOffset(R1, R2, 0 * kWordSize); // Cached instantiator. |
| __ CompareRegisters(R1, R0); |
| __ b(&found, EQ); |
| __ AddImmediate(R2, R2, 2 * kWordSize); |
| __ CompareImmediate(R1, Smi::RawValue(StubCode::kNoInstantiator)); |
| __ b(&loop, NE); |
| __ b(&slow_case); |
| __ Bind(&found); |
| __ LoadFromOffset(R0, R2, 1 * kWordSize); // Cached instantiated args. |
| __ b(&type_arguments_instantiated); |
| |
| __ Bind(&slow_case); |
| // Instantiate non-null type arguments. |
| // A runtime call to instantiate the type arguments is required. |
| __ PushObject(Object::null_object()); // Make room for the result. |
| __ PushObject(type_arguments()); |
| __ Push(instantiator_reg); // Push instantiator type arguments. |
| compiler->GenerateRuntimeCall(token_pos(), |
| deopt_id(), |
| kInstantiateTypeArgumentsRuntimeEntry, |
| 2, |
| locs()); |
| __ Drop(2); // Drop instantiator and uninstantiated type arguments. |
| __ Pop(result_reg); // Pop instantiated type arguments. |
| __ Bind(&type_arguments_instantiated); |
| } |
| |
| |
| LocationSummary* AllocateUninitializedContextInstr::MakeLocationSummary( |
| Zone* zone, |
| bool opt) const { |
| ASSERT(opt); |
| const intptr_t kNumInputs = 0; |
| const intptr_t kNumTemps = 3; |
| LocationSummary* locs = new(zone) LocationSummary( |
| zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath); |
| locs->set_temp(0, Location::RegisterLocation(R1)); |
| locs->set_temp(1, Location::RegisterLocation(R2)); |
| locs->set_temp(2, Location::RegisterLocation(R3)); |
| locs->set_out(0, Location::RegisterLocation(R0)); |
| return locs; |
| } |
| |
| |
| class AllocateContextSlowPath : public SlowPathCode { |
| public: |
| explicit AllocateContextSlowPath( |
| AllocateUninitializedContextInstr* instruction) |
| : instruction_(instruction) { } |
| |
| virtual void EmitNativeCode(FlowGraphCompiler* compiler) { |
| __ Comment("AllocateContextSlowPath"); |
| __ Bind(entry_label()); |
| |
| LocationSummary* locs = instruction_->locs(); |
| locs->live_registers()->Remove(locs->out(0)); |
| |
| compiler->SaveLiveRegisters(locs); |
| |
| __ LoadImmediate(R1, instruction_->num_context_variables()); |
| const Code& stub = Code::ZoneHandle( |
| compiler->zone(), StubCode::AllocateContext_entry()->code()); |
| compiler->AddStubCallTarget(stub); |
| compiler->GenerateCall(instruction_->token_pos(), |
| *StubCode::AllocateContext_entry(), |
| RawPcDescriptors::kOther, |
| locs); |
| ASSERT(instruction_->locs()->out(0).reg() == R0); |
| compiler->RestoreLiveRegisters(instruction_->locs()); |
| __ b(exit_label()); |
| } |
| |
| private: |
| AllocateUninitializedContextInstr* instruction_; |
| }; |
| |
| |
| void AllocateUninitializedContextInstr::EmitNativeCode( |
| FlowGraphCompiler* compiler) { |
| Register temp0 = locs()->temp(0).reg(); |
| Register temp1 = locs()->temp(1).reg(); |
| Register temp2 = locs()->temp(2).reg(); |
| Register result = locs()->out(0).reg(); |
| // Try to allocate the object. |
| AllocateContextSlowPath* slow_path = new AllocateContextSlowPath(this); |
| compiler->AddSlowPathCode(slow_path); |
| intptr_t instance_size = Context::InstanceSize(num_context_variables()); |
| |
| __ TryAllocateArray(kContextCid, instance_size, slow_path->entry_label(), |
| result, // instance |
| temp0, |
| temp1, |
| temp2); |
| |
| // Set up the number-of-context-variables field. |
| __ LoadImmediate(temp0, num_context_variables()); |
| __ str(temp0, FieldAddress(result, Context::num_variables_offset())); |
| |
| __ Bind(slow_path->exit_label()); |
| } |
| |
| |
| LocationSummary* AllocateContextInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| const intptr_t kNumInputs = 0; |
| const intptr_t kNumTemps = 1; |
| LocationSummary* locs = new(zone) LocationSummary( |
| zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
| locs->set_temp(0, Location::RegisterLocation(R1)); |
| locs->set_out(0, Location::RegisterLocation(R0)); |
| return locs; |
| } |
| |
| |
| void AllocateContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| ASSERT(locs()->temp(0).reg() == R1); |
| ASSERT(locs()->out(0).reg() == R0); |
| |
| __ LoadImmediate(R1, num_context_variables()); |
| compiler->GenerateCall(token_pos(), |
| *StubCode::AllocateContext_entry(), |
| RawPcDescriptors::kOther, |
| locs()); |
| } |
| |
| |
| LocationSummary* InitStaticFieldInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| const intptr_t kNumInputs = 1; |
| const intptr_t kNumTemps = 1; |
| LocationSummary* locs = new(zone) LocationSummary( |
| zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
| locs->set_in(0, Location::RegisterLocation(R0)); |
| locs->set_temp(0, Location::RegisterLocation(R1)); |
| return locs; |
| } |
| |
| |
| void InitStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| Register field = locs()->in(0).reg(); |
| Register temp = locs()->temp(0).reg(); |
| Label call_runtime, no_call; |
| |
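| // The runtime entry is invoked when the field still holds the |
| // uninitialized sentinel or the transition sentinel; any other value means |
| // the field is already initialized and no call is needed. |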
| __ ldr(temp, FieldAddress(field, Field::static_value_offset())); |
| __ CompareObject(temp, Object::sentinel()); |
| __ b(&call_runtime, EQ); |
| |
| __ CompareObject(temp, Object::transition_sentinel()); |
| __ b(&no_call, NE); |
| |
| __ Bind(&call_runtime); |
| __ PushObject(Object::null_object()); // Make room for (unused) result. |
| __ Push(field); |
| compiler->GenerateRuntimeCall(token_pos(), |
| deopt_id(), |
| kInitStaticFieldRuntimeEntry, |
| 1, |
| locs()); |
| __ Drop(2); // Remove argument and result placeholder. |
| __ Bind(&no_call); |
| } |
| |
| |
| LocationSummary* CloneContextInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| const intptr_t kNumInputs = 1; |
| const intptr_t kNumTemps = 0; |
| LocationSummary* locs = new(zone) LocationSummary( |
| zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
| locs->set_in(0, Location::RegisterLocation(R0)); |
| locs->set_out(0, Location::RegisterLocation(R0)); |
| return locs; |
| } |
| |
| |
| void CloneContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| const Register context_value = locs()->in(0).reg(); |
| const Register result = locs()->out(0).reg(); |
| |
| __ PushObject(Object::null_object()); // Make room for the result. |
| __ Push(context_value); |
| compiler->GenerateRuntimeCall(token_pos(), |
| deopt_id(), |
| kCloneContextRuntimeEntry, |
| 1, |
| locs()); |
| __ Drop(1); // Remove argument. |
| __ Pop(result); // Get result (cloned context). |
| } |
| |
| |
| LocationSummary* CatchBlockEntryInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| UNREACHABLE(); |
| return NULL; |
| } |
| |
| |
| void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| __ Bind(compiler->GetJumpLabel(this)); |
| compiler->AddExceptionHandler(catch_try_index(), |
| try_index(), |
| compiler->assembler()->CodeSize(), |
| catch_handler_types_, |
| needs_stacktrace()); |
| |
| // Restore the pool pointer. |
| __ RestoreCodePointer(); |
| __ LoadPoolPointer(); |
| |
| if (HasParallelMove()) { |
| compiler->parallel_move_resolver()->EmitNativeCode(parallel_move()); |
| } |
| |
| // Restore SP from FP as we are coming from a throw and the code for |
| // popping arguments has not been run. |
| const intptr_t fp_sp_dist = |
| (kFirstLocalSlotFromFp + 1 - compiler->StackSize()) * kWordSize; |
| ASSERT(fp_sp_dist <= 0); |
| __ AddImmediate(SP, FP, fp_sp_dist); |
| |
| // Initialize the exception and stack trace variables by storing the |
| // incoming exception and stack trace registers into their stack slots. |
| __ StoreToOffset(kExceptionObjectReg, |
| FP, exception_var().index() * kWordSize); |
| __ StoreToOffset(kStackTraceObjectReg, |
| FP, stacktrace_var().index() * kWordSize); |
| } |
| |
| |
| LocationSummary* CheckStackOverflowInstr::MakeLocationSummary(Zone* zone, |
| bool opt) const { |
| const intptr_t kNumInputs = 0; |
| const intptr_t kNumTemps = 1; |
| LocationSummary* summary = new(zone) LocationSummary( |
| zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath); |
| summary->set_temp(0, Location::RequiresRegister()); |
| return summary; |
| } |
| |
| |
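| // Slow path for stack overflow checks: it calls the StackOverflow runtime |
| // entry with the live registers saved. When OSR is enabled, a separate |
| // entry first records the OSR request in the isolate's stack overflow |
| // flags and then falls through into the same runtime call. |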
| class CheckStackOverflowSlowPath : public SlowPathCode { |
| public: |
| explicit CheckStackOverflowSlowPath(CheckStackOverflowInstr* instruction) |
| : instruction_(instruction) { } |
| |
| virtual void EmitNativeCode(FlowGraphCompiler* compiler) { |
| if (FLAG_use_osr && osr_entry_label()->IsLinked()) { |
| uword flags_address = Isolate::Current()->stack_overflow_flags_address(); |
| const Register value = instruction_->locs()->temp(0).reg(); |
| __ Comment("CheckStackOverflowSlowPathOsr"); |
| __ Bind(osr_entry_label()); |
| ASSERT(FLAG_allow_absolute_addresses); |
| __ LoadImmediate(TMP, flags_address); |
| __ LoadImmediate(value, Isolate::kOsrRequest); |
| __ str(value, Address(TMP)); |
| } |
| __ Comment("CheckStackOverflowSlowPath"); |
| __ Bind(entry_label()); |
| compiler->SaveLiveRegisters(instruction_->locs()); |
| // pending_deoptimization_env_ is needed to generate a runtime call that |
| // may throw an exception. |
| ASSERT(compiler->pending_deoptimization_env_ == NULL); |
| Environment* env = compiler->SlowPathEnvironmentFor(instruction_); |
| compiler->pending_deoptimization_env_ = env; |
| compiler->GenerateRuntimeCall(instruction_->token_pos(), |
| instruction_->deopt_id(), |
| kStackOverflowRuntimeEntry, |
| 0, |
| instruction_->locs()); |
| |
| if (FLAG_use_osr && !compiler->is_optimizing() && instruction_->in_loop()) { |
| // In unoptimized code, record loop stack checks as possible OSR entries. |
| compiler->AddCurrentDescriptor(RawPcDescriptors::kOsrEntry, |
| instruction_->deopt_id(), |
| 0); // No token position. |
| } |
| compiler->pending_deoptimization_env_ = NULL; |
| compiler->RestoreLiveRegisters(instruction_->locs()); |
| __ b(exit_label()); |
| } |
| |
| Label* osr_entry_label() { |
| ASSERT(FLAG_use_osr); |
| return &osr_entry_label_; |
| } |
| |
| private: |
| CheckStackOverflowInstr* instruction_; |
| Label osr_entry_label_; |
| }; |
| |
| |
| void CheckStackOverflowInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| CheckStackOverflowSlowPath* slow_path = new CheckStackOverflowSlowPath(this); |
| compiler->AddSlowPathCode(slow_path); |
| |
| if (compiler->is_optimizing() && FLAG_allow_absolute_addresses) { |
| __ LoadImmediate(TMP, Isolate::Current()->stack_limit_address()); |
| __ ldr(TMP, Address(TMP)); |
| } else { |
| __ LoadIsolate(TMP); |
| __ ldr(TMP, Address(TMP, Isolate::stack_limit_offset())); |
| } |
| __ CompareRegisters(SP, TMP); |
| __ b(slow_path->entry_label(), LS); |
| if (compiler->CanOSRFunction() && in_loop()) { |
| const Register temp = locs()->temp(0).reg(); |
| // In unoptimized code check the usage counter to trigger OSR at loop |
| // stack checks. Use progressively higher thresholds for more deeply |
| // nested loops to attempt to hit outer loops with OSR when possible. |
| __ LoadObject(temp, compiler->parsed_function().function()); |
| intptr_t threshold = |
| FLAG_optimization_counter_threshold * (loop_depth() + 1); |
| __ LoadFieldFromOffset( |
| temp, temp, Function::usage_counter_offset(), kWord); |
| __ CompareImmediate(temp, threshold); |
| __ b(slow_path->osr_entry_label(), GE); |
| } |
| if (compiler->ForceSlowPathForStackOverflow()) { |
| __ b(slow_path->entry_label()); |
| } |
| __ Bind(slow_path->exit_label()); |
| } |
| |
| |
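| // Used for --throw_on_javascript_int_overflow: deoptimizes when the value |
| // in 'result' lies outside [-2^53, 2^53] (0x20000000000000 is 2^53). |
| // Adding 2^53 biases the value into [0, 2^54], so a single unsigned |
| // compare against 2^54 (TMP shifted left by one) catches both overflow |
| // directions. |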
| static void EmitJavascriptOverflowCheck(FlowGraphCompiler* compiler, |
| Range* range, |
| Label* overflow, |
| Register result) { |
| if (!RangeUtils::IsWithin(range, -0x20000000000000LL, 0x20000000000000LL)) { |
| ASSERT(overflow != NULL); |
| __ LoadImmediate(TMP, 0x20000000000000LL); |
| __ add(TMP2, result, Operand(TMP)); |
| __ cmp(TMP2, Operand(TMP, LSL, 1)); |
| __ b(overflow, HI); |
| } |
| } |
| |
| |
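| // Emits a Smi shift-left. For a constant shift count the overflow check |
| // shifts left and then arithmetic-shifts right by the same amount; if that |
| // round trip does not reproduce the original value the result overflowed |
| // and we deoptimize. For a non-constant count the count is untagged and, |
| // where necessary, range-checked first; in the non-overflowing case an |
| // out-of-range count simply yields zero. |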
| static void EmitSmiShiftLeft(FlowGraphCompiler* compiler, |
| BinarySmiOpInstr* shift_left) { |
| const LocationSummary& locs = *shift_left->locs(); |
| const Register left = locs.in(0).reg(); |
| const Register result = locs.out(0).reg(); |
| Label* deopt = shift_left->CanDeoptimize() ? |
| compiler->AddDeoptStub(shift_left->deopt_id(), ICData::kDeoptBinarySmiOp) |
| : NULL; |
| if (locs.in(1).IsConstant()) { |
| const Object& constant = locs.in(1).constant(); |
| ASSERT(constant.IsSmi()); |
| // Immediate shift operation takes 6 bits for the count. |
| const intptr_t kCountLimit = 0x3F; |
| const intptr_t value = Smi::Cast(constant).Value(); |
| ASSERT((0 < value) && (value < kCountLimit)); |
| if (shift_left->can_overflow()) { |
| // Check for overflow (preserve left). |
| __ LslImmediate(TMP, left, value); |
| __ cmp(left, Operand(TMP, ASR, value)); |
| __ b(deopt, NE); // Overflow. |
| } |
| // Shift for the result now that we know there is no overflow. |
| __ LslImmediate(result, left, value); |
| if (FLAG_throw_on_javascript_int_overflow) { |
| EmitJavascriptOverflowCheck(compiler, shift_left->range(), deopt, result); |
| } |
| return; |
| } |
| |
| // Right (locs.in(1)) is not constant. |
| const Register right = locs.in(1).reg(); |
| Range* right_range = shift_left->right()->definition()->range(); |
| if (shift_left->left()->BindsToConstant() && shift_left->can_overflow()) { |
| // TODO(srdjan): Implement code below for is_truncating(). |
| // If left is constant, we know the maximal allowed size for right. |
| const Object& obj = shift_left->left()->BoundConstant(); |
| if (obj.IsSmi()) { |
| const intptr_t left_int = Smi::Cast(obj).Value(); |
| if (left_int == 0) { |
| __ CompareRegisters(right, ZR); |
| __ b(deopt, MI); |
| __ mov(result, ZR); |
| return; |
| } |
| const intptr_t max_right = kSmiBits - Utils::HighestBit(left_int); |
| const bool right_needs_check = |
| !RangeUtils::IsWithin(right_range, 0, max_right - 1); |
| if (right_needs_check) { |
| __ CompareImmediate(right, |
| reinterpret_cast<int64_t>(Smi::New(max_right))); |
| __ b(deopt, CS); |
| } |
| __ SmiUntag(TMP, right); |
| __ lslv(result, left, TMP); |
| } |
| if (FLAG_throw_on_javascript_int_overflow) { |
| EmitJavascriptOverflowCheck(compiler, shift_left->range(), deopt, result); |
| } |
| return; |
| } |
| |
| const bool right_needs_check = |
| !RangeUtils::IsWithin(right_range, 0, (Smi::kBits - 1)); |
| if (!shift_left->can_overflow()) { |
| if (right_needs_check) { |
| const bool right_may_be_negative = |
| (right_range == NULL) || !right_range->IsPositive(); |
| if (right_may_be_negative) { |
| ASSERT(shift_left->CanDeoptimize()); |
| __ CompareRegisters(right, ZR); |
| __ b(deopt, MI); |
| } |
| |
| __ CompareImmediate( |
| right, reinterpret_cast<int64_t>(Smi::New(Smi::kBits))); |
| __ csel(result, ZR, result, CS); |
| __ SmiUntag(TMP, right); |
| __ lslv(TMP, left, TMP); |
| __ csel(result, TMP, result, CC); |
| } else { |
| __ SmiUntag(TMP, right); |
| __ lslv(result, left, TMP); |
| } |
| } else { |
| if (right_needs_check) { |
| ASSERT(shift_left->CanDeoptimize()); |
| __ CompareImmediate( |
| right, reinterpret_cast<int64_t>(Smi::New(Smi::kBits))); |
| __ b(deopt, CS); |
| } |
| // Left is not a constant. |
| // Check if the count is too large to handle inline. |
| __ SmiUntag(TMP, right); |
| // Overflow test (preserves left, right, and TMP). |
| const Register temp = locs.temp(0).reg(); |
| __ lslv(temp, left, TMP); |
| __ asrv(TMP2, temp, TMP); |
| __ CompareRegisters(left, TMP2); |
| __ b(deopt, NE); // Overflow. |
| // Shift for the result now that we know there is no overflow. |
| __ lslv(result, left, TMP); |
| }
|