// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#include "vm/globals.h" // Needed here to get TARGET_ARCH_MIPS.
#if defined(TARGET_ARCH_MIPS)
#include "vm/intermediate_language.h"
#include "vm/dart_entry.h"
#include "vm/flow_graph.h"
#include "vm/flow_graph_compiler.h"
#include "vm/flow_graph_range_analysis.h"
#include "vm/locations.h"
#include "vm/object_store.h"
#include "vm/parser.h"
#include "vm/simulator.h"
#include "vm/stack_frame.h"
#include "vm/stub_code.h"
#include "vm/symbols.h"
#define __ compiler->assembler()->
namespace dart {
DECLARE_FLAG(bool, allow_absolute_addresses);
DECLARE_FLAG(bool, emit_edge_counters);
DECLARE_FLAG(int, optimization_counter_threshold);
DECLARE_FLAG(bool, use_osr);
// Generic summary for call instructions that have all arguments pushed
// on the stack and return the result in a fixed register V0.
LocationSummary* Instruction::MakeCallSummary(Zone* zone) {
LocationSummary* result = new(zone) LocationSummary(
zone, 0, 0, LocationSummary::kCall);
result->set_out(0, Location::RegisterLocation(V0));
return result;
}
LocationSummary* PushArgumentInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
LocationSummary* locs = new(zone) LocationSummary(
zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::AnyOrConstant(value()));
return locs;
}
void PushArgumentInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// In SSA mode, we need an explicit push. Nothing to do in non-SSA mode
// where PushArgument is handled by BindInstr::EmitNativeCode.
__ Comment("PushArgumentInstr");
if (compiler->is_optimizing()) {
Location value = locs()->in(0);
if (value.IsRegister()) {
__ Push(value.reg());
} else if (value.IsConstant()) {
__ PushObject(value.constant());
} else {
ASSERT(value.IsStackSlot());
const intptr_t value_offset = value.ToStackSlotOffset();
__ LoadFromOffset(TMP, FP, value_offset);
__ Push(TMP);
}
}
}
LocationSummary* ReturnInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
LocationSummary* locs = new(zone) LocationSummary(
zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RegisterLocation(V0));
return locs;
}
// Attempt optimized compilation at return instruction instead of at the entry.
// The entry needs to be patchable, so no inlined objects are allowed in the
// area that will be overwritten by the patch instructions: a branch macro
// sequence.
void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Comment("ReturnInstr");
Register result = locs()->in(0).reg();
ASSERT(result == V0);
if (compiler->intrinsic_mode()) {
// Intrinsics don't have a frame.
__ Ret();
return;
}
#if defined(DEBUG)
Label stack_ok;
__ Comment("Stack Check");
const intptr_t fp_sp_dist =
(kFirstLocalSlotFromFp + 1 - compiler->StackSize()) * kWordSize;
ASSERT(fp_sp_dist <= 0);
__ subu(CMPRES1, SP, FP);
__ BranchEqual(CMPRES1, Immediate(fp_sp_dist), &stack_ok);
__ break_(0);
__ Bind(&stack_ok);
#endif
__ LeaveDartFrameAndReturn();
}
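// Returns the given condition with its relation operator inverted; the
// register operands and the immediate are left unchanged.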
static Condition NegateCondition(Condition condition) {
switch (condition.rel_op()) {
case AL: condition.set_rel_op(NV); break;
case NV: condition.set_rel_op(AL); break;
case EQ: condition.set_rel_op(NE); break;
case NE: condition.set_rel_op(EQ); break;
case LT: condition.set_rel_op(GE); break;
case LE: condition.set_rel_op(GT); break;
case GT: condition.set_rel_op(LE); break;
case GE: condition.set_rel_op(LT); break;
case ULT: condition.set_rel_op(UGE); break;
case ULE: condition.set_rel_op(UGT); break;
case UGT: condition.set_rel_op(ULE); break;
case UGE: condition.set_rel_op(ULT); break;
default:
UNREACHABLE();
}
return condition;
}
LocationSummary* IfThenElseInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
comparison()->InitializeLocationSummary(zone, opt);
return comparison()->locs();
}
void IfThenElseInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Register result = locs()->out(0).reg();
intptr_t true_value = if_true_;
intptr_t false_value = if_false_;
bool swapped = false;
if (true_value == 0) {
// Swap values so that false_value is zero.
intptr_t temp = true_value;
true_value = false_value;
false_value = temp;
swapped = true;
}
// Initialize result with the true value.
__ LoadImmediate(result, Smi::RawValue(true_value));
// Emit comparison code. This must not overwrite the result register.
BranchLabels labels = { NULL, NULL, NULL }; // Emit branch-free code.
Condition true_condition = comparison()->EmitComparisonCode(compiler, labels);
if (swapped) {
true_condition = NegateCondition(true_condition);
}
// Evaluate condition and provide result in CMPRES1.
Register left = true_condition.left();
Register right = true_condition.right();
bool zero_is_false = true; // Zero in CMPRES1 indicates a false condition.
switch (true_condition.rel_op()) {
case AL: return; // Result holds true_value.
case NV: __ LoadImmediate(result, false_value); return;
case EQ:
zero_is_false = false;
// fall through.
case NE: {
if (left == IMM) {
__ XorImmediate(CMPRES1, right, true_condition.imm());
} else if (right == IMM) {
__ XorImmediate(CMPRES1, left, true_condition.imm());
} else {
__ xor_(CMPRES1, left, right);
}
break;
}
case GE:
zero_is_false = false;
// fall through.
case LT: {
if (left == IMM) {
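// left is an immediate: imm < right  <=>  !(right <= imm)  <=>
// !(right < imm + 1), so the sense of the result is flipped below.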
__ slti(CMPRES1, right, Immediate(true_condition.imm() + 1));
zero_is_false = !zero_is_false;
} else if (right == IMM) {
__ slti(CMPRES1, left, Immediate(true_condition.imm()));
} else {
__ slt(CMPRES1, left, right);
}
break;
}
case LE:
zero_is_false = false;
// fall through.
case GT: {
if (left == IMM) {
__ slti(CMPRES1, right, Immediate(true_condition.imm()));
} else if (right == IMM) {
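// right is an immediate: left > imm  <=>  !(left <= imm)  <=>
// !(left < imm + 1), so the sense of the result is flipped below.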
__ slti(CMPRES1, left, Immediate(true_condition.imm() + 1));
zero_is_false = !zero_is_false;
} else {
__ slt(CMPRES1, right, left);
}
break;
}
case UGE:
zero_is_false = false;
// fall through.
case ULT: {
ASSERT((left != IMM) && (right != IMM)); // No unsigned constants used.
__ sltu(CMPRES1, left, right);
break;
}
case ULE:
zero_is_false = false;
// fall through.
case UGT: {
ASSERT((left != IMM) && (right != IMM)); // No unsigned constants used.
__ sltu(CMPRES1, right, left);
break;
}
default:
UNREACHABLE();
}
// CMPRES1 is the evaluated condition, zero or non-zero, as specified by the
// flag zero_is_false.
Register false_value_reg;
if (false_value == 0) {
false_value_reg = ZR;
} else {
__ LoadImmediate(CMPRES2, Smi::RawValue(false_value));
false_value_reg = CMPRES2;
}
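// Conditionally overwrite result with the false value: movz moves when
// CMPRES1 is zero, movn moves when CMPRES1 is non-zero.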
if (zero_is_false) {
__ movz(result, false_value_reg, CMPRES1);
} else {
__ movn(result, false_value_reg, CMPRES1);
}
}
LocationSummary* ClosureCallInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
LocationSummary* summary = new(zone) LocationSummary(
zone, kNumInputs, kNumTemps, LocationSummary::kCall);
summary->set_in(0, Location::RegisterLocation(T0)); // Function.
summary->set_out(0, Location::RegisterLocation(V0));
return summary;
}
void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Load arguments descriptor in S4.
int argument_count = ArgumentCount();
const Array& arguments_descriptor =
Array::ZoneHandle(ArgumentsDescriptor::New(argument_count,
argument_names()));
__ LoadObject(S4, arguments_descriptor);
// Load closure function code in T2.
// S4: arguments descriptor array.
// S5: Smi 0 (no IC data; the lazy-compile stub expects a GC-safe value).
ASSERT(locs()->in(0).reg() == T0);
__ LoadImmediate(S5, 0);
__ lw(T2, FieldAddress(T0, Function::entry_point_offset()));
__ lw(CODE_REG, FieldAddress(T0, Function::code_offset()));
__ jalr(T2);
compiler->RecordSafepoint(locs());
// Marks either the continuation point in unoptimized code or the
// deoptimization point in optimized code, after call.
const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id());
if (compiler->is_optimizing()) {
compiler->AddDeoptIndexAtCall(deopt_id_after, token_pos());
}
// Add deoptimization continuation point after the call and before the
// arguments are removed.
// In optimized code this descriptor is needed for exception handling.
compiler->AddCurrentDescriptor(RawPcDescriptors::kDeopt,
deopt_id_after,
token_pos());
__ Drop(argument_count);
}
LocationSummary* LoadLocalInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
return LocationSummary::Make(zone,
0,
Location::RequiresRegister(),
LocationSummary::kNoCall);
}
void LoadLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Comment("LoadLocalInstr");
Register result = locs()->out(0).reg();
__ LoadFromOffset(result, FP, local().index() * kWordSize);
}
LocationSummary* StoreLocalInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
return LocationSummary::Make(zone,
1,
Location::SameAsFirstInput(),
LocationSummary::kNoCall);
}
void StoreLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Comment("StoreLocalInstr");
Register value = locs()->in(0).reg();
Register result = locs()->out(0).reg();
ASSERT(result == value); // Assert that register assignment is correct.
__ StoreToOffset(value, FP, local().index() * kWordSize);
}
LocationSummary* ConstantInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
return LocationSummary::Make(zone,
0,
Location::RequiresRegister(),
LocationSummary::kNoCall);
}
void ConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// The register allocator drops constant definitions that have no uses.
if (!locs()->out(0).IsInvalid()) {
__ Comment("ConstantInstr");
Register result = locs()->out(0).reg();
__ LoadObject(result, value());
}
}
LocationSummary* UnboxedConstantInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 0;
const intptr_t kNumTemps = (representation_ == kUnboxedInt32) ? 0 : 1;
LocationSummary* locs = new(zone) LocationSummary(
zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
if (representation_ == kUnboxedInt32) {
locs->set_out(0, Location::RequiresRegister());
} else {
ASSERT(representation_ == kUnboxedDouble);
locs->set_out(0, Location::RequiresFpuRegister());
}
if (kNumTemps > 0) {
locs->set_temp(0, Location::RequiresRegister());
}
return locs;
}
void UnboxedConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// The register allocator drops constant definitions that have no uses.
if (!locs()->out(0).IsInvalid()) {
switch (representation_) {
case kUnboxedDouble: {
ASSERT(value().IsDouble());
const Register const_value = locs()->temp(0).reg();
const DRegister result = locs()->out(0).fpu_reg();
__ LoadObject(const_value, value());
__ LoadDFromOffset(result, const_value,
Double::value_offset() - kHeapObjectTag);
break;
}
case kUnboxedInt32:
__ LoadImmediate(locs()->out(0).reg(),
Smi::Cast(value()).Value());
break;
default:
UNREACHABLE();
}
}
}
LocationSummary* AssertAssignableInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 3;
const intptr_t kNumTemps = 0;
LocationSummary* summary = new(zone) LocationSummary(
zone, kNumInputs, kNumTemps, LocationSummary::kCall);
summary->set_in(0, Location::RegisterLocation(A0)); // Value.
summary->set_in(1, Location::RegisterLocation(A2)); // Instantiator.
summary->set_in(2, Location::RegisterLocation(A1)); // Type arguments.
summary->set_out(0, Location::RegisterLocation(A0));
return summary;
}
LocationSummary* AssertBooleanInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
LocationSummary* locs = new(zone) LocationSummary(
zone, kNumInputs, kNumTemps, LocationSummary::kCall);
locs->set_in(0, Location::RegisterLocation(A0));
locs->set_out(0, Location::RegisterLocation(A0));
return locs;
}
static void EmitAssertBoolean(Register reg,
intptr_t token_pos,
intptr_t deopt_id,
LocationSummary* locs,
FlowGraphCompiler* compiler) {
// Check that the type of the value is allowed in conditional context.
// Call the runtime if the object is not bool::true or bool::false.
ASSERT(locs->always_calls());
Label done;
if (Isolate::Current()->flags().type_checks()) {
__ BranchEqual(reg, Bool::True(), &done);
__ BranchEqual(reg, Bool::False(), &done);
} else {
ASSERT(Isolate::Current()->flags().asserts());
__ BranchNotEqual(reg, Object::null_instance(), &done);
}
__ Push(reg); // Push the source object.
compiler->GenerateRuntimeCall(token_pos,
deopt_id,
kNonBoolTypeErrorRuntimeEntry,
1,
locs);
// We should never return here.
__ break_(0);
__ Bind(&done);
}
void AssertBooleanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register obj = locs()->in(0).reg();
Register result = locs()->out(0).reg();
__ Comment("AssertBooleanInstr");
EmitAssertBoolean(obj, token_pos(), deopt_id(), locs(), compiler);
ASSERT(obj == result);
}
LocationSummary* EqualityCompareInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 2;
if (operation_cid() == kMintCid) {
const intptr_t kNumTemps = 0;
LocationSummary* locs = new(zone) LocationSummary(
zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::Pair(Location::RequiresRegister(),
Location::RequiresRegister()));
locs->set_in(1, Location::Pair(Location::RequiresRegister(),
Location::RequiresRegister()));
locs->set_out(0, Location::RequiresRegister());
return locs;
}
if (operation_cid() == kDoubleCid) {
const intptr_t kNumTemps = 0;
LocationSummary* locs = new(zone) LocationSummary(
zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RequiresFpuRegister());
locs->set_in(1, Location::RequiresFpuRegister());
locs->set_out(0, Location::RequiresRegister());
return locs;
}
if (operation_cid() == kSmiCid) {
const intptr_t kNumTemps = 0;
LocationSummary* locs = new(zone) LocationSummary(
zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RegisterOrConstant(left()));
// Only one input can be a constant operand. The case of two constant
// operands should be handled by constant propagation.
locs->set_in(1, locs->in(0).IsConstant()
? Location::RequiresRegister()
: Location::RegisterOrConstant(right()));
locs->set_out(0, Location::RequiresRegister());
return locs;
}
UNREACHABLE();
return NULL;
}
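// Loads the class id of value_reg into value_cid_reg. If value_is_smi is
// NULL, a Smi value yields kSmiCid; otherwise Smi values branch to
// value_is_smi and value_cid_reg is left untouched.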
static void LoadValueCid(FlowGraphCompiler* compiler,
Register value_cid_reg,
Register value_reg,
Label* value_is_smi = NULL) {
__ Comment("LoadValueCid");
Label done;
if (value_is_smi == NULL) {
__ LoadImmediate(value_cid_reg, kSmiCid);
}
__ andi(CMPRES1, value_reg, Immediate(kSmiTagMask));
if (value_is_smi == NULL) {
__ beq(CMPRES1, ZR, &done);
} else {
__ beq(CMPRES1, ZR, value_is_smi);
}
__ LoadClassId(value_cid_reg, value_reg);
__ Bind(&done);
}
static RelationOperator TokenKindToIntRelOp(Token::Kind kind) {
switch (kind) {
case Token::kEQ: return EQ;
case Token::kNE: return NE;
case Token::kLT: return LT;
case Token::kGT: return GT;
case Token::kLTE: return LE;
case Token::kGTE: return GE;
default:
UNREACHABLE();
return NV;
}
}
static RelationOperator TokenKindToUintRelOp(Token::Kind kind) {
switch (kind) {
case Token::kEQ: return EQ;
case Token::kNE: return NE;
case Token::kLT: return ULT;
case Token::kGT: return UGT;
case Token::kLTE: return ULE;
case Token::kGTE: return UGE;
default:
UNREACHABLE();
return NV;
}
}
// The comparison code to emit is specified by true_condition.
static void EmitBranchOnCondition(FlowGraphCompiler* compiler,
Condition true_condition,
BranchLabels labels) {
__ Comment("ControlInstruction::EmitBranchOnCondition");
if (labels.fall_through == labels.false_label) {
// If the next block is the false successor, fall through to it.
__ BranchOnCondition(true_condition, labels.true_label);
} else {
// If the next block is not the false successor, branch to it.
Condition false_condition = NegateCondition(true_condition);
__ BranchOnCondition(false_condition, labels.false_label);
// Fall through or jump to the true successor.
if (labels.fall_through != labels.true_label) {
__ b(labels.true_label);
}
}
}
static Condition EmitSmiComparisonOp(FlowGraphCompiler* compiler,
const LocationSummary& locs,
Token::Kind kind) {
__ Comment("EmitSmiComparisonOp");
const Location left = locs.in(0);
const Location right = locs.in(1);
ASSERT(!left.IsConstant() || !right.IsConstant());
ASSERT(left.IsRegister() || left.IsConstant());
ASSERT(right.IsRegister() || right.IsConstant());
int16_t imm = 0;
const Register left_reg = left.IsRegister() ?
left.reg() : __ LoadConditionOperand(CMPRES1, left.constant(), &imm);
const Register right_reg = right.IsRegister() ?
right.reg() : __ LoadConditionOperand(CMPRES2, right.constant(), &imm);
return Condition(left_reg, right_reg, TokenKindToIntRelOp(kind), imm);
}
static Condition EmitUnboxedMintEqualityOp(FlowGraphCompiler* compiler,
const LocationSummary& locs,
Token::Kind kind,
BranchLabels labels) {
__ Comment("EmitUnboxedMintEqualityOp");
ASSERT(Token::IsEqualityOperator(kind));
PairLocation* left_pair = locs.in(0).AsPairLocation();
Register left_lo = left_pair->At(0).reg();
Register left_hi = left_pair->At(1).reg();
PairLocation* right_pair = locs.in(1).AsPairLocation();
Register right_lo = right_pair->At(0).reg();
Register right_hi = right_pair->At(1).reg();
if (labels.false_label == NULL) {
// Generate branch-free code.
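// The pairs are equal iff ((left_lo ^ right_lo) | (left_hi ^ right_hi)) == 0.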
__ xor_(CMPRES1, left_lo, right_lo);
__ xor_(AT, left_hi, right_hi);
__ or_(CMPRES1, CMPRES1, AT);
return Condition(CMPRES1, ZR, TokenKindToUintRelOp(kind));
} else {
if (kind == Token::kEQ) {
__ bne(left_hi, right_hi, labels.false_label);
} else {
ASSERT(kind == Token::kNE);
__ bne(left_hi, right_hi, labels.true_label);
}
return Condition(left_lo, right_lo, TokenKindToUintRelOp(kind));
}
}
static Condition EmitUnboxedMintComparisonOp(FlowGraphCompiler* compiler,
const LocationSummary& locs,
Token::Kind kind,
BranchLabels labels) {
__ Comment("EmitUnboxedMintComparisonOp");
PairLocation* left_pair = locs.in(0).AsPairLocation();
Register left_lo = left_pair->At(0).reg();
Register left_hi = left_pair->At(1).reg();
PairLocation* right_pair = locs.in(1).AsPairLocation();
Register right_lo = right_pair->At(0).reg();
Register right_hi = right_pair->At(1).reg();
if (labels.false_label == NULL) {
// Generate branch-free code (except for skipping the lower words compare).
// Result in CMPRES1, CMPRES2, so that CMPRES1 op CMPRES2 === left op right.
Label done;
// Compare upper halves first.
__ slt(CMPRES1, right_hi, left_hi);
__ slt(CMPRES2, left_hi, right_hi);
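// CMPRES1 = (left_hi > right_hi), CMPRES2 = (left_hi < right_hi); they are
// equal only when the upper halves are equal.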
// If higher words aren't equal, skip comparing lower words.
__ bne(CMPRES1, CMPRES2, &done);
__ sltu(CMPRES1, right_lo, left_lo);
__ sltu(CMPRES2, left_lo, right_lo);
__ Bind(&done);
return Condition(CMPRES1, CMPRES2, TokenKindToUintRelOp(kind));
} else {
switch (kind) {
case Token::kLT:
case Token::kLTE: {
__ slt(AT, left_hi, right_hi);
__ bne(AT, ZR, labels.true_label);
__ delay_slot()->slt(AT, right_hi, left_hi);
__ bne(AT, ZR, labels.false_label);
break;
}
case Token::kGT:
case Token::kGTE: {
__ slt(AT, left_hi, right_hi);
__ bne(AT, ZR, labels.false_label);
__ delay_slot()->slt(AT, right_hi, left_hi);
__ bne(AT, ZR, labels.true_label);
break;
}
default:
UNREACHABLE();
}
return Condition(left_lo, right_lo, TokenKindToUintRelOp(kind));
}
}
static Condition EmitDoubleComparisonOp(FlowGraphCompiler* compiler,
const LocationSummary& locs,
Token::Kind kind,
BranchLabels labels) {
DRegister left = locs.in(0).fpu_reg();
DRegister right = locs.in(1).fpu_reg();
__ Comment("DoubleComparisonOp(left=%d, right=%d)", left, right);
__ cund(left, right);
Label* nan_label = (kind == Token::kNE)
? labels.true_label : labels.false_label;
__ bc1t(nan_label);
switch (kind) {
case Token::kEQ: __ ceqd(left, right); break;
case Token::kNE: __ ceqd(left, right); break;
case Token::kLT: __ coltd(left, right); break;
case Token::kLTE: __ coled(left, right); break;
case Token::kGT: __ coltd(right, left); break;
case Token::kGTE: __ coled(right, left); break;
default: {
// We should only be passing the above conditions to this function.
UNREACHABLE();
break;
}
}
if (labels.false_label == NULL) {
// Generate branch-free code and return result in condition.
__ LoadImmediate(CMPRES1, 1);
if (kind == Token::kNE) {
__ movf(CMPRES1, ZR);
} else {
__ movt(CMPRES1, ZR);
}
return Condition(CMPRES1, ZR, EQ);
} else {
if (labels.fall_through == labels.false_label) {
if (kind == Token::kNE) {
__ bc1f(labels.true_label);
} else {
__ bc1t(labels.true_label);
}
// Since we already branched on true, return the never true condition.
return Condition(CMPRES1, CMPRES2, NV);
} else {
if (kind == Token::kNE) {
__ bc1t(labels.false_label);
} else {
__ bc1f(labels.false_label);
}
// Since we already branched on false, return the always true condition.
return Condition(CMPRES1, CMPRES2, AL);
}
}
}
Condition EqualityCompareInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
BranchLabels labels) {
if (operation_cid() == kSmiCid) {
return EmitSmiComparisonOp(compiler, *locs(), kind());
} else if (operation_cid() == kMintCid) {
return EmitUnboxedMintEqualityOp(compiler, *locs(), kind(), labels);
} else {
ASSERT(operation_cid() == kDoubleCid);
return EmitDoubleComparisonOp(compiler, *locs(), kind(), labels);
}
}
void EqualityCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT((kind() == Token::kNE) || (kind() == Token::kEQ));
__ Comment("EqualityCompareInstr");
Label is_true, is_false;
BranchLabels labels = { &is_true, &is_false, &is_false };
Condition true_condition = EmitComparisonCode(compiler, labels);
EmitBranchOnCondition(compiler, true_condition, labels);
Register result = locs()->out(0).reg();
Label done;
__ Bind(&is_false);
__ LoadObject(result, Bool::False());
__ b(&done);
__ Bind(&is_true);
__ LoadObject(result, Bool::True());
__ Bind(&done);
}
void EqualityCompareInstr::EmitBranchCode(FlowGraphCompiler* compiler,
BranchInstr* branch) {
__ Comment("EqualityCompareInstr::EmitBranchCode");
ASSERT((kind() == Token::kNE) || (kind() == Token::kEQ));
BranchLabels labels = compiler->CreateBranchLabels(branch);
Condition true_condition = EmitComparisonCode(compiler, labels);
EmitBranchOnCondition(compiler, true_condition, labels);
}
LocationSummary* TestSmiInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 2;
const intptr_t kNumTemps = 0;
LocationSummary* locs = new(zone) LocationSummary(
zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RequiresRegister());
// Only one input can be a constant operand. The case of two constant
// operands should be handled by constant propagation.
locs->set_in(1, Location::RegisterOrConstant(right()));
return locs;
}
Condition TestSmiInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
BranchLabels labels) {
Register left = locs()->in(0).reg();
Location right = locs()->in(1);
if (right.IsConstant()) {
ASSERT(right.constant().IsSmi());
const int32_t imm =
reinterpret_cast<int32_t>(right.constant().raw());
__ AndImmediate(CMPRES1, left, imm);
} else {
__ and_(CMPRES1, left, right.reg());
}
return Condition(CMPRES1, ZR, (kind() == Token::kNE) ? NE : EQ);
}
void TestSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Never emitted outside of the BranchInstr.
UNREACHABLE();
}
void TestSmiInstr::EmitBranchCode(FlowGraphCompiler* compiler,
BranchInstr* branch) {
BranchLabels labels = compiler->CreateBranchLabels(branch);
Condition true_condition = EmitComparisonCode(compiler, labels);
EmitBranchOnCondition(compiler, true_condition, labels);
}
LocationSummary* TestCidsInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 1;
LocationSummary* locs = new(zone) LocationSummary(
zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RequiresRegister());
locs->set_temp(0, Location::RequiresRegister());
locs->set_out(0, Location::RequiresRegister());
return locs;
}
Condition TestCidsInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
BranchLabels labels) {
ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT));
Register val_reg = locs()->in(0).reg();
Register cid_reg = locs()->temp(0).reg();
Label* deopt = CanDeoptimize() ?
compiler->AddDeoptStub(deopt_id(), ICData::kDeoptTestCids) : NULL;
const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0;
const ZoneGrowableArray<intptr_t>& data = cid_results();
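// cid_results() is a flat list of (class id, outcome) pairs; the Smi entry
// always comes first.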
ASSERT(data[0] == kSmiCid);
bool result = data[1] == true_result;
__ andi(CMPRES1, val_reg, Immediate(kSmiTagMask));
__ beq(CMPRES1, ZR, result ? labels.true_label : labels.false_label);
__ LoadClassId(cid_reg, val_reg);
for (intptr_t i = 2; i < data.length(); i += 2) {
const intptr_t test_cid = data[i];
ASSERT(test_cid != kSmiCid);
result = data[i + 1] == true_result;
__ BranchEqual(cid_reg, Immediate(test_cid),
result ? labels.true_label : labels.false_label);
}
// No match found: deoptimize, or branch to the label opposite the last
// tested outcome.
if (deopt == NULL) {
Label* target = result ? labels.false_label : labels.true_label;
if (target != labels.fall_through) {
__ b(target);
}
} else {
__ b(deopt);
}
// Dummy result as the last instruction is a jump or fall through.
return Condition(CMPRES1, ZR, AL);
}
void TestCidsInstr::EmitBranchCode(FlowGraphCompiler* compiler,
BranchInstr* branch) {
BranchLabels labels = compiler->CreateBranchLabels(branch);
EmitComparisonCode(compiler, labels);
}
void TestCidsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register result_reg = locs()->out(0).reg();
Label is_true, is_false, done;
BranchLabels labels = { &is_true, &is_false, &is_false };
EmitComparisonCode(compiler, labels);
__ Bind(&is_false);
__ LoadObject(result_reg, Bool::False());
__ b(&done);
__ Bind(&is_true);
__ LoadObject(result_reg, Bool::True());
__ Bind(&done);
}
LocationSummary* RelationalOpInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 2;
const intptr_t kNumTemps = 0;
if (operation_cid() == kMintCid) {
const intptr_t kNumTemps = 0;
LocationSummary* locs = new(zone) LocationSummary(
zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::Pair(Location::RequiresRegister(),
Location::RequiresRegister()));
locs->set_in(1, Location::Pair(Location::RequiresRegister(),
Location::RequiresRegister()));
locs->set_out(0, Location::RequiresRegister());
return locs;
}
if (operation_cid() == kDoubleCid) {
LocationSummary* summary = new(zone) LocationSummary(
zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresFpuRegister());
summary->set_in(1, Location::RequiresFpuRegister());
summary->set_out(0, Location::RequiresRegister());
return summary;
}
ASSERT(operation_cid() == kSmiCid);
LocationSummary* summary = new(zone) LocationSummary(
zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, Location::RegisterOrConstant(left()));
// Only one input can be a constant operand. The case of two constant
// operands should be handled by constant propagation.
summary->set_in(1, summary->in(0).IsConstant()
? Location::RequiresRegister()
: Location::RegisterOrConstant(right()));
summary->set_out(0, Location::RequiresRegister());
return summary;
}
Condition RelationalOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
BranchLabels labels) {
if (operation_cid() == kSmiCid) {
return EmitSmiComparisonOp(compiler, *locs(), kind());
} else if (operation_cid() == kMintCid) {
return EmitUnboxedMintComparisonOp(compiler, *locs(), kind(), labels);
} else {
ASSERT(operation_cid() == kDoubleCid);
return EmitDoubleComparisonOp(compiler, *locs(), kind(), labels);
}
}
void RelationalOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Comment("RelationalOpInstr");
Label is_true, is_false;
BranchLabels labels = { &is_true, &is_false, &is_false };
Condition true_condition = EmitComparisonCode(compiler, labels);
EmitBranchOnCondition(compiler, true_condition, labels);
Register result = locs()->out(0).reg();
Label done;
__ Bind(&is_false);
__ LoadObject(result, Bool::False());
__ b(&done);
__ Bind(&is_true);
__ LoadObject(result, Bool::True());
__ Bind(&done);
}
void RelationalOpInstr::EmitBranchCode(FlowGraphCompiler* compiler,
BranchInstr* branch) {
__ Comment("RelationalOpInstr");
BranchLabels labels = compiler->CreateBranchLabels(branch);
Condition true_condition = EmitComparisonCode(compiler, labels);
EmitBranchOnCondition(compiler, true_condition, labels);
}
LocationSummary* NativeCallInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
return MakeCallSummary(zone);
}
void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Comment("NativeCallInstr");
Register result = locs()->out(0).reg();
// Push the result placeholder initialized to null.
__ PushObject(Object::null_object());
// Pass a pointer to the first argument in A2.
if (!function().HasOptionalParameters()) {
__ AddImmediate(A2, FP, (kParamEndSlotFromFp +
function().NumParameters()) * kWordSize);
} else {
__ AddImmediate(A2, FP, kFirstLocalSlotFromFp * kWordSize);
}
// Compute the effective address. When running under the simulator,
// this is a redirection address that forces the simulator to call
// into the runtime system.
uword entry;
const intptr_t argc_tag = NativeArguments::ComputeArgcTag(function());
const bool is_leaf_call =
(argc_tag & NativeArguments::AutoSetupScopeMask()) == 0;
const StubEntry* stub_entry;
if (link_lazily()) {
stub_entry = StubCode::CallBootstrapCFunction_entry();
entry = NativeEntry::LinkNativeCallEntry();
} else {
entry = reinterpret_cast<uword>(native_c_function());
if (is_bootstrap_native() || is_leaf_call) {
stub_entry = StubCode::CallBootstrapCFunction_entry();
#if defined(USING_SIMULATOR)
entry = Simulator::RedirectExternalReference(
entry, Simulator::kBootstrapNativeCall, NativeEntry::kNumArguments);
#endif
} else {
// For non-bootstrap native methods, the CallNativeCFunction stub generates
// the redirection address when running under the simulator, hence we do not
// change 'entry' here.
stub_entry = StubCode::CallNativeCFunction_entry();
#if defined(USING_SIMULATOR)
if (!function().IsNativeAutoSetupScope()) {
entry = Simulator::RedirectExternalReference(
entry, Simulator::kBootstrapNativeCall, NativeEntry::kNumArguments);
}
#endif
}
}
__ LoadImmediate(A1, argc_tag);
ExternalLabel label(entry);
__ LoadNativeEntry(T5, &label, kNotPatchable);
compiler->GenerateCall(token_pos(),
*stub_entry,
RawPcDescriptors::kOther,
locs());
__ Pop(result);
}
LocationSummary* StringFromCharCodeInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
// TODO(fschneider): Allow immediate operands for the char code.
return LocationSummary::Make(zone,
kNumInputs,
Location::RequiresRegister(),
LocationSummary::kNoCall);
}
void StringFromCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(compiler->is_optimizing());
Register char_code = locs()->in(0).reg();
Register result = locs()->out(0).reg();
__ Comment("StringFromCharCodeInstr");
__ lw(result, Address(THR, Thread::predefined_symbols_address_offset()));
__ AddImmediate(result, Symbols::kNullCharCodeSymbolOffset * kWordSize);
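// A Smi is its value shifted left by one, so one more left shift yields
// value * kWordSize, the byte offset into the one-character symbol table.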
__ sll(TMP, char_code, 1); // Char code is a smi.
__ addu(TMP, TMP, result);
__ lw(result, Address(TMP));
}
LocationSummary* StringToCharCodeInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
return LocationSummary::Make(zone,
kNumInputs,
Location::RequiresRegister(),
LocationSummary::kNoCall);
}
void StringToCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Comment("StringToCharCodeInstr");
ASSERT(cid_ == kOneByteStringCid);
Register str = locs()->in(0).reg();
Register result = locs()->out(0).reg();
ASSERT(str != result);
Label done;
__ lw(result, FieldAddress(str, String::length_offset()));
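// The delay-slot instruction below executes whether or not the branch is
// taken: result is preset to Smi(-1) and only overwritten when the string
// has length 1.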
__ BranchNotEqual(result, Immediate(Smi::RawValue(1)), &done);
__ delay_slot()->addiu(result, ZR, Immediate(Smi::RawValue(-1)));
__ lbu(result, FieldAddress(str, OneByteString::data_offset()));
__ SmiTag(result);
__ Bind(&done);
}
LocationSummary* StringInterpolateInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
LocationSummary* summary = new(zone) LocationSummary(
zone, kNumInputs, kNumTemps, LocationSummary::kCall);
summary->set_in(0, Location::RegisterLocation(A0));
summary->set_out(0, Location::RegisterLocation(V0));
return summary;
}
void StringInterpolateInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register array = locs()->in(0).reg();
__ Push(array);
const int kNumberOfArguments = 1;
const Array& kNoArgumentNames = Object::null_array();
compiler->GenerateStaticCall(deopt_id(),
token_pos(),
CallFunction(),
kNumberOfArguments,
kNoArgumentNames,
locs(),
ICData::Handle());
ASSERT(locs()->out(0).reg() == V0);
}
LocationSummary* LoadUntaggedInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
return LocationSummary::Make(zone,
kNumInputs,
Location::RequiresRegister(),
LocationSummary::kNoCall);
}
void LoadUntaggedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register obj = locs()->in(0).reg();
Register result = locs()->out(0).reg();
if (object()->definition()->representation() == kUntagged) {
__ LoadFromOffset(result, obj, offset());
} else {
ASSERT(object()->definition()->representation() == kTagged);
__ LoadFieldFromOffset(result, obj, offset());
}
}
LocationSummary* LoadClassIdInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
return LocationSummary::Make(zone,
kNumInputs,
Location::RequiresRegister(),
LocationSummary::kNoCall);
}
void LoadClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register object = locs()->in(0).reg();
Register result = locs()->out(0).reg();
__ LoadTaggedClassIdMayBeSmi(result, object);
}
CompileType LoadIndexedInstr::ComputeType() const {
switch (class_id_) {
case kArrayCid:
case kImmutableArrayCid:
return CompileType::Dynamic();
case kTypedDataFloat32ArrayCid:
case kTypedDataFloat64ArrayCid:
return CompileType::FromCid(kDoubleCid);
case kTypedDataFloat32x4ArrayCid:
return CompileType::FromCid(kFloat32x4Cid);
case kTypedDataInt32x4ArrayCid:
return CompileType::FromCid(kInt32x4Cid);
case kTypedDataInt8ArrayCid:
case kTypedDataUint8ArrayCid:
case kTypedDataUint8ClampedArrayCid:
case kExternalTypedDataUint8ArrayCid:
case kExternalTypedDataUint8ClampedArrayCid:
case kTypedDataInt16ArrayCid:
case kTypedDataUint16ArrayCid:
case kOneByteStringCid:
case kTwoByteStringCid:
return CompileType::FromCid(kSmiCid);
case kTypedDataInt32ArrayCid:
case kTypedDataUint32ArrayCid:
return CompileType::Int();
default:
UNIMPLEMENTED();
return CompileType::Dynamic();
}
}
Representation LoadIndexedInstr::representation() const {
switch (class_id_) {
case kArrayCid:
case kImmutableArrayCid:
case kTypedDataInt8ArrayCid:
case kTypedDataUint8ArrayCid:
case kTypedDataUint8ClampedArrayCid:
case kExternalTypedDataUint8ArrayCid:
case kExternalTypedDataUint8ClampedArrayCid:
case kTypedDataInt16ArrayCid:
case kTypedDataUint16ArrayCid:
case kOneByteStringCid:
case kTwoByteStringCid:
return kTagged;
case kTypedDataInt32ArrayCid:
return kUnboxedInt32;
case kTypedDataUint32ArrayCid:
return kUnboxedUint32;
case kTypedDataFloat32ArrayCid:
case kTypedDataFloat64ArrayCid:
return kUnboxedDouble;
case kTypedDataInt32x4ArrayCid:
return kUnboxedInt32x4;
case kTypedDataFloat32x4ArrayCid:
return kUnboxedFloat32x4;
default:
UNIMPLEMENTED();
return kTagged;
}
}
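// Returns true if the index is a Smi constant whose scaled element offset
// fits in the addressing mode's immediate field.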
static bool CanBeImmediateIndex(Value* value, intptr_t cid, bool is_external) {
ConstantInstr* constant = value->definition()->AsConstant();
if ((constant == NULL) || !Assembler::IsSafeSmi(constant->value())) {
return false;
}
const int64_t index = Smi::Cast(constant->value()).AsInt64Value();
const intptr_t scale = Instance::ElementSizeFor(cid);
const int64_t offset = index * scale +
(is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag));
if (!Utils::IsInt(32, offset)) {
return false;
}
return Address::CanHoldOffset(static_cast<int32_t>(offset));
}
LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 2;
const intptr_t kNumTemps = 0;
LocationSummary* locs = new(zone) LocationSummary(
zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RequiresRegister());
if (CanBeImmediateIndex(index(), class_id(), IsExternal())) {
locs->set_in(1, Location::Constant(index()->definition()->AsConstant()));
} else {
locs->set_in(1, Location::RequiresRegister());
}
if ((representation() == kUnboxedDouble) ||
(representation() == kUnboxedFloat32x4) ||
(representation() == kUnboxedInt32x4)) {
locs->set_out(0, Location::RequiresFpuRegister());
} else {
locs->set_out(0, Location::RequiresRegister());
}
return locs;
}
void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Comment("LoadIndexedInstr");
// The array register points to the backing store for external arrays.
const Register array = locs()->in(0).reg();
const Location index = locs()->in(1);
Address element_address = index.IsRegister()
? __ ElementAddressForRegIndex(true, // Load.
IsExternal(), class_id(), index_scale(),
array, index.reg())
: __ ElementAddressForIntIndex(
IsExternal(), class_id(), index_scale(),
array, Smi::Cast(index.constant()).Value());
// Warning: element_address may use register TMP as base.
if ((representation() == kUnboxedDouble) ||
(representation() == kUnboxedFloat32x4) ||
(representation() == kUnboxedInt32x4)) {
DRegister result = locs()->out(0).fpu_reg();
switch (class_id()) {
case kTypedDataFloat32ArrayCid:
// Load single precision float.
__ lwc1(EvenFRegisterOf(result), element_address);
break;
case kTypedDataFloat64ArrayCid:
__ LoadDFromOffset(result,
element_address.base(), element_address.offset());
break;
case kTypedDataInt32x4ArrayCid:
case kTypedDataFloat32x4ArrayCid:
UNIMPLEMENTED();
break;
}
return;
}
if ((representation() == kUnboxedUint32) ||
(representation() == kUnboxedInt32)) {
const Register result = locs()->out(0).reg();
switch (class_id()) {
case kTypedDataInt32ArrayCid:
ASSERT(representation() == kUnboxedInt32);
__ lw(result, element_address);
break;
case kTypedDataUint32ArrayCid:
ASSERT(representation() == kUnboxedUint32);
__ lw(result, element_address);
break;
default:
UNREACHABLE();
}
return;
}
ASSERT(representation() == kTagged);
const Register result = locs()->out(0).reg();
switch (class_id()) {
case kTypedDataInt8ArrayCid:
ASSERT(index_scale() == 1);
__ lb(result, element_address);
__ SmiTag(result);
break;
case kTypedDataUint8ArrayCid:
case kTypedDataUint8ClampedArrayCid:
case kExternalTypedDataUint8ArrayCid:
case kExternalTypedDataUint8ClampedArrayCid:
case kOneByteStringCid:
ASSERT(index_scale() == 1);
__ lbu(result, element_address);
__ SmiTag(result);
break;
case kTypedDataInt16ArrayCid:
__ lh(result, element_address);
__ SmiTag(result);
break;
case kTypedDataUint16ArrayCid:
case kTwoByteStringCid:
__ lhu(result, element_address);
__ SmiTag(result);
break;
default:
ASSERT((class_id() == kArrayCid) || (class_id() == kImmutableArrayCid));
__ lw(result, element_address);
break;
}
}
LocationSummary* LoadCodeUnitsInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 2;
const intptr_t kNumTemps = 0;
LocationSummary* summary = new(zone) LocationSummary(
zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresRegister());
summary->set_in(1, Location::RequiresRegister());
// TODO(zerny): Handle mints properly once possible.
ASSERT(representation() == kTagged);
summary->set_out(0, Location::RequiresRegister());
return summary;
}
void LoadCodeUnitsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// The string register points to the backing store for external strings.
const Register str = locs()->in(0).reg();
const Location index = locs()->in(1);
Address element_address = __ ElementAddressForRegIndex(
true, IsExternal(), class_id(), index_scale(), str, index.reg());
// Warning: element_address may use register TMP as base.
ASSERT(representation() == kTagged);
Register result = locs()->out(0).reg();
switch (class_id()) {
case kOneByteStringCid:
case kExternalOneByteStringCid:
switch (element_count()) {
case 1: __ lbu(result, element_address); break;
case 2: __ lhu(result, element_address); break;
case 4: // Loading multiple code units is disabled on MIPS.
default: UNREACHABLE();
}
__ SmiTag(result);
break;
case kTwoByteStringCid:
case kExternalTwoByteStringCid:
switch (element_count()) {
case 1: __ lhu(result, element_address); break;
case 2: // Loading multiple code units is disabled on MIPS.
default: UNREACHABLE();
}
__ SmiTag(result);
break;
default:
UNREACHABLE();
break;
}
}
Representation StoreIndexedInstr::RequiredInputRepresentation(
intptr_t idx) const {
// Array can be a Dart object or a pointer to external data.
if (idx == 0) return kNoRepresentation; // Flexible input representation.
if (idx == 1) return kTagged; // Index is a smi.
ASSERT(idx == 2);
switch (class_id_) {
case kArrayCid:
case kOneByteStringCid:
case kTypedDataInt8ArrayCid:
case kTypedDataUint8ArrayCid:
case kExternalTypedDataUint8ArrayCid:
case kTypedDataUint8ClampedArrayCid:
case kExternalTypedDataUint8ClampedArrayCid:
case kTypedDataInt16ArrayCid:
case kTypedDataUint16ArrayCid:
return kTagged;
case kTypedDataInt32ArrayCid:
return kUnboxedInt32;
case kTypedDataUint32ArrayCid:
return kUnboxedUint32;
case kTypedDataFloat32ArrayCid:
case kTypedDataFloat64ArrayCid:
return kUnboxedDouble;
case kTypedDataFloat32x4ArrayCid:
return kUnboxedFloat32x4;
case kTypedDataInt32x4ArrayCid:
return kUnboxedInt32x4;
default:
UNIMPLEMENTED();
return kTagged;
}
}
LocationSummary* StoreIndexedInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 3;
const intptr_t kNumTemps = 0;
LocationSummary* locs = new(zone) LocationSummary(
zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RequiresRegister());
if (CanBeImmediateIndex(index(), class_id(), IsExternal())) {
locs->set_in(1, Location::Constant(index()->definition()->AsConstant()));
} else {
locs->set_in(1, Location::WritableRegister());
}
switch (class_id()) {
case kArrayCid:
locs->set_in(2, ShouldEmitStoreBarrier()
? Location::WritableRegister()
: Location::RegisterOrConstant(value()));
break;
case kExternalTypedDataUint8ArrayCid:
case kExternalTypedDataUint8ClampedArrayCid:
case kTypedDataInt8ArrayCid:
case kTypedDataUint8ArrayCid:
case kTypedDataUint8ClampedArrayCid:
case kOneByteStringCid:
case kTypedDataInt16ArrayCid:
case kTypedDataUint16ArrayCid:
case kTypedDataInt32ArrayCid:
case kTypedDataUint32ArrayCid:
locs->set_in(2, Location::RequiresRegister());
break;
case kTypedDataFloat32ArrayCid:
case kTypedDataFloat64ArrayCid: // TODO(srdjan): Support Float64 constants.
case kTypedDataInt32x4ArrayCid:
case kTypedDataFloat32x4ArrayCid:
locs->set_in(2, Location::RequiresFpuRegister());
break;
default:
UNREACHABLE();
return NULL;
}
return locs;
}
void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Comment("StoreIndexedInstr");
// The array register points to the backing store for external arrays.
const Register array = locs()->in(0).reg();
const Location index = locs()->in(1);
Address element_address = index.IsRegister()
? __ ElementAddressForRegIndex(false, // Store.
IsExternal(), class_id(), index_scale(),
array, index.reg())
: __ ElementAddressForIntIndex(
IsExternal(), class_id(), index_scale(),
array, Smi::Cast(index.constant()).Value());
ASSERT(element_address.base() != TMP); // Allowed for load only.
switch (class_id()) {
case kArrayCid:
if (ShouldEmitStoreBarrier()) {
Register value = locs()->in(2).reg();
__ StoreIntoObject(array, element_address, value);
} else if (locs()->in(2).IsConstant()) {
const Object& constant = locs()->in(2).constant();
__ StoreIntoObjectNoBarrier(array, element_address, constant);
} else {
Register value = locs()->in(2).reg();
__ StoreIntoObjectNoBarrier(array, element_address, value);
}
break;
case kTypedDataInt8ArrayCid:
case kTypedDataUint8ArrayCid:
case kExternalTypedDataUint8ArrayCid:
case kOneByteStringCid: {
if (locs()->in(2).IsConstant()) {
const Smi& constant = Smi::Cast(locs()->in(2).constant());
__ LoadImmediate(TMP, static_cast<int8_t>(constant.Value()));
__ sb(TMP, element_address);
} else {
Register value = locs()->in(2).reg();
__ SmiUntag(TMP, value);
__ sb(TMP, element_address);
}
break;
}
case kTypedDataUint8ClampedArrayCid:
case kExternalTypedDataUint8ClampedArrayCid: {
if (locs()->in(2).IsConstant()) {
const Smi& constant = Smi::Cast(locs()->in(2).constant());
intptr_t value = constant.Value();
// Clamp to 0x0 or 0xFF respectively.
if (value > 0xFF) {
value = 0xFF;
} else if (value < 0) {
value = 0;
}
__ LoadImmediate(TMP, static_cast<int8_t>(value));
__ sb(TMP, element_address);
} else {
Register value = locs()->in(2).reg();
Label store_value, bigger, smaller;
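// Untag the value; if it is already in [0, 0xFF] store it as is. Otherwise
// store 0xFF, except when the value is negative, in which case the
// conditional move below replaces it with 0.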
__ SmiUntag(TMP, value);
__ BranchUnsignedLess(TMP, Immediate(0xFF + 1), &store_value);
__ LoadImmediate(TMP, 0xFF);
__ slti(CMPRES1, value, Immediate(1));
__ movn(TMP, ZR, CMPRES1);
__ Bind(&store_value);
__ sb(TMP, element_address);
}
break;
}
case kTypedDataInt16ArrayCid:
case kTypedDataUint16ArrayCid: {
Register value = locs()->in(2).reg();
__ SmiUntag(TMP, value);
__ sh(TMP, element_address);
break;
}
case kTypedDataInt32ArrayCid:
case kTypedDataUint32ArrayCid: {
__ sw(locs()->in(2).reg(), element_address);
break;
}
case kTypedDataFloat32ArrayCid: {
FRegister value = EvenFRegisterOf(locs()->in(2).fpu_reg());
__ swc1(value, element_address);
break;
}
case kTypedDataFloat64ArrayCid:
__ StoreDToOffset(locs()->in(2).fpu_reg(),
element_address.base(), element_address.offset());
break;
case kTypedDataInt32x4ArrayCid:
case kTypedDataFloat32x4ArrayCid:
UNIMPLEMENTED();
break;
default:
UNREACHABLE();
}
}
LocationSummary* GuardFieldClassInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t value_cid = value()->Type()->ToCid();
const intptr_t field_cid = field().guarded_cid();
const bool emit_full_guard = !opt || (field_cid == kIllegalCid);
const bool needs_value_cid_temp_reg =
(value_cid == kDynamicCid) && (emit_full_guard || (field_cid != kSmiCid));
const bool needs_field_temp_reg = emit_full_guard;
intptr_t num_temps = 0;
if (needs_value_cid_temp_reg) {
num_temps++;
}
if (needs_field_temp_reg) {
num_temps++;
}
LocationSummary* summary = new(zone) LocationSummary(
zone, kNumInputs, num_temps, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresRegister());
for (intptr_t i = 0; i < num_temps; i++) {
summary->set_temp(i, Location::RequiresRegister());
}
return summary;
}
void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(sizeof(classid_t) == kInt16Size);
__ Comment("GuardFieldClassInstr");
const intptr_t value_cid = value()->Type()->ToCid();
const intptr_t field_cid = field().guarded_cid();
const intptr_t nullability = field().is_nullable() ? kNullCid : kIllegalCid;
if (field_cid == kDynamicCid) {
ASSERT(!compiler->is_optimizing());
return; // Nothing to emit.
}
const bool emit_full_guard =
!compiler->is_optimizing() || (field_cid == kIllegalCid);
const bool needs_value_cid_temp_reg =
(value_cid == kDynamicCid) && (emit_full_guard || (field_cid != kSmiCid));
const bool needs_field_temp_reg = emit_full_guard;
const Register value_reg = locs()->in(0).reg();
const Register value_cid_reg = needs_value_cid_temp_reg ?
locs()->temp(0).reg() : kNoRegister;
const Register field_reg = needs_field_temp_reg ?
locs()->temp(locs()->temp_count() - 1).reg() : kNoRegister;
Label ok, fail_label;
Label* deopt = compiler->is_optimizing() ?
compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField) : NULL;
Label* fail = (deopt != NULL) ? deopt : &fail_label;
if (emit_full_guard) {
__ LoadObject(field_reg, Field::ZoneHandle(field().raw()));
FieldAddress field_cid_operand(field_reg, Field::guarded_cid_offset());
FieldAddress field_nullability_operand(
field_reg, Field::is_nullable_offset());
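// The guard passes when the value's cid matches either the guarded cid or
// the recorded nullability cid; the comparisons below compute the difference
// into CMPRES1 and branch to ok when it is zero.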
if (value_cid == kDynamicCid) {
LoadValueCid(compiler, value_cid_reg, value_reg);
__ lhu(CMPRES1, field_cid_operand);
__ beq(value_cid_reg, CMPRES1, &ok);
__ lhu(TMP, field_nullability_operand);
__ subu(CMPRES1, value_cid_reg, TMP);
} else if (value_cid == kNullCid) {
__ lhu(TMP, field_nullability_operand);
__ LoadImmediate(CMPRES1, value_cid);
__ subu(CMPRES1, TMP, CMPRES1);
} else {
__ lhu(TMP, field_cid_operand);
__ LoadImmediate(CMPRES1, value_cid);
__ subu(CMPRES1, TMP, CMPRES1);
}
__ beq(CMPRES1, ZR, &ok);
// Check if the tracked state of the guarded field can be initialized
// inline. If the field needs a length check, we fall through to the runtime,
// which is responsible for computing the offset of the length field based
// on the class id.
// The length guard will be emitted separately, when needed, via the
// GuardFieldLength instruction after GuardFieldClass.
if (!field().needs_length_check()) {
// An uninitialized field can be handled inline. Check if the
// field is still uninitialized.
__ lhu(CMPRES1, field_cid_operand);
__ BranchNotEqual(CMPRES1, Immediate(kIllegalCid), fail);
if (value_cid == kDynamicCid) {
__ sh(value_cid_reg, field_cid_operand);
__ sh(value_cid_reg, field_nullability_operand);
} else {
__ LoadImmediate(TMP, value_cid);
__ sh(TMP, field_cid_operand);
__ sh(TMP, field_nullability_operand);
}
if (deopt == NULL) {
ASSERT(!compiler->is_optimizing());
__ b(&ok);
}
}
if (deopt == NULL) {
ASSERT(!compiler->is_optimizing());
__ Bind(fail);
__ lhu(CMPRES1, FieldAddress(field_reg, Field::guarded_cid_offset()));
__ BranchEqual(CMPRES1, Immediate(kDynamicCid), &ok);
__ addiu(SP, SP, Immediate(-2 * kWordSize));
__ sw(field_reg, Address(SP, 1 * kWordSize));
__ sw(value_reg, Address(SP, 0 * kWordSize));
__ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
__ Drop(2); // Drop the field and the value.
}
} else {
ASSERT(compiler->is_optimizing());
ASSERT(deopt != NULL);
// Field guard class has been initialized and is known.
if (value_cid == kDynamicCid) {
// Value's class id is not known.
__ andi(CMPRES1, value_reg, Immediate(kSmiTagMask));
if (field_cid != kSmiCid) {
__ beq(CMPRES1, ZR, fail);
__ LoadClassId(value_cid_reg, value_reg);
__ LoadImmediate(TMP, field_cid);
__ subu(CMPRES1, value_cid_reg, TMP);
}
if (field().is_nullable() && (field_cid != kNullCid)) {
__ beq(CMPRES1, ZR, &ok);
if (field_cid != kSmiCid) {
__ LoadImmediate(TMP, kNullCid);
__ subu(CMPRES1, value_cid_reg, TMP);
} else {
__ LoadObject(TMP, Object::null_object());
__ subu(CMPRES1, value_reg, TMP);
}
}
__ bne(CMPRES1, ZR, fail);
} else {
// Both the value's and the field's class ids are known.
ASSERT((value_cid != field_cid) && (value_cid != nullability));
__ b(fail);
}
}
__ Bind(&ok);
}
LocationSummary* GuardFieldLengthInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
if (!opt || (field().guarded_list_length() == Field::kUnknownFixedLength)) {
const intptr_t kNumTemps = 1;
LocationSummary* summary = new(zone) LocationSummary(
zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresRegister());
// We need a temporary register for the field object.
summary->set_temp(0, Location::RequiresRegister());
return summary;
} else {
LocationSummary* summary = new(zone) LocationSummary(
zone, kNumInputs, 0, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresRegister());
return summary;
}
UNREACHABLE();
}
void GuardFieldLengthInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (field().guarded_list_length() == Field::kNoFixedLength) {
ASSERT(!compiler->is_optimizing());
return; // Nothing to emit.
}
Label* deopt = compiler->is_optimizing() ?
compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField) : NULL;
const Register value_reg = locs()->in(0).reg();
if (!compiler->is_optimizing() ||
(field().guarded_list_length() == Field::kUnknownFixedLength)) {
const Register field_reg = locs()->temp(0).reg();
Label ok;
__ LoadObject(field_reg, Field::ZoneHandle(field().raw()));
__ lb(CMPRES1, FieldAddress(field_reg,
Field::guarded_list_length_in_object_offset_offset()));
__ blez(CMPRES1, &ok);
__ lw(CMPRES2, FieldAddress(field_reg,
Field::guarded_list_length_offset()));
// Load the length from the value. GuardFieldClass already verified that the
// value's class matches the guarded class id of the field.
// CMPRES1 contains the offset already corrected by -kHeapObjectTag, which is
// why we can use Address instead of FieldAddress.
__ addu(TMP, value_reg, CMPRES1);
__ lw(TMP, Address(TMP));
if (deopt == NULL) {
__ beq(CMPRES2, TMP, &ok);
__ addiu(SP, SP, Immediate(-2 * kWordSize));
__ sw(field_reg, Address(SP, 1 * kWordSize));
__ sw(value_reg, Address(SP, 0 * kWordSize));
__ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
__ Drop(2); // Drop the field and the value.
} else {
__ bne(CMPRES2, TMP, deopt);
}
__ Bind(&ok);
} else {
ASSERT(compiler->is_optimizing());
ASSERT(field().guarded_list_length() >= 0);
ASSERT(field().guarded_list_length_in_object_offset() !=
Field::kUnknownLengthOffset);
__ lw(CMPRES1,
FieldAddress(value_reg,
field().guarded_list_length_in_object_offset()));
__ LoadImmediate(TMP, Smi::RawValue(field().guarded_list_length()));
__ bne(CMPRES1, TMP, deopt);
}
}
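// Slow path that calls the allocation stub for the given class and moves the
// allocated object into result_; used when inline TryAllocate fails.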
class BoxAllocationSlowPath : public SlowPathCode {
public:
BoxAllocationSlowPath(Instruction* instruction,
const Class& cls,
Register result)
: instruction_(instruction),
cls_(cls),
result_(result) { }
virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
if (Assembler::EmittingComments()) {
__ Comment("%s slow path allocation of %s",
instruction_->DebugName(),
String::Handle(cls_.PrettyName()).ToCString());
}
__ Bind(entry_label());
const Code& stub = Code::ZoneHandle(
compiler->zone(), StubCode::GetAllocationStubForClass(cls_));
const StubEntry stub_entry(stub);
LocationSummary* locs = instruction_->locs();
locs->live_registers()->Remove(Location::RegisterLocation(result_));
compiler->SaveLiveRegisters(locs);
compiler->GenerateCall(Scanner::kNoSourcePos, // No token position.
stub_entry,
RawPcDescriptors::kOther,
locs);
compiler->AddStubCallTarget(stub);
if (result_ != V0) {
__ mov(result_, V0);
}
compiler->RestoreLiveRegisters(locs);
__ b(exit_label());
}
static void Allocate(FlowGraphCompiler* compiler,
Instruction* instruction,
const Class& cls,
Register result,
Register temp) {
if (compiler->intrinsic_mode()) {
__ TryAllocate(cls,
compiler->intrinsic_slow_path_label(),
result,
temp);
} else {
BoxAllocationSlowPath* slow_path =
new BoxAllocationSlowPath(instruction, cls, result);
compiler->AddSlowPathCode(slow_path);
__ TryAllocate(cls,
slow_path->entry_label(),
result,
temp);
__ Bind(slow_path->exit_label());
}
}
private:
Instruction* instruction_;
const Class& cls_;
const Register result_;
};
LocationSummary* StoreInstanceFieldInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 2;
const intptr_t kNumTemps =
(IsUnboxedStore() && opt) ? 2 :
((IsPotentialUnboxedStore()) ? 3 : 0);
LocationSummary* summary = new(zone) LocationSummary(
zone, kNumInputs, kNumTemps,
((IsUnboxedStore() && opt && is_potential_unboxed_initialization_) ||
IsPotentialUnboxedStore())
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresRegister());
if (IsUnboxedStore() && opt) {
summary->set_in(1, Location::RequiresFpuRegister());
summary->set_temp(0, Location::RequiresRegister());
summary->set_temp(1, Location::RequiresRegister());
} else if (IsPotentialUnboxedStore()) {
summary->set_in(1, ShouldEmitStoreBarrier()
? Location::WritableRegister()
: Location::RequiresRegister());
summary->set_temp(0, Location::RequiresRegister());
summary->set_temp(1, Location::RequiresRegister());
summary->set_temp(2, opt ? Location::RequiresFpuRegister()
: Location::FpuRegisterLocation(D1));
} else {
summary->set_in(1, ShouldEmitStoreBarrier()
? Location::WritableRegister()
: Location::RegisterOrConstant(value()));
}
return summary;
}
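// Loads the box stored at [instance_reg + offset]; if it is null, allocates
// a new box of cls and stores it into the field with a write barrier.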
static void EnsureMutableBox(FlowGraphCompiler* compiler,
StoreInstanceFieldInstr* instruction,
Register box_reg,
const Class& cls,
Register instance_reg,
intptr_t offset,
Register temp) {
Label done;
__ lw(box_reg, FieldAddress(instance_reg, offset));
__ BranchNotEqual(box_reg, Object::null_object(), &done);
BoxAllocationSlowPath::Allocate(compiler, instruction, cls, box_reg, temp);
__ mov(temp, box_reg);
__ StoreIntoObjectOffset(instance_reg, offset, temp);
__ Bind(&done);
}
void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(sizeof(classid_t) == kInt16Size);
Label skip_store;
Register instance_reg = locs()->in(0).reg();
if (IsUnboxedStore() && compiler->is_optimizing()) {
DRegister value = locs()->in(1).fpu_reg();
Register temp = locs()->temp(0).reg();
Register temp2 = locs()->temp(1).reg();
const intptr_t cid = field().UnboxedFieldCid();
if (is_potential_unboxed_initialization_) {
const Class* cls = NULL;
switch (cid) {
case kDoubleCid:
cls = &compiler->double_class();
break;
default:
UNREACHABLE();
}
BoxAllocationSlowPath::Allocate(compiler, this, *cls, temp, temp2);
__ mov(temp2, temp);
__ StoreIntoObjectOffset(instance_reg, offset_in_bytes_, temp2);
} else {
__ lw(temp, FieldAddress(instance_reg, offset_in_bytes_));
}
switch (cid) {
case kDoubleCid:
__ StoreDToOffset(value, temp, Double::value_offset() - kHeapObjectTag);
break;
default:
UNREACHABLE();
}
return;
}
if (IsPotentialUnboxedStore()) {
Register value_reg = locs()->in(1).reg();
Register temp = locs()->temp(0).reg();
Register temp2 = locs()->temp(1).reg();
DRegister fpu_temp = locs()->temp(2).fpu_reg();
if (ShouldEmitStoreBarrier()) {
// The value input is a writable register and must be manually preserved
// across the allocation slow path.
locs()->live_registers()->Add(locs()->in(1), kTagged);
}
Label store_pointer;
Label store_double;
__ LoadObject(temp, Field::ZoneHandle(field().raw()));
__ lhu(temp2, FieldAddress(temp, Field::is_nullable_offset()));
__ BranchEqual(temp2, Immediate(kNullCid), &store_pointer);
__ lbu(temp2, FieldAddress(temp, Field::kind_bits_offset()));
__ andi(CMPRES1, temp2, Immediate(1 << Field::kUnboxingCandidateBit));
__ beq(CMPRES1, ZR, &store_pointer);
__ lhu(temp2, FieldAddress(temp, Field::guarded_cid_offset()));
__ BranchEqual(temp2, Immediate(kDoubleCid), &store_double);
// Otherwise store the value as a tagged pointer.
__ b(&store_pointer);
if (!compiler->is_optimizing()) {
locs()->live_registers()->Add(locs()->in(0));
locs()->live_registers()->Add(locs()->in(1));
}
{
__ Bind(&store_double);
EnsureMutableBox(compiler,
this,
temp,
compiler->double_class(),
instance_reg,
offset_in_bytes_,
temp2);
__ LoadDFromOffset(fpu_temp,
value_reg,
Double::value_offset() - kHeapObjectTag);
__ StoreDToOffset(fpu_temp, temp,
Double::value_offset() - kHeapObjectTag);
__ b(&skip_store);
}
__ Bind(&store_pointer);
}
if (ShouldEmitStoreBarrier()) {
Register value_reg = locs()->in(1).reg();
__ StoreIntoObjectOffset(instance_reg,
offset_in_bytes_,
value_reg,
CanValueBeSmi());
} else {
if (locs()->in(1).IsConstant()) {
__ StoreIntoObjectNoBarrierOffset(
instance_reg,
offset_in_bytes_,
locs()->in(1).constant());
} else {
Register value_reg = locs()->in(1).reg();
__ StoreIntoObjectNoBarrierOffset(instance_reg,
offset_in_bytes_,
value_reg);
}
}
__ Bind(&skip_store);
}
LocationSummary* LoadStaticFieldInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
LocationSummary* summary = new(zone) LocationSummary(
zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresRegister());
summary->set_out(0, Location::RequiresRegister());
return summary;
}
// When the parser is building an implicit static getter for optimization,
// it can generate a function body where deoptimization ids do not line up
// with the unoptimized code.
//
// This is safe only so long as LoadStaticFieldInstr cannot deoptimize.
void LoadStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Comment("LoadStaticFieldInstr");
Register field = locs()->in(0).reg();
Register result = locs()->out(0).reg();
__ LoadFromOffset(result,
field,
Field::static_value_offset() - kHeapObjectTag);
}
LocationSummary* StoreStaticFieldInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
LocationSummary* locs = new(zone) LocationSummary(
zone, 1, 1, LocationSummary::kNoCall);
locs->set_in(0, value()->NeedsStoreBuffer() ? Location::WritableRegister()
: Location::RequiresRegister());
locs->set_temp(0, Location::RequiresRegister());
return locs;
}
void StoreStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Comment("StoreStaticFieldInstr");
Register value = locs()->in(0).reg();
Register temp = locs()->temp(0).reg();
__ LoadObject(temp, field());
if (this->value()->NeedsStoreBuffer()) {
__ StoreIntoObject(temp,
FieldAddress(temp, Field::static_value_offset()),
value,
CanValueBeSmi());
} else {
__ StoreIntoObjectNoBarrier(
temp, FieldAddress(temp, Field::static_value_offset()), value);
}
}
LocationSummary* InstanceOfInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 3;
const intptr_t kNumTemps = 0;
LocationSummary* summary = new(zone) LocationSummary(
zone, kNumInputs, kNumTemps, LocationSummary::kCall);
summary->set_in(0, Location::RegisterLocation(A0));
summary->set_in(1, Location::RegisterLocation(A2));
summary->set_in(2, Location::RegisterLocation(A1));
summary->set_out(0, Location::RegisterLocation(V0));
return summary;
}
void InstanceOfInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(locs()->in(0).reg() == A0); // Value.
ASSERT(locs()->in(1).reg() == A2); // Instantiator.
ASSERT(locs()->in(2).reg() == A1); // Instantiator type arguments.
__ Comment("InstanceOfInstr");
compiler->GenerateInstanceOf(token_pos(),
deopt_id(),
type(),
negate_result(),
locs());
ASSERT(locs()->out(0).reg() == V0);
}
LocationSummary* CreateArrayInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 2;
const intptr_t kNumTemps = 0;
LocationSummary* locs = new(zone) LocationSummary(
zone, kNumInputs, kNumTemps, LocationSummary::kCall);
locs->set_in(0, Location::RegisterLocation(A0));
locs->set_in(1, Location::RegisterLocation(A1));
locs->set_out(0, Location::RegisterLocation(V0));
return locs;
}
// Inlines array allocation when the number of elements is a known constant.
// Expects the element type in A0 and the length in A1; on success leaves the
// new array in V0 and branches to |done|; on failure jumps to |slow_path|.
static void InlineArrayAllocation(FlowGraphCompiler* compiler,
intptr_t num_elements,
Label* slow_path,
Label* done) {
const int kInlineArraySize = 12; // Same as kInlineInstanceSize.
const Register kLengthReg = A1;
const Register kElemTypeReg = A0;
const intptr_t instance_size = Array::InstanceSize(num_elements);
__ TryAllocateArray(kArrayCid, instance_size, slow_path,
V0, // instance
T1, // end address
T2,
T3);
// V0: new object start as a tagged pointer.
// T1: new object end address.
// Store the type argument field.
__ StoreIntoObjectNoBarrier(V0,
FieldAddress(V0, Array::type_arguments_offset()),
kElemTypeReg);
// Set the length field.
__ StoreIntoObjectNoBarrier(V0,
FieldAddress(V0, Array::length_offset()),
kLengthReg);
// Initialize all array elements to raw_null.
// V0: new object start as a tagged pointer.
// T1: new object end address.
// T2: iterator which initially points to the start of the variable
// data area to be initialized.
// T7: null.
if (num_elements > 0) {
const intptr_t array_size = instance_size - sizeof(RawArray);
__ LoadObject(T7, Object::null_object());
__ AddImmediate(T2, V0, sizeof(RawArray) - kHeapObjectTag);
if (array_size < (kInlineArraySize * kWordSize)) {
intptr_t current_offset = 0;
while (current_offset < array_size) {
__ sw(T7, Address(T2, current_offset));
current_offset += kWordSize;
}
} else {
Label init_loop;
__ Bind(&init_loop);
__ sw(T7, Address(T2, 0));
__ addiu(T2, T2, Immediate(kWordSize));
__ BranchUnsignedLess(T2, T1, &init_loop);
}
}
__ b(done);
}
void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Comment("CreateArrayInstr");
const Register kLengthReg = A1;
const Register kElemTypeReg = A0;
const Register kResultReg = V0;
ASSERT(locs()->in(0).reg() == kElemTypeReg);
ASSERT(locs()->in(1).reg() == kLengthReg);
if (compiler->is_optimizing() &&
num_elements()->BindsToConstant() &&
num_elements()->BoundConstant().IsSmi()) {
const intptr_t length = Smi::Cast(num_elements()->BoundConstant()).Value();
if ((length >= 0) && (length <= Array::kMaxElements)) {
Label slow_path, done;
InlineArrayAllocation(compiler, length, &slow_path, &done);
__ Bind(&slow_path);
__ PushObject(Object::null_object()); // Make room for the result.
__ Push(kLengthReg); // length.
__ Push(kElemTypeReg);
compiler->GenerateRuntimeCall(token_pos(),
deopt_id(),
kAllocateArrayRuntimeEntry,
2,
locs());
__ Drop(2);
__ Pop(kResultReg);
__ Bind(&done);
return;
}
}
const Code& stub = Code::ZoneHandle(compiler->zone(),
StubCode::AllocateArray_entry()->code());
compiler->AddStubCallTarget(stub);
compiler->GenerateCall(token_pos(),
*StubCode::AllocateArray_entry(),
RawPcDescriptors::kOther,
locs());
ASSERT(locs()->out(0).reg() == kResultReg);
}
LocationSummary* LoadFieldInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps =
(IsUnboxedLoad() && opt) ? 1 :
((IsPotentialUnboxedLoad()) ? 2 : 0);
LocationSummary* locs = new(zone) LocationSummary(
zone, kNumInputs, kNumTemps,
(opt && !IsPotentialUnboxedLoad())
? LocationSummary::kNoCall
: LocationSummary::kCallOnSlowPath);
locs->set_in(0, Location::RequiresRegister());
if (IsUnboxedLoad() && opt) {
locs->set_temp(0, Location::RequiresRegister());
} else if (IsPotentialUnboxedLoad()) {
locs->set_temp(0, opt ? Location::RequiresFpuRegister()
: Location::FpuRegisterLocation(D1));
locs->set_temp(1, Location::RequiresRegister());
}
locs->set_out(0, Location::RequiresRegister());
return locs;
}
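// Mirrors StoreInstanceFieldInstr: an optimized unboxed load reads the raw
// double straight out of the field's box; a potential unboxed load checks
// the field's nullability and guarded cid at run time and, for a double,
// copies the value into a freshly allocated box; otherwise a plain tagged
// field load is emitted.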
void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(sizeof(classid_t) == kInt16Size);
Register instance_reg = locs()->in(0).reg();
if (IsUnboxedLoad() && compiler->is_optimizing()) {
DRegister result = locs()->out(0).fpu_reg();
Register temp = locs()->temp(0).reg();
__ lw(temp, FieldAddress(instance_reg, offset_in_bytes()));
intptr_t cid = field()->UnboxedFieldCid();
switch (cid) {
case kDoubleCid:
__ LoadDFromOffset(result, temp,
Double::value_offset() - kHeapObjectTag);
break;
default:
UNREACHABLE();
}
return;
}
Label done;
Register result_reg = locs()->out(0).reg();
if (IsPotentialUnboxedLoad()) {
Register temp = locs()->temp(1).reg();
DRegister value = locs()->temp(0).fpu_reg();
Label load_pointer;
Label load_double;
__ LoadObject(result_reg, Field::ZoneHandle(field()->raw()));
FieldAddress field_cid_operand(result_reg, Field::guarded_cid_offset());
FieldAddress field_nullability_operand(result_reg,
Field::is_nullable_offset());
__ lhu(temp, field_nullability_operand);
__ BranchEqual(temp, Immediate(kNullCid), &load_pointer);
__ lhu(temp, field_cid_operand);
__ BranchEqual(temp, Immediate(kDoubleCid), &load_double);
// Otherwise load the field as a tagged pointer.
__ b(&load_pointer);
if (!compiler->is_optimizing()) {
locs()->live_registers()->Add(locs()->in(0));
}
{
__ Bind(&load_double);
BoxAllocationSlowPath::Allocate(
compiler, this, compiler->double_class(), result_reg, temp);
__ lw(temp, FieldAddress(instance_reg, offset_in_bytes()));
__ LoadDFromOffset(value, temp, Double::value_offset() - kHeapObjectTag);
__ StoreDToOffset(value,
result_reg,
Double::value_offset() - kHeapObjectTag);
__ b(&done);
}
__ Bind(&load_pointer);
}
__ LoadFieldFromOffset(result_reg, instance_reg, offset_in_bytes());
__ Bind(&done);
}
LocationSummary* InstantiateTypeInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
LocationSummary* locs = new(zone) LocationSummary(
zone, kNumInputs, kNumTemps, LocationSummary::kCall);
locs->set_in(0, Location::RegisterLocation(T0));
locs->set_out(0, Location::RegisterLocation(T0));
return locs;
}
void InstantiateTypeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Comment("InstantiateTypeInstr");
Register instantiator_reg = locs()->in(0).reg();
Register result_reg = locs()->out(0).reg();
// 'instantiator_reg' is the instantiator TypeArguments object (or null).
// A runtime call to instantiate the type is required.
__ addiu(SP, SP, Immediate(-3 * kWordSize));
__ LoadObject(TMP, Object::null_object());
__ sw(TMP, Address(SP, 2 * kWordSize)); // Make room for the result.
__ LoadObject(TMP, type());
__ sw(TMP, Address(SP, 1 * kWordSize));
// Push instantiator type arguments.
__ sw(instantiator_reg, Address(SP, 0 * kWordSize));
compiler->GenerateRuntimeCall(token_pos(),
deopt_id(),
kInstantiateTypeRuntimeEntry,
2,
locs());
// Pop instantiated type.
__ lw(result_reg, Address(SP, 2 * kWordSize));
// Drop instantiator and uninstantiated type.
__ addiu(SP, SP, Immediate(3 * kWordSize));
ASSERT(instantiator_reg == result_reg);
}
LocationSummary* InstantiateTypeArgumentsInstr::MakeLocationSummary(
Zone* zone, bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
LocationSummary* locs = new(zone) LocationSummary(
zone, kNumInputs, kNumTemps, LocationSummary::kCall);
locs->set_in(0, Location::RegisterLocation(T0));
locs->set_out(0, Location::RegisterLocation(T0));
return locs;
}
void InstantiateTypeArgumentsInstr::EmitNativeCode(
FlowGraphCompiler* compiler) {
__ Comment("InstantiateTypeArgumentsInstr");
Register instantiator_reg = locs()->in(0).reg();
Register result_reg = locs()->out(0).reg();
ASSERT(instantiator_reg == T0);
ASSERT(instantiator_reg == result_reg);
// 'instantiator_reg' is the instantiator TypeArguments object (or null).
ASSERT(!type_arguments().IsUninstantiatedIdentity() &&
!type_arguments().CanShareInstantiatorTypeArguments(
instantiator_class()));
// If the instantiator is null and the type argument vector instantiated
// from null becomes a vector of dynamic, then use null as the type
// arguments.
Label type_arguments_instantiated;
const intptr_t len = type_arguments().Length();
if (type_arguments().IsRawInstantiatedRaw(len)) {
__ BranchEqual(instantiator_reg, Object::null_object(),
&type_arguments_instantiated);
}
__ LoadObject(T2, type_arguments());
__ lw(T2, FieldAddress(T2, TypeArguments::instantiations_offset()));
__ AddImmediate(T2, Array::data_offset() - kHeapObjectTag);
// The instantiations cache is initialized with Object::zero_array() and is
// therefore guaranteed to contain kNoInstantiator. No length check needed.
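// Each cache entry occupies two words: the instantiator type arguments
// followed by the corresponding instantiated type arguments, hence the
// 2 * kWordSize stride in the probe loop below.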
Label loop, found, slow_case;
__ Bind(&loop);
__ lw(T1, Address(T2, 0 * kWordSize)); // Cached instantiator.
__ beq(T1, T0, &found);
__ BranchNotEqual(
T1, Immediate(Smi::RawValue(StubCode::kNoInstantiator)), &loop);
__ delay_slot()->addiu(T2, T2, Immediate(2 * kWordSize));
__ b(&slow_case);
__ Bind(&found);
__ lw(T0, Address(T2, 1 * kWordSize)); // Cached instantiated args.
__ b(&type_arguments_instantiated);
__ Bind(&slow_case);
// Instantiate non-null type arguments.
// A runtime call to instantiate the type arguments is required.
__ addiu(SP, SP, Immediate(-3 * kWordSize));
__ LoadObject(TMP, Object::null_object());
__ sw(TMP, Address(SP, 2 * kWordSize)); // Make room for the result.
__ LoadObject(TMP, type_arguments());
__ sw(TMP, Address(SP, 1 * kWordSize));
// Push instantiator type arguments.
__ sw(instantiator_reg, Address(SP, 0 * kWordSize));
compiler->GenerateRuntimeCall(token_pos(),
deopt_id(),
kInstantiateTypeArgumentsRuntimeEntry,
2,
locs());
// Pop instantiated type arguments.
__ lw(result_reg, Address(SP, 2 * kWordSize));
// Drop instantiator and uninstantiated type arguments.
__ addiu(SP, SP, Immediate(3 * kWordSize));
__ Bind(&type_arguments_instantiated);
}
LocationSummary* AllocateUninitializedContextInstr::MakeLocationSummary(
Zone* zone,
bool opt) const {
ASSERT(opt);
const intptr_t kNumInputs = 0;
const intptr_t kNumTemps = 3;
LocationSummary* locs = new(zone) LocationSummary(
zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
locs->set_temp(0, Location::RegisterLocation(T1));
locs->set_temp(1, Location::RegisterLocation(T2));
locs->set_temp(2, Location::RegisterLocation(T3));
locs->set_out(0, Location::RegisterLocation(V0));
return locs;
}
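// Slow path for AllocateUninitializedContextInstr: calls the
// AllocateContext stub, which (as in AllocateContextInstr below) takes the
// number of context variables in T1 and returns the new context in V0.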
class AllocateContextSlowPath : public SlowPathCode {
public:
explicit AllocateContextSlowPath(
AllocateUninitializedContextInstr* instruction)
: instruction_(instruction) { }
virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
__ Comment("AllocateContextSlowPath");
__ Bind(entry_label());
LocationSummary* locs = instruction_->locs();
locs->live_registers()->Remove(locs->out(0));
compiler->SaveLiveRegisters(locs);
__ LoadImmediate(T1, instruction_->num_context_variables());
const Code& stub = Code::ZoneHandle(
compiler->zone(), StubCode::AllocateContext_entry()->code());
compiler->AddStubCallTarget(stub);
compiler->GenerateCall(instruction_->token_pos(),
*StubCode::AllocateContext_entry(),
RawPcDescriptors::kOther,
locs);
ASSERT(instruction_->locs()->out(0).reg() == V0);
compiler->RestoreLiveRegisters(instruction_->locs());
__ b(exit_label());
}
private:
AllocateUninitializedContextInstr* instruction_;
};
void AllocateUninitializedContextInstr::EmitNativeCode(
FlowGraphCompiler* compiler) {
Register temp0 = locs()->temp(0).reg();
Register temp1 = locs()->temp(1).reg();
Register temp2 = locs()->temp(2).reg();
Register result = locs()->out(0).reg();
// Try to allocate the object inline; fall back to the slow path on failure.
AllocateContextSlowPath* slow_path = new AllocateContextSlowPath(this);
compiler->AddSlowPathCode(slow_path);
intptr_t instance_size = Context::InstanceSize(num_context_variables());
__ TryAllocateArray(kContextCid, instance_size, slow_path->entry_label(),
result, // instance
temp0,
temp1,
temp2);
// Set up the number of context variables field.
__ LoadImmediate(temp0, num_context_variables());
__ sw(temp0, FieldAddress(result, Context::num_variables_offset()));
__ Bind(slow_path->exit_label());
}
LocationSummary* AllocateContextInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 0;
const intptr_t kNumTemps = 1;
LocationSummary* locs = new(zone) LocationSummary(
zone, kNumInputs, kNumTemps, LocationSummary::kCall);
locs->set_temp(0, Location::RegisterLocation(T1));
locs->set_out(0, Location::RegisterLocation(V0));
return locs;
}
void AllocateContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(locs()->temp(0).reg() == T1);
ASSERT(locs()->out(0).reg() == V0);
__ Comment("AllocateContextInstr");
__ LoadImmediate(T1, num_context_variables());
compiler->GenerateCall(token_pos(),
*StubCode::AllocateContext_entry(),
RawPcDescriptors::kOther,
locs());
}
LocationSummary* InitStaticFieldInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 1;
LocationSummary* locs = new(zone) LocationSummary(
zone, kNumInputs, kNumTemps, LocationSummary::kCall);
locs->set_in(0, Location::RegisterLocation(T0));
locs->set_temp(0, Location::RegisterLocation(T1));
return locs;
}
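// Calls the InitStaticField runtime entry when the field still holds the
// uninitialized sentinel or the transition sentinel; otherwise the field is
// already initialized and no call is made.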
void InitStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register field = locs()->in(0).reg();
Register temp = locs()->temp(0).reg();
Label call_runtime, no_call;
__ Comment("InitStaticFieldInstr");
__ lw(temp, FieldAddress(field, Field::static_value_offset()));
__ BranchEqual(temp, Object::sentinel(), &call_runtime);
__ BranchNotEqual(temp, Object::transition_sentinel(), &no_call);
__ Bind(&call_runtime);
__ addiu(SP, SP, Immediate(-2 * kWordSize));
__ LoadObject(TMP, Object::null_object());
__ sw(TMP, Address(SP, 1 * kWordSize)); // Make room for (unused) result.
__ sw(field, Address(SP, 0 * kWordSize));
compiler->GenerateRuntimeCall(token_pos(),
deopt_id(),
kInitStaticFieldRuntimeEntry,
1,
locs());
__ addiu(SP, SP, Immediate(2 * kWordSize)); // Purge argument and result.
__ Bind(&no_call);
}
LocationSummary* CloneContextInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
LocationSummary* locs = new(zone) LocationSummary(
zone, kNumInputs, kNumTemps, LocationSummary::kCall);
locs->set_in(0, Location::RegisterLocation(T0));
locs->set_out(0, Location::RegisterLocation(T0));
return locs;
}
void CloneContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register context_value = locs()->in(0).reg();
Register result = locs()->out(0).reg();
__ Comment("CloneContextInstr");
__ addiu(SP, SP, Immediate(-2 * kWordSize));
__ LoadObject(TMP, Object::null_object()); // Make room for the result.
__ sw(TMP, Address(SP, 1 * kWordSize));
__ sw(context_value, Address(SP, 0 * kWordSize));
compiler->GenerateRuntimeCall(token_pos(),
deopt_id(),
kCloneContextRuntimeEntry,
1,
locs());
__ lw(result, Address(SP, 1 * kWordSize)); // Get result (cloned context).
__ addiu(SP, SP, Immediate(2 * kWordSize));
}
LocationSummary* CatchBlockEntryInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
UNREACHABLE();
return NULL;
}
void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Bind(compiler->GetJumpLabel(this));
compiler->AddExceptionHandler(catch_try_index(),
try_index(),
compiler->assembler()->CodeSize(),
catch_handler_types_,
needs_stacktrace());
// Restore the code pointer and the pool pointer.
__ RestoreCodePointer();
__ LoadPoolPointer();
if (HasParallelMove()) {
compiler->parallel_move_resolver()->EmitNativeCode(parallel_move());
}
// Restore SP from FP as we are coming from a throw and the code for
// popping arguments has not been run.
const intptr_t fp_sp_dist =
(kFirstLocalSlotFromFp + 1 - compiler->StackSize()) * kWordSize;
ASSERT(fp_sp_dist <= 0);
__ AddImmediate(SP, FP, fp_sp_dist);
// Restore the stack and initialize the exception and stack trace variables.
__ StoreToOffset(kExceptionObjectReg,
FP, exception_var().index() * kWordSize);
__ StoreToOffset(kStackTraceObjectReg,
FP, stacktrace_var().index() * kWordSize);
}
LocationSummary* CheckStackOverflowInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 0;
const intptr_t kNumTemps = 1;
LocationSummary* summary = new(zone) LocationSummary(
zone, kNumInputs,
kNumTemps,
LocationSummary::kCallOnSlowPath);
summary->set_temp(0, Location::RequiresRegister());
return summary;
}
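// Slow path for stack overflow checks: performs the StackOverflow runtime
// call (recording loop checks in unoptimized code as OSR entries) and, when
// OSR is enabled, provides an entry point that first records an OSR request
// in the isolate's stack overflow flags.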
class CheckStackOverflowSlowPath : public SlowPathCode {
public:
explicit CheckStackOverflowSlowPath(CheckStackOverflowInstr* instruction)
: instruction_(instruction) { }
virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
if (FLAG_use_osr && osr_entry_label()->IsLinked()) {
uword flags_address = Isolate::Current()->stack_overflow_flags_address();
Register value = instruction_->locs()->temp(0).reg();
__ Comment("CheckStackOverflowSlowPathOsr");
__ Bind(osr_entry_label());
ASSERT(FLAG_allow_absolute_addresses);
__ LoadImmediate(TMP, flags_address);
__ LoadImmediate(value, Isolate::kOsrRequest);
__ sw(value, Address(TMP));
}
__ Comment("CheckStackOverflowSlowPath");
__ Bind(entry_label());
compiler->SaveLiveRegisters(instruction_->locs());
// pending_deoptimization_env_ is needed to generate a runtime call that
// may throw an exception.
ASSERT(compiler->pending_deoptimization_env_ == NULL);
Environment* env = compiler->SlowPathEnvironmentFor(instruction_);
compiler->pending_deoptimization_env_ = env;
compiler->GenerateRuntimeCall(instruction_->token_pos(),
instruction_->deopt_id(),
kStackOverflowRuntimeEntry,
0,
instruction_->locs());
if (FLAG_use_osr && !compiler->is_optimizing() && instruction_->in_loop()) {
// In unoptimized code, record loop stack checks as possible OSR entries.
compiler->AddCurrentDescriptor(RawPcDescriptors::kOsrEntry,
instruction_->deopt_id(),
0); // No token position.
}
compiler->pending_deoptimization_env_ = NULL;
compiler->RestoreLiveRegisters(instruction_->locs());
__ b(exit_label());
}
Label* osr_entry_label() {
ASSERT(FLAG_use_osr);
return &osr_entry_label_;
}
private:
CheckStackOverflowInstr* instruction_;
Label osr_entry_label_;
};
void CheckStackOverflowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Comment("CheckStackOverflowInstr");
CheckStackOverflowSlowPath* slow_path = new CheckStackOverflowSlowPath(this);
compiler->AddSlowPathCode(