// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#include "vm/globals.h" // Needed here to get TARGET_ARCH_X64.
#if defined(TARGET_ARCH_X64) && !defined(DART_PRECOMPILED_RUNTIME)
#include "vm/compiler/backend/il.h"
#include "vm/compiler/assembler/assembler.h"
#include "vm/compiler/backend/flow_graph.h"
#include "vm/compiler/backend/flow_graph_compiler.h"
#include "vm/compiler/backend/locations.h"
#include "vm/compiler/backend/locations_helpers.h"
#include "vm/compiler/backend/range_analysis.h"
#include "vm/compiler/ffi.h"
#include "vm/compiler/jit/compiler.h"
#include "vm/dart_entry.h"
#include "vm/instructions.h"
#include "vm/object_store.h"
#include "vm/parser.h"
#include "vm/stack_frame.h"
#include "vm/stub_code.h"
#include "vm/symbols.h"
#include "vm/type_testing_stubs.h"
#define __ compiler->assembler()->
#define Z (compiler->zone())
namespace dart {
// Generic summary for call instructions that have all arguments pushed
// on the stack and return the result in a fixed register RAX.
LocationSummary* Instruction::MakeCallSummary(Zone* zone) {
LocationSummary* result =
new (zone) LocationSummary(zone, 0, 0, LocationSummary::kCall);
result->set_out(0, Location::RegisterLocation(RAX));
return result;
}
DEFINE_BACKEND(LoadIndexedUnsafe, (Register out, Register index)) {
ASSERT(instr->RequiredInputRepresentation(0) == kTagged); // It is a Smi.
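// The index is a tagged Smi (shifted left by 1), so scaling by 4 gives an
// effective element size of 8 bytes (one word on x64).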
__ movq(out, Address(instr->base_reg(), index, TIMES_4, instr->offset()));
ASSERT(kSmiTag == 0);
ASSERT(kSmiTagSize == 1);
}
DEFINE_BACKEND(StoreIndexedUnsafe,
(NoLocation, Register index, Register value)) {
ASSERT(instr->RequiredInputRepresentation(
StoreIndexedUnsafeInstr::kIndexPos) == kTagged); // It is a Smi.
__ movq(Address(instr->base_reg(), index, TIMES_4, instr->offset()), value);
ASSERT(kSmiTag == 0);
ASSERT(kSmiTagSize == 1);
}
DEFINE_BACKEND(TailCall, (NoLocation, Fixed<Register, ARGS_DESC_REG>)) {
__ LoadObject(CODE_REG, instr->code());
__ LeaveDartFrame(); // The arguments are still on the stack.
__ jmp(FieldAddress(CODE_REG, Code::entry_point_offset()));
// Even though the TailCallInstr will be the last instruction in a basic
// block, the flow graph compiler will emit native code for other blocks after
// the one containing this instruction and needs to be able to use the pool.
// (The `LeaveDartFrame` above disables use of the pool.)
__ set_constant_pool_allowed(true);
}
LocationSummary* PushArgumentInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
LocationSummary* locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::AnyOrConstant(value()));
return locs;
}
void PushArgumentInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// In SSA mode, we need an explicit push. Nothing to do in non-SSA mode
// where PushArgument is handled by BindInstr::EmitNativeCode.
if (compiler->is_optimizing()) {
Location value = locs()->in(0);
if (value.IsRegister()) {
__ pushq(value.reg());
} else if (value.IsConstant()) {
__ PushObject(value.constant());
} else {
ASSERT(value.IsStackSlot());
__ pushq(value.ToStackSlotAddress());
}
}
}
LocationSummary* ReturnInstr::MakeLocationSummary(Zone* zone, bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
LocationSummary* locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RegisterLocation(RAX));
return locs;
}
// Attempt optimized compilation at return instruction instead of at the entry.
// The entry needs to be patchable; no inlined objects are allowed in the area
// that will be overwritten by the patch instruction (a jump).
void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register result = locs()->in(0).reg();
ASSERT(result == RAX);
if (compiler->intrinsic_mode()) {
// Intrinsics don't have a frame.
__ ret();
return;
}
#if defined(DEBUG)
__ Comment("Stack Check");
Label done;
const intptr_t fp_sp_dist =
(compiler::target::frame_layout.first_local_from_fp + 1 -
compiler->StackSize()) *
kWordSize;
ASSERT(fp_sp_dist <= 0);
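// Verify that SP is exactly fp_sp_dist below FP (i.e. the frame still has the
// expected size); trap with int3 otherwise.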
__ movq(RDI, RSP);
__ subq(RDI, RBP);
__ CompareImmediate(RDI, Immediate(fp_sp_dist));
__ j(EQUAL, &done, Assembler::kNearJump);
__ int3();
__ Bind(&done);
#endif
ASSERT(__ constant_pool_allowed());
__ LeaveDartFrame(); // Disallows constant pool use.
__ ret();
// This ReturnInstr may be emitted out of order by the optimizer. The next
// block may be a target expecting a properly set constant pool pointer.
__ set_constant_pool_allowed(true);
}
static Condition NegateCondition(Condition condition) {
switch (condition) {
case EQUAL:
return NOT_EQUAL;
case NOT_EQUAL:
return EQUAL;
case LESS:
return GREATER_EQUAL;
case LESS_EQUAL:
return GREATER;
case GREATER:
return LESS_EQUAL;
case GREATER_EQUAL:
return LESS;
case BELOW:
return ABOVE_EQUAL;
case BELOW_EQUAL:
return ABOVE;
case ABOVE:
return BELOW_EQUAL;
case ABOVE_EQUAL:
return BELOW;
case PARITY_EVEN:
return PARITY_ODD;
case PARITY_ODD:
return PARITY_EVEN;
default:
UNIMPLEMENTED();
return EQUAL;
}
}
// Detect pattern when one value is zero and another is a power of 2.
static bool IsPowerOfTwoKind(intptr_t v1, intptr_t v2) {
return (Utils::IsPowerOfTwo(v1) && (v2 == 0)) ||
(Utils::IsPowerOfTwo(v2) && (v1 == 0));
}
LocationSummary* IfThenElseInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
comparison()->InitializeLocationSummary(zone, opt);
// TODO(dartbug.com/30952) support conversion of Register to corresponding
// least significant byte register (e.g. RAX -> AL, RSI -> SIL, r15 -> r15b).
comparison()->locs()->set_out(0, Location::RegisterLocation(RDX));
return comparison()->locs();
}
void IfThenElseInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(locs()->out(0).reg() == RDX);
// Clear upper part of the out register. We are going to use setcc on it
// which is a byte move.
__ xorq(RDX, RDX);
// Emit comparison code. This must not overwrite the result register.
// IfThenElseInstr::Supports() should prevent EmitComparisonCode from using
// the labels or returning an invalid condition.
BranchLabels labels = {NULL, NULL, NULL};
Condition true_condition = comparison()->EmitComparisonCode(compiler, labels);
ASSERT(true_condition != INVALID_CONDITION);
const bool is_power_of_two_kind = IsPowerOfTwoKind(if_true_, if_false_);
intptr_t true_value = if_true_;
intptr_t false_value = if_false_;
if (is_power_of_two_kind) {
if (true_value == 0) {
// We need to have zero in RDX on true_condition.
true_condition = NegateCondition(true_condition);
}
} else {
if (true_value == 0) {
// Swap values so that false_value is zero.
intptr_t temp = true_value;
true_value = false_value;
false_value = temp;
} else {
true_condition = NegateCondition(true_condition);
}
}
__ setcc(true_condition, DL);
if (is_power_of_two_kind) {
const intptr_t shift =
Utils::ShiftForPowerOfTwo(Utils::Maximum(true_value, false_value));
__ shlq(RDX, Immediate(shift + kSmiTagSize));
} else {
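// RDX holds 0 or 1 from setcc. decq turns it into a zero/all-ones mask, the
// and keeps either 0 or the difference of the two Smi constants, and adding
// Smi(false_value) then produces the selected constant without branching.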
__ decq(RDX);
__ AndImmediate(
RDX, Immediate(Smi::RawValue(true_value) - Smi::RawValue(false_value)));
if (false_value != 0) {
__ AddImmediate(RDX, Immediate(Smi::RawValue(false_value)));
}
}
}
LocationSummary* LoadLocalInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 0;
const intptr_t stack_index =
compiler::target::frame_layout.FrameSlotForVariable(&local());
return LocationSummary::Make(zone, kNumInputs,
Location::StackSlot(stack_index),
LocationSummary::kNoCall);
}
void LoadLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(!compiler->is_optimizing());
// Nothing to do.
}
LocationSummary* StoreLocalInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
return LocationSummary::Make(zone, kNumInputs, Location::SameAsFirstInput(),
LocationSummary::kNoCall);
}
void StoreLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register value = locs()->in(0).reg();
Register result = locs()->out(0).reg();
ASSERT(result == value); // Assert that register assignment is correct.
__ movq(
Address(RBP, compiler::target::FrameOffsetInBytesForVariable(&local())),
value);
}
LocationSummary* ConstantInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 0;
return LocationSummary::Make(zone, kNumInputs,
Assembler::IsSafe(value())
? Location::Constant(this)
: Location::RequiresRegister(),
LocationSummary::kNoCall);
}
void ConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// The register allocator drops constant definitions that have no uses.
Location out = locs()->out(0);
ASSERT(out.IsRegister() || out.IsConstant() || out.IsInvalid());
if (out.IsRegister()) {
Register result = out.reg();
__ LoadObject(result, value());
}
}
void ConstantInstr::EmitMoveToLocation(FlowGraphCompiler* compiler,
const Location& destination,
Register tmp) {
if (destination.IsRegister()) {
if (representation() == kUnboxedInt32 ||
representation() == kUnboxedInt64) {
const int64_t value = Integer::Cast(value_).AsInt64Value();
if (value == 0) {
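// Writing a 32-bit register zero-extends into the full 64 bits, so xorl
// clears the register and encodes more compactly than movq with 0.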
__ xorl(destination.reg(), destination.reg());
} else {
__ movq(destination.reg(), Immediate(value));
}
} else {
ASSERT(representation() == kTagged);
__ LoadObject(destination.reg(), value_);
}
} else if (destination.IsFpuRegister()) {
if (Utils::DoublesBitEqual(Double::Cast(value_).value(), 0.0)) {
__ xorps(destination.fpu_reg(), destination.fpu_reg());
} else {
ASSERT(tmp != kNoRegister);
__ LoadObject(tmp, value_);
__ movsd(destination.fpu_reg(),
FieldAddress(tmp, Double::value_offset()));
}
} else if (destination.IsDoubleStackSlot()) {
if (Utils::DoublesBitEqual(Double::Cast(value_).value(), 0.0)) {
__ xorps(XMM0, XMM0);
} else {
ASSERT(tmp != kNoRegister);
__ LoadObject(tmp, value_);
__ movsd(XMM0, FieldAddress(tmp, Double::value_offset()));
}
__ movsd(destination.ToStackSlotAddress(), XMM0);
} else {
ASSERT(destination.IsStackSlot());
if (representation() == kUnboxedInt32 ||
representation() == kUnboxedInt64) {
const int64_t value = Integer::Cast(value_).AsInt64Value();
__ movq(destination.ToStackSlotAddress(), Immediate(value));
} else {
ASSERT(representation() == kTagged);
__ StoreObject(destination.ToStackSlotAddress(), value_);
}
}
}
LocationSummary* UnboxedConstantInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 0;
const intptr_t kNumTemps = IsUnboxedSignedIntegerConstant() ? 0 : 1;
LocationSummary* locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
switch (representation()) {
case kUnboxedDouble:
locs->set_out(0, Location::RequiresFpuRegister());
locs->set_temp(0, Location::RequiresRegister());
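// The temp register holds the boxed Double so its value can be loaded into
// the FPU register (see EmitMoveToLocation).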
break;
case kUnboxedInt32:
case kUnboxedInt64:
locs->set_out(0, Location::RequiresRegister());
break;
default:
UNREACHABLE();
break;
}
return locs;
}
void UnboxedConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// The register allocator drops constant definitions that have no uses.
if (!locs()->out(0).IsInvalid()) {
const Register scratch =
IsUnboxedSignedIntegerConstant() ? kNoRegister : locs()->temp(0).reg();
EmitMoveToLocation(compiler, locs()->out(0), scratch);
}
}
LocationSummary* AssertAssignableInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
// When using a type testing stub, we want to prevent spilling of the
// function/instantiator type argument vectors, since the stub preserves them. So
// we make this a `kNoCall` summary, even though most other registers can be
// modified by the stub. To tell the register allocator about it, we reserve
// all the other registers as temporary registers.
// TODO(http://dartbug.com/32788): Simplify this.
const Register kInstanceReg = RAX;
const Register kInstantiatorTypeArgumentsReg = RDX;
const Register kFunctionTypeArgumentsReg = RCX;
const bool using_stub =
FlowGraphCompiler::ShouldUseTypeTestingStubFor(opt, dst_type());
const intptr_t kNonChangeableInputRegs =
(1 << kInstanceReg) | (1 << kInstantiatorTypeArgumentsReg) |
(1 << kFunctionTypeArgumentsReg);
const intptr_t kNumInputs = 3;
// We invoke a stub that can potentially clobber any CPU register
// but can only clobber FPU registers on the slow path when
// entering runtime. Preserve all FPU registers that are
// not guaranteed to be preserved by the ABI.
const intptr_t kCpuRegistersToPreserve =
kDartAvailableCpuRegs & ~kNonChangeableInputRegs;
const intptr_t kFpuRegistersToPreserve =
CallingConventions::kVolatileXmmRegisters & ~(1 << FpuTMP);
const intptr_t kNumTemps =
using_stub ? (Utils::CountOneBits64(kCpuRegistersToPreserve) +
Utils::CountOneBits64(kFpuRegistersToPreserve))
: 0;
LocationSummary* summary = new (zone) LocationSummary(
zone, kNumInputs, kNumTemps,
using_stub ? LocationSummary::kCallCalleeSafe : LocationSummary::kCall);
summary->set_in(0, Location::RegisterLocation(kInstanceReg)); // Value.
summary->set_in(1,
Location::RegisterLocation(
kInstantiatorTypeArgumentsReg)); // Instant. type args.
summary->set_in(2, Location::RegisterLocation(
kFunctionTypeArgumentsReg)); // Function type args.
// TODO(http://dartbug.com/32787): Use Location::SameAsFirstInput() instead,
// once register allocator no longer hits assertion.
summary->set_out(0, Location::RegisterLocation(kInstanceReg));
if (using_stub) {
// Let's reserve all registers except for the input ones.
intptr_t next_temp = 0;
for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
const bool should_preserve = ((1 << i) & kCpuRegistersToPreserve) != 0;
if (should_preserve) {
summary->set_temp(next_temp++,
Location::RegisterLocation(static_cast<Register>(i)));
}
}
for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
const bool should_preserve = ((1 << i) & kFpuRegistersToPreserve) != 0;
if (should_preserve) {
summary->set_temp(next_temp++, Location::FpuRegisterLocation(
static_cast<FpuRegister>(i)));
}
}
}
return summary;
}
LocationSummary* AssertSubtypeInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 2;
const intptr_t kNumTemps = 0;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
summary->set_in(0, Location::RegisterLocation(RDX)); // Instant. type args
summary->set_in(1, Location::RegisterLocation(RCX)); // Function type args
return summary;
}
LocationSummary* AssertBooleanInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
LocationSummary* locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
locs->set_in(0, Location::RegisterLocation(RAX));
locs->set_out(0, Location::RegisterLocation(RAX));
return locs;
}
static void EmitAssertBoolean(Register reg,
TokenPosition token_pos,
intptr_t deopt_id,
LocationSummary* locs,
FlowGraphCompiler* compiler) {
// Check that the type of the value is allowed in a conditional context.
// Call the runtime if the object is not bool::true or bool::false.
ASSERT(locs->always_calls());
Label done;
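// Only a null value reaches the runtime call below; any other value is
// treated as a valid boolean here (non-bool, non-null values are expected to
// have been excluded earlier).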
__ CompareObject(reg, Object::null_instance());
__ j(NOT_EQUAL, &done, Assembler::kNearJump);
__ pushq(reg); // Push the source object.
compiler->GenerateRuntimeCall(token_pos, deopt_id,
kNonBoolTypeErrorRuntimeEntry, 1, locs);
// We should never return here.
__ int3();
__ Bind(&done);
}
void AssertBooleanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register obj = locs()->in(0).reg();
Register result = locs()->out(0).reg();
EmitAssertBoolean(obj, token_pos(), deopt_id(), locs(), compiler);
ASSERT(obj == result);
}
static Condition TokenKindToIntCondition(Token::Kind kind) {
switch (kind) {
case Token::kEQ:
return EQUAL;
case Token::kNE:
return NOT_EQUAL;
case Token::kLT:
return LESS;
case Token::kGT:
return GREATER;
case Token::kLTE:
return LESS_EQUAL;
case Token::kGTE:
return GREATER_EQUAL;
default:
UNREACHABLE();
return OVERFLOW;
}
}
LocationSummary* EqualityCompareInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 2;
if (operation_cid() == kDoubleCid) {
const intptr_t kNumTemps = 0;
LocationSummary* locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RequiresFpuRegister());
locs->set_in(1, Location::RequiresFpuRegister());
locs->set_out(0, Location::RequiresRegister());
return locs;
}
if (operation_cid() == kSmiCid || operation_cid() == kMintCid) {
const intptr_t kNumTemps = 0;
LocationSummary* locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RegisterOrConstant(left()));
// Only one input can be a constant operand. The case of two constant
// operands should be handled by constant propagation.
// Only right can be a stack slot.
locs->set_in(1, locs->in(0).IsConstant()
? Location::RequiresRegister()
: Location::RegisterOrConstant(right()));
locs->set_out(0, Location::RequiresRegister());
return locs;
}
UNREACHABLE();
return NULL;
}
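// Loads the class id of |value_reg| into |value_cid_reg|. If the value is a
// Smi, loads kSmiCid, or jumps to |value_is_smi| when that label is provided.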
static void LoadValueCid(FlowGraphCompiler* compiler,
Register value_cid_reg,
Register value_reg,
Label* value_is_smi = NULL) {
Label done;
if (value_is_smi == NULL) {
__ LoadImmediate(value_cid_reg, Immediate(kSmiCid));
}
__ testq(value_reg, Immediate(kSmiTagMask));
if (value_is_smi == NULL) {
__ j(ZERO, &done, Assembler::kNearJump);
} else {
__ j(ZERO, value_is_smi);
}
__ LoadClassId(value_cid_reg, value_reg);
__ Bind(&done);
}
static Condition FlipCondition(Condition condition) {
switch (condition) {
case EQUAL:
return EQUAL;
case NOT_EQUAL:
return NOT_EQUAL;
case LESS:
return GREATER;
case LESS_EQUAL:
return GREATER_EQUAL;
case GREATER:
return LESS;
case GREATER_EQUAL:
return LESS_EQUAL;
case BELOW:
return ABOVE;
case BELOW_EQUAL:
return ABOVE_EQUAL;
case ABOVE:
return BELOW;
case ABOVE_EQUAL:
return BELOW_EQUAL;
default:
UNIMPLEMENTED();
return EQUAL;
}
}
static void EmitBranchOnCondition(FlowGraphCompiler* compiler,
Condition true_condition,
BranchLabels labels) {
if (labels.fall_through == labels.false_label) {
// If the next block is the false successor, fall through to it.
__ j(true_condition, labels.true_label);
} else {
// If the next block is not the false successor, branch to it.
Condition false_condition = NegateCondition(true_condition);
__ j(false_condition, labels.false_label);
// Fall through or jump to the true successor.
if (labels.fall_through != labels.true_label) {
__ jmp(labels.true_label);
}
}
}
static Condition EmitInt64ComparisonOp(FlowGraphCompiler* compiler,
const LocationSummary& locs,
Token::Kind kind) {
Location left = locs.in(0);
Location right = locs.in(1);
ASSERT(!left.IsConstant() || !right.IsConstant());
Condition true_condition = TokenKindToIntCondition(kind);
if (left.IsConstant() || right.IsConstant()) {
// Ensure constant is on the right.
ConstantInstr* constant = NULL;
if (left.IsConstant()) {
constant = left.constant_instruction();
Location tmp = right;
right = left;
left = tmp;
true_condition = FlipCondition(true_condition);
} else {
constant = right.constant_instruction();
}
if (constant->IsUnboxedSignedIntegerConstant()) {
__ cmpq(left.reg(),
Immediate(constant->GetUnboxedSignedIntegerConstantValue()));
} else {
ASSERT(constant->representation() == kTagged);
__ CompareObject(left.reg(), right.constant());
}
} else if (right.IsStackSlot()) {
__ cmpq(left.reg(), right.ToStackSlotAddress());
} else {
__ cmpq(left.reg(), right.reg());
}
return true_condition;
}
static Condition TokenKindToDoubleCondition(Token::Kind kind) {
switch (kind) {
case Token::kEQ:
return EQUAL;
case Token::kNE:
return NOT_EQUAL;
case Token::kLT:
return BELOW;
case Token::kGT:
return ABOVE;
case Token::kLTE:
return BELOW_EQUAL;
case Token::kGTE:
return ABOVE_EQUAL;
default:
UNREACHABLE();
return OVERFLOW;
}
}
static Condition EmitDoubleComparisonOp(FlowGraphCompiler* compiler,
const LocationSummary& locs,
Token::Kind kind,
BranchLabels labels) {
XmmRegister left = locs.in(0).fpu_reg();
XmmRegister right = locs.in(1).fpu_reg();
__ comisd(left, right);
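// comisd sets the parity flag if either operand is NaN; a NaN comparison must
// evaluate to false for every operator except !=.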
Condition true_condition = TokenKindToDoubleCondition(kind);
Label* nan_result =
(true_condition == NOT_EQUAL) ? labels.true_label : labels.false_label;
__ j(PARITY_EVEN, nan_result);
return true_condition;
}
Condition EqualityCompareInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
BranchLabels labels) {
if ((operation_cid() == kSmiCid) || (operation_cid() == kMintCid)) {
return EmitInt64ComparisonOp(compiler, *locs(), kind());
} else {
ASSERT(operation_cid() == kDoubleCid);
return EmitDoubleComparisonOp(compiler, *locs(), kind(), labels);
}
}
void ComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Label is_true, is_false;
BranchLabels labels = {&is_true, &is_false, &is_false};
Condition true_condition = EmitComparisonCode(compiler, labels);
if (true_condition != INVALID_CONDITION) {
EmitBranchOnCondition(compiler, true_condition, labels);
}
Register result = locs()->out(0).reg();
Label done;
__ Bind(&is_false);
__ LoadObject(result, Bool::False());
__ jmp(&done);
__ Bind(&is_true);
__ LoadObject(result, Bool::True());
__ Bind(&done);
}
void ComparisonInstr::EmitBranchCode(FlowGraphCompiler* compiler,
BranchInstr* branch) {
BranchLabels labels = compiler->CreateBranchLabels(branch);
Condition true_condition = EmitComparisonCode(compiler, labels);
if (true_condition != INVALID_CONDITION) {
EmitBranchOnCondition(compiler, true_condition, labels);
}
}
LocationSummary* TestSmiInstr::MakeLocationSummary(Zone* zone, bool opt) const {
const intptr_t kNumInputs = 2;
const intptr_t kNumTemps = 0;
LocationSummary* locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RequiresRegister());
// Only one input can be a constant operand. The case of two constant
// operands should be handled by constant propagation.
locs->set_in(1, Location::RegisterOrConstant(right()));
return locs;
}
Condition TestSmiInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
BranchLabels labels) {
Register left_reg = locs()->in(0).reg();
Location right = locs()->in(1);
if (right.IsConstant()) {
ASSERT(right.constant().IsSmi());
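// A Smi's raw pointer bits are its tagged integer value, so it can be used
// directly as the test immediate.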
const int64_t imm = reinterpret_cast<int64_t>(right.constant().raw());
__ TestImmediate(left_reg, Immediate(imm));
} else {
__ testq(left_reg, right.reg());
}
Condition true_condition = (kind() == Token::kNE) ? NOT_ZERO : ZERO;
return true_condition;
}
LocationSummary* TestCidsInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 1;
LocationSummary* locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RequiresRegister());
locs->set_temp(0, Location::RequiresRegister());
locs->set_out(0, Location::RequiresRegister());
return locs;
}
Condition TestCidsInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
BranchLabels labels) {
ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT));
Register val_reg = locs()->in(0).reg();
Register cid_reg = locs()->temp(0).reg();
Label* deopt = CanDeoptimize() ? compiler->AddDeoptStub(
deopt_id(), ICData::kDeoptTestCids,
licm_hoisted_ ? ICData::kHoisted : 0)
: NULL;
const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0;
const ZoneGrowableArray<intptr_t>& data = cid_results();
ASSERT(data[0] == kSmiCid);
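// |data| holds (cid, result) pairs; the Smi case (first pair) is handled by
// the tag test below, the remaining cids by the comparison loop.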
bool result = data[1] == true_result;
__ testq(val_reg, Immediate(kSmiTagMask));
__ j(ZERO, result ? labels.true_label : labels.false_label);
__ LoadClassId(cid_reg, val_reg);
for (intptr_t i = 2; i < data.length(); i += 2) {
const intptr_t test_cid = data[i];
ASSERT(test_cid != kSmiCid);
result = data[i + 1] == true_result;
__ cmpq(cid_reg, Immediate(test_cid));
__ j(EQUAL, result ? labels.true_label : labels.false_label);
}
// No match found: deoptimize or take the default action.
if (deopt == NULL) {
// If the cid is not in the list, jump to the opposite label from the cids
// that are in the list. These must be all the same (see asserts in the
// constructor).
Label* target = result ? labels.false_label : labels.true_label;
if (target != labels.fall_through) {
__ jmp(target);
}
} else {
__ jmp(deopt);
}
// Dummy result, as this method has already emitted the jump; there is no
// need for the caller to branch on a condition.
return INVALID_CONDITION;
}
LocationSummary* RelationalOpInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 2;
const intptr_t kNumTemps = 0;
if (operation_cid() == kDoubleCid) {
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresFpuRegister());
summary->set_in(1, Location::RequiresFpuRegister());
summary->set_out(0, Location::RequiresRegister());
return summary;
}
if (operation_cid() == kSmiCid || operation_cid() == kMintCid) {
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, Location::RegisterOrConstant(left()));
// Only one input can be a constant operand. The case of two constant
// operands should be handled by constant propagation.
summary->set_in(1, summary->in(0).IsConstant()
? Location::RequiresRegister()
: Location::RegisterOrConstant(right()));
summary->set_out(0, Location::RequiresRegister());
return summary;
}
UNREACHABLE();
return NULL;
}
Condition RelationalOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
BranchLabels labels) {
if (operation_cid() == kSmiCid || operation_cid() == kMintCid) {
return EmitInt64ComparisonOp(compiler, *locs(), kind());
} else {
ASSERT(operation_cid() == kDoubleCid);
return EmitDoubleComparisonOp(compiler, *locs(), kind(), labels);
}
}
LocationSummary* NativeCallInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
return MakeCallSummary(zone);
}
void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
SetupNative();
Register result = locs()->out(0).reg();
const intptr_t argc_tag = NativeArguments::ComputeArgcTag(function());
// All arguments are already @RSP due to preceding PushArgument()s.
ASSERT(ArgumentCount() ==
function().NumParameters() + (function().IsGeneric() ? 1 : 0));
// Push the result placeholder initialized to null.
__ PushObject(Object::null_object());
// Pass a pointer to the first argument in RAX.
__ leaq(RAX, Address(RSP, ArgumentCount() * kWordSize));
__ LoadImmediate(R10, Immediate(argc_tag));
const Code* stub;
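// With lazy linking, the native function is resolved and the call site
// patched on first invocation, so the entry is loaded through a patchable
// object pool slot.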
if (link_lazily()) {
stub = &StubCode::CallBootstrapNative();
ExternalLabel label(NativeEntry::LinkNativeCallEntry());
__ LoadNativeEntry(RBX, &label,
compiler::ObjectPoolBuilderEntry::kPatchable);
compiler->GeneratePatchableCall(token_pos(), *stub,
RawPcDescriptors::kOther, locs());
} else {
if (is_bootstrap_native()) {
stub = &StubCode::CallBootstrapNative();
} else if (is_auto_scope()) {
stub = &StubCode::CallAutoScopeNative();
} else {
stub = &StubCode::CallNoScopeNative();
}
const ExternalLabel label(reinterpret_cast<uword>(native_c_function()));
__ LoadNativeEntry(RBX, &label,
compiler::ObjectPoolBuilderEntry::kNotPatchable);
compiler->GenerateCall(token_pos(), *stub, RawPcDescriptors::kOther,
locs());
}
__ popq(result);
__ Drop(ArgumentCount()); // Drop the arguments.
}
void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register saved_fp = locs()->temp(0).reg();
Register target_address = locs()->in(TargetAddressIndex()).reg();
// Save frame pointer because we're going to update it when we enter the exit
// frame.
__ movq(saved_fp, FPREG);
// Make a space to put the return address.
__ pushq(Immediate(0));
// We need to create a dummy "exit frame". It will share the same pool pointer
// but have a null code object.
__ LoadObject(CODE_REG, Object::null_object());
__ set_constant_pool_allowed(false);
__ EnterDartFrame(
compiler::ffi::NumStackArguments(arg_locations_) * kWordSize, PP);
// Save exit frame information to enable stack walking as we are about to
// transition to Dart VM C++ code.
__ movq(Address(THR, Thread::top_exit_frame_info_offset()), FPREG);
// Align frame before entering C++ world.
if (OS::ActivationFrameAlignment() > 1) {
__ andq(SPREG, Immediate(~(OS::ActivationFrameAlignment() - 1)));
}
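// Copy the Dart arguments into their native (C ABI) locations. Stack-slot
// sources are addressed via the saved caller FP, since FPREG now points at
// the new exit frame and SP has been realigned.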
for (intptr_t i = 0, n = NativeArgCount(); i < n; ++i) {
Location origin = locs()->in(i);
Location target = arg_locations_[i];
if (target.IsStackSlot()) {
if (origin.IsRegister()) {
__ movq(target.ToStackSlotAddress(), origin.reg());
} else if (origin.IsFpuRegister()) {
__ movq(TMP, origin.fpu_reg());
__ movq(target.ToStackSlotAddress(), TMP);
} else if (origin.IsStackSlot() || origin.IsDoubleStackSlot()) {
// The base register cannot be SPREG because we've moved it.
ASSERT(origin.base_reg() == FPREG);
__ movq(TMP, Address(saved_fp, origin.ToStackSlotOffset()));
__ movq(target.ToStackSlotAddress(), TMP);
}
} else {
ASSERT(origin.Equals(target));
}
}
// Mark that the thread is executing VM code.
__ movq(Assembler::VMTagAddress(), target_address);
// We need to copy the return address up into the dummy stack frame so the
// stack walker will know which safepoint to use.
#if defined(TARGET_OS_WINDOWS)
constexpr intptr_t kCallSequenceLength = 10;
#else
constexpr intptr_t kCallSequenceLength = 6;
#endif
// RIP points to the *next* instruction (the movq), so adding
// kCallSequenceLength (the size of the movq and call below) yields the
// address just past the call, which is stored as the saved PC of the frame.
__ leaq(TMP, Address::AddressRIPRelative(kCallSequenceLength));
const intptr_t call_sequence_start = __ CodeSize();
__ movq(Address(FPREG, kSavedCallerPcSlotFromFp * kWordSize), TMP);
__ CallCFunction(target_address);
ASSERT(__ CodeSize() - call_sequence_start == kCallSequenceLength);
compiler->EmitCallsiteMetadata(TokenPosition::kNoSource, DeoptId::kNone,
RawPcDescriptors::Kind::kOther, locs());
// Mark that the thread is executing Dart code.
__ movq(Assembler::VMTagAddress(), Immediate(VMTag::kDartCompiledTagId));
// Reset exit frame information in the Thread structure.
__ movq(Address(THR, Thread::top_exit_frame_info_offset()), Immediate(0));
// Although PP is a callee-saved register, it may have been moved by the GC.
__ LeaveDartFrame(compiler::kRestoreCallerPP);
// Restore the global object pool after returning from runtime (old space is
// moving, so the GOP could have been relocated).
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
__ movq(PP, Address(THR, Thread::global_object_pool_offset()));
}
__ set_constant_pool_allowed(true);
// Instead of returning to the "fake" return address, we just pop it.
__ popq(TMP);
}
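// Returns true when |index| is a Smi constant whose scaled element offset
// (including the data offset) fits in a signed 32-bit displacement, so it can
// be folded into the addressing mode.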
static bool CanBeImmediateIndex(Value* index, intptr_t cid) {
if (!index->definition()->IsConstant()) return false;
const Object& constant = index->definition()->AsConstant()->value();
if (!constant.IsSmi()) return false;
const Smi& smi_const = Smi::Cast(constant);
const intptr_t scale = Instance::ElementSizeFor(cid);
const intptr_t data_offset = Instance::DataOffsetFor(cid);
const int64_t disp = smi_const.AsInt64Value() * scale + data_offset;
return Utils::IsInt(32, disp);
}
LocationSummary* OneByteStringFromCharCodeInstr::MakeLocationSummary(
Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
// TODO(fschneider): Allow immediate operands for the char code.
return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
LocationSummary::kNoCall);
}
void OneByteStringFromCharCodeInstr::EmitNativeCode(
FlowGraphCompiler* compiler) {
ASSERT(compiler->is_optimizing());
Register char_code = locs()->in(0).reg();
Register result = locs()->out(0).reg();
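// Index into the VM's predefined symbols table with the (Smi) char code to
// fetch the canonical one-character symbol.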
__ movq(result, Address(THR, Thread::predefined_symbols_address_offset()));
__ movq(result, Address(result, char_code,
TIMES_HALF_WORD_SIZE, // Char code is a smi.
Symbols::kNullCharCodeSymbolOffset * kWordSize));
}
LocationSummary* StringToCharCodeInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
LocationSummary::kNoCall);
}
void StringToCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(cid_ == kOneByteStringCid);
Register str = locs()->in(0).reg();
Register result = locs()->out(0).reg();
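// Return the char code (as a Smi) of a one-character string, or Smi -1 if the
// string's length is not 1.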
Label is_one, done;
__ movq(result, FieldAddress(str, String::length_offset()));
__ cmpq(result, Immediate(Smi::RawValue(1)));
__ j(EQUAL, &is_one, Assembler::kNearJump);
__ movq(result, Immediate(Smi::RawValue(-1)));
__ jmp(&done);
__ Bind(&is_one);
__ movzxb(result, FieldAddress(str, OneByteString::data_offset()));
__ SmiTag(result);
__ Bind(&done);
}
LocationSummary* StringInterpolateInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
summary->set_in(0, Location::RegisterLocation(RAX));
summary->set_out(0, Location::RegisterLocation(RAX));
return summary;
}
void StringInterpolateInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register array = locs()->in(0).reg();
__ pushq(array);
const int kTypeArgsLen = 0;
const int kNumberOfArguments = 1;
const Array& kNoArgumentNames = Object::null_array();
ArgumentsInfo args_info(kTypeArgsLen, kNumberOfArguments, kNoArgumentNames);
compiler->GenerateStaticCall(deopt_id(), token_pos(), CallFunction(),
args_info, locs(), ICData::Handle(),
ICData::kStatic);
ASSERT(locs()->out(0).reg() == RAX);
}
LocationSummary* LoadUntaggedInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
LocationSummary::kNoCall);
}
void LoadUntaggedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register obj = locs()->in(0).reg();
Register result = locs()->out(0).reg();
if (object()->definition()->representation() == kUntagged) {
__ movq(result, Address(obj, offset()));
} else {
ASSERT(object()->definition()->representation() == kTagged);
__ movq(result, FieldAddress(obj, offset()));
}
}
LocationSummary* LoadClassIdInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
LocationSummary::kNoCall);
}
void LoadClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Register object = locs()->in(0).reg();
const Register result = locs()->out(0).reg();
const AbstractType& value_type = *this->object()->Type()->ToAbstractType();
if (CompileType::Smi().IsAssignableTo(value_type) ||
value_type.IsTypeParameter()) {
// We don't use Assembler::LoadTaggedClassIdMayBeSmi() here (which uses a
// conditional move instead) because it is slower, probably because branch
// prediction usually works just fine in this case.
Label load, done;
__ testq(object, Immediate(kSmiTagMask));
__ j(NOT_ZERO, &load, Assembler::kNearJump);
__ LoadImmediate(result, Immediate(Smi::RawValue(kSmiCid)));
__ jmp(&done);
__ Bind(&load);
__ LoadClassId(result, object);
__ SmiTag(result);
__ Bind(&done);
} else {
__ LoadClassId(result, object);
__ SmiTag(result);
}
}
class BoxAllocationSlowPath : public TemplateSlowPathCode<Instruction> {
public:
BoxAllocationSlowPath(Instruction* instruction,
const Class& cls,
Register result)
: TemplateSlowPathCode(instruction), cls_(cls), result_(result) {}
virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
if (Assembler::EmittingComments()) {
__ Comment("%s slow path allocation of %s", instruction()->DebugName(),
String::Handle(cls_.ScrubbedName()).ToCString());
}
__ Bind(entry_label());
const Code& stub = Code::ZoneHandle(
compiler->zone(), StubCode::GetAllocationStubForClass(cls_));
LocationSummary* locs = instruction()->locs();
locs->live_registers()->Remove(Location::RegisterLocation(result_));
compiler->SaveLiveRegisters(locs);
compiler->GenerateCall(TokenPosition::kNoSource, // No token position.
stub, RawPcDescriptors::kOther, locs);
__ MoveRegister(result_, RAX);
compiler->RestoreLiveRegisters(locs);
__ jmp(exit_label());
}
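// Allocates an instance of |cls| into |result|: tries inline allocation and
// falls back to the allocation stub via a shared slow path (or the intrinsic
// slow-path label when in intrinsic mode).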
static void Allocate(FlowGraphCompiler* compiler,
Instruction* instruction,
const Class& cls,
Register result,
Register temp) {
if (compiler->intrinsic_mode()) {
__ TryAllocate(cls, compiler->intrinsic_slow_path_label(),
Assembler::kFarJump, result, temp);
} else {
BoxAllocationSlowPath* slow_path =
new BoxAllocationSlowPath(instruction, cls, result);
compiler->AddSlowPathCode(slow_path);
__ TryAllocate(cls, slow_path->entry_label(), Assembler::kFarJump, result,
temp);
__ Bind(slow_path->exit_label());
}
}
private:
const Class& cls_;
const Register result_;
};
CompileType LoadIndexedInstr::ComputeType() const {
switch (class_id_) {
case kArrayCid:
case kImmutableArrayCid:
return CompileType::Dynamic();
case kTypedDataFloat32ArrayCid:
case kTypedDataFloat64ArrayCid:
return CompileType::FromCid(kDoubleCid);
case kTypedDataFloat32x4ArrayCid:
return CompileType::FromCid(kFloat32x4Cid);
case kTypedDataInt32x4ArrayCid:
return CompileType::FromCid(kInt32x4Cid);
case kTypedDataFloat64x2ArrayCid:
return CompileType::FromCid(kFloat64x2Cid);
case kTypedDataInt8ArrayCid:
case kTypedDataUint8ArrayCid:
case kTypedDataUint8ClampedArrayCid:
case kExternalTypedDataUint8ArrayCid:
case kExternalTypedDataUint8ClampedArrayCid:
case kTypedDataInt16ArrayCid:
case kTypedDataUint16ArrayCid:
case kOneByteStringCid:
case kTwoByteStringCid:
case kExternalOneByteStringCid:
case kExternalTwoByteStringCid:
case kTypedDataInt32ArrayCid:
case kTypedDataUint32ArrayCid:
return CompileType::FromCid(kSmiCid);
case kTypedDataInt64ArrayCid:
case kTypedDataUint64ArrayCid:
return CompileType::Int();
default:
UNIMPLEMENTED();
return CompileType::Dynamic();
}
}
Representation LoadIndexedInstr::representation() const {
switch (class_id_) {
case kArrayCid:
case kImmutableArrayCid:
case kTypedDataInt8ArrayCid:
case kTypedDataUint8ArrayCid:
case kTypedDataUint8ClampedArrayCid:
case kExternalTypedDataUint8ArrayCid:
case kExternalTypedDataUint8ClampedArrayCid:
case kTypedDataInt16ArrayCid:
case kTypedDataUint16ArrayCid:
case kOneByteStringCid:
case kTwoByteStringCid:
case kExternalOneByteStringCid:
case kExternalTwoByteStringCid:
return kTagged;
case kTypedDataInt32ArrayCid:
return kUnboxedInt32;
case kTypedDataUint32ArrayCid:
return kUnboxedUint32;
case kTypedDataInt64ArrayCid:
case kTypedDataUint64ArrayCid:
return kUnboxedInt64;
case kTypedDataFloat32ArrayCid:
case kTypedDataFloat64ArrayCid:
return kUnboxedDouble;
case kTypedDataInt32x4ArrayCid:
return kUnboxedInt32x4;
case kTypedDataFloat32x4ArrayCid:
return kUnboxedFloat32x4;
case kTypedDataFloat64x2ArrayCid:
return kUnboxedFloat64x2;
default:
UNIMPLEMENTED();
return kTagged;
}
}
LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 2;
const intptr_t kNumTemps = 0;
LocationSummary* locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RequiresRegister());
// The smi index is either untagged (element size == 1), or it is left smi
// tagged (for all element sizes > 1).
if (index_scale() == 1) {
locs->set_in(1,
CanBeImmediateIndex(index(), class_id())
? Location::Constant(index()->definition()->AsConstant())
: Location::WritableRegister());
} else {
locs->set_in(1,
CanBeImmediateIndex(index(), class_id())
? Location::Constant(index()->definition()->AsConstant())
: Location::RequiresRegister());
}
if ((representation() == kUnboxedDouble) ||
(representation() == kUnboxedFloat32x4) ||
(representation() == kUnboxedInt32x4) ||
(representation() == kUnboxedFloat64x2)) {
locs->set_out(0, Location::RequiresFpuRegister());
} else {
locs->set_out(0, Location::RequiresRegister());
}
return locs;
}
void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// The array register points to the backing store for external arrays.
const Register array = locs()->in(0).reg();
const Location index = locs()->in(1);
Address element_address =
index.IsRegister()
? Assembler::ElementAddressForRegIndex(
IsExternal(), class_id(), index_scale(), array, index.reg())
: Assembler::ElementAddressForIntIndex(
IsExternal(), class_id(), index_scale(), array,
Smi::Cast(index.constant()).Value());
if ((representation() == kUnboxedDouble) ||
(representation() == kUnboxedFloat32x4) ||
(representation() == kUnboxedInt32x4) ||
(representation() == kUnboxedFloat64x2)) {
if ((index_scale() == 1) && index.IsRegister()) {
__ SmiUntag(index.reg());
}
XmmRegister result = locs()->out(0).fpu_reg();
if (class_id() == kTypedDataFloat32ArrayCid) {
// Load single precision float.
__ movss(result, element_address);
} else if (class_id() == kTypedDataFloat64ArrayCid) {
__ movsd(result, element_address);
} else {
ASSERT((class_id() == kTypedDataInt32x4ArrayCid) ||
(class_id() == kTypedDataFloat32x4ArrayCid) ||
(class_id() == kTypedDataFloat64x2ArrayCid));
__ movups(result, element_address);
}
return;
}
if ((representation() == kUnboxedUint32) ||
(representation() == kUnboxedInt32)) {
if ((index_scale() == 1) && index.IsRegister()) {
__ SmiUntag(index.reg());
}
Register result = locs()->out(0).reg();
switch (class_id()) {
case kTypedDataInt32ArrayCid:
ASSERT(representation() == kUnboxedInt32);
__ movsxd(result, element_address);
break;
case kTypedDataUint32ArrayCid:
ASSERT(representation() == kUnboxedUint32);
__ movl(result, element_address);
break;
default:
UNREACHABLE();
}
return;
}
if (representation() == kUnboxedInt64) {
ASSERT(class_id() == kTypedDataInt64ArrayCid ||
class_id() == kTypedDataUint64ArrayCid);
if ((index_scale() == 1) && index.IsRegister()) {
__ SmiUntag(index.reg());
}
Register result = locs()->out(0).reg();
__ movq(result, element_address);
return;
}
ASSERT(representation() == kTagged);
if ((index_scale() == 1) && index.IsRegister()) {
__ SmiUntag(index.reg());
}
Register result = locs()->out(0).reg();
switch (class_id()) {
case kTypedDataInt8ArrayCid:
__ movsxb(result, element_address);
__ SmiTag(result);
break;
case kTypedDataUint8ArrayCid:
case kTypedDataUint8ClampedArrayCid:
case kExternalTypedDataUint8ArrayCid:
case kExternalTypedDataUint8ClampedArrayCid:
case kOneByteStringCid:
case kExternalOneByteStringCid:
__ movzxb(result, element_address);
__ SmiTag(result);
break;
case kTypedDataInt16ArrayCid:
__ movsxw(result, element_address);
__ SmiTag(result);
break;
case kTypedDataUint16ArrayCid:
case kTwoByteStringCid:
case kExternalTwoByteStringCid:
__ movzxw(result, element_address);
__ SmiTag(result);
break;
default:
ASSERT((class_id() == kArrayCid) || (class_id() == kImmutableArrayCid));
__ movq(result, element_address);
break;
}
}
LocationSummary* LoadCodeUnitsInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 2;
const intptr_t kNumTemps = 0;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresRegister());
// The smi index is either untagged (element size == 1), or it is left smi
// tagged (for all element sizes > 1).
summary->set_in(1, index_scale() == 1 ? Location::WritableRegister()
: Location::RequiresRegister());
summary->set_out(0, Location::RequiresRegister());
return summary;
}
void LoadCodeUnitsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// The string register points to the backing store for external strings.
const Register str = locs()->in(0).reg();
const Location index = locs()->in(1);
Address element_address = Assembler::ElementAddressForRegIndex(
IsExternal(), class_id(), index_scale(), str, index.reg());
if ((index_scale() == 1)) {
__ SmiUntag(index.reg());
}
Register result = locs()->out(0).reg();
switch (class_id()) {
case kOneByteStringCid:
case kExternalOneByteStringCid:
switch (element_count()) {
case 1:
__ movzxb(result, element_address);
break;
case 2:
__ movzxw(result, element_address);
break;
case 4:
__ movl(result, element_address);
break;
default:
UNREACHABLE();
}
__ SmiTag(result);
break;
case kTwoByteStringCid:
case kExternalTwoByteStringCid:
switch (element_count()) {
case 1:
__ movzxw(result, element_address);
break;
case 2:
__ movl(result, element_address);
break;
default:
UNREACHABLE();
}
__ SmiTag(result);
break;
default:
UNREACHABLE();
break;
}
}
Representation StoreIndexedInstr::RequiredInputRepresentation(
intptr_t idx) const {
if (idx == 0) return kNoRepresentation;
if (idx == 1) return kTagged;
ASSERT(idx == 2);
switch (class_id_) {
case kArrayCid:
case kOneByteStringCid:
case kTypedDataInt8ArrayCid:
case kTypedDataUint8ArrayCid:
case kExternalTypedDataUint8ArrayCid:
case kTypedDataUint8ClampedArrayCid:
case kExternalTypedDataUint8ClampedArrayCid:
case kTypedDataInt16ArrayCid:
case kTypedDataUint16ArrayCid:
return kTagged;
case kTypedDataInt32ArrayCid:
return kUnboxedInt32;
case kTypedDataUint32ArrayCid:
return kUnboxedUint32;
case kTypedDataInt64ArrayCid:
case kTypedDataUint64ArrayCid:
return kUnboxedInt64;
case kTypedDataFloat32ArrayCid:
case kTypedDataFloat64ArrayCid:
return kUnboxedDouble;
case kTypedDataFloat32x4ArrayCid:
return kUnboxedFloat32x4;
case kTypedDataInt32x4ArrayCid:
return kUnboxedInt32x4;
case kTypedDataFloat64x2ArrayCid:
return kUnboxedFloat64x2;
default:
UNIMPLEMENTED();
return kTagged;
}
}
LocationSummary* StoreIndexedInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 3;
const intptr_t kNumTemps =
class_id() == kArrayCid && ShouldEmitStoreBarrier() ? 1 : 0;
LocationSummary* locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RequiresRegister());
// The smi index is either untagged (element size == 1), or it is left smi
// tagged (for all element sizes > 1).
if (index_scale() == 1) {
locs->set_in(1,
CanBeImmediateIndex(index(), class_id())
? Location::Constant(index()->definition()->AsConstant())
: Location::WritableRegister());
} else {
locs->set_in(1,
CanBeImmediateIndex(index(), class_id())
? Location::Constant(index()->definition()->AsConstant())
: Location::RequiresRegister());
}
switch (class_id()) {
case kArrayCid:
locs->set_in(2, ShouldEmitStoreBarrier()
? Location::RegisterLocation(kWriteBarrierValueReg)
: Location::RegisterOrConstant(value()));
if (ShouldEmitStoreBarrier()) {
locs->set_in(0, Location::RegisterLocation(kWriteBarrierObjectReg));
locs->set_temp(0, Location::RegisterLocation(kWriteBarrierSlotReg));
}
break;
case kExternalTypedDataUint8ArrayCid:
case kExternalTypedDataUint8ClampedArrayCid:
case kTypedDataInt8ArrayCid:
case kTypedDataUint8ArrayCid:
case kTypedDataUint8ClampedArrayCid:
case kOneByteStringCid:
// TODO(fschneider): Add location constraint for byte registers (RAX,
// RBX, RCX, RDX) instead of using a fixed register.
locs->set_in(2, Location::FixedRegisterOrSmiConstant(value(), RAX));
break;
case kTypedDataInt16ArrayCid:
case kTypedDataUint16ArrayCid:
// Writable register because the value must be untagged before storing.
locs->set_in(2, Location::WritableRegister());
break;
case kTypedDataInt32ArrayCid:
case kTypedDataUint32ArrayCid:
case kTypedDataInt64ArrayCid:
case kTypedDataUint64ArrayCid:
locs->set_in(2, Location::RequiresRegister());
break;
case kTypedDataFloat32ArrayCid:
case kTypedDataFloat64ArrayCid:
// TODO(srdjan): Support Float64 constants.
locs->set_in(2, Location::RequiresFpuRegister());
break;
case kTypedDataInt32x4ArrayCid:
case kTypedDataFloat64x2ArrayCid:
case kTypedDataFloat32x4ArrayCid:
locs->set_in(2, Location::RequiresFpuRegister());
break;
default:
UNREACHABLE();
return NULL;
}
return locs;
}
void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// The array register points to the backing store for external arrays.
const Register array = locs()->in(0).reg();
const Location index = locs()->in(1);
Address element_address =
index.IsRegister()
? Assembler::ElementAddressForRegIndex(
IsExternal(), class_id(), index_scale(), array, index.reg())
: Assembler::ElementAddressForIntIndex(
IsExternal(), class_id(), index_scale(), array,
Smi::Cast(index.constant()).Value());
if ((index_scale() == 1) && index.IsRegister()) {
__ SmiUntag(index.reg());
}
switch (class_id()) {
case kArrayCid:
if (ShouldEmitStoreBarrier()) {
Register value = locs()->in(2).reg();
Register slot = locs()->temp(0).reg();
__ leaq(slot, element_address);
__ StoreIntoArray(array, slot, value, CanValueBeSmi());
} else if (locs()->in(2).IsConstant()) {
const Object& constant = locs()->in(2).constant();
__ StoreIntoObjectNoBarrier(array, element_address, constant);
} else {
Register value = locs()->in(2).reg();
__ StoreIntoObjectNoBarrier(array, element_address, value);
}
break;
case kTypedDataInt8ArrayCid:
case kTypedDataUint8ArrayCid:
case kExternalTypedDataUint8ArrayCid:
case kOneByteStringCid:
if (locs()->in(2).IsConstant()) {
const Smi& constant = Smi::Cast(locs()->in(2).constant());
__ movb(element_address,
Immediate(static_cast<int8_t>(constant.Value())));
} else {
ASSERT(locs()->in(2).reg() == RAX);
__ SmiUntag(RAX);
__ movb(element_address, RAX);
}
break;
case kTypedDataUint8ClampedArrayCid:
case kExternalTypedDataUint8ClampedArrayCid: {
if (locs()->in(2).IsConstant()) {
const Smi& constant = Smi::Cast(locs()->in(2).constant());
intptr_t value = constant.Value();
// Clamp to 0x0 or 0xFF respectively.
if (value > 0xFF) {
value = 0xFF;
} else if (value < 0) {
value = 0;
}
__ movb(element_address, Immediate(static_cast<int8_t>(value)));
} else {
ASSERT(locs()->in(2).reg() == RAX);
Label store_value, store_0xff;
__ SmiUntag(RAX);
__ CompareImmediate(RAX, Immediate(0xFF));
__ j(BELOW_EQUAL, &store_value, Assembler::kNearJump);
// Clamp to 0x0 or 0xFF respectively.
__ j(GREATER, &store_0xff);
__ xorq(RAX, RAX);
__ jmp(&store_value, Assembler::kNearJump);
__ Bind(&store_0xff);
__ LoadImmediate(RAX, Immediate(0xFF));
__ Bind(&store_value);
__ movb(element_address, RAX);
}
break;
}
case kTypedDataInt16ArrayCid:
case kTypedDataUint16ArrayCid: {
Register value = locs()->in(2).reg();
__ SmiUntag(value);
__ movw(element_address, value);
break;
}
case kTypedDataInt32ArrayCid:
case kTypedDataUint32ArrayCid: {
Register value = locs()->in(2).reg();
__ movl(element_address, value);
break;
}
case kTypedDataInt64ArrayCid:
case kTypedDataUint64ArrayCid: {
Register value = locs()->in(2).reg();
__ movq(element_address, value);
break;
}
case kTypedDataFloat32ArrayCid:
__ movss(element_address, locs()->in(2).fpu_reg());
break;
case kTypedDataFloat64ArrayCid:
__ movsd(element_address, locs()->in(2).fpu_reg());
break;
case kTypedDataInt32x4ArrayCid:
case kTypedDataFloat64x2ArrayCid:
case kTypedDataFloat32x4ArrayCid:
__ movups(element_address, locs()->in(2).fpu_reg());
break;
default:
UNREACHABLE();
}
}
LocationSummary* GuardFieldClassInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t value_cid = value()->Type()->ToCid();
const intptr_t field_cid = field().guarded_cid();
const bool emit_full_guard = !opt || (field_cid == kIllegalCid);
const bool needs_value_cid_temp_reg =
(value_cid == kDynamicCid) && (emit_full_guard || (field_cid != kSmiCid));
const bool needs_field_temp_reg = emit_full_guard;
intptr_t num_temps = 0;
if (needs_value_cid_temp_reg) {
num_temps++;
}
if (needs_field_temp_reg) {
num_temps++;
}
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, num_temps, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresRegister());
for (intptr_t i = 0; i < num_temps; i++) {
summary->set_temp(i, Location::RequiresRegister());
}
return summary;
}
void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(sizeof(classid_t) == kInt16Size);
const intptr_t value_cid = value()->Type()->ToCid();
const intptr_t field_cid = field().guarded_cid();
const intptr_t nullability = field().is_nullable() ? kNullCid : kIllegalCid;
if (field_cid == kDynamicCid) {
if (Compiler::IsBackgroundCompilation()) {
// Field state changed while compiling.
Compiler::AbortBackgroundCompilation(
deopt_id(),
"GuardFieldClassInstr: field state changed while compiling");
}
ASSERT(!compiler->is_optimizing());
return; // Nothing to emit.
}
const bool emit_full_guard =
!compiler->is_optimizing() || (field_cid == kIllegalCid);
const bool needs_value_cid_temp_reg =
(value_cid == kDynamicCid) && (emit_full_guard || (field_cid != kSmiCid));
const bool needs_field_temp_reg = emit_full_guard;
const Register value_reg = locs()->in(0).reg();
const Register value_cid_reg =
needs_value_cid_temp_reg ? locs()->temp(0).reg() : kNoRegister;
const Register field_reg = needs_field_temp_reg
? locs()->temp(locs()->temp_count() - 1).reg()
: kNoRegister;
Label ok, fail_label;
Label* deopt =
compiler->is_optimizing()
? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField)
: NULL;
Label* fail = (deopt != NULL) ? deopt : &fail_label;
if (emit_full_guard) {
__ LoadObject(field_reg, Field::ZoneHandle(field().Original()));
FieldAddress field_cid_operand(field_reg, Field::guarded_cid_offset());
FieldAddress field_nullability_operand(field_reg,
Field::is_nullable_offset());
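// The field's is_nullable slot holds kNullCid when the field is nullable, so
// a null value's cid can be compared against it directly.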
if (value_cid == kDynamicCid) {
LoadValueCid(compiler, value_cid_reg, value_reg);
__ cmpw(value_cid_reg, field_cid_operand);
__ j(EQUAL, &ok);
__ cmpw(value_cid_reg, field_nullability_operand);
} else if (value_cid == kNullCid) {
__ cmpw(field_nullability_operand, Immediate(value_cid));
} else {
__ cmpw(field_cid_operand, Immediate(value_cid));
}
__ j(EQUAL, &ok);
// Check if the tracked state of the guarded field can be initialized
// inline. If the field needs a length check, or requires type arguments and
// class hierarchy processing for exactness tracking, then we fall through
// into the runtime, which is responsible for computing the offset of the
// length field based on the class id.
const bool is_complicated_field =
field().needs_length_check() ||
field().static_type_exactness_state().IsUninitialized();
if (!is_complicated_field) {
// An uninitialized field can be handled inline. Check if the
// field is still uninitialized.
__ cmpw(field_cid_operand, Immediate(kIllegalCid));
__ j(NOT_EQUAL, fail);
if (value_cid == kDynamicCid) {
__ movw(field_cid_operand, value_cid_reg);
__ movw(field_nullability_operand, value_cid_reg);
} else {
ASSERT(field_reg != kNoRegister);
__ movw(field_cid_operand, Immediate(value_cid));
__ movw(field_nullability_operand, Immediate(value_cid));
}
if (deopt == NULL) {
ASSERT(!compiler->is_optimizing());
__ jmp(&ok);
}
}
if (deopt == NULL) {
ASSERT(!compiler->is_optimizing());
__ Bind(fail);
__ cmpw(FieldAddress(field_reg, Field::guarded_cid_offset()),
Immediate(kDynamicCid));
__ j(EQUAL, &ok);
__ pushq(field_reg);
__ pushq(value_reg);
__ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
__ Drop(2); // Drop the field and the value.
}
} else {
ASSERT(compiler->is_optimizing());
ASSERT(deopt != NULL);
// Field guard class has been initialized and is known.
if (value_cid == kDynamicCid) {
// Value's class id is not known.
__ testq(value_reg, Immediate(kSmiTagMask));
if (field_cid != kSmiCid) {
__ j(ZERO, fail);
__ LoadClassId(value_cid_reg, value_reg);
__ CompareImmediate(value_cid_reg, Immediate(field_cid));
}
if (field().is_nullable() && (field_cid != kNullCid)) {
__ j(EQUAL, &ok);
__ CompareObject(value_reg, Object::null_object());
}
__ j(NOT_EQUAL, fail);
} else {
// Both value's and field's class id is known.
ASSERT((value_cid != field_cid) && (value_cid != nullability));
__ jmp(fail);
}
}
__ Bind(&ok);
}
LocationSummary* GuardFieldLengthInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
if (!opt || (field().guarded_list_length() == Field::kUnknownFixedLength)) {
const intptr_t kNumTemps = 3;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresRegister());
// We need temporaries for field object, length offset and expected length.
summary->set_temp(0, Location::RequiresRegister());
summary->set_temp(1, Location::RequiresRegister());
summary->set_temp(2, Location::RequiresRegister());
return summary;
} else {
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, 0, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresRegister());
return summary;
}
UNREACHABLE();
}
void GuardFieldLengthInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (field().guarded_list_length() == Field::kNoFixedLength) {
if (Compiler::IsBackgroundCompilation()) {
// Field state changed while compiling.
Compiler::AbortBackgroundCompilation(
deopt_id(),
"GuardFieldLengthInstr: field state changed while compiling");
}
ASSERT(!compiler->is_optimizing());
return; // Nothing to emit.
}
Label* deopt =
compiler->is_optimizing()
? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField)
: NULL;
const Register value_reg = locs()->in(0).reg();
if (!compiler->is_optimizing() ||
(field().guarded_list_length() == Field::kUnknownFixedLength)) {
const Register field_reg = locs()->temp(0).reg();
const Register offset_reg = locs()->temp(1).reg();
const Register length_reg = locs()->temp(2).reg();
Label ok;
__ LoadObject(field_reg, Field::ZoneHandle(field().Original()));
__ movsxb(
offset_reg,
FieldAddress(field_reg,
Field::guarded_list_length_in_object_offset_offset()));
__ movq(length_reg,
FieldAddress(field_reg, Field::guarded_list_length_offset()));
__ cmpq(offset_reg, Immediate(0));
__ j(NEGATIVE, &ok);
// Load the length from the value. GuardFieldClass already verified that
// the value's class matches the guarded class id of the field.
// offset_reg contains the offset already corrected by -kHeapObjectTag,
// which is why we use Address instead of FieldAddress.
__ cmpq(length_reg, Address(value_reg, offset_reg, TIMES_1, 0));
if (deopt == NULL) {
__ j(EQUAL, &ok);
__ pushq(field_reg);
__ pushq(value_reg);
__ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
__ Drop(2); // Drop the field and the value.
} else {
__ j(NOT_EQUAL, deopt);
}
__ Bind(&ok);
} else {
ASSERT(compiler->is_optimizing());
ASSERT(field().guarded_list_length() >= 0);
ASSERT(field().guarded_list_length_in_object_offset() !=
Field::kUnknownLengthOffset);
__ CompareImmediate(
FieldAddress(value_reg, field().guarded_list_length_in_object_offset()),
Immediate(Smi::RawValue(field().guarded_list_length())));
__ j(NOT_EQUAL, deopt);
}
}
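// GuardFieldType verifies the exactness guard of the field: the type
// arguments of the stored value must match the type arguments of the field's
// static type. Mismatches either deoptimize (optimized code) or call the
// runtime to update the field state (unoptimized code).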
LocationSummary* GuardFieldTypeInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 1;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresRegister());
summary->set_temp(0, Location::RequiresRegister());
return summary;
}
void GuardFieldTypeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Should never emit GuardFieldType for fields that are marked as NotTracking.
ASSERT(field().static_type_exactness_state().IsTracking());
if (!field().static_type_exactness_state().NeedsFieldGuard()) {
// Nothing to do: we only need to perform checks for trivially invariant
// fields. If we are optimizing, the Canonicalize pass should have removed
// this instruction.
if (Compiler::IsBackgroundCompilation()) {
Compiler::AbortBackgroundCompilation(
deopt_id(),
"GuardFieldTypeInstr: field state changed during compilation");
}
ASSERT(!compiler->is_optimizing());
return;
}
Label* deopt =
compiler->is_optimizing()
? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField)
: NULL;
Label ok;
const Register value_reg = locs()->in(0).reg();
const Register temp = locs()->temp(0).reg();
// Skip null values for nullable fields.
if (!compiler->is_optimizing() || field().is_nullable()) {
__ CompareObject(value_reg, Object::Handle());
__ j(EQUAL, &ok);
}
// Get the state.
const Field& original =
Field::ZoneHandle(compiler->zone(), field().Original());
__ LoadObject(temp, original);
__ movsxb(temp,
FieldAddress(temp, Field::static_type_exactness_state_offset()));
if (!compiler->is_optimizing()) {
// Check if the field requires checking (it is in the uninitialized or
// trivially exact state).
__ cmpq(temp, Immediate(StaticTypeExactnessState::kUninitialized));
__ j(LESS, &ok);
}
Label call_runtime;
if (field().static_type_exactness_state().IsUninitialized()) {
// Can't initialize the field state inline in optimized code.
__ cmpq(temp, Immediate(StaticTypeExactnessState::kUninitialized));
__ j(EQUAL, compiler->is_optimizing() ? deopt : &call_runtime);
}
// At this point temp is known to hold the type arguments field offset in
// words (the trivially-exact state encodes this offset).
__ movq(temp, FieldAddress(value_reg, temp, TIMES_8, 0));
__ CompareObject(temp, TypeArguments::ZoneHandle(
compiler->zone(),
AbstractType::Handle(field().type()).arguments()));
if (deopt != nullptr) {
__ j(NOT_EQUAL, deopt);
} else {
__ j(EQUAL, &ok);
__ Bind(&call_runtime);
__ PushObject(original);
__ pushq(value_reg);
__ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
__ Drop(2);
}
__ Bind(&ok);
}
LocationSummary* StoreInstanceFieldInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 2;
const intptr_t kNumTemps =
(IsUnboxedStore() && opt) ? 2 : ((IsPotentialUnboxedStore()) ? 3 : 0);
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps,
((IsUnboxedStore() && opt && is_initialization()) ||
IsPotentialUnboxedStore())
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresRegister());
if (IsUnboxedStore() && opt) {
summary->set_in(1, Location::RequiresFpuRegister());
summary->set_temp(0, Location::RequiresRegister());
summary->set_temp(1, Location::RequiresRegister());
} else if (IsPotentialUnboxedStore()) {
summary->set_in(1, ShouldEmitStoreBarrier() ? Location::WritableRegister()
: Location::RequiresRegister());
summary->set_temp(0, Location::RequiresRegister());
summary->set_temp(1, Location::RequiresRegister());
summary->set_temp(2, opt ? Location::RequiresFpuRegister()
: Location::FpuRegisterLocation(XMM1));
} else {
summary->set_in(1, ShouldEmitStoreBarrier()
? Location::RegisterLocation(kWriteBarrierValueReg)
: Location::RegisterOrConstant(value()));
}
return summary;
}
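// Loads the mutable box stored in the field at [instance_reg + offset] into
// box_reg. If the field is still null, allocates a fresh box of class cls via
// the allocation slow path and stores it into the field with a write barrier.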
static void EnsureMutableBox(FlowGraphCompiler* compiler,
StoreInstanceFieldInstr* instruction,
Register box_reg,
const Class& cls,
Register instance_reg,
intptr_t offset,
Register temp) {
Label done;
__ movq(box_reg, FieldAddress(instance_reg, offset));
__ CompareObject(box_reg, Object::null_object());
__ j(NOT_EQUAL, &done);
BoxAllocationSlowPath::Allocate(compiler, instruction, cls, box_reg, temp);
__ movq(temp, box_reg);
__ StoreIntoObject(instance_reg, FieldAddress(instance_reg, offset), temp,
Assembler::kValueIsNotSmi);
__ Bind(&done);
}
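// Stores take one of three forms: a fully unboxed store into a mutable box
// (optimized code with a known unboxed field), a potentially unboxed store
// that dispatches on the field guard at run time, or a plain pointer store
// with or without a write barrier.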
void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(sizeof(classid_t) == kInt16Size);
Label skip_store;
const Register instance_reg = locs()->in(0).reg();
const intptr_t offset_in_bytes = OffsetInBytes();
if (IsUnboxedStore() && compiler->is_optimizing()) {
XmmRegister value = locs()->in(1).fpu_reg();
Register temp = locs()->temp(0).reg();
Register temp2 = locs()->temp(1).reg();
const intptr_t cid = slot().field().UnboxedFieldCid();
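// An initializing store must allocate the mutable box first; subsequent
// stores reuse the box already stored in the field.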
if (is_initialization()) {
const Class* cls = NULL;
switch (cid) {
case kDoubleCid:
cls = &compiler->double_class();
break;
case kFloat32x4Cid:
cls = &compiler->float32x4_class();
break;
case kFloat64x2Cid:
cls = &compiler->float64x2_class();
break;
default:
UNREACHABLE();
}
BoxAllocationSlowPath::Allocate(compiler, this, *cls, temp, temp2);
__ movq(temp2, temp);
__ StoreIntoObject(instance_reg,
FieldAddress(instance_reg, offset_in_bytes), temp2,
Assembler::kValueIsNotSmi);
} else {
__ movq(temp, FieldAddress(instance_reg, offset_in_bytes));
}
switch (cid) {
case kDoubleCid:
__ Comment("UnboxedDoubleStoreInstanceFieldInstr");
__ movsd(FieldAddress(temp, Double::value_offset()), value);
break;
case kFloat32x4Cid:
__ Comment("UnboxedFloat32x4StoreInstanceFieldInstr");
__ movups(FieldAddress(temp, Float32x4::value_offset()), value);
break;
case kFloat64x2Cid:
__ Comment("UnboxedFloat64x2StoreInstanceFieldInstr");
__ movups(FieldAddress(temp, Float64x2::value_offset()), value);
break;
default:
UNREACHABLE();
}
return;
}
if (IsPotentialUnboxedStore()) {
Register value_reg = locs()->in(1).reg();
Register temp = locs()->temp(0).reg();
Register temp2 = locs()->temp(1).reg();
FpuRegister fpu_temp = locs()->temp(2).fpu_reg();
if (ShouldEmitStoreBarrier()) {
// Value input is a writable register and should be manually preserved
// across the allocation slow path.
locs()->live_registers()->Add(locs()->in(1), kTagged);
}
Label store_pointer;
Label store_double;
Label store_float32x4;
Label store_float64x2;
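// Dispatch on the field guard: nullable fields and fields that are not
// unboxing candidates take the pointer store path, while fields guarded as
// double, float32x4 or float64x2 store into a mutable box.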
__ LoadObject(temp, Field::ZoneHandle(Z, slot().field().Original()));
__ cmpw(FieldAddress(temp, Field::is_nullable_offset()),
Immediate(kNullCid));
__ j(EQUAL, &store_pointer);
__ movzxb(temp2, FieldAddress(temp, Field::kind_bits_offset()));
__ testq(temp2, Immediate(1 << Field::kUnboxingCandidateBit));
__ j(ZERO, &store_pointer);
__ cmpw(FieldAddress(temp, Field::guarded_cid_offset()),
Immediate(kDoubleCid));
__ j(EQUAL, &store_double);
__ cmpw(FieldAddress(temp, Field::guarded_cid_offset()),
Immediate(kFloat32x4Cid));
__ j(EQUAL, &store_float32x4);
__ cmpw(FieldAddress(temp, Field::guarded_cid_offset()),
Immediate(kFloat64x2Cid));
__ j(EQUAL, &store_float64x2);
// Fall through.
__ jmp(&store_pointer);
if (!compiler->is_optimizing()) {
locs()->live_registers()->Add(locs()->in(0));
locs()->live_registers()->Add(locs()->in(1));
}
{
__ Bind(&store_double);
EnsureMutableBox(compiler, this, temp, compiler->double_class(),
instance_reg, offset_in_bytes, temp2);
__ movsd(fpu_temp, FieldAddress(value_reg, Double::value_offset()));
__ movsd(FieldAddress(temp, Double::value_offset()), fpu_temp);
__ jmp(&skip_store);
}
{
__ Bind(&store_float32x4);
EnsureMutableBox(compiler, this, temp, compiler->float32x4_class(),
instance_reg, offset_in_bytes, temp2);
__ movups(fpu_temp, FieldAddress(value_reg, Float32x4::value_offset()));
__ movups(FieldAddress(temp, Float32x4::value_offset()), fpu_temp);
__ jmp(&skip_store);
}
{
__ Bind(&store_float64x2);
EnsureMutableBox(compiler, this, temp, compiler->float64x2_class(),
instance_reg, offset_in_bytes, temp2);
__ movups(fpu_temp, FieldAddress(value_reg, Float64x2::value_offset()));
__ movups(FieldAddress(temp, Float64x2::value_offset()), fpu_temp);
__ jmp(&skip_store);
}
__ Bind(&store_pointer);
}
if (ShouldEmitStoreBarrier()) {
Register value_reg = locs()->in(1).reg();
__ StoreIntoObject(instance_reg,
FieldAddress(instance_reg, offset_in_bytes), value_reg,
CanValueBeSmi());
} else {
if (locs()->in(1).IsConstant()) {
__ StoreIntoObjectNoBarrier(instance_reg,
FieldAddress(instance_reg, offset_in_bytes),
locs()->in(1).constant());
} else {
Register value_reg = locs()->in(1).reg();
__ StoreIntoObjectNoBarrier(
instance_reg, FieldAddress(instance_reg, offset_in_bytes), value_reg);
}
}
__ Bind(&skip_store);
}
LocationSummary* LoadStaticFieldInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresRegister());
summary->set_out(0, Location::RequiresRegister());
return summary;
}
// When the parser is building an implicit static getter for optimization,
// it can generate a function body where deoptimization ids do not line up
// with the unoptimized code.
//
// This is safe only so long as LoadStaticFieldInstr cannot deoptimize.
void LoadStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register field = locs()->in(0).reg();
Register result = locs()->out(0).reg();
__ movq(result, FieldAddress(field, Field::static_value_offset()));
}
LocationSummary* StoreStaticFieldInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
LocationSummary* locs =
new (zone) LocationSummary(zone, 1, 1, LocationSummary::kNoCall);
locs->set_in(0, Location::RegisterLocation(kWriteBarrierValueReg));
locs->set_temp(0, Location::RequiresRegister());
return locs;
}
void StoreStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register value = locs()->in(0).reg();
Register temp = locs()->temp(0).reg();
__ LoadObject(temp, Field::ZoneHandle(Z, field().Original()));
if (this->value()->NeedsWriteBarrier()) {
__ StoreIntoObject(temp, FieldAddress(temp, Field::static_value_offset()),
value, CanValueBeSmi());
} else {
__ StoreIntoObjectNoBarrier(
temp, FieldAddress(temp, Field::static_value_offset()), value);
}
}
LocationSummary* InstanceOfInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 3;
const intptr_t kNumTemps = 0;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
summary->set_in(0, Location::RegisterLocation(RAX)); // Instance.
summary->set_in(1, Location::RegisterLocation(RDX)); // Instant. type args.
summary->set_in(2, Location::RegisterLocation(RCX)); // Function type args.
summary->set_out(0, Location::RegisterLocation(RAX));
return summary;
}
void InstanceOfInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(locs()->in(0).reg() == RAX); // Value.
ASSERT(locs()->in(1).reg() == RDX); // Instantiator type arguments.
ASSERT(locs()->in(2).reg() == RCX); // Function type arguments.
compiler->GenerateInstanceOf(token_pos(), deopt_id(), type(), locs());
ASSERT(locs()->out(0).reg() == RAX);
}
// TODO(srdjan): In case of constant inputs make CreateArray kNoCall and
// use slow path stub.
LocationSummary* CreateArrayInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 2;
const intptr_t kNumTemps = 0;
LocationSummary* locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
locs->set_in(0, Location::RegisterLocation(RBX));
locs->set_in(1, Location::RegisterLocation(R10));
locs->set_out(0, Location::RegisterLocation(RAX));
return locs;
}
// Inlines array allocation for known constant values.
static void InlineArrayAllocation(FlowGraphCompiler* compiler,
intptr_t num_elements,
Label* slow_path,
Label* done) {
const int kInlineArraySize = 12; // Same as kInlineInstanceSize.
const Register kLengthReg = R10;
const Register kElemTypeReg = RBX;
const intptr_t instance_size = Array::InstanceSize(num_elements);
__ TryAllocateArray(kArrayCid, instance_size, slow_path, Assembler::kFarJump,
RAX, // instance
RCX, // end address
R13); // temp
// RAX: new object start as a tagged pointer.
// Store the type argument field.
__ StoreIntoObjectNoBarrier(
RAX, FieldAddress(RAX, Array::type_arguments_offset()), kElemTypeReg);
// Set the length field.
__ StoreIntoObjectNoBarrier(RAX, FieldAddress(RAX, Array::length_offset()),
kLengthReg);
// Initialize all array elements to raw_null.
// RAX: new object start as a tagged pointer.
// RCX: new object end address.
// RDI: iterator which initially points to the start of the variable
// data area to be initialized.
if (num_elements > 0) {
const intptr_t array_size = instance_size - sizeof(RawArray);
__ LoadObject(R12, Object::null_object());
__ leaq(RDI, FieldAddress(RAX, sizeof(RawArray)));
if (array_size < (kInlineArraySize * kWordSize)) {
intptr_t current_offset = 0;
while (current_offset < array_size) {
__ StoreIntoObjectNoBarrier(RAX, Address(RDI, current_offset), R12);
current_offset += kWordSize;
}
} else {
Label init_loop;
__ Bind(&init_loop);
__ StoreIntoObjectNoBarrier(RAX, Address(RDI, 0), R12);
__ addq(RDI, Immediate(kWordSize));
__ cmpq(RDI, RCX);
__ j(BELOW, &init_loop, Assembler::kNearJump);
}
}
__ jmp(done, Assembler::kNearJump);
}
void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
TypeUsageInfo* type_usage_info = compiler->thread()->type_usage_info();
if (type_usage_info != nullptr) {
const Class& list_class = Class::Handle(
compiler->thread()->isolate()->class_table()->At(kArrayCid));
RegisterTypeArgumentsUse(compiler->function(), type_usage_info, list_class,
element_type()->definition());
}
// Allocate the array. R10 = length, RBX = element type.
const Register kLengthReg = R10;
const Register kElemTypeReg = RBX;
const Register kResultReg = RAX;
ASSERT(locs()->in(0).reg() == kElemTypeReg);
ASSERT(locs()->in(1).reg() == kLengthReg);
Label slow_path, done;
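// With a known, valid constant length in optimized JIT code, allocate the
// array inline and fall back to the runtime entry only if the inline
// allocation fails; otherwise call the AllocateArray stub.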
if (compiler->is_optimizing() && !FLAG_precompiled_mode &&
num_elements()->BindsToConstant() &&
num_elements()->BoundConstant().IsSmi()) {
const intptr_t length = Smi::Cast(num_elements()->BoundConstant()).Value();
if ((length >= 0) && (length <= Array::kMaxElements)) {
InlineArrayAllocation(compiler, length, &slow_path, &done);
__ Bind(&slow_path);
__ PushObject(Object::null_object()); // Make room for the result.
__ pushq(kLengthReg);
__ pushq(kElemTypeReg);
compiler->GenerateRuntimeCall(token_pos(), deopt_id(),
kAllocateArrayRuntimeEntry, 2, locs());
__ Drop(2);
__ popq(kResultReg);
__ Bind(&done);
return;
}
}
__ Bind(&slow_path);
compiler->GenerateCallWithDeopt(token_pos(), deopt_id(),
StubCode::AllocateArray(),
RawPcDescriptors::kOther, locs());
__ Bind(&done);
ASSERT(locs()->out(0).reg() == kResultReg);
}
LocationSummary* LoadFieldInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps =
(IsUnboxedLoad() && opt) ? 1 : ((IsPotentialUnboxedLoad()) ? 2 : 0);
LocationSummary* locs = new (zone) LocationSummary(
zone, kNumInputs, kNumTemps,
(opt && !IsPotentialUnboxedLoad()) ? LocationSummary::kNoCall
: LocationSummary::kCallOnSlowPath);
locs->set_in(0, Location::RequiresRegister());
if (IsUnboxedLoad() && opt) {
locs->set_temp(0, Location::RequiresRegister());
} else if (IsPotentialUnboxedLoad()) {
locs->set_temp(0, opt ? Location::RequiresFpuRegister()
: Location::FpuRegisterLocation(XMM1));
locs->set_temp(1, Location::RequiresRegister());
}
locs->set_out(0, Location::RequiresRegister());
return locs;
}
void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(sizeof(classid_t) == kInt16Size);
Register instance_reg = locs()->in(0).reg();
if (IsUnboxedLoad() && compiler->is_optimizing()) {
XmmRegister result = locs()->out(0).fpu_reg();
Register temp = locs()->temp(0).reg();
__ movq(temp, FieldAddress(instance_reg, OffsetInBytes()));
intptr_t cid = slot().field().UnboxedFieldCid();
switch (cid) {
case kDoubleCid:
__ Comment("UnboxedDoubleLoadFieldInstr");
__ movsd(result, FieldAddress(temp, Double::value_offset()));
break;
case kFloat32x4Cid:
__ Comment("UnboxedFloat32x4LoadFieldInstr");
__ movups(result, FieldAddress(temp, Float32x4::value_offset()));
break;
case kFloat64x2Cid:
__ Comment("UnboxedFloat64x2LoadFieldInstr");
__ movups(result, FieldAddress(temp, Float64x2::value_offset()));
break;
default:
UNREACHABLE();
}
return;
}
Label done;
Register result = locs()->out(0).reg();
if (IsPotentialUnboxedLoad()) {
Register temp = locs()->temp(1).reg();
XmmRegister value = locs()->temp(0).fpu_reg();
Label load_pointer;
Label load_double;
Label load_float32x4;
Label load_float64x2;
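// Dispatch on the field guard: nullable fields take the plain pointer load,
// while fields guarded as double, float32x4 or float64x2 load the unboxed
// value and rebox it into a freshly allocated box.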
__ LoadObject(result, Field::ZoneHandle(slot().field().Original()));
FieldAddress field_cid_operand(result, Field::guarded_cid_offset());
FieldAddress field_nullability_operand(result, Field::is_nullable_offset());
__ cmpw(field_nullability_operand, Immediate(kNullCid));
__ j(EQUAL, &load_pointer);
__ cmpw(field_cid_operand, Immediate(kDoubleCid));
__ j(EQUAL, &load_double);
__ cmpw(field_cid_operand, Immediate(kFloat32x4Cid));
__ j(EQUAL, &load_float32x4);
__ cmpw(field_cid_operand, Immediate(kFloat64x2Cid));
__ j(EQUAL, &load_float64x2);
// Fall through.
__ jmp(&load_pointer);
if (!compiler->is_optimizing()) {
locs()->live_registers()->Add(locs()->in(0));
}
{
__ Bind(&load_double);
BoxAllocationSlowPath::Allocate(compiler, this, compiler->double_class(),
result, temp);
__ movq(temp, FieldAddress(instance_reg, OffsetInBytes()));
__ movsd(value, FieldAddress(temp, Double::value_offset()));
__ movsd(FieldAddress(result, Double::value_offset()), value);
__ jmp(&done);
}
{
__ Bind(&load_float32x4);
BoxAllocationSlowPath::Allocate(
compiler, this, compiler->float32x4_class(), result, temp);
__ movq(temp, FieldAddress(instance_reg, OffsetInBytes()));
__ movups(value, FieldAddress(temp, Float32x4::value_offset()));
__ movups(FieldAddress(result, Float32x4::value_offset()), value);
__ jmp(&done);
}
{
__ Bind(&load_float64x2);
BoxAllocationSlowPath::Allocate(
compiler, this, compiler->float64x2_class(),