// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#ifndef VM_INTERMEDIATE_LANGUAGE_H_
#define VM_INTERMEDIATE_LANGUAGE_H_
#include "vm/allocation.h"
#include "vm/ast.h"
#include "vm/growable_array.h"
#include "vm/handles_impl.h"
#include "vm/locations.h"
#include "vm/method_recognizer.h"
#include "vm/object.h"
#include "vm/parser.h"
namespace dart {
DECLARE_FLAG(bool, throw_on_javascript_int_overflow);
class BitVector;
class BlockEntryInstr;
class BoxIntegerInstr;
class BufferFormatter;
class CatchBlockEntryInstr;
class ComparisonInstr;
class Definition;
class Environment;
class FlowGraph;
class FlowGraphBuilder;
class FlowGraphCompiler;
class FlowGraphVisitor;
class Instruction;
class LocalVariable;
class ParsedFunction;
class Range;
class RangeAnalysis;
class RangeBoundary;
class UnboxIntegerInstr;
// CompileType describes the type of the value produced by a definition.
//
// It captures the following properties:
// - whether the value can potentially be null or is definitely not null;
// - the concrete class id of the value, or kDynamicCid if it is not known
// statically;
// - an abstract supertype of the value; the concrete runtime type of the
// value is guaranteed to be a subtype of this type.
//
// Values of CompileType form a lattice with the None type as bottom and the
// nullable Dynamic type as top element. The Union method provides the join
// operation for the lattice.
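//
// A minimal usage sketch (illustrative only):
//
//   CompileType smi = CompileType::FromCid(kSmiCid);  // Non-nullable Smi.
//   CompileType null_type = CompileType::Null();
//   smi.Union(&null_type);  // Join over the lattice: smi becomes nullable.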
class CompileType : public ValueObject {
public:
static const bool kNullable = true;
static const bool kNonNullable = false;
CompileType(bool is_nullable, intptr_t cid, const AbstractType* type)
: is_nullable_(is_nullable), cid_(cid), type_(type) { }
CompileType(const CompileType& other)
: ValueObject(),
is_nullable_(other.is_nullable_),
cid_(other.cid_),
type_(other.type_) { }
CompileType& operator=(const CompileType& other) {
is_nullable_ = other.is_nullable_;
cid_ = other.cid_;
type_ = other.type_;
return *this;
}
bool is_nullable() const { return is_nullable_; }
// Returns a type such that the runtime type of the value is guaranteed to
// be a subtype of it.
const AbstractType* ToAbstractType();
// Returns a class id that is either kDynamicCid or one the value is
// guaranteed to have at runtime.
intptr_t ToCid();
// Returns a class id that is either kDynamicCid or such that the value is
// guaranteed at runtime to be null or to have that class id.
intptr_t ToNullableCid();
// Returns true if the value is guaranteed to be not-null or is known to be
// always null.
bool HasDecidableNullability();
// Returns true if the value is known to be always null.
bool IsNull();
// Returns true if this type is more specific than the given type.
bool IsMoreSpecificThan(const AbstractType& other);
// Returns true if value of this type is assignable to a location of the
// given type.
bool IsAssignableTo(const AbstractType& type) {
bool is_instance;
return CanComputeIsInstanceOf(type, kNullable, &is_instance) &&
is_instance;
}
// Create a new CompileType representing the given combination of class id
// and abstract type. The pair is assumed to be coherent.
static CompileType Create(intptr_t cid, const AbstractType& type);
CompileType CopyNonNullable() const {
return CompileType(kNonNullable, cid_, type_);
}
static CompileType CreateNullable(bool is_nullable, intptr_t cid) {
return CompileType(is_nullable, cid, NULL);
}
// Create a new CompileType representing the given abstract type. By default
// values are assumed to be nullable.
static CompileType FromAbstractType(const AbstractType& type,
bool is_nullable = kNullable);
// Create a new CompileType representing a value with the given class id.
// Resulting CompileType is nullable only if cid is kDynamicCid or kNullCid.
static CompileType FromCid(intptr_t cid);
// Create the None CompileType. It is the bottom of the lattice and is used
// to represent the type of a phi that has not yet been inferred.
static CompileType None() {
return CompileType(kNullable, kIllegalCid, NULL);
}
// Create the Dynamic CompileType. It is the top of the lattice and is used
// to represent an unknown type.
static CompileType Dynamic();
static CompileType Null();
// Create non-nullable Bool type.
static CompileType Bool();
// Create non-nullable Int type.
static CompileType Int();
// Create non-nullable String type.
static CompileType String();
// Perform a join operation over the type lattice.
void Union(CompileType* other);
// Returns true if this and other types are the same.
bool IsEqualTo(CompileType* other) {
return (is_nullable_ == other->is_nullable_) &&
(ToNullableCid() == other->ToNullableCid()) &&
(ToAbstractType()->Equals(*other->ToAbstractType()));
}
bool IsNone() const {
return (cid_ == kIllegalCid) && (type_ == NULL);
}
bool IsInt() {
return !is_nullable() &&
((ToCid() == kSmiCid) ||
(ToCid() == kMintCid) ||
((type_ != NULL) && (type_->Equals(Type::Handle(Type::IntType())))));
}
void PrintTo(BufferFormatter* f) const;
const char* ToCString() const;
private:
bool CanComputeIsInstanceOf(const AbstractType& type,
bool is_nullable,
bool* is_instance);
bool is_nullable_;
intptr_t cid_;
const AbstractType* type_;
};
// Zone allocated wrapper for the CompileType value.
class ZoneCompileType : public ZoneAllocated {
public:
static CompileType* Wrap(const CompileType& type) {
ZoneCompileType* zone_type = new ZoneCompileType(type);
return zone_type->ToCompileType();
}
CompileType* ToCompileType() {
return &type_;
}
protected:
explicit ZoneCompileType(const CompileType& type) : type_(type) { }
CompileType type_;
};
// ConstrainedCompileType represents a compile type that is computed from
// another compile type.
class ConstrainedCompileType : public ZoneCompileType {
public:
virtual ~ConstrainedCompileType() { }
// Recompute compile type.
virtual void Update() = 0;
protected:
explicit ConstrainedCompileType(const CompileType& type)
: ZoneCompileType(type) { }
};
// NotNullConstrainedCompileType represents a not-null constraint applied to
// the source compile type. The result is a non-nullable version of the
// incoming compile type. It is used to represent the compile type propagated
// downwards from a strict comparison with the null constant.
class NotNullConstrainedCompileType : public ConstrainedCompileType {
public:
explicit NotNullConstrainedCompileType(CompileType* source)
: ConstrainedCompileType(source->CopyNonNullable()), source_(source) { }
virtual void Update() {
type_ = source_->CopyNonNullable();
}
private:
CompileType* source_;
};
class EffectSet : public ValueObject {
public:
enum Effects {
kNoEffects = 0,
kExternalization = 1,
kLastEffect = kExternalization
};
EffectSet(const EffectSet& other)
: ValueObject(), effects_(other.effects_) {
}
bool IsNone() const { return effects_ == kNoEffects; }
static EffectSet None() { return EffectSet(kNoEffects); }
static EffectSet All() {
ASSERT(EffectSet::kLastEffect == 1);
return EffectSet(kExternalization);
}
static EffectSet Externalization() {
return EffectSet(kExternalization);
}
intptr_t ToInt() { return effects_; }
private:
explicit EffectSet(intptr_t effects) : effects_(effects) { }
intptr_t effects_;
};
class Value : public ZoneAllocated {
public:
// A forward iterator that allows removing the current value from the
// underlying use list during iteration.
class Iterator {
public:
explicit Iterator(Value* head) : next_(head) { Advance(); }
Value* Current() const { return current_; }
bool Done() const { return current_ == NULL; }
void Advance() {
// Pre-fetch next on advance and cache it.
current_ = next_;
if (next_ != NULL) next_ = next_->next_use();
}
private:
Value* current_;
Value* next_;
};
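// A typical traversal, which may remove the current use while iterating
// (sketch; 'defn' stands for some Definition* whose input uses are walked):
//
//   for (Value::Iterator it(defn->input_use_list());
//        !it.Done();
//        it.Advance()) {
//     Value* use = it.Current();
//     ...
//   }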
explicit Value(Definition* definition)
: definition_(definition),
previous_use_(NULL),
next_use_(NULL),
instruction_(NULL),
use_index_(-1),
reaching_type_(NULL) { }
Definition* definition() const { return definition_; }
void set_definition(Definition* definition) { definition_ = definition; }
Value* previous_use() const { return previous_use_; }
void set_previous_use(Value* previous) { previous_use_ = previous; }
Value* next_use() const { return next_use_; }
void set_next_use(Value* next) { next_use_ = next; }
bool IsSingleUse() const {
return (next_use_ == NULL) && (previous_use_ == NULL);
}
Instruction* instruction() const { return instruction_; }
void set_instruction(Instruction* instruction) { instruction_ = instruction; }
intptr_t use_index() const { return use_index_; }
void set_use_index(intptr_t index) { use_index_ = index; }
static void AddToList(Value* value, Value** list);
void RemoveFromUseList();
// Change the definition after use lists have been computed.
inline void BindTo(Definition* definition);
inline void BindToEnvironment(Definition* definition);
Value* Copy(Zone* zone) { return new(zone) Value(definition_); }
// This function must only be used when the new Value is dominated by
// the original Value.
Value* CopyWithType() {
Value* copy = new Value(definition_);
copy->reaching_type_ = reaching_type_;
return copy;
}
CompileType* Type();
void SetReachingType(CompileType* type) {
reaching_type_ = type;
}
void PrintTo(BufferFormatter* f) const;
const char* ToCString() const;
const char* DebugName() const { return "Value"; }
bool IsSmiValue() { return Type()->ToCid() == kSmiCid; }
// Returns true if this value binds to the constant 0xFFFFFFFF.
bool BindsTo32BitMaskConstant() const;
// Return true if the value represents a constant.
bool BindsToConstant() const;
// Return true if the value represents the constant null.
bool BindsToConstantNull() const;
// Asserts if BindsToConstant() is false, otherwise returns the constant value.
const Object& BoundConstant() const;
// Compile-time constants, Bools, Smis and nulls do not need to update
// the store buffer.
bool NeedsStoreBuffer();
bool Equals(Value* other) const;
private:
friend class FlowGraphPrinter;
Definition* definition_;
Value* previous_use_;
Value* next_use_;
Instruction* instruction_;
intptr_t use_index_;
CompileType* reaching_type_;
DISALLOW_COPY_AND_ASSIGN(Value);
};
// An embedded container with N elements of type T. Used (with partial
// specialization for N=0) because embedded arrays cannot have size 0.
template<typename T, intptr_t N>
class EmbeddedArray {
public:
EmbeddedArray() : elements_() { }
intptr_t length() const { return N; }
const T& operator[](intptr_t i) const {
ASSERT(i < length());
return elements_[i];
}
T& operator[](intptr_t i) {
ASSERT(i < length());
return elements_[i];
}
const T& At(intptr_t i) const {
return (*this)[i];
}
void SetAt(intptr_t i, const T& val) {
(*this)[i] = val;
}
private:
T elements_[N];
};
template<typename T>
class EmbeddedArray<T, 0> {
public:
intptr_t length() const { return 0; }
const T& operator[](intptr_t i) const {
UNREACHABLE();
static T sentinel = 0;
return sentinel;
}
T& operator[](intptr_t i) {
UNREACHABLE();
static T sentinel = 0;
return sentinel;
}
};
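// For example, TemplateInstruction<N, ...> below embeds an
// EmbeddedArray<Value*, N> of inputs; the N=0 specialization keeps
// zero-input instructions well-formed, since a plain T[0] member would not
// be.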
// Instructions.
// M is a single-argument macro. It is applied to each concrete instruction
// type name. The concrete instruction classes are the names with Instr
// concatenated.
#define FOR_EACH_INSTRUCTION(M) \
M(GraphEntry) \
M(JoinEntry) \
M(TargetEntry) \
M(IndirectEntry) \
M(CatchBlockEntry) \
M(Phi) \
M(Redefinition) \
M(Parameter) \
M(ParallelMove) \
M(PushArgument) \
M(Return) \
M(Throw) \
M(ReThrow) \
M(Stop) \
M(Goto) \
M(IndirectGoto) \
M(Branch) \
M(AssertAssignable) \
M(AssertBoolean) \
M(CurrentContext) \
M(ClosureCall) \
M(InstanceCall) \
M(PolymorphicInstanceCall) \
M(StaticCall) \
M(LoadLocal) \
M(PushTemp) \
M(DropTemps) \
M(StoreLocal) \
M(StrictCompare) \
M(EqualityCompare) \
M(RelationalOp) \
M(NativeCall) \
M(DebugStepCheck) \
M(LoadIndexed) \
M(LoadCodeUnits) \
M(StoreIndexed) \
M(StoreInstanceField) \
M(InitStaticField) \
M(LoadStaticField) \
M(StoreStaticField) \
M(BooleanNegate) \
M(InstanceOf) \
M(CreateArray) \
M(AllocateObject) \
M(LoadField) \
M(LoadUntagged) \
M(LoadClassId) \
M(InstantiateType) \
M(InstantiateTypeArguments) \
M(AllocateContext) \
M(AllocateUninitializedContext) \
M(CloneContext) \
M(BinarySmiOp) \
M(BinaryInt32Op) \
M(UnarySmiOp) \
M(UnaryDoubleOp) \
M(CheckStackOverflow) \
M(SmiToDouble) \
M(Int32ToDouble) \
M(MintToDouble) \
M(DoubleToInteger) \
M(DoubleToSmi) \
M(DoubleToDouble) \
M(DoubleToFloat) \
M(FloatToDouble) \
M(CheckClass) \
M(CheckClassId) \
M(CheckSmi) \
M(Constant) \
M(UnboxedConstant) \
M(CheckEitherNonSmi) \
M(BinaryDoubleOp) \
M(MathUnary) \
M(MathMinMax) \
M(Box) \
M(Unbox) \
M(BoxInt64) \
M(UnboxInt64) \
M(CaseInsensitiveCompareUC16) \
M(BinaryMintOp) \
M(ShiftMintOp) \
M(UnaryMintOp) \
M(CheckArrayBound) \
M(Constraint) \
M(StringToCharCode) \
M(StringFromCharCode) \
M(StringInterpolate) \
M(InvokeMathCFunction) \
M(MergedMath) \
M(GuardFieldClass) \
M(GuardFieldLength) \
M(IfThenElse) \
M(BinaryFloat32x4Op) \
M(Simd32x4Shuffle) \
M(Simd32x4ShuffleMix) \
M(Simd32x4GetSignMask) \
M(Float32x4Constructor) \
M(Float32x4Zero) \
M(Float32x4Splat) \
M(Float32x4Comparison) \
M(Float32x4MinMax) \
M(Float32x4Scale) \
M(Float32x4Sqrt) \
M(Float32x4ZeroArg) \
M(Float32x4Clamp) \
M(Float32x4With) \
M(Float32x4ToInt32x4) \
M(MaterializeObject) \
M(Int32x4Constructor) \
M(Int32x4BoolConstructor) \
M(Int32x4GetFlag) \
M(Int32x4Select) \
M(Int32x4SetFlag) \
M(Int32x4ToFloat32x4) \
M(BinaryInt32x4Op) \
M(TestSmi) \
M(TestCids) \
M(BinaryFloat64x2Op) \
M(Float64x2Zero) \
M(Float64x2Constructor) \
M(Float64x2Splat) \
M(Float32x4ToFloat64x2) \
M(Float64x2ToFloat32x4) \
M(Simd64x2Shuffle) \
M(Float64x2ZeroArg) \
M(Float64x2OneArg) \
M(ExtractNthOutput) \
M(BinaryUint32Op) \
M(ShiftUint32Op) \
M(UnaryUint32Op) \
M(BoxUint32) \
M(UnboxUint32) \
M(BoxInt32) \
M(UnboxInt32) \
M(UnboxedIntConverter) \
M(GrowRegExpStack) \
M(Deoptimize)
#define FOR_EACH_ABSTRACT_INSTRUCTION(M) \
M(BlockEntry) \
M(BoxInteger) \
M(UnboxInteger) \
M(Comparison) \
M(UnaryIntegerOp) \
M(BinaryIntegerOp)
#define FORWARD_DECLARATION(type) class type##Instr;
FOR_EACH_INSTRUCTION(FORWARD_DECLARATION)
FOR_EACH_ABSTRACT_INSTRUCTION(FORWARD_DECLARATION)
#undef FORWARD_DECLARATION
#define DEFINE_INSTRUCTION_TYPE_CHECK(type) \
virtual type##Instr* As##type() { return this; } \
virtual const char* DebugName() const { return #type; }
// Functions required in all concrete instruction classes.
#define DECLARE_INSTRUCTION_NO_BACKEND(type) \
virtual Tag tag() const { return k##type; } \
virtual void Accept(FlowGraphVisitor* visitor); \
DEFINE_INSTRUCTION_TYPE_CHECK(type)
#define DECLARE_INSTRUCTION_BACKEND() \
virtual LocationSummary* MakeLocationSummary(Zone* zone, \
bool optimizing) const; \
virtual void EmitNativeCode(FlowGraphCompiler* compiler);
// Functions required in all concrete instruction classes.
#define DECLARE_INSTRUCTION(type) \
DECLARE_INSTRUCTION_NO_BACKEND(type) \
DECLARE_INSTRUCTION_BACKEND()
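// Sketch of how a concrete instruction class uses the macro (the name
// 'Example' is hypothetical; a real instruction must also appear in
// FOR_EACH_INSTRUCTION so that the kExample tag and the ExampleInstr
// forward declaration exist):
//
//   class ExampleInstr : public TemplateInstruction<0, NoThrow> {
//    public:
//     DECLARE_INSTRUCTION(Example)
//     virtual bool CanDeoptimize() const { return false; }
//     virtual EffectSet Effects() const { return EffectSet::None(); }
//   };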
class Instruction : public ZoneAllocated {
public:
#define DECLARE_TAG(type) k##type,
enum Tag {
FOR_EACH_INSTRUCTION(DECLARE_TAG)
};
#undef DECLARE_TAG
explicit Instruction(intptr_t deopt_id = Thread::kNoDeoptId)
: deopt_id_(deopt_id),
lifetime_position_(kNoPlaceId),
previous_(NULL),
next_(NULL),
env_(NULL),
locs_(NULL),
inlining_id_(-1) { }
virtual ~Instruction() { }
virtual Tag tag() const = 0;
intptr_t deopt_id() const {
ASSERT(CanDeoptimize() || CanBecomeDeoptimizationTarget());
return GetDeoptId();
}
const ICData* GetICData(
const ZoneGrowableArray<const ICData*>& ic_data_array) const;
virtual intptr_t token_pos() const { return Scanner::kNoSourcePos; }
virtual intptr_t InputCount() const = 0;
virtual Value* InputAt(intptr_t i) const = 0;
void SetInputAt(intptr_t i, Value* value) {
ASSERT(value != NULL);
value->set_instruction(this);
value->set_use_index(i);
RawSetInputAt(i, value);
}
// Remove all inputs (including in the environment) from their
// definition's use lists.
void UnuseAllInputs();
// Call instructions override this function and return the number of
// pushed arguments.
virtual intptr_t ArgumentCount() const { return 0; }
virtual PushArgumentInstr* PushArgumentAt(intptr_t index) const {
UNREACHABLE();
return NULL;
}
inline Definition* ArgumentAt(intptr_t index) const;
// Returns true if this instruction can deoptimize.
virtual bool CanDeoptimize() const = 0;
// Visiting support.
virtual void Accept(FlowGraphVisitor* visitor) = 0;
Instruction* previous() const { return previous_; }
void set_previous(Instruction* instr) {
ASSERT(!IsBlockEntry());
previous_ = instr;
}
Instruction* next() const { return next_; }
void set_next(Instruction* instr) {
ASSERT(!IsGraphEntry());
ASSERT(!IsReturn());
ASSERT(!IsBranch() || (instr == NULL));
ASSERT(!IsPhi());
ASSERT(instr == NULL || !instr->IsBlockEntry());
// TODO(fschneider): Also add Throw and ReThrow to the list of instructions
// that do not have a successor. Currently, the graph builder will continue
// to append instructions in case of a Throw inside an expression. This
// condition should be handled in the graph builder.
next_ = instr;
}
// Link together two instructions.
void LinkTo(Instruction* next) {
ASSERT(this != next);
this->set_next(next);
next->set_previous(this);
}
// Removes this instruction from the graph, after use lists have been
// computed. If the instruction is a definition with uses, those uses are
// unaffected (so the instruction can be reinserted, e.g., hoisting).
Instruction* RemoveFromGraph(bool return_previous = true);
// Normal instructions can have 0 (inside a block) or 1 (last instruction in
// a block) successors. Branch instructions with >1 successor override this
// function.
virtual intptr_t SuccessorCount() const;
virtual BlockEntryInstr* SuccessorAt(intptr_t index) const;
void Goto(JoinEntryInstr* entry);
virtual const char* DebugName() const = 0;
// Printing support.
const char* ToCString() const;
virtual void PrintTo(BufferFormatter* f) const;
virtual void PrintOperandsTo(BufferFormatter* f) const;
#define DECLARE_INSTRUCTION_TYPE_CHECK(Name, Type) \
bool Is##Name() { return (As##Name() != NULL); } \
virtual Type* As##Name() { return NULL; }
#define INSTRUCTION_TYPE_CHECK(Name) \
DECLARE_INSTRUCTION_TYPE_CHECK(Name, Name##Instr)
DECLARE_INSTRUCTION_TYPE_CHECK(Definition, Definition)
FOR_EACH_INSTRUCTION(INSTRUCTION_TYPE_CHECK)
FOR_EACH_ABSTRACT_INSTRUCTION(INSTRUCTION_TYPE_CHECK)
#undef INSTRUCTION_TYPE_CHECK
#undef DECLARE_INSTRUCTION_TYPE_CHECK
// Returns the structure describing the location constraints required
// to emit native code for this instruction.
LocationSummary* locs() {
ASSERT(locs_ != NULL);
return locs_;
}
bool HasLocs() const { return locs_ != NULL; }
virtual LocationSummary* MakeLocationSummary(Zone* zone,
bool is_optimizing) const = 0;
void InitializeLocationSummary(Zone* zone, bool optimizing) {
ASSERT(locs_ == NULL);
locs_ = MakeLocationSummary(zone, optimizing);
}
static LocationSummary* MakeCallSummary(Zone* zone);
virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
UNIMPLEMENTED();
}
Environment* env() const { return env_; }
void SetEnvironment(Environment* deopt_env);
void RemoveEnvironment();
intptr_t lifetime_position() const { return lifetime_position_; }
void set_lifetime_position(intptr_t pos) {
lifetime_position_ = pos;
}
bool HasUnmatchedInputRepresentations() const;
// Returns representation expected for the input operand at the given index.
virtual Representation RequiredInputRepresentation(intptr_t idx) const {
return kTagged;
}
// Representation of the value produced by this computation.
virtual Representation representation() const {
return kTagged;
}
bool WasEliminated() const {
return next() == NULL;
}
// Returns the deoptimization id that corresponds to the deoptimization
// target that conversions inserted for this instruction's input operands
// can jump to.
virtual intptr_t DeoptimizationTarget() const {
UNREACHABLE();
return Thread::kNoDeoptId;
}
// Returns a replacement for the instruction or NULL if the instruction can
// be eliminated. By default returns this instruction, which means no
// change.
virtual Instruction* Canonicalize(FlowGraph* flow_graph);
// Insert this instruction before 'next' after use lists are computed.
// Instructions cannot be inserted before a block entry or any other
// instruction without a previous instruction.
void InsertBefore(Instruction* next) { InsertAfter(next->previous()); }
// Insert this instruction after 'prev' after use lists are computed.
void InsertAfter(Instruction* prev);
// Append an instruction to the current one and return the tail.
// This function updates the def-use chains of the newly appended
// instruction.
Instruction* AppendInstruction(Instruction* tail);
virtual bool AllowsDCE() const {
return false;
}
// Returns true if CSE and LICM are allowed for this instruction.
virtual bool AllowsCSE() const {
return false;
}
// Returns set of effects created by this instruction.
virtual EffectSet Effects() const = 0;
// Returns set of effects that affect this instruction.
virtual EffectSet Dependencies() const {
UNREACHABLE();
return EffectSet::All();
}
// Get the block entry for this instruction.
virtual BlockEntryInstr* GetBlock() const;
// Place identifiers used by the load optimization pass.
intptr_t place_id() const { return place_id_; }
void set_place_id(intptr_t place_id) { place_id_ = place_id; }
bool HasPlaceId() const { return place_id_ != kNoPlaceId; }
intptr_t inlining_id() const { return inlining_id_; }
void set_inlining_id(intptr_t value) {
ASSERT(value >= 0);
inlining_id_ = value;
}
bool has_inlining_id() const { return inlining_id_ >= 0; }
// Returns a hash code for use with hash maps.
virtual intptr_t Hashcode() const;
// Compares two instructions. Returns true iff:
// 1. They have the same tag.
// 2. All input operands are Equals.
// 3. They satisfy AttributesEqual.
bool Equals(Instruction* other) const;
// Compares attributes of two instructions (except input operands and tag).
// All instructions that participate in CSE have to override this function.
// This function can assume that the argument has the same type as this.
virtual bool AttributesEqual(Instruction* other) const {
UNREACHABLE();
return false;
}
virtual void InheritDeoptTarget(Zone* zone, Instruction* other);
bool NeedsEnvironment() const {
return CanDeoptimize() || CanBecomeDeoptimizationTarget();
}
virtual bool CanBecomeDeoptimizationTarget() const {
return false;
}
void InheritDeoptTargetAfter(FlowGraph* flow_graph,
Definition* call,
Definition* result);
virtual bool MayThrow() const = 0;
bool IsDominatedBy(Instruction* dom);
void ClearEnv() { env_ = NULL; }
protected:
// These friend classes need access to GetDeoptId and/or CopyDeoptIdFrom.
friend class CallSiteInliner;
friend class LICM;
friend class ComparisonInstr;
friend class Scheduler;
friend class BlockEntryInstr;
// Fetch deopt id without checking if this computation can deoptimize.
intptr_t GetDeoptId() const {
return deopt_id_;
}
void CopyDeoptIdFrom(const Instruction& instr) {
deopt_id_ = instr.deopt_id_;
}
private:
virtual void RawSetInputAt(intptr_t i, Value* value) = 0;
enum {
kNoPlaceId = -1
};
intptr_t deopt_id_;
union {
intptr_t lifetime_position_; // Position used by register allocator.
intptr_t place_id_;
};
Instruction* previous_;
Instruction* next_;
Environment* env_;
LocationSummary* locs_;
intptr_t inlining_id_;
DISALLOW_COPY_AND_ASSIGN(Instruction);
};
class PureInstruction : public Instruction {
public:
explicit PureInstruction(intptr_t deopt_id)
: Instruction(deopt_id) { }
virtual bool AllowsCSE() const { return true; }
virtual EffectSet Dependencies() const { return EffectSet::None(); }
virtual EffectSet Effects() const { return EffectSet::None(); }
};
// Types to be used as ThrowsTrait for TemplateInstruction/TemplateDefinition.
struct Throws {
static const bool kCanThrow = true;
};
struct NoThrow {
static const bool kCanThrow = false;
};
// Types to be used as CSETrait for TemplateInstruction/TemplateDefinition.
// Pure instructions are those that allow CSE and have no effects and
// no dependencies.
template<typename DefaultBase, typename PureBase>
struct Pure {
typedef PureBase Base;
};
template<typename DefaultBase, typename PureBase>
struct NoCSE {
typedef DefaultBase Base;
};
template<intptr_t N,
typename ThrowsTrait,
template<typename Default, typename Pure> class CSETrait = NoCSE>
class TemplateInstruction: public CSETrait<Instruction, PureInstruction>::Base {
public:
explicit TemplateInstruction(intptr_t deopt_id = Thread::kNoDeoptId)
: CSETrait<Instruction, PureInstruction>::Base(deopt_id), inputs_() { }
virtual intptr_t InputCount() const { return N; }
virtual Value* InputAt(intptr_t i) const { return inputs_[i]; }
virtual bool MayThrow() const { return ThrowsTrait::kCanThrow; }
protected:
EmbeddedArray<Value*, N> inputs_;
private:
virtual void RawSetInputAt(intptr_t i, Value* value) {
inputs_[i] = value;
}
};
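// For instance, a pure (CSE-able, effect- and dependency-free) one-input
// instruction would be declared as (hypothetical sketch):
//
//   class ExampleOpInstr : public TemplateInstruction<1, NoThrow, Pure> {
//     ...
//   };
//
// Omitting the CSETrait argument keeps the default NoCSE base, i.e. plain
// Instruction.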
class MoveOperands : public ZoneAllocated {
public:
MoveOperands(Location dest, Location src) : dest_(dest), src_(src) { }
Location src() const { return src_; }
Location dest() const { return dest_; }
Location* src_slot() { return &src_; }
Location* dest_slot() { return &dest_; }
void set_src(const Location& value) { src_ = value; }
void set_dest(const Location& value) { dest_ = value; }
// The parallel move resolver marks moves as "in-progress" by clearing the
// destination (but not the source).
Location MarkPending() {
ASSERT(!IsPending());
Location dest = dest_;
dest_ = Location::NoLocation();
return dest;
}
void ClearPending(Location dest) {
ASSERT(IsPending());
dest_ = dest;
}
bool IsPending() const {
ASSERT(!src_.IsInvalid() || dest_.IsInvalid());
return dest_.IsInvalid() && !src_.IsInvalid();
}
// True if this move is a move from the given location.
bool Blocks(Location loc) const {
return !IsEliminated() && src_.Equals(loc);
}
// A move is redundant if it's been eliminated, if its source and
// destination are the same, or if its destination is unneeded.
bool IsRedundant() const {
return IsEliminated() || dest_.IsInvalid() || src_.Equals(dest_);
}
// We clear both operands to indicate a move that has been eliminated.
void Eliminate() { src_ = dest_ = Location::NoLocation(); }
bool IsEliminated() const {
ASSERT(!src_.IsInvalid() || dest_.IsInvalid());
return src_.IsInvalid();
}
private:
Location dest_;
Location src_;
DISALLOW_COPY_AND_ASSIGN(MoveOperands);
};
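// The pending protocol used by the parallel move resolver looks roughly
// like this (sketch; 'move' is some MoveOperands*):
//
//   Location saved_dest = move->MarkPending();  // Clears dest_.
//   ...                                         // Emit moves blocked on it.
//   move->ClearPending(saved_dest);             // Restores the destination.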
class ParallelMoveInstr : public TemplateInstruction<0, NoThrow> {
public:
ParallelMoveInstr() : moves_(4) { }
DECLARE_INSTRUCTION(ParallelMove)
virtual intptr_t ArgumentCount() const { return 0; }
virtual bool CanDeoptimize() const { return false; }
virtual EffectSet Effects() const {
UNREACHABLE(); // This instruction is never visited by optimization passes.
return EffectSet::None();
}
virtual EffectSet Dependencies() const {
UNREACHABLE(); // This instruction is never visited by optimization passes.
return EffectSet::None();
}
MoveOperands* AddMove(Location dest, Location src) {
MoveOperands* move = new MoveOperands(dest, src);
moves_.Add(move);
return move;
}
MoveOperands* MoveOperandsAt(intptr_t index) const { return moves_[index]; }
intptr_t NumMoves() const { return moves_.length(); }
bool IsRedundant() const;
virtual void PrintTo(BufferFormatter* f) const;
private:
GrowableArray<MoveOperands*> moves_; // Elements cannot be null.
DISALLOW_COPY_AND_ASSIGN(ParallelMoveInstr);
};
// Basic block entries are administrative nodes. There is a distinguished
// graph entry with no predecessor. Joins are the only nodes with multiple
// predecessors. Targets are all other basic block entries. The types
// enforce edge-split form---joins are forbidden as the successors of
// branches.
class BlockEntryInstr : public Instruction {
public:
virtual intptr_t PredecessorCount() const = 0;
virtual BlockEntryInstr* PredecessorAt(intptr_t index) const = 0;
intptr_t preorder_number() const { return preorder_number_; }
void set_preorder_number(intptr_t number) { preorder_number_ = number; }
intptr_t postorder_number() const { return postorder_number_; }
void set_postorder_number(intptr_t number) { postorder_number_ = number; }
intptr_t block_id() const { return block_id_; }
void set_start_pos(intptr_t pos) { start_pos_ = pos; }
intptr_t start_pos() const { return start_pos_; }
void set_end_pos(intptr_t pos) { end_pos_ = pos; }
intptr_t end_pos() const { return end_pos_; }
BlockEntryInstr* dominator() const { return dominator_; }
BlockEntryInstr* ImmediateDominator() const;
const GrowableArray<BlockEntryInstr*>& dominated_blocks() {
return dominated_blocks_;
}
void AddDominatedBlock(BlockEntryInstr* block) {
block->set_dominator(this);
dominated_blocks_.Add(block);
}
void ClearDominatedBlocks() { dominated_blocks_.Clear(); }
bool Dominates(BlockEntryInstr* other) const;
Instruction* last_instruction() const { return last_instruction_; }
void set_last_instruction(Instruction* instr) { last_instruction_ = instr; }
ParallelMoveInstr* parallel_move() const {
return parallel_move_;
}
bool HasParallelMove() const {
return parallel_move_ != NULL;
}
bool HasNonRedundantParallelMove() const {
return HasParallelMove() && !parallel_move()->IsRedundant();
}
ParallelMoveInstr* GetParallelMove() {
if (parallel_move_ == NULL) {
parallel_move_ = new ParallelMoveInstr();
}
return parallel_move_;
}
// Discover basic-block structure of the current block. Must be called
// on all graph blocks in preorder to yield valid results. As a side effect,
// the block entry instructions in the graph are assigned preorder numbers.
// The array 'preorder' maps preorder block numbers to the block entry
// instruction with that number. The depth first spanning tree is recorded
// in the array 'parent', which maps preorder block numbers to the preorder
// number of the block's spanning-tree parent. As a side effect of this
// function, the set of basic block predecessors (i.e., the block entry
// instructions of predecessor blocks) and the last instruction in the
// block are recorded in each entry instruction. Returns true when called the
// first time on this particular block within one graph traversal, and false
// on all successive calls.
bool DiscoverBlock(
BlockEntryInstr* predecessor,
GrowableArray<BlockEntryInstr*>* preorder,
GrowableArray<intptr_t>* parent);
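// A typical traversal starts at the graph entry, which has no predecessor
// (sketch; 'graph_entry' is some GraphEntryInstr*):
//
//   GrowableArray<BlockEntryInstr*> preorder;
//   GrowableArray<intptr_t> parent;
//   graph_entry->DiscoverBlock(NULL, &preorder, &parent);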
// Perform a depth first search to prune code not reachable from an OSR
// entry point.
bool PruneUnreachable(FlowGraphBuilder* builder,
GraphEntryInstr* graph_entry,
Instruction* parent,
intptr_t osr_id,
BitVector* block_marks);
virtual intptr_t InputCount() const { return 0; }
virtual Value* InputAt(intptr_t i) const {
UNREACHABLE();
return NULL;
}
virtual intptr_t ArgumentCount() const { return 0; }
virtual bool CanBecomeDeoptimizationTarget() const {
// BlockEntry environment is copied to Goto and Branch instructions
// when we insert new blocks targeting this block.
return true;
}
virtual bool CanDeoptimize() const { return false; }
virtual EffectSet Effects() const { return EffectSet::None(); }
virtual EffectSet Dependencies() const { return EffectSet::None(); }
virtual bool MayThrow() const { return false; }
intptr_t try_index() const { return try_index_; }
void set_try_index(intptr_t index) {
try_index_ = index;
}
// True for blocks inside a try { } region.
bool InsideTryBlock() const {
return try_index_ != CatchClauseNode::kInvalidTryIndex;
}
BitVector* loop_info() const { return loop_info_; }
void set_loop_info(BitVector* loop_info) {
loop_info_ = loop_info;
}
virtual BlockEntryInstr* GetBlock() const {
return const_cast<BlockEntryInstr*>(this);
}
// Helper to mutate the graph during inlining. This block should be
// replaced with new_block as a predecessor of all of this block's
// successors.
void ReplaceAsPredecessorWith(BlockEntryInstr* new_block);
void set_block_id(intptr_t block_id) { block_id_ = block_id; }
intptr_t offset() const { return offset_; }
void set_offset(intptr_t offset) { offset_ = offset; }
// For all instructions in this block: remove all inputs (including in the
// environment) from their definitions' use lists.
void ClearAllInstructions();
DEFINE_INSTRUCTION_TYPE_CHECK(BlockEntry)
protected:
BlockEntryInstr(intptr_t block_id, intptr_t try_index)
: Instruction(Thread::Current()->GetNextDeoptId()),
block_id_(block_id),
try_index_(try_index),
preorder_number_(-1),
postorder_number_(-1),
dominator_(NULL),
dominated_blocks_(1),
last_instruction_(NULL),
offset_(-1),
parallel_move_(NULL),
loop_info_(NULL) {
}
private:
virtual void RawSetInputAt(intptr_t i, Value* value) { UNREACHABLE(); }
virtual void ClearPredecessors() = 0;
virtual void AddPredecessor(BlockEntryInstr* predecessor) = 0;
void set_dominator(BlockEntryInstr* instr) { dominator_ = instr; }
intptr_t block_id_;
intptr_t try_index_;
intptr_t preorder_number_;
intptr_t postorder_number_;
// Starting and ending lifetime positions for this block. Used by
// the linear scan register allocator.
intptr_t start_pos_;
intptr_t end_pos_;
BlockEntryInstr* dominator_; // Immediate dominator, NULL for graph entry.
// TODO(fschneider): Optimize the case of one child to save space.
GrowableArray<BlockEntryInstr*> dominated_blocks_;
Instruction* last_instruction_;
// Offset of this block from the start of the emitted code.
intptr_t offset_;
// Parallel move that will be used by linear scan register allocator to
// connect live ranges at the start of the block.
ParallelMoveInstr* parallel_move_;
// Bit vector containing loop blocks for a loop header indexed by block
// preorder number.
BitVector* loop_info_;
DISALLOW_COPY_AND_ASSIGN(BlockEntryInstr);
};
class ForwardInstructionIterator : public ValueObject {
public:
explicit ForwardInstructionIterator(BlockEntryInstr* block_entry)
: current_(block_entry) {
Advance();
}
void Advance() {
ASSERT(!Done());
current_ = current_->next();
}
bool Done() const { return current_ == NULL; }
// Removes 'current_' from the graph and sets it to the previous instruction.
void RemoveCurrentFromGraph();
Instruction* Current() const { return current_; }
private:
Instruction* current_;
};
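// Typical use (sketch; 'block' is some BlockEntryInstr*):
//
//   for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
//     Instruction* current = it.Current();
//     ...
//   }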
class BackwardInstructionIterator : public ValueObject {
public:
explicit BackwardInstructionIterator(BlockEntryInstr* block_entry)
: block_entry_(block_entry), current_(block_entry->last_instruction()) {
ASSERT(block_entry_->previous() == NULL);
}
void Advance() {
ASSERT(!Done());
current_ = current_->previous();
}
bool Done() const { return current_ == block_entry_; }
void RemoveCurrentFromGraph();
Instruction* Current() const { return current_; }
private:
BlockEntryInstr* block_entry_;
Instruction* current_;
};
class GraphEntryInstr : public BlockEntryInstr {
public:
GraphEntryInstr(const ParsedFunction& parsed_function,
TargetEntryInstr* normal_entry,
intptr_t osr_id);
DECLARE_INSTRUCTION(GraphEntry)
virtual intptr_t PredecessorCount() const { return 0; }
virtual BlockEntryInstr* PredecessorAt(intptr_t index) const {
UNREACHABLE();
return NULL;
}
virtual intptr_t SuccessorCount() const;
virtual BlockEntryInstr* SuccessorAt(intptr_t index) const;
void AddCatchEntry(CatchBlockEntryInstr* entry) { catch_entries_.Add(entry); }
CatchBlockEntryInstr* GetCatchEntry(intptr_t index);
void AddIndirectEntry(IndirectEntryInstr* entry) {
indirect_entries_.Add(entry);
}
GrowableArray<Definition*>* initial_definitions() {
return &initial_definitions_;
}
ConstantInstr* constant_null();
bool IsCompiledForOsr() const { return osr_id_ != Thread::kNoDeoptId; }
intptr_t entry_count() const { return entry_count_; }
void set_entry_count(intptr_t count) { entry_count_ = count; }
intptr_t spill_slot_count() const { return spill_slot_count_; }
void set_spill_slot_count(intptr_t count) {
ASSERT(count >= 0);
spill_slot_count_ = count;
}
// Number of stack slots reserved for compiling try-catch. For functions
// without try-catch, this is 0. Otherwise, it is the number of local
// variables.
intptr_t fixed_slot_count() const { return fixed_slot_count_; }
void set_fixed_slot_count(intptr_t count) {
ASSERT(count >= 0);
fixed_slot_count_ = count;
}
TargetEntryInstr* normal_entry() const { return normal_entry_; }
const ParsedFunction& parsed_function() const {
return parsed_function_;
}
const GrowableArray<CatchBlockEntryInstr*>& catch_entries() const {
return catch_entries_;
}
const GrowableArray<IndirectEntryInstr*>& indirect_entries() const {
return indirect_entries_;
}
virtual void PrintTo(BufferFormatter* f) const;
private:
virtual void ClearPredecessors() {}
virtual void AddPredecessor(BlockEntryInstr* predecessor) { UNREACHABLE(); }
const ParsedFunction& parsed_function_;
TargetEntryInstr* normal_entry_;
GrowableArray<CatchBlockEntryInstr*> catch_entries_;
// Indirect targets are blocks reachable only through indirect gotos.
GrowableArray<IndirectEntryInstr*> indirect_entries_;
GrowableArray<Definition*> initial_definitions_;
const intptr_t osr_id_;
intptr_t entry_count_;
intptr_t spill_slot_count_;
intptr_t fixed_slot_count_; // For try-catch in optimized code.
DISALLOW_COPY_AND_ASSIGN(GraphEntryInstr);
};
class JoinEntryInstr : public BlockEntryInstr {
public:
JoinEntryInstr(intptr_t block_id, intptr_t try_index)
: BlockEntryInstr(block_id, try_index),
predecessors_(2), // Two is assumed to be the common case.
phis_(NULL) { }
DECLARE_INSTRUCTION(JoinEntry)
virtual intptr_t PredecessorCount() const { return predecessors_.length(); }
virtual BlockEntryInstr* PredecessorAt(intptr_t index) const {
return predecessors_[index];
}
// Returns -1 if pred is not in the list.
intptr_t IndexOfPredecessor(BlockEntryInstr* pred) const;
ZoneGrowableArray<PhiInstr*>* phis() const { return phis_; }
PhiInstr* InsertPhi(intptr_t var_index, intptr_t var_count);
void RemoveDeadPhis(Definition* replacement);
void InsertPhi(PhiInstr* phi);
void RemovePhi(PhiInstr* phi);
virtual void PrintTo(BufferFormatter* f) const;
virtual EffectSet Effects() const { return EffectSet::None(); }
virtual EffectSet Dependencies() const { return EffectSet::None(); }
private:
// Classes that have access to predecessors_ when inlining.
friend class BlockEntryInstr;
friend class InlineExitCollector;
friend class PolymorphicInliner;
friend class IndirectEntryInstr; // Access in il_printer.cc.
// Direct access to phis_ in order to resize it due to phi elimination.
friend class ConstantPropagator;
friend class DeadCodeElimination;
virtual void ClearPredecessors() { predecessors_.Clear(); }
virtual void AddPredecessor(BlockEntryInstr* predecessor);
GrowableArray<BlockEntryInstr*> predecessors_;
ZoneGrowableArray<PhiInstr*>* phis_;
DISALLOW_COPY_AND_ASSIGN(JoinEntryInstr);
};
class PhiIterator : public ValueObject {
public:
explicit PhiIterator(JoinEntryInstr* join)
: phis_(join->phis()), index_(0) { }
void Advance() {
ASSERT(!Done());
index_++;
}
bool Done() const {
return (phis_ == NULL) || (index_ >= phis_->length());
}
PhiInstr* Current() const {
return (*phis_)[index_];
}
private:
ZoneGrowableArray<PhiInstr*>* phis_;
intptr_t index_;
};
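// Typical use (sketch; 'join' is some JoinEntryInstr*):
//
//   for (PhiIterator it(join); !it.Done(); it.Advance()) {
//     PhiInstr* phi = it.Current();
//     ...
//   }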
class TargetEntryInstr : public BlockEntryInstr {
public:
TargetEntryInstr(intptr_t block_id, intptr_t try_index)
: BlockEntryInstr(block_id, try_index),
predecessor_(NULL),
edge_weight_(0.0) { }
DECLARE_INSTRUCTION(TargetEntry)
double edge_weight() const { return edge_weight_; }
void set_edge_weight(double weight) { edge_weight_ = weight; }
void adjust_edge_weight(double scale_factor) { edge_weight_ *= scale_factor; }
virtual intptr_t PredecessorCount() const {
return (predecessor_ == NULL) ? 0 : 1;
}
virtual BlockEntryInstr* PredecessorAt(intptr_t index) const {
ASSERT((index == 0) && (predecessor_ != NULL));
return predecessor_;
}
virtual void PrintTo(BufferFormatter* f) const;
private:
friend class BlockEntryInstr; // Access to predecessor_ when inlining.
virtual void ClearPredecessors() { predecessor_ = NULL; }
virtual void AddPredecessor(BlockEntryInstr* predecessor) {
ASSERT(predecessor_ == NULL);
predecessor_ = predecessor;
}
BlockEntryInstr* predecessor_;
double edge_weight_;
DISALLOW_COPY_AND_ASSIGN(TargetEntryInstr);
};
class IndirectEntryInstr : public JoinEntryInstr {
public:
IndirectEntryInstr(intptr_t block_id,
intptr_t indirect_id,
intptr_t try_index)
: JoinEntryInstr(block_id, try_index),
indirect_id_(indirect_id) { }
DECLARE_INSTRUCTION(IndirectEntry)
virtual void PrintTo(BufferFormatter* f) const;
intptr_t indirect_id() const { return indirect_id_; }
private:
const intptr_t indirect_id_;
};
class CatchBlockEntryInstr : public BlockEntryInstr {
public:
CatchBlockEntryInstr(intptr_t block_id,
intptr_t try_index,
const Array& handler_types,
intptr_t catch_try_index,
const LocalVariable& exception_var,
const LocalVariable& stacktrace_var,
bool needs_stacktrace)
: BlockEntryInstr(block_id, try_index),
predecessor_(NULL),
catch_handler_types_(Array::ZoneHandle(handler_types.raw())),
catch_try_index_(catch_try_index),
exception_var_(exception_var),
stacktrace_var_(stacktrace_var),
needs_stacktrace_(needs_stacktrace) { }
DECLARE_INSTRUCTION(CatchBlockEntry)
virtual intptr_t PredecessorCount() const {
return (predecessor_ == NULL) ? 0 : 1;
}
virtual BlockEntryInstr* PredecessorAt(intptr_t index) const {
ASSERT((index == 0) && (predecessor_ != NULL));
return predecessor_;
}
const LocalVariable& exception_var() const { return exception_var_; }
const LocalVariable& stacktrace_var() const { return stacktrace_var_; }
bool needs_stacktrace() const { return needs_stacktrace_; }
// Returns try index for the try block to which this catch handler
// corresponds.
intptr_t catch_try_index() const {
return catch_try_index_;
}
GrowableArray<Definition*>* initial_definitions() {
return &initial_definitions_;
}
virtual void PrintTo(BufferFormatter* f) const;
private:
friend class BlockEntryInstr; // Access to predecessor_ when inlining.
virtual void ClearPredecessors() { predecessor_ = NULL; }
virtual void AddPredecessor(BlockEntryInstr* predecessor) {
ASSERT(predecessor_ == NULL);
predecessor_ = predecessor;
}
BlockEntryInstr* predecessor_;
const Array& catch_handler_types_;
const intptr_t catch_try_index_;
GrowableArray<Definition*> initial_definitions_;
const LocalVariable& exception_var_;
const LocalVariable& stacktrace_var_;
const bool needs_stacktrace_;
DISALLOW_COPY_AND_ASSIGN(CatchBlockEntryInstr);
};
// If the result of the allocation is not stored into any field, passed as
// an argument, or used in a phi, then it cannot alias with any other SSA
// value.
class AliasIdentity : public ValueObject {
public:
// It is unknown whether the value has aliases.
static AliasIdentity Unknown() { return AliasIdentity(kUnknown); }
// It is known that the value can have aliases.
static AliasIdentity Aliased() { return AliasIdentity(kAliased); }
// It is known that the value has no aliases.
static AliasIdentity NotAliased() { return AliasIdentity(kNotAliased); }
// It is known that the value has no aliases and it was selected by the
// allocation sinking pass as a candidate.
static AliasIdentity AllocationSinkingCandidate() {
return AliasIdentity(kAllocationSinkingCandidate);
}
bool IsUnknown() const { return value_ == kUnknown; }
bool IsAliased() const { return value_ == kAliased; }
bool IsNotAliased() const { return (value_ & kNotAliased) != 0; }
bool IsAllocationSinkingCandidate() const {
return value_ == kAllocationSinkingCandidate;
}
AliasIdentity(const AliasIdentity& other)
: ValueObject(), value_(other.value_) {
}
AliasIdentity& operator=(const AliasIdentity& other) {
value_ = other.value_;
return *this;
}
private:
explicit AliasIdentity(intptr_t value) : value_(value) { }
enum {
kUnknown = 0,
kNotAliased = 1,
kAliased = 2,
kAllocationSinkingCandidate = 3,
};
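// kNotAliased acts as a flag bit: kAllocationSinkingCandidate
// (== kNotAliased | 2) is also treated as not aliased by IsNotAliased(),
// which the asserts below verify.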
COMPILE_ASSERT((kUnknown & kNotAliased) == 0);
COMPILE_ASSERT((kAliased & kNotAliased) == 0);
COMPILE_ASSERT((kAllocationSinkingCandidate & kNotAliased) != 0);
intptr_t value_;
};
// Abstract super-class of all instructions that define a value (Bind, Phi).
class Definition : public Instruction {
public:
explicit Definition(intptr_t deopt_id = Thread::kNoDeoptId);
// Overridden by definitions that have call counts.
virtual intptr_t CallCount() const {
UNREACHABLE();
return -1;
}
intptr_t temp_index() const { return temp_index_; }
void set_temp_index(intptr_t index) { temp_index_ = index; }
void ClearTempIndex() { temp_index_ = -1; }
bool HasTemp() const { return temp_index_ >= 0; }
intptr_t ssa_temp_index() const { return ssa_temp_index_; }
void set_ssa_temp_index(intptr_t index) {
ASSERT(index >= 0);
ssa_temp_index_ = index;
}
bool HasSSATemp() const { return ssa_temp_index_ >= 0; }
void ClearSSATempIndex() { ssa_temp_index_ = -1; }
bool HasPairRepresentation() const {
#if defined(TARGET_ARCH_X64)
return (representation() == kPairOfTagged) ||
(representation() == kPairOfUnboxedDouble);
#else
return (representation() == kPairOfTagged) ||
(representation() == kPairOfUnboxedDouble) ||
(representation() == kUnboxedMint);
#endif
}
// Compile time type of the definition, which may be requested before type
// propagation during graph building.
CompileType* Type() {
if (type_ == NULL) {
type_ = ZoneCompileType::Wrap(ComputeType());
}
return type_;
}
// Does this define a mint?
inline bool IsMintDefinition();
bool IsInt32Definition() {
return IsBinaryInt32Op() ||
IsBoxInt32() ||
IsUnboxInt32() ||
IsUnboxedIntConverter();
}
// Compute the compile type for this definition. It is safe to use this
// approximation even before the type propagator has run (e.g. during graph
// building).
virtual CompileType ComputeType() const {
return CompileType::Dynamic();
}
// Update the CompileType of the definition. Returns true if the type has changed.
virtual bool RecomputeType() {
return false;
}
bool UpdateType(CompileType new_type) {
if (type_ == NULL) {
type_ = ZoneCompileType::Wrap(new_type);
return true;
}
if (type_->IsNone() || !type_->IsEqualTo(&new_type)) {
*type_ = new_type;
return true;
}
return false;
}
bool HasUses() const {
return (input_use_list_ != NULL) || (env_use_list_ != NULL);
}
bool HasOnlyUse(Value* use) const;
bool HasOnlyInputUse(Value* use) const;
Value* input_use_list() const { return input_use_list_; }
void set_input_use_list(Value* head) { input_use_list_ = head; }
Value* env_use_list() const { return env_use_list_; }
void set_env_use_list(Value* head) { env_use_list_ = head; }
void AddInputUse(Value* value) { Value::AddToList(value, &input_use_list_); }
void AddEnvUse(Value* value) { Value::AddToList(value, &env_use_list_); }
// Replace uses of this definition with uses of other definition or value.
// Precondition: use lists must be properly calculated.
// Postcondition: use lists and use values are still valid.
void ReplaceUsesWith(Definition* other);
// Replace this definition and all uses with another definition. If
// replacing during iteration, pass the iterator so that the instruction
// can be replaced without affecting iteration order, otherwise pass a
// NULL iterator.
void ReplaceWith(Definition* other, ForwardInstructionIterator* iterator);
// Printing support. These functions are sometimes overridden for custom
// formatting. Otherwise, they print in the format "opcode(op1, op2, op3)".
virtual void PrintTo(BufferFormatter* f) const;
virtual void PrintOperandsTo(BufferFormatter* f) const;
// A value in the constant propagation lattice.
// - non-constant sentinel
// - a constant (any non-sentinel value)
// - unknown sentinel
Object& constant_value();
virtual void InferRange(RangeAnalysis* analysis, Range* range);
Range* range() const { return range_; }
void set_range(const Range&);
// Definitions can be canonicalized only into other definitions. To ensure
// this statically, we override the base Canonicalize with a Canonicalize
// returning Definition (the return type is covariant).
virtual Definition* Canonicalize(FlowGraph* flow_graph);
static const intptr_t kReplacementMarker = -2;
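// Once a definition has been eliminated, SetReplacement records its
// replacement by setting ssa_temp_index_ to kReplacementMarker and stashing
// the replacement pointer in temp_index_; Replacement() decodes this.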
Definition* Replacement() {
if (ssa_temp_index_ == kReplacementMarker) {
return reinterpret_cast<Definition*>(temp_index_);
}
return this;
}
void SetReplacement(Definition* other) {
ASSERT(ssa_temp_index_ >= 0);
ASSERT(WasEliminated());
ssa_temp_index_ = kReplacementMarker;
temp_index_ = reinterpret_cast<intptr_t>(other);
}
virtual AliasIdentity Identity() const {
return AliasIdentity::Unknown();
}
virtual void SetIdentity(AliasIdentity identity) {
UNREACHABLE();
}
Definition* OriginalDefinition();
virtual Definition* AsDefinition() { return this; }
protected:
friend class RangeAnalysis;
friend class Value;
Range* range_;
CompileType* type_;
private:
intptr_t temp_index_;
intptr_t ssa_temp_index_;
Value* input_use_list_;
Value* env_use_list_;
Object* constant_value_;
DISALLOW_COPY_AND_ASSIGN(Definition);
};
// Change a value's definition after use lists have been computed.
inline void Value::BindTo(Definition* def) {
RemoveFromUseList();
set_definition(def);
def->AddInputUse(this);
}
inline void Value::BindToEnvironment(Definition* def) {
RemoveFromUseList();
set_definition(def);
def->AddEnvUse(this);
}
class PureDefinition : public Definition {
public:
explicit PureDefinition(intptr_t deopt_id)
: Definition(deopt_id) { }
virtual bool AllowsCSE() const { return true; }
virtual EffectSet Dependencies() const { return EffectSet::None(); }
virtual EffectSet Effects() const { return EffectSet::None(); }
};
template<intptr_t N,
typename ThrowsTrait,
template<typename Impure, typename Pure> class CSETrait = NoCSE>
class TemplateDefinition : public CSETrait<Definition, PureDefinition>::Base {
public:
explicit TemplateDefinition(intptr_t deopt_id = Thread::kNoDeoptId)
: CSETrait<Definition, PureDefinition>::Base(deopt_id), inputs_() { }
virtual intptr_t InputCount() const { return N; }
virtual Value* InputAt(intptr_t i) const { return inputs_[i]; }
virtual bool MayThrow() const { return ThrowsTrait::kCanThrow; }
protected:
EmbeddedArray<Value*, N> inputs_;
private:
friend class BranchInstr;
friend class IfThenElseInstr;
virtual void RawSetInputAt(intptr_t i, Value* value) {
inputs_[i] = value;
}
};
struct BranchLabels {
Label* true_label;
Label* false_label;
Label* fall_through;
};
class InductionVariableInfo;
class PhiInstr : public Definition {
public:
PhiInstr(JoinEntryInstr* block, intptr_t num_inputs)
: block_(block),
inputs_(num_inputs),
is_alive_(false),
representation_(kTagged),
reaching_defs_(NULL),
loop_variable_info_(NULL) {
for (intptr_t i = 0; i < num_inputs; ++i) {
inputs_.Add(NULL);
}
}
// Get the block entry for this instruction.
virtual BlockEntryInstr* GetBlock() const { return block(); }
JoinEntryInstr* block() const { return block_; }
virtual CompileType ComputeType() const;
virtual bool RecomputeType();
intptr_t InputCount() const { return inputs_.length(); }
Value* InputAt(intptr_t i) const { return inputs_[i]; }
virtual bool CanDeoptimize() const { return false; }
virtual EffectSet Effects() const { return EffectSet::None(); }
// Phi is alive if it reaches a non-environment use.
bool is_alive() const { return is_alive_; }
void mark_alive() { is_alive_ = true; }
void mark_dead() { is_alive_ = false; }
virtual Representation RequiredInputRepresentation(intptr_t i) const {
return representation_;
}
virtual Representation representation() const {
return representation_;
}
virtual void set_representation(Representation r) {
representation_ = r;
}
virtual intptr_t Hashcode() const {
UNREACHABLE();
return 0;
}
DECLARE_INSTRUCTION(Phi)
virtual void PrintTo(BufferFormatter* f) const;
virtual void InferRange(RangeAnalysis* analysis, Range* range);
BitVector* reaching_defs() const {
return reaching_defs_;
}
void set_reaching_defs(BitVector* reaching_defs) {
reaching_defs_ = reaching_defs;
}
virtual bool MayThrow() const { return false; }
// A phi is redundant if all input operands are the same.
bool IsRedundant() const;
void set_induction_variable_info(InductionVariableInfo* info) {
loop_variable_info_ = info;
}
InductionVariableInfo* induction_variable_info() {
return loop_variable_info_;
}
private:
// Direct access to inputs_ in order to resize it due to unreachable
// predecessors.
friend class ConstantPropagator;
void RawSetInputAt(intptr_t i, Value* value) { inputs_[i] = value; }
JoinEntryInstr* block_;
GrowableArray<Value*> inputs_;
bool is_alive_;
Representation representation_;
BitVector* reaching_defs_;
InductionVariableInfo* loop_variable_info_;
DISALLOW_COPY_AND_ASSIGN(PhiInstr);
};
class ParameterInstr : public Definition {
public:
ParameterInstr(intptr_t index,
BlockEntryInstr* block,
Register base_reg = FPREG)
: index_(index), base_reg_(base_reg), block_(block) { }
DECLARE_INSTRUCTION(Parameter)
intptr_t index() const { return index_; }
Register base_reg() const { return base_reg_; }
// Get the block entry for this instruction.
virtual BlockEntryInstr* GetBlock() const { return block_; }
intptr_t InputCount() const { return 0; }
Value* InputAt(intptr_t i) const {
UNREACHABLE();
return NULL;
}
virtual bool CanDeoptimize() const { return false; }
virtual EffectSet Effects() const { return EffectSet::None(); }
virtual EffectSet Dependencies() const { return EffectSet::None(); }
virtual intptr_t Hashcode() const {
UNREACHABLE();
return 0;
}
virtual void PrintOperandsTo(BufferFormatter* f) const;
virtual CompileType ComputeType() const;
virtual bool MayThrow() const { return false; }
private:
virtual void RawSetInputAt(intptr_t i, Value* value) { UNREACHABLE(); }
const intptr_t index_;
const Register base_reg_;
BlockEntryInstr* block_;
DISALLOW_COPY_AND_ASSIGN(ParameterInstr);
};
class PushArgumentInstr : public TemplateDefinition<1, NoThrow> {
public:
explicit PushArgumentInstr(Value* value) {
SetInputAt(0, value);
}
DECLARE_INSTRUCTION(PushArgument)
virtual CompileType ComputeType() const;
Value* value() const { return InputAt(0); }
virtual bool CanDeoptimize() const { return false; }
virtual EffectSet Effects() const { return EffectSet::None(); }
virtual void PrintOperandsTo(BufferFormatter* f) const;
private:
DISALLOW_COPY_AND_ASSIGN(PushArgumentInstr);
};
inline Definition* Instruction::ArgumentAt(intptr_t index) const {
return PushArgumentAt(index)->value()->definition();
}
class ReturnInstr : public TemplateInstruction<1, NoThrow> {
public:
ReturnInstr(intptr_t token_pos, Value* value)
: TemplateInstruction(Thread::Current()->GetNextDeoptId()),
token_pos_(token_pos) {
SetInputAt(0, value);
}
DECLARE_INSTRUCTION(Return)
virtual intptr_t token_pos() const { return token_pos_; }
Value* value() const { return inputs_[0]; }
virtual bool CanBecomeDeoptimizationTarget() const {
// A Return instruction might turn into a Goto instruction after inlining.
// Every Goto must have an environment.
return true;
}
virtual bool CanDeoptimize() const { return false; }
virtual EffectSet Effects() const { return EffectSet::None(); }
private:
const intptr_t token_pos_;
DISALLOW_COPY_AND_ASSIGN(ReturnInstr);
};
class ThrowInstr : public TemplateInstruction<0, Throws> {
public:
explicit ThrowInstr(intptr_t token_pos)
: TemplateInstruction(Thread::Current()->GetNextDeoptId()),
token_pos_(token_pos) {
}
DECLARE_INSTRUCTION(Throw)
virtual intptr_t ArgumentCount() const { return 1; }
virtual intptr_t token_pos() const { return token_pos_; }
virtual bool CanDeoptimize() const { return true; }
virtual EffectSet Effects() const { return EffectSet::None(); }
private:
const intptr_t token_pos_;
DISALLOW_COPY_AND_ASSIGN(ThrowInstr);
};
class ReThrowInstr : public TemplateInstruction<0, Throws> {
public:
// 'catch_try_index' can be CatchClauseNode::kInvalidTryIndex if the
// rethrow has been artificially generated by the parser.
ReThrowInstr(intptr_t token_pos, intptr_t catch_try_index)
: TemplateInstruction(Thread::Current()->GetNextDeoptId()),
token_pos_(token_pos),
catch_try_index_(catch_try_index) {
}
DECLARE_INSTRUCTION(ReThrow)
virtual intptr_t ArgumentCount() const { return 2; }
virtual intptr_t token_pos() const { return token_pos_; }
intptr_t catch_try_index() const { return catch_try_index_; }
virtual bool CanDeoptimize() const { return true; }
virtual EffectSet Effects() const { return EffectSet::None(); }
private:
const intptr_t token_pos_;
const intptr_t catch_try_index_;
DISALLOW_COPY_AND_ASSIGN(ReThrowInstr);
};
class StopInstr : public TemplateInstruction<0, NoThrow> {
public:
explicit StopInstr(const char* message)
: message_(message) {
ASSERT(message != NULL);
}
const char* message() const { return message_; }
DECLARE_INSTRUCTION(Stop);
virtual intptr_t ArgumentCount() const { return 0; }
virtual bool CanDeoptimize() const { return false; }
virtual EffectSet Effects() const { return EffectSet::None(); }
virtual EffectSet Dependencies() const { return EffectSet::None(); }
private:
const char* message_;
DISALLOW_COPY_AND_ASSIGN(StopInstr);
};
class GotoInstr : public TemplateInstruction<0, NoThrow> {
public:
explicit GotoInstr(JoinEntryInstr* entry)
: TemplateInstruction(Thread::Current()->GetNextDeoptId()),
block_(NULL),
successor_(entry),
edge_weight_(0.0),
parallel_move_(NULL) {
}
DECLARE_INSTRUCTION(Goto)
BlockEntryInstr* block() const { return block_; }
void set_block(BlockEntryInstr* block) { block_ = block; }
JoinEntryInstr* successor() const { return successor_; }
void set_successor(JoinEntryInstr* successor) { successor_ = successor; }
virtual intptr_t SuccessorCount() const;
virtual BlockEntryInstr* SuccessorAt(intptr_t index) const;
double edge_weight() const { return edge_weight_; }
void set_edge_weight(double weight) { edge_weight_ = weight; }
void adjust_edge_weight(double scale_factor) { edge_weight_ *= scale_factor; }
virtual bool CanBecomeDeoptimizationTarget() const {
// Goto instruction can be used as a deoptimization target when LICM
// hoists instructions out of the loop.
return true;
}
virtual bool CanDeoptimize() const { return false; }
virtual EffectSet Effects() const { return EffectSet::None(); }
ParallelMoveInstr* parallel_move() const {
return parallel_move_;
}
bool HasParallelMove() const {
return parallel_move_ != NULL;
}
bool HasNonRedundantParallelMove() const {
return HasParallelMove() && !parallel_move()->IsRedundant();
}
ParallelMoveInstr* GetParallelMove() {
if (parallel_move_ == NULL) {
parallel_move_ = new ParallelMoveInstr();
}
return parallel_move_;
}
virtual void PrintTo(BufferFormatter* f) const;
private:
BlockEntryInstr* block_;
JoinEntryInstr* successor_;
double edge_weight_;
// Parallel move that will be used by linear scan register allocator to
// connect live ranges at the end of the block and resolve phis.
ParallelMoveInstr* parallel_move_;
};
// IndirectGotoInstr represents a dynamically computed jump. Only
// IndirectEntryInstr targets are valid targets of an indirect goto. The
// concrete target to jump to is given as a parameter to the indirect goto.
//
// In order to preserve split-edge form, an indirect goto does not itself point
// to its targets. Instead, for each possible target, the successors_ field
// will contain an ordinary goto instruction that jumps to the target.
// TODO(zerny): Implement direct support instead of embedding gotos.
//
// Byte offsets of all possible targets are stored in the offsets_ array. The
// desired offset is looked up while the generated code is executing, and passed
// to IndirectGoto as an input.
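// A sketch of the resulting flow graph shape (block names illustrative):
//
//   IndirectGoto(offset)
//     successors_[0]: TargetEntry --Goto--> IndirectEntry (target 0)
//     successors_[1]: TargetEntry --Goto--> IndirectEntry (target 1)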
class IndirectGotoInstr : public TemplateInstruction<1, NoThrow> {
public:
IndirectGotoInstr(TypedData* offsets,
Value* offset_from_start)
: offsets_(*offsets) {
SetInputAt(0, offset_from_start);
}
DECLARE_INSTRUCTION(IndirectGoto)
virtual Representation RequiredInputRepresentation(intptr_t idx) const {
ASSERT(idx == 0);
return kNoRepresentation;
}
virtual intptr_t ArgumentCount() const { return 0; }
void AddSuccessor(TargetEntryInstr* successor) {
ASSERT(successor->next()->IsGoto());
ASSERT(successor->next()->AsGoto()->successor()->IsIndirectEntry());
successors_.Add(successor);
}
virtual intptr_t SuccessorCount() const { return successors_.length(); }
virtual TargetEntryInstr* SuccessorAt(intptr_t index) const {
ASSERT(index < SuccessorCount());
return successors_[index];
}
virtual bool CanDeoptimize() const { return false; }
virtual bool CanBecomeDeoptimizationTarget() const { return false; }
virtual EffectSet Effects() const { return EffectSet::None(); }
virtual void PrintTo(BufferFormatter* f) const;
Value* offset() const { return inputs_[0]; }
void ComputeOffsetTable();
private:
GrowableArray<TargetEntryInstr*> successors_;
TypedData& offsets_;
};
class ComparisonInstr : public TemplateDefinition<2, NoThrow, Pure> {
public:
Value* left() const { return inputs_[0]; }
Value* right() const { return inputs_[1]; }
virtual intptr_t token_pos() const { return token_pos_; }
Token::Kind kind() const { return kind_; }
virtual ComparisonInstr* CopyWithNewOperands(Value* left, Value* right) = 0;
virtual void EmitBranchCode(FlowGraphCompiler* compiler,
BranchInstr* branch) = 0;
virtual Condition EmitComparisonCode(FlowGraphCompiler* compiler,
BranchLabels labels) = 0;
void SetDeoptId(const Instruction& instr) {
CopyDeoptIdFrom(instr);
}
// Operation class id is computed from collected ICData.
void set_operation_cid(intptr_t value) { operation_cid_ = value; }
intptr_t operation_cid() const { return operation_cid_; }
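// Replace the comparison kind with its negation, e.g. kEQ becomes kNE.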
void NegateComparison() {
kind_ = Token::NegateComparison(kind_);
}
virtual bool CanBecomeDeoptimizationTarget() const { return true; }
virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }
virtual bool AttributesEqual(Instruction* other) const {
ComparisonInstr* other_comparison = other->AsComparison();
return kind() == other_comparison->kind() &&
(operation_cid() == other_comparison->operation_cid());
}
DEFINE_INSTRUCTION_TYPE_CHECK(Comparison)
protected:
ComparisonInstr(intptr_t token_pos,
Token::Kind kind,
Value* left,
Value* right,
intptr_t deopt_id = Thread::kNoDeoptId)
: TemplateDefinition(deopt_id),
token_pos_(token_pos),
kind_(kind),
operation_cid_(kIllegalCid) {
SetInputAt(0, left);
if (right != NULL) {
SetInputAt(1, right);
}
}
private:
const intptr_t token_pos_;
Token::Kind kind_;
intptr_t operation_cid_; // Set by optimizer.
DISALLOW_COPY_AND_ASSIGN(ComparisonInstr);
};
class BranchInstr : public Instruction {
public:
explicit BranchInstr(ComparisonInstr* comparison)
: Instruction(Thread::Current()->GetNextDeoptId()),
comparison_(comparison),
is_checked_(false),
constrained_type_(NULL),
constant_target_(NULL) {
ASSERT(comparison->env() == NULL);
for (intptr_t i = comparison->InputCount() - 1; i >= 0; --i) {
comparison->InputAt(i)->set_instruction(this);
}
}
DECLARE_INSTRUCTION(Branch)
virtual intptr_t ArgumentCount() const {
return comparison()->ArgumentCount();
}
intptr_t InputCount() const { return comparison()->InputCount(); }
Value* InputAt(intptr_t i) const { return comparison()->InputAt(i); }
virtual intptr_t token_pos() const { return comparison_->token_pos(); }
virtual bool CanDeoptimize() const {
// Branches need a deoptimization info in checked mode if they
// can throw a type check error.
return comparison()->CanDeoptimize() || is_checked();
}
virtual bool CanBecomeDeoptimizationTarget() const {
return comparison()->CanBecomeDeoptimizationTarget();
}
virtual EffectSet Effects() const {
return comparison()->Effects();
}
ComparisonInstr* comparison() const { return comparison_; }
void SetComparison(ComparisonInstr* comp);
void set_is_checked(bool value) { is_checked_ = value; }
bool is_checked() const { return is_checked_; }
virtual intptr_t DeoptimizationTarget() const {
return comparison()->DeoptimizationTarget();
}
virtual Representation RequiredInputRepresentation(intptr_t i) const {
return comparison()->RequiredInputRepresentation(i);
}
virtual Instruction* Canonicalize(FlowGraph* flow_graph);
virtual void PrintTo(BufferFormatter* f) const;
// Set compile type constrained by the comparison of this branch.
// FlowGraphTypePropagator propagates it downwards into either true or false
// successor.
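// For example (illustrative): for a branch on "x == null" the true
// successor can observe x constrained to null and the false successor
// can observe x constrained to be non-null.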
void set_constrained_type(ConstrainedCompileType* type) {
constrained_type_ = type;
}
// Return compile type constrained by the comparison of this branch.
ConstrainedCompileType* constrained_type() const {
return constrained_type_;
}
void set_constant_target(TargetEntryInstr* target) {
ASSERT(target == true_successor() || target == false_successor());
constant_target_ = target;
}
TargetEntryInstr* constant_target() const {
return constant_target_;
}
virtual void InheritDeoptTarget(Zone* zone, Instruction* other);
virtual bool MayThrow() const {
return comparison()->MayThrow();
}
TargetEntryInstr* true_successor() const { return true_successor_; }
TargetEntryInstr* false_successor() const { return false_successor_; }
TargetEntryInstr** true_successor_address() { return &true_successor_; }
TargetEntryInstr** false_successor_address() { return &false_successor_; }
virtual intptr_t SuccessorCount() const;
virtual BlockEntryInstr* SuccessorAt(intptr_t index) const;
private:
virtual void RawSetInputAt(intptr_t i, Value* value) {
comparison()->RawSetInputAt(i, value);
}
TargetEntryInstr* true_successor_;
TargetEntryInstr* false_successor_;
ComparisonInstr* comparison_;
bool is_checked_;
ConstrainedCompileType* constrained_type_;
TargetEntryInstr* constant_target_;
DISALLOW_COPY_AND_ASSIGN(BranchInstr);
};
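// Unconditionally deoptimizes when executed, transferring control to
// unoptimized code with the given reason; used, e.g., for paths the
// optimizer chooses not to compile.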
class DeoptimizeInstr : public TemplateInstruction<0, NoThrow, Pure> {
public:
DeoptimizeInstr(ICData::DeoptReasonId deopt_reason, intptr_t deopt_id)
: TemplateInstruction(deopt_id),
deopt_reason_(deopt_reason) {
}
virtual bool CanDeoptimize() const { return true; }
virtual bool AttributesEqual(Instruction* other) const {
return true;
}
DECLARE_INSTRUCTION(Deoptimize)
private:
const ICData::DeoptReasonId deopt_reason_;
DISALLOW_COPY_AND_ASSIGN(DeoptimizeInstr);
};
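// Represents a redefinition of a value at a point in the flow graph, e.g.
// after a dominating check, so that uses dominated by this point observe
// the refined compile type (see ComputeType/RecomputeType below).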
class RedefinitionInstr : public TemplateDefinition<1, NoThrow> {
public:
explicit RedefinitionInstr(Value* value) {
SetInputAt(0, value);
}
DECLARE_INSTRUCTION(Redefinition)
Value* value() const { return inputs_[0]; }
virtual CompileType ComputeType() const;
virtual bool RecomputeType();
virtual bool CanDeoptimize() const { return false; }
virtual EffectSet Dependencies() const { return EffectSet::None(); }
virtual EffectSet Effects() const { return EffectSet::None(); }
private:
DISALLOW_COPY_AND_ASSIGN(RedefinitionInstr);
};
class ConstraintInstr : public TemplateDefinition<1, NoThrow> {
public:
ConstraintInstr(Value* value, Range* constraint)
: constraint_(constraint),
target_(NULL) {
SetInputAt(0, value);
}
DECLARE_INSTRUCTION(Constraint)
virtual CompileType ComputeType() const;
virtual bool CanDeoptimize() const { return false; }
virtual EffectSet Effects() const { return EffectSet::None(); }
virtual bool AttributesEqual(Instruction* other) const {
UNREACHABLE();
return false;
}
virtual void PrintOperandsTo(BufferFormatter* f) const;
Value* value() const { return inputs_[0]; }
Range* constraint() const { return constraint_; }
virtual void InferRange(RangeAnalysis* analysis, Range* range);
// Constraints for branches have their target block stored in order
// to find the comparison that generated the constraint:
// target->predecessor->last_instruction->comparison.
void set_target(TargetEntryInstr* target) {
target_ = target;
}
TargetEntryInstr* target() const {
return target_;
}
private:
Range* constraint_;
TargetEntryInstr* target_;
DISALLOW_COPY_AND_ASSIGN(ConstraintInstr);
};
class ConstantInstr : public TemplateDefinition<0, NoThrow, Pure> {
public:
explicit ConstantInstr(const Object& value);
DECLARE_INSTRUCTION(Constant)
virtual CompileType ComputeType() const;
virtual Definition* Canonicalize(FlowGraph* flow_graph);
const Object& value() const { return value_; }
virtual void PrintOperandsTo(BufferFormatter* f) const;
virtual bool CanDeoptimize() const { return false; }
virtual void InferRange(RangeAnalysis* analysis, Range* range);
virtual bool AttributesEqual(Instruction* other) const;
private:
const Object& value_;
DISALLOW_COPY_AND_ASSIGN(ConstantInstr);
};
// Represents a ConstantInstr merged with a subsequent unboxing operation
// into a single UnboxedConstantInstr.
// TODO(srdjan): Currently implemented for doubles only; implement for
// other unboxing instructions as well.
class UnboxedConstantInstr : public ConstantInstr {
public:
explicit UnboxedConstantInstr(const Object& value,
Representation representation);
virtual Representation representation() const {
return representation_;
}
// Either 0 or the address of the unboxed constant.
uword constant_address() const { return constant_address_; }
DECLARE_INSTRUCTION(UnboxedConstant)
private:
const Representation representation_;
uword constant_address_; // Either 0 or points to the untagged constant.
DISALLOW_COPY_AND_ASSIGN(UnboxedConstantInstr);
};
class AssertAssignableInstr : public TemplateDefinition<3, Throws, Pure> {
public:
AssertAssignableInstr(intptr_t token_pos,
Value* value,
Value* instantiator,
Value* instantiator_type_arguments,
const AbstractType& dst_type,
const String& dst_name,
intptr_t deopt_id)
: TemplateDefinition(deopt_id),
token_pos_(token_pos),
dst_type_(AbstractType::ZoneHandle(dst_type.raw())),
dst_name_(dst_name) {
ASSERT(!dst_type.IsNull());
ASSERT(!dst_name.IsNull());
SetInputAt(0, value);
SetInputAt(1, instantiator);
SetInputAt(2, instantiator_type_arguments);
}
DECLARE_INSTRUCTION(AssertAssignable)
virtual CompileType ComputeType() const;
virtual bool RecomputeType();
Value* value() const { return inputs_[0]; }
Value* instantiator() const { return inputs_[1]; }
Value* instantiator_type_arguments() const { return inputs_[2]; }
virtual intptr_t token_pos() const { return token_pos_; }
const AbstractType& dst_type() const { return dst_type_; }
void set_dst_type(const AbstractType& dst_type) {
dst_type_ = dst_type.raw();
}
const String& dst_name() const { return dst_name_; }
virtual void PrintOperandsTo(BufferFormatter* f) const;
virtual bool CanDeoptimize() const { return true; }
virtual bool CanBecomeDeoptimizationTarget() const {
// AssertAssignable instructions that are specialized by the optimizer
// (e.g. replaced with CheckClass) need a deoptimization descriptor placed
// before them.
return true;
}
virtual Definition* Canonicalize(FlowGraph* flow_graph);
virtual bool AttributesEqual(Instruction* other) const;
private:
const intptr_t token_pos_;
AbstractType& dst_type_;
const String& dst_name_;
DISALLOW_COPY_AND_ASSIGN(AssertAssignableInstr);
};
class AssertBooleanInstr : public TemplateDefinition<1, Throws, Pure> {
public:
AssertBooleanInstr(intptr_t token_pos, Value* value)
: TemplateDefinition(Thread::Current()->GetNextDeoptId()),
token_pos_(token_pos) {
SetInputAt(0, value);
}
DECLARE_INSTRUCTION(AssertBoolean)
virtual CompileType ComputeType() const;
virtual intptr_t token_pos() const { return token_pos_; }
Value* value() const { return inputs_[0]; }
virtual void PrintOperandsTo(BufferFormatter* f) const;
virtual bool CanDeoptimize() const { return true; }
virtual Definition* Canonicalize(FlowGraph* flow_graph);
virtual bool AttributesEqual(Instruction* other) const { return true; }
private:
const intptr_t token_pos_;
DISALLOW_COPY_AND_ASSIGN(AssertBooleanInstr);
};
// Denotes the current context, normally held in a register. This is
// a computation, not a value, because it's mutable.
class CurrentContextInstr : public TemplateDefinition<0, NoThrow> {
public:
CurrentContextInstr()
: TemplateDefinition(Thread::Current()->GetNextDeoptId()) {
}
DECLARE_INSTRUCTION(CurrentContext)
virtual CompileType ComputeType() const;
virtual bool CanDeoptimize() const { return false; }
virtual EffectSet Effects() const { return EffectSet::None(); }
virtual EffectSet Dependencies() const { return EffectSet::None(); }
virtual bool AttributesEqual(Instruction* other) const { return true; }
private:
DISALLOW_COPY_AND_ASSIGN(CurrentContextInstr);
};
class ClosureCallInstr : public TemplateDefinition<1, Throws> {
public:
ClosureCallInstr(Value* function,
ClosureCallNode* node,
ZoneGrowableArray<PushArgumentInstr*>* arguments)
: TemplateDefinition(Thread::Current()->GetNextDeoptId()),
ast_node_(*node),
arguments_(arguments) {
SetInputAt(0, function);
}
DECLARE_INSTRUCTION(ClosureCall)
const Array& argument_names() const { return ast_node_.arguments()->names(); }
virtual intptr_t token_pos() const { return ast_node_.token_pos(); }
virtual intptr_t ArgumentCount() const { return arguments_->length(); }
virtual PushArgumentInstr* PushArgumentAt(intptr_t index) const {
return (*arguments_)[index];
}
// TODO(kmillikin): implement exact call counts for closure calls.
virtual intptr_t CallCount() const { return 1; }
virtual void PrintOperandsTo(BufferFormatter* f) const;
virtual bool CanDeoptimize() const { return true; }
virtual EffectSet Effects() const { return EffectSet::All(); }
private:
const ClosureCallNode& ast_node_;
ZoneGrowableArray<PushArgumentInstr*>* arguments_;
DISALLOW_COPY_AND_ASSIGN(ClosureCallInstr);
};
class InstanceCallInstr : public TemplateDefinition<0, Throws> {
public:
InstanceCallInstr(intptr_t token_pos,
const String& function_name,
Token::Kind token_kind,
ZoneGrowableArray<PushArgumentInstr*>* arguments,
const Array& argument_names,
intptr_t checked_argument_count,
const ZoneGrowableArray<const ICData*>& ic_data_array)
: TemplateDefinition(Thread::Current()->GetNextDeoptId()),
ic_data_(NULL),
token_pos_(token_pos),
function_name_(function_name),
token_kind_(token_kind),
arguments_(arguments),
argument_names_(argument_names),
checked_argument_count_(checked_argument_count) {
ic_data_ = GetICData(ic_data_array);
ASSERT(function_name.IsNotTemporaryScopedHandle());
ASSERT(!arguments->is_empty());
ASSERT(argument_names.IsZoneHandle() || argument_names.InVMHeap());
ASSERT(Token::IsBinaryOperator(token_kind) ||
Token::IsEqualityOperator(token_kind) ||
Token::IsRelationalOperator(token_kind) ||
Token::IsUnaryOperator(token_kind) ||
Token::IsIndexOperator(token_kind) ||
Token::IsTypeTestOperator(token_kind) ||
Token::IsTypeCastOperator(token_kind) ||
token_kind == Token::kGET ||
token_kind == Token::kSET ||
token_kind == Token::kILLEGAL);
}
DECLARE_INSTRUCTION(InstanceCall)
const ICData* ic_data() const { return ic_data_; }
bool HasICData() const {
return (ic_data() != NULL) && !ic_data()->IsNull();
}
// ICData can be replaced by optimizer.
void set_ic_data(const ICData* value) { ic_data_ = value; }
virtual intptr_t token_pos() const { return token_pos_; }
const String& function_name() const { return function_name_; }
Token::Kind token_kind() const { return token_kind_; }
virtual intptr_t ArgumentCount() const { return arguments_->length(); }
virtual PushArgumentInstr* PushArgumentAt(intptr_t index) const {
return (*arguments_)[index];
}
const Array& argument_names() const { return argument_names_; }
intptr_t checked_argument_count() const { return checked_argument_count_; }
virtual void PrintOperandsTo(BufferFormatter* f) const;
virtual bool CanDeoptimize() const { return true; }
virtual bool CanBecomeDeoptimizationTarget() const {
// Instance calls that are specialized by the optimizer need a
// deoptimization descriptor before the call.
return true;
}
virtual EffectSet Effects() const { return EffectSet::All(); }
protected:
friend class FlowGraphOptimizer;
void set_ic_data(ICData* value) { ic_data_ = value; }
private:
const ICData* ic_data_;
const intptr_t token_pos_;
const String& function_name_;
const Token::Kind token_kind_; // Binary, unary, index, type test/cast op, kGET, kSET or kILLEGAL.
ZoneGrowableArray<PushArgumentInstr*>* const arguments_;
const Array& argument_names_;
const intptr_t checked_argument_count_;
DISALLOW_COPY_AND_ASSIGN(InstanceCallInstr);
};
class PolymorphicInstanceCallInstr : public TemplateDefinition<0, Throws> {
public:
PolymorphicInstanceCallInstr(InstanceCallInstr* instance_call,
const ICData& ic_data,
bool with_checks)
: TemplateDefinition(instance_call->deopt_id()),
instance_call_(instance_call),
ic_data_(ic_data),
with_checks_(with_checks) {
ASSERT(instance_call_ != NULL);
ASSERT(ic_data.NumberOfChecks() > 0);
}
InstanceCallInstr* instance_call() const { return instance_call_; }
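// When with_checks is false the receiver's class id is already known and
// the call is emitted without class id tests.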
bool with_checks() const { return with_checks_; }
virtual intptr_t token_pos() const { return instance_call_->token_pos(); }
virtual intptr_t ArgumentCount() const {
return instance_call()->ArgumentCount();
}
virtual PushArgumentInstr* PushArgumentAt(intptr_t index) const {
return instance_call()->PushArgumentAt(index);
}
bool HasSingleRecognizedTarget() const;
bool HasOnlyDispatcherTargets() const;
virtual intptr_t CallCount() const { return ic_data().AggregateCount(); }
DECLARE_INSTRUCTION(PolymorphicInstanceCall)
const ICData& ic_data() const { return ic_data_; }
virtual bool CanDeoptimize() const { return true; }
virtual EffectSet Effects() const { return EffectSet::All(); }
virtual void PrintOperandsTo(BufferFormatter* f) const;
private:
InstanceCallInstr* instance_call_;
const ICData& ic_data_;
const bool with_checks_;
DISALLOW_COPY_AND_ASSIGN(PolymorphicInstanceCallInstr);
};
class StrictCompareInstr : public ComparisonInstr {
public:
StrictCompareInstr(intptr_t token_pos,
Token::Kind kind,
Value* left,
Value* right,
bool needs_number_check);
DECLARE_INSTRUCTION(StrictCompare)
virtual ComparisonInstr* CopyWithNewOperands(Value* left, Value* right);
virtual CompileType ComputeType() const;
virtual void PrintOperandsTo(BufferFormatter* f) const;
virtual bool CanDeoptimize() const { return false; }
virtual Definition* Canonicalize(FlowGraph* flow_graph);
virtual void EmitBranchCode(FlowGraphCompiler* compiler,
BranchInstr* branch);
virtual Condition EmitComparisonCode(FlowGraphCompiler* compiler,
BranchLabels labels);
bool needs_number_check() const { return needs_number_check_; }
void set_needs_number_check(bool value) { needs_number_check_ = value; }
bool AttributesEqual(Instruction* other) const;
private:
// True if the comparison must check for double, Mint or Bigint and
// use value comparison instead.
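// For example, identical(1.0, 1.0) must be true even if the two doubles
// are distinct heap objects, so equal-valued numbers have to be compared
// by value.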
bool needs_number_check_;
DISALLOW_COPY_AND_ASSIGN(StrictCompareInstr);
};
// Comparison instruction that implements the (left & right) == 0 (or != 0,
// depending on kind) comparison pattern.
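// For example (illustrative), the optimizer may use this for Smi bit
// tests such as
//   (flags & mask) == 0
// which can map to a single machine "test" instruction.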
class TestSmiInstr : public ComparisonInstr {
public:
TestSmiInstr(intptr_t token_pos, Token::Kind kind, Value* left, Value* right)
: ComparisonInstr(token_pos, kind, left, right) {
ASSERT(kind == Token::kEQ || kind == Token::kNE);
}
DECLARE_INSTRUCTION(TestSmi)
virtual ComparisonInstr* CopyWithNewOperands(Value* left,