| // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
| // for details. All rights reserved. Use of this source code is governed by a |
| // BSD-style license that can be found in the LICENSE file. |
| |
| #ifndef RUNTIME_VM_COMPILER_BACKEND_IL_H_ |
| #define RUNTIME_VM_COMPILER_BACKEND_IL_H_ |
| |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| #error "AOT runtime should not use compiler sources (including header files)" |
| #endif // defined(DART_PRECOMPILED_RUNTIME) |
| |
| #include <memory> |
| #include <tuple> |
| #include <utility> |
| |
| #include "vm/allocation.h" |
| #include "vm/code_descriptors.h" |
| #include "vm/compiler/backend/compile_type.h" |
| #include "vm/compiler/backend/locations.h" |
| #include "vm/compiler/backend/slot.h" |
| #include "vm/compiler/compiler_pass.h" |
| #include "vm/compiler/compiler_state.h" |
| #include "vm/compiler/ffi/marshaller.h" |
| #include "vm/compiler/ffi/native_calling_convention.h" |
| #include "vm/compiler/ffi/native_location.h" |
| #include "vm/compiler/ffi/native_type.h" |
| #include "vm/compiler/method_recognizer.h" |
| #include "vm/dart_entry.h" |
| #include "vm/flags.h" |
| #include "vm/growable_array.h" |
| #include "vm/native_entry.h" |
| #include "vm/object.h" |
| #include "vm/parser.h" |
| #include "vm/runtime_entry.h" |
| #include "vm/static_type_exactness_state.h" |
| #include "vm/token_position.h" |
| |
| namespace dart { |
| |
| class BaseTextBuffer; |
| class BinaryFeedback; |
| class BitVector; |
| class BlockEntryInstr; |
| class BlockEntryWithInitialDefs; |
| class BoxIntegerInstr; |
| class CallTargets; |
| class CatchBlockEntryInstr; |
| class CheckBoundBase; |
| class ComparisonInstr; |
| class Definition; |
| class Environment; |
| class FlowGraph; |
| class FlowGraphCompiler; |
| class FlowGraphVisitor; |
| class ForwardInstructionIterator; |
| class Instruction; |
| class InstructionVisitor; |
| class LocalVariable; |
| class LoopInfo; |
| class ParsedFunction; |
| class Range; |
| class RangeAnalysis; |
| class RangeBoundary; |
| class TypeUsageInfo; |
| class UnboxIntegerInstr; |
| |
| namespace compiler { |
| class BlockBuilder; |
| struct TableSelector; |
| } // namespace compiler |
| |
| class Value : public ZoneAllocated { |
| public: |
| // A forward iterator that allows removing the current value from the |
| // underlying use list during iteration. |
| class Iterator { |
| public: |
| explicit Iterator(Value* head) : next_(head) { Advance(); } |
| Value* Current() const { return current_; } |
| bool Done() const { return current_ == NULL; } |
| void Advance() { |
| // Pre-fetch next on advance and cache it. |
| current_ = next_; |
| if (next_ != NULL) next_ = next_->next_use(); |
| } |
| |
| private: |
| Value* current_; |
| Value* next_; |
| }; |
| |
| explicit Value(Definition* definition) |
| : definition_(definition), |
| previous_use_(NULL), |
| next_use_(NULL), |
| instruction_(NULL), |
| use_index_(-1), |
| reaching_type_(NULL) {} |
| |
| Definition* definition() const { return definition_; } |
| void set_definition(Definition* definition) { |
| definition_ = definition; |
| // Clone the reaching type if there was one and the owner no longer matches |
| // this value's definition. |
| SetReachingType(reaching_type_); |
| } |
| |
| Value* previous_use() const { return previous_use_; } |
| void set_previous_use(Value* previous) { previous_use_ = previous; } |
| |
| Value* next_use() const { return next_use_; } |
| void set_next_use(Value* next) { next_use_ = next; } |
| |
| bool IsSingleUse() const { |
| return (next_use_ == NULL) && (previous_use_ == NULL); |
| } |
| |
| Instruction* instruction() const { return instruction_; } |
| void set_instruction(Instruction* instruction) { instruction_ = instruction; } |
| |
| intptr_t use_index() const { return use_index_; } |
| void set_use_index(intptr_t index) { use_index_ = index; } |
| |
| static void AddToList(Value* value, Value** list); |
| void RemoveFromUseList(); |
| |
| // Change the definition after use lists have been computed. |
| inline void BindTo(Definition* definition); |
| inline void BindToEnvironment(Definition* definition); |
| |
| Value* Copy(Zone* zone) { return new (zone) Value(definition_); } |
| |
| // CopyWithType() must only be used when the new Value is dominated by |
| // the original Value. |
| Value* CopyWithType(Zone* zone) { |
| Value* copy = new (zone) Value(definition_); |
| copy->reaching_type_ = reaching_type_; |
| return copy; |
| } |
| Value* CopyWithType() { return CopyWithType(Thread::Current()->zone()); } |
| |
| CompileType* Type(); |
| |
| CompileType* reaching_type() const { return reaching_type_; } |
| void SetReachingType(CompileType* type); |
| void RefineReachingType(CompileType* type); |
| |
| #if defined(INCLUDE_IL_PRINTER) |
| void PrintTo(BaseTextBuffer* f) const; |
| #endif // defined(INCLUDE_IL_PRINTER) |
| |
| const char* ToCString() const; |
| |
| bool IsSmiValue() { return Type()->ToCid() == kSmiCid; } |
| |
| // Return true if the value represents a constant. |
| bool BindsToConstant() const; |
| |
| // Return true if the value represents the constant null. |
| bool BindsToConstantNull() const; |
| |
| // Asserts if BindsToConstant() is false; otherwise returns the constant value. |
| const Object& BoundConstant() const; |
| |
| // Return true if the value represents a Smi constant. |
| bool BindsToSmiConstant() const; |
| |
| // Return the value of the represented Smi constant. |
| intptr_t BoundSmiConstant() const; |
| |
| // Return true if storing the value into a heap object requires applying the |
| // write barrier. Can change the reaching type of the Value or other Values |
| // in the same chain of redefinitions. |
| bool NeedsWriteBarrier(); |
| |
| bool Equals(const Value& other) const; |
| |
| // Returns true if this |Value| can evaluate to the given |value| during |
| // execution. |
| inline bool CanBe(const Object& value); |
| |
| private: |
| friend class FlowGraphPrinter; |
| |
| Definition* definition_; |
| Value* previous_use_; |
| Value* next_use_; |
| Instruction* instruction_; |
| intptr_t use_index_; |
| |
| CompileType* reaching_type_; |
| |
| DISALLOW_COPY_AND_ASSIGN(Value); |
| }; |
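| |
| // A minimal usage sketch (illustrative only, not part of the IL API): walking |
| // a use list with Value::Iterator, which tolerates removal of the current use |
| // during the walk. `head` stands for the first Value in some definition's use |
| // list and `ShouldDetach` is a hypothetical predicate. |
| // |
| //   for (Value::Iterator it(head); !it.Done(); it.Advance()) { |
| //     Value* use = it.Current(); |
| //     if (ShouldDetach(use)) { |
| //       use->RemoveFromUseList();  // Safe: the iterator pre-fetched next. |
| //     } |
| //   } |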
| |
| // Represents a range of class-ids for use in class checks and polymorphic |
| // dispatches. The range includes both ends, i.e. it is [cid_start, cid_end]. |
| struct CidRange : public ZoneAllocated { |
| CidRange(intptr_t cid_start_arg, intptr_t cid_end_arg) |
| : cid_start(cid_start_arg), cid_end(cid_end_arg) {} |
| CidRange() : cid_start(kIllegalCid), cid_end(kIllegalCid) {} |
| |
| bool IsSingleCid() const { return cid_start == cid_end; } |
| bool Contains(intptr_t cid) const { |
| return cid_start <= cid && cid <= cid_end; |
| } |
| int32_t Extent() const { return cid_end - cid_start; } |
| |
| // The number of class ids this range covers. |
| intptr_t size() const { return cid_end - cid_start + 1; } |
| |
| bool IsIllegalRange() const { |
| return cid_start == kIllegalCid && cid_end == kIllegalCid; |
| } |
| |
| intptr_t cid_start; |
| intptr_t cid_end; |
| |
| DISALLOW_COPY_AND_ASSIGN(CidRange); |
| }; |
| |
| struct CidRangeValue { |
| CidRangeValue(intptr_t cid_start_arg, intptr_t cid_end_arg) |
| : cid_start(cid_start_arg), cid_end(cid_end_arg) {} |
| CidRangeValue(const CidRange& other) // NOLINT |
| : cid_start(other.cid_start), cid_end(other.cid_end) {} |
| |
| bool IsSingleCid() const { return cid_start == cid_end; } |
| bool Contains(intptr_t cid) const { |
| return cid_start <= cid && cid <= cid_end; |
| } |
| int32_t Extent() const { return cid_end - cid_start; } |
| |
| // The number of class ids this range covers. |
| intptr_t size() const { return cid_end - cid_start + 1; } |
| |
| bool IsIllegalRange() const { |
| return cid_start == kIllegalCid && cid_end == kIllegalCid; |
| } |
| |
| bool Equals(const CidRangeValue& other) const { |
| return cid_start == other.cid_start && cid_end == other.cid_end; |
| } |
| |
| intptr_t cid_start; |
| intptr_t cid_end; |
| }; |
| |
| typedef MallocGrowableArray<CidRangeValue> CidRangeVector; |
| |
| class CidRangeVectorUtils : public AllStatic { |
| public: |
| static bool ContainsCid(const CidRangeVector& ranges, intptr_t cid) { |
| for (const CidRangeValue& range : ranges) { |
| if (range.Contains(cid)) { |
| return true; |
| } |
| } |
| return false; |
| } |
| }; |
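| |
| // A minimal sketch (illustrative only) of how such ranges are queried; the |
| // string cids and `receiver_cid` are just stand-ins for any contiguous cid |
| // range and class id. |
| // |
| //   CidRangeVector ranges; |
| //   ranges.Add(CidRangeValue(kOneByteStringCid, kTwoByteStringCid)); |
| //   const bool is_known_string = |
| //       CidRangeVectorUtils::ContainsCid(ranges, receiver_cid); |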
| |
| class HierarchyInfo : public ThreadStackResource { |
| public: |
| explicit HierarchyInfo(Thread* thread) |
| : ThreadStackResource(thread), |
| cid_subtype_ranges_nullable_(), |
| cid_subtype_ranges_abstract_nullable_(), |
| cid_subtype_ranges_nonnullable_(), |
| cid_subtype_ranges_abstract_nonnullable_() { |
| thread->set_hierarchy_info(this); |
| } |
| |
| ~HierarchyInfo() { thread()->set_hierarchy_info(NULL); } |
| |
| // Returned from FindBestTAVOffset and SplitOnConsistentTypeArguments |
| // to denote a failure to find a compatible concrete, finalized class. |
| static const intptr_t kNoCompatibleTAVOffset = 0; |
| |
| const CidRangeVector& SubtypeRangesForClass(const Class& klass, |
| bool include_abstract, |
| bool exclude_null); |
| |
| bool InstanceOfHasClassRange(const AbstractType& type, |
| intptr_t* lower_limit, |
| intptr_t* upper_limit); |
| |
| // Returns `true` if a simple [CidRange]-based subtype-check can be used to |
| // determine if a given instance's type is a subtype of [type]. |
| // |
| // This is the case for [type]s without type arguments or where the type |
| // arguments are all dynamic (known as "rare type"). |
| bool CanUseSubtypeRangeCheckFor(const AbstractType& type); |
| |
| // Returns `true` if a combination of [CidRange]-based checks can be used to |
| // determine if a given instance's type is a subtype of [type]. |
| // |
| // This is the case for [type]s with type arguments where we are able to do a |
| // [CidRange]-based subclass-check against the class and [CidRange]-based |
| // subtype-checks against the type arguments. |
| // |
| // This method should only be called if [CanUseSubtypeRangeCheckFor] returned |
| // false. |
| bool CanUseGenericSubtypeRangeCheckFor(const AbstractType& type); |
| |
| private: |
| // Does not use any hierarchy information available in the system but computes |
| // it via O(n) class table traversal. |
| // |
| // The boolean parameters denote: |
| // include_abstract : if set, include abstract types (don't care otherwise) |
| // exclude_null : if set, exclude null types (don't care otherwise) |
| void BuildRangesUsingClassTableFor(ClassTable* table, |
| CidRangeVector* ranges, |
| const Class& klass, |
| bool include_abstract, |
| bool exclude_null); |
| |
| // Uses hierarchy information stored in the [Class]'s direct_subclasses() and |
| // direct_implementors() arrays, unless that information is not available, |
| // in which case we fall back to the class table. |
| // |
| // The boolean parameters denote: |
| // include_abstract : if set, include abstract types (don't care otherwise) |
| // exclude_null : if set, exclude null types (don't care otherwise) |
| void BuildRangesFor(ClassTable* table, |
| CidRangeVector* ranges, |
| const Class& klass, |
| bool include_abstract, |
| bool exclude_null); |
| |
| std::unique_ptr<CidRangeVector[]> cid_subtype_ranges_nullable_; |
| std::unique_ptr<CidRangeVector[]> cid_subtype_ranges_abstract_nullable_; |
| std::unique_ptr<CidRangeVector[]> cid_subtype_ranges_nonnullable_; |
| std::unique_ptr<CidRangeVector[]> cid_subtype_ranges_abstract_nonnullable_; |
| }; |
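| |
| // A minimal usage sketch (illustrative only): deciding whether a cid-range |
| // based type test can be emitted for `type` and fetching the ranges for its |
| // class. Handle management and the actual code emission are elided. |
| // |
| //   HierarchyInfo hierarchy_info(thread); |
| //   if (hierarchy_info.CanUseSubtypeRangeCheckFor(type)) { |
| //     const Class& cls = Class::Handle(zone, type.type_class()); |
| //     const CidRangeVector& ranges = hierarchy_info.SubtypeRangesForClass( |
| //         cls, /*include_abstract=*/false, /*exclude_null=*/true); |
| //     // ... emit a chain of cid-range compares over `ranges` ... |
| //   } |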
| |
| // An embedded container with N elements of type T. Used (with partial |
| // specialization for N=0) because embedded arrays cannot have size 0. |
| template <typename T, intptr_t N> |
| class EmbeddedArray { |
| public: |
| EmbeddedArray() : elements_() {} |
| |
| intptr_t length() const { return N; } |
| |
| const T& operator[](intptr_t i) const { |
| ASSERT(i < length()); |
| return elements_[i]; |
| } |
| |
| T& operator[](intptr_t i) { |
| ASSERT(i < length()); |
| return elements_[i]; |
| } |
| |
| const T& At(intptr_t i) const { return (*this)[i]; } |
| |
| void SetAt(intptr_t i, const T& val) { (*this)[i] = val; } |
| |
| private: |
| T elements_[N]; |
| }; |
| |
| template <typename T> |
| class EmbeddedArray<T, 0> { |
| public: |
| intptr_t length() const { return 0; } |
| const T& operator[](intptr_t i) const { |
| UNREACHABLE(); |
| static T sentinel = 0; |
| return sentinel; |
| } |
| T& operator[](intptr_t i) { |
| UNREACHABLE(); |
| static T sentinel = 0; |
| return sentinel; |
| } |
| }; |
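| |
| // A tiny usage sketch (illustrative only); `left` and `right` stand for |
| // already-created Value pointers: |
| // |
| //   EmbeddedArray<Value*, 2> inputs; |
| //   inputs.SetAt(0, left); |
| //   inputs.SetAt(1, right); |
| //   ASSERT(inputs.length() == 2); |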
| |
| // Instructions. |
| |
| // M is a two-argument macro. It is applied to each concrete instruction type |
| // name. The concrete instruction classes are the names with Instr concatenated. |
| |
| struct InstrAttrs { |
| enum Attributes { |
| _ = 0, // No special attributes. |
| // |
| // The instruction is guaranteed to not trigger GC on a non-exceptional |
| // path. If the conditions depend on parameters of the instruction, do not |
| // use this attribute but override CanTriggerGC() instead. |
| kNoGC = 1, |
| }; |
| }; |
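| |
| // For example, a client macro receives (Name, Attrs) pairs; the sketch below |
| // (illustrative only) mirrors how an attribute table such as |
| // Instruction::kInstructionAttrs can be generated from the list: |
| // |
| //   #define INSTR_ATTRS(type, attrs) InstrAttrs::attrs, |
| //   const intptr_t Instruction::kInstructionAttrs[kNumInstructions] = { |
| //       FOR_EACH_INSTRUCTION(INSTR_ATTRS)}; |
| //   #undef INSTR_ATTRS |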
| |
| #define FOR_EACH_INSTRUCTION(M) \ |
| M(GraphEntry, kNoGC) \ |
| M(JoinEntry, kNoGC) \ |
| M(TargetEntry, kNoGC) \ |
| M(FunctionEntry, kNoGC) \ |
| M(NativeEntry, kNoGC) \ |
| M(OsrEntry, kNoGC) \ |
| M(IndirectEntry, kNoGC) \ |
| M(CatchBlockEntry, kNoGC) \ |
| M(Phi, kNoGC) \ |
| M(Redefinition, kNoGC) \ |
| M(ReachabilityFence, kNoGC) \ |
| M(Parameter, kNoGC) \ |
| M(NativeParameter, kNoGC) \ |
| M(LoadIndexedUnsafe, kNoGC) \ |
| M(StoreIndexedUnsafe, kNoGC) \ |
| M(MemoryCopy, kNoGC) \ |
| M(TailCall, kNoGC) \ |
| M(ParallelMove, kNoGC) \ |
| M(PushArgument, kNoGC) \ |
| M(Return, kNoGC) \ |
| M(NativeReturn, kNoGC) \ |
| M(Throw, kNoGC) \ |
| M(ReThrow, kNoGC) \ |
| M(Stop, kNoGC) \ |
| M(Goto, kNoGC) \ |
| M(IndirectGoto, kNoGC) \ |
| M(Branch, kNoGC) \ |
| M(AssertAssignable, _) \ |
| M(AssertSubtype, _) \ |
| M(AssertBoolean, _) \ |
| M(SpecialParameter, kNoGC) \ |
| M(ClosureCall, _) \ |
| M(FfiCall, _) \ |
| M(CCall, kNoGC) \ |
| M(RawStoreField, kNoGC) \ |
| M(InstanceCall, _) \ |
| M(PolymorphicInstanceCall, _) \ |
| M(DispatchTableCall, _) \ |
| M(StaticCall, _) \ |
| M(LoadLocal, kNoGC) \ |
| M(DropTemps, kNoGC) \ |
| M(MakeTemp, kNoGC) \ |
| M(StoreLocal, kNoGC) \ |
| M(StrictCompare, kNoGC) \ |
| M(EqualityCompare, kNoGC) \ |
| M(RelationalOp, kNoGC) \ |
| M(NativeCall, _) \ |
| M(DebugStepCheck, _) \ |
| M(RecordCoverage, kNoGC) \ |
| M(LoadIndexed, kNoGC) \ |
| M(LoadCodeUnits, _) \ |
| M(StoreIndexed, kNoGC) \ |
| M(StoreField, _) \ |
| M(LoadStaticField, _) \ |
| M(StoreStaticField, kNoGC) \ |
| M(BooleanNegate, kNoGC) \ |
| M(InstanceOf, _) \ |
| M(CreateArray, _) \ |
| M(AllocateObject, _) \ |
| M(AllocateClosure, _) \ |
| M(AllocateTypedData, _) \ |
| M(LoadField, _) \ |
| M(LoadUntagged, kNoGC) \ |
| M(LoadClassId, kNoGC) \ |
| M(InstantiateType, _) \ |
| M(InstantiateTypeArguments, _) \ |
| M(AllocateContext, _) \ |
| M(AllocateUninitializedContext, _) \ |
| M(CloneContext, _) \ |
| M(BinarySmiOp, kNoGC) \ |
| M(BinaryInt32Op, kNoGC) \ |
| M(UnarySmiOp, kNoGC) \ |
| M(UnaryDoubleOp, kNoGC) \ |
| M(CheckStackOverflow, _) \ |
| M(SmiToDouble, kNoGC) \ |
| M(Int32ToDouble, kNoGC) \ |
| M(Int64ToDouble, kNoGC) \ |
| M(DoubleToInteger, _) \ |
| M(DoubleToSmi, kNoGC) \ |
| M(DoubleToDouble, kNoGC) \ |
| M(DoubleToFloat, kNoGC) \ |
| M(FloatToDouble, kNoGC) \ |
| M(CheckClass, kNoGC) \ |
| M(CheckClassId, kNoGC) \ |
| M(CheckSmi, kNoGC) \ |
| M(CheckNull, kNoGC) \ |
| M(CheckCondition, kNoGC) \ |
| M(Constant, kNoGC) \ |
| M(UnboxedConstant, kNoGC) \ |
| M(CheckEitherNonSmi, kNoGC) \ |
| M(BinaryDoubleOp, kNoGC) \ |
| M(DoubleTestOp, kNoGC) \ |
| M(MathUnary, kNoGC) \ |
| M(MathMinMax, kNoGC) \ |
| M(Box, _) \ |
| M(Unbox, kNoGC) \ |
| M(BoxInt64, _) \ |
| M(UnboxInt64, kNoGC) \ |
| M(CaseInsensitiveCompare, kNoGC) \ |
| M(BinaryInt64Op, kNoGC) \ |
| M(ShiftInt64Op, kNoGC) \ |
| M(SpeculativeShiftInt64Op, kNoGC) \ |
| M(UnaryInt64Op, kNoGC) \ |
| M(CheckArrayBound, kNoGC) \ |
| M(GenericCheckBound, kNoGC) \ |
| M(CheckWritable, kNoGC) \ |
| M(Constraint, kNoGC) \ |
| M(StringToCharCode, kNoGC) \ |
| M(OneByteStringFromCharCode, kNoGC) \ |
| M(Utf8Scan, kNoGC) \ |
| M(InvokeMathCFunction, kNoGC) \ |
| M(TruncDivMod, kNoGC) \ |
| /* We could be more precise about when these GuardField instructions can trigger GC. */ \ |
| M(GuardFieldClass, _) \ |
| M(GuardFieldLength, _) \ |
| M(GuardFieldType, _) \ |
| M(IfThenElse, kNoGC) \ |
| M(MaterializeObject, _) \ |
| M(TestSmi, kNoGC) \ |
| M(TestCids, kNoGC) \ |
| M(ExtractNthOutput, kNoGC) \ |
| M(BinaryUint32Op, kNoGC) \ |
| M(ShiftUint32Op, kNoGC) \ |
| M(SpeculativeShiftUint32Op, kNoGC) \ |
| M(UnaryUint32Op, kNoGC) \ |
| M(BoxUint32, _) \ |
| M(UnboxUint32, kNoGC) \ |
| M(BoxInt32, _) \ |
| M(UnboxInt32, kNoGC) \ |
| M(BoxSmallInt, kNoGC) \ |
| M(IntConverter, kNoGC) \ |
| M(BitCast, kNoGC) \ |
| M(Call1ArgStub, _) \ |
| M(LoadThread, kNoGC) \ |
| M(Deoptimize, kNoGC) \ |
| M(SimdOp, kNoGC) \ |
| M(Suspend, _) |
| |
| #define FOR_EACH_ABSTRACT_INSTRUCTION(M) \ |
| M(Allocation, _) \ |
| M(ArrayAllocation, _) \ |
| M(BinaryIntegerOp, _) \ |
| M(BlockEntry, _) \ |
| M(BoxInteger, _) \ |
| M(Comparison, _) \ |
| M(InstanceCallBase, _) \ |
| M(ShiftIntegerOp, _) \ |
| M(UnaryIntegerOp, _) \ |
| M(UnboxInteger, _) |
| |
| #define FORWARD_DECLARATION(type, attrs) class type##Instr; |
| FOR_EACH_INSTRUCTION(FORWARD_DECLARATION) |
| FOR_EACH_ABSTRACT_INSTRUCTION(FORWARD_DECLARATION) |
| #undef FORWARD_DECLARATION |
| |
| #define DEFINE_INSTRUCTION_TYPE_CHECK(type) \ |
| virtual type##Instr* As##type() { return this; } \ |
| virtual const type##Instr* As##type() const { return this; } \ |
| virtual const char* DebugName() const { return #type; } |
| |
| // Functions required in all concrete instruction classes. |
| #define DECLARE_INSTRUCTION_NO_BACKEND(type) \ |
| virtual Tag tag() const { return k##type; } \ |
| virtual void Accept(InstructionVisitor* visitor); \ |
| DEFINE_INSTRUCTION_TYPE_CHECK(type) |
| |
| #define DECLARE_INSTRUCTION_BACKEND() \ |
| virtual LocationSummary* MakeLocationSummary(Zone* zone, bool optimizing) \ |
| const; \ |
| virtual void EmitNativeCode(FlowGraphCompiler* compiler); |
| |
| // Functions required in all concrete instruction classes. |
| #define DECLARE_INSTRUCTION(type) \ |
| DECLARE_INSTRUCTION_NO_BACKEND(type) \ |
| DECLARE_INSTRUCTION_BACKEND() |
| |
| // Functions required in all abstract instruction classes. |
| #define DECLARE_ABSTRACT_INSTRUCTION(type) \ |
| /* Prevents allocating an instance of an abstract instruction */ \ |
| /* even if it has a concrete base class. */ \ |
| virtual Tag tag() const = 0; \ |
| DEFINE_INSTRUCTION_TYPE_CHECK(type) |
| |
| #define DECLARE_COMPARISON_METHODS \ |
| virtual LocationSummary* MakeLocationSummary(Zone* zone, bool optimizing) \ |
| const; \ |
| virtual Condition EmitComparisonCode(FlowGraphCompiler* compiler, \ |
| BranchLabels labels); |
| |
| #define DECLARE_COMPARISON_INSTRUCTION(type) \ |
| DECLARE_INSTRUCTION_NO_BACKEND(type) \ |
| DECLARE_COMPARISON_METHODS |
| |
| #if defined(INCLUDE_IL_PRINTER) |
| #define PRINT_TO_SUPPORT virtual void PrintTo(BaseTextBuffer* f) const; |
| #define PRINT_OPERANDS_TO_SUPPORT \ |
| virtual void PrintOperandsTo(BaseTextBuffer* f) const; |
| #define DECLARE_ATTRIBUTES(...) \ |
| auto GetAttributes() const { return std::make_tuple(__VA_ARGS__); } \ |
| static auto GetAttributeNames() { return std::make_tuple(#__VA_ARGS__); } |
| #else |
| #define PRINT_TO_SUPPORT |
| #define PRINT_OPERANDS_TO_SUPPORT |
| #define DECLARE_ATTRIBUTES(...) |
| #endif // defined(INCLUDE_IL_PRINTER) |
| |
| // Together with CidRange, this represents a mapping from a range of class-ids |
| // to a method for a given selector (method name). It can also contain an |
| // indication of how frequently a given method has been called at a call site. |
| // This information can be harvested from the inline caches (ICs). |
| struct TargetInfo : public CidRange { |
| TargetInfo(intptr_t cid_start_arg, |
| intptr_t cid_end_arg, |
| const Function* target_arg, |
| intptr_t count_arg, |
| StaticTypeExactnessState exactness) |
| : CidRange(cid_start_arg, cid_end_arg), |
| target(target_arg), |
| count(count_arg), |
| exactness(exactness) { |
| ASSERT(target->IsZoneHandle()); |
| } |
| const Function* target; |
| intptr_t count; |
| StaticTypeExactnessState exactness; |
| |
| DISALLOW_COPY_AND_ASSIGN(TargetInfo); |
| }; |
| |
| // A set of class-ids, arranged in ranges. Used for the CheckClass |
| // and PolymorphicInstanceCall instructions. |
| class Cids : public ZoneAllocated { |
| public: |
| explicit Cids(Zone* zone) : cid_ranges_(zone, 6) {} |
| // Creates the off-heap Cids object that reflects the contents |
| // of the on-VM-heap IC data. |
| // Ranges of Cids are merged if there is only one target function and |
| // it is used for all cids in the gaps between ranges. |
| static Cids* CreateForArgument(Zone* zone, |
| const BinaryFeedback& binary_feedback, |
| int argument_number); |
| static Cids* CreateMonomorphic(Zone* zone, intptr_t cid); |
| |
| bool Equals(const Cids& other) const; |
| |
| bool HasClassId(intptr_t cid) const; |
| |
| void Add(CidRange* target) { cid_ranges_.Add(target); } |
| |
| CidRange& operator[](intptr_t index) const { return *cid_ranges_[index]; } |
| |
| CidRange* At(int index) const { return cid_ranges_[index]; } |
| |
| intptr_t length() const { return cid_ranges_.length(); } |
| |
| void SetLength(intptr_t len) { cid_ranges_.SetLength(len); } |
| |
| bool is_empty() const { return cid_ranges_.is_empty(); } |
| |
| void Sort(int compare(CidRange* const* a, CidRange* const* b)) { |
| cid_ranges_.Sort(compare); |
| } |
| |
| bool IsMonomorphic() const; |
| intptr_t MonomorphicReceiverCid() const; |
| intptr_t ComputeLowestCid() const; |
| intptr_t ComputeHighestCid() const; |
| |
| protected: |
| GrowableArray<CidRange*> cid_ranges_; |
| |
| private: |
| DISALLOW_IMPLICIT_CONSTRUCTORS(Cids); |
| }; |
| |
| class CallTargets : public Cids { |
| public: |
| explicit CallTargets(Zone* zone) : Cids(zone) {} |
| |
| static const CallTargets* CreateMonomorphic(Zone* zone, |
| intptr_t receiver_cid, |
| const Function& target); |
| |
| // Creates the off-heap CallTargets object that reflects the contents |
| // of the on-VM-heap IC data. |
| static const CallTargets* Create(Zone* zone, const ICData& ic_data); |
| |
| // This variant also expands the class-ids to neighbouring classes that |
| // inherit the same method. |
| static const CallTargets* CreateAndExpand(Zone* zone, const ICData& ic_data); |
| |
| TargetInfo* TargetAt(int i) const { return static_cast<TargetInfo*>(At(i)); } |
| |
| intptr_t AggregateCallCount() const; |
| |
| StaticTypeExactnessState MonomorphicExactness() const; |
| bool HasSingleTarget() const; |
| bool HasSingleRecognizedTarget() const; |
| const Function& FirstTarget() const; |
| const Function& MostPopularTarget() const; |
| |
| void Print() const; |
| |
| bool ReceiverIs(intptr_t cid) const { |
| return IsMonomorphic() && MonomorphicReceiverCid() == cid; |
| } |
| bool ReceiverIsSmiOrMint() const { |
| if (cid_ranges_.is_empty()) { |
| return false; |
| } |
| for (intptr_t i = 0, n = cid_ranges_.length(); i < n; i++) { |
| for (intptr_t j = cid_ranges_[i]->cid_start; j <= cid_ranges_[i]->cid_end; |
| j++) { |
| if (j != kSmiCid && j != kMintCid) { |
| return false; |
| } |
| } |
| } |
| return true; |
| } |
| |
| private: |
| void CreateHelper(Zone* zone, const ICData& ic_data); |
| void MergeIntoRanges(); |
| }; |
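| |
| // A minimal usage sketch (illustrative only): turning IC data into call |
| // targets and checking for a single devirtualization candidate. |
| // |
| //   const CallTargets* targets = CallTargets::CreateAndExpand(zone, ic_data); |
| //   if (targets->HasSingleTarget()) { |
| //     const Function& target = targets->FirstTarget(); |
| //     // ... consider a static call to (or inlining of) `target` ... |
| //   } |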
| |
| // Represents type feedback for the binary operators, and a few recognized |
| // static functions (see MethodRecognizer::NumArgsCheckedForStaticCall). |
| class BinaryFeedback : public ZoneAllocated { |
| public: |
| explicit BinaryFeedback(Zone* zone) : feedback_(zone, 2) {} |
| |
| static const BinaryFeedback* Create(Zone* zone, const ICData& ic_data); |
| static const BinaryFeedback* CreateMonomorphic(Zone* zone, |
| intptr_t receiver_cid, |
| intptr_t argument_cid); |
| |
| bool ArgumentIs(intptr_t cid) const { |
| if (feedback_.is_empty()) { |
| return false; |
| } |
| for (intptr_t i = 0, n = feedback_.length(); i < n; i++) { |
| if (feedback_[i].second != cid) { |
| return false; |
| } |
| } |
| return true; |
| } |
| |
| bool OperandsAreEither(intptr_t cid_a, intptr_t cid_b) const { |
| if (feedback_.is_empty()) { |
| return false; |
| } |
| for (intptr_t i = 0, n = feedback_.length(); i < n; i++) { |
| if ((feedback_[i].first != cid_a) && (feedback_[i].first != cid_b)) { |
| return false; |
| } |
| if ((feedback_[i].second != cid_a) && (feedback_[i].second != cid_b)) { |
| return false; |
| } |
| } |
| return true; |
| } |
| bool OperandsAreSmiOrNull() const { |
| return OperandsAreEither(kSmiCid, kNullCid); |
| } |
| bool OperandsAreSmiOrMint() const { |
| return OperandsAreEither(kSmiCid, kMintCid); |
| } |
| bool OperandsAreSmiOrDouble() const { |
| return OperandsAreEither(kSmiCid, kDoubleCid); |
| } |
| |
| bool OperandsAre(intptr_t cid) const { |
| if (feedback_.length() != 1) return false; |
| return (feedback_[0].first == cid) && (feedback_[0].second == cid); |
| } |
| |
| bool IncludesOperands(intptr_t cid) const { |
| for (intptr_t i = 0, n = feedback_.length(); i < n; i++) { |
| if ((feedback_[i].first == cid) && (feedback_[i].second == cid)) { |
| return true; |
| } |
| } |
| return false; |
| } |
| |
| private: |
| GrowableArray<std::pair<intptr_t, intptr_t>> feedback_; |
| |
| friend class Cids; |
| }; |
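| |
| // A minimal usage sketch (illustrative only): using binary type feedback to |
| // select a fast path for a binary operation. |
| // |
| //   const BinaryFeedback* feedback = BinaryFeedback::Create(zone, ic_data); |
| //   if (feedback->OperandsAre(kSmiCid)) { |
| //     // Both operands were always Smi at this call site. |
| //   } else if (feedback->OperandsAreSmiOrDouble()) { |
| //     // Mixed Smi/Double feedback; consider an unboxed double path. |
| //   } |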
| |
| typedef GrowableArray<Value*> InputsArray; |
| typedef ZoneGrowableArray<PushArgumentInstr*> PushArgumentsArray; |
| |
| template <typename Trait> |
| class InstructionIndexedPropertyIterable { |
| public: |
| struct Iterator { |
| const Instruction* instr; |
| intptr_t index; |
| |
| decltype(Trait::At(instr, index)) operator*() const { |
| return Trait::At(instr, index); |
| } |
| Iterator& operator++() { |
| index++; |
| return *this; |
| } |
| |
| bool operator==(const Iterator& other) { |
| return instr == other.instr && index == other.index; |
| } |
| |
| bool operator!=(const Iterator& other) { return !(*this == other); } |
| }; |
| |
| explicit InstructionIndexedPropertyIterable(const Instruction* instr) |
| : instr_(instr) {} |
| |
| Iterator begin() const { return {instr_, 0}; } |
| Iterator end() const { return {instr_, Trait::Length(instr_)}; } |
| |
| private: |
| const Instruction* instr_; |
| }; |
| |
| class ValueListIterable { |
| public: |
| struct Iterator { |
| Value* value; |
| |
| Value* operator*() const { return value; } |
| |
| Iterator& operator++() { |
| value = value->next_use(); |
| return *this; |
| } |
| |
| bool operator==(const Iterator& other) { return value == other.value; } |
| |
| bool operator!=(const Iterator& other) { return !(*this == other); } |
| }; |
| |
| explicit ValueListIterable(Value* value) : value_(value) {} |
| |
| Iterator begin() const { return {value_}; } |
| Iterator end() const { return {nullptr}; } |
| |
| private: |
| Value* value_; |
| }; |
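| |
| // These adapters allow range-based for loops over IL properties, e.g. |
| // (illustrative sketches; `instr` is an Instruction* and `use_list_head` the |
| // first Value in a definition's use list): |
| // |
| //   for (Definition* input : instr->inputs()) { /* visit each input */ } |
| //   for (BlockEntryInstr* succ : instr->successors()) { /* visit block */ } |
| //   for (Value* use : ValueListIterable(use_list_head)) { /* visit use */ } |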
| |
| class Instruction : public ZoneAllocated { |
| public: |
| #define DECLARE_TAG(type, attrs) k##type, |
| enum Tag { FOR_EACH_INSTRUCTION(DECLARE_TAG) kNumInstructions }; |
| #undef DECLARE_TAG |
| |
| static const intptr_t kInstructionAttrs[kNumInstructions]; |
| |
| enum SpeculativeMode { |
| // Types of inputs should be checked when unboxing for this instruction. |
| kGuardInputs, |
| // Each input is guaranteed to have a valid type for the input |
| // representation and its type should not be checked when unboxing. |
| kNotSpeculative |
| }; |
| |
| // If the source has the inlining ID of the root function, then don't set |
| // the inlining ID to that; instead, treat it as unset. |
| explicit Instruction(const InstructionSource& source, |
| intptr_t deopt_id = DeoptId::kNone) |
| : deopt_id_(deopt_id), inlining_id_(source.inlining_id) {} |
| |
| explicit Instruction(intptr_t deopt_id = DeoptId::kNone) |
| : Instruction(InstructionSource(), deopt_id) {} |
| |
| virtual ~Instruction() {} |
| |
| virtual Tag tag() const = 0; |
| |
| virtual intptr_t statistics_tag() const { return tag(); } |
| |
| intptr_t deopt_id() const { |
| ASSERT(ComputeCanDeoptimize() || ComputeCanDeoptimizeAfterCall() || |
| CanBecomeDeoptimizationTarget() || MayThrow() || |
| CompilerState::Current().is_aot()); |
| return GetDeoptId(); |
| } |
| |
| static const ICData* GetICData( |
| const ZoneGrowableArray<const ICData*>& ic_data_array, |
| intptr_t deopt_id, |
| bool is_static_call); |
| |
| virtual TokenPosition token_pos() const { return TokenPosition::kNoSource; } |
| |
| // Returns the source information for this instruction. |
| InstructionSource source() const { |
| return InstructionSource(token_pos(), inlining_id()); |
| } |
| |
| virtual intptr_t InputCount() const = 0; |
| virtual Value* InputAt(intptr_t i) const = 0; |
| void SetInputAt(intptr_t i, Value* value) { |
| ASSERT(value != NULL); |
| value->set_instruction(this); |
| value->set_use_index(i); |
| RawSetInputAt(i, value); |
| } |
| |
| struct InputsTrait { |
| static Definition* At(const Instruction* instr, intptr_t index) { |
| return instr->InputAt(index)->definition(); |
| } |
| |
| static intptr_t Length(const Instruction* instr) { |
| return instr->InputCount(); |
| } |
| }; |
| |
| using InputsIterable = InstructionIndexedPropertyIterable<InputsTrait>; |
| |
| InputsIterable inputs() { return InputsIterable(this); } |
| |
| // Remove all inputs (including in the environment) from their |
| // definitions' use lists. |
| void UnuseAllInputs(); |
| |
| // Call instructions override this function and return the number of |
| // pushed arguments. |
| virtual intptr_t ArgumentCount() const { return 0; } |
| inline Value* ArgumentValueAt(intptr_t index) const; |
| inline Definition* ArgumentAt(intptr_t index) const; |
| |
| // Sets array of PushArgument instructions. |
| virtual void SetPushArguments(PushArgumentsArray* push_arguments) { |
| UNREACHABLE(); |
| } |
| // Returns the array of PushArgument instructions. |
| virtual PushArgumentsArray* GetPushArguments() const { |
| UNREACHABLE(); |
| return nullptr; |
| } |
| // Replace inputs with separate PushArgument instructions detached from call. |
| virtual void ReplaceInputsWithPushArguments( |
| PushArgumentsArray* push_arguments) { |
| UNREACHABLE(); |
| } |
| bool HasPushArguments() const { return GetPushArguments() != nullptr; } |
| |
| // Repairs trailing PushArgs in environment. |
| void RepairPushArgsInEnvironment() const; |
| |
| // Returns true if this instruction can deoptimize with its current inputs. |
| // This property can change if we add or remove redefinitions that constrain |
| // the type or the range of input operands during compilation. |
| virtual bool ComputeCanDeoptimize() const = 0; |
| |
| virtual bool ComputeCanDeoptimizeAfterCall() const { |
| // TODO(dartbug.com/45213): Incrementally migrate IR instructions from using |
| // [ComputeCanDeoptimize] to [ComputeCanDeoptimizeAfterCall] if they |
| // can only deoptimize lazily. |
| return false; |
| } |
| |
| // Once the deopt environment has been removed, we assume that this |
| // instruction can't deoptimize. |
| bool CanDeoptimize() const { |
| return env() != nullptr && |
| (ComputeCanDeoptimize() || ComputeCanDeoptimizeAfterCall()); |
| } |
| |
| // Visiting support. |
| virtual void Accept(InstructionVisitor* visitor) = 0; |
| |
| Instruction* previous() const { return previous_; } |
| void set_previous(Instruction* instr) { |
| ASSERT(!IsBlockEntry()); |
| previous_ = instr; |
| } |
| |
| Instruction* next() const { return next_; } |
| void set_next(Instruction* instr) { |
| ASSERT(!IsGraphEntry()); |
| ASSERT(!IsReturn()); |
| ASSERT(!IsBranch() || (instr == NULL)); |
| ASSERT(!IsPhi()); |
| ASSERT(instr == NULL || !instr->IsBlockEntry()); |
| // TODO(fschneider): Also add Throw and ReThrow to the list of instructions |
| // that do not have a successor. Currently, the graph builder will continue |
| // to append instructions in case of a Throw inside an expression. This |
| // condition should be handled in the graph builder. |
| next_ = instr; |
| } |
| |
| // Link two instructions together. |
| void LinkTo(Instruction* next) { |
| ASSERT(this != next); |
| this->set_next(next); |
| next->set_previous(this); |
| } |
| |
| // Removes this instruction from the graph, after use lists have been |
| // computed. If the instruction is a definition with uses, those uses are |
| // unaffected (so the instruction can be reinserted, e.g., hoisting). |
| Instruction* RemoveFromGraph(bool return_previous = true); |
| |
| // Normal instructions can have 0 (inside a block) or 1 (last instruction in |
| // a block) successors. Branch instructions with >1 successors override this |
| // function. |
| virtual intptr_t SuccessorCount() const; |
| virtual BlockEntryInstr* SuccessorAt(intptr_t index) const; |
| |
| struct SuccessorsTrait { |
| static BlockEntryInstr* At(const Instruction* instr, intptr_t index) { |
| return instr->SuccessorAt(index); |
| } |
| |
| static intptr_t Length(const Instruction* instr) { |
| return instr->SuccessorCount(); |
| } |
| }; |
| |
| using SuccessorsIterable = |
| InstructionIndexedPropertyIterable<SuccessorsTrait>; |
| |
| inline SuccessorsIterable successors() const { |
| return SuccessorsIterable(this); |
| } |
| |
| void Goto(JoinEntryInstr* entry); |
| |
| virtual const char* DebugName() const = 0; |
| |
| #if defined(DEBUG) |
| // Checks that the field stored in an instruction has proper form: |
| // - must be a zone-handle; |
| // - in background compilation, must be cloned. |
| // Aborts if field is not OK. |
| void CheckField(const Field& field) const; |
| #else |
| void CheckField(const Field& field) const {} |
| #endif // DEBUG |
| |
| // Printing support. |
| const char* ToCString() const; |
| PRINT_TO_SUPPORT |
| PRINT_OPERANDS_TO_SUPPORT |
| |
| #define DECLARE_INSTRUCTION_TYPE_CHECK(Name, Type) \ |
| bool Is##Name() const { return (As##Name() != nullptr); } \ |
| Type* As##Name() { \ |
| auto const_this = static_cast<const Instruction*>(this); \ |
| return const_cast<Type*>(const_this->As##Name()); \ |
| } \ |
| virtual const Type* As##Name() const { return nullptr; } |
| #define INSTRUCTION_TYPE_CHECK(Name, Attrs) \ |
| DECLARE_INSTRUCTION_TYPE_CHECK(Name, Name##Instr) |
| |
| DECLARE_INSTRUCTION_TYPE_CHECK(Definition, Definition) |
| DECLARE_INSTRUCTION_TYPE_CHECK(BlockEntryWithInitialDefs, |
| BlockEntryWithInitialDefs) |
| DECLARE_INSTRUCTION_TYPE_CHECK(CheckBoundBase, CheckBoundBase) |
| FOR_EACH_INSTRUCTION(INSTRUCTION_TYPE_CHECK) |
| FOR_EACH_ABSTRACT_INSTRUCTION(INSTRUCTION_TYPE_CHECK) |
| |
| #undef INSTRUCTION_TYPE_CHECK |
| #undef DECLARE_INSTRUCTION_TYPE_CHECK |
| |
| template <typename T> |
| T* Cast() { |
| return static_cast<T*>(this); |
| } |
| |
| template <typename T> |
| const T* Cast() const { |
| return static_cast<const T*>(this); |
| } |
| |
| // Returns structure describing location constraints required |
| // to emit native code for this instruction. |
| LocationSummary* locs() { |
| ASSERT(locs_ != NULL); |
| return locs_; |
| } |
| |
| bool HasLocs() const { return locs_ != NULL; } |
| |
| virtual LocationSummary* MakeLocationSummary(Zone* zone, |
| bool is_optimizing) const = 0; |
| |
| void InitializeLocationSummary(Zone* zone, bool optimizing) { |
| ASSERT(locs_ == NULL); |
| locs_ = MakeLocationSummary(zone, optimizing); |
| } |
| |
| // Makes a new call location summary (or uses `locs`) and initializes the |
| // output register constraints depending on the representation of [instr]. |
| static LocationSummary* MakeCallSummary(Zone* zone, |
| const Instruction* instr, |
| LocationSummary* locs = nullptr); |
| |
| virtual void EmitNativeCode(FlowGraphCompiler* compiler) { UNIMPLEMENTED(); } |
| |
| Environment* env() const { return env_; } |
| void SetEnvironment(Environment* deopt_env); |
| void RemoveEnvironment(); |
| void ReplaceInEnvironment(Definition* current, Definition* replacement); |
| |
| virtual intptr_t NumberOfInputsConsumedBeforeCall() const { return 0; } |
| |
| // Different compiler passes can assign pass-specific ids to the instruction. |
| // Only one id can be stored at a time. |
| intptr_t GetPassSpecificId(CompilerPass::Id pass) const { |
| return (PassSpecificId::DecodePass(pass_specific_id_) == pass) |
| ? PassSpecificId::DecodeId(pass_specific_id_) |
| : PassSpecificId::kNoId; |
| } |
| void SetPassSpecificId(CompilerPass::Id pass, intptr_t id) { |
| pass_specific_id_ = PassSpecificId::Encode(pass, id); |
| } |
| bool HasPassSpecificId(CompilerPass::Id pass) const { |
| return (PassSpecificId::DecodePass(pass_specific_id_) == pass) && |
| (PassSpecificId::DecodeId(pass_specific_id_) != |
| PassSpecificId::kNoId); |
| } |
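| |
| // A sketch (illustrative only) of how a pass might stash and read such an id; |
| // `my_id` is a placeholder value. |
| // |
| //   instr->SetPassSpecificId(CompilerPass::kCanonicalize, my_id); |
| //   if (instr->HasPassSpecificId(CompilerPass::kCanonicalize)) { |
| //     const intptr_t id = instr->GetPassSpecificId(CompilerPass::kCanonicalize); |
| //     // ... use `id` ... |
| //   } |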
| |
| bool HasUnmatchedInputRepresentations() const; |
| |
| // Returns representation expected for the input operand at the given index. |
| virtual Representation RequiredInputRepresentation(intptr_t idx) const { |
| return kTagged; |
| } |
| |
| SpeculativeMode SpeculativeModeOfInputs() const { |
| for (intptr_t i = 0; i < InputCount(); i++) { |
| if (SpeculativeModeOfInput(i) == kGuardInputs) { |
| return kGuardInputs; |
| } |
| } |
| return kNotSpeculative; |
| } |
| |
| // By default, instructions should check the types of their inputs when unboxing. |
| virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const { |
| return kGuardInputs; |
| } |
| |
| // Representation of the value produced by this computation. |
| virtual Representation representation() const { return kTagged; } |
| |
| bool WasEliminated() const { return next() == NULL; } |
| |
| // Returns deoptimization id that corresponds to the deoptimization target |
| // that input operand conversions inserted for this instruction can jump |
| // to. |
| virtual intptr_t DeoptimizationTarget() const { |
| UNREACHABLE(); |
| return DeoptId::kNone; |
| } |
| |
| // Returns a replacement for the instruction or NULL if the instruction can |
| // be eliminated. By default returns this instruction, which means no |
| // change. |
| virtual Instruction* Canonicalize(FlowGraph* flow_graph); |
| |
| // Insert this instruction before 'next' after use lists are computed. |
| // Instructions cannot be inserted before a block entry or any other |
| // instruction without a previous instruction. |
| void InsertBefore(Instruction* next) { InsertAfter(next->previous()); } |
| |
| // Insert this instruction after 'prev' after use lists are computed. |
| void InsertAfter(Instruction* prev); |
| |
| // Append an instruction to the current one and return the tail. |
| // This function updates the def-use chains of the newly appended |
| // instruction. |
| Instruction* AppendInstruction(Instruction* tail); |
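| |
| // A sketch (illustrative only) of the two splicing styles above, assuming use |
| // lists have been computed; `new_check`, `use_site`, `cursor` and `new_tail` |
| // are placeholder instruction pointers. |
| // |
| //   new_check->InsertBefore(use_site);  // Splice before an existing use site. |
| //   Instruction* tail = cursor->AppendInstruction(new_tail);  // Grow a chain. |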
| |
| // Returns true if CSE and LICM are allowed for this instruction. |
| virtual bool AllowsCSE() const { return false; } |
| |
| // Returns true if this instruction has any side-effects besides storing. |
| // See StoreFieldInstr::HasUnknownSideEffects() for rationale. |
| virtual bool HasUnknownSideEffects() const = 0; |
| |
| // Whether this instruction can call Dart code without going through |
| // the runtime. |
| // |
| // Must be true for any instruction which can call Dart code without |
| // first creating an exit frame to transition into the runtime. |
| // |
| // See also WriteBarrierElimination and Thread::RememberLiveTemporaries(). |
| virtual bool CanCallDart() const { return false; } |
| |
| virtual bool CanTriggerGC() const; |
| |
| // Get the block entry for this instruction. |
| virtual BlockEntryInstr* GetBlock(); |
| |
| virtual intptr_t inlining_id() const { return inlining_id_; } |
| virtual void set_inlining_id(intptr_t value) { |
| ASSERT(value >= 0); |
| ASSERT(!has_inlining_id() || inlining_id_ == value); |
| inlining_id_ = value; |
| } |
| virtual bool has_inlining_id() const { return inlining_id_ >= 0; } |
| |
| // Returns a hash code for use with hash maps. |
| virtual uword Hash() const; |
| |
| // Compares two instructions. Returns true iff: |
| // 1. They have the same tag. |
| // 2. All corresponding input operands are Equals. |
| // 3. They satisfy AttributesEqual. |
| bool Equals(const Instruction& other) const; |
| |
| // Compares the attributes of two instructions (except input operands and tag). |
| // All instructions that participate in CSE have to override this function. |
| // This function can assume that the argument has the same type as this. |
| virtual bool AttributesEqual(const Instruction& other) const { |
| UNREACHABLE(); |
| return false; |
| } |
| |
| virtual void InheritDeoptTarget(Zone* zone, Instruction* other); |
| |
| bool NeedsEnvironment() const { |
| return ComputeCanDeoptimize() || ComputeCanDeoptimizeAfterCall() || |
| CanBecomeDeoptimizationTarget() || MayThrow(); |
| } |
| |
| virtual bool CanBecomeDeoptimizationTarget() const { return false; } |
| |
| void InheritDeoptTargetAfter(FlowGraph* flow_graph, |
| Definition* call, |
| Definition* result); |
| |
| virtual bool MayThrow() const = 0; |
| |
| bool IsDominatedBy(Instruction* dom); |
| |
| void ClearEnv() { env_ = NULL; } |
| |
| void Unsupported(FlowGraphCompiler* compiler); |
| |
| static bool SlowPathSharingSupported(bool is_optimizing) { |
| #if defined(TARGET_ARCH_IA32) |
| return false; |
| #else |
| return FLAG_enable_slow_path_sharing && FLAG_precompiled_mode && |
| is_optimizing; |
| #endif |
| } |
| |
| virtual bool UseSharedSlowPathStub(bool is_optimizing) const { return false; } |
| |
| // 'RegisterKindForResult()' returns the register kind necessary to hold the |
| // result. |
| // |
| // This is not virtual because instructions should override representation() |
| // instead. |
| Location::Kind RegisterKindForResult() const { |
| const Representation rep = representation(); |
| if ((rep == kUnboxedFloat) || (rep == kUnboxedDouble) || |
| (rep == kUnboxedFloat32x4) || (rep == kUnboxedInt32x4) || |
| (rep == kUnboxedFloat64x2)) { |
| return Location::kFpuRegister; |
| } |
| return Location::kRegister; |
| } |
| |
| protected: |
| // GetDeoptId and/or CopyDeoptIdFrom. |
| friend class CallSiteInliner; |
| friend class LICM; |
| friend class ComparisonInstr; |
| friend class Scheduler; |
| friend class BlockEntryInstr; |
| friend class CatchBlockEntryInstr; // deopt_id_ |
| friend class DebugStepCheckInstr; // deopt_id_ |
| friend class StrictCompareInstr; // deopt_id_ |
| |
| // Fetch deopt id without checking if this computation can deoptimize. |
| intptr_t GetDeoptId() const { return deopt_id_; } |
| |
| void CopyDeoptIdFrom(const Instruction& instr) { |
| deopt_id_ = instr.deopt_id_; |
| } |
| |
| private: |
| friend class BranchInstr; // For RawSetInputAt. |
| friend class IfThenElseInstr; // For RawSetInputAt. |
| friend class CheckConditionInstr; // For RawSetInputAt. |
| |
| virtual void RawSetInputAt(intptr_t i, Value* value) = 0; |
| |
| class PassSpecificId { |
| public: |
| static intptr_t Encode(CompilerPass::Id pass, intptr_t id) { |
| return (id << kPassBits) | pass; |
| } |
| |
| static CompilerPass::Id DecodePass(intptr_t value) { |
| return static_cast<CompilerPass::Id>(value & Utils::NBitMask(kPassBits)); |
| } |
| |
| static intptr_t DecodeId(intptr_t value) { return (value >> kPassBits); } |
| |
| static constexpr intptr_t kNoId = -1; |
| |
| private: |
| static constexpr intptr_t kPassBits = 8; |
| static_assert(CompilerPass::kNumPasses <= (1 << kPassBits), |
| "Pass Id does not fit into the bit field"); |
| }; |
| |
| intptr_t deopt_id_ = DeoptId::kNone; |
| intptr_t pass_specific_id_ = PassSpecificId::kNoId; |
| Instruction* previous_ = nullptr; |
| Instruction* next_ = nullptr; |
| Environment* env_ = nullptr; |
| LocationSummary* locs_ = nullptr; |
| intptr_t inlining_id_; |
| |
| DISALLOW_COPY_AND_ASSIGN(Instruction); |
| }; |
| |
| struct BranchLabels { |
| compiler::Label* true_label; |
| compiler::Label* false_label; |
| compiler::Label* fall_through; |
| }; |
| |
| class PureInstruction : public Instruction { |
| public: |
| explicit PureInstruction(intptr_t deopt_id) : Instruction(deopt_id) {} |
| explicit PureInstruction(const InstructionSource& source, intptr_t deopt_id) |
| : Instruction(source, deopt_id) {} |
| |
| virtual bool AllowsCSE() const { return true; } |
| virtual bool HasUnknownSideEffects() const { return false; } |
| }; |
| |
| // Types to be used as ThrowsTrait for TemplateInstruction/TemplateDefinition. |
| struct Throws { |
| static const bool kCanThrow = true; |
| }; |
| |
| struct NoThrow { |
| static const bool kCanThrow = false; |
| }; |
| |
| // Types to be used as CSETrait for TemplateInstruction/TemplateDefinition. |
| // Pure instructions are those that allow CSE and have no effects and |
| // no dependencies. |
| template <typename DefaultBase, typename PureBase> |
| struct Pure { |
| typedef PureBase Base; |
| }; |
| |
| template <typename DefaultBase, typename PureBase> |
| struct NoCSE { |
| typedef DefaultBase Base; |
| }; |
| |
| template <intptr_t N, |
| typename ThrowsTrait, |
| template <typename Default, typename Pure> class CSETrait = NoCSE> |
| class TemplateInstruction |
| : public CSETrait<Instruction, PureInstruction>::Base { |
| public: |
| using BaseClass = typename CSETrait<Instruction, PureInstruction>::Base; |
| |
| explicit TemplateInstruction(intptr_t deopt_id = DeoptId::kNone) |
| : BaseClass(deopt_id), inputs_() {} |
| |
| TemplateInstruction(const InstructionSource& source, |
| intptr_t deopt_id = DeoptId::kNone) |
| : BaseClass(source, deopt_id), inputs_() {} |
| |
| virtual intptr_t InputCount() const { return N; } |
| virtual Value* InputAt(intptr_t i) const { return inputs_[i]; } |
| |
| virtual bool MayThrow() const { return ThrowsTrait::kCanThrow; } |
| |
| protected: |
| EmbeddedArray<Value*, N> inputs_; |
| |
| private: |
| virtual void RawSetInputAt(intptr_t i, Value* value) { inputs_[i] = value; } |
| }; |
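| |
| // A sketch (illustrative only) of how a concrete instruction typically |
| // combines these pieces: a fixed input count, a throws trait and a CSE trait. |
| // `MyCheckInstr` is a hypothetical name, not an existing instruction (a real |
| // one would also need a FOR_EACH_INSTRUCTION entry). |
| // |
| //   class MyCheckInstr : public TemplateInstruction<1, NoThrow, Pure> { |
| //    public: |
| //     MyCheckInstr(Value* value, intptr_t deopt_id) |
| //         : TemplateInstruction(deopt_id) { |
| //       SetInputAt(0, value); |
| //     } |
| //     DECLARE_INSTRUCTION(MyCheck) |
| //     ... |
| //   }; |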
| |
| class MoveOperands : public ZoneAllocated { |
| public: |
| MoveOperands(Location dest, Location src) : dest_(dest), src_(src) {} |
| |
| MoveOperands& operator=(const MoveOperands& other) { |
| dest_ = other.dest_; |
| src_ = other.src_; |
| return *this; |
| } |
| |
| Location src() const { return src_; } |
| Location dest() const { return dest_; } |
| |
| Location* src_slot() { return &src_; } |
| Location* dest_slot() { return &dest_; } |
| |
| void set_src(const Location& value) { src_ = value; } |
| void set_dest(const Location& value) { dest_ = value; } |
| |
| // The parallel move resolver marks moves as "in-progress" by clearing the |
| // destination (but not the source). |
| Location MarkPending() { |
| ASSERT(!IsPending()); |
| Location dest = dest_; |
| dest_ = Location::NoLocation(); |
| return dest; |
| } |
| |
| void ClearPending(Location dest) { |
| ASSERT(IsPending()); |
| dest_ = dest; |
| } |
| |
| bool IsPending() const { |
| ASSERT(!src_.IsInvalid() || dest_.IsInvalid()); |
| return dest_.IsInvalid() && !src_.IsInvalid(); |
| } |
| |
| // True if this move is a move from the given location. |
| bool Blocks(Location loc) const { |
| return !IsEliminated() && src_.Equals(loc); |
| } |
| |
| // A move is redundant if it's been eliminated, if its source and |
| // destination are the same, or if its destination is unneeded. |
| bool IsRedundant() const { |
| return IsEliminated() || dest_.IsInvalid() || src_.Equals(dest_); |
| } |
| |
| // We clear both operands to indicate a move that has been eliminated. |
| void Eliminate() { src_ = dest_ = Location::NoLocation(); } |
| bool IsEliminated() const { |
| ASSERT(!src_.IsInvalid() || dest_.IsInvalid()); |
| return src_.IsInvalid(); |
| } |
| |
| private: |
| Location dest_; |
| Location src_; |
| }; |
| |
| class ParallelMoveInstr : public TemplateInstruction<0, NoThrow> { |
| public: |
| ParallelMoveInstr() : moves_(4) {} |
| |
| DECLARE_INSTRUCTION(ParallelMove) |
| |
| virtual bool ComputeCanDeoptimize() const { return false; } |
| |
| virtual bool HasUnknownSideEffects() const { |
| UNREACHABLE(); // This instruction is never visited by optimization passes. |
| return false; |
| } |
| |
| const GrowableArray<MoveOperands*>& moves() const { return moves_; } |
| |
| MoveOperands* AddMove(Location dest, Location src) { |
| MoveOperands* move = new MoveOperands(dest, src); |
| moves_.Add(move); |
| return move; |
| } |
| |
| MoveOperands* MoveOperandsAt(intptr_t index) const { return moves_[index]; } |
| |
| intptr_t NumMoves() const { return moves_.length(); } |
| |
| bool IsRedundant() const; |
| |
| virtual TokenPosition token_pos() const { |
| return TokenPosition::kParallelMove; |
| } |
| |
| PRINT_TO_SUPPORT |
| |
| private: |
| GrowableArray<MoveOperands*> moves_; // Elements cannot be null. |
| |
| DISALLOW_COPY_AND_ASSIGN(ParallelMoveInstr); |
| }; |
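| |
| // A minimal sketch (illustrative only) of how moves are accumulated; the |
| // concrete locations (`dst_reg`, `slot_index`, FPREG) are placeholders and |
| // architecture-specific. |
| // |
| //   ParallelMoveInstr* move = new ParallelMoveInstr(); |
| //   move->AddMove(Location::RegisterLocation(dst_reg), |
| //                 Location::StackSlot(slot_index, FPREG)); |
| //   if (!move->IsRedundant()) { |
| //     // The parallel move resolver will later emit the actual moves. |
| //   } |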
| |
| // Basic block entries are administrative nodes. There is a distinguished |
| // graph entry with no predecessor. Joins are the only nodes with multiple |
| // predecessors. Targets are all other basic block entries. The types |
| // enforce edge-split form---joins are forbidden as the successors of |
| // branches. |
| class BlockEntryInstr : public TemplateInstruction<0, NoThrow> { |
| public: |
| virtual intptr_t PredecessorCount() const = 0; |
| virtual BlockEntryInstr* PredecessorAt(intptr_t index) const = 0; |
| |
| intptr_t preorder_number() const { return preorder_number_; } |
| void set_preorder_number(intptr_t number) { preorder_number_ = number; } |
| |
| intptr_t postorder_number() const { return postorder_number_; } |
| void set_postorder_number(intptr_t number) { postorder_number_ = number; } |
| |
| intptr_t block_id() const { return block_id_; } |
| |
| // NOTE: These are SSA positions and not token positions. These are used by |
| // the register allocator. |
| void set_start_pos(intptr_t pos) { start_pos_ = pos; } |
| intptr_t start_pos() const { return start_pos_; } |
| void set_end_pos(intptr_t pos) { end_pos_ = pos; } |
| intptr_t end_pos() const { return end_pos_; } |
| |
| BlockEntryInstr* dominator() const { return dominator_; } |
| BlockEntryInstr* ImmediateDominator() const; |
| |
| const GrowableArray<BlockEntryInstr*>& dominated_blocks() { |
| return dominated_blocks_; |
| } |
| |
| void AddDominatedBlock(BlockEntryInstr* block) { |
| ASSERT(!block->IsFunctionEntry() || this->IsGraphEntry()); |
| block->set_dominator(this); |
| dominated_blocks_.Add(block); |
| } |
| void ClearDominatedBlocks() { dominated_blocks_.Clear(); } |
| |
| bool Dominates(BlockEntryInstr* other) const; |
| |
| Instruction* last_instruction() const { return last_instruction_; } |
| void set_last_instruction(Instruction* instr) { last_instruction_ = instr; } |
| |
| ParallelMoveInstr* parallel_move() const { return parallel_move_; } |
| |
| bool HasParallelMove() const { return parallel_move_ != NULL; } |
| |
| bool HasNonRedundantParallelMove() const { |
| return HasParallelMove() && !parallel_move()->IsRedundant(); |
| } |
| |
| ParallelMoveInstr* GetParallelMove() { |
| if (parallel_move_ == NULL) { |
| parallel_move_ = new ParallelMoveInstr(); |
| } |
| return parallel_move_; |
| } |
| |
| // Discover basic-block structure of the current block. Must be called |
| // on all graph blocks in preorder to yield valid results. As a side effect, |
| // the block entry instructions in the graph are assigned preorder numbers. |
| // The array 'preorder' maps preorder block numbers to the block entry |
| // instruction with that number. The depth first spanning tree is recorded |
| // in the array 'parent', which maps preorder block numbers to the preorder |
| // number of the block's spanning-tree parent. As a side effect of this |
| // function, the set of basic block predecessors (e.g., block entry |
| // instructions of predecessor blocks) and also the last instruction in the |
| // block is recorded in each entry instruction. Returns true when called the |
| // first time on this particular block within one graph traversal, and false |
| // on all successive calls. |
| bool DiscoverBlock(BlockEntryInstr* predecessor, |
| GrowableArray<BlockEntryInstr*>* preorder, |
| GrowableArray<intptr_t>* parent); |
| |
| virtual bool CanBecomeDeoptimizationTarget() const { |
| // BlockEntry environment is copied to Goto and Branch instructions |
| // when we insert new blocks targeting this block. |
| return true; |
| } |
| |
| virtual bool ComputeCanDeoptimize() const { return false; } |
| |
| virtual bool HasUnknownSideEffects() const { return false; } |
| |
| intptr_t try_index() const { return try_index_; } |
| void set_try_index(intptr_t index) { try_index_ = index; } |
| |
| // True for blocks inside a try { } region. |
| bool InsideTryBlock() const { return try_index_ != kInvalidTryIndex; } |
| |
| // Loop related methods. |
| LoopInfo* loop_info() const { return loop_info_; } |
| void set_loop_info(LoopInfo* loop_info) { loop_info_ = loop_info; } |
| bool IsLoopHeader() const; |
| intptr_t NestingDepth() const; |
| |
| virtual BlockEntryInstr* GetBlock() { return this; } |
| |
| virtual TokenPosition token_pos() const { |
| return TokenPosition::kControlFlow; |
| } |
| |
| // Helper to mutate the graph during inlining. This block should be |
| // replaced with new_block as a predecessor of all of this block's |
| // successors. |
| void ReplaceAsPredecessorWith(BlockEntryInstr* new_block); |
| |
| void set_block_id(intptr_t block_id) { block_id_ = block_id; } |
| |
| // Stack-based IR bookkeeping. |
| intptr_t stack_depth() const { return stack_depth_; } |
| void set_stack_depth(intptr_t s) { stack_depth_ = s; } |
| |
| // For all instructions in this block: remove all inputs (including in the |
| // environment) from their definitions' use lists. |
| void ClearAllInstructions(); |
| |
| class InstructionsIterable { |
| public: |
| explicit InstructionsIterable(BlockEntryInstr* block) : block_(block) {} |
| |
| inline ForwardInstructionIterator begin() const; |
| inline ForwardInstructionIterator end() const; |
| |
| private: |
| BlockEntryInstr* block_; |
| }; |
| |
| InstructionsIterable instructions() { return InstructionsIterable(this); } |
| |
| DECLARE_ABSTRACT_INSTRUCTION(BlockEntry) |
| |
| protected: |
| BlockEntryInstr(intptr_t block_id, |
| intptr_t try_index, |
| intptr_t deopt_id, |
| intptr_t stack_depth) |
| : TemplateInstruction(deopt_id), |
| block_id_(block_id), |
| try_index_(try_index), |
| stack_depth_(stack_depth), |
| dominated_blocks_(1) {} |
| |
| // Perform a depth-first search to find the OSR entry and |
| // link it to the given graph entry. |
| bool FindOsrEntryAndRelink(GraphEntryInstr* graph_entry, |
| Instruction* parent, |
| BitVector* block_marks); |
| |
| private: |
| virtual void ClearPredecessors() = 0; |
| virtual void AddPredecessor(BlockEntryInstr* predecessor) = 0; |
| |
| void set_dominator(BlockEntryInstr* instr) { dominator_ = instr; } |
| |
| intptr_t block_id_; |
| intptr_t try_index_; |
| intptr_t preorder_number_ = -1; |
| intptr_t postorder_number_ = -1; |
| // Expected stack depth on entry (for stack-based IR only). |
| intptr_t stack_depth_; |
| // Starting and ending lifetime positions for this block. Used by |
| // the linear scan register allocator. |
| intptr_t start_pos_ = -1; |
| intptr_t end_pos_ = -1; |
| // Immediate dominator, nullptr for graph entry. |
| BlockEntryInstr* dominator_ = nullptr; |
| // TODO(fschneider): Optimize the case of one child to save space. |
| GrowableArray<BlockEntryInstr*> dominated_blocks_; |
| Instruction* last_instruction_ = nullptr; |
| |
| // Parallel move that will be used by linear scan register allocator to |
| // connect live ranges at the start of the block. |
| ParallelMoveInstr* parallel_move_ = nullptr; |
| |
| // Closest enveloping loop in loop hierarchy (nullptr at nesting depth 0). |
| LoopInfo* loop_info_ = nullptr; |
| |
| DISALLOW_COPY_AND_ASSIGN(BlockEntryInstr); |
| }; |
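| |
| // A minimal usage sketch (illustrative only): visiting every instruction in a |
| // block via the InstructionsIterable defined above. |
| // |
| //   for (Instruction* instr : block->instructions()) { |
| //     // ... inspect or transform `instr` ... |
| //   } |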
| |
| class ForwardInstructionIterator { |
| public: |
| ForwardInstructionIterator(const ForwardInstructionIterator& other) = default; |
| ForwardInstructionIterator& operator=( |
| const ForwardInstructionIterator& other) = default; |
| |
| ForwardInstructionIterator() : current_(nullptr) {} |
| |
| explicit ForwardInstructionIterator(BlockEntryInstr* block_entry) |
| : current_(block_entry) { |
| Advance(); |
| } |
| |
| void Advance() { |
| ASSERT(!Done()); |
| current_ = current_->next(); |
| } |
| |
| bool Done() const { return current_ == NULL; } |
| |
| // Removes 'current_' from graph and sets 'current_' to previous instruction. |
| void RemoveCurrentFromGraph(); |
| |
| Instruction* Current() const { return current_; } |
| |
| Instruction* operator*() const { return Current(); } |
| |
| bool operator==(const ForwardInstructionIterator& other) const { |
| return current_ == other.current_; |
| } |
| |
| bool operator!=(const ForwardInstructionIterator& other) const { |
| return !(*this == other); |
| } |
| |
| ForwardInstructionIterator& operator++() { |
| Advance(); |
| return *this; |
| } |
| |
| private: |
| Instruction* current_; |
| }; |
| |
| ForwardInstructionIterator BlockEntryInstr::InstructionsIterable::begin() |
| const { |
| return ForwardInstructionIterator(block_); |
| } |
| |
| ForwardInstructionIterator BlockEntryInstr::InstructionsIterable::end() const { |
| return ForwardInstructionIterator(); |
| } |
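| |
| // A minimal usage sketch (illustrative only): the two equivalent ways to |
| // walk the instructions of a block, assuming `block` is a BlockEntryInstr |
| // in a fully built flow graph: |
| // |
| //   for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) { |
| //     Instruction* instr = it.Current(); |
| //     ... |
| //   } |
| // |
| //   for (Instruction* instr : block->instructions()) { |
| //     ... |
| //   } |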
| |
| class BackwardInstructionIterator : public ValueObject { |
| public: |
| explicit BackwardInstructionIterator(BlockEntryInstr* block_entry) |
| : block_entry_(block_entry), current_(block_entry->last_instruction()) { |
| ASSERT(block_entry_->previous() == NULL); |
| } |
| |
| void Advance() { |
| ASSERT(!Done()); |
| current_ = current_->previous(); |
| } |
| |
| bool Done() const { return current_ == block_entry_; } |
| |
| void RemoveCurrentFromGraph(); |
| |
| Instruction* Current() const { return current_; } |
| |
| private: |
| BlockEntryInstr* block_entry_; |
| Instruction* current_; |
| }; |
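| |
| // A minimal usage sketch (illustrative only): walking a block backwards |
| // from its last instruction; iteration stops once the block entry itself |
| // is reached: |
| // |
| //   for (BackwardInstructionIterator it(block); !it.Done(); it.Advance()) { |
| //     Instruction* instr = it.Current(); |
| //     ... |
| //   } |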
| |
| // Base class shared by all block entries which define initial definitions. |
| // |
| // The initial definitions define parameters, special parameters and constants. |
| class BlockEntryWithInitialDefs : public BlockEntryInstr { |
| public: |
| BlockEntryWithInitialDefs(intptr_t block_id, |
| intptr_t try_index, |
| intptr_t deopt_id, |
| intptr_t stack_depth) |
| : BlockEntryInstr(block_id, try_index, deopt_id, stack_depth) {} |
| |
| GrowableArray<Definition*>* initial_definitions() { |
| return &initial_definitions_; |
| } |
| const GrowableArray<Definition*>* initial_definitions() const { |
| return &initial_definitions_; |
| } |
| |
| virtual BlockEntryWithInitialDefs* AsBlockEntryWithInitialDefs() { |
| return this; |
| } |
| virtual const BlockEntryWithInitialDefs* AsBlockEntryWithInitialDefs() const { |
| return this; |
| } |
| |
| protected: |
| void PrintInitialDefinitionsTo(BaseTextBuffer* f) const; |
| |
| private: |
| GrowableArray<Definition*> initial_definitions_; |
| |
| DISALLOW_COPY_AND_ASSIGN(BlockEntryWithInitialDefs); |
| }; |
| |
| class GraphEntryInstr : public BlockEntryWithInitialDefs { |
| public: |
| GraphEntryInstr(const ParsedFunction& parsed_function, intptr_t osr_id); |
| |
| DECLARE_INSTRUCTION(GraphEntry) |
| |
| virtual intptr_t PredecessorCount() const { return 0; } |
| virtual BlockEntryInstr* PredecessorAt(intptr_t index) const { |
| UNREACHABLE(); |
| return NULL; |
| } |
| virtual intptr_t SuccessorCount() const; |
| virtual BlockEntryInstr* SuccessorAt(intptr_t index) const; |
| |
| void AddCatchEntry(CatchBlockEntryInstr* entry) { catch_entries_.Add(entry); } |
| |
| CatchBlockEntryInstr* GetCatchEntry(intptr_t index); |
| |
| void AddIndirectEntry(IndirectEntryInstr* entry) { |
| indirect_entries_.Add(entry); |
| } |
| |
| ConstantInstr* constant_null(); |
| |
| void RelinkToOsrEntry(Zone* zone, intptr_t max_block_id); |
| bool IsCompiledForOsr() const; |
| intptr_t osr_id() const { return osr_id_; } |
| |
| intptr_t entry_count() const { return entry_count_; } |
| void set_entry_count(intptr_t count) { entry_count_ = count; } |
| |
| intptr_t spill_slot_count() const { return spill_slot_count_; } |
| void set_spill_slot_count(intptr_t count) { |
| ASSERT(count >= 0); |
| spill_slot_count_ = count; |
| } |
| |
| // Returns true if this flow graph needs a stack frame. |
| bool NeedsFrame() const { return needs_frame_; } |
| void MarkFrameless() { needs_frame_ = false; } |
| |
| // Number of stack slots reserved for compiling try-catch. For functions |
| // without try-catch, this is 0. Otherwise, it is the number of local |
| // variables. |
| intptr_t fixed_slot_count() const { return fixed_slot_count_; } |
| void set_fixed_slot_count(intptr_t count) { |
| ASSERT(count >= 0); |
| fixed_slot_count_ = count; |
| } |
| FunctionEntryInstr* normal_entry() const { return normal_entry_; } |
| FunctionEntryInstr* unchecked_entry() const { return unchecked_entry_; } |
| void set_normal_entry(FunctionEntryInstr* entry) { normal_entry_ = entry; } |
| void set_unchecked_entry(FunctionEntryInstr* target) { |
| unchecked_entry_ = target; |
| } |
| OsrEntryInstr* osr_entry() const { return osr_entry_; } |
| void set_osr_entry(OsrEntryInstr* entry) { osr_entry_ = entry; } |
| |
| const ParsedFunction& parsed_function() const { return parsed_function_; } |
| |
| const GrowableArray<CatchBlockEntryInstr*>& catch_entries() const { |
| return catch_entries_; |
| } |
| |
| const GrowableArray<IndirectEntryInstr*>& indirect_entries() const { |
| return indirect_entries_; |
| } |
| |
| bool HasSingleEntryPoint() const { |
| return catch_entries().is_empty() && unchecked_entry() == nullptr; |
| } |
| |
| PRINT_TO_SUPPORT |
| |
| private: |
| GraphEntryInstr(const ParsedFunction& parsed_function, |
| intptr_t osr_id, |
| intptr_t deopt_id); |
| |
| virtual void ClearPredecessors() {} |
| virtual void AddPredecessor(BlockEntryInstr* predecessor) { UNREACHABLE(); } |
| |
| const ParsedFunction& parsed_function_; |
| FunctionEntryInstr* normal_entry_ = nullptr; |
| FunctionEntryInstr* unchecked_entry_ = nullptr; |
| OsrEntryInstr* osr_entry_ = nullptr; |
| GrowableArray<CatchBlockEntryInstr*> catch_entries_; |
| // Indirect targets are blocks reachable only through indirect gotos. |
| GrowableArray<IndirectEntryInstr*> indirect_entries_; |
| const intptr_t osr_id_; |
| intptr_t entry_count_; |
| intptr_t spill_slot_count_; |
| intptr_t fixed_slot_count_; // For try-catch in optimized code. |
| bool needs_frame_ = true; |
| |
| DISALLOW_COPY_AND_ASSIGN(GraphEntryInstr); |
| }; |
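| |
| // A minimal usage sketch (illustrative only), assuming `graph_entry` comes |
| // from an already built flow graph: |
| // |
| //   if (graph_entry->HasSingleEntryPoint()) { |
| //     FunctionEntryInstr* entry = graph_entry->normal_entry(); |
| //     ... |
| //   } else { |
| //     // There is an unchecked entry and/or catch entries as well. |
| //     FunctionEntryInstr* unchecked = graph_entry->unchecked_entry(); |
| //     const auto& catches = graph_entry->catch_entries(); |
| //     ... |
| //   } |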
| |
| class JoinEntryInstr : public BlockEntryInstr { |
| public: |
| JoinEntryInstr(intptr_t block_id, |
| intptr_t try_index, |
| intptr_t deopt_id, |
| intptr_t stack_depth = 0) |
| : BlockEntryInstr(block_id, try_index, deopt_id, stack_depth), |
| predecessors_(2),  // Two is assumed to be the common case. |
| phis_(NULL) {} |
| |
| DECLARE_INSTRUCTION(JoinEntry) |
| |
| virtual intptr_t PredecessorCount() const { return predecessors_.length(); } |
| virtual BlockEntryInstr* PredecessorAt(intptr_t index) const { |
| return predecessors_[index]; |
| } |
| |
| // Returns -1 if pred is not in the list. |
| intptr_t IndexOfPredecessor(BlockEntryInstr* pred) const; |
| |
| ZoneGrowableArray<PhiInstr*>* phis() const { return phis_; } |
| |
| PhiInstr* InsertPhi(intptr_t var_index, intptr_t var_count); |
| void RemoveDeadPhis(Definition* replacement); |
| |
| void InsertPhi(PhiInstr* phi); |
| void RemovePhi(PhiInstr* phi); |
| |
| virtual bool HasUnknownSideEffects() const { return false; } |
| |
| PRINT_TO_SUPPORT |
| |
| private: |
| // Classes that have access to predecessors_ when inlining. |
| friend class BlockEntryInstr; |
| friend class InlineExitCollector; |
| friend class PolymorphicInliner; |
| friend class IndirectEntryInstr; // Access in il_printer.cc. |
| |
| // Direct access to phis_ in order to resize it due to phi elimination. |
| friend class ConstantPropagator; |
| friend class DeadCodeElimination; |
| |
| virtual void ClearPredecessors() { predecessors_.Clear(); } |
| virtual void AddPredecessor(BlockEntryInstr* predecessor); |
| |
| GrowableArray<BlockEntryInstr*> predecessors_; |
| ZoneGrowableArray<PhiInstr*>* phis_; |
| |
| DISALLOW_COPY_AND_ASSIGN(JoinEntryInstr); |
| }; |
| |
| class PhiIterator : public ValueObject { |
| public: |
| explicit PhiIterator(JoinEntryInstr* join) : phis_(join->phis()), index_(0) {} |
| |
| void Advance() { |
| ASSERT(!Done()); |
| index_++; |
| } |
| |
| bool Done() const { return (phis_ == NULL) || (index_ >= phis_->length()); } |
| |
| PhiInstr* Current() const { return (*phis_)[index_]; } |
| |
| // Removes current phi from graph and sets current to previous phi. |
| void RemoveCurrentFromGraph(); |
| |
| private: |
| ZoneGrowableArray<PhiInstr*>* phis_; |
| intptr_t index_; |
| }; |
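| |
| // A minimal usage sketch (illustrative only): visiting every phi of a join |
| // block. Done() also covers the case where the join has no phis at all |
| // (phis() == NULL): |
| // |
| //   for (PhiIterator it(join); !it.Done(); it.Advance()) { |
| //     PhiInstr* phi = it.Current(); |
| //     ... |
| //   } |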
| |
| class TargetEntryInstr : public BlockEntryInstr { |
| public: |
| TargetEntryInstr(intptr_t block_id, |
| intptr_t try_index, |
| intptr_t deopt_id, |
| intptr_t stack_depth = 0) |
| : BlockEntryInstr(block_id, try_index, deopt_id, stack_depth), |
| predecessor_(NULL), |
| edge_weight_(0.0) {} |
| |
| DECLARE_INSTRUCTION(TargetEntry) |
| |
| double edge_weight() const { return edge_weight_; } |
| void set_edge_weight(double weight) { edge_weight_ = weight; } |
| void adjust_edge_weight(double scale_factor) { edge_weight_ *= scale_factor; } |
| |
| virtual intptr_t PredecessorCount() const { |
| return (predecessor_ == NULL) ? 0 : 1; |
| } |
| virtual BlockEntryInstr* PredecessorAt(intptr_t index) const { |
| ASSERT((index == 0) && (predecessor_ != NULL)); |
| return predecessor_; |
| } |
| |
| PRINT_TO_SUPPORT |
| |
| private: |
| friend class BlockEntryInstr; // Access to predecessor_ when inlining. |
| |
| virtual void ClearPredecessors() { predecessor_ = NULL; } |
| virtual void AddPredecessor(BlockEntryInstr* predecessor) { |
| ASSERT(predecessor_ == NULL); |
| predecessor_ = predecessor; |
| } |
| |
| BlockEntryInstr* predecessor_; |
| double edge_weight_; |
| |
| DISALLOW_COPY_AND_ASSIGN(TargetEntryInstr); |
| }; |
| |
| // Represents an entrypoint to a function which callers can invoke (i.e. not |
| // used for OSR entries). |
| // |
| // The flow graph builder might decide to create multiple entrypoints |
| // (e.g. checked/unchecked entrypoints) and will attach those to the |
| // [GraphEntryInstr]. |
| // |
| // Every entrypoint has its own initial definitions. The SSA renaming |
| // will insert phis for parameter instructions if necessary. |
| class FunctionEntryInstr : public BlockEntryWithInitialDefs { |
| public: |
| FunctionEntryInstr(GraphEntryInstr* graph_entry, |
| intptr_t block_id, |
| intptr_t try_index, |
| intptr_t deopt_id) |
| : BlockEntryWithInitialDefs(block_id, |
| try_index, |
| deopt_id, |
| /*stack_depth=*/0), |
| graph_entry_(graph_entry) {} |
| |
| DECLARE_INSTRUCTION(FunctionEntry) |
| |
| virtual intptr_t PredecessorCount() const { |
| return (graph_entry_ == nullptr) ? 0 : 1; |
| } |
| virtual BlockEntryInstr* PredecessorAt(intptr_t index) const { |
| ASSERT(index == 0 && graph_entry_ != nullptr); |
| return graph_entry_; |
| } |
| |
| GraphEntryInstr* graph_entry() const { return graph_entry_; } |
| |
| PRINT_TO_SUPPORT |
| |
| private: |
| virtual void ClearPredecessors() { graph_entry_ = nullptr; } |
| virtual void AddPredecessor(BlockEntryInstr* predecessor) { |
| ASSERT(graph_entry_ == nullptr && predecessor->IsGraphEntry()); |
| graph_entry_ = predecessor->AsGraphEntry(); |
| } |
| |
| GraphEntryInstr* graph_entry_; |
| |
| DISALLOW_COPY_AND_ASSIGN(FunctionEntryInstr); |
| }; |
| |
| // Represents entry into a function from native code. |
| // |
| // Native entries are not allowed to have regular parameters. They should use |
| // NativeParameter instead (which doesn't count as an initial definition). |
| class NativeEntryInstr : public FunctionEntryInstr { |
| public: |
| NativeEntryInstr(const compiler::ffi::CallbackMarshaller& marshaller, |
| GraphEntryInstr* graph_entry, |
| intptr_t block_id, |
| intptr_t try_index, |
| intptr_t deopt_id, |
| intptr_t callback_id) |
| : FunctionEntryInstr(graph_entry, block_id, try_index, deopt_id), |
| callback_id_(callback_id), |
| marshaller_(marshaller) {} |
| |
| DECLARE_INSTRUCTION(NativeEntry) |
| |
| PRINT_TO_SUPPORT |
| |
| private: |
| void SaveArguments(FlowGraphCompiler* compiler) const; |
| void SaveArgument(FlowGraphCompiler* compiler, |
| const compiler::ffi::NativeLocation& loc) const; |
| |
| const intptr_t callback_id_; |
| const compiler::ffi::CallbackMarshaller& marshaller_; |
| }; |
| |
| // Represents an OSR entrypoint to a function. |
| // |
| // The OSR entry has its own initial definitions. |
| class OsrEntryInstr : public BlockEntryWithInitialDefs { |
| public: |
| OsrEntryInstr(GraphEntryInstr* graph_entry, |
| intptr_t block_id, |
| intptr_t try_index, |
| intptr_t deopt_id, |
| intptr_t stack_depth) |
| : BlockEntryWithInitialDefs(block_id, try_index, deopt_id, stack_depth), |
| graph_entry_(graph_entry) {} |
| |
| DECLARE_INSTRUCTION(OsrEntry) |
| |
| virtual intptr_t PredecessorCount() const { |
| return (graph_entry_ == nullptr) ? 0 : 1; |
| } |
| virtual BlockEntryInstr* PredecessorAt(intptr_t index) const { |
| ASSERT(index == 0 && graph_entry_ != nullptr); |
| return graph_entry_; |
| } |
| |
| GraphEntryInstr* graph_entry() const { return graph_entry_; } |
| |
| PRINT_TO_SUPPORT |
| |
| private: |
| virtual void ClearPredecessors() { graph_entry_ = nullptr; } |
| virtual void AddPredecessor(BlockEntryInstr* predecessor) { |
| ASSERT(graph_entry_ == nullptr && predecessor->IsGraphEntry()); |
| graph_entry_ = predecessor->AsGraphEntry(); |
| } |
| |
| GraphEntryInstr* graph_entry_; |
| |
| DISALLOW_COPY_AND_ASSIGN(OsrEntryInstr); |
| }; |
| |
| class IndirectEntryInstr : public JoinEntryInstr { |
| public: |
| IndirectEntryInstr(intptr_t block_id, |
| intptr_t indirect_id, |
| intptr_t try_index, |
| intptr_t deopt_id) |
| : JoinEntryInstr(block_id, try_index, deopt_id), |
| indirect_id_(indirect_id) {} |
| |
| DECLARE_INSTRUCTION(IndirectEntry) |
| |
| intptr_t indirect_id() const { return indirect_id_; } |
| |
| PRINT_TO_SUPPORT |
| |
| private: |
| const intptr_t indirect_id_; |
| }; |
| |
| class CatchBlockEntryInstr : public BlockEntryWithInitialDefs { |
| public: |
| CatchBlockEntryInstr(bool is_generated, |
| intptr_t block_id, |
| intptr_t try_index, |
| GraphEntryInstr* graph_entry, |
| const Array& handler_types, |
| intptr_t catch_try_index, |
| bool needs_stacktrace, |
| intptr_t deopt_id, |
| const LocalVariable* exception_var, |
| const LocalVariable* stacktrace_var, |
| const LocalVariable* raw_exception_var, |
| const LocalVariable* raw_stacktrace_var) |
| : BlockEntryWithInitialDefs(block_id, |
| try_index, |
| deopt_id, |
| /*stack_depth=*/0), |
| graph_entry_(graph_entry), |
| predecessor_(NULL), |
| catch_handler_types_(Array::ZoneHandle(handler_types.ptr())), |
| catch_try_index_(catch_try_index), |
| exception_var_(exception_var), |
| stacktrace_var_(stacktrace_var), |
| raw_exception_var_(raw_exception_var), |
| raw_stacktrace_var_(raw_stacktrace_var), |
| needs_stacktrace_(needs_stacktrace), |
| is_generated_(is_generated) {} |
| |
| DECLARE_INSTRUCTION(CatchBlockEntry) |
| |
| virtual intptr_t PredecessorCount() const { |
| return (predecessor_ == NULL) ? 0 : 1; |
| } |
| virtual BlockEntryInstr* PredecessorAt(intptr_t index) const { |
| ASSERT((index == 0) && (predecessor_ != NULL)); |
| return predecessor_; |
| } |
| |
| GraphEntryInstr* graph_entry() const { return graph_entry_; } |
| |
| const LocalVariable* exception_var() const { return exception_var_; } |
| const LocalVariable* stacktrace_var() const { return stacktrace_var_; } |
| |
| const LocalVariable* raw_exception_var() const { return raw_exception_var_; } |
| const LocalVariable* raw_stacktrace_var() const { |
| return raw_stacktrace_var_; |
| } |
| |
| bool needs_stacktrace() const { return needs_stacktrace_; } |
| |
| bool is_generated() const { return is_generated_; } |
| |
| // Returns try index for the try block to which this catch handler |
| // corresponds. |
| intptr_t catch_try_index() const { return catch_try_index_; } |
| |
| PRINT_TO_SUPPORT |
| |
| private: |
| friend class BlockEntryInstr; // Access to predecessor_ when inlining. |
| |
| virtual void ClearPredecessors() { predecessor_ = NULL; } |
| virtual void AddPredecessor(BlockEntryInstr* predecessor) { |
| ASSERT(predecessor_ == NULL); |
| predecessor_ = predecessor; |
| } |
| |
| GraphEntryInstr* graph_entry_; |
| BlockEntryInstr* predecessor_; |
| const Array& catch_handler_types_; |
| const intptr_t catch_try_index_; |
| GrowableArray<Definition*> initial_definitions_; |
| const LocalVariable* exception_var_; |
| const LocalVariable* stacktrace_var_; |
| const LocalVariable* raw_exception_var_; |
| const LocalVariable* raw_stacktrace_var_; |
| const bool needs_stacktrace_; |
| bool is_generated_; |
| |
| DISALLOW_COPY_AND_ASSIGN(CatchBlockEntryInstr); |
| }; |
| |
| // If the result of the allocation is not stored into any field, passed |
| // as an argument, or used in a phi, then it can't alias with any other |
| // SSA value. |
| class AliasIdentity : public ValueObject { |
| public: |
| // It is unknown if value has aliases. |
| static AliasIdentity Unknown() { return AliasIdentity(kUnknown); } |
| |
| // It is known that value can have aliases. |
| static AliasIdentity Aliased() { return AliasIdentity(kAliased); } |
| |
| // It is known that value has no aliases. |
| static AliasIdentity NotAliased() { return AliasIdentity(kNotAliased); } |
| |
| // It is known that value has no aliases and it was selected by the |
| // allocation sinking pass as a candidate. |
| static AliasIdentity AllocationSinkingCandidate() { |
| return AliasIdentity(kAllocationSinkingCandidate); |
| } |
| |
| #define FOR_EACH_ALIAS_IDENTITY_VALUE(V) \ |
| V(Unknown, 0) \ |
| V(NotAliased, 1) \ |
| V(Aliased, 2) \ |
| V(AllocationSinkingCandidate, 3) |
| |
| const char* ToCString() { |
| switch (value_) { |
| #define VALUE_CASE(name, val) \ |
| case k##name: \ |
| return #name; |
| FOR_EACH_ALIAS_IDENTITY_VALUE(VALUE_CASE) |
| #undef VALUE_CASE |
| default: |
| UNREACHABLE(); |
| return nullptr; |
| } |
| } |
| |
| bool IsUnknown() const { return value_ == kUnknown; } |
| bool IsAliased() const { return value_ == kAliased; } |
| bool IsNotAliased() const { return (value_ & kNotAliased) != 0; } |
| bool IsAllocationSinkingCandidate() const { |
| return value_ == kAllocationSinkingCandidate; |
| } |
| |
| AliasIdentity(const AliasIdentity& other) |
| : ValueObject(), value_(other.value_) {} |
| |
| AliasIdentity& operator=(const AliasIdentity& other) { |
| value_ = other.value_; |
| return *this; |
| } |
| |
| private: |
| explicit AliasIdentity(intptr_t value) : value_(value) {} |
| |
| #define VALUE_DEFN(name, val) k##name = val, |
| enum { FOR_EACH_ALIAS_IDENTITY_VALUE(VALUE_DEFN) }; |
| #undef VALUE_DEFN |
| |
| // Undef the FOR_EACH helper macro, since the enum is private. |
| #undef FOR_EACH_ALIAS_IDENTITY_VALUE |
| |
| COMPILE_ASSERT((kUnknown & kNotAliased) == 0); |
| COMPILE_ASSERT((kAliased & kNotAliased) == 0); |
| COMPILE_ASSERT((kAllocationSinkingCandidate & kNotAliased) != 0); |
| |
| intptr_t value_; |
| }; |
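| |
| // Illustration of the bit encoding asserted above: kNotAliased (1) and |
| // kAllocationSinkingCandidate (3) both have the kNotAliased bit set, so |
| // |
| //   AliasIdentity::AllocationSinkingCandidate().IsNotAliased()  // true |
| //   AliasIdentity::Aliased().IsNotAliased()                     // false |
| // |
| // i.e. a sinking candidate is still known not to alias anything. |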
| |
| // Abstract super-class of all instructions that define a value (Bind, Phi). |
| class Definition : public Instruction { |
| public: |
| explicit Definition(intptr_t deopt_id = DeoptId::kNone) |
| : Instruction(deopt_id) {} |
| |
| explicit Definition(const InstructionSource& source, |
| intptr_t deopt_id = DeoptId::kNone) |
| : Instruction(source, deopt_id) {} |
| |
| // Overridden by definitions that have call counts. |
| virtual intptr_t CallCount() const { return -1; } |
| |
| intptr_t temp_index() const { return temp_index_; } |
| void set_temp_index(intptr_t index) { temp_index_ = index; } |
| void ClearTempIndex() { temp_index_ = -1; } |
| bool HasTemp() const { return temp_index_ >= 0; } |
| |
| intptr_t ssa_temp_index() const { return ssa_temp_index_; } |
| void set_ssa_temp_index(intptr_t index) { |
| ASSERT(index >= 0); |
| ssa_temp_index_ = index; |
| } |
| bool HasSSATemp() const { return ssa_temp_index_ >= 0; } |
| void ClearSSATempIndex() { ssa_temp_index_ = -1; } |
| |
| intptr_t vreg(intptr_t index) const { |
| ASSERT((index >= 0) && (index < location_count())); |
| if (ssa_temp_index_ == -1) return -1; |
| return ssa_temp_index_ * kMaxLocationCount + index; |
| } |
| intptr_t location_count() const { return LocationCount(representation()); } |
| bool HasPairRepresentation() const { return location_count() == 2; } |
| |
| // Compile time type of the definition, which may be requested before type |
| // propagation during graph building. |
| CompileType* Type() { |
| if (type_ == NULL) { |
| auto type = new CompileType(ComputeType()); |
| type->set_owner(this); |
| set_type(type); |
| } |
| return type_; |
| } |
| |
| bool HasType() const { return (type_ != NULL); } |
| |
| inline bool IsInt64Definition(); |
| |
| bool IsInt32Definition() { |
| return IsBinaryInt32Op() || IsBoxInt32() || IsUnboxInt32() || |
| IsIntConverter(); |
| } |
| |
| // Compute compile type for this definition. It is safe to use this |
| // approximation even before the type propagator has run (e.g. during graph |
| // building). |
| virtual CompileType ComputeType() const { return CompileType::Dynamic(); } |
| |
| // Update CompileType of the definition. Returns true if the type has changed. |
| virtual bool RecomputeType() { return false; } |
| |
| PRINT_OPERANDS_TO_SUPPORT |
| PRINT_TO_SUPPORT |
| |
| bool UpdateType(CompileType new_type) { |
| if (type_ == nullptr) { |
| auto type = new CompileType(new_type); |
| type->set_owner(this); |
| set_type(type); |
| return true; |
| } |
| |
| if (type_->IsNone() || !type_->IsEqualTo(&new_type)) { |
| *type_ = new_type; |
| return true; |
| } |
| |
| return false; |
| } |
| |
| bool HasUses() const { |
| return (input_use_list_ != NULL) || (env_use_list_ != NULL); |
| } |
| bool HasOnlyUse(Value* use) const; |
| bool HasOnlyInputUse(Value* use) const; |
| |
| Value* input_use_list() const { return input_use_list_; } |
| void set_input_use_list(Value* head) { input_use_list_ = head; } |
| |
| Value* env_use_list() const { return env_use_list_; } |
| void set_env_use_list(Value* head) { env_use_list_ = head; } |
| |
| ValueListIterable input_uses() const { |
| return ValueListIterable(input_use_list_); |
| } |
| |
| void AddInputUse(Value* value) { Value::AddToList(value, &input_use_list_); } |
| void AddEnvUse(Value* value) { Value::AddToList(value, &env_use_list_); } |
| |
| // Replace uses of this definition with uses of other definition or value. |
| // Precondition: use lists must be properly calculated. |
| // Postcondition: use lists and use values are still valid. |
| void ReplaceUsesWith(Definition* other); |
| |
| // Replace this definition with another instruction. Use the provided result |
| // definition to replace uses of the original definition. If replacing during |
| // iteration, pass the iterator so that the instruction can be replaced |
| // without affecting iteration order, otherwise pass a NULL iterator. |
| void ReplaceWithResult(Instruction* replacement, |
| Definition* replacement_for_uses, |
| ForwardInstructionIterator* iterator); |
| |
| // Replace this definition and all uses with another definition. If |
| // replacing during iteration, pass the iterator so that the instruction |
| // can be replaced without affecting iteration order, otherwise pass a |
| // NULL iterator. |
| void ReplaceWith(Definition* other, ForwardInstructionIterator* iterator); |
| |
| // A value in the constant propagation lattice. |
| // - non-constant sentinel |
| // - a constant (any non-sentinel value) |
| // - unknown sentinel |
| Object& constant_value(); |
| |
| virtual void InferRange(RangeAnalysis* analysis, Range* range); |
| |
| Range* range() const { return range_; } |
| void set_range(const Range&); |
| |
| // Definitions can be canonicalized only into other definitions. To ensure |
| // this statically, we override the base Canonicalize with a version that |
| // returns a Definition (the return type is covariant). |
| virtual Definition* Canonicalize(FlowGraph* flow_graph); |
| |
| static const intptr_t kReplacementMarker = -2; |
| |
| Definition* Replacement() { |
| if (ssa_temp_index_ == kReplacementMarker) { |
| return reinterpret_cast<Definition*>(temp_index_); |
| } |
| return this; |
| } |
| |
| void SetReplacement(Definition* other) { |
| ASSERT(ssa_temp_index_ >= 0); |
| ASSERT(WasEliminated()); |
| ssa_temp_index_ = kReplacementMarker; |
| temp_index_ = reinterpret_cast<intptr_t>(other); |
| } |
| |
| virtual AliasIdentity Identity() const { return AliasIdentity::Unknown(); } |
| |
| virtual void SetIdentity(AliasIdentity identity) { UNREACHABLE(); } |
| |
| // Find the original definition of [this] by following through any |
| // redefinition and check instructions. |
| Definition* OriginalDefinition(); |
| |
| // If this definition is a redefinition (in a broad sense, this includes |
| // CheckArrayBound and CheckNull instructions) return [Value] corresponding |
| // to the input which is being redefined. |
| // Otherwise return [nullptr]. |
| virtual Value* RedefinedValue() const; |
| |
| // Find the original definition of [this]. |
| // |
| // This is an extension of [OriginalDefinition] which also follows through any |
| // boxing/unboxing and constraint instructions. |
| Definition* OriginalDefinitionIgnoreBoxingAndConstraints(); |
| |
| // Helper method to determine if definition denotes an array length. |
| static bool IsArrayLength(Definition* def); |
| |
| virtual Definition* AsDefinition() { return this; } |
| virtual const Definition* AsDefinition() const { return this; } |
| |
| protected: |
| friend class RangeAnalysis; |
| friend class Value; |
| |
| Range* range_ = nullptr; |
| |
| void set_type(CompileType* type) { |
| ASSERT(type->owner() == this); |
| type_ = type; |
| } |
| |
| #if defined(INCLUDE_IL_PRINTER) |
| const char* TypeAsCString() const { |
| return HasType() ? type_->ToCString() : ""; |
| } |
| #endif |
| |
| private: |
| intptr_t temp_index_ = -1; |
| intptr_t ssa_temp_index_ = -1; |
| Value* input_use_list_ = nullptr; |
| Value* env_use_list_ = nullptr; |
| |
| Object* constant_value_ = nullptr; |
| CompileType* type_ = nullptr; |
| |
| DISALLOW_COPY_AND_ASSIGN(Definition); |
| }; |
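| |
| // A minimal usage sketch (illustrative only) of the replacement helpers |
| // above, assuming `def` and `better` are definitions in the same flow graph |
| // with computed use lists: |
| // |
| //   def->ReplaceUsesWith(better);          // reroute all input/env uses |
| // |
| //   // or, when also removing `def` while iterating over its block: |
| //   def->ReplaceWith(better, &forward_iterator); |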
| |
| // Change a value's definition after use lists have been computed. |
| inline void Value::BindTo(Definition* def) { |
| RemoveFromUseList(); |
| set_definition(def); |
| def->AddInputUse(this); |
| } |
| |
| inline void Value::BindToEnvironment(Definition* def) { |
| RemoveFromUseList(); |
| set_definition(def); |
| def->AddEnvUse(this); |
| } |
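| |
| // A minimal usage sketch (illustrative only): rerouting a single use to a |
| // different definition once use lists exist: |
| // |
| //   Value* use = instr->InputAt(0);  // some input of an instruction |
| //   use->BindTo(new_def);            // unlink from the old definition's use |
| //                                    // list and register with new_def |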
| |
| class PureDefinition : public Definition { |
| public: |
| explicit PureDefinition(intptr_t deopt_id) : Definition(deopt_id) {} |
| explicit PureDefinition(const InstructionSource& source, intptr_t deopt_id) |
| : Definition(source, deopt_id) {} |
| |
| virtual bool AllowsCSE() const { return true; } |
| virtual bool HasUnknownSideEffects() const { return false; } |
| }; |
| |
| template <intptr_t N, |
| typename ThrowsTrait, |
| template <typename Impure, typename Pure> class CSETrait = NoCSE> |
| class TemplateDefinition : public CSETrait<Definition, PureDefinition>::Base { |
| public: |
| using BaseClass = typename CSETrait<Definition, PureDefinition>::Base; |
| |
| explicit TemplateDefinition(intptr_t deopt_id = DeoptId::kNone) |
| : BaseClass(deopt_id), inputs_() {} |
| TemplateDefinition(const InstructionSource& source, |
| intptr_t deopt_id = DeoptId::kNone) |
| : BaseClass(source, deopt_id), inputs_() {} |
| |
| virtual intptr_t InputCount() const { return N; } |
| virtual Value* InputAt(intptr_t i) const { return inputs_[i]; } |
| |
| virtual bool MayThrow() const { return ThrowsTrait::kCanThrow; } |
| |
| protected: |
| EmbeddedArray<Value*, N> inputs_; |
| |
| private: |
| friend class BranchInstr; |
| friend class IfThenElseInstr; |
| |
| virtual void RawSetInputAt(intptr_t i, Value* value) { inputs_[i] = value; } |
| }; |
| |
| class VariadicDefinition : public Definition { |
| public: |
| explicit VariadicDefinition(InputsArray&& inputs, |
| intptr_t deopt_id = DeoptId::kNone) |
| : Definition(deopt_id), inputs_(std::move(inputs)) { |
| for (intptr_t i = 0, n = inputs_.length(); i < n; ++i) { |
| SetInputAt(i, inputs_[i]); |
| } |
| } |
| VariadicDefinition(InputsArray&& inputs, |
| const InstructionSource& source, |
| intptr_t deopt_id = DeoptId::kNone) |
| : Definition(source, deopt_id), inputs_(std::move(inputs)) { |
| for (intptr_t i = 0, n = inputs_.length(); i < n; ++i) { |
| SetInputAt(i, inputs_[i]); |
| } |
| } |
| explicit VariadicDefinition(const intptr_t num_inputs, |
| intptr_t deopt_id = DeoptId::kNone) |
| : Definition(deopt_id), inputs_(num_inputs) { |
| inputs_.EnsureLength(num_inputs, nullptr); |
| } |
| VariadicDefinition(const intptr_t num_inputs, |
| const InstructionSource& source, |
| intptr_t deopt_id = DeoptId::kNone) |
| : Definition(source, deopt_id), inputs_(num_inputs) { |
| inputs_.EnsureLength(num_inputs, nullptr); |
| } |
| |
| intptr_t InputCount() const { return inputs_.length(); } |
| Value* InputAt(intptr_t i) const { return inputs_[i]; } |
| |
| protected: |
| InputsArray inputs_; |
| |
| private: |
| void RawSetInputAt(intptr_t i, Value* value) { inputs_[i] = value; } |
| }; |
| |
| class PhiInstr : public VariadicDefinition { |
| public: |
| PhiInstr(JoinEntryInstr* block, intptr_t num_inputs) |
| : VariadicDefinition(num_inputs), |
| block_(block), |
| representation_(kTagged), |
| is_alive_(false), |
| is_receiver_(kUnknownReceiver) {} |
| |
| // Get the block entry for that instruction. |
| virtual BlockEntryInstr* GetBlock() { return block(); } |
| JoinEntryInstr* block() const { return block_; } |
| |
| virtual CompileType ComputeType() const; |
| virtual bool RecomputeType(); |
| |
| virtual bool ComputeCanDeoptimize() const { return false; } |
| |
| virtual bool HasUnknownSideEffects() const { return false; } |
| |
| // Phi is alive if it reaches a non-environment use. |
| bool is_alive() const { return is_alive_; } |
| void mark_alive() { is_alive_ = true; } |
| void mark_dead() { is_alive_ = false; } |
| |
| virtual Representation RequiredInputRepresentation(intptr_t i) const { |
| return representation_; |
| } |
| |
| virtual Representation representation() const { return representation_; } |
| |
| virtual void set_representation(Representation r) { representation_ = r; } |
| |
| // Only Int32 phis in JIT mode are unboxed optimistically. |
| virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const { |
| return (CompilerState::Current().is_aot() || |
| (representation_ != kUnboxedInt32)) |
| ? kNotSpeculative |
| : kGuardInputs; |
| } |
| |
| virtual uword Hash() const { |
| UNREACHABLE(); |
| return 0; |
| } |
| |
| DECLARE_INSTRUCTION(Phi) |
| |
| virtual void InferRange(RangeAnalysis* analysis, Range* range); |
| |
| BitVector* reaching_defs() const { return reaching_defs_; } |
| |
| void set_reaching_defs(BitVector* reaching_defs) { |
| reaching_defs_ = reaching_defs; |
| } |
| |
| virtual bool MayThrow() const { return false; } |
| |
| // A phi is redundant if all input operands are the same. |
| bool IsRedundant() const; |
| |
| // A phi is redundant if all input operands are redefinitions of the same |
| // value. Returns the replacement for this phi if it is redundant. |
| // The replacement is selected among values redefined by inputs. |
| Definition* GetReplacementForRedundantPhi() const; |
| |
| virtual Definition* Canonicalize(FlowGraph* flow_graph); |
| |
| PRINT_TO_SUPPORT |
| |
| enum ReceiverType { kUnknownReceiver = -1, kNotReceiver = 0, kReceiver = 1 }; |
| |
| ReceiverType is_receiver() const { |
| return static_cast<ReceiverType>(is_receiver_); |
| } |
| |
| void set_is_receiver(ReceiverType is_receiver) { is_receiver_ = is_receiver; } |
| |
| private: |
| // Direct access to inputs_ in order to resize it due to unreachable |
| // predecessors. |
| friend class ConstantPropagator; |
| |
| JoinEntryInstr* block_; |
| Representation representation_; |
| BitVector* reaching_defs_ = nullptr; |
| bool is_alive_; |
| int8_t is_receiver_; |
| |
| DISALLOW_COPY_AND_ASSIGN(PhiInstr); |
| }; |
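| |
| // Worked example (illustrative only): a phi whose inputs all refer to the |
| // same definition, e.g. phi(v2, v2), is redundant and IsRedundant() returns |
| // true; such a phi can simply be replaced by v2. |
| // GetReplacementForRedundantPhi() extends this to inputs that are merely |
| // redefinitions of one underlying value. |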
| |
| // This instruction represents an incoming parameter for a function entry, |
| // an incoming value for an OSR entry, or an incoming value for a catch entry. |
| // Value [index] always denotes the position of the parameter. When [base_reg] |
| // is set to FPREG, value [index] corresponds to environment variable index |
| // (0 is the very first parameter, 1 is next and so on). When [base_reg] is |
| // set to SPREG, value [index] needs to be reversed (0 is the very last |
| // parameter, 1 is next and so on) to get the sp relative position. |
| class ParameterInstr : public TemplateDefinition<0, NoThrow> { |
| public: |
| ParameterInstr(intptr_t index, |
| intptr_t param_offset, |
| BlockEntryInstr* block, |
| Representation representation, |
| Register base_reg = FPREG) |
| : index_(index), |
| param_offset_(param_offset), |
| base_reg_(base_reg), |
| representation_(representation), |
| block_(block) {} |
| |
| DECLARE_INSTRUCTION(Parameter) |
| DECLARE_ATTRIBUTES(index()) |
| |
| intptr_t index() const { return index_; } |
| intptr_t param_offset() const { return param_offset_; } |
| Register base_reg() const { return base_reg_; } |
| |
| // Get the block entry for that instruction. |
| virtual BlockEntryInstr* GetBlock() { return block_; } |
| void set_block(BlockEntryInstr* block) { block_ = block; } |
| |
| virtual Representation representation() const { return representation_; } |
| |
| virtual Representation RequiredInputRepresentation(intptr_t index) const { |
| ASSERT(index == 0); |
| return representation(); |
| } |
| |
| virtual bool ComputeCanDeoptimize() const { return false; } |
| |
| virtual bool HasUnknownSideEffects() const { return false; } |
| |
| virtual uword Hash() const { |
| UNREACHABLE(); |
| return 0; |
| } |
| |
| virtual CompileType ComputeType() const; |
| |
| PRINT_OPERANDS_TO_SUPPORT |
| |
| private: |
| const intptr_t index_; |
| |
| // The offset (in words) of the last slot of the parameter, relative |
| // to the first parameter. |
| // It is used in the FlowGraphAllocator when it sets the assigned location |
| // and spill slot for the parameter definition. |
| const intptr_t param_offset_; |
| const Register base_reg_; |
| const Representation representation_; |
| BlockEntryInstr* block_; |
| |
| DISALLOW_COPY_AND_ASSIGN(ParameterInstr); |
| }; |
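| |
| // Worked example (illustrative only) of the [index] numbering described |
| // above, for a function with three parameters p0, p1, p2: |
| // |
| //   base_reg == FPREG:  index 0 -> p0, index 1 -> p1, index 2 -> p2 |
| //   base_reg == SPREG:  index 0 -> p2, index 1 -> p1, index 2 -> p0 |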
| |
| // Native parameters are not treated as initial definitions because they cannot |
| // be inlined and are only usable in optimized code. The location must be a |
| // stack location relative to the position of the stack (SPREG) after |
| // register-based arguments have been saved on entry to a native call. See |
| // NativeEntryInstr::EmitNativeCode for more details. |
| // |
| // TODO(33549): Unify with ParameterInstr. |
| class NativeParameterInstr : public TemplateDefinition<0, NoThrow> { |
| public: |
| NativeParameterInstr(const compiler::ffi::CallbackMarshaller& marshaller, |
| intptr_t def_index) |
| : marshaller_(marshaller), def_index_(def_index) {} |
| |
| DECLARE_INSTRUCTION(NativeParameter) |
| |
| virtual Representation representation() const { |
| return marshaller_.RepInFfiCall(def_index_); |
| } |
| |
| virtual bool ComputeCanDeoptimize() const { return false; } |
| |
| virtual bool HasUnknownSideEffects() const { return false; } |
| |
| // TODO(sjindel): We can make this more precise. |
| virtual CompileType ComputeType() const { return CompileType::Dynamic(); } |
| |
| PRINT_OPERANDS_TO_SUPPORT |
| |
| private: |
| const compiler::ffi::CallbackMarshaller& marshaller_; |
| const intptr_t def_index_; |
| |
| DISALLOW_COPY_AND_ASSIGN(NativeParameterInstr); |
| }; |
| |
| // Stores a tagged pointer to a slot accessible from a fixed register. It has |
| // the form: |
| // |
| // base_reg[index + #constant] = value |
| // |
| // Input 0: A tagged Smi [index] |
| // Input 1: A tagged pointer [value] |
| // offset: A signed constant offset which fits into 8 bits |
| // |
| // Currently this instruction pins the base register to FP. |
| // |
| // This low-level instruction is non-inlinable since it makes assumptions about |
| // the frame. This is asserted via `inliner.cc::CalleeGraphValidator`. |
| class StoreIndexedUnsafeInstr : public TemplateInstruction<2, NoThrow> { |
| public: |
| StoreIndexedUnsafeInstr(Value* index, Value* value, intptr_t offset) |
| : offset_(offset) { |
| SetInputAt(kIndexPos, index); |
| SetInputAt(kValuePos, value); |
| } |
| |
| enum { kIndexPos = 0, kValuePos = 1 }; |
| |
| DECLARE_INSTRUCTION(StoreIndexedUnsafe) |
| |
| virtual Representation RequiredInputRepresentation(intptr_t index) const { |
| ASSERT(index == kIndexPos || index == kValuePos); |
| return kTagged; |
| } |
| virtual bool ComputeCanDeoptimize() const { return false; } |
| virtual bool HasUnknownSideEffects() const { return false; } |
| |
| virtual bool AttributesEqual(const Instruction& other) const { |
| return other.AsStoreIndexedUnsafe()->offset() == offset(); |
| } |
| |
| Value* index() const { return inputs_[kIndexPos]; } |
| Value* value() const { return inputs_[kValuePos]; } |
| Register base_reg() const { return FPREG; } |
| intptr_t offset() const { return offset_; } |
| |
| PRINT_OPERANDS_TO_SUPPORT |
| |
| private: |
| const intptr_t offset_; |
| |
| DISALLOW_COPY_AND_ASSIGN(StoreIndexedUnsafeInstr); |
| }; |
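| |
| // A minimal construction sketch (illustrative only; graph insertion and the |
| // surrounding pass are omitted): |
| // |
| //   auto* store = new StoreIndexedUnsafeInstr( |
| //       new Value(index_def), new Value(value_def), /*offset=*/0); |
| //   // store->base_reg() is always FPREG; the effect is |
| //   //   fp[index + offset] = value |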
| |
| // Loads a value from a slot accessible from a fixed register. It has |
| // the form: |
| // |
| // base_reg[index + #constant] |
| // |
| // Input 0: A tagged Smi [index] |
| // offset: A signed constant offset which fits into 8 bits |
| // |
| // Currently this instruction pins the base register to FP. |
| // |
| // This low-level instruction is non-inlinable since it makes assumptions |
| // about the frame. This is asserted via `inliner.cc::CalleeGraphValidator`. |
| class LoadIndexedUnsafeInstr : public TemplateDefinition<1, NoThrow> { |
| public: |
| LoadIndexedUnsafeInstr(Value* index, |
| intptr_t offset, |
| CompileType result_type, |
| Representation representation = kTagged) |
| : offset_(offset), representation_(representation) { |
| UpdateType(result_type); |
| SetInputAt(0, index); |
| } |
| |
| DECLARE_INSTRUCTION(LoadIndexedUnsafe) |
| |
| virtual Representation RequiredInputRepresentation(intptr_t index) const { |
| ASSERT(index == 0); |
| return kTagged; |
| } |
| virtual bool ComputeCanDeoptimize() const { return false; } |
| virtual bool HasUnknownSideEffects() const { return false; } |
| |
| virtual bool AttributesEqual(const Instruction& other) const { |
| return other.AsLoadIndexedUnsafe()->offset() == offset(); |
| |