blob: 5a742d070ee702fe040c1ccfbbc952ac0df1e32c [file] [log] [blame]
// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#include "vm/compiler/backend/il.h"
#include "vm/bit_vector.h"
#include "vm/bootstrap.h"
#include "vm/compiler/aot/dispatch_table_generator.h"
#include "vm/compiler/backend/code_statistics.h"
#include "vm/compiler/backend/constant_propagator.h"
#include "vm/compiler/backend/evaluator.h"
#include "vm/compiler/backend/flow_graph_compiler.h"
#include "vm/compiler/backend/linearscan.h"
#include "vm/compiler/backend/locations.h"
#include "vm/compiler/backend/loops.h"
#include "vm/compiler/backend/range_analysis.h"
#include "vm/compiler/ffi/frame_rebase.h"
#include "vm/compiler/ffi/marshaller.h"
#include "vm/compiler/ffi/native_calling_convention.h"
#include "vm/compiler/ffi/native_location.h"
#include "vm/compiler/ffi/native_type.h"
#include "vm/compiler/frontend/flow_graph_builder.h"
#include "vm/compiler/frontend/kernel_translation_helper.h"
#include "vm/compiler/jit/compiler.h"
#include "vm/compiler/method_recognizer.h"
#include "vm/cpu.h"
#include "vm/dart_entry.h"
#include "vm/object.h"
#include "vm/object_store.h"
#include "vm/os.h"
#include "vm/regexp_assembler_ir.h"
#include "vm/resolver.h"
#include "vm/runtime_entry.h"
#include "vm/scopes.h"
#include "vm/stack_frame.h"
#include "vm/stub_code.h"
#include "vm/symbols.h"
#include "vm/type_testing_stubs.h"
#include "vm/compiler/backend/il_printer.h"
namespace dart {
// Compiler tuning flags for inline-cache (IC) handling in this file.
DEFINE_FLAG(bool,
            propagate_ic_data,
            true,
            "Propagate IC data from unoptimized to optimized IC calls.");
DEFINE_FLAG(bool,
            two_args_smi_icd,
            true,
            "Generate special IC stubs for two args Smi operations");
// Flags defined in other compilation units but consulted here.
DECLARE_FLAG(bool, inline_alloc);
DECLARE_FLAG(bool, use_slow_path);
class SubtypeFinder {
public:
SubtypeFinder(Zone* zone,
GrowableArray<intptr_t>* cids,
bool include_abstract)
: array_handles_(zone),
class_handles_(zone),
cids_(cids),
include_abstract_(include_abstract) {}
void ScanImplementorClasses(const Class& klass) {
// An implementor of [klass] is
// * the [klass] itself.
// * all implementors of the direct subclasses of [klass].
// * all implementors of the direct implementors of [klass].
if (include_abstract_ || !klass.is_abstract()) {
cids_->Add(klass.id());
}
ScopedHandle<GrowableObjectArray> array(&array_handles_);
ScopedHandle<Class> subclass_or_implementor(&class_handles_);
*array = klass.direct_subclasses();
if (!array->IsNull()) {
for (intptr_t i = 0; i < array->Length(); ++i) {
*subclass_or_implementor ^= (*array).At(i);
ScanImplementorClasses(*subclass_or_implementor);
}
}
*array = klass.direct_implementors();
if (!array->IsNull()) {
for (intptr_t i = 0; i < array->Length(); ++i) {
*subclass_or_implementor ^= (*array).At(i);
ScanImplementorClasses(*subclass_or_implementor);
}
}
}
private:
ReusableHandleStack<GrowableObjectArray> array_handles_;
ReusableHandleStack<Class> class_handles_;
GrowableArray<intptr_t>* cids_;
const bool include_abstract_;
};
// Returns (computing and caching on first use) the cid ranges covering
// all subtypes of [klass], selected by the abstractness/nullability flags.
const CidRangeVector& HierarchyInfo::SubtypeRangesForClass(
    const Class& klass,
    bool include_abstract,
    bool exclude_null) {
  ClassTable* table = thread()->isolate_group()->class_table();
  const intptr_t cid_count = table->NumCids();
  // There is one cache per (include_abstract, exclude_null) combination.
  std::unique_ptr<CidRangeVector[]>* cache =
      include_abstract
          ? (exclude_null ? &cid_subtype_ranges_abstract_nonnullable_
                          : &cid_subtype_ranges_abstract_nullable_)
          : (exclude_null ? &cid_subtype_ranges_nonnullable_
                          : &cid_subtype_ranges_nullable_);
  if (*cache == nullptr) {
    // Lazily allocate one vector slot per class id.
    cache->reset(new CidRangeVector[cid_count]);
  }
  CidRangeVector& ranges = (*cache)[klass.id()];
  if (ranges.length() == 0) {
    // An empty vector means the ranges were not computed yet.
    BuildRangesFor(table, &ranges, klass, include_abstract, exclude_null);
  }
  return ranges;
}
class CidCheckerForRanges : public ValueObject {
public:
CidCheckerForRanges(Thread* thread,
ClassTable* table,
const Class& cls,
bool include_abstract,
bool exclude_null)
: thread_(thread),
table_(table),
supertype_(AbstractType::Handle(zone(), cls.RareType())),
include_abstract_(include_abstract),
exclude_null_(exclude_null),
to_check_(Class::Handle(zone())),
subtype_(AbstractType::Handle(zone())) {}
bool MayInclude(intptr_t cid) {
if (!table_->HasValidClassAt(cid)) return true;
if (cid == kTypeArgumentsCid) return true;
if (cid == kVoidCid) return true;
if (cid == kDynamicCid) return true;
if (cid == kNeverCid) return true;
if (!exclude_null_ && cid == kNullCid) return true;
to_check_ = table_->At(cid);
ASSERT(!to_check_.IsNull());
if (!include_abstract_ && to_check_.is_abstract()) return true;
return to_check_.IsTopLevel();
}
bool MustInclude(intptr_t cid) {
ASSERT(!MayInclude(cid));
if (cid == kNullCid) return false;
to_check_ = table_->At(cid);
subtype_ = to_check_.RareType();
// Create local zone because deep hierarchies may allocate lots of handles.
StackZone stack_zone(thread_);
HANDLESCOPE(thread_);
return subtype_.IsSubtypeOf(supertype_, Heap::kNew);
}
private:
Zone* zone() const { return thread_->zone(); }
Thread* const thread_;
ClassTable* const table_;
const AbstractType& supertype_;
const bool include_abstract_;
const bool exclude_null_;
Class& to_check_;
AbstractType& subtype_;
};
// Build the ranges either for:
//    "<obj> as <Type>", or
//    "<obj> is <Type>"
// by scanning the entire class table linearly.
void HierarchyInfo::BuildRangesUsingClassTableFor(ClassTable* table,
                                                  CidRangeVector* ranges,
                                                  const Class& klass,
                                                  bool include_abstract,
                                                  bool exclude_null) {
  CidCheckerForRanges checker(thread(), table, klass, include_abstract,
                              exclude_null);
  const intptr_t cid_count = table->NumCids();
  // Open range [first, last]; -1 means no range is currently open.
  intptr_t first = -1;
  intptr_t last = -1;
  for (intptr_t cid = kInstanceCid; cid < cid_count; ++cid) {
    // "Don't care" cids never force a range boundary; skip them so they can
    // be absorbed into whichever side minimizes the number of ranges.
    if (checker.MayInclude(cid)) continue;
    if (checker.MustInclude(cid)) {
      // A definite member: open a new range or extend the current one.
      if (first == -1) first = cid;
      last = cid;
    } else if (first != -1) {
      // A definite non-member: close the open range at the most recent
      // definite member seen.
      ranges->Add({first, last});
      first = last = -1;
    }
  }
  // Flush a trailing open range, if any.
  if (first != -1) {
    ranges->Add({first, last});
  }
}
// Builds the subtype cid ranges for [dst_klass] by scanning its recorded
// subclasses/implementors (falling back to a full class-table scan when
// that information is unavailable).
void HierarchyInfo::BuildRangesFor(ClassTable* table,
                                   CidRangeVector* ranges,
                                   const Class& dst_klass,
                                   bool include_abstract,
                                   bool exclude_null) {
  // Use the class table in cases where the direct subclasses and implementors
  // are not filled out.
  if (dst_klass.InVMIsolateHeap() || dst_klass.id() == kInstanceCid) {
    BuildRangesUsingClassTableFor(table, ranges, dst_klass, include_abstract,
                                  exclude_null);
    return;
  }
  Zone* zone = thread()->zone();
  GrowableArray<intptr_t> cids;
  SubtypeFinder finder(zone, &cids, include_abstract);
  {
    SafepointReadRwLocker ml(thread(),
                             thread()->isolate_group()->program_lock());
    finder.ScanImplementorClasses(dst_klass);
  }
  if (cids.is_empty()) return;
  // Sort all collected cids.
  intptr_t* cids_array = cids.data();
  qsort(cids_array, cids.length(), sizeof(intptr_t),
        [](const void* a, const void* b) {
          // MSAN seems unaware of allocations inside qsort. The linker flag
          // -fsanitize=memory should give us a MSAN-aware version of libc...
          MSAN_UNPOISON(static_cast<const intptr_t*>(a), sizeof(intptr_t));
          MSAN_UNPOISON(static_cast<const intptr_t*>(b), sizeof(intptr_t));
          // Compare explicitly instead of returning the difference: the
          // difference of two intptr_t values may overflow or be truncated
          // when narrowed to the comparator's int return type.
          const intptr_t lhs = *static_cast<const intptr_t*>(a);
          const intptr_t rhs = *static_cast<const intptr_t*>(b);
          if (lhs < rhs) return -1;
          if (lhs > rhs) return 1;
          return 0;
        });
  // Build ranges of all the cids.
  CidCheckerForRanges checker(thread(), table, dst_klass, include_abstract,
                              exclude_null);
  intptr_t left_cid = -1;
  intptr_t right_cid = -1;
  intptr_t previous_cid = -1;
  for (intptr_t i = 0; i < cids.length(); ++i) {
    const intptr_t current_cid = cids[i];
    if (current_cid == previous_cid) continue;  // Skip duplicates.
    // We sorted, after all!
    RELEASE_ASSERT(previous_cid < current_cid);
    if (left_cid != -1) {
      ASSERT(previous_cid != -1);
      // Check the cids between the previous cid from cids and this one.
      for (intptr_t j = previous_cid + 1; j < current_cid; ++j) {
        // Stop if we find a do-care class before reaching the current cid.
        if (!checker.MayInclude(j)) {
          ranges->Add({left_cid, right_cid});
          left_cid = right_cid = -1;
          break;
        }
      }
    }
    previous_cid = current_cid;
    if (checker.MayInclude(current_cid)) continue;
    if (checker.MustInclude(current_cid)) {
      if (left_cid == -1) {
        // Open a new range starting at this cid.
        left_cid = current_cid;
      }
      right_cid = current_cid;
    } else if (left_cid != -1) {
      // Close the existing range.
      ranges->Add({left_cid, right_cid});
      left_cid = right_cid = -1;
    }
  }
  // If there is an open cid-range which we haven't finished yet, we'll
  // complete it.
  if (left_cid != -1) {
    ranges->Add(CidRange{left_cid, right_cid});
  }
}
bool HierarchyInfo::CanUseSubtypeRangeCheckFor(const AbstractType& type) {
ASSERT(type.IsFinalized());
if (!type.IsInstantiated() || !type.IsType()) {
return false;
}
// The FutureOr<T> type cannot be handled by checking whether the instance is
// a subtype of FutureOr and then checking whether the type argument `T`
// matches.
//
// Instead we would need to perform multiple checks:
//
// instance is Null || instance is T || instance is Future<T>
//
if (type.IsFutureOrType()) {
return false;
}
Zone* zone = thread()->zone();
const Class& type_class = Class::Handle(zone, type.type_class());
// We can use class id range checks only if we don't have to test type
// arguments.
//
// This is e.g. true for "String" but also for "List<dynamic>". (A type for
// which the type arguments vector is filled with "dynamic" is known as a rare
// type)
if (type_class.IsGeneric()) {
// TODO(kustermann): We might want to consider extending this when the type
// arguments are not "dynamic" but instantiated-to-bounds.
const Type& rare_type =
Type::Handle(zone, Type::RawCast(type_class.RareType()));
if (!rare_type.IsSubtypeOf(type, Heap::kNew)) {
ASSERT(type.arguments() != TypeArguments::null());
return false;
}
}
return true;
}
bool HierarchyInfo::CanUseGenericSubtypeRangeCheckFor(
const AbstractType& type) {
ASSERT(type.IsFinalized());
if (!type.IsType() || type.IsDartFunctionType()) {
return false;
}
// The FutureOr<T> type cannot be handled by checking whether the instance is
// a subtype of FutureOr and then checking whether the type argument `T`
// matches.
//
// Instead we would need to perform multiple checks:
//
// instance is Null || instance is T || instance is Future<T>
//
if (type.IsFutureOrType()) {
return false;
}
// NOTE: We do allow non-instantiated types here (in comparison to
// [CanUseSubtypeRangeCheckFor], since we handle type parameters in the type
// expression in some cases (see below).
Zone* zone = thread()->zone();
const Class& type_class = Class::Handle(zone, type.type_class());
const intptr_t num_type_parameters = type_class.NumTypeParameters();
const intptr_t num_type_arguments = type_class.NumTypeArguments();
// This function should only be called for generic classes.
ASSERT(type_class.NumTypeParameters() > 0 &&
type.arguments() != TypeArguments::null());
const TypeArguments& ta =
TypeArguments::Handle(zone, Type::Cast(type).arguments());
ASSERT(ta.Length() == num_type_arguments);
// The last [num_type_pararameters] entries in the [TypeArguments] vector [ta]
// are the values we have to check against. Ensure we can handle all of them
// via [CidRange]-based checks or that it is a type parameter.
AbstractType& type_arg = AbstractType::Handle(zone);
for (intptr_t i = 0; i < num_type_parameters; ++i) {
type_arg = ta.TypeAt(num_type_arguments - num_type_parameters + i);
if (!CanUseSubtypeRangeCheckFor(type_arg) && !type_arg.IsTypeParameter()) {
return false;
}
}
return true;
}
// In AOT, returns whether an "is" test against [type] reduces to a single
// contiguous cid range; on success the range is stored in the out-params.
bool HierarchyInfo::InstanceOfHasClassRange(const AbstractType& type,
                                            intptr_t* lower_limit,
                                            intptr_t* upper_limit) {
  ASSERT(CompilerState::Current().is_aot());
  if (type.IsNullable()) {
    // 'is' test for nullable types should accept null cid in addition to the
    // class range. In most cases it is not possible to extend class range to
    // include kNullCid.
    return false;
  }
  if (!CanUseSubtypeRangeCheckFor(type)) {
    return false;
  }
  const Class& type_class =
      Class::Handle(thread()->zone(), type.type_class());
  const CidRangeVector& ranges =
      SubtypeRangesForClass(type_class,
                            /*include_abstract=*/false,
                            /*exclude_null=*/true);
  // Only a single contiguous range can be turned into a class-id interval
  // check.
  if (ranges.length() != 1) {
    return false;
  }
  const CidRangeValue& range = ranges[0];
  ASSERT(!range.IsIllegalRange());
  *lower_limit = range.cid_start;
  *upper_limit = range.cid_end;
  return true;
}
// The set of supported non-integer unboxed representations.
// Format: (unboxed representations suffix, boxed class type)
#define FOR_EACH_NON_INT_BOXED_REPRESENTATION(M)                               \
  M(Double, Double)                                                            \
  M(Float, Double)                                                             \
  M(Float32x4, Float32x4)                                                      \
  M(Float64x2, Float64x2)                                                      \
  M(Int32x4, Int32x4)
// Expands to a switch case that returns true for a boxable representation.
#define BOXING_IN_SET_CASE(unboxed, boxed)                                     \
  case kUnboxed##unboxed:                                                      \
    return true;
// Expands to a switch case that returns the boxed class's value offset.
#define BOXING_VALUE_OFFSET_CASE(unboxed, boxed)                               \
  case kUnboxed##unboxed:                                                      \
    return compiler::target::boxed::value_offset();
// Expands to a switch case that returns the boxed class's cid.
#define BOXING_CID_CASE(unboxed, boxed)                                        \
  case kUnboxed##unboxed:                                                      \
    return k##boxed##Cid;
// Returns whether values of representation [rep] can be boxed at all.
bool Boxing::Supports(Representation rep) {
  // All unboxed integer representations are boxable (Smi or Mint).
  if (RepresentationUtils::IsUnboxedInteger(rep)) {
    return true;
  }
  // The supported non-integer representations, spelled out explicitly
  // (matches FOR_EACH_NON_INT_BOXED_REPRESENTATION).
  switch (rep) {
    case kUnboxedDouble:
    case kUnboxedFloat:
    case kUnboxedFloat32x4:
    case kUnboxedFloat64x2:
    case kUnboxedInt32x4:
      return true;
    default:
      return false;
  }
}
// Returns whether boxing a value of representation [rep] must allocate.
// Integers that fit into a Smi can be boxed allocation-free.
bool Boxing::RequiresAllocation(Representation rep) {
  if (!RepresentationUtils::IsUnboxedInteger(rep)) {
    return true;
  }
  const intptr_t value_bits =
      kBitsPerByte * RepresentationUtils::ValueSize(rep);
  return value_bits > compiler::target::kSmiBits;
}
// Returns the offset of the payload inside the box object for [rep].
intptr_t Boxing::ValueOffset(Representation rep) {
  // Integers that need allocation (and fit in 64 bits) are boxed as Mints.
  if (RepresentationUtils::IsUnboxedInteger(rep) &&
      Boxing::RequiresAllocation(rep) &&
      RepresentationUtils::ValueSize(rep) <= sizeof(int64_t)) {
    return compiler::target::Mint::value_offset();
  }
  // Explicit per-representation cases (matches
  // FOR_EACH_NON_INT_BOXED_REPRESENTATION).
  switch (rep) {
    case kUnboxedDouble:
    case kUnboxedFloat:  // Floats are boxed as Doubles.
      return compiler::target::Double::value_offset();
    case kUnboxedFloat32x4:
      return compiler::target::Float32x4::value_offset();
    case kUnboxedFloat64x2:
      return compiler::target::Float64x2::value_offset();
    case kUnboxedInt32x4:
      return compiler::target::Int32x4::value_offset();
    default:
      UNREACHABLE();
      return 0;
  }
}
// Note that not all boxes require allocation (e.g., Smis).
// Returns the class id of the box object used for representation [rep].
intptr_t Boxing::BoxCid(Representation rep) {
  if (RepresentationUtils::IsUnboxedInteger(rep)) {
    // Small integers box as Smis; anything up to 64 bits as Mints.
    if (!Boxing::RequiresAllocation(rep)) {
      return kSmiCid;
    }
    if (RepresentationUtils::ValueSize(rep) <= sizeof(int64_t)) {
      return kMintCid;
    }
  }
  // Explicit per-representation cases (matches
  // FOR_EACH_NON_INT_BOXED_REPRESENTATION).
  switch (rep) {
    case kUnboxedDouble:
    case kUnboxedFloat:  // Floats are boxed as Doubles.
      return kDoubleCid;
    case kUnboxedFloat32x4:
      return kFloat32x4Cid;
    case kUnboxedFloat64x2:
      return kFloat64x2Cid;
    case kUnboxedInt32x4:
      return kInt32x4Cid;
    default:
      UNREACHABLE();
      return kIllegalCid;
  }
}
#undef BOXING_CID_CASE
#undef BOXING_VALUE_OFFSET_CASE
#undef BOXING_IN_SET_CASE
#undef FOR_EACH_NON_INT_BOXED_REPRESENTATION
#if defined(DEBUG)
// Debug-only sanity check: fields referenced by instructions must be zone
// handles, and the background compiler must only see field clones.
void Instruction::CheckField(const Field& field) const {
  ASSERT(field.IsZoneHandle());
  ASSERT(!Compiler::IsBackgroundCompilation() || !field.IsOriginal());
}
#endif  // DEBUG
// A value in the constant propagation lattice.
//    - non-constant sentinel
//    - a constant (any non-sentinel value)
//    - unknown sentinel
Object& Definition::constant_value() {
  // Lazily initialize to the "unknown" lattice sentinel on first access.
  // (nullptr rather than NULL, matching the rest of this file.)
  if (constant_value_ == nullptr) {
    constant_value_ = &Object::ZoneHandle(ConstantPropagator::Unknown());
  }
  return *constant_value_;
}
// Follows chains of redefinitions (Redefinition, AssertAssignable, checks,
// ...) back to the underlying definition.
Definition* Definition::OriginalDefinition() {
  Definition* current = this;
  for (Value* redefined = current->RedefinedValue(); redefined != nullptr;
       redefined = current->RedefinedValue()) {
    current = redefined->definition();
  }
  return current;
}
// Most definitions do not redefine any value.
Value* Definition::RedefinedValue() const {
  return nullptr;
}
// A Redefinition passes through (and refines the type of) its input.
Value* RedefinitionInstr::RedefinedValue() const {
  return value();
}
// AssertAssignable passes through its checked value.
Value* AssertAssignableInstr::RedefinedValue() const {
  return value();
}
// AssertBoolean passes through its checked value.
Value* AssertBooleanInstr::RedefinedValue() const {
  return value();
}
// Bounds checks redefine the checked index.
Value* CheckBoundBase::RedefinedValue() const {
  return index();
}
// Null checks redefine the checked value.
Value* CheckNullInstr::RedefinedValue() const {
  return value();
}
// Like OriginalDefinition(), but additionally looks through Constraint,
// Box, Unbox, and IntConverter wrappers.
Definition* Definition::OriginalDefinitionIgnoreBoxingAndConstraints() {
  Definition* current = this;
  while (true) {
    const bool strips_single_input =
        current->IsConstraint() || current->IsBox() || current->IsUnbox() ||
        current->IsIntConverter();
    Definition* next = strips_single_input
                           ? current->InputAt(0)->definition()
                           : current->OriginalDefinition();
    // Fixed point reached: nothing left to unwrap.
    if (next == current) return current;
    current = next;
  }
}
// Returns true if [def] (looking through boxing and constraints) is a load
// of an immutable array length.
bool Definition::IsArrayLength(Definition* def) {
  if (def == nullptr) return false;
  Definition* orig = def->OriginalDefinitionIgnoreBoxingAndConstraints();
  auto const load = orig->AsLoadField();
  return (load != nullptr) && load->IsImmutableLengthLoad();
}
// Looks up the unoptimized code's ICData for [deopt_id]; returns nullptr
// for ids introduced by the optimizing compiler.
const ICData* Instruction::GetICData(
    const ZoneGrowableArray<const ICData*>& ic_data_array,
    intptr_t deopt_id,
    bool is_static_call) {
  ASSERT(deopt_id != DeoptId::kNone);
  // The deopt_id can be outside the range of the IC data array for
  // computations added in the optimizing compiler.
  if (deopt_id >= ic_data_array.length()) {
    return nullptr;
  }
  const ICData* ic_data = ic_data_array[deopt_id];
  ASSERT(ic_data == nullptr || is_static_call == ic_data->is_static_call());
  return ic_data;
}
// Hashes the instruction tag together with its inputs' SSA temp indices.
uword Instruction::Hash() const {
  uword hash = tag();
  const intptr_t num_inputs = InputCount();
  for (intptr_t i = 0; i < num_inputs; ++i) {
    hash = CombineHashes(hash, InputAt(i)->definition()->ssa_temp_index());
  }
  return FinalizeHash(hash, kBitsPerInt32 - 1);
}
bool Instruction::Equals(const Instruction& other) const {
if (tag() != other.tag()) return false;
if (InputCount() != other.InputCount()) return false;
for (intptr_t i = 0; i < InputCount(); ++i) {
if (!InputAt(i)->Equals(*other.InputAt(i))) return false;
}
return AttributesEqual(other);
}
// Aborts compilation of the current function: this backend cannot compile
// this instruction. Does not return.
void Instruction::Unsupported(FlowGraphCompiler* compiler) {
  compiler->Bailout(ToCString());
  UNREACHABLE();
}
// Two values are equal iff they refer to the same definition.
bool Value::Equals(const Value& other) const {
  return definition() == other.definition();
}
// Comparator ordering single-cid ranges by class id.
// Negative if 'a' should sort before 'b'.
static int OrderById(CidRange* const* a, CidRange* const* b) {
  ASSERT((*a)->IsSingleCid());
  ASSERT((*b)->IsSingleCid());
  // Compare explicitly instead of subtracting: cid_start is intptr_t, and
  // narrowing the difference to int could overflow/truncate.
  const intptr_t cid_a = (*a)->cid_start;
  const intptr_t cid_b = (*b)->cid_start;
  if (cid_a < cid_b) return -1;
  if (cid_a > cid_b) return 1;
  return 0;
}
// Comparator ordering TargetInfos by descending call count, breaking ties
// by ascending class id. Negative if 'a' should sort before 'b'.
static int OrderByFrequencyThenId(CidRange* const* a, CidRange* const* b) {
  const TargetInfo* target_info_a = static_cast<const TargetInfo*>(*a);
  const TargetInfo* target_info_b = static_cast<const TargetInfo*>(*b);
  // Compare explicitly instead of subtracting: counts and cids are intptr_t,
  // and narrowing their difference to int could overflow/truncate.
  if (target_info_a->count != target_info_b->count) {
    return (target_info_b->count > target_info_a->count) ? 1 : -1;
  }
  const intptr_t cid_a = (*a)->cid_start;
  const intptr_t cid_b = (*b)->cid_start;
  if (cid_a < cid_b) return -1;
  if (cid_a > cid_b) return 1;
  return 0;
}
bool Cids::Equals(const Cids& other) const {
if (length() != other.length()) return false;
for (int i = 0; i < length(); i++) {
if (cid_ranges_[i]->cid_start != other.cid_ranges_[i]->cid_start ||
cid_ranges_[i]->cid_end != other.cid_ranges_[i]->cid_end) {
return false;
}
}
return true;
}
// Smallest cid_start over all ranges (kIntptrMax for an empty set).
intptr_t Cids::ComputeLowestCid() const {
  intptr_t lowest = kIntptrMax;
  for (intptr_t i = 0, n = cid_ranges_.length(); i < n; ++i) {
    lowest = Utils::Minimum(lowest, cid_ranges_[i]->cid_start);
  }
  return lowest;
}
// Largest cid_end over all ranges (-1 for an empty set).
intptr_t Cids::ComputeHighestCid() const {
  intptr_t highest = -1;
  for (intptr_t i = 0, n = cid_ranges_.length(); i < n; ++i) {
    highest = Utils::Maximum(highest, cid_ranges_[i]->cid_end);
  }
  return highest;
}
// Returns true if any range in this set contains [cid].
bool Cids::HasClassId(intptr_t cid) const {
  for (int i = 0, n = length(); i < n; i++) {
    if (cid_ranges_[i]->Contains(cid)) return true;
  }
  return false;
}
// Creates a Cids set containing exactly one single-cid range.
Cids* Cids::CreateMonomorphic(Zone* zone, intptr_t cid) {
  Cids* cids = new (zone) Cids(zone);
  cids->Add(new (zone) CidRange(cid, cid));
  return cids;
}
// Builds a Cids set from the cids observed (via binary feedback) for the
// given argument position (0 or 1), sorted and with adjacent/overlapping
// single-cid entries merged into ranges.
Cids* Cids::CreateForArgument(Zone* zone,
                              const BinaryFeedback& binary_feedback,
                              int argument_number) {
  Cids* cids = new (zone) Cids(zone);
  for (intptr_t i = 0; i < binary_feedback.feedback_.length(); i++) {
    ASSERT((argument_number == 0) || (argument_number == 1));
    // Pick the cid recorded for the requested argument position.
    const intptr_t cid = argument_number == 0
                             ? binary_feedback.feedback_[i].first
                             : binary_feedback.feedback_[i].second;
    cids->Add(new (zone) CidRange(cid, cid));
  }
  if (cids->length() != 0) {
    cids->Sort(OrderById);
    // Merge adjacent class id ranges in place; [dest] is the index of the
    // last merged output range.
    int dest = 0;
    for (int src = 1; src < cids->length(); src++) {
      if (cids->cid_ranges_[dest]->cid_end + 1 >=
          cids->cid_ranges_[src]->cid_start) {
        // Contiguous or overlapping with the range at [dest]: extend it.
        cids->cid_ranges_[dest]->cid_end = cids->cid_ranges_[src]->cid_end;
      } else {
        // Gap found: start a new output range.
        dest++;
        if (src != dest) cids->cid_ranges_[dest] = cids->cid_ranges_[src];
      }
    }
    cids->SetLength(dest + 1);
  }
  return cids;
}
// Approximates how often [function] has executed. Functions that are
// already optimized (or queued for optimized compilation) report the
// optimization threshold, since their counters stopped (or were reset).
static intptr_t Usage(const Function& function) {
  const intptr_t counter = function.usage_counter();
  if (counter >= 0) {
    if (Code::IsOptimized(function.CurrentCode())) {
      // 'function' was optimized and stopped counting.
      return FLAG_optimization_counter_threshold;
    }
    return counter;
  }
  // A negative counter with code present means 'function' is queued for
  // optimized compilation.
  return function.HasCode() ? FLAG_optimization_counter_threshold : 0;
}
// Populates cid_ranges_ with one TargetInfo per observed receiver class in
// [ic_data], plus — for megamorphic calls — entries harvested from the
// megamorphic cache.
void CallTargets::CreateHelper(Zone* zone, const ICData& ic_data) {
  Function& dummy = Function::Handle(zone);
  const intptr_t num_args_tested = ic_data.NumArgsTested();
  for (int i = 0, n = ic_data.NumberOfChecks(); i < n; i++) {
    // Skip checks that never fired.
    if (ic_data.GetCountAt(i) == 0) {
      continue;
    }
    intptr_t id = kDynamicCid;
    if (num_args_tested == 0) {
    } else if (num_args_tested == 1) {
      ic_data.GetOneClassCheckAt(i, &id, &dummy);
    } else {
      ASSERT(num_args_tested == 2);
      // For two-arg checks only the receiver's cid is used here.
      GrowableArray<intptr_t> arg_ids;
      ic_data.GetCheckAt(i, &arg_ids, &dummy);
      id = arg_ids[0];
    }
    Function& function = Function::ZoneHandle(zone, ic_data.GetTargetAt(i));
    intptr_t count = ic_data.GetCountAt(i);
    cid_ranges_.Add(new (zone) TargetInfo(id, id, &function, count,
                                          ic_data.GetExactnessAt(i)));
  }
  if (ic_data.is_megamorphic()) {
    ASSERT(num_args_tested == 1);  // Only 1-arg ICData will turn megamorphic.
    const String& name = String::Handle(zone, ic_data.target_name());
    const Array& descriptor =
        Array::Handle(zone, ic_data.arguments_descriptor());
    Thread* thread = Thread::Current();
    const auto& cache = MegamorphicCache::Handle(
        zone, MegamorphicCacheTable::Lookup(thread, name, descriptor));
    {
      // Hold the type feedback mutex while iterating the cache buckets.
      SafepointMutexLocker ml(thread->isolate_group()->type_feedback_mutex());
      MegamorphicCacheEntries entries(Array::Handle(zone, cache.buckets()));
      for (intptr_t i = 0, n = entries.Length(); i < n; i++) {
        const intptr_t id =
            Smi::Value(entries[i].Get<MegamorphicCache::kClassIdIndex>());
        // Empty bucket slots carry kIllegalCid.
        if (id == kIllegalCid) {
          continue;
        }
        Function& function = Function::ZoneHandle(zone);
        function ^= entries[i].Get<MegamorphicCache::kTargetFunctionIndex>();
        const intptr_t filled_entry_count = cache.filled_entry_count();
        ASSERT(filled_entry_count > 0);
        // Approximate a per-entry count by dividing the target's usage
        // evenly over the cache's filled entries.
        cid_ranges_.Add(new (zone) TargetInfo(
            id, id, &function, Usage(function) / filled_entry_count,
            StaticTypeExactnessState::NotTracking()));
      }
    }
  }
}
// True when this set contains exactly one single-cid range.
bool Cids::IsMonomorphic() const {
  if (length() != 1) return false;
  return cid_ranges_[0]->IsSingleCid();
}
// The single receiver cid; only valid when IsMonomorphic() holds.
intptr_t Cids::MonomorphicReceiverCid() const {
  ASSERT(IsMonomorphic());
  return cid_ranges_[0]->cid_start;
}
// Exactness recorded for the single target; requires monomorphism.
StaticTypeExactnessState CallTargets::MonomorphicExactness() const {
  ASSERT(IsMonomorphic());
  return TargetAt(0)->exactness;
}
// Maps an AssertAssignable kind to its symbolic name (used for printing
// and serialization; inverse of ParseKind below).
const char* AssertAssignableInstr::KindToCString(Kind kind) {
  switch (kind) {
#define KIND_CASE(name)                                                        \
  case k##name:                                                                \
    return #name;
    FOR_EACH_ASSERT_ASSIGNABLE_KIND(KIND_CASE)
#undef KIND_CASE
    default:
      UNREACHABLE();
      return nullptr;
  }
}
// Parses a symbolic kind name back into the enum (inverse of
// KindToCString); returns false when [str] names no known kind.
bool AssertAssignableInstr::ParseKind(const char* str, Kind* out) {
#define KIND_CASE(name)                                                        \
  if (strcmp(str, #name) == 0) {                                               \
    *out = Kind::k##name;                                                      \
    return true;                                                               \
  }
  FOR_EACH_ASSERT_ASSIGNABLE_KIND(KIND_CASE)
#undef KIND_CASE
  return false;
}
// Constructs a class check against [cids]; the checked value is input 0.
// Whether the check compiles to a dense bit test is decided here.
CheckClassInstr::CheckClassInstr(Value* value,
                                 intptr_t deopt_id,
                                 const Cids& cids,
                                 const InstructionSource& source)
    : TemplateInstruction(source, deopt_id),
      cids_(cids),
      licm_hoisted_(false),
      is_bit_test_(IsCompactCidRange(cids)),
      token_pos_(source.token_pos) {
  // Expected useful check data.
  const intptr_t number_of_checks = cids.length();
  ASSERT(number_of_checks > 0);
  SetInputAt(0, value);
  // Otherwise use CheckSmiInstr.
  ASSERT(number_of_checks != 1 || !cids[0].IsSingleCid() ||
         cids[0].cid_start != kSmiCid);
}
// Two class checks are interchangeable when they check the same cid set.
bool CheckClassInstr::AttributesEqual(const Instruction& other) const {
  auto const other_check = other.AsCheckClass();
  // nullptr rather than NULL, matching the rest of this file; the caller
  // guarantees matching tags, so the cast must succeed.
  ASSERT(other_check != nullptr);
  return cids().Equals(other_check->cids());
}
bool CheckClassInstr::IsDeoptIfNull() const {
if (!cids().IsMonomorphic()) {
return false;
}
CompileType* in_type = value()->Type();
const intptr_t cid = cids().MonomorphicReceiverCid();
// Performance check: use CheckSmiInstr instead.
ASSERT(cid != kSmiCid);
return in_type->is_nullable() && (in_type->ToNullableCid() == cid);
}
// Null object is a singleton of null-class (except for some sentinel,
// transitional temporaries). Instead of checking against the null class only
// we can check against null instance instead.
bool CheckClassInstr::IsDeoptIfNotNull() const {
  return cids().IsMonomorphic() &&
         (cids().MonomorphicReceiverCid() == kNullCid);
}
bool CheckClassInstr::IsCompactCidRange(const Cids& cids) {
const intptr_t number_of_checks = cids.length();
// If there are only two checks, the extra register pressure needed for the
// dense-cid-range code is not justified.
if (number_of_checks <= 2) return false;
// TODO(fschneider): Support smis in dense cid checks.
if (cids.HasClassId(kSmiCid)) return false;
intptr_t min = cids.ComputeLowestCid();
intptr_t max = cids.ComputeHighestCid();
return (max - min) < compiler::target::kBitsPerWord;
}
// Whether this check is emitted as a single bit-mask test.
bool CheckClassInstr::IsBitTest() const {
  return is_bit_test_;
}
// Computes the mask for the dense cid check: bit (cid - lowest_cid) is set
// for every accepted cid. Only valid when IsBitTest() holds, which
// guarantees the whole span fits into one machine word.
intptr_t CheckClassInstr::ComputeCidMask() const {
  ASSERT(IsBitTest());
  const uintptr_t one = 1;
  intptr_t min = cids_.ComputeLowestCid();
  intptr_t mask = 0;
  for (intptr_t i = 0; i < cids_.length(); ++i) {
    uintptr_t run;
    uintptr_t range = one + cids_[i].Extent();
    if (range >= static_cast<uintptr_t>(compiler::target::kBitsPerWord)) {
      // Shifting by >= the word size is undefined; saturate to all ones.
      run = -1;
    } else {
      // A run of [range] consecutive one-bits.
      run = (one << range) - 1;
    }
    mask |= run << (cids_[i].cid_start - min);
  }
  return mask;
}
// True when this loads a Dart field that is known to be stored unboxed.
bool LoadFieldInstr::IsUnboxedDartFieldLoad() const {
  return slot().representation() == kTagged && slot().IsDartField() &&
         slot().IsUnboxed();
}
// True when the loaded Dart field may (but is not known to) be unboxed.
bool LoadFieldInstr::IsPotentialUnboxedDartFieldLoad() const {
  return slot().representation() == kTagged && slot().IsDartField() &&
         slot().IsPotentialUnboxed();
}
// The representation this load produces: the unboxed representation for
// unboxed field loads in optimized code, the slot's representation otherwise.
Representation LoadFieldInstr::representation() const {
  if (IsUnboxedDartFieldLoad() && CompilerState::Current().is_optimizing()) {
    return slot().UnboxedRepresentation();
  }
  return slot().representation();
}
// Allocates a Context without initializing its variables — a JIT-only
// performance optimization (AOT always allocates null-initialized).
AllocateUninitializedContextInstr::AllocateUninitializedContextInstr(
    const InstructionSource& source,
    intptr_t num_context_variables,
    intptr_t deopt_id)
    : TemplateAllocation(source, deopt_id),
      num_context_variables_(num_context_variables) {
  // This instruction is not used in AOT for code size reasons.
  ASSERT(!CompilerState::Current().is_aot());
}
// Location summary for closure allocation: inputs are pinned to the
// AllocateClosureABI registers since this compiles to a stub call.
LocationSummary* AllocateClosureInstr::MakeLocationSummary(Zone* zone,
                                                           bool opt) const {
  const intptr_t kNumInputs = inputs_.length();
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
  locs->set_in(kFunctionPos,
               Location::RegisterLocation(AllocateClosureABI::kFunctionReg));
  locs->set_in(kContextPos,
               Location::RegisterLocation(AllocateClosureABI::kContextReg));
  locs->set_out(0, Location::RegisterLocation(AllocateClosureABI::kResultReg));
  return locs;
}
// Emits a call to the shared allocate-closure stub.
void AllocateClosureInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  const Code& stub = Code::ZoneHandle(
      compiler->zone(),
      compiler->isolate_group()->object_store()->allocate_closure_stub());
  compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
                             locs(), deopt_id(), env());
}
// Location summary for typed-data allocation: the length input and the
// result are pinned to the AllocateTypedDataArrayABI registers (stub call).
LocationSummary* AllocateTypedDataInstr::MakeLocationSummary(Zone* zone,
                                                             bool opt) const {
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
  locs->set_in(kLengthPos, Location::RegisterLocation(
                               AllocateTypedDataArrayABI::kLengthReg));
  locs->set_out(
      0, Location::RegisterLocation(AllocateTypedDataArrayABI::kResultReg));
  return locs;
}
// Emits a call to the allocation stub for this instruction's typed-data cid.
void AllocateTypedDataInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  const Code& stub = Code::ZoneHandle(
      compiler->zone(), StubCode::GetAllocationStubForTypedData(class_id()));
  compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
                             locs(), deopt_id(), env());
}
// True when this stores into a Dart field known to be stored unboxed.
bool StoreInstanceFieldInstr::IsUnboxedDartFieldStore() const {
  return slot().representation() == kTagged && slot().IsDartField() &&
         slot().IsUnboxed();
}
// True when the stored-to Dart field may (but is not known to) be unboxed.
bool StoreInstanceFieldInstr::IsPotentialUnboxedDartFieldStore() const {
  return slot().representation() == kTagged && slot().IsDartField() &&
         slot().IsPotentialUnboxed();
}
// Representation required for each input: the instance (input 0) is always
// tagged; the value (input 1) must be unboxed for unboxed field stores in
// optimized code.
Representation StoreInstanceFieldInstr::RequiredInputRepresentation(
    intptr_t index) const {
  ASSERT((index == 0) || (index == 1));
  if (index == 0) {
    // The instance is always tagged.
    return kTagged;
  }
  const bool unboxed_store =
      IsUnboxedDartFieldStore() && CompilerState::Current().is_optimizing();
  return unboxed_store ? slot().UnboxedRepresentation()
                       : slot().representation();
}
// Eliminates initializing stores of null: Dart objects are allocated
// null-initialized, so such stores are no-ops. Context objects are the
// exception — the JIT may allocate them uninitialized as a performance
// optimization (AOT always null-initializes them).
Instruction* StoreInstanceFieldInstr::Canonicalize(FlowGraph* flow_graph) {
  if (!is_initialization_) return this;
  if (slot().representation() != kTagged) return this;
  if (slot().IsContextSlot() &&
      instance()->definition()->IsAllocateUninitializedContext()) {
    // The context memory may be garbage; the store must stay.
    return this;
  }
  return value()->BindsToConstantNull() ? nullptr : this;
}
// Field guards are interchangeable exactly when they guard the same field.
bool GuardFieldClassInstr::AttributesEqual(const Instruction& other) const {
  return field().ptr() == other.AsGuardFieldClass()->field().ptr();
}
bool GuardFieldLengthInstr::AttributesEqual(const Instruction& other) const {
  return field().ptr() == other.AsGuardFieldLength()->field().ptr();
}
bool GuardFieldTypeInstr::AttributesEqual(const Instruction& other) const {
  return field().ptr() == other.AsGuardFieldType()->field().ptr();
}
// Removes the assertion when all of its inputs are compile-time constants
// and the subtype test can be evaluated (successfully) right now.
Instruction* AssertSubtypeInstr::Canonicalize(FlowGraph* flow_graph) {
  // If all inputs needed to check instantation are constant, instantiate the
  // sub and super type and remove the instruction if the subtype test succeeds.
  if (super_type()->BindsToConstant() && sub_type()->BindsToConstant() &&
      instantiator_type_arguments()->BindsToConstant() &&
      function_type_arguments()->BindsToConstant()) {
    auto Z = Thread::Current()->zone();
    // A constant null vector means "no type arguments".
    const auto& constant_instantiator_type_args =
        instantiator_type_arguments()->BoundConstant().IsNull()
            ? TypeArguments::null_type_arguments()
            : TypeArguments::Cast(
                  instantiator_type_arguments()->BoundConstant());
    const auto& constant_function_type_args =
        function_type_arguments()->BoundConstant().IsNull()
            ? TypeArguments::null_type_arguments()
            : TypeArguments::Cast(function_type_arguments()->BoundConstant());
    auto& constant_sub_type = AbstractType::Handle(
        Z, AbstractType::Cast(sub_type()->BoundConstant()).ptr());
    auto& constant_super_type = AbstractType::Handle(
        Z, AbstractType::Cast(super_type()->BoundConstant()).ptr());
    ASSERT(!constant_super_type.IsTypeRef());
    ASSERT(!constant_sub_type.IsTypeRef());
    if (AbstractType::InstantiateAndTestSubtype(
            &constant_sub_type, &constant_super_type,
            constant_instantiator_type_args, constant_function_type_args)) {
      // The subtype relation holds statically; the assert can be dropped.
      return nullptr;
    }
  }
  // Keep the assertion; it may still fail (or succeed) at run time.
  return this;
}
// Strict compares are interchangeable when the base comparison attributes
// match and both need (or don't need) the number check.
bool StrictCompareInstr::AttributesEqual(const Instruction& other) const {
  auto const other_op = other.AsStrictCompare();
  // nullptr rather than NULL, matching the rest of this file.
  ASSERT(other_op != nullptr);
  return ComparisonInstr::AttributesEqual(other) &&
         (needs_number_check() == other_op->needs_number_check());
}
// Min/max ops are interchangeable when kind and result cid both match.
bool MathMinMaxInstr::AttributesEqual(const Instruction& other) const {
  auto const other_op = other.AsMathMinMax();
  // nullptr rather than NULL, matching the rest of this file.
  ASSERT(other_op != nullptr);
  return (op_kind() == other_op->op_kind()) &&
         (result_cid() == other_op->result_cid());
}
// Binary integer ops are interchangeable when kind, overflow behavior, and
// truncation behavior all match; the tag check guarantees the cast succeeds.
bool BinaryIntegerOpInstr::AttributesEqual(const Instruction& other) const {
  ASSERT(other.tag() == tag());
  auto const other_op = other.AsBinaryIntegerOp();
  return (op_kind() == other_op->op_kind()) &&
         (can_overflow() == other_op->can_overflow()) &&
         (is_truncating() == other_op->is_truncating());
}
// Field loads are interchangeable when they read through the same slot.
bool LoadFieldInstr::AttributesEqual(const Instruction& other) const {
  auto const other_load = other.AsLoadField();
  // nullptr rather than NULL, matching the rest of this file.
  ASSERT(other_load != nullptr);
  return &this->slot_ == &other_load->slot_;
}
// Static field loads are interchangeable only for the same field (and only
// when this instruction participates in CSE at all).
bool LoadStaticFieldInstr::AttributesEqual(const Instruction& other) const {
  ASSERT(AllowsCSE());
  return field().ptr() == other.AsLoadStaticField()->field().ptr();
}
// Constructs a constant definition; (debug-)checks that [value] obeys the
// invariants required of IL constants: proper integer representation,
// original (non-clone) fields, old-space residence, and canonical form.
ConstantInstr::ConstantInstr(const Object& value,
                             const InstructionSource& source)
    : TemplateDefinition(source), value_(value), token_pos_(source.token_pos) {
  // Check that the value is not an incorrect Integer representation.
  ASSERT(!value.IsMint() || !Smi::IsValid(Mint::Cast(value).AsInt64Value()));
  // Check that clones of fields are not stored as constants.
  ASSERT(!value.IsField() || Field::Cast(value).IsOriginal());
  // Check that all non-Smi objects are heap allocated and in old space.
  ASSERT(value.IsSmi() || value.IsOld());
#if defined(DEBUG)
  // Generally, instances in the flow graph should be canonical. Smis, null
  // values, and sentinel values are canonical by construction and so we skip
  // them here.
  if (!value.IsNull() && !value.IsSmi() && value.IsInstance() &&
      !value.IsCanonical() && (value.ptr() != Object::sentinel().ptr())) {
    // Arrays in ConstantInstrs are usually immutable and canonicalized, but
    // there are at least a couple of cases where one or both is not true:
    //
    // * The Arrays created as backing for ArgumentsDescriptors may not be
    //   canonicalized for space reasons when inlined in the IL. However, they
    //   are still immutable.
    // * The backtracking stack for IRRegExps is put into a ConstantInstr for
    //   immediate use as an argument to the operations on that stack. In this
    //   case, the Array representing it is neither immutable or canonical.
    //
    // In addition to complicating the story for Arrays, IRRegExp compilation
    // also uses other non-canonical values as "constants". For example, the bit
    // tables used for certain character classes are represented as TypedData,
    // and so those values are also neither immutable (as there are no immutable
    // TypedData values) or canonical.
    //
    // LibraryPrefixes are also never canonicalized since their equality is
    // their identity.
    ASSERT(value.IsArray() || value.IsTypedData() || value.IsLibraryPrefix());
  }
#endif
}
bool ConstantInstr::AttributesEqual(const Instruction& other) const {
  // Constants match when they hold the identical object and agree on the
  // representation it is materialized in.
  auto const rhs = other.AsConstant();
  ASSERT(rhs != nullptr);
  if (value().ptr() != rhs->value().ptr()) {
    return false;
  }
  return representation() == rhs->representation();
}
// Constructs a constant carried in unboxed form. For kUnboxedDouble the
// double payload's address is looked up via FindDoubleConstant and cached
// in constant_address_ for later use; for other representations the cached
// address stays 0.
UnboxedConstantInstr::UnboxedConstantInstr(const Object& value,
                                           Representation representation)
    : ConstantInstr(value),
      representation_(representation),
      constant_address_(0) {
  if (representation_ == kUnboxedDouble) {
    ASSERT(value.IsDouble());
    constant_address_ = FindDoubleConstant(Double::Cast(value).value());
  }
}
// Returns true if the value represents a constant.
bool Value::BindsToConstant() const {
return definition()->OriginalDefinition()->IsConstant();
}
// Returns true if the value represents constant null.
bool Value::BindsToConstantNull() const {
  ConstantInstr* constant = definition()->OriginalDefinition()->AsConstant();
  if (constant == nullptr) {
    return false;
  }
  return constant->value().IsNull();
}
// Returns the constant object this value is bound to. Only valid when
// BindsToConstant() holds.
const Object& Value::BoundConstant() const {
  ASSERT(BindsToConstant());
  Definition* defn = definition()->OriginalDefinition();
  ConstantInstr* constant = defn->AsConstant();
  ASSERT(constant != nullptr);
  return constant->value();
}
bool Value::BindsToSmiConstant() const {
  // A Smi constant is a constant whose bound object is a Smi.
  if (!BindsToConstant()) {
    return false;
  }
  return BoundConstant().IsSmi();
}
// Returns the integer payload of the bound Smi constant. Only valid when
// BindsToSmiConstant() holds.
intptr_t Value::BoundSmiConstant() const {
  ASSERT(BindsToSmiConstant());
  const Object& obj = BoundConstant();
  return Smi::Cast(obj).Value();
}
// Convenience constructor: delegates to the full constructor with a fresh
// deopt id allocated from the current compiler state.
GraphEntryInstr::GraphEntryInstr(const ParsedFunction& parsed_function,
                                 intptr_t osr_id)
    : GraphEntryInstr(parsed_function,
                      osr_id,
                      CompilerState::Current().GetNextDeoptId()) {}
// Full constructor. The graph entry is block 0, is outside any try block
// (kInvalidTryIndex), and starts with stack depth 0. |osr_id| is
// Compiler::kNoOSRDeoptId for regular (non-OSR) compilations.
GraphEntryInstr::GraphEntryInstr(const ParsedFunction& parsed_function,
                                 intptr_t osr_id,
                                 intptr_t deopt_id)
    : BlockEntryWithInitialDefs(0,
                                kInvalidTryIndex,
                                deopt_id,
                                /*stack_depth*/ 0),
      parsed_function_(parsed_function),
      catch_entries_(),
      indirect_entries_(),
      osr_id_(osr_id),
      entry_count_(0),
      spill_slot_count_(0),
      fixed_slot_count_(0) {}
// Returns the null constant from the graph's initial definitions. The
// null constant is expected to always be present; reaching the end of the
// scan is a bug.
ConstantInstr* GraphEntryInstr::constant_null() {
  ASSERT(initial_definitions()->length() > 0);
  auto defs = initial_definitions();
  const intptr_t n = defs->length();
  for (intptr_t i = 0; i < n; ++i) {
    ConstantInstr* constant = (*defs)[i]->AsConstant();
    if ((constant != nullptr) && constant->value().IsNull()) {
      return constant;
    }
  }
  UNREACHABLE();
  return nullptr;
}
// Returns the catch entry whose catch_try_index matches |index|, or
// nullptr when none does.
CatchBlockEntryInstr* GraphEntryInstr::GetCatchEntry(intptr_t index) {
  // TODO(fschneider): Sort the catch entries by catch_try_index to avoid
  // searching.
  const intptr_t n = catch_entries_.length();
  for (intptr_t i = 0; i < n; ++i) {
    CatchBlockEntryInstr* entry = catch_entries_[i];
    if (entry->catch_try_index() == index) {
      return entry;
    }
  }
  return nullptr;
}
bool GraphEntryInstr::IsCompiledForOsr() const {
  // OSR compilations carry a real OSR deopt id; regular compilations use
  // the Compiler::kNoOSRDeoptId sentinel.
  return !(osr_id_ == Compiler::kNoOSRDeoptId);
}
// ==== Support for visiting flow graphs.

// Expands to ShortNameInstr::Accept for every instruction kind; each
// implementation double-dispatches to the matching
// InstructionVisitor::Visit<ShortName> method.
#define DEFINE_ACCEPT(ShortName, Attrs)                                        \
  void ShortName##Instr::Accept(InstructionVisitor* visitor) {                 \
    visitor->Visit##ShortName(this);                                           \
  }
FOR_EACH_INSTRUCTION(DEFINE_ACCEPT)
#undef DEFINE_ACCEPT
// Attaches |deopt_env| to this instruction, rebinding every value in the
// (possibly nested) environment to point back at this instruction with a
// running use index.
void Instruction::SetEnvironment(Environment* deopt_env) {
  intptr_t use_index = 0;
  for (Environment::DeepIterator it(deopt_env); !it.Done(); it.Advance()) {
    Value* use = it.CurrentValue();
    use->set_instruction(this);
    use->set_use_index(use_index++);
  }
  env_ = deopt_env;
}
void Instruction::RemoveEnvironment() {
for (Environment::DeepIterator it(env()); !it.Done(); it.Advance()) {
it.CurrentValue()->RemoveFromUseList();
}
env_ = NULL;
}
// Rebinds every environment use of |current| in this instruction's
// environment to |replacement|, maintaining def-use chains on both sides.
void Instruction::ReplaceInEnvironment(Definition* current,
                                       Definition* replacement) {
  for (Environment::DeepIterator it(env()); !it.Done(); it.Advance()) {
    Value* use = it.CurrentValue();
    if (use->definition() != current) {
      continue;
    }
    use->RemoveFromUseList();
    use->set_definition(replacement);
    replacement->AddEnvUse(use);
  }
}
// Unlinks this instruction from the instruction list of its block and
// releases its input uses. Returns the previous instruction when
// |return_previous| is true, otherwise the next one (useful for iterator
// callers that must keep a valid cursor).
Instruction* Instruction::RemoveFromGraph(bool return_previous) {
  // Control-flow and block-boundary instructions cannot be removed this way.
  ASSERT(!IsBlockEntry());
  ASSERT(!IsBranch());
  ASSERT(!IsThrow());
  ASSERT(!IsReturn());
  ASSERT(!IsReThrow());
  ASSERT(!IsGoto());
  ASSERT(previous() != NULL);
  // We cannot assert that the instruction, if it is a definition, has no
  // uses. This function is used to remove instructions from the graph and
  // reinsert them elsewhere (e.g., hoisting).
  Instruction* prev_instr = previous();
  Instruction* next_instr = next();
  ASSERT(next_instr != NULL);
  ASSERT(!next_instr->IsBlockEntry());
  // Splice this instruction out of the list.
  prev_instr->LinkTo(next_instr);
  UnuseAllInputs();
  // Reset the successor and previous instruction to indicate that the
  // instruction is removed from the graph.
  set_previous(NULL);
  set_next(NULL);
  return return_previous ? prev_instr : next_instr;
}
// Splices this (currently unlinked) instruction into the list directly
// after |prev|, then registers its input uses.
void Instruction::InsertAfter(Instruction* prev) {
  // Must not already be linked anywhere.
  ASSERT(previous_ == NULL);
  ASSERT(next_ == NULL);
  previous_ = prev;
  next_ = prev->next_;
  next_->previous_ = this;
  previous_->next_ = this;
  // Update def-use chains whenever instructions are added to the graph
  // after initial graph construction.
  for (intptr_t i = InputCount() - 1; i >= 0; --i) {
    Value* input = InputAt(i);
    input->definition()->AddInputUse(input);
  }
}
// Links |tail| after this instruction and registers tail's input uses,
// keeping def-use chains valid for code added after initial graph
// construction. Returns |tail| for chaining.
Instruction* Instruction::AppendInstruction(Instruction* tail) {
  LinkTo(tail);
  for (intptr_t i = tail->InputCount(); --i >= 0;) {
    Value* input = tail->InputAt(i);
    input->definition()->AddInputUse(input);
  }
  return tail;
}
BlockEntryInstr* Instruction::GetBlock() {
  // TODO(fschneider): Implement a faster way to get the block of an
  // instruction.
  //
  // InlineExitCollector::RemoveUnreachableExits may call
  // Instruction::GetBlock on instructions which are not properly linked
  // to the flow graph (as collected exits may belong to unreachable
  // fragments), so this code gracefully handles the absence of a
  // BlockEntry by returning nullptr.
  Instruction* cursor = previous();
  while (cursor != nullptr) {
    if (cursor->IsBlockEntry()) {
      return cursor->AsBlockEntry();
    }
    cursor = cursor->previous();
  }
  return nullptr;
}
// Removes the iterator's current instruction; the cursor moves to the
// previous instruction so the next Advance() visits the right successor.
void ForwardInstructionIterator::RemoveCurrentFromGraph() {
  current_ = current_->RemoveFromGraph(true);  // Set current_ to previous.
}
// Removes the iterator's current instruction; the cursor moves to the
// next instruction so the next Advance() visits the right predecessor.
void BackwardInstructionIterator::RemoveCurrentFromGraph() {
  current_ = current_->RemoveFromGraph(false);  // Set current_ to next.
}
// Default implementation of visiting basic blocks. Can be overridden.
// Visits each block entry in block order, then every instruction in the
// block. current_iterator_ is exposed so instruction visitors can remove
// the instruction being visited.
void FlowGraphVisitor::VisitBlocks() {
  ASSERT(current_iterator_ == NULL);
  for (intptr_t i = 0; i < block_order_->length(); ++i) {
    BlockEntryInstr* entry = (*block_order_)[i];
    entry->Accept(this);
    ForwardInstructionIterator it(entry);
    current_iterator_ = &it;
    for (; !it.Done(); it.Advance()) {
      it.Current()->Accept(this);
    }
    current_iterator_ = NULL;
  }
}
// Returns true unless the stored value can be proven to never need a
// GC write barrier: null, Smi, bool, or a constant — checked through the
// whole chain of redefinitions.
bool Value::NeedsWriteBarrier() {
  Value* value = this;
  do {
    // Null, Smis and bools are immediate or canonical objects.
    if (value->Type()->IsNull() ||
        (value->Type()->ToNullableCid() == kSmiCid) ||
        (value->Type()->ToNullableCid() == kBoolCid)) {
      return false;
    }
    // Strictly speaking, the incremental barrier can only be skipped for
    // immediate objects (Smis) or permanent objects (vm-isolate heap or
    // image pages). Here we choose to skip the barrier for any constant on
    // the assumption it will remain reachable through the object pool.
    if (value->BindsToConstant()) {
      return false;
    }
    // Follow the chain of redefinitions as redefined value could have a more
    // accurate type (for example, AssertAssignable of Smi to a generic T).
    value = value->definition()->RedefinedValue();
  } while (value != nullptr);
  return true;
}
// Inserts |predecessor| into the predecessor list, keeping the list
// sorted by block id.
void JoinEntryInstr::AddPredecessor(BlockEntryInstr* predecessor) {
  // Require the predecessors to be sorted by block_id to make managing
  // their corresponding phi inputs simpler.
  intptr_t pred_id = predecessor->block_id();
  intptr_t index = 0;
  while ((index < predecessors_.length()) &&
         (predecessors_[index]->block_id() < pred_id)) {
    ++index;
  }
#if defined(DEBUG)
  // No duplicate block ids may exist among the remaining predecessors.
  for (intptr_t i = index; i < predecessors_.length(); ++i) {
    ASSERT(predecessors_[i]->block_id() != pred_id);
  }
#endif
  predecessors_.InsertAt(index, predecessor);
}
// Linear scan for |pred| among this join's predecessors; returns its
// position, or -1 when it is not a predecessor.
intptr_t JoinEntryInstr::IndexOfPredecessor(BlockEntryInstr* pred) const {
  const intptr_t n = predecessors_.length();
  for (intptr_t i = 0; i < n; ++i) {
    if (predecessors_[i] == pred) {
      return i;
    }
  }
  return -1;
}
// Prepends |value| to the intrusive doubly-linked use list headed at
// |*list|. |value| must not already be on any list.
void Value::AddToList(Value* value, Value** list) {
  ASSERT(value->next_use() == nullptr);
  ASSERT(value->previous_use() == nullptr);
  Value* next = *list;
  ASSERT(value != next);
  // New head: link value before the old head.
  *list = value;
  value->set_next_use(next);
  value->set_previous_use(NULL);
  if (next != NULL) next->set_previous_use(value);
}
// Unlinks this value from its definition's use list. The value may be the
// head of the input-use list, the head of the env-use list, or an interior
// node (found via its previous_use pointer).
void Value::RemoveFromUseList() {
  Definition* def = definition();
  Value* next = next_use();
  if (this == def->input_use_list()) {
    // Head of the input-use list: advance the head.
    def->set_input_use_list(next);
    if (next != NULL) next->set_previous_use(NULL);
  } else if (this == def->env_use_list()) {
    // Head of the environment-use list: advance the head.
    def->set_env_use_list(next);
    if (next != NULL) next->set_previous_use(NULL);
  } else if (Value* prev = previous_use()) {
    // Interior node: bridge the neighbors.
    prev->set_next_use(next);
    if (next != NULL) next->set_previous_use(prev);
  }
  // Leave this value detached.
  set_previous_use(NULL);
  set_next_use(NULL);
}
// True if the definition has a single input use and is used only in
// environments at the same instruction as that input use.
bool Definition::HasOnlyUse(Value* use) const {
  if (!HasOnlyInputUse(use)) {
    return false;
  }
  // Every environment use must sit on the same instruction as |use|.
  Instruction* target = use->instruction();
  Value::Iterator it(env_use_list());
  while (!it.Done()) {
    if (it.Current()->instruction() != target) {
      return false;
    }
    it.Advance();
  }
  return true;
}
// True if |use| is the one and only element of the input-use list.
bool Definition::HasOnlyInputUse(Value* use) const {
  if (input_use_list() != use) {
    return false;
  }
  return use->next_use() == NULL;
}
// Redirects every use (input and environment) of this definition to
// |other|: each use's definition pointer is rewritten, its reaching type
// refined, and both use lists are spliced onto the front of other's lists.
void Definition::ReplaceUsesWith(Definition* other) {
  ASSERT(other != NULL);
  ASSERT(this != other);
  Value* current = NULL;
  Value* next = input_use_list();
  if (next != NULL) {
    // Change all the definitions.
    while (next != NULL) {
      current = next;
      current->set_definition(other);
      current->RefineReachingType(other->Type());
      next = current->next_use();
    }
    // Concatenate the lists: our whole list goes in front of other's,
    // with |current| (our last use) linked to other's old head.
    next = other->input_use_list();
    current->set_next_use(next);
    if (next != NULL) next->set_previous_use(current);
    other->set_input_use_list(input_use_list());
    set_input_use_list(NULL);
  }
  // Repeat for environment uses.
  current = NULL;
  next = env_use_list();
  if (next != NULL) {
    while (next != NULL) {
      current = next;
      current->set_definition(other);
      current->RefineReachingType(other->Type());
      next = current->next_use();
    }
    next = other->env_use_list();
    current->set_next_use(next);
    if (next != NULL) next->set_previous_use(current);
    other->set_env_use_list(env_use_list());
    set_env_use_list(NULL);
  }
}
// Detaches every input value and every environment value from the use
// lists of their respective definitions.
void Instruction::UnuseAllInputs() {
  for (intptr_t i = InputCount(); --i >= 0;) {
    InputAt(i)->RemoveFromUseList();
  }
  for (Environment::DeepIterator it(env()); !it.Done(); it.Advance()) {
    it.CurrentValue()->RemoveFromUseList();
  }
}
// Rebinds the trailing argument slots of this call's environment to the
// corresponding PushArgument instructions, leaving any extra
// "after-arguments" inputs (pruned on lazy deopt) past them untouched.
void Instruction::RepairPushArgsInEnvironment() const {
  // Some calls (e.g. closure calls) have more inputs than actual arguments.
  // Those extra inputs will be consumed from the stack before the call.
  const intptr_t after_args_input_count = env()->LazyDeoptPruneCount();
  PushArgumentsArray* push_arguments = GetPushArguments();
  ASSERT(push_arguments != nullptr);
  const intptr_t arg_count = ArgumentCount();
  ASSERT((arg_count + after_args_input_count) <= env()->Length());
  // Arguments occupy the env slots just before the after-args inputs.
  const intptr_t env_base =
      env()->Length() - arg_count - after_args_input_count;
  for (intptr_t i = 0; i < arg_count; ++i) {
    env()->ValueAt(env_base + i)->BindToEnvironment(push_arguments->At(i));
  }
}
// Makes this instruction deoptimize to the point *after* |call|: it gets
// the call's "deopt after" id and a copy of the call's environment with
// the call's arguments popped and |result| (or the dead constant) pushed.
void Instruction::InheritDeoptTargetAfter(FlowGraph* flow_graph,
                                          Definition* call,
                                          Definition* result) {
  ASSERT(call->env() != NULL);
  deopt_id_ = DeoptId::ToDeoptAfter(call->deopt_id_);
  call->env()->DeepCopyAfterTo(
      flow_graph->zone(), this, call->ArgumentCount(),
      flow_graph->constant_dead(),
      result != NULL ? result : flow_graph->constant_dead());
}
// Copies |other|'s deopt id and a deep copy of its environment onto this
// instruction so both deoptimize to the same target.
void Instruction::InheritDeoptTarget(Zone* zone, Instruction* other) {
  ASSERT(other->env() != NULL);
  CopyDeoptIdFrom(*other);
  other->env()->DeepCopyTo(zone, this);
}
// Branch variant: also propagates the inherited deopt id to the embedded
// comparison.
void BranchInstr::InheritDeoptTarget(Zone* zone, Instruction* other) {
  ASSERT(env() == NULL);
  Instruction::InheritDeoptTarget(zone, other);
  comparison()->SetDeoptId(*this);
}
// Returns true if |dom| dominates this instruction.
bool Instruction::IsDominatedBy(Instruction* dom) {
  BlockEntryInstr* block = GetBlock();
  BlockEntryInstr* dom_block = dom->GetBlock();
  // A phi dominates everything its block dominates, so treat it as the
  // block entry itself.
  if (dom->IsPhi()) {
    dom = dom_block;
  }
  if (block == dom_block) {
    // Same block: dominated if |dom| is the block entry itself, or if this
    // is the block's last instruction, or if |dom| precedes this in the
    // straight-line instruction list.
    if ((block == dom) || (this == block->last_instruction())) {
      return true;
    }
    // A phi precedes all non-entry instructions, so it cannot be dominated
    // by a non-entry |dom| in the same block.
    if (IsPhi()) {
      return false;
    }
    for (Instruction* curr = dom->next(); curr != NULL; curr = curr->next()) {
      if (curr == this) return true;
    }
    return false;
  }
  // Different blocks: fall back to block dominance.
  return dom_block->Dominates(block);
}
bool Instruction::HasUnmatchedInputRepresentations() const {
for (intptr_t i = 0; i < InputCount(); i++) {
Definition* input = InputAt(i)->definition();
const Representation input_representation = RequiredInputRepresentation(i);
if (input_representation != kNoRepresentation &&
input_representation != input->representation()) {
return true;
}
}
return false;
}
// Per-instruction attribute bits, indexed by the instruction's tag.
// Generated from the attribute annotations in FOR_EACH_INSTRUCTION.
const intptr_t Instruction::kInstructionAttrs[Instruction::kNumInstructions] = {
#define INSTR_ATTRS(type, attrs) InstrAttrs::attrs,
    FOR_EACH_INSTRUCTION(INSTR_ATTRS)
#undef INSTR_ATTRS
};
bool Instruction::CanTriggerGC() const {
return (kInstructionAttrs[tag()] & InstrAttrs::kNoGC) == 0;
}
// Replaces this definition in the graph with |replacement|, redirecting
// all of this definition's uses to |replacement_for_uses|. The replacement
// inherits this definition's environment. When |iterator| currently points
// at this definition, removal goes through the iterator so its cursor
// stays valid.
void Definition::ReplaceWithResult(Instruction* replacement,
                                   Definition* replacement_for_uses,
                                   ForwardInstructionIterator* iterator) {
  // Record replacement's input uses.
  for (intptr_t i = replacement->InputCount() - 1; i >= 0; --i) {
    Value* input = replacement->InputAt(i);
    input->definition()->AddInputUse(input);
  }
  // Take replacement's environment from this definition.
  ASSERT(replacement->env() == NULL);
  replacement->SetEnvironment(env());
  ClearEnv();
  // Replace all uses of this definition with replacement_for_uses.
  ReplaceUsesWith(replacement_for_uses);
  // Finally replace this one with the replacement instruction in the graph.
  previous()->LinkTo(replacement);
  if ((iterator != NULL) && (this == iterator->Current())) {
    // Remove through the iterator. Temporarily relink this after the
    // replacement so RemoveCurrentFromGraph can unlink it.
    replacement->LinkTo(this);
    iterator->RemoveCurrentFromGraph();
  } else {
    replacement->LinkTo(next());
    // Remove this definition's input uses.
    UnuseAllInputs();
  }
  set_previous(NULL);
  set_next(NULL);
}
// Replaces this definition with |other|, both in the graph and for all
// uses, transferring this definition's SSA name to |other|.
void Definition::ReplaceWith(Definition* other,
                             ForwardInstructionIterator* iterator) {
  // Reuse this instruction's SSA name for other.
  ASSERT(!other->HasSSATemp());
  if (HasSSATemp()) {
    other->set_ssa_temp_index(ssa_temp_index());
  }
  ReplaceWithResult(other, other, iterator);
}
// Replaces this branch's comparison with |new_comparison|: the new
// comparison's inputs are wired to this branch and the old comparison's
// inputs are released.
void BranchInstr::SetComparison(ComparisonInstr* new_comparison) {
  for (intptr_t i = new_comparison->InputCount() - 1; i >= 0; --i) {
    Value* input = new_comparison->InputAt(i);
    input->definition()->AddInputUse(input);
    // Inputs now report the branch, not the comparison, as their user.
    input->set_instruction(this);
  }
  // There should be no need to copy or unuse an environment.
  ASSERT(comparison()->env() == NULL);
  ASSERT(new_comparison->env() == NULL);
  // Remove the current comparison's input uses.
  comparison()->UnuseAllInputs();
  ASSERT(!new_comparison->HasUses());
  comparison_ = new_comparison;
}
// ==== Postorder graph traversal.
static bool IsMarked(BlockEntryInstr* block,
                     GrowableArray<BlockEntryInstr*>* preorder) {
  // Detect that a block has been visited as part of the current
  // DiscoverBlocks (we can call DiscoverBlocks multiple times). The block
  // will be 'marked' by (1) having a preorder number in the range of the
  // preorder array and (2) being in the preorder array at that index.
  const intptr_t number = block->preorder_number();
  if (number < 0) return false;
  if (number >= preorder->length()) return false;
  return (*preorder)[number] == block;
}
// Base class implementation used for JoinEntry and TargetEntry.
// Performs one step of preorder block discovery. Returns true when this
// block is newly discovered, false when it had already been reached (in
// which case only the predecessor is recorded).
bool BlockEntryInstr::DiscoverBlock(BlockEntryInstr* predecessor,
                                    GrowableArray<BlockEntryInstr*>* preorder,
                                    GrowableArray<intptr_t>* parent) {
  // If this block has a predecessor (i.e., is not the graph entry) we can
  // assume the preorder array is non-empty.
  ASSERT((predecessor == NULL) || !preorder->is_empty());
  // Blocks with a single predecessor cannot have been reached before.
  ASSERT(IsJoinEntry() || !IsMarked(this, preorder));
  // 1. If the block has already been reached, add current_block as a
  // basic-block predecessor and we are done.
  if (IsMarked(this, preorder)) {
    ASSERT(predecessor != NULL);
    AddPredecessor(predecessor);
    return false;
  }
  // 2. Otherwise, clear the predecessors which might have been computed on
  // some earlier call to DiscoverBlocks and record this predecessor.
  ClearPredecessors();
  if (predecessor != NULL) AddPredecessor(predecessor);
  // 3. The predecessor is the spanning-tree parent. The graph entry has no
  // parent, indicated by -1.
  intptr_t parent_number =
      (predecessor == NULL) ? -1 : predecessor->preorder_number();
  parent->Add(parent_number);
  // 4. Assign the preorder number and add the block entry to the list.
  set_preorder_number(preorder->length());
  preorder->Add(this);
  // The preorder and parent arrays are indexed by
  // preorder block number, so they should stay in lockstep.
  ASSERT(preorder->length() == parent->length());
  // 5. Iterate straight-line successors to record assigned variables and
  // find the last instruction in the block. The graph entry block consists
  // of only the entry instruction, so that is the last instruction in the
  // block.
  Instruction* last = this;
  for (ForwardInstructionIterator it(this); !it.Done(); it.Advance()) {
    last = it.Current();
  }
  set_last_instruction(last);
  if (last->IsGoto()) last->AsGoto()->set_block(this);
  return true;
}
// Rewrites the graph so execution starts at the OSR entry point: searches
// for the instruction carrying osr_id_ and replaces the normal entry with
// an OSR entry that jumps to its block. The entry must exist.
void GraphEntryInstr::RelinkToOsrEntry(Zone* zone, intptr_t max_block_id) {
  ASSERT(osr_id_ != Compiler::kNoOSRDeoptId);
  BitVector* block_marks = new (zone) BitVector(zone, max_block_id + 1);
  bool found = FindOsrEntryAndRelink(this, /*parent=*/NULL, block_marks);
  ASSERT(found);
}
// Depth-first search for the stack-check instruction carrying the OSR
// deopt id. When found, installs an OsrEntryInstr jumping to this (join)
// block and clears the graph's normal/unchecked entries. Returns true once
// the entry has been found and the graph relinked.
bool BlockEntryInstr::FindOsrEntryAndRelink(GraphEntryInstr* graph_entry,
                                            Instruction* parent,
                                            BitVector* block_marks) {
  const intptr_t osr_id = graph_entry->osr_id();
  // Search for the instruction with the OSR id. Use a depth first search
  // because basic blocks have not been discovered yet. Prune unreachable
  // blocks by replacing the normal entry with a jump to the block
  // containing the OSR entry point.
  // Do not visit blocks more than once.
  if (block_marks->Contains(block_id())) return false;
  block_marks->Add(block_id());
  // Search this block for the OSR id.
  Instruction* instr = this;
  for (ForwardInstructionIterator it(this); !it.Done(); it.Advance()) {
    instr = it.Current();
    if (instr->GetDeoptId() == osr_id) {
      // Sanity check that we found a stack check instruction.
      ASSERT(instr->IsCheckStackOverflow());
      // Loop stack check checks are always in join blocks so that they can
      // be the target of a goto.
      ASSERT(IsJoinEntry());
      // The instruction should be the first instruction in the block so
      // we can simply jump to the beginning of the block.
      ASSERT(instr->previous() == this);
      ASSERT(stack_depth() == instr->AsCheckStackOverflow()->stack_depth());
      auto normal_entry = graph_entry->normal_entry();
      // Build the OSR entry, reusing the normal entry's identifiers, and
      // have it jump straight to this join block.
      auto osr_entry = new OsrEntryInstr(
          graph_entry, normal_entry->block_id(), normal_entry->try_index(),
          normal_entry->deopt_id(), stack_depth());
      auto goto_join = new GotoInstr(AsJoinEntry(),
                                     CompilerState::Current().GetNextDeoptId());
      ASSERT(parent != nullptr);
      goto_join->CopyDeoptIdFrom(*parent);
      osr_entry->LinkTo(goto_join);
      // Remove normal function entries & add osr entry.
      graph_entry->set_normal_entry(nullptr);
      graph_entry->set_unchecked_entry(nullptr);
      graph_entry->set_osr_entry(osr_entry);
      return true;
    }
  }
  // Recursively search the successors.
  for (intptr_t i = instr->SuccessorCount() - 1; i >= 0; --i) {
    if (instr->SuccessorAt(i)->FindOsrEntryAndRelink(graph_entry, instr,
                                                     block_marks)) {
      return true;
    }
  }
  return false;
}
bool BlockEntryInstr::Dominates(BlockEntryInstr* other) const {
  // TODO(fschneider): Make this faster by e.g. storing dominators for each
  // block while computing the dominator tree.
  ASSERT(other != NULL);
  // Walk up the dominator tree from |other| until this block is reached
  // (dominates, a block dominates itself) or the root is passed (does not).
  for (BlockEntryInstr* current = other; current != NULL;
       current = current->dominator()) {
    if (current == this) {
      return true;
    }
  }
  return false;
}
// Returns the dominator when its last instruction falls through solely
// into this block; otherwise nullptr.
BlockEntryInstr* BlockEntryInstr::ImmediateDominator() const {
  Instruction* last = dominator()->last_instruction();
  const bool only_successor_is_this =
      (last->SuccessorCount() == 1) && (last->SuccessorAt(0) == this);
  return only_successor_is_this ? dominator() : nullptr;
}
bool BlockEntryInstr::IsLoopHeader() const {
  // A block heads a loop iff it is registered as the header of its
  // LoopInfo.
  if (loop_info_ == nullptr) {
    return false;
  }
  return loop_info_->header() == this;
}
intptr_t BlockEntryInstr::NestingDepth() const {
  // Blocks outside any loop have nesting depth 0.
  if (loop_info_ == nullptr) {
    return 0;
  }
  return loop_info_->NestingDepth();
}
// Helper to mutate the graph during inlining. This block should be
// replaced with new_block as a predecessor of all of this block's
// successors. For each successor, the predecessors will be reordered
// to preserve block-order sorting of the predecessors as well as the
// phis if the successor is a join.
void BlockEntryInstr::ReplaceAsPredecessorWith(BlockEntryInstr* new_block) {
  // Set the last instruction of the new block to that of the old block.
  Instruction* last = last_instruction();
  new_block->set_last_instruction(last);
  // For each successor, update the predecessors.
  for (intptr_t sidx = 0; sidx < last->SuccessorCount(); ++sidx) {
    // If the successor is a target, update its predecessor.
    TargetEntryInstr* target = last->SuccessorAt(sidx)->AsTargetEntry();
    if (target != NULL) {
      target->predecessor_ = new_block;
      continue;
    }
    // If the successor is a join, update each predecessor and the phis.
    JoinEntryInstr* join = last->SuccessorAt(sidx)->AsJoinEntry();
    ASSERT(join != NULL);
    // Find the old predecessor index.
    intptr_t old_index = join->IndexOfPredecessor(this);
    intptr_t pred_count = join->PredecessorCount();
    ASSERT(old_index >= 0);
    ASSERT(old_index < pred_count);
    // Find the new predecessor index while reordering the predecessors.
    intptr_t new_id = new_block->block_id();
    intptr_t new_index = old_index;
    if (block_id() < new_id) {
      // Search upwards, bubbling down intermediate predecessors.
      for (; new_index < pred_count - 1; ++new_index) {
        if (join->predecessors_[new_index + 1]->block_id() > new_id) break;
        join->predecessors_[new_index] = join->predecessors_[new_index + 1];
      }
    } else {
      // Search downwards, bubbling up intermediate predecessors.
      for (; new_index > 0; --new_index) {
        if (join->predecessors_[new_index - 1]->block_id() < new_id) break;
        join->predecessors_[new_index] = join->predecessors_[new_index - 1];
      }
    }
    join->predecessors_[new_index] = new_block;
    // If the new and old predecessor index match there is nothing to update.
    if ((join->phis() == NULL) || (old_index == new_index)) return;
    // Otherwise, reorder the predecessor uses in each phi to mirror the
    // rotation applied to the predecessor list above.
    for (PhiIterator it(join); !it.Done(); it.Advance()) {
      PhiInstr* phi = it.Current();
      ASSERT(phi != NULL);
      ASSERT(pred_count == phi->InputCount());
      // Save the predecessor use.
      Value* pred_use = phi->InputAt(old_index);
      // Move uses between old and new.
      intptr_t step = (old_index < new_index) ? 1 : -1;
      for (intptr_t use_idx = old_index; use_idx != new_index;
           use_idx += step) {
        phi->SetInputAt(use_idx, phi->InputAt(use_idx + step));
      }
      // Write the predecessor use.
      phi->SetInputAt(new_index, pred_use);
    }
  }
}
// Releases the input uses of everything in this block: the phis (when the
// block is a join), the entry itself, and every instruction in the block.
void BlockEntryInstr::ClearAllInstructions() {
  if (JoinEntryInstr* join = this->AsJoinEntry()) {
    for (PhiIterator it(join); !it.Done(); it.Advance()) {
      it.Current()->UnuseAllInputs();
    }
  }
  UnuseAllInputs();
  for (ForwardInstructionIterator it(this); !it.Done(); it.Advance()) {
    it.Current()->UnuseAllInputs();
  }
}
// Creates and records a phi for the variable with index |var_index|.
// Returns the newly allocated phi.
PhiInstr* JoinEntryInstr::InsertPhi(intptr_t var_index, intptr_t var_count) {
  // Lazily initialize the array of phis.
  // Currently, phis are stored in a sparse array that holds the phi
  // for variable with index i at position i.
  // TODO(fschneider): Store phis in a more compact way.
  if (phis_ == NULL) {
    phis_ = new ZoneGrowableArray<PhiInstr*>(var_count);
    for (intptr_t i = 0; i < var_count; i++) {
      phis_->Add(NULL);
    }
  }
  // Each variable slot may be populated at most once.
  ASSERT((*phis_)[var_index] == NULL);
  return (*phis_)[var_index] = new PhiInstr(this, PredecessorCount());
}
// Appends an already-constructed phi to this join's phi list.
void JoinEntryInstr::InsertPhi(PhiInstr* phi) {
  // Lazily initialize the array of phis.
  if (phis_ == NULL) {
    phis_ = new ZoneGrowableArray<PhiInstr*>(1);
  }
  phis_->Add(phi);
}
// Removes |phi| from this join's phi list using swap-remove: the last phi
// takes the vacated slot (phi order is not preserved).
void JoinEntryInstr::RemovePhi(PhiInstr* phi) {
  ASSERT(phis_ != NULL);
  const intptr_t n = phis_->length();
  for (intptr_t i = 0; i < n; ++i) {
    if ((*phis_)[i] != phi) {
      continue;
    }
    (*phis_)[i] = phis_->Last();
    phis_->RemoveLast();
    return;
  }
}
// Compacts the phi list to only the live phis (re-registering their input
// uses); dead phis have their uses redirected to |replacement|. The list
// is dropped entirely when no phi survives.
void JoinEntryInstr::RemoveDeadPhis(Definition* replacement) {
  if (phis_ == NULL) return;
  // Compaction: live phis are moved down to to_index.
  intptr_t to_index = 0;
  for (intptr_t from_index = 0; from_index < phis_->length(); ++from_index) {
    PhiInstr* phi = (*phis_)[from_index];
    if (phi != NULL) {
      if (phi->is_alive()) {
        (*phis_)[to_index++] = phi;
        // Re-register the live phi's inputs in their definitions' use lists.
        for (intptr_t i = phi->InputCount() - 1; i >= 0; --i) {
          Value* input = phi->InputAt(i);
          input->definition()->AddInputUse(input);
        }
      } else {
        phi->ReplaceUsesWith(replacement);
      }
    }
  }
  if (to_index == 0) {
    phis_ = NULL;
  } else {
    phis_->TruncateTo(to_index);
  }
}
// Base implementation: plain instructions transfer control only to the
// next instruction, so they have no block successors.
intptr_t Instruction::SuccessorCount() const {
  return 0;
}
// Base implementation: reaching this is a bug, since only control-transfer
// instructions have successors.
BlockEntryInstr* Instruction::SuccessorAt(intptr_t index) const {
  // Called only if index is in range. Only control-transfer instructions
  // can have non-zero successor counts and they override this function.
  UNREACHABLE();
  return NULL;
}
// Successors of the graph entry are the optional normal, unchecked and
// OSR entries, plus every catch entry.
intptr_t GraphEntryInstr::SuccessorCount() const {
  intptr_t count = catch_entries_.length();
  if (normal_entry() != nullptr) count++;
  if (unchecked_entry() != nullptr) count++;
  if (osr_entry() != nullptr) count++;
  return count;
}
// Indexing mirrors SuccessorCount: normal entry first (when present),
// then unchecked, then OSR, then the catch entries.
BlockEntryInstr* GraphEntryInstr::SuccessorAt(intptr_t index) const {
  BlockEntryInstr* const ordered_entries[] = {normal_entry(),
                                              unchecked_entry(), osr_entry()};
  for (BlockEntryInstr* entry : ordered_entries) {
    if (entry == nullptr) continue;
    if (index == 0) return entry;
    index--;
  }
  return catch_entries_[index];
}
// A branch always has exactly two successors: the true and false targets.
intptr_t BranchInstr::SuccessorCount() const {
  return 2;
}
// Successor 0 is the true target, successor 1 the false target.
BlockEntryInstr* BranchInstr::SuccessorAt(intptr_t index) const {
  switch (index) {
    case 0:
      return true_successor_;
    case 1:
      return false_successor_;
    default:
      UNREACHABLE();
      return nullptr;
  }
}
// An unconditional jump has exactly one successor.
intptr_t GotoInstr::SuccessorCount() const {
  return 1;
}
// Only index 0 is valid for a goto's single successor.
BlockEntryInstr* GotoInstr::SuccessorAt(intptr_t index) const {
  ASSERT(index == 0);
  return successor();
}
// Appends an unconditional jump to |entry| after this instruction, with a
// freshly allocated deopt id.
void Instruction::Goto(JoinEntryInstr* entry) {
  LinkTo(new GotoInstr(entry, CompilerState::Current().GetNextDeoptId()));
}
// A conversion can deoptimize only when narrowing to kUnboxedInt32 without
// truncation while the input's range is not provably within int32.
bool IntConverterInstr::ComputeCanDeoptimize() const {
  return (to() == kUnboxedInt32) && !is_truncating() &&
         !RangeUtils::Fits(value()->definition()->range(),
                           RangeBoundary::kRangeBoundaryInt32);
}
// Returns true when unboxing to int32 may fail and require deoptimization,
// based on the input's class id and inferred range.
bool UnboxInt32Instr::ComputeCanDeoptimize() const {
  // Non-speculative unboxing never deoptimizes.
  if (SpeculativeModeOfInputs() == kNotSpeculative) {
    return false;
  }
  const intptr_t value_cid = value()->Type()->ToCid();
  if (value_cid == kSmiCid) {
    // A Smi can only fail when Smis are wider than 32 bits, the conversion
    // is non-truncating, and the range may exceed int32.
    return (compiler::target::kSmiBits > 32) && !is_truncating() &&
           !RangeUtils::Fits(value()->definition()->range(),
                             RangeBoundary::kRangeBoundaryInt32);
  } else if (value_cid == kMintCid) {
    // A Mint fails unless truncating or provably within int32.
    return !is_truncating() &&
           !RangeUtils::Fits(value()->definition()->range(),
                             RangeBoundary::kRangeBoundaryInt32);
  } else if (is_truncating() && value()->definition()->IsBoxInteger()) {
    // Truncating unbox of a freshly boxed integer cannot fail.
    return false;
  } else if ((compiler::target::kSmiBits < 32) && value()->Type()->IsInt()) {
    return !RangeUtils::Fits(value()->definition()->range(),
                             RangeBoundary::kRangeBoundaryInt32);
  } else {
    // Unknown input class: assume it may deoptimize.
    return true;
  }
}
// Returns true when unboxing to uint32 (always truncating) may fail.
bool UnboxUint32Instr::ComputeCanDeoptimize() const {
  ASSERT(is_truncating());
  // Non-speculative unboxing never deoptimizes.
  if (SpeculativeModeOfInputs() == kNotSpeculative) {
    return false;
  }
  // Smi and Mint inputs always unbox (truncating) without failure.
  if ((value()->Type()->ToCid() == kSmiCid) ||
      (value()->Type()->ToCid() == kMintCid)) {
    return false;
  }
  // Check input value's range: anything provably within int64 is safe.
  Range* value_range = value()->definition()->range();
  return !RangeUtils::Fits(value_range, RangeBoundary::kRangeBoundaryInt64);
}
// Returns true when this int32 operation may overflow and hence require
// deoptimization, per operation kind.
bool BinaryInt32OpInstr::ComputeCanDeoptimize() const {
  switch (op_kind()) {
    // Bitwise operations can never overflow.
    case Token::kBIT_AND:
    case Token::kBIT_OR:
    case Token::kBIT_XOR:
      return false;
    case Token::kSHR:
      return false;
    case Token::kUSHR:
    case Token::kSHL:
      // Currently only shifts by in range constant are supported, see
      // BinaryInt32OpInstr::IsSupported.
      return can_overflow();
    case Token::kMOD: {
      // kMOD is not lowered to a 32-bit operation.
      UNREACHABLE();
    }
    default:
      return can_overflow();
  }
}
// Returns true when this Smi operation may deoptimize, using the inferred
// range of the right operand to rule failure cases out.
bool BinarySmiOpInstr::ComputeCanDeoptimize() const {
  switch (op_kind()) {
    // Bitwise operations can never overflow.
    case Token::kBIT_AND:
    case Token::kBIT_OR:
    case Token::kBIT_XOR:
      return false;
    case Token::kSHR:
      // Fails only for a possibly negative shift count.
      return !RangeUtils::IsPositive(right_range());
    case Token::kUSHR:
    case Token::kSHL:
      // Fails on overflow or a possibly negative shift count.
      return can_overflow() || !RangeUtils::IsPositive(right_range());
    case Token::kMOD:
      // Fails on a possibly zero divisor.
      return RangeUtils::CanBeZero(right_range());
    case Token::kTRUNCDIV:
      // Fails on a possibly zero divisor or a possible -1 divisor (the
      // minimum Smi divided by -1 overflows).
      return RangeUtils::CanBeZero(right_range()) ||
             RangeUtils::Overlaps(right_range(), -1, -1);
    default:
      return can_overflow();
  }
}
// Returns true when the shift amount is known to lie within [0, max].
bool ShiftIntegerOpInstr::IsShiftCountInRange(int64_t max) const {
  return RangeUtils::IsWithin(shift_range(), 0, max);
}
bool BinaryIntegerOpInstr::RightIsPowerOfTwoConstant() const {
if (!right()->definition()->IsConstant()) return false;
const Object& constant = right()->definition()->AsConstant()->value();
if (!constant.IsSmi()) return false;
const intptr_t int_value = Smi::Cast(constant).Value();
ASSERT(int_value != kIntptrMin);
return Utils::IsPowerOfTwo(Utils::Abs(int_value));
}
// Returns the number of value bits carried by integer representation |r|.
static intptr_t RepresentationBits(Representation r) {
  switch (r) {
    case kTagged:
      // A Smi's payload bits plus its sign bit.
      return compiler::target::kSmiBits + 1;
    case kUnboxedInt32:
    case kUnboxedUint32:
      return 32;
    case kUnboxedInt64:
      return 64;
    default:
      UNREACHABLE();
      return 0;
  }
}
// Returns an all-ones mask covering exactly the low RepresentationBits(r)
// bits of an int64.
static int64_t RepresentationMask(Representation r) {
  const intptr_t bits = RepresentationBits(r);
  return static_cast<int64_t>(static_cast<uint64_t>(-1) >> (64 - bits));
}
// Tries to simplify a double operation whose |left| operand is an integer
// constant. Currently only 1 * x -> x is handled; returns NULL when no
// simplification applies.
static Definition* CanonicalizeCommutativeDoubleArithmetic(Token::Kind op,
                                                           Value* left,
                                                           Value* right) {
  int64_t left_value;
  if (!Evaluator::ToIntegerConstant(left, &left_value)) {
    return NULL;
  }
  // Can't apply 0.0 * x -> 0.0 equivalence to double operation because
  // 0.0 * NaN is NaN not 0.0.
  // Can't apply 0.0 + x -> x to double because 0.0 + (-0.0) is 0.0 not -0.0.
  switch (op) {
    case Token::kMUL:
      if (left_value == 1) {
        if (right->definition()->representation() != kUnboxedDouble) {
          // Can't yet apply the equivalence because representation selection
          // did not run yet. We need it to guarantee that right value is
          // correctly coerced to double. The second canonicalization pass
          // will apply this equivalence.
          return NULL;
        } else {
          return right->definition();
        }
      }
      break;
    default:
      break;
  }
  return NULL;
}
// Canonicalizes a double->float conversion: removes it when unused, and
// collapses a conversion whose input is a float->double widen back to the
// original float value.
Definition* DoubleToFloatInstr::Canonicalize(FlowGraph* flow_graph) {
#ifdef DEBUG
  // Must only be used in Float32 StoreIndexedInstr, FloatToDoubleInstr,
  // Phis introduce by load forwarding, or MaterializeObject for
  // eliminated Float32 array.
  ASSERT(env_use_list() == NULL);
  for (Value* use = input_use_list(); use != NULL; use = use->next_use()) {
    ASSERT(use->instruction()->IsPhi() ||
           use->instruction()->IsFloatToDouble() ||
           (use->instruction()->IsStoreIndexed() &&
            (use->instruction()->AsStoreIndexed()->class_id() ==
             kTypedDataFloat32ArrayCid)) ||
           (use->instruction()->IsMaterializeObject() &&
            (use->instruction()->AsMaterializeObject()->cls().id() ==
             kTypedDataFloat32ArrayCid)));
  }
#endif
  if (!HasUses()) return NULL;
  if (value()->definition()->IsFloatToDouble()) {
    // D2F(F2D(v)) == v: this narrowing undoes the widening of its input.
    return value()->definition()->AsFloatToDouble()->value()->definition();
  }
  return this;
}
// The conversion has no side effects, so it can be removed when unused.
Definition* FloatToDoubleInstr::Canonicalize(FlowGraph* flow_graph) {
  return HasUses() ? this : nullptr;
}
// Removes unused double operations, applies 1.0 * x -> x (in both operand
// orders), and rewrites x * x into a dedicated square instruction.
Definition* BinaryDoubleOpInstr::Canonicalize(FlowGraph* flow_graph) {
  if (!HasUses()) return nullptr;
  // Try the constant-operand simplification with the constant on either side.
  Definition* result =
      CanonicalizeCommutativeDoubleArithmetic(op_kind(), left(), right());
  if (result != nullptr) {
    return result;
  }
  result =
      CanonicalizeCommutativeDoubleArithmetic(op_kind(), right(), left());
  if (result != nullptr) {
    return result;
  }
  // x * x == DoubleSquare(x).
  if ((op_kind() == Token::kMUL) &&
      (left()->definition() == right()->definition())) {
    MathUnaryInstr* math_unary = new MathUnaryInstr(
        MathUnaryInstr::kDoubleSquare, new Value(left()->definition()),
        DeoptimizationTarget());
    flow_graph->InsertBefore(this, math_unary, env(), FlowGraph::kValue);
    return math_unary;
  }
  return this;
}
// The test has no side effects, so it can be removed when unused.
Definition* DoubleTestOpInstr::Canonicalize(FlowGraph* flow_graph) {
  return HasUses() ? this : nullptr;
}
// Returns true for binary integer operations whose operands may be
// swapped without changing the result.
static bool IsCommutative(Token::Kind op) {
  switch (op) {
    case Token::kMUL:
    case Token::kADD:
    case Token::kBIT_AND:
    case Token::kBIT_OR:
    case Token::kBIT_XOR:
      return true;
    default:
      return false;
  }
}
// Constructs a unary integer operation for the given representation, or
// returns nullptr if the combination is not supported (unary ops on
// unboxed int32 are not implemented). Attaches the range, if known.
UnaryIntegerOpInstr* UnaryIntegerOpInstr::Make(Representation representation,
                                               Token::Kind op_kind,
                                               Value* value,
                                               intptr_t deopt_id,
                                               Range* range) {
  UnaryIntegerOpInstr* op = nullptr;
  switch (representation) {
    case kTagged:
      op = new UnarySmiOpInstr(op_kind, value, deopt_id);
      break;
    case kUnboxedInt32:
      // Unary operations on unboxed int32 are not supported.
      return nullptr;
    case kUnboxedUint32:
      op = new UnaryUint32OpInstr(op_kind, value, deopt_id);
      break;
    case kUnboxedInt64:
      op = new UnaryInt64OpInstr(op_kind, value, deopt_id);
      break;
    default:
      UNREACHABLE();
      return nullptr;
  }
  // Every non-returning case above assigns op, so it is non-null here.
  if (!Range::IsUnknown(range)) {
    op->set_range(*range);
  }
  ASSERT(op->representation() == representation);
  return op;
}
// Constructs a binary integer operation of the given representation, or
// returns nullptr if the op_kind/representation combination is not
// supported on this platform. For operations whose code generation
// depends on the right operand (shifts on all representations, and Smi
// division/modulus), the range of a constant right operand is inferred
// up front and passed to the new instruction.
BinaryIntegerOpInstr* BinaryIntegerOpInstr::Make(
    Representation representation,
    Token::Kind op_kind,
    Value* left,
    Value* right,
    intptr_t deopt_id,
    SpeculativeMode speculative_mode) {
  BinaryIntegerOpInstr* op = nullptr;
  // Range of the right operand when it is a constant; nullptr otherwise.
  Range* right_range = nullptr;
  switch (op_kind) {
    case Token::kMOD:
    case Token::kTRUNCDIV:
      // Only tagged (Smi) division/modulus makes use of the range.
      if (representation != kTagged) break;
      FALL_THROUGH;
    case Token::kSHL:
    case Token::kSHR:
    case Token::kUSHR:
      if (auto const const_def = right->definition()->AsConstant()) {
        right_range = new Range();
        const_def->InferRange(nullptr, right_range);
      }
      break;
    default:
      break;
  }
  switch (representation) {
    case kTagged:
      op = new BinarySmiOpInstr(op_kind, left, right, deopt_id, right_range);
      break;
    case kUnboxedInt32:
      // Not every operation is supported on unboxed int32.
      if (!BinaryInt32OpInstr::IsSupported(op_kind, left, right)) {
        return nullptr;
      }
      op = new BinaryInt32OpInstr(op_kind, left, right, deopt_id);
      break;
    case kUnboxedUint32:
      if ((op_kind == Token::kSHL) || (op_kind == Token::kSHR) ||
          (op_kind == Token::kUSHR)) {
        // Shifts get dedicated instructions; the speculative variant can
        // deoptimize on an invalid shift count instead of throwing.
        if (speculative_mode == kNotSpeculative) {
          op = new ShiftUint32OpInstr(op_kind, left, right, deopt_id,
                                      right_range);
        } else {
          op = new SpeculativeShiftUint32OpInstr(op_kind, left, right, deopt_id,
                                                 right_range);
        }
      } else {
        op = new BinaryUint32OpInstr(op_kind, left, right, deopt_id);
      }
      break;
    case kUnboxedInt64:
      if ((op_kind == Token::kSHL) || (op_kind == Token::kSHR) ||
          (op_kind == Token::kUSHR)) {
        if (speculative_mode == kNotSpeculative) {
          op = new ShiftInt64OpInstr(op_kind, left, right, deopt_id,
                                     right_range);
        } else {
          op = new SpeculativeShiftInt64OpInstr(op_kind, left, right, deopt_id,
                                                right_range);
        }
      } else {
        op = new BinaryInt64OpInstr(op_kind, left, right, deopt_id);
      }
      break;
    default:
      UNREACHABLE();
      return nullptr;
  }
  ASSERT(op->representation() == representation);
  return op;
}
// Constructs a binary integer operation (see the other Make overload) and
// transfers the supplied analysis results — range, overflow flag and
// truncation — onto the newly created instruction. Returns nullptr when
// the underlying overload does.
BinaryIntegerOpInstr* BinaryIntegerOpInstr::Make(
    Representation representation,
    Token::Kind op_kind,
    Value* left,
    Value* right,
    intptr_t deopt_id,
    bool can_overflow,
    bool is_truncating,
    Range* range,
    SpeculativeMode speculative_mode) {
  BinaryIntegerOpInstr* instr = BinaryIntegerOpInstr::Make(
      representation, op_kind, left, right, deopt_id, speculative_mode);
  if (instr != nullptr) {
    if (!Range::IsUnknown(range)) {
      instr->set_range(*range);
    }
    instr->set_can_overflow(can_overflow);
    if (is_truncating) {
      instr->mark_truncating();
    }
  }
  return instr;
}
// Constant-folds and strength-reduces binary integer operations:
//  - evaluates the op when both operands are constants,
//  - moves a constant left operand to the right for commutative ops,
//  - applies algebraic identities when the right operand is a constant
//    (x*1, x*2 -> x<<1, x+0, x&mask, x|0, x^mask -> ~x, x~/1, x~/-1 -> -x,
//     x%+-1 -> 0, shift-by-zero, and negative/oversized shift counts).
Definition* BinaryIntegerOpInstr::Canonicalize(FlowGraph* flow_graph) {
  // If both operands are constants evaluate this expression. Might
  // occur due to load forwarding after constant propagation pass
  // have already been run.
  if (left()->BindsToConstant() && right()->BindsToConstant()) {
    const Integer& result = Integer::Handle(Evaluator::BinaryIntegerEvaluate(
        left()->BoundConstant(), right()->BoundConstant(), op_kind(),
        is_truncating(), representation(), Thread::Current()));
    if (!result.IsNull()) {
      return flow_graph->TryCreateConstantReplacementFor(this, result);
    }
  }
  // Move a constant operand to the right for commutative ops so the
  // identity rewrites below only need to inspect the right operand.
  if (left()->BindsToConstant() && !right()->BindsToConstant() &&
      IsCommutative(op_kind())) {
    Value* l = left();
    Value* r = right();
    SetInputAt(0, r);
    SetInputAt(1, l);
  }
  int64_t rhs;
  if (!Evaluator::ToIntegerConstant(right(), &rhs)) {
    return this;
  }
  // For truncating operations the constant itself can be truncated to the
  // result representation without changing the outcome.
  if (is_truncating()) {
    switch (op_kind()) {
      case Token::kMUL:
      case Token::kSUB:
      case Token::kADD:
      case Token::kBIT_AND:
      case Token::kBIT_OR:
      case Token::kBIT_XOR:
        rhs = Evaluator::TruncateTo(rhs, representation());
        break;
      default:
        break;
    }
  }
  switch (op_kind()) {
    case Token::kMUL:
      if (rhs == 1) {
        return left()->definition();  // x * 1 == x.
      } else if (rhs == 0) {
        return right()->definition();  // x * 0 == 0.
      } else if (rhs == 2) {
        // x * 2 == x << 1.
        const int64_t shift_1 = 1;
        ConstantInstr* constant_1 =
            flow_graph->GetConstant(Smi::Handle(Smi::New(shift_1)));
        BinaryIntegerOpInstr* shift = BinaryIntegerOpInstr::Make(
            representation(), Token::kSHL, left()->CopyWithType(),
            new Value(constant_1), GetDeoptId(), can_overflow(),
            is_truncating(), range(), SpeculativeModeOfInputs());
        if (shift != nullptr) {
          // Assign a range to the shift factor, just in case range
          // analysis no longer runs after this rewriting.
          if (auto shift_with_range = shift->AsShiftIntegerOp()) {
            shift_with_range->set_shift_range(
                new Range(RangeBoundary::FromConstant(shift_1),
                          RangeBoundary::FromConstant(shift_1)));
          }
          flow_graph->InsertBefore(this, shift, env(), FlowGraph::kValue);
          return shift;
        }
      }
      break;
    case Token::kADD:
      if (rhs == 0) {
        return left()->definition();  // x + 0 == x.
      }
      break;
    case Token::kBIT_AND:
      if (rhs == 0) {
        return right()->definition();  // x & 0 == 0.
      } else if (rhs == RepresentationMask(representation())) {
        return left()->definition();  // x & ~0 == x.
      }
      break;
    case Token::kBIT_OR:
      if (rhs == 0) {
        return left()->definition();  // x | 0 == x.
      } else if (rhs == RepresentationMask(representation())) {
        return right()->definition();  // x | ~0 == ~0.
      }
      break;
    case Token::kBIT_XOR:
      if (rhs == 0) {
        return left()->definition();  // x ^ 0 == x.
      } else if (rhs == RepresentationMask(representation())) {
        // x ^ ~0 == ~x.
        UnaryIntegerOpInstr* bit_not = UnaryIntegerOpInstr::Make(
            representation(), Token::kBIT_NOT, left()->CopyWithType(),
            GetDeoptId(), range());
        if (bit_not != NULL) {
          flow_graph->InsertBefore(this, bit_not, env(), FlowGraph::kValue);
          return bit_not;
        }
      }
      break;
    case Token::kSUB:
      if (rhs == 0) {
        return left()->definition();  // x - 0 == x.
      }
      break;
    case Token::kTRUNCDIV:
      if (rhs == 1) {
        return left()->definition();  // x ~/ 1 == x.
      } else if (rhs == -1) {
        // x ~/ -1 == -x.
        UnaryIntegerOpInstr* negation = UnaryIntegerOpInstr::Make(
            representation(), Token::kNEGATE, left()->CopyWithType(),
            GetDeoptId(), range());
        if (negation != NULL) {
          flow_graph->InsertBefore(this, negation, env(), FlowGraph::kValue);
          return negation;
        }
      }
      break;
    case Token::kMOD:
      // x % 1 == x % -1 == 0.
      if (std::abs(rhs) == 1) {
        return flow_graph->TryCreateConstantReplacementFor(this,
                                                           Object::smi_zero());
      }
      break;
    case Token::kUSHR:
      // An unsigned shift right by 64 or more bits always produces zero.
      if (rhs >= kBitsPerInt64) {
        return flow_graph->TryCreateConstantReplacementFor(this,
                                                           Object::smi_zero());
      }
      FALL_THROUGH;
    case Token::kSHR:
      if (rhs == 0) {
        return left()->definition();  // Shift by zero is a no-op.
      } else if (rhs < 0) {
        // Instruction will always throw on negative rhs operand.
        if (!CanDeoptimize()) {
          // For non-speculative operations (no deopt), let
          // the code generator deal with throw on slowpath.
          break;
        }
        ASSERT(GetDeoptId() != DeoptId::kNone);
        DeoptimizeInstr* deopt =
            new DeoptimizeInstr(ICData::kDeoptBinarySmiOp, GetDeoptId());
        flow_graph->InsertBefore(this, deopt, env(), FlowGraph::kEffect);
        // Replace with zero since it always throws.
        return flow_graph->TryCreateConstantReplacementFor(this,
                                                           Object::smi_zero());
      }
      break;
    case Token::kSHL: {
      const intptr_t result_bits = RepresentationBits(representation());
      if (rhs == 0) {
        return left()->definition();  // Shift by zero is a no-op.
      } else if ((rhs >= kBitsPerInt64) ||
                 ((rhs >= result_bits) && is_truncating())) {
        // A truncating overshift always produces zero.
        return flow_graph->TryCreateConstantReplacementFor(this,
                                                           Object::smi_zero());
      } else if ((rhs < 0) || ((rhs >= result_bits) && !is_truncating())) {
        // Instruction will always throw on negative rhs operand or
        // deoptimize on large rhs operand.
        if (!CanDeoptimize()) {
          // For non-speculative operations (no deopt), let
          // the code generator deal with throw on slowpath.
          break;
        }
        ASSERT(GetDeoptId() != DeoptId::kNone);
        DeoptimizeInstr* deopt =
            new DeoptimizeInstr(ICData::kDeoptBinarySmiOp, GetDeoptId());
        flow_graph->InsertBefore(this, deopt, env(), FlowGraph::kEffect);
        // Replace with zero since it overshifted or always throws.
        return flow_graph->TryCreateConstantReplacementFor(this,
                                                           Object::smi_zero());
      }
      break;
    }
    default:
      break;
  }
  return this;
}
// Optimizations that eliminate or simplify individual instructions.

// Default canonicalization: leave the instruction unchanged.
Instruction* Instruction::Canonicalize(FlowGraph* flow_graph) {
  return this;
}
// Default canonicalization for definitions: leave unchanged.
Definition* Definition::Canonicalize(FlowGraph* flow_graph) {
  return this;
}
Definition* RedefinitionInstr::Canonicalize(FlowGraph* flow_graph) {
  // Must not remove Redefinitions without uses until LICM: even though a
  // Redefinition might not have any uses itself it can still be dominating
  // uses of the value it redefines and must serve as a barrier for those
  // uses. RenameUsesDominatedByRedefinitions would normalize the graph and
  // route those uses through this redefinition.
  if (!HasUses() && !flow_graph->is_licm_allowed()) {
    return nullptr;
  }
  // The redefinition is redundant if it does not narrow the value's type.
  if (constrained_type() != nullptr &&
      constrained_type()->IsEqualTo(value()->Type())) {
    return value()->definition();
  }
  return this;
}
// OSR-only stack checks are dropped in optimized code; checks that also
// poll for preemption must be preserved.
Instruction* CheckStackOverflowInstr::Canonicalize(FlowGraph* flow_graph) {
  switch (kind_) {
    case kOsrAndPreemption:
      return this;
    case kOsrOnly:
      // Don't need OSR entries in the optimized code.
      return nullptr;
  }
  // Switch above exhausts all possibilities but some compilers can't figure
  // it out.
  UNREACHABLE();
  return this;
}
// Returns true if instances of the given class id have a length that is
// fixed at allocation: typed data, (immutable) arrays and type arguments.
bool LoadFieldInstr::IsFixedLengthArrayCid(intptr_t cid) {
  if (IsTypedDataClassId(cid) || IsExternalTypedDataClassId(cid)) {
    return true;
  }
  return (cid == kArrayCid) || (cid == kImmutableArrayCid) ||
         (cid == kTypeArgumentsCid);
}
// Returns true if [function] is one of the recognized typed-data view
// factory constructors (ByteData view or any of the typed array views).
bool LoadFieldInstr::IsTypedDataViewFactory(const Function& function) {
  auto kind = function.recognized_kind();
  switch (kind) {
    case MethodRecognizer::kTypedData_ByteDataView_factory:
    case MethodRecognizer::kTypedData_Int8ArrayView_factory:
    case MethodRecognizer::kTypedData_Uint8ArrayView_factory:
    case MethodRecognizer::kTypedData_Uint8ClampedArrayView_factory:
    case MethodRecognizer::kTypedData_Int16ArrayView_factory:
    case MethodRecognizer::kTypedData_Uint16ArrayView_factory:
    case MethodRecognizer::kTypedData_Int32ArrayView_factory:
    case MethodRecognizer::kTypedData_Uint32ArrayView_factory:
    case MethodRecognizer::kTypedData_Int64ArrayView_factory:
    case MethodRecognizer::kTypedData_Uint64ArrayView_factory:
    case MethodRecognizer::kTypedData_Float32ArrayView_factory:
    case MethodRecognizer::kTypedData_Float64ArrayView_factory:
    case MethodRecognizer::kTypedData_Float32x4ArrayView_factory:
    case MethodRecognizer::kTypedData_Int32x4ArrayView_factory:
    case MethodRecognizer::kTypedData_Float64x2ArrayView_factory:
      return true;
    default:
      return false;
  }
}
// Unused constants are simply removed from the graph.
Definition* ConstantInstr::Canonicalize(FlowGraph* flow_graph) {
  return HasUses() ? this : nullptr;
}
// A math unary instruction has a side effect (exception thrown) if the
// argument is not a number, so it cannot be removed even when unused.
// TODO(srdjan): eliminate if has no uses and input is guaranteed to be number.
Definition* MathUnaryInstr::Canonicalize(FlowGraph* flow_graph) {
  return this;
}
// Attempts to evaluate a load of slot [field] from the constant [instance]
// at compile time. Handles Dart fields (delegating to the Field overload)
// plus a few recognized native slots: arguments-descriptor counts/sizes and
// the type-arguments length. On success stores the loaded value into
// [result] and returns true.
bool LoadFieldInstr::TryEvaluateLoad(const Object& instance,
                                     const Slot& field,
                                     Object* result) {
  switch (field.kind()) {
    case Slot::Kind::kDartField:
      return TryEvaluateLoad(instance, field.field(), result);
    case Slot::Kind::kArgumentsDescriptor_type_args_len:
      // Arguments descriptors are immutable arrays, so their contents can
      // be read safely at compile time.
      if (instance.IsArray() && Array::Cast(instance).IsImmutable()) {
        ArgumentsDescriptor desc(Array::Cast(instance));
        *result = Smi::New(desc.TypeArgsLen());
        return true;
      }
      return false;
    case Slot::Kind::kArgumentsDescriptor_count:
      if (instance.IsArray() && Array::Cast(instance).IsImmutable()) {
        ArgumentsDescriptor desc(Array::Cast(instance));
        *result = Smi::New(desc.Count());
        return true;
      }
      return false;
    case Slot::Kind::kArgumentsDescriptor_positional_count:
      if (instance.IsArray() && Array::Cast(instance).IsImmutable()) {
        ArgumentsDescriptor desc(Array::Cast(instance));
        *result = Smi::New(desc.PositionalCount());
        return true;
      }
      return false;
    case Slot::Kind::kArgumentsDescriptor_size:
      // If a constant arguments descriptor appears, then either it is from
      // a invocation dispatcher (which always has tagged arguments and so
      // [host]Size() == [target]Size() == Count()) or the constant should
      // have the correct Size() in terms of the target architecture if any
      // spill slots are involved.
      if (instance.IsArray() && Array::Cast(instance).IsImmutable()) {
        ArgumentsDescriptor desc(Array::Cast(instance));
        *result = Smi::New(desc.Size());
        return true;
      }
      return false;
    case Slot::Kind::kTypeArguments_length:
      if (instance.IsTypeArguments()) {
        *result = Smi::New(TypeArguments::Cast(instance).Length());
        return true;
      }
      return false;
    default:
      // Other slots cannot be evaluated at compile time.
      break;
  }
  return false;
}
// Attempts to evaluate a load of [field] from [instance] at compile time.
// Only final fields of instances whose class (transitively) declares the
// field are read. On success stores the value into [result] and returns
// true.
bool LoadFieldInstr::TryEvaluateLoad(const Object& instance,
                                     const Field& field,
                                     Object* result) {
  if (!field.is_final() || !instance.IsInstance()) {
    return false;
  }
  // Walk the superclass chain to verify that the instance's class really
  // has the field which we are trying to load from.
  for (Class& cls = Class::Handle(instance.clazz()); !cls.IsNull();
       cls = cls.SuperClass()) {
    if (cls.ptr() == field.Owner()) {
      // Object has the field: execute the load.
      *result = Instance::Cast(instance).GetField(field);
      return true;
    }
  }
  // Failed to find the field in the class or its superclasses.
  return false;
}
// Evaluates this load against the constant [instance], storing the loaded
// value into [result]. Returns true on success (see TryEvaluateLoad).
bool LoadFieldInstr::Evaluate(const Object& instance, Object* result) {
  return TryEvaluateLoad(instance, slot(), result);
}
Definition* LoadFieldInstr::Canonicalize(FlowGraph* flow_graph) {
if (!HasUses() && !calls_initializer()) return nullptr;
if (IsImmutableLengthLoad()) {
ASSERT(!calls_initializer());
Definition* array = instance()->definition()->OriginalDefinition();
if (StaticCallInstr* call = array->AsStaticCall()) {
// For fixed length arrays if the array is the result of a known
// constructor call we can replace the length load with the length
// argument passed to the constructor.
if (call->is_known_list_constructor() &&
IsFixedLengthArrayCid(call->Type()->ToCid())) {
return call->ArgumentAt(1);
} else if (call->function().recognized_kind() ==
MethodRecognizer::kByteDataFactory) {
// Similarly, we check for the ByteData constructor and forward its
// explicit length argument appropriately.
return call->ArgumentAt(1);
} else if (IsTypedDataViewFactory(call->function())) {
// Typed data view factories all take three arguments (after
// the implicit type arguments parameter):
//
// 1) _TypedList buffer -- the underlying data for the view
// 2) int offsetInBytes -- the offset into the buffer to start viewing
// 3) int length -- the number of elements in the view
//
// Here, we forward the third.
return call->ArgumentAt(3);
}
} else if (CreateArrayInstr* create_array = array->AsCreateArray()) {
if (slot().kind() == Slot::Kind::kArray_length) {
return create_array->num_elements()->definition();
}
} else if (AllocateTypedDataInstr* alloc_typed_data =
array->AsAllocateTypedData()) {
if (slot().kind() == Slot::Kind::kTypedDataBase_length) {
return alloc_typed_data->num_elements()->definition();
}
} else if (LoadFieldInstr* load_array = array->AsLoadField()) {
// For arrays with guarded lengths, replace the length load
// with a constant.
const Slot& slot = load_array->slot();
if (slot.IsDartField()) {
if (slot.field().guarded_list_length() >= 0) {
return flow_graph->GetConstant(
Smi::Handle(Smi::New(slot.field().guarded_list_length())));
}
}
}
} else if (slot().kind() == Slot::Kind::kTypedDataView_typed_data) {
// This case cover the first explicit argument to typed data view
// factories, the data (buffer).
ASSERT(!calls_initializer());
Definition* array = instance()->definition()->OriginalDefinition();
if (StaticCallInstr* call = array->AsStaticCall()) {
if (IsTypedDataViewFactory(call->function())) {
return call->ArgumentAt(1);
}
}
} else if (slot().kind() == Slot::Kind::kTypedDataView_offset_in_bytes) {