| // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
| // for details. All rights reserved. Use of this source code is governed by a |
| // BSD-style license that can be found in the LICENSE file. |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| |
| #include "vm/compiler/backend/il.h" |
| |
| #include "vm/bit_vector.h" |
| #include "vm/bootstrap.h" |
| #include "vm/compiler/backend/code_statistics.h" |
| #include "vm/compiler/backend/constant_propagator.h" |
| #include "vm/compiler/backend/flow_graph_compiler.h" |
| #include "vm/compiler/backend/linearscan.h" |
| #include "vm/compiler/backend/locations.h" |
| #include "vm/compiler/backend/loops.h" |
| #include "vm/compiler/backend/range_analysis.h" |
| #include "vm/compiler/frontend/flow_graph_builder.h" |
| #include "vm/compiler/jit/compiler.h" |
| #include "vm/compiler/method_recognizer.h" |
| #include "vm/cpu.h" |
| #include "vm/dart_entry.h" |
| #include "vm/object.h" |
| #include "vm/object_store.h" |
| #include "vm/os.h" |
| #include "vm/regexp_assembler_ir.h" |
| #include "vm/resolver.h" |
| #include "vm/scopes.h" |
| #include "vm/stub_code.h" |
| #include "vm/symbols.h" |
| #include "vm/type_testing_stubs.h" |
| |
| #include "vm/compiler/backend/il_printer.h" |
| |
| namespace dart { |
| |
| DEFINE_FLAG(bool, |
| propagate_ic_data, |
| true, |
| "Propagate IC data from unoptimized to optimized IC calls."); |
| DEFINE_FLAG(bool, |
| two_args_smi_icd, |
| true, |
|             "Generate special IC stubs for two args Smi operations."); |
| DEFINE_FLAG(bool, |
| unbox_numeric_fields, |
| !USING_DBC, |
| "Support unboxed double and float32x4 fields."); |
| |
| class SubclassFinder { |
| public: |
| SubclassFinder(Zone* zone, |
| GrowableArray<intptr_t>* cids, |
| bool include_abstract) |
| : array_handles_(zone), |
| class_handles_(zone), |
| cids_(cids), |
| include_abstract_(include_abstract) {} |
| |
| void ScanSubClasses(const Class& klass) { |
| if (include_abstract_ || !klass.is_abstract()) { |
| cids_->Add(klass.id()); |
| } |
| ScopedHandle<GrowableObjectArray> array(&array_handles_); |
| ScopedHandle<Class> subclass(&class_handles_); |
| *array = klass.direct_subclasses(); |
| if (!array->IsNull()) { |
| for (intptr_t i = 0; i < array->Length(); ++i) { |
| *subclass ^= array->At(i); |
| ScanSubClasses(*subclass); |
| } |
| } |
| } |
| |
| void ScanImplementorClasses(const Class& klass) { |
| // An implementor of [klass] is |
| // * the [klass] itself. |
| // * all implementors of the direct subclasses of [klass]. |
| // * all implementors of the direct implementors of [klass]. |
| if (include_abstract_ || !klass.is_abstract()) { |
| cids_->Add(klass.id()); |
| } |
| |
| ScopedHandle<GrowableObjectArray> array(&array_handles_); |
| ScopedHandle<Class> subclass_or_implementor(&class_handles_); |
| |
| *array = klass.direct_subclasses(); |
| if (!array->IsNull()) { |
| for (intptr_t i = 0; i < array->Length(); ++i) { |
| *subclass_or_implementor ^= (*array).At(i); |
| ScanImplementorClasses(*subclass_or_implementor); |
| } |
| } |
| *array = klass.direct_implementors(); |
| if (!array->IsNull()) { |
| for (intptr_t i = 0; i < array->Length(); ++i) { |
| *subclass_or_implementor ^= (*array).At(i); |
| ScanImplementorClasses(*subclass_or_implementor); |
| } |
| } |
| } |
| |
| private: |
| ReusableHandleStack<GrowableObjectArray> array_handles_; |
| ReusableHandleStack<Class> class_handles_; |
| GrowableArray<intptr_t>* cids_; |
| const bool include_abstract_; |
| }; |
| |
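| // Lazily computes and caches (per class id) the cid ranges that together |
| // cover all subtypes of [klass] present in the class table. |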
| const CidRangeVector& HierarchyInfo::SubtypeRangesForClass( |
| const Class& klass, |
| bool include_abstract) { |
| ClassTable* table = thread()->isolate()->class_table(); |
| const intptr_t cid_count = table->NumCids(); |
| CidRangeVector** cid_ranges = |
| include_abstract ? &cid_subtype_ranges_abstract_ : &cid_subtype_ranges_; |
| if (*cid_ranges == NULL) { |
| *cid_ranges = new CidRangeVector[cid_count]; |
| } |
| |
| CidRangeVector& ranges = (*cid_ranges)[klass.id()]; |
| if (ranges.length() == 0) { |
| if (!FLAG_precompiled_mode) { |
| BuildRangesForJIT(table, &ranges, klass, /*use_subtype_test=*/true, |
| include_abstract); |
| } else { |
| BuildRangesFor(table, &ranges, klass, /*use_subtype_test=*/true, |
| include_abstract); |
| } |
| } |
| return ranges; |
| } |
| |
| const CidRangeVector& HierarchyInfo::SubclassRangesForClass( |
| const Class& klass) { |
| ClassTable* table = thread()->isolate()->class_table(); |
| const intptr_t cid_count = table->NumCids(); |
| if (cid_subclass_ranges_ == NULL) { |
| cid_subclass_ranges_ = new CidRangeVector[cid_count]; |
| } |
| |
| CidRangeVector& ranges = cid_subclass_ranges_[klass.id()]; |
| if (ranges.length() == 0) { |
| if (!FLAG_precompiled_mode) { |
| BuildRangesForJIT(table, &ranges, klass, /*use_subtype_test=*/true); |
| } else { |
| BuildRangesFor(table, &ranges, klass, /*use_subtype_test=*/false); |
| } |
| } |
| return ranges; |
| } |
| |
| void HierarchyInfo::BuildRangesFor(ClassTable* table, |
| CidRangeVector* ranges, |
| const Class& klass, |
| bool use_subtype_test, |
| bool include_abstract) { |
| Zone* zone = thread()->zone(); |
| ClassTable* class_table = thread()->isolate()->class_table(); |
| |
| // Only really used if `use_subtype_test == true`. |
| const Type& dst_type = Type::Handle(zone, Type::RawCast(klass.RareType())); |
| AbstractType& cls_type = AbstractType::Handle(zone); |
| |
| Class& cls = Class::Handle(zone); |
| AbstractType& super_type = AbstractType::Handle(zone); |
| const intptr_t cid_count = table->NumCids(); |
| |
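|   // Scan the class table in cid order, growing an open [CidRange] while the |
|   // test keeps succeeding (skipped cids do not close the range) and emitting |
|   // the range as soon as a cid fails the test. |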
| intptr_t start = -1; |
| for (intptr_t cid = kInstanceCid; cid < cid_count; ++cid) { |
| // Create local zone because deep hierarchies may allocate lots of handles |
| // within one iteration of this loop. |
| StackZone stack_zone(thread()); |
| HANDLESCOPE(thread()); |
| |
| if (!table->HasValidClassAt(cid)) continue; |
| if (cid == kTypeArgumentsCid) continue; |
| if (cid == kVoidCid) continue; |
| if (cid == kDynamicCid) continue; |
| if (cid == kNullCid) continue; |
| cls = table->At(cid); |
| if (!include_abstract && cls.is_abstract()) continue; |
| if (cls.is_patch()) continue; |
| if (cls.IsTopLevel()) continue; |
| |
|     // We are interested in [CidRange]s of either subclasses or subtypes. |
|     bool test_succeeded = false; |
| if (use_subtype_test) { |
| cls_type = cls.RareType(); |
|       test_succeeded = cls_type.IsSubtypeOf(dst_type, NULL, NULL, Heap::kNew); |
| } else { |
| while (!cls.IsObjectClass()) { |
| if (cls.raw() == klass.raw()) { |
|           test_succeeded = true; |
| break; |
| } |
| |
| super_type = cls.super_type(); |
| const intptr_t type_class_id = super_type.type_class_id(); |
| cls = class_table->At(type_class_id); |
| } |
| } |
| |
|     if (start == -1 && test_succeeded) { |
|       start = cid; |
|     } else if (start != -1 && !test_succeeded) { |
| CidRange range(start, cid - 1); |
| ranges->Add(range); |
| start = -1; |
| } |
| } |
| |
| if (start != -1) { |
| CidRange range(start, cid_count - 1); |
| ranges->Add(range); |
| } |
| |
| if (start == -1 && ranges->length() == 0) { |
| CidRange range; |
| ASSERT(range.IsIllegalRange()); |
| ranges->Add(range); |
| } |
| } |
| |
| void HierarchyInfo::BuildRangesForJIT(ClassTable* table, |
| CidRangeVector* ranges, |
| const Class& dst_klass, |
| bool use_subtype_test, |
| bool include_abstract) { |
| if (dst_klass.InVMHeap()) { |
| BuildRangesFor(table, ranges, dst_klass, use_subtype_test, |
| include_abstract); |
| return; |
| } |
| |
| Zone* zone = thread()->zone(); |
| GrowableArray<intptr_t> cids; |
| SubclassFinder finder(zone, &cids, include_abstract); |
| if (use_subtype_test) { |
| finder.ScanImplementorClasses(dst_klass); |
| } else { |
| finder.ScanSubClasses(dst_klass); |
| } |
| |
| // Sort all collected cids. |
| intptr_t* cids_array = cids.data(); |
| |
| qsort(cids_array, cids.length(), sizeof(intptr_t), |
| [](const void* a, const void* b) { |
| return static_cast<int>(*static_cast<const intptr_t*>(a) - |
| *static_cast<const intptr_t*>(b)); |
| }); |
| |
| // Build ranges of all the cids. |
| Class& klass = Class::Handle(); |
| intptr_t left_cid = -1; |
| intptr_t last_cid = -1; |
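|   // [left_cid, last_cid] is the currently open range; -1 means none is open. |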
| for (intptr_t i = 0; i < cids.length(); ++i) { |
| if (left_cid == -1) { |
| left_cid = last_cid = cids[i]; |
| } else { |
| const intptr_t current_cid = cids[i]; |
| |
| // Skip duplicates. |
| if (current_cid == last_cid) continue; |
| |
|       // Consecutively numbered cids are ok. |
| if (current_cid == (last_cid + 1)) { |
| last_cid = current_cid; |
| } else { |
| // We sorted, after all! |
| RELEASE_ASSERT(last_cid < current_cid); |
| |
| intptr_t j = last_cid + 1; |
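|         // Walk the gap between [last_cid] and [current_cid] and stop at the |
|         // first class that cannot be skipped over. |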
| for (; j < current_cid; ++j) { |
| if (table->HasValidClassAt(j)) { |
| klass = table->At(j); |
| if (!klass.is_patch() && !klass.IsTopLevel()) { |
| // If we care about abstract classes also, we cannot skip over any |
| // arbitrary abstract class, only those which are subtypes. |
| if (include_abstract) { |
| break; |
| } |
| |
| // If the class is concrete we cannot skip over it. |
| if (!klass.is_abstract()) { |
| break; |
| } |
| } |
| } |
| } |
| |
| if (current_cid == j) { |
|           // If there are only abstract cids between [last_cid] and |
|           // [current_cid], then we connect them. |
| last_cid = current_cid; |
| } else { |
| // Finish the current open cid range and start a new one. |
| ranges->Add(CidRange{left_cid, last_cid}); |
| left_cid = last_cid = current_cid; |
| } |
| } |
| } |
| } |
| |
| // If there is an open cid-range which we haven't finished yet, we'll |
| // complete it. |
| if (left_cid != -1) { |
| ranges->Add(CidRange{left_cid, last_cid}); |
| } |
| } |
| |
| bool HierarchyInfo::CanUseSubtypeRangeCheckFor(const AbstractType& type) { |
| ASSERT(type.IsFinalized() && !type.IsMalformedOrMalbounded()); |
| |
| if (!type.IsInstantiated() || !type.IsType() || type.IsFunctionType() || |
| type.IsDartFunctionType()) { |
| return false; |
| } |
| |
| Zone* zone = thread()->zone(); |
| const Class& type_class = Class::Handle(zone, type.type_class()); |
| |
| // The FutureOr<T> type cannot be handled by checking whether the instance is |
| // a subtype of FutureOr and then checking whether the type argument `T` |
| // matches. |
| // |
| // Instead we would need to perform multiple checks: |
| // |
| // instance is Null || instance is T || instance is Future<T> |
| // |
| if (type_class.IsFutureOrClass()) { |
| return false; |
| } |
| |
| // We can use class id range checks only if we don't have to test type |
| // arguments. |
| // |
| // This is e.g. true for "String" but also for "List<dynamic>". (A type for |
| // which the type arguments vector is filled with "dynamic" is known as a rare |
|   // type.) |
| if (type_class.IsGeneric()) { |
| // TODO(kustermann): We might want to consider extending this when the type |
| // arguments are not "dynamic" but instantiated-to-bounds. |
| const Type& rare_type = |
| Type::Handle(zone, Type::RawCast(type_class.RareType())); |
| if (!rare_type.Equals(type)) { |
| return false; |
| } |
| } |
| |
| return true; |
| } |
| |
| bool HierarchyInfo::CanUseGenericSubtypeRangeCheckFor( |
| const AbstractType& type) { |
| ASSERT(type.IsFinalized() && !type.IsMalformedOrMalbounded()); |
| |
| if (!type.IsType() || type.IsFunctionType() || type.IsDartFunctionType()) { |
| return false; |
| } |
| |
|   // NOTE: In contrast to [CanUseSubtypeRangeCheckFor], we do allow |
|   // non-instantiated types here, since we handle type parameters in the type |
|   // expression in some cases (see below). |
| |
| Zone* zone = thread()->zone(); |
| const Class& type_class = Class::Handle(zone, type.type_class()); |
| const intptr_t num_type_parameters = type_class.NumTypeParameters(); |
| const intptr_t num_type_arguments = type_class.NumTypeArguments(); |
| |
| // The FutureOr<T> type cannot be handled by checking whether the instance is |
| // a subtype of FutureOr and then checking whether the type argument `T` |
| // matches. |
| // |
| // Instead we would need to perform multiple checks: |
| // |
| // instance is Null || instance is T || instance is Future<T> |
| // |
| if (type_class.IsFutureOrClass()) { |
| return false; |
| } |
| |
| // This function should only be called for generic classes. |
| ASSERT(type_class.NumTypeParameters() > 0 && |
| type.arguments() != TypeArguments::null()); |
| |
|   // If the type class is implemented, the different implementations might |
|   // store their type argument vector at different offsets, so we cannot use |
|   // our optimized [CidRange]-based implementation. |
|   // |
|   // TODO(kustermann): If the class is implemented but all implementations |
|   // store the instantiator type argument vector at the same offset, we can |
|   // still do it! |
| if (type_class.is_implemented()) { |
| return false; |
| } |
| |
| const TypeArguments& ta = |
| TypeArguments::Handle(zone, Type::Cast(type).arguments()); |
| ASSERT(ta.Length() == num_type_arguments); |
| |
|   // The last [num_type_parameters] entries in the [TypeArguments] vector [ta] |
|   // are the values we have to check against. Ensure each of them can either |
|   // be handled via a [CidRange]-based check or is a type parameter. |
| AbstractType& type_arg = AbstractType::Handle(zone); |
| for (intptr_t i = 0; i < num_type_parameters; ++i) { |
| type_arg = ta.TypeAt(num_type_arguments - num_type_parameters + i); |
| if (!CanUseSubtypeRangeCheckFor(type_arg) && !type_arg.IsTypeParameter()) { |
| return false; |
| } |
| } |
| |
| return true; |
| } |
| |
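| // Returns true (and sets the limits) only when the subtype ranges for [type] |
| // collapse to a single legal [CidRange], so that an instance-of test can be |
| // implemented as one class-id range comparison. |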
| bool HierarchyInfo::InstanceOfHasClassRange(const AbstractType& type, |
| intptr_t* lower_limit, |
| intptr_t* upper_limit) { |
| if (CanUseSubtypeRangeCheckFor(type)) { |
| const Class& type_class = |
| Class::Handle(thread()->zone(), type.type_class()); |
| const CidRangeVector& ranges = SubtypeRangesForClass(type_class); |
| if (ranges.length() == 1) { |
| const CidRange& range = ranges[0]; |
| if (!range.IsIllegalRange()) { |
| *lower_limit = range.cid_start; |
| *upper_limit = range.cid_end; |
| return true; |
| } |
| } |
| } |
| return false; |
| } |
| |
| #if defined(DEBUG) |
| void Instruction::CheckField(const Field& field) const { |
| ASSERT(field.IsZoneHandle()); |
| ASSERT(!Compiler::IsBackgroundCompilation() || !field.IsOriginal()); |
| } |
| #endif // DEBUG |
| |
| Definition::Definition(intptr_t deopt_id) |
| : Instruction(deopt_id), |
| range_(NULL), |
| type_(NULL), |
| temp_index_(-1), |
| ssa_temp_index_(-1), |
| input_use_list_(NULL), |
| env_use_list_(NULL), |
| constant_value_(NULL) {} |
| |
| // A value in the constant propagation lattice. |
| // - non-constant sentinel |
| // - a constant (any non-sentinel value) |
| // - unknown sentinel |
| Object& Definition::constant_value() { |
| if (constant_value_ == NULL) { |
| constant_value_ = &Object::ZoneHandle(ConstantPropagator::Unknown()); |
| } |
| return *constant_value_; |
| } |
| |
| Definition* Definition::OriginalDefinition() { |
| Definition* defn = this; |
| while (defn->IsRedefinition() || defn->IsAssertAssignable()) { |
| if (defn->IsRedefinition()) { |
| defn = defn->AsRedefinition()->value()->definition(); |
| } else { |
| defn = defn->AsAssertAssignable()->value()->definition(); |
| } |
| } |
| return defn; |
| } |
| |
| const ICData* Instruction::GetICData( |
| const ZoneGrowableArray<const ICData*>& ic_data_array) const { |
| // The deopt_id can be outside the range of the IC data array for |
| // computations added in the optimizing compiler. |
| ASSERT(deopt_id_ != DeoptId::kNone); |
| if (deopt_id_ < ic_data_array.length()) { |
| const ICData* result = ic_data_array[deopt_id_]; |
| #if defined(TAG_IC_DATA) |
| if (result != NULL) { |
| ICData::Tag ic_data_tag = ICData::Tag::kUnknown; |
| switch (tag()) { |
| case kInstanceCall: |
| ic_data_tag = ICData::Tag::kInstanceCall; |
| break; |
| case kStaticCall: |
| ic_data_tag = ICData::Tag::kStaticCall; |
| break; |
| default: |
| UNREACHABLE(); |
| } |
| if (result->tag() == ICData::Tag::kUnknown) { |
| result->set_tag(ic_data_tag); |
| } else if (result->tag() != ic_data_tag) { |
| FATAL("ICData tag mismatch"); |
| } |
| } |
| #endif |
| return result; |
| } |
| return NULL; |
| } |
| |
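| // Simple polynomial (base-31) hash combining the instruction tag with the |
| // SSA temp indices of all inputs. |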
| intptr_t Instruction::Hashcode() const { |
| intptr_t result = tag(); |
| for (intptr_t i = 0; i < InputCount(); ++i) { |
| Value* value = InputAt(i); |
| intptr_t j = value->definition()->ssa_temp_index(); |
| result = result * 31 + j; |
| } |
| return result; |
| } |
| |
| bool Instruction::Equals(Instruction* other) const { |
| if (tag() != other->tag()) return false; |
| if (InputCount() != other->InputCount()) return false; |
| for (intptr_t i = 0; i < InputCount(); ++i) { |
| if (!InputAt(i)->Equals(other->InputAt(i))) return false; |
| } |
| return AttributesEqual(other); |
| } |
| |
| void Instruction::Unsupported(FlowGraphCompiler* compiler) { |
| compiler->Bailout(ToCString()); |
| UNREACHABLE(); |
| } |
| |
| bool Value::Equals(Value* other) const { |
| return definition() == other->definition(); |
| } |
| |
| static int OrderById(CidRange* const* a, CidRange* const* b) { |
| // Negative if 'a' should sort before 'b'. |
| ASSERT((*a)->IsSingleCid()); |
| ASSERT((*b)->IsSingleCid()); |
| return (*a)->cid_start - (*b)->cid_start; |
| } |
| |
| static int OrderByFrequency(CidRange* const* a, CidRange* const* b) { |
| const TargetInfo* target_info_a = static_cast<const TargetInfo*>(*a); |
| const TargetInfo* target_info_b = static_cast<const TargetInfo*>(*b); |
| // Negative if 'a' should sort before 'b'. |
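|   // This sorts by descending call count, so the most frequently seen targets |
|   // come first. |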
| return target_info_b->count - target_info_a->count; |
| } |
| |
| bool Cids::Equals(const Cids& other) const { |
| if (length() != other.length()) return false; |
| for (int i = 0; i < length(); i++) { |
| if (cid_ranges_[i]->cid_start != other.cid_ranges_[i]->cid_start || |
| cid_ranges_[i]->cid_end != other.cid_ranges_[i]->cid_end) { |
| return false; |
| } |
| } |
| return true; |
| } |
| |
| intptr_t Cids::ComputeLowestCid() const { |
| intptr_t min = kIntptrMax; |
| for (intptr_t i = 0; i < cid_ranges_.length(); ++i) { |
| min = Utils::Minimum(min, cid_ranges_[i]->cid_start); |
| } |
| return min; |
| } |
| |
| intptr_t Cids::ComputeHighestCid() const { |
| intptr_t max = -1; |
| for (intptr_t i = 0; i < cid_ranges_.length(); ++i) { |
| max = Utils::Maximum(max, cid_ranges_[i]->cid_end); |
| } |
| return max; |
| } |
| |
| bool Cids::HasClassId(intptr_t cid) const { |
| for (int i = 0; i < length(); i++) { |
| if (cid_ranges_[i]->Contains(cid)) { |
| return true; |
| } |
| } |
| return false; |
| } |
| |
| Cids* Cids::CreateMonomorphic(Zone* zone, intptr_t cid) { |
| Cids* cids = new (zone) Cids(zone); |
| cids->Add(new (zone) CidRange(cid, cid)); |
| return cids; |
| } |
| |
| Cids* Cids::Create(Zone* zone, const ICData& ic_data, int argument_number) { |
| Cids* cids = new (zone) Cids(zone); |
| cids->CreateHelper(zone, ic_data, argument_number, |
| /* include_targets = */ false); |
| cids->Sort(OrderById); |
| |
| // Merge adjacent class id ranges. |
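|   // For example (hypothetical cids), the sorted single-cid ranges [1-1], |
|   // [2-2] and [5-5] become [1-2] and [5-5]. |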
| int dest = 0; |
| for (int src = 1; src < cids->length(); src++) { |
| if (cids->cid_ranges_[dest]->cid_end + 1 >= |
| cids->cid_ranges_[src]->cid_start) { |
| cids->cid_ranges_[dest]->cid_end = cids->cid_ranges_[src]->cid_end; |
| } else { |
| dest++; |
| if (src != dest) cids->cid_ranges_[dest] = cids->cid_ranges_[src]; |
| } |
| } |
| cids->SetLength(dest + 1); |
| |
| return cids; |
| } |
| |
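| // Adds one single-cid entry per IC data check that was actually hit; when |
| // [include_targets] is true, the entries are [TargetInfo]s that also carry |
| // the call target, its count and its exactness. |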
| void Cids::CreateHelper(Zone* zone, |
| const ICData& ic_data, |
| int argument_number, |
| bool include_targets) { |
| ASSERT(argument_number < ic_data.NumArgsTested()); |
| |
| if (ic_data.NumberOfChecks() == 0) return; |
| |
| Function& dummy = Function::Handle(zone); |
| |
| bool check_one_arg = ic_data.NumArgsTested() == 1; |
| |
| int checks = ic_data.NumberOfChecks(); |
| for (int i = 0; i < checks; i++) { |
| if (ic_data.GetCountAt(i) == 0) continue; |
| intptr_t id = 0; |
| if (check_one_arg) { |
| ic_data.GetOneClassCheckAt(i, &id, &dummy); |
| } else { |
| GrowableArray<intptr_t> arg_ids; |
| ic_data.GetCheckAt(i, &arg_ids, &dummy); |
| id = arg_ids[argument_number]; |
| } |
| if (include_targets) { |
| Function& function = Function::ZoneHandle(zone, ic_data.GetTargetAt(i)); |
| cid_ranges_.Add(new (zone) TargetInfo( |
| id, id, &function, ic_data.GetCountAt(i), ic_data.GetExactnessAt(i))); |
| } else { |
| cid_ranges_.Add(new (zone) CidRange(id, id)); |
| } |
| } |
| } |
| |
| bool Cids::IsMonomorphic() const { |
| if (length() != 1) return false; |
| return cid_ranges_[0]->IsSingleCid(); |
| } |
| |
| intptr_t Cids::MonomorphicReceiverCid() const { |
| ASSERT(IsMonomorphic()); |
| return cid_ranges_[0]->cid_start; |
| } |
| |
| CheckClassInstr::CheckClassInstr(Value* value, |
| intptr_t deopt_id, |
| const Cids& cids, |
| TokenPosition token_pos) |
| : TemplateInstruction(deopt_id), |
| cids_(cids), |
| licm_hoisted_(false), |
| is_bit_test_(IsCompactCidRange(cids)), |
| token_pos_(token_pos) { |
| // Expected useful check data. |
| const intptr_t number_of_checks = cids.length(); |
| ASSERT(number_of_checks > 0); |
| SetInputAt(0, value); |
| // Otherwise use CheckSmiInstr. |
| ASSERT(number_of_checks != 1 || !cids[0].IsSingleCid() || |
| cids[0].cid_start != kSmiCid); |
| } |
| |
| bool CheckClassInstr::AttributesEqual(Instruction* other) const { |
| CheckClassInstr* other_check = other->AsCheckClass(); |
| ASSERT(other_check != NULL); |
| return cids().Equals(other_check->cids()); |
| } |
| |
| bool CheckClassInstr::IsDeoptIfNull() const { |
| if (!cids().IsMonomorphic()) { |
| return false; |
| } |
| CompileType* in_type = value()->Type(); |
| const intptr_t cid = cids().MonomorphicReceiverCid(); |
| // Performance check: use CheckSmiInstr instead. |
| ASSERT(cid != kSmiCid); |
| return in_type->is_nullable() && (in_type->ToNullableCid() == cid); |
| } |
| |
| // The null object is a singleton of the null class (except for some sentinel, |
| // transitional temporaries). Instead of checking against the null class only, |
| // we can therefore check against the null instance. |
| bool CheckClassInstr::IsDeoptIfNotNull() const { |
| if (!cids().IsMonomorphic()) { |
| return false; |
| } |
| const intptr_t cid = cids().MonomorphicReceiverCid(); |
| return cid == kNullCid; |
| } |
| |
| bool CheckClassInstr::IsCompactCidRange(const Cids& cids) { |
| const intptr_t number_of_checks = cids.length(); |
| // If there are only two checks, the extra register pressure needed for the |
| // dense-cid-range code is not justified. |
| if (number_of_checks <= 2) return false; |
| |
| // TODO(fschneider): Support smis in dense cid checks. |
| if (cids.HasClassId(kSmiCid)) return false; |
| |
| intptr_t min = cids.ComputeLowestCid(); |
| intptr_t max = cids.ComputeHighestCid(); |
| return (max - min) < kBitsPerWord; |
| } |
| |
| bool CheckClassInstr::IsBitTest() const { |
| return is_bit_test_; |
| } |
| |
| intptr_t CheckClassInstr::ComputeCidMask() const { |
| ASSERT(IsBitTest()); |
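|   // The mask has the bits (cid_start - min) .. (cid_end - min) set for every |
|   // covered range, e.g. (hypothetical cids) the ranges {5-7, 10} with min 5 |
|   // yield the mask 0b100111. |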
| intptr_t min = cids_.ComputeLowestCid(); |
| intptr_t mask = 0; |
| for (intptr_t i = 0; i < cids_.length(); ++i) { |
| intptr_t run; |
| uintptr_t range = 1ul + cids_[i].Extent(); |
| if (range >= static_cast<uintptr_t>(kBitsPerWord)) { |
| run = -1; |
| } else { |
| run = (1 << range) - 1; |
| } |
| mask |= run << (cids_[i].cid_start - min); |
| } |
| return mask; |
| } |
| |
| const NativeFieldDesc* NativeFieldDesc::Get(Kind kind) { |
| static const NativeFieldDesc fields[] = { |
| #define IMMUTABLE true |
| #define MUTABLE false |
| #define DEFINE_NATIVE_FIELD(ClassName, FieldName, cid, mutability) \ |
| NativeFieldDesc(k##ClassName##_##FieldName, ClassName::FieldName##_offset(), \ |
| k##cid##Cid, mutability), |
| |
| NATIVE_FIELDS_LIST(DEFINE_NATIVE_FIELD) |
| |
| #undef DEFINE_NATIVE_FIELD |
| #undef MUTABLE |
| #undef IMMUTABLE |
| }; |
| |
| return &fields[kind]; |
| } |
| |
| const NativeFieldDesc* NativeFieldDesc::GetLengthFieldForArrayCid( |
| intptr_t array_cid) { |
| if (RawObject::IsExternalTypedDataClassId(array_cid) || |
| RawObject::IsTypedDataClassId(array_cid)) { |
| return Get(kTypedData_length); |
| } |
| |
| switch (array_cid) { |
| case kGrowableObjectArrayCid: |
| return Get(kGrowableObjectArray_length); |
| |
| case kOneByteStringCid: |
| case kTwoByteStringCid: |
| case kExternalOneByteStringCid: |
| case kExternalTwoByteStringCid: |
| return Get(kString_length); |
| |
| case kArrayCid: |
| case kImmutableArrayCid: |
| return Get(kArray_length); |
| |
| default: |
| UNREACHABLE(); |
| return nullptr; |
| } |
| } |
| |
| const NativeFieldDesc* NativeFieldDesc::GetTypeArgumentsField(Zone* zone, |
| intptr_t offset) { |
| // TODO(vegorov) consider caching type arguments fields for specific classes |
| // in some sort of a flow-graph specific cache. |
| ASSERT(offset != Class::kNoTypeArguments); |
| return new (zone) NativeFieldDesc(kTypeArguments, offset, kDynamicCid, |
| /*immutable=*/true); |
| } |
| |
| const NativeFieldDesc* NativeFieldDesc::GetTypeArgumentsFieldFor( |
| Zone* zone, |
| const Class& cls) { |
| return GetTypeArgumentsField(zone, cls.type_arguments_field_offset()); |
| } |
| |
| RawAbstractType* NativeFieldDesc::type() const { |
| if (cid() == kSmiCid) { |
| return Type::SmiType(); |
| } |
| |
| return Type::DynamicType(); |
| } |
| |
| const char* NativeFieldDesc::name() const { |
| switch (kind()) { |
| #define HANDLE_CASE(ClassName, FieldName, cid, mutability) \ |
| case k##ClassName##_##FieldName: \ |
| return #ClassName "." #FieldName; |
| |
| NATIVE_FIELDS_LIST(HANDLE_CASE) |
| |
| #undef HANDLE_CASE |
| case kTypeArguments: |
| return ":type_arguments"; |
| } |
| UNREACHABLE(); |
| return nullptr; |
| } |
| |
| bool LoadFieldInstr::IsUnboxedLoad() const { |
| return FLAG_unbox_numeric_fields && (field() != NULL) && |
| FlowGraphCompiler::IsUnboxedField(*field()); |
| } |
| |
| bool LoadFieldInstr::IsPotentialUnboxedLoad() const { |
| return FLAG_unbox_numeric_fields && (field() != NULL) && |
| FlowGraphCompiler::IsPotentialUnboxedField(*field()); |
| } |
| |
| Representation LoadFieldInstr::representation() const { |
| if (IsUnboxedLoad()) { |
| const intptr_t cid = field()->UnboxedFieldCid(); |
| switch (cid) { |
| case kDoubleCid: |
| return kUnboxedDouble; |
| case kFloat32x4Cid: |
| return kUnboxedFloat32x4; |
| case kFloat64x2Cid: |
| return kUnboxedFloat64x2; |
| default: |
| UNREACHABLE(); |
| } |
| } |
| return kTagged; |
| } |
| |
| bool StoreInstanceFieldInstr::IsUnboxedStore() const { |
| return FLAG_unbox_numeric_fields && !field().IsNull() && |
| FlowGraphCompiler::IsUnboxedField(field()); |
| } |
| |
| bool StoreInstanceFieldInstr::IsPotentialUnboxedStore() const { |
| return FLAG_unbox_numeric_fields && !field().IsNull() && |
| FlowGraphCompiler::IsPotentialUnboxedField(field()); |
| } |
| |
| Representation StoreInstanceFieldInstr::RequiredInputRepresentation( |
| intptr_t index) const { |
| ASSERT((index == 0) || (index == 1)); |
| if ((index == 1) && IsUnboxedStore()) { |
| const intptr_t cid = field().UnboxedFieldCid(); |
| switch (cid) { |
| case kDoubleCid: |
| return kUnboxedDouble; |
| case kFloat32x4Cid: |
| return kUnboxedFloat32x4; |
| case kFloat64x2Cid: |
| return kUnboxedFloat64x2; |
| default: |
| UNREACHABLE(); |
| } |
| } |
| return kTagged; |
| } |
| |
| bool GuardFieldClassInstr::AttributesEqual(Instruction* other) const { |
| return field().raw() == other->AsGuardFieldClass()->field().raw(); |
| } |
| |
| bool GuardFieldLengthInstr::AttributesEqual(Instruction* other) const { |
| return field().raw() == other->AsGuardFieldLength()->field().raw(); |
| } |
| |
| bool GuardFieldTypeInstr::AttributesEqual(Instruction* other) const { |
| return field().raw() == other->AsGuardFieldType()->field().raw(); |
| } |
| |
| bool AssertAssignableInstr::AttributesEqual(Instruction* other) const { |
| AssertAssignableInstr* other_assert = other->AsAssertAssignable(); |
| ASSERT(other_assert != NULL); |
| // This predicate has to be commutative for DominatorBasedCSE to work. |
| // TODO(fschneider): Eliminate more asserts with subtype relation. |
| return dst_type().raw() == other_assert->dst_type().raw(); |
| } |
| |
| Instruction* AssertSubtypeInstr::Canonicalize(FlowGraph* flow_graph) { |
| // If all values for type parameters are known (i.e. from instantiator and |
| // function) we can instantiate the sub and super type and remove this |
| // instruction if the subtype test succeeds. |
| ConstantInstr* constant_instantiator_type_args = |
| instantiator_type_arguments()->definition()->AsConstant(); |
| ConstantInstr* constant_function_type_args = |
| function_type_arguments()->definition()->AsConstant(); |
| if ((constant_instantiator_type_args != NULL) && |
| (constant_function_type_args != NULL)) { |
| ASSERT(constant_instantiator_type_args->value().IsNull() || |
| constant_instantiator_type_args->value().IsTypeArguments()); |
| ASSERT(constant_function_type_args->value().IsNull() || |
| constant_function_type_args->value().IsTypeArguments()); |
| |
| Zone* Z = Thread::Current()->zone(); |
| const TypeArguments& instantiator_type_args = TypeArguments::Handle( |
| Z, |
| TypeArguments::RawCast(constant_instantiator_type_args->value().raw())); |
| |
| const TypeArguments& function_type_args = TypeArguments::Handle( |
| Z, TypeArguments::RawCast(constant_function_type_args->value().raw())); |
| |
| Error& error_bound = Error::Handle(Z); |
| |
| AbstractType& sub_type = AbstractType::Handle(Z, sub_type_.raw()); |
| AbstractType& super_type = AbstractType::Handle(Z, super_type_.raw()); |
| if (AbstractType::InstantiateAndTestSubtype( |
| &sub_type, &super_type, &error_bound, instantiator_type_args, |
| function_type_args)) { |
| return NULL; |
| } |
| } |
| return this; |
| } |
| |
| bool AssertSubtypeInstr::AttributesEqual(Instruction* other) const { |
| AssertSubtypeInstr* other_assert = other->AsAssertSubtype(); |
| ASSERT(other_assert != NULL); |
| return super_type().raw() == other_assert->super_type().raw() && |
| sub_type().raw() == other_assert->sub_type().raw(); |
| } |
| |
| bool StrictCompareInstr::AttributesEqual(Instruction* other) const { |
| StrictCompareInstr* other_op = other->AsStrictCompare(); |
| ASSERT(other_op != NULL); |
| return ComparisonInstr::AttributesEqual(other) && |
| (needs_number_check() == other_op->needs_number_check()); |
| } |
| |
| bool MathMinMaxInstr::AttributesEqual(Instruction* other) const { |
| MathMinMaxInstr* other_op = other->AsMathMinMax(); |
| ASSERT(other_op != NULL); |
| return (op_kind() == other_op->op_kind()) && |
| (result_cid() == other_op->result_cid()); |
| } |
| |
| bool BinaryIntegerOpInstr::AttributesEqual(Instruction* other) const { |
| ASSERT(other->tag() == tag()); |
| BinaryIntegerOpInstr* other_op = other->AsBinaryIntegerOp(); |
| return (op_kind() == other_op->op_kind()) && |
| (can_overflow() == other_op->can_overflow()) && |
| (is_truncating() == other_op->is_truncating()); |
| } |
| |
| bool LoadFieldInstr::AttributesEqual(Instruction* other) const { |
| LoadFieldInstr* other_load = other->AsLoadField(); |
| ASSERT(other_load != NULL); |
| if (field() != NULL) { |
| return (other_load->field() != NULL) && |
| (field()->raw() == other_load->field()->raw()); |
| } |
| return (other_load->field() == NULL) && |
| (offset_in_bytes() == other_load->offset_in_bytes()); |
| } |
| |
| Instruction* InitStaticFieldInstr::Canonicalize(FlowGraph* flow_graph) { |
| const bool is_initialized = |
| (field_.StaticValue() != Object::sentinel().raw()) && |
| (field_.StaticValue() != Object::transition_sentinel().raw()); |
| // When precompiling, the fact that a field is currently initialized does not |
| // make it safe to omit code that checks if the field needs initialization |
| // because the field will be reset so it starts uninitialized in the process |
| // running the precompiled code. We must be prepared to reinitialize fields. |
| return is_initialized && !FLAG_fields_may_be_reset ? NULL : this; |
| } |
| |
| bool LoadStaticFieldInstr::AttributesEqual(Instruction* other) const { |
| LoadStaticFieldInstr* other_load = other->AsLoadStaticField(); |
| ASSERT(other_load != NULL); |
| // Assert that the field is initialized. |
| ASSERT(StaticField().StaticValue() != Object::sentinel().raw()); |
| ASSERT(StaticField().StaticValue() != Object::transition_sentinel().raw()); |
| return StaticField().raw() == other_load->StaticField().raw(); |
| } |
| |
| const Field& LoadStaticFieldInstr::StaticField() const { |
| return Field::Cast(field_value()->BoundConstant()); |
| } |
| |
| bool LoadStaticFieldInstr::IsFieldInitialized() const { |
| const Field& field = StaticField(); |
| return (field.StaticValue() != Object::sentinel().raw()) && |
| (field.StaticValue() != Object::transition_sentinel().raw()); |
| } |
| |
| ConstantInstr::ConstantInstr(const Object& value, TokenPosition token_pos) |
| : value_(value), token_pos_(token_pos) { |
| // Check that the value is not an incorrect Integer representation. |
| ASSERT(!value.IsMint() || !Smi::IsValid(Mint::Cast(value).AsInt64Value())); |
| ASSERT(!value.IsField() || Field::Cast(value).IsOriginal()); |
| ASSERT(value.IsSmi() || value.IsOld()); |
| } |
| |
| bool ConstantInstr::AttributesEqual(Instruction* other) const { |
| ConstantInstr* other_constant = other->AsConstant(); |
| ASSERT(other_constant != NULL); |
| return (value().raw() == other_constant->value().raw()); |
| } |
| |
| UnboxedConstantInstr::UnboxedConstantInstr(const Object& value, |
| Representation representation) |
| : ConstantInstr(value), |
| representation_(representation), |
| constant_address_(0) { |
| if (representation_ == kUnboxedDouble) { |
| ASSERT(value.IsDouble()); |
| constant_address_ = FindDoubleConstant(Double::Cast(value).value()); |
| } |
| } |
| |
| // Returns true if the value represents a constant. |
| bool Value::BindsToConstant() const { |
| return definition()->IsConstant(); |
| } |
| |
| // Returns true if the value represents constant null. |
| bool Value::BindsToConstantNull() const { |
| ConstantInstr* constant = definition()->AsConstant(); |
| return (constant != NULL) && constant->value().IsNull(); |
| } |
| |
| const Object& Value::BoundConstant() const { |
| ASSERT(BindsToConstant()); |
| ConstantInstr* constant = definition()->AsConstant(); |
| ASSERT(constant != NULL); |
| return constant->value(); |
| } |
| |
| GraphEntryInstr::GraphEntryInstr(const ParsedFunction& parsed_function, |
| intptr_t osr_id) |
| : BlockEntryWithInitialDefs(0, |
| kInvalidTryIndex, |
| CompilerState::Current().GetNextDeoptId()), |
| parsed_function_(parsed_function), |
| catch_entries_(), |
| indirect_entries_(), |
| osr_id_(osr_id), |
| entry_count_(0), |
| spill_slot_count_(0), |
| fixed_slot_count_(0) {} |
| |
| ConstantInstr* GraphEntryInstr::constant_null() { |
| ASSERT(initial_definitions()->length() > 0); |
| for (intptr_t i = 0; i < initial_definitions()->length(); ++i) { |
| ConstantInstr* defn = (*initial_definitions())[i]->AsConstant(); |
| if (defn != NULL && defn->value().IsNull()) return defn; |
| } |
| UNREACHABLE(); |
| return NULL; |
| } |
| |
| CatchBlockEntryInstr* GraphEntryInstr::GetCatchEntry(intptr_t index) { |
| // TODO(fschneider): Sort the catch entries by catch_try_index to avoid |
| // searching. |
| for (intptr_t i = 0; i < catch_entries_.length(); ++i) { |
| if (catch_entries_[i]->catch_try_index() == index) return catch_entries_[i]; |
| } |
| return NULL; |
| } |
| |
| bool GraphEntryInstr::IsCompiledForOsr() const { |
| return osr_id_ != Compiler::kNoOSRDeoptId; |
| } |
| |
| // ==== Support for visiting flow graphs. |
| |
| #define DEFINE_ACCEPT(ShortName, Attrs) \ |
| void ShortName##Instr::Accept(FlowGraphVisitor* visitor) { \ |
| visitor->Visit##ShortName(this); \ |
| } |
| |
| FOR_EACH_INSTRUCTION(DEFINE_ACCEPT) |
| |
| #undef DEFINE_ACCEPT |
| |
| void Instruction::SetEnvironment(Environment* deopt_env) { |
| intptr_t use_index = 0; |
| for (Environment::DeepIterator it(deopt_env); !it.Done(); it.Advance()) { |
| Value* use = it.CurrentValue(); |
| use->set_instruction(this); |
| use->set_use_index(use_index++); |
| } |
| env_ = deopt_env; |
| } |
| |
| void Instruction::RemoveEnvironment() { |
| for (Environment::DeepIterator it(env()); !it.Done(); it.Advance()) { |
| it.CurrentValue()->RemoveFromUseList(); |
| } |
| env_ = NULL; |
| } |
| |
| Instruction* Instruction::RemoveFromGraph(bool return_previous) { |
| ASSERT(!IsBlockEntry()); |
| ASSERT(!IsBranch()); |
| ASSERT(!IsThrow()); |
| ASSERT(!IsReturn()); |
| ASSERT(!IsReThrow()); |
| ASSERT(!IsGoto()); |
| ASSERT(previous() != NULL); |
| // We cannot assert that the instruction, if it is a definition, has no |
| // uses. This function is used to remove instructions from the graph and |
| // reinsert them elsewhere (e.g., hoisting). |
| Instruction* prev_instr = previous(); |
| Instruction* next_instr = next(); |
| ASSERT(next_instr != NULL); |
| ASSERT(!next_instr->IsBlockEntry()); |
| prev_instr->LinkTo(next_instr); |
| UnuseAllInputs(); |
| // Reset the successor and previous instruction to indicate that the |
| // instruction is removed from the graph. |
| set_previous(NULL); |
| set_next(NULL); |
| return return_previous ? prev_instr : next_instr; |
| } |
| |
| void Instruction::InsertAfter(Instruction* prev) { |
| ASSERT(previous_ == NULL); |
| ASSERT(next_ == NULL); |
| previous_ = prev; |
| next_ = prev->next_; |
| next_->previous_ = this; |
| previous_->next_ = this; |
| |
| // Update def-use chains whenever instructions are added to the graph |
| // after initial graph construction. |
| for (intptr_t i = InputCount() - 1; i >= 0; --i) { |
| Value* input = InputAt(i); |
| input->definition()->AddInputUse(input); |
| } |
| } |
| |
| Instruction* Instruction::AppendInstruction(Instruction* tail) { |
| LinkTo(tail); |
| // Update def-use chains whenever instructions are added to the graph |
| // after initial graph construction. |
| for (intptr_t i = tail->InputCount() - 1; i >= 0; --i) { |
| Value* input = tail->InputAt(i); |
| input->definition()->AddInputUse(input); |
| } |
| return tail; |
| } |
| |
| BlockEntryInstr* Instruction::GetBlock() { |
| // TODO(fschneider): Implement a faster way to get the block of an |
| // instruction. |
| ASSERT(previous() != NULL); |
| Instruction* result = previous(); |
| while (!result->IsBlockEntry()) |
| result = result->previous(); |
| return result->AsBlockEntry(); |
| } |
| |
| void ForwardInstructionIterator::RemoveCurrentFromGraph() { |
| current_ = current_->RemoveFromGraph(true); // Set current_ to previous. |
| } |
| |
| void BackwardInstructionIterator::RemoveCurrentFromGraph() { |
| current_ = current_->RemoveFromGraph(false); // Set current_ to next. |
| } |
| |
| // Default implementation of visiting basic blocks. Can be overridden. |
| void FlowGraphVisitor::VisitBlocks() { |
| ASSERT(current_iterator_ == NULL); |
| for (intptr_t i = 0; i < block_order_.length(); ++i) { |
| BlockEntryInstr* entry = block_order_[i]; |
| entry->Accept(this); |
| ForwardInstructionIterator it(entry); |
| current_iterator_ = ⁢ |
| for (; !it.Done(); it.Advance()) { |
| it.Current()->Accept(this); |
| } |
| current_iterator_ = NULL; |
| } |
| } |
| |
| bool Value::NeedsWriteBarrier() { |
| if (Type()->IsNull() || (Type()->ToNullableCid() == kSmiCid) || |
| (Type()->ToNullableCid() == kBoolCid)) { |
| return false; |
| } |
| |
| // Strictly speaking, the incremental barrier can only be skipped for |
| // immediate objects (Smis) or permanent objects (vm-isolate heap or |
| // image pages). Here we choose to skip the barrier for any constant on |
| // the assumption it will remain reachable through the object pool. |
| // TODO(concurrent-marking): Consider ensuring marking is not in progress |
| // when code is disabled or only omitting the barrier if code collection |
| // is disabled. |
| |
| return !BindsToConstant(); |
| } |
| |
| void JoinEntryInstr::AddPredecessor(BlockEntryInstr* predecessor) { |
| // Require the predecessors to be sorted by block_id to make managing |
| // their corresponding phi inputs simpler. |
| intptr_t pred_id = predecessor->block_id(); |
| intptr_t index = 0; |
| while ((index < predecessors_.length()) && |
| (predecessors_[index]->block_id() < pred_id)) { |
| ++index; |
| } |
| #if defined(DEBUG) |
| for (intptr_t i = index; i < predecessors_.length(); ++i) { |
| ASSERT(predecessors_[i]->block_id() != pred_id); |
| } |
| #endif |
| predecessors_.InsertAt(index, predecessor); |
| } |
| |
| intptr_t JoinEntryInstr::IndexOfPredecessor(BlockEntryInstr* pred) const { |
| for (intptr_t i = 0; i < predecessors_.length(); ++i) { |
| if (predecessors_[i] == pred) return i; |
| } |
| return -1; |
| } |
| |
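| // Prepends [value] to the doubly-linked use list headed by [*list]. |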
| void Value::AddToList(Value* value, Value** list) { |
| ASSERT(value->next_use() == nullptr); |
| ASSERT(value->previous_use() == nullptr); |
| Value* next = *list; |
| ASSERT(value != next); |
| *list = value; |
| value->set_next_use(next); |
| value->set_previous_use(NULL); |
| if (next != NULL) next->set_previous_use(value); |
| } |
| |
| void Value::RemoveFromUseList() { |
| Definition* def = definition(); |
| Value* next = next_use(); |
| if (this == def->input_use_list()) { |
| def->set_input_use_list(next); |
| if (next != NULL) next->set_previous_use(NULL); |
| } else if (this == def->env_use_list()) { |
| def->set_env_use_list(next); |
| if (next != NULL) next->set_previous_use(NULL); |
| } else { |
| Value* prev = previous_use(); |
| prev->set_next_use(next); |
| if (next != NULL) next->set_previous_use(prev); |
| } |
| |
| set_previous_use(NULL); |
| set_next_use(NULL); |
| } |
| |
| // True if the definition has a single input use and is used only in |
| // environments at the same instruction as that input use. |
| bool Definition::HasOnlyUse(Value* use) const { |
| if (!HasOnlyInputUse(use)) { |
| return false; |
| } |
| |
| Instruction* target = use->instruction(); |
| for (Value::Iterator it(env_use_list()); !it.Done(); it.Advance()) { |
| if (it.Current()->instruction() != target) return false; |
| } |
| return true; |
| } |
| |
| bool Definition::HasOnlyInputUse(Value* use) const { |
| return (input_use_list() == use) && (use->next_use() == NULL); |
| } |
| |
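| // Rebinds every input and environment use of this definition to [other] and |
| // prepends those uses to [other]'s corresponding use lists. |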
| void Definition::ReplaceUsesWith(Definition* other) { |
| ASSERT(other != NULL); |
| ASSERT(this != other); |
| |
| Value* current = NULL; |
| Value* next = input_use_list(); |
| if (next != NULL) { |
| // Change all the definitions. |
| while (next != NULL) { |
| current = next; |
| current->set_definition(other); |
| next = current->next_use(); |
| } |
| |
| // Concatenate the lists. |
| next = other->input_use_list(); |
| current->set_next_use(next); |
| if (next != NULL) next->set_previous_use(current); |
| other->set_input_use_list(input_use_list()); |
| set_input_use_list(NULL); |
| } |
| |
| // Repeat for environment uses. |
| current = NULL; |
| next = env_use_list(); |
| if (next != NULL) { |
| while (next != NULL) { |
| current = next; |
| current->set_definition(other); |
| next = current->next_use(); |
| } |
| next = other->env_use_list(); |
| current->set_next_use(next); |
| if (next != NULL) next->set_previous_use(current); |
| other->set_env_use_list(env_use_list()); |
| set_env_use_list(NULL); |
| } |
| } |
| |
| void Instruction::UnuseAllInputs() { |
| for (intptr_t i = InputCount() - 1; i >= 0; --i) { |
| InputAt(i)->RemoveFromUseList(); |
| } |
| for (Environment::DeepIterator it(env()); !it.Done(); it.Advance()) { |
| it.CurrentValue()->RemoveFromUseList(); |
| } |
| } |
| |
| void Instruction::InheritDeoptTargetAfter(FlowGraph* flow_graph, |
| Definition* call, |
| Definition* result) { |
| ASSERT(call->env() != NULL); |
| deopt_id_ = DeoptId::ToDeoptAfter(call->deopt_id_); |
| call->env()->DeepCopyAfterTo( |
| flow_graph->zone(), this, call->ArgumentCount(), |
| flow_graph->constant_dead(), |
| result != NULL ? result : flow_graph->constant_dead()); |
| env()->set_deopt_id(deopt_id_); |
| } |
| |
| void Instruction::InheritDeoptTarget(Zone* zone, Instruction* other) { |
| ASSERT(other->env() != NULL); |
| CopyDeoptIdFrom(*other); |
| other->env()->DeepCopyTo(zone, this); |
| env()->set_deopt_id(deopt_id_); |
| } |
| |
| void BranchInstr::InheritDeoptTarget(Zone* zone, Instruction* other) { |
| ASSERT(env() == NULL); |
| Instruction::InheritDeoptTarget(zone, other); |
| comparison()->SetDeoptId(*this); |
| } |
| |
| bool Instruction::IsDominatedBy(Instruction* dom) { |
| BlockEntryInstr* block = GetBlock(); |
| BlockEntryInstr* dom_block = dom->GetBlock(); |
| |
| if (dom->IsPhi()) { |
| dom = dom_block; |
| } |
| |
| if (block == dom_block) { |
| if ((block == dom) || (this == block->last_instruction())) { |
| return true; |
| } |
| |
| if (IsPhi()) { |
| return false; |
| } |
| |
| for (Instruction* curr = dom->next(); curr != NULL; curr = curr->next()) { |
| if (curr == this) return true; |
| } |
| |
| return false; |
| } |
| |
| return dom_block->Dominates(block); |
| } |
| |
| bool Instruction::HasUnmatchedInputRepresentations() const { |
| for (intptr_t i = 0; i < InputCount(); i++) { |
| Definition* input = InputAt(i)->definition(); |
| if (RequiredInputRepresentation(i) != input->representation()) { |
| return true; |
| } |
| } |
| |
| return false; |
| } |
| |
| const intptr_t Instruction::kInstructionAttrs[Instruction::kNumInstructions] = { |
| #define INSTR_ATTRS(type, attrs) InstrAttrs::attrs, |
| FOR_EACH_INSTRUCTION(INSTR_ATTRS) |
| #undef INSTR_ATTRS |
| }; |
| |
| bool Instruction::CanTriggerGC() const { |
| return (kInstructionAttrs[tag()] & InstrAttrs::kNoGC) == 0; |
| } |
| |
| void Definition::ReplaceWith(Definition* other, |
| ForwardInstructionIterator* iterator) { |
| // Record other's input uses. |
| for (intptr_t i = other->InputCount() - 1; i >= 0; --i) { |
| Value* input = other->InputAt(i); |
| input->definition()->AddInputUse(input); |
| } |
| // Take other's environment from this definition. |
| ASSERT(other->env() == NULL); |
| other->SetEnvironment(env()); |
| ClearEnv(); |
| // Replace all uses of this definition with other. |
| ReplaceUsesWith(other); |
| // Reuse this instruction's SSA name for other. |
| ASSERT(!other->HasSSATemp()); |
| if (HasSSATemp()) { |
| other->set_ssa_temp_index(ssa_temp_index()); |
| } |
| |
| // Finally insert the other definition in place of this one in the graph. |
| previous()->LinkTo(other); |
| if ((iterator != NULL) && (this == iterator->Current())) { |
| // Remove through the iterator. |
| other->LinkTo(this); |
| iterator->RemoveCurrentFromGraph(); |
| } else { |
| other->LinkTo(next()); |
| // Remove this definition's input uses. |
| UnuseAllInputs(); |
| } |
| set_previous(NULL); |
| set_next(NULL); |
| } |
| |
| void BranchInstr::SetComparison(ComparisonInstr* new_comparison) { |
| for (intptr_t i = new_comparison->InputCount() - 1; i >= 0; --i) { |
| Value* input = new_comparison->InputAt(i); |
| input->definition()->AddInputUse(input); |
| input->set_instruction(this); |
| } |
| // There should be no need to copy or unuse an environment. |
| ASSERT(comparison()->env() == NULL); |
| ASSERT(new_comparison->env() == NULL); |
| // Remove the current comparison's input uses. |
| comparison()->UnuseAllInputs(); |
| ASSERT(!new_comparison->HasUses()); |
| comparison_ = new_comparison; |
| } |
| |
| // ==== Postorder graph traversal. |
| static bool IsMarked(BlockEntryInstr* block, |
| GrowableArray<BlockEntryInstr*>* preorder) { |
| // Detect that a block has been visited as part of the current |
| // DiscoverBlocks (we can call DiscoverBlocks multiple times). The block |
| // will be 'marked' by (1) having a preorder number in the range of the |
| // preorder array and (2) being in the preorder array at that index. |
| intptr_t i = block->preorder_number(); |
| return (i >= 0) && (i < preorder->length()) && ((*preorder)[i] == block); |
| } |
| |
| // Base class implementation used for JoinEntry and TargetEntry. |
| bool BlockEntryInstr::DiscoverBlock(BlockEntryInstr* predecessor, |
| GrowableArray<BlockEntryInstr*>* preorder, |
| GrowableArray<intptr_t>* parent) { |
| // If this block has a predecessor (i.e., is not the graph entry) we can |
| // assume the preorder array is non-empty. |
| ASSERT((predecessor == NULL) || !preorder->is_empty()); |
| // Blocks with a single predecessor cannot have been reached before. |
| ASSERT(IsJoinEntry() || !IsMarked(this, preorder)); |
| |
| // 1. If the block has already been reached, add current_block as a |
| // basic-block predecessor and we are done. |
| if (IsMarked(this, preorder)) { |
| ASSERT(predecessor != NULL); |
| AddPredecessor(predecessor); |
| return false; |
| } |
| |
| // 2. Otherwise, clear the predecessors which might have been computed on |
| // some earlier call to DiscoverBlocks and record this predecessor. |
| ClearPredecessors(); |
| if (predecessor != NULL) AddPredecessor(predecessor); |
| |
| // 3. The predecessor is the spanning-tree parent. The graph entry has no |
| // parent, indicated by -1. |
| intptr_t parent_number = |
| (predecessor == NULL) ? -1 : predecessor->preorder_number(); |
| parent->Add(parent_number); |
| |
| // 4. Assign the preorder number and add the block entry to the list. |
| set_preorder_number(preorder->length()); |
| preorder->Add(this); |
| |
| // The preorder and parent arrays are indexed by |
| // preorder block number, so they should stay in lockstep. |
| ASSERT(preorder->length() == parent->length()); |
| |
| // 5. Iterate straight-line successors to record assigned variables and |
| // find the last instruction in the block. The graph entry block consists |
| // of only the entry instruction, so that is the last instruction in the |
| // block. |
| Instruction* last = this; |
| for (ForwardInstructionIterator it(this); !it.Done(); it.Advance()) { |
| last = it.Current(); |
| } |
| set_last_instruction(last); |
| if (last->IsGoto()) last->AsGoto()->set_block(this); |
| |
| return true; |
| } |
| |
| void GraphEntryInstr::RelinkToOsrEntry(Zone* zone, intptr_t max_block_id) { |
| ASSERT(osr_id_ != Compiler::kNoOSRDeoptId); |
| BitVector* block_marks = new (zone) BitVector(zone, max_block_id + 1); |
| bool found = FindOsrEntryAndRelink(this, /*parent=*/NULL, block_marks); |
| ASSERT(found); |
| } |
| |
| bool BlockEntryInstr::FindOsrEntryAndRelink(GraphEntryInstr* graph_entry, |
| Instruction* parent, |
| BitVector* block_marks) { |
| const intptr_t osr_id = graph_entry->osr_id(); |
| |
| // Search for the instruction with the OSR id. Use a depth first search |
| // because basic blocks have not been discovered yet. Prune unreachable |
| // blocks by replacing the normal entry with a jump to the block |
| // containing the OSR entry point. |
| |
| // Do not visit blocks more than once. |
| if (block_marks->Contains(block_id())) return false; |
| block_marks->Add(block_id()); |
| |
| // Search this block for the OSR id. |
| Instruction* instr = this; |
| for (ForwardInstructionIterator it(this); !it.Done(); it.Advance()) { |
| instr = it.Current(); |
| if (instr->GetDeoptId() == osr_id) { |
| // Sanity check that we found a stack check instruction. |
| ASSERT(instr->IsCheckStackOverflow()); |
|       // Loop stack checks are always in join blocks so that they can |
|       // be the target of a goto. |
| ASSERT(IsJoinEntry()); |
| // The instruction should be the first instruction in the block so |
| // we can simply jump to the beginning of the block. |
| ASSERT(instr->previous() == this); |
| |
| auto normal_entry = graph_entry->normal_entry(); |
| auto osr_entry = new OsrEntryInstr(graph_entry, normal_entry->block_id(), |
| normal_entry->try_index(), |
| normal_entry->deopt_id()); |
| |
| auto goto_join = new GotoInstr(AsJoinEntry(), |
| CompilerState::Current().GetNextDeoptId()); |
| goto_join->CopyDeoptIdFrom(*parent); |
| osr_entry->LinkTo(goto_join); |
| |
| // Remove normal function entries & add osr entry. |
| graph_entry->set_normal_entry(nullptr); |
| graph_entry->set_unchecked_entry(nullptr); |
| graph_entry->set_osr_entry(osr_entry); |
| |
| return true; |
| } |
| } |
| |
| // Recursively search the successors. |
| for (intptr_t i = instr->SuccessorCount() - 1; i >= 0; --i) { |
| if (instr->SuccessorAt(i)->FindOsrEntryAndRelink(graph_entry, instr, |
| block_marks)) { |
| return true; |
| } |
| } |
| return false; |
| } |
| |
| bool BlockEntryInstr::Dominates(BlockEntryInstr* other) const { |
| // TODO(fschneider): Make this faster by e.g. storing dominators for each |
| // block while computing the dominator tree. |
| ASSERT(other != NULL); |
| BlockEntryInstr* current = other; |
| while (current != NULL && current != this) { |
| current = current->dominator(); |
| } |
| return current == this; |
| } |
| |
| BlockEntryInstr* BlockEntryInstr::ImmediateDominator() const { |
| Instruction* last = dominator()->last_instruction(); |
| if ((last->SuccessorCount() == 1) && (last->SuccessorAt(0) == this)) { |
| return dominator(); |
| } |
| return NULL; |
| } |
| |
| bool BlockEntryInstr::IsLoopHeader() const { |
| return loop_info_ != nullptr && loop_info_->header() == this; |
| } |
| |
| // Helper to mutate the graph during inlining. This block should be |
| // replaced with new_block as a predecessor of all of this block's |
| // successors. For each successor, the predecessors will be reordered |
| // to preserve block-order sorting of the predecessors as well as the |
| // phis if the successor is a join. |
| void BlockEntryInstr::ReplaceAsPredecessorWith(BlockEntryInstr* new_block) { |
| // Set the last instruction of the new block to that of the old block. |
| Instruction* last = last_instruction(); |
| new_block->set_last_instruction(last); |
| // For each successor, update the predecessors. |
| for (intptr_t sidx = 0; sidx < last->SuccessorCount(); ++sidx) { |
| // If the successor is a target, update its predecessor. |
| TargetEntryInstr* target = last->SuccessorAt(sidx)->AsTargetEntry(); |
| if (target != NULL) { |
| target->predecessor_ = new_block; |
| continue; |
| } |
| // If the successor is a join, update each predecessor and the phis. |
| JoinEntryInstr* join = last->SuccessorAt(sidx)->AsJoinEntry(); |
| ASSERT(join != NULL); |
| // Find the old predecessor index. |
| intptr_t old_index = join->IndexOfPredecessor(this); |
| intptr_t pred_count = join->PredecessorCount(); |
| ASSERT(old_index >= 0); |
| ASSERT(old_index < pred_count); |
| // Find the new predecessor index while reordering the predecessors. |
| intptr_t new_id = new_block->block_id(); |
| intptr_t new_index = old_index; |
| if (block_id() < new_id) { |
| // Search upwards, bubbling down intermediate predecessors. |
| for (; new_index < pred_count - 1; ++new_index) { |
| if (join->predecessors_[new_index + 1]->block_id() > new_id) break; |
| join->predecessors_[new_index] = join->predecessors_[new_index + 1]; |
| } |
| } else { |
| // Search downwards, bubbling up intermediate predecessors. |
| for (; new_index > 0; --new_index) { |
| if (join->predecessors_[new_index - 1]->block_id() < new_id) break; |
| join->predecessors_[new_index] = join->predecessors_[new_index - 1]; |
| } |
| } |
| join->predecessors_[new_index] = new_block; |
| // If the new and old predecessor index match there is nothing to update. |
| if ((join->phis() == NULL) || (old_index == new_index)) return; |
| // Otherwise, reorder the predecessor uses in each phi. |
| for (PhiIterator it(join); !it.Done(); it.Advance()) { |
| PhiInstr* phi = it.Current(); |
| ASSERT(phi != NULL); |
| ASSERT(pred_count == phi->InputCount()); |
| // Save the predecessor use. |
| Value* pred_use = phi->InputAt(old_index); |
| // Move uses between old and new. |
| intptr_t step = (old_index < new_index) ? 1 : -1; |
| for (intptr_t use_idx = old_index; use_idx != new_index; |
| use_idx += step) { |
| phi->SetInputAt(use_idx, phi->InputAt(use_idx + step)); |
| } |
| // Write the predecessor use. |
| phi->SetInputAt(new_index, pred_use); |
| } |
| } |
| } |
| |
| void BlockEntryInstr::ClearAllInstructions() { |
| JoinEntryInstr* join = this->AsJoinEntry(); |
| if (join != NULL) { |
| for (PhiIterator it(join); !it.Done(); it.Advance()) { |
| it.Current()->UnuseAllInputs(); |
| } |
| } |
| UnuseAllInputs(); |
| for (ForwardInstructionIterator it(this); !it.Done(); it.Advance()) { |
| it.Current()->UnuseAllInputs(); |
| } |
| } |
| |
| PhiInstr* JoinEntryInstr::InsertPhi(intptr_t var_index, intptr_t var_count) { |
| // Lazily initialize the array of phis. |
| // Currently, phis are stored in a sparse array that holds the phi |
| // for variable with index i at position i. |
| // TODO(fschneider): Store phis in a more compact way. |
| if (phis_ == NULL) { |
| phis_ = new ZoneGrowableArray<PhiInstr*>(var_count); |
| for (intptr_t i = 0; i < var_count; i++) { |
| phis_->Add(NULL); |
| } |
| } |
| ASSERT((*phis_)[var_index] == NULL); |
| return (*phis_)[var_index] = new PhiInstr(this, PredecessorCount()); |
| } |
| |
| void JoinEntryInstr::InsertPhi(PhiInstr* phi) { |
| // Lazily initialize the array of phis. |
| if (phis_ == NULL) { |
| phis_ = new ZoneGrowableArray<PhiInstr*>(1); |
| } |
| phis_->Add(phi); |
| } |
| |
| void JoinEntryInstr::RemovePhi(PhiInstr* phi) { |
| ASSERT(phis_ != NULL); |
| for (intptr_t index = 0; index < phis_->length(); ++index) { |
| if (phi == (*phis_)[index]) { |
| (*phis_)[index] = phis_->Last(); |
| phis_->RemoveLast(); |
| return; |
| } |
| } |
| } |
| |
| void JoinEntryInstr::RemoveDeadPhis(Definition* replacement) { |
| if (phis_ == NULL) return; |
| |
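| // Compact the phi array in place: live phis are kept and their input |
| // uses re-registered, while all uses of dead phis are redirected to |
| // [replacement]. |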
| intptr_t to_index = 0; |
| for (intptr_t from_index = 0; from_index < phis_->length(); ++from_index) { |
| PhiInstr* phi = (*phis_)[from_index]; |
| if (phi != NULL) { |
| if (phi->is_alive()) { |
| (*phis_)[to_index++] = phi; |
| for (intptr_t i = phi->InputCount() - 1; i >= 0; --i) { |
| Value* input = phi->InputAt(i); |
| input->definition()->AddInputUse(input); |
| } |
| } else { |
| phi->ReplaceUsesWith(replacement); |
| } |
| } |
| } |
| if (to_index == 0) { |
| phis_ = NULL; |
| } else { |
| phis_->TruncateTo(to_index); |
| } |
| } |
| |
| intptr_t Instruction::SuccessorCount() const { |
| return 0; |
| } |
| |
| BlockEntryInstr* Instruction::SuccessorAt(intptr_t index) const { |
| // Called only if index is in range. Only control-transfer instructions |
| // can have non-zero successor counts and they override this function. |
| UNREACHABLE(); |
| return NULL; |
| } |
| |
| intptr_t GraphEntryInstr::SuccessorCount() const { |
| return (normal_entry() == nullptr ? 0 : 1) + |
| (unchecked_entry() == nullptr ? 0 : 1) + |
| (osr_entry() == nullptr ? 0 : 1) + catch_entries_.length(); |
| } |
| |
| BlockEntryInstr* GraphEntryInstr::SuccessorAt(intptr_t index) const { |
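| // Successors are numbered in the order normal entry, unchecked entry, |
| // OSR entry, then catch entries; entries that are absent are skipped. |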
| if (normal_entry() != nullptr) { |
| if (index == 0) return normal_entry_; |
| index--; |
| } |
| if (unchecked_entry() != nullptr) { |
| if (index == 0) return unchecked_entry(); |
| index--; |
| } |
| if (osr_entry() != nullptr) { |
| if (index == 0) return osr_entry(); |
| index--; |
| } |
| return catch_entries_[index]; |
| } |
| |
| intptr_t BranchInstr::SuccessorCount() const { |
| return 2; |
| } |
| |
| BlockEntryInstr* BranchInstr::SuccessorAt(intptr_t index) const { |
| if (index == 0) return true_successor_; |
| if (index == 1) return false_successor_; |
| UNREACHABLE(); |
| return NULL; |
| } |
| |
| intptr_t GotoInstr::SuccessorCount() const { |
| return 1; |
| } |
| |
| BlockEntryInstr* GotoInstr::SuccessorAt(intptr_t index) const { |
| ASSERT(index == 0); |
| return successor(); |
| } |
| |
| void Instruction::Goto(JoinEntryInstr* entry) { |
| LinkTo(new GotoInstr(entry, CompilerState::Current().GetNextDeoptId())); |
| } |
| |
| bool UnboxedIntConverterInstr::ComputeCanDeoptimize() const { |
| return (to() == kUnboxedInt32) && !is_truncating() && |
| !RangeUtils::Fits(value()->definition()->range(), |
| RangeBoundary::kRangeBoundaryInt32); |
| } |
| |
| bool UnboxInt32Instr::ComputeCanDeoptimize() const { |
| if (speculative_mode() == kNotSpeculative) { |
| return false; |
| } |
| const intptr_t value_cid = value()->Type()->ToCid(); |
| if (value_cid == kSmiCid) { |
| return (kSmiBits > 32) && !is_truncating() && |
| !RangeUtils::Fits(value()->definition()->range(), |
| RangeBoundary::kRangeBoundaryInt32); |
| } else if (value_cid == kMintCid) { |
| return !is_truncating() && |
| !RangeUtils::Fits(value()->definition()->range(), |
| RangeBoundary::kRangeBoundaryInt32); |
| } else if (is_truncating() && value()->definition()->IsBoxInteger()) { |
| return false; |
| } else if ((kSmiBits < 32) && value()->Type()->IsInt()) { |
| return !RangeUtils::Fits(value()->definition()->range(), |
| RangeBoundary::kRangeBoundaryInt32); |
| } else { |
| return true; |
| } |
| } |
| |
| bool UnboxUint32Instr::ComputeCanDeoptimize() const { |
| ASSERT(is_truncating()); |
| if (speculative_mode() == kNotSpeculative) { |
| return false; |
| } |
| if ((value()->Type()->ToCid() == kSmiCid) || |
| (value()->Type()->ToCid() == kMintCid)) { |
| return false; |
| } |
| // Check input value's range. |
| Range* value_range = value()->definition()->range(); |
| return !RangeUtils::Fits(value_range, RangeBoundary::kRangeBoundaryInt64); |
| } |
| |
| bool BinaryInt32OpInstr::ComputeCanDeoptimize() const { |
| switch (op_kind()) { |
| case Token::kBIT_AND: |
| case Token::kBIT_OR: |
| case Token::kBIT_XOR: |
| return false; |
| |
| case Token::kSHR: |
| return false; |
| |
| case Token::kSHL: |
| // Currently only shifts by an in-range constant are supported; see |
| // BinaryInt32OpInstr::IsSupported. |
| return can_overflow(); |
| |
| case Token::kMOD: { |
| UNREACHABLE(); |
| } |
| |
| default: |
| return can_overflow(); |
| } |
| } |
| |
| bool BinarySmiOpInstr::ComputeCanDeoptimize() const { |
| switch (op_kind()) { |
| case Token::kBIT_AND: |
| case Token::kBIT_OR: |
| case Token::kBIT_XOR: |
| return false; |
| |
| case Token::kSHR: |
| return !RangeUtils::IsPositive(right_range()); |
| |
| case Token::kSHL: |
| return can_overflow() || !RangeUtils::IsPositive(right_range()); |
| |
| case Token::kMOD: |
| return RangeUtils::CanBeZero(right_range()); |
| |
| default: |
| return can_overflow(); |
| } |
| } |
| |
| bool ShiftIntegerOpInstr::IsShiftCountInRange(int64_t max) const { |
| return RangeUtils::IsWithin(shift_range(), 0, max); |
| } |
| |
| bool BinaryIntegerOpInstr::RightIsPowerOfTwoConstant() const { |
| if (!right()->definition()->IsConstant()) return false; |
| const Object& constant = right()->definition()->AsConstant()->value(); |
| if (!constant.IsSmi()) return false; |
| const intptr_t int_value = Smi::Cast(constant).Value(); |
| ASSERT(int_value != kIntptrMin); |
| return Utils::IsPowerOfTwo(Utils::Abs(int_value)); |
| } |
| |
| static intptr_t RepresentationBits(Representation r) { |
| switch (r) { |
| case kTagged: |
| return kBitsPerWord - 1; |
| case kUnboxedInt32: |
| case kUnboxedUint32: |
| return 32; |
| case kUnboxedInt64: |
| return 64; |
| default: |
| UNREACHABLE(); |
| return 0; |
| } |
| } |
| |
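| // Returns a mask with the low RepresentationBits(r) bits set, |
| // e.g. 0xFFFFFFFF for kUnboxedUint32. |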
| static int64_t RepresentationMask(Representation r) { |
| return static_cast<int64_t>(static_cast<uint64_t>(-1) >> |
| (64 - RepresentationBits(r))); |
| } |
| |
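| // Attempts to extract an integer constant from [value], looking through |
| // Unbox instructions and accepting Smi, Mint and integer-valued Double |
| // constants. Returns true and stores the value in [result] on success. |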
| static bool ToIntegerConstant(Value* value, int64_t* result) { |
| if (!value->BindsToConstant()) { |
| UnboxInstr* unbox = value->definition()->AsUnbox(); |
| if (unbox != NULL) { |
| switch (unbox->representation()) { |
| case kUnboxedDouble: |
| case kUnboxedInt64: |
| return ToIntegerConstant(unbox->value(), result); |
| |
| case kUnboxedUint32: |
| if (ToIntegerConstant(unbox->value(), result)) { |
| *result &= RepresentationMask(kUnboxedUint32); |
| return true; |
| } |
| break; |
| |
| // No need to handle Unbox<Int32>(Constant(C)) because it gets |
| // canonicalized to UnboxedConstant<Int32>(C). |
| case kUnboxedInt32: |
| default: |
| break; |
| } |
| } |
| return false; |
| } |
| |
| const Object& constant = value->BoundConstant(); |
| if (constant.IsDouble()) { |
| const Double& double_constant = Double::Cast(constant); |
| *result = Utils::SafeDoubleToInt<int64_t>(double_constant.value()); |
| return (static_cast<double>(*result) == double_constant.value()); |
| } else if (constant.IsSmi()) { |
| *result = Smi::Cast(constant).Value(); |
| return true; |
| } else if (constant.IsMint()) { |
| *result = Mint::Cast(constant).value(); |
| return true; |
| } |
| |
| return false; |
| } |
| |
| static Definition* CanonicalizeCommutativeDoubleArithmetic(Token::Kind op, |
| Value* left, |
| Value* right) { |
| int64_t left_value; |
| if (!ToIntegerConstant(left, &left_value)) { |
| return NULL; |
| } |
| |
| // Can't apply 0.0 * x -> 0.0 equivalence to double operation because |
| // 0.0 * NaN is NaN not 0.0. |
| // Can't apply 0.0 + x -> x to double because 0.0 + (-0.0) is 0.0 not -0.0. |
| switch (op) { |
| case Token::kMUL: |
| if (left_value == 1) { |
| if (right->definition()->representation() != kUnboxedDouble) { |
| // Can't apply the equivalence yet because representation selection |
| // has not run yet. We need it to guarantee that the right value is |
| // correctly coerced to double. The second canonicalization pass |
| // will apply this equivalence. |
| return NULL; |
| } else { |
| return right->definition(); |
| } |
| } |
| break; |
| default: |
| break; |
| } |
| |
| return NULL; |
| } |
| |
| Definition* DoubleToFloatInstr::Canonicalize(FlowGraph* flow_graph) { |
| #ifdef DEBUG |
| // Must only be used in a Float32 StoreIndexedInstr, a FloatToDoubleInstr, or |
| // Phis introduced by load forwarding. |
| ASSERT(env_use_list() == NULL); |
| for (Value* use = input_use_list(); use != NULL; use = use->next_use()) { |
| ASSERT(use->instruction()->IsPhi() || |
| use->instruction()->IsFloatToDouble() || |
| (use->instruction()->IsStoreIndexed() && |
| (use->instruction()->AsStoreIndexed()->class_id() == |
| kTypedDataFloat32ArrayCid))); |
| } |
| #endif |
| if (!HasUses()) return NULL; |
| if (value()->definition()->IsFloatToDouble()) { |
| // F2D(D2F(v)) == v. |
| return value()->definition()->AsFloatToDouble()->value()->definition(); |
| } |
| return this; |
| } |
| |
| Definition* FloatToDoubleInstr::Canonicalize(FlowGraph* flow_graph) { |
| return HasUses() ? this : NULL; |
| } |
| |
| Definition* BinaryDoubleOpInstr::Canonicalize(FlowGraph* flow_graph) { |
| if (!HasUses()) return NULL; |
| |
| Definition* result = NULL; |
| |
| result = CanonicalizeCommutativeDoubleArithmetic(op_kind(), left(), right()); |
| if (result != NULL) { |
| return result; |
| } |
| |
| result = CanonicalizeCommutativeDoubleArithmetic(op_kind(), right(), left()); |
| if (result != NULL) { |
| return result; |
| } |
| |
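| // Replace x * x with a single DoubleSquare(x) instruction. |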
| if ((op_kind() == Token::kMUL) && |
| (left()->definition() == right()->definition())) { |
| MathUnaryInstr* math_unary = new MathUnaryInstr( |
| MathUnaryInstr::kDoubleSquare, new Value(left()->definition()), |
| DeoptimizationTarget()); |
| flow_graph->InsertBefore(this, math_unary, env(), FlowGraph::kValue); |
| return math_unary; |
| } |
| |
| return this; |
| } |
| |
| Definition* DoubleTestOpInstr::Canonicalize(FlowGraph* flow_graph) { |
| return HasUses() ? this : NULL; |
| } |
| |
| static bool IsCommutative(Token::Kind op) { |
| switch (op) { |
| case Token::kMUL: |
| case Token::kADD: |
| case Token::kBIT_AND: |
| case Token::kBIT_OR: |
| case Token::kBIT_XOR: |
| return true; |
| default: |
| return false; |
| } |
| } |
| |
| UnaryIntegerOpInstr* UnaryIntegerOpInstr::Make(Representation representation, |
| Token::Kind op_kind, |
| Value* value, |
| intptr_t deopt_id, |
| Range* range) { |
| UnaryIntegerOpInstr* op = NULL; |
| switch (representation) { |
| case kTagged: |
| op = new UnarySmiOpInstr(op_kind, value, deopt_id); |
| break; |
| case kUnboxedInt32: |
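| // There is no unary int32 op instruction; callers handle the NULL |
| // result. |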
| return NULL; |
| case kUnboxedUint32: |
| op = new UnaryUint32OpInstr(op_kind, value, deopt_id); |
| break; |
| case kUnboxedInt64: |
| op = new UnaryInt64OpInstr(op_kind, value, deopt_id); |
| break; |
| default: |
| UNREACHABLE(); |
| return NULL; |
| } |
| |
| if (op == NULL) { |
| return op; |
| } |
| |
| if (!Range::IsUnknown(range)) { |
| op->set_range(*range); |
| } |
| |
| ASSERT(op->representation() == representation); |
| return op; |
| } |
| |
| BinaryIntegerOpInstr* BinaryIntegerOpInstr::Make( |
| Representation representation, |
| Token::Kind op_kind, |
| Value* left, |
| Value* right, |
| intptr_t deopt_id, |
| bool can_overflow, |
| bool is_truncating, |
| Range* range, |
| SpeculativeMode speculative_mode) { |
| BinaryIntegerOpInstr* op = NULL; |
| switch (representation) { |
| case kTagged: |
| op = new BinarySmiOpInstr(op_kind, left, right, deopt_id); |
| break; |
| case kUnboxedInt32: |
| if (!BinaryInt32OpInstr::IsSupported(op_kind, left, right)) { |
| return NULL; |
| } |
| op = new BinaryInt32OpInstr(op_kind, left, right, deopt_id); |
| break; |
| case kUnboxedUint32: |
| if ((op_kind == Token::kSHR) || (op_kind == Token::kSHL)) { |
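| // Shifts have dedicated instructions; pick the speculative or |
| // non-speculative variant depending on the requested mode. |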
| if (speculative_mode == kNotSpeculative) { |
| op = new ShiftUint32OpInstr(op_kind, left, right, deopt_id); |
| } else { |
| op = |
| new SpeculativeShiftUint32OpInstr(op_kind, left, right, deopt_id); |
| } |
| } else { |
| op = new BinaryUint32OpInstr(op_kind, left, right, deopt_id); |
| } |
| break; |
| case kUnboxedInt64: |
| if ((op_kind == Token::kSHR) || (op_kind == Token::kSHL)) { |
| if (speculative_mode == kNotSpeculative) { |
| op = new ShiftInt64OpInstr(op_kind, left, right, deopt_id); |
| } else { |
| op = new SpeculativeShiftInt64OpInstr(op_kind, left, right, deopt_id); |
| } |
| } else { |
| op = new BinaryInt64OpInstr(op_kind, left, right, deopt_id); |
| } |
| break; |
| default: |
| UNREACHABLE(); |
| return NULL; |
| } |
| |
| if (!Range::IsUnknown(range)) { |
| op->set_range(*range); |
| } |
| |
| op->set_can_overflow(can_overflow); |
| if (is_truncating) { |
| op->mark_truncating(); |
| } |
| |
| ASSERT(op->representation() == representation); |
| return op; |
| } |
| |
| static bool IsRepresentable(const Integer& value, Representation rep) { |
| switch (rep) { |
| case kTagged: // Smi case. |
| return value.IsSmi(); |
| |
| case kUnboxedInt32: |
| if (value.IsSmi() || value.IsMint()) { |
| return Utils::IsInt(32, value.AsInt64Value()); |
| } |
| return false; |
| |
| case kUnboxedInt64: |
| return value.IsSmi() || value.IsMint(); |
| |
| case kUnboxedUint32: // Only truncating Uint32 arithmetic is supported. |
| default: |
| UNREACHABLE(); |
| } |
| |
| return false; |
| } |
| |
| RawInteger* UnaryIntegerOpInstr::Evaluate(const Integer& value) const { |
| Thread* thread = Thread::Current(); |
| Zone* zone = thread->zone(); |
| Integer& result = Integer::Handle(zone); |
| |
| switch (op_kind()) { |
| case Token::kNEGATE: |
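| // Negation is evaluated as multiplication by -1. |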
| result = value.ArithmeticOp(Token::kMUL, Smi::Handle(zone, Smi::New(-1)), |
| Heap::kOld); |
| break; |
| |
| case Token::kBIT_NOT: |
| if (value.IsSmi()) { |
| result = Integer::New(~Smi::Cast(value).Value(), Heap::kOld); |
| } else if (value.IsMint()) { |
| result = Integer::New(~Mint::Cast(value).value(), Heap::kOld); |
| } |
| break; |
| |
| default: |
| UNREACHABLE(); |
| } |
| |
| if (!result.IsNull()) { |
| if (!IsRepresentable(result, representation())) { |
| // If this operation is not truncating it would deoptimize on overflow. |
| // Check that we match this behavior and don't produce a value outside |
| // the range this operation can produce: specialized instructions that |
| // use this value may rely on that assumption. |
| return Integer::null(); |
| } |
| |
| const char* error_str = NULL; |
| result ^= result.CheckAndCanonicalize(thread, &error_str); |
| if (error_str != NULL) { |
| FATAL1("Failed to canonicalize: %s", error_str); |
| } |
| } |
| |
| return result.raw(); |
| } |
| |
| RawInteger* BinaryIntegerOpInstr::Evaluate(const Integer& left, |
| const Integer& right) const { |
| Thread* thread = Thread::Current(); |
| Zone* zone = thread->zone(); |
| Integer& result = Integer::Handle(zone); |
| |
| switch (op_kind()) { |
| case Token::kTRUNCDIV: |
| case Token::kMOD: |
| // Check right value for zero. |
| if (right.AsInt64Value() == 0) { |
| break; // Will throw. |
| } |
| // Fall through. |
| case Token::kADD: |
| case Token::kSUB: |
| case Token::kMUL: { |
| result = left.ArithmeticOp(op_kind(), right, Heap::kOld); |
| break; |
| } |
| case Token::kSHL: |
| case Token::kSHR: |
| if (right.AsInt64Value() >= 0) { |
| result = left.ShiftOp(op_kind(), right, Heap::kOld); |
| } |
| break; |
| case Token::kBIT_AND: |
| case Token::kBIT_OR: |
| case Token::kBIT_XOR: { |
| result = left.BitOp(op_kind(), right, Heap::kOld); |
| break; |
| } |
| case Token::kDIV: |
| break; |
| default: |
| UNREACHABLE(); |
| } |
| |
| if (!result.IsNull()) { |
| if (is_truncating()) { |
| int64_t truncated = result.AsTruncatedInt64Value(); |
| truncated &= RepresentationMask(representation()); |
| result = Integer::New(truncated, Heap::kOld); |
| ASSERT(IsRepresentable(result, representation())); |
| } else if (!IsRepresentable(result, representation())) { |
| // If this operation is not truncating it would deoptimize on overflow. |
| // Check that we match this behavior and don't produce a value outside |
| // the range this operation can produce: specialized instructions that |
| // use this value may rely on that assumption. |
| return Integer::null(); |
| } |
| const char* error_str = NULL; |
| result ^= result.CheckAndCanonicalize(thread, &error_str); |
| if (error_str != NULL) { |
| FATAL1("Failed to canonicalize: %s", error_str); |
| } |
| } |
| |
| return result.raw(); |
| } |
| |
| Definition* BinaryIntegerOpInstr::CreateConstantResult(FlowGraph* flow_graph, |
| const Integer& result) { |
| Definition* result_defn = flow_graph->GetConstant(result); |
| if (representation() != kTagged) { |
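| // Constants are tagged; insert an unbox so the replacement matches |
| // this instruction's representation. |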
| result_defn = UnboxInstr::Create(representation(), new Value(result_defn), |
| GetDeoptId()); |
| flow_graph->InsertBefore(this, result_defn, env(), FlowGraph::kValue); |
| } |
| return result_defn; |
| } |
| |
| Definition* CheckedSmiOpInstr::Canonicalize(FlowGraph* flow_graph) { |
| if ((left()->Type()->ToCid() == kSmiCid) && |
| (right()->Type()->ToCid() == kSmiCid)) { |
| Definition* replacement = NULL; |
| // Operations that can't deoptimize are specialized here: These include |
| // bit-wise operators and comparisons. Other arithmetic operations can |
| // overflow or divide by 0 and can't be specialized unless we have extra |
| // range information. |
| switch (op_kind()) { |
| case Token::kBIT_AND: |
| case Token::kBIT_OR: |
| case Token::kBIT_XOR: |
| replacement = new BinarySmiOpInstr( |
| op_kind(), new Value(left()->definition()), |
| new Value(right()->definition()), DeoptId::kNone); |
| default: |
| break; |
| } |
| if (replacement != NULL) { |
| flow_graph->InsertBefore(this, replacement, env(), FlowGraph::kValue); |
| return replacement; |
| } |
| } |
| return this; |
| } |
| |
| ComparisonInstr* CheckedSmiComparisonInstr::CopyWithNewOperands(Value* left, |
| Value* right) { |
| UNREACHABLE(); |
| return NULL; |
| } |
| |
| Definition* CheckedSmiComparisonInstr::Canonicalize(FlowGraph* flow_graph) { |
| CompileType* left_type = left()->Type(); |
| CompileType* right_type = right()->Type(); |
| intptr_t op_cid = kIllegalCid; |
| SpeculativeMode speculative_mode = kGuardInputs; |
| |
| if ((left_type->ToCid() == kSmiCid) && (right_type->ToCid() == kSmiCid)) { |
| op_cid = kSmiCid; |
| } else if (Isolate::Current()->can_use_strong_mode_types() && |
| FlowGraphCompiler::SupportsUnboxedInt64() && |
| // TODO(dartbug.com/30480): handle nullable types here |
| left_type->IsNullableInt() && !left_type->is_nullable() && |
| right_type->IsNullableInt() && !right_type->is_nullable()) { |
| op_cid = kMintCid; |
| speculative_mode = kNotSpeculative; |
| } |
| |
| if (op_cid != kIllegalCid) { |
| Definition* replacement = NULL; |
| if (Token::IsRelationalOperator(kind())) { |
| replacement = new RelationalOpInstr( |
| token_pos(), kind(), left()->CopyWithType(), right()->CopyWithType(), |
| op_cid, DeoptId::kNone, speculative_mode); |
| } else if (Token::IsEqualityOperator(kind())) { |
| replacement = new EqualityCompareInstr( |
| token_pos(), kind(), left()->CopyWithType(), right()->CopyWithType(), |
| op_cid, DeoptId::kNone, speculative_mode); |
| } |
| if (replacement != NULL) { |
| if (FLAG_trace_strong_mode_types && (op_cid == kMintCid)) { |
| THR_Print("[Strong mode] Optimization: replacing %s with %s\n", |
| ToCString(), replacement->ToCString()); |
| } |
| flow_graph->InsertBefore(this, replacement, env(), FlowGraph::kValue); |
| return replacement; |
| } |
| } |
| return this; |
| } |
| |
| Definition* BinaryIntegerOpInstr::Canonicalize(FlowGraph* flow_graph) { |
| // If both operands are constants, evaluate this expression. This might |
| // occur due to load forwarding after the constant propagation pass |
| // has already been run. |
| if (left()->BindsToConstant() && left()->BoundConstant().IsInteger() && |
| right()->BindsToConstant() && right()->BoundConstant().IsInteger()) { |
| const Integer& result = |
| Integer::Handle(Evaluate(Integer::Cast(left()->BoundConstant()), |
| Integer::Cast(right()->BoundConstant()))); |
| if (!result.IsNull()) { |
| return CreateConstantResult(flow_graph, result); |
| } |
| } |
| |
| if (left()->BindsToConstant() && !right()->BindsToConstant() && |
| IsCommutative(op_kind())) { |
| Value* l = left(); |
| Value* r = right(); |
| SetInputAt(0, r); |
| SetInputAt(1, l); |
| } |
| |
| int64_t rhs; |
| if (!ToIntegerConstant(right(), &rhs)) { |
| return this; |
| } |
| |
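| // For truncating operations the constant right operand can be masked |
| // to the representation's bit width, since the discarded bits cannot |
| // affect the truncated result. |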
| const int64_t range_mask = RepresentationMask(representation()); |
| if (is_truncating()) { |
| switch (op_kind()) { |
| case Token::kMUL: |
| case Token::kSUB: |
| case Token::kADD: |
| case Token::kBIT_AND: |
| case Token::kBIT_OR: |
| case Token::kBIT_XOR: |
| rhs = (rhs & range_mask); |
| break; |
| default: |
| break; |
| } |
| } |
| |
| switch (op_kind()) { |
| case Token::kMUL: |
| if (rhs == 1) { |
| return left()->definition(); |
| } else if (rhs == 0) { |
| return right()->definition(); |
| } else if (rhs == 2) { |
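| // Multiplication by 2 becomes a left shift by one, when such a shift |
| // is available for this representation. |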
| ConstantInstr* constant_1 = |
| flow_graph->GetConstant(Smi::Handle(Smi::New(1))); |
| BinaryIntegerOpInstr* shift = BinaryIntegerOpInstr::Make( |
| representation(), Token::kSHL, left()->CopyWithType(), |
| new Value(constant_1), GetDeoptId(), can_overflow(), |
| is_truncating(), range(), speculative_mode()); |
| if (shift != NULL) { |
| flow_graph->InsertBefore(this, shift, env(), FlowGraph::kValue); |
| return shift; |
| } |
| } |
| |
| break; |
| case Token::kADD: |
| if (rhs == 0) { |
| return left()->definition(); |
| } |
| break; |
| case Token::kBIT_AND: |
| if (rhs == 0) { |
| return right()->definition(); |
| } else if (rhs == range_mask) { |
| return left()->definition(); |
| } |
| break; |
| case Token::kBIT_OR: |
| if (rhs == 0) { |
| return left()->definition(); |
| } else if (rhs == range_mask) { |
| return right()->definition(); |
| } |
| break; |
| case Token::kBIT_XOR: |
| if (rhs == 0) { |
| return left()->definition(); |
| } else if (rhs == range_mask) { |
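| // XOR with an all-ones mask is bitwise negation. |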
| UnaryIntegerOpInstr* bit_not = UnaryIntegerOpInstr::Make( |
| representation(), Token::kBIT_NOT, left()->CopyWithType(), |
| GetDeoptId(), range()); |
| if (bit_not != NULL) { |
| flow_graph->InsertBefore(this, bit_not, env(), FlowGraph::kValue); |
| return bit_not; |
| } |
| } |
| break; |
| |
| case Token::kSUB: |
| if (rhs == 0) { |
| return left()->definition(); |
| } |
| break; |
| |
| case Token::kTRUNCDIV: |
| if (rhs == 1) { |
| return left()->definition(); |
| } else if (rhs == -1) { |
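| // Truncating division by -1 is negation. |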
| UnaryIntegerOpInstr* negation = UnaryIntegerOpInstr::Make( |
| representation(), Token::kNEGATE, left()->CopyWithType(), |
| GetDeoptId(), range()); |
| if (negation != NULL) { |
| flow_graph->InsertBefore(this, negation, env(), FlowGraph::kValue); |
| return negation; |
| } |
| } |
| break; |
| |
| case Token::kSHR: |
| if (rhs == 0) { |
| return left()->definition(); |
| } else if (rhs < 0) { |
| // Instruction will always throw on negative rhs operand. |
| if (!CanDeoptimize()) { |
| // For non-speculative operations (no deopt), let |
| // the code generator deal with throw on slowpath. |
| break; |
| } |
| ASSERT(GetDeoptId() != DeoptId::kNone); |
| DeoptimizeInstr* deopt = |
| new DeoptimizeInstr(ICData::kDeoptBinarySmiOp, GetDeoptId()); |
| flow_graph->InsertBefore(this, deopt, env(), FlowGraph::kEffect); |
| // Replace with zero since it always throws. |
| return CreateConstantResult(flow_graph, Integer::Handle(Smi::New(0))); |
| } |
| break; |
| |
| case Token::kSHL: { |
| const intptr_t result_bits = RepresentationBits(representation()); |
| if (rhs == 0) { |
| return left()->definition(); |
| } else if ((rhs >= kBitsPerInt64) || |
| ((rhs >= result_bits) && is_truncating())) { |
| return CreateConstantResult(flow_graph, Integer::Handle(Smi::New(0))); |
| } else if ((rhs < 0) || ((rhs >= result_bits) && !is_truncating())) { |
| // Instruction will always throw on negative rhs operand or |
| // deoptimize on large rhs operand. |
| if (!CanDeoptimize()) { |
| // For non-speculative operations (no deopt), let |
| // the code generator deal with throw on slowpath. |
| break; |
| } |
| ASSERT(GetDeoptId() != DeoptId::kNone); |
| DeoptimizeInstr* deopt = |
| new DeoptimizeInstr(ICData::kDeoptBinarySmiOp, GetDeoptId()); |
| flow_graph->InsertBefore(this, deopt, env(), FlowGraph::kEffect); |
| // Replace with zero since it overshifted or always throws. |
| return CreateConstantResult(flow_graph, Integer::Handle(Smi::New(0))); |
| } |
| break; |
| } |
| |
| default: |
| break; |
| } |
| |
| return this; |
| } |
| |
| // Optimizations that eliminate or simplify individual instructions. |
| Instruction* Instruction::Canonicalize(FlowGraph* flow_graph) { |
| return this; |
| } |
| |
| Definition* Definition::Canonicalize(FlowGraph* flow_graph) { |
| return this; |
| } |
| |
| Definition* RedefinitionInstr::Canonicalize(FlowGraph* flow_graph) { |
| // Must not remove Redefinitions without uses until LICM: even though a |
| // Redefinition might not have any uses itself, it can still dominate |
| // uses of the value it redefines and must serve as a barrier for those |
| // uses. RenameUsesDominatedByRedefinitions would normalize the graph and |
| // route those uses through this redefinition. |
| if (!HasUses() && !flow_graph->is_licm_allowed()) { |
| return NULL; |
| } |
| if ((constrained_type() != NULL) && |
| Type()->IsEqualTo(value()->definition()->Type())) { |
| return value()->definition(); |
| } |
| return this; |
| } |
| |
| Instruction* CheckStackOverflowInstr::Canonicalize(FlowGraph* flow_graph) { |
| switch (kind_) { |
| case kOsrAndPreemption: |
| return this; |
| case kOsrOnly: |
| // Don't need OSR entries in the optimized code. |
| return NULL; |
| } |
| |
| // Switch above exhausts all possibilities but some compilers can't figure |
| // it out. |
| UNREACHABLE(); |
| return this; |
| } |
| |
| bool LoadFieldInstr::IsImmutableLengthLoad() const { |
| if (native_field() != nullptr) { |
| switch (native_field()->kind()) { |
| case NativeFieldDesc::kArray_length: |
| case NativeFieldDesc::kTypedData_length: |
| case NativeFieldDesc::kString_length: |
| return true; |
| case NativeFieldDesc::kGrowableObjectArray_length: |
| return false; |
| |
| // Not length loads. |
| case NativeFieldDesc::kLinkedHashMap_index: |
| case NativeFieldDesc::kLinkedHashMap_data: |
| case NativeFieldDesc::kLinkedHashMap_hash_mask: |
| case NativeFieldDesc::kLinkedHashMap_used_data: |
| case NativeFieldDesc::kLinkedHashMap_deleted_keys: |
| case NativeFieldDesc::kArgumentsDescriptor_type_args_len: |
| case NativeFieldDesc::kTypeArguments: |
| return false; |
| } |
| } |
| return false; |
| } |
| |
| bool LoadFieldInstr::IsFixedLengthArrayCid(intptr_t cid) { |
| if (RawObject::IsTypedDataClassId(cid) || |
| RawObject::IsExternalTypedDataClassId(cid)) { |
| return true; |
| } |
| |
| switch (cid) { |
| case kArrayCid: |
| case kImmutableArrayCid: |
| return true; |
| default: |
| return false; |
| } |
| } |
| |
| Definition* ConstantInstr::Canonicalize(FlowGraph* flow_graph) { |
| return HasUses() ? this : NULL; |
| } |
| |
| // A math unary instruction has a side effect (exception |
| // thrown) if the argument is not a number. |
| // TODO(srdjan): eliminate if it has no uses and the input is guaranteed |
| // to be a number. |
| Definition* MathUnaryInstr::Canonicalize(FlowGraph* flow_graph) { |
| return this; |
| } |
| |
| bool LoadFieldInstr::Evaluate(const Object& instance, Object* result) { |
| if (native_field() != nullptr) { |
| switch (native_field()->kind()) { |
| case NativeFieldDesc::kArgumentsDescriptor_type_args_len: |
| if (instance.IsArray() && Array::Cast(instance).IsImmutable()) { |
| ArgumentsDescriptor desc(Array::Cast(instance)); |
| *result = Smi::New(desc.TypeArgsLen()); |
| return true; |
| } |
| return false; |
| |
| default: |
| break; |
| } |
| } |
| |
| if (field() == nullptr || !field()->is_final() || !instance.IsInstance()) { |
| return false; |
| } |
| |
| // Check that the instance really has the field that we |
| // are trying to load from. |
| Class& cls = Class::Handle(instance.clazz()); |
| while (cls.raw() != Class::null() && cls.raw() != field()->Owner()) { |
| cls = cls.SuperClass(); |
| } |
| if (cls.raw() != field()->Owner()) { |
| // Failed to find the field in class or its superclasses. |
| return false; |
| } |
| |
| // Object has the field: execute the load. |
| *result = Instance::Cast(instance).GetField(*field()); |
| return true; |
| } |
| |
| Definition* LoadFieldInstr::Canonicalize(FlowGraph* flow_graph) { |
| if (!HasUses()) return nullptr; |
| |
| if (IsImmutableLengthLoad()) { |
| Definition* array = instance()->definition()->OriginalDefinition(); |
| if (StaticCallInstr* call = array->AsStaticCall()) { |
| // For fixed-length arrays, if the array is the result of a known |
| // constructor call, we can replace the length load with the length |
| // argument passed to the constructor. |
| if (call->is_known_list_constructor() && |
| IsFixedLengthArrayCid(call->Type()->ToCid())) { |
| return call->ArgumentAt(1); |
| } |
| } else if (CreateArrayInstr* create_array = array->AsCreateArray()) { |
| if (native_field() == NativeFieldDesc::Array_length()) { |
| return create_array->num_elements()->definition(); |
| } |
| } else if (LoadFieldInstr* load_array = array->AsLoadField()) { |
| // For arrays with guarded lengths, replace the length load |
| // with a constant. |
| if (const Field* field = load_array->field()) { |
| if (field->guarded_list_length() >= 0) { |
| return flow_graph->GetConstant( |
| Smi::Handle(Smi::New(field->guarded_list_length()))); |
| } |
| } |
| } |
| } else if (native_field() != nullptr && |
| native_field()->kind() == NativeFieldDesc::kTypeArguments) { |
| Definition* array = instance()->definition()->OriginalDefinition(); |
| if (StaticCallInstr* call = array->AsStaticCall()) { |
| if (call->is_known_list_constructor()) { |
| return call->ArgumentAt(0); |
| } else if (call->function().recognized_kind() == |
| MethodRecognizer::kLinkedHashMap_getData) { |
| return flow_graph->constant_null(); |
| } |
| } else if (CreateArrayInstr* create_array = array->AsCreateArray()) { |
| return create_array->element_type()->definition(); |
| } else if (LoadFieldInstr* load_array = array->AsLoadField()) { |
| const Field* field = load_array->field(); |
| // For trivially exact fields we know that type arguments match |
| // static type arguments exactly. |
| if ((field != nullptr) && |
| field->static_type_exactness_state().IsTriviallyExact()) { |
| return flow_graph->GetConstant(TypeArguments::Handle( |
| AbstractType::Handle(field->type()).arguments())); |
| } else if (const NativeFieldDesc* native_field = |
| load_array->native_field()) { |
| if (native_field == NativeFieldDesc::LinkedHashMap_data()) { |
| return flow_graph->constant_null(); |
| } |
| } |
| } |
| } |
| |
| // Try folding away loads from constant objects. |
| if (instance()->BindsToConstant()) { |
| Object& result = Object::Handle(); |
| if (Evaluate(instance()->BoundConstant(), &result)) { |
| if (result.IsSmi() || result.IsOld()) { |
| return flow_graph->GetConstant(result); |
| } |
| } |
| } |
| |
| return this; |
| } |
| |
| Definition* AssertBooleanInstr::Canonicalize(FlowGraph* flow_graph) { |
| if (FLAG_eliminate_type_checks) { |
| if (value()->Type()->ToCid() == kBoolCid) { |
| return value()->definition(); |
| } |
| |
| // In strong mode type is already verified either by static analysis |
| // or runtime checks, so AssertBoolean just ensures that value is not null. |
| if (FLAG_strong && !value()->Type()->is_nullable()) { |
| return value()->definition(); |
| } |
| } |
| |
| return this; |
| } |
| |
| Definition* AssertAssignableInstr::Canonicalize(FlowGraph* flow_graph) { |
| if (FLAG_eliminate_type_checks && |
| value()->Type()->IsAssignableTo(dst_type())) { |
| return value()->definition(); |
| } |
| if (dst_type().IsInstantiated()) { |
| return this; |
| } |
| |
| // For uninstantiated target types: If the instantiator and function |
| // type arguments are constant, instantiate the target type here. |
| // Note: these constant type arguments might not necessarily correspond |
| // to the correct instantiator because AssertAssignable might |
| // be located in an unreachable part of the graph (e.g. |
| // it might be dominated by a CheckClass that always fails). |
| // This means that the code below must guard against such possibility. |
| Zone* Z = Thread::Current()->zone(); |
| |
| const TypeArguments* instantiator_type_args = nullptr; |
| const TypeArguments* function_type_args = nullptr; |
| |
| if (instantiator_type_arguments()->BindsToConstant()) { |
| const Object& val = instantiator_type_arguments()->BoundConstant(); |
| instantiator_type_args = (val.raw() == TypeArguments::null()) |
| ? &TypeArguments::null_type_arguments() |
| : &TypeArguments::Cast(val); |
| } |
| |
| if (function_type_arguments()->BindsToConstant()) { |
| const Object& val = function_type_arguments()->BoundConstant(); |
| function_type_args = (val.raw() == TypeArguments::null()) |
| ? &TypeArguments::null_type_arguments() |
| : &TypeArguments::Cast(val); |
| } |
| |
| // If instantiator_type_args are not constant try to match the pattern |
| // obj.field.:type_arguments where field's static type exactness state |
| |