| // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
| // for details. All rights reserved. Use of this source code is governed by a |
| // BSD-style license that can be found in the LICENSE file. |
| |
| #include "vm/flow_graph_optimizer.h" |
| |
| #include "vm/bit_vector.h" |
| #include "vm/cha.h" |
| #include "vm/compiler.h" |
| #include "vm/cpu.h" |
| #include "vm/dart_entry.h" |
| #include "vm/exceptions.h" |
| #include "vm/flow_graph_builder.h" |
| #include "vm/flow_graph_compiler.h" |
| #include "vm/flow_graph_range_analysis.h" |
| #include "vm/hash_map.h" |
| #include "vm/il_printer.h" |
| #include "vm/intermediate_language.h" |
| #include "vm/object_store.h" |
| #include "vm/parser.h" |
| #include "vm/resolver.h" |
| #include "vm/scopes.h" |
| #include "vm/stack_frame.h" |
| #include "vm/symbols.h" |
| |
| namespace dart { |
| |
// --- Optimizer tuning flags (the string is the --help text). ---
DEFINE_FLAG(int, getter_setter_ratio, 13,
    "Ratio of getter/setter usage used for double field unboxing heuristics");
DEFINE_FLAG(bool, guess_other_cid, true,
    "Artificially create type feedback for arithmetic etc. operations"
    " by guessing the other unknown argument cid");
DEFINE_FLAG(bool, load_cse, true, "Use redundant load elimination.");
DEFINE_FLAG(bool, dead_store_elimination, true, "Eliminate dead stores");
DEFINE_FLAG(int, max_polymorphic_checks, 4,
    "Maximum number of polymorphic check, otherwise it is megamorphic.");
DEFINE_FLAG(int, max_equality_polymorphic_checks, 32,
    "Maximum number of polymorphic checks in equality operator,"
    " otherwise use megamorphic dispatch.");
DEFINE_FLAG(bool, merge_sin_cos, false, "Merge sin/cos into sincos");
DEFINE_FLAG(bool, trace_load_optimization, false,
    "Print live sets for load optimization pass.");
DEFINE_FLAG(bool, trace_optimization, false, "Print optimization details.");
DEFINE_FLAG(bool, truncating_left_shift, true,
    "Optimize left shift to truncate if possible");
DEFINE_FLAG(bool, use_cha_deopt, true,
    "Use class hierarchy analysis even if it can cause deoptimization.");
// The Smi->Int32 widening pass only exists on 32-bit targets.
#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_IA32)
DEFINE_FLAG(bool, trace_smi_widening, false, "Trace Smi->Int32 widening pass.");
#endif

// Flags defined in other translation units but consulted here.
DECLARE_FLAG(bool, polymorphic_with_deopt);
DECLARE_FLAG(bool, source_lines);
DECLARE_FLAG(bool, trace_cha);
DECLARE_FLAG(bool, trace_field_guards);
DECLARE_FLAG(bool, trace_type_check_elimination);
DECLARE_FLAG(bool, warn_on_javascript_compatibility);

// Quick access to the current isolate and zone.
#define I (isolate())
#define Z (zone())
| |
| static bool ShouldInlineSimd() { |
| return FlowGraphCompiler::SupportsUnboxedSimd128(); |
| } |
| |
| |
| static bool CanUnboxDouble() { |
| return FlowGraphCompiler::SupportsUnboxedDoubles(); |
| } |
| |
| |
// Int64 typed-data array operations are only inlined on x64, where the
// full 64-bit values fit in registers.
static bool ShouldInlineInt64ArrayOps() {
#if defined(TARGET_ARCH_X64)
  return true;
#else
  return false;
#endif
}
| |
// Whether the target has an efficient instruction sequence for converting
// an unboxed 64-bit integer (mint) to a double.
static bool CanConvertUnboxedMintToDouble() {
#if defined(TARGET_ARCH_IA32)
  return true;
#else
  // ARM does not have a short instruction sequence for converting int64 to
  // double.
  // TODO(johnmccutchan): Investigate possibility on MIPS once
  // mints are implemented there.
  return false;
#endif
}
| |
| |
// Optimize instance calls using ICData.
// Entry point for the IC-data based optimization pass: simply walks all
// blocks; the per-instruction work is done by the Visit* methods invoked
// from VisitBlocks().
void FlowGraphOptimizer::ApplyICData() {
  VisitBlocks();
}
| |
| |
// Attaches a fresh, empty ICData object to every InstanceCall that does not
// already carry one, so later passes can uniformly assume ic_data() exists.
void FlowGraphOptimizer::PopulateWithICData() {
  ASSERT(current_iterator_ == NULL);
  for (intptr_t i = 0; i < block_order_.length(); ++i) {
    BlockEntryInstr* entry = block_order_[i];
    ForwardInstructionIterator it(entry);
    for (; !it.Done(); it.Advance()) {
      Instruction* instr = it.Current();
      if (instr->IsInstanceCall()) {
        InstanceCallInstr* call = instr->AsInstanceCall();
        if (!call->HasICData()) {
          // Build an arguments descriptor matching the call site and create
          // an ICData with no collected checks yet.
          const Array& arguments_descriptor =
              Array::Handle(zone(),
                            ArgumentsDescriptor::New(call->ArgumentCount(),
                                                     call->argument_names()));
          const ICData& ic_data = ICData::ZoneHandle(zone(), ICData::New(
              function(), call->function_name(),
              arguments_descriptor, call->deopt_id(),
              call->checked_argument_count()));
          call->set_ic_data(&ic_data);
        }
      }
    }
    // NOTE(review): current_iterator_ is never set in this function, so this
    // reset looks redundant (it is NULL already per the ASSERT above) —
    // presumably kept for symmetry with ApplyClassIds; confirm.
    current_iterator_ = NULL;
  }
}
| |
| |
| // Optimize instance calls using cid. This is called after optimizer |
| // converted instance calls to instructions. Any remaining |
| // instance calls are either megamorphic calls, cannot be optimized or |
| // have no runtime type feedback collected. |
| // Attempts to convert an instance call (IC call) using propagated class-ids, |
| // e.g., receiver class id, guarded-cid, or by guessing cid-s. |
| void FlowGraphOptimizer::ApplyClassIds() { |
| ASSERT(current_iterator_ == NULL); |
| for (intptr_t i = 0; i < block_order_.length(); ++i) { |
| BlockEntryInstr* entry = block_order_[i]; |
| ForwardInstructionIterator it(entry); |
| current_iterator_ = ⁢ |
| for (; !it.Done(); it.Advance()) { |
| Instruction* instr = it.Current(); |
| if (instr->IsInstanceCall()) { |
| InstanceCallInstr* call = instr->AsInstanceCall(); |
| if (call->HasICData()) { |
| if (TryCreateICData(call)) { |
| VisitInstanceCall(call); |
| } |
| } |
| } else if (instr->IsPolymorphicInstanceCall()) { |
| SpecializePolymorphicInstanceCall(instr->AsPolymorphicInstanceCall()); |
| } else if (instr->IsStrictCompare()) { |
| VisitStrictCompare(instr->AsStrictCompare()); |
| } else if (instr->IsBranch()) { |
| ComparisonInstr* compare = instr->AsBranch()->comparison(); |
| if (compare->IsStrictCompare()) { |
| VisitStrictCompare(compare->AsStrictCompare()); |
| } |
| } |
| } |
| current_iterator_ = NULL; |
| } |
| } |
| |
| |
| // TODO(srdjan): Test/support other number types as well. |
| static bool IsNumberCid(intptr_t cid) { |
| return (cid == kSmiCid) || (cid == kDoubleCid); |
| } |
| |
| |
// Attempts to synthesize type feedback (a new ICData with one check) for a
// call whose collected ICData is still empty, using statically propagated
// class ids. Returns true and attaches the new ICData on success.
bool FlowGraphOptimizer::TryCreateICData(InstanceCallInstr* call) {
  ASSERT(call->HasICData());
  if (call->ic_data()->NumberOfUsedChecks() > 0) {
    // This occurs when an instance call has too many checks, will be converted
    // to megamorphic call.
    return false;
  }
  if (FLAG_warn_on_javascript_compatibility) {
    // Do not make the instance call megamorphic if the callee needs to decode
    // the calling code sequence to lookup the ic data and verify if a warning
    // has already been issued or not.
    // TryCreateICData is only invoked if the ic_data target has not been called
    // yet, so no warning can possibly have been issued.
    ASSERT(!call->ic_data()->IssuedJSWarning());
    if (call->ic_data()->MayCheckForJSWarning()) {
      return false;
    }
  }
  // Collect the statically known cid of each tested argument (kDynamicCid
  // when unknown).
  GrowableArray<intptr_t> class_ids(call->ic_data()->NumArgsTested());
  ASSERT(call->ic_data()->NumArgsTested() <= call->ArgumentCount());
  for (intptr_t i = 0; i < call->ic_data()->NumArgsTested(); i++) {
    const intptr_t cid = call->PushArgumentAt(i)->value()->Type()->ToCid();
    class_ids.Add(cid);
  }

  const Token::Kind op_kind = call->token_kind();
  if (Token::IsRelationalOperator(op_kind) ||
      Token::IsEqualityOperator(op_kind) ||
      Token::IsBinaryOperator(op_kind)) {
    // Guess cid: if one of the inputs is a number assume that the other
    // is a number of same type.
    if (FLAG_guess_other_cid) {
      const intptr_t cid_0 = class_ids[0];
      const intptr_t cid_1 = class_ids[1];
      if ((cid_0 == kDynamicCid) && (IsNumberCid(cid_1))) {
        class_ids[0] = cid_1;
      } else if (IsNumberCid(cid_0) && (cid_1 == kDynamicCid)) {
        class_ids[1] = cid_0;
      }
    }
  }

  bool all_cids_known = true;
  for (intptr_t i = 0; i < class_ids.length(); i++) {
    if (class_ids[i] == kDynamicCid) {
      // Not all cid-s known.
      all_cids_known = false;
      break;
    }
  }

  if (all_cids_known) {
    // All argument cids are known: resolve the target statically and record
    // a single check in a fresh ICData.
    const Array& args_desc_array = Array::Handle(Z,
        ArgumentsDescriptor::New(call->ArgumentCount(),
                                 call->argument_names()));
    ArgumentsDescriptor args_desc(args_desc_array);
    const Class& receiver_class = Class::Handle(Z,
        isolate()->class_table()->At(class_ids[0]));
    if (!receiver_class.is_finalized()) {
      // Do not eagerly finalize classes. ResolveDynamicForReceiverClass can
      // cause class finalization, since callee's receiver class may not be
      // finalized yet.
      return false;
    }
    const Function& function = Function::Handle(Z,
        Resolver::ResolveDynamicForReceiverClass(
            receiver_class,
            call->function_name(),
            args_desc));
    if (function.IsNull()) {
      return false;
    }

    // Create new ICData, do not modify the one attached to the instruction
    // since it is attached to the assembly instruction itself.
    // TODO(srdjan): Prevent modification of ICData object that is
    // referenced in assembly code.
    const ICData& ic_data = ICData::ZoneHandle(Z,
        ICData::NewFrom(*call->ic_data(), class_ids.length()));
    if (class_ids.length() > 1) {
      ic_data.AddCheck(class_ids, function);
    } else {
      ASSERT(class_ids.length() == 1);
      ic_data.AddReceiverCheck(class_ids[0], function);
    }
    call->set_ic_data(&ic_data);
    return true;
  }

  // Check if getter or setter in function's class and class is currently leaf.
  // (CHA-based: only valid while the owner class has no subclasses or
  // implementors.)
  if ((call->token_kind() == Token::kGET) ||
      (call->token_kind() == Token::kSET)) {
    const Class& owner_class = Class::Handle(Z, function().Owner());
    if (!owner_class.is_abstract() &&
        !CHA::HasSubclasses(owner_class) &&
        !CHA::IsImplemented(owner_class)) {
      const Array& args_desc_array = Array::Handle(Z,
          ArgumentsDescriptor::New(call->ArgumentCount(),
                                   call->argument_names()));
      ArgumentsDescriptor args_desc(args_desc_array);
      const Function& function = Function::Handle(Z,
          Resolver::ResolveDynamicForReceiverClass(owner_class,
                                                   call->function_name(),
                                                   args_desc));
      if (function.IsNull()) {
        return false;
      }
      const ICData& ic_data = ICData::ZoneHandle(Z,
          ICData::NewFrom(*call->ic_data(), class_ids.length()));
      ic_data.AddReceiverCheck(owner_class.id(), function);
      call->set_ic_data(&ic_data);
      return true;
    }
  }

  return false;
}
| |
| |
// Tries to narrow 'ic_data' to a single check for receiver class 'cid'.
// Returns a new single-check ICData when 'ic_data' already has a target for
// 'cid'; otherwise returns 'ic_data' unchanged.
const ICData& FlowGraphOptimizer::TrySpecializeICData(const ICData& ic_data,
                                                      intptr_t cid) {
  ASSERT(ic_data.NumArgsTested() == 1);

  if ((ic_data.NumberOfUsedChecks() == 1) && ic_data.HasReceiverClassId(cid)) {
    return ic_data;  // Nothing to do
  }

  const Function& function =
      Function::Handle(Z, ic_data.GetTargetForReceiverClassId(cid));
  // TODO(fschneider): Try looking up the function on the class if it is
  // not found in the ICData.
  if (!function.IsNull()) {
    // Build a fresh ICData holding just the (cid -> function) check, carrying
    // over the recorded deopt reasons.
    const ICData& new_ic_data = ICData::ZoneHandle(Z, ICData::New(
        Function::Handle(Z, ic_data.owner()),
        String::Handle(Z, ic_data.target_name()),
        Object::empty_array(),  // Dummy argument descriptor.
        ic_data.deopt_id(),
        ic_data.NumArgsTested()));
    new_ic_data.SetDeoptReasons(ic_data.DeoptReasons());
    new_ic_data.AddReceiverCheck(cid, function);
    return new_ic_data;
  }

  // No target known for 'cid': specialization failed.
  return ic_data;
}
| |
| |
// Replaces a polymorphic call with a check-free monomorphic version when the
// receiver's cid can be statically inferred and the ICData has a target for
// that cid.
void FlowGraphOptimizer::SpecializePolymorphicInstanceCall(
    PolymorphicInstanceCallInstr* call) {
  if (!FLAG_polymorphic_with_deopt) {
    // Specialization adds receiver checks which can lead to deoptimization.
    return;
  }
  if (!call->with_checks()) {
    return;  // Already specialized.
  }

  const intptr_t receiver_cid =
      call->PushArgumentAt(0)->value()->Type()->ToCid();
  if (receiver_cid == kDynamicCid) {
    return;  // No information about receiver was inferred.
  }

  const ICData& ic_data = TrySpecializeICData(call->ic_data(), receiver_cid);
  if (ic_data.raw() == call->ic_data().raw()) {
    // No specialization.
    return;
  }

  // Specialization succeeded: emit the call without receiver checks.
  const bool with_checks = false;
  PolymorphicInstanceCallInstr* specialized =
      new(Z) PolymorphicInstanceCallInstr(call->instance_call(),
                                          ic_data,
                                          with_checks);
  call->ReplaceWith(specialized, current_iterator());
}
| |
| |
| static BinarySmiOpInstr* AsSmiShiftLeftInstruction(Definition* d) { |
| BinarySmiOpInstr* instr = d->AsBinarySmiOp(); |
| if ((instr != NULL) && (instr->op_kind() == Token::kSHL)) { |
| return instr; |
| } |
| return NULL; |
| } |
| |
| |
| static bool IsPositiveOrZeroSmiConst(Definition* d) { |
| ConstantInstr* const_instr = d->AsConstant(); |
| if ((const_instr != NULL) && (const_instr->value().IsSmi())) { |
| return Smi::Cast(const_instr->value()).Value() >= 0; |
| } |
| return false; |
| } |
| |
| |
// Recognizes the pattern (a << b) & c (or c & (a << b)) where c is a
// non-negative Smi constant: the shift can then truncate to Smi range, and
// a Mint AND can be demoted to a non-deoptimizing Smi AND.
void FlowGraphOptimizer::OptimizeLeftShiftBitAndSmiOp(
    Definition* bit_and_instr,
    Definition* left_instr,
    Definition* right_instr) {
  ASSERT(bit_and_instr != NULL);
  ASSERT((left_instr != NULL) && (right_instr != NULL));

  // Check for pattern, smi_shift_left must be single-use.
  // One of the two operands must be a non-negative Smi constant mask.
  bool is_positive_or_zero = IsPositiveOrZeroSmiConst(left_instr);
  if (!is_positive_or_zero) {
    is_positive_or_zero = IsPositiveOrZeroSmiConst(right_instr);
  }
  if (!is_positive_or_zero) return;

  // The other operand must be a Smi shift-left whose only use is this AND
  // (otherwise truncation would be observable elsewhere).
  BinarySmiOpInstr* smi_shift_left = NULL;
  if (bit_and_instr->InputAt(0)->IsSingleUse()) {
    smi_shift_left = AsSmiShiftLeftInstruction(left_instr);
  }
  if ((smi_shift_left == NULL) && (bit_and_instr->InputAt(1)->IsSingleUse())) {
    smi_shift_left = AsSmiShiftLeftInstruction(right_instr);
  }
  if (smi_shift_left == NULL) return;

  // Pattern recognized.
  smi_shift_left->mark_truncating();
  ASSERT(bit_and_instr->IsBinarySmiOp() || bit_and_instr->IsBinaryMintOp());
  if (bit_and_instr->IsBinaryMintOp()) {
    // Replace Mint op with Smi op.
    BinarySmiOpInstr* smi_op = new(Z) BinarySmiOpInstr(
        Token::kBIT_AND,
        new(Z) Value(left_instr),
        new(Z) Value(right_instr),
        Thread::kNoDeoptId);  // BIT_AND cannot deoptimize.
    bit_and_instr->ReplaceWith(smi_op, current_iterator());
  }
}
| |
| |
| |
| // Used by TryMergeDivMod. |
| // Inserts a load-indexed instruction between a TRUNCDIV or MOD instruction, |
| // and the using instruction. This is an intermediate step before merging. |
| void FlowGraphOptimizer::AppendLoadIndexedForMerged(Definition* instr, |
| intptr_t ix, |
| intptr_t cid) { |
| const intptr_t index_scale = Instance::ElementSizeFor(cid); |
| ConstantInstr* index_instr = |
| flow_graph()->GetConstant(Smi::Handle(Z, Smi::New(ix))); |
| LoadIndexedInstr* load = |
| new(Z) LoadIndexedInstr(new(Z) Value(instr), |
| new(Z) Value(index_instr), |
| index_scale, |
| cid, |
| Thread::kNoDeoptId, |
| instr->token_pos()); |
| instr->ReplaceUsesWith(load); |
| flow_graph()->InsertAfter(instr, load, NULL, FlowGraph::kValue); |
| } |
| |
| |
| void FlowGraphOptimizer::AppendExtractNthOutputForMerged(Definition* instr, |
| intptr_t index, |
| Representation rep, |
| intptr_t cid) { |
| ExtractNthOutputInstr* extract = |
| new(Z) ExtractNthOutputInstr(new(Z) Value(instr), index, rep, cid); |
| instr->ReplaceUsesWith(extract); |
| flow_graph()->InsertAfter(instr, extract, NULL, FlowGraph::kValue); |
| } |
| |
| |
// Dart:
// var x = d % 10;
// var y = d ~/ 10;
// var z = x + y;
//
// IL:
// v4 <- %(v2, v3)
// v5 <- ~/(v2, v3)
// v6 <- +(v4, v5)
//
// IL optimized:
// v4 <- DIVMOD(v2, v3);
// v5 <- LoadIndexed(v4, 0); // ~/ result
// v6 <- LoadIndexed(v4, 1); // % result
// v7 <- +(v5, v6)
// Because of the environment it is important that merged instruction replaces
// first original instruction encountered.
void FlowGraphOptimizer::TryMergeTruncDivMod(
    GrowableArray<BinarySmiOpInstr*>* merge_candidates) {
  if (merge_candidates->length() < 2) {
    // Need at least a TRUNCDIV and a MOD.
    return;
  }
  for (intptr_t i = 0; i < merge_candidates->length(); i++) {
    BinarySmiOpInstr* curr_instr = (*merge_candidates)[i];
    if (curr_instr == NULL) {
      // Instruction was merged already.
      continue;
    }
    ASSERT((curr_instr->op_kind() == Token::kTRUNCDIV) ||
           (curr_instr->op_kind() == Token::kMOD));
    // Check if there is kMOD/kTRUNCDIV binop with same inputs.
    const intptr_t other_kind = (curr_instr->op_kind() == Token::kTRUNCDIV) ?
        Token::kMOD : Token::kTRUNCDIV;
    Definition* left_def = curr_instr->left()->definition();
    Definition* right_def = curr_instr->right()->definition();
    // Scan only later candidates; earlier ones were already considered as
    // 'curr_instr'.
    for (intptr_t k = i + 1; k < merge_candidates->length(); k++) {
      BinarySmiOpInstr* other_binop = (*merge_candidates)[k];
      // 'other_binop' can be NULL if it was already merged.
      if ((other_binop != NULL) &&
          (other_binop->op_kind() == other_kind) &&
          (other_binop->left()->definition() == left_def) &&
          (other_binop->right()->definition() == right_def)) {
        (*merge_candidates)[k] = NULL;  // Clear it.
        ASSERT(curr_instr->HasUses());
        // Route uses of each original op through a LoadIndexed on the merged
        // result (see the IL sketch above).
        AppendExtractNthOutputForMerged(
            curr_instr,
            MergedMathInstr::OutputIndexOf(curr_instr->op_kind()),
            kTagged, kSmiCid);
        ASSERT(other_binop->HasUses());
        AppendExtractNthOutputForMerged(
            other_binop,
            MergedMathInstr::OutputIndexOf(other_binop->op_kind()),
            kTagged, kSmiCid);

        ZoneGrowableArray<Value*>* args = new(Z) ZoneGrowableArray<Value*>(2);
        args->Add(new(Z) Value(curr_instr->left()->definition()));
        args->Add(new(Z) Value(curr_instr->right()->definition()));

        // Replace with TruncDivMod.
        MergedMathInstr* div_mod = new(Z) MergedMathInstr(
            args,
            curr_instr->deopt_id(),
            MergedMathInstr::kTruncDivMod);
        curr_instr->ReplaceWith(div_mod, current_iterator());
        other_binop->ReplaceUsesWith(div_mod);
        other_binop->RemoveFromGraph();
        // Only one merge possible. Because canonicalization happens later,
        // more candidates are possible.
        // TODO(srdjan): Allow merging of trunc-div/mod into truncDivMod.
        break;
      }
    }
  }
}
| |
| |
// Tries to merge MathUnary operations, in this case sine and cosine of the
// same input, into a single SinCos instruction (mirrors TryMergeTruncDivMod).
void FlowGraphOptimizer::TryMergeMathUnary(
    GrowableArray<MathUnaryInstr*>* merge_candidates) {
  if (!FlowGraphCompiler::SupportsSinCos() || !CanUnboxDouble() ||
      !FLAG_merge_sin_cos) {
    return;
  }
  if (merge_candidates->length() < 2) {
    // Need at least a SIN and a COS.
    return;
  }
  for (intptr_t i = 0; i < merge_candidates->length(); i++) {
    MathUnaryInstr* curr_instr = (*merge_candidates)[i];
    if (curr_instr == NULL) {
      // Instruction was merged already.
      continue;
    }
    const intptr_t kind = curr_instr->kind();
    ASSERT((kind == MathUnaryInstr::kSin) ||
           (kind == MathUnaryInstr::kCos));
    // Check if there is sin/cos binop with same inputs.
    const intptr_t other_kind = (kind == MathUnaryInstr::kSin) ?
        MathUnaryInstr::kCos : MathUnaryInstr::kSin;
    Definition* def = curr_instr->value()->definition();
    for (intptr_t k = i + 1; k < merge_candidates->length(); k++) {
      MathUnaryInstr* other_op = (*merge_candidates)[k];
      // 'other_op' can be NULL if it was already merged.
      if ((other_op != NULL) && (other_op->kind() == other_kind) &&
          (other_op->value()->definition() == def)) {
        (*merge_candidates)[k] = NULL;  // Clear it.
        ASSERT(curr_instr->HasUses());
        // Route each original op's uses through an output extraction on the
        // merged instruction.
        AppendExtractNthOutputForMerged(curr_instr,
                                        MergedMathInstr::OutputIndexOf(kind),
                                        kUnboxedDouble, kDoubleCid);
        ASSERT(other_op->HasUses());
        AppendExtractNthOutputForMerged(
            other_op,
            MergedMathInstr::OutputIndexOf(other_kind),
            kUnboxedDouble, kDoubleCid);
        ZoneGrowableArray<Value*>* args = new(Z) ZoneGrowableArray<Value*>(1);
        args->Add(new(Z) Value(curr_instr->value()->definition()));
        // Replace with SinCos.
        MergedMathInstr* sin_cos =
            new(Z) MergedMathInstr(args,
                                   curr_instr->DeoptimizationTarget(),
                                   MergedMathInstr::kSinCos);
        curr_instr->ReplaceWith(sin_cos, current_iterator());
        other_op->ReplaceUsesWith(sin_cos);
        other_op->RemoveFromGraph();
        // Only one merge possible. Because canonicalization happens later,
        // more candidates are possible.
        // TODO(srdjan): Allow merging of sin/cos into sincos.
        break;
      }
    }
  }
}
| |
| |
| // Optimize (a << b) & c pattern: if c is a positive Smi or zero, then the |
| // shift can be a truncating Smi shift-left and result is always Smi. |
| // Merging occurs only per basic-block. |
| void FlowGraphOptimizer::TryOptimizePatterns() { |
| if (!FLAG_truncating_left_shift) return; |
| ASSERT(current_iterator_ == NULL); |
| GrowableArray<BinarySmiOpInstr*> div_mod_merge; |
| GrowableArray<MathUnaryInstr*> sin_cos_merge; |
| for (intptr_t i = 0; i < block_order_.length(); ++i) { |
| // Merging only per basic-block. |
| div_mod_merge.Clear(); |
| sin_cos_merge.Clear(); |
| BlockEntryInstr* entry = block_order_[i]; |
| ForwardInstructionIterator it(entry); |
| current_iterator_ = ⁢ |
| for (; !it.Done(); it.Advance()) { |
| if (it.Current()->IsBinarySmiOp()) { |
| BinarySmiOpInstr* binop = it.Current()->AsBinarySmiOp(); |
| if (binop->op_kind() == Token::kBIT_AND) { |
| OptimizeLeftShiftBitAndSmiOp(binop, |
| binop->left()->definition(), |
| binop->right()->definition()); |
| } else if ((binop->op_kind() == Token::kTRUNCDIV) || |
| (binop->op_kind() == Token::kMOD)) { |
| if (binop->HasUses()) { |
| div_mod_merge.Add(binop); |
| } |
| } |
| } else if (it.Current()->IsBinaryMintOp()) { |
| BinaryMintOpInstr* mintop = it.Current()->AsBinaryMintOp(); |
| if (mintop->op_kind() == Token::kBIT_AND) { |
| OptimizeLeftShiftBitAndSmiOp(mintop, |
| mintop->left()->definition(), |
| mintop->right()->definition()); |
| } |
| } else if (it.Current()->IsMathUnary()) { |
| MathUnaryInstr* math_unary = it.Current()->AsMathUnary(); |
| if ((math_unary->kind() == MathUnaryInstr::kSin) || |
| (math_unary->kind() == MathUnaryInstr::kCos)) { |
| if (math_unary->HasUses()) { |
| sin_cos_merge.Add(math_unary); |
| } |
| } |
| } |
| } |
| TryMergeTruncDivMod(&div_mod_merge); |
| TryMergeMathUnary(&sin_cos_merge); |
| current_iterator_ = NULL; |
| } |
| } |
| |
| |
| static void EnsureSSATempIndex(FlowGraph* graph, |
| Definition* defn, |
| Definition* replacement) { |
| if ((replacement->ssa_temp_index() == -1) && |
| (defn->ssa_temp_index() != -1)) { |
| graph->AllocateSSAIndexes(replacement); |
| } |
| } |
| |
| |
// Replaces the iterator's current instruction with 'replacement' (which may
// be NULL for pure removal): redirects uses when both are definitions, then
// unlinks 'current' from the graph.
static void ReplaceCurrentInstruction(ForwardInstructionIterator* iterator,
                                      Instruction* current,
                                      Instruction* replacement,
                                      FlowGraph* graph) {
  Definition* current_defn = current->AsDefinition();
  if ((replacement != NULL) && (current_defn != NULL)) {
    // Definition-for-definition swap: move all uses over and make sure the
    // replacement has an SSA index to print/refer to.
    Definition* replacement_defn = replacement->AsDefinition();
    ASSERT(replacement_defn != NULL);
    current_defn->ReplaceUsesWith(replacement_defn);
    EnsureSSATempIndex(graph, current_defn, replacement_defn);

    if (FLAG_trace_optimization) {
      THR_Print("Replacing v%" Pd " with v%" Pd "\n",
                current_defn->ssa_temp_index(),
                replacement_defn->ssa_temp_index());
    }
  } else if (FLAG_trace_optimization) {
    if (current_defn == NULL) {
      // Removing a non-definition (e.g. a no-op control instruction).
      THR_Print("Removing %s\n", current->DebugName());
    } else {
      ASSERT(!current_defn->HasUses());
      THR_Print("Removing v%" Pd ".\n", current_defn->ssa_temp_index());
    }
  }
  iterator->RemoveCurrentFromGraph();
}
| |
| |
| bool FlowGraphOptimizer::Canonicalize() { |
| bool changed = false; |
| for (intptr_t i = 0; i < block_order_.length(); ++i) { |
| BlockEntryInstr* entry = block_order_[i]; |
| for (ForwardInstructionIterator it(entry); !it.Done(); it.Advance()) { |
| Instruction* current = it.Current(); |
| if (current->HasUnmatchedInputRepresentations()) { |
| // Can't canonicalize this instruction until all conversions for its |
| // inputs are inserted. |
| continue; |
| } |
| |
| Instruction* replacement = current->Canonicalize(flow_graph()); |
| |
| if (replacement != current) { |
| // For non-definitions Canonicalize should return either NULL or |
| // this. |
| ASSERT((replacement == NULL) || current->IsDefinition()); |
| ReplaceCurrentInstruction(&it, current, replacement, flow_graph_); |
| changed = true; |
| } |
| } |
| } |
| return changed; |
| } |
| |
| |
| static bool IsUnboxedInteger(Representation rep) { |
| return (rep == kUnboxedInt32) || |
| (rep == kUnboxedUint32) || |
| (rep == kUnboxedMint); |
| } |
| |
| |
// Inserts a representation-conversion instruction for 'use' so the value
// flows from representation 'from' to 'to'. For phi uses the conversion is
// placed in the predecessor block; otherwise right before the using
// instruction.
void FlowGraphOptimizer::InsertConversion(Representation from,
                                          Representation to,
                                          Value* use,
                                          bool is_environment_use) {
  Instruction* insert_before;
  Instruction* deopt_target;
  PhiInstr* phi = use->instruction()->AsPhi();
  if (phi != NULL) {
    ASSERT(phi->is_alive());
    // For phis conversions have to be inserted in the predecessor.
    insert_before =
        phi->block()->PredecessorAt(use->use_index())->last_instruction();
    deopt_target = NULL;
  } else {
    deopt_target = insert_before = use->instruction();
  }

  Definition* converted = NULL;
  if (IsUnboxedInteger(from) && IsUnboxedInteger(to)) {
    // Integer-width change; only narrowing to int32 can deoptimize.
    const intptr_t deopt_id = (to == kUnboxedInt32) && (deopt_target != NULL) ?
        deopt_target->DeoptimizationTarget() : Thread::kNoDeoptId;
    converted = new(Z) UnboxedIntConverterInstr(from,
                                                to,
                                                use->CopyWithType(),
                                                deopt_id);
  } else if ((from == kUnboxedInt32) && (to == kUnboxedDouble)) {
    converted = new Int32ToDoubleInstr(use->CopyWithType());
  } else if ((from == kUnboxedMint) &&
             (to == kUnboxedDouble) &&
             CanConvertUnboxedMintToDouble()) {
    const intptr_t deopt_id = (deopt_target != NULL) ?
        deopt_target->DeoptimizationTarget() : Thread::kNoDeoptId;
    ASSERT(CanUnboxDouble());
    converted = new MintToDoubleInstr(use->CopyWithType(), deopt_id);
  } else if ((from == kTagged) && Boxing::Supports(to)) {
    const intptr_t deopt_id = (deopt_target != NULL) ?
        deopt_target->DeoptimizationTarget() : Thread::kNoDeoptId;
    converted = UnboxInstr::Create(to, use->CopyWithType(), deopt_id);
  } else if ((to == kTagged) && Boxing::Supports(from)) {
    converted = BoxInstr::Create(from, use->CopyWithType());
  } else {
    // We have failed to find a suitable conversion instruction.
    // Insert two "dummy" conversion instructions with the correct
    // "from" and "to" representation. The inserted instructions will
    // trigger a deoptimization if executed. See #12417 for a discussion.
    const intptr_t deopt_id = (deopt_target != NULL) ?
        deopt_target->DeoptimizationTarget() : Thread::kNoDeoptId;
    ASSERT(Boxing::Supports(from));
    ASSERT(Boxing::Supports(to));
    Definition* boxed = BoxInstr::Create(from, use->CopyWithType());
    use->BindTo(boxed);
    InsertBefore(insert_before, boxed, NULL, FlowGraph::kValue);
    converted = UnboxInstr::Create(to, new(Z) Value(boxed), deopt_id);
  }
  ASSERT(converted != NULL);
  InsertBefore(insert_before, converted, use->instruction()->env(),
               FlowGraph::kValue);
  // Rebind the use to the freshly inserted conversion.
  if (is_environment_use) {
    use->BindToEnvironment(converted);
  } else {
    use->BindTo(converted);
  }

  if ((to == kUnboxedInt32) && (phi != NULL)) {
    // Int32 phis are unboxed optimistically. Ensure that unboxing
    // has deoptimization target attached from the goto instruction.
    flow_graph_->CopyDeoptTarget(converted, insert_before);
  }
}
| |
| |
| void FlowGraphOptimizer::ConvertUse(Value* use, Representation from_rep) { |
| const Representation to_rep = |
| use->instruction()->RequiredInputRepresentation(use->use_index()); |
| if (from_rep == to_rep || to_rep == kNoRepresentation) { |
| return; |
| } |
| InsertConversion(from_rep, to_rep, use, /*is_environment_use=*/ false); |
| } |
| |
| |
| void FlowGraphOptimizer::ConvertEnvironmentUse(Value* use, |
| Representation from_rep) { |
| const Representation to_rep = kTagged; |
| if (from_rep == to_rep) { |
| return; |
| } |
| InsertConversion(from_rep, to_rep, use, /*is_environment_use=*/ true); |
| } |
| |
| |
// Inserts representation conversions for every use of 'def': all input uses,
// and (when the graph has catch entries) environment uses at throwing calls
// inside try-blocks.
void FlowGraphOptimizer::InsertConversionsFor(Definition* def) {
  const Representation from_rep = def->representation();

  for (Value::Iterator it(def->input_use_list());
       !it.Done();
       it.Advance()) {
    ConvertUse(it.Current(), from_rep);
  }

  // SuccessorCount() > 1 indicates the graph entry branches to catch entries
  // in addition to the normal entry.
  if (flow_graph()->graph_entry()->SuccessorCount() > 1) {
    for (Value::Iterator it(def->env_use_list());
         !it.Done();
         it.Advance()) {
      Value* use = it.Current();
      if (use->instruction()->MayThrow() &&
          use->instruction()->GetBlock()->InsideTryBlock()) {
        // Environment uses at calls inside try-blocks must be converted to
        // tagged representation.
        ConvertEnvironmentUse(it.Current(), from_rep);
      }
    }
  }
}
| |
| |
// Decides and sets the representation for 'phi': unboxes double/SIMD phis
// when the backend supports it, and on 32-bit platforms conservatively
// unboxes integer phis that meet the criteria described inline.
static void UnboxPhi(PhiInstr* phi) {
  Representation unboxed = phi->representation();

  switch (phi->Type()->ToCid()) {
    case kDoubleCid:
      if (CanUnboxDouble()) {
        unboxed = kUnboxedDouble;
      }
      break;
    case kFloat32x4Cid:
      if (ShouldInlineSimd()) {
        unboxed = kUnboxedFloat32x4;
      }
      break;
    case kInt32x4Cid:
      if (ShouldInlineSimd()) {
        unboxed = kUnboxedInt32x4;
      }
      break;
    case kFloat64x2Cid:
      if (ShouldInlineSimd()) {
        unboxed = kUnboxedFloat64x2;
      }
      break;
  }

  // kSmiBits < 32 identifies 32-bit platforms, where Smis cannot hold the
  // full integer range and unboxing a phi can pay off.
  if ((kSmiBits < 32) &&
      (unboxed == kTagged) &&
      phi->Type()->IsInt() &&
      RangeUtils::Fits(phi->range(), RangeBoundary::kRangeBoundaryInt64)) {
    // On 32-bit platforms conservatively unbox phis that:
    // - are proven to be of type Int;
    // - fit into 64bits range;
    // - have either constants or Box() operations as inputs;
    // - have at least one Box() operation as an input;
    // - are used in at least 1 Unbox() operation.
    bool should_unbox = false;
    for (intptr_t i = 0; i < phi->InputCount(); i++) {
      Definition* input = phi->InputAt(i)->definition();
      if (input->IsBox() &&
          RangeUtils::Fits(input->range(),
                           RangeBoundary::kRangeBoundaryInt64)) {
        should_unbox = true;
      } else if (!input->IsConstant()) {
        // Any other kind of input disqualifies the phi.
        should_unbox = false;
        break;
      }
    }

    if (should_unbox) {
      // We checked inputs. Check if phi is used in at least one unbox
      // operation.
      bool has_unboxed_use = false;
      for (Value* use = phi->input_use_list();
           use != NULL;
           use = use->next_use()) {
        Instruction* instr = use->instruction();
        if (instr->IsUnbox()) {
          has_unboxed_use = true;
          break;
        } else if (IsUnboxedInteger(
            instr->RequiredInputRepresentation(use->use_index()))) {
          has_unboxed_use = true;
          break;
        }
      }

      if (!has_unboxed_use) {
        should_unbox = false;
      }
    }

    if (should_unbox) {
      // Prefer the narrower int32 representation when the range allows it.
      unboxed =
          RangeUtils::Fits(phi->range(), RangeBoundary::kRangeBoundaryInt32)
          ? kUnboxedInt32 : kUnboxedMint;
    }
  }

  phi->set_representation(unboxed);
}
| |
| |
// Pass that picks representations for all phis and then inserts the
// box/unbox/convert instructions needed so every use receives its required
// representation.
void FlowGraphOptimizer::SelectRepresentations() {
  // Conservatively unbox all phis that were proven to be of Double,
  // Float32x4, or Int32x4 type.
  for (intptr_t i = 0; i < block_order_.length(); ++i) {
    JoinEntryInstr* join_entry = block_order_[i]->AsJoinEntry();
    if (join_entry != NULL) {
      for (PhiIterator it(join_entry); !it.Done(); it.Advance()) {
        PhiInstr* phi = it.Current();
        UnboxPhi(phi);
      }
    }
  }

  // Process all instructions and insert conversions where needed.
  // block_order_[0] is the graph entry by construction.
  GraphEntryInstr* graph_entry = block_order_[0]->AsGraphEntry();

  // Visit incoming parameters and constants.
  for (intptr_t i = 0; i < graph_entry->initial_definitions()->length(); i++) {
    InsertConversionsFor((*graph_entry->initial_definitions())[i]);
  }

  for (intptr_t i = 0; i < block_order_.length(); ++i) {
    BlockEntryInstr* entry = block_order_[i];
    // Phis at join entries.
    JoinEntryInstr* join_entry = entry->AsJoinEntry();
    if (join_entry != NULL) {
      for (PhiIterator it(join_entry); !it.Done(); it.Advance()) {
        PhiInstr* phi = it.Current();
        ASSERT(phi != NULL);
        ASSERT(phi->is_alive());
        InsertConversionsFor(phi);
      }
    }
    // Initial definitions of catch entries (exception/stacktrace parameters).
    CatchBlockEntryInstr* catch_entry = entry->AsCatchBlockEntry();
    if (catch_entry != NULL) {
      for (intptr_t i = 0;
           i < catch_entry->initial_definitions()->length();
           i++) {
        InsertConversionsFor((*catch_entry->initial_definitions())[i]);
      }
    }
    // Ordinary instructions in the block body.
    for (ForwardInstructionIterator it(entry); !it.Done(); it.Advance()) {
      Definition* def = it.Current()->AsDefinition();
      if (def != NULL) {
        InsertConversionsFor(def);
      }
    }
  }
}
| |
| |
| static bool ClassIdIsOneOf(intptr_t class_id, |
| const GrowableArray<intptr_t>& class_ids) { |
| for (intptr_t i = 0; i < class_ids.length(); i++) { |
| ASSERT(class_ids[i] != kIllegalCid); |
| if (class_ids[i] == class_id) { |
| return true; |
| } |
| } |
| return false; |
| } |
| |
| |
| // Returns true if ICData tests two arguments and all ICData cids are in the |
| // required sets 'receiver_class_ids' or 'argument_class_ids', respectively. |
| static bool ICDataHasOnlyReceiverArgumentClassIds( |
| const ICData& ic_data, |
| const GrowableArray<intptr_t>& receiver_class_ids, |
| const GrowableArray<intptr_t>& argument_class_ids) { |
| if (ic_data.NumArgsTested() != 2) { |
| return false; |
| } |
| Function& target = Function::Handle(); |
| const intptr_t len = ic_data.NumberOfChecks(); |
| GrowableArray<intptr_t> class_ids; |
| for (intptr_t i = 0; i < len; i++) { |
| if (ic_data.IsUsedAt(i)) { |
| ic_data.GetCheckAt(i, &class_ids, &target); |
| ASSERT(class_ids.length() == 2); |
| if (!ClassIdIsOneOf(class_ids[0], receiver_class_ids) || |
| !ClassIdIsOneOf(class_ids[1], argument_class_ids)) { |
| return false; |
| } |
| } |
| } |
| return true; |
| } |
| |
| |
| static bool ICDataHasReceiverArgumentClassIds(const ICData& ic_data, |
| intptr_t receiver_class_id, |
| intptr_t argument_class_id) { |
| if (ic_data.NumArgsTested() != 2) { |
| return false; |
| } |
| Function& target = Function::Handle(); |
| const intptr_t len = ic_data.NumberOfChecks(); |
| for (intptr_t i = 0; i < len; i++) { |
| if (ic_data.IsUsedAt(i)) { |
| GrowableArray<intptr_t> class_ids; |
| ic_data.GetCheckAt(i, &class_ids, &target); |
| ASSERT(class_ids.length() == 2); |
| if ((class_ids[0] == receiver_class_id) && |
| (class_ids[1] == argument_class_id)) { |
| return true; |
| } |
| } |
| } |
| return false; |
| } |
| |
| |
| static bool HasOnlyOneSmi(const ICData& ic_data) { |
| return (ic_data.NumberOfUsedChecks() == 1) |
| && ic_data.HasReceiverClassId(kSmiCid); |
| } |
| |
| |
| static bool HasOnlySmiOrMint(const ICData& ic_data) { |
| if (ic_data.NumberOfUsedChecks() == 1) { |
| return ic_data.HasReceiverClassId(kSmiCid) |
| || ic_data.HasReceiverClassId(kMintCid); |
| } |
| return (ic_data.NumberOfUsedChecks() == 2) |
| && ic_data.HasReceiverClassId(kSmiCid) |
| && ic_data.HasReceiverClassId(kMintCid); |
| } |
| |
| |
| static bool HasOnlyTwoOf(const ICData& ic_data, intptr_t cid) { |
| if (ic_data.NumberOfUsedChecks() != 1) { |
| return false; |
| } |
| GrowableArray<intptr_t> first; |
| GrowableArray<intptr_t> second; |
| ic_data.GetUsedCidsForTwoArgs(&first, &second); |
| return (first[0] == cid) && (second[0] == cid); |
| } |
| |
| // Returns false if the ICData contains anything other than the 4 combinations |
| // of Mint and Smi for the receiver and argument classes. |
| static bool HasTwoMintOrSmi(const ICData& ic_data) { |
| GrowableArray<intptr_t> first; |
| GrowableArray<intptr_t> second; |
| ic_data.GetUsedCidsForTwoArgs(&first, &second); |
| for (intptr_t i = 0; i < first.length(); i++) { |
| if ((first[i] != kSmiCid) && (first[i] != kMintCid)) { |
| return false; |
| } |
| if ((second[i] != kSmiCid) && (second[i] != kMintCid)) { |
| return false; |
| } |
| } |
| return true; |
| } |
| |
| |
| // Returns false if the ICData contains anything other than the 4 combinations |
| // of Double and Smi for the receiver and argument classes. |
| static bool HasTwoDoubleOrSmi(const ICData& ic_data) { |
| GrowableArray<intptr_t> class_ids(2); |
| class_ids.Add(kSmiCid); |
| class_ids.Add(kDoubleCid); |
| return ICDataHasOnlyReceiverArgumentClassIds(ic_data, class_ids, class_ids); |
| } |
| |
| |
| static bool HasOnlyOneDouble(const ICData& ic_data) { |
| return (ic_data.NumberOfUsedChecks() == 1) |
| && ic_data.HasReceiverClassId(kDoubleCid); |
| } |
| |
| |
| static bool ShouldSpecializeForDouble(const ICData& ic_data) { |
| // Don't specialize for double if we can't unbox them. |
| if (!CanUnboxDouble()) { |
| return false; |
| } |
| |
| // Unboxed double operation can't handle case of two smis. |
| if (ICDataHasReceiverArgumentClassIds(ic_data, kSmiCid, kSmiCid)) { |
| return false; |
| } |
| |
| // Check that it have seen only smis and doubles. |
| return HasTwoDoubleOrSmi(ic_data); |
| } |
| |
| |
| void FlowGraphOptimizer::ReplaceCall(Definition* call, |
| Definition* replacement) { |
| // Remove the original push arguments. |
| for (intptr_t i = 0; i < call->ArgumentCount(); ++i) { |
| PushArgumentInstr* push = call->PushArgumentAt(i); |
| push->ReplaceUsesWith(push->value()->definition()); |
| push->RemoveFromGraph(); |
| } |
| call->ReplaceWith(replacement, current_iterator()); |
| } |
| |
| |
| void FlowGraphOptimizer::AddCheckSmi(Definition* to_check, |
| intptr_t deopt_id, |
| Environment* deopt_environment, |
| Instruction* insert_before) { |
| if (to_check->Type()->ToCid() != kSmiCid) { |
| InsertBefore(insert_before, |
| new(Z) CheckSmiInstr(new(Z) Value(to_check), |
| deopt_id, |
| insert_before->token_pos()), |
| deopt_environment, |
| FlowGraph::kEffect); |
| } |
| } |
| |
| |
| Instruction* FlowGraphOptimizer::GetCheckClass(Definition* to_check, |
| const ICData& unary_checks, |
| intptr_t deopt_id, |
| intptr_t token_pos) { |
| if ((unary_checks.NumberOfUsedChecks() == 1) && |
| unary_checks.HasReceiverClassId(kSmiCid)) { |
| return new(Z) CheckSmiInstr(new(Z) Value(to_check), |
| deopt_id, |
| token_pos); |
| } |
| return new(Z) CheckClassInstr( |
| new(Z) Value(to_check), deopt_id, unary_checks, token_pos); |
| } |
| |
| |
| void FlowGraphOptimizer::AddCheckClass(Definition* to_check, |
| const ICData& unary_checks, |
| intptr_t deopt_id, |
| Environment* deopt_environment, |
| Instruction* insert_before) { |
| // Type propagation has not run yet, we cannot eliminate the check. |
| Instruction* check = GetCheckClass( |
| to_check, unary_checks, deopt_id, insert_before->token_pos()); |
| InsertBefore(insert_before, check, deopt_environment, FlowGraph::kEffect); |
| } |
| |
| |
| void FlowGraphOptimizer::AddReceiverCheck(InstanceCallInstr* call) { |
| AddCheckClass(call->ArgumentAt(0), |
| ICData::ZoneHandle(Z, call->ic_data()->AsUnaryClassChecks()), |
| call->deopt_id(), |
| call->env(), |
| call); |
| } |
| |
| |
| static bool ArgIsAlways(intptr_t cid, |
| const ICData& ic_data, |
| intptr_t arg_number) { |
| ASSERT(ic_data.NumArgsTested() > arg_number); |
| if (ic_data.NumberOfUsedChecks() == 0) { |
| return false; |
| } |
| const intptr_t num_checks = ic_data.NumberOfChecks(); |
| for (intptr_t i = 0; i < num_checks; i++) { |
| if (ic_data.IsUsedAt(i) && ic_data.GetClassIdAt(i, arg_number) != cid) { |
| return false; |
| } |
| } |
| return true; |
| } |
| |
| |
| static bool CanUnboxInt32() { |
| // Int32/Uint32 can be unboxed if it fits into a smi or the platform |
| // supports unboxed mints. |
| return (kSmiBits >= 32) || FlowGraphCompiler::SupportsUnboxedMints(); |
| } |
| |
| |
// Pure lookup table: maps a recognized indexed get/set method to the class
// id of the array type it operates on. Returns kIllegalCid for any kind
// that is not a recognized indexed accessor.
static intptr_t MethodKindToCid(MethodRecognizer::Kind kind) {
  switch (kind) {
    case MethodRecognizer::kImmutableArrayGetIndexed:
      return kImmutableArrayCid;

    case MethodRecognizer::kObjectArrayGetIndexed:
    case MethodRecognizer::kObjectArraySetIndexed:
      return kArrayCid;

    case MethodRecognizer::kGrowableArrayGetIndexed:
    case MethodRecognizer::kGrowableArraySetIndexed:
      return kGrowableObjectArrayCid;

    case MethodRecognizer::kFloat32ArrayGetIndexed:
    case MethodRecognizer::kFloat32ArraySetIndexed:
      return kTypedDataFloat32ArrayCid;

    case MethodRecognizer::kFloat64ArrayGetIndexed:
    case MethodRecognizer::kFloat64ArraySetIndexed:
      return kTypedDataFloat64ArrayCid;

    case MethodRecognizer::kInt8ArrayGetIndexed:
    case MethodRecognizer::kInt8ArraySetIndexed:
      return kTypedDataInt8ArrayCid;

    case MethodRecognizer::kUint8ArrayGetIndexed:
    case MethodRecognizer::kUint8ArraySetIndexed:
      return kTypedDataUint8ArrayCid;

    case MethodRecognizer::kUint8ClampedArrayGetIndexed:
    case MethodRecognizer::kUint8ClampedArraySetIndexed:
      return kTypedDataUint8ClampedArrayCid;

    case MethodRecognizer::kExternalUint8ArrayGetIndexed:
    case MethodRecognizer::kExternalUint8ArraySetIndexed:
      return kExternalTypedDataUint8ArrayCid;

    case MethodRecognizer::kExternalUint8ClampedArrayGetIndexed:
    case MethodRecognizer::kExternalUint8ClampedArraySetIndexed:
      return kExternalTypedDataUint8ClampedArrayCid;

    case MethodRecognizer::kInt16ArrayGetIndexed:
    case MethodRecognizer::kInt16ArraySetIndexed:
      return kTypedDataInt16ArrayCid;

    case MethodRecognizer::kUint16ArrayGetIndexed:
    case MethodRecognizer::kUint16ArraySetIndexed:
      return kTypedDataUint16ArrayCid;

    case MethodRecognizer::kInt32ArrayGetIndexed:
    case MethodRecognizer::kInt32ArraySetIndexed:
      return kTypedDataInt32ArrayCid;

    case MethodRecognizer::kUint32ArrayGetIndexed:
    case MethodRecognizer::kUint32ArraySetIndexed:
      return kTypedDataUint32ArrayCid;

    case MethodRecognizer::kInt64ArrayGetIndexed:
    case MethodRecognizer::kInt64ArraySetIndexed:
      return kTypedDataInt64ArrayCid;

    case MethodRecognizer::kFloat32x4ArrayGetIndexed:
    case MethodRecognizer::kFloat32x4ArraySetIndexed:
      return kTypedDataFloat32x4ArrayCid;

    case MethodRecognizer::kInt32x4ArrayGetIndexed:
    case MethodRecognizer::kInt32x4ArraySetIndexed:
      return kTypedDataInt32x4ArrayCid;

    case MethodRecognizer::kFloat64x2ArrayGetIndexed:
    case MethodRecognizer::kFloat64x2ArraySetIndexed:
      return kTypedDataFloat64x2ArrayCid;

    default:
      break;
  }
  return kIllegalCid;
}
| |
| |
| bool FlowGraphOptimizer::TryReplaceWithIndexedOp(InstanceCallInstr* call) { |
| // Check for monomorphic IC data. |
| if (!call->HasICData()) return false; |
| const ICData& ic_data = |
| ICData::Handle(Z, call->ic_data()->AsUnaryClassChecks()); |
| if (ic_data.NumberOfChecks() != 1) { |
| return false; |
| } |
| return TryReplaceInstanceCallWithInline(call); |
| } |
| |
| |
// Inlines a recognized []= operator: emits an (optional, checked-mode only)
// assignability check for the stored value, the index smi/bounds checks,
// value-class checks or unboxing conversions for typed-data stores, and the
// final StoreIndexed. On success *entry/*last delimit the inlined fragment.
// Always returns true.
bool FlowGraphOptimizer::InlineSetIndexed(
    MethodRecognizer::Kind kind,
    const Function& target,
    Instruction* call,
    Definition* receiver,
    intptr_t token_pos,
    const ICData& value_check,
    TargetEntryInstr** entry,
    Definition** last) {
  intptr_t array_cid = MethodKindToCid(kind);
  ASSERT(array_cid != kIllegalCid);

  // Call argument layout: receiver (arg 0), index (arg 1), value (arg 2).
  Definition* array = receiver;
  Definition* index = call->ArgumentAt(1);
  Definition* stored_value = call->ArgumentAt(2);

  *entry = new(Z) TargetEntryInstr(flow_graph()->allocate_block_id(),
                                   call->GetBlock()->try_index());
  (*entry)->InheritDeoptTarget(Z, call);
  Instruction* cursor = *entry;
  if (I->flags().type_checks()) {
    // Only type check for the value. A type check for the index is not
    // needed here because we insert a deoptimizing smi-check for the case
    // the index is not a smi.
    const AbstractType& value_type =
        AbstractType::ZoneHandle(Z, target.ParameterTypeAt(2));
    Definition* instantiator = NULL;
    Definition* type_args = NULL;
    switch (array_cid) {
      case kArrayCid:
      case kGrowableObjectArrayCid: {
        // Generic arrays: load the array's type arguments so the element
        // type can be instantiated for the assignability check.
        const Class& instantiator_class = Class::Handle(Z, target.Owner());
        intptr_t type_arguments_field_offset =
            instantiator_class.type_arguments_field_offset();
        LoadFieldInstr* load_type_args =
            new(Z) LoadFieldInstr(new(Z) Value(array),
                                  type_arguments_field_offset,
                                  Type::ZoneHandle(Z),  // No type.
                                  call->token_pos());
        cursor = flow_graph()->AppendTo(cursor,
                                        load_type_args,
                                        NULL,
                                        FlowGraph::kValue);

        instantiator = array;
        type_args = load_type_args;
        break;
      }
      case kTypedDataInt8ArrayCid:
      case kTypedDataUint8ArrayCid:
      case kTypedDataUint8ClampedArrayCid:
      case kExternalTypedDataUint8ArrayCid:
      case kExternalTypedDataUint8ClampedArrayCid:
      case kTypedDataInt16ArrayCid:
      case kTypedDataUint16ArrayCid:
      case kTypedDataInt32ArrayCid:
      case kTypedDataUint32ArrayCid:
      case kTypedDataInt64ArrayCid:
        ASSERT(value_type.IsIntType());
        // Fall through.
      case kTypedDataFloat32ArrayCid:
      case kTypedDataFloat64ArrayCid: {
        // Typed data element types are instantiated; no instantiator needed.
        type_args = instantiator = flow_graph_->constant_null();
        ASSERT((array_cid != kTypedDataFloat32ArrayCid &&
                array_cid != kTypedDataFloat64ArrayCid) ||
               value_type.IsDoubleType());
        ASSERT(value_type.IsInstantiated());
        break;
      }
      case kTypedDataFloat32x4ArrayCid: {
        type_args = instantiator = flow_graph_->constant_null();
        ASSERT((array_cid != kTypedDataFloat32x4ArrayCid) ||
               value_type.IsFloat32x4Type());
        ASSERT(value_type.IsInstantiated());
        break;
      }
      case kTypedDataFloat64x2ArrayCid: {
        type_args = instantiator = flow_graph_->constant_null();
        ASSERT((array_cid != kTypedDataFloat64x2ArrayCid) ||
               value_type.IsFloat64x2Type());
        ASSERT(value_type.IsInstantiated());
        break;
      }
      default:
        // TODO(fschneider): Add support for other array types.
        UNREACHABLE();
    }
    AssertAssignableInstr* assert_value =
        new(Z) AssertAssignableInstr(token_pos,
                                     new(Z) Value(stored_value),
                                     new(Z) Value(instantiator),
                                     new(Z) Value(type_args),
                                     value_type,
                                     Symbols::Value(),
                                     call->deopt_id());
    cursor = flow_graph()->AppendTo(cursor,
                                    assert_value,
                                    call->env(),
                                    FlowGraph::kValue);
  }

  // Emit index smi check, bounds check, and backing-store load; the array
  // cid may change (e.g. growable array -> backing fixed-length array).
  array_cid = PrepareInlineIndexedOp(call,
                                     array_cid,
                                     &array,
                                     index,
                                     &cursor);

  // Check if store barrier is needed. Byte arrays don't need a store barrier.
  StoreBarrierType needs_store_barrier =
      (RawObject::IsTypedDataClassId(array_cid) ||
       RawObject::IsTypedDataViewClassId(array_cid) ||
       RawObject::IsExternalTypedDataClassId(array_cid)) ? kNoStoreBarrier
                                                         : kEmitStoreBarrier;

  // No need to class check stores to Int32 and Uint32 arrays because
  // we insert unboxing instructions below which include a class check.
  if ((array_cid != kTypedDataUint32ArrayCid) &&
      (array_cid != kTypedDataInt32ArrayCid) &&
      !value_check.IsNull()) {
    // No store barrier needed because checked value is a smi, an unboxed mint,
    // an unboxed double, an unboxed Float32x4, or unboxed Int32x4.
    needs_store_barrier = kNoStoreBarrier;
    Instruction* check = GetCheckClass(
        stored_value, value_check, call->deopt_id(), call->token_pos());
    cursor = flow_graph()->AppendTo(cursor,
                                    check,
                                    call->env(),
                                    FlowGraph::kEffect);
  }

  // Convert the boxed value to the representation the element store needs.
  if (array_cid == kTypedDataFloat32ArrayCid) {
    stored_value =
        new(Z) DoubleToFloatInstr(
            new(Z) Value(stored_value), call->deopt_id());
    cursor = flow_graph()->AppendTo(cursor,
                                    stored_value,
                                    NULL,
                                    FlowGraph::kValue);
  } else if (array_cid == kTypedDataInt32ArrayCid) {
    stored_value = new(Z) UnboxInt32Instr(
        UnboxInt32Instr::kTruncate,
        new(Z) Value(stored_value),
        call->deopt_id());
    cursor = flow_graph()->AppendTo(cursor,
                                    stored_value,
                                    call->env(),
                                    FlowGraph::kValue);
  } else if (array_cid == kTypedDataUint32ArrayCid) {
    stored_value = new(Z) UnboxUint32Instr(
        new(Z) Value(stored_value),
        call->deopt_id());
    ASSERT(stored_value->AsUnboxInteger()->is_truncating());
    cursor = flow_graph()->AppendTo(cursor,
                                    stored_value,
                                    call->env(),
                                    FlowGraph::kValue);
  }

  const intptr_t index_scale = Instance::ElementSizeFor(array_cid);
  *last = new(Z) StoreIndexedInstr(new(Z) Value(array),
                                   new(Z) Value(index),
                                   new(Z) Value(stored_value),
                                   needs_store_barrier,
                                   index_scale,
                                   array_cid,
                                   call->deopt_id(),
                                   call->token_pos());
  flow_graph()->AppendTo(cursor,
                         *last,
                         call->env(),
                         FlowGraph::kEffect);
  return true;
}
| |
| |
// Dispatches on the recognized kind of |target| and, when the platform
// supports the required representations (double/SIMD/int unboxing), inlines
// the operation as a graph fragment delimited by *entry/*last. Returns
// false when the method is unrecognized or cannot be inlined here.
bool FlowGraphOptimizer::TryInlineRecognizedMethod(intptr_t receiver_cid,
                                                   const Function& target,
                                                   Instruction* call,
                                                   Definition* receiver,
                                                   intptr_t token_pos,
                                                   const ICData& ic_data,
                                                   TargetEntryInstr** entry,
                                                   Definition** last) {
  // Empty value_check means the stored value needs no class check.
  ICData& value_check = ICData::ZoneHandle(Z);
  MethodRecognizer::Kind kind = MethodRecognizer::RecognizeKind(target);
  switch (kind) {
    // Recognized [] operators.
    case MethodRecognizer::kImmutableArrayGetIndexed:
    case MethodRecognizer::kObjectArrayGetIndexed:
    case MethodRecognizer::kGrowableArrayGetIndexed:
    case MethodRecognizer::kInt8ArrayGetIndexed:
    case MethodRecognizer::kUint8ArrayGetIndexed:
    case MethodRecognizer::kUint8ClampedArrayGetIndexed:
    case MethodRecognizer::kExternalUint8ArrayGetIndexed:
    case MethodRecognizer::kExternalUint8ClampedArrayGetIndexed:
    case MethodRecognizer::kInt16ArrayGetIndexed:
    case MethodRecognizer::kUint16ArrayGetIndexed:
      return InlineGetIndexed(kind, call, receiver, entry, last);
    case MethodRecognizer::kFloat32ArrayGetIndexed:
    case MethodRecognizer::kFloat64ArrayGetIndexed:
      if (!CanUnboxDouble()) {
        return false;
      }
      return InlineGetIndexed(kind, call, receiver, entry, last);
    case MethodRecognizer::kFloat32x4ArrayGetIndexed:
    case MethodRecognizer::kFloat64x2ArrayGetIndexed:
      if (!ShouldInlineSimd()) {
        return false;
      }
      return InlineGetIndexed(kind, call, receiver, entry, last);
    case MethodRecognizer::kInt32ArrayGetIndexed:
    case MethodRecognizer::kUint32ArrayGetIndexed:
      if (!CanUnboxInt32()) return false;
      return InlineGetIndexed(kind, call, receiver, entry, last);

    case MethodRecognizer::kInt64ArrayGetIndexed:
      if (!ShouldInlineInt64ArrayOps()) {
        return false;
      }
      return InlineGetIndexed(kind, call, receiver, entry, last);
    // Recognized []= operators.
    case MethodRecognizer::kObjectArraySetIndexed:
    case MethodRecognizer::kGrowableArraySetIndexed:
      // Object stores need no value class check (value_check stays null).
      return InlineSetIndexed(kind, target, call, receiver, token_pos,
                              value_check, entry, last);
    case MethodRecognizer::kInt8ArraySetIndexed:
    case MethodRecognizer::kUint8ArraySetIndexed:
    case MethodRecognizer::kUint8ClampedArraySetIndexed:
    case MethodRecognizer::kExternalUint8ArraySetIndexed:
    case MethodRecognizer::kExternalUint8ClampedArraySetIndexed:
    case MethodRecognizer::kInt16ArraySetIndexed:
    case MethodRecognizer::kUint16ArraySetIndexed:
      // Optimistically assume Smi.
      if (ic_data.HasDeoptReason(ICData::kDeoptCheckSmi)) {
        // Optimistic assumption failed at least once.
        return false;
      }
      value_check = ic_data.AsUnaryClassChecksForCid(kSmiCid, target);
      return InlineSetIndexed(kind, target, call, receiver, token_pos,
                              value_check, entry, last);
    case MethodRecognizer::kInt32ArraySetIndexed:
    case MethodRecognizer::kUint32ArraySetIndexed: {
      // Value check not needed for Int32 and Uint32 arrays because they
      // implicitly contain unboxing instructions which check for right type.
      ICData& value_check = ICData::Handle();
      return InlineSetIndexed(kind, target, call, receiver, token_pos,
                              value_check, entry, last);
    }
    case MethodRecognizer::kInt64ArraySetIndexed:
      if (!ShouldInlineInt64ArrayOps()) {
        return false;
      }
      return InlineSetIndexed(kind, target, call, receiver, token_pos,
                              value_check, entry, last);
    case MethodRecognizer::kFloat32ArraySetIndexed:
    case MethodRecognizer::kFloat64ArraySetIndexed:
      if (!CanUnboxDouble()) {
        return false;
      }
      value_check = ic_data.AsUnaryClassChecksForCid(kDoubleCid, target);
      return InlineSetIndexed(kind, target, call, receiver, token_pos,
                              value_check, entry, last);
    case MethodRecognizer::kFloat32x4ArraySetIndexed:
      if (!ShouldInlineSimd()) {
        return false;
      }
      value_check = ic_data.AsUnaryClassChecksForCid(kFloat32x4Cid, target);

      return InlineSetIndexed(kind, target, call, receiver, token_pos,
                              value_check, entry, last);
    case MethodRecognizer::kFloat64x2ArraySetIndexed:
      if (!ShouldInlineSimd()) {
        return false;
      }
      value_check = ic_data.AsUnaryClassChecksForCid(kFloat64x2Cid, target);
      return InlineSetIndexed(kind, target, call, receiver, token_pos,
                              value_check, entry, last);
    // ByteArrayBase views: loads/stores through a typed-data view cid.
    case MethodRecognizer::kByteArrayBaseGetInt8:
      return InlineByteArrayBaseLoad(call, receiver, receiver_cid,
                                     kTypedDataInt8ArrayCid,
                                     ic_data, entry, last);
    case MethodRecognizer::kByteArrayBaseGetUint8:
      return InlineByteArrayBaseLoad(call, receiver, receiver_cid,
                                     kTypedDataUint8ArrayCid,
                                     ic_data, entry, last);
    case MethodRecognizer::kByteArrayBaseGetInt16:
      return InlineByteArrayBaseLoad(call, receiver, receiver_cid,
                                     kTypedDataInt16ArrayCid,
                                     ic_data, entry, last);
    case MethodRecognizer::kByteArrayBaseGetUint16:
      return InlineByteArrayBaseLoad(call, receiver, receiver_cid,
                                     kTypedDataUint16ArrayCid,
                                     ic_data, entry, last);
    case MethodRecognizer::kByteArrayBaseGetInt32:
      if (!CanUnboxInt32()) {
        return false;
      }
      return InlineByteArrayBaseLoad(call, receiver, receiver_cid,
                                     kTypedDataInt32ArrayCid,
                                     ic_data, entry, last);
    case MethodRecognizer::kByteArrayBaseGetUint32:
      if (!CanUnboxInt32()) {
        return false;
      }
      return InlineByteArrayBaseLoad(call, receiver, receiver_cid,
                                     kTypedDataUint32ArrayCid,
                                     ic_data, entry, last);
    case MethodRecognizer::kByteArrayBaseGetFloat32:
      if (!CanUnboxDouble()) {
        return false;
      }
      return InlineByteArrayBaseLoad(call, receiver, receiver_cid,
                                     kTypedDataFloat32ArrayCid,
                                     ic_data, entry, last);
    case MethodRecognizer::kByteArrayBaseGetFloat64:
      if (!CanUnboxDouble()) {
        return false;
      }
      return InlineByteArrayBaseLoad(call, receiver, receiver_cid,
                                     kTypedDataFloat64ArrayCid,
                                     ic_data, entry, last);
    case MethodRecognizer::kByteArrayBaseGetFloat32x4:
      if (!ShouldInlineSimd()) {
        return false;
      }
      return InlineByteArrayBaseLoad(call, receiver, receiver_cid,
                                     kTypedDataFloat32x4ArrayCid,
                                     ic_data, entry, last);
    case MethodRecognizer::kByteArrayBaseGetInt32x4:
      if (!ShouldInlineSimd()) {
        return false;
      }
      return InlineByteArrayBaseLoad(call, receiver, receiver_cid,
                                     kTypedDataInt32x4ArrayCid,
                                     ic_data, entry, last);
    case MethodRecognizer::kByteArrayBaseSetInt8:
      return InlineByteArrayBaseStore(target, call, receiver, receiver_cid,
                                      kTypedDataInt8ArrayCid,
                                      ic_data, entry, last);
    case MethodRecognizer::kByteArrayBaseSetUint8:
      return InlineByteArrayBaseStore(target, call, receiver, receiver_cid,
                                      kTypedDataUint8ArrayCid,
                                      ic_data, entry, last);
    case MethodRecognizer::kByteArrayBaseSetInt16:
      return InlineByteArrayBaseStore(target, call, receiver, receiver_cid,
                                      kTypedDataInt16ArrayCid,
                                      ic_data, entry, last);
    case MethodRecognizer::kByteArrayBaseSetUint16:
      return InlineByteArrayBaseStore(target, call, receiver, receiver_cid,
                                      kTypedDataUint16ArrayCid,
                                      ic_data, entry, last);
    case MethodRecognizer::kByteArrayBaseSetInt32:
      return InlineByteArrayBaseStore(target, call, receiver, receiver_cid,
                                      kTypedDataInt32ArrayCid,
                                      ic_data, entry, last);
    case MethodRecognizer::kByteArrayBaseSetUint32:
      return InlineByteArrayBaseStore(target, call, receiver, receiver_cid,
                                      kTypedDataUint32ArrayCid,
                                      ic_data, entry, last);
    case MethodRecognizer::kByteArrayBaseSetFloat32:
      if (!CanUnboxDouble()) {
        return false;
      }
      return InlineByteArrayBaseStore(target, call, receiver, receiver_cid,
                                      kTypedDataFloat32ArrayCid,
                                      ic_data, entry, last);
    case MethodRecognizer::kByteArrayBaseSetFloat64:
      if (!CanUnboxDouble()) {
        return false;
      }
      return InlineByteArrayBaseStore(target, call, receiver, receiver_cid,
                                      kTypedDataFloat64ArrayCid,
                                      ic_data, entry, last);
    case MethodRecognizer::kByteArrayBaseSetFloat32x4:
      if (!ShouldInlineSimd()) {
        return false;
      }
      return InlineByteArrayBaseStore(target, call, receiver, receiver_cid,
                                      kTypedDataFloat32x4ArrayCid,
                                      ic_data, entry, last);
    case MethodRecognizer::kByteArrayBaseSetInt32x4:
      if (!ShouldInlineSimd()) {
        return false;
      }
      return InlineByteArrayBaseStore(target, call, receiver, receiver_cid,
                                      kTypedDataInt32x4ArrayCid,
                                      ic_data, entry, last);
    case MethodRecognizer::kStringBaseCodeUnitAt:
      return InlineStringCodeUnitAt(call, receiver_cid, entry, last);
    case MethodRecognizer::kStringBaseCharAt:
      return InlineStringBaseCharAt(call, receiver_cid, entry, last);
    case MethodRecognizer::kDoubleAdd:
      return InlineDoubleOp(Token::kADD, call, entry, last);
    case MethodRecognizer::kDoubleSub:
      return InlineDoubleOp(Token::kSUB, call, entry, last);
    case MethodRecognizer::kDoubleMul:
      return InlineDoubleOp(Token::kMUL, call, entry, last);
    case MethodRecognizer::kDoubleDiv:
      return InlineDoubleOp(Token::kDIV, call, entry, last);
    default:
      return false;
  }
}
| |
| |
// Emits the common prologue shared by inlined indexed loads and stores:
// a deoptimizing smi check on the index, a length load plus bounds check,
// and - for growable or external typed-data arrays - a load of the backing
// store. Updates *array/*cursor in place and returns the cid of the array
// that is actually indexed (which may differ from the incoming cid).
intptr_t FlowGraphOptimizer::PrepareInlineIndexedOp(Instruction* call,
                                                    intptr_t array_cid,
                                                    Definition** array,
                                                    Definition* index,
                                                    Instruction** cursor) {
  // Insert index smi check.
  *cursor = flow_graph()->AppendTo(
      *cursor,
      new(Z) CheckSmiInstr(new(Z) Value(index),
                           call->deopt_id(),
                           call->token_pos()),
      call->env(),
      FlowGraph::kEffect);

  // Insert array length load and bounds check.
  LoadFieldInstr* length =
      new(Z) LoadFieldInstr(
          new(Z) Value(*array),
          CheckArrayBoundInstr::LengthOffsetFor(array_cid),
          Type::ZoneHandle(Z, Type::SmiType()),
          call->token_pos());
  length->set_is_immutable(
      CheckArrayBoundInstr::IsFixedLengthArrayType(array_cid));
  length->set_result_cid(kSmiCid);
  length->set_recognized_kind(
      LoadFieldInstr::RecognizedKindFromArrayCid(array_cid));
  *cursor = flow_graph()->AppendTo(*cursor,
                                   length,
                                   NULL,
                                   FlowGraph::kValue);

  *cursor = flow_graph()->AppendTo(*cursor,
                                   new(Z) CheckArrayBoundInstr(
                                       new(Z) Value(length),
                                       new(Z) Value(index),
                                       call->deopt_id()),
                                   call->env(),
                                   FlowGraph::kEffect);

  if (array_cid == kGrowableObjectArrayCid) {
    // Insert data elements load.
    LoadFieldInstr* elements =
        new(Z) LoadFieldInstr(
            new(Z) Value(*array),
            GrowableObjectArray::data_offset(),
            Type::ZoneHandle(Z, Type::DynamicType()),
            call->token_pos());
    elements->set_result_cid(kArrayCid);
    *cursor = flow_graph()->AppendTo(*cursor,
                                     elements,
                                     NULL,
                                     FlowGraph::kValue);
    // Load from the data from backing store which is a fixed-length array.
    *array = elements;
    array_cid = kArrayCid;
  } else if (RawObject::IsExternalTypedDataClassId(array_cid)) {
    // External typed data: index through the untagged data pointer.
    LoadUntaggedInstr* elements =
        new(Z) LoadUntaggedInstr(new(Z) Value(*array),
                                 ExternalTypedData::data_offset());
    *cursor = flow_graph()->AppendTo(*cursor,
                                     elements,
                                     NULL,
                                     FlowGraph::kValue);
    *array = elements;
  }
  return array_cid;
}
| |
| |
// Inlines a recognized [] operator: emits index smi/bounds checks and the
// element load (plus a float->double widening for Float32 arrays). On
// success *entry/*last delimit the inlined fragment. Always returns true.
bool FlowGraphOptimizer::InlineGetIndexed(MethodRecognizer::Kind kind,
                                          Instruction* call,
                                          Definition* receiver,
                                          TargetEntryInstr** entry,
                                          Definition** last) {
  intptr_t array_cid = MethodKindToCid(kind);
  ASSERT(array_cid != kIllegalCid);

  Definition* array = receiver;
  Definition* index = call->ArgumentAt(1);
  *entry = new(Z) TargetEntryInstr(flow_graph()->allocate_block_id(),
                                   call->GetBlock()->try_index());
  (*entry)->InheritDeoptTarget(Z, call);
  Instruction* cursor = *entry;

  // Emit index smi check, bounds check, and backing-store load; the array
  // cid may change (e.g. growable array -> backing fixed-length array).
  array_cid = PrepareInlineIndexedOp(call,
                                     array_cid,
                                     &array,
                                     index,
                                     &cursor);

  intptr_t deopt_id = Thread::kNoDeoptId;
  if ((array_cid == kTypedDataInt32ArrayCid) ||
      (array_cid == kTypedDataUint32ArrayCid)) {
    // Deoptimization may be needed if result does not always fit in a Smi.
    deopt_id = (kSmiBits >= 32) ? Thread::kNoDeoptId : call->deopt_id();
  }

  // Array load and return.
  intptr_t index_scale = Instance::ElementSizeFor(array_cid);
  *last = new(Z) LoadIndexedInstr(new(Z) Value(array),
                                  new(Z) Value(index),
                                  index_scale,
                                  array_cid,
                                  deopt_id,
                                  call->token_pos());
  cursor = flow_graph()->AppendTo(
      cursor,
      *last,
      deopt_id != Thread::kNoDeoptId ? call->env() : NULL,
      FlowGraph::kValue);

  if (array_cid == kTypedDataFloat32ArrayCid) {
    // Widen the raw float element to the double the caller expects.
    *last = new(Z) FloatToDoubleInstr(new(Z) Value(*last), deopt_id);
    flow_graph()->AppendTo(cursor,
                           *last,
                           deopt_id != Thread::kNoDeoptId ? call->env() : NULL,
                           FlowGraph::kValue);
  }
  return true;
}
| |
| |
| // Return true if d is a string of length one (a constant or result from |
| // from string-from-char-code instruction. |
| static bool IsLengthOneString(Definition* d) { |
| if (d->IsConstant()) { |
| const Object& obj = d->AsConstant()->value(); |
| if (obj.IsString()) { |
| return String::Cast(obj).Length() == 1; |
| } else { |
| return false; |
| } |
| } else { |
| return d->IsStringFromCharCode(); |
| } |
| } |
| |
| |
// Returns true if the string comparison was converted into char-code
// comparison. Conversion is only possible for strings of length one.
// E.g., detect str[x] == "x"; and use an integer comparison of char-codes.
// TODO(srdjan): Expand for two-byte and external strings.
bool FlowGraphOptimizer::TryStringLengthOneEquality(InstanceCallInstr* call,
                                                    Token::Kind op_kind) {
  ASSERT(HasOnlyTwoOf(*call->ic_data(), kOneByteStringCid));
  // Check that left and right are length one strings (either string constants
  // or results of string-from-char-code.
  Definition* left = call->ArgumentAt(0);
  Definition* right = call->ArgumentAt(1);
  Value* left_val = NULL;
  Definition* to_remove_left = NULL;
  if (IsLengthOneString(right)) {
    // Swap, since we know that both arguments are strings
    Definition* temp = left;
    left = right;
    right = temp;
  }
  if (IsLengthOneString(left)) {
    // Optimize if left is a string with length one (either constant or
    // result of string-from-char-code.
    if (left->IsConstant()) {
      // Fold the constant string into its (Smi) char-code.
      ConstantInstr* left_const = left->AsConstant();
      const String& str = String::Cast(left_const->value());
      ASSERT(str.Length() == 1);
      ConstantInstr* char_code_left = flow_graph()->GetConstant(
          Smi::ZoneHandle(Z, Smi::New(static_cast<intptr_t>(str.CharAt(0)))));
      left_val = new(Z) Value(char_code_left);
    } else if (left->IsStringFromCharCode()) {
      // Use input of string-from-charcode as left value.
      StringFromCharCodeInstr* instr = left->AsStringFromCharCode();
      left_val = new(Z) Value(instr->char_code()->definition());
      to_remove_left = instr;
    } else {
      // IsLengthOneString(left) should have been false.
      UNREACHABLE();
    }

    Definition* to_remove_right = NULL;
    Value* right_val = NULL;
    if (right->IsStringFromCharCode()) {
      // Skip string-from-char-code, and use its input as right value.
      StringFromCharCodeInstr* right_instr = right->AsStringFromCharCode();
      right_val = new(Z) Value(right_instr->char_code()->definition());
      to_remove_right = right_instr;
    } else {
      // Right side is an arbitrary string: class-check it and extract its
      // char-code at runtime.
      const ICData& unary_checks_1 =
          ICData::ZoneHandle(Z, call->ic_data()->AsUnaryClassChecksForArgNr(1));
      AddCheckClass(right,
                    unary_checks_1,
                    call->deopt_id(),
                    call->env(),
                    call);
      // String-to-char-code instructions returns -1 (illegal charcode) if
      // string is not of length one.
      StringToCharCodeInstr* char_code_right =
          new(Z) StringToCharCodeInstr(new(Z) Value(right), kOneByteStringCid);
      InsertBefore(call, char_code_right, call->env(), FlowGraph::kValue);
      right_val = new(Z) Value(char_code_right);
    }

    // Comparing char-codes instead of strings.
    EqualityCompareInstr* comp =
        new(Z) EqualityCompareInstr(call->token_pos(),
                                    op_kind,
                                    left_val,
                                    right_val,
                                    kSmiCid,
                                    call->deopt_id());
    ReplaceCall(call, comp);

    // Remove dead instructions.
    if ((to_remove_left != NULL) &&
        (to_remove_left->input_use_list() == NULL)) {
      to_remove_left->ReplaceUsesWith(flow_graph()->constant_null());
      to_remove_left->RemoveFromGraph();
    }
    if ((to_remove_right != NULL) &&
        (to_remove_right->input_use_list() == NULL)) {
      to_remove_right->ReplaceUsesWith(flow_graph()->constant_null());
      to_remove_right->RemoveFromGraph();
    }
    return true;
  }
  return false;
}
| |
| |
| static bool SmiFitsInDouble() { return kSmiBits < 53; } |
| |
// Tries to replace an equality instance call (== / !=) with a specialized
// comparison instruction chosen from the call's type feedback (ic_data).
// Returns true (and rewrites the graph) on success; returns false when the
// call must remain a generic/polymorphic call.
bool FlowGraphOptimizer::TryReplaceWithEqualityOp(InstanceCallInstr* call,
                                                  Token::Kind op_kind) {
  const ICData& ic_data = *call->ic_data();
  ASSERT(ic_data.NumArgsTested() == 2);

  ASSERT(call->ArgumentCount() == 2);
  Definition* left = call->ArgumentAt(0);
  Definition* right = call->ArgumentAt(1);

  intptr_t cid = kIllegalCid;
  if (HasOnlyTwoOf(ic_data, kOneByteStringCid)) {
    // Both operands are one-byte strings: only the length-one string case is
    // specialized; anything else stays a call.
    if (TryStringLengthOneEquality(call, op_kind)) {
      return true;
    } else {
      return false;
    }
  } else if (HasOnlyTwoOf(ic_data, kSmiCid)) {
    // Both operands are smis: guard each with a smi check, then compare
    // using the smi-specialized equality below (cid == kSmiCid).
    InsertBefore(call,
                 new(Z) CheckSmiInstr(new(Z) Value(left),
                                      call->deopt_id(),
                                      call->token_pos()),
                 call->env(),
                 FlowGraph::kEffect);
    InsertBefore(call,
                 new(Z) CheckSmiInstr(new(Z) Value(right),
                                      call->deopt_id(),
                                      call->token_pos()),
                 call->env(),
                 FlowGraph::kEffect);
    cid = kSmiCid;
  } else if (HasTwoMintOrSmi(ic_data) &&
             FlowGraphCompiler::SupportsUnboxedMints()) {
    // Smi/mint operands: compare as unboxed mints when the target supports it.
    cid = kMintCid;
  } else if (HasTwoDoubleOrSmi(ic_data) && CanUnboxDouble()) {
    // Use double comparison.
    if (SmiFitsInDouble()) {
      cid = kDoubleCid;
    } else {
      if (ICDataHasReceiverArgumentClassIds(ic_data, kSmiCid, kSmiCid)) {
        // We cannot use double comparison on two smis. Need polymorphic
        // call.
        return false;
      } else {
        // At least one operand is known to not be a smi; the check below
        // guarantees that at runtime, so a double comparison is safe.
        InsertBefore(call,
                     new(Z) CheckEitherNonSmiInstr(
                         new(Z) Value(left),
                         new(Z) Value(right),
                         call->deopt_id()),
                     call->env(),
                     FlowGraph::kEffect);
        cid = kDoubleCid;
      }
    }
  } else {
    // Check if ICDData contains checks with Smi/Null combinations. In that case
    // we can still emit the optimized Smi equality operation but need to add
    // checks for null or Smi.
    GrowableArray<intptr_t> smi_or_null(2);
    smi_or_null.Add(kSmiCid);
    smi_or_null.Add(kNullCid);
    if (ICDataHasOnlyReceiverArgumentClassIds(ic_data,
                                              smi_or_null,
                                              smi_or_null)) {
      const ICData& unary_checks_0 =
          ICData::ZoneHandle(Z, call->ic_data()->AsUnaryClassChecks());
      AddCheckClass(left,
                    unary_checks_0,
                    call->deopt_id(),
                    call->env(),
                    call);

      const ICData& unary_checks_1 =
          ICData::ZoneHandle(Z, call->ic_data()->AsUnaryClassChecksForArgNr(1));
      AddCheckClass(right,
                    unary_checks_1,
                    call->deopt_id(),
                    call->env(),
                    call);
      cid = kSmiCid;
    } else {
      // Shortcut for equality with null.
      ConstantInstr* right_const = right->AsConstant();
      ConstantInstr* left_const = left->AsConstant();
      if ((right_const != NULL && right_const->value().IsNull()) ||
          (left_const != NULL && left_const->value().IsNull())) {
        // Comparison against the null constant can use identity semantics.
        StrictCompareInstr* comp =
            new(Z) StrictCompareInstr(call->token_pos(),
                                      Token::kEQ_STRICT,
                                      new(Z) Value(left),
                                      new(Z) Value(right),
                                      false);  // No number check.
        ReplaceCall(call, comp);
        return true;
      }
      return false;
    }
  }
  // A specialized comparison class id was selected above; emit it.
  ASSERT(cid != kIllegalCid);
  EqualityCompareInstr* comp = new(Z) EqualityCompareInstr(call->token_pos(),
                                                           op_kind,
                                                           new(Z) Value(left),
                                                           new(Z) Value(right),
                                                           cid,
                                                           call->deopt_id());
  ReplaceCall(call, comp);
  return true;
}
| |
| |
// Tries to replace a relational instance call (<, <=, >, >=) with a
// specialized RelationalOpInstr based on the call's type feedback.
// Mirrors the operand-class selection of TryReplaceWithEqualityOp, minus the
// string and Smi/Null special cases. Returns false if no specialization fits.
bool FlowGraphOptimizer::TryReplaceWithRelationalOp(InstanceCallInstr* call,
                                                    Token::Kind op_kind) {
  const ICData& ic_data = *call->ic_data();
  ASSERT(ic_data.NumArgsTested() == 2);

  ASSERT(call->ArgumentCount() == 2);
  Definition* left = call->ArgumentAt(0);
  Definition* right = call->ArgumentAt(1);

  intptr_t cid = kIllegalCid;
  if (HasOnlyTwoOf(ic_data, kSmiCid)) {
    // Both operands are smis: guard each with a smi check and compare unboxed.
    InsertBefore(call,
                 new(Z) CheckSmiInstr(new(Z) Value(left),
                                      call->deopt_id(),
                                      call->token_pos()),
                 call->env(),
                 FlowGraph::kEffect);
    InsertBefore(call,
                 new(Z) CheckSmiInstr(new(Z) Value(right),
                                      call->deopt_id(),
                                      call->token_pos()),
                 call->env(),
                 FlowGraph::kEffect);
    cid = kSmiCid;
  } else if (HasTwoMintOrSmi(ic_data) &&
             FlowGraphCompiler::SupportsUnboxedMints()) {
    // Smi/mint operands: compare as unboxed mints when the target supports it.
    cid = kMintCid;
  } else if (HasTwoDoubleOrSmi(ic_data) && CanUnboxDouble()) {
    // Use double comparison.
    if (SmiFitsInDouble()) {
      cid = kDoubleCid;
    } else {
      if (ICDataHasReceiverArgumentClassIds(ic_data, kSmiCid, kSmiCid)) {
        // We cannot use double comparison on two smis. Need polymorphic
        // call.
        return false;
      } else {
        // Guarantee at runtime that at least one operand is not a smi, so
        // the double comparison cannot see two smis.
        InsertBefore(call,
                     new(Z) CheckEitherNonSmiInstr(
                         new(Z) Value(left),
                         new(Z) Value(right),
                         call->deopt_id()),
                     call->env(),
                     FlowGraph::kEffect);
        cid = kDoubleCid;
      }
    }
  } else {
    return false;
  }
  ASSERT(cid != kIllegalCid);
  RelationalOpInstr* comp = new(Z) RelationalOpInstr(call->token_pos(),
                                                     op_kind,
                                                     new(Z) Value(left),
                                                     new(Z) Value(right),
                                                     cid,
                                                     call->deopt_id());
  ReplaceCall(call, comp);
  return true;
}
| |
| |
// Tries to replace a binary arithmetic/bitwise/shift instance call with a
// specialized unboxed instruction, driven by the call's type feedback.
// First selects the operand class (operands_type) per operator kind, bailing
// out when deopt history says the specialization already failed; then emits
// the matching specialized instruction. Returns false if no specialization
// applies.
bool FlowGraphOptimizer::TryReplaceWithBinaryOp(InstanceCallInstr* call,
                                                Token::Kind op_kind) {
  intptr_t operands_type = kIllegalCid;
  ASSERT(call->HasICData());
  const ICData& ic_data = *call->ic_data();
  // Phase 1: pick the operand representation from feedback + deopt history.
  switch (op_kind) {
    case Token::kADD:
    case Token::kSUB:
    case Token::kMUL:
      if (HasOnlyTwoOf(ic_data, kSmiCid)) {
        // Don't generate smi code if the IC data is marked because
        // of an overflow.
        operands_type = ic_data.HasDeoptReason(ICData::kDeoptBinarySmiOp)
            ? kMintCid
            : kSmiCid;
      } else if (HasTwoMintOrSmi(ic_data) &&
                 FlowGraphCompiler::SupportsUnboxedMints()) {
        // Don't generate mint code if the IC data is marked because of an
        // overflow.
        if (ic_data.HasDeoptReason(ICData::kDeoptBinaryMintOp)) return false;
        operands_type = kMintCid;
      } else if (ShouldSpecializeForDouble(ic_data)) {
        operands_type = kDoubleCid;
      } else if (HasOnlyTwoOf(ic_data, kFloat32x4Cid)) {
        operands_type = kFloat32x4Cid;
      } else if (HasOnlyTwoOf(ic_data, kInt32x4Cid)) {
        ASSERT(op_kind != Token::kMUL);  // Int32x4 doesn't have a multiply op.
        operands_type = kInt32x4Cid;
      } else if (HasOnlyTwoOf(ic_data, kFloat64x2Cid)) {
        operands_type = kFloat64x2Cid;
      } else {
        return false;
      }
      break;
    case Token::kDIV:
      if (!FlowGraphCompiler::SupportsHardwareDivision()) return false;
      // '/' on two smis produces a double, so smi/smi is lowered as double.
      if (ShouldSpecializeForDouble(ic_data) ||
          HasOnlyTwoOf(ic_data, kSmiCid)) {
        operands_type = kDoubleCid;
      } else if (HasOnlyTwoOf(ic_data, kFloat32x4Cid)) {
        operands_type = kFloat32x4Cid;
      } else if (HasOnlyTwoOf(ic_data, kFloat64x2Cid)) {
        operands_type = kFloat64x2Cid;
      } else {
        return false;
      }
      break;
    case Token::kBIT_AND:
    case Token::kBIT_OR:
    case Token::kBIT_XOR:
      if (HasOnlyTwoOf(ic_data, kSmiCid)) {
        operands_type = kSmiCid;
      } else if (HasTwoMintOrSmi(ic_data)) {
        operands_type = kMintCid;
      } else if (HasOnlyTwoOf(ic_data, kInt32x4Cid)) {
        operands_type = kInt32x4Cid;
      } else {
        return false;
      }
      break;
    case Token::kSHR:
    case Token::kSHL:
      if (HasOnlyTwoOf(ic_data, kSmiCid)) {
        // Left shift may overflow from smi into mint or big ints.
        // Don't generate smi code if the IC data is marked because
        // of an overflow.
        if (ic_data.HasDeoptReason(ICData::kDeoptBinaryMintOp)) {
          return false;
        }
        operands_type = ic_data.HasDeoptReason(ICData::kDeoptBinarySmiOp)
            ? kMintCid
            : kSmiCid;
      } else if (HasTwoMintOrSmi(ic_data) &&
                 HasOnlyOneSmi(ICData::Handle(Z,
                     ic_data.AsUnaryClassChecksForArgNr(1)))) {
        // Don't generate mint code if the IC data is marked because of an
        // overflow.
        if (ic_data.HasDeoptReason(ICData::kDeoptBinaryMintOp)) {
          return false;
        }
        // Check for smi/mint << smi or smi/mint >> smi.
        operands_type = kMintCid;
      } else {
        return false;
      }
      break;
    case Token::kMOD:
    case Token::kTRUNCDIV:
      if (!FlowGraphCompiler::SupportsHardwareDivision()) return false;
      if (HasOnlyTwoOf(ic_data, kSmiCid)) {
        if (ic_data.HasDeoptReason(ICData::kDeoptBinarySmiOp)) {
          return false;
        }
        operands_type = kSmiCid;
      } else {
        return false;
      }
      break;
    default:
      UNREACHABLE();
  }

  // Phase 2: emit the specialized instruction for the chosen representation.
  ASSERT(call->ArgumentCount() == 2);
  Definition* left = call->ArgumentAt(0);
  Definition* right = call->ArgumentAt(1);
  if (operands_type == kDoubleCid) {
    if (!CanUnboxDouble()) {
      return false;
    }
    // Check that either left or right are not a smi. Result of a
    // binary operation with two smis is a smi not a double, except '/' which
    // returns a double for two smis.
    if (op_kind != Token::kDIV) {
      InsertBefore(call,
                   new(Z) CheckEitherNonSmiInstr(
                       new(Z) Value(left),
                       new(Z) Value(right),
                       call->deopt_id()),
                   call->env(),
                   FlowGraph::kEffect);
    }

    BinaryDoubleOpInstr* double_bin_op =
        new(Z) BinaryDoubleOpInstr(op_kind,
                                   new(Z) Value(left),
                                   new(Z) Value(right),
                                   call->deopt_id(), call->token_pos());
    ReplaceCall(call, double_bin_op);
  } else if (operands_type == kMintCid) {
    if (!FlowGraphCompiler::SupportsUnboxedMints()) return false;
    // Shifts use a dedicated instruction; other mint ops share one.
    if ((op_kind == Token::kSHR) || (op_kind == Token::kSHL)) {
      ShiftMintOpInstr* shift_op =
          new(Z) ShiftMintOpInstr(
              op_kind, new(Z) Value(left), new(Z) Value(right),
              call->deopt_id());
      ReplaceCall(call, shift_op);
    } else {
      BinaryMintOpInstr* bin_op =
          new(Z) BinaryMintOpInstr(
              op_kind, new(Z) Value(left), new(Z) Value(right),
              call->deopt_id());
      ReplaceCall(call, bin_op);
    }
  } else if (operands_type == kFloat32x4Cid) {
    return InlineFloat32x4BinaryOp(call, op_kind);
  } else if (operands_type == kInt32x4Cid) {
    return InlineInt32x4BinaryOp(call, op_kind);
  } else if (operands_type == kFloat64x2Cid) {
    return InlineFloat64x2BinaryOp(call, op_kind);
  } else if (op_kind == Token::kMOD) {
    ASSERT(operands_type == kSmiCid);
    // x % 2^k (with non-negative smi x semantics per BinarySmiOp) can be
    // strength-reduced to x & (2^k - 1).
    if (right->IsConstant()) {
      const Object& obj = right->AsConstant()->value();
      if (obj.IsSmi() && Utils::IsPowerOfTwo(Smi::Cast(obj).Value())) {
        // Insert smi check and attach a copy of the original environment
        // because the smi operation can still deoptimize.
        InsertBefore(call,
                     new(Z) CheckSmiInstr(new(Z) Value(left),
                                          call->deopt_id(),
                                          call->token_pos()),
                     call->env(),
                     FlowGraph::kEffect);
        ConstantInstr* constant =
            flow_graph()->GetConstant(Smi::Handle(Z,
                Smi::New(Smi::Cast(obj).Value() - 1)));
        BinarySmiOpInstr* bin_op =
            new(Z) BinarySmiOpInstr(Token::kBIT_AND,
                                    new(Z) Value(left),
                                    new(Z) Value(constant),
                                    call->deopt_id());
        ReplaceCall(call, bin_op);
        return true;
      }
    }
    // Insert two smi checks and attach a copy of the original
    // environment because the smi operation can still deoptimize.
    AddCheckSmi(left, call->deopt_id(), call->env(), call);
    AddCheckSmi(right, call->deopt_id(), call->env(), call);
    BinarySmiOpInstr* bin_op =
        new(Z) BinarySmiOpInstr(op_kind,
                                new(Z) Value(left),
                                new(Z) Value(right),
                                call->deopt_id());
    ReplaceCall(call, bin_op);
  } else {
    ASSERT(operands_type == kSmiCid);
    // Insert two smi checks and attach a copy of the original
    // environment because the smi operation can still deoptimize.
    AddCheckSmi(left, call->deopt_id(), call->env(), call);
    AddCheckSmi(right, call->deopt_id(), call->env(), call);
    if (left->IsConstant() &&
        ((op_kind == Token::kADD) || (op_kind == Token::kMUL))) {
      // Constant should be on the right side.
      Definition* temp = left;
      left = right;
      right = temp;
    }
    BinarySmiOpInstr* bin_op =
        new(Z) BinarySmiOpInstr(
            op_kind,
            new(Z) Value(left),
            new(Z) Value(right),
            call->deopt_id());
    ReplaceCall(call, bin_op);
  }
  return true;
}
| |
| |
// Tries to replace a unary-operator instance call (e.g. unary '-', '~') with
// a specialized unboxed instruction based on the call's type feedback.
// Returns false if the feedback does not match any supported specialization.
bool FlowGraphOptimizer::TryReplaceWithUnaryOp(InstanceCallInstr* call,
                                               Token::Kind op_kind) {
  ASSERT(call->ArgumentCount() == 1);
  Definition* input = call->ArgumentAt(0);
  Definition* unary_op = NULL;
  if (HasOnlyOneSmi(*call->ic_data())) {
    // Smi operand: guard with a smi check, then use the smi-specialized op.
    InsertBefore(call,
                 new(Z) CheckSmiInstr(new(Z) Value(input),
                                      call->deopt_id(),
                                      call->token_pos()),
                 call->env(),
                 FlowGraph::kEffect);
    unary_op = new(Z) UnarySmiOpInstr(
        op_kind, new(Z) Value(input), call->deopt_id());
  } else if ((op_kind == Token::kBIT_NOT) &&
             HasOnlySmiOrMint(*call->ic_data()) &&
             FlowGraphCompiler::SupportsUnboxedMints()) {
    // '~' on smi/mint values: lower to the unboxed mint op. NOTE(review): no
    // explicit class check is inserted here, unlike the other branches —
    // presumably the mint unboxing deoptimizes on other classes; confirm.
    unary_op = new(Z) UnaryMintOpInstr(
        op_kind, new(Z) Value(input), call->deopt_id());
  } else if (HasOnlyOneDouble(*call->ic_data()) &&
             (op_kind == Token::kNEGATE) &&
             CanUnboxDouble()) {
    // Unary minus on a double: check the receiver class, then negate unboxed.
    AddReceiverCheck(call);
    unary_op = new(Z) UnaryDoubleOpInstr(
        Token::kNEGATE, new(Z) Value(input), call->deopt_id());
  } else {
    return false;
  }
  ASSERT(unary_op != NULL);
  ReplaceCall(call, unary_op);
  return true;
}
| |
| |
| // Using field class |
| RawField* FlowGraphOptimizer::GetField(intptr_t class_id, |
| const String& field_name) { |
| Class& cls = Class::Handle(Z, isolate()->class_table()->At(class_id)); |
| Field& field = Field::Handle(Z); |
| while (!cls.IsNull()) { |
| field = cls.LookupInstanceField(field_name); |
| if (!field.IsNull()) { |
| return field.raw(); |
| } |
| cls = cls.SuperClass(); |
| } |
| return Field::null(); |
| } |
| |
| |
// Use CHA to determine if the call needs a class check: if the callee's
// receiver is the same as the caller's receiver and there are no overriden
// callee functions, then no class check is needed.
bool FlowGraphOptimizer::InstanceCallNeedsClassCheck(
    InstanceCallInstr* call, RawFunction::Kind kind) const {
  if (!FLAG_use_cha_deopt) {
    // Even if class or function are private, lazy class finalization
    // may later add overriding methods.
    return true;
  }
  Definition* callee_receiver = call->ArgumentAt(0);
  ASSERT(callee_receiver != NULL);
  const Function& function = flow_graph_->function();
  // The CHA argument only applies when the call's receiver is the caller's
  // own receiver (i.e. parameter 0 of a dynamic function).
  if (function.IsDynamicFunction() &&
      callee_receiver->IsParameter() &&
      (callee_receiver->AsParameter()->index() == 0)) {
    // Method extractors are invoked through a getter selector; map it back
    // to the underlying method name before querying CHA.
    const String& name = (kind == RawFunction::kMethodExtractor)
        ? String::Handle(Z, Field::NameFromGetter(call->function_name()))
        : call->function_name();
    const Class& cls = Class::Handle(Z, function.Owner());
    if (!thread()->cha()->HasOverride(cls, name)) {
      if (FLAG_trace_cha) {
        THR_Print(" **(CHA) Instance call needs no check, "
                  "no overrides of '%s' '%s'\n",
                  name.ToCString(), cls.ToCString());
      }
      // Record the CHA dependence so this optimization can be invalidated if
      // an overriding subclass is loaded later.
      thread()->cha()->AddToLeafClasses(cls);
      return false;
    }
  }
  return true;
}
| |
| |
// Replaces a monomorphic implicit-getter call with a direct field load.
// When a receiver class check is required but |allow_check| is false, the
// call is left untouched and false is returned.
bool FlowGraphOptimizer::InlineImplicitInstanceGetter(InstanceCallInstr* call,
                                                      bool allow_check) {
  ASSERT(call->HasICData());
  const ICData& ic_data = *call->ic_data();
  ASSERT(ic_data.HasOneTarget());
  Function& target = Function::Handle(Z);
  GrowableArray<intptr_t> class_ids;
  ic_data.GetCheckAt(0, &class_ids, &target);
  ASSERT(class_ids.length() == 1);
  // Inline implicit instance getter.
  const String& field_name =
      String::Handle(Z, Field::NameFromGetter(call->function_name()));
  const Field& field =
      Field::ZoneHandle(Z, GetField(class_ids[0], field_name));
  ASSERT(!field.IsNull());

  // Guard the receiver class unless CHA proves the getter is not overridden.
  if (InstanceCallNeedsClassCheck(call, RawFunction::kImplicitGetter)) {
    if (!allow_check) {
      return false;
    }
    AddReceiverCheck(call);
  }
  LoadFieldInstr* load = new(Z) LoadFieldInstr(
      new(Z) Value(call->ArgumentAt(0)),
      &field,
      AbstractType::ZoneHandle(Z, field.type()),
      call->token_pos());
  load->set_is_immutable(field.is_final());
  // If the field has a guarded class id, propagate it to the load's result
  // and register the field — presumably so dependent code can be invalidated
  // when the guard changes (see FlowGraph::AddToGuardedFields).
  if (field.guarded_cid() != kIllegalCid) {
    if (!field.is_nullable() || (field.guarded_cid() == kNullCid)) {
      load->set_result_cid(field.guarded_cid());
    }
    FlowGraph::AddToGuardedFields(flow_graph_->guarded_fields(), &field);
  }

  // Discard the environment from the original instruction because the load
  // can't deoptimize.
  call->RemoveEnvironment();
  ReplaceCall(call, load);

  if (load->result_cid() != kDynamicCid) {
    // Reset value types if guarded_cid was used.
    for (Value::Iterator it(load->input_use_list());
         !it.Done();
         it.Advance()) {
      it.Current()->SetReachingType(NULL);
    }
  }
  return true;
}
| |
| |
| bool FlowGraphOptimizer::InlineFloat32x4Getter(InstanceCallInstr* call, |
| MethodRecognizer::Kind getter) { |
| if (!ShouldInlineSimd()) { |
| return false; |
| } |
| AddCheckClass(call->ArgumentAt(0), |
| ICData::ZoneHandle( |
| Z, call->ic_data()->AsUnaryClassChecksForArgNr(0)), |
| call->deopt_id(), |
| call->env(), |
| call); |
| intptr_t mask = 0; |
| if ((getter == MethodRecognizer::kFloat32x4Shuffle) || |
| (getter == MethodRecognizer::kFloat32x4ShuffleMix)) { |
| // Extract shuffle mask. |
| Definition* mask_definition = NULL; |
| if (getter == MethodRecognizer::kFloat32x4Shuffle) { |
| ASSERT(call->ArgumentCount() == 2); |
| mask_definition = call->ArgumentAt(1); |
| } else { |
| ASSERT(getter == MethodRecognizer::kFloat32x4ShuffleMix); |
| ASSERT(call->ArgumentCount() == 3); |
| mask_definition = call->ArgumentAt(2); |
| } |
| if (!mask_definition->IsConstant()) { |
| return false; |
| } |
| ASSERT(mask_definition->IsConstant()); |
| ConstantInstr* constant_instruction = mask_definition->AsConstant(); |
| const Object& constant_mask = constant_instruction->value(); |
| if (!constant_mask.IsSmi()) { |
| return false; |
| } |
| ASSERT(constant_mask.IsSmi()); |
| mask = Smi::Cast(constant_mask).Value(); |
| if ((mask < 0) || (mask > 255)) { |
| // Not a valid mask. |
| return false; |
| } |
| } |
| if (getter == MethodRecognizer::kFloat32x4GetSignMask) { |
| Simd32x4GetSignMaskInstr* instr = new(Z) Simd32x4GetSignMaskInstr( |
| getter, |
| new(Z) Value(call->ArgumentAt(0)), |
| call->deopt_id()); |
| ReplaceCall(call, instr); |
| return true; |
| } else if (getter == MethodRecognizer::kFloat32x4ShuffleMix) { |
| Simd32x4ShuffleMixInstr* instr = new(Z) Simd32x4ShuffleMixInstr( |
| getter, |
| new(Z) Value(call->ArgumentAt(0)), |
| new(Z) Value(call->ArgumentAt(1)), |
| mask, |
| call->deopt_id()); |
| ReplaceCall(call, instr); |
| return true; |
| } else { |
| ASSERT((getter == MethodRecognizer::kFloat32x4Shuffle) || |
| (getter == MethodRecognizer::kFloat32x4ShuffleX) || |
| (getter == MethodRecognizer::kFloat32x4ShuffleY) || |
| (getter == MethodRecognizer::kFloat32x4ShuffleZ) || |
| (getter == MethodRecognizer::kFloat32x4ShuffleW)); |
| Simd32x4ShuffleInstr* instr = new(Z) Simd32x4ShuffleInstr( |
| getter, |
| new(Z) Value(call->ArgumentAt(0)), |
| mask, |
| call->deopt_id()); |
| ReplaceCall(call, instr); |
| return true; |
| } |
| UNREACHABLE(); |
| return false; |
| } |
| |
| |
// Inlines a recognized Float64x2 lane getter (x or y) as a Simd64x2 shuffle
// instruction. Any other getter kind is a caller bug (UNREACHABLE).
bool FlowGraphOptimizer::InlineFloat64x2Getter(InstanceCallInstr* call,
                                               MethodRecognizer::Kind getter) {
  if (!ShouldInlineSimd()) {
    return false;
  }
  // Guard the receiver with a class check derived from the type feedback.
  AddCheckClass(call->ArgumentAt(0),
                ICData::ZoneHandle(
                    Z, call->ic_data()->AsUnaryClassChecksForArgNr(0)),
                call->deopt_id(),
                call->env(),
                call);
  if ((getter == MethodRecognizer::kFloat64x2GetX) ||
      (getter == MethodRecognizer::kFloat64x2GetY)) {
    // Mask argument is 0; the selected lane presumably comes from |getter|
    // itself — confirm in Simd64x2ShuffleInstr.
    Simd64x2ShuffleInstr* instr = new(Z) Simd64x2ShuffleInstr(
        getter,
        new(Z) Value(call->ArgumentAt(0)),
        0,
        call->deopt_id());
    ReplaceCall(call, instr);
    return true;
  }
  UNREACHABLE();
  return false;
}
| |
| |
// Inlines a recognized Int32x4 getter (signMask, shuffle, shuffleMix, or a
// flag accessor) as a specialized SIMD instruction.
// Returns false when SIMD inlining is disabled or when the shuffle mask is
// not a compile-time constant smi in [0, 255].
bool FlowGraphOptimizer::InlineInt32x4Getter(InstanceCallInstr* call,
                                             MethodRecognizer::Kind getter) {
  if (!ShouldInlineSimd()) {
    return false;
  }
  // Guard the receiver with a class check derived from the type feedback.
  AddCheckClass(call->ArgumentAt(0),
                ICData::ZoneHandle(
                    Z, call->ic_data()->AsUnaryClassChecksForArgNr(0)),
                call->deopt_id(),
                call->env(),
                call);
  intptr_t mask = 0;
  if ((getter == MethodRecognizer::kInt32x4Shuffle) ||
      (getter == MethodRecognizer::kInt32x4ShuffleMix)) {
    // Extract shuffle mask. It must be a constant smi in [0, 255] so it can
    // be encoded directly in the shuffle instruction.
    Definition* mask_definition = NULL;
    if (getter == MethodRecognizer::kInt32x4Shuffle) {
      ASSERT(call->ArgumentCount() == 2);
      mask_definition = call->ArgumentAt(1);
    } else {
      ASSERT(getter == MethodRecognizer::kInt32x4ShuffleMix);
      ASSERT(call->ArgumentCount() == 3);
      mask_definition = call->ArgumentAt(2);
    }
    if (!mask_definition->IsConstant()) {
      return false;
    }
    ASSERT(mask_definition->IsConstant());
    ConstantInstr* constant_instruction = mask_definition->AsConstant();
    const Object& constant_mask = constant_instruction->value();
    if (!constant_mask.IsSmi()) {
      return false;
    }
    ASSERT(constant_mask.IsSmi());
    mask = Smi::Cast(constant_mask).Value();
    if ((mask < 0) || (mask > 255)) {
      // Not a valid mask.
      return false;
    }
  }
  if (getter == MethodRecognizer::kInt32x4GetSignMask) {
    Simd32x4GetSignMaskInstr* instr = new(Z) Simd32x4GetSignMaskInstr(
        getter,
        new(Z) Value(call->ArgumentAt(0)),
        call->deopt_id());
    ReplaceCall(call, instr);
    return true;
  } else if (getter == MethodRecognizer::kInt32x4ShuffleMix) {
    Simd32x4ShuffleMixInstr* instr = new(Z) Simd32x4ShuffleMixInstr(
        getter,
        new(Z) Value(call->ArgumentAt(0)),
        new(Z) Value(call->ArgumentAt(1)),
        mask,
        call->deopt_id());
    ReplaceCall(call, instr);
    return true;
  } else if (getter == MethodRecognizer::kInt32x4Shuffle) {
    Simd32x4ShuffleInstr* instr = new(Z) Simd32x4ShuffleInstr(
        getter,
        new(Z) Value(call->ArgumentAt(0)),
        mask,
        call->deopt_id());
    ReplaceCall(call, instr);
    return true;
  } else {
    // Any other recognized Int32x4 getter is treated as a flag accessor.
    Int32x4GetFlagInstr* instr = new(Z) Int32x4GetFlagInstr(
        getter,
        new(Z) Value(call->ArgumentAt(0)),
        call->deopt_id());
    ReplaceCall(call, instr);
    return true;
  }
}
| |
| |
| bool FlowGraphOptimizer::InlineFloat32x4BinaryOp(InstanceCallInstr* call, |
| Token::Kind op_kind) { |
| if (!ShouldInlineSimd()) { |
| return false; |
| } |
| ASSERT(call->ArgumentCount() == 2); |
| Definition* left = call->ArgumentAt(0); |
| Definition* right = call->ArgumentAt(1); |
| // Type check left. |
| AddCheckClass(left, |
| ICData::ZoneHandle( |
| Z, call->ic_data()->AsUnaryClassChecksForArgNr(0)), |
| call->deopt_id(), |
| call->env(), |
| call); |
| // Type check right. |
| AddCheckClass(right, |
| ICData::ZoneHandle( |
| Z, call->ic_data()->AsUnaryClassChecksForArgNr(1)), |
| call->deopt_id(), |
| call->env(), |
| call); |
| // Replace call. |
| BinaryFloat32x4OpInstr* float32x4_bin_op = |
| new(Z) BinaryFloat32x4OpInstr( |
| op_kind, new(Z) Value(left), new(Z) Value(right), |
| call->deopt_id()); |
| ReplaceCall(call, float32x4_bin_op); |
| |
| return true; |
| } |
| |
| |
| bool FlowGraphOptimizer::InlineInt32x4BinaryOp(InstanceCallInstr* call, |
| Token::Kind op_kind) { |
| if (!ShouldInlineSimd()) { |
| return false; |
| } |
| ASSERT(call->ArgumentCount() == 2); |
| Definition* left = call->ArgumentAt(0); |
| Definition* right = call->ArgumentAt(1); |
| // Type check left. |
| AddCheckClass(left, |
| ICData::ZoneHandle( |
| Z, call->ic_data()->AsUnaryClassChecksForArgNr(0)), |
| call->deopt_id(), |
| call->env(), |
| call); |
| // Type check right. |
| AddCheckClass(right, |
| ICData::ZoneHandle(Z, |
| call->ic_data()->AsUnaryClassChecksForArgNr(1)), |
| call->deopt_id(), |
| call->env(), |
| call); |
| // Replace call. |
| BinaryInt32x4OpInstr* int32x4_bin_op = |
| new(Z) BinaryInt32x4OpInstr( |
| op_kind, new(Z) Value(left), new(Z) Value(right), |
| call->deopt_id()); |
| ReplaceCall(call, int32x4_bin_op); |
| return true; |
| } |
| |
| |
| bool FlowGraphOptimizer::InlineFloat64x2BinaryOp(InstanceCallInstr* call, |
| Token::Kind op_kind) { |
| if (!ShouldInlineSimd()) { |
| return false; |
| } |
| ASSERT(call->ArgumentCount() == 2); |
| Definition* left = call->ArgumentAt(0); |
| Definition* right = call->ArgumentAt(1); |
| // Type check left. |
| AddCheckClass(left, |
| ICData::ZoneHandle( |
| call->ic_data()->AsUnaryClassChecksForArgNr(0)), |
| call->deopt_id(), |
| call->env(), |
| call); |
| // Type check right. |
| AddCheckClass(right, |
| ICData::ZoneHandle( |
| call->ic_data()->AsUnaryClassChecksForArgNr(1)), |
| call->deopt_id(), |
| call->env(), |
| call); |
| // Replace call. |
| BinaryFloat64x2OpInstr* float64x2_bin_op = |
| new(Z) BinaryFloat64x2OpInstr( |
| op_kind, new(Z) Value(left), new(Z) Value(right), |
| call->deopt_id()); |
| ReplaceCall(call, float64x2_bin_op); |
| return true; |
| } |
| |
| |
| // Only unique implicit instance getters can be currently handled. |
| // Returns false if 'allow_check' is false and a check is needed. |
| bool FlowGraphOptimizer::TryInlineInstanceGetter(InstanceCallInstr* call, |
| bool allow_check) { |
| ASSERT(call->HasICData()); |
| const ICData& ic_data = *call->ic_data(); |
| if (ic_data.NumberOfUsedChecks() == 0) { |
| // No type feedback collected. |
| return false; |
| } |
| |
| if (!ic_data.HasOneTarget()) { |
| // Polymorphic sites are inlined like normal methods by conventional |
| // inlining in FlowGraphInliner. |
| return false; |
| } |
| |
| const Function& target = Function::Handle(Z, ic_data.GetTargetAt(0)); |
| if (target.kind() != RawFunction::kImplicitGetter) { |
| // Non-implicit getters are inlined like normal methods by conventional |
| // inlining in FlowGraphInliner. |
| return false; |
| } |
| return InlineImplicitInstanceGetter(call, allow_check); |
| } |
| |
| |
| bool FlowGraphOptimizer::TryReplaceInstanceCallWithInline( |
| InstanceCallInstr* call) { |
| Function& target = Function::Handle(Z); |
| GrowableArray<intptr_t> class_ids; |
| call->ic_data()->GetCheckAt(0, &class_ids, &target); |
| const intptr_t receiver_cid = class_ids[0]; |
| |
| TargetEntryInstr* entry; |
| Definition* last; |
| if (!TryInlineRecognizedMethod(receiver_cid, |
| target, |
| call, |
| call->ArgumentAt(0), |
| call->token_pos(), |
| *call->ic_data(), |
| &entry, &last)) { |
| return false; |
| } |
| |
| // Insert receiver class check. |
| AddReceiverCheck(call); |
| // Remove the original push arguments. |
| for (intptr_t i = 0; i < |