| // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file |
| // for details. All rights reserved. Use of this source code is governed by a |
| // BSD-style license that can be found in the LICENSE file. |
| |
| #include "vm/runtime_entry.h" |
| |
| #include <memory> |
| |
| #include "platform/memory_sanitizer.h" |
| #include "platform/thread_sanitizer.h" |
| #include "vm/code_descriptors.h" |
| #include "vm/code_patcher.h" |
| #include "vm/compiler/api/deopt_id.h" |
| #include "vm/compiler/api/type_check_mode.h" |
| #include "vm/compiler/jit/compiler.h" |
| #include "vm/dart_api_impl.h" |
| #include "vm/dart_api_state.h" |
| #include "vm/dart_entry.h" |
| #include "vm/debugger.h" |
| #include "vm/double_conversion.h" |
| #include "vm/exceptions.h" |
| #include "vm/ffi_callback_metadata.h" |
| #include "vm/flags.h" |
| #include "vm/heap/verifier.h" |
| #include "vm/instructions.h" |
| #include "vm/interpreter.h" |
| #include "vm/kernel_isolate.h" |
| #include "vm/message.h" |
| #include "vm/message_handler.h" |
| #include "vm/object_store.h" |
| #include "vm/parser.h" |
| #include "vm/resolver.h" |
| #include "vm/service_isolate.h" |
| #include "vm/stack_frame.h" |
| #include "vm/symbols.h" |
| #include "vm/thread.h" |
| #include "vm/type_testing_stubs.h" |
| #include "vm/zone_text_buffer.h" |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| #include "vm/deopt_instructions.h" |
| #endif // !defined(DART_PRECOMPILED_RUNTIME) |
| |
| namespace dart { |
| |
| static constexpr intptr_t kDefaultMaxSubtypeCacheEntries = |
| SubtypeTestCache::MaxEntriesForCacheAllocatedFor(1000); |
| DEFINE_FLAG( |
| int, |
| max_subtype_cache_entries, |
| kDefaultMaxSubtypeCacheEntries, |
| "Maximum number of subtype cache entries (number of checks cached)."); |
| DEFINE_FLAG( |
| int, |
| regexp_optimization_counter_threshold, |
| 1000, |
| "RegExp's usage-counter value before it is optimized, -1 means never"); |
| DEFINE_FLAG(int, |
| reoptimization_counter_threshold, |
| 4000, |
| "Counter threshold before a function gets reoptimized."); |
| DEFINE_FLAG(bool, |
| runtime_allocate_old, |
| false, |
| "Use old-space for allocation via runtime calls."); |
| DEFINE_FLAG(bool, |
| runtime_allocate_spill_tlab, |
| false, |
| "Ensure results of allocation via runtime calls are not in an " |
| "active TLAB."); |
| DEFINE_FLAG(bool, trace_deoptimization, false, "Trace deoptimization"); |
| DEFINE_FLAG(bool, |
| trace_deoptimization_verbose, |
| false, |
| "Trace deoptimization verbose"); |
| |
| DECLARE_FLAG(int, max_deoptimization_counter_threshold); |
| DECLARE_FLAG(bool, trace_compiler); |
| DECLARE_FLAG(bool, trace_optimizing_compiler); |
| DECLARE_FLAG(int, max_polymorphic_checks); |
| |
| DEFINE_FLAG(bool, trace_osr, false, "Trace attempts at on-stack replacement."); |
| |
| DEFINE_FLAG(int, gc_every, 0, "Run major GC on every N stack overflow checks"); |
| DEFINE_FLAG(int, |
| stacktrace_every, |
| 0, |
| "Compute debugger stacktrace on every N stack overflow checks"); |
| DEFINE_FLAG(charp, |
| stacktrace_filter, |
| nullptr, |
| "Compute stacktrace in named function on stack overflow checks"); |
| DEFINE_FLAG(charp, |
| deoptimize_filter, |
| nullptr, |
| "Deoptimize in named function on stack overflow checks"); |
| DEFINE_FLAG(charp, |
| deoptimize_on_runtime_call_name_filter, |
| nullptr, |
| "Runtime call name filter for --deoptimize-on-runtime-call-every."); |
| |
| DEFINE_FLAG(bool, |
| unopt_monomorphic_calls, |
| true, |
| "Enable specializing monomorphic calls from unoptimized code."); |
| DEFINE_FLAG(bool, |
| unopt_megamorphic_calls, |
| true, |
| "Enable specializing megamorphic calls from unoptimized code."); |
| DEFINE_FLAG(bool, |
| verbose_stack_overflow, |
| false, |
| "Print additional details about stack overflow."); |
| |
| DECLARE_FLAG(int, reload_every); |
| DECLARE_FLAG(bool, reload_every_optimized); |
| DECLARE_FLAG(bool, reload_every_back_off); |
| |
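| // Throw a RangeError if the index is out of bounds, or an ArgumentError if
| // either value is not an integer. A minimal Dart example that can end up
| // here (assuming the bounds check is not eliminated by the compiler):
| //
| //   final list = <int>[1, 2, 3];
| //   list[5];  // throws RangeError
| //
| // Arg0: length of the indexable object.
| // Arg1: index to check.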
| DEFINE_RUNTIME_ENTRY(RangeError, 2) { |
| const Instance& length = Instance::CheckedHandle(zone, arguments.ArgAt(0)); |
| const Instance& index = Instance::CheckedHandle(zone, arguments.ArgAt(1)); |
| if (!length.IsInteger()) { |
| // Throw: new ArgumentError.value(length, "length", "is not an integer"); |
| const Array& args = Array::Handle(zone, Array::New(3)); |
| args.SetAt(0, length); |
| args.SetAt(1, Symbols::Length()); |
| args.SetAt(2, String::Handle(zone, String::New("is not an integer"))); |
| Exceptions::ThrowByType(Exceptions::kArgumentValue, args); |
| } |
| if (!index.IsInteger()) { |
| // Throw: new ArgumentError.value(index, "index", "is not an integer"); |
| const Array& args = Array::Handle(zone, Array::New(3)); |
| args.SetAt(0, index); |
| args.SetAt(1, Symbols::Index()); |
| args.SetAt(2, String::Handle(zone, String::New("is not an integer"))); |
| Exceptions::ThrowByType(Exceptions::kArgumentValue, args); |
| } |
| // Throw: new RangeError.range(index, 0, length - 1, "length"); |
| const Array& args = Array::Handle(zone, Array::New(4)); |
| args.SetAt(0, index); |
| args.SetAt(1, Integer::Handle(zone, Integer::New(0))); |
| args.SetAt( |
| 2, Integer::Handle( |
| zone, Integer::Cast(length).ArithmeticOp( |
| Token::kSUB, Integer::Handle(zone, Integer::New(1))))); |
| args.SetAt(3, Symbols::Length()); |
| Exceptions::ThrowByType(Exceptions::kRange, args); |
| } |
| |
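| // Variant of the RangeError entry for callers that keep the length and index
| // unboxed: both values arrive through dedicated unboxed-int64 slots on the
| // current Thread instead of as tagged Dart arguments, so this entry declares
| // zero arguments.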
| DEFINE_RUNTIME_ENTRY(RangeErrorUnboxedInt64, 0) { |
| int64_t unboxed_length = thread->unboxed_int64_runtime_arg(); |
| int64_t unboxed_index = thread->unboxed_int64_runtime_second_arg(); |
| const auto& length = Integer::Handle(zone, Integer::New(unboxed_length)); |
| const auto& index = Integer::Handle(zone, Integer::New(unboxed_index)); |
| // Throw: new RangeError.range(index, 0, length - 1, "length"); |
| const Array& args = Array::Handle(zone, Array::New(4)); |
| args.SetAt(0, index); |
| args.SetAt(1, Integer::Handle(zone, Integer::New(0))); |
| args.SetAt( |
| 2, Integer::Handle( |
| zone, Integer::Cast(length).ArithmeticOp( |
| Token::kSUB, Integer::Handle(zone, Integer::New(1))))); |
| args.SetAt(3, Symbols::Length()); |
| Exceptions::ThrowByType(Exceptions::kRange, args); |
| } |
| |
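| // Throw an UnsupportedError for an invalid write.
| // Arg0: receiver of the failed write.
| // Arg1: kind of the failed write; the values mirror CheckWritableInstr::Kind
| // (see the case comments below).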
| DEFINE_RUNTIME_ENTRY(WriteError, 2) { |
| const Instance& receiver = Instance::CheckedHandle(zone, arguments.ArgAt(0)); |
| const Smi& kind = Smi::CheckedHandle(zone, arguments.ArgAt(1)); |
| auto& message = String::Handle(zone); |
| switch (kind.Value()) { |
| case 0: // CheckWritableInstr::Kind::kWriteUnmodifiableTypedData: |
| message = String::NewFormatted("Cannot modify an unmodifiable list: %s", |
| receiver.ToCString()); |
| break; |
| case 1: // CheckWritableInstr::Kind::kDeeplyImmutableAttachNativeFinalizer: |
| message = String::NewFormatted( |
| "Cannot attach NativeFinalizer to deeply immutable object: %s", |
| receiver.ToCString()); |
| break; |
| } |
| const Array& args = Array::Handle(Array::New(1)); |
| args.SetAt(0, message); |
| Exceptions::ThrowByType(Exceptions::kUnsupported, args); |
| } |
| |
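| // Throws the error appropriate for a null access: an ArgumentError when the
| // null value was passed as a parameter, a TypeError when the null-check
| // operator failed (no selector available), and otherwise a NoSuchMethodError
| // that mirrors a dynamic invocation of the selector on null.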
| static void NullErrorHelper(Zone* zone, |
| const String& selector, |
| bool is_param_name = false) { |
| if (is_param_name) { |
| const String& error = String::Handle( |
| selector.IsNull() |
| ? String::New("argument value is null") |
| : String::NewFormatted("argument value for '%s' is null", |
| selector.ToCString())); |
| Exceptions::ThrowArgumentError(error); |
| return; |
| } |
| |
| // If the selector is null, this must be a null check that wasn't due to a |
| // method invocation, so it was due to the null check operator.
| if (selector.IsNull()) { |
| const Array& args = Array::Handle(zone, Array::New(4)); |
| args.SetAt( |
| 3, String::Handle( |
| zone, String::New("Null check operator used on a null value"))); |
| Exceptions::ThrowByType(Exceptions::kType, args); |
| return; |
| } |
| |
| InvocationMirror::Kind kind = InvocationMirror::kMethod; |
| if (Field::IsGetterName(selector)) { |
| kind = InvocationMirror::kGetter; |
| } else if (Field::IsSetterName(selector)) { |
| kind = InvocationMirror::kSetter; |
| } |
| |
| const Smi& invocation_type = Smi::Handle( |
| zone, |
| Smi::New(InvocationMirror::EncodeType(InvocationMirror::kDynamic, kind))); |
| |
| const Array& args = Array::Handle(zone, Array::New(7)); |
| args.SetAt(0, /* instance */ Object::null_object()); |
| args.SetAt(1, selector); |
| args.SetAt(2, invocation_type); |
| args.SetAt(3, /* func_type_args_length */ Object::smi_zero()); |
| args.SetAt(4, /* func_type_args */ Object::null_object()); |
| args.SetAt(5, /* func_args */ Object::null_object()); |
| args.SetAt(6, /* func_arg_names */ Object::null_object()); |
| Exceptions::ThrowByType(Exceptions::kNoSuchMethod, args); |
| } |
| |
| static void DoThrowNullError(Isolate* isolate, |
| Thread* thread, |
| Zone* zone, |
| bool is_param) { |
| DartFrameIterator iterator(thread, |
| StackFrameIterator::kNoCrossThreadIteration); |
| const StackFrame* caller_frame = iterator.NextFrame(); |
| ASSERT(caller_frame->IsDartFrame()); |
| ASSERT(!caller_frame->is_interpreted()); |
| const Code& code = Code::Handle(zone, caller_frame->LookupDartCode()); |
| const uword pc_offset = caller_frame->pc() - code.PayloadStart(); |
| |
| if (FLAG_shared_slow_path_triggers_gc) { |
| isolate->group()->heap()->CollectAllGarbage(GCReason::kDebugging); |
| } |
| |
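| // Recover the name of the member that was accessed on null: the code source
| // map records, for the call's PC offset, an object-pool index holding the
| // selector. If the code source map is unavailable, fall back to a
| // placeholder name.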
| const CodeSourceMap& map = |
| CodeSourceMap::Handle(zone, code.code_source_map()); |
| String& member_name = String::Handle(zone); |
| if (!map.IsNull()) { |
| CodeSourceMapReader reader(map, Array::null_array(), |
| Function::null_function()); |
| const intptr_t name_index = reader.GetNullCheckNameIndexAt(pc_offset); |
| RELEASE_ASSERT(name_index >= 0); |
| |
| const ObjectPool& pool = ObjectPool::Handle(zone, code.GetObjectPool()); |
| member_name ^= pool.ObjectAt(name_index); |
| } else { |
| member_name = Symbols::OptimizedOut().ptr(); |
| } |
| |
| NullErrorHelper(zone, member_name, is_param); |
| } |
| |
| DEFINE_RUNTIME_ENTRY(NullError, 0) { |
| DoThrowNullError(isolate, thread, zone, /*is_param=*/false); |
| } |
| |
| // Collects information about pointers within the top |kMaxSlotsCollected| |
| // slots on the stack. |
| // TODO(b/179632636) This code was added in an attempt to better understand
| // b/179632636 and should be removed in the future. |
| void ReportImpossibleNullError(intptr_t cid, |
| StackFrame* caller_frame, |
| Thread* thread) { |
| TextBuffer buffer(512); |
| buffer.Printf("hit null error with cid %" Pd ", caller context: ", cid); |
| |
| const intptr_t kMaxSlotsCollected = 5; |
| const auto slots = reinterpret_cast<ObjectPtr*>(caller_frame->sp()); |
| const intptr_t num_slots_in_frame = |
| reinterpret_cast<ObjectPtr*>(caller_frame->fp()) - slots; |
| // Collect at most kMaxSlotsCollected slots, and never read past the frame.
| const auto num_slots_to_collect =
| Utils::Minimum(kMaxSlotsCollected, num_slots_in_frame);
| bool comma = false; |
| for (intptr_t i = 0; i < num_slots_to_collect; i++) { |
| const ObjectPtr ptr = slots[i]; |
| buffer.Printf("%s[sp+%" Pd "] %" Pp "", comma ? ", " : "", i, |
| static_cast<uword>(ptr)); |
| if (ptr->IsHeapObject() && |
| (Dart::vm_isolate_group()->heap()->Contains( |
| UntaggedObject::ToAddr(ptr)) || |
| thread->heap()->Contains(UntaggedObject::ToAddr(ptr)))) { |
| buffer.Printf("(%" Pp ")", static_cast<uword>(ptr->untag()->tags_)); |
| } |
| comma = true; |
| } |
| |
| const char* message = buffer.buffer(); |
| FATAL("%s", message); |
| } |
| |
| DEFINE_RUNTIME_ENTRY(DispatchTableNullError, 1) { |
| const Smi& cid = Smi::CheckedHandle(zone, arguments.ArgAt(0)); |
| if (cid.Value() != kNullCid) { |
| // We hit a null error, but the receiver itself is not null. This most
| // likely indicates memory corruption. Crash the VM, but provide some
| // additional information about the arguments on the stack.
| DartFrameIterator iterator(thread, |
| StackFrameIterator::kNoCrossThreadIteration); |
| StackFrame* caller_frame = iterator.NextFrame(); |
| RELEASE_ASSERT(caller_frame->IsDartFrame()); |
| ReportImpossibleNullError(cid.Value(), caller_frame, thread); |
| } |
| DoThrowNullError(isolate, thread, zone, /*is_param=*/false); |
| } |
| |
| DEFINE_RUNTIME_ENTRY(NullErrorWithSelector, 1) { |
| const String& selector = String::CheckedHandle(zone, arguments.ArgAt(0)); |
| NullErrorHelper(zone, selector); |
| } |
| |
| DEFINE_RUNTIME_ENTRY(NullCastError, 0) { |
| NullErrorHelper(zone, String::null_string()); |
| } |
| |
| DEFINE_RUNTIME_ENTRY(ArgumentNullError, 0) { |
| DoThrowNullError(isolate, thread, zone, /*is_param=*/true); |
| } |
| |
| DEFINE_RUNTIME_ENTRY(ArgumentError, 1) { |
| const Instance& value = Instance::CheckedHandle(zone, arguments.ArgAt(0)); |
| Exceptions::ThrowArgumentError(value); |
| } |
| |
| DEFINE_RUNTIME_ENTRY(ArgumentErrorUnboxedInt64, 0) { |
| // Unboxed value is passed through a dedicated slot in Thread. |
| int64_t unboxed_value = arguments.thread()->unboxed_int64_runtime_arg(); |
| const Integer& value = Integer::Handle(zone, Integer::New(unboxed_value)); |
| Exceptions::ThrowArgumentError(value); |
| } |
| |
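| // Convert an unboxed double, passed via a dedicated slot in Thread, to an
| // integer.
| // Arg0: recognized method kind (kDoubleToInteger, kDoubleFloorToInt or
| // kDoubleCeilToInt) selecting whether to floor or ceil before converting.
| // Return value: the resulting Integer.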
| DEFINE_RUNTIME_ENTRY(DoubleToInteger, 1) { |
| // Unboxed value is passed through a dedicated slot in Thread. |
| double val = arguments.thread()->unboxed_double_runtime_arg(); |
| const Smi& recognized_kind = Smi::CheckedHandle(zone, arguments.ArgAt(0)); |
| switch (recognized_kind.Value()) { |
| case MethodRecognizer::kDoubleToInteger: |
| break; |
| case MethodRecognizer::kDoubleFloorToInt: |
| val = floor(val); |
| break; |
| case MethodRecognizer::kDoubleCeilToInt: |
| val = ceil(val); |
| break; |
| default: |
| UNREACHABLE(); |
| } |
| arguments.SetReturn(Integer::Handle(zone, DoubleToInteger(zone, val))); |
| } |
| |
| DEFINE_RUNTIME_ENTRY(IntegerDivisionByZeroException, 0) { |
| const Array& args = Array::Handle(zone, Array::New(0)); |
| Exceptions::ThrowByType(Exceptions::kIntegerDivisionByZeroException, args); |
| } |
| |
| static Heap::Space SpaceForRuntimeAllocation() { |
| return UNLIKELY(FLAG_runtime_allocate_old) ? Heap::kOld : Heap::kNew; |
| } |
| |
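| // With --runtime_allocate_spill_tlab, periodically abandon the remainder of
| // the current thread's TLAB so that objects just allocated in the runtime do
| // not remain in an active TLAB (see the flag's description above). Abandoning
| // on only every 10th call limits the overhead of this stress mode.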
| static void RuntimeAllocationEpilogue(Thread* thread) { |
| if (UNLIKELY(FLAG_runtime_allocate_spill_tlab)) { |
| static RelaxedAtomic<uword> count = 0; |
| if ((count++ % 10) == 0) { |
| thread->heap()->new_space()->AbandonRemainingTLAB(thread); |
| } |
| } |
| } |
| |
| // Allocate a fixed-length array of the given element type.
| // This runtime entry is never called for allocating a List of a generic type,
| // because a prior runtime call instantiates the element type if necessary.
| // Arg0: array length. |
| // Arg1: array type arguments, i.e. vector of 1 type, the element type. |
| // Return value: newly allocated array of length arg0. |
| DEFINE_RUNTIME_ENTRY(AllocateArray, 2) { |
| const Instance& length = Instance::CheckedHandle(zone, arguments.ArgAt(0)); |
| if (!length.IsInteger()) { |
| // Throw: new ArgumentError.value(length, "length", "is not an integer"); |
| const Array& args = Array::Handle(zone, Array::New(3)); |
| args.SetAt(0, length); |
| args.SetAt(1, Symbols::Length()); |
| args.SetAt(2, String::Handle(zone, String::New("is not an integer"))); |
| Exceptions::ThrowByType(Exceptions::kArgumentValue, args); |
| } |
| const int64_t len = Integer::Cast(length).Value(); |
| if (len < 0) { |
| // Throw: new RangeError.range(length, 0, Array::kMaxElements, "length"); |
| Exceptions::ThrowRangeError("length", Integer::Cast(length), 0, |
| Array::kMaxElements); |
| } |
| if (len > Array::kMaxElements) { |
| Exceptions::ThrowOOM(); |
| } |
| |
| const Array& array = Array::Handle( |
| zone, |
| Array::New(static_cast<intptr_t>(len), SpaceForRuntimeAllocation())); |
| TypeArguments& element_type = |
| TypeArguments::CheckedHandle(zone, arguments.ArgAt(1)); |
| // An Array is raw or takes one type argument. However, its type argument |
| // vector may be longer than 1 due to a type optimization reusing the type |
| // argument vector of the instantiator. |
| ASSERT(element_type.IsNull() || |
| (element_type.Length() >= 1 && element_type.IsInstantiated())); |
| array.SetTypeArguments(element_type); // May be null. |
| arguments.SetReturn(array); |
| RuntimeAllocationEpilogue(thread); |
| } |
| |
| DEFINE_RUNTIME_ENTRY_NO_LAZY_DEOPT(AllocateDouble, 0) { |
| if (FLAG_shared_slow_path_triggers_gc) { |
| isolate->group()->heap()->CollectAllGarbage(GCReason::kDebugging); |
| } |
| arguments.SetReturn( |
| Object::Handle(zone, Double::New(0.0, SpaceForRuntimeAllocation()))); |
| RuntimeAllocationEpilogue(thread); |
| } |
| |
| DEFINE_RUNTIME_ENTRY_NO_LAZY_DEOPT(BoxDouble, 0) { |
| const double val = thread->unboxed_double_runtime_arg(); |
| arguments.SetReturn( |
| Object::Handle(zone, Double::New(val, SpaceForRuntimeAllocation()))); |
| RuntimeAllocationEpilogue(thread); |
| } |
| |
| DEFINE_RUNTIME_ENTRY_NO_LAZY_DEOPT(BoxFloat32x4, 0) { |
| const auto val = thread->unboxed_simd128_runtime_arg(); |
| arguments.SetReturn( |
| Object::Handle(zone, Float32x4::New(val, SpaceForRuntimeAllocation()))); |
| RuntimeAllocationEpilogue(thread); |
| } |
| |
| DEFINE_RUNTIME_ENTRY_NO_LAZY_DEOPT(BoxFloat64x2, 0) { |
| const auto val = thread->unboxed_simd128_runtime_arg(); |
| arguments.SetReturn( |
| Object::Handle(zone, Float64x2::New(val, SpaceForRuntimeAllocation()))); |
| RuntimeAllocationEpilogue(thread); |
| } |
| |
| DEFINE_RUNTIME_ENTRY_NO_LAZY_DEOPT(AllocateMint, 0) { |
| if (FLAG_shared_slow_path_triggers_gc) { |
| isolate->group()->heap()->CollectAllGarbage(GCReason::kDebugging); |
| } |
| arguments.SetReturn(Object::Handle( |
| zone, Integer::New(kMaxInt64, SpaceForRuntimeAllocation()))); |
| RuntimeAllocationEpilogue(thread); |
| } |
| |
| DEFINE_RUNTIME_ENTRY_NO_LAZY_DEOPT(AllocateFloat32x4, 0) { |
| if (FLAG_shared_slow_path_triggers_gc) { |
| isolate->group()->heap()->CollectAllGarbage(GCReason::kDebugging); |
| } |
| arguments.SetReturn(Object::Handle( |
| zone, Float32x4::New(0.0, 0.0, 0.0, 0.0, SpaceForRuntimeAllocation()))); |
| RuntimeAllocationEpilogue(thread); |
| } |
| |
| DEFINE_RUNTIME_ENTRY_NO_LAZY_DEOPT(AllocateFloat64x2, 0) { |
| if (FLAG_shared_slow_path_triggers_gc) { |
| isolate->group()->heap()->CollectAllGarbage(GCReason::kDebugging); |
| } |
| arguments.SetReturn(Object::Handle( |
| zone, Float64x2::New(0.0, 0.0, SpaceForRuntimeAllocation()))); |
| RuntimeAllocationEpilogue(thread); |
| } |
| |
| DEFINE_RUNTIME_ENTRY_NO_LAZY_DEOPT(AllocateInt32x4, 0) { |
| if (FLAG_shared_slow_path_triggers_gc) { |
| isolate->group()->heap()->CollectAllGarbage(GCReason::kDebugging); |
| } |
| arguments.SetReturn(Object::Handle( |
| zone, Int32x4::New(0, 0, 0, 0, SpaceForRuntimeAllocation()))); |
| RuntimeAllocationEpilogue(thread); |
| } |
| |
| // Allocate a typed data array of the given class id and length.
| // Arg0: class id. |
| // Arg1: number of elements. |
| // Return value: newly allocated typed data array. |
| DEFINE_RUNTIME_ENTRY(AllocateTypedData, 2) { |
| const intptr_t cid = Smi::CheckedHandle(zone, arguments.ArgAt(0)).Value(); |
| const auto& length = Instance::CheckedHandle(zone, arguments.ArgAt(1)); |
| if (!length.IsInteger()) { |
| const Array& args = Array::Handle(zone, Array::New(1)); |
| args.SetAt(0, length); |
| Exceptions::ThrowByType(Exceptions::kArgument, args); |
| } |
| const int64_t len = Integer::Cast(length).Value(); |
| const intptr_t max = TypedData::MaxElements(cid); |
| if (len < 0) { |
| Exceptions::ThrowRangeError("length", Integer::Cast(length), 0, max); |
| } else if (len > max) { |
| Exceptions::ThrowOOM(); |
| } |
| const auto& typed_data = |
| TypedData::Handle(zone, TypedData::New(cid, static_cast<intptr_t>(len), |
| SpaceForRuntimeAllocation())); |
| arguments.SetReturn(typed_data); |
| RuntimeAllocationEpilogue(thread); |
| } |
| |
| // Helper returning the token position of the Dart caller. |
| static TokenPosition GetCallerLocation() { |
| DartFrameIterator iterator(Thread::Current(), |
| StackFrameIterator::kNoCrossThreadIteration); |
| StackFrame* caller_frame = iterator.NextFrame(); |
| ASSERT(caller_frame != nullptr); |
| return caller_frame->GetTokenPos(); |
| } |
| |
| // Result of an invoke may be an unhandled exception, in which case we |
| // rethrow it. |
| static void ThrowIfError(const Object& result) { |
| if (!result.IsNull() && result.IsError()) { |
| Exceptions::PropagateError(Error::Cast(result)); |
| } |
| } |
| |
| // Allocate a new object. |
| // Arg0: class of the object that needs to be allocated. |
| // Arg1: type arguments of the object that needs to be allocated. |
| // Return value: newly allocated object. |
| DEFINE_RUNTIME_ENTRY(AllocateObject, 2) { |
| const Class& cls = Class::CheckedHandle(zone, arguments.ArgAt(0)); |
| ASSERT(cls.is_allocate_finalized()); |
| const Instance& instance = Instance::Handle( |
| zone, Instance::NewAlreadyFinalized(cls, SpaceForRuntimeAllocation())); |
| if (cls.NumTypeArguments() == 0) { |
| // No type arguments required for a non-parameterized type. |
| ASSERT(Instance::CheckedHandle(zone, arguments.ArgAt(1)).IsNull()); |
| } else { |
| const auto& type_arguments = |
| TypeArguments::CheckedHandle(zone, arguments.ArgAt(1)); |
| // Unless null (for a raw type), the type argument vector may be longer than |
| // necessary due to a type optimization reusing the type argument vector of |
| // the instantiator. |
| ASSERT(type_arguments.IsNull() || |
| (type_arguments.IsInstantiated() && |
| (type_arguments.Length() >= cls.NumTypeArguments()))); |
| instance.SetTypeArguments(type_arguments); |
| } |
| arguments.SetReturn(instance); |
| RuntimeAllocationEpilogue(thread); |
| } |
| |
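| // Note: this is a leaf runtime entry, invoked without a full transition out
| // of generated code, so the body must avoid handle allocation (see the NOTE
| // below) and anything else that could allocate on the Dart heap or reach a
| // safepoint.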
| DEFINE_LEAF_RUNTIME_ENTRY(uword /*ObjectPtr*/, |
| EnsureRememberedAndMarkingDeferred, |
| 2, |
| uword /*ObjectPtr*/ object_in, |
| Thread* thread) { |
| ObjectPtr object = static_cast<ObjectPtr>(object_in); |
| |
| // If we eliminate the generational write barrier when writing into an object, |
| // we need to ensure it's either a new-space object or it has been added to |
| // the remembered set. If we eliminate the incremental write barrier, we need |
| // to add the object to the deferred marking stack so it will be [re]scanned. |
| // |
| // NOTE: We use static_cast<>() instead of ::RawCast() to avoid handle |
| // allocations in debug mode. Handle allocations in leaf runtimes can cause |
| // memory leaks because they will allocate into a handle scope from the next |
| // outermost runtime code (to which the generated Dart code might not return |
| // in a long time). |
| bool skips_barrier = true; |
| if (object->IsArray()) { |
| const intptr_t length = Array::LengthOf(static_cast<ArrayPtr>(object)); |
| skips_barrier = compiler::target::WillAllocateNewOrRememberedArray(length); |
| } else if (object->IsContext()) { |
| const intptr_t num_context_variables = |
| Context::NumVariables(static_cast<ContextPtr>(object)); |
| skips_barrier = compiler::target::WillAllocateNewOrRememberedContext( |
| num_context_variables); |
| } |
| |
| if (skips_barrier) { |
| if (object->IsOldObject()) { |
| object->untag()->EnsureInRememberedSet(thread); |
| } |
| |
| if (thread->is_marking()) { |
| thread->DeferredMarkingStackAddObject(object); |
| } |
| } |
| |
| return static_cast<uword>(object); |
| } |
| END_LEAF_RUNTIME_ENTRY |
| |
| // Instantiate type. |
| // Arg0: uninstantiated type. |
| // Arg1: instantiator type arguments. |
| // Arg2: function type arguments. |
| // Return value: instantiated type. |
| DEFINE_RUNTIME_ENTRY(InstantiateType, 3) { |
| AbstractType& type = AbstractType::CheckedHandle(zone, arguments.ArgAt(0)); |
| const TypeArguments& instantiator_type_arguments = |
| TypeArguments::CheckedHandle(zone, arguments.ArgAt(1)); |
| const TypeArguments& function_type_arguments = |
| TypeArguments::CheckedHandle(zone, arguments.ArgAt(2)); |
| ASSERT(!type.IsNull()); |
| ASSERT(instantiator_type_arguments.IsNull() || |
| instantiator_type_arguments.IsInstantiated()); |
| ASSERT(function_type_arguments.IsNull() || |
| function_type_arguments.IsInstantiated()); |
| type = type.InstantiateFrom(instantiator_type_arguments, |
| function_type_arguments, kAllFree, Heap::kOld); |
| ASSERT(!type.IsNull() && type.IsInstantiated()); |
| arguments.SetReturn(type); |
| } |
| |
| // Instantiate type arguments. |
| // Arg0: uninstantiated type arguments. |
| // Arg1: instantiator type arguments. |
| // Arg2: function type arguments. |
| // Return value: instantiated type arguments. |
| DEFINE_RUNTIME_ENTRY(InstantiateTypeArguments, 3) { |
| TypeArguments& type_arguments = |
| TypeArguments::CheckedHandle(zone, arguments.ArgAt(0)); |
| const TypeArguments& instantiator_type_arguments = |
| TypeArguments::CheckedHandle(zone, arguments.ArgAt(1)); |
| const TypeArguments& function_type_arguments = |
| TypeArguments::CheckedHandle(zone, arguments.ArgAt(2)); |
| ASSERT(!type_arguments.IsNull() && !type_arguments.IsInstantiated()); |
| ASSERT(instantiator_type_arguments.IsNull() || |
| instantiator_type_arguments.IsInstantiated()); |
| ASSERT(function_type_arguments.IsNull() || |
| function_type_arguments.IsInstantiated()); |
| // Code inlined in the caller should have optimized the case where the |
| // instantiator can be reused as the type argument vector.
| ASSERT(!type_arguments.IsUninstantiatedIdentity()); |
| type_arguments = type_arguments.InstantiateAndCanonicalizeFrom( |
| instantiator_type_arguments, function_type_arguments); |
| ASSERT(type_arguments.IsNull() || type_arguments.IsInstantiated()); |
| arguments.SetReturn(type_arguments); |
| } |
| |
| // Helper routine for tracing a subtype check. |
| static void PrintSubtypeCheck(const AbstractType& subtype, |
| const AbstractType& supertype, |
| const bool result) { |
| DartFrameIterator iterator(Thread::Current(), |
| StackFrameIterator::kNoCrossThreadIteration); |
| StackFrame* caller_frame = iterator.NextFrame(); |
| ASSERT(caller_frame != nullptr); |
| |
| LogBlock lb; |
| THR_Print("SubtypeCheck: '%s' %d %s '%s' %d (pc: %#" Px ").\n", |
| subtype.NameCString(), subtype.type_class_id(), |
| result ? "is" : "is !", supertype.NameCString(), |
| supertype.type_class_id(), caller_frame->pc()); |
| |
| const Function& function = |
| Function::Handle(caller_frame->LookupDartFunction()); |
| if (function.HasSavedArgumentsDescriptor()) { |
| const auto& args_desc_array = Array::Handle(function.saved_args_desc()); |
| const ArgumentsDescriptor args_desc(args_desc_array); |
| THR_Print(" -> Function %s [%s]\n", function.ToFullyQualifiedCString(), |
| args_desc.ToCString()); |
| } else { |
| THR_Print(" -> Function %s\n", function.ToFullyQualifiedCString()); |
| } |
| } |
| |
| // Check that one type is a subtype of another, instantiating both from the
| // given type arguments if necessary, and throw a TypeError if not.
| // Arg0: instantiator type arguments |
| // Arg1: function type arguments |
| // Arg2: type to be a subtype of the other |
| // Arg3: type to be a supertype of the other |
| // Arg4: variable name of the subtype parameter |
| // No return value. |
| DEFINE_RUNTIME_ENTRY(SubtypeCheck, 5) { |
| const TypeArguments& instantiator_type_args = |
| TypeArguments::CheckedHandle(zone, arguments.ArgAt(0)); |
| const TypeArguments& function_type_args = |
| TypeArguments::CheckedHandle(zone, arguments.ArgAt(1)); |
| AbstractType& subtype = AbstractType::CheckedHandle(zone, arguments.ArgAt(2)); |
| AbstractType& supertype = |
| AbstractType::CheckedHandle(zone, arguments.ArgAt(3)); |
| const String& dst_name = String::CheckedHandle(zone, arguments.ArgAt(4)); |
| |
| ASSERT(!supertype.IsNull()); |
| ASSERT(!subtype.IsNull()); |
| |
| // Now that AssertSubtype may be checking types only available at runtime, |
| // we can't guarantee the supertype isn't the top type. |
| if (supertype.IsTopTypeForSubtyping()) return; |
| |
| // The supertype or subtype may not be instantiated. |
| if (AbstractType::InstantiateAndTestSubtype( |
| &subtype, &supertype, instantiator_type_args, function_type_args)) { |
| if (FLAG_trace_type_checks) { |
| // The supertype and subtype are now instantiated. Subtype check passed. |
| PrintSubtypeCheck(subtype, supertype, true); |
| } |
| return; |
| } |
| if (FLAG_trace_type_checks) { |
| // The supertype and subtype are now instantiated. Subtype check failed. |
| PrintSubtypeCheck(subtype, supertype, false); |
| } |
| |
| // Throw a dynamic type error. |
| const TokenPosition location = GetCallerLocation(); |
| Exceptions::CreateAndThrowTypeError(location, subtype, supertype, dst_name); |
| UNREACHABLE(); |
| } |
| |
| // Allocate a new closure and initialize its function, context,
| // instantiator type arguments and delayed type arguments fields. |
| // Arg0: function. |
| // Arg1: context. |
| // Arg2: instantiator type arguments. |
| // Arg3: delayed type arguments. |
| // Return value: newly allocated closure. |
| DEFINE_RUNTIME_ENTRY(AllocateClosure, 4) { |
| const auto& function = Function::CheckedHandle(zone, arguments.ArgAt(0)); |
| const auto& context = Object::Handle(zone, arguments.ArgAt(1)); |
| const auto& instantiator_type_args = |
| TypeArguments::CheckedHandle(zone, arguments.ArgAt(2)); |
| const auto& delayed_type_args = |
| TypeArguments::CheckedHandle(zone, arguments.ArgAt(3)); |
| const Closure& closure = Closure::Handle( |
| zone, Closure::New(instantiator_type_args, Object::null_type_arguments(), |
| delayed_type_args, function, context, |
| SpaceForRuntimeAllocation())); |
| arguments.SetReturn(closure); |
| RuntimeAllocationEpilogue(thread); |
| } |
| |
| // Allocate a new context large enough to hold the given number of variables. |
| // Arg0: number of variables. |
| // Return value: newly allocated context. |
| DEFINE_RUNTIME_ENTRY(AllocateContext, 1) { |
| const Smi& num_variables = Smi::CheckedHandle(zone, arguments.ArgAt(0)); |
| const Context& context = Context::Handle( |
| zone, Context::New(num_variables.Value(), SpaceForRuntimeAllocation())); |
| arguments.SetReturn(context); |
| RuntimeAllocationEpilogue(thread); |
| } |
| |
| // Make a copy of the given context, including the values of the captured |
| // variables. |
| // Arg0: the context to be cloned. |
| // Return value: newly allocated context. |
| DEFINE_RUNTIME_ENTRY(CloneContext, 1) { |
| const Context& ctx = Context::CheckedHandle(zone, arguments.ArgAt(0)); |
| Context& cloned_ctx = Context::Handle( |
| zone, Context::New(ctx.num_variables(), SpaceForRuntimeAllocation())); |
| cloned_ctx.set_parent(Context::Handle(zone, ctx.parent())); |
| Object& inst = Object::Handle(zone); |
| for (int i = 0; i < ctx.num_variables(); i++) { |
| inst = ctx.At(i); |
| cloned_ctx.SetAt(i, inst); |
| } |
| arguments.SetReturn(cloned_ctx); |
| RuntimeAllocationEpilogue(thread); |
| } |
| |
| // Allocate a new record instance. |
| // Arg0: record shape id. |
| // Return value: newly allocated record. |
| DEFINE_RUNTIME_ENTRY(AllocateRecord, 1) { |
| const RecordShape shape(Smi::RawCast(arguments.ArgAt(0))); |
| const Record& record = |
| Record::Handle(zone, Record::New(shape, SpaceForRuntimeAllocation())); |
| arguments.SetReturn(record); |
| RuntimeAllocationEpilogue(thread); |
| } |
| |
| // Allocate a new small record instance and initialize its fields. |
| // Arg0: record shape id. |
| // Arg1-Arg3: field values. |
| // Return value: newly allocated record. |
| DEFINE_RUNTIME_ENTRY(AllocateSmallRecord, 4) { |
| const RecordShape shape(Smi::RawCast(arguments.ArgAt(0))); |
| const auto& value0 = Instance::CheckedHandle(zone, arguments.ArgAt(1)); |
| const auto& value1 = Instance::CheckedHandle(zone, arguments.ArgAt(2)); |
| const auto& value2 = Instance::CheckedHandle(zone, arguments.ArgAt(3)); |
| const Record& record = |
| Record::Handle(zone, Record::New(shape, SpaceForRuntimeAllocation())); |
| const intptr_t num_fields = shape.num_fields(); |
| ASSERT(num_fields == 2 || num_fields == 3); |
| record.SetFieldAt(0, value0); |
| record.SetFieldAt(1, value1); |
| if (num_fields > 2) { |
| record.SetFieldAt(2, value2); |
| } |
| arguments.SetReturn(record); |
| RuntimeAllocationEpilogue(thread); |
| } |
| |
| // Allocate a SuspendState object. |
| // Arg0: frame size. |
| // Arg1: existing SuspendState object or function data. |
| // Return value: newly allocated object. |
| // No lazy deopt: the various suspend stubs need to save the real pc, not the |
| // lazy deopt stub entry, for pointer visiting of the suspend state to work. The |
| // resume stubs will do a check for disabled code. |
| DEFINE_RUNTIME_ENTRY_NO_LAZY_DEOPT(AllocateSuspendState, 2) { |
| const intptr_t frame_size = |
| Smi::CheckedHandle(zone, arguments.ArgAt(0)).Value(); |
| const Object& previous_state = Object::Handle(zone, arguments.ArgAt(1)); |
| SuspendState& result = SuspendState::Handle(zone); |
| if (previous_state.IsSuspendState()) { |
| const auto& suspend_state = SuspendState::Cast(previous_state); |
| const auto& function_data = |
| Instance::Handle(zone, suspend_state.function_data()); |
| ObjectStore* object_store = thread->isolate_group()->object_store(); |
| if (function_data.GetClassId() == |
| Class::Handle(zone, object_store->async_star_stream_controller()) |
| .id()) { |
| // Reset _AsyncStarStreamController.asyncStarBody to null in order |
| // to create a new callback closure during next yield. |
| // The new callback closure will capture the reallocated SuspendState. |
| // |
| // Caveat: can't use [SetField] here because it will try to take program |
| // lock (to update the state of guarded cid) and that requires us to |
| // be at a safepoint, which permits lazy deopt. Instead, bypass the
| // field guard by making sure that guarded_cid allows our store here.
| // (See ObjectStore::InitKnownObjects which initializes it). |
| function_data.SetFieldWithoutFieldGuard( |
| Field::Handle( |
| zone, |
| object_store->async_star_stream_controller_async_star_body()), |
| Object::null_object()); |
| } |
| result = SuspendState::New(frame_size, function_data, |
| SpaceForRuntimeAllocation()); |
| if (function_data.GetClassId() == |
| Class::Handle(zone, object_store->sync_star_iterator_class()).id()) { |
| // Refresh _SyncStarIterator._state with the new SuspendState object. |
| // |
| // Caveat: can't use [SetField] here because it will try to take program |
| // lock (to update the state of guarded cid) and that requires us to |
| // be at a safepoint, which permits lazy deopt. Instead, bypass the
| // field guard by making sure that guarded_cid allows our store here.
| // (See ObjectStore::InitKnownObjects which initializes it). |
| function_data.SetFieldWithoutFieldGuard( |
| Field::Handle(zone, object_store->sync_star_iterator_state()), |
| result); |
| } |
| } else { |
| result = SuspendState::New(frame_size, Instance::Cast(previous_state), |
| SpaceForRuntimeAllocation()); |
| } |
| arguments.SetReturn(result); |
| RuntimeAllocationEpilogue(thread); |
| } |
| |
| // Makes a copy of the given SuspendState object, including the payload frame. |
| // Arg0: the SuspendState object to be cloned. |
| // Return value: newly allocated object. |
| DEFINE_RUNTIME_ENTRY(CloneSuspendState, 1) { |
| const SuspendState& src = |
| SuspendState::CheckedHandle(zone, arguments.ArgAt(0)); |
| const SuspendState& dst = SuspendState::Handle( |
| zone, SuspendState::Clone(thread, src, SpaceForRuntimeAllocation())); |
| arguments.SetReturn(dst); |
| RuntimeAllocationEpilogue(thread); |
| } |
| |
| // Allocate a new SubtypeTestCache for use in interpreted implicit setters. |
| // Return value: newly allocated SubtypeTestCache. |
| DEFINE_RUNTIME_ENTRY(AllocateSubtypeTestCache, 0) { |
| #if defined(DART_DYNAMIC_MODULES) |
| const auto& cache = SubtypeTestCache::Handle( |
| zone, SubtypeTestCache::New(SubtypeTestCache::kMaxInputs)); |
| arguments.SetReturn(cache); |
| #else |
| UNREACHABLE(); |
| #endif // defined(DART_DYNAMIC_MODULES) |
| } |
| |
| // Invoke field getter before dispatch. |
| // Arg0: instance. |
| // Arg1: field name (may be demangled during call). |
| // Return value: field value. |
| DEFINE_RUNTIME_ENTRY(GetFieldForDispatch, 2) { |
| #if defined(DART_DYNAMIC_MODULES) |
| const Instance& receiver = Instance::CheckedHandle(zone, arguments.ArgAt(0)); |
| String& name = String::CheckedHandle(zone, arguments.ArgAt(1)); |
| const Class& receiver_class = Class::Handle(zone, receiver.clazz()); |
| if (Function::IsDynamicInvocationForwarderName(name)) { |
| name = Function::DemangleDynamicInvocationForwarderName(name); |
| arguments.SetArgAt(1, name); // Reflect change in arguments. |
| } |
| const String& getter_name = String::Handle(zone, Field::GetterName(name)); |
| const int kTypeArgsLen = 0; |
| const int kNumArguments = 1; |
| ArgumentsDescriptor args_desc(Array::Handle( |
| zone, ArgumentsDescriptor::NewBoxed(kTypeArgsLen, kNumArguments))); |
| const Function& getter = Function::Handle( |
| zone, Resolver::ResolveDynamicForReceiverClass( |
| receiver_class, getter_name, args_desc, /*allow_add=*/true)); |
| ASSERT(!getter.IsNull()); // An InvokeFieldDispatcher function was created. |
| const Array& args = Array::Handle(zone, Array::New(kNumArguments)); |
| args.SetAt(0, receiver); |
| const Object& result = |
| Object::Handle(zone, DartEntry::InvokeFunction(getter, args)); |
| ThrowIfError(result); |
| arguments.SetReturn(result); |
| #else |
| UNREACHABLE(); |
| #endif // defined(DART_DYNAMIC_MODULES) |
| } |
| |
| // Converts arguments descriptor passed to an implicit closure |
| // into an arguments descriptor for the target function. |
| // Arg0: implicit closure arguments descriptor |
| // Arg1: target function |
| // Return value: target arguments descriptor |
| DEFINE_RUNTIME_ENTRY(AdjustArgumentsDesciptorForImplicitClosure, 2) { |
| #if defined(DART_DYNAMIC_MODULES) |
| const auto& descriptor = Array::CheckedHandle(zone, arguments.ArgAt(0)); |
| const auto& target = Function::CheckedHandle(zone, arguments.ArgAt(1)); |
| |
| const ArgumentsDescriptor args_desc(descriptor); |
| intptr_t type_args_len = args_desc.TypeArgsLen(); |
| intptr_t num_arguments = args_desc.Count(); |
| |
| if (target.is_static()) { |
| if (target.IsFactory()) { |
| // Factory always takes type arguments via a positional parameter. |
| type_args_len = 0; |
| } else { |
| // Drop closure receiver. |
| --num_arguments; |
| } |
| } else { |
| if (target.IsGenerativeConstructor()) { |
| // Type arguments are not passed to a generative constructor. |
| type_args_len = 0; |
| } else { |
| // No need to adjust arguments descriptor. |
| arguments.SetReturn(descriptor); |
| return; |
| } |
| } |
| |
| const auto& optional_arguments_names = |
| Array::Handle(zone, args_desc.GetArgumentNames()); |
| const auto& result = Array::Handle( |
| zone, ArgumentsDescriptor::NewBoxed(type_args_len, num_arguments, |
| optional_arguments_names)); |
| arguments.SetReturn(result); |
| #else |
| UNREACHABLE(); |
| #endif // defined(DART_DYNAMIC_MODULES) |
| } |
| |
| // Check that arguments are valid for the given closure. |
| // Arg0: closure |
| // Arg1: arguments descriptor |
| // Return value: whether the arguments are valid |
| DEFINE_RUNTIME_ENTRY(ClosureArgumentsValid, 2) { |
| #if defined(DART_DYNAMIC_MODULES) |
| const auto& closure = Closure::CheckedHandle(zone, arguments.ArgAt(0)); |
| const auto& descriptor = Array::CheckedHandle(zone, arguments.ArgAt(1)); |
| |
| const auto& function = Function::Handle(zone, closure.function()); |
| const ArgumentsDescriptor args_desc(descriptor); |
| if (!function.AreValidArguments(args_desc, nullptr)) { |
| arguments.SetReturn(Bool::False()); |
| } else if (!closure.IsGeneric() && args_desc.TypeArgsLen() > 0) { |
| // The arguments may be valid for the closure function itself, but if the |
| // closure has delayed type arguments, no type arguments should be provided. |
| arguments.SetReturn(Bool::False()); |
| } else { |
| arguments.SetReturn(Bool::True()); |
| } |
| #else |
| UNREACHABLE(); |
| #endif // defined(DART_DYNAMIC_MODULES) |
| } |
| |
| // Resolve 'call' function of receiver. |
| // Arg0: receiver (not a closure). |
| // Arg1: arguments descriptor |
| // Return value: 'call' function.
| DEFINE_RUNTIME_ENTRY(ResolveCallFunction, 2) { |
| #if defined(DART_DYNAMIC_MODULES) |
| const Instance& receiver = Instance::CheckedHandle(zone, arguments.ArgAt(0)); |
| const Array& descriptor = Array::CheckedHandle(zone, arguments.ArgAt(1)); |
| ArgumentsDescriptor args_desc(descriptor); |
| ASSERT(!receiver.IsClosure()); // Interpreter tests for closure. |
| Class& cls = Class::Handle(zone, receiver.clazz()); |
| Function& call_function = Function::Handle( |
| zone, |
| Resolver::ResolveDynamicForReceiverClass(cls, Symbols::call(), args_desc, |
| /*allow_add=*/false)); |
| arguments.SetReturn(call_function); |
| #else |
| UNREACHABLE(); |
| #endif // defined(DART_DYNAMIC_MODULES) |
| } |
| |
| // Helper routine for tracing a type check. |
| static void PrintTypeCheck(const char* message, |
| const Instance& instance, |
| const AbstractType& type, |
| const TypeArguments& instantiator_type_arguments, |
| const TypeArguments& function_type_arguments, |
| const Bool& result) { |
| DartFrameIterator iterator(Thread::Current(), |
| StackFrameIterator::kNoCrossThreadIteration); |
| StackFrame* caller_frame = iterator.NextFrame(); |
| ASSERT(caller_frame != nullptr); |
| |
| const AbstractType& instance_type = |
| AbstractType::Handle(instance.GetType(Heap::kNew)); |
| ASSERT(instance_type.IsInstantiated() || |
| (instance.IsClosure() && instance_type.IsInstantiated(kCurrentClass))); |
| LogBlock lb; |
| if (type.IsInstantiated()) { |
| THR_Print("%s: '%s' %d %s '%s' %d (pc: %#" Px ").\n", message, |
| instance_type.NameCString(), instance_type.type_class_id(), |
| (result.ptr() == Bool::True().ptr()) ? "is" : "is !", |
| type.NameCString(), type.type_class_id(), caller_frame->pc()); |
| } else { |
| // Instantiate type before printing. |
| const AbstractType& instantiated_type = AbstractType::Handle( |
| type.InstantiateFrom(instantiator_type_arguments, |
| function_type_arguments, kAllFree, Heap::kOld)); |
| THR_Print("%s: '%s' %s '%s' instantiated from '%s' (pc: %#" Px ").\n", |
| message, instance_type.NameCString(), |
| (result.ptr() == Bool::True().ptr()) ? "is" : "is !", |
| instantiated_type.NameCString(), type.NameCString(), |
| caller_frame->pc()); |
| } |
| const Function& function = |
| Function::Handle(caller_frame->LookupDartFunction()); |
| if (function.HasSavedArgumentsDescriptor()) { |
| const auto& args_desc_array = Array::Handle(function.saved_args_desc()); |
| const ArgumentsDescriptor args_desc(args_desc_array); |
| THR_Print(" -> Function %s [%s]\n", function.ToFullyQualifiedCString(), |
| args_desc.ToCString()); |
| } else { |
| THR_Print(" -> Function %s\n", function.ToFullyQualifiedCString()); |
| } |
| } |
| |
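| // Probes a hash-based SubtypeTestCache in the runtime and returns the cached
| // result, or Bool::null() if there is no entry, in which case the caller
| // falls through to the full type check. This is only needed where generated
| // stubs cannot probe hash-based caches themselves: on IA32 and in the
| // interpreter (see the uses below).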
| #if defined(TARGET_ARCH_IA32) || defined(DART_DYNAMIC_MODULES) |
| static BoolPtr CheckHashBasedSubtypeTestCache( |
| Zone* zone, |
| Thread* thread, |
| const Instance& instance, |
| const AbstractType& destination_type, |
| const TypeArguments& instantiator_type_arguments, |
| const TypeArguments& function_type_arguments, |
| const SubtypeTestCache& cache) { |
| ASSERT(cache.IsHash()); |
| // Record instances are not added to the cache as they don't have a valid |
| // key (the type of a record depends on the types of all its fields).
| if (instance.IsRecord()) return Bool::null(); |
| Class& instance_class = Class::Handle(zone); |
| if (instance.IsSmi()) { |
| instance_class = Smi::Class(); |
| } else { |
| instance_class = instance.clazz(); |
| } |
| // If the type is uninstantiated and refers to parent function type |
| // parameters, the function_type_arguments have been canonicalized |
| // when concatenated. |
| auto& instance_class_id_or_signature = Object::Handle(zone); |
| auto& instance_type_arguments = TypeArguments::Handle(zone); |
| auto& instance_parent_function_type_arguments = TypeArguments::Handle(zone); |
| auto& instance_delayed_type_arguments = TypeArguments::Handle(zone); |
| if (instance_class.IsClosureClass()) { |
| const auto& closure = Closure::Cast(instance); |
| const auto& function = Function::Handle(zone, closure.function()); |
| instance_class_id_or_signature = function.signature(); |
| instance_type_arguments = closure.instantiator_type_arguments(); |
| instance_parent_function_type_arguments = closure.function_type_arguments(); |
| instance_delayed_type_arguments = closure.delayed_type_arguments(); |
| } else { |
| instance_class_id_or_signature = Smi::New(instance_class.id()); |
| if (instance_class.NumTypeArguments() > 0) { |
| instance_type_arguments = instance.GetTypeArguments(); |
| } |
| } |
| |
| intptr_t index = -1; |
| auto& result = Bool::Handle(zone); |
| if (cache.HasCheck(instance_class_id_or_signature, destination_type, |
| instance_type_arguments, instantiator_type_arguments, |
| function_type_arguments, |
| instance_parent_function_type_arguments, |
| instance_delayed_type_arguments, &index, &result)) { |
| return result.ptr(); |
| } |
| |
| return Bool::null(); |
| } |
| #endif // defined(TARGET_ARCH_IA32) || defined(DART_DYNAMIC_MODULES) |
| |
| // This updates the type test cache, an array containing 8 elements: |
| // - instance class (or function if the instance is a closure) |
| // - instance type arguments (null if the instance class is not generic) |
| // - instantiator type arguments (null if the type is instantiated) |
| // - function type arguments (null if the type is instantiated) |
| // - instance parent function type arguments (null if instance is not a closure) |
| // - instance delayed type arguments (null if instance is not a closure) |
| // - destination type (null if the type was known at compile time) |
| // - test result |
| // It can be applied to classes with type arguments, in which case it contains
| // just the result of the class subtype test, not including the evaluation of |
| // type arguments. |
| // This operation is currently very slow (lookup of code is not efficient yet). |
| static void UpdateTypeTestCache( |
| Zone* zone, |
| Thread* thread, |
| const Instance& instance, |
| const AbstractType& destination_type, |
| const TypeArguments& instantiator_type_arguments, |
| const TypeArguments& function_type_arguments, |
| const Bool& result, |
| const SubtypeTestCache& new_cache) { |
| ASSERT(!new_cache.IsNull()); |
| ASSERT(destination_type.IsCanonical()); |
| ASSERT(instantiator_type_arguments.IsCanonical()); |
| ASSERT(function_type_arguments.IsCanonical()); |
| if (instance.IsRecord()) { |
| // Do not add record instances to the cache as they don't have a valid
| // key (the type of a record depends on the types of all its fields).
| if (FLAG_trace_type_checks) { |
| THR_Print("Not updating subtype test cache for the record instance.\n"); |
| } |
| return; |
| } |
| Class& instance_class = Class::Handle(zone); |
| if (instance.IsSmi()) { |
| instance_class = Smi::Class(); |
| } else { |
| instance_class = instance.clazz(); |
| } |
| // If the type is uninstantiated and refers to parent function type |
| // parameters, the function_type_arguments have been canonicalized |
| // when concatenated. |
| auto& instance_class_id_or_signature = Object::Handle(zone); |
| auto& instance_type_arguments = TypeArguments::Handle(zone); |
| auto& instance_parent_function_type_arguments = TypeArguments::Handle(zone); |
| auto& instance_delayed_type_arguments = TypeArguments::Handle(zone); |
| if (instance_class.IsClosureClass()) { |
| const auto& closure = Closure::Cast(instance); |
| const auto& function = Function::Handle(zone, closure.function()); |
| instance_class_id_or_signature = function.signature(); |
| ASSERT(instance_class_id_or_signature.IsFunctionType()); |
| instance_type_arguments = closure.instantiator_type_arguments(); |
| instance_parent_function_type_arguments = closure.function_type_arguments(); |
| instance_delayed_type_arguments = closure.delayed_type_arguments(); |
| ASSERT(instance_class_id_or_signature.IsCanonical()); |
| ASSERT(instance_type_arguments.IsCanonical()); |
| ASSERT(instance_parent_function_type_arguments.IsCanonical()); |
| ASSERT(instance_delayed_type_arguments.IsCanonical()); |
| } else { |
| instance_class_id_or_signature = Smi::New(instance_class.id()); |
| if (instance_class.NumTypeArguments() > 0) { |
| instance_type_arguments = instance.GetTypeArguments(); |
| ASSERT(instance_type_arguments.IsCanonical()); |
| } |
| } |
| if (FLAG_trace_type_checks) { |
| const auto& instance_class_name = |
| String::Handle(zone, instance_class.Name()); |
| TextBuffer buffer(256); |
| buffer.Printf(" Updating test cache %#" Px " with result %s for:\n", |
| static_cast<uword>(new_cache.ptr()), result.ToCString()); |
| if (instance.IsString()) { |
| buffer.Printf(" instance: '%s'\n", instance.ToCString()); |
| } else { |
| buffer.Printf(" instance: %s\n", instance.ToCString()); |
| } |
| buffer.Printf(" class: %s (%" Pd ")\n", instance_class_name.ToCString(), |
| instance_class.id()); |
| buffer.Printf( |
| " raw entry: [ %#" Px ", %#" Px ", %#" Px ", %#" Px ", %#" Px |
| ", %#" Px ", %#" Px ", %#" Px " ]\n", |
| static_cast<uword>(instance_class_id_or_signature.ptr()), |
| static_cast<uword>(instance_type_arguments.ptr()), |
| static_cast<uword>(instantiator_type_arguments.ptr()), |
| static_cast<uword>(function_type_arguments.ptr()), |
| static_cast<uword>(instance_parent_function_type_arguments.ptr()), |
| static_cast<uword>(instance_delayed_type_arguments.ptr()), |
| static_cast<uword>(destination_type.ptr()), |
| static_cast<uword>(result.ptr())); |
| THR_Print("%s", buffer.buffer()); |
| } |
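| // Updates must happen under the isolate group's subtype-test-cache mutex:
| // another isolate may be mutating the same cache concurrently, so re-check
| // for an existing (possibly colliding) entry before adding a new one.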
| { |
| SafepointMutexLocker ml( |
| thread->isolate_group()->subtype_test_cache_mutex()); |
| const intptr_t len = new_cache.NumberOfChecks(); |
| if (len >= FLAG_max_subtype_cache_entries) { |
| if (FLAG_trace_type_checks) { |
| THR_Print("Not updating subtype test cache as its length reached %d\n", |
| FLAG_max_subtype_cache_entries); |
| } |
| return; |
| } |
| intptr_t colliding_index = -1; |
| auto& old_result = Bool::Handle(zone); |
| if (new_cache.HasCheck( |
| instance_class_id_or_signature, destination_type, |
| instance_type_arguments, instantiator_type_arguments, |
| function_type_arguments, instance_parent_function_type_arguments, |
| instance_delayed_type_arguments, &colliding_index, &old_result)) { |
| if (FLAG_trace_type_checks) { |
| TextBuffer buffer(256); |
| buffer.Printf(" Collision for test cache %#" Px " at index %" Pd ":\n", |
| static_cast<uword>(new_cache.ptr()), colliding_index); |
| buffer.Printf(" entry: "); |
| new_cache.WriteEntryToBuffer(zone, &buffer, colliding_index, " "); |
| THR_Print("%s\n", buffer.buffer()); |
| } |
| if (old_result.ptr() != result.ptr()) { |
| FATAL("Existing subtype test cache entry has result %s, not %s", |
| old_result.ToCString(), result.ToCString()); |
| } |
| // Some other isolate might have updated the cache between when the entry
| // was found missing and now.
| return; |
| } |
| const intptr_t new_index = new_cache.AddCheck( |
| instance_class_id_or_signature, destination_type, |
| instance_type_arguments, instantiator_type_arguments, |
| function_type_arguments, instance_parent_function_type_arguments, |
| instance_delayed_type_arguments, result); |
| if (FLAG_trace_type_checks) { |
| TextBuffer buffer(256); |
| buffer.Printf(" Added new entry to test cache %#" Px " at index %" Pd |
| ":\n", |
| static_cast<uword>(new_cache.ptr()), new_index); |
| buffer.Printf(" new entry: "); |
| new_cache.WriteEntryToBuffer(zone, &buffer, new_index, " "); |
| THR_Print("%s\n", buffer.buffer()); |
| } |
| } |
| } |
| |
| // Check that the given instance is an instance of the given type. |
| // Tested instance may be null, because a null test cannot always be inlined, |
| // e.g. 'null is T' yields true if T = Null, but false if T = bool.
| // Arg0: instance being checked. |
| // Arg1: type. |
| // Arg2: type arguments of the instantiator of the type. |
| // Arg3: type arguments of the function of the type. |
| // Arg4: SubtypeTestCache. |
| // Return value: true or false. |
| DEFINE_RUNTIME_ENTRY(Instanceof, 5) { |
| const Instance& instance = Instance::CheckedHandle(zone, arguments.ArgAt(0)); |
| const AbstractType& type = |
| AbstractType::CheckedHandle(zone, arguments.ArgAt(1)); |
| const TypeArguments& instantiator_type_arguments = |
| TypeArguments::CheckedHandle(zone, arguments.ArgAt(2)); |
| const TypeArguments& function_type_arguments = |
| TypeArguments::CheckedHandle(zone, arguments.ArgAt(3)); |
| const SubtypeTestCache& cache = |
| SubtypeTestCache::CheckedHandle(zone, arguments.ArgAt(4)); |
| ASSERT(type.IsFinalized()); |
| ASSERT(!type.IsDynamicType()); // No need to check assignment. |
| ASSERT(!cache.IsNull()); |
| #if defined(TARGET_ARCH_IA32) |
| // Hash-based caches are still not handled by the stubs on IA32. |
| if (cache.IsHash()) { |
| const auto& result = Bool::Handle( |
| zone, CheckHashBasedSubtypeTestCache(zone, thread, instance, type, |
| instantiator_type_arguments, |
| function_type_arguments, cache)); |
| if (!result.IsNull()) { |
| // Early exit because an entry already exists in the cache. |
| arguments.SetReturn(result); |
| return; |
| } |
| } |
| #endif // defined(TARGET_ARCH_IA32) |
| const Bool& result = Bool::Get(instance.IsInstanceOf( |
| type, instantiator_type_arguments, function_type_arguments)); |
| if (FLAG_trace_type_checks) { |
| PrintTypeCheck("InstanceOf", instance, type, instantiator_type_arguments, |
| function_type_arguments, result); |
| } |
| UpdateTypeTestCache(zone, thread, instance, type, instantiator_type_arguments, |
| function_type_arguments, result, cache); |
| arguments.SetReturn(result); |
| } |
| |
| #if defined(TESTING) |
| // Used only in type_testing_stubs_test.cc. If DRT_TypeCheck is entered, then |
| // this flag is set to true. |
| thread_local bool TESTING_runtime_entered_on_TTS_invocation = false; |
| #endif |
| |
| // Check that the type of the given instance is a subtype of the given type and |
| // can therefore be assigned. |
| // Tested instance may not be null, because a null test is always inlined. |
| // Arg0: instance being assigned. |
| // Arg1: type being assigned to. |
| // Arg2: type arguments of the instantiator of the type being assigned to. |
| // Arg3: type arguments of the function of the type being assigned to. |
| // Arg4: name of variable being assigned to. |
| // Arg5: SubtypeTestCache. |
| // Arg6: invocation mode (see TypeCheckMode) |
| // Return value: instance if a subtype, otherwise throw a TypeError. |
| DEFINE_RUNTIME_ENTRY(TypeCheck, 7) { |
| const Instance& src_instance = |
| Instance::CheckedHandle(zone, arguments.ArgAt(0)); |
| const AbstractType& dst_type = |
| AbstractType::CheckedHandle(zone, arguments.ArgAt(1)); |
| const TypeArguments& instantiator_type_arguments = |
| TypeArguments::CheckedHandle(zone, arguments.ArgAt(2)); |
| const TypeArguments& function_type_arguments = |
| TypeArguments::CheckedHandle(zone, arguments.ArgAt(3)); |
| String& dst_name = String::Handle(zone); |
| dst_name ^= arguments.ArgAt(4); |
| ASSERT(dst_name.IsNull() || dst_name.IsString()); |
| |
| SubtypeTestCache& cache = SubtypeTestCache::Handle(zone); |
| cache ^= arguments.ArgAt(5); |
| ASSERT(cache.IsNull() || cache.IsSubtypeTestCache()); |
| |
| const TypeCheckMode mode = static_cast<TypeCheckMode>( |
| Smi::CheckedHandle(zone, arguments.ArgAt(6)).Value()); |
| |
| #if defined(TESTING) |
| TESTING_runtime_entered_on_TTS_invocation = true; |
| #endif |
| |
| #if defined(TARGET_ARCH_IA32) |
| ASSERT(mode == kTypeCheckFromInline); |
| #endif |
| |
| #if defined(TARGET_ARCH_IA32) || defined(DART_DYNAMIC_MODULES) |
| // Hash-based caches are not handled by the inline AssertAssignable |
| // on IA32 and in the interpreter. |
| if ((mode == kTypeCheckFromInline) && cache.IsHash()) { |
| const auto& result = Bool::Handle( |
| zone, CheckHashBasedSubtypeTestCache( |
| zone, thread, src_instance, dst_type, |
| instantiator_type_arguments, function_type_arguments, cache)); |
| if (!result.IsNull()) { |
| // Early exit because an entry already exists in the cache. |
| arguments.SetReturn(result); |
| return; |
| } |
| } |
| #endif // defined(TARGET_ARCH_IA32) || defined(DART_DYNAMIC_MODULES) |
| |
| // This is guaranteed on the calling side. |
| ASSERT(!dst_type.IsDynamicType()); |
| |
| const bool is_instance_of = src_instance.IsAssignableTo( |
| dst_type, instantiator_type_arguments, function_type_arguments); |
| |
| if (FLAG_trace_type_checks) { |
| PrintTypeCheck("TypeCheck", src_instance, dst_type, |
| instantiator_type_arguments, function_type_arguments, |
| Bool::Get(is_instance_of)); |
| } |
| |
| // Most paths through this runtime entry don't need to know what the |
| // destination name was or if this was a dynamic assert assignable call, |
| // so only walk the stack to find the stored destination name when necessary. |
| auto resolve_dst_name = [&]() { |
| if (!dst_name.IsNull()) return; |
| #if !defined(TARGET_ARCH_IA32) |
| // Can only come here from type testing stub. |
| ASSERT(mode != kTypeCheckFromInline); |
| |
| // Grab the [dst_name] from the pool. It's stored at one pool slot after |
| // the subtype-test-cache. |
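    //
    // Illustrative pool layout at such a call site (assumed sketch):
    //
    //   pool[stc_pool_idx]     : SubtypeTestCache (or null until created)
    //   pool[stc_pool_idx + 1] : dst_name (String)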
| DartFrameIterator iterator(thread, |
| StackFrameIterator::kNoCrossThreadIteration); |
| StackFrame* caller_frame = iterator.NextFrame(); |
| const Code& caller_code = |
| Code::Handle(zone, caller_frame->LookupDartCode()); |
| const ObjectPool& pool = |
| ObjectPool::Handle(zone, caller_code.GetObjectPool()); |
| TypeTestingStubCallPattern tts_pattern(caller_frame->pc()); |
| const intptr_t stc_pool_idx = tts_pattern.GetSubtypeTestCachePoolIndex(); |
| const intptr_t dst_name_idx = stc_pool_idx + 1; |
| dst_name ^= pool.ObjectAt(dst_name_idx); |
| #else |
| UNREACHABLE(); |
| #endif |
| }; |
| |
| if (!is_instance_of) { |
| resolve_dst_name(); |
| if (dst_name.ptr() == |
| Symbols::dynamic_assert_assignable_stc_check().ptr()) { |
| #if !defined(TARGET_ARCH_IA32) |
| // Can only come here from type testing stub via dynamic AssertAssignable. |
| ASSERT(mode != kTypeCheckFromInline); |
| #endif |
| // This was a dynamic closure call where the destination name was not |
| // known at compile-time. Thus, fetch the original arguments and arguments |
| // descriptor and re-do the type check in the runtime, which causes the |
| // error with the proper destination name to be thrown. |
| DartFrameIterator iterator(thread, |
| StackFrameIterator::kNoCrossThreadIteration); |
| StackFrame* caller_frame = iterator.NextFrame(); |
| ASSERT(!caller_frame->is_interpreted()); |
| const auto& dispatcher = |
| Function::Handle(zone, caller_frame->LookupDartFunction()); |
| ASSERT(dispatcher.IsInvokeFieldDispatcher()); |
| const auto& orig_arguments_desc = |
| Array::Handle(zone, dispatcher.saved_args_desc()); |
| const ArgumentsDescriptor args_desc(orig_arguments_desc); |
| const intptr_t arg_count = args_desc.CountWithTypeArgs(); |
| const auto& orig_arguments = Array::Handle(zone, Array::New(arg_count)); |
| auto& obj = Object::Handle(zone); |
| for (intptr_t i = 0; i < arg_count; i++) { |
| obj = *reinterpret_cast<ObjectPtr*>( |
| ParamAddress(caller_frame->fp(), arg_count - i)); |
| orig_arguments.SetAt(i, obj); |
| } |
| const auto& receiver = Closure::CheckedHandle( |
| zone, orig_arguments.At(args_desc.FirstArgIndex())); |
| const auto& function = Function::Handle(zone, receiver.function()); |
| const auto& result = Object::Handle( |
| zone, function.DoArgumentTypesMatch(orig_arguments, args_desc)); |
| if (result.IsError()) { |
| Exceptions::PropagateError(Error::Cast(result)); |
| } |
| // IsAssignableTo returned false, so we should have thrown a type |
      // error in DoArgumentTypesMatch.
| UNREACHABLE(); |
| } |
| |
| ASSERT(!dst_name.IsNull()); |
| // Throw a dynamic type error. |
| const TokenPosition location = GetCallerLocation(); |
| const auto& src_type = |
| AbstractType::Handle(zone, src_instance.GetType(Heap::kNew)); |
| auto& reported_type = AbstractType::Handle(zone, dst_type.ptr()); |
| if (!reported_type.IsInstantiated()) { |
| // Instantiate dst_type before reporting the error. |
| reported_type = reported_type.InstantiateFrom(instantiator_type_arguments, |
| function_type_arguments, |
| kAllFree, Heap::kNew); |
| } |
| Exceptions::CreateAndThrowTypeError(location, src_type, reported_type, |
| dst_name); |
| UNREACHABLE(); |
| } |
| |
| bool should_update_cache = true; |
| #if !defined(TARGET_ARCH_IA32) |
| bool would_update_cache_if_not_lazy = false; |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| // Checks against type parameters are done by loading the corresponding type |
| // argument at runtime and calling the type argument's TTS. Thus, we install |
| // specialized TTSes on the type argument, not the parameter itself. |
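  // For example (illustrative): for `class C<T> { void add(T t) { ... } }`
  // with a receiver of type `C<int>`, a check against `T` loads the type
  // argument `int` and specializes the TTS of `int`, not of `T` itself.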
| auto& tts_type = AbstractType::Handle(zone, dst_type.ptr()); |
| if (tts_type.IsTypeParameter()) { |
| const auto& param = TypeParameter::Cast(tts_type); |
| tts_type = param.GetFromTypeArguments(instantiator_type_arguments, |
| function_type_arguments); |
| } |
| ASSERT(!tts_type.IsTypeParameter()); |
| |
| if (mode == kTypeCheckFromLazySpecializeStub) { |
| if (FLAG_trace_type_checks) { |
| THR_Print(" Specializing type testing stub for %s\n", |
| tts_type.ToCString()); |
| } |
| const Code& code = Code::Handle( |
| zone, TypeTestingStubGenerator::SpecializeStubFor(thread, tts_type)); |
| tts_type.SetTypeTestingStub(code); |
| |
| // Only create the cache if we failed to create a specialized TTS and doing |
| // the same check would cause an update to the cache. |
| would_update_cache_if_not_lazy = |
| (!src_instance.IsNull() && |
| tts_type.type_test_stub() == |
| StubCode::DefaultNullableTypeTest().ptr()) || |
| tts_type.type_test_stub() == StubCode::DefaultTypeTest().ptr(); |
| should_update_cache = would_update_cache_if_not_lazy && cache.IsNull(); |
| } |
| |
  // Since dst_type is neither a top type nor a type parameter, the only
  // default stubs it can use are DefaultTypeTest or DefaultNullableTypeTest.
| if ((mode == kTypeCheckFromSlowStub) && |
| (tts_type.type_test_stub() != StubCode::DefaultNullableTypeTest().ptr() && |
| tts_type.type_test_stub() != StubCode::DefaultTypeTest().ptr())) { |
| // The specialized type testing stub returned a false negative. That means |
| // the specialization may have been generated using outdated cid ranges and |
| // new classes appeared since the stub was generated. Try respecializing. |
| if (FLAG_trace_type_checks) { |
| THR_Print(" Rebuilding type testing stub for %s\n", |
| tts_type.ToCString()); |
| } |
| const auto& old_code = Code::Handle(zone, tts_type.type_test_stub()); |
| const auto& new_code = Code::Handle( |
| zone, TypeTestingStubGenerator::SpecializeStubFor(thread, tts_type)); |
| ASSERT(old_code.ptr() != new_code.ptr()); |
| // A specialized stub should always respecialize to a non-default stub. |
| ASSERT(new_code.ptr() != StubCode::DefaultNullableTypeTest().ptr() && |
| new_code.ptr() != StubCode::DefaultTypeTest().ptr()); |
| const auto& old_instructions = |
| Instructions::Handle(old_code.instructions()); |
| const auto& new_instructions = |
| Instructions::Handle(new_code.instructions()); |
| // Check if specialization produced exactly the same sequence of |
| // instructions. If it did, then we have a false negative, which can |
| // happen in some cases involving uninstantiated types. In these cases, |
| // update the cache, because the only case in which these false negatives |
| // could possibly turn into true positives is with reloads, which clear |
| // all the SubtypeTestCaches. |
| should_update_cache = old_instructions.Equals(new_instructions); |
| if (FLAG_trace_type_checks) { |
| THR_Print(" %s rebuilt type testing stub for %s\n", |
| should_update_cache ? "Discarding" : "Installing", |
| tts_type.ToCString()); |
| } |
| if (!should_update_cache) { |
| tts_type.SetTypeTestingStub(new_code); |
| } |
| } |
| #endif // !defined(DART_PRECOMPILED_RUNTIME) |
| #endif // !defined(TARGET_ARCH_IA32) |
| |
| if (should_update_cache) { |
| if (cache.IsNull()) { |
| #if !defined(TARGET_ARCH_IA32) |
| ASSERT(mode == kTypeCheckFromSlowStub || |
| (mode == kTypeCheckFromLazySpecializeStub && |
| would_update_cache_if_not_lazy)); |
| // We lazily create [SubtypeTestCache] for those call sites which actually |
| // need one and will patch the pool entry. |
| DartFrameIterator iterator(thread, |
| StackFrameIterator::kNoCrossThreadIteration); |
| StackFrame* caller_frame = iterator.NextFrame(); |
| ASSERT(!caller_frame->is_interpreted()); |
| const Code& caller_code = |
| Code::Handle(zone, caller_frame->LookupDartCode()); |
| const ObjectPool& pool = |
| ObjectPool::Handle(zone, caller_code.GetObjectPool()); |
| TypeTestingStubCallPattern tts_pattern(caller_frame->pc()); |
| const intptr_t stc_pool_idx = tts_pattern.GetSubtypeTestCachePoolIndex(); |
      // Ensure we do have an STC (lazily create it if not) and that all
      // threads use the same STC.
| { |
| SafepointMutexLocker ml(isolate->group()->subtype_test_cache_mutex()); |
| cache ^= pool.ObjectAt<std::memory_order_acquire>(stc_pool_idx); |
| if (cache.IsNull()) { |
| resolve_dst_name(); |
| // If this is a dynamic AssertAssignable check, then we must assume |
| // all inputs may be needed, as the type may vary from call to call. |
| const intptr_t num_inputs = |
| dst_name.ptr() == |
| Symbols::dynamic_assert_assignable_stc_check().ptr() |
| ? SubtypeTestCache::kMaxInputs |
| : SubtypeTestCache::UsedInputsForType(dst_type); |
| cache = SubtypeTestCache::New(num_inputs); |
| pool.SetObjectAt<std::memory_order_release>(stc_pool_idx, cache); |
| if (FLAG_trace_type_checks) { |
| THR_Print(" Installed new subtype test cache %#" Px " with %" Pd |
| " inputs at index %" Pd " of pool for %s\n", |
| static_cast<uword>(cache.ptr()), num_inputs, stc_pool_idx, |
| caller_code.ToCString()); |
| } |
| } |
| } |
| #else |
| UNREACHABLE(); |
| #endif |
| } |
| |
| UpdateTypeTestCache(zone, thread, src_instance, dst_type, |
| instantiator_type_arguments, function_type_arguments, |
| Bool::True(), cache); |
| } |
| |
| arguments.SetReturn(src_instance); |
| } |
| |
| DEFINE_RUNTIME_ENTRY(Throw, 1) { |
| const Instance& exception = Instance::CheckedHandle(zone, arguments.ArgAt(0)); |
| Exceptions::Throw(thread, exception); |
| } |
| |
| DEFINE_RUNTIME_ENTRY(ReThrow, 3) { |
| const Instance& exception = Instance::CheckedHandle(zone, arguments.ArgAt(0)); |
| const Instance& stacktrace = |
| Instance::CheckedHandle(zone, arguments.ArgAt(1)); |
| const Smi& bypass_debugger = Smi::CheckedHandle(zone, arguments.ArgAt(2)); |
| Exceptions::ReThrow(thread, exception, stacktrace, |
| bypass_debugger.Value() != 0); |
| } |
| |
| // Patches static call in optimized code with the target's entry point. |
| // Compiles target if necessary. |
| DEFINE_RUNTIME_ENTRY(PatchStaticCall, 0) { |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| DartFrameIterator iterator(thread, |
| StackFrameIterator::kNoCrossThreadIteration); |
| StackFrame* caller_frame = iterator.NextFrame(); |
| ASSERT(caller_frame != nullptr); |
| ASSERT(!caller_frame->is_interpreted()); |
| const Code& caller_code = Code::Handle(zone, caller_frame->LookupDartCode()); |
| ASSERT(!caller_code.IsNull()); |
| ASSERT(caller_code.is_optimized()); |
| const Function& target_function = Function::Handle( |
| zone, caller_code.GetStaticCallTargetFunctionAt(caller_frame->pc())); |
| const Code& target_code = Code::Handle(zone, target_function.EnsureHasCode()); |
| // Before patching verify that we are not repeatedly patching to the same |
| // target. |
| if (target_code.ptr() != |
| CodePatcher::GetStaticCallTargetAt(caller_frame->pc(), caller_code)) { |
| GcSafepointOperationScope safepoint(thread); |
| if (target_code.ptr() != |
| CodePatcher::GetStaticCallTargetAt(caller_frame->pc(), caller_code)) { |
| CodePatcher::PatchStaticCallAt(caller_frame->pc(), caller_code, |
| target_code); |
| caller_code.SetStaticCallTargetCodeAt(caller_frame->pc(), target_code); |
| if (FLAG_trace_patching) { |
| THR_Print("PatchStaticCall: patching caller pc %#" Px |
| "" |
| " to '%s' new entry point %#" Px " (%s)\n", |
| caller_frame->pc(), target_function.ToFullyQualifiedCString(), |
| target_code.EntryPoint(), |
| target_code.is_optimized() ? "optimized" : "unoptimized"); |
| } |
| } |
| } |
| arguments.SetReturn(target_code); |
| #else |
| UNREACHABLE(); |
| #endif |
| } |
| |
| #if defined(PRODUCT) || defined(DART_PRECOMPILED_RUNTIME) |
| DEFINE_RUNTIME_ENTRY(BreakpointRuntimeHandler, 0) { |
| UNREACHABLE(); |
| return; |
| } |
| #else |
| // Gets called from debug stub when code reaches a breakpoint |
| // set on a runtime stub call. |
| DEFINE_RUNTIME_ENTRY(BreakpointRuntimeHandler, 0) { |
| DartFrameIterator iterator(thread, |
| StackFrameIterator::kNoCrossThreadIteration); |
| StackFrame* caller_frame = iterator.NextFrame(); |
| ASSERT(caller_frame != nullptr); |
| Code& orig_stub = Code::Handle(zone); |
| if (!caller_frame->is_interpreted()) { |
| orig_stub = |
| isolate->group()->debugger()->GetPatchedStubAddress(caller_frame->pc()); |
| } |
| const Error& error = |
| Error::Handle(zone, isolate->debugger()->PauseBreakpoint()); |
| ThrowIfError(error); |
| arguments.SetReturn(orig_stub); |
| } |
| #endif |
| |
| DEFINE_RUNTIME_ENTRY(SingleStepHandler, 0) { |
| #if defined(PRODUCT) || defined(DART_PRECOMPILED_RUNTIME) |
| UNREACHABLE(); |
| #else |
| const Error& error = |
| Error::Handle(zone, isolate->debugger()->PauseStepping()); |
| ThrowIfError(error); |
| #endif |
| } |
| |
| // An instance call of the form o.f(...) could not be resolved. Check if |
| // there is a getter with the same name. If so, invoke it. If the value is |
| // a closure, invoke it with the given arguments. If the value is a |
| // non-closure, attempt to invoke "call" on it. |
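//
// For example (illustrative): given `class A { Function get f => ...; }`,
// an unresolved call `a.f(1, 2)` finds the getter `f`, so an invoke-field
// dispatcher is created that evaluates `a.f` and invokes the result with
// the original arguments `(1, 2)`.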
| static bool ResolveCallThroughGetter(const Class& receiver_class, |
| const String& target_name, |
| const String& demangled, |
| const Array& arguments_descriptor, |
| Function* result) { |
| const bool create_if_absent = !FLAG_precompiled_mode; |
| const String& getter_name = String::Handle(Field::GetterName(demangled)); |
| const int kTypeArgsLen = 0; |
| const int kNumArguments = 1; |
| ArgumentsDescriptor args_desc(Array::Handle( |
| ArgumentsDescriptor::NewBoxed(kTypeArgsLen, kNumArguments))); |
| const Function& getter = |
| Function::Handle(Resolver::ResolveDynamicForReceiverClass( |
| receiver_class, getter_name, args_desc, create_if_absent)); |
| if (getter.IsNull() || getter.IsMethodExtractor()) { |
| return false; |
| } |
| // We do this on the target_name, _not_ on the demangled name, so that |
| // FlowGraphBuilder::BuildGraphOfInvokeFieldDispatcher can detect dynamic |
| // calls from the dyn: tag on the name of the dispatcher. |
| const Function& target_function = |
| Function::Handle(receiver_class.GetInvocationDispatcher( |
| target_name, arguments_descriptor, |
| UntaggedFunction::kInvokeFieldDispatcher, create_if_absent)); |
| ASSERT(!create_if_absent || !target_function.IsNull()); |
| if (FLAG_trace_ic) { |
| OS::PrintErr( |
| "InvokeField IC miss: adding <%s> id:%" Pd " -> <%s>\n", |
| receiver_class.ToCString(), receiver_class.id(), |
| target_function.IsNull() ? "null" : target_function.ToCString()); |
| } |
| *result = target_function.ptr(); |
| return true; |
| } |
| |
| // Handle other invocations (implicit closures, noSuchMethod). |
| FunctionPtr InlineCacheMissHelper(const Class& receiver_class, |
| const Array& args_descriptor, |
| const String& target_name) { |
  // Create a demangled version of the target_name, if necessary. This is used
  // for the field getter in ResolveCallThroughGetter and as the target name
  // for the NoSuchMethod dispatcher (if needed).
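  // For example (illustrative): the forwarder name "dyn:foo" demangles to
  // "foo", and the getter probed for it in ResolveCallThroughGetter is then
  // "get:foo".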
| const String* demangled = &target_name; |
| if (Function::IsDynamicInvocationForwarderName(target_name)) { |
| demangled = &String::Handle( |
| Function::DemangleDynamicInvocationForwarderName(target_name)); |
| } |
| const bool is_getter = Field::IsGetterName(*demangled); |
| Function& result = Function::Handle(); |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| const bool create_if_absent = false; |
| #else |
| const bool create_if_absent = true; |
| #endif |
| if (is_getter || |
| !ResolveCallThroughGetter(receiver_class, target_name, *demangled, |
| args_descriptor, &result)) { |
| ArgumentsDescriptor desc(args_descriptor); |
| const Function& target_function = |
| Function::Handle(receiver_class.GetInvocationDispatcher( |
| *demangled, args_descriptor, |
| UntaggedFunction::kNoSuchMethodDispatcher, create_if_absent)); |
| if (FLAG_trace_ic) { |
| OS::PrintErr( |
| "NoSuchMethod IC miss: adding <%s> id:%" Pd " -> <%s>\n", |
| receiver_class.ToCString(), receiver_class.id(), |
| target_function.IsNull() ? "null" : target_function.ToCString()); |
| } |
| result = target_function.ptr(); |
| } |
  // May be null in the precompiled runtime, in which case dispatch will be
  // handled by NoSuchMethodFromCallStub.
| ASSERT(!create_if_absent || !result.IsNull()); |
| return result.ptr(); |
| } |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| static void TrySwitchInstanceCall(Thread* thread, |
| StackFrame* caller_frame, |
| const Code& caller_code, |
| const Function& caller_function, |
| const ICData& ic_data, |
| const Function& target_function) { |
| ASSERT(!target_function.IsNull()); |
| auto zone = thread->zone(); |
| |
| // Monomorphic/megamorphic calls only check the receiver CID. |
| if (ic_data.NumArgsTested() != 1) return; |
| |
| ASSERT(ic_data.rebind_rule() == ICData::kInstance); |
| |
| // Monomorphic/megamorphic calls don't record exactness. |
| if (ic_data.is_tracking_exactness()) return; |
| |
| #if !defined(PRODUCT) |
| // Monomorphic/megamorphic do not check the isolate's stepping flag. |
| if (thread->isolate()->has_attempted_stepping()) return; |
| #endif |
| |
| // Monomorphic/megamorphic calls are only for unoptimized code. |
| if (caller_frame->is_interpreted()) return; |
| ASSERT(!caller_code.is_optimized()); |
| |
  // Code is detached from its function. This will prevent us from resetting
  // the switchable call later, because resets are function-based and the
  // ic_data_array belongs to the function rather than the code. This should
  // only happen because of reload, but it sometimes happens in KBC mixed mode,
  // probably through a race between foreground and background compilation.
| if (caller_function.unoptimized_code() != caller_code.ptr()) { |
| return; |
| } |
| #if !defined(PRODUCT) |
| // Skip functions that contain breakpoints or when debugger is in single |
| // stepping mode. |
| if (thread->isolate_group()->debugger()->IsDebugging(thread, |
| caller_function)) { |
| return; |
| } |
| #endif |
| |
| const intptr_t num_checks = ic_data.NumberOfChecks(); |
| |
| // Monomorphic call. |
| if (FLAG_unopt_monomorphic_calls && (num_checks == 1)) { |
| // A call site in the monomorphic state does not load the arguments |
| // descriptor, so do not allow transition to this state if the callee |
| // needs it. |
| if (target_function.PrologueNeedsArgumentsDescriptor()) { |
| return; |
| } |
| |
| const Array& data = Array::Handle(zone, ic_data.entries()); |
| const Code& target = Code::Handle(zone, target_function.EnsureHasCode()); |
| CodePatcher::PatchInstanceCallAt(caller_frame->pc(), caller_code, data, |
| target); |
| if (FLAG_trace_ic) { |
| OS::PrintErr("Instance call at %" Px |
| " switching to monomorphic dispatch, %s\n", |
| caller_frame->pc(), ic_data.ToCString()); |
| } |
| return; // Success. |
| } |
| |
| // Megamorphic call. |
| if (FLAG_unopt_megamorphic_calls && |
| (num_checks > FLAG_max_polymorphic_checks)) { |
| const String& name = String::Handle(zone, ic_data.target_name()); |
| const Array& descriptor = |
| Array::Handle(zone, ic_data.arguments_descriptor()); |
| const MegamorphicCache& cache = MegamorphicCache::Handle( |
| zone, MegamorphicCacheTable::Lookup(thread, name, descriptor)); |
| ic_data.set_is_megamorphic(true); |
| CodePatcher::PatchInstanceCallAt(caller_frame->pc(), caller_code, cache, |
| StubCode::MegamorphicCall()); |
| if (FLAG_trace_ic) { |
| OS::PrintErr("Instance call at %" Px |
| " switching to megamorphic dispatch, %s\n", |
| caller_frame->pc(), ic_data.ToCString()); |
| } |
| return; // Success. |
| } |
| } |
| #endif // !defined(DART_PRECOMPILED_RUNTIME) |
| |
// Performs the subtype check and returns the constant function corresponding
// to the result.
| static FunctionPtr ComputeTypeCheckTarget(const Instance& receiver, |
| const AbstractType& type, |
| const ArgumentsDescriptor& desc) { |
| const bool result = receiver.IsInstanceOf(type, Object::null_type_arguments(), |
| Object::null_type_arguments()); |
| const ObjectStore* store = IsolateGroup::Current()->object_store(); |
| const Function& target = |
| Function::Handle(result ? store->simple_instance_of_true_function() |
| : store->simple_instance_of_false_function()); |
| ASSERT(!target.IsNull()); |
| return target.ptr(); |
| } |
| |
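// Resolves [name] against [receiver_class] using [descriptor]. Falls back to
// InlineCacheMissHelper (call-through-getter and noSuchMethod dispatchers)
// when regular lookup fails; the result may be null in the precompiled
// runtime.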
| static FunctionPtr Resolve( |
| Thread* thread, |
| Zone* zone, |
| const GrowableArray<const Instance*>& caller_arguments, |
| const Class& receiver_class, |
| const String& name, |
| const Array& descriptor) { |
| ASSERT(name.IsSymbol()); |
| auto& target_function = Function::Handle(zone); |
| ArgumentsDescriptor args_desc(descriptor); |
| |
| const bool allow_add = !FLAG_precompiled_mode; |
| if (receiver_class.EnsureIsFinalized(thread) == Error::null()) { |
| target_function = Resolver::ResolveDynamicForReceiverClass( |
| receiver_class, name, args_desc, allow_add); |
| } |
| if (caller_arguments.length() == 2 && |
| target_function.ptr() == thread->isolate_group() |
| ->object_store() |
| ->simple_instance_of_function()) { |
| // Replace the target function with constant function. |
| const AbstractType& type = AbstractType::Cast(*caller_arguments[1]); |
| target_function = |
| ComputeTypeCheckTarget(*caller_arguments[0], type, args_desc); |
| } |
| |
| if (target_function.IsNull()) { |
| target_function = InlineCacheMissHelper(receiver_class, descriptor, name); |
| } |
| ASSERT(!allow_add || !target_function.IsNull()); |
| return target_function.ptr(); |
| } |
| |
| // Handles a static call in unoptimized code that has one argument type not |
// seen before. Compiles the target if necessary and updates the ICData.
| // Arg0: argument. |
| // Arg1: IC data object. |
| DEFINE_RUNTIME_ENTRY(StaticCallMissHandlerOneArg, 2) { |
| const Instance& arg = Instance::CheckedHandle(zone, arguments.ArgAt(0)); |
| const ICData& ic_data = ICData::CheckedHandle(zone, arguments.ArgAt(1)); |
| // IC data for static call is prepopulated with the statically known target. |
| ASSERT(ic_data.NumberOfChecksIs(1)); |
| const Function& target = Function::Handle(zone, ic_data.GetTargetAt(0)); |
| target.EnsureHasCode(); |
| ASSERT(!target.IsNull() && target.HasCode()); |
| ic_data.EnsureHasReceiverCheck(arg.GetClassId(), target, 1); |
| if (FLAG_trace_ic) { |
| DartFrameIterator iterator(thread, |
| StackFrameIterator::kNoCrossThreadIteration); |
| StackFrame* caller_frame = iterator.NextFrame(); |
| ASSERT(caller_frame != nullptr); |
| OS::PrintErr("StaticCallMissHandler at %#" Px " target %s (%" Pd ")\n", |
| caller_frame->pc(), target.ToCString(), arg.GetClassId()); |
| } |
| arguments.SetReturn(target); |
| } |
| |
| // Handles a static call in unoptimized code that has two argument types not |
// seen before. Compiles the target if necessary and updates the ICData.
| // Arg0: argument 0. |
| // Arg1: argument 1. |
| // Arg2: IC data object. |
| DEFINE_RUNTIME_ENTRY(StaticCallMissHandlerTwoArgs, 3) { |
| const Instance& arg0 = Instance::CheckedHandle(zone, arguments.ArgAt(0)); |
| const Instance& arg1 = Instance::CheckedHandle(zone, arguments.ArgAt(1)); |
| const ICData& ic_data = ICData::CheckedHandle(zone, arguments.ArgAt(2)); |
| // IC data for static call is prepopulated with the statically known target. |
| ASSERT(!ic_data.NumberOfChecksIs(0)); |
| const Function& target = Function::Handle(zone, ic_data.GetTargetAt(0)); |
| target.EnsureHasCode(); |
| GrowableArray<intptr_t> cids(2); |
| cids.Add(arg0.GetClassId()); |
| cids.Add(arg1.GetClassId()); |
| ic_data.EnsureHasCheck(cids, target); |
| if (FLAG_trace_ic) { |
| DartFrameIterator iterator(thread, |
| StackFrameIterator::kNoCrossThreadIteration); |
| StackFrame* caller_frame = iterator.NextFrame(); |
| ASSERT(caller_frame != nullptr); |
| OS::PrintErr("StaticCallMissHandler at %#" Px " target %s (%" Pd ", %" Pd |
| ")\n", |
| caller_frame->pc(), target.ToCString(), cids[0], cids[1]); |
| } |
| arguments.SetReturn(target); |
| } |
| |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| |
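// Returns true if every concrete, allocated class with a cid in
// [lower_cid, upper_cid] resolves [name] to [target], i.e. if the whole cid
// range can be served by a single-target call site.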
| static bool IsSingleTarget(IsolateGroup* isolate_group, |
| Zone* zone, |
| intptr_t lower_cid, |
| intptr_t upper_cid, |
| const Function& target, |
| const String& name) { |
| Class& cls = Class::Handle(zone); |
| ClassTable* table = isolate_group->class_table(); |
| Function& other_target = Function::Handle(zone); |
| for (intptr_t cid = lower_cid; cid <= upper_cid; cid++) { |
| if (!table->HasValidClassAt(cid)) continue; |
| cls = table->At(cid); |
| if (cls.is_abstract()) continue; |
| if (!cls.is_allocated()) continue; |
| other_target = Resolver::ResolveDynamicAnyArgs(zone, cls, name, |
| /*allow_add=*/false); |
| if (other_target.ptr() != target.ptr()) { |
| return false; |
| } |
| } |
| return true; |
| } |
| |
| class SavedUnlinkedCallMapKeyEqualsTraits : public AllStatic { |
| public: |
  static const char* Name() { return "SavedUnlinkedCallMapKeyEqualsTraits"; }
| static bool ReportStats() { return false; } |
| |
| static bool IsMatch(const Object& key1, const Object& key2) { |
| if (!key1.IsInteger() || !key2.IsInteger()) return false; |
| return Integer::Cast(key1).Equals(Integer::Cast(key2)); |
| } |
| static uword Hash(const Object& key) { |
| return Integer::Cast(key).CanonicalizeHash(); |
| } |
| }; |
| |
| using UnlinkedCallMap = UnorderedHashMap<SavedUnlinkedCallMapKeyEqualsTraits>; |
| |
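// Saves [unlinked_call] keyed by [frame_pc] so that later misses at this
// call site can recover the target name and arguments descriptor after the
// call site has been patched to another state (see LoadUnlinkedCall below).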
| static void SaveUnlinkedCall(Zone* zone, |
| Isolate* isolate, |
| uword frame_pc, |
| const UnlinkedCall& unlinked_call) { |
| IsolateGroup* isolate_group = isolate->group(); |
| |
| SafepointMutexLocker ml(isolate_group->unlinked_call_map_mutex()); |
| if (isolate_group->saved_unlinked_calls() == Array::null()) { |
| const auto& initial_map = |
| Array::Handle(zone, HashTables::New<UnlinkedCallMap>(16, Heap::kOld)); |
| isolate_group->set_saved_unlinked_calls(initial_map); |
| } |
| |
| UnlinkedCallMap unlinked_call_map(zone, |
| isolate_group->saved_unlinked_calls()); |
| const auto& pc = Integer::Handle(zone, Integer::NewFromUint64(frame_pc)); |
| // Some other isolate might have updated unlinked_call_map[pc] too, but |
| // their update should be identical to ours. |
| const auto& new_or_old_value = UnlinkedCall::Handle( |
| zone, UnlinkedCall::RawCast( |
| unlinked_call_map.InsertOrGetValue(pc, unlinked_call))); |
| RELEASE_ASSERT(new_or_old_value.ptr() == unlinked_call.ptr()); |
| isolate_group->set_saved_unlinked_calls(unlinked_call_map.Release()); |
| } |
| |
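// Retrieves the [UnlinkedCall] previously saved for [pc] by SaveUnlinkedCall.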
| static UnlinkedCallPtr LoadUnlinkedCall(Zone* zone, |
| Isolate* isolate, |
| uword pc) { |
| IsolateGroup* isolate_group = isolate->group(); |
| |
| SafepointMutexLocker ml(isolate_group->unlinked_call_map_mutex()); |
| ASSERT(isolate_group->saved_unlinked_calls() != Array::null()); |
| UnlinkedCallMap unlinked_call_map(zone, |
| isolate_group->saved_unlinked_calls()); |
| |
| const auto& pc_integer = Integer::Handle(zone, Integer::NewFromUint64(pc)); |
| const auto& unlinked_call = UnlinkedCall::Cast( |
| Object::Handle(zone, unlinked_call_map.GetOrDie(pc_integer))); |
| isolate_group->set_saved_unlinked_calls(unlinked_call_map.Release()); |
| return unlinked_call.ptr(); |
| } |
| |
| // NOTE: Right now we never delete [UnlinkedCall] objects. They are needed while |
| // a call site is in Unlinked/Monomorphic/MonomorphicSmiable/SingleTarget |
| // states. |
| // |
| // Theoretically we could free the [UnlinkedCall] object once we transition the |
| // call site to use ICData/MegamorphicCache, but that would require careful |
| // coordination between the deleter and a possible concurrent reader. |
| // |
// To simplify the code we decided not to do that at the moment (only a very
// small fraction of call sites in AOT use switchable calls, and the
// name/args-descriptor objects are kept alive anyway, so there is little
// memory to be saved by freeing the [UnlinkedCall] objects).
| |
| #endif // defined(DART_PRECOMPILED_RUNTIME) |
| |
| enum class MissHandler { |
| kInlineCacheMiss, |
| kSwitchableCallMiss, |
| kFixCallersTargetMonomorphic, |
| }; |
| |
| // Handles updating of type feedback and possible patching of instance calls. |
| // |
| // It works in 3 separate steps: |
| // - resolve the actual target |
| // - update type feedback & (optionally) perform call site transition |
| // - return the right values |
| // |
| // Depending on the JIT/AOT mode we obtain current and patch new (target, data) |
| // differently: |
| // |
| // - JIT calls must be patched with CodePatcher::PatchInstanceCallAt() |
| // - AOT calls must be patched with CodePatcher::PatchSwitchableCallAt() |
| // |
| // Independent of which miss handler was used or how we will return, we look at |
| // current (target, data) and see if we need to transition the call site to a |
| // new (target, data). We do this while holding `IG->patchable_call_mutex()`. |
| // |
| // Depending on which miss handler got called we might need to return |
| // differently: |
| // |
// - SwitchableCallMiss will get a (stub, data) pair as return value
// - InlineCache*Miss will get the target function as return value
| // |
| class PatchableCallHandler { |
| public: |
| PatchableCallHandler(Thread* thread, |
| const GrowableArray<const Instance*>& caller_arguments, |
| MissHandler miss_handler, |
| NativeArguments arguments, |
| StackFrame* caller_frame, |
| const Code& caller_code, |
| const Function& caller_function) |
| : isolate_(thread->isolate()), |
| thread_(thread), |
| zone_(thread->zone()), |
| caller_arguments_(caller_arguments), |
| miss_handler_(miss_handler), |
| arguments_(arguments), |
| caller_frame_(caller_frame), |
| caller_code_(caller_code), |
| caller_function_(caller_function), |
| name_(String::Handle()), |
| args_descriptor_(Array::Handle()) { |
    // Two-argument IC calls only exist in JIT mode.
| ASSERT(caller_arguments_.length() == 1 || !FLAG_precompiled_mode); |
| } |
| |
| void ResolveSwitchAndReturn(const Object& data); |
| |
| private: |
| FunctionPtr ResolveTargetFunction(const Object& data); |
| |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| void HandleMissAOT(const Object& old_data, |
| uword old_entry, |
| const Function& target_function); |
| |
| void DoUnlinkedCallAOT(const UnlinkedCall& unlinked, |
| const Function& target_function); |
| void DoMonomorphicMissAOT(const Object& old_data, |
| const Function& target_function); |
| void DoSingleTargetMissAOT(const SingleTargetCache& data, |
| const Function& target_function); |
| void DoICDataMissAOT(const ICData& data, const Function& target_function); |
| bool CanExtendSingleTargetRange(const String& name, |
| const Function& old_target, |
| const Function& target_function, |
| intptr_t* lower, |
| intptr_t* upper); |
| #else |
| void HandleMissJIT(const Object& old_data, |
| const Code& old_target, |
| const Function& target_function); |
| |
| void DoMonomorphicMissJIT(const Object& old_data, |
| const Function& target_function); |
| void DoICDataMissJIT(const ICData& data, |
| const Object& old_data, |
| const Function& target_function); |
| #endif // !defined(DART_PRECOMPILED_RUNTIME) |
| void DoMegamorphicMiss(const MegamorphicCache& data, |
| const Function& target_function); |
| |
| void UpdateICDataWithTarget(const ICData& ic_data, |
| const Function& target_function); |
| void TrySwitch(const ICData& ic_data, const Function& target_function); |
| |
| void ReturnAOT(const Code& stub, const Object& data); |
| void ReturnJIT(const Code& stub, const Object& data, const Function& target); |
| void ReturnJITorAOT(const Code& stub, |
| const Object& data, |
| const Function& target); |
| |
| const Instance& receiver() { return *caller_arguments_[0]; } |
| |
| bool should_consider_patching() { |
| // In AOT we use switchable calls. |
| if (FLAG_precompiled_mode) return true; |
| |
| // In JIT instance calls use a different calling sequence in unoptimized vs |
| // optimized code (see [FlowGraphCompiler::EmitInstanceCallJIT] vs |
| // [FlowGraphCompiler::EmitOptimizedInstanceCall]). |
| // |
    // [CodePatcher::GetInstanceCallAt] and [CodePatcher::PatchInstanceCallAt]
    // only recognize the unoptimized call pattern.
| // |
| // So we will not try to switch optimized instance calls. |
| return !caller_code_.is_optimized(); |
| } |
| |
| ICDataPtr NewICData(); |
| ICDataPtr NewICDataWithTarget(intptr_t cid, const Function& target); |
| |
| Isolate* isolate_; |
| Thread* thread_; |
| Zone* zone_; |
| const GrowableArray<const Instance*>& caller_arguments_; |
| MissHandler miss_handler_; |
| NativeArguments arguments_; |
| StackFrame* caller_frame_; |
| const Code& caller_code_; |
| const Function& caller_function_; |
| |
| // Call-site information populated during resolution. |
| String& name_; |
| Array& args_descriptor_; |
| bool is_monomorphic_hit_ = false; |
| }; |
| |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| void PatchableCallHandler::DoUnlinkedCallAOT(const UnlinkedCall& unlinked, |
| const Function& target_function) { |
| const auto& ic_data = ICData::Handle( |
| zone_, |
| target_function.IsNull() |
| ? NewICData() |
| : NewICDataWithTarget(receiver().GetClassId(), target_function)); |
| |
| Object& object = Object::Handle(zone_, ic_data.ptr()); |
| Code& code = Code::Handle(zone_, StubCode::ICCallThroughCode().ptr()); |
  // If the target function has optional parameters or is generic, its
  // prologue requires ARGS_DESC_REG to be populated. Yet the switchable calls
| // do not populate that on the call site, which is why we don't transition |
| // those call sites to monomorphic, but rather directly to call via stub |
| // (which will populate the ARGS_DESC_REG from the ICData). |
| // |
| // Because of this we also don't generate monomorphic checks for those |
| // functions. |
| if (!target_function.IsNull() && |
| !target_function.PrologueNeedsArgumentsDescriptor()) { |
| // Patch to monomorphic call. |
| ASSERT(target_function.HasCode()); |
| const Code& target_code = |
| Code::Handle(zone_, target_function.CurrentCode()); |
| const Smi& expected_cid = |
| Smi::Handle(zone_, Smi::New(receiver().GetClassId())); |
| |
| if (unlinked.can_patch_to_monomorphic()) { |
| object = expected_cid.ptr(); |
| code = target_code.ptr(); |
| ASSERT(code.HasMonomorphicEntry()); |
| } else { |
| object = MonomorphicSmiableCall::New(expected_cid.Value(), target_code); |
| code = StubCode::MonomorphicSmiableCheck().ptr(); |
| } |
| } |
| CodePatcher::PatchSwitchableCallAt(caller_frame_->pc(), caller_code_, object, |
| code); |
| |
| // Return the ICData. The miss stub will jump to continue in the IC lookup |
| // stub. |
| ReturnAOT(StubCode::ICCallThroughCode(), ic_data); |
| } |
| |
| bool PatchableCallHandler::CanExtendSingleTargetRange( |
| const String& name, |
| const Function& old_target, |
| const Function& target_function, |
| intptr_t* lower, |
| intptr_t* upper) { |
| if (old_target.ptr() != target_function.ptr()) { |
| return false; |
| } |
| intptr_t unchecked_lower, unchecked_upper; |
| if (receiver().GetClassId() < *lower) { |
| unchecked_lower = receiver().GetClassId(); |
| unchecked_upper = *lower - 1; |
| *lower = receiver().GetClassId(); |
| } else { |
| unchecked_upper = receiver().GetClassId(); |
| unchecked_lower = *upper + 1; |
| *upper = receiver().GetClassId(); |
| } |
| |
| return IsSingleTarget(isolate_->group(), zone_, unchecked_lower, |
| unchecked_upper, target_function, name); |
| } |
| #endif // defined(DART_PRECOMPILED_RUNTIME) |
| |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| void PatchableCallHandler::DoMonomorphicMissAOT( |
| const Object& old_data, |
| const Function& target_function) { |
| classid_t old_expected_cid; |
| if (old_data.IsSmi()) { |
| old_expected_cid = Smi::Cast(old_data).Value(); |
| } else { |
| RELEASE_ASSERT(old_data.IsMonomorphicSmiableCall()); |
| old_expected_cid = MonomorphicSmiableCall::Cast(old_data).expected_cid(); |
| } |
| const bool is_monomorphic_hit = old_expected_cid == receiver().GetClassId(); |
| const auto& old_receiver_class = Class::Handle( |
| zone_, isolate_->group()->class_table()->At(old_expected_cid)); |
| const auto& old_target = Function::Handle( |
| zone_, Resolve(thread_, zone_, caller_arguments_, old_receiver_class, |
| name_, args_descriptor_)); |
| |
| const auto& ic_data = ICData::Handle( |
| zone_, old_target.IsNull() |
| ? NewICData() |
| : NewICDataWithTarget(old_expected_cid, old_target)); |
| |
| if (is_monomorphic_hit) { |
    // The call site has just been updated to the monomorphic state with the
    // same exact class id. Do nothing in that case: the stub will call
    // through the ic data.
| ReturnAOT(StubCode::ICCallThroughCode(), ic_data); |
| return; |
| } |
| |
| intptr_t lower = old_expected_cid; |
| intptr_t upper = old_expected_cid; |
| if (CanExtendSingleTargetRange(name_, old_target, target_function, &lower, |
| &upper)) { |
| const SingleTargetCache& cache = |
| SingleTargetCache::Handle(zone_, SingleTargetCache::New()); |
| const Code& code = Code::Handle(zone_, target_function.CurrentCode()); |
| cache.set_target(code); |
| cache.set_entry_point(code.EntryPoint()); |
| cache.set_lower_limit(lower); |
| cache.set_upper_limit(upper); |
| const Code& stub = StubCode::SingleTargetCall(); |
| CodePatcher::PatchSwitchableCallAt(caller_frame_->pc(), caller_code_, cache, |
| stub); |
| // Return the ICData. The miss stub will jump to continue in the IC call |
| // stub. |
| ReturnAOT(StubCode::ICCallThroughCode(), ic_data); |
| return; |
| } |
| |
| // Patch to call through stub. |
| const Code& stub = StubCode::ICCallThroughCode(); |
| CodePatcher::PatchSwitchableCallAt(caller_frame_->pc(), caller_code_, ic_data, |
| stub); |
| |
| // Return the ICData. The miss stub will jump to continue in the IC lookup |
| // stub. |
| ReturnAOT(stub, ic_data); |
| } |
| #endif // defined(DART_PRECOMPILED_RUNTIME) |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| void PatchableCallHandler::DoMonomorphicMissJIT( |
| const Object& old_data, |
| const Function& target_function) { |
| // Monomorphic calls use the ICData::entries() as their data. |
| const auto& old_ic_data_entries = Array::Cast(old_data); |
  // Any non-empty ICData::entries() has a backref to its ICData.
| const auto& ic_data = |
| ICData::Handle(zone_, ICData::ICDataOfEntriesArray(old_ic_data_entries)); |
| |
| // The target didn't change, so we can stay inside monomorphic state. |
| if (ic_data.NumberOfChecksIs(1) && |
| (ic_data.GetReceiverClassIdAt(0) == receiver().GetClassId())) { |
| // No need to update ICData - it's already up-to-date. |
| |
| if (FLAG_trace_ic) { |
| OS::PrintErr("Instance call at %" Px |
| " updating code (old code was disabled)\n", |
| caller_frame_->pc()); |
| } |
| |
    // We stay in the monomorphic state, patch the code object and reload the
    // ICData entries array.
| const auto& code = Code::Handle(zone_, target_function.EnsureHasCode()); |
| const auto& data = Object::Handle(zone_, ic_data.entries()); |
| CodePatcher::PatchInstanceCallAt(caller_frame_->pc(), caller_code_, data, |
| code); |
| ReturnJIT(code, data, target_function); |
| return; |
| } |
| |
| ASSERT(ic_data.NumArgsTested() == 1); |
| const Code& stub = ic_data.is_tracking_exactness() |
| ? StubCode::OneArgCheckInlineCacheWithExactnessCheck() |
| : StubCode::OneArgCheckInlineCache(); |
| if (FLAG_trace_ic) { |
| OS::PrintErr("Instance call at %" Px |
| " switching monomorphic to polymorphic dispatch, %s\n", |
| caller_frame_->pc(), ic_data.ToCString()); |
| } |
| CodePatcher::PatchInstanceCallAt(caller_frame_->pc(), caller_code_, ic_data, |
| stub); |
| |
| ASSERT(caller_arguments_.length() == 1); |
| UpdateICDataWithTarget(ic_data, target_function); |
| ASSERT(should_consider_patching()); |
| TrySwitchInstanceCall(thread_, caller_frame_, caller_code_, caller_function_, |
| ic_data, target_function); |
| ReturnJIT(stub, ic_data, target_function); |
| } |
| #endif // !defined(DART_PRECOMPILED_RUNTIME) |
| |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| void PatchableCallHandler::DoSingleTargetMissAOT( |
| const SingleTargetCache& data, |
| const Function& target_function) { |
| const Code& old_target_code = Code::Handle(zone_, data.target()); |
| const Function& old_target = |
| Function::Handle(zone_, Function::RawCast(old_target_code.owner())); |
| |
| // We lost the original ICData when we patched to the monomorphic case. |
| const auto& ic_data = ICData::Handle( |
| zone_, |
| target_function.IsNull() |
| ? NewICData() |
| : NewICDataWithTarget(receiver().GetClassId(), target_function)); |
| |
| intptr_t lower = data.lower_limit(); |
| intptr_t upper = data.upper_limit(); |
| if (CanExtendSingleTargetRange(name_, old_target, target_function, &lower, |
| &upper)) { |
| data.set_lower_limit(lower); |
| data.set_upper_limit(upper); |
| // Return the ICData. The single target stub will jump to continue in the |
| // IC call stub. |
| ReturnAOT(StubCode::ICCallThroughCode(), ic_data); |
| return; |
| } |
| |
| // Call site is not single target, switch to call using ICData. |
| const Code& stub = StubCode::ICCallThroughCode(); |
| CodePatcher::PatchSwitchableCallAt(caller_frame_->pc(), caller_code_, ic_data, |
| stub); |
| |
| // Return the ICData. The single target stub will jump to continue in the |
| // IC call stub. |
| ReturnAOT(stub, ic_data); |
| } |
| #endif // defined(DART_PRECOMPILED_RUNTIME) |
| |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| void PatchableCallHandler::DoICDataMissAOT(const ICData& ic_data, |
| const Function& target_function) { |
| const String& name = String::Handle(zone_, ic_data.target_name()); |
| const Class& cls = Class::Handle(zone_, receiver().clazz()); |
| ASSERT(!cls.IsNull()); |
| const Array& descriptor = |
| Array::CheckedHandle(zone_, ic_data.arguments_descriptor()); |
| ArgumentsDescriptor args_desc(descriptor); |
| if (FLAG_trace_ic || FLAG_trace_ic_miss_in_optimized) { |
| OS::PrintErr("ICData miss, class=%s, function<%" Pd ">=%s\n", |
| cls.ToCString(), args_desc.TypeArgsLen(), name.ToCString()); |
| } |
| |
| if (target_function.IsNull()) { |
| ReturnAOT(StubCode::NoSuchMethodDispatcher(), ic_data); |
| return; |
| } |
| |
| const intptr_t number_of_checks = ic_data.NumberOfChecks(); |
| |
| if ((number_of_checks == 0) && |
| (!FLAG_precompiled_mode || ic_data.receiver_cannot_be_smi()) && |
| !target_function.PrologueNeedsArgumentsDescriptor()) { |
| // This call site is unlinked: transition to a monomorphic direct call. |
| // Note we cannot do this if the target has optional parameters because |
| // the monomorphic direct call does not load the arguments descriptor. |
| // We cannot do this if we are still in the middle of precompiling because |
| // the monomorphic case hides a live instance selector from the |
| // treeshaker. |
| const Code& target_code = |
| Code::Handle(zone_, target_function.EnsureHasCode()); |
| const Smi& expected_cid = |
| Smi::Handle(zone_, Smi::New(receiver().GetClassId())); |
| ASSERT(target_code.HasMonomorphicEntry()); |
| CodePatcher::PatchSwitchableCallAt(caller_frame_->pc(), caller_code_, |
| expected_cid, target_code); |
| ReturnAOT(target_code, expected_cid); |
| } else { |
| ic_data.EnsureHasReceiverCheck(receiver().GetClassId(), target_function); |
| if (number_of_checks > FLAG_max_polymorphic_checks) { |
| // Switch to megamorphic call. |
| const MegamorphicCache& cache = MegamorphicCache::Handle( |
| zone_, MegamorphicCacheTable::Lookup(thread_, name, descriptor)); |
| const Code& stub = StubCode::MegamorphicCall(); |
| |
| CodePatcher::PatchSwitchableCallAt(caller_frame_->pc(), caller_code_, |
| cache, stub); |
| ReturnAOT(stub, cache); |
| } else { |
| ReturnAOT(StubCode::ICCallThroughCode(), ic_data); |
| } |
| } |
| } |
| #endif // defined(DART_PRECOMPILED_RUNTIME) |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| void PatchableCallHandler::DoICDataMissJIT(const ICData& ic_data, |
| const Object& old_code, |
| const Function& target_function) { |
| ASSERT(ic_data.NumArgsTested() == caller_arguments_.length()); |
| |
| if (ic_data.NumArgsTested() == 1) { |
| ASSERT(old_code.ptr() == StubCode::OneArgCheckInlineCache().ptr() || |
| old_code.ptr() == |
| StubCode::OneArgCheckInlineCacheWithExactnessCheck().ptr() || |
| old_code.ptr() == |
| StubCode::OneArgOptimizedCheckInlineCache().ptr() || |
| old_code.ptr() == |
| StubCode::OneArgOptimizedCheckInlineCacheWithExactnessCheck() |
| .ptr() || |
| old_code.ptr() == StubCode::ICCallBreakpoint().ptr() || |
| (old_code.IsNull() && !should_consider_patching())); |
| UpdateICDataWithTarget(ic_data, target_function); |
| if (should_consider_patching()) { |
| TrySwitchInstanceCall(thread_, caller_frame_, caller_code_, |
| caller_function_, ic_data, target_function); |
| } |
| const Code& stub = Code::Handle( |
| zone_, ic_data.is_tracking_exactness() |
| ? StubCode::OneArgCheckInlineCacheWithExactnessCheck().ptr() |
| : StubCode::OneArgCheckInlineCache().ptr()); |
| ReturnJIT(stub, ic_data, target_function); |
| } else { |
| ASSERT(old_code.ptr() == StubCode::TwoArgsCheckInlineCache().ptr() || |
| old_code.ptr() == StubCode::SmiAddInlineCache().ptr() || |
| old_code.ptr() == StubCode::SmiLessInlineCache().ptr() || |
| old_code.ptr() == StubCode::SmiEqualInlineCache().ptr() || |
| old_code.ptr() == |
| StubCode::TwoArgsOptimizedCheckInlineCache().ptr() || |
| old_code.ptr() == StubCode::ICCallBreakpoint().ptr() || |
| (old_code.IsNull() && !should_consider_patching())); |
| UpdateICDataWithTarget(ic_data, target_function); |
| ReturnJIT(StubCode::TwoArgsCheckInlineCache(), ic_data, target_function); |
| } |
| } |
| #endif // !defined(DART_PRECOMPILED_RUNTIME) |
| |
| void PatchableCallHandler::DoMegamorphicMiss(const MegamorphicCache& data, |
| const Function& target_function) { |
| const String& name = String::Handle(zone_, data.target_name()); |
| const Class& cls = Class::Handle(zone_, receiver().clazz()); |
| ASSERT(!cls.IsNull()); |
| const Array& descriptor = |
| Array::CheckedHandle(zone_, data.arguments_descriptor()); |
| ArgumentsDescriptor args_desc(descriptor); |
| if (FLAG_trace_ic || FLAG_trace_ic_miss_in_optimized) { |
| OS::PrintErr("Megamorphic miss, class=%s, function<%" Pd ">=%s\n", |
| cls.ToCString(), args_desc.TypeArgsLen(), name.ToCString()); |
| } |
| if (target_function.IsNull()) { |
| ReturnJITorAOT(StubCode::NoSuchMethodDispatcher(), data, target_function); |
| return; |
| } |
| |
| // Insert function found into cache. |
| const Smi& class_id = Smi::Handle(zone_, Smi::New(cls.id())); |
| data.EnsureContains(class_id, target_function); |
| ReturnJITorAOT(StubCode::MegamorphicCall(), data, target_function); |
| } |
| |
| void PatchableCallHandler::UpdateICDataWithTarget( |
| const ICData& ic_data, |
| const Function& target_function) { |
| if (target_function.IsNull()) return; |
| |
  // If, upon return from the runtime, we will invoke the target directly, we
  // have to increment the call count here in the ICData. If we instead only
  // insert a new ICData entry and return to the IC stub, which will call the
  // target, the stub takes care of the increment.
| const bool call_target_directly = |
| miss_handler_ == MissHandler::kInlineCacheMiss; |
| const intptr_t invocation_count = call_target_directly ? 1 : 0; |
| |
| if (caller_arguments_.length() == 1) { |
| auto exactness = StaticTypeExactnessState::NotTracking(); |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| if (ic_data.is_tracking_exactness()) { |
| exactness = receiver().IsNull() |
| ? StaticTypeExactnessState::NotExact() |
| : StaticTypeExactnessState::Compute( |
| Type::Cast(AbstractType::Handle( |
| ic_data.receivers_static_type())), |
| receiver()); |
| } |
| #endif // !defined(DART_PRECOMPILED_RUNTIME) |
| ic_data.EnsureHasReceiverCheck(receiver().GetClassId(), target_function, |
| invocation_count, exactness); |
| } else { |
| GrowableArray<intptr_t> class_ids(caller_arguments_.length()); |
| ASSERT(ic_data.NumArgsTested() == caller_arguments_.length()); |
| for (intptr_t i = 0; i < caller_arguments_.length(); i++) { |
| class_ids.Add(caller_arguments_[i]->GetClassId()); |
| } |
| ic_data.EnsureHasCheck(class_ids, target_function, invocation_count); |
| } |
| } |
| |
| void PatchableCallHandler::ReturnAOT(const Code& stub, const Object& data) { |
| ASSERT(miss_handler_ == MissHandler::kSwitchableCallMiss); |
| arguments_.SetArgAt(0, stub); // Second return value. |
| arguments_.SetReturn(data); |
| } |
| |
| void PatchableCallHandler::ReturnJIT(const Code& stub, |
| const Object& data, |
| const Function& target) { |
| // In JIT we can have two different miss handlers to which we return slightly |
| // differently. |
| switch (miss_handler_) { |
| case MissHandler::kSwitchableCallMiss: { |
| arguments_.SetArgAt(0, stub); // Second return value. |
| arguments_.SetReturn(data); |
| break; |
| } |
| case MissHandler::kFixCallersTargetMonomorphic: { |
| arguments_.SetArgAt(1, data); // Second return value. |
| arguments_.SetReturn(stub); |
| break; |
| } |
| case MissHandler::kInlineCacheMiss: { |
| arguments_.SetReturn(target); |
| break; |
| } |
| } |
| } |
| |
| void PatchableCallHandler::ReturnJITorAOT(const Code& stub, |
| const Object& data, |
| const Function& target) { |
#if defined(DART_PRECOMPILED_RUNTIME)
| ReturnAOT(stub, data); |
| #else |
| ReturnJIT(stub, data, target); |
| #endif |
| } |
| |
| ICDataPtr PatchableCallHandler::NewICData() { |
| return ICData::New(caller_function_, name_, args_descriptor_, DeoptId::kNone, |
| /*num_args_tested=*/1, ICData::kInstance); |
| } |
| |
| ICDataPtr PatchableCallHandler::NewICDataWithTarget(intptr_t cid, |
| const Function& target) { |
| GrowableArray<intptr_t> cids(1); |
| cids.Add(cid); |
| return ICData::NewWithCheck(caller_function_, name_, args_descriptor_, |
| DeoptId::kNone, /*num_args_tested=*/1, |
| ICData::kInstance, &cids, target); |
| } |
| |
| FunctionPtr PatchableCallHandler::ResolveTargetFunction(const Object& data) { |
| switch (data.GetClassId()) { |
| case kUnlinkedCallCid: { |
| const auto& unlinked_call = UnlinkedCall::Cast(data); |
| |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| // When transitioning out of UnlinkedCall to other states (e.g. |
| // Monomorphic, MonomorphicSmiable, SingleTarget) we lose |
| // name/arg-descriptor in AOT mode and cannot recover it. |
| // |
| // Even if we could recover an old target function (which was missed) - |
| // which we cannot in AOT bare mode - we can still lose the name due to a |
| // dyn:* call site potentially targeting non-dyn:* targets. |
| // |
| // => We will therefore retain the unlinked call here. |
| // |
| // In JIT mode we always use ICData from the call site, which has the |
| // correct name/args-descriptor. |
| SaveUnlinkedCall(zone_, isolate_, caller_frame_->pc(), unlinked_call); |
| #endif // defined(DART_PRECOMPILED_RUNTIME) |
| |
| name_ = unlinked_call.target_name(); |
| args_descriptor_ = unlinked_call.arguments_descriptor(); |
| break; |
| } |
| case kMonomorphicSmiableCallCid: |
| FALL_THROUGH; |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| case kSmiCid: |
| FALL_THROUGH; |
| case kSingleTargetCacheCid: { |
| const auto& unlinked_call = UnlinkedCall::Handle( |
| zone_, LoadUnlinkedCall(zone_, isolate_, caller_frame_->pc())); |
| name_ = unlinked_call.target_name(); |
| args_descriptor_ = unlinked_call.arguments_descriptor(); |
| break; |
| } |
| #else |
| case kArrayCid: { |
| // Monomorphic calls use the ICData::entries() as their data. |
| const auto& ic_data_entries = Array::Cast(data); |
      // Any non-empty ICData::entries() has a backref to its ICData.
| const auto& ic_data = |
| ICData::Handle(zone_, ICData::ICDataOfEntriesArray(ic_data_entries)); |
| args_descriptor_ = ic_data.arguments_descriptor(); |
| name_ = ic_data.target_name(); |
| break; |
| } |
| #endif // defined(DART_PRECOMPILED_RUNTIME) |
| case kICDataCid: |
| FALL_THROUGH; |
| case kMegamorphicCacheCid: { |
| const CallSiteData& call_site_data = CallSiteData::Cast(data); |
| name_ = call_site_data.target_name(); |
| args_descriptor_ = call_site_data.arguments_descriptor(); |
| break; |
| } |
| default: |
| UNREACHABLE(); |
| } |
| const Class& cls = Class::Handle(zone_, receiver().clazz()); |
| return Resolve(thread_, zone_, caller_arguments_, cls, name_, |
| args_descriptor_); |
| } |
| |
| void PatchableCallHandler::ResolveSwitchAndReturn(const Object& old_data) { |
  // Find out the actual target (which can be time-consuming) without holding
  // any locks.
| const auto& target_function = |
| Function::Handle(zone_, ResolveTargetFunction(old_data)); |
| |
| auto& data = Object::Handle(zone_); |
| |
  // We ensure any transitions of patchable calls are done in an atomic
  // manner and that we always transition forward (e.g. Monomorphic ->
  // Polymorphic).
| // |
| // Mutators are only stopped if we actually need to patch a patchable call. |
| // We may not do that if we e.g. just add one more check to an ICData. |
| SafepointMutexLocker ml(thread_->isolate_group()->patchable_call_mutex()); |
| |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| data = |
| CodePatcher::GetSwitchableCallDataAt(caller_frame_->pc(), caller_code_); |
| uword target_entry = 0; |
| DEBUG_ONLY(target_entry = CodePatcher::GetSwitchableCallTargetEntryAt( |
| caller_frame_->pc(), caller_code_)); |
| HandleMissAOT(data, target_entry, target_function); |
| #else |
| auto& code = Code::Handle(zone_); |
| if (should_consider_patching()) { |
| code ^= CodePatcher::GetInstanceCallAt(caller_frame_->pc(), caller_code_, |
| &data); |
| } else { |
| ASSERT(old_data.IsICData() || old_data.IsMegamorphicCache()); |
| data = old_data.ptr(); |
| } |
| HandleMissJIT(data, code, target_function); |
| #endif |
| } |
| |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| |
| void PatchableCallHandler::HandleMissAOT(const Object& old_data, |
| uword old_entry, |
| const Function& target_function) { |