| // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file |
| // for details. All rights reserved. Use of this source code is governed by a |
| // BSD-style license that can be found in the LICENSE file. |
| |
| #include "vm/object.h" |
| |
| #include <memory> |
| |
| #include "compiler/method_recognizer.h" |
| #include "include/dart_api.h" |
| #include "lib/integers.h" |
| #include "lib/stacktrace.h" |
| #include "platform/assert.h" |
| #include "platform/text_buffer.h" |
| #include "platform/unaligned.h" |
| #include "platform/unicode.h" |
| #include "vm/bit_vector.h" |
| #include "vm/bootstrap.h" |
| #include "vm/bytecode_reader.h" |
| #include "vm/canonical_tables.h" |
| #include "vm/class_finalizer.h" |
| #include "vm/class_id.h" |
| #include "vm/closure_functions_cache.h" |
| #include "vm/code_comments.h" |
| #include "vm/code_descriptors.h" |
| #include "vm/code_observers.h" |
| #include "vm/compiler/assembler/disassembler.h" |
| #include "vm/compiler/assembler/disassembler_kbc.h" |
| #include "vm/compiler/jit/compiler.h" |
| #include "vm/compiler/runtime_api.h" |
| #include "vm/cpu.h" |
| #include "vm/dart.h" |
| #include "vm/dart_api_state.h" |
| #include "vm/dart_entry.h" |
| #include "vm/datastream.h" |
| #include "vm/debugger.h" |
| #include "vm/deopt_instructions.h" |
| #include "vm/double_conversion.h" |
| #include "vm/elf.h" |
| #include "vm/exceptions.h" |
| #include "vm/growable_array.h" |
| #include "vm/hash.h" |
| #include "vm/hash_table.h" |
| #include "vm/heap/become.h" |
| #include "vm/heap/heap.h" |
| #include "vm/heap/sampler.h" |
| #include "vm/heap/weak_code.h" |
| #include "vm/image_snapshot.h" |
| #include "vm/isolate_reload.h" |
| #include "vm/kernel.h" |
| #include "vm/kernel_binary.h" |
| #include "vm/kernel_isolate.h" |
| #include "vm/kernel_loader.h" |
| #include "vm/log.h" |
| #include "vm/native_symbol.h" |
| #include "vm/object_graph.h" |
| #include "vm/object_store.h" |
| #include "vm/os.h" |
| #include "vm/parser.h" |
| #include "vm/profiler.h" |
| #include "vm/regexp.h" |
| #include "vm/resolver.h" |
| #include "vm/reusable_handles.h" |
| #include "vm/reverse_pc_lookup_cache.h" |
| #include "vm/runtime_entry.h" |
| #include "vm/scopes.h" |
| #include "vm/stack_frame.h" |
| #include "vm/stub_code.h" |
| #include "vm/symbols.h" |
| #include "vm/tags.h" |
| #include "vm/thread_registry.h" |
| #include "vm/timeline.h" |
| #include "vm/type_testing_stubs.h" |
| #include "vm/zone_text_buffer.h" |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| #include "vm/compiler/aot/precompiler.h" |
| #include "vm/compiler/assembler/assembler.h" |
| #include "vm/compiler/backend/code_statistics.h" |
| #include "vm/compiler/compiler_state.h" |
| #include "vm/compiler/frontend/kernel_fingerprints.h" |
| #include "vm/compiler/frontend/kernel_translation_helper.h" |
| #include "vm/compiler/intrinsifier.h" |
| #endif // !defined(DART_PRECOMPILED_RUNTIME) |
| |
| namespace dart { |
| |
| DEFINE_FLAG(uint64_t, |
| huge_method_cutoff_in_code_size, |
| 200000, |
| "Huge method cutoff in unoptimized code size (in bytes)."); |
| DEFINE_FLAG( |
| bool, |
| show_internal_names, |
| false, |
| "Show names of internal classes (e.g. \"OneByteString\") in error messages " |
| "instead of showing the corresponding interface names (e.g. \"String\"). " |
| "Also show legacy nullability in type names."); |
| |
| DEFINE_FLAG(bool, |
| remove_script_timestamps_for_test, |
| false, |
| "Remove script timestamps to allow for deterministic testing."); |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| DEFINE_FLAG(bool, use_register_cc, true, "Use register calling conventions"); |
| #endif |
| |
| DECLARE_FLAG(bool, intrinsify); |
| DECLARE_FLAG(bool, trace_deoptimization); |
| DECLARE_FLAG(bool, trace_deoptimization_verbose); |
| DECLARE_FLAG(bool, trace_reload); |
| DECLARE_FLAG(bool, write_protect_code); |
| DECLARE_FLAG(bool, precompiled_mode); |
| DECLARE_FLAG(int, max_polymorphic_checks); |
| |
| static const char* const kGetterPrefix = "get:"; |
| static const intptr_t kGetterPrefixLength = strlen(kGetterPrefix); |
| static const char* const kSetterPrefix = "set:"; |
| static const intptr_t kSetterPrefixLength = strlen(kSetterPrefix); |
| static const char* const kInitPrefix = "init:"; |
| static const intptr_t kInitPrefixLength = strlen(kInitPrefix); |
| |
// A cache of preinitialized empty ICData entry arrays allocated in the VM
// heap.
| ArrayPtr ICData::cached_icdata_arrays_[kCachedICDataArrayCount]; |
| |
| cpp_vtable Object::builtin_vtables_[kNumPredefinedCids] = {}; |
| |
// These are initialized to a value that will force an illegal memory access
// if they are used.
| #if defined(RAW_NULL) |
| #error RAW_NULL should not be defined. |
| #endif |
| #define RAW_NULL static_cast<uword>(kHeapObjectTag) |
| |
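// Evaluates 'error' once and returns it from the enclosing function if it
// represents a real error (i.e. it is not Error::null()).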
| #define CHECK_ERROR(error) \ |
| { \ |
| ErrorPtr err = (error); \ |
| if (err != Error::null()) { \ |
| return err; \ |
| } \ |
| } |
| |
| #define DEFINE_SHARED_READONLY_HANDLE(Type, name) \ |
| Type* Object::name##_ = nullptr; |
| SHARED_READONLY_HANDLES_LIST(DEFINE_SHARED_READONLY_HANDLE) |
| #undef DEFINE_SHARED_READONLY_HANDLE |
| |
| ObjectPtr Object::null_ = static_cast<ObjectPtr>(RAW_NULL); |
| BoolPtr Object::true_ = static_cast<BoolPtr>(RAW_NULL); |
| BoolPtr Object::false_ = static_cast<BoolPtr>(RAW_NULL); |
| ClassPtr Object::class_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::dynamic_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::void_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::type_parameters_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::type_arguments_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::patch_class_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::function_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::closure_data_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::ffi_trampoline_data_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::field_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::script_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::library_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::namespace_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::kernel_program_info_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::code_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::instructions_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::instructions_section_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::instructions_table_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::object_pool_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::pc_descriptors_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::code_source_map_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::compressed_stackmaps_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::var_descriptors_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::exception_handlers_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::context_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::context_scope_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::bytecode_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::sentinel_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::singletargetcache_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::unlinkedcall_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::monomorphicsmiablecall_class_ = |
| static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::icdata_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::megamorphic_cache_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::subtypetestcache_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::loadingunit_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::api_error_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::language_error_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::unhandled_exception_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::unwind_error_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::weak_serialization_reference_class_ = |
| static_cast<ClassPtr>(RAW_NULL); |
| ClassPtr Object::weak_array_class_ = static_cast<ClassPtr>(RAW_NULL); |
| |
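// Appends the substring name[start_pos, start_pos + len) to 'buffer'.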
| static void AppendSubString(BaseTextBuffer* buffer, |
| const char* name, |
| intptr_t start_pos, |
| intptr_t len) { |
| buffer->Printf("%.*s", static_cast<int>(len), &name[start_pos]); |
| } |
| |
| // Used to define setters and getters for untagged object fields that are |
| // defined with the WSR_COMPRESSED_POINTER_FIELD macro. See |
| // PRECOMPILER_WSR_FIELD_DECLARATION in object.h for more information. |
| #if defined(DART_PRECOMPILER) |
| #define PRECOMPILER_WSR_FIELD_DEFINITION(Class, Type, Name) \ |
| Type##Ptr Class::Name() const { \ |
| return Type::RawCast(WeakSerializationReference::Unwrap(untag()->Name())); \ |
| } |
| #else |
| #define PRECOMPILER_WSR_FIELD_DEFINITION(Class, Type, Name) \ |
| void Class::set_##Name(const Type& value) const { \ |
| untag()->set_##Name(value.ptr()); \ |
| } |
| #endif |
| |
| PRECOMPILER_WSR_FIELD_DEFINITION(ClosureData, Function, parent_function) |
| PRECOMPILER_WSR_FIELD_DEFINITION(Function, FunctionType, signature) |
| |
| #undef PRECOMPILER_WSR_FIELD_DEFINITION |
| |
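// MSVC does not support the GNU '##__VA_ARGS__' extension that swallows the
// trailing comma when no variadic arguments are passed, hence the two
// variants of this macro.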
| #if defined(_MSC_VER) |
| #define TRACE_TYPE_CHECKS_VERBOSE(format, ...) \ |
| if (FLAG_trace_type_checks_verbose) { \ |
| OS::PrintErr(format, __VA_ARGS__); \ |
| } |
| #else |
| #define TRACE_TYPE_CHECKS_VERBOSE(format, ...) \ |
| if (FLAG_trace_type_checks_verbose) { \ |
| OS::PrintErr(format, ##__VA_ARGS__); \ |
| } |
| #endif |
| |
| // Remove private keys, but retain getter/setter/constructor/mixin manglings. |
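// For example, "_MyClass@6328321.foo@6328321" becomes "_MyClass.foo", while
// a "get:" or "set:" prefix is left in place.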
| StringPtr String::RemovePrivateKey(const String& name) { |
| ASSERT(name.IsOneByteString()); |
| GrowableArray<uint8_t> without_key(name.Length()); |
| intptr_t i = 0; |
| while (i < name.Length()) { |
| while (i < name.Length()) { |
| uint8_t c = name.CharAt(i++); |
| if (c == '@') break; |
| without_key.Add(c); |
| } |
| while (i < name.Length()) { |
| uint8_t c = name.CharAt(i); |
| if ((c < '0') || (c > '9')) break; |
| i++; |
| } |
| } |
| |
| return String::FromLatin1(without_key.data(), without_key.length()); |
| } |
| |
// Takes a VM-internal name and makes it suitable for an external user.
| // |
| // Examples: |
| // |
| // Internal getter and setter prefixes are changed: |
| // |
| // get:foo -> foo |
| // set:foo -> foo= |
| // |
| // Private name mangling is removed, possibly multiple times: |
| // |
| // _ReceivePortImpl@709387912 -> _ReceivePortImpl |
| // _ReceivePortImpl@709387912._internal@709387912 -> |
| // _ReceivePortImpl._internal |
| // _C@6328321&_E@6328321&_F@6328321 -> _C&_E&_F |
| // |
| // The trailing . on the default constructor name is dropped: |
| // |
| // List. -> List |
| // |
| // And so forth: |
| // |
| // get:foo@6328321 -> foo |
| // _MyClass@6328321. -> _MyClass |
| // _MyClass@6328321.named -> _MyClass.named |
| // |
// For extension methods, the following demangling is done:
| // ext|func -> ext.func (instance extension method) |
| // ext|get#prop -> ext.prop (instance extension getter) |
| // ext|set#prop -> ext.prop= (instance extension setter) |
| // ext|sfunc -> ext.sfunc (static extension method) |
| // get:ext|sprop -> ext.sprop (static extension getter) |
| // set:ext|sprop -> ext.sprop= (static extension setter) |
| // |
| const char* String::ScrubName(const String& name, bool is_extension) { |
| Thread* thread = Thread::Current(); |
| NoSafepointScope no_safepoint(thread); |
| Zone* zone = thread->zone(); |
| ZoneTextBuffer printer(zone); |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| if (name.Equals(Symbols::TopLevel())) { |
| // Name of invisible top-level class. |
| return ""; |
| } |
| #endif // !defined(DART_PRECOMPILED_RUNTIME) |
| |
| const char* cname = name.ToCString(); |
| ASSERT(strlen(cname) == static_cast<size_t>(name.Length())); |
| const intptr_t name_len = name.Length(); |
// First remove all private name mangling; if 'is_extension' is true,
// substitute the first '|' character with '.'.
| intptr_t start_pos = 0; |
| intptr_t sum_segment_len = 0; |
| for (intptr_t i = 0; i < name_len; i++) { |
| if ((cname[i] == '@') && ((i + 1) < name_len) && (cname[i + 1] >= '0') && |
| (cname[i + 1] <= '9')) { |
| // Append the current segment to the unmangled name. |
| const intptr_t segment_len = i - start_pos; |
| sum_segment_len += segment_len; |
| AppendSubString(&printer, cname, start_pos, segment_len); |
| // Advance until past the name mangling. The private keys are only |
| // numbers so we skip until the first non-number. |
| i++; // Skip the '@'. |
| while ((i < name.Length()) && (name.CharAt(i) >= '0') && |
| (name.CharAt(i) <= '9')) { |
| i++; |
| } |
| start_pos = i; |
| i--; // Account for for-loop increment. |
| } else if (is_extension && cname[i] == '|') { |
| // Append the current segment to the unmangled name. |
| const intptr_t segment_len = i - start_pos; |
| AppendSubString(&printer, cname, start_pos, segment_len); |
| // Append the '.' character (replaces '|' with '.'). |
| AppendSubString(&printer, ".", 0, 1); |
| start_pos = i + 1; |
| // Account for length of segments added so far. |
| sum_segment_len += (segment_len + 1); |
| } |
| } |
| |
| const char* unmangled_name = nullptr; |
| if (start_pos == 0) { |
| // No name unmangling needed, reuse the name that was passed in. |
| unmangled_name = cname; |
| sum_segment_len = name_len; |
| } else if (name.Length() != start_pos) { |
| // Append the last segment. |
| const intptr_t segment_len = name.Length() - start_pos; |
| sum_segment_len += segment_len; |
| AppendSubString(&printer, cname, start_pos, segment_len); |
| } |
| if (unmangled_name == nullptr) { |
| // Merge unmangled_segments. |
| unmangled_name = printer.buffer(); |
| } |
| |
| printer.Clear(); |
| intptr_t start = 0; |
| intptr_t len = sum_segment_len; |
| bool is_setter = false; |
| if (is_extension) { |
| // First scan till we see the '.' character. |
| for (intptr_t i = 0; i < len; i++) { |
| if (unmangled_name[i] == '.') { |
| intptr_t slen = i + 1; |
| intptr_t plen = slen - start; |
| AppendSubString(&printer, unmangled_name, start, plen); |
| unmangled_name += slen; |
| len -= slen; |
| break; |
| } else if (unmangled_name[i] == ':') { |
| if (start != 0) { |
| // Reset and break. |
| start = 0; |
| is_setter = false; |
| break; |
| } |
| if (unmangled_name[0] == 's') { |
| is_setter = true; |
| } |
| start = i + 1; |
| } |
| } |
| } |
| intptr_t dot_pos = -1; // Position of '.' in the name, if any. |
| start = 0; |
| for (intptr_t i = start; i < len; i++) { |
| if (unmangled_name[i] == ':' || |
| (is_extension && unmangled_name[i] == '#')) { |
| if (start != 0) { |
| // Reset and break. |
| start = 0; |
| dot_pos = -1; |
| break; |
| } |
| ASSERT(start == 0); // Only one : is possible in getters or setters. |
| if (unmangled_name[0] == 's') { |
| ASSERT(!is_setter); |
| is_setter = true; |
| } |
| start = i + 1; |
| } else if (unmangled_name[i] == '.') { |
| if (dot_pos != -1) { |
| // Reset and break. |
| start = 0; |
| dot_pos = -1; |
| break; |
| } |
| ASSERT(dot_pos == -1); // Only one dot is supported. |
| dot_pos = i; |
| } |
| } |
| |
| if (!is_extension && (start == 0) && (dot_pos == -1)) { |
| // This unmangled_name is fine as it is. |
| return unmangled_name; |
| } |
| |
| // Drop the trailing dot if needed. |
| intptr_t end = ((dot_pos + 1) == len) ? dot_pos : len; |
| |
| intptr_t substr_len = end - start; |
| AppendSubString(&printer, unmangled_name, start, substr_len); |
| if (is_setter) { |
| const char* equals = Symbols::Equals().ToCString(); |
| const intptr_t equals_len = strlen(equals); |
| AppendSubString(&printer, equals, 0, equals_len); |
| } |
| |
| return printer.buffer(); |
| } |
| |
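// Like ScrubName, but keeps the private key suffix ('@' followed by digits)
// in the result, e.g. "get:_x@123" -> "_x@123" and "set:_x@123" -> "_x=@123".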
| StringPtr String::ScrubNameRetainPrivate(const String& name, |
| bool is_extension) { |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| intptr_t len = name.Length(); |
| intptr_t start = 0; |
| intptr_t at_pos = -1; // Position of '@' in the name, if any. |
| bool is_setter = false; |
| |
| String& result = String::Handle(); |
| |
// If this is an extension, strip out the leading prefix, e.g. "ext|func"
// would strip out 'ext|'.
| if (is_extension) { |
| // First scan till we see the '|' character. |
| for (intptr_t i = 0; i < len; i++) { |
| if (name.CharAt(i) == '|') { |
| result = String::SubString(name, start, (i - start)); |
| result = String::Concat(result, Symbols::Dot()); |
| start = i + 1; |
| break; |
| } else if (name.CharAt(i) == ':') { |
| if (start != 0) { |
| // Reset and break. |
| start = 0; |
| is_setter = false; |
| break; |
| } |
| if (name.CharAt(0) == 's') { |
| is_setter = true; |
| } |
| start = i + 1; |
| } |
| } |
| } |
| |
| for (intptr_t i = start; i < len; i++) { |
| if (name.CharAt(i) == ':' || (is_extension && name.CharAt(i) == '#')) { |
| // Only one : is possible in getters or setters. |
| ASSERT(is_extension || start == 0); |
| if (name.CharAt(start) == 's') { |
| is_setter = true; |
| } |
| start = i + 1; |
| } else if (name.CharAt(i) == '@') { |
| // Setters should have only one @ so we know where to put the =. |
| ASSERT(!is_setter || (at_pos == -1)); |
| at_pos = i; |
| } |
| } |
| |
| if (start == 0) { |
| // This unmangled_name is fine as it is. |
| return name.ptr(); |
| } |
| |
| if (is_extension) { |
| const String& fname = |
| String::Handle(String::SubString(name, start, (len - start))); |
| result = String::Concat(result, fname); |
| } else { |
| result = String::SubString(name, start, (len - start)); |
| } |
| |
| if (is_setter) { |
| // Setters need to end with '='. |
| if (at_pos == -1) { |
| return String::Concat(result, Symbols::Equals()); |
| } else { |
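// 'at_pos' indexes into 'name', but 'result' has the four-character "set:"
// prefix stripped off, hence the '- 4' adjustment below.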
| const String& pre_at = |
| String::Handle(String::SubString(result, 0, at_pos - 4)); |
| const String& post_at = |
| String::Handle(String::SubString(name, at_pos, len - at_pos)); |
| result = String::Concat(pre_at, Symbols::Equals()); |
| result = String::Concat(result, post_at); |
| } |
| } |
| |
| return result.ptr(); |
| #endif // !defined(DART_PRECOMPILED_RUNTIME) |
| return name.ptr(); // In AOT, return argument unchanged. |
| } |
| |
| template <typename type> |
| static bool IsSpecialCharacter(type value) { |
| return ((value == '"') || (value == '\n') || (value == '\f') || |
| (value == '\b') || (value == '\t') || (value == '\v') || |
| (value == '\r') || (value == '\\') || (value == '$')); |
| } |
| |
| static inline bool IsAsciiNonprintable(int32_t c) { |
| return ((0 <= c) && (c < 32)) || (c == 127); |
| } |
| |
| static int32_t EscapeOverhead(int32_t c) { |
| if (IsSpecialCharacter(c)) { |
| return 1; // 1 additional byte for the backslash. |
| } else if (IsAsciiNonprintable(c)) { |
| return 3; // 3 additional bytes to encode c as \x00. |
| } |
| return 0; |
| } |
| |
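// Returns the character that follows the backslash in the escaped form of
// 'value', e.g. '\n' maps to 'n' and '"' maps to '"'.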
| template <typename type> |
| static type SpecialCharacter(type value) { |
| if (value == '"') { |
| return '"'; |
| } else if (value == '\n') { |
| return 'n'; |
| } else if (value == '\f') { |
| return 'f'; |
| } else if (value == '\b') { |
| return 'b'; |
| } else if (value == '\t') { |
| return 't'; |
| } else if (value == '\v') { |
| return 'v'; |
| } else if (value == '\r') { |
| return 'r'; |
| } else if (value == '\\') { |
| return '\\'; |
| } else if (value == '$') { |
| return '$'; |
| } |
| UNREACHABLE(); |
| return '\0'; |
| } |
| |
| #if defined(DART_DYNAMIC_MODULES) |
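// Wraps one of the VM's canned bytecode sequences in a minimal Bytecode
// object with an empty object pool, empty PC descriptors, and empty
// exception handlers.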
| static BytecodePtr CreateVMInternalBytecode(KernelBytecode::Opcode opcode) { |
| const KBCInstr* instructions = nullptr; |
| intptr_t instructions_size = 0; |
| |
| KernelBytecode::GetVMInternalBytecodeInstructions(opcode, &instructions, |
| &instructions_size); |
| |
| const auto& bytecode = Bytecode::Handle( |
| Bytecode::New(reinterpret_cast<uword>(instructions), instructions_size, |
| -1, TypedDataBase::Handle(), Object::empty_object_pool())); |
| bytecode.set_pc_descriptors(Object::empty_descriptors()); |
| bytecode.set_exception_handlers(Object::empty_exception_handlers()); |
| return bytecode.ptr(); |
| } |
| #endif // defined(DART_DYNAMIC_MODULES) |
| |
| void Object::InitNullAndBool(IsolateGroup* isolate_group) { |
| // Should only be run by the vm isolate. |
| ASSERT(isolate_group == Dart::vm_isolate_group()); |
| Thread* thread = Thread::Current(); |
| auto heap = isolate_group->heap(); |
| |
| // TODO(iposva): NoSafepointScope needs to be added here. |
| ASSERT(class_class() == null_); |
| |
| // Allocate and initialize the null instance. |
| // 'null_' must be the first object allocated as it is used in allocation to |
| // clear the pointer fields of objects. |
| { |
| uword address = |
| heap->Allocate(thread, Instance::InstanceSize(), Heap::kOld); |
| null_ = static_cast<InstancePtr>(address + kHeapObjectTag); |
| InitializeObjectVariant<Instance>(address, kNullCid); |
| null_->untag()->SetCanonical(); |
| } |
| |
| // Allocate and initialize the bool instances. |
| // These must be allocated such that at kBoolValueBitPosition, the address |
| // of true is 0 and the address of false is 1, and their addresses are |
| // otherwise identical. |
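// As a result, the value of a Bool can be recovered from its address with a
// single bit mask, and null can be distinguished from a bool via
// kBoolVsNullMask (see the asserts below).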
| { |
| // Allocate a dummy bool object to give true the desired alignment. |
| uword address = heap->Allocate(thread, Bool::InstanceSize(), Heap::kOld); |
| InitializeObject<Bool>(address); |
| static_cast<BoolPtr>(address + kHeapObjectTag)->untag()->value_ = false; |
| } |
| { |
| // Allocate true. |
| uword address = heap->Allocate(thread, Bool::InstanceSize(), Heap::kOld); |
| true_ = static_cast<BoolPtr>(address + kHeapObjectTag); |
| InitializeObject<Bool>(address); |
| true_->untag()->value_ = true; |
| true_->untag()->SetCanonical(); |
| } |
| { |
| // Allocate false. |
| uword address = heap->Allocate(thread, Bool::InstanceSize(), Heap::kOld); |
| false_ = static_cast<BoolPtr>(address + kHeapObjectTag); |
| InitializeObject<Bool>(address); |
| false_->untag()->value_ = false; |
| false_->untag()->SetCanonical(); |
| } |
| |
| // Check that the objects have been allocated at appropriate addresses. |
| ASSERT(static_cast<uword>(true_) == |
| static_cast<uword>(null_) + kTrueOffsetFromNull); |
| ASSERT(static_cast<uword>(false_) == |
| static_cast<uword>(null_) + kFalseOffsetFromNull); |
| ASSERT((static_cast<uword>(true_) & kBoolValueMask) == 0); |
| ASSERT((static_cast<uword>(false_) & kBoolValueMask) != 0); |
| ASSERT(static_cast<uword>(false_) == |
| (static_cast<uword>(true_) | kBoolValueMask)); |
| ASSERT((static_cast<uword>(null_) & kBoolVsNullMask) == 0); |
| ASSERT((static_cast<uword>(true_) & kBoolVsNullMask) != 0); |
| ASSERT((static_cast<uword>(false_) & kBoolVsNullMask) != 0); |
| } |
| |
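// Captures the C++ vtable pointer of the handle class corresponding to each
// predefined class id by constructing stack-allocated fake handles, so that
// handles for these class ids can later be set up from the class id alone.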
| void Object::InitVtables() { |
| { |
| Object fake_handle; |
| builtin_vtables_[kObjectCid] = fake_handle.vtable(); |
| } |
| |
| #define INIT_VTABLE(clazz) \ |
| { \ |
| clazz fake_handle; \ |
| builtin_vtables_[k##clazz##Cid] = fake_handle.vtable(); \ |
| } |
| CLASS_LIST_NO_OBJECT_NOR_STRING_NOR_ARRAY_NOR_MAP(INIT_VTABLE) |
| INIT_VTABLE(GrowableObjectArray) |
| #undef INIT_VTABLE |
| |
| #define INIT_VTABLE(clazz) \ |
| { \ |
| Map fake_handle; \ |
| builtin_vtables_[k##clazz##Cid] = fake_handle.vtable(); \ |
| } |
| CLASS_LIST_MAPS(INIT_VTABLE) |
| #undef INIT_VTABLE |
| |
| #define INIT_VTABLE(clazz) \ |
| { \ |
| Set fake_handle; \ |
| builtin_vtables_[k##clazz##Cid] = fake_handle.vtable(); \ |
| } |
| CLASS_LIST_SETS(INIT_VTABLE) |
| #undef INIT_VTABLE |
| |
| #define INIT_VTABLE(clazz) \ |
| { \ |
| Array fake_handle; \ |
| builtin_vtables_[k##clazz##Cid] = fake_handle.vtable(); \ |
| } |
| CLASS_LIST_FIXED_LENGTH_ARRAYS(INIT_VTABLE) |
| #undef INIT_VTABLE |
| |
| #define INIT_VTABLE(clazz) \ |
| { \ |
| String fake_handle; \ |
| builtin_vtables_[k##clazz##Cid] = fake_handle.vtable(); \ |
| } |
| CLASS_LIST_STRINGS(INIT_VTABLE) |
| #undef INIT_VTABLE |
| |
| { |
| Instance fake_handle; |
| builtin_vtables_[kFfiNativeTypeCid] = fake_handle.vtable(); |
| } |
| |
| #define INIT_VTABLE(clazz) \ |
| { \ |
| Instance fake_handle; \ |
| builtin_vtables_[kFfi##clazz##Cid] = fake_handle.vtable(); \ |
| } |
| CLASS_LIST_FFI_TYPE_MARKER(INIT_VTABLE) |
| #undef INIT_VTABLE |
| |
| { |
| Instance fake_handle; |
| builtin_vtables_[kFfiNativeFunctionCid] = fake_handle.vtable(); |
| } |
| |
| { |
| Pointer fake_handle; |
| builtin_vtables_[kPointerCid] = fake_handle.vtable(); |
| } |
| |
| { |
| DynamicLibrary fake_handle; |
| builtin_vtables_[kDynamicLibraryCid] = fake_handle.vtable(); |
| } |
| |
| #define INIT_VTABLE(clazz) \ |
| { \ |
| TypedData fake_internal_handle; \ |
| builtin_vtables_[kTypedData##clazz##Cid] = fake_internal_handle.vtable(); \ |
| TypedDataView fake_view_handle; \ |
| builtin_vtables_[kTypedData##clazz##ViewCid] = fake_view_handle.vtable(); \ |
| builtin_vtables_[kUnmodifiableTypedData##clazz##ViewCid] = \ |
| fake_view_handle.vtable(); \ |
| ExternalTypedData fake_external_handle; \ |
| builtin_vtables_[kExternalTypedData##clazz##Cid] = \ |
| fake_external_handle.vtable(); \ |
| } |
| CLASS_LIST_TYPED_DATA(INIT_VTABLE) |
| #undef INIT_VTABLE |
| |
| { |
| TypedDataView fake_handle; |
| builtin_vtables_[kByteDataViewCid] = fake_handle.vtable(); |
| builtin_vtables_[kUnmodifiableByteDataViewCid] = fake_handle.vtable(); |
| } |
| |
| { |
| Instance fake_handle; |
| builtin_vtables_[kByteBufferCid] = fake_handle.vtable(); |
| builtin_vtables_[kNullCid] = fake_handle.vtable(); |
| builtin_vtables_[kDynamicCid] = fake_handle.vtable(); |
| builtin_vtables_[kVoidCid] = fake_handle.vtable(); |
| builtin_vtables_[kNeverCid] = fake_handle.vtable(); |
| } |
| } |
| |
| void Object::Init(IsolateGroup* isolate_group) { |
| // Should only be run by the vm isolate. |
| ASSERT(isolate_group == Dart::vm_isolate_group()); |
| Heap* heap = isolate_group->heap(); |
| Thread* thread = Thread::Current(); |
| ASSERT(thread != nullptr); |
| // Ensure lock checks in setters are happy. |
| SafepointWriteRwLocker ml(thread, isolate_group->program_lock()); |
| |
| InitVtables(); |
| |
| // Allocate the read only object handles here. |
| #define INITIALIZE_SHARED_READONLY_HANDLE(Type, name) \ |
| name##_ = Type::ReadOnlyHandle(); |
| SHARED_READONLY_HANDLES_LIST(INITIALIZE_SHARED_READONLY_HANDLE) |
| #undef INITIALIZE_SHARED_READONLY_HANDLE |
| |
| *null_object_ = Object::null(); |
| *null_class_ = Class::null(); |
| *null_array_ = Array::null(); |
| *null_string_ = String::null(); |
| *null_instance_ = Instance::null(); |
| *null_function_ = Function::null(); |
| *null_function_type_ = FunctionType::null(); |
| *null_record_type_ = RecordType::null(); |
| *null_type_arguments_ = TypeArguments::null(); |
| *null_closure_ = Closure::null(); |
| *empty_type_arguments_ = TypeArguments::null(); |
| *null_abstract_type_ = AbstractType::null(); |
| *null_compressed_stackmaps_ = CompressedStackMaps::null(); |
| *bool_true_ = true_; |
| *bool_false_ = false_; |
| |
// Initialize the empty array, empty instantiations cache array, and empty
// subtype test cache array handles to null_ so that it is possible to check
// whether they have been allocated yet (RAW_NULL is not available).
| *empty_array_ = Array::null(); |
| *empty_instantiations_cache_array_ = Array::null(); |
| *empty_subtype_test_cache_array_ = Array::null(); |
| |
| Class& cls = Class::Handle(); |
| |
| // Allocate and initialize the class class. |
| { |
| intptr_t size = Class::InstanceSize(); |
| uword address = heap->Allocate(thread, size, Heap::kOld); |
| class_class_ = static_cast<ClassPtr>(address + kHeapObjectTag); |
| InitializeObject<Class>(address); |
| |
| Class fake; |
| // Initialization from Class::New<Class>. |
| // Directly set ptr_ to break a circular dependency: SetRaw will attempt |
| // to lookup class class in the class table where it is not registered yet. |
| cls.ptr_ = class_class_; |
| ASSERT(builtin_vtables_[kClassCid] == fake.vtable()); |
| cls.set_instance_size( |
| Class::InstanceSize(), |
| compiler::target::RoundedAllocationSize(RTN::Class::InstanceSize())); |
| const intptr_t host_next_field_offset = Class::NextFieldOffset(); |
| const intptr_t target_next_field_offset = RTN::Class::NextFieldOffset(); |
| cls.set_next_field_offset(host_next_field_offset, target_next_field_offset); |
| cls.set_id(Class::kClassId); |
| cls.set_state_bits(0); |
| cls.set_is_allocate_finalized(); |
| cls.set_is_declaration_loaded(); |
| cls.set_is_type_finalized(); |
| cls.set_type_arguments_field_offset_in_words(Class::kNoTypeArguments, |
| RTN::Class::kNoTypeArguments); |
| cls.set_num_type_arguments_unsafe(0); |
| cls.set_num_native_fields(0); |
| cls.InitEmptyFields(); |
| isolate_group->class_table()->Register(cls); |
| } |
| |
| // Allocate and initialize the null class. |
| cls = Class::New<Instance, RTN::Instance>(kNullCid, isolate_group); |
| cls.set_num_type_arguments_unsafe(0); |
| isolate_group->object_store()->set_null_class(cls); |
| |
| // Allocate and initialize Never class. |
| cls = Class::New<Instance, RTN::Instance>(kNeverCid, isolate_group); |
| cls.set_num_type_arguments_unsafe(0); |
| cls.set_is_allocate_finalized(); |
| cls.set_is_declaration_loaded(); |
| cls.set_is_type_finalized(); |
| isolate_group->object_store()->set_never_class(cls); |
| |
| // Allocate and initialize the free list element class. |
| cls = Class::New<FreeListElement::FakeInstance, |
| RTN::FreeListElement::FakeInstance>(kFreeListElement, |
| isolate_group); |
| cls.set_num_type_arguments_unsafe(0); |
| cls.set_is_allocate_finalized(); |
| cls.set_is_declaration_loaded(); |
| cls.set_is_type_finalized(); |
| |
| // Allocate and initialize the forwarding corpse class. |
| cls = Class::New<ForwardingCorpse::FakeInstance, |
| RTN::ForwardingCorpse::FakeInstance>(kForwardingCorpse, |
| isolate_group); |
| cls.set_num_type_arguments_unsafe(0); |
| cls.set_is_allocate_finalized(); |
| cls.set_is_declaration_loaded(); |
| cls.set_is_type_finalized(); |
| |
| // Allocate and initialize Sentinel class. |
| cls = Class::New<Sentinel, RTN::Sentinel>(isolate_group); |
| sentinel_class_ = cls.ptr(); |
| |
| // Allocate and initialize the sentinel values. |
| { |
| *sentinel_ ^= Sentinel::New(); |
| } |
| |
| // Allocate and initialize optimizing compiler constants. |
| { |
| *unknown_constant_ ^= Sentinel::New(); |
| *non_constant_ ^= Sentinel::New(); |
| *optimized_out_ ^= Sentinel::New(); |
| } |
| |
| // Allocate the remaining VM internal classes. |
| cls = Class::New<TypeParameters, RTN::TypeParameters>(isolate_group); |
| type_parameters_class_ = cls.ptr(); |
| |
| cls = Class::New<TypeArguments, RTN::TypeArguments>(isolate_group); |
| type_arguments_class_ = cls.ptr(); |
| |
| cls = Class::New<PatchClass, RTN::PatchClass>(isolate_group); |
| patch_class_class_ = cls.ptr(); |
| |
| cls = Class::New<Function, RTN::Function>(isolate_group); |
| function_class_ = cls.ptr(); |
| |
| cls = Class::New<ClosureData, RTN::ClosureData>(isolate_group); |
| closure_data_class_ = cls.ptr(); |
| |
| cls = Class::New<FfiTrampolineData, RTN::FfiTrampolineData>(isolate_group); |
| ffi_trampoline_data_class_ = cls.ptr(); |
| |
| cls = Class::New<Field, RTN::Field>(isolate_group); |
| field_class_ = cls.ptr(); |
| |
| cls = Class::New<Script, RTN::Script>(isolate_group); |
| script_class_ = cls.ptr(); |
| |
| cls = Class::New<Library, RTN::Library>(isolate_group); |
| library_class_ = cls.ptr(); |
| |
| cls = Class::New<Namespace, RTN::Namespace>(isolate_group); |
| namespace_class_ = cls.ptr(); |
| |
| cls = Class::New<KernelProgramInfo, RTN::KernelProgramInfo>(isolate_group); |
| kernel_program_info_class_ = cls.ptr(); |
| |
| cls = Class::New<Code, RTN::Code>(isolate_group); |
| code_class_ = cls.ptr(); |
| |
| cls = Class::New<Instructions, RTN::Instructions>(isolate_group); |
| instructions_class_ = cls.ptr(); |
| |
| cls = |
| Class::New<InstructionsSection, RTN::InstructionsSection>(isolate_group); |
| instructions_section_class_ = cls.ptr(); |
| |
| cls = Class::New<InstructionsTable, RTN::InstructionsTable>(isolate_group); |
| instructions_table_class_ = cls.ptr(); |
| |
| cls = Class::New<ObjectPool, RTN::ObjectPool>(isolate_group); |
| object_pool_class_ = cls.ptr(); |
| |
| cls = Class::New<PcDescriptors, RTN::PcDescriptors>(isolate_group); |
| pc_descriptors_class_ = cls.ptr(); |
| |
| cls = Class::New<CodeSourceMap, RTN::CodeSourceMap>(isolate_group); |
| code_source_map_class_ = cls.ptr(); |
| |
| cls = |
| Class::New<CompressedStackMaps, RTN::CompressedStackMaps>(isolate_group); |
| compressed_stackmaps_class_ = cls.ptr(); |
| |
| cls = |
| Class::New<LocalVarDescriptors, RTN::LocalVarDescriptors>(isolate_group); |
| var_descriptors_class_ = cls.ptr(); |
| |
| cls = Class::New<ExceptionHandlers, RTN::ExceptionHandlers>(isolate_group); |
| exception_handlers_class_ = cls.ptr(); |
| |
| cls = Class::New<Context, RTN::Context>(isolate_group); |
| context_class_ = cls.ptr(); |
| |
| cls = Class::New<ContextScope, RTN::ContextScope>(isolate_group); |
| context_scope_class_ = cls.ptr(); |
| |
| cls = Class::New<Bytecode, RTN::Bytecode>(isolate_group); |
| bytecode_class_ = cls.ptr(); |
| |
| cls = Class::New<SingleTargetCache, RTN::SingleTargetCache>(isolate_group); |
| singletargetcache_class_ = cls.ptr(); |
| |
| cls = Class::New<UnlinkedCall, RTN::UnlinkedCall>(isolate_group); |
| unlinkedcall_class_ = cls.ptr(); |
| |
| cls = Class::New<MonomorphicSmiableCall, RTN::MonomorphicSmiableCall>( |
| isolate_group); |
| monomorphicsmiablecall_class_ = cls.ptr(); |
| |
| cls = Class::New<ICData, RTN::ICData>(isolate_group); |
| icdata_class_ = cls.ptr(); |
| |
| cls = Class::New<MegamorphicCache, RTN::MegamorphicCache>(isolate_group); |
| megamorphic_cache_class_ = cls.ptr(); |
| |
| cls = Class::New<SubtypeTestCache, RTN::SubtypeTestCache>(isolate_group); |
| subtypetestcache_class_ = cls.ptr(); |
| |
| cls = Class::New<LoadingUnit, RTN::LoadingUnit>(isolate_group); |
| loadingunit_class_ = cls.ptr(); |
| |
| cls = Class::New<ApiError, RTN::ApiError>(isolate_group); |
| api_error_class_ = cls.ptr(); |
| |
| cls = Class::New<LanguageError, RTN::LanguageError>(isolate_group); |
| language_error_class_ = cls.ptr(); |
| |
| cls = Class::New<UnhandledException, RTN::UnhandledException>(isolate_group); |
| unhandled_exception_class_ = cls.ptr(); |
| |
| cls = Class::New<UnwindError, RTN::UnwindError>(isolate_group); |
| unwind_error_class_ = cls.ptr(); |
| |
| cls = Class::New<WeakSerializationReference, RTN::WeakSerializationReference>( |
| isolate_group); |
| weak_serialization_reference_class_ = cls.ptr(); |
| |
| cls = Class::New<WeakArray, RTN::WeakArray>(isolate_group); |
| weak_array_class_ = cls.ptr(); |
| |
| ASSERT(class_class() != null_); |
| |
| // Pre-allocate classes in the vm isolate so that we can for example create a |
| // symbol table and populate it with some frequently used strings as symbols. |
| cls = Class::New<Array, RTN::Array>(isolate_group); |
| isolate_group->object_store()->set_array_class(cls); |
| cls.set_type_arguments_field_offset(Array::type_arguments_offset(), |
| RTN::Array::type_arguments_offset()); |
| cls.set_num_type_arguments_unsafe(1); |
| cls = Class::New<Array, RTN::Array>(kImmutableArrayCid, isolate_group); |
| isolate_group->object_store()->set_immutable_array_class(cls); |
| cls.set_type_arguments_field_offset(Array::type_arguments_offset(), |
| RTN::Array::type_arguments_offset()); |
| cls.set_num_type_arguments_unsafe(1); |
| // In order to be able to canonicalize arguments descriptors early. |
| cls.set_is_prefinalized(); |
| cls = |
| Class::New<GrowableObjectArray, RTN::GrowableObjectArray>(isolate_group); |
| isolate_group->object_store()->set_growable_object_array_class(cls); |
| cls.set_type_arguments_field_offset( |
| GrowableObjectArray::type_arguments_offset(), |
| RTN::GrowableObjectArray::type_arguments_offset()); |
| cls.set_num_type_arguments_unsafe(1); |
| cls = Class::NewStringClass(kOneByteStringCid, isolate_group); |
| isolate_group->object_store()->set_one_byte_string_class(cls); |
| cls = Class::NewStringClass(kTwoByteStringCid, isolate_group); |
| isolate_group->object_store()->set_two_byte_string_class(cls); |
| cls = Class::New<Mint, RTN::Mint>(isolate_group); |
| isolate_group->object_store()->set_mint_class(cls); |
| cls = Class::New<Double, RTN::Double>(isolate_group); |
| isolate_group->object_store()->set_double_class(cls); |
| cls = Class::New<Float32x4, RTN::Float32x4>(isolate_group); |
| isolate_group->object_store()->set_float32x4_class(cls); |
| cls = Class::New<Float64x2, RTN::Float64x2>(isolate_group); |
| isolate_group->object_store()->set_float64x2_class(cls); |
| cls = Class::New<Int32x4, RTN::Int32x4>(isolate_group); |
| isolate_group->object_store()->set_int32x4_class(cls); |
| |
| // Ensure that class kExternalTypedDataUint8ArrayCid is registered as we |
| // need it when reading in the token stream of bootstrap classes in the VM |
| // isolate. |
| Class::NewExternalTypedDataClass(kExternalTypedDataUint8ArrayCid, |
| isolate_group); |
| |
| // Needed for object pools of VM isolate stubs. |
| Class::NewTypedDataClass(kTypedDataInt8ArrayCid, isolate_group); |
| |
| // Allocate and initialize the empty_array instance. |
| { |
| uword address = heap->Allocate(thread, Array::InstanceSize(0), Heap::kOld); |
| InitializeObjectVariant<Array>(address, kImmutableArrayCid, 0); |
| Array::initializeHandle(empty_array_, |
| static_cast<ArrayPtr>(address + kHeapObjectTag)); |
| empty_array_->untag()->set_length(Smi::New(0)); |
| empty_array_->SetCanonical(); |
| } |
| |
| Smi& smi = Smi::Handle(); |
| // Allocate and initialize the empty instantiations cache array instance, |
| // which contains metadata as the first element and a sentinel value |
| // at the start of the first entry. |
| { |
| const intptr_t array_size = |
| TypeArguments::Cache::kHeaderSize + TypeArguments::Cache::kEntrySize; |
| uword address = |
| heap->Allocate(thread, Array::InstanceSize(array_size), Heap::kOld); |
| InitializeObjectVariant<Array>(address, kImmutableArrayCid, array_size); |
| Array::initializeHandle(empty_instantiations_cache_array_, |
| static_cast<ArrayPtr>(address + kHeapObjectTag)); |
| empty_instantiations_cache_array_->untag()->set_length( |
| Smi::New(array_size)); |
| // The empty cache has no occupied entries and is not a hash-based cache. |
| smi = Smi::New(0); |
| empty_instantiations_cache_array_->SetAt( |
| TypeArguments::Cache::kMetadataIndex, smi); |
| // Make the first (and only) entry unoccupied by setting its first element |
| // to the sentinel value. |
| smi = TypeArguments::Cache::Sentinel(); |
| InstantiationsCacheTable table(*empty_instantiations_cache_array_); |
| table.At(0).Set<TypeArguments::Cache::kSentinelIndex>(smi); |
| // The other contents of the array are immaterial. |
| empty_instantiations_cache_array_->SetCanonical(); |
| } |
| |
| // Allocate and initialize the empty subtype test cache array instance, |
| // which contains a single unoccupied entry. |
| { |
| const intptr_t array_size = SubtypeTestCache::kTestEntryLength; |
| uword address = |
| heap->Allocate(thread, Array::InstanceSize(array_size), Heap::kOld); |
| InitializeObjectVariant<Array>(address, kImmutableArrayCid, array_size); |
| Array::initializeHandle(empty_subtype_test_cache_array_, |
| static_cast<ArrayPtr>(address + kHeapObjectTag)); |
| empty_subtype_test_cache_array_->untag()->set_length(Smi::New(array_size)); |
| // Make the first (and only) entry unoccupied by setting its first element |
| // to the null value. |
| SubtypeTestCacheTable table(*empty_subtype_test_cache_array_); |
| table.At(0).Set<SubtypeTestCache::kInstanceCidOrSignature>( |
| Object::null_object()); |
| // The other contents of the array are immaterial. |
| empty_subtype_test_cache_array_->SetCanonical(); |
| } |
| |
| // Allocate and initialize the canonical empty context scope object. |
| { |
| uword address = |
| heap->Allocate(thread, ContextScope::InstanceSize(0), Heap::kOld); |
| InitializeObject<ContextScope>(address, 0); |
| ContextScope::initializeHandle( |
| empty_context_scope_, |
| static_cast<ContextScopePtr>(address + kHeapObjectTag)); |
| empty_context_scope_->StoreNonPointer( |
| &empty_context_scope_->untag()->num_variables_, 0); |
| empty_context_scope_->StoreNonPointer( |
| &empty_context_scope_->untag()->is_implicit_, true); |
| empty_context_scope_->SetCanonical(); |
| } |
| |
| // Allocate and initialize the canonical empty object pool object. |
| { |
| uword address = |
| heap->Allocate(thread, ObjectPool::InstanceSize(0), Heap::kOld); |
| InitializeObject<ObjectPool>(address, 0); |
| ObjectPool::initializeHandle( |
| empty_object_pool_, |
| static_cast<ObjectPoolPtr>(address + kHeapObjectTag)); |
| empty_object_pool_->StoreNonPointer(&empty_object_pool_->untag()->length_, |
| 0); |
| empty_object_pool_->SetCanonical(); |
| } |
| |
| // Allocate and initialize the empty_compressed_stackmaps instance. |
| { |
| const intptr_t instance_size = CompressedStackMaps::InstanceSize(0); |
| uword address = heap->Allocate(thread, instance_size, Heap::kOld); |
| InitializeObject<CompressedStackMaps>(address, 0); |
| CompressedStackMaps::initializeHandle( |
| empty_compressed_stackmaps_, |
| static_cast<CompressedStackMapsPtr>(address + kHeapObjectTag)); |
| empty_compressed_stackmaps_->untag()->payload()->set_flags_and_size(0); |
| empty_compressed_stackmaps_->SetCanonical(); |
| } |
| |
| // Allocate and initialize the empty_descriptors instance. |
| { |
| uword address = |
| heap->Allocate(thread, PcDescriptors::InstanceSize(0), Heap::kOld); |
| InitializeObject<PcDescriptors>(address, 0); |
| PcDescriptors::initializeHandle( |
| empty_descriptors_, |
| static_cast<PcDescriptorsPtr>(address + kHeapObjectTag)); |
| empty_descriptors_->StoreNonPointer(&empty_descriptors_->untag()->length_, |
| 0); |
| empty_descriptors_->SetCanonical(); |
| } |
| |
| // Allocate and initialize the canonical empty variable descriptor object. |
| { |
| uword address = heap->Allocate(thread, LocalVarDescriptors::InstanceSize(0), |
| Heap::kOld); |
| InitializeObject<LocalVarDescriptors>(address, 0); |
| LocalVarDescriptors::initializeHandle( |
| empty_var_descriptors_, |
| static_cast<LocalVarDescriptorsPtr>(address + kHeapObjectTag)); |
| empty_var_descriptors_->StoreNonPointer( |
| &empty_var_descriptors_->untag()->num_entries_, 0); |
| empty_var_descriptors_->SetCanonical(); |
| } |
| |
| // Allocate and initialize the canonical empty exception handler info object. |
| // The vast majority of all functions do not contain an exception handler |
| // and can share this canonical descriptor. |
| { |
| uword address = |
| heap->Allocate(thread, ExceptionHandlers::InstanceSize(0), Heap::kOld); |
| InitializeObject<ExceptionHandlers>(address, 0); |
| ExceptionHandlers::initializeHandle( |
| empty_exception_handlers_, |
| static_cast<ExceptionHandlersPtr>(address + kHeapObjectTag)); |
| empty_exception_handlers_->StoreNonPointer( |
| &empty_exception_handlers_->untag()->packed_fields_, 0); |
| empty_exception_handlers_->SetCanonical(); |
| } |
| |
| // Empty exception handlers for async/async* functions. |
| { |
| uword address = |
| heap->Allocate(thread, ExceptionHandlers::InstanceSize(0), Heap::kOld); |
| InitializeObject<ExceptionHandlers>(address, 0); |
| ExceptionHandlers::initializeHandle( |
| empty_async_exception_handlers_, |
| static_cast<ExceptionHandlersPtr>(address + kHeapObjectTag)); |
| empty_async_exception_handlers_->StoreNonPointer( |
| &empty_async_exception_handlers_->untag()->packed_fields_, |
| UntaggedExceptionHandlers::AsyncHandlerBit::update(true, 0)); |
| empty_async_exception_handlers_->SetCanonical(); |
| } |
| |
| // Allocate and initialize the canonical empty type arguments object. |
| { |
| uword address = |
| heap->Allocate(thread, TypeArguments::InstanceSize(0), Heap::kOld); |
| InitializeObject<TypeArguments>(address, 0); |
| TypeArguments::initializeHandle( |
| empty_type_arguments_, |
| static_cast<TypeArgumentsPtr>(address + kHeapObjectTag)); |
| empty_type_arguments_->untag()->set_length(Smi::New(0)); |
| empty_type_arguments_->untag()->set_hash(Smi::New(0)); |
| empty_type_arguments_->ComputeHash(); |
| empty_type_arguments_->SetCanonical(); |
| } |
| |
| // The VM isolate snapshot object table is initialized to an empty array |
| // as we do not have any VM isolate snapshot at this time. |
| *vm_isolate_snapshot_object_table_ = Object::empty_array().ptr(); |
| |
| cls = Class::New<Instance, RTN::Instance>(kDynamicCid, isolate_group); |
| cls.set_is_abstract(); |
| cls.set_num_type_arguments_unsafe(0); |
| cls.set_is_allocate_finalized(); |
| cls.set_is_declaration_loaded(); |
| cls.set_is_type_finalized(); |
| dynamic_class_ = cls.ptr(); |
| |
| cls = Class::New<Instance, RTN::Instance>(kVoidCid, isolate_group); |
| cls.set_num_type_arguments_unsafe(0); |
| cls.set_is_allocate_finalized(); |
| cls.set_is_declaration_loaded(); |
| cls.set_is_type_finalized(); |
| void_class_ = cls.ptr(); |
| |
| cls = Class::New<Type, RTN::Type>(isolate_group); |
| cls.set_is_allocate_finalized(); |
| cls.set_is_declaration_loaded(); |
| cls.set_is_type_finalized(); |
| |
| cls = Class::New<FunctionType, RTN::FunctionType>(isolate_group); |
| cls.set_is_allocate_finalized(); |
| cls.set_is_declaration_loaded(); |
| cls.set_is_type_finalized(); |
| |
| cls = Class::New<RecordType, RTN::RecordType>(isolate_group); |
| cls.set_is_allocate_finalized(); |
| cls.set_is_declaration_loaded(); |
| cls.set_is_type_finalized(); |
| |
| cls = dynamic_class_; |
| *dynamic_type_ = |
| Type::New(cls, Object::null_type_arguments(), Nullability::kNullable); |
| dynamic_type_->SetIsFinalized(); |
| dynamic_type_->ComputeHash(); |
| dynamic_type_->SetCanonical(); |
| |
| cls = void_class_; |
| *void_type_ = |
| Type::New(cls, Object::null_type_arguments(), Nullability::kNullable); |
| void_type_->SetIsFinalized(); |
| void_type_->ComputeHash(); |
| void_type_->SetCanonical(); |
| |
| // Since TypeArguments objects are passed as function arguments, make them |
| // behave as Dart instances, although they are just VM objects. |
| // Note that we cannot set the super type to ObjectType, which does not live |
| // in the vm isolate. See special handling in Class::SuperClass(). |
| cls = type_arguments_class_; |
| cls.set_interfaces(Object::empty_array()); |
| cls.SetFields(Object::empty_array()); |
| cls.SetFunctions(Object::empty_array()); |
| |
| cls = Class::New<Bool, RTN::Bool>(isolate_group); |
| isolate_group->object_store()->set_bool_class(cls); |
| |
| *smi_illegal_cid_ = Smi::New(kIllegalCid); |
| *smi_zero_ = Smi::New(0); |
| |
| String& error_str = String::Handle(); |
| error_str = String::New( |
| "Callbacks into the Dart VM are currently prohibited. Either there are " |
| "outstanding pointers from Dart_TypedDataAcquireData that have not been " |
| "released with Dart_TypedDataReleaseData, or a finalizer is running.", |
| Heap::kOld); |
| *no_callbacks_error_ = ApiError::New(error_str, Heap::kOld); |
| error_str = String::New( |
| "No api calls are allowed while unwind is in progress", Heap::kOld); |
| *unwind_in_progress_error_ = UnwindError::New(error_str, Heap::kOld); |
| error_str = String::New("SnapshotWriter Error", Heap::kOld); |
| *snapshot_writer_error_ = |
| LanguageError::New(error_str, Report::kError, Heap::kOld); |
| error_str = String::New("Branch offset overflow", Heap::kOld); |
| *branch_offset_error_ = |
| LanguageError::New(error_str, Report::kBailout, Heap::kOld); |
| error_str = String::New("Speculative inlining failed", Heap::kOld); |
| *speculative_inlining_error_ = |
| LanguageError::New(error_str, Report::kBailout, Heap::kOld); |
| error_str = String::New("Background Compilation Failed", Heap::kOld); |
| *background_compilation_error_ = |
| LanguageError::New(error_str, Report::kBailout, Heap::kOld); |
| error_str = String::New("No debuggable code where breakpoint was requested", |
| Heap::kOld); |
| *no_debuggable_code_error_ = |
| LanguageError::New(error_str, Report::kError, Heap::kOld); |
| error_str = String::New("Out of memory", Heap::kOld); |
| *out_of_memory_error_ = |
| LanguageError::New(error_str, Report::kError, Heap::kOld); |
| |
| // Allocate the parameter types and names for synthetic getters. |
| *synthetic_getter_parameter_types_ = Array::New(1, Heap::kOld); |
| synthetic_getter_parameter_types_->SetAt(0, Object::dynamic_type()); |
| *synthetic_getter_parameter_names_ = Array::New(1, Heap::kOld); |
| // Fill in synthetic_getter_parameter_names_ later, after symbols are |
| // initialized (in Object::FinalizeVMIsolate). |
| // synthetic_getter_parameter_names_ object needs to be created earlier as |
| // VM isolate snapshot reader references it before Object::FinalizeVMIsolate. |
| |
| #if defined(DART_DYNAMIC_MODULES) |
| *implicit_getter_bytecode_ = |
| CreateVMInternalBytecode(KernelBytecode::kVMInternal_ImplicitGetter); |
| |
| *implicit_setter_bytecode_ = |
| CreateVMInternalBytecode(KernelBytecode::kVMInternal_ImplicitSetter); |
| |
| *implicit_static_getter_bytecode_ = CreateVMInternalBytecode( |
| KernelBytecode::kVMInternal_ImplicitStaticGetter); |
| |
| *implicit_static_setter_bytecode_ = CreateVMInternalBytecode( |
| KernelBytecode::kVMInternal_ImplicitStaticSetter); |
| |
| *method_extractor_bytecode_ = |
| CreateVMInternalBytecode(KernelBytecode::kVMInternal_MethodExtractor); |
| |
| *invoke_closure_bytecode_ = |
| CreateVMInternalBytecode(KernelBytecode::kVMInternal_InvokeClosure); |
| |
| *invoke_field_bytecode_ = |
| CreateVMInternalBytecode(KernelBytecode::kVMInternal_InvokeField); |
| |
| *nsm_dispatcher_bytecode_ = CreateVMInternalBytecode( |
| KernelBytecode::kVMInternal_NoSuchMethodDispatcher); |
| |
| *dynamic_invocation_forwarder_bytecode_ = CreateVMInternalBytecode( |
| KernelBytecode::kVMInternal_ForwardDynamicInvocation); |
| |
| *implicit_static_closure_bytecode_ = CreateVMInternalBytecode( |
| KernelBytecode::kVMInternal_ImplicitStaticClosure); |
| |
| *implicit_instance_closure_bytecode_ = CreateVMInternalBytecode( |
| KernelBytecode::kVMInternal_ImplicitInstanceClosure); |
| |
| *implicit_constructor_closure_bytecode_ = CreateVMInternalBytecode( |
| KernelBytecode::kVMInternal_ImplicitConstructorClosure); |
| #endif // defined(DART_DYNAMIC_MODULES) |
| |
// Some thread fields need to be reinitialized as the null constants were not
// initialized until now.
| thread->ClearStickyError(); |
| |
| ASSERT(!null_object_->IsSmi()); |
| ASSERT(!null_class_->IsSmi()); |
| ASSERT(null_class_->IsClass()); |
| ASSERT(!null_array_->IsSmi()); |
| ASSERT(null_array_->IsArray()); |
| ASSERT(!null_string_->IsSmi()); |
| ASSERT(null_string_->IsString()); |
| ASSERT(!null_instance_->IsSmi()); |
| ASSERT(null_instance_->IsInstance()); |
| ASSERT(!null_function_->IsSmi()); |
| ASSERT(null_function_->IsFunction()); |
| ASSERT(!null_function_type_->IsSmi()); |
| ASSERT(null_function_type_->IsFunctionType()); |
| ASSERT(!null_record_type_->IsSmi()); |
| ASSERT(null_record_type_->IsRecordType()); |
| ASSERT(!null_type_arguments_->IsSmi()); |
| ASSERT(null_type_arguments_->IsTypeArguments()); |
| ASSERT(!null_compressed_stackmaps_->IsSmi()); |
| ASSERT(null_compressed_stackmaps_->IsCompressedStackMaps()); |
| ASSERT(!empty_array_->IsSmi()); |
| ASSERT(empty_array_->IsArray()); |
| ASSERT(!empty_instantiations_cache_array_->IsSmi()); |
| ASSERT(empty_instantiations_cache_array_->IsArray()); |
| ASSERT(!empty_subtype_test_cache_array_->IsSmi()); |
| ASSERT(empty_subtype_test_cache_array_->IsArray()); |
| ASSERT(!empty_type_arguments_->IsSmi()); |
| ASSERT(empty_type_arguments_->IsTypeArguments()); |
| ASSERT(!empty_context_scope_->IsSmi()); |
| ASSERT(empty_context_scope_->IsContextScope()); |
| ASSERT(!empty_compressed_stackmaps_->IsSmi()); |
| ASSERT(empty_compressed_stackmaps_->IsCompressedStackMaps()); |
| ASSERT(!empty_descriptors_->IsSmi()); |
| ASSERT(empty_descriptors_->IsPcDescriptors()); |
| ASSERT(!empty_var_descriptors_->IsSmi()); |
| ASSERT(empty_var_descriptors_->IsLocalVarDescriptors()); |
| ASSERT(!empty_exception_handlers_->IsSmi()); |
| ASSERT(empty_exception_handlers_->IsExceptionHandlers()); |
| ASSERT(!empty_async_exception_handlers_->IsSmi()); |
| ASSERT(empty_async_exception_handlers_->IsExceptionHandlers()); |
| ASSERT(!sentinel_->IsSmi()); |
| ASSERT(sentinel_->IsSentinel()); |
| ASSERT(!unknown_constant_->IsSmi()); |
| ASSERT(unknown_constant_->IsSentinel()); |
| ASSERT(!non_constant_->IsSmi()); |
| ASSERT(non_constant_->IsSentinel()); |
| ASSERT(!optimized_out_->IsSmi()); |
| ASSERT(optimized_out_->IsSentinel()); |
| ASSERT(!bool_true_->IsSmi()); |
| ASSERT(bool_true_->IsBool()); |
| ASSERT(!bool_false_->IsSmi()); |
| ASSERT(bool_false_->IsBool()); |
| ASSERT(smi_illegal_cid_->IsSmi()); |
| ASSERT(smi_zero_->IsSmi()); |
| ASSERT(!no_callbacks_error_->IsSmi()); |
| ASSERT(no_callbacks_error_->IsApiError()); |
| ASSERT(!unwind_in_progress_error_->IsSmi()); |
| ASSERT(unwind_in_progress_error_->IsUnwindError()); |
| ASSERT(!snapshot_writer_error_->IsSmi()); |
| ASSERT(snapshot_writer_error_->IsLanguageError()); |
| ASSERT(!branch_offset_error_->IsSmi()); |
| ASSERT(branch_offset_error_->IsLanguageError()); |
| ASSERT(!speculative_inlining_error_->IsSmi()); |
| ASSERT(speculative_inlining_error_->IsLanguageError()); |
| ASSERT(!background_compilation_error_->IsSmi()); |
| ASSERT(background_compilation_error_->IsLanguageError()); |
| ASSERT(!out_of_memory_error_->IsSmi()); |
| ASSERT(out_of_memory_error_->IsLanguageError()); |
| ASSERT(!vm_isolate_snapshot_object_table_->IsSmi()); |
| ASSERT(vm_isolate_snapshot_object_table_->IsArray()); |
| ASSERT(!synthetic_getter_parameter_types_->IsSmi()); |
| ASSERT(synthetic_getter_parameter_types_->IsArray()); |
| ASSERT(!synthetic_getter_parameter_names_->IsSmi()); |
| ASSERT(synthetic_getter_parameter_names_->IsArray()); |
#if defined(DART_DYNAMIC_MODULES)
ASSERT(!implicit_getter_bytecode_->IsSmi());
ASSERT(implicit_getter_bytecode_->IsBytecode());
ASSERT(!implicit_setter_bytecode_->IsSmi());
ASSERT(implicit_setter_bytecode_->IsBytecode());
ASSERT(!implicit_static_getter_bytecode_->IsSmi());
ASSERT(implicit_static_getter_bytecode_->IsBytecode());
ASSERT(!implicit_static_setter_bytecode_->IsSmi());
ASSERT(implicit_static_setter_bytecode_->IsBytecode());
ASSERT(!method_extractor_bytecode_->IsSmi());
ASSERT(method_extractor_bytecode_->IsBytecode());
ASSERT(!invoke_closure_bytecode_->IsSmi());
ASSERT(invoke_closure_bytecode_->IsBytecode());
ASSERT(!invoke_field_bytecode_->IsSmi());
ASSERT(invoke_field_bytecode_->IsBytecode());
ASSERT(!nsm_dispatcher_bytecode_->IsSmi());
ASSERT(nsm_dispatcher_bytecode_->IsBytecode());
ASSERT(!dynamic_invocation_forwarder_bytecode_->IsSmi());
ASSERT(dynamic_invocation_forwarder_bytecode_->IsBytecode());
ASSERT(!implicit_static_closure_bytecode_->IsSmi());
ASSERT(implicit_static_closure_bytecode_->IsBytecode());
ASSERT(!implicit_instance_closure_bytecode_->IsSmi());
ASSERT(implicit_instance_closure_bytecode_->IsBytecode());
ASSERT(!implicit_constructor_closure_bytecode_->IsSmi());
ASSERT(implicit_constructor_closure_bytecode_->IsBytecode());
#endif  // defined(DART_DYNAMIC_MODULES)
| } |
| |
| void Object::FinishInit(IsolateGroup* isolate_group) { |
| // The type testing stubs we initialize in AbstractType objects for the |
| // canonical type of kDynamicCid/kVoidCid need to be set in this |
| // method, which is called after StubCode::InitOnce(). |
| Code& code = Code::Handle(); |
| |
| code = TypeTestingStubGenerator::DefaultCodeForType(*dynamic_type_); |
| dynamic_type_->InitializeTypeTestingStubNonAtomic(code); |
| |
| code = TypeTestingStubGenerator::DefaultCodeForType(*void_type_); |
| void_type_->InitializeTypeTestingStubNonAtomic(code); |
| } |
| |
| void Object::Cleanup() { |
| null_ = static_cast<ObjectPtr>(RAW_NULL); |
| true_ = static_cast<BoolPtr>(RAW_NULL); |
| false_ = static_cast<BoolPtr>(RAW_NULL); |
| class_class_ = static_cast<ClassPtr>(RAW_NULL); |
| dynamic_class_ = static_cast<ClassPtr>(RAW_NULL); |
| void_class_ = static_cast<ClassPtr>(RAW_NULL); |
| type_parameters_class_ = static_cast<ClassPtr>(RAW_NULL); |
| type_arguments_class_ = static_cast<ClassPtr>(RAW_NULL); |
| patch_class_class_ = static_cast<ClassPtr>(RAW_NULL); |
| function_class_ = static_cast<ClassPtr>(RAW_NULL); |
| closure_data_class_ = static_cast<ClassPtr>(RAW_NULL); |
| ffi_trampoline_data_class_ = static_cast<ClassPtr>(RAW_NULL); |
| field_class_ = static_cast<ClassPtr>(RAW_NULL); |
| script_class_ = static_cast<ClassPtr>(RAW_NULL); |
| library_class_ = static_cast<ClassPtr>(RAW_NULL); |
| namespace_class_ = static_cast<ClassPtr>(RAW_NULL); |
| kernel_program_info_class_ = static_cast<ClassPtr>(RAW_NULL); |
| code_class_ = static_cast<ClassPtr>(RAW_NULL); |
| instructions_class_ = static_cast<ClassPtr>(RAW_NULL); |
| instructions_section_class_ = static_cast<ClassPtr>(RAW_NULL); |
| instructions_table_class_ = static_cast<ClassPtr>(RAW_NULL); |
| object_pool_class_ = static_cast<ClassPtr>(RAW_NULL); |
| pc_descriptors_class_ = static_cast<ClassPtr>(RAW_NULL); |
| code_source_map_class_ = static_cast<ClassPtr>(RAW_NULL); |
| compressed_stackmaps_class_ = static_cast<ClassPtr>(RAW_NULL); |
| var_descriptors_class_ = static_cast<ClassPtr>(RAW_NULL); |
| exception_handlers_class_ = static_cast<ClassPtr>(RAW_NULL); |
| context_class_ = static_cast<ClassPtr>(RAW_NULL); |
| context_scope_class_ = static_cast<ClassPtr>(RAW_NULL); |
| bytecode_class_ = static_cast<ClassPtr>(RAW_NULL); |
| singletargetcache_class_ = static_cast<ClassPtr>(RAW_NULL); |
| unlinkedcall_class_ = static_cast<ClassPtr>(RAW_NULL); |
| monomorphicsmiablecall_class_ = static_cast<ClassPtr>(RAW_NULL); |
| icdata_class_ = static_cast<ClassPtr>(RAW_NULL); |
| megamorphic_cache_class_ = static_cast<ClassPtr>(RAW_NULL); |
| subtypetestcache_class_ = static_cast<ClassPtr>(RAW_NULL); |
| loadingunit_class_ = static_cast<ClassPtr>(RAW_NULL); |
| api_error_class_ = static_cast<ClassPtr>(RAW_NULL); |
| language_error_class_ = static_cast<ClassPtr>(RAW_NULL); |
| unhandled_exception_class_ = static_cast<ClassPtr>(RAW_NULL); |
| unwind_error_class_ = static_cast<ClassPtr>(RAW_NULL); |
| } |
| |
| // An object visitor which will mark all visited objects. This is used to |
| // premark all objects in the vm_isolate_ heap. It also precalculates hash |
| // codes so that we can get the identity hash code of objects in the |
| // read-only VM isolate. |
| class FinalizeVMIsolateVisitor : public ObjectVisitor { |
| public: |
| FinalizeVMIsolateVisitor() |
| #if defined(HASH_IN_OBJECT_HEADER) |
| : counter_(1337) |
| #endif |
| { |
| } |
| |
| void VisitObject(ObjectPtr obj) { |
| // Free list elements should never be marked. |
| ASSERT(!obj->untag()->IsMarked()); |
| // No forwarding corpses in the VM isolate. |
| ASSERT(!obj->IsForwardingCorpse()); |
| if (!obj->IsFreeListElement()) { |
| obj->untag()->SetMarkBitUnsynchronized(); |
| Object::FinalizeReadOnlyObject(obj); |
| #if defined(HASH_IN_OBJECT_HEADER) |
| // These objects end up in the read-only VM isolate which is shared |
| // between isolates, so we have to prepopulate them with identity hash |
| // codes, since we can't add hash codes later. |
| if (Object::GetCachedHash(obj) == 0) { |
| // Some classes have identity hash codes that depend on their contents |
| // rather than being assigned per object. |
| ASSERT(!obj->IsStringInstance()); |
| if (obj == Object::null()) { |
| Object::SetCachedHashIfNotSet(obj, kNullIdentityHash); |
| } else if (obj == Object::bool_true().ptr()) { |
| Object::SetCachedHashIfNotSet(obj, kTrueIdentityHash); |
| } else if (obj == Object::bool_false().ptr()) { |
| Object::SetCachedHashIfNotSet(obj, kFalseIdentityHash); |
| } else if (!obj->IsMint() && !obj->IsDouble()) { |
| counter_ += 2011; // The year Dart was announced and a prime. |
| counter_ &= 0x3fffffff; |
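| // The mask keeps the counter within 30 bits; 0 is skipped below because a |
| // cached hash of 0 means "not set" (see the check above). |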
| if (counter_ == 0) counter_++; |
| Object::SetCachedHashIfNotSet(obj, counter_); |
| } |
| } |
| #endif |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| if (obj->IsClass()) { |
| // Won't be able to update read-only VM isolate classes if implementors |
| // are discovered later. We use kVoidCid instead of kDynamicCid here to |
| // be able to distinguish read-only VM isolate classes during reload. |
| // See ProgramReloadContext::RestoreClassHierarchyInvariants. |
| static_cast<ClassPtr>(obj)->untag()->implementor_cid_ = kVoidCid; |
| } |
| #endif |
| } |
| } |
| |
| private: |
| #if defined(HASH_IN_OBJECT_HEADER) |
| int32_t counter_; |
| #endif |
| }; |
| |
| #define SET_CLASS_NAME(class_name, name) \ |
| cls = class_name##_class(); \ |
| cls.set_name(Symbols::name()); |
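| // For example, SET_CLASS_NAME(class, Class) expands to: |
| //   cls = class_class(); |
| //   cls.set_name(Symbols::Class()); |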
| |
| void Object::FinalizeVMIsolate(IsolateGroup* isolate_group) { |
| // Should only be run by the vm isolate. |
| ASSERT(isolate_group == Dart::vm_isolate_group()); |
| |
| // Finish initialization of synthetic_getter_parameter_names_, which was |
| // started in Object::InitOnce(). |
| synthetic_getter_parameter_names_->SetAt(0, Symbols::This()); |
| |
| // Set up names for all VM singleton classes. |
| Class& cls = Class::Handle(); |
| |
| SET_CLASS_NAME(class, Class); |
| SET_CLASS_NAME(dynamic, Dynamic); |
| SET_CLASS_NAME(void, Void); |
| SET_CLASS_NAME(type_parameters, TypeParameters); |
| SET_CLASS_NAME(type_arguments, TypeArguments); |
| SET_CLASS_NAME(patch_class, PatchClass); |
| SET_CLASS_NAME(function, Function); |
| SET_CLASS_NAME(closure_data, ClosureData); |
| SET_CLASS_NAME(ffi_trampoline_data, FfiTrampolineData); |
| SET_CLASS_NAME(field, Field); |
| SET_CLASS_NAME(script, Script); |
| SET_CLASS_NAME(library, LibraryClass); |
| SET_CLASS_NAME(namespace, Namespace); |
| SET_CLASS_NAME(kernel_program_info, KernelProgramInfo); |
| SET_CLASS_NAME(weak_serialization_reference, WeakSerializationReference); |
| SET_CLASS_NAME(weak_array, WeakArray); |
| SET_CLASS_NAME(code, Code); |
| SET_CLASS_NAME(instructions, Instructions); |
| SET_CLASS_NAME(instructions_section, InstructionsSection); |
| SET_CLASS_NAME(instructions_table, InstructionsTable); |
| SET_CLASS_NAME(object_pool, ObjectPool); |
| SET_CLASS_NAME(code_source_map, CodeSourceMap); |
| SET_CLASS_NAME(pc_descriptors, PcDescriptors); |
| SET_CLASS_NAME(compressed_stackmaps, CompressedStackMaps); |
| SET_CLASS_NAME(var_descriptors, LocalVarDescriptors); |
| SET_CLASS_NAME(exception_handlers, ExceptionHandlers); |
| SET_CLASS_NAME(context, Context); |
| SET_CLASS_NAME(context_scope, ContextScope); |
| SET_CLASS_NAME(bytecode, Bytecode); |
| SET_CLASS_NAME(sentinel, Sentinel); |
| SET_CLASS_NAME(singletargetcache, SingleTargetCache); |
| SET_CLASS_NAME(unlinkedcall, UnlinkedCall); |
| SET_CLASS_NAME(monomorphicsmiablecall, MonomorphicSmiableCall); |
| SET_CLASS_NAME(icdata, ICData); |
| SET_CLASS_NAME(megamorphic_cache, MegamorphicCache); |
| SET_CLASS_NAME(subtypetestcache, SubtypeTestCache); |
| SET_CLASS_NAME(loadingunit, LoadingUnit); |
| SET_CLASS_NAME(api_error, ApiError); |
| SET_CLASS_NAME(language_error, LanguageError); |
| SET_CLASS_NAME(unhandled_exception, UnhandledException); |
| SET_CLASS_NAME(unwind_error, UnwindError); |
| |
| // Set up names for classes which are also pre-allocated in the vm isolate. |
| cls = isolate_group->object_store()->array_class(); |
| cls.set_name(Symbols::_List()); |
| cls = isolate_group->object_store()->one_byte_string_class(); |
| cls.set_name(Symbols::OneByteString()); |
| cls = isolate_group->object_store()->never_class(); |
| cls.set_name(Symbols::Never()); |
| |
| // Set up names for the pseudo-classes for free list elements and forwarding |
| // corpses. Mainly this makes VM debugging easier. |
| cls = isolate_group->class_table()->At(kFreeListElement); |
| cls.set_name(Symbols::FreeListElement()); |
| cls = isolate_group->class_table()->At(kForwardingCorpse); |
| cls.set_name(Symbols::ForwardingCorpse()); |
| |
| #if defined(DART_PRECOMPILER) |
| const auto& function = |
| Function::Handle(StubCode::UnknownDartCode().function()); |
| function.set_name(Symbols::OptimizedOut()); |
| #endif // defined(DART_PRECOMPILER) |
| |
| { |
| ASSERT(isolate_group == Dart::vm_isolate_group()); |
| Thread* thread = Thread::Current(); |
| WritableVMIsolateScope scope(thread); |
| HeapIterationScope iteration(thread); |
| FinalizeVMIsolateVisitor premarker; |
| ASSERT(isolate_group->heap()->UsedInWords(Heap::kNew) == 0); |
| iteration.IterateOldObjectsNoImagePages(&premarker); |
| // Make the VM isolate read-only again after setting all objects as marked. |
| // Note objects in image pages are already pre-marked. |
| } |
| } |
| |
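| // Caches hashes and zeroes trailing padding for variable-length read-only |
| // objects. Hashes must be computed up front because they cannot be set |
| // later in the read-only heap; zeroing the padding (presumably) keeps the |
| // bit patterns of otherwise-equal objects deterministic. |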
| void Object::FinalizeReadOnlyObject(ObjectPtr object) { |
| NoSafepointScope no_safepoint; |
| intptr_t cid = object->GetClassIdOfHeapObject(); |
| if (cid == kOneByteStringCid) { |
| OneByteStringPtr str = static_cast<OneByteStringPtr>(object); |
| if (String::GetCachedHash(str) == 0) { |
| intptr_t hash = String::Hash(str); |
| String::SetCachedHashIfNotSet(str, hash); |
| } |
| intptr_t size = OneByteString::UnroundedSize(str); |
| ASSERT(size <= str->untag()->HeapSize()); |
| memset(reinterpret_cast<void*>(UntaggedObject::ToAddr(str) + size), 0, |
| str->untag()->HeapSize() - size); |
| } else if (cid == kTwoByteStringCid) { |
| TwoByteStringPtr str = static_cast<TwoByteStringPtr>(object); |
| if (String::GetCachedHash(str) == 0) { |
| intptr_t hash = String::Hash(str); |
| String::SetCachedHashIfNotSet(str, hash); |
| } |
| ASSERT(String::GetCachedHash(str) != 0); |
| intptr_t size = TwoByteString::UnroundedSize(str); |
| ASSERT(size <= str->untag()->HeapSize()); |
| memset(reinterpret_cast<void*>(UntaggedObject::ToAddr(str) + size), 0, |
| str->untag()->HeapSize() - size); |
| } else if (cid == kCodeSourceMapCid) { |
| CodeSourceMapPtr map = CodeSourceMap::RawCast(object); |
| intptr_t size = CodeSourceMap::UnroundedSize(map); |
| ASSERT(size <= map->untag()->HeapSize()); |
| memset(reinterpret_cast<void*>(UntaggedObject::ToAddr(map) + size), 0, |
| map->untag()->HeapSize() - size); |
| } else if (cid == kCompressedStackMapsCid) { |
| CompressedStackMapsPtr maps = CompressedStackMaps::RawCast(object); |
| intptr_t size = CompressedStackMaps::UnroundedSize(maps); |
| ASSERT(size <= maps->untag()->HeapSize()); |
| memset(reinterpret_cast<void*>(UntaggedObject::ToAddr(maps) + size), 0, |
| maps->untag()->HeapSize() - size); |
| } else if (cid == kPcDescriptorsCid) { |
| PcDescriptorsPtr desc = PcDescriptors::RawCast(object); |
| intptr_t size = PcDescriptors::UnroundedSize(desc); |
| ASSERT(size <= desc->untag()->HeapSize()); |
| memset(reinterpret_cast<void*>(UntaggedObject::ToAddr(desc) + size), 0, |
| desc->untag()->HeapSize() - size); |
| } |
| } |
| |
| void Object::set_vm_isolate_snapshot_object_table(const Array& table) { |
| ASSERT(Isolate::Current() == Dart::vm_isolate()); |
| *vm_isolate_snapshot_object_table_ = table.ptr(); |
| } |
| |
| // Make unused space in an object whose type has been transformed safe |
| // for traversing during GC. |
| // The unused part of the transformed object is marked as a FreeListElement |
| // object that is not inserted into the freelist. |
| void Object::MakeUnusedSpaceTraversable(const Object& obj, |
| intptr_t original_size, |
| intptr_t used_size) { |
| ASSERT(Thread::Current()->no_safepoint_scope_depth() > 0); |
| ASSERT(!obj.IsNull()); |
| ASSERT(original_size >= used_size); |
| if (original_size > used_size) { |
| intptr_t leftover_size = original_size - used_size; |
| uword addr = UntaggedObject::ToAddr(obj.ptr()) + used_size; |
| if (obj.ptr()->IsNewObject()) { |
| FreeListElement::AsElementNew(addr, leftover_size); |
| } else { |
| FreeListElement::AsElement(addr, leftover_size); |
| } |
| // On architectures with a relaxed memory model, the concurrent marker may |
| // observe the write of the filler object's header before observing the |
| // new array length, and so treat it as a pointer. Ensure it is a Smi so |
| // the marker won't dereference it. |
| ASSERT((*reinterpret_cast<uword*>(addr) & kSmiTagMask) == kSmiTag); |
| ASSERT((*reinterpret_cast<uword*>(addr + kWordSize) & kSmiTagMask) == |
| kSmiTag); |
| } |
| } |
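| // A minimal usage sketch (the names here are illustrative, not a real call |
| // site): a caller shrinking an array in place inside a NoSafepointScope |
| // would do roughly |
| // |
| //   intptr_t old_size = Array::InstanceSize(old_length); |
| //   intptr_t new_size = Array::InstanceSize(new_length); |
| //   array.SetLength(new_length); |
| //   Object::MakeUnusedSpaceTraversable(array, old_size, new_size); |
| // |
| // so a concurrent or subsequent GC can still walk over the freed tail. |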
| |
| void Object::VerifyBuiltinVtables() { |
| #if defined(DEBUG) |
| ASSERT(builtin_vtables_[kIllegalCid] == 0); |
| ASSERT(builtin_vtables_[kFreeListElement] == 0); |
| ASSERT(builtin_vtables_[kForwardingCorpse] == 0); |
| ClassTable* table = IsolateGroup::Current()->class_table(); |
| for (intptr_t cid = kObjectCid; cid < kNumPredefinedCids; cid++) { |
| if (table->HasValidClassAt(cid)) { |
| ASSERT(builtin_vtables_[cid] != 0); |
| } |
| } |
| #endif |
| } |
| |
| void Object::RegisterClass(const Class& cls, |
| const String& name, |
| const Library& lib) { |
| ASSERT(name.Length() > 0); |
| ASSERT(name.CharAt(0) != '_'); |
| cls.set_name(name); |
| lib.AddClass(cls); |
| } |
| |
| void Object::RegisterPrivateClass(const Class& cls, |
| const String& public_class_name, |
| const Library& lib) { |
| ASSERT(public_class_name.Length() > 0); |
| ASSERT(public_class_name.CharAt(0) == '_'); |
| String& str = String::Handle(); |
| str = lib.PrivateName(public_class_name); |
| cls.set_name(str); |
| lib.AddClass(cls); |
| } |
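| // Note the asymmetry between the two helpers above: RegisterClass uses the |
| // public name as-is, while RegisterPrivateClass first mangles it through |
| // the library's PrivateName(), so identically named private classes in |
| // different libraries remain distinct. |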
| |
| // Initialize a new isolate from source or from a snapshot. |
| // |
| // There are three possibilities: |
| // 1. Running a Kernel binary. This function will bootstrap from the KERNEL |
| // file. |
| // 2. There is no vm snapshot. This function will bootstrap from source. |
| // 3. There is a vm snapshot. The caller should initialize from the snapshot. |
| // |
| // A non-null kernel argument indicates (1). |
| // A nullptr kernel indicates (2) or (3). |
| ErrorPtr Object::Init(IsolateGroup* isolate_group, |
| const uint8_t* kernel_buffer, |
| intptr_t kernel_buffer_size) { |
| Thread* thread = Thread::Current(); |
| Zone* zone = thread->zone(); |
| ASSERT(isolate_group == thread->isolate_group()); |
| TIMELINE_DURATION(thread, Isolate, "Object::Init"); |
| |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| const bool bootstrapping = false; |
| #else |
| const bool is_kernel = (kernel_buffer != nullptr); |
| const bool bootstrapping = |
| (Dart::vm_snapshot_kind() == Snapshot::kNone) || is_kernel; |
| #endif // defined(DART_PRECOMPILED_RUNTIME). |
| |
| if (bootstrapping) { |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| // Object::Init version when we are bootstrapping from source or from a |
| // Kernel binary. |
| // This will initialize the isolate group's object_store, which is shared |
| // by all isolates running in the isolate group. |
| ObjectStore* object_store = isolate_group->object_store(); |
| SafepointWriteRwLocker ml(thread, isolate_group->program_lock()); |
| |
| Class& cls = Class::Handle(zone); |
| Type& type = Type::Handle(zone); |
| Array& array = Array::Handle(zone); |
| WeakArray& weak_array = WeakArray::Handle(zone); |
| Library& lib = Library::Handle(zone); |
| TypeArguments& type_args = TypeArguments::Handle(zone); |
| |
| // All RawArray fields will be initialized to an empty array, so |
| // initialize the array class first. |
| cls = Class::New<Array, RTN::Array>(isolate_group); |
| ASSERT(object_store->array_class() == Class::null()); |
| object_store->set_array_class(cls); |
| |
| // VM classes that are parameterized (Array, ImmutableArray, |
| // GrowableObjectArray, Map, ConstMap, Set, ConstSet) are also |
| // pre-finalized, so CalculateFieldOffsets() is never called for them. |
| // We therefore need to set the offset of their type_arguments_ field |
| // here; it is explicitly declared in their respective Raw* classes. |
| cls.set_type_arguments_field_offset(Array::type_arguments_offset(), |
| RTN::Array::type_arguments_offset()); |
| cls.set_num_type_arguments_unsafe(1); |
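| // Array (_List<E>) carries exactly one type argument; its declared type |
| // parameter count is patched from 0 to 1 later (see the note on _List |
| // below). |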
| |
| // Set up the growable object array class (this has to be done after the |
| // array class is set up, as one of its fields is an array object). |
| cls = Class::New<GrowableObjectArray, RTN::GrowableObjectArray>( |
| isolate_group); |
| object_store->set_growable_object_array_class(cls); |
| cls.set_type_arguments_field_offset( |
| GrowableObjectArray::type_arguments_offset(), |
| RTN::GrowableObjectArray::type_arguments_offset()); |
| cls.set_num_type_arguments_unsafe(1); |
| |
| // Initialize hash set for regexp_table_. |
| const intptr_t kInitialCanonicalRegExpSize = 4; |
| weak_array = HashTables::New<CanonicalRegExpSet>( |
| kInitialCanonicalRegExpSize, Heap::kOld); |
| object_store->set_regexp_table(weak_array); |
| |
| // Initialize hash set for canonical types. |
| const intptr_t kInitialCanonicalTypeSize = 16; |
| array = HashTables::New<CanonicalTypeSet>(kInitialCanonicalTypeSize, |
| Heap::kOld); |
| object_store->set_canonical_types(array); |
| |
| // Initialize hash set for canonical function types. |
| const intptr_t kInitialCanonicalFunctionTypeSize = 16; |
| array = HashTables::New<CanonicalFunctionTypeSet>( |
| kInitialCanonicalFunctionTypeSize, Heap::kOld); |
| object_store->set_canonical_function_types(array); |
| |
| // Initialize hash set for canonical record types. |
| const intptr_t kInitialCanonicalRecordTypeSize = 16; |
| array = HashTables::New<CanonicalRecordTypeSet>( |
| kInitialCanonicalRecordTypeSize, Heap::kOld); |
| object_store->set_canonical_record_types(array); |
| |
| // Initialize hash set for canonical type parameters. |
| const intptr_t kInitialCanonicalTypeParameterSize = 4; |
| array = HashTables::New<CanonicalTypeParameterSet>( |
| kInitialCanonicalTypeParameterSize, Heap::kOld); |
| object_store->set_canonical_type_parameters(array); |
| |
| // Initialize hash set for canonical_type_arguments_. |
| const intptr_t kInitialCanonicalTypeArgumentsSize = 4; |
| array = HashTables::New<CanonicalTypeArgumentsSet>( |
| kInitialCanonicalTypeArgumentsSize, Heap::kOld); |
| object_store->set_canonical_type_arguments(array); |
| |
| // Set up the type classes early in the process. |
| const Class& type_cls = |
| Class::Handle(zone, Class::New<Type, RTN::Type>(isolate_group)); |
| const Class& function_type_cls = Class::Handle( |
| zone, Class::New<FunctionType, RTN::FunctionType>(isolate_group)); |
| const Class& record_type_cls = Class::Handle( |
| zone, Class::New<RecordType, RTN::RecordType>(isolate_group)); |
| const Class& type_parameter_cls = Class::Handle( |
| zone, Class::New<TypeParameter, RTN::TypeParameter>(isolate_group)); |
| const Class& library_prefix_cls = Class::Handle( |
| zone, Class::New<LibraryPrefix, RTN::LibraryPrefix>(isolate_group)); |
| |
| // Pre-allocate the OneByteString class needed by the symbol table. |
| cls = Class::NewStringClass(kOneByteStringCid, isolate_group); |
| object_store->set_one_byte_string_class(cls); |
| |
| // Pre-allocate the TwoByteString class needed by the symbol table. |
| cls = Class::NewStringClass(kTwoByteStringCid, isolate_group); |
| object_store->set_two_byte_string_class(cls); |
| |
| // Set up the symbol table for the symbols created in the isolate. |
| Symbols::SetupSymbolTable(isolate_group); |
| |
| // Set up the libraries array before initializing the core library. |
| const GrowableObjectArray& libraries = |
| GrowableObjectArray::Handle(zone, GrowableObjectArray::New(Heap::kOld)); |
| object_store->set_libraries(libraries); |
| |
| // Pre-register the core library. |
| Library::InitCoreLibrary(isolate_group); |
| |
| // Basic infrastructure has been set up; initialize the class dictionary. |
| const Library& core_lib = Library::Handle(zone, Library::CoreLibrary()); |
| ASSERT(!core_lib.IsNull()); |
| |
| const GrowableObjectArray& pending_classes = |
| GrowableObjectArray::Handle(zone, GrowableObjectArray::New()); |
| object_store->set_pending_classes(pending_classes); |
| |
| // Now that the symbol table is initialized and the core dictionary as well |
| // as the core implementation dictionary have been set up, preallocate the |
| // remaining classes and register them by name in the dictionaries. |
| String& name = String::Handle(zone); |
| cls = object_store->array_class(); // Was allocated above. |
| RegisterPrivateClass(cls, Symbols::_List(), core_lib); |
| pending_classes.Add(cls); |
| // We cannot use NewNonParameterizedType(), because Array is |
| // parameterized. Warning: class _List has not been patched yet. Its |
| // declared number of type parameters is still 0. It will become 1 after |
| // patching. The array type allocated below represents the raw type _List |
| // and not _List<E> as we could expect. Use with caution. |
| type = Type::New(Class::Handle(zone, cls.ptr()), |
| Object::null_type_arguments(), Nullability::kNonNullable); |
| type.SetIsFinalized(); |
| type ^= type.Canonicalize(thread); |
| object_store->set_array_type(type); |
| |
| cls = object_store->growable_object_array_class(); // Was allocated above. |
| RegisterPrivateClass(cls, Symbols::_GrowableList(), core_lib); |
| pending_classes.Add(cls); |
| |
| cls = Class::New<Array, RTN::Array>(kImmutableArrayCid, isolate_group); |
| object_store->set_immutable_array_class(cls); |
| cls.set_type_arguments_field_offset(Array::type_arguments_offset(), |
| RTN::Array::type_arguments_offset()); |
| cls.set_num_type_arguments_unsafe(1); |
| ASSERT(object_store->immutable_array_class() != |
| object_store->array_class()); |
| cls.set_is_prefinalized(); |
| RegisterPrivateClass(cls, Symbols::_ImmutableList(), core_lib); |
| pending_classes.Add(cls); |
| |
| cls = object_store->one_byte_string_class(); // Was allocated above. |
| RegisterPrivateClass(cls, Symbols::OneByteString(), core_lib); |
| pending_classes.Add(cls); |
| |
| cls = object_store->two_byte_string_class(); // Was allocated above. |
| RegisterPrivateClass(cls, Symbols::TwoByteString(), core_lib); |
| pending_classes.Add(cls); |
| |
| // Pre-register the isolate library so the native class implementations can |
| // be hooked up before compiling it. |
| Library& isolate_lib = Library::Handle( |
| zone, Library::LookupLibrary(thread, Symbols::DartIsolate())); |
| if (isolate_lib.IsNull()) { |
| isolate_lib = Library::NewLibraryHelper(Symbols::DartIsolate(), true); |
| isolate_lib.SetLoadRequested(); |
| isolate_lib.Register(thread); |
| } |
| object_store->set_bootstrap_library(ObjectStore::kIsolate, isolate_lib); |
| ASSERT(!isolate_lib.IsNull()); |
| ASSERT(isolate_lib.ptr() == Library::IsolateLibrary()); |
| |
| cls = Class::New<Capability, RTN::Capability>(isolate_group); |
| RegisterPrivateClass(cls, Symbols::_Capability(), isolate_lib); |
| pending_classes.Add(cls); |
| |
| cls = Class::New<ReceivePort, RTN::ReceivePort>(isolate_group); |
| RegisterPrivateClass(cls, Symbols::_RawReceivePort(), isolate_lib); |
| pending_classes.Add(cls); |
| |
| cls = Class::New<SendPort, RTN::SendPort>(isolate_group); |
| RegisterPrivateClass(cls, Symbols::_SendPort(), isolate_lib); |
| pending_classes.Add(cls); |
| |
| cls = Class::New<TransferableTypedData, RTN::TransferableTypedData>( |
| isolate_group); |
| RegisterPrivateClass(cls, Symbols::_TransferableTypedDataImpl(), |
| isolate_lib); |
| pending_classes.Add(cls); |
| |
| const Class& stacktrace_cls = Class::Handle( |
| zone, Class::New<StackTrace, RTN::StackTrace>(isolate_group)); |
| RegisterPrivateClass(stacktrace_cls, Symbols::_StackTrace(), core_lib); |
| pending_classes.Add(stacktrace_cls); |
| // Super type set below, after Object is allocated. |
| |
| cls = Class::New<RegExp, RTN::RegExp>(isolate_group); |
| RegisterPrivateClass(cls, Symbols::_RegExp(), core_lib); |
| pending_classes.Add(cls); |
| |
| // Initialize the base interfaces used by the core VM classes. |
| |
| // Allocate and initialize the pre-allocated classes in the core library. |
| // The script and token index of these pre-allocated classes are set up |
| // when the corelib script is compiled. |
| cls = Class::New<Instance, RTN::Instance>(kInstanceCid, isolate_group); |
| object_store->set_object_class(cls); |
| cls.set_name(Symbols::Object()); |
| cls.set_num_type_arguments_unsafe(0); |
| cls.set_is_prefinalized(); |
| cls.set_is_const(); |
| core_lib.AddClass(cls); |
| pending_classes.Add(cls); |
| type = Type::NewNonParameterizedType(cls); |
| ASSERT(type.IsCanonical()); |
| object_store->set_object_type(type); |
| type = type.ToNullability(Nullability::kNonNullable, Heap::kOld); |
| ASSERT(type.IsCanonical()); |
| object_store->set_non_nullable_object_type(type); |
| type = type.ToNullability(Nullability::kNullable, Heap::kOld); |
| ASSERT(type.IsCanonical()); |
| object_store->set_nullable_object_type(type); |
| |
| cls = Class::New<Bool, RTN::Bool>(isolate_group); |
| object_store->set_bool_class(cls); |
| RegisterClass(cls, Symbols::Bool(), core_lib); |
| pending_classes.Add(cls); |
| |
| cls = Class::New<Instance, RTN::Instance>(kNullCid, isolate_group); |
| object_store->set_null_class(cls); |
| cls.set_num_type_arguments_unsafe(0); |
| cls.set_is_prefinalized(); |
| RegisterClass(cls, Symbols::Null(), core_lib); |
| pending_classes.Add(cls); |
| |
| cls = Class::New<Instance, RTN::Instance>(kNeverCid, isolate_group); |
| cls.set_num_type_arguments_unsafe(0); |
| cls.set_is_allocate_finalized(); |
| cls.set_is_declaration_loaded(); |
| cls.set_is_type_finalized(); |
| cls.set_name(Symbols::Never()); |
| object_store->set_never_class(cls); |
| |
| ASSERT(!library_prefix_cls.IsNull()); |
| RegisterPrivateClass(library_prefix_cls, Symbols::_LibraryPrefix(), |
| core_lib); |
| pending_classes.Add(library_prefix_cls); |
| |
| RegisterPrivateClass(type_cls, Symbols::_Type(), core_lib); |
| pending_classes.Add(type_cls); |
| |
| RegisterPrivateClass(function_type_cls, Symbols::_FunctionType(), core_lib); |
| pending_classes.Add(function_type_cls); |
| |
| RegisterPrivateClass(record_type_cls, Symbols::_RecordType(), core_lib); |
| pending_classes.Add(record_type_cls); |
| |
| RegisterPrivateClass(type_parameter_cls, Symbols::_TypeParameter(), |
| core_lib); |
| pending_classes.Add(type_parameter_cls); |
| |
| cls = Class::New<Integer, RTN::Integer>(isolate_group); |
| object_store->set_integer_implementation_class(cls); |
| RegisterPrivateClass(cls, Symbols::_IntegerImplementation(), core_lib); |
| pending_classes.Add(cls); |
| |
| cls = Class::New<Smi, RTN::Smi>(isolate_group); |
| object_store->set_smi_class(cls); |
| RegisterPrivateClass(cls, Symbols::_Smi(), core_lib); |
| pending_classes.Add(cls); |
| |
| cls = Class::New<Mint, RTN::Mint>(isolate_group); |
| object_store->set_mint_class(cls); |
| RegisterPrivateClass(cls, Symbols::_Mint(), core_lib); |
| pending_classes.Add(cls); |
| |
| cls = Class::New<Double, RTN::Double>(isolate_group); |
| object_store->set_double_class(cls); |
| RegisterPrivateClass(cls, Symbols::_Double(), core_lib); |
| pending_classes.Add(cls); |
| |
| // Class that represents the Dart class _Closure and C++ class Closure. |
| cls = Class::New<Closure, RTN::Closure>(isolate_group); |
| object_store->set_closure_class(cls); |
| RegisterPrivateClass(cls, Symbols::_Closure(), core_lib); |
| pending_classes.Add(cls); |
| |
| cls = Class::New<Record, RTN::Record>(isolate_group); |
| RegisterPrivateClass(cls, Symbols::_Record(), core_lib); |
| pending_classes.Add(cls); |
| |
| cls = Class::New<WeakProperty, RTN::WeakProperty>(isolate_group); |
| object_store->set_weak_property_class(cls); |
| RegisterPrivateClass(cls, Symbols::_WeakProperty(), core_lib); |
| |
| cls = Class::New<WeakReference, RTN::WeakReference>(isolate_group); |
| cls.set_type_arguments_field_offset( |
| WeakReference::type_arguments_offset(), |
| RTN::WeakReference::type_arguments_offset()); |
| cls.set_num_type_arguments_unsafe(1); |
| object_store->set_weak_reference_class(cls); |
| RegisterPrivateClass(cls, Symbols::_WeakReference(), core_lib); |
| |
| // Pre-register the mirrors library so we can place the vm class |
| // MirrorReference there rather than the core library. |
| lib = Library::LookupLibrary(thread, Symbols::DartMirrors()); |
| if (lib.IsNull()) { |
| lib = Library::NewLibraryHelper(Symbols::DartMirrors(), true); |
| lib.SetLoadRequested(); |
| lib.Register(thread); |
| } |
| object_store->set_bootstrap_library(ObjectStore::kMirrors, lib); |
| ASSERT(!lib.IsNull()); |
| ASSERT(lib.ptr() == Library::MirrorsLibrary()); |
| |
| cls = Class::New<MirrorReference, RTN::MirrorReference>(isolate_group); |
| RegisterPrivateClass(cls, Symbols::_MirrorReference(), lib); |
| |
| // Pre-register the dart:_compact_hash library so that we can place the |
| // collection classes (_Map, _ConstMap, _Set, _ConstSet) there. |
| lib = Library::LookupLibrary(thread, Symbols::DartCompactHash()); |
| if (lib.IsNull()) { |
| lib = Library::NewLibraryHelper(Symbols::DartCompactHash(), true); |
| lib.SetLoadRequested(); |
| lib.Register(thread); |
| } |
| object_store->set_bootstrap_library(ObjectStore::kCompactHash, lib); |
| |
| ASSERT(!lib.IsNull()); |
| ASSERT(lib.ptr() == Library::CompactHashLibrary()); |
| cls = Class::New<Map, RTN::Map>(isolate_group); |
| object_store->set_map_impl_class(cls); |
| cls.set_type_arguments_field_offset(Map::type_arguments_offset(), |
| RTN::Map::type_arguments_offset()); |
| cls.set_num_type_arguments_unsafe(2); |
| RegisterPrivateClass(cls, Symbols::_Map(), lib); |
| pending_classes.Add(cls); |
| |
| cls = Class::New<Map, RTN::Map>(kConstMapCid, isolate_group); |
| object_store->set_const_map_impl_class(cls); |
| cls.set_type_arguments_field_offset(Map::type_arguments_offset(), |
| RTN::Map::type_arguments_offset()); |
| cls.set_num_type_arguments_unsafe(2); |
| cls.set_is_prefinalized(); |
| RegisterPrivateClass(cls, Symbols::_ConstMap(), lib); |
| pending_classes.Add(cls); |
| |
| cls = Class::New<Set, RTN::Set>(isolate_group); |
| object_store->set_set_impl_class(cls); |
| cls.set_type_arguments_field_offset(Set::type_arguments_offset(), |
| RTN::Set::type_arguments_offset()); |
| cls.set_num_type_arguments_unsafe(1); |
| RegisterPrivateClass(cls, Symbols::_Set(), lib); |
| pending_classes.Add(cls); |
| |
| cls = Class::New<Set, RTN::Set>(kConstSetCid, isolate_group); |
| object_store->set_const_set_impl_class(cls); |
| cls.set_type_arguments_field_offset(Set::type_arguments_offset(), |
| RTN::Set::type_arguments_offset()); |
| cls.set_num_type_arguments_unsafe(1); |
| cls.set_is_prefinalized(); |
| RegisterPrivateClass(cls, Symbols::_ConstSet(), lib); |
| pending_classes.Add(cls); |
| |
| // Pre-register the collection library. |
| lib = Library::LookupLibrary(thread, Symbols::DartCollection()); |
| if (lib.IsNull()) { |
| lib = Library::NewLibraryHelper(Symbols::DartCollection(), true); |
| lib.SetLoadRequested(); |
| lib.Register(thread); |
| } |
| object_store->set_bootstrap_library(ObjectStore::kCollection, lib); |
| |
| // Pre-register the async library so we can place the vm class |
| // FutureOr there rather than the core library. |
| lib = Library::LookupLibrary(thread, Symbols::DartAsync()); |
| if (lib.IsNull()) { |
| lib = Library::NewLibraryHelper(Symbols::DartAsync(), true); |
| lib.SetLoadRequested(); |
| lib.Register(thread); |
| } |
| object_store->set_bootstrap_library(ObjectStore::kAsync, lib); |
| ASSERT(!lib.IsNull()); |
| ASSERT(lib.ptr() == Library::AsyncLibrary()); |
| cls = Class::New<FutureOr, RTN::FutureOr>(isolate_group); |
| cls.set_type_arguments_field_offset(FutureOr::type_arguments_offset(), |
| RTN::FutureOr::type_arguments_offset()); |
| cls.set_num_type_arguments_unsafe(1); |
| RegisterClass(cls, Symbols::FutureOr(), lib); |
| pending_classes.Add(cls); |
| object_store->set_future_or_class(cls); |
| |
| cls = Class::New<SuspendState, RTN::SuspendState>(isolate_group); |
| RegisterPrivateClass(cls, Symbols::_SuspendState(), lib); |
| pending_classes.Add(cls); |
| |
| // Pre-register the developer library so we can place the vm class |
| // UserTag there rather than the core library. |
| lib = Library::LookupLibrary(thread, Symbols::DartDeveloper()); |
| if (lib.IsNull()) { |
| lib = Library::NewLibraryHelper(Symbols::DartDeveloper(), true); |
| lib.SetLoadRequested(); |
| lib.Register(thread); |
| } |
| object_store->set_bootstrap_library(ObjectStore::kDeveloper, lib); |
| ASSERT(!lib.IsNull()); |
| ASSERT(lib.ptr() == Library::DeveloperLibrary()); |
| cls = Class::New<UserTag, RTN::UserTag>(isolate_group); |
| RegisterPrivateClass(cls, Symbols::_UserTag(), lib); |
| pending_classes.Add(cls); |
| |
| // Set up some default native field classes, which can be extended for |
| // specifying native fields in Dart classes. |
| Library::InitNativeWrappersLibrary(isolate_group, is_kernel); |
| ASSERT(object_store->native_wrappers_library() != Library::null()); |
| |
| // Pre-register the typed_data library so the native class implementations |
| // can be hooked up before compiling it. |
| lib = Library::LookupLibrary(thread, Symbols::DartTypedData()); |
| if (lib.IsNull()) { |
| lib = Library::NewLibraryHelper(Symbols::DartTypedData(), true); |
| lib.SetLoadRequested(); |
| lib.Register(thread); |
| } |
| object_store->set_bootstrap_library(ObjectStore::kTypedData, lib); |
| ASSERT(!lib.IsNull()); |
| ASSERT(lib.ptr() == Library::TypedDataLibrary()); |
| #define REGISTER_TYPED_DATA_CLASS(clazz) \ |
| cls = Class::NewTypedDataClass(kTypedData##clazz##ArrayCid, isolate_group); \ |
| RegisterPrivateClass(cls, Symbols::_##clazz##List(), lib); |
| |
| DART_CLASS_LIST_TYPED_DATA(REGISTER_TYPED_DATA_CLASS); |
| #undef REGISTER_TYPED_DATA_CLASS |
| #define REGISTER_TYPED_DATA_VIEW_CLASS(clazz) \ |
| cls = \ |
| Class::NewTypedDataViewClass(kTypedData##clazz##ViewCid, isolate_group); \ |
| RegisterPrivateClass(cls, Symbols::_##clazz##View(), lib); \ |
| pending_classes.Add(cls); \ |
| cls = Class::NewUnmodifiableTypedDataViewClass( \ |
| kUnmodifiableTypedData##clazz##ViewCid, isolate_group); \ |
| RegisterPrivateClass(cls, Symbols::_Unmodifiable##clazz##View(), lib); \ |
| pending_classes.Add(cls); |
| |
| CLASS_LIST_TYPED_DATA(REGISTER_TYPED_DATA_VIEW_CLASS); |
| |
| cls = Class::NewTypedDataViewClass(kByteDataViewCid, isolate_group); |
| RegisterPrivateClass(cls, Symbols::_ByteDataView(), lib); |
| pending_classes.Add(cls); |
| cls = Class::NewUnmodifiableTypedDataViewClass(kUnmodifiableByteDataViewCid, |
| isolate_group); |
| RegisterPrivateClass(cls, Symbols::_UnmodifiableByteDataView(), lib); |
| pending_classes.Add(cls); |
| |
| #undef REGISTER_TYPED_DATA_VIEW_CLASS |
| #define REGISTER_EXT_TYPED_DATA_CLASS(clazz) \ |
| cls = Class::NewExternalTypedDataClass(kExternalTypedData##clazz##Cid, \ |
| isolate_group); \ |
| RegisterPrivateClass(cls, Symbols::_External##clazz(), lib); |
| |
| cls = Class::New<Instance, RTN::Instance>(kByteBufferCid, isolate_group, |
| /*register_class=*/false); |
| cls.set_instance_size(0, 0); |
| cls.set_next_field_offset(-kWordSize, -compiler::target::kWordSize); |
| isolate_group->class_table()->Register(cls); |
| RegisterPrivateClass(cls, Symbols::_ByteBuffer(), lib); |
| pending_classes.Add(cls); |
| |
| CLASS_LIST_TYPED_DATA(REGISTER_EXT_TYPED_DATA_CLASS); |
| #undef REGISTER_EXT_TYPED_DATA_CLASS |
| // Register Float32x4, Int32x4, and Float64x2 in the object store. |
| cls = Class::New<Float32x4, RTN::Float32x4>(isolate_group); |
| RegisterPrivateClass(cls, Symbols::_Float32x4(), lib); |
| pending_classes.Add(cls); |
| object_store->set_float32x4_class(cls); |
| |
| cls = Class::New<Instance, RTN::Instance>(kIllegalCid, isolate_group, |
| /*register_class=*/true, |
| /*is_abstract=*/true); |
| RegisterClass(cls, Symbols::Float32x4(), lib); |
| cls.set_num_type_arguments_unsafe(0); |
| cls.set_is_prefinalized(); |
| type = Type::NewNonParameterizedType(cls); |
| object_store->set_float32x4_type(type); |
| |
| cls = Class::New<Int32x4, RTN::Int32x4>(isolate_group); |
| RegisterPrivateClass(cls, Symbols::_Int32x4(), lib); |
| pending_classes.Add(cls); |
| object_store->set_int32x4_class(cls); |
| |
| cls = Class::New<Instance, RTN::Instance>(kIllegalCid, isolate_group, |
| /*register_class=*/true, |
| /*is_abstract=*/true); |
| RegisterClass(cls, Symbols::Int32x4(), lib); |
| cls.set_num_type_arguments_unsafe(0); |
| cls.set_is_prefinalized(); |
| type = Type::NewNonParameterizedType(cls); |
| object_store->set_int32x4_type(type); |
| |
| cls = Class::New<Float64x2, RTN::Float64x2>(isolate_group); |
| RegisterPrivateClass(cls, Symbols::_Float64x2(), lib); |
| pending_classes.Add(cls); |
| object_store->set_float64x2_class(cls); |
| |
| cls = Class::New<Instance, RTN::Instance>(kIllegalCid, isolate_group, |
| /*register_class=*/true, |
| /*is_abstract=*/true); |
| RegisterClass(cls, Symbols::Float64x2(), lib); |
| cls.set_num_type_arguments_unsafe(0); |
| cls.set_is_prefinalized(); |
| type = Type::NewNonParameterizedType(cls); |
| object_store->set_float64x2_type(type); |
| |
| // Set the super type of class StackTrace to Object type so that the |
| // 'toString' method is implemented. |
| type = object_store->object_type(); |
| stacktrace_cls.set_super_type(type); |
| |
| // Abstract class that represents the Dart class Type. |
| // Note that this class is implemented by Dart class _AbstractType. |
| cls = Class::New<Instance, RTN::Instance>(kIllegalCid, isolate_group, |
| /*register_class=*/true, |
| /*is_abstract=*/true); |
| cls.set_num_type_arguments_unsafe(0); |
| cls.set_is_prefinalized(); |
| RegisterClass(cls, Symbols::Type(), core_lib); |
| pending_classes.Add(cls); |
| type = Type::NewNonParameterizedType(cls); |
| object_store->set_type_type(type); |
| |
| // Abstract class that represents the Dart class Function. |
| cls = Class::New<Instance, RTN::Instance>(kIllegalCid, isolate_group, |
| /*register_class=*/true, |
| /*is_abstract=*/true); |
| cls.set_num_type_arguments_unsafe(0); |
| cls.set_is_prefinalized(); |
| RegisterClass(cls, Symbols::Function(), core_lib); |
| pending_classes.Add(cls); |
| type = Type::NewNonParameterizedType(cls); |
| object_store->set_function_type(type); |
| |
| // Abstract class that represents the Dart class Record. |
| cls = Class::New<Instance, RTN::Instance>(kIllegalCid, isolate_group, |
| /*register_class=*/true, |
| /*is_abstract=*/true); |
| RegisterClass(cls, Symbols::Record(), core_lib); |
| pending_classes.Add(cls); |
| object_store->set_record_class(cls); |
| |
| cls = Class::New<Number, RTN::Number>(isolate_group); |
| RegisterClass(cls, Symbols::Number(), core_lib); |
| pending_classes.Add(cls); |
| type = Type::NewNonParameterizedType(cls); |
| object_store->set_number_type(type); |
| type = type.ToNullability(Nullability::kNullable, Heap::kOld); |
| object_store->set_nullable_number_type(type); |
| |
| cls = Class::New<Instance, RTN::Instance>(kIllegalCid, isolate_group, |
| /*register_class=*/true, |
| /*is_abstract=*/true); |
| RegisterClass(cls, Symbols::Int(), core_lib); |
| cls.set_num_type_arguments_unsafe(0); |
| cls.set_is_prefinalized(); |
| pending_classes.Add(cls); |
| type = Type::NewNonParameterizedType(cls); |
| object_store->set_int_type(type); |
| type = type.ToNullability(Nullability::kNonNullable, Heap::kOld); |
| object_store->set_non_nullable_int_type(type); |
| type = type.ToNullability(Nullability::kNullable, Heap::kOld); |
| object_store->set_nullable_int_type(type); |
| |
| cls = Class::New<Instance, RTN::Instance>(kIllegalCid, isolate_group, |
| /*register_class=*/true, |
| /*is_abstract=*/true); |
| RegisterClass(cls, Symbols::Double(), core_lib); |
| cls.set_num_type_arguments_unsafe(0); |
| cls.set_is_prefinalized(); |
| pending_classes.Add(cls); |
| type = Type::NewNonParameterizedType(cls); |
| object_store->set_double_type(type); |
| type = type.ToNullability(Nullability::kNullable, Heap::kOld); |
| object_store->set_nullable_double_type(type); |
| |
| name = Symbols::_String().ptr(); |
| cls = Class::New<Instance, RTN::Instance>(kIllegalCid, isolate_group, |
| /*register_class=*/true, |
| /*is_abstract=*/true); |
| RegisterClass(cls, name, core_lib); |
| cls.set_num_type_arguments_unsafe(0); |
| cls.set_is_prefinalized(); |
| pending_classes.Add(cls); |
| type = Type::NewNonParameterizedType(cls); |
| object_store->set_string_type(type); |
| |
| cls = object_store->bool_class(); |
| type = Type::NewNonParameterizedType(cls); |
| object_store->set_bool_type(type); |
| |
| cls = object_store->smi_class(); |
| type = Type::NewNonParameterizedType(cls); |
| object_store->set_smi_type(type); |
| |
| cls = object_store->mint_class(); |
| type = Type::NewNonParameterizedType(cls); |
| object_store->set_mint_type(type); |
| |
| // The classes 'void' and 'dynamic' are phony classes to make type checking |
| // more regular; they live in the VM isolate. The class 'void' is not |
| // registered in the class dictionary because its name is a reserved word. |
| // The class 'dynamic' is registered in the class dictionary because its |
| // name is a built-in identifier (this is wrong). The corresponding types |
| // are stored in the object store. |
| cls = object_store->null_class(); |
| type = |
| Type::New(cls, Object::null_type_arguments(), Nullability::kNullable); |
| type.SetIsFinalized(); |
| type ^= type.Canonicalize(thread); |
| object_store->set_null_type(type); |
| cls.set_declaration_type(type); |
| ASSERT(type.IsNullable()); |
| |
| // Consider removing when/if Null becomes an ordinary class. |
| type = object_store->object_type(); |
| cls.set_super_type(type); |
| |
| cls = object_store->never_class(); |
| type = Type::New(cls, Object::null_type_arguments(), |
| Nullability::kNonNullable); |
| type.SetIsFinalized(); |
| type ^= type.Canonicalize(thread); |
| object_store->set_never_type(type); |
| type_args = TypeArguments::New(1); |
| type_args.SetTypeAt(0, type); |
| type_args = type_args.Canonicalize(thread); |
| object_store->set_type_argument_never(type_args); |
| |
| // Create and cache commonly used type arguments <int>, <double>, |
| // <String>, <String, dynamic> and <String, String>. |
| type_args = TypeArguments::New(1); |
| type = object_store->int_type(); |
| type_args.SetTypeAt(0, type); |
| type_args = type_args.Canonicalize(thread); |
| object_store->set_type_argument_int(type_args); |
| |
| type_args = TypeArguments::New(1); |
| type = object_store->double_type(); |
| type_args.SetTypeAt(0, type); |
| type_args = type_args.Canonicalize(thread); |
| object_store->set_type_argument_double(type_args); |
| |
| type_args = TypeArguments::New(1); |
| type = object_store->string_type(); |
| type_args.SetTypeAt(0, type); |
| type_args = type_args.Canonicalize(thread); |
| object_store->set_type_argument_string(type_args); |
| |
| type_args = TypeArguments::New(2); |
| type = object_store->string_type(); |
| type_args.SetTypeAt(0, type); |
| type_args.SetTypeAt(1, Object::dynamic_type()); |
| type_args = type_args.Canonicalize(thread); |
| object_store->set_type_argument_string_dynamic(type_args); |
| |
| type_args = TypeArguments::New(2); |
| type = object_store->string_type(); |
| type_args.SetTypeAt(0, type); |
| type_args.SetTypeAt(1, type); |
| type_args = type_args.Canonicalize(thread); |
| object_store->set_type_argument_string_string(type_args); |
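| // Caching these canonical vectors means later code can fetch, e.g., the |
| // <String, String> vector straight from the object store instead of |
| // building and re-canonicalizing it on every use. |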
| |
| lib = Library::LookupLibrary(thread, Symbols::DartFfi()); |
| if (lib.IsNull()) { |
| lib = Library::NewLibraryHelper(Symbols::DartFfi(), true); |
| lib.SetLoadRequested(); |
| lib.Register(thread); |
| } |
| object_store->set_bootstrap_library(ObjectStore::kFfi, lib); |
| |
| cls = Class::New<Instance, RTN::Instance>(kFfiNativeTypeCid, isolate_group); |
| cls.set_num_type_arguments_unsafe(0); |
| cls.set_is_prefinalized(); |
| pending_classes.Add(cls); |
| object_store->set_ffi_native_type_class(cls); |
| RegisterClass(cls, Symbols::FfiNativeType(), lib); |
| |
| #define REGISTER_FFI_TYPE_MARKER(clazz) \ |
| cls = Class::New<Instance, RTN::Instance>(kFfi##clazz##Cid, isolate_group); \ |
| cls.set_num_type_arguments_unsafe(0); \ |
| cls.set_is_prefinalized(); \ |
| pending_classes.Add(cls); \ |
| RegisterClass(cls, Symbols::Ffi##clazz(), lib); |
| CLASS_LIST_FFI_TYPE_MARKER(REGISTER_FFI_TYPE_MARKER); |
| #undef REGISTER_FFI_TYPE_MARKER |
| |
| cls = Class::New<Instance, RTN::Instance>(kFfiNativeFunctionCid, |
| isolate_group); |
| cls.set_type_arguments_field_offset(Instance::NextFieldOffset(), |
| RTN::Instance::NextFieldOffset()); |
| cls.set_num_type_arguments_unsafe(1); |
| cls.set_is_prefinalized(); |
| pending_classes.Add(cls); |
| RegisterClass(cls, Symbols::FfiNativeFunction(), lib); |
| |
| cls = Class::NewPointerClass(kPointerCid, isolate_group); |
| object_store->set_ffi_pointer_class(cls); |
| pending_classes.Add(cls); |
| RegisterClass(cls, Symbols::FfiPointer(), lib); |
| |
| cls = Class::New<DynamicLibrary, RTN::DynamicLibrary>(kDynamicLibraryCid, |
| isolate_group); |
| cls.set_instance_size(DynamicLibrary::InstanceSize(), |
| compiler::target::RoundedAllocationSize( |
| RTN::DynamicLibrary::InstanceSize())); |
| cls.set_is_prefinalized(); |
| pending_classes.Add(cls); |
| RegisterClass(cls, Symbols::FfiDynamicLibrary(), lib); |
| |
| cls = Class::New<NativeFinalizer, RTN::NativeFinalizer>(isolate_group); |
| object_store->set_native_finalizer_class(cls); |
| RegisterPrivateClass(cls, Symbols::_NativeFinalizer(), lib); |
| |
| cls = Class::New<Finalizer, RTN::Finalizer>(isolate_group); |
| cls.set_type_arguments_field_offset( |
| Finalizer::type_arguments_offset(), |
| RTN::Finalizer::type_arguments_offset()); |
| cls.set_num_type_arguments_unsafe(1); |
| object_store->set_finalizer_class(cls); |
| pending_classes.Add(cls); |
| RegisterPrivateClass(cls, Symbols::_FinalizerImpl(), core_lib); |
| |
| // Pre-register the internal library so we can place the vm class |
| // FinalizerEntry there rather than the core library. |
| lib = Library::LookupLibrary(thread, Symbols::DartInternal()); |
| if (lib.IsNull()) { |
| lib = Library::NewLibraryHelper(Symbols::DartInternal(), true); |
| lib.SetLoadRequested(); |
| lib.Register(thread); |
| } |
| object_store->set_bootstrap_library(ObjectStore::kInternal, lib); |
| ASSERT(!lib.IsNull()); |
| ASSERT(lib.ptr() == Library::InternalLibrary()); |
| |
| cls = Class::New<FinalizerEntry, RTN::FinalizerEntry>(isolate_group); |
| object_store->set_finalizer_entry_class(cls); |
| pending_classes.Add(cls); |
| RegisterClass(cls, Symbols::FinalizerEntry(), lib); |
| |
| // Finish the initialization by compiling the bootstrap scripts containing |
| // the base interfaces and the implementation of the internal classes. |
| const Error& error = Error::Handle( |
| zone, Bootstrap::DoBootstrapping(kernel_buffer, kernel_buffer_size)); |
| if (!error.IsNull()) { |
| return error.ptr(); |
| } |
| |
| isolate_group->class_table()->CopySizesFromClassObjects(); |
| |
| ClassFinalizer::VerifyBootstrapClasses(); |
| |
| // Adds static const fields (class ids) to the class 'ClassID'. |
| lib = Library::LookupLibrary(thread, Symbols::DartInternal()); |
| ASSERT(!lib.IsNull()); |
| cls = lib.LookupClassAllowPrivate(Symbols::ClassID()); |
| ASSERT(!cls.IsNull()); |
| const bool injected = cls.InjectCIDFields(); |
| ASSERT(injected); |
| |
| // Set up recognized state of all functions (core, math and typed data). |
| MethodRecognizer::InitializeState(); |
| #endif // !defined(DART_PRECOMPILED_RUNTIME) |
| } else { |
| // Object::Init version used when we are running a VM that has a full |
| // snapshot linked in, so the isolate is initialized from that snapshot. |
| ObjectStore* object_store = isolate_group->object_store(); |
| SafepointWriteRwLocker ml(thread, isolate_group->program_lock()); |
| |
| Class& cls = Class::Handle(zone); |
| |
| // Set up empty classes in the object store; these will get initialized |
| // correctly when we read from the snapshot. This is done to allow |
| // bootstrapping of reading classes from the snapshot. Some classes are not |
| // stored in the object store, yet we still need to create their Class |
| // objects so that they get put into the class_table (as a side effect of |
| // Class::New()). |
| cls = Class::New<Instance, RTN::Instance>(kInstanceCid, isolate_group); |
| object_store->set_object_class(cls); |
| |
| cls = Class::New<LibraryPrefix, RTN::LibraryPrefix>(isolate_group); |
| cls = Class::New<Type, RTN::Type>(isolate_group); |
| cls = Class::New<FunctionType, RTN::FunctionType>(isolate_group); |
| cls = Class::New<RecordType, RTN::RecordType>(isolate_group); |
| cls = Class::New<TypeParameter, RTN::TypeParameter>(isolate_group); |
| |
| cls = Class::New<Array, RTN::Array>(isolate_group); |
| object_store->set_array_class(cls); |
| |
| cls = Class::New<Array, RTN::Array>(kImmutableArrayCid, isolate_group); |
| object_store->set_immutable_array_class(cls); |
| |
| cls = Class::New<GrowableObjectArray, RTN::GrowableObjectArray>( |
| isolate_group); |
| object_store->set_growable_object_array_class(cls); |
| |
| cls = Class::New<Map, RTN::Map>(isolate_group); |
| object_store->set_map_impl_class(cls); |
| |
| cls = Class::New<Map, RTN::Map>(kConstMapCid, isolate_group); |
| object_store->set_const_map_impl_class(cls); |
| |
| cls = Class::New<Set, RTN::Set>(isolate_group); |
| object_store->set_set_impl_class(cls); |
| |
| cls = Class::New<Set, RTN::Set>(kConstSetCid, isolate_group); |
| object_store->set_const_set_impl_class(cls); |
| |
| cls = Class::New<Float32x4, RTN::Float32x4>(isolate_group); |
| object_store->set_float32x4_class(cls); |
| |
| cls = Class::New<Int32x4, RTN::Int32x4>(isolate_group); |
| object_store->set_int32x4_class(cls); |
| |
| cls = Class::New<Float64x2, RTN::Float64x2>(isolate_group); |
| object_store->set_float64x2_class(cls); |
| |
| #define REGISTER_TYPED_DATA_CLASS(clazz) \ |
| cls = Class::NewTypedDataClass(kTypedData##clazz##Cid, isolate_group); |
| CLASS_LIST_TYPED_DATA(REGISTER_TYPED_DATA_CLASS); |
| #undef REGISTER_TYPED_DATA_CLASS |
| #define REGISTER_TYPED_DATA_VIEW_CLASS(clazz) \ |
| cls = \ |
| Class::NewTypedDataViewClass(kTypedData##clazz##ViewCid, isolate_group); \ |
| cls = Class::NewUnmodifiableTypedDataViewClass( \ |
| kUnmodifiableTypedData##clazz##ViewCid, isolate_group); |
| CLASS_LIST_TYPED_DATA(REGISTER_TYPED_DATA_VIEW_CLASS); |
| #undef REGISTER_TYPED_DATA_VIEW_CLASS |
| cls = Class::NewTypedDataViewClass(kByteDataViewCid, isolate_group); |
| cls = Class::NewUnmodifiableTypedDataViewClass(kUnmodifiableByteDataViewCid, |
| isolate_group); |
| #define REGISTER_EXT_TYPED_DATA_CLASS(clazz) \ |
| cls = Class::NewExternalTypedDataClass(kExternalTypedData##clazz##Cid, \ |
| isolate_group); |
| CLASS_LIST_TYPED_DATA(REGISTER_EXT_TYPED_DATA_CLASS); |
| #undef REGISTER_EXT_TYPED_DATA_CLASS |
| |
| cls = Class::New<Instance, RTN::Instance>(kFfiNativeTypeCid, isolate_group); |
| object_store->set_ffi_native_type_class(cls); |
| |
| #define REGISTER_FFI_CLASS(clazz) \ |
| cls = Class::New<Instance, RTN::Instance>(kFfi##clazz##Cid, isolate_group); |
| CLASS_LIST_FFI_TYPE_MARKER(REGISTER_FFI_CLASS); |
| #undef REGISTER_FFI_CLASS |
| |
| cls = Class::New<Instance, RTN::Instance>(kFfiNativeFunctionCid, |
| isolate_group); |
| |
| cls = Class::NewPointerClass(kPointerCid, isolate_group); |
| object_store->set_ffi_pointer_class(cls); |
| |
| cls = Class::New<DynamicLibrary, RTN::DynamicLibrary>(kDynamicLibraryCid, |
| isolate_group); |
| |
| cls = Class::New<Instance, RTN::Instance>(kByteBufferCid, isolate_group, |
| /*register_class=*/false); |
| cls.set_instance_size_in_words(0, 0); |
| isolate_group->class_table()->Register(cls); |
| |
| cls = Class::New<Integer, RTN::Integer>(isolate_group); |
| object_store->set_integer_implementation_class(cls); |
| |
| cls = Class::New<Smi, RTN::Smi>(isolate_group); |
| object_store->set_smi_class(cls); |
| |
| cls = Class::New<Mint, RTN::Mint>(isolate_group); |
| object_store->set_mint_class(cls); |
| |
| cls = Class::New<Double, RTN::Double>(isolate_group); |
| object_store->set_double_class(cls); |
| |
| cls = Class::New<Closure, RTN::Closure>(isolate_group); |
| object_store->set_closure_class(cls); |
| |
| cls = Class::New<Record, RTN::Record>(isolate_group); |
| |
| cls = Class::NewStringClass(kOneByteStringCid, isolate_group); |
| object_store->set_one_byte_string_class(cls); |
| |
| cls = Class::NewStringClass(kTwoByteStringCid, isolate_group); |
| object_store->set_two_byte_string_class(cls); |
| |
| cls = Class::New<Bool, RTN::Bool>(isolate_group); |
| object_store->set_bool_class(cls); |
| |
| cls = Class::New<Instance, RTN::Instance>(kNullCid, isolate_group); |
| object_store->set_null_class(cls); |
| |
| cls = Class::New<Instance, RTN::Instance>(kNeverCid, isolate_group); |
| object_store->set_never_class(cls); |
| |
| cls = Class::New<Capability, RTN::Capability>(isolate_group); |
| cls = Class::New<ReceivePort, RTN::ReceivePort>(isolate_group); |
| cls = Class::New<SendPort, RTN::SendPort>(isolate_group); |
| cls = Class::New<StackTrace, RTN::StackTrace>(isolate_group); |
| cls = Class::New<SuspendState, RTN::SuspendState>(isolate_group); |
| cls = Class::New<RegExp, RTN::RegExp>(isolate_group); |
| cls = Class::New<Number, RTN::Number>(isolate_group); |
| |
| cls = Class::New<WeakProperty, RTN::WeakProperty>(isolate_group); |
| object_store->set_weak_property_class(cls); |
| cls = Class::New<WeakReference, RTN::WeakReference>(isolate_group); |
| object_store->set_weak_reference_class(cls); |
| cls = Class::New<Finalizer, RTN::Finalizer>(isolate_group); |
| object_store->set_finalizer_class(cls); |
| cls = Class::New<NativeFinalizer, RTN::NativeFinalizer>(isolate_group); |
| object_store->set_native_finalizer_class(cls); |
| cls = Class::New<FinalizerEntry, RTN::FinalizerEntry>(isolate_group); |
| object_store->set_finalizer_entry_class(cls); |
| |
| cls = Class::New<MirrorReference, RTN::MirrorReference>(isolate_group); |
| cls = Class::New<UserTag, RTN::UserTag>(isolate_group); |
| cls = Class::New<FutureOr, RTN::FutureOr>(isolate_group); |
| object_store->set_future_or_class(cls); |
| cls = Class::New<TransferableTypedData, RTN::TransferableTypedData>( |
| isolate_group); |
| } |
| return Error::null(); |
| } |
| |
| #if defined(DEBUG) |
| bool Object::InVMIsolateHeap() const { |
| return ptr()->untag()->InVMIsolateHeap(); |
| } |
| #endif // DEBUG |
| |
| void Object::Print() const { |
| THR_Print("%s\n", ToCString()); |
| } |
| |
| StringPtr Object::DictionaryName() const { |
| return String::null(); |
| } |
| |
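| // Decides whether freshly allocated instances of class_id get the |
| // immutability bit: predefined cids have a fixed per-cid answer, while |
| // user-defined classes consult the deeply-immutable flag computed for |
| // their Class. |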
| bool Object::ShouldHaveImmutabilityBitSet(classid_t class_id) { |
| if (class_id < kNumPredefinedCids) { |
| return ShouldHaveImmutabilityBitSetCid(class_id); |
| } else { |
| return Class::IsDeeplyImmutable( |
| IsolateGroup::Current()->class_table()->At(class_id)); |
| } |
| } |
| |
| void Object::InitializeObject(uword address, |
| intptr_t class_id, |
| intptr_t size, |
| bool compressed, |
| uword ptr_field_start_offset, |
| uword ptr_field_end_offset) { |
| // Note: we skip the header word here to prevent a racy read in the |
| // concurrent marker, which could otherwise observe the null object when it |
| // reads into a heap page allocated after marking started. |
| uword cur = address + sizeof(UntaggedObject); |
| uword ptr_field_start = address + ptr_field_start_offset; |
| uword ptr_field_end = address + ptr_field_end_offset; |
| uword end = address + size; |
| // The start of pointer fields should always be past the object header, even |
| // if there are no pointer fields (ptr_field_end < ptr_field_start). |
| ASSERT(cur <= ptr_field_start); |
| // The start of pointer fields can be at the end for empty payload objects. |
| ASSERT(ptr_field_start <= end); |
| // The end of pointer fields should always be before the end, as the end of |
| // pointer fields is inclusive (the address of the last field to initialize). |
| ASSERT(ptr_field_end < end); |
| bool needs_init = true; |
| if (IsTypedDataBaseClassId(class_id) || class_id == kArrayCid) { |
| // If the size is greater than both kNewAllocatableSize and |
| // kAllocatablePageSize, the object must have been allocated to a new |
| // large page, which must already have been zero initialized by the OS. |
| // Note that zero is a GC-safe value. |
| // |
| // For arrays, the caller will then initialize the fields to null with |
| // safepoint checks to avoid blocking for the full duration of |
| // initializing this array. |
| needs_init = |
| IsAllocatableInNewSpace(size) || IsAllocatableViaFreeLists(size); |
| } |
| if (needs_init) { |
| // Initialize the memory prior to any pointer fields with 0. (This loop |
| // and the next will be a no-op if the object has no pointer fields.) |
| uword initial_value = 0; |
| while (cur < ptr_field_start) { |
| *reinterpret_cast<uword*>(cur) = initial_value; |
| cur += kWordSize; |
| } |
| // Initialize any pointer fields with Object::null(). |
| initial_value = static_cast<uword>(null_); |
| #if defined(DART_COMPRESSED_POINTERS) |
| if (compressed) { |
| initial_value &= 0xFFFFFFFF; |
| initial_value |= initial_value << 32; |
| } |
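| // With the null pattern duplicated into both halves of the word, each |
| // full-word store in the loop below writes two compressed null pointers |
| // at once. |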
| const bool has_pointer_fields = ptr_field_start <= ptr_field_end; |
| // If there are compressed pointer fields and the first compressed pointer |
| // field is not at a word start, then initialize it to Object::null(). |
| if (compressed && has_pointer_fields && |
| (ptr_field_start % kWordSize != 0)) { |
| *reinterpret_cast<compressed_uword*>(ptr_field_start) = initial_value; |
| } |
| #endif |
| while (cur <= ptr_field_end) { |
| *reinterpret_cast<uword*>(cur) = initial_value; |
| cur += kWordSize; |
| } |
| // Initialize the memory after any pointer fields with 0, unless this is |
| // an instructions object in which case we use the break instruction. |
| initial_value = class_id == kInstructionsCid ? kBreakInstructionFiller : 0; |
| #if defined(DART_COMPRESSED_POINTERS) |
| // If there are compressed pointer fields and the last compressed pointer |
| // field is the start of a word, then initialize the other part of the word |
| // to the new initial value. |
| // |
| // (We're guaranteed there's always space in the object after the last |
| // pointer field in this case since objects are allocated in multiples of |
| // the word size.) |
| if (compressed && has_pointer_fields && (ptr_field_end % kWordSize == 0)) { |
| *reinterpret_cast<compressed_uword*>(ptr_field_end + |
| kCompressedWordSize) = initial_value; |
| } |
| #endif |
| while (cur < end) { |
| *reinterpret_cast<uword*>(cur) = initial_value; |
| cur += kWordSize; |
| } |
| } else { |
| // Check that MemorySanitizer understands this is initialized. |
| MSAN_CHECK_INITIALIZED(reinterpret_cast<void*>(address), size); |
| #if defined(DEBUG) |
| const uword initial_value = 0; |
| while (cur < end) { |
| ASSERT_EQUAL(*reinterpret_cast<uword*>(cur), initial_value); |
| cur += kWordSize; |
| } |
| #endif |
| } |
| uword tags = 0; |
| ASSERT(class_id != kIllegalCid); |
| tags = UntaggedObject::ClassIdTag::update(class_id, tags); |
| tags = UntaggedObject::SizeTag::update(size, tags); |
| const bool is_old = |
| (address & kNewObjectAlignmentOffset) == kOldObjectAlignmentOffset; |
| tags = UntaggedObject::AlwaysSetBit::update(true, tags); |
| tags = UntaggedObject::NotMarkedBit::update(true, tags); |
| tags = UntaggedObject::OldAndNotRememberedBit::update(is_old, tags); |
| tags = UntaggedObject::NewOrEvacuationCandidateBit::update(!is_old, tags); |
| tags = UntaggedObject::ImmutableBit::update( |
| Object::ShouldHaveImmutabilityBitSet(class_id), tags); |
| #if defined(HASH_IN_OBJECT_HEADER) |
| tags = UntaggedObject::HashTag::update(0, tags); |
| #endif |
| reinterpret_cast<UntaggedObject*>(address)->tags_ = tags; |
| } |
| |
| void Object::CheckHandle() const { |
| #if defined(DEBUG) |
| if (ptr_ != Object::null()) { |
| intptr_t cid = ptr_->GetClassId(); |
| if (cid >= kNumPredefinedCids) { |
| cid = kInstanceCid; |
| } |
| ASSERT(vtable() == builtin_vtables_[cid]); |
| } |
| #endif |
| } |
| |
| ObjectPtr Object::Allocate(intptr_t cls_id, |
| intptr_t size, |
| Heap::Space space, |
| bool compressed, |
| uword ptr_field_start_offset, |
| uword ptr_field_end_offset) { |
| ASSERT(Utils::IsAligned(size, kObjectAlignment)); |
| Thread* thread = Thread::Current(); |
| ASSERT(thread->execution_state() == Thread::kThreadInVM); |
| ASSERT(thread->no_safepoint_scope_depth() == 0); |
| ASSERT(thread->no_callback_scope_depth() == 0); |
| Heap* heap = thread->heap(); |
| |
| uword address = heap->Allocate(thread, size, space); |
| if (UNLIKELY(address == 0)) { |
| // SuspendLongJumpScope during Dart entry ensures that if a longjmp base is |
| // available, it is the innermost error handler, so check for a longjmp base |
| // before checking for an exit frame. |
| if (thread->long_jump_base() != nullptr) { |
| Report::LongJump(Object::out_of_memory_error()); |
| UNREACHABLE(); |
| } else if (thread->top_exit_frame_info() != 0) { |
| // Use the preallocated out of memory exception to avoid calling |
| // into dart code or allocating any code. |
| Exceptions::ThrowOOM(); |
| UNREACHABLE(); |
| } else { |
| // Nowhere to propagate an exception to. |
| OUT_OF_MEMORY(); |
| } |
| } |
| |
| ObjectPtr raw_obj; |
| NoSafepointScope no_safepoint(thread); |
| InitializeObject(address, cls_id, size, compressed, ptr_field_start_offset, |
| ptr_field_end_offset); |
| raw_obj = static_cast<ObjectPtr>(address + kHeapObjectTag); |
| ASSERT(cls_id == UntaggedObject::ClassIdTag::decode(raw_obj->untag()->tags_)); |
| if (raw_obj->IsOldObject() && UNLIKELY(thread->is_marking())) { |
| // Black allocation. Prevents a data race between the mutator and |
| // concurrent marker on ARM and ARM64 (the marker may observe a |
| // publishing store of this object before the stores that initialize its |
| // slots), and helps the collection to finish sooner. |
| // release: Setting the mark bit must not be ordered after a publishing |
| // store of this object. Compare Scavenger::ScavengePointer. |
| raw_obj->untag()->SetMarkBitRelease(); |
| heap->old_space()->AllocateBlack(size); |
| } |
| |
| #if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER) |
| HeapProfileSampler& heap_sampler = thread->heap_sampler(); |
| if (heap_sampler.HasOutstandingSample()) { |
| thread->IncrementNoCallbackScopeDepth(); |
| void* data = heap_sampler.InvokeCallbackForLastSample(cls_id); |
| heap->SetHeapSamplingData(raw_obj, data); |
| thread->DecrementNoCallbackScopeDepth(); |
| } |
| #endif // !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER) |
| |
| #if !defined(PRODUCT) |
| auto class_table = thread->isolate_group()->class_table(); |
| if (class_table->ShouldTraceAllocationFor(cls_id)) { |
| uint32_t hash = |
| HeapSnapshotWriter::GetHeapSnapshotIdentityHash(thread, raw_obj); |
| Profiler::SampleAllocation(thread, cls_id, hash); |
| } |
| #endif // !defined(PRODUCT) |
| return raw_obj; |
| } |
| |
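// Re-applies write-barrier checks to every pointer slot of an old-space
// object whose body was filled without barriers (see Object::Clone below):
// array slots go through CheckArrayPointerStore (remembered set / card
// marking) and all other slots through CheckHeapPointerStore. Note that both
// the uncompressed and compressed [from, to] slot ranges are inclusive of
// `to`.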
| class WriteBarrierUpdateVisitor : public ObjectPointerVisitor { |
| public: |
| explicit WriteBarrierUpdateVisitor(Thread* thread, ObjectPtr obj) |
| : ObjectPointerVisitor(thread->isolate_group()), |
| thread_(thread), |
| old_obj_(obj) { |
| ASSERT(old_obj_->IsOldObject()); |
| } |
| |
| void VisitPointers(ObjectPtr* from, ObjectPtr* to) override { |
| if (old_obj_->IsArray()) { |
| for (ObjectPtr* slot = from; slot <= to; ++slot) { |
| ObjectPtr value = *slot; |
| if (value->IsHeapObject()) { |
| old_obj_->untag()->CheckArrayPointerStore(slot, value, thread_); |
| } |
| } |
| } else { |
| for (ObjectPtr* slot = from; slot <= to; ++slot) { |
| ObjectPtr value = *slot; |
| if (value->IsHeapObject()) { |
| old_obj_->untag()->CheckHeapPointerStore(value, thread_); |
| } |
| } |
| } |
| } |
| |
| #if defined(DART_COMPRESSED_POINTERS) |
| void VisitCompressedPointers(uword heap_base, |
| CompressedObjectPtr* from, |
| CompressedObjectPtr* to) override { |
| if (old_obj_->IsArray()) { |
| for (CompressedObjectPtr* slot = from; slot <= to; ++slot) { |
| ObjectPtr value = slot->Decompress(heap_base); |
| if (value->IsHeapObject()) { |
| old_obj_->untag()->CheckArrayPointerStore(slot, value, thread_); |
| } |
| } |
| } else { |
| for (CompressedObjectPtr* slot = from; slot <= to; ++slot) { |
| ObjectPtr value = slot->Decompress(heap_base); |
| if (value->IsHeapObject()) { |
| old_obj_->untag()->CheckHeapPointerStore(value, thread_); |
| } |
| } |
| } |
| } |
| #endif |
| |
| private: |
| Thread* thread_; |
| ObjectPtr old_obj_; |
| |
| DISALLOW_COPY_AND_ASSIGN(WriteBarrierUpdateVisitor); |
| }; |
| |
| #if defined(DEBUG) |
| bool Object::IsZoneHandle() const { |
| return VMHandles::IsZoneHandle(reinterpret_cast<uword>(this)); |
| } |
| |
| bool Object::IsReadOnlyHandle() const { |
| return Dart::IsReadOnlyHandle(reinterpret_cast<uword>(this)); |
| } |
| |
| bool Object::IsNotTemporaryScopedHandle() const { |
| return (IsZoneHandle() || IsReadOnlyHandle()); |
| } |
| #endif |
| |
| ObjectPtr Object::Clone(const Object& orig, |
| Heap::Space space, |
| bool load_with_relaxed_atomics) { |
| ASSERT(orig.ptr()->IsHeapObject()); |
| // Generic function types should be cloned with FunctionType::Clone. |
| ASSERT(!orig.IsFunctionType() || !FunctionType::Cast(orig).IsGeneric()); |
| const Class& cls = Class::Handle(orig.clazz()); |
| intptr_t size = orig.ptr()->untag()->HeapSize(); |
| // All fields (including non-SmiPtr fields) will be initialized with Smi 0, |
| // but the contents of the original object are copied over before the thread |
| // is allowed to reach a safepoint. |
| ObjectPtr raw_clone = |
| Object::Allocate(cls.id(), size, space, cls.HasCompressedPointers(), |
| from_offset<Object>(), to_offset<Object>()); |
| NoSafepointScope no_safepoint; |
| // Copy the body of the original into the clone. |
| uword orig_addr = UntaggedObject::ToAddr(orig.ptr()); |
| uword clone_addr = UntaggedObject::ToAddr(raw_clone); |
| const intptr_t kHeaderSizeInBytes = sizeof(UntaggedObject); |
| if (load_with_relaxed_atomics) { |
| auto orig_atomics_ptr = reinterpret_cast<std::atomic<uword>*>(orig_addr); |
| auto clone_ptr = reinterpret_cast<uword*>(clone_addr); |
| for (intptr_t i = kHeaderSizeInBytes / kWordSize; i < size / kWordSize; |
| i++) { |
| *(clone_ptr + i) = |
| (orig_atomics_ptr + i)->load(std::memory_order_relaxed); |
| } |
| } else { |
| memmove(reinterpret_cast<uint8_t*>(clone_addr + kHeaderSizeInBytes), |
| reinterpret_cast<uint8_t*>(orig_addr + kHeaderSizeInBytes), |
| size - kHeaderSizeInBytes); |
| } |
| |
| if (IsTypedDataClassId(raw_clone->GetClassIdOfHeapObject())) { |
| auto raw_typed_data = TypedData::RawCast(raw_clone); |
| raw_typed_data.untag()->RecomputeDataField(); |
| } |
| |
| // Add clone to store buffer, if needed. |
| if (!raw_clone->IsOldObject()) { |
| // No need to remember an object in new space. |
| return raw_clone; |
| } |
| WriteBarrierUpdateVisitor visitor(Thread::Current(), raw_clone); |
| raw_clone->untag()->VisitPointers(&visitor); |
| return raw_clone; |
| } |
| |
| bool Class::HasCompressedPointers() const { |
| const intptr_t cid = id(); |
| switch (cid) { |
| case kByteBufferCid: |
| return ByteBuffer::ContainsCompressedPointers(); |
| #define HANDLE_CASE(clazz) \ |
| case k##clazz##Cid: \ |
| return dart::clazz::ContainsCompressedPointers(); |
| CLASS_LIST(HANDLE_CASE) |
| #undef HANDLE_CASE |
| #define HANDLE_CASE(clazz) \ |
| case kTypedData##clazz##Cid: \ |
| return dart::TypedData::ContainsCompressedPointers(); \ |
| case kTypedData##clazz##ViewCid: \ |
| case kUnmodifiableTypedData##clazz##ViewCid: \ |
| return dart::TypedDataView::ContainsCompressedPointers(); \ |
| case kExternalTypedData##clazz##Cid: \ |
| return dart::ExternalTypedData::ContainsCompressedPointers(); |
| CLASS_LIST_TYPED_DATA(HANDLE_CASE) |
| #undef HANDLE_CASE |
| default: |
| if (cid >= kNumPredefinedCids) { |
| return dart::Instance::ContainsCompressedPointers(); |
| } |
| } |
| FATAL("Unsupported class for compressed pointers translation: %s (id=%" Pd |
| ", kNumPredefinedCids=%" Pd ")\n", |
| ToCString(), cid, kNumPredefinedCids); |
| return false; |
| } |
| |
| StringPtr Class::Name() const { |
| return untag()->name(); |
| } |
| |
| StringPtr Class::ScrubbedName() const { |
| return Symbols::New(Thread::Current(), ScrubbedNameCString()); |
| } |
| |
| const char* Class::ScrubbedNameCString() const { |
| return String::ScrubName(String::Handle(Name())); |
| } |
| |
| StringPtr Class::UserVisibleName() const { |
| #if !defined(PRODUCT) |
| ASSERT(untag()->user_name() != String::null()); |
| return untag()->user_name(); |
| #endif // !defined(PRODUCT) |
| // No caching in PRODUCT, regenerate. |
| return Symbols::New(Thread::Current(), GenerateUserVisibleName()); |
| } |
| |
| const char* Class::UserVisibleNameCString() const { |
| #if !defined(PRODUCT) |
| ASSERT(untag()->user_name() != String::null()); |
| return String::Handle(untag()->user_name()).ToCString(); |
| #endif // !defined(PRODUCT) |
| return GenerateUserVisibleName(); // No caching in PRODUCT, regenerate. |
| } |
| |
| const char* Class::NameCString(NameVisibility name_visibility) const { |
| switch (name_visibility) { |
| case Object::kInternalName: |
| return String::Handle(Name()).ToCString(); |
| case Object::kScrubbedName: |
| return ScrubbedNameCString(); |
| case Object::kUserVisibleName: |
| return UserVisibleNameCString(); |
| default: |
| UNREACHABLE(); |
| return nullptr; |
| } |
| } |
| |
| ClassPtr Class::Mixin() const { |
| if (is_transformed_mixin_application()) { |
| const Array& interfaces = Array::Handle(this->interfaces()); |
| const Type& mixin_type = |
| Type::Handle(Type::RawCast(interfaces.At(interfaces.Length() - 1))); |
| return mixin_type.type_class(); |
| } |
| return ptr(); |
| } |
| |
| bool Class::IsInFullSnapshot() const { |
| NoSafepointScope no_safepoint; |
| return UntaggedLibrary::InFullSnapshotBit::decode( |
| untag()->library()->untag()->flags_); |
| } |
| |
| TypePtr Class::RareType() const { |
| if (!IsGeneric()) { |
| return DeclarationType(); |
| } |
| ASSERT(is_declaration_loaded()); |
| Thread* const thread = Thread::Current(); |
| Zone* const zone = thread->zone(); |
| const auto& inst_to_bounds = |
| TypeArguments::Handle(zone, DefaultTypeArguments(zone)); |
| ASSERT(inst_to_bounds.ptr() != Object::empty_type_arguments().ptr()); |
| auto& type = Type::Handle( |
| zone, Type::New(*this, inst_to_bounds, Nullability::kNonNullable)); |
| type ^= ClassFinalizer::FinalizeType(type); |
| return type.ptr(); |
| } |
| |
| template <class FakeObject, class TargetFakeObject> |
| ClassPtr Class::New(IsolateGroup* isolate_group, bool register_class) { |
| ASSERT(Object::class_class() != Class::null()); |
| const auto& result = Class::Handle(Object::Allocate<Class>(Heap::kOld)); |
| Object::VerifyBuiltinVtable<FakeObject>(FakeObject::kClassId); |
| NOT_IN_PRECOMPILED(result.set_token_pos(TokenPosition::kNoSource)); |
| NOT_IN_PRECOMPILED(result.set_end_token_pos(TokenPosition::kNoSource)); |
| result.set_instance_size(FakeObject::InstanceSize(), |
| compiler::target::RoundedAllocationSize( |
| TargetFakeObject::InstanceSize())); |
| result.set_type_arguments_field_offset_in_words(kNoTypeArguments, |
| RTN::Class::kNoTypeArguments); |
| const intptr_t host_next_field_offset = FakeObject::NextFieldOffset(); |
| const intptr_t target_next_field_offset = TargetFakeObject::NextFieldOffset(); |
| result.set_next_field_offset(host_next_field_offset, |
| target_next_field_offset); |
| COMPILE_ASSERT((FakeObject::kClassId != kInstanceCid)); |
| result.set_id(FakeObject::kClassId); |
| NOT_IN_PRECOMPILED(result.set_implementor_cid(kIllegalCid)); |
| result.set_num_type_arguments_unsafe(0); |
| result.set_num_native_fields(0); |
| result.set_state_bits(0); |
| if (IsInternalOnlyClassId(FakeObject::kClassId) || |
| (FakeObject::kClassId == kTypeArgumentsCid)) { |
| // VM internal classes are done. There is no finalization needed or |
| // possible in this case. |
| result.set_is_declaration_loaded(); |
| result.set_is_type_finalized(); |
| result.set_is_allocate_finalized(); |
| } else if (FakeObject::kClassId != kClosureCid) { |
| // VM backed classes are almost ready: run checks and resolve class |
| // references, but do not recompute size. |
| result.set_is_prefinalized(); |
| } |
| if (FakeObject::kClassId < kNumPredefinedCids && |
| IsDeeplyImmutableCid(FakeObject::kClassId)) { |
| result.set_is_deeply_immutable(true); |
| } |
| NOT_IN_PRECOMPILED(result.set_kernel_offset(0)); |
| result.InitEmptyFields(); |
| if (register_class) { |
| isolate_group->class_table()->Register(result); |
| } |
| return result.ptr(); |
| } |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) || defined(DART_DYNAMIC_MODULES) |
| static void ReportTooManyTypeArguments(const Class& cls) { |
| Report::MessageF(Report::kError, Script::Handle(cls.script()), |
| cls.token_pos(), Report::AtLocation, |
| "too many type parameters declared in class '%s' or in its " |
| "super classes", |
| String::Handle(cls.Name()).ToCString()); |
| UNREACHABLE(); |
| } |
| #endif // !defined(DART_PRECOMPILED_RUNTIME) || defined(DART_DYNAMIC_MODULES) |
| |
| void Class::set_num_type_arguments(intptr_t value) const { |
| #if defined(DART_PRECOMPILED_RUNTIME) && !defined(DART_DYNAMIC_MODULES) |
| UNREACHABLE(); |
| #else |
| if (!Utils::IsInt(16, value)) { |
| ReportTooManyTypeArguments(*this); |
| } |
| // We allow concurrent calculation of the number of type arguments. If two |
| // threads perform this operation it doesn't matter which one wins. |
| DEBUG_ONLY(intptr_t old_value = num_type_arguments()); |
| DEBUG_ASSERT(old_value == kUnknownNumTypeArguments || old_value == value); |
| StoreNonPointer<int16_t, int16_t, std::memory_order_relaxed>( |
| &untag()->num_type_arguments_, value); |
| #endif // defined(DART_PRECOMPILED_RUNTIME) && !defined(DART_DYNAMIC_MODULES) |
| } |
| |
| void Class::set_num_type_arguments_unsafe(intptr_t value) const { |
| StoreNonPointer(&untag()->num_type_arguments_, value); |
| } |
| |
| void Class::set_has_pragma(bool value) const { |
| set_state_bits(HasPragmaBit::update(value, state_bits())); |
| } |
| |
| void Class::set_is_isolate_unsendable(bool value) const { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| set_state_bits(IsIsolateUnsendableBit::update(value, state_bits())); |
| } |
| |
| void Class::set_is_isolate_unsendable_due_to_pragma(bool value) const { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| set_state_bits( |
| IsIsolateUnsendableDueToPragmaBit::update(value, state_bits())); |
| } |
| |
| void Class::set_is_deeply_immutable(bool value) const { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| set_state_bits(IsDeeplyImmutableBit::update(value, state_bits())); |
| } |
| |
| void Class::set_is_future_subtype(bool value) const { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| set_state_bits(IsFutureSubtypeBit::update(value, state_bits())); |
| } |
| |
| void Class::set_can_be_future(bool value) const { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| set_state_bits(CanBeFutureBit::update(value, state_bits())); |
| } |
| |
| void Class::set_is_dynamically_extendable(bool value) const { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| set_state_bits(IsDynamicallyExtendableBit::update(value, state_bits())); |
| } |
| |
| void Class::set_has_dynamically_extendable_subtypes(bool value) const { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| set_state_bits( |
| HasDynamicallyExtendableSubtypesBit::update(value, state_bits())); |
| } |
| |
| // Initialize class fields of type Array with empty array. |
| void Class::InitEmptyFields() const { |
| if (Object::empty_array().ptr() == Array::null()) { |
| // The empty array has not been initialized yet. |
| return; |
| } |
| untag()->set_interfaces(Object::empty_array().ptr()); |
| untag()->set_constants(Object::null_array().ptr()); |
| set_functions(Object::empty_array()); |
| set_fields(Object::empty_array()); |
| set_invocation_dispatcher_cache(Object::empty_array()); |
| } |
| |
| ArrayPtr Class::OffsetToFieldMap( |
| ClassTable* class_table /* = nullptr */) const { |
| ASSERT(is_finalized()); |
| if (untag()->offset_in_words_to_field<std::memory_order_acquire>() == |
| Array::null()) { |
| // Even if multiple threads are calling this concurrently, all of them would |
| // compute the same array, so we intentionally don't acquire any locks here. |
| const intptr_t length = untag()->host_instance_size_in_words_; |
| const Array& array = Array::Handle(Array::New(length, Heap::kOld)); |
| Class& cls = Class::Handle(this->ptr()); |
| Array& fields = Array::Handle(); |
| Field& f = Field::Handle(); |
| while (!cls.IsNull()) { |
| fields = cls.fields(); |
| for (intptr_t i = 0; i < fields.Length(); ++i) { |
| f ^= fields.At(i); |
| if (f.is_instance()) { |
| array.SetAt(f.HostOffset() >> kCompressedWordSizeLog2, f); |
| } |
| } |
| cls = cls.SuperClass(class_table); |
| } |
| untag()->set_offset_in_words_to_field<std::memory_order_release>( |
| array.ptr()); |
| } |
| return untag()->offset_in_words_to_field<std::memory_order_acquire>(); |
| } |
| |
| bool Class::HasInstanceFields() const { |
| const Array& field_array = Array::Handle(fields()); |
| Field& field = Field::Handle(); |
| for (intptr_t i = 0; i < field_array.Length(); ++i) { |
| field ^= field_array.At(i); |
| if (!field.is_static()) { |
| return true; |
| } |
| } |
| return false; |
| } |
| |
| class FunctionName { |
| public: |
| FunctionName(const String& name, String* tmp_string) |
| : name_(name), tmp_string_(tmp_string) {} |
| bool Matches(const Function& function) const { |
| if (name_.IsSymbol()) { |
| return name_.ptr() == function.name(); |
| } else { |
| *tmp_string_ = function.name(); |
| return name_.Equals(*tmp_string_); |
| } |
| } |
| intptr_t Hash() const { return name_.Hash(); } |
| |
| private: |
| const String& name_; |
| String* tmp_string_; |
| }; |
| |
| // Traits for looking up Functions by name. |
| class ClassFunctionsTraits { |
| public: |
| static const char* Name() { return "ClassFunctionsTraits"; } |
| static bool ReportStats() { return false; } |
| |
| // Called when growing the table. |
| static bool IsMatch(const Object& a, const Object& b) { |
| ASSERT(a.IsFunction() && b.IsFunction()); |
| // Function objects are always canonical. |
| return a.ptr() == b.ptr(); |
| } |
| static bool IsMatch(const FunctionName& name, const Object& obj) { |
| return name.Matches(Function::Cast(obj)); |
| } |
| static uword Hash(const Object& key) { |
| return String::HashRawSymbol(Function::Cast(key).name()); |
| } |
| static uword Hash(const FunctionName& name) { return name.Hash(); } |
| }; |
| typedef UnorderedHashSet<ClassFunctionsTraits> ClassFunctionsSet; |
| |
| void Class::SetFunctions(const Array& value) const { |
| ASSERT(!value.IsNull()); |
| const intptr_t len = value.Length(); |
| #if defined(DEBUG) |
| Thread* thread = Thread::Current(); |
| ASSERT(thread->isolate_group()->program_lock()->IsCurrentThreadWriter()); |
| if (is_finalized()) { |
| Function& function = Function::Handle(); |
| FunctionType& signature = FunctionType::Handle(); |
| for (intptr_t i = 0; i < len; ++i) { |
| function ^= value.At(i); |
| signature = function.signature(); |
| ASSERT(signature.IsFinalized()); |
| } |
| } |
| #endif |
| set_functions(value); |
| if (len >= kFunctionLookupHashThreshold) { |
| ClassFunctionsSet set(HashTables::New<ClassFunctionsSet>(len, Heap::kOld)); |
| Function& func = Function::Handle(); |
| for (intptr_t i = 0; i < len; ++i) { |
| func ^= value.At(i); |
| // Verify that all the functions in the array have this class as owner. |
| ASSERT(func.Owner() == ptr()); |
| set.Insert(func); |
| } |
| untag()->set_functions_hash_table(set.Release().ptr()); |
| } else { |
| untag()->set_functions_hash_table(Array::null()); |
| } |
| } |
| |
| void Class::AddFunction(const Function& function) const { |
| #if defined(DEBUG) |
| Thread* thread = Thread::Current(); |
| ASSERT(thread->IsDartMutatorThread()); |
| ASSERT(thread->isolate_group()->program_lock()->IsCurrentThreadWriter()); |
| ASSERT(!is_finalized() || |
| FunctionType::Handle(function.signature()).IsFinalized()); |
| #endif |
| const Array& arr = Array::Handle(functions()); |
| const Array& new_array = |
| Array::Handle(Array::Grow(arr, arr.Length() + 1, Heap::kOld)); |
| new_array.SetAt(arr.Length(), function); |
| set_functions(new_array); |
| // Add to hash table, if any. |
| const intptr_t new_len = new_array.Length(); |
| if (new_len == kFunctionLookupHashThreshold) { |
| // Transition to using hash table. |
| SetFunctions(new_array); |
| } else if (new_len > kFunctionLookupHashThreshold) { |
| ClassFunctionsSet set(untag()->functions_hash_table()); |
| set.Insert(function); |
| untag()->set_functions_hash_table(set.Release().ptr()); |
| } |
| } |
| |
| intptr_t Class::FindFunctionIndex(const Function& needle) const { |
| Thread* thread = Thread::Current(); |
| if (EnsureIsFinalized(thread) != Error::null()) { |
| return -1; |
| } |
| REUSABLE_ARRAY_HANDLESCOPE(thread); |
| REUSABLE_FUNCTION_HANDLESCOPE(thread); |
| Array& funcs = thread->ArrayHandle(); |
| Function& function = thread->FunctionHandle(); |
| funcs = current_functions(); |
| ASSERT(!funcs.IsNull()); |
| const intptr_t len = funcs.Length(); |
| for (intptr_t i = 0; i < len; i++) { |
| function ^= funcs.At(i); |
| if (needle.ptr() == function.ptr()) { |
| return i; |
| } |
| } |
| // No function found. |
| return -1; |
| } |
| |
| FunctionPtr Class::FunctionFromIndex(intptr_t idx) const { |
| const Array& funcs = Array::Handle(current_functions()); |
| if ((idx < 0) || (idx >= funcs.Length())) { |
| return Function::null(); |
| } |
| Function& func = Function::Handle(); |
| func ^= funcs.At(idx); |
| ASSERT(!func.IsNull()); |
| return func.ptr(); |
| } |
| |
| FunctionPtr Class::ImplicitClosureFunctionFromIndex(intptr_t idx) const { |
| Function& func = Function::Handle(FunctionFromIndex(idx)); |
| if (func.IsNull() || !func.HasImplicitClosureFunction()) { |
| return Function::null(); |
| } |
| func = func.ImplicitClosureFunction(); |
| ASSERT(!func.IsNull()); |
| return func.ptr(); |
| } |
| |
| intptr_t Class::FindImplicitClosureFunctionIndex(const Function& needle) const { |
| Thread* thread = Thread::Current(); |
| if (EnsureIsFinalized(thread) != Error::null()) { |
| return -1; |
| } |
| REUSABLE_ARRAY_HANDLESCOPE(thread); |
| REUSABLE_FUNCTION_HANDLESCOPE(thread); |
| Array& funcs = thread->ArrayHandle(); |
| Function& function = thread->FunctionHandle(); |
| funcs = current_functions(); |
| ASSERT(!funcs.IsNull()); |
| Function& implicit_closure = Function::Handle(thread->zone()); |
| const intptr_t len = funcs.Length(); |
| for (intptr_t i = 0; i < len; i++) { |
| function ^= funcs.At(i); |
| implicit_closure = function.implicit_closure_function(); |
| if (implicit_closure.IsNull()) { |
| // Skip non-implicit closure functions. |
| continue; |
| } |
| if (needle.ptr() == implicit_closure.ptr()) { |
| return i; |
| } |
| } |
| // No function found. |
| return -1; |
| } |
| |
| intptr_t Class::FindInvocationDispatcherFunctionIndex( |
| const Function& needle) const { |
| Thread* thread = Thread::Current(); |
| if (EnsureIsFinalized(thread) != Error::null()) { |
| return -1; |
| } |
| REUSABLE_ARRAY_HANDLESCOPE(thread); |
| REUSABLE_OBJECT_HANDLESCOPE(thread); |
| Array& funcs = thread->ArrayHandle(); |
| Object& object = thread->ObjectHandle(); |
| funcs = invocation_dispatcher_cache(); |
| ASSERT(!funcs.IsNull()); |
| const intptr_t len = funcs.Length(); |
| for (intptr_t i = 0; i < len; i++) { |
| object = funcs.At(i); |
| // The invocation_dispatcher_cache is a table with some entries that |
| // are functions. |
| if (object.IsFunction()) { |
| if (Function::Cast(object).ptr() == needle.ptr()) { |
| return i; |
| } |
| } |
| } |
| // No function found. |
| return -1; |
| } |
| |
| FunctionPtr Class::InvocationDispatcherFunctionFromIndex(intptr_t idx) const { |
| Thread* thread = Thread::Current(); |
| REUSABLE_ARRAY_HANDLESCOPE(thread); |
| REUSABLE_OBJECT_HANDLESCOPE(thread); |
| Array& dispatcher_cache = thread->ArrayHandle(); |
| Object& object = thread->ObjectHandle(); |
| dispatcher_cache = invocation_dispatcher_cache(); |
| object = dispatcher_cache.At(idx); |
| if (!object.IsFunction()) { |
| return Function::null(); |
| } |
| return Function::Cast(object).ptr(); |
| } |
| |
| void Class::set_state_bits(intptr_t bits) const { |
| StoreNonPointer<uint32_t, uint32_t, std::memory_order_release>( |
| &untag()->state_bits_, static_cast<uint32_t>(bits)); |
| } |
| |
| void Class::set_library(const Library& value) const { |
| untag()->set_library(value.ptr()); |
| } |
| |
| void Class::set_type_parameters(const TypeParameters& value) const { |
| ASSERT((num_type_arguments() == kUnknownNumTypeArguments) || |
| is_declared_in_bytecode() || is_prefinalized()); |
| untag()->set_type_parameters(value.ptr()); |
| } |
| |
| void Class::set_functions(const Array& value) const { |
| // Ensure all writes to the [Function]s are visible by the time the array |
| // is visible. |
| untag()->set_functions<std::memory_order_release>(value.ptr()); |
| } |
| |
| void Class::set_fields(const Array& value) const { |
| // Ensure all writes to the [Field]s are visible by the time the array |
| // is visible. |
| untag()->set_fields<std::memory_order_release>(value.ptr()); |
| } |
| |
| void Class::set_invocation_dispatcher_cache(const Array& cache) const { |
| // Ensure all writes to the cache are visible by the time the array |
| // is visible. |
| untag()->set_invocation_dispatcher_cache<std::memory_order_release>( |
| cache.ptr()); |
| } |
| |
| void Class::set_declaration_instance_type_arguments( |
| const TypeArguments& value) const { |
| ASSERT(value.IsNull() || (value.IsCanonical() && value.IsOld())); |
| ASSERT((declaration_instance_type_arguments() == TypeArguments::null()) || |
| (declaration_instance_type_arguments() == value.ptr())); |
| untag()->set_declaration_instance_type_arguments<std::memory_order_release>( |
| value.ptr()); |
| } |
| |
| TypeArgumentsPtr Class::GetDeclarationInstanceTypeArguments() const { |
| const intptr_t num_type_arguments = NumTypeArguments(); |
| if (num_type_arguments == 0) { |
| return TypeArguments::null(); |
| } |
| if (declaration_instance_type_arguments() != TypeArguments::null()) { |
| return declaration_instance_type_arguments(); |
| } |
| Thread* thread = Thread::Current(); |
| SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock()); |
| if (declaration_instance_type_arguments() != TypeArguments::null()) { |
| return declaration_instance_type_arguments(); |
| } |
| Zone* zone = thread->zone(); |
| auto& args = TypeArguments::Handle(zone); |
| auto& type = AbstractType::Handle(zone); |
| const intptr_t num_type_parameters = NumTypeParameters(thread); |
| if (num_type_arguments == num_type_parameters) { |
| type = DeclarationType(); |
| args = Type::Cast(type).arguments(); |
| } else { |
| type = super_type(); |
| const auto& super_args = TypeArguments::Handle( |
| zone, Type::Cast(type).GetInstanceTypeArguments(thread)); |
| if ((num_type_parameters == 0) || |
| (!super_args.IsNull() && (super_args.Length() == num_type_arguments))) { |
| args = super_args.ptr(); |
| } else { |
| args = TypeArguments::New(num_type_arguments); |
| const intptr_t offset = num_type_arguments - num_type_parameters; |
| for (intptr_t i = 0; i < offset; ++i) { |
| type = super_args.TypeAtNullSafe(i); |
| args.SetTypeAt(i, type); |
| } |
| type = DeclarationType(); |
| const auto& decl_args = |
| TypeArguments::Handle(zone, Type::Cast(type).arguments()); |
| for (intptr_t i = 0; i < num_type_parameters; ++i) { |
| type = decl_args.TypeAt(i); |
| args.SetTypeAt(offset + i, type); |
| } |
| } |
| } |
| args = args.Canonicalize(thread); |
| set_declaration_instance_type_arguments(args); |
| return args.ptr(); |
| } |
| |
| TypeArgumentsPtr Class::GetInstanceTypeArguments( |
| Thread* thread, |
| const TypeArguments& type_arguments, |
| bool canonicalize) const { |
| const intptr_t num_type_arguments = NumTypeArguments(); |
| if (num_type_arguments == 0) { |
| return TypeArguments::null(); |
| } |
| Zone* zone = thread->zone(); |
| auto& args = TypeArguments::Handle(zone); |
| const intptr_t num_type_parameters = NumTypeParameters(thread); |
| ASSERT(type_arguments.IsNull() || |
| type_arguments.Length() == num_type_parameters); |
| if (num_type_arguments == num_type_parameters) { |
| args = type_arguments.ptr(); |
| } else { |
| args = GetDeclarationInstanceTypeArguments(); |
| if (num_type_parameters == 0) { |
| return args.ptr(); |
| } |
| args = args.InstantiateFrom( |
| TypeArguments::Handle( |
| zone, type_arguments.ToInstantiatorTypeArguments(thread, *this)), |
| Object::null_type_arguments(), kAllFree, Heap::kOld); |
| } |
| if (canonicalize) { |
| args = args.Canonicalize(thread); |
| } |
| return args.ptr(); |
| } |
| |
| intptr_t Class::NumTypeParameters(Thread* thread) const { |
| if (!is_declaration_loaded()) { |
| ASSERT(is_prefinalized()); |
| const intptr_t cid = id(); |
| if ((cid == kArrayCid) || (cid == kImmutableArrayCid) || |
| (cid == kGrowableObjectArrayCid)) { |
| return 1; // List's type parameter may not have been parsed yet. |
| } |
| return 0; |
| } |
| if (type_parameters() == TypeParameters::null()) { |
| return 0; |
| } |
| REUSABLE_TYPE_PARAMETERS_HANDLESCOPE(thread); |
| TypeParameters& type_params = thread->TypeParametersHandle(); |
| type_params = type_parameters(); |
| return type_params.Length(); |
| } |
| |
| intptr_t Class::ComputeNumTypeArguments() const { |
| ASSERT(is_declaration_loaded()); |
| Thread* thread = Thread::Current(); |
| Zone* zone = thread->zone(); |
| auto isolate_group = thread->isolate_group(); |
| const intptr_t num_type_params = NumTypeParameters(); |
| |
| if ((super_type() == AbstractType::null()) || |
| (super_type() == isolate_group->object_store()->object_type())) { |
| return num_type_params; |
| } |
| |
| const auto& sup_type = Type::Handle(zone, super_type()); |
| const auto& sup_class = Class::Handle(zone, sup_type.type_class()); |
| const intptr_t sup_class_num_type_args = sup_class.NumTypeArguments(); |
| if (num_type_params == 0) { |
| return sup_class_num_type_args; |
| } |
| |
| const auto& sup_type_args = TypeArguments::Handle(zone, sup_type.arguments()); |
| if (sup_type_args.IsNull()) { |
    // The super type is raw or the super class is non-generic.
    // In either case, overlapping is not possible.
| return sup_class_num_type_args + num_type_params; |
| } |
| |
| const intptr_t sup_type_args_length = sup_type_args.Length(); |
| // Determine the maximum overlap of a prefix of the vector consisting of the |
| // type parameters of this class with a suffix of the vector consisting of the |
| // type arguments of the super type of this class. |
| // The number of own type arguments of this class is the number of its type |
| // parameters minus the number of type arguments in the overlap. |
| // Attempt to overlap the whole vector of type parameters; reduce the size |
| // of the vector (keeping the first type parameter) until it fits or until |
| // its size is zero. |
| auto& sup_type_arg = AbstractType::Handle(zone); |
| for (intptr_t num_overlapping_type_args = |
| (num_type_params < sup_type_args_length) ? num_type_params |
| : sup_type_args_length; |
| num_overlapping_type_args > 0; num_overlapping_type_args--) { |
| intptr_t i = 0; |
| for (; i < num_overlapping_type_args; i++) { |
| sup_type_arg = sup_type_args.TypeAt(sup_type_args_length - |
| num_overlapping_type_args + i); |
| ASSERT(!sup_type_arg.IsNull()); |
| if (!sup_type_arg.IsTypeParameter()) break; |
| // The only type parameters appearing in the type arguments of the super |
| // type are those declared by this class. Their finalized indices depend |
| // on the number of type arguments being computed here. Therefore, they |
| // cannot possibly be finalized yet. |
| ASSERT(!TypeParameter::Cast(sup_type_arg).IsFinalized()); |
| if (TypeParameter::Cast(sup_type_arg).index() != i || |
| TypeParameter::Cast(sup_type_arg).IsNullable()) { |
| break; |
| } |
| } |
| if (i == num_overlapping_type_args) { |
| // Overlap found. |
| return sup_class_num_type_args + num_type_params - |
| num_overlapping_type_args; |
| } |
| } |
| // No overlap found. |
| return sup_class_num_type_args + num_type_params; |
| } |
| |
| intptr_t Class::NumTypeArguments() const { |
| // Return cached value if already calculated. |
| intptr_t num_type_args = num_type_arguments(); |
| if (num_type_args != kUnknownNumTypeArguments) { |
| return num_type_args; |
| } |
| |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| UNREACHABLE(); |
| return 0; |
| #else |
| num_type_args = ComputeNumTypeArguments(); |
| ASSERT(num_type_args != kUnknownNumTypeArguments); |
| set_num_type_arguments(num_type_args); |
| return num_type_args; |
| #endif // defined(DART_PRECOMPILED_RUNTIME) |
| } |
| |
| TypeArgumentsPtr Class::DefaultTypeArguments(Zone* zone) const { |
| if (type_parameters() == TypeParameters::null()) { |
| return Object::empty_type_arguments().ptr(); |
| } |
| return TypeParameters::Handle(zone, type_parameters()).defaults(); |
| } |
| |
| ClassPtr Class::SuperClass(ClassTable* class_table /* = nullptr */) const { |
| Thread* thread = Thread::Current(); |
| Zone* zone = thread->zone(); |
| if (class_table == nullptr) { |
| class_table = thread->isolate_group()->class_table(); |
| } |
| |
| if (super_type() == AbstractType::null()) { |
| if (id() == kTypeArgumentsCid) { |
| // Pretend TypeArguments objects are Dart instances. |
| return class_table->At(kInstanceCid); |
| } |
| return Class::null(); |
| } |
| const AbstractType& sup_type = AbstractType::Handle(zone, super_type()); |
| const intptr_t type_class_id = sup_type.type_class_id(); |
| return class_table->At(type_class_id); |
| } |
| |
| void Class::set_super_type(const Type& value) const { |
| ASSERT(value.IsNull() || !value.IsDynamicType()); |
| untag()->set_super_type(value.ptr()); |
| } |
| |
| TypeParameterPtr Class::TypeParameterAt(intptr_t index, |
| Nullability nullability) const { |
| ASSERT(index >= 0 && index < NumTypeParameters()); |
| TypeParameter& type_param = |
| TypeParameter::Handle(TypeParameter::New(*this, 0, index, nullability)); |
| // Finalize type parameter only if its declaring class is |
| // finalized and available in the current class table. |
| if (is_type_finalized() && (type_param.parameterized_class() == ptr())) { |
| type_param ^= ClassFinalizer::FinalizeType(type_param); |
| } |
| return type_param.ptr(); |
| } |
| |
| intptr_t Class::UnboxedFieldSizeInBytesByCid(intptr_t cid) { |
| switch (cid) { |
| case kDoubleCid: |
| return sizeof(UntaggedDouble::value_); |
| case kFloat32x4Cid: |
| return sizeof(UntaggedFloat32x4::value_); |
| case kFloat64x2Cid: |
| return sizeof(UntaggedFloat64x2::value_); |
| default: |
| return sizeof(UntaggedMint::value_); |
| } |
| } |
| |
| UnboxedFieldBitmap Class::CalculateFieldOffsets() const { |
| Array& flds = Array::Handle(fields()); |
| const Class& super = Class::Handle(SuperClass()); |
| intptr_t host_offset = 0; |
| UnboxedFieldBitmap host_bitmap{}; |
  // Target offsets might differ if the word sizes are different.
| intptr_t target_offset = 0; |
| intptr_t host_type_args_field_offset = kNoTypeArguments; |
| intptr_t target_type_args_field_offset = RTN::Class::kNoTypeArguments; |
| if (super.IsNull()) { |
| host_offset = Instance::NextFieldOffset(); |
| target_offset = RTN::Instance::NextFieldOffset(); |
| ASSERT(host_offset > 0); |
| ASSERT(target_offset > 0); |
| } else { |
| ASSERT(super.is_finalized() || super.is_prefinalized()); |
| host_type_args_field_offset = super.host_type_arguments_field_offset(); |
| target_type_args_field_offset = super.target_type_arguments_field_offset(); |
| host_offset = super.host_next_field_offset(); |
| ASSERT(host_offset > 0); |
| target_offset = super.target_next_field_offset(); |
| ASSERT(target_offset > 0); |
| // We should never call CalculateFieldOffsets for native wrapper |
| // classes, assert this. |
| ASSERT(num_native_fields() == 0); |
| const intptr_t num_native_fields = super.num_native_fields(); |
| set_num_native_fields(num_native_fields); |
| if (num_native_fields > 0 || is_isolate_unsendable_due_to_pragma()) { |
| set_is_isolate_unsendable(true); |
| } |
| |
| host_bitmap = IsolateGroup::Current()->class_table()->GetUnboxedFieldsMapAt( |
| super.id()); |
| } |
  // If the super class is parameterized, use the same type_arguments field;
  // otherwise, if this class is the first in the super chain to be
  // parameterized, introduce a new type_arguments field.
| if (host_type_args_field_offset == kNoTypeArguments) { |
| ASSERT(target_type_args_field_offset == RTN::Class::kNoTypeArguments); |
| if (IsGeneric()) { |
| // The instance needs a type_arguments field. |
| host_type_args_field_offset = host_offset; |
| target_type_args_field_offset = target_offset; |
| host_offset += kCompressedWordSize; |
| target_offset += compiler::target::kCompressedWordSize; |
| } |
| } else { |
| ASSERT(target_type_args_field_offset != RTN::Class::kNoTypeArguments); |
| } |
| |
| set_type_arguments_field_offset(host_type_args_field_offset, |
| target_type_args_field_offset); |
| ASSERT(host_offset > 0); |
| ASSERT(target_offset > 0); |
| Field& field = Field::Handle(); |
| const intptr_t len = flds.Length(); |
| for (intptr_t i = 0; i < len; i++) { |
| field ^= flds.At(i); |
| // Offset is computed only for instance fields. |
| if (!field.is_static()) { |
| ASSERT(field.HostOffset() == 0); |
| ASSERT(field.TargetOffset() == 0); |
| field.SetOffset(host_offset, target_offset); |
| |
| if (field.is_unboxed()) { |
| const intptr_t field_size = |
| UnboxedFieldSizeInBytesByCid(field.guarded_cid()); |
| |
| const intptr_t host_num_words = field_size / kCompressedWordSize; |
| const intptr_t host_next_offset = host_offset + field_size; |
| const intptr_t host_next_position = |
| host_next_offset / kCompressedWordSize; |
| |
| const intptr_t target_next_offset = target_offset + field_size; |
| const intptr_t target_next_position = |
| target_next_offset / compiler::target::kCompressedWordSize; |
| |
        // The bitmap has a fixed length. Check whether the offset position
        // fits within that length; if it does not, the field must be boxed.
| if (host_next_position <= UnboxedFieldBitmap::Length() && |
| target_next_position <= UnboxedFieldBitmap::Length()) { |
| for (intptr_t j = 0; j < host_num_words; j++) { |
| // Activate the respective bit in the bitmap, indicating that the |
| // content is not a pointer |
| host_bitmap.Set(host_offset / kCompressedWordSize); |
| host_offset += kCompressedWordSize; |
| } |
| |
| ASSERT(host_offset == host_next_offset); |
| target_offset = target_next_offset; |
| } else { |
| // Make the field boxed |
| field.set_is_unboxed(false); |
| host_offset += kCompressedWordSize; |
| target_offset += compiler::target::kCompressedWordSize; |
| } |
| } else { |
| host_offset += kCompressedWordSize; |
| target_offset += compiler::target::kCompressedWordSize; |
| } |
| } |
| } |
| |
| const intptr_t host_instance_size = RoundedAllocationSize(host_offset); |
| const intptr_t target_instance_size = |
| compiler::target::RoundedAllocationSize(target_offset); |
| if (!Utils::IsInt(32, target_instance_size)) { |
| // Many parts of the compiler assume offsets can be represented with |
| // int32_t. |
| FATAL("Too many fields in %s\n", UserVisibleNameCString()); |
| } |
| set_instance_size(host_instance_size, target_instance_size); |
| set_next_field_offset(host_offset, target_offset); |
| return host_bitmap; |
| } |
| |
| void Class::AddInvocationDispatcher(const String& target_name, |
| const Array& args_desc, |
| const Function& dispatcher) const { |
| auto thread = Thread::Current(); |
| ASSERT(thread->isolate_group()->program_lock()->IsCurrentThreadWriter()); |
| |
| ASSERT(target_name.ptr() == dispatcher.name()); |
| |
| DispatcherSet dispatchers(invocation_dispatcher_cache() == |
| Array::empty_array().ptr() |
| ? HashTables::New<DispatcherSet>(4, Heap::kOld) |
| : invocation_dispatcher_cache()); |
| dispatchers.Insert(dispatcher); |
| set_invocation_dispatcher_cache(dispatchers.Release()); |
| } |
| |
| FunctionPtr Class::GetInvocationDispatcher(const String& target_name, |
| const Array& args_desc, |
| UntaggedFunction::Kind kind, |
| bool create_if_absent) const { |
| ASSERT(kind == UntaggedFunction::kNoSuchMethodDispatcher || |
| kind == UntaggedFunction::kInvokeFieldDispatcher || |
| kind == UntaggedFunction::kDynamicInvocationForwarder); |
| auto thread = Thread::Current(); |
| auto Z = thread->zone(); |
| auto& function = Function::Handle(Z); |
| |
| // First we'll try to find it without using locks. |
| DispatcherKey key(target_name, args_desc, kind); |
| if (invocation_dispatcher_cache() != Array::empty_array().ptr()) { |
| DispatcherSet dispatchers(Z, invocation_dispatcher_cache()); |
| function ^= dispatchers.GetOrNull(key); |
| dispatchers.Release(); |
| } |
| if (!function.IsNull() || !create_if_absent) { |
| return function.ptr(); |
| } |
| |
| // If we failed to find it and possibly need to create it, use a write lock. |
| SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock()); |
| |
| // Try to find it again & return if it was added in the meantime. |
| if (invocation_dispatcher_cache() != Array::empty_array().ptr()) { |
| DispatcherSet dispatchers(Z, invocation_dispatcher_cache()); |
| function ^= dispatchers.GetOrNull(key); |
| dispatchers.Release(); |
| } |
| if (!function.IsNull()) return function.ptr(); |
| |
| // Otherwise create it & add it. |
| function = CreateInvocationDispatcher(target_name, args_desc, kind); |
| AddInvocationDispatcher(target_name, args_desc, function); |
| return function.ptr(); |
| } |
| |
| FunctionPtr Class::CreateInvocationDispatcher( |
| const String& target_name, |
| const Array& args_desc, |
| UntaggedFunction::Kind kind) const { |
| Thread* thread = Thread::Current(); |
| Zone* zone = thread->zone(); |
| FunctionType& signature = FunctionType::Handle(zone, FunctionType::New()); |
| Function& invocation = Function::Handle( |
| zone, Function::New( |
| signature, |
| String::Handle(zone, Symbols::New(thread, target_name)), kind, |
| false, // Not static. |
| false, // Not const. |
| false, // Not abstract. |
| false, // Not external. |
| false, // Not native. |
| *this, TokenPosition::kMinSource)); |
| ArgumentsDescriptor desc(args_desc); |
| const intptr_t type_args_len = desc.TypeArgsLen(); |
| if (type_args_len > 0) { |
| // Make dispatcher function generic, since type arguments are passed. |
| const auto& type_parameters = |
| TypeParameters::Handle(zone, TypeParameters::New(type_args_len)); |
| // Allow any type, as any type checking is compiled into the dispatcher. |
| auto& bound = Type::Handle( |
| zone, IsolateGroup::Current()->object_store()->nullable_object_type()); |
| for (intptr_t i = 0; i < type_args_len; i++) { |
| // The name of the type parameter does not matter, as a type error using |
| // it should never be thrown. |
| type_parameters.SetNameAt(i, Symbols::OptimizedOut()); |
| type_parameters.SetBoundAt(i, bound); |
| // Type arguments will always be provided, so the default is not used. |
| type_parameters.SetDefaultAt(i, Object::dynamic_type()); |
| } |
| signature.SetTypeParameters(type_parameters); |
| } |
| |
| signature.set_num_fixed_parameters(desc.PositionalCount()); |
| signature.SetNumOptionalParameters(desc.NamedCount(), |
| false); // Not positional. |
| signature.set_parameter_types( |
| Array::Handle(zone, Array::New(desc.Count(), Heap::kOld))); |
| invocation.CreateNameArray(); |
| signature.CreateNameArrayIncludingFlags(); |
| // Receiver. |
| signature.SetParameterTypeAt(0, Object::dynamic_type()); |
| invocation.SetParameterNameAt(0, Symbols::This()); |
| // Remaining positional parameters. |
| for (intptr_t i = 1; i < desc.PositionalCount(); i++) { |
| signature.SetParameterTypeAt(i, Object::dynamic_type()); |
| char name[64]; |
| Utils::SNPrint(name, 64, ":p%" Pd, i); |
| invocation.SetParameterNameAt( |
| i, String::Handle(zone, Symbols::New(thread, name))); |
| } |
| |
| // Named parameters. |
| for (intptr_t i = 0; i < desc.NamedCount(); i++) { |
| const intptr_t param_index = desc.PositionAt(i); |
| const auto& param_name = String::Handle(zone, desc.NameAt(i)); |
| signature.SetParameterTypeAt(param_index, Object::dynamic_type()); |
| signature.SetParameterNameAt(param_index, param_name); |
| } |
| signature.FinalizeNameArray(); |
| signature.set_result_type(Object::dynamic_type()); |
| invocation.set_is_debuggable(false); |
| invocation.set_is_visible(false); |
| invocation.set_is_reflectable(false); |
| invocation.set_saved_args_desc(args_desc); |
| |
| signature ^= ClassFinalizer::FinalizeType(signature); |
| invocation.SetSignature(signature); |
| |
| #if defined(DART_DYNAMIC_MODULES) |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| const bool attach_bytecode = true; |
| #else |
| const bool attach_bytecode = is_declared_in_bytecode(); |
| #endif |
| if (attach_bytecode) { |
| switch (kind) { |
| case UntaggedFunction::kNoSuchMethodDispatcher: |
| invocation.AttachBytecode(Object::nsm_dispatcher_bytecode()); |
| break; |
| case UntaggedFunction::kInvokeFieldDispatcher: |
| invocation.AttachBytecode(Object::invoke_field_bytecode()); |
| break; |
| default: |
| UNREACHABLE(); |
| } |
| } |
| #endif // defined(DART_DYNAMIC_MODULES) |
| |
| return invocation.ptr(); |
| } |
| |
| // Method extractors are used to create implicit closures from methods. |
| // When an expression obj.M is evaluated for the first time and receiver obj |
| // does not have a getter called M but has a method called M then an extractor |
| // is created and injected as a getter (under the name get:M) into the class |
| // owning method M. |
| FunctionPtr Function::CreateMethodExtractor(const String& getter_name) const { |
| Thread* thread = Thread::Current(); |
| Zone* zone = thread->zone(); |
| ASSERT(Field::IsGetterName(getter_name)); |
| const Function& closure_function = |
| Function::Handle(zone, ImplicitClosureFunction()); |
| |
| const Class& owner = Class::Handle(zone, closure_function.Owner()); |
| FunctionType& signature = FunctionType::Handle(zone, FunctionType::New()); |
| const Function& extractor = Function::Handle( |
| zone, |
| Function::New(signature, |
| String::Handle(zone, Symbols::New(thread, getter_name)), |
| UntaggedFunction::kMethodExtractor, |
| false, // Not static. |
| false, // Not const. |
| is_abstract(), |
| false, // Not external. |
| false, // Not native. |
| owner, TokenPosition::kMethodExtractor)); |
| |
| // Initialize signature: receiver is a single fixed parameter. |
| const intptr_t kNumParameters = 1; |
| signature.set_num_fixed_parameters(kNumParameters); |
| signature.SetNumOptionalParameters(0, false); |
| signature.set_parameter_types(Object::synthetic_getter_parameter_types()); |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| extractor.set_positional_parameter_names( |
| Object::synthetic_getter_parameter_names()); |
| #endif |
| signature.set_result_type(Object::dynamic_type()); |
| |
| extractor.InheritKernelOffsetFrom(*this); |
| |
| extractor.set_extracted_method_closure(closure_function); |
| extractor.set_is_debuggable(false); |
| extractor.set_is_visible(false); |
| |
| signature ^= ClassFinalizer::FinalizeType(signature); |
| extractor.SetSignature(signature); |
| |
| #if defined(DART_DYNAMIC_MODULES) |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| const bool attach_bytecode = true; |
| #else |
| const bool attach_bytecode = is_declared_in_bytecode(); |
| #endif |
| if (attach_bytecode) { |
| extractor.AttachBytecode(Object::method_extractor_bytecode()); |
| } |
| #endif // defined(DART_DYNAMIC_MODULES) |
| |
| owner.AddFunction(extractor); |
| |
| return extractor.ptr(); |
| } |
| |
| FunctionPtr Function::GetMethodExtractor(const String& getter_name) const { |
| ASSERT(Field::IsGetterName(getter_name)); |
| const Function& closure_function = |
| Function::Handle(ImplicitClosureFunction()); |
| const Class& owner = Class::Handle(closure_function.Owner()); |
| Thread* thread = Thread::Current(); |
| if (owner.EnsureIsFinalized(thread) != Error::null()) { |
| return Function::null(); |
| } |
| IsolateGroup* group = thread->isolate_group(); |
| Function& result = Function::Handle( |
| Resolver::ResolveDynamicFunction(thread->zone(), owner, getter_name)); |
| if (result.IsNull()) { |
| SafepointWriteRwLocker ml(thread, group->program_lock()); |
| result = owner.LookupDynamicFunctionUnsafe(getter_name); |
| if (result.IsNull()) { |
| result = CreateMethodExtractor(getter_name); |
| } |
| } |
| ASSERT(result.kind() == UntaggedFunction::kMethodExtractor); |
| return result.ptr(); |
| } |
| |
| // Record field getters are used to access fields of arbitrary |
| // record instances dynamically. |
| FunctionPtr Class::CreateRecordFieldGetter(const String& getter_name) const { |
| Thread* thread = Thread::Current(); |
| Zone* zone = thread->zone(); |
| ASSERT(IsRecordClass()); |
| ASSERT(Field::IsGetterName(getter_name)); |
| FunctionType& signature = FunctionType::Handle(zone, FunctionType::New()); |
| const Function& getter = Function::Handle( |
| zone, |
| Function::New(signature, |
| String::Handle(zone, Symbols::New(thread, getter_name)), |
| UntaggedFunction::kRecordFieldGetter, |
| false, // Not static. |
| false, // Not const. |
| false, // Not abstract. |
| false, // Not external. |
| false, // Not native. |
| *this, TokenPosition::kMinSource)); |
| |
| // Initialize signature: receiver is a single fixed parameter. |
| const intptr_t kNumParameters = 1; |
| signature.set_num_fixed_parameters(kNumParameters); |
| signature.SetNumOptionalParameters(0, false); |
| signature.set_parameter_types(Object::synthetic_getter_parameter_types()); |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| getter.set_positional_parameter_names( |
| Object::synthetic_getter_parameter_names()); |
| #endif |
| signature.set_result_type(Object::dynamic_type()); |
| |
| getter.set_is_debuggable(false); |
| getter.set_is_visible(false); |
| |
| signature ^= ClassFinalizer::FinalizeType(signature); |
| getter.SetSignature(signature); |
| |
| AddFunction(getter); |
| |
| return getter.ptr(); |
| } |
| |
| FunctionPtr Class::GetRecordFieldGetter(const String& getter_name) const { |
| ASSERT(IsRecordClass()); |
| ASSERT(Field::IsGetterName(getter_name)); |
| Thread* thread = Thread::Current(); |
| SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock()); |
| Function& result = Function::Handle(thread->zone(), |
| LookupDynamicFunctionUnsafe(getter_name)); |
| if (result.IsNull()) { |
| result = CreateRecordFieldGetter(getter_name); |
| } |
| ASSERT(result.kind() == UntaggedFunction::kRecordFieldGetter); |
| return result.ptr(); |
| } |
| |
| bool FindPragmaInMetadata(Thread* T, |
| const Object& metadata_obj, |
| const String& pragma_name, |
| bool multiple, |
| Object* options) { |
| auto IG = T->isolate_group(); |
| auto Z = T->zone(); |
| |
| // If there is a compile-time error while evaluating the metadata, we will |
| // simply claim there was no @pragma annotation. |
| if (metadata_obj.IsNull() || metadata_obj.IsLanguageError()) { |
| return false; |
| } |
| ASSERT(metadata_obj.IsArray()); |
| |
| auto& metadata = Array::Cast(metadata_obj); |
| auto& pragma_class = Class::Handle(Z, IG->object_store()->pragma_class()); |
| if (pragma_class.IsNull()) { |
| // Precompiler may drop pragma class. |
| return false; |
| } |
| auto& pragma_name_field = |
| Field::Handle(Z, pragma_class.LookupField(Symbols::name())); |
| auto& pragma_options_field = |
| Field::Handle(Z, pragma_class.LookupField(Symbols::options())); |
| |
| auto& pragma = Object::Handle(Z); |
| bool found = false; |
| auto& options_value = Object::Handle(Z); |
| auto& results = GrowableObjectArray::Handle(Z); |
| if (multiple) { |
| ASSERT(options != nullptr); |
| results ^= GrowableObjectArray::New(1); |
| } |
| for (intptr_t i = 0; i < metadata.Length(); ++i) { |
| pragma = metadata.At(i); |
| if (pragma.clazz() != pragma_class.ptr() || |
| Instance::Cast(pragma).GetField(pragma_name_field) != |
| pragma_name.ptr()) { |
| continue; |
| } |
| options_value = Instance::Cast(pragma).GetField(pragma_options_field); |
| found = true; |
| if (multiple) { |
| results.Add(options_value); |
| continue; |
| } |
| if (options != nullptr) { |
| *options = options_value.ptr(); |
| } |
| return true; |
| } |
| |
| if (found && options != nullptr) { |
| *options = results.ptr(); |
| } |
  return found;
| } |
| |
| bool Library::FindPragma(Thread* T, |
| bool only_core, |
| const Object& obj, |
| const String& pragma_name, |
| bool multiple, |
| Object* options) { |
| auto Z = T->zone(); |
| auto& lib = Library::Handle(Z); |
| |
| if (obj.IsLibrary()) { |
| lib = Library::Cast(obj).ptr(); |
| } else if (obj.IsClass()) { |
| auto& klass = Class::Cast(obj); |
| if (!klass.has_pragma()) return false; |
| lib = klass.library(); |
| } else if (obj.IsFunction()) { |
| auto& function = Function::Cast(obj); |
| if (!function.has_pragma()) return false; |
| lib = Class::Handle(Z, function.Owner()).library(); |
| } else if (obj.IsField()) { |
| auto& field = Field::Cast(obj); |
| if (!field.has_pragma()) return false; |
| lib = Class::Handle(Z, field.Owner()).library(); |
| } else { |
| UNREACHABLE(); |
| } |
| |
| if (only_core && !lib.IsAnyCoreLibrary()) { |
| return false; |
| } |
| |
| Object& metadata_obj = Object::Handle(Z, lib.GetMetadata(obj)); |
| if (metadata_obj.IsUnwindError()) { |
| Report::LongJump(UnwindError::Cast(metadata_obj)); |
| } |
| |
| return FindPragmaInMetadata(T, metadata_obj, pragma_name, multiple, options); |
| } |
| |
| bool Function::IsDynamicInvocationForwarderName(const String& name) { |
| return IsDynamicInvocationForwarderName(name.ptr()); |
| } |
| |
| bool Function::IsDynamicInvocationForwarderName(StringPtr name) { |
| return String::StartsWith(name, Symbols::DynamicPrefix().ptr()); |
| } |
| |
| StringPtr Function::DemangleDynamicInvocationForwarderName(const String& name) { |
| const intptr_t kDynamicPrefixLength = 4; // "dyn:" |
| ASSERT(Symbols::DynamicPrefix().Length() == kDynamicPrefixLength); |
| return Symbols::New(Thread::Current(), name, kDynamicPrefixLength, |
| name.Length() - kDynamicPrefixLength); |
| } |
| |
| StringPtr Function::CreateDynamicInvocationForwarderName(const String& name) { |
| return Symbols::FromConcat(Thread::Current(), Symbols::DynamicPrefix(), name); |
| } |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) || defined(DART_DYNAMIC_MODULES) |
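| // Creates the dynamic invocation forwarder for this function: a clone of |
| // the target under the mangled "dyn:" name, with IC data, code and usage |
| // counters reset, marked invisible, and forwarding to the original. |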
| FunctionPtr Function::CreateDynamicInvocationForwarder( |
| const String& mangled_name) const { |
| Thread* thread = Thread::Current(); |
| Zone* zone = thread->zone(); |
| |
| Function& forwarder = Function::Handle(zone); |
| forwarder ^= Object::Clone(*this, Heap::kOld); |
| |
| forwarder.reset_unboxed_parameters_and_return(); |
| |
| forwarder.set_name(mangled_name); |
| forwarder.set_is_native(false); |
| // TODO(dartbug.com/37737): Currently, we intentionally keep the recognized |
| // kind when creating the dynamic invocation forwarder. |
| forwarder.set_kind(UntaggedFunction::kDynamicInvocationForwarder); |
| forwarder.set_modifier(UntaggedFunction::kNoModifier); |
| forwarder.set_is_debuggable(false); |
| |
| // TODO(vegorov): For error reporting reasons it would be better to make |
| // this function visible and instead use a tail call to invoke the target. |
| // However, our TailCall instruction is not ready for such usage: it |
| // blocks inlining and can only take Code objects, not Functions. |
| forwarder.set_is_visible(false); |
| |
| forwarder.ClearICDataArray(); |
| forwarder.ClearCode(); |
| forwarder.set_usage_counter(0); |
| forwarder.set_deoptimization_counter(0); |
| forwarder.set_optimized_instruction_count(0); |
| forwarder.set_inlining_depth(0); |
| forwarder.set_optimized_call_site_count(0); |
| |
| forwarder.InheritKernelOffsetFrom(*this); |
| forwarder.SetForwardingTarget(*this); |
| |
| #if defined(DART_DYNAMIC_MODULES) |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| const bool attach_bytecode = true; |
| #else |
| const bool attach_bytecode = is_declared_in_bytecode(); |
| #endif |
| if (attach_bytecode) { |
| forwarder.AttachBytecode(Object::dynamic_invocation_forwarder_bytecode()); |
| } |
| #endif |
| |
| return forwarder.ptr(); |
| } |
| |
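| // Lookup uses double-checked locking: probe the class's invocation |
| // dispatcher cache lock-free first, then re-check under the program write |
| // lock before creating and registering a new forwarder. |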
| FunctionPtr Function::GetDynamicInvocationForwarder( |
| const String& mangled_name) const { |
| ASSERT(IsDynamicInvocationForwarderName(mangled_name)); |
| auto thread = Thread::Current(); |
| auto zone = thread->zone(); |
| const Class& owner = Class::Handle(zone, Owner()); |
| Function& result = Function::Handle(zone); |
| |
| // First we'll try to find it without using locks. |
| result = owner.GetInvocationDispatcher( |
| mangled_name, Array::null_array(), |
| UntaggedFunction::kDynamicInvocationForwarder, |
| /*create_if_absent=*/false); |
| if (!result.IsNull()) return result.ptr(); |
| |
| const bool needs_dyn_forwarder = |
| #if defined(DART_DYNAMIC_MODULES) && defined(DART_PRECOMPILED_RUNTIME) |
| // TODO(alexmarkov) |
| false; |
| #else |
| kernel::NeedsDynamicInvocationForwarder(*this); |
| #endif |
| if (!needs_dyn_forwarder) { |
| return ptr(); |
| } |
| |
| // If we failed to find it and possibly need to create it, use a write lock. |
| SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock()); |
| |
| // Try to find it again and return it if it was added in the meantime. |
| result = owner.GetInvocationDispatcher( |
| mangled_name, Array::null_array(), |
| UntaggedFunction::kDynamicInvocationForwarder, |
| /*create_if_absent=*/false); |
| if (!result.IsNull()) return result.ptr(); |
| |
| // Otherwise, create it and add it. |
| result = CreateDynamicInvocationForwarder(mangled_name); |
| owner.AddInvocationDispatcher(mangled_name, Array::null_array(), result); |
| return result.ptr(); |
| } |
| |
| void Function::ReadParameterCovariance( |
| BitVector* is_covariant, |
| BitVector* is_generic_covariant_impl) const { |
| #if defined(DART_DYNAMIC_MODULES) |
| if (is_declared_in_bytecode()) { |
| bytecode::BytecodeReader::ReadParameterCovariance( |
| *this, is_covariant, is_generic_covariant_impl); |
| return; |
| } |
| #endif |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| kernel::ReadParameterCovariance(*this, is_covariant, |
| is_generic_covariant_impl); |
| #endif |
| } |
| |
| #endif |
| |
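| // Instantiates [subtype] and [supertype] in place when they are not yet |
| // instantiated, using the given instantiator and function type arguments, |
| // then performs the subtype check. |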
| bool AbstractType::InstantiateAndTestSubtype( |
| AbstractType* subtype, |
| AbstractType* supertype, |
| const TypeArguments& instantiator_type_args, |
| const TypeArguments& function_type_args) { |
| if (!subtype->IsInstantiated()) { |
| *subtype = subtype->InstantiateFrom( |
| instantiator_type_args, function_type_args, kAllFree, Heap::kOld); |
| } |
| if (!supertype->IsInstantiated()) { |
| *supertype = supertype->InstantiateFrom( |
| instantiator_type_args, function_type_args, kAllFree, Heap::kOld); |
| } |
| return subtype->IsSubtypeOf(*supertype, Heap::kOld); |
| } |
| |
| ArrayPtr Class::invocation_dispatcher_cache() const { |
| return untag()->invocation_dispatcher_cache<std::memory_order_acquire>(); |
| } |
| |
| void Class::Finalize() const { |
| auto thread = Thread::Current(); |
| auto isolate_group = thread->isolate_group(); |
| ASSERT(!isolate_group->all_classes_finalized()); |
| ASSERT(!is_finalized()); |
| // Prefinalized classes have a VM internal representation and no Dart fields. |
| // Their instance size is precomputed and field offsets are known. |
| if (!is_prefinalized()) { |
| // Compute offsets of instance fields, instance size and bitmap for unboxed |
| // fields. |
| const auto host_bitmap = CalculateFieldOffsets(); |
| if (ptr() == isolate_group->class_table()->At(id())) { |
| if (!ClassTable::IsTopLevelCid(id())) { |
| // Unless the class is top-level (top-level classes are never |
| // instantiated), set the new size in the class table. |
| isolate_group->class_table()->UpdateClassSize(id(), ptr()); |
| isolate_group->class_table()->SetUnboxedFieldsMapAt(id(), host_bitmap); |
| } |
| } |
| } |
| |
| #if defined(DEBUG) |
| if (is_const()) { |
| // Double-check that all fields are final (CFE should guarantee that if it |
| // marks the class as having a constant constructor). |
| auto Z = thread->zone(); |
| const auto& super_class = Class::Handle(Z, SuperClass()); |
| ASSERT(super_class.IsNull() || super_class.is_const()); |
| const auto& fields = Array::Handle(Z, this->fields()); |
| auto& field = Field::Handle(Z); |
| for (intptr_t i = 0; i < fields.Length(); ++i) { |
| field ^= fields.At(i); |
| ASSERT(field.is_static() || field.is_final()); |
| } |
| } |
| #endif |
| |
| set_is_finalized(); |
| } |
| |
| #if defined(DEBUG) |
| static bool IsMutatorOrAtDeoptSafepoint() { |
| Thread* thread = Thread::Current(); |
| return thread->IsDartMutatorThread() || thread->OwnsDeoptSafepoint(); |
| } |
| #endif |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| |
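| // Weakly holds the optimized Code objects whose CHA-based optimizations |
| // depend on this class, so that they can be deoptimized or switched to |
| // unoptimized code when the class hierarchy changes. |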
| class CHACodeArray : public WeakCodeReferences { |
| public: |
| explicit CHACodeArray(const Class& cls) |
| : WeakCodeReferences(WeakArray::Handle(cls.dependent_code())), |
| cls_(cls) {} |
| |
| virtual void UpdateArrayTo(const WeakArray& value) { |
| // TODO(fschneider): Fails for classes in the VM isolate. |
| cls_.set_dependent_code(value); |
| } |
| |
| virtual void ReportDeoptimization(const Code& code) { |
| if (FLAG_trace_deoptimization || FLAG_trace_deoptimization_verbose) { |
| Function& function = Function::Handle(code.function()); |
| THR_Print("Deoptimizing %s because CHA optimized (%s).\n", |
| function.ToFullyQualifiedCString(), cls_.ToCString()); |
| } |
| } |
| |
| virtual void ReportSwitchingCode(const Code& code) { |
| if (FLAG_trace_deoptimization || FLAG_trace_deoptimization_verbose) { |
| Function& function = Function::Handle(code.function()); |
| THR_Print( |
| "Switching %s to unoptimized code because CHA invalid" |
| " (%s)\n", |
| function.ToFullyQualifiedCString(), cls_.ToCString()); |
| } |
| } |
| |
| private: |
| const Class& cls_; |
| DISALLOW_COPY_AND_ASSIGN(CHACodeArray); |
| }; |
| |
| void Class::RegisterCHACode(const Code& code) { |
| if (FLAG_trace_cha) { |
| THR_Print("RegisterCHACode '%s' depends on class '%s'\n", |
| Function::Handle(code.function()).ToQualifiedCString(), |
| ToCString()); |
| } |
| DEBUG_ASSERT(IsMutatorOrAtDeoptSafepoint()); |
| ASSERT(code.is_optimized()); |
| CHACodeArray a(*this); |
| a.Register(code); |
| } |
| |
| void Class::DisableCHAOptimizedCode(const Class& subclass) { |
| DEBUG_ASSERT( |
| IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| CHACodeArray a(*this); |
| if (FLAG_trace_deoptimization && a.HasCodes()) { |
| if (subclass.IsNull()) { |
| THR_Print("Deopt for CHA (all)\n"); |
| } else { |
| THR_Print("Deopt for CHA (new subclass %s)\n", subclass.ToCString()); |
| } |
| } |
| a.DisableCode(/*are_mutators_stopped=*/false); |
| } |
| |
| void Class::DisableAllCHAOptimizedCode() { |
| DisableCHAOptimizedCode(Class::Handle()); |
| } |
| |
| WeakArrayPtr Class::dependent_code() const { |
| DEBUG_ASSERT( |
| IsolateGroup::Current()->program_lock()->IsCurrentThreadReader()); |
| return untag()->dependent_code(); |
| } |
| |
| void Class::set_dependent_code(const WeakArray& array) const { |
| DEBUG_ASSERT( |
| IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| untag()->set_dependent_code(array.ptr()); |
| } |
| |
| #endif // !defined(DART_PRECOMPILED_RUNTIME) |
| |
| bool Class::TraceAllocation(IsolateGroup* isolate_group) const { |
| #ifndef PRODUCT |
| auto class_table = isolate_group->class_table(); |
| return class_table->ShouldTraceAllocationFor(id()); |
| #else |
| return false; |
| #endif |
| } |
| |
| void Class::SetTraceAllocation(bool trace_allocation) const { |
| #ifndef PRODUCT |
| auto isolate_group = IsolateGroup::Current(); |
| const bool changed = trace_allocation != this->TraceAllocation(isolate_group); |
| if (changed) { |
| auto class_table = isolate_group->class_table(); |
| class_table->SetTraceAllocationFor(id(), trace_allocation); |
| #ifdef TARGET_ARCH_IA32 |
| DisableAllocationStub(); |
| #endif |
| } |
| #else |
| UNREACHABLE(); |
| #endif |
| } |
| |
| // Conventions: |
| // * For throwing a NSM in a library or top-level class (i.e., level is |
| // kTopLevel), if a method was found but was incompatible, we pass the |
| // signature of the found method as a string, otherwise the null instance. |
| // * Otherwise, for throwing a NSM in a class klass we use its runtime type as |
| // receiver, i.e., klass.RareType(). |
| static ObjectPtr ThrowNoSuchMethod(const Instance& receiver, |
| const String& function_name, |
| const Array& arguments, |
| const Array& argument_names, |
| const InvocationMirror::Level level, |
| const InvocationMirror::Kind kind) { |
| const Smi& invocation_type = |
| Smi::Handle(Smi::New(InvocationMirror::EncodeType(level, kind))); |
| |
| ASSERT(!receiver.IsNull() || level == InvocationMirror::Level::kTopLevel); |
| ASSERT(level != InvocationMirror::Level::kTopLevel || receiver.IsString()); |
| const Array& args = Array::Handle(Array::New(7)); |
| args.SetAt(0, receiver); |
| args.SetAt(1, function_name); |
| args.SetAt(2, invocation_type); |
| args.SetAt(3, Object::smi_zero()); // Type arguments length. |
| args.SetAt(4, Object::null_type_arguments()); |
| args.SetAt(5, arguments); |
| args.SetAt(6, argument_names); |
| |
| const Library& libcore = Library::Handle(Library::CoreLibrary()); |
| const Class& cls = |
| Class::Handle(libcore.LookupClass(Symbols::NoSuchMethodError())); |
| ASSERT(!cls.IsNull()); |
| const auto& error = cls.EnsureIsFinalized(Thread::Current()); |
| ASSERT(error == Error::null()); |
| const Function& throwNew = |
| Function::Handle(cls.LookupFunctionAllowPrivate(Symbols::ThrowNew())); |
| return DartEntry::InvokeFunction(throwNew, args); |
| } |
| |
| static ObjectPtr ThrowTypeError(const TokenPosition token_pos, |
| const Instance& src_value, |
| const AbstractType& dst_type, |
| const String& dst_name) { |
| const Array& args = Array::Handle(Array::New(4)); |
| const Smi& pos = Smi::Handle(Smi::New(token_pos.Serialize())); |
| args.SetAt(0, pos); |
| args.SetAt(1, src_value); |
| args.SetAt(2, dst_type); |
| args.SetAt(3, dst_name); |
| |
| const Library& libcore = Library::Handle(Library::CoreLibrary()); |
| const Class& cls = |
| Class::Handle(libcore.LookupClassAllowPrivate(Symbols::TypeError())); |
| const auto& error = cls.EnsureIsFinalized(Thread::Current()); |
| ASSERT(error == Error::null()); |
| const Function& throwNew = |
| Function::Handle(cls.LookupFunctionAllowPrivate(Symbols::ThrowNew())); |
| return DartEntry::InvokeFunction(throwNew, args); |
| } |
| |
| ObjectPtr Class::InvokeGetter(const String& getter_name, |
| bool throw_nsm_if_absent, |
| bool respect_reflectable, |
| bool check_is_entrypoint) const { |
| Thread* thread = Thread::Current(); |
| Zone* zone = thread->zone(); |
| |
| CHECK_ERROR(EnsureIsFinalized(thread)); |
| |
| // Note static fields do not have implicit getters. |
| const Field& field = Field::Handle(zone, LookupStaticField(getter_name)); |
| |
| if (!field.IsNull() && check_is_entrypoint) { |
| CHECK_ERROR(field.VerifyEntryPoint(EntryPointPragma::kGetterOnly)); |
| } |
| |
| if (field.IsNull() || field.IsUninitialized()) { |
| const String& internal_getter_name = |
| String::Handle(zone, Field::GetterName(getter_name)); |
| Function& getter = |
| Function::Handle(zone, LookupStaticFunction(internal_getter_name)); |
| |
| if (field.IsNull() && !getter.IsNull() && check_is_entrypoint) { |
| CHECK_ERROR(getter.VerifyCallEntryPoint()); |
| } |
| |
| if (getter.IsNull() || (respect_reflectable && !getter.is_reflectable())) { |
| if (getter.IsNull()) { |
| getter = LookupStaticFunction(getter_name); |
| if (!getter.IsNull()) { |
| if (check_is_entrypoint) { |
| CHECK_ERROR(getter.VerifyClosurizedEntryPoint()); |
| } |
| if (getter.SafeToClosurize()) { |
| // Looking for a getter but found a regular method: closurize it. |
| const Function& closure_function = |
| Function::Handle(zone, getter.ImplicitClosureFunction()); |
| return closure_function.ImplicitStaticClosure(); |
| } |
| } |
| } |
| if (throw_nsm_if_absent) { |
| return ThrowNoSuchMethod( |
| AbstractType::Handle(zone, RareType()), getter_name, |
| Object::null_array(), Object::null_array(), |
| InvocationMirror::kStatic, InvocationMirror::kGetter); |
| } |
| // Fall-through case: Indicate that we didn't find any function or field |
| // by returning the sentinel instance. This is different from a field |
| // whose value is null. Callers make sure that this sentinel does not |
| // leak into Dartland. |
| return Object::sentinel().ptr(); |
| } |
| |
| // Invoke the getter and return the result. |
| return DartEntry::InvokeFunction(getter, Object::empty_array()); |
| } |
| |
| return field.StaticValue(); |
| } |
| |
| ObjectPtr Class::InvokeSetter(const String& setter_name, |
| const Instance& value, |
| bool respect_reflectable, |
| bool check_is_entrypoint) const { |
| Thread* thread = Thread::Current(); |
| Zone* zone = thread->zone(); |
| |
| CHECK_ERROR(EnsureIsFinalized(thread)); |
| |
| // Check for real fields and user-defined setters. |
| const Field& field = Field::Handle(zone, LookupStaticField(setter_name)); |
| const String& internal_setter_name = |
| String::Handle(zone, Field::SetterName(setter_name)); |
| |
| if (!field.IsNull() && check_is_entrypoint) { |
| CHECK_ERROR(field.VerifyEntryPoint(EntryPointPragma::kSetterOnly)); |
| } |
| |
| AbstractType& parameter_type = AbstractType::Handle(zone); |
| if (field.IsNull()) { |
| const Function& setter = |
| Function::Handle(zone, LookupStaticFunction(internal_setter_name)); |
| if (!setter.IsNull() && check_is_entrypoint) { |
| CHECK_ERROR(setter.VerifyCallEntryPoint()); |
| } |
| const int kNumArgs = 1; |
| const Array& args = Array::Handle(zone, Array::New(kNumArgs)); |
| args.SetAt(0, value); |
| if (setter.IsNull() || (respect_reflectable && !setter.is_reflectable())) { |
| return ThrowNoSuchMethod(AbstractType::Handle(zone, RareType()), |
| internal_setter_name, args, Object::null_array(), |
| InvocationMirror::kStatic, |
| InvocationMirror::kSetter); |
| } |
| parameter_type = setter.ParameterTypeAt(0); |
| if (!value.RuntimeTypeIsSubtypeOf(parameter_type, |
| Object::null_type_arguments(), |
| Object::null_type_arguments())) { |
| const String& argument_name = |
| String::Handle(zone, setter.ParameterNameAt(0)); |
| return ThrowTypeError(setter.token_pos(), value, parameter_type, |
| argument_name); |
| } |
| // Invoke the setter and return the result. |
| return DartEntry::InvokeFunction(setter, args); |
| } |
| |
| if (field.is_final() || (respect_reflectable && !field.is_reflectable())) { |
| const int kNumArgs = 1; |
| const Array& args = Array::Handle(zone, Array::New(kNumArgs)); |
| args.SetAt(0, value); |
| return ThrowNoSuchMethod(AbstractType::Handle(zone, RareType()), |
| internal_setter_name, args, Object::null_array(), |
| InvocationMirror::kStatic, |
| InvocationMirror::kSetter); |
| } |
| |
| parameter_type = field.type(); |
| if (!value.RuntimeTypeIsSubtypeOf(parameter_type, |
| Object::null_type_arguments(), |
| Object::null_type_arguments())) { |
| const String& argument_name = String::Handle(zone, field.name()); |
| return ThrowTypeError(field.token_pos(), value, parameter_type, |
| argument_name); |
| } |
| field.SetStaticValue(value); |
| return value.ptr(); |
| } |
| |
| // Creates a new array of boxed arguments suitable for invoking the callable |
| // from the original boxed arguments for a static call. Also sets the contents |
| // of the handle pointed to by [callable_args_desc_array_out] to an appropriate |
| // arguments descriptor array for the new arguments. |
| // |
| // Assumes [arg_names] are consistent with [static_args_descriptor]. |
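| // For example, with type arguments present, the boxed static args |
| // [typeArgs, a0, a1] become the callable args [typeArgs, receiver, a0, a1]. |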
| static ArrayPtr CreateCallableArgumentsFromStatic( |
| Zone* zone, |
| const Instance& receiver, |
| const Array& static_args, |
| const Array& arg_names, |
| const ArgumentsDescriptor& static_args_descriptor) { |
| const intptr_t num_static_type_args = static_args_descriptor.TypeArgsLen(); |
| const intptr_t num_static_args = static_args_descriptor.Count(); |
| // Double check that the static args descriptor expects boxed arguments |
| // and the static args descriptor is consistent with the static arguments. |
| ASSERT_EQUAL(static_args_descriptor.Size(), num_static_args); |
| ASSERT_EQUAL(static_args.Length(), |
| num_static_args + (num_static_type_args > 0 ? 1 : 0)); |
| // Add an additional slot to store the callable as the receiver. |
| const auto& callable_args = |
| Array::Handle(zone, Array::New(static_args.Length() + 1)); |
| const intptr_t first_arg_index = static_args_descriptor.FirstArgIndex(); |
| auto& temp = Object::Handle(zone); |
| // Copy the static args into the corresponding slots of the callable args. |
| if (num_static_type_args > 0) { |
| temp = static_args.At(0); |
| callable_args.SetAt(0, temp); |
| } |
| for (intptr_t i = first_arg_index; i < static_args.Length(); i++) { |
| temp = static_args.At(i); |
| callable_args.SetAt(i + 1, temp); |
| } |
| // Set the receiver slot in the callable args. |
| callable_args.SetAt(first_arg_index, receiver); |
| return callable_args.ptr(); |
| } |
| |
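| // Invokes the static function [function_name] with [args]. If no such |
| // method exists but a matching getter does, the getter is invoked and its |
| // result is called instead (getter-call fallback). |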
| ObjectPtr Class::Invoke(const String& function_name, |
| const Array& args, |
| const Array& arg_names, |
| bool respect_reflectable, |
| bool check_is_entrypoint) const { |
| Thread* thread = Thread::Current(); |
| Zone* zone = thread->zone(); |
| CHECK_ERROR(EnsureIsFinalized(thread)); |
| |
| // We don't pass any explicit type arguments, which will be understood as |
| // using dynamic for any function type arguments by lower layers. |
| const int kTypeArgsLen = 0; |
| const Array& args_descriptor_array = Array::Handle( |
| zone, ArgumentsDescriptor::NewBoxed(kTypeArgsLen, args.Length(), |
| arg_names, Heap::kNew)); |
| ArgumentsDescriptor args_descriptor(args_descriptor_array); |
| |
| Function& function = |
| Function::Handle(zone, LookupStaticFunction(function_name)); |
| |
| if (!function.IsNull() && check_is_entrypoint) { |
| CHECK_ERROR(function.VerifyCallEntryPoint()); |
| } |
| |
| if (function.IsNull()) { |
| // Didn't find a method: try to find a getter and invoke 'call' on its result. |
| const Object& getter_result = Object::Handle( |
| zone, InvokeGetter(function_name, false, respect_reflectable, |
| check_is_entrypoint)); |
| if (getter_result.ptr() != Object::sentinel().ptr()) { |
| if (check_is_entrypoint) { |
| CHECK_ERROR(EntryPointFieldInvocationError(function_name)); |
| } |
| const auto& call_args_descriptor_array = Array::Handle( |
| zone, ArgumentsDescriptor::NewBoxed(args_descriptor.TypeArgsLen(), |
| args_descriptor.Count() + 1, |
| arg_names, Heap::kNew)); |
| const auto& call_args = Array::Handle( |
| zone, |
| CreateCallableArgumentsFromStatic(zone, Instance::Cast(getter_result), |
| args, arg_names, args_descriptor)); |
| return DartEntry::InvokeClosure(thread, call_args, |
| call_args_descriptor_array); |
| } |
| } |
| |
| if (function.IsNull() || |
| !function.AreValidArguments(args_descriptor, nullptr) || |
| (respect_reflectable && !function.is_reflectable())) { |
| return ThrowNoSuchMethod( |
| AbstractType::Handle(zone, RareType()), function_name, args, arg_names, |
| InvocationMirror::kStatic, InvocationMirror::kMethod); |
| } |
| // This is a static function, so we pass an empty instantiator tav. |
| ASSERT(function.is_static()); |
| ObjectPtr type_error = function.DoArgumentTypesMatch( |
| args, args_descriptor, Object::empty_type_arguments()); |
| if (type_error != Error::null()) { |
| return type_error; |
| } |
| return DartEntry::InvokeFunction(function, args, args_descriptor_array); |
| } |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| |
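| // Loads the expression evaluation function from the kernel program in |
| // [kernel_buffer], as produced by the frontend for the given library and |
| // class. |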
| static ObjectPtr LoadExpressionEvaluationFunction( |
| Zone* zone, |
| const ExternalTypedData& kernel_buffer, |
| const String& library_url, |
| const String& klass) { |
| std::unique_ptr<kernel::Program> kernel_pgm = |
| kernel::Program::ReadFromTypedData(kernel_buffer); |
| |
| if (kernel_pgm == nullptr) { |
| return ApiError::New(String::Handle( |
| zone, String::New("Kernel isolate returned ill-formed kernel."))); |
| } |
| |
| auto& result = Object::Handle(zone); |
| { |
| kernel::KernelLoader loader(kernel_pgm.get(), |
| /*uri_to_source_table=*/nullptr); |
| result = loader.LoadExpressionEvaluationFunction(library_url, klass); |
| kernel_pgm.reset(); |
| } |
| if (result.IsError()) return result.ptr(); |
| return Function::Cast(result).ptr(); |
| } |
| |
| static bool EvaluationFunctionNeedsReceiver(Thread* thread, |
| Zone* zone, |
| const Function& eval_function) { |
| auto parsed_function = new ParsedFunction( |
| thread, Function::ZoneHandle(zone, eval_function.ptr())); |
| parsed_function->EnsureKernelScopes(); |
| return parsed_function->is_receiver_used(); |
| } |
| |
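| // Invokes [eval_function]. If the function is generic over |
| // [type_definitions], the provided [type_arguments] are passed as the |
| // leading boxed argument. |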
| static ObjectPtr EvaluateCompiledExpressionHelper( |
| Zone* zone, |
| const Function& eval_function, |
| const Array& type_definitions, |
| const Array& arguments, |
| const TypeArguments& type_arguments) { |
| // type_arguments is null if all type arguments are dynamic. |
| if (type_definitions.Length() == 0 || type_arguments.IsNull()) { |
| return DartEntry::InvokeFunction(eval_function, arguments); |
| } |
| |
| intptr_t num_type_args = type_arguments.Length(); |
| const auto& real_arguments = |
| Array::Handle(zone, Array::New(arguments.Length() + 1)); |
| real_arguments.SetAt(0, type_arguments); |
| Object& arg = Object::Handle(zone); |
| for (intptr_t i = 0; i < arguments.Length(); ++i) { |
| arg = arguments.At(i); |
| real_arguments.SetAt(i + 1, arg); |
| } |
| |
| const Array& args_desc = |
| Array::Handle(zone, ArgumentsDescriptor::NewBoxed( |
| num_type_args, arguments.Length(), Heap::kNew)); |
| return DartEntry::InvokeFunction(eval_function, real_arguments, args_desc); |
| } |
| |
| #endif // !defined(DART_PRECOMPILED_RUNTIME) |
| |
| ObjectPtr Library::EvaluateCompiledExpression( |
| const ExternalTypedData& kernel_buffer, |
| const Array& type_definitions, |
| const Array& arguments, |
| const TypeArguments& type_arguments) const { |
| const auto& klass = Class::Handle(toplevel_class()); |
| return klass.EvaluateCompiledExpression(kernel_buffer, type_definitions, |
| arguments, type_arguments); |
| } |
| |
| ObjectPtr Class::EvaluateCompiledExpression( |
| const ExternalTypedData& kernel_buffer, |
| const Array& type_definitions, |
| const Array& arguments, |
| const TypeArguments& type_arguments) const { |
| auto thread = Thread::Current(); |
| const auto& library = Library::Handle(thread->zone(), this->library()); |
| return Instance::EvaluateCompiledExpression( |
| thread, Instance::null_object(), library, *this, kernel_buffer, |
| type_definitions, arguments, type_arguments); |
| } |
| |
| ObjectPtr Instance::EvaluateCompiledExpression( |
| const Class& klass, |
| const ExternalTypedData& kernel_buffer, |
| const Array& type_definitions, |
| const Array& arguments, |
| const TypeArguments& type_arguments) const { |
| auto thread = Thread::Current(); |
| auto zone = thread->zone(); |
| const auto& library = Library::Handle(zone, klass.library()); |
| return Instance::EvaluateCompiledExpression(thread, *this, library, klass, |
| kernel_buffer, type_definitions, |
| arguments, type_arguments); |
| } |
| |
| ObjectPtr Instance::EvaluateCompiledExpression( |
| Thread* thread, |
| const Object& receiver, |
| const Library& library, |
| const Class& klass, |
| const ExternalTypedData& kernel_buffer, |
| const Array& type_definitions, |
| const Array& arguments, |
| const TypeArguments& type_arguments) { |
| auto zone = thread->zone(); |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| const auto& error_str = String::Handle( |
| zone, |
| String::New("Expression evaluation not available in precompiled mode.")); |
| return ApiError::New(error_str); |
| #else |
| if (IsInternalOnlyClassId(klass.id()) || (klass.id() == kTypeArgumentsCid)) { |
| const auto& exception = Instance::Handle( |
| zone, String::New("Expressions can be evaluated only with regular Dart " |
| "instances/classes.")); |
| return UnhandledException::New(exception, StackTrace::null_instance()); |
| } |
| |
| const auto& url = String::Handle(zone, library.url()); |
| const auto& klass_name = klass.IsTopLevel() |
| ? String::null_string() |
| : String::Handle(zone, klass.UserVisibleName()); |
| |
| const auto& result = Object::Handle( |
| zone, |
| LoadExpressionEvaluationFunction(zone, kernel_buffer, url, klass_name)); |
| if (result.IsError()) return result.ptr(); |
| |
| const auto& eval_function = Function::Cast(result); |
| |
| #if defined(DEBUG) |
| for (intptr_t i = 0; i < arguments.Length(); ++i) { |
| ASSERT(arguments.At(i) != Object::optimized_out().ptr()); |
| } |
| #endif // defined(DEBUG) |
| |
| auto& all_arguments = Array::Handle(zone, arguments.ptr()); |
| if (!eval_function.is_static()) { |
| // `this` may be optimized out (e.g. not accessible from a breakpoint |
| // because it was not captured by the closure). We allow this as long as |
| // the evaluation function doesn't actually need `this`. |
| if (receiver.IsNull() || receiver.ptr() == Object::optimized_out().ptr()) { |
| if (EvaluationFunctionNeedsReceiver(thread, zone, eval_function)) { |
| return Object::optimized_out().ptr(); |
| } |
| } |
| |
| all_arguments = Array::New(1 + arguments.Length()); |
| auto& param = PassiveObject::Handle(); |
| all_arguments.SetAt(0, receiver); |
| for (intptr_t i = 0; i < arguments.Length(); i++) { |
| param = arguments.At(i); |
| all_arguments.SetAt(i + 1, param); |
| } |
| } |
| |
| return EvaluateCompiledExpressionHelper(zone, eval_function, type_definitions, |
| all_arguments, type_arguments); |
| #endif // !defined(DART_PRECOMPILED_RUNTIME) |
| } |
| |
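| // Ensures the class declaration has been loaded; declarations of classes |
| // coming from bytecode are loaded lazily on first use. |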
| void Class::EnsureDeclarationLoaded() const { |
| if (!is_declaration_loaded()) { |
| #if defined(DART_DYNAMIC_MODULES) |
| // Loading of the class declaration can be postponed until needed if the |
| // class comes from bytecode. |
| if (is_declared_in_bytecode()) { |
| bytecode::BytecodeReader::LoadClassDeclaration(*this); |
| ASSERT(is_declaration_loaded()); |
| ASSERT(is_type_finalized()); |
| return; |
| } |
| #endif // defined(DART_DYNAMIC_MODULES) |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| UNREACHABLE(); |
| #else |
| FATAL("Unable to use class %s which is not loaded yet.", ToCString()); |
| #endif // defined(DART_PRECOMPILED_RUNTIME) |
| } |
| } |
| |
| // Ensure that top-level parsing of the class has been done. |
| ErrorPtr Class::EnsureIsFinalized(Thread* thread) const { |
| ASSERT(!IsNull()); |
| if (is_finalized()) { |
| return Error::null(); |
| } |
| #if defined(DART_PRECOMPILED_RUNTIME) && !defined(DART_DYNAMIC_MODULES) |
| UNREACHABLE(); |
| return Error::null(); |
| #else |
| SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock()); |
| if (is_finalized()) { |
| return Error::null(); |
| } |
| LeaveCompilerScope ncs(thread); |
| ASSERT(thread != nullptr); |
| const Error& error = |
| Error::Handle(thread->zone(), ClassFinalizer::LoadClassMembers(*this)); |
| if (!error.IsNull()) { |
| ASSERT(thread == Thread::Current()); |
| if (thread->long_jump_base() != nullptr) { |
| Report::LongJump(error); |
| UNREACHABLE(); |
| } |
| } |
| return error.ptr(); |
| #endif // defined(DART_PRECOMPILED_RUNTIME) && !defined(DART_DYNAMIC_MODULES) |
| } |
| |
| // Ensure that code made out-of-date by this class's finalization is cleaned |
| // up and that new instances of this class are ready to be allocated. |
| ErrorPtr Class::EnsureIsAllocateFinalized(Thread* thread) const { |
| ASSERT(!IsNull()); |
| if (is_allocate_finalized()) { |
| return Error::null(); |
| } |
| SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock()); |
| if (is_allocate_finalized()) { |
| return Error::null(); |
| } |
| ASSERT(thread != nullptr); |
| Error& error = Error::Handle(thread->zone(), EnsureIsFinalized(thread)); |
| if (!error.IsNull()) { |
| ASSERT(thread == Thread::Current()); |
| if (thread->long_jump_base() != nullptr) { |
| Report::LongJump(error); |
| UNREACHABLE(); |
| } |
| } |
| // May be allocate-finalized recursively during EnsureIsFinalized. |
| if (is_allocate_finalized()) { |
| return Error::null(); |
| } |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| UNREACHABLE(); |
| #else |
| error ^= ClassFinalizer::AllocateFinalizeClass(*this); |
| #endif // defined(DART_PRECOMPILED_RUNTIME) |
| return error.ptr(); |
| } |
| |
| void Class::SetFields(const Array& value) const { |
| ASSERT(!value.IsNull()); |
| #if defined(DEBUG) |
| Thread* thread = Thread::Current(); |
| ASSERT(thread->isolate_group()->program_lock()->IsCurrentThreadWriter()); |
| // Verify that all the fields in the array have this class as owner. |
| Field& field = Field::Handle(); |
| intptr_t len = value.Length(); |
| for (intptr_t i = 0; i < len; i++) { |
| field ^= value.At(i); |
| ASSERT(field.IsOriginal()); |
| ASSERT(field.Owner() == ptr()); |
| } |
| #endif |
| // The values of static fields are already initialized to null. |
| set_fields(value); |
| } |
| |
| void Class::AddField(const Field& field) const { |
| #if defined(DEBUG) |
| Thread* thread = Thread::Current(); |
| ASSERT(thread->isolate_group()->program_lock()->IsCurrentThreadWriter()); |
| #endif |
| const Array& arr = Array::Handle(fields()); |
| const Array& new_arr = Array::Handle(Array::Grow(arr, arr.Length() + 1)); |
| new_arr.SetAt(arr.Length(), field); |
| SetFields(new_arr); |
| } |
| |
| void Class::AddFields(const GrowableArray<const Field*>& new_fields) const { |
| #if defined(DEBUG) |
| Thread* thread = Thread::Current(); |
| ASSERT(thread->isolate_group()->program_lock()->IsCurrentThreadWriter()); |
| #endif |
| const intptr_t num_new_fields = new_fields.length(); |
| if (num_new_fields == 0) return; |
| const Array& arr = Array::Handle(fields()); |
| const intptr_t num_old_fields = arr.Length(); |
| const Array& new_arr = Array::Handle( |
| Array::Grow(arr, num_old_fields + num_new_fields, Heap::kOld)); |
| for (intptr_t i = 0; i < num_new_fields; i++) { |
| new_arr.SetAt(i + num_old_fields, *new_fields.At(i)); |
| } |
| SetFields(new_arr); |
| } |
| |
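| // Returns the index of [needle] in this class's fields array, or -1 if it |
| // is not present (or if the class fails to finalize). |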
| intptr_t Class::FindFieldIndex(const Field& needle) const { |
| Thread* thread = Thread::Current(); |
| if (EnsureIsFinalized(thread) != Error::null()) { |
| return -1; |
| } |
| REUSABLE_ARRAY_HANDLESCOPE(thread); |
| REUSABLE_FIELD_HANDLESCOPE(thread); |
| Array& fields = thread->ArrayHandle(); |
| Field& field = thread->FieldHandle(); |
| fields = this->fields(); |
| ASSERT(!fields.IsNull()); |
| for (intptr_t i = 0, n = fields.Length(); i < n; ++i) { |
| field ^= fields.At(i); |
| if (needle.ptr() == field.ptr()) { |
| return i; |
| } |
| } |
| // Not found. |
| return -1; |
| } |
| |
| FieldPtr Class::FieldFromIndex(intptr_t idx) const { |
| Array& fields = Array::Handle(this->fields()); |
| if ((idx < 0) || (idx >= fields.Length())) { |
| return Field::null(); |
| } |
| return Field::RawCast(fields.At(idx)); |
| } |
| |
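| // For the internal ClassID class, injects const "cid<Class>" fields |
| // holding the predefined class ids; returns false for any other class. |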
| bool Class::InjectCIDFields() const { |
| if (library() != Library::InternalLibrary() || |
| Name() != Symbols::ClassID().ptr()) { |
| return false; |
| } |
| |
| auto thread = Thread::Current(); |
| auto isolate_group = thread->isolate_group(); |
| auto zone = thread->zone(); |
| Field& field = Field::Handle(zone); |
| Smi& value = Smi::Handle(zone); |
| String& field_name = String::Handle(zone); |
| |
| // clang-format off |
| static const struct { |
| const char* const field_name; |
| const intptr_t cid; |
| } cid_fields[] = { |
| #define CLASS_LIST_WITH_NULL(V) \ |
| V(Null) \ |
| CLASS_LIST_NO_OBJECT(V) |
| #define ADD_SET_FIELD(clazz) \ |
| {"cid" #clazz, k##clazz##Cid}, |
| CLASS_LIST_WITH_NULL(ADD_SET_FIELD) |
| #undef ADD_SET_FIELD |
| #undef CLASS_LIST_WITH_NULL |
| #define ADD_SET_FIELD(clazz) \ |
| {"cid" #clazz, kTypedData##clazz##Cid}, \ |
| {"cid" #clazz "View", kTypedData##clazz##ViewCid}, \ |
| {"cidExternal" #clazz, kExternalTypedData##clazz##Cid}, \ |
| {"cidUnmodifiable" #clazz "View", kUnmodifiableTypedData##clazz##ViewCid}, \ |
| CLASS_LIST_TYPED_DATA(ADD_SET_FIELD) |
| #undef ADD_SET_FIELD |
| // Used in const hashing to determine whether we're dealing with a |
| // user-defined const. See lib/_internal/vm/lib/compact_hash.dart. |
| {"numPredefinedCids", kNumPredefinedCids}, |
| }; |
| // clang-format on |
| |
| const AbstractType& field_type = Type::Handle(zone, Type::IntType()); |
| for (size_t i = 0; i < ARRAY_SIZE(cid_fields); i++) { |
| field_name = Symbols::New(thread, cid_fields[i].field_name); |
| field = Field::New(field_name, /* is_static = */ true, |
| /* is_final = */ false, |
| /* is_const = */ true, |
| /* is_reflectable = */ false, |
| /* is_late = */ false, *this, field_type, |
| TokenPosition::kMinSource, TokenPosition::kMinSource); |
| value = Smi::New(cid_fields[i].cid); |
| isolate_group->RegisterStaticField(field, value); |
| AddField(field); |
| } |
| |
| return true; |
| } |
| |
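| // Allocates a new Class with class id [index] and minimally initializes |
| // it. FakeInstance and TargetFakeInstance supply the host and target |
| // instance layouts used to compute sizes and field offsets. |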
| template <class FakeInstance, class TargetFakeInstance> |
| ClassPtr Class::NewCommon(intptr_t index) { |
| ASSERT(Object::class_class() != Class::null()); |
| const auto& result = Class::Handle(Object::Allocate<Class>(Heap::kOld)); |
| // Here kIllegalCid means not-yet-assigned. |
| Object::VerifyBuiltinVtable<FakeInstance>(index == kIllegalCid ? kInstanceCid |
| : index); |
| NOT_IN_PRECOMPILED(result.set_token_pos(TokenPosition::kNoSource)); |
| NOT_IN_PRECOMPILED(result.set_end_token_pos(TokenPosition::kNoSource)); |
| const intptr_t host_instance_size = FakeInstance::InstanceSize(); |
| const intptr_t target_instance_size = compiler::target::RoundedAllocationSize( |
| TargetFakeInstance::InstanceSize()); |
| result.set_instance_size(host_instance_size, target_instance_size); |
| result.set_type_arguments_field_offset_in_words(kNoTypeArguments, |
| RTN::Class::kNoTypeArguments); |
| const intptr_t host_next_field_offset = FakeInstance::NextFieldOffset(); |
| const intptr_t target_next_field_offset = |
| TargetFakeInstance::NextFieldOffset(); |
| result.set_next_field_offset(host_next_field_offset, |
| target_next_field_offset); |
| result.set_id(index); |
| NOT_IN_PRECOMPILED(result.set_implementor_cid(kIllegalCid)); |
| result.set_num_type_arguments_unsafe(kUnknownNumTypeArguments); |
| result.set_num_native_fields(0); |
| result.set_state_bits(0); |
| NOT_IN_PRECOMPILED(result.set_kernel_offset(0)); |
| result.InitEmptyFields(); |
| return result.ptr(); |
| } |
| |
| template <class FakeInstance, class TargetFakeInstance> |
| ClassPtr Class::New(intptr_t index, |
| IsolateGroup* isolate_group, |
| bool register_class, |
| bool is_abstract) { |
| Class& result = |
| Class::Handle(NewCommon<FakeInstance, TargetFakeInstance>(index)); |
| if (is_abstract) { |
| result.set_is_abstract(); |
| } |
| if (register_class) { |
| isolate_group->class_table()->Register(result); |
| } |
| return result.ptr(); |
| } |
| |
| ClassPtr Class::New(const Library& lib, |
| const String& name, |
| const Script& script, |
| TokenPosition token_pos, |
| bool register_class) { |
| Class& result = |
| Class::Handle(NewCommon<Instance, RTN::Instance>(kIllegalCid)); |
| result.set_library(lib); |
| result.set_name(name); |
| result.set_script(script); |
| NOT_IN_PRECOMPILED(result.set_token_pos(token_pos)); |
| |
| // The size gets initialized to 0. Once the class gets finalized, the |
| // class finalizer will set the correct size. |
| ASSERT(!result.is_finalized() && !result.is_prefinalized()); |
| result.set_instance_size_in_words(0, 0); |
| |
| if (register_class) { |
| IsolateGroup::Current()->RegisterClass(result); |
| } |
| return result.ptr(); |
| } |
| |
| ClassPtr Class::NewInstanceClass() { |
| return Class::New<Instance, RTN::Instance>(kIllegalCid, |
| IsolateGroup::Current()); |
| } |
| |
| ClassPtr Class::NewNativeWrapper(const Library& library, |
| const String& name, |
| int field_count) { |
| Class& cls = Class::Handle(library.LookupClass(name)); |
| if (cls.IsNull()) { |
| cls = New(library, name, Script::Handle(), TokenPosition::kNoSource); |
| cls.SetFields(Object::empty_array()); |
| cls.SetFunctions(Object::empty_array()); |
| // Set super class to Object. |
| cls.set_super_type(Type::Handle(Type::ObjectType())); |
| // Compute the instance size. The first word contains a pointer to a |
| // properly sized typed array once the first native field has been set. |
| const intptr_t host_instance_size = |
| sizeof(UntaggedInstance) + kCompressedWordSize; |
| #if defined(DART_PRECOMPILER) |
| const intptr_t target_instance_size = |
| compiler::target::Instance::InstanceSize() + |
| compiler::target::kCompressedWordSize; |
| #else |
| const intptr_t target_instance_size = |
| sizeof(UntaggedInstance) + compiler::target::kCompressedWordSize; |
| #endif |
| cls.set_instance_size( |
| RoundedAllocationSize(host_instance_size), |
| compiler::target::RoundedAllocationSize(target_instance_size)); |
| cls.set_next_field_offset(host_instance_size, target_instance_size); |
| cls.set_num_native_fields(field_count); |
| cls.set_is_allocate_finalized(); |
| // The signature of the constructor yet to be added to this class will have |
| // to be finalized explicitly, since the class is prematurely marked as |
| // 'is_allocate_finalized' and finalization of member types will not occur. |
| cls.set_is_declaration_loaded(); |
| cls.set_is_type_finalized(); |
| cls.set_is_synthesized_class(); |
| cls.set_is_isolate_unsendable(true); |
| NOT_IN_PRECOMPILED(cls.set_implementor_cid(kDynamicCid)); |
| library.AddClass(cls); |
| return cls.ptr(); |
| } else { |
| return Class::null(); |
| } |
| } |
| |
| ClassPtr Class::NewStringClass(intptr_t class_id, IsolateGroup* isolate_group) { |
| intptr_t host_instance_size, target_instance_size; |
| if (class_id == kOneByteStringCid) { |
| host_instance_size = OneByteString::InstanceSize(); |
| target_instance_size = compiler::target::RoundedAllocationSize( |
| RTN::OneByteString::InstanceSize()); |
| } else { |
| ASSERT(class_id == kTwoByteStringCid); |
| host_instance_size = TwoByteString::InstanceSize(); |
| target_instance_size = compiler::target::RoundedAllocationSize( |
| RTN::TwoByteString::InstanceSize()); |
| } |
| Class& result = Class::Handle(New<String, RTN::String>( |
| class_id, isolate_group, /*register_class=*/false)); |
| result.set_instance_size(host_instance_size, target_instance_size); |
| |
| const intptr_t host_next_field_offset = String::NextFieldOffset(); |
| const intptr_t target_next_field_offset = RTN::String::NextFieldOffset(); |
| result.set_next_field_offset(host_next_field_offset, |
| target_next_field_offset); |
| result.set_is_prefinalized(); |
| ASSERT(IsDeeplyImmutableCid(class_id)); |
| result.set_is_deeply_immutable(true); |
| isolate_group->class_table()->Register(result); |
| return result.ptr(); |
| } |
| |
| ClassPtr Class::NewTypedDataClass(intptr_t class_id, |
| IsolateGroup* isolate_group) { |
| ASSERT(IsTypedDataClassId(class_id)); |
| const intptr_t host_instance_size = TypedData::InstanceSize(); |
| const intptr_t target_instance_size = |
| compiler::target::RoundedAllocationSize(RTN::TypedData::InstanceSize()); |
| Class& result = Class::Handle(New<TypedData, RTN::TypedData>( |
| class_id, isolate_group, /*register_class=*/false)); |
| result.set_instance_size(host_instance_size, target_instance_size); |
| |
| const intptr_t host_next_field_offset = TypedData::NextFieldOffset(); |
| const intptr_t target_next_field_offset = RTN::TypedData::NextFieldOffset(); |
| result.set_next_field_offset(host_next_field_offset, |
| target_next_field_offset); |
| result.set_is_prefinalized(); |
| isolate_group->class_table()->Register(result); |
| return result.ptr(); |
| } |
| |
| ClassPtr Class::NewTypedDataViewClass(intptr_t class_id, |
| IsolateGroup* isolate_group) { |
| ASSERT(IsTypedDataViewClassId(class_id)); |
| const intptr_t host_instance_size = TypedDataView::InstanceSize(); |
| const intptr_t target_instance_size = compiler::target::RoundedAllocationSize( |
| RTN::TypedDataView::InstanceSize()); |
| Class& result = Class::Handle(New<TypedDataView, RTN::TypedDataView>( |
| class_id, isolate_group, /*register_class=*/false)); |
| result.set_instance_size(host_instance_size, target_instance_size); |
| |
| const intptr_t host_next_field_offset = TypedDataView::NextFieldOffset(); |
| const intptr_t target_next_field_offset = |
| RTN::TypedDataView::NextFieldOffset(); |
| result.set_next_field_offset(host_next_field_offset, |
| target_next_field_offset); |
| result.set_is_prefinalized(); |
| isolate_group->class_table()->Register(result); |
| return result.ptr(); |
| } |
| |
| ClassPtr Class::NewUnmodifiableTypedDataViewClass(intptr_t class_id, |
| IsolateGroup* isolate_group) { |
| ASSERT(IsUnmodifiableTypedDataViewClassId(class_id)); |
| const intptr_t host_instance_size = TypedDataView::InstanceSize(); |
| const intptr_t target_instance_size = compiler::target::RoundedAllocationSize( |
| RTN::TypedDataView::InstanceSize()); |
| Class& result = Class::Handle(New<TypedDataView, RTN::TypedDataView>( |
| class_id, isolate_group, /*register_class=*/false)); |
| result.set_instance_size(host_instance_size, target_instance_size); |
| |
| const intptr_t host_next_field_offset = TypedDataView::NextFieldOffset(); |
| const intptr_t target_next_field_offset = |
| RTN::TypedDataView::NextFieldOffset(); |
| result.set_next_field_offset(host_next_field_offset, |
| target_next_field_offset); |
| result.set_is_prefinalized(); |
| isolate_group->class_table()->Register(result); |
| return result.ptr(); |
| } |
| |
| ClassPtr Class::NewExternalTypedDataClass(intptr_t class_id, |
| IsolateGroup* isolate_group) { |
| ASSERT(IsExternalTypedDataClassId(class_id)); |
| const intptr_t host_instance_size = ExternalTypedData::InstanceSize(); |
| const intptr_t target_instance_size = compiler::target::RoundedAllocationSize( |
| RTN::ExternalTypedData::InstanceSize()); |
| Class& result = Class::Handle(New<ExternalTypedData, RTN::ExternalTypedData>( |
| class_id, isolate_group, /*register_class=*/false)); |
| |
| const intptr_t host_next_field_offset = ExternalTypedData::NextFieldOffset(); |
| const intptr_t target_next_field_offset = |
| RTN::ExternalTypedData::NextFieldOffset(); |
| result.set_instance_size(host_instance_size, target_instance_size); |
| result.set_next_field_offset(host_next_field_offset, |
| target_next_field_offset); |
| result.set_is_prefinalized(); |
| isolate_group->class_table()->Register(result); |
| return result.ptr(); |
| } |
| |
| ClassPtr Class::NewPointerClass(intptr_t class_id, |
| IsolateGroup* isolate_group) { |
| ASSERT(IsFfiPointerClassId(class_id)); |
| intptr_t host_instance_size = Pointer::InstanceSize(); |
| intptr_t target_instance_size = |
| compiler::target::RoundedAllocationSize(RTN::Pointer::InstanceSize()); |
| Class& result = Class::Handle(New<Pointer, RTN::Pointer>( |
| class_id, isolate_group, /*register_class=*/false)); |
| result.set_instance_size(host_instance_size, target_instance_size); |
| result.set_type_arguments_field_offset(Pointer::type_arguments_offset(), |
| RTN::Pointer::type_arguments_offset()); |
| |
| const intptr_t host_next_field_offset = Pointer::NextFieldOffset(); |
| const intptr_t target_next_field_offset = RTN::Pointer::NextFieldOffset(); |
| |
| result.set_next_field_offset(host_next_field_offset, |
| target_next_field_offset); |
| result.set_is_prefinalized(); |
| isolate_group->class_table()->Register(result); |
| return result.ptr(); |
| } |
| |
| void Class::set_name(const String& value) const { |
| ASSERT(untag()->name() == String::null()); |
| ASSERT(value.IsSymbol()); |
| untag()->set_name(value.ptr()); |
| #if !defined(PRODUCT) |
| if (untag()->user_name() == String::null()) { |
| // TODO(johnmccutchan): Eagerly set user name for VM isolate classes, |
| // lazily set user name for the other classes. |
| // Generate and set user_name. |
| const String& user_name = String::Handle( |
| Symbols::New(Thread::Current(), GenerateUserVisibleName())); |
| set_user_name(user_name); |
| } |
| #endif // !defined(PRODUCT) |
| } |
| |
| #if !defined(PRODUCT) |
| void Class::set_user_name(const String& value) const { |
| untag()->set_user_name(value.ptr()); |
| } |
| #endif // !defined(PRODUCT) |
| |
| #if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER) |
| void Class::SetUserVisibleNameInClassTable() { |
| IsolateGroup* isolate_group = IsolateGroup::Current(); |
| auto class_table = isolate_group->class_table(); |
| if (class_table->UserVisibleNameFor(id()) == nullptr) { |
| String& name = String::Handle(UserVisibleName()); |
| class_table->SetUserVisibleNameFor(id(), name.ToMallocCString()); |
| } |
| } |
| #endif // !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER) |
| |
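| // Returns the user-visible name for this class, e.g. the interface name |
| // for internal implementation classes; with --show_internal_names the |
| // internal name is returned unchanged. |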
| const char* Class::GenerateUserVisibleName() const { |
| if (FLAG_show_internal_names) { |
| return String::Handle(Name()).ToCString(); |
| } |
| switch (id()) { |
| case kFloat32x4Cid: |
| return Symbols::Float32x4().ToCString(); |
| case kFloat64x2Cid: |
| return Symbols::Float64x2().ToCString(); |
| case kInt32x4Cid: |
| return Symbols::Int32x4().ToCString(); |
| case kTypedDataInt8ArrayCid: |
| case kExternalTypedDataInt8ArrayCid: |
| return Symbols::Int8List().ToCString(); |
| case kTypedDataUint8ArrayCid: |
| case kExternalTypedDataUint8ArrayCid: |
| return Symbols::Uint8List().ToCString(); |
| case kTypedDataUint8ClampedArrayCid: |
| case kExternalTypedDataUint8ClampedArrayCid: |
| return Symbols::Uint8ClampedList().ToCString(); |
| case kTypedDataInt16ArrayCid: |
| case kExternalTypedDataInt16ArrayCid: |
| return Symbols::Int16List().ToCString(); |
| case kTypedDataUint16ArrayCid: |
| case kExternalTypedDataUint16ArrayCid: |
| return Symbols::Uint16List().ToCString(); |
| case kTypedDataInt32ArrayCid: |
| case kExternalTypedDataInt32ArrayCid: |
| return Symbols::Int32List().ToCString(); |
| case kTypedDataUint32ArrayCid: |
| case kExternalTypedDataUint32ArrayCid: |
| return Symbols::Uint32List().ToCString(); |
| case kTypedDataInt64ArrayCid: |
| case kExternalTypedDataInt64ArrayCid: |
| return Symbols::Int64List().ToCString(); |
| case kTypedDataUint64ArrayCid: |
| case kExternalTypedDataUint64ArrayCid: |
| return Symbols::Uint64List().ToCString(); |
| case kTypedDataInt32x4ArrayCid: |
| case kExternalTypedDataInt32x4ArrayCid: |
| return Symbols::Int32x4List().ToCString(); |
| case kTypedDataFloat32x4ArrayCid: |
| case kExternalTypedDataFloat32x4ArrayCid: |
| return Symbols::Float32x4List().ToCString(); |
| case kTypedDataFloat64x2ArrayCid: |
| case kExternalTypedDataFloat64x2ArrayCid: |
| return Symbols::Float64x2List().ToCString(); |
| case kTypedDataFloat32ArrayCid: |
| case kExternalTypedDataFloat32ArrayCid: |
| return Symbols::Float32List().ToCString(); |
| case kTypedDataFloat64ArrayCid: |
| case kExternalTypedDataFloat64ArrayCid: |
| return Symbols::Float64List().ToCString(); |
| case kPointerCid: |
| return Symbols::FfiPointer().ToCString(); |
| case kDynamicLibraryCid: |
| return Symbols::FfiDynamicLibrary().ToCString(); |
| case kNullCid: |
| return Symbols::Null().ToCString(); |
| case kDynamicCid: |
| return Symbols::Dynamic().ToCString(); |
| case kVoidCid: |
| return Symbols::Void().ToCString(); |
| case kNeverCid: |
| return Symbols::Never().ToCString(); |
| case kClassCid: |
| return Symbols::Class().ToCString(); |
| case kTypeParametersCid: |
| return Symbols::TypeParameters().ToCString(); |
| case kTypeArgumentsCid: |
| return Symbols::TypeArguments().ToCString(); |
| case kPatchClassCid: |
| return Symbols::PatchClass().ToCString(); |
| case kFunctionCid: |
| return Symbols::Function().ToCString(); |
| case kClosureDataCid: |
| return Symbols::ClosureData().ToCString(); |
| case kFfiTrampolineDataCid: |
| return Symbols::FfiTrampolineData().ToCString(); |
| case kFieldCid: |
| return Symbols::Field().ToCString(); |
| case kScriptCid: |
| return Symbols::Script().ToCString(); |
| case kLibraryCid: |
| return Symbols::Library().ToCString(); |
| case kLibraryPrefixCid: |
| return Symbols::LibraryPrefix().ToCString(); |
| case kNamespaceCid: |
| return Symbols::Namespace().ToCString(); |
| case kKernelProgramInfoCid: |
| return Symbols::KernelProgramInfo().ToCString(); |
| case kWeakSerializationReferenceCid: |
| return Symbols::WeakSerializationReference().ToCString(); |
| case kWeakArrayCid: |
| return Symbols::WeakArray().ToCString(); |
| case kCodeCid: |
| return Symbols::Code().ToCString(); |
| case kBytecodeCid: |
| return Symbols::Bytecode().ToCString(); |
| case kInstructionsCid: |
| return Symbols::Instructions().ToCString(); |
| case kInstructionsSectionCid: |
| return Symbols::InstructionsSection().ToCString(); |
| case kInstructionsTableCid: |
| return Symbols::InstructionsTable().ToCString(); |
| case kObjectPoolCid: |
| return Symbols::ObjectPool().ToCString(); |
| case kCodeSourceMapCid: |
| return Symbols::CodeSourceMap().ToCString(); |
| case kPcDescriptorsCid: |
| return Symbols::PcDescriptors().ToCString(); |
| case kCompressedStackMapsCid: |
| return Symbols::CompressedStackMaps().ToCString(); |
| case kLocalVarDescriptorsCid: |
| return Symbols::LocalVarDescriptors().ToCString(); |
| case kExceptionHandlersCid: |
| return Symbols::ExceptionHandlers().ToCString(); |
| case kContextCid: |
| return Symbols::Context().ToCString(); |
| case kContextScopeCid: |
| return Symbols::ContextScope().ToCString(); |
| case kSentinelCid: |
| return Symbols::Sentinel().ToCString(); |
| case kSingleTargetCacheCid: |
| return Symbols::SingleTargetCache().ToCString(); |
| case kICDataCid: |
| return Symbols::ICData().ToCString(); |
| case kMegamorphicCacheCid: |
| return Symbols::MegamorphicCache().ToCString(); |
| case kSubtypeTestCacheCid: |
| return Symbols::SubtypeTestCache().ToCString(); |
| case kLoadingUnitCid: |
| return Symbols::LoadingUnit().ToCString(); |
| case kApiErrorCid: |
| return Symbols::ApiError().ToCString(); |
| case kLanguageErrorCid: |
| return Symbols::LanguageError().ToCString(); |
| case kUnhandledExceptionCid: |
| return Symbols::UnhandledException().ToCString(); |
| case kUnwindErrorCid: |
| return Symbols::UnwindError().ToCString(); |
| case kIntegerCid: |
| case kSmiCid: |
| case kMintCid: |
| return Symbols::Int().ToCString(); |
| case kDoubleCid: |
| return Symbols::Double().ToCString(); |
| case kOneByteStringCid: |
| case kTwoByteStringCid: |
| return Symbols::_String().ToCString(); |
| case kArrayCid: |
| case kImmutableArrayCid: |
| case kGrowableObjectArrayCid: |
| return Symbols::List().ToCString(); |
| } |
| String& name = String::Handle(Name()); |
| name = Symbols::New(Thread::Current(), String::ScrubName(name)); |
| if (name.ptr() == Symbols::_Future().ptr() && |
| library() == Library::AsyncLibrary()) { |
| return Symbols::Future().ToCString(); |
| } |
| return name.ToCString(); |
| } |
| |
| void Class::set_script(const Script& value) const { |
| untag()->set_script(value.ptr()); |
| } |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| KernelProgramInfoPtr Class::KernelProgramInfo() const { |
| const auto& lib = Library::Handle(library()); |
| return lib.kernel_program_info(); |
| } |
| |
| void Class::set_token_pos(TokenPosition token_pos) const { |
| ASSERT(!token_pos.IsClassifying()); |
| StoreNonPointer(&untag()->token_pos_, token_pos); |
| } |
| |
| void Class::set_end_token_pos(TokenPosition token_pos) const { |
| ASSERT(!token_pos.IsClassifying()); |
| StoreNonPointer(&untag()->end_token_pos_, token_pos); |
| } |
| |
| void Class::set_implementor_cid(intptr_t value) const { |
| ASSERT(value >= 0 && value < std::numeric_limits<classid_t>::max()); |
| StoreNonPointer(&untag()->implementor_cid_, value); |
| } |
| |
| void Class::ClearImplementor() const { |
| // Check the raw implementor_cid_ without the normalization done by the |
| // implementor_cid() accessor. |
| if (untag()->implementor_cid_ != kVoidCid) { |
| set_implementor_cid(kIllegalCid); |
| } |
| } |
| |
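| // Records [implementor] in this class's implementor state machine: |
| // kIllegalCid (none) -> a single implementor cid -> kDynamicCid (many). |
| // Returns true if the state changed. |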
| bool Class::NoteImplementor(const Class& implementor) const { |
| ASSERT(!implementor.is_abstract()); |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| if (implementor_cid() == kDynamicCid) { |
| return false; |
| } else if (implementor_cid() == implementor.id()) { |
| return false; |
| } else if (implementor_cid() == kIllegalCid) { |
| set_implementor_cid(implementor.id()); |
| return true; // None -> One |
| } else { |
| set_implementor_cid(kDynamicCid); |
| return true; // One -> Many |
| } |
| } |
| #endif // !defined(DART_PRECOMPILED_RUNTIME) |
| |
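| // A class hashes as the hash of its (symbol) name. |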
| uint32_t Class::Hash() const { |
| return Class::Hash(ptr()); |
| } |
| uint32_t Class::Hash(ClassPtr obj) { |
| return String::HashRawSymbol(obj.untag()->name()); |
| } |
| |
| int32_t Class::SourceFingerprint() const { |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| if (is_declared_in_bytecode()) { |
| return 0; |
| } |
| return kernel::KernelSourceFingerprintHelper::CalculateClassFingerprint( |
| *this); |
| #else |
| return 0; |
| #endif // !defined(DART_PRECOMPILED_RUNTIME) |
| } |
| |
| void Class::set_is_implemented(bool value) const { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| set_is_implemented_unsafe(value); |
| } |
| |
| void Class::set_is_implemented_unsafe(bool value) const { |
| set_state_bits(ImplementedBit::update(value, state_bits())); |
| } |
| |
| void Class::set_is_abstract() const { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| set_state_bits(AbstractBit::update(true, state_bits())); |
| } |
| |
| void Class::set_is_declaration_loaded() const { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| set_is_declaration_loaded_unsafe(); |
| } |
| |
| void Class::set_is_declaration_loaded_unsafe() const { |
| ASSERT(!is_declaration_loaded()); |
| set_state_bits(ClassLoadingBits::update(UntaggedClass::kDeclarationLoaded, |
| state_bits())); |
| } |
| |
| void Class::set_is_type_finalized() const { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| ASSERT(is_declaration_loaded()); |
| ASSERT(!is_type_finalized()); |
| set_state_bits( |
| ClassLoadingBits::update(UntaggedClass::kTypeFinalized, state_bits())); |
| } |
| |
| void Class::set_is_synthesized_class() const { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| set_is_synthesized_class_unsafe(); |
| } |
| |
| void Class::set_is_synthesized_class_unsafe() const { |
| set_state_bits(SynthesizedClassBit::update(true, state_bits())); |
| } |
| |
| void Class::set_is_enum_class() const { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| set_state_bits(EnumBit::update(true, state_bits())); |
| } |
| |
| void Class::set_is_const() const { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| set_state_bits(ConstBit::update(true, state_bits())); |
| } |
| |
| void Class::set_is_transformed_mixin_application() const { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| set_state_bits(TransformedMixinApplicationBit::update(true, state_bits())); |
| } |
| |
| void Class::set_is_sealed() const { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| set_state_bits(SealedBit::update(true, state_bits())); |
| } |
| |
| void Class::set_is_mixin_class() const { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| set_state_bits(MixinClassBit::update(true, state_bits())); |
| } |
| |
| void Class::set_is_base_class() const { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| set_state_bits(BaseClassBit::update(true, state_bits())); |
| } |
| |
| void Class::set_is_interface_class() const { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| set_state_bits(InterfaceClassBit::update(true, state_bits())); |
| } |
| |
| void Class::set_is_final() const { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| set_state_bits(FinalBit::update(true, state_bits())); |
| } |
| |
| void Class::set_is_fields_marked_nullable() const { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| set_state_bits(FieldsMarkedNullableBit::update(true, state_bits())); |
| } |
| |
| void Class::set_is_allocated(bool value) const { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| set_is_allocated_unsafe(value); |
| } |
| |
| void Class::set_is_allocated_unsafe(bool value) const { |
| set_state_bits(IsAllocatedBit::update(value, state_bits())); |
| } |
| |
| void Class::set_is_loaded(bool value) const { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| set_state_bits(IsLoadedBit::update(value, state_bits())); |
| } |
| |
| #if defined(DART_DYNAMIC_MODULES) |
| void Class::set_is_declared_in_bytecode(bool value) const { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| set_state_bits(IsDeclaredInBytecodeBit::update(value, state_bits())); |
| } |
| #endif // defined(DART_DYNAMIC_MODULES) |
| |
| void Class::set_is_finalized() const { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| ASSERT(!is_finalized()); |
| set_is_finalized_unsafe(); |
| } |
| |
| void Class::set_is_finalized_unsafe() const { |
| set_state_bits( |
| ClassFinalizedBits::update(UntaggedClass::kFinalized, state_bits())); |
| } |
| |
| void Class::set_is_allocate_finalized() const { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| ASSERT(!is_allocate_finalized()); |
| set_state_bits(ClassFinalizedBits::update(UntaggedClass::kAllocateFinalized, |
| state_bits())); |
| } |
| |
| void Class::set_is_prefinalized() const { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| ASSERT(!is_finalized()); |
| set_state_bits( |
| ClassFinalizedBits::update(UntaggedClass::kPreFinalized, state_bits())); |
| } |
| |
| void Class::set_interfaces(const Array& value) const { |
| ASSERT(!value.IsNull()); |
| untag()->set_interfaces(value.ptr()); |
| } |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| |
| void Class::AddDirectImplementor(const Class& implementor, |
| bool is_mixin) const { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| ASSERT(is_implemented()); |
| ASSERT(!implementor.IsNull()); |
| GrowableObjectArray& direct_implementors = |
| GrowableObjectArray::Handle(untag()->direct_implementors()); |
| if (direct_implementors.IsNull()) { |
| direct_implementors = GrowableObjectArray::New(4, Heap::kOld); |
| untag()->set_direct_implementors(direct_implementors.ptr()); |
| } |
| #if defined(DEBUG) |
| // Verify that the same class is not added twice. |
  // The only exception is mixins: when a mixin application is transformed,
  // the mixin is added to the end of the interfaces list and may be
  // duplicated:
| // class X = A with B implements B; |
| // This is rare and harmless. |
| if (!is_mixin) { |
| for (intptr_t i = 0; i < direct_implementors.Length(); i++) { |
| ASSERT(direct_implementors.At(i) != implementor.ptr()); |
| } |
| } |
| #endif |
| direct_implementors.Add(implementor, Heap::kOld); |
| } |
| |
| void Class::set_direct_implementors( |
| const GrowableObjectArray& implementors) const { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| untag()->set_direct_implementors(implementors.ptr()); |
| } |
| |
| void Class::AddDirectSubclass(const Class& subclass) const { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| ASSERT(!subclass.IsNull()); |
| ASSERT(subclass.SuperClass() == ptr()); |
| // Do not keep track of the direct subclasses of class Object. |
| ASSERT(!IsObjectClass()); |
| GrowableObjectArray& direct_subclasses = |
| GrowableObjectArray::Handle(untag()->direct_subclasses()); |
| if (direct_subclasses.IsNull()) { |
| direct_subclasses = GrowableObjectArray::New(4, Heap::kOld); |
| untag()->set_direct_subclasses(direct_subclasses.ptr()); |
| } |
| #if defined(DEBUG) |
| // Verify that the same class is not added twice. |
| for (intptr_t i = 0; i < direct_subclasses.Length(); i++) { |
| ASSERT(direct_subclasses.At(i) != subclass.ptr()); |
| } |
| #endif |
| direct_subclasses.Add(subclass, Heap::kOld); |
| } |
| |
| void Class::set_direct_subclasses(const GrowableObjectArray& subclasses) const { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| untag()->set_direct_subclasses(subclasses.ptr()); |
| } |
| |
| #endif // !defined(DART_PRECOMPILED_RUNTIME) |
| |
| ArrayPtr Class::constants() const { |
| return untag()->constants(); |
| } |
| |
| void Class::set_constants(const Array& value) const { |
| untag()->set_constants(value.ptr()); |
| } |
| |
| void Class::set_declaration_type(const Type& value) const { |
| ASSERT(id() != kDynamicCid && id() != kVoidCid); |
| ASSERT(!value.IsNull() && value.IsCanonical() && value.IsOld()); |
| ASSERT((declaration_type() == Object::null()) || |
| (declaration_type() == value.ptr())); // Set during own finalization. |
| // Since DeclarationType is used as the runtime type of instances of a |
| // non-generic class, its nullability must be kNonNullable. |
| // The exception is DeclarationType of Null which is kNullable. |
| ASSERT(value.type_class_id() != kNullCid || value.IsNullable()); |
| ASSERT(value.type_class_id() == kNullCid || value.IsNonNullable()); |
| untag()->set_declaration_type<std::memory_order_release>(value.ptr()); |
| } |
| |
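// Returns the cached declaration type, computing and caching it on first
// use. Uses double-checked locking: an unlocked fast-path read is followed
// by a second read under the program lock before the type is built and
// published.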
| TypePtr Class::DeclarationType() const { |
| ASSERT(is_declaration_loaded()); |
| if (IsNullClass()) { |
| return Type::NullType(); |
| } |
| if (IsDynamicClass()) { |
| return Type::DynamicType(); |
| } |
| if (IsVoidClass()) { |
| return Type::VoidType(); |
| } |
| if (declaration_type() != Type::null()) { |
| return declaration_type(); |
| } |
| { |
| auto thread = Thread::Current(); |
| SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock()); |
| if (declaration_type() != Type::null()) { |
| return declaration_type(); |
| } |
| // For efficiency, the runtimeType intrinsic returns the type cached by |
| // DeclarationType without checking its nullability. Therefore, we |
| // consistently cache the kNonNullable version of the type. |
| // The exception is type Null which is stored as kNullable. |
| TypeArguments& type_args = TypeArguments::Handle(); |
| const intptr_t num_type_params = NumTypeParameters(); |
| if (num_type_params > 0) { |
| type_args = TypeArguments::New(num_type_params); |
| TypeParameter& type_param = TypeParameter::Handle(); |
| for (intptr_t i = 0; i < num_type_params; i++) { |
| type_param = TypeParameterAt(i); |
| type_args.SetTypeAt(i, type_param); |
| } |
| } |
| Type& type = |
| Type::Handle(Type::New(*this, type_args, Nullability::kNonNullable)); |
| type ^= ClassFinalizer::FinalizeType(type); |
| set_declaration_type(type); |
| return type.ptr(); |
| } |
| } |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| void Class::set_allocation_stub(const Code& value) const { |
  // Never clear the stub, as it may still be a call target; it will be
  // GC'd once it is no longer referenced.
| ASSERT(!value.IsNull()); |
| ASSERT(untag()->allocation_stub() == Code::null()); |
| untag()->set_allocation_stub(value.ptr()); |
| } |
| #endif // !defined(DART_PRECOMPILED_RUNTIME) |
| |
| void Class::DisableAllocationStub() const { |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| UNREACHABLE(); |
| #else |
| { |
| const Code& existing_stub = Code::Handle(allocation_stub()); |
| if (existing_stub.IsNull()) { |
| return; |
| } |
| } |
| auto thread = Thread::Current(); |
| SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock()); |
| const Code& existing_stub = Code::Handle(allocation_stub()); |
| if (existing_stub.IsNull()) { |
| return; |
| } |
| ASSERT(!existing_stub.IsDisabled()); |
| // Change the stub so that the next caller will regenerate the stub. |
| existing_stub.DisableStubCode(NumTypeParameters() > 0); |
| // Disassociate the existing stub from class. |
| untag()->set_allocation_stub(Code::null()); |
| #endif // defined(DART_PRECOMPILED_RUNTIME) |
| } |
| |
| bool Class::IsDartFunctionClass() const { |
| return ptr() == Type::Handle(Type::DartFunctionType()).type_class(); |
| } |
| |
| bool Class::IsFutureClass() const { |
| // Looking up future_class in the object store would not work, because |
| // this function is called during class finalization, before the object store |
  // field has been initialized by InitKnownObjects().
| return (Name() == Symbols::Future().ptr()) && |
| (library() == Library::AsyncLibrary()); |
| } |
| |
| // Checks if type T0 is a subtype of type T1. |
| // Type T0 is specified by class 'cls' parameterized with 'type_arguments' and |
| // by 'nullability', and type T1 is specified by 'other' and must have a type |
| // class. |
| // [type_arguments] should be a flattened instance type arguments vector. |
| bool Class::IsSubtypeOf(const Class& cls, |
| const TypeArguments& type_arguments, |
| Nullability nullability, |
| const AbstractType& other, |
| Heap::Space space, |
| FunctionTypeMapping* function_type_equivalence) { |
| TRACE_TYPE_CHECKS_VERBOSE(" Class::IsSubtypeOf(%s %s, %s)\n", |
| cls.ToCString(), type_arguments.ToCString(), |
| other.ToCString()); |
| // This function does not support Null, Never, dynamic, or void as type T0. |
| classid_t this_cid = cls.id(); |
| ASSERT(this_cid != kNullCid && this_cid != kNeverCid && |
| this_cid != kDynamicCid && this_cid != kVoidCid); |
| ASSERT(type_arguments.IsNull() || |
| (type_arguments.Length() >= cls.NumTypeArguments())); |
| // Type T1 must have a type class (e.g. not a type param or a function type). |
| ASSERT(other.HasTypeClass()); |
| const classid_t other_cid = other.type_class_id(); |
| if (other_cid == kDynamicCid || other_cid == kVoidCid) { |
| TRACE_TYPE_CHECKS_VERBOSE(" - result: true (right is top)\n"); |
| return true; |
| } |
| // Left nullable: |
| // if T0 is S0? then: |
| // T0 <: T1 iff S0 <: T1 and Null <: T1 |
| if ((nullability == Nullability::kNullable) && |
| !Instance::NullIsAssignableTo(other)) { |
| TRACE_TYPE_CHECKS_VERBOSE(" - result: false (nullability)\n"); |
| return false; |
| } |
| |
| // Right Object. |
| if (other_cid == kObjectCid) { |
| TRACE_TYPE_CHECKS_VERBOSE(" - result: true (right is Object)\n"); |
| return true; |
| } |
| |
| Thread* thread = Thread::Current(); |
| Zone* zone = thread->zone(); |
| const Class& other_class = Class::Handle(zone, other.type_class()); |
| const TypeArguments& other_type_arguments = |
| TypeArguments::Handle(zone, other.arguments()); |
  // Use the 'this_class' object as if it were the receiver of this method,
  // but instead of recursing, reset it to the super class and loop.
| Class& this_class = Class::Handle(zone, cls.ptr()); |
| while (true) { |
| // Apply additional subtyping rules if T0 or T1 are 'FutureOr'. |
| |
| // Left FutureOr: |
| // if T0 is FutureOr<S0> then: |
| // T0 <: T1 iff Future<S0> <: T1 and S0 <: T1 |
| if (this_cid == kFutureOrCid) { |
| // Check Future<S0> <: T1. |
| ObjectStore* object_store = IsolateGroup::Current()->object_store(); |
| const Class& future_class = |
| Class::Handle(zone, object_store->future_class()); |
| ASSERT(!future_class.IsNull() && future_class.NumTypeParameters() == 1 && |
| this_class.NumTypeParameters() == 1); |
| ASSERT(type_arguments.IsNull() || type_arguments.Length() >= 1); |
| if (Class::IsSubtypeOf(future_class, type_arguments, |
| Nullability::kNonNullable, other, space, |
| function_type_equivalence)) { |
| // Check S0 <: T1. |
| const AbstractType& type_arg = |
| AbstractType::Handle(zone, type_arguments.TypeAtNullSafe(0)); |
| if (type_arg.IsSubtypeOf(other, space, function_type_equivalence)) { |
| TRACE_TYPE_CHECKS_VERBOSE(" - result: true (left is FutureOr)\n"); |
| return true; |
| } |
| } |
| } |
| |
| // Right FutureOr: |
| // if T1 is FutureOr<S1> then: |
| // T0 <: T1 iff any of the following hold: |
| // either T0 <: Future<S1> |
| // or T0 <: S1 |
| // or T0 is X0 and X0 has bound S0 and S0 <: T1 (checked elsewhere) |
| if (other_cid == kFutureOrCid) { |
| const AbstractType& other_type_arg = |
| AbstractType::Handle(zone, other_type_arguments.TypeAtNullSafe(0)); |
| // Check if S1 is a top type. |
| if (other_type_arg.IsTopTypeForSubtyping()) { |
| TRACE_TYPE_CHECKS_VERBOSE( |
| " - result: true (right is FutureOr top)\n"); |
| return true; |
| } |
| // Check T0 <: Future<S1> when T0 is Future<S0>. |
| if (this_class.IsFutureClass()) { |
| const AbstractType& type_arg = |
| AbstractType::Handle(zone, type_arguments.TypeAtNullSafe(0)); |
| // If T0 is Future<S0>, then T0 <: Future<S1>, iff S0 <: S1. |
| if (type_arg.IsSubtypeOf(other_type_arg, space, |
| function_type_equivalence)) { |
| TRACE_TYPE_CHECKS_VERBOSE( |
| " - result: true (left is Future, right is FutureOr)\n"); |
| return true; |
| } |
| } |
      // The check T0 <: Future<S1> for T0 being FutureOr<S0> was already
      // done above (left FutureOr case).
| // Check T0 <: S1. |
| if (other_type_arg.HasTypeClass() && |
| Class::IsSubtypeOf(this_class, type_arguments, nullability, |
| other_type_arg, space, |
| function_type_equivalence)) { |
| TRACE_TYPE_CHECKS_VERBOSE( |
| " - result: true (right is FutureOr, subtype of arg)\n"); |
| return true; |
| } |
| } |
| |
| // Check for reflexivity. |
| if (this_class.ptr() == other_class.ptr()) { |
| const intptr_t num_type_params = this_class.NumTypeParameters(); |
| if (num_type_params == 0) { |
| TRACE_TYPE_CHECKS_VERBOSE( |
| " - result: true (same non-generic class)\n"); |
| return true; |
| } |
| // Check for covariance. |
| if (other_type_arguments.IsNull()) { |
| TRACE_TYPE_CHECKS_VERBOSE( |
| " - result: true (same class, dynamic type args)\n"); |
| return true; |
| } |
| const intptr_t num_type_args = this_class.NumTypeArguments(); |
| const intptr_t from_index = num_type_args - num_type_params; |
| ASSERT(other_type_arguments.Length() == num_type_params); |
| AbstractType& type = AbstractType::Handle(zone); |
| AbstractType& other_type = AbstractType::Handle(zone); |
| for (intptr_t i = 0; i < num_type_params; ++i) { |
| type = type_arguments.TypeAtNullSafe(from_index + i); |
| other_type = other_type_arguments.TypeAt(i); |
| ASSERT(!type.IsNull() && !other_type.IsNull()); |
| if (!type.IsSubtypeOf(other_type, space, function_type_equivalence)) { |
| TRACE_TYPE_CHECKS_VERBOSE( |
| " - result: false (same class, type args mismatch)\n"); |
| return false; |
| } |
| } |
| TRACE_TYPE_CHECKS_VERBOSE( |
| " - result: true (same class, matching type args)\n"); |
| return true; |
| } |
| |
| // _Closure <: Function |
| if (this_class.IsClosureClass() && other_class.IsDartFunctionClass()) { |
| TRACE_TYPE_CHECKS_VERBOSE( |
| " - result: true (left is closure, right is Function)\n"); |
| return true; |
| } |
| |
| // Check for 'direct super type' specified in the implements clause |
| // and check for transitivity at the same time. |
| Array& interfaces = Array::Handle(zone, this_class.interfaces()); |
| Type& interface = Type::Handle(zone); |
| Class& interface_class = Class::Handle(zone); |
| TypeArguments& interface_args = TypeArguments::Handle(zone); |
| for (intptr_t i = 0; i < interfaces.Length(); i++) { |
| interface ^= interfaces.At(i); |
| ASSERT(interface.IsFinalized()); |
| interface_class = interface.type_class(); |
| interface_args = interface.arguments(); |
| if (!interface_args.IsNull() && !interface_args.IsInstantiated()) { |
| // This type class implements an interface that is parameterized with |
| // generic type(s), e.g. it implements List<T>. |
| // The uninstantiated type T must be instantiated using the type |
| // parameters of this type before performing the type test. |
| // The type arguments of this type that are referred to by the type |
| // parameters of the interface are at the end of the type vector, |
| // after the type arguments of the super type of this type. |
| // The index of the type parameters is adjusted upon finalization. |
| interface_args = interface_args.InstantiateFrom( |
| type_arguments, Object::null_type_arguments(), kNoneFree, space); |
| } |
| interface_args = interface_class.GetInstanceTypeArguments( |
| thread, interface_args, /*canonicalize=*/false); |
| // In Dart 2, implementing Function has no meaning. |
| // TODO(regis): Can we encounter and skip Object as well? |
| if (interface_class.IsDartFunctionClass()) { |
| continue; |
| } |
| if (Class::IsSubtypeOf(interface_class, interface_args, |
| Nullability::kNonNullable, other, space, |
| function_type_equivalence)) { |
| TRACE_TYPE_CHECKS_VERBOSE(" - result: true (interface found)\n"); |
| return true; |
| } |
| } |
| // "Recurse" up the class hierarchy until we have reached the top. |
| this_class = this_class.SuperClass(); |
| if (this_class.IsNull()) { |
| TRACE_TYPE_CHECKS_VERBOSE(" - result: false (supertype not found)\n"); |
| return false; |
| } |
| this_cid = this_class.id(); |
| } |
| UNREACHABLE(); |
| return false; |
| } |
| |
| bool Class::IsTopLevel() const { |
| return Name() == Symbols::TopLevel().ptr(); |
| } |
| |
| bool Class::IsPrivate() const { |
| return Library::IsPrivate(String::Handle(Name())); |
| } |
| |
| FunctionPtr Class::LookupDynamicFunctionUnsafe(const String& name) const { |
| return LookupFunctionReadLocked(name, kInstance); |
| } |
| |
| FunctionPtr Class::LookupDynamicFunctionAllowPrivate(const String& name) const { |
| return LookupFunctionAllowPrivate(name, kInstance); |
| } |
| |
| FunctionPtr Class::LookupStaticFunction(const String& name) const { |
| Thread* thread = Thread::Current(); |
| SafepointReadRwLocker ml(thread, thread->isolate_group()->program_lock()); |
| return LookupFunctionReadLocked(name, kStatic); |
| } |
| |
| FunctionPtr Class::LookupStaticFunctionAllowPrivate(const String& name) const { |
| return LookupFunctionAllowPrivate(name, kStatic); |
| } |
| |
| FunctionPtr Class::LookupConstructor(const String& name) const { |
| Thread* thread = Thread::Current(); |
| SafepointReadRwLocker ml(thread, thread->isolate_group()->program_lock()); |
| return LookupFunctionReadLocked(name, kConstructor); |
| } |
| |
| FunctionPtr Class::LookupConstructorAllowPrivate(const String& name) const { |
| return LookupFunctionAllowPrivate(name, kConstructor); |
| } |
| |
| FunctionPtr Class::LookupFactory(const String& name) const { |
| Thread* thread = Thread::Current(); |
| SafepointReadRwLocker ml(thread, thread->isolate_group()->program_lock()); |
| return LookupFunctionReadLocked(name, kFactory); |
| } |
| |
| FunctionPtr Class::LookupFactoryAllowPrivate(const String& name) const { |
| return LookupFunctionAllowPrivate(name, kFactory); |
| } |
| |
| FunctionPtr Class::LookupFunctionAllowPrivate(const String& name) const { |
| return LookupFunctionAllowPrivate(name, kAny); |
| } |
| |
| FunctionPtr Class::LookupFunctionReadLocked(const String& name) const { |
| return LookupFunctionReadLocked(name, kAny); |
| } |
| |
| // Returns true if 'prefix' and 'accessor_name' match 'name'. |
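// For example, the name "get:length" matches the prefix "get:" (length 4)
// combined with the accessor name "length".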
| static bool MatchesAccessorName(const String& name, |
| const char* prefix, |
| intptr_t prefix_length, |
| const String& accessor_name) { |
| intptr_t name_len = name.Length(); |
| intptr_t accessor_name_len = accessor_name.Length(); |
| |
| if (name_len != (accessor_name_len + prefix_length)) { |
| return false; |
| } |
| for (intptr_t i = 0; i < prefix_length; i++) { |
| if (name.CharAt(i) != prefix[i]) { |
| return false; |
| } |
| } |
| for (intptr_t i = 0, j = prefix_length; i < accessor_name_len; i++, j++) { |
| if (name.CharAt(j) != accessor_name.CharAt(i)) { |
| return false; |
| } |
| } |
| return true; |
| } |
| |
| FunctionPtr Class::CheckFunctionType(const Function& func, MemberKind kind) { |
| if ((kind == kInstance) || (kind == kInstanceAllowAbstract)) { |
| if (func.IsDynamicFunction(kind == kInstanceAllowAbstract)) { |
| return func.ptr(); |
| } |
| } else if (kind == kStatic) { |
| if (func.IsStaticFunction()) { |
| return func.ptr(); |
| } |
| } else if (kind == kConstructor) { |
| if (func.IsGenerativeConstructor()) { |
| ASSERT(!func.is_static()); |
| return func.ptr(); |
| } |
| } else if (kind == kFactory) { |
| if (func.IsFactory()) { |
| ASSERT(func.is_static()); |
| return func.ptr(); |
| } |
| } else if (kind == kAny) { |
| return func.ptr(); |
| } |
| return Function::null(); |
| } |
| |
| FunctionPtr Class::LookupFunctionReadLocked(const String& name, |
| MemberKind kind) const { |
| ASSERT(!IsNull()); |
| Thread* thread = Thread::Current(); |
| RELEASE_ASSERT(is_finalized()); |
  // The caller must hold the program_lock, because this method can be
  // invoked while the lock is held for either reading or writing.
| #if defined(DEBUG) |
| ASSERT(thread->isolate_group()->program_lock()->IsCurrentThreadReader()); |
| #endif |
| ASSERT(functions() != Array::null()); |
| REUSABLE_ARRAY_HANDLESCOPE(thread); |
| REUSABLE_FUNCTION_HANDLESCOPE(thread); |
| Array& funcs = thread->ArrayHandle(); |
| funcs = functions(); |
| const intptr_t len = funcs.Length(); |
| Function& function = thread->FunctionHandle(); |
| if (len >= kFunctionLookupHashThreshold) { |
    // TODO(dartbug.com/36097): We currently require a read lock in the
    // resolver to avoid read-write races on this hash table.
    // If we want to increase resolver speed by removing the need for the
    // read lock, we could make this hash table lock-free for readers.
| const Array& hash_table = |
| Array::Handle(thread->zone(), untag()->functions_hash_table()); |
| if (!hash_table.IsNull()) { |
| ClassFunctionsSet set(hash_table.ptr()); |
| REUSABLE_STRING_HANDLESCOPE(thread); |
| function ^= set.GetOrNull(FunctionName(name, &(thread->StringHandle()))); |
| // No mutations. |
| ASSERT(set.Release().ptr() == hash_table.ptr()); |
| return function.IsNull() ? Function::null() |
| : CheckFunctionType(function, kind); |
| } |
| } |
| if (name.IsSymbol()) { |
| // Quick Symbol compare. |
| NoSafepointScope no_safepoint; |
| for (intptr_t i = 0; i < len; i++) { |
| function ^= funcs.At(i); |
| if (function.name() == name.ptr()) { |
| return CheckFunctionType(function, kind); |
| } |
| } |
| } else { |
| REUSABLE_STRING_HANDLESCOPE(thread); |
| String& function_name = thread->StringHandle(); |
| for (intptr_t i = 0; i < len; i++) { |
| function ^= funcs.At(i); |
| function_name = function.name(); |
| if (function_name.Equals(name)) { |
| return CheckFunctionType(function, kind); |
| } |
| } |
| } |
| // No function found. |
| return Function::null(); |
| } |
| |
| FunctionPtr Class::LookupFunctionAllowPrivate(const String& name, |
| MemberKind kind) const { |
| ASSERT(!IsNull()); |
| Thread* thread = Thread::Current(); |
| RELEASE_ASSERT(is_finalized()); |
| SafepointReadRwLocker ml(thread, thread->isolate_group()->program_lock()); |
| REUSABLE_ARRAY_HANDLESCOPE(thread); |
| REUSABLE_FUNCTION_HANDLESCOPE(thread); |
| REUSABLE_STRING_HANDLESCOPE(thread); |
| Array& funcs = thread->ArrayHandle(); |
| funcs = current_functions(); |
| ASSERT(!funcs.IsNull()); |
| const intptr_t len = funcs.Length(); |
| Function& function = thread->FunctionHandle(); |
| String& function_name = thread->StringHandle(); |
| for (intptr_t i = 0; i < len; i++) { |
| function ^= funcs.At(i); |
| function_name = function.name(); |
| if (String::EqualsIgnoringPrivateKey(function_name, name)) { |
| return CheckFunctionType(function, kind); |
| } |
| } |
| // No function found. |
| return Function::null(); |
| } |
| |
| FunctionPtr Class::LookupGetterFunction(const String& name) const { |
| return LookupAccessorFunction(kGetterPrefix, kGetterPrefixLength, name); |
| } |
| |
| FunctionPtr Class::LookupSetterFunction(const String& name) const { |
| return LookupAccessorFunction(kSetterPrefix, kSetterPrefixLength, name); |
| } |
| |
| FunctionPtr Class::LookupAccessorFunction(const char* prefix, |
| intptr_t prefix_length, |
| const String& name) const { |
| ASSERT(!IsNull()); |
| Thread* thread = Thread::Current(); |
| if (EnsureIsFinalized(thread) != Error::null()) { |
| return Function::null(); |
| } |
| REUSABLE_ARRAY_HANDLESCOPE(thread); |
| REUSABLE_FUNCTION_HANDLESCOPE(thread); |
| REUSABLE_STRING_HANDLESCOPE(thread); |
| Array& funcs = thread->ArrayHandle(); |
| funcs = current_functions(); |
| intptr_t len = funcs.Length(); |
| Function& function = thread->FunctionHandle(); |
| String& function_name = thread->StringHandle(); |
| for (intptr_t i = 0; i < len; i++) { |
| function ^= funcs.At(i); |
| function_name = function.name(); |
| if (MatchesAccessorName(function_name, prefix, prefix_length, name)) { |
| return function.ptr(); |
| } |
| } |
| |
| // No function found. |
| return Function::null(); |
| } |
| |
| FieldPtr Class::LookupInstanceField(const String& name) const { |
| return LookupField(name, kInstance); |
| } |
| |
| FieldPtr Class::LookupStaticField(const String& name) const { |
| return LookupField(name, kStatic); |
| } |
| |
| FieldPtr Class::LookupField(const String& name) const { |
| return LookupField(name, kAny); |
| } |
| |
| FieldPtr Class::LookupField(const String& name, MemberKind kind) const { |
| ASSERT(!IsNull()); |
| Thread* thread = Thread::Current(); |
| if (EnsureIsFinalized(thread) != Error::null()) { |
| return Field::null(); |
| } |
| REUSABLE_ARRAY_HANDLESCOPE(thread); |
| REUSABLE_FIELD_HANDLESCOPE(thread); |
| REUSABLE_STRING_HANDLESCOPE(thread); |
| Array& flds = thread->ArrayHandle(); |
| flds = fields(); |
| ASSERT(!flds.IsNull()); |
| intptr_t len = flds.Length(); |
| Field& field = thread->FieldHandle(); |
| if (name.IsSymbol()) { |
| // Use fast raw pointer string compare for symbols. |
| for (intptr_t i = 0; i < len; i++) { |
| field ^= flds.At(i); |
| if (name.ptr() == field.name()) { |
| if (kind == kInstance) { |
| return field.is_static() ? Field::null() : field.ptr(); |
| } else if (kind == kStatic) { |
| return field.is_static() ? field.ptr() : Field::null(); |
| } |
| ASSERT(kind == kAny); |
| return field.ptr(); |
| } |
| } |
| } else { |
| String& field_name = thread->StringHandle(); |
| for (intptr_t i = 0; i < len; i++) { |
| field ^= flds.At(i); |
| field_name = field.name(); |
| if (name.Equals(field_name)) { |
| if (kind == kInstance) { |
| return field.is_static() ? Field::null() : field.ptr(); |
| } else if (kind == kStatic) { |
| return field.is_static() ? field.ptr() : Field::null(); |
| } |
| ASSERT(kind == kAny); |
| return field.ptr(); |
| } |
| } |
| } |
| return Field::null(); |
| } |
| |
| FieldPtr Class::LookupFieldAllowPrivate(const String& name, |
| bool instance_only) const { |
| ASSERT(!IsNull()); |
| // Use slow string compare, ignoring privacy name mangling. |
| Thread* thread = Thread::Current(); |
| if (EnsureIsFinalized(thread) != Error::null()) { |
| return Field::null(); |
| } |
| REUSABLE_ARRAY_HANDLESCOPE(thread); |
| REUSABLE_FIELD_HANDLESCOPE(thread); |
| REUSABLE_STRING_HANDLESCOPE(thread); |
| Array& flds = thread->ArrayHandle(); |
| flds = fields(); |
| ASSERT(!flds.IsNull()); |
| intptr_t len = flds.Length(); |
| Field& field = thread->FieldHandle(); |
| String& field_name = thread->StringHandle(); |
| for (intptr_t i = 0; i < len; i++) { |
| field ^= flds.At(i); |
| field_name = field.name(); |
| if (field.is_static() && instance_only) { |
| // If we only care about instance fields, skip statics. |
| continue; |
| } |
| if (String::EqualsIgnoringPrivateKey(field_name, name)) { |
| return field.ptr(); |
| } |
| } |
| return Field::null(); |
| } |
| |
| FieldPtr Class::LookupInstanceFieldAllowPrivate(const String& name) const { |
| Field& field = Field::Handle(LookupFieldAllowPrivate(name, true)); |
| if (!field.IsNull() && !field.is_static()) { |
| return field.ptr(); |
| } |
| return Field::null(); |
| } |
| |
| FieldPtr Class::LookupStaticFieldAllowPrivate(const String& name) const { |
| Field& field = Field::Handle(LookupFieldAllowPrivate(name)); |
| if (!field.IsNull() && field.is_static()) { |
| return field.ptr(); |
| } |
| return Field::null(); |
| } |
| |
| const char* Class::ToCString() const { |
| NoSafepointScope no_safepoint; |
| const Library& lib = Library::Handle(library()); |
| const char* library_name = lib.IsNull() ? "" : lib.ToCString(); |
| const char* class_name = String::Handle(Name()).ToCString(); |
| return OS::SCreate(Thread::Current()->zone(), "%s Class: %s", library_name, |
| class_name); |
| } |
| |
| // Thomas Wang, Integer Hash Functions. |
| // https://gist.github.com/badboy/6267743 |
| // "64 bit to 32 bit Hash Functions" |
| static uword Hash64To32(uint64_t v) { |
| v = ~v + (v << 18); |
| v = v ^ (v >> 31); |
| v = v * 21; |
| v = v ^ (v >> 11); |
| v = v + (v << 6); |
| v = v ^ (v >> 22); |
| return static_cast<uint32_t>(v); |
| } |
| |
| InstancePtr Class::LookupCanonicalInstance(Zone* zone, |
| const Instance& value) const { |
| ASSERT(this->ptr() == value.clazz()); |
| ASSERT(is_finalized() || is_prefinalized()); |
| Instance& canonical_value = Instance::Handle(zone); |
| if (this->constants() != Array::null()) { |
| CanonicalInstancesSet constants(zone, this->constants()); |
| canonical_value ^= constants.GetOrNull(CanonicalInstanceKey(value)); |
| this->set_constants(constants.Release()); |
| } |
| return canonical_value.ptr(); |
| } |
| |
| InstancePtr Class::InsertCanonicalConstant(Zone* zone, |
| const Instance& constant) const { |
| ASSERT(constant.IsCanonical()); |
| ASSERT(this->ptr() == constant.clazz()); |
| Instance& canonical_value = Instance::Handle(zone); |
| if (this->constants() == Array::null()) { |
| CanonicalInstancesSet constants( |
| HashTables::New<CanonicalInstancesSet>(128, Heap::kOld)); |
| canonical_value ^= constants.InsertNewOrGet(CanonicalInstanceKey(constant)); |
| this->set_constants(constants.Release()); |
| } else { |
| CanonicalInstancesSet constants(Thread::Current()->zone(), |
| this->constants()); |
| canonical_value ^= constants.InsertNewOrGet(CanonicalInstanceKey(constant)); |
| this->set_constants(constants.Release()); |
| } |
| return canonical_value.ptr(); |
| } |
| |
| // Scoped mapping FunctionType -> FunctionType. |
| // Used for tracking and updating nested generic function types |
| // and their type parameters. |
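// Mappings form an intrusive, scope-allocated linked list: the constructor
// pushes the new node onto the caller's list head (*mapping) and remembers
// the previous head as its parent, so Find() walks from the innermost
// enclosing scope outwards.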
| class FunctionTypeMapping : public ValueObject { |
| public: |
| FunctionTypeMapping(Zone* zone, |
| FunctionTypeMapping** mapping, |
| const FunctionType& from, |
| const FunctionType& to) |
| : zone_(zone), parent_(*mapping), from_(from), to_(to) { |
| // Add self to the linked list. |
| *mapping = this; |
| } |
| |
| const FunctionType* Find(const Object& from) const { |
| if (!from.IsFunctionType()) { |
| return nullptr; |
| } |
| for (const FunctionTypeMapping* scope = this; scope != nullptr; |
| scope = scope->parent_) { |
| if (scope->from_.ptr() == from.ptr()) { |
| return &(scope->to_); |
| } |
| } |
| return nullptr; |
| } |
| |
| TypeParameterPtr MapTypeParameter(const TypeParameter& type_param) const { |
| ASSERT(type_param.IsFunctionTypeParameter()); |
| const FunctionType* new_owner = Find( |
| FunctionType::Handle(zone_, type_param.parameterized_function_type())); |
| if (new_owner != nullptr) { |
| return new_owner->TypeParameterAt(type_param.index() - type_param.base(), |
| type_param.nullability()); |
| } |
| return type_param.ptr(); |
| } |
| |
| bool ContainsOwnersOfTypeParameters(const TypeParameter& p1, |
| const TypeParameter& p2) const { |
| auto& from = FunctionType::Handle(zone_, p1.parameterized_function_type()); |
| const FunctionType* to = Find(from); |
| if (to != nullptr) { |
| return to->ptr() == p2.parameterized_function_type(); |
| } |
| from = p2.parameterized_function_type(); |
| to = Find(from); |
| if (to != nullptr) { |
| return to->ptr() == p1.parameterized_function_type(); |
| } |
| return false; |
| } |
| |
| private: |
| Zone* zone_; |
| const FunctionTypeMapping* const parent_; |
| const FunctionType& from_; |
| const FunctionType& to_; |
| }; |
| |
| intptr_t TypeParameters::Length() const { |
| if (IsNull() || untag()->names() == Array::null()) return 0; |
| return Smi::Value(untag()->names()->untag()->length()); |
| } |
| |
| void TypeParameters::set_names(const Array& value) const { |
| ASSERT(!value.IsNull()); |
| untag()->set_names(value.ptr()); |
| } |
| |
| StringPtr TypeParameters::NameAt(intptr_t index) const { |
| const Array& names_array = Array::Handle(names()); |
| return String::RawCast(names_array.At(index)); |
| } |
| |
| void TypeParameters::SetNameAt(intptr_t index, const String& value) const { |
| const Array& names_array = Array::Handle(names()); |
| names_array.SetAt(index, value); |
| } |
| |
| void TypeParameters::set_flags(const Array& value) const { |
| untag()->set_flags(value.ptr()); |
| } |
| |
| void TypeParameters::set_bounds(const TypeArguments& value) const { |
| // A null value represents a vector of dynamic. |
| untag()->set_bounds(value.ptr()); |
| } |
| |
| AbstractTypePtr TypeParameters::BoundAt(intptr_t index) const { |
| const TypeArguments& upper_bounds = TypeArguments::Handle(bounds()); |
| return upper_bounds.IsNull() ? Type::DynamicType() |
| : upper_bounds.TypeAt(index); |
| } |
| |
| void TypeParameters::SetBoundAt(intptr_t index, |
| const AbstractType& value) const { |
| const TypeArguments& upper_bounds = TypeArguments::Handle(bounds()); |
| upper_bounds.SetTypeAt(index, value); |
| } |
| |
| bool TypeParameters::AllDynamicBounds() const { |
| return bounds() == TypeArguments::null(); |
| } |
| |
| void TypeParameters::set_defaults(const TypeArguments& value) const { |
| // The null value represents a vector of dynamic. |
| untag()->set_defaults(value.ptr()); |
| } |
| |
| AbstractTypePtr TypeParameters::DefaultAt(intptr_t index) const { |
| const TypeArguments& default_type_args = TypeArguments::Handle(defaults()); |
| return default_type_args.IsNull() ? Type::DynamicType() |
| : default_type_args.TypeAt(index); |
| } |
| |
| void TypeParameters::SetDefaultAt(intptr_t index, |
| const AbstractType& value) const { |
| const TypeArguments& default_type_args = TypeArguments::Handle(defaults()); |
| default_type_args.SetTypeAt(index, value); |
| } |
| |
| bool TypeParameters::AllDynamicDefaults() const { |
| return defaults() == TypeArguments::null(); |
| } |
| |
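// The flags are packed kFlagsPerSmi bits per Smi: the flag for type
// parameter [index] is bit (index & kFlagsPerSmiMask) of the Smi at
// (index >> kFlagsPerSmiShift). For example, assuming kFlagsPerSmi == 32,
// the flag for index 40 is bit 8 of the second Smi in the flags array.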
| void TypeParameters::AllocateFlags(Heap::Space space) const { |
| const intptr_t len = (Length() + kFlagsPerSmiMask) >> kFlagsPerSmiShift; |
| const Array& flags_array = Array::Handle(Array::New(len, space)); |
| // Initialize flags to 0. |
| const Smi& zero = Smi::Handle(Smi::New(0)); |
| for (intptr_t i = 0; i < len; i++) { |
| flags_array.SetAt(i, zero); |
| } |
| set_flags(flags_array); |
| } |
| |
| void TypeParameters::OptimizeFlags() const { |
| if (untag()->flags() == Array::null()) return; // Already optimized. |
| const intptr_t len = (Length() + kFlagsPerSmiMask) >> kFlagsPerSmiShift; |
| const Array& flags_array = Array::Handle(flags()); |
| const Smi& zero = Smi::Handle(Smi::New(0)); |
| for (intptr_t i = 0; i < len; i++) { |
| if (flags_array.At(i) != zero.ptr()) return; |
| } |
| set_flags(Object::null_array()); |
| } |
| |
| bool TypeParameters::IsGenericCovariantImplAt(intptr_t index) const { |
| if (untag()->flags() == Array::null()) return false; |
| const intptr_t flag = Smi::Value( |
| Smi::RawCast(Array::Handle(flags()).At(index >> kFlagsPerSmiShift))); |
  return (flag & (1 << (index & kFlagsPerSmiMask))) != 0;
| } |
| |
| void TypeParameters::SetIsGenericCovariantImplAt(intptr_t index, |
| bool value) const { |
| const Array& flg = Array::Handle(flags()); |
| intptr_t flag = Smi::Value(Smi::RawCast(flg.At(index >> kFlagsPerSmiShift))); |
  if (value) {
    flag |= 1 << (index & kFlagsPerSmiMask);
  } else {
    flag &= ~(1 << (index & kFlagsPerSmiMask));
  }
| flg.SetAt(index >> kFlagsPerSmiShift, Smi::Handle(Smi::New(flag))); |
| } |
| |
| void TypeParameters::Print(Thread* thread, |
| Zone* zone, |
| bool are_class_type_parameters, |
| intptr_t base, |
| NameVisibility name_visibility, |
| BaseTextBuffer* printer) const { |
| String& name = String::Handle(zone); |
| AbstractType& type = AbstractType::Handle(zone); |
| const intptr_t num_type_params = Length(); |
| for (intptr_t i = 0; i < num_type_params; i++) { |
| if (are_class_type_parameters) { |
| name = NameAt(i); |
| printer->AddString(name.ToCString()); |
| } else { |
| printer->AddString(TypeParameter::CanonicalNameCString( |
| are_class_type_parameters, base, base + i)); |
| } |
| if (FLAG_show_internal_names || !AllDynamicBounds()) { |
| type = BoundAt(i); |
| // Do not print default bound. |
| if (!type.IsNull() && (FLAG_show_internal_names || !type.IsObjectType() || |
| type.IsNonNullable())) { |
| printer->AddString(" extends "); |
| type.PrintName(name_visibility, printer); |
| if (FLAG_show_internal_names && !AllDynamicDefaults()) { |
| type = DefaultAt(i); |
| if (!type.IsNull() && |
| (FLAG_show_internal_names || !type.IsDynamicType())) { |
| printer->AddString(" defaults to "); |
| type.PrintName(name_visibility, printer); |
| } |
| } |
| } |
| } |
| if (i != num_type_params - 1) { |
| printer->AddString(", "); |
| } |
| } |
| } |
| |
| const char* TypeParameters::ToCString() const { |
| if (IsNull()) { |
| return "TypeParameters: null"; |
| } |
| auto thread = Thread::Current(); |
| auto zone = thread->zone(); |
| ZoneTextBuffer buffer(zone); |
| buffer.AddString("TypeParameters: "); |
| Print(thread, zone, true, 0, kInternalName, &buffer); |
| return buffer.buffer(); |
| } |
| |
| TypeParametersPtr TypeParameters::New(Heap::Space space) { |
| ASSERT(Object::type_parameters_class() != Class::null()); |
| return Object::Allocate<TypeParameters>(space); |
| } |
| |
| TypeParametersPtr TypeParameters::New(intptr_t count, Heap::Space space) { |
| const TypeParameters& result = |
| TypeParameters::Handle(TypeParameters::New(space)); |
| // Create an [ Array ] of [ String ] objects to represent the names. |
| // Create a [ TypeArguments ] vector representing the bounds. |
| // Create a [ TypeArguments ] vector representing the defaults. |
| // Create an [ Array ] of [ Smi] objects to represent the flags. |
| const Array& names_array = Array::Handle(Array::New(count, space)); |
| result.set_names(names_array); |
| TypeArguments& type_args = TypeArguments::Handle(); |
| type_args = TypeArguments::New(count, Heap::kNew); // Will get canonicalized. |
| result.set_bounds(type_args); |
| type_args = TypeArguments::New(count, Heap::kNew); // Will get canonicalized. |
| result.set_defaults(type_args); |
| result.AllocateFlags(space); // Will get optimized. |
| return result.ptr(); |
| } |
| |
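// Packs kNullabilityBitsPerType bits per type into a Smi, covering at most
// kNullabilityMaxTypes leading types. For example, for <int?, String> the
// result is kNullableBit | (kNonNullableBit << kNullabilityBitsPerType).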
| intptr_t TypeArguments::ComputeNullability() const { |
| if (IsNull()) return 0; |
| const intptr_t num_types = Length(); |
| intptr_t result = 0; |
| if (num_types <= kNullabilityMaxTypes) { |
| AbstractType& type = AbstractType::Handle(); |
| for (intptr_t i = 0; i < num_types; i++) { |
| type = TypeAt(i); |
| intptr_t type_bits = 0; |
| if (!type.IsNull()) { |
| switch (type.nullability()) { |
| case Nullability::kNullable: |
| type_bits = kNullableBit; |
| break; |
| case Nullability::kNonNullable: |
| type_bits = kNonNullableBit; |
| break; |
| default: |
| UNREACHABLE(); |
| } |
| } |
| result |= (type_bits << (i * kNullabilityBitsPerType)); |
| } |
| } |
| set_nullability(result); |
| return result; |
| } |
| |
| void TypeArguments::set_nullability(intptr_t value) const { |
| untag()->set_nullability(Smi::New(value)); |
| } |
| |
| uword TypeArguments::HashForRange(intptr_t from_index, intptr_t len) const { |
| if (IsNull()) return kAllDynamicHash; |
| if (IsRaw(from_index, len)) return kAllDynamicHash; |
| uint32_t result = 0; |
| AbstractType& type = AbstractType::Handle(); |
| for (intptr_t i = 0; i < len; i++) { |
| type = TypeAt(from_index + i); |
| ASSERT(!type.IsNull()); |
| result = CombineHashes(result, type.Hash()); |
| } |
| result = FinalizeHash(result, kHashBits); |
| return result; |
| } |
| |
| uword TypeArguments::ComputeHash() const { |
| if (IsNull()) return kAllDynamicHash; |
| const uword result = HashForRange(0, Length()); |
| ASSERT(result != 0); |
| SetHash(result); |
| return result; |
| } |
| |
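// For example, prepending other = <String> (other_length = 1) to this =
// <int> with total_length = 2 yields <String, int>.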
| TypeArgumentsPtr TypeArguments::Prepend(Zone* zone, |
| const TypeArguments& other, |
| intptr_t other_length, |
| intptr_t total_length) const { |
| if (other_length == 0) { |
| ASSERT(IsCanonical()); |
| return ptr(); |
| } else if (other_length == total_length) { |
| ASSERT(other.IsCanonical()); |
| return other.ptr(); |
| } else if (IsNull() && other.IsNull()) { |
| return TypeArguments::null(); |
| } |
| const TypeArguments& result = |
| TypeArguments::Handle(zone, TypeArguments::New(total_length, Heap::kNew)); |
| AbstractType& type = AbstractType::Handle(zone); |
| for (intptr_t i = 0; i < other_length; i++) { |
| type = other.IsNull() ? Type::DynamicType() : other.TypeAt(i); |
| result.SetTypeAt(i, type); |
| } |
| for (intptr_t i = other_length; i < total_length; i++) { |
| type = IsNull() ? Type::DynamicType() : TypeAt(i - other_length); |
| result.SetTypeAt(i, type); |
| } |
| return result.Canonicalize(Thread::Current()); |
| } |
| |
| TypeArgumentsPtr TypeArguments::ConcatenateTypeParameters( |
| Zone* zone, |
| const TypeArguments& other) const { |
| ASSERT(!IsNull() && !other.IsNull()); |
| const intptr_t this_len = Length(); |
| const intptr_t other_len = other.Length(); |
| const auto& result = TypeArguments::Handle( |
| zone, TypeArguments::New(this_len + other_len, Heap::kNew)); |
| auto& type = AbstractType::Handle(zone); |
| for (intptr_t i = 0; i < this_len; ++i) { |
| type = TypeAt(i); |
| result.SetTypeAt(i, type); |
| } |
| for (intptr_t i = 0; i < other_len; ++i) { |
| type = other.TypeAt(i); |
| result.SetTypeAt(this_len + i, type); |
| } |
| return result.ptr(); |
| } |
| |
| InstantiationMode TypeArguments::GetInstantiationMode(Zone* zone, |
| const Function* function, |
| const Class* cls) const { |
| if (IsNull() || IsInstantiated()) { |
| return InstantiationMode::kIsInstantiated; |
| } |
| if (function != nullptr) { |
| if (CanShareFunctionTypeArguments(*function)) { |
| return InstantiationMode::kSharesFunctionTypeArguments; |
| } |
| if (cls == nullptr) { |
| cls = &Class::Handle(zone, function->Owner()); |
| } |
| } |
| if (cls != nullptr) { |
| if (CanShareInstantiatorTypeArguments(*cls)) { |
| return InstantiationMode::kSharesInstantiatorTypeArguments; |
| } |
| } |
| return InstantiationMode::kNeedsInstantiation; |
| } |
| |
| StringPtr TypeArguments::Name() const { |
| Thread* thread = Thread::Current(); |
| ZoneTextBuffer printer(thread->zone()); |
| PrintSubvectorName(0, Length(), kInternalName, &printer); |
| return Symbols::New(thread, printer.buffer()); |
| } |
| |
| StringPtr TypeArguments::UserVisibleName() const { |
| Thread* thread = Thread::Current(); |
| ZoneTextBuffer printer(thread->zone()); |
| PrintSubvectorName(0, Length(), kUserVisibleName, &printer); |
| return Symbols::New(thread, printer.buffer()); |
| } |
| |
| void TypeArguments::PrintSubvectorName(intptr_t from_index, |
| intptr_t len, |
| NameVisibility name_visibility, |
| BaseTextBuffer* printer) const { |
| printer->AddString("<"); |
| AbstractType& type = AbstractType::Handle(); |
| for (intptr_t i = 0; i < len; i++) { |
| if (from_index + i < Length()) { |
| type = TypeAt(from_index + i); |
| if (type.IsNull()) { |
| printer->AddString("null"); // Unfinalized vector. |
| } else { |
| type.PrintName(name_visibility, printer); |
| } |
| } else { |
| printer->AddString("dynamic"); |
| } |
| if (i < len - 1) { |
| printer->AddString(", "); |
| } |
| } |
| printer->AddString(">"); |
| } |
| |
| void TypeArguments::PrintTo(BaseTextBuffer* buffer) const { |
| buffer->AddString("TypeArguments: "); |
| if (IsNull()) { |
| return buffer->AddString("null"); |
| } |
| buffer->Printf("(H%" Px ")", Smi::Value(untag()->hash())); |
| auto& type_at = AbstractType::Handle(); |
| for (intptr_t i = 0; i < Length(); i++) { |
| type_at = TypeAt(i); |
| buffer->Printf(" [%s]", type_at.IsNull() ? "null" : type_at.ToCString()); |
| } |
| } |
| |
| bool TypeArguments::IsSubvectorEquivalent( |
| const TypeArguments& other, |
| intptr_t from_index, |
| intptr_t len, |
| TypeEquality kind, |
| FunctionTypeMapping* function_type_equivalence) const { |
| if (this->ptr() == other.ptr()) { |
| return true; |
| } |
| if (kind == TypeEquality::kCanonical) { |
| if (IsNull() || other.IsNull()) { |
| return false; |
| } |
| if (Length() != other.Length()) { |
| return false; |
| } |
| } |
| AbstractType& type = AbstractType::Handle(); |
| AbstractType& other_type = AbstractType::Handle(); |
| for (intptr_t i = from_index; i < from_index + len; i++) { |
| type = IsNull() ? Type::DynamicType() : TypeAt(i); |
| ASSERT(!type.IsNull()); |
| other_type = other.IsNull() ? Type::DynamicType() : other.TypeAt(i); |
| ASSERT(!other_type.IsNull()); |
| if (!type.IsEquivalent(other_type, kind, function_type_equivalence)) { |
| return false; |
| } |
| } |
| return true; |
| } |
| |
| bool TypeArguments::IsDynamicTypes(bool raw_instantiated, |
| intptr_t from_index, |
| intptr_t len) const { |
| ASSERT(Length() >= (from_index + len)); |
| AbstractType& type = AbstractType::Handle(); |
| Class& type_class = Class::Handle(); |
| for (intptr_t i = 0; i < len; i++) { |
| type = TypeAt(from_index + i); |
| if (type.IsNull()) { |
| return false; |
| } |
| if (!type.HasTypeClass()) { |
| if (raw_instantiated && type.IsTypeParameter()) { |
| // An uninstantiated type parameter is equivalent to dynamic. |
| continue; |
| } |
| return false; |
| } |
| type_class = type.type_class(); |
| if (!type_class.IsDynamicClass()) { |
| return false; |
| } |
| } |
| return true; |
| } |
| |
| TypeArguments::Cache::Cache(Zone* zone, const TypeArguments& source) |
| : zone_(ASSERT_NOTNULL(zone)), |
| cache_container_(&source), |
| data_(Array::Handle(source.instantiations())), |
| smi_handle_(Smi::Handle(zone)) { |
| ASSERT(IsolateGroup::Current() |
| ->type_arguments_canonicalization_mutex() |
| ->IsOwnedByCurrentThread()); |
| } |
| |
| TypeArguments::Cache::Cache(Zone* zone, const Array& array) |
| : zone_(ASSERT_NOTNULL(zone)), |
| cache_container_(nullptr), |
| data_(Array::Handle(array.ptr())), |
| smi_handle_(Smi::Handle(zone)) { |
| ASSERT(IsolateGroup::Current() |
| ->type_arguments_canonicalization_mutex() |
| ->IsOwnedByCurrentThread()); |
| } |
| |
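// The backing Array stores a metadata Smi followed by fixed-size tuples of
// (instantiator TAV, function TAV, instantiated TAV). Small caches are
// searched linearly; once the storage grows beyond kMaxLinearCacheSize
// elements, it is organized as a hash table instead.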
| bool TypeArguments::Cache::IsHash(const Array& array) { |
| return array.Length() > kMaxLinearCacheSize; |
| } |
| |
| intptr_t TypeArguments::Cache::NumOccupied(const Array& array) { |
| return NumOccupiedBits::decode( |
| RawSmiValue(Smi::RawCast(array.AtAcquire(kMetadataIndex)))); |
| } |
| |
| #if defined(DEBUG) |
| bool TypeArguments::Cache::IsValidStorageLocked(const Array& array) { |
| // We only require the mutex be held so we don't need to use acquire/release |
| // semantics to access and set the number of occupied entries in the header. |
| ASSERT(IsolateGroup::Current() |
| ->type_arguments_canonicalization_mutex() |
| ->IsOwnedByCurrentThread()); |
| // Quick check against the empty linear cache. |
| if (array.ptr() == EmptyStorage().ptr()) return true; |
| const intptr_t num_occupied = NumOccupied(array); |
| // We should be using the same shared value for an empty cache. |
| if (num_occupied == 0) return false; |
| const intptr_t storage_len = array.Length(); |
| // All caches have the metadata followed by a series of entries. |
| if ((storage_len % kEntrySize) != kHeaderSize) return false; |
| const intptr_t num_entries = NumEntries(array); |
| // Linear caches contain at least one unoccupied entry, and hash-based caches |
| // grow prior to hitting 100% occupancy. |
| if (num_occupied >= num_entries) return false; |
| // In a linear cache, all entries with indexes smaller than [num_occupied] |
| // should be occupied and ones greater than or equal should be unoccupied. |
| const bool is_linear_cache = IsLinear(array); |
  // The capacity of a hash-based cache must be a power of two (see
  // EnsureCapacity as to why).
| if (!is_linear_cache) { |
| if (!Utils::IsPowerOfTwo(num_entries)) return false; |
| const intptr_t metadata = |
| RawSmiValue(Smi::RawCast(array.AtAcquire(kMetadataIndex))); |
| if ((1 << EntryCountLog2Bits::decode(metadata)) != num_entries) { |
| return false; |
| } |
| } |
| for (intptr_t i = 0; i < num_entries; i++) { |
| const intptr_t index = kHeaderSize + i * kEntrySize; |
| if (array.At(index + kSentinelIndex) == Sentinel()) { |
| if (is_linear_cache && i < num_occupied) return false; |
| continue; |
| } |
| if (is_linear_cache && i >= num_occupied) return false; |
| // The elements of an occupied entry are all TypeArguments values. |
| for (intptr_t j = index; j < index + kEntrySize; j++) { |
| if (!array.At(j)->IsHeapObject()) return false; |
| if (array.At(j) == Object::null()) continue; // null is a valid TAV. |
| if (!array.At(j)->IsTypeArguments()) return false; |
| } |
| } |
| return true; |
| } |
| #endif |
| |
| bool TypeArguments::Cache::IsOccupied(intptr_t entry) const { |
| InstantiationsCacheTable table(data_); |
| ASSERT(entry >= 0 && entry < table.Length()); |
| return table.At(entry).Get<kSentinelIndex>() != Sentinel(); |
| } |
| |
| TypeArgumentsPtr TypeArguments::Cache::Retrieve(intptr_t entry) const { |
| ASSERT(IsOccupied(entry)); |
| InstantiationsCacheTable table(data_); |
| return table.At(entry).Get<kInstantiatedTypeArgsIndex>(); |
| } |
| |
| intptr_t TypeArguments::Cache::NumEntries(const Array& array) { |
| InstantiationsCacheTable table(array); |
| return table.Length(); |
| } |
| |
| TypeArguments::Cache::KeyLocation TypeArguments::Cache::FindKeyOrUnused( |
| const Array& array, |
| const TypeArguments& instantiator_tav, |
| const TypeArguments& function_tav) { |
| const bool is_hash = IsHash(array); |
| InstantiationsCacheTable table(array); |
| const intptr_t num_entries = table.Length(); |
| // For a linear cache, start at the first entry and probe linearly. This can |
| // be done because a linear cache always has at least one unoccupied entry |
| // after all the occupied ones. |
| intptr_t probe = 0; |
| intptr_t probe_distance = 1; |
| if (is_hash) { |
| // For a hash-based cache, instead start at an entry determined by the hash |
| // of the keys. |
| auto hash = FinalizeHash( |
| CombineHashes(instantiator_tav.Hash(), function_tav.Hash())); |
| probe = hash & (num_entries - 1); |
| } |
| while (true) { |
| const auto& tuple = table.At(probe); |
| if (tuple.Get<kSentinelIndex>() == Sentinel()) break; |
| if ((tuple.Get<kInstantiatorTypeArgsIndex>() == instantiator_tav.ptr()) && |
| (tuple.Get<kFunctionTypeArgsIndex>() == function_tav.ptr())) { |
| return {probe, true}; |
| } |
| // Advance probe by the current probing distance. |
| probe = probe + probe_distance; |
| if (is_hash) { |
| // Wrap around if the probe goes off the end of the entries array. |
| probe = probe & (num_entries - 1); |
      // We had a collision, so increase the probe distance. See the comment
      // in EnsureCapacity for an explanation of how this hits all slots.
| probe_distance++; |
| } |
| } |
| // We should always get the next slot for a linear cache. |
| ASSERT(is_hash || probe == NumOccupied(array)); |
| return {probe, false}; |
| } |
| |
| TypeArguments::Cache::KeyLocation TypeArguments::Cache::AddEntry( |
| intptr_t entry, |
| const TypeArguments& instantiator_tav, |
| const TypeArguments& function_tav, |
| const TypeArguments& instantiated_tav) const { |
| // We don't do mutating operations in tests without a TypeArguments object. |
| ASSERT(cache_container_ != nullptr); |
| #if defined(DEBUG) |
| auto loc = FindKeyOrUnused(instantiator_tav, function_tav); |
| ASSERT_EQUAL(loc.entry, entry); |
| ASSERT(!loc.present); |
| #endif |
| // Double-check we got the expected entry index when adding to a linear array. |
| ASSERT(!IsLinear() || entry == NumOccupied()); |
| const intptr_t new_occupied = NumOccupied() + 1; |
| const bool storage_changed = EnsureCapacity(new_occupied); |
  // Note that this call to IsLinear() may return a different result than the
  // earlier one, since EnsureCapacity() may have swapped to hash-based
  // storage.
| if (storage_changed && !IsLinear()) { |
| // The capacity of the array has changed, and the capacity is used when |
| // probing further into the array due to collisions. Thus, we need to redo |
| // the entry index calculation. |
| auto loc = FindKeyOrUnused(instantiator_tav, function_tav); |
| ASSERT(!loc.present); |
| entry = loc.entry; |
| } |
| |
| // Go ahead and increment the number of occupied entries prior to adding the |
| // entry. Use a store-release barrier in case of concurrent readers. |
| const intptr_t metadata = RawSmiValue(Smi::RawCast(data_.At(kMetadataIndex))); |
| smi_handle_ = Smi::New(NumOccupiedBits::update(new_occupied, metadata)); |
| data_.SetAtRelease(kMetadataIndex, smi_handle_); |
| |
| InstantiationsCacheTable table(data_); |
| const auto& tuple = table.At(entry); |
| // The parts of the tuple that aren't used for sentinel checking are only |
| // retrieved if the entry is occupied. Entries in the cache are never deleted, |
| // so once the entry is marked as occupied, the contents of that entry never |
| // change. Thus, we don't need store-release barriers here. |
| tuple.Set<kFunctionTypeArgsIndex>(function_tav); |
| tuple.Set<kInstantiatedTypeArgsIndex>(instantiated_tav); |
| // For the sentinel position, though, we do. |
| static_assert( |
| kSentinelIndex == kInstantiatorTypeArgsIndex, |
| "the sentinel position is not protected with a store-release barrier"); |
| tuple.Set<kInstantiatorTypeArgsIndex, std::memory_order_release>( |
| instantiator_tav); |
| |
| if (storage_changed) { |
| // Only check for validity on growth, just to keep the overhead on DEBUG |
| // builds down. |
| DEBUG_ASSERT(IsValidStorageLocked(data_)); |
| // Update the container of the original cache to point to the new one. |
| cache_container_->set_instantiations(data_); |
| } |
| |
| return {entry, true}; |
| } |
| |
| SmiPtr TypeArguments::Cache::Sentinel() { |
| return Smi::New(kSentinelValue); |
| } |
| |
| bool TypeArguments::Cache::EnsureCapacity(intptr_t new_occupied) const { |
| ASSERT(new_occupied > NumOccupied()); |
| // How many entries are in the current array (including unoccupied entries). |
| const intptr_t current_capacity = NumEntries(); |
| |
| // Early returns for cases where no growth is needed. |
| const bool is_linear = IsLinear(); |
| if (is_linear) { |
| // We need at least one unoccupied entry in addition to the occupied ones. |
| if (current_capacity > new_occupied) return false; |
| } else { |
| if (LoadFactor(new_occupied, current_capacity) < kMaxLoadFactor) { |
| return false; |
| } |
| } |
| |
| if (new_occupied <= kMaxLinearCacheEntries) { |
| ASSERT(is_linear); |
| // Not enough room for both the new entry and at least one unoccupied |
| // entry, so grow the tuple capacity of the linear cache by about 50%, |
| // ensuring that space for at least one new tuple is added, capping the |
| // total number of occupied entries to the max allowed. |
| const intptr_t new_capacity = |
| Utils::Minimum(current_capacity + (current_capacity >> 1), |
| kMaxLinearCacheEntries) + |
| 1; |
| const intptr_t cache_size = kHeaderSize + new_capacity * kEntrySize; |
| ASSERT(cache_size <= kMaxLinearCacheSize); |
| data_ = Array::Grow(data_, cache_size, Heap::kOld); |
| ASSERT(!data_.IsNull()); |
| // No need to adjust the number of occupied entries or old entries, as they |
| // are copied over by Array::Grow. Just mark any new entries as unoccupied. |
| smi_handle_ = Sentinel(); |
| InstantiationsCacheTable table(data_); |
| for (intptr_t i = current_capacity; i < new_capacity; i++) { |
| const auto& tuple = table.At(i); |
| tuple.Set<kSentinelIndex>(smi_handle_); |
| } |
| return true; |
| } |
| |
| // Either we're converting a linear cache into a hash-based cache, or the |
| // load factor of the hash-based cache has increased to the point where we |
| // need to grow it. |
| const intptr_t new_capacity = |
| is_linear ? kNumInitialHashCacheEntries : 2 * current_capacity; |
| // Because we use quadratic (actually triangle-number) probing, it is
| // important that the size is a power of two (otherwise we could fail to |
| // find an empty slot). This is described in Knuth's The Art of Computer |
| // Programming Volume 2, Chapter 6.4, exercise 20 (solution in the |
| // appendix, 2nd edition). |
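| // Illustration (not part of the cache logic): with triangle-number probing
| // (successive increments 1, 2, 3, ...), the i-th probe examines slot
| // (h + i*(i+1)/2) & (capacity - 1). When capacity is a power of two, the
| // first `capacity` probes visit every slot exactly once, so an unoccupied
| // slot is always found while the load factor stays below 1.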
| ASSERT(Utils::IsPowerOfTwo(new_capacity)); |
| ASSERT(LoadFactor(new_occupied, new_capacity) < kMaxLoadFactor); |
| const intptr_t new_size = kHeaderSize + new_capacity * kEntrySize; |
| const auto& new_data = |
| Array::Handle(zone_, Array::NewUninitialized(new_size, Heap::kOld)); |
| ASSERT(!new_data.IsNull()); |
| // First set up the metadata in new_data. |
| const intptr_t metadata = RawSmiValue(Smi::RawCast(data_.At(kMetadataIndex))); |
| smi_handle_ = Smi::New(EntryCountLog2Bits::update( |
| Utils::ShiftForPowerOfTwo(new_capacity), metadata)); |
| new_data.SetAt(kMetadataIndex, smi_handle_); |
| // Then mark all the entries in new_data as unoccupied. |
| smi_handle_ = Sentinel(); |
| InstantiationsCacheTable to_table(new_data); |
| for (const auto& tuple : to_table) { |
| tuple.Set<kSentinelIndex>(smi_handle_); |
| } |
| // Finally, copy over the entries. |
| auto& instantiator_tav = TypeArguments::Handle(zone_); |
| auto& function_tav = TypeArguments::Handle(zone_); |
| auto& result_tav = TypeArguments::Handle(zone_); |
| const InstantiationsCacheTable from_table(data_); |
| for (const auto& from_tuple : from_table) { |
| // Skip unoccupied entries. |
| if (from_tuple.Get<kSentinelIndex>() == Sentinel()) continue; |
| instantiator_tav ^= from_tuple.Get<kInstantiatorTypeArgsIndex>(); |
| function_tav = from_tuple.Get<kFunctionTypeArgsIndex>(); |
| result_tav = from_tuple.Get<kInstantiatedTypeArgsIndex>(); |
| // Since new_data has a different total capacity, we can't use the old |
| // entry indexes, but must recalculate them. |
| auto loc = FindKeyOrUnused(new_data, instantiator_tav, function_tav); |
| ASSERT(!loc.present); |
| const auto& to_tuple = to_table.At(loc.entry); |
| to_tuple.Set<kInstantiatorTypeArgsIndex>(instantiator_tav); |
| to_tuple.Set<kFunctionTypeArgsIndex>(function_tav); |
| to_tuple.Set<kInstantiatedTypeArgsIndex>(result_tav); |
| } |
| data_ = new_data.ptr(); |
| return true; |
| } |
| |
| bool TypeArguments::HasInstantiations() const { |
| return instantiations() != Cache::EmptyStorage().ptr(); |
| } |
| |
| ArrayPtr TypeArguments::instantiations() const { |
| // We rely on the fact that any loads from the array are dependent loads and |
| // avoid the load-acquire barrier here. |
| return untag()->instantiations(); |
| } |
| |
| void TypeArguments::set_instantiations(const Array& value) const { |
| // We have to ensure that initializing stores to the array are visible
| // when we release the pointer to the array.
| // => We have to use a store-release here.
| ASSERT(!value.IsNull()); |
| untag()->set_instantiations<std::memory_order_release>(value.ptr()); |
| } |
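|
| // Together, instantiations() and set_instantiations() form a
| // release/consume-style pairing: the store-release above publishes the fully
| // initialized cache array, and readers whose loads are data-dependent on the
| // pointer returned by instantiations() observe its contents without an
| // explicit acquire barrier.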
| |
| bool TypeArguments::HasCount(intptr_t count) const { |
| if (IsNull()) { |
| return true; |
| } |
| return Length() == count; |
| } |
| |
| intptr_t TypeArguments::Length() const { |
| if (IsNull()) { |
| return 0; |
| } |
| return Smi::Value(untag()->length()); |
| } |
| |
| intptr_t TypeArguments::nullability() const { |
| if (IsNull()) { |
| return 0; |
| } |
| return Smi::Value(untag()->nullability()); |
| } |
| |
| AbstractTypePtr TypeArguments::TypeAt(intptr_t index) const { |
| ASSERT(!IsNull()); |
| ASSERT((index >= 0) && (index < Length())); |
| return untag()->element(index); |
| } |
| |
| AbstractTypePtr TypeArguments::TypeAtNullSafe(intptr_t index) const { |
| if (IsNull()) { |
| // A null vector represents an infinite list of dynamic types.
| return Type::dynamic_type().ptr(); |
| } |
| ASSERT((index >= 0) && (index < Length())); |
| return TypeAt(index); |
| } |
| |
| void TypeArguments::SetTypeAt(intptr_t index, const AbstractType& value) const { |
| ASSERT(!IsCanonical()); |
| ASSERT((index >= 0) && (index < Length())); |
| return untag()->set_element(index, value.ptr()); |
| } |
| |
| bool TypeArguments::IsSubvectorInstantiated( |
| intptr_t from_index, |
| intptr_t len, |
| Genericity genericity, |
| intptr_t num_free_fun_type_params) const { |
| ASSERT(!IsNull()); |
| AbstractType& type = AbstractType::Handle(); |
| for (intptr_t i = 0; i < len; i++) { |
| type = TypeAt(from_index + i); |
| // If this type argument T is null, the type A containing T in its flattened |
| // type argument vector V is recursive and is still being finalized. |
| // T is the type argument of a super type of A. T is being instantiated |
| // during finalization of V, which is also the instantiator. T depends |
| // solely on the type parameters of A and will be replaced by a non-null |
| // type before A is marked as finalized. |
| if (!type.IsNull() && |
| !type.IsInstantiated(genericity, num_free_fun_type_params)) { |
| return false; |
| } |
| } |
| return true; |
| } |
| |
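| // For example (illustration only): inside `class A<X0, X1> {}`, the type
| // argument vector <X0, X1> is an uninstantiated identity: each element is
| // A's type parameter at the same index, so the vector can be replaced by an
| // instantiator type argument vector of matching length without any
| // instantiation (provided the parameters do not occur as nullable types).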
| bool TypeArguments::IsUninstantiatedIdentity() const { |
| AbstractType& type = AbstractType::Handle(); |
| const intptr_t num_types = Length(); |
| for (intptr_t i = 0; i < num_types; i++) { |
| type = TypeAt(i); |
| if (type.IsNull()) { |
| return false; // Still unfinalized, too early to tell. |
| } |
| if (!type.IsTypeParameter()) { |
| return false; |
| } |
| const TypeParameter& type_param = TypeParameter::Cast(type); |
| ASSERT(type_param.IsFinalized()); |
| if ((type_param.index() != i) || type_param.IsFunctionTypeParameter()) { |
| return false; |
| } |
| // Instantiating nullable type parameters may change |
| // nullability of a type, so type arguments vector containing such type |
| // parameters cannot be substituted with instantiator type arguments. |
| if (type_param.IsNullable()) { |
| return false; |
| } |
| } |
| return true; |
| // Note that it is not necessary to verify at runtime that the instantiator |
| // type vector is long enough, since this uninstantiated vector contains as |
| // many different type parameters as it is long. |
| } |
| |
| // Return true if this uninstantiated type argument vector, once instantiated |
| // at runtime, is a prefix of the type argument vector of its instantiator. |
| // A runtime check may be required, as indicated by with_runtime_check. |
| bool TypeArguments::CanShareInstantiatorTypeArguments( |
| const Class& instantiator_class, |
| bool* with_runtime_check) const { |
| ASSERT(!IsInstantiated()); |
| if (with_runtime_check != nullptr) { |
| *with_runtime_check = false; |
| } |
| const intptr_t num_type_args = Length(); |
| const intptr_t num_instantiator_type_args = |
| instantiator_class.NumTypeArguments(); |
| if (num_type_args > num_instantiator_type_args) { |
| // This vector cannot be a prefix of a shorter vector. |
| return false; |
| } |
| const intptr_t num_instantiator_type_params = |
| instantiator_class.NumTypeParameters(); |
| const intptr_t first_type_param_offset = |
| num_instantiator_type_args - num_instantiator_type_params; |
| // At compile time, the type argument vector of the instantiator consists of |
| // the type argument vector of its super type, which may refer to the type |
| // parameters of the instantiator class, followed by (or overlapping partially |
| // or fully with) the type parameters of the instantiator class in declaration |
| // order. |
| // In other words, the only variables are the type parameters of the |
| // instantiator class. |
| // This uninstantiated type argument vector is also expressed in terms of the |
| // type parameters of the instantiator class. Therefore, in order to be a |
| // prefix once instantiated at runtime, every one of its type arguments must be
| // equal to the type argument of the instantiator vector at the same index. |
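| // For example (illustration only): given `class B<X> {}` and
| // `class C<T> extends B<int> {}`, C's flattened instantiator vector is
| // [int, T] with first_type_param_offset == 1. The uninstantiated vector
| // [int, T] (the vector of type C<T> itself) satisfies both requirements
| // below, so the instantiator's own vector can be reused at runtime.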
| |
| // As a first requirement, the last num_instantiator_type_params type |
| // arguments of this type argument vector must refer to the corresponding type |
| // parameters of the instantiator class. |
| AbstractType& type_arg = AbstractType::Handle(); |
| for (intptr_t i = first_type_param_offset; i < num_type_args; i++) { |
| type_arg = TypeAt(i); |
| if (!type_arg.IsTypeParameter()) { |
| return false; |
| } |
| const TypeParameter& type_param = TypeParameter::Cast(type_arg); |
| ASSERT(type_param.IsFinalized()); |
| if ((type_param.index() != i) || type_param.IsFunctionTypeParameter()) { |
| return false; |
| } |
| // Instantiating nullable type parameters may change nullability |
| // of a type, so type arguments vector containing such type parameters |
| // cannot be substituted with instantiator type arguments, unless we check |
| // at runtime the nullability of the first 1 or 2 type arguments of the |
| // instantiator. |
| // Note that the presence of non-overlapping super type arguments (i.e. |
| // first_type_param_offset > 0) will prevent this optimization. |
| if (type_param.IsNullable()) { |
| if (with_runtime_check == nullptr || i >= kNullabilityMaxTypes) { |
| return false; |
| } |
| *with_runtime_check = true; |
| } |
| } |
| // As a second requirement, the type arguments corresponding to the super type |
| // must be identical. Overlapping ones have already been checked starting at |
| // first_type_param_offset. |
| if (first_type_param_offset == 0) { |
| return true; |
| } |
| Type& super_type = Type::Handle(instantiator_class.super_type()); |
| const TypeArguments& super_type_args = |
| TypeArguments::Handle(super_type.GetInstanceTypeArguments( |
| Thread::Current(), /*canonicalize=*/false)); |
| if (super_type_args.IsNull()) { |
| ASSERT(!IsUninstantiatedIdentity()); |
| return false; |
| } |
| AbstractType& super_type_arg = AbstractType::Handle(); |
| for (intptr_t i = 0; (i < first_type_param_offset) && (i < num_type_args); |
| i++) { |
| type_arg = TypeAt(i); |
| super_type_arg = super_type_args.TypeAt(i); |
| if (!type_arg.Equals(super_type_arg)) { |
| ASSERT(!IsUninstantiatedIdentity()); |
| return false; |
| } |
| } |
| return true; |
| } |
| |
| // Return true if this uninstantiated type argument vector, once instantiated |
| // at runtime, is a prefix of the enclosing function type arguments. |
| // A runtime check may be required, as indicated by with_runtime_check. |
| bool TypeArguments::CanShareFunctionTypeArguments( |
| const Function& function, |
| bool* with_runtime_check) const { |
| ASSERT(!IsInstantiated()); |
| if (with_runtime_check != nullptr) { |
| *with_runtime_check = false; |
| } |
| const intptr_t num_type_args = Length(); |
| const intptr_t num_parent_type_args = function.NumParentTypeArguments(); |
| const intptr_t num_function_type_params = function.NumTypeParameters(); |
| const intptr_t num_function_type_args = |
| num_parent_type_args + num_function_type_params; |
| if (num_type_args > num_function_type_args) { |
| // This vector cannot be a prefix of a shorter vector. |
| return false; |
| } |
| AbstractType& type_arg = AbstractType::Handle(); |
| for (intptr_t i = 0; i < num_type_args; i++) { |
| type_arg = TypeAt(i); |
| if (!type_arg.IsTypeParameter()) { |
| return false; |
| } |
| const TypeParameter& type_param = TypeParameter::Cast(type_arg); |
| ASSERT(type_param.IsFinalized()); |
| if ((type_param.index() != i) || !type_param.IsFunctionTypeParameter()) { |
| return false; |
| } |
| // Instantiating nullable type parameters may change nullability |
| // of a type, so type arguments vector containing such type parameters |
| // cannot be substituted with the enclosing function type arguments, unless |
| // we check at runtime the nullability of the first 1 or 2 type arguments of |
| // the enclosing function type arguments. |
| if (type_param.IsNullable()) { |
| if (with_runtime_check == nullptr || i >= kNullabilityMaxTypes) { |
| return false; |
| } |
| *with_runtime_check = true; |
| } |
| } |
| return true; |
| } |
| |
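| // Returns a canonicalized vector containing the first `length` type
| // arguments of this vector.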
| TypeArgumentsPtr TypeArguments::TruncatedTo(intptr_t length) const { |
| Thread* thread = Thread::Current(); |
| Zone* zone = thread->zone(); |
| const TypeArguments& result = |
| TypeArguments::Handle(zone, TypeArguments::New(length)); |
| AbstractType& type = AbstractType::Handle(zone); |
| for (intptr_t i = 0; i < length; i++) { |
| type = TypeAt(i); |
| result.SetTypeAt(i, type); |
| } |
| return result.Canonicalize(thread); |
| } |
| |
| bool TypeArguments::IsFinalized() const { |
| ASSERT(!IsNull()); |
| AbstractType& type = AbstractType::Handle(); |
| const intptr_t num_types = Length(); |
| for (intptr_t i = 0; i < num_types; i++) { |
| type = TypeAt(i); |
| if (!type.IsFinalized()) { |
| return false; |
| } |
| } |
| return true; |
| } |
| |
| TypeArgumentsPtr TypeArguments::InstantiateFrom( |
| const TypeArguments& instantiator_type_arguments, |
| const TypeArguments& function_type_arguments, |
| intptr_t num_free_fun_type_params, |
| Heap::Space space, |
| FunctionTypeMapping* function_type_mapping, |
| intptr_t num_parent_type_args_adjustment) const { |
| ASSERT(!IsInstantiated()); |
| if ((instantiator_type_arguments.IsNull() || |
| instantiator_type_arguments.Length() == Length()) && |
| IsUninstantiatedIdentity()) { |
| return instantiator_type_arguments.ptr(); |
| } |
| const intptr_t num_types = Length(); |
| TypeArguments& instantiated_array = |
| TypeArguments::Handle(TypeArguments::New(num_types, space)); |
| AbstractType& type = AbstractType::Handle(); |
| for (intptr_t i = 0; i < num_types; i++) { |
| type = TypeAt(i); |
| // If this type argument T is null, the type A containing T in its flattened |
| // type argument vector V is recursive and is still being finalized. |
| // T is the type argument of a super type of A. T is being instantiated |
| // during finalization of V, which is also the instantiator. T depends |
| // solely on the type parameters of A and will be replaced by a non-null |
| // type before A is marked as finalized. |
| if (!type.IsNull() && !type.IsInstantiated()) { |
| type = type.InstantiateFrom( |
| instantiator_type_arguments, function_type_arguments, |
| num_free_fun_type_params, space, function_type_mapping, |
| num_parent_type_args_adjustment); |
| // A returned null type indicates a failed instantiation in dead code that |
| // must be propagated up to the caller, the optimizing compiler. |
| if (type.IsNull()) { |
| return Object::empty_type_arguments().ptr(); |
| } |
| } |
| instantiated_array.SetTypeAt(i, type); |
| } |
| return instantiated_array.ptr(); |
| } |
| |
| TypeArgumentsPtr TypeArguments::UpdateFunctionTypes( |
| intptr_t num_parent_type_args_adjustment, |
| intptr_t num_free_fun_type_params, |
| Heap::Space space, |
| FunctionTypeMapping* function_type_mapping) const { |
| Zone* zone = Thread::Current()->zone(); |
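| // The result vector is allocated lazily (copy-on-write): updated_args stays
| // nullptr until the first element actually changes.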
| TypeArguments* updated_args = nullptr; |
| AbstractType& type = AbstractType::Handle(zone); |
| AbstractType& updated = AbstractType::Handle(zone); |
| for (intptr_t i = 0, n = Length(); i < n; ++i) { |
| type = TypeAt(i); |
| updated = type.UpdateFunctionTypes(num_parent_type_args_adjustment, |
| num_free_fun_type_params, space, |
| function_type_mapping); |
| if (type.ptr() != updated.ptr()) { |
| if (updated_args == nullptr) { |
| updated_args = |
| &TypeArguments::Handle(zone, TypeArguments::New(n, space)); |
| for (intptr_t j = 0; j < i; ++j) { |
| type = TypeAt(j); |
| updated_args->SetTypeAt(j, type); |
| } |
| } |
| } |
| if (updated_args != nullptr) { |
| updated_args->SetTypeAt(i, updated); |
| } |
| } |
| return (updated_args != nullptr) ? updated_args->ptr() : ptr(); |
| } |
| |
| #if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME) |
| // A local flag used only in object_test.cc that, when true, causes a failure |
| // when a cache entry for the given instantiator and function type arguments |
| // already exists. Used to check that the InstantiateTypeArguments stub found |
| // the cache entry instead of calling the runtime. |
| bool TESTING_runtime_fail_on_existing_cache_entry = false; |
| #endif |
| |
| TypeArgumentsPtr TypeArguments::InstantiateAndCanonicalizeFrom( |
| const TypeArguments& instantiator_type_arguments, |
| const TypeArguments& function_type_arguments) const { |
| auto thread = Thread::Current(); |
| auto zone = thread->zone(); |
| SafepointMutexLocker ml( |
| thread->isolate_group()->type_arguments_canonicalization_mutex()); |
| |
| ASSERT(!IsInstantiated()); |
| ASSERT(instantiator_type_arguments.IsNull() || |
| instantiator_type_arguments.IsCanonical()); |
| ASSERT(function_type_arguments.IsNull() || |
| function_type_arguments.IsCanonical()); |
| // Lookup instantiators and if found, return instantiated result. |
| Cache cache(zone, *this); |
| auto const loc = cache.FindKeyOrUnused(instantiator_type_arguments, |
| function_type_arguments); |
| if (loc.present) { |
| #if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME) |
| if (TESTING_runtime_fail_on_existing_cache_entry) { |
| TextBuffer buffer(1024); |
| buffer.Printf("for\n"); |
| buffer.Printf(" * uninstantiated type arguments %s\n", ToCString()); |
| buffer.Printf(" * instantiation type arguments: %s (hash: %" Pu ")\n", |
| instantiator_type_arguments.ToCString(), |
| instantiator_type_arguments.Hash()); |
| buffer.Printf(" * function type arguments: %s (hash: %" Pu ")\n", |
| function_type_arguments.ToCString(), |
| function_type_arguments.Hash()); |
| buffer.Printf(" * number of occupied entries in cache: %" Pd "\n", |
| cache.NumOccupied()); |
| buffer.Printf(" * number of total entries in cache: %" Pd "\n", |
| cache.NumEntries()); |
| buffer.Printf("expected to find entry %" Pd |
| " of cache in stub, but reached runtime", |
| loc.entry); |
| FATAL("%s", buffer.buffer()); |
| } |
| #endif |
| return cache.Retrieve(loc.entry); |
| } |
| // Cache lookup failed. Instantiate the type arguments. |
| TypeArguments& result = TypeArguments::Handle(zone); |
| result = InstantiateFrom(instantiator_type_arguments, function_type_arguments, |
| kAllFree, Heap::kOld); |
| // Canonicalize type arguments. |
| result = result.Canonicalize(thread); |
| // InstantiateAndCanonicalizeFrom is not reentrant. It cannot have been called
| // indirectly, so the instantiations cache array cannot have grown.
| ASSERT(cache.data_.ptr() == instantiations()); |
| cache.AddEntry(loc.entry, instantiator_type_arguments, |
| function_type_arguments, result); |
| return result.ptr(); |
| } |
| |
| TypeArgumentsPtr TypeArguments::New(intptr_t len, Heap::Space space) { |
| if (len < 0 || len > kMaxElements) { |
| // This should be caught before we reach here. |
| FATAL("Fatal error in TypeArguments::New: invalid len %" Pd "\n", len); |
| } |
| TypeArguments& result = TypeArguments::Handle(); |
| { |
| auto raw = Object::Allocate<TypeArguments>(space, len); |
| NoSafepointScope no_safepoint; |
| result = raw; |
| // Length must be set before we start storing into the array. |
| result.SetLength(len); |
| result.SetHash(0); |
| result.set_nullability(0); |
| } |
| // The array used as storage for an empty linear cache should be initialized. |
| ASSERT(Cache::EmptyStorage().ptr() != Array::null()); |
| result.set_instantiations(Cache::EmptyStorage()); |
| return result.ptr(); |
| } |
| |
| void TypeArguments::SetLength(intptr_t value) const { |
| ASSERT(!IsCanonical()); |
| // This is only safe because we create a new Smi, which does not cause |
| // heap allocation. |
| untag()->set_length(Smi::New(value)); |
| } |
| |
| TypeArgumentsPtr TypeArguments::Canonicalize(Thread* thread) const { |
| if (IsNull() || IsCanonical()) { |
| ASSERT(IsOld()); |
| return this->ptr(); |
| } |
| const intptr_t num_types = Length(); |
| if (num_types == 0) { |
| return TypeArguments::empty_type_arguments().ptr(); |
| } else if (IsRaw(0, num_types)) { |
| return TypeArguments::null(); |
| } |
| Zone* zone = thread->zone(); |
| auto isolate_group = thread->isolate_group(); |
| ObjectStore* object_store = isolate_group->object_store(); |
| TypeArguments& result = TypeArguments::Handle(zone); |
| { |
| SafepointMutexLocker ml(isolate_group->type_canonicalization_mutex()); |
| CanonicalTypeArgumentsSet table(zone, |
| object_store->canonical_type_arguments()); |
| result ^= table.GetOrNull(CanonicalTypeArgumentsKey(*this)); |
| object_store->set_canonical_type_arguments(table.Release()); |
| } |
| if (result.IsNull()) { |
| // Canonicalize each type argument. |
| AbstractType& type_arg = AbstractType::Handle(zone); |
| GrowableHandlePtrArray<const AbstractType> canonicalized_types(zone, |
| num_types); |
| for (intptr_t i = 0; i < num_types; i++) { |
| type_arg = TypeAt(i); |
| type_arg = type_arg.Canonicalize(thread); |
| canonicalized_types.Add(type_arg); |
| } |
| SafepointMutexLocker ml(isolate_group->type_canonicalization_mutex()); |
| CanonicalTypeArgumentsSet table(zone, |
| object_store->canonical_type_arguments()); |
| // Since we canonicalized some type arguments above we need to lookup |
| // in the table again to make sure we don't already have an equivalent |
| // canonical entry. |
| result ^= table.GetOrNull(CanonicalTypeArgumentsKey(*this)); |
| if (result.IsNull()) { |
| for (intptr_t i = 0; i < num_types; i++) { |
| SetTypeAt(i, canonicalized_types.At(i)); |
| } |
| // Make sure we have an old space object and add it to the table. |
| if (this->IsNew()) { |
| result ^= Object::Clone(*this, Heap::kOld); |
| } else { |
| result = this->ptr(); |
| } |
| ASSERT(result.IsOld()); |
| result.ComputeNullability(); |
| result.SetCanonical(); // Mark object as being canonical. |
| // Now add this TypeArgument into the canonical list of type arguments. |
| bool present = table.Insert(result); |
| ASSERT(!present); |
| } |
| object_store->set_canonical_type_arguments(table.Release()); |
| } |
| ASSERT(result.Equals(*this)); |
| ASSERT(!result.IsNull()); |
| ASSERT(result.IsTypeArguments()); |
| ASSERT(result.IsCanonical()); |
| return result.ptr(); |
| } |
| |
| TypeArgumentsPtr TypeArguments::FromInstanceTypeArguments( |
| Thread* thread, |
| const Class& cls) const { |
| if (IsNull()) { |
| return ptr(); |
| } |
| const intptr_t num_type_arguments = cls.NumTypeArguments(); |
| const intptr_t num_type_parameters = cls.NumTypeParameters(thread); |
| ASSERT(Length() >= num_type_arguments); |
| if (Length() == num_type_parameters) { |
| return ptr(); |
| } |
| if (num_type_parameters == 0) { |
| return TypeArguments::null(); |
| } |
| Zone* zone = thread->zone(); |
| const auto& args = |
| TypeArguments::Handle(zone, TypeArguments::New(num_type_parameters)); |
| const intptr_t offset = num_type_arguments - num_type_parameters; |
| auto& type = AbstractType::Handle(zone); |
| for (intptr_t i = 0; i < num_type_parameters; ++i) { |
| type = TypeAt(offset + i); |
| args.SetTypeAt(i, type); |
| } |
| return args.ptr(); |
| } |
| |
| TypeArgumentsPtr TypeArguments::ToInstantiatorTypeArguments( |
| Thread* thread, |
| const Class& cls) const { |
| if (IsNull()) { |
| return ptr(); |
| } |
| const intptr_t num_type_arguments = cls.NumTypeArguments(); |
| const intptr_t num_type_parameters = cls.NumTypeParameters(thread); |
| ASSERT(Length() == num_type_parameters); |
| if (num_type_arguments == num_type_parameters) { |
| return ptr(); |
| } |
| Zone* zone = thread->zone(); |
| const auto& args = |
| TypeArguments::Handle(zone, TypeArguments::New(num_type_arguments)); |
| const intptr_t offset = num_type_arguments - num_type_parameters; |
| auto& type = AbstractType::Handle(zone); |
| for (intptr_t i = 0; i < num_type_parameters; ++i) { |
| type = TypeAt(i); |
| args.SetTypeAt(offset + i, type); |
| } |
| return args.ptr(); |
| } |
| |
| void TypeArguments::EnumerateURIs(URIs* uris) const { |
| if (IsNull()) { |
| return; |
| } |
| Thread* thread = Thread::Current(); |
| Zone* zone = thread->zone(); |
| AbstractType& type = AbstractType::Handle(zone); |
| const intptr_t num_types = Length(); |
| for (intptr_t i = 0; i < num_types; i++) { |
| type = TypeAt(i); |
| type.EnumerateURIs(uris); |
| } |
| } |
| |
| const char* TypeArguments::ToCString() const { |
| if (IsNull()) { |
| return "TypeArguments: null"; // Optimizing the frequent case. |
| } |
| ZoneTextBuffer buffer(Thread::Current()->zone()); |
| PrintTo(&buffer); |
| return buffer.buffer(); |
| } |
| |
| const char* PatchClass::ToCString() const { |
| const Class& cls = Class::Handle(wrapped_class()); |
| const char* cls_name = cls.ToCString(); |
| return OS::SCreate(Thread::Current()->zone(), "PatchClass for %s", cls_name); |
| } |
| |
| PatchClassPtr PatchClass::New(const Class& wrapped_class, |
| const KernelProgramInfo& info, |
| const Script& script) { |
| const PatchClass& result = PatchClass::Handle(PatchClass::New()); |
| result.set_wrapped_class(wrapped_class); |
| NOT_IN_PRECOMPILED_RUNTIME( |
| result.untag()->set_kernel_program_info(info.ptr())); |
| result.set_script(script); |
| result.set_kernel_library_index(-1); |
| return result.ptr(); |
| } |
| |
| PatchClassPtr PatchClass::New() { |
| ASSERT(Object::patch_class_class() != Class::null()); |
| return Object::Allocate<PatchClass>(Heap::kOld); |
| } |
| |
| void PatchClass::set_wrapped_class(const Class& value) const { |
| untag()->set_wrapped_class(value.ptr()); |
| } |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| void PatchClass::set_kernel_program_info(const KernelProgramInfo& info) const { |
| untag()->set_kernel_program_info(info.ptr()); |
| } |
| #endif |
| |
| void PatchClass::set_script(const Script& value) const { |
| untag()->set_script(value.ptr()); |
| } |
| |
| uword Function::Hash() const { |
| uword hash = String::HashRawSymbol(name()); |
| if (IsClosureFunction()) { |
| hash = hash ^ token_pos().Hash(); |
| } |
| if (Owner()->IsClass()) { |
| hash = hash ^ Class::Hash(Class::RawCast(Owner())); |
| } |
| return hash; |
| } |
| |
| bool Function::HasBreakpoint() const { |
| #if defined(PRODUCT) |
| return false; |
| #else |
| auto thread = Thread::Current(); |
| return thread->isolate_group()->debugger()->HasBreakpoint(thread, *this); |
| #endif |
| } |
| |
| void Function::InstallOptimizedCode(const Code& code) const { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| // We may not have previous code if FLAG_precompile is set. |
| // Hot-reload may have already disabled the current code. |
| if (HasCode() && !Code::Handle(CurrentCode()).IsDisabled()) { |
| Code::Handle(CurrentCode()).DisableDartCode(); |
| } |
| AttachCode(code); |
| } |
| |
| void Function::SetInstructions(const Code& value) const { |
| // Ensure that nobody is executing this function when we install it. |
| if (untag()->code() != Code::null() && HasCode()) { |
| GcSafepointOperationScope safepoint(Thread::Current()); |
| SetInstructionsSafe(value); |
| } else { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| SetInstructionsSafe(value); |
| } |
| } |
| |
| void Function::SetInstructionsSafe(const Code& value) const { |
| untag()->set_code<std::memory_order_release>(value.ptr()); |
| StoreNonPointer(&untag()->entry_point_, value.EntryPoint()); |
| StoreNonPointer(&untag()->unchecked_entry_point_, |
| value.UncheckedEntryPoint()); |
| } |
| |
| void Function::AttachCode(const Code& value) const { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| // Finish setting up code before activating it. |
| value.set_owner(*this); |
| SetInstructions(value); |
| ASSERT(Function::Handle(value.function()).IsNull() || |
| (value.function() == this->ptr())); |
| } |
| |
| bool Function::HasCode() const { |
| NoSafepointScope no_safepoint; |
| ASSERT(untag()->code() != Code::null()); |
| return untag()->code() != StubCode::LazyCompile().ptr(); |
| } |
| |
| #if defined(DART_DYNAMIC_MODULES) |
| |
| void Function::AttachBytecode(const Bytecode& value) const { |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| ASSERT(!value.IsNull()); |
| // Finish setting up code before activating it. |
| if (!value.InVMIsolateHeap()) { |
| value.set_function(*this); |
| } |
| ASSERT(untag()->ic_data_array_or_bytecode() == Object::null()); |
| untag()->set_ic_data_array_or_bytecode(value.ptr()); |
| |
| // Set the code entry_point to the InterpretCall stub.
| SetInstructions(StubCode::InterpretCall()); |
| } |
| |
| #endif // defined(DART_DYNAMIC_MODULES) |
| |
| bool Function::HasCode(FunctionPtr function) { |
| NoSafepointScope no_safepoint; |
| ASSERT(function->untag()->code() != Code::null()); |
| return function->untag()->code() != StubCode::LazyCompile().ptr(); |
| } |
| |
| void Function::ClearCode() const { |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| UNREACHABLE(); |
| #else |
| ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| untag()->set_unoptimized_code(Code::null()); |
| SetInstructions(StubCode::LazyCompile()); |
| #endif // defined(DART_PRECOMPILED_RUNTIME) |
| } |
| |
| void Function::ClearCodeSafe() const { |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| UNREACHABLE(); |
| #else |
| untag()->set_unoptimized_code(Code::null()); |
| |
| SetInstructionsSafe(StubCode::LazyCompile()); |
| #endif // defined(DART_PRECOMPILED_RUNTIME) |
| } |
| |
| void Function::EnsureHasCompiledUnoptimizedCode() const { |
| ASSERT(!ForceOptimize()); |
| Thread* thread = Thread::Current(); |
| ASSERT(thread->IsDartMutatorThread()); |
| DEBUG_ASSERT(thread->TopErrorHandlerIsExitFrame()); |
| Zone* zone = thread->zone(); |
| |
| const Error& error = |
| Error::Handle(zone, EnsureHasCompiledUnoptimizedCodeNoThrow()); |
| if (!error.IsNull()) { |
| Exceptions::PropagateError(error); |
| } |
| } |
| |
| ErrorPtr Function::EnsureHasCompiledUnoptimizedCodeNoThrow() const { |
| ASSERT(!ForceOptimize()); |
| Thread* thread = Thread::Current(); |
| ASSERT(thread->IsDartMutatorThread()); |
| |
| return Compiler::EnsureUnoptimizedCode(thread, *this); |
| } |
| |
| void Function::SwitchToUnoptimizedCode() const { |
| ASSERT(HasOptimizedCode()); |
| ASSERT(!ForceOptimize()); |
| Thread* thread = Thread::Current(); |
| DEBUG_ASSERT( |
| thread->isolate_group()->program_lock()->IsCurrentThreadWriter()); |
| Zone* zone = thread->zone(); |
| // TODO(35224): DEBUG_ASSERT(thread->TopErrorHandlerIsExitFrame()); |
| const Code& current_code = Code::Handle(zone, CurrentCode()); |
| |
| if (FLAG_trace_deoptimization_verbose) { |
| THR_Print("Disabling optimized code: '%s' entry: %#" Px "\n", |
| ToFullyQualifiedCString(), current_code.EntryPoint()); |
| } |
| current_code.DisableDartCode(); |
| const Error& error = |
| Error::Handle(zone, Compiler::EnsureUnoptimizedCode(thread, *this)); |
| if (!error.IsNull()) { |
| Exceptions::PropagateError(error); |
| } |
| const Code& unopt_code = Code::Handle(zone, unoptimized_code()); |
| unopt_code.Enable(); |
| AttachCode(unopt_code); |
| } |
| |
| void Function::SwitchToLazyCompiledUnoptimizedCode() const { |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| UNREACHABLE(); |
| #else |
| if (!HasOptimizedCode()) { |
| return; |
| } |
| |
| Thread* thread = Thread::Current(); |
| Zone* zone = thread->zone(); |
| ASSERT(thread->IsDartMutatorThread()); |
| |
| const Code& current_code = Code::Handle(zone, CurrentCode()); |
| TIR_Print("Disabling optimized code for %s\n", ToCString()); |
| current_code.DisableDartCode(); |
| |
| const Code& unopt_code = Code::Handle(zone, unoptimized_code()); |
| if (unopt_code.IsNull()) { |
| // Set the lazy compile stub code. |
| TIR_Print("Switched to lazy compile stub for %s\n", ToCString()); |
| SetInstructions(StubCode::LazyCompile()); |
| return; |
| } |
| |
| TIR_Print("Switched to unoptimized code for %s\n", ToCString()); |
| |
| AttachCode(unopt_code); |
| unopt_code.Enable(); |
| #endif |
| } |
| |
| void Function::set_unoptimized_code(const Code& value) const { |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| UNREACHABLE(); |
| #else |
| DEBUG_ASSERT(IsMutatorOrAtDeoptSafepoint()); |
| ASSERT(value.IsNull() || !value.is_optimized()); |
| untag()->set_unoptimized_code(value.ptr()); |
| #endif |
| } |
| |
| ContextScopePtr Function::context_scope() const { |
| if (IsClosureFunction()) { |
| const Object& obj = Object::Handle(untag()->data()); |
| ASSERT(!obj.IsNull()); |
| return ClosureData::Cast(obj).context_scope(); |
| } |
| return ContextScope::null(); |
| } |
| |
| void Function::set_context_scope(const ContextScope& value) const { |
| if (IsClosureFunction()) { |
| const Object& obj = Object::Handle(untag()->data()); |
| ASSERT(!obj.IsNull()); |
| ClosureData::Cast(obj).set_context_scope(value); |
| return; |
| } |
| UNREACHABLE(); |
| } |
| |
| Function::AwaiterLink Function::awaiter_link() const { |
| if (IsClosureFunction()) { |
| const Object& obj = Object::Handle(untag()->data()); |
| ASSERT(!obj.IsNull()); |
| return ClosureData::Cast(obj).awaiter_link(); |
| } |
| UNREACHABLE(); |
| return {}; |
| } |
| |
| void Function::set_awaiter_link(Function::AwaiterLink link) const { |
| if (IsClosureFunction()) { |
| const Object& obj = Object::Handle(untag()->data()); |
| ASSERT(!obj.IsNull()); |
| ClosureData::Cast(obj).set_awaiter_link(link); |
| return; |
| } |
| UNREACHABLE(); |
| } |
| |
| ClosurePtr Function::implicit_static_closure() const { |
| if (IsImplicitStaticClosureFunction()) { |
| const Object& obj = Object::Handle(untag()->data()); |
| ASSERT(!obj.IsNull()); |
| return ClosureData::Cast(obj).implicit_static_closure(); |
| } |
| return Closure::null(); |
| } |
| |
| void Function::set_implicit_static_closure(const Closure& closure) const { |
| if (IsImplicitStaticClosureFunction()) { |
| const Object& obj = Object::Handle(untag()->data()); |
| ASSERT(!obj.IsNull()); |
| ClosureData::Cast(obj).set_implicit_static_closure(closure); |
| return; |
| } |
| UNREACHABLE(); |
| } |
| |
| ScriptPtr Function::eval_script() const { |
| const Object& obj = Object::Handle(untag()->data()); |
| if (obj.IsScript()) { |
| return Script::Cast(obj).ptr(); |
| } |
| return Script::null(); |
| } |
| |
| void Function::set_eval_script(const Script& script) const { |
| ASSERT(token_pos() == TokenPosition::kMinSource); |
| ASSERT(untag()->data() == Object::null()); |
| set_data(script); |
| } |
| |
| FunctionPtr Function::extracted_method_closure() const { |
| ASSERT(kind() == UntaggedFunction::kMethodExtractor); |
| const Object& obj = Object::Handle(untag()->data()); |
| ASSERT(obj.IsFunction()); |
| return Function::Cast(obj).ptr(); |
| } |
| |
| void Function::set_extracted_method_closure(const Function& value) const { |
| ASSERT(kind() == UntaggedFunction::kMethodExtractor); |
| ASSERT(untag()->data() == Object::null()); |
| set_data(value); |
| } |
| |
| ArrayPtr Function::saved_args_desc() const { |
| if (kind() == UntaggedFunction::kDynamicInvocationForwarder) { |
| return Array::null(); |
| } |
| ASSERT(kind() == UntaggedFunction::kNoSuchMethodDispatcher || |
| kind() == UntaggedFunction::kInvokeFieldDispatcher); |
| return Array::RawCast(untag()->data()); |
| } |
| |
| void Function::set_saved_args_desc(const Array& value) const { |
| ASSERT(kind() == UntaggedFunction::kNoSuchMethodDispatcher || |
| kind() == UntaggedFunction::kInvokeFieldDispatcher); |
| ASSERT(untag()->data() == Object::null()); |
| set_data(value); |
| } |
| |
| FieldPtr Function::accessor_field() const { |
| ASSERT(kind() == UntaggedFunction::kImplicitGetter || |
| kind() == UntaggedFunction::kImplicitSetter || |
| kind() == UntaggedFunction::kImplicitStaticGetter || |
| kind() == UntaggedFunction::kFieldInitializer); |
| return Field::RawCast(untag()->data()); |
| } |
| |
| void Function::set_accessor_field(const Field& value) const { |
| ASSERT(kind() == UntaggedFunction::kImplicitGetter || |
| kind() == UntaggedFunction::kImplicitSetter || |
| kind() == UntaggedFunction::kImplicitStaticGetter || |
| kind() == UntaggedFunction::kFieldInitializer); |
| // Top level classes may be finalized multiple times. |
| ASSERT(untag()->data() == Object::null() || untag()->data() == value.ptr()); |
| set_data(value); |
| } |
| |
| FunctionPtr Function::parent_function() const { |
| if (!IsClosureFunction()) return Function::null(); |
| Object& obj = Object::Handle(untag()->data()); |
| ASSERT(!obj.IsNull()); |
| return ClosureData::Cast(obj).parent_function(); |
| } |
| |
| void Function::set_parent_function(const Function& value) const { |
| ASSERT(IsClosureFunction()); |
| const Object& obj = Object::Handle(untag()->data()); |
| ASSERT(!obj.IsNull()); |
| ClosureData::Cast(obj).set_parent_function(value); |
| } |
| |
| TypeArgumentsPtr Function::DefaultTypeArguments(Zone* zone) const { |
| if (type_parameters() == TypeParameters::null()) { |
| return Object::empty_type_arguments().ptr(); |
| } |
| return TypeParameters::Handle(zone, type_parameters()).defaults(); |
| } |
| |
| InstantiationMode Function::default_type_arguments_instantiation_mode() const { |
| if (!IsClosureFunction()) { |
| UNREACHABLE(); |
| } |
| return ClosureData::DefaultTypeArgumentsInstantiationMode( |
| ClosureData::RawCast(data())); |
| } |
| |
| void Function::set_default_type_arguments_instantiation_mode( |
| InstantiationMode value) const { |
| if (!IsClosureFunction()) { |
| UNREACHABLE(); |
| } |
| const auto& closure_data = ClosureData::Handle(ClosureData::RawCast(data())); |
| ASSERT(!closure_data.IsNull()); |
| closure_data.set_default_type_arguments_instantiation_mode(value); |
| } |
| |
| // Enclosing outermost function of this local function. |
| FunctionPtr Function::GetOutermostFunction() const { |
| FunctionPtr parent = parent_function(); |
| if (parent == Object::null()) { |
| return ptr(); |
| } |
| Function& function = Function::Handle(); |
| do { |
| function = parent; |
| parent = function.parent_function(); |
| } while (parent != Object::null()); |
| return function.ptr(); |
| } |
| |
| FunctionPtr Function::implicit_closure_function() const { |
| if (IsClosureFunction() || IsDispatcherOrImplicitAccessor() || |
| IsFieldInitializer() || IsFfiCallbackTrampoline() || |
| IsMethodExtractor()) { |
| return Function::null(); |
| } |
| const Object& obj = Object::Handle(data()); |
| ASSERT(obj.IsNull() || obj.IsScript() || obj.IsFunction() || obj.IsArray()); |
| if (obj.IsNull() || obj.IsScript()) { |
| return Function::null(); |
| } |
| if (obj.IsFunction()) { |
| return Function::Cast(obj).ptr(); |
| } |
| ASSERT(is_native()); |
| ASSERT(obj.IsArray()); |
| const Object& res = Object::Handle(Array::Cast(obj).AtAcquire(1)); |
| return res.IsNull() ? Function::null() : Function::Cast(res).ptr(); |
| } |
| |
| void Function::set_implicit_closure_function(const Function& value) const { |
| DEBUG_ASSERT( |
| IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| ASSERT(!IsClosureFunction()); |
| const Object& old_data = Object::Handle(data()); |
| if (is_old_native()) { |
| ASSERT(old_data.IsArray()); |
| const auto& pair = Array::Cast(old_data); |
| ASSERT(pair.AtAcquire(NativeFunctionData::kTearOff) == Object::null() || |
| value.IsNull()); |
| pair.SetAtRelease(NativeFunctionData::kTearOff, value); |
| } else { |
| ASSERT(old_data.IsNull() || value.IsNull()); |
| set_data(value); |
| } |
| } |
| |
| void Function::SetFfiCSignature(const FunctionType& sig) const { |
| ASSERT(IsFfiCallbackTrampoline()); |
| const Object& obj = Object::Handle(data()); |
| ASSERT(!obj.IsNull()); |
| FfiTrampolineData::Cast(obj).set_c_signature(sig); |
| } |
| |
| FunctionTypePtr Function::FfiCSignature() const { |
| auto* const zone = Thread::Current()->zone(); |
| if (IsFfiCallbackTrampoline()) { |
| const Object& obj = Object::Handle(zone, data()); |
| ASSERT(!obj.IsNull()); |
| return FfiTrampolineData::Cast(obj).c_signature(); |
| } |
| auto& pragma_value = Instance::Handle(zone); |
| if (is_ffi_native()) { |
| pragma_value = GetNativeAnnotation(); |
| } else if (IsFfiCallClosure()) { |
| pragma_value = GetFfiCallClosurePragmaValue(); |
| } else { |
| UNREACHABLE(); |
| } |
| const auto& type_args = |
| TypeArguments::Handle(zone, pragma_value.GetTypeArguments()); |
| ASSERT(type_args.Length() == 1); |
| const auto& native_type = |
| FunctionType::Cast(AbstractType::ZoneHandle(zone, type_args.TypeAt(0))); |
| return native_type.ptr(); |
| } |
| |
| bool Function::FfiCSignatureContainsHandles() const { |
| const FunctionType& c_signature = FunctionType::Handle(FfiCSignature()); |
| return c_signature.ContainsHandles(); |
| } |
| |
| bool FunctionType::ContainsHandles() const { |
| const intptr_t num_params = num_fixed_parameters(); |
| for (intptr_t i = 0; i < num_params; i++) { |
| const bool is_handle = |
| AbstractType::Handle(ParameterTypeAt(i)).type_class_id() == |
| kFfiHandleCid; |
| if (is_handle) { |
| return true; |
| } |
| } |
| return AbstractType::Handle(result_type()).type_class_id() == kFfiHandleCid; |
| } |
| |
| // Keep consistent with BaseMarshaller::IsCompound. |
| bool Function::FfiCSignatureReturnsStruct() const { |
| ASSERT(IsFfiCallbackTrampoline()); |
| Zone* zone = Thread::Current()->zone(); |
| const auto& c_signature = FunctionType::Handle(zone, FfiCSignature()); |
| const auto& type = AbstractType::Handle(zone, c_signature.result_type()); |
| if (IsFfiTypeClassId(type.type_class_id())) { |
| return false; |
| } |
| const auto& cls = Class::Handle(zone, type.type_class()); |
| const auto& super_class = Class::Handle(zone, cls.SuperClass());
| const bool is_abi_specific_int =
| String::Handle(zone, super_class.UserVisibleName())
| .Equals(Symbols::AbiSpecificInteger());
| if (is_abi_specific_int) {
| return false;
| }
| #if defined(DEBUG)
| const bool is_struct = String::Handle(zone, super_class.UserVisibleName())
| .Equals(Symbols::Struct());
| const bool is_union = String::Handle(zone, super_class.UserVisibleName())
| .Equals(Symbols::Union());
| ASSERT(is_struct || is_union); |
| #endif |
| return true; |
| } |
| |
| int32_t Function::FfiCallbackId() const { |
| ASSERT(IsFfiCallbackTrampoline()); |
| |
| const auto& obj = Object::Handle(data()); |
| ASSERT(!obj.IsNull()); |
| const auto& trampoline_data = FfiTrampolineData::Cast(obj); |
| |
| ASSERT(trampoline_data.callback_id() != -1); |
| |
| return trampoline_data.callback_id(); |
| } |
| |
| void Function::AssignFfiCallbackId(int32_t callback_id) const { |
| ASSERT(IsFfiCallbackTrampoline()); |
| |
| const auto& obj = Object::Handle(data()); |
| ASSERT(!obj.IsNull()); |
| const auto& trampoline_data = FfiTrampolineData::Cast(obj); |
| |
| ASSERT(trampoline_data.callback_id() == -1); |
| trampoline_data.set_callback_id(callback_id); |
| } |
| |
| bool Function::FfiIsLeaf() const { |
| Zone* zone = Thread::Current()->zone(); |
| auto& pragma_value = Instance::Handle(zone); |
| if (is_ffi_native()) { |
| pragma_value = GetNativeAnnotation(); |
| } else if (IsFfiCallClosure()) { |
| pragma_value = GetFfiCallClosurePragmaValue(); |
| } else { |
| UNREACHABLE(); |
| } |
| const auto& pragma_value_class = Class::Handle(zone, pragma_value.clazz()); |
| const auto& pragma_value_fields = |
| Array::Handle(zone, pragma_value_class.fields()); |
| ASSERT(pragma_value_fields.Length() >= 1); |
| const auto& is_leaf_field = Field::Handle( |
| zone, |
| Field::RawCast(pragma_value_fields.At(pragma_value_fields.Length() - 1))); |
| ASSERT(is_leaf_field.name() == Symbols::isLeaf().ptr()); |
| return Bool::Handle(zone, Bool::RawCast(pragma_value.GetField(is_leaf_field))) |
| .value(); |
| } |
| |
| FunctionPtr Function::FfiCallbackTarget() const { |
| ASSERT(IsFfiCallbackTrampoline()); |
| const Object& obj = Object::Handle(data()); |
| ASSERT(!obj.IsNull()); |
| return FfiTrampolineData::Cast(obj).callback_target(); |
| } |
| |
| void Function::SetFfiCallbackTarget(const Function& target) const { |
| ASSERT(IsFfiCallbackTrampoline()); |
| const Object& obj = Object::Handle(data()); |
| ASSERT(!obj.IsNull()); |
| FfiTrampolineData::Cast(obj).set_callback_target(target); |
| } |
| |
| InstancePtr Function::FfiCallbackExceptionalReturn() const { |
| ASSERT(IsFfiCallbackTrampoline()); |
| const Object& obj = Object::Handle(data()); |
| ASSERT(!obj.IsNull()); |
| return FfiTrampolineData::Cast(obj).callback_exceptional_return(); |
| } |
| |
| void Function::SetFfiCallbackExceptionalReturn(const Instance& value) const { |
| ASSERT(IsFfiCallbackTrampoline()); |
| const Object& obj = Object::Handle(data()); |
| ASSERT(!obj.IsNull()); |
| FfiTrampolineData::Cast(obj).set_callback_exceptional_return(value); |
| } |
| |
| FfiCallbackKind Function::GetFfiCallbackKind() const { |
| ASSERT(IsFfiCallbackTrampoline()); |
| const Object& obj = Object::Handle(data()); |
| ASSERT(!obj.IsNull()); |
| return FfiTrampolineData::Cast(obj).ffi_function_kind(); |
| } |
| |
| void Function::SetFfiCallbackKind(FfiCallbackKind value) const { |
| ASSERT(IsFfiCallbackTrampoline()); |
| const Object& obj = Object::Handle(data()); |
| ASSERT(!obj.IsNull()); |
| FfiTrampolineData::Cast(obj).set_ffi_function_kind(value); |
| } |
| |
| const char* Function::KindToCString(UntaggedFunction::Kind kind) { |
| return UntaggedFunction::KindToCString(kind); |
| } |
| |
| FunctionPtr Function::ForwardingTarget() const { |
| ASSERT(kind() == UntaggedFunction::kDynamicInvocationForwarder); |
| return Function::RawCast(WeakSerializationReference::Unwrap(data())); |
| } |
| |
| void Function::SetForwardingTarget(const Function& target) const { |
| ASSERT(kind() == UntaggedFunction::kDynamicInvocationForwarder); |
| set_data(target); |
| } |
| |
| // This field is heavily overloaded: |
| // kernel eval function: Array[0] = Script |
| // Array[1] = KernelProgramInfo |
| // Array[2] = Kernel index of enclosing library |
| // method extractor: Function extracted closure function |
| // implicit getter: Field |
| // implicit setter: Field |
| // implicit static getter: Field
| // field initializer: Field |
| // noSuchMethod dispatcher: Array arguments descriptor |
| // invoke-field dispatcher: Array arguments descriptor |
| // closure function: ClosureData |
| // irregexp function: Array[0] = RegExp |
| // Array[1] = Smi string specialization cid |
| // native function: Array[0] = String native name |
| // Array[1] = Function implicit closure function |
| // regular function: Function for implicit closure function |
| // constructor, factory: Function for implicit closure function |
| // ffi trampoline function: FfiTrampolineData (Dart->C) |
| // dyn inv forwarder: Forwarding target, a WSR pointing to it or null |
| // (null can only occur if forwarding target was |
| // dropped) |
| void Function::set_data(const Object& value) const { |
| untag()->set_data<std::memory_order_release>(value.ptr()); |
| } |
| |
| void Function::set_name(const String& value) const { |
| ASSERT(value.IsSymbol()); |
| untag()->set_name(value.ptr()); |
| } |
| |
| void Function::set_owner(const Object& value) const { |
| ASSERT(!value.IsNull()); |
| untag()->set_owner(value.ptr()); |
| } |
| |
| RegExpPtr Function::regexp() const { |
| ASSERT(kind() == UntaggedFunction::kIrregexpFunction); |
| const Array& pair = Array::Cast(Object::Handle(data())); |
| return RegExp::RawCast(pair.At(0)); |
| } |
| |
| using StickySpecialization = BitField<intptr_t, bool>; |
| using StringSpecializationCid = BitField<intptr_t, |
| intptr_t, |
| StickySpecialization::kNextBit, |
| UntaggedObject::ClassIdTag::bitsize()>; |
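| // The second element of the irregexp data pair thus packs both values into
| // a single Smi: bit 0 holds the sticky flag, and the following
| // ClassIdTag::bitsize() bits hold the string specialization cid.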
| |
| intptr_t Function::string_specialization_cid() const { |
| ASSERT(kind() == UntaggedFunction::kIrregexpFunction); |
| const Array& pair = Array::Cast(Object::Handle(data())); |
| return StringSpecializationCid::decode(Smi::Value(Smi::RawCast(pair.At(1)))); |
| } |
| |
| bool Function::is_sticky_specialization() const { |
| ASSERT(kind() == UntaggedFunction::kIrregexpFunction); |
| const Array& pair = Array::Cast(Object::Handle(data())); |
| return StickySpecialization::decode(Smi::Value(Smi::RawCast(pair.At(1)))); |
| } |
| |
| void Function::SetRegExpData(const RegExp& regexp, |
| intptr_t string_specialization_cid, |
| bool sticky) const { |
| ASSERT(kind() == UntaggedFunction::kIrregexpFunction); |
| ASSERT(IsStringClassId(string_specialization_cid)); |
| ASSERT(data() == Object::null()); |
| const Array& pair = Array::Handle(Array::New(2, Heap::kOld)); |
| pair.SetAt(0, regexp); |
| pair.SetAt(1, Smi::Handle(Smi::New(StickySpecialization::encode(sticky) | |
| StringSpecializationCid::encode( |
| string_specialization_cid)))); |
| set_data(pair); |
| } |
| |
| StringPtr Function::native_name() const { |
| ASSERT(is_native()); |
| const Object& obj = Object::Handle(data()); |
| ASSERT(obj.IsArray()); |
| return String::RawCast(Array::Cast(obj).At(0)); |
| } |
| |
| void Function::set_native_name(const String& value) const { |
| ASSERT(is_native()); |
| const auto& pair = Array::Cast(Object::Handle(data())); |
| ASSERT(pair.At(0) == Object::null()); |
| pair.SetAt(NativeFunctionData::kNativeName, value); |
| } |
| |
| InstancePtr Function::GetNativeAnnotation() const { |
| ASSERT(is_ffi_native()); |
| Zone* zone = Thread::Current()->zone(); |
| auto& pragma_value = Object::Handle(zone); |
| Library::FindPragma(dart::Thread::Current(), /*only_core=*/false, |
| Object::Handle(zone, ptr()), |
| String::Handle(zone, Symbols::vm_ffi_native().ptr()), |
| /*multiple=*/false, &pragma_value); |
| auto const& native_instance = Instance::Cast(pragma_value); |
| ASSERT(!native_instance.IsNull()); |
| #if defined(DEBUG) |
| const auto& native_class = Class::Handle(zone, native_instance.clazz()); |
| ASSERT(String::Handle(zone, native_class.UserVisibleName()) |
| .Equals(Symbols::FfiNative())); |
| #endif |
| return native_instance.ptr(); |
| } |
| |
| bool Function::is_old_native() const { |
| return is_native() && !is_external(); |
| } |
| |
| bool Function::is_ffi_native() const { |
| return is_native() && is_external(); |
| } |
| |
| void Function::SetSignature(const FunctionType& value) const { |
| set_signature(value); |
| ASSERT(NumImplicitParameters() == value.num_implicit_parameters()); |
| if (IsClosureFunction() && value.IsGeneric()) { |
| Zone* zone = Thread::Current()->zone(); |
| const TypeParameters& type_params = |
| TypeParameters::Handle(zone, value.type_parameters()); |
| const TypeArguments& defaults = |
| TypeArguments::Handle(zone, type_params.defaults()); |
| auto mode = defaults.GetInstantiationMode(zone, this); |
| set_default_type_arguments_instantiation_mode(mode); |
| } |
| } |
| |
| TypeParameterPtr FunctionType::TypeParameterAt(intptr_t index, |
| Nullability nullability) const { |
| ASSERT(index >= 0 && index < NumTypeParameters()); |
| Thread* thread = Thread::Current(); |
| Zone* zone = thread->zone(); |
| TypeParameter& type_param = TypeParameter::Handle( |
| zone, TypeParameter::New(*this, NumParentTypeArguments(), |
| NumParentTypeArguments() + index, nullability)); |
| type_param.SetIsFinalized(); |
| if (IsFinalized()) { |
| type_param ^= type_param.Canonicalize(thread); |
| } |
| return type_param.ptr(); |
| } |
| |
| void FunctionType::set_result_type(const AbstractType& value) const { |
| ASSERT(!value.IsNull()); |
| untag()->set_result_type(value.ptr()); |
| } |
| |
| AbstractTypePtr Function::ParameterTypeAt(intptr_t index) const { |
| const Array& types = Array::Handle(parameter_types()); |
| return AbstractType::RawCast(types.At(index)); |
| } |
| |
| AbstractTypePtr FunctionType::ParameterTypeAt(intptr_t index) const { |
| const Array& parameter_types = Array::Handle(untag()->parameter_types()); |
| return AbstractType::RawCast(parameter_types.At(index)); |
| } |
| |
| void FunctionType::SetParameterTypeAt(intptr_t index, |
| const AbstractType& value) const { |
| ASSERT(!value.IsNull()); |
| const Array& parameter_types = Array::Handle(untag()->parameter_types()); |
| parameter_types.SetAt(index, value); |
| } |
| |
| void FunctionType::set_parameter_types(const Array& value) const { |
| ASSERT(value.IsNull() || value.Length() > 0); |
| untag()->set_parameter_types(value.ptr()); |
| } |
| |
| StringPtr Function::ParameterNameAt(intptr_t index) const { |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| if (signature() == FunctionType::null()) { |
| // Without the signature, we're guaranteed not to have any name information. |
| return Symbols::OptimizedOut().ptr(); |
| } |
| #endif |
| const intptr_t num_fixed = num_fixed_parameters(); |
| if (HasOptionalNamedParameters() && index >= num_fixed) { |
| const Array& parameter_names = |
| Array::Handle(signature()->untag()->named_parameter_names()); |
| return String::RawCast(parameter_names.At(index - num_fixed)); |
| } |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| return Symbols::OptimizedOut().ptr(); |
| #else |
| const Array& names = Array::Handle(untag()->positional_parameter_names()); |
| return String::RawCast(names.At(index)); |
| #endif |
| } |
| |
| void Function::SetParameterNameAt(intptr_t index, const String& value) const { |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| UNREACHABLE(); |
| #else |
| ASSERT(!value.IsNull() && value.IsSymbol()); |
| if (HasOptionalNamedParameters() && index >= num_fixed_parameters()) { |
| // These should be set on the signature, not the function. |
| UNREACHABLE(); |
| } |
| const Array& parameter_names = |
| Array::Handle(untag()->positional_parameter_names()); |
| parameter_names.SetAt(index, value); |
| #endif |
| } |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| void Function::set_positional_parameter_names(const Array& value) const { |
| ASSERT(value.ptr() == Object::empty_array().ptr() || value.Length() > 0); |
| untag()->set_positional_parameter_names(value.ptr()); |
| } |
| #endif |
| |
| StringPtr FunctionType::ParameterNameAt(intptr_t index) const { |
| const intptr_t num_fixed = num_fixed_parameters(); |
| if (!HasOptionalNamedParameters() || index < num_fixed) { |
| // The positional parameter names are stored on the function, not here. |
| UNREACHABLE(); |
| } |
| const Array& parameter_names = |
| Array::Handle(untag()->named_parameter_names()); |
| return String::RawCast(parameter_names.At(index - num_fixed)); |
| } |
| |
| void FunctionType::SetParameterNameAt(intptr_t index, |
| const String& value) const { |
| #if defined(DART_PRECOMPILED_RUNTIME) && !defined(DART_DYNAMIC_MODULES) |
| UNREACHABLE(); |
| #else |
| ASSERT(!value.IsNull() && value.IsSymbol()); |
| const intptr_t num_fixed = num_fixed_parameters(); |
| if (!HasOptionalNamedParameters() || index < num_fixed) { |
| UNREACHABLE(); |
| } |
| const Array& parameter_names = |
| Array::Handle(untag()->named_parameter_names()); |
| parameter_names.SetAt(index - num_fixed, value); |
| #endif |
| } |
| |
| void FunctionType::set_named_parameter_names(const Array& value) const { |
| ASSERT(value.ptr() == Object::empty_array().ptr() || value.Length() > 0); |
| untag()->set_named_parameter_names(value.ptr()); |
| } |
| |
| void Function::CreateNameArray(Heap::Space space) const { |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| UNREACHABLE(); |
| #else |
| const intptr_t num_positional_params = |
| num_fixed_parameters() + NumOptionalPositionalParameters(); |
| if (num_positional_params == 0) { |
| set_positional_parameter_names(Object::empty_array()); |
| } else { |
| set_positional_parameter_names( |
| Array::Handle(Array::New(num_positional_params, space))); |
| } |
| #endif |
| } |
| |
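| // The named_parameter_names array has the layout
| // [name_0, ..., name_{K-1}, flags_0, ..., flags_{F-1}], where K is the
| // number of named parameters and each flags slot is a Smi packing the
| // parameter flags (currently only the `required` flag) for
| // kNumParameterFlagsPerElement consecutive named parameters.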
| void FunctionType::CreateNameArrayIncludingFlags(Heap::Space space) const { |
| #if defined(DART_PRECOMPILED_RUNTIME) && !defined(DART_DYNAMIC_MODULES) |
| UNREACHABLE(); |
| #else |
| const intptr_t num_named_parameters = NumOptionalNamedParameters(); |
| if (num_named_parameters == 0) {
| set_named_parameter_names(Object::empty_array());
| return;
| }
| // Currently, we only store flags for named parameters. |
| const intptr_t last_index = (num_named_parameters - 1) / |
| compiler::target::kNumParameterFlagsPerElement; |
| const intptr_t num_flag_slots = last_index + 1; |
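| // That is, num_flag_slots is num_named_parameters divided by
| // kNumParameterFlagsPerElement, rounded up.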
| intptr_t num_total_slots = num_named_parameters + num_flag_slots; |
| auto& array = Array::Handle(Array::New(num_total_slots, space)); |
| // Set flag slots to Smi 0 before handing off. |
| auto& empty_flags_smi = Smi::Handle(Smi::New(0)); |
| for (intptr_t i = num_named_parameters; i < num_total_slots; i++) { |
| array.SetAt(i, empty_flags_smi); |
| } |
| set_named_parameter_names(array); |
| #endif |
| } |
| |
| intptr_t FunctionType::GetRequiredFlagIndex(intptr_t index, |
| intptr_t* flag_mask) const { |
| // If these calculations change, also change |
| // FlowGraphBuilder::BuildClosureCallHasRequiredNamedArgumentsCheck. |
| ASSERT(HasOptionalNamedParameters()); |
| ASSERT(flag_mask != nullptr); |
| ASSERT(index >= num_fixed_parameters()); |
| index -= num_fixed_parameters(); |
| *flag_mask = (1 << compiler::target::kRequiredNamedParameterFlag) |
| << ((static_cast<uintptr_t>(index) % |
| compiler::target::kNumParameterFlagsPerElement) * |
| compiler::target::kNumParameterFlags); |
| return NumOptionalNamedParameters() + |
| index / compiler::target::kNumParameterFlagsPerElement; |
| } |
| |
| bool Function::HasRequiredNamedParameters() const { |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| if (signature() == FunctionType::null()) { |
| // Signatures for functions with required named parameters are not dropped. |
| return false; |
| } |
| #endif |
| return FunctionType::Handle(signature()).HasRequiredNamedParameters(); |
| } |
| |
| bool Function::IsRequiredAt(intptr_t index) const { |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| if (signature() == FunctionType::null()) { |
| // The signature is not dropped in AOT when any named parameter is required. |
| return false; |
| } |
| #endif |
| if (!HasOptionalNamedParameters() || index < num_fixed_parameters()) { |
| return false; |
| } |
| const FunctionType& sig = FunctionType::Handle(signature()); |
| return sig.IsRequiredAt(index); |
| } |
| |
| bool FunctionType::IsRequiredAt(intptr_t index) const { |
| if (!HasOptionalNamedParameters() || index < num_fixed_parameters()) { |
| return false; |
| } |
| intptr_t flag_mask; |
| const intptr_t flag_index = GetRequiredFlagIndex(index, &flag_mask); |
| const Array& parameter_names = |
| Array::Handle(untag()->named_parameter_names()); |
| if (flag_index >= parameter_names.Length()) { |
| return false; |
| } |
| const intptr_t flags = |
| Smi::Value(Smi::RawCast(parameter_names.At(flag_index))); |
| return (flags & flag_mask) != 0; |
| } |
| |
| void FunctionType::SetIsRequiredAt(intptr_t index) const { |
| #if defined(DART_PRECOMPILED_RUNTIME) && !defined(DART_DYNAMIC_MODULES) |
| UNREACHABLE(); |
| #else |
| intptr_t flag_mask; |
| const intptr_t flag_index = GetRequiredFlagIndex(index, &flag_mask); |
| const Array& parameter_names = |
| Array::Handle(untag()->named_parameter_names()); |
| ASSERT(flag_index < parameter_names.Length()); |
| const intptr_t flags = |
| Smi::Value(Smi::RawCast(parameter_names.At(flag_index))); |
| parameter_names.SetAt(flag_index, Smi::Handle(Smi::New(flags | flag_mask))); |
| #endif |
| } |
| |
| void FunctionType::FinalizeNameArray() const { |
| #if defined(DART_PRECOMPILED_RUNTIME) && !defined(DART_DYNAMIC_MODULES) |
| UNREACHABLE(); |
| #else |
| const intptr_t num_named_parameters = NumOptionalNamedParameters(); |
| if (num_named_parameters == 0) { |
| ASSERT(untag()->named_parameter_names() == Object::empty_array().ptr()); |
| return; |
| } |
| const Array& parameter_names = |
| Array::Handle(untag()->named_parameter_names()); |
| // Truncate the parameter names array to remove unused flags from the end. |
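| // For example, [name0, name1, Smi(0)] becomes [name0, name1] when neither |
| // parameter is marked required, so HasRequiredNamedParameters() can later |
| // just check whether any flag slots remain. |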
| intptr_t last_used = parameter_names.Length() - 1; |
| for (; last_used >= num_named_parameters; --last_used) { |
| if (Smi::Value(Smi::RawCast(parameter_names.At(last_used))) != 0) { |
| break; |
| } |
| } |
| parameter_names.Truncate(last_used + 1); |
| #endif |
| } |
| |
| bool FunctionType::HasRequiredNamedParameters() const { |
| const intptr_t num_named_params = NumOptionalNamedParameters(); |
| if (num_named_params == 0) return false; |
| // Check for flag slots in the named parameter names array. |
| const auto& parameter_names = Array::Handle(named_parameter_names()); |
| ASSERT(!parameter_names.IsNull()); |
| return parameter_names.Length() > num_named_params; |
| } |
| |
| static void ReportTooManyTypeParameters(const FunctionType& sig) { |
| Report::MessageF(Report::kError, Script::Handle(), TokenPosition::kNoSource, |
| Report::AtLocation, |
| "too many type parameters declared in signature '%s' or in " |
| "its enclosing signatures", |
| sig.ToUserVisibleCString()); |
| UNREACHABLE(); |
| } |
| |
| void FunctionType::SetTypeParameters(const TypeParameters& value) const { |
| untag()->set_type_parameters(value.ptr()); |
| const intptr_t count = value.Length(); |
| if (!UntaggedFunctionType::PackedNumTypeParameters::is_valid(count)) { |
| ReportTooManyTypeParameters(*this); |
| } |
| untag()->packed_type_parameter_counts_.Update<PackedNumTypeParameters>(count); |
| } |
| |
| void FunctionType::SetNumParentTypeArguments(intptr_t value) const { |
| ASSERT(value >= 0); |
| if (!PackedNumParentTypeArguments::is_valid(value)) { |
| ReportTooManyTypeParameters(*this); |
| } |
| untag()->packed_type_parameter_counts_.Update<PackedNumParentTypeArguments>( |
| value); |
| } |
| |
| bool Function::IsGeneric() const { |
| return FunctionType::IsGeneric(signature()); |
| } |
| intptr_t Function::NumTypeParameters() const { |
| return FunctionType::NumTypeParametersOf(signature()); |
| } |
| intptr_t Function::NumParentTypeArguments() const { |
| return FunctionType::NumParentTypeArgumentsOf(signature()); |
| } |
| intptr_t Function::NumTypeArguments() const { |
| return FunctionType::NumTypeArgumentsOf(signature()); |
| } |
| intptr_t Function::num_fixed_parameters() const { |
| return FunctionType::NumFixedParametersOf(signature()); |
| } |
| bool Function::HasOptionalParameters() const { |
| return FunctionType::HasOptionalParameters(signature()); |
| } |
| bool Function::HasOptionalNamedParameters() const { |
| return FunctionType::HasOptionalNamedParameters(signature()); |
| } |
| bool Function::HasOptionalPositionalParameters() const { |
| return FunctionType::HasOptionalPositionalParameters(signature()); |
| } |
| intptr_t Function::NumOptionalParameters() const { |
| return FunctionType::NumOptionalParametersOf(signature()); |
| } |
| intptr_t Function::NumOptionalPositionalParameters() const { |
| return FunctionType::NumOptionalPositionalParametersOf(signature()); |
| } |
| intptr_t Function::NumOptionalNamedParameters() const { |
| return FunctionType::NumOptionalNamedParametersOf(signature()); |
| } |
| intptr_t Function::NumParameters() const { |
| return FunctionType::NumParametersOf(signature()); |
| } |
| |
| TypeParameterPtr Function::TypeParameterAt(intptr_t index, |
| Nullability nullability) const { |
| const FunctionType& sig = FunctionType::Handle(signature()); |
| return sig.TypeParameterAt(index, nullability); |
| } |
| |
| void Function::set_kind(UntaggedFunction::Kind value) const { |
| untag()->kind_tag_.Update<KindBits>(value); |
| } |
| |
| void Function::set_modifier(UntaggedFunction::AsyncModifier value) const { |
| untag()->kind_tag_.Update<ModifierBits>(value); |
| } |
| |
| void Function::set_recognized_kind(MethodRecognizer::Kind value) const { |
| // Prevent multiple settings of kind. |
| ASSERT((value == MethodRecognizer::kUnknown) || !IsRecognized()); |
| untag()->kind_tag_.Update<RecognizedBits>(value); |
| } |
| |
| void Function::set_token_pos(TokenPosition token_pos) const { |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| UNREACHABLE(); |
| #else |
| ASSERT(!token_pos.IsClassifying() || IsMethodExtractor()); |
| StoreNonPointer(&untag()->token_pos_, token_pos); |
| #endif |
| } |
| |
| void Function::set_kind_tag(uint32_t value) const { |
| untag()->kind_tag_ = value; |
| } |
| |
| bool Function::is_eval_function() const { |
| if (data()->IsArray()) { |
| const intptr_t len = Array::LengthOf(Array::RawCast(data())); |
| return len == static_cast<intptr_t>(EvalFunctionData::kLength); |
| } |
| return false; |
| } |
| |
| bool Function::IsOptimizable() const { |
| if (FLAG_precompiled_mode) { |
| return true; |
| } |
| if (ForceOptimize()) return true; |
| if (is_old_native()) { |
| // Native methods don't need to be optimized. |
| return false; |
| } |
| if (is_optimizable() && (script() != Script::null())) { |
| // Additional check needed for implicit getters. |
| return (unoptimized_code() == Object::null()) || |
| (Code::Handle(unoptimized_code()).Size() < |
| FLAG_huge_method_cutoff_in_code_size); |
| } |
| return false; |
| } |
| |
| bool Function::IsTypedDataViewFactory() const { |
| switch (recognized_kind()) { |
| case MethodRecognizer::kTypedData_ByteDataView_factory: |
| case MethodRecognizer::kTypedData_Int8ArrayView_factory: |
| case MethodRecognizer::kTypedData_Uint8ArrayView_factory: |
| case MethodRecognizer::kTypedData_Uint8ClampedArrayView_factory: |
| case MethodRecognizer::kTypedData_Int16ArrayView_factory: |
| case MethodRecognizer::kTypedData_Uint16ArrayView_factory: |
| case MethodRecognizer::kTypedData_Int32ArrayView_factory: |
| case MethodRecognizer::kTypedData_Uint32ArrayView_factory: |
| case MethodRecognizer::kTypedData_Int64ArrayView_factory: |
| case MethodRecognizer::kTypedData_Uint64ArrayView_factory: |
| case MethodRecognizer::kTypedData_Float32ArrayView_factory: |
| case MethodRecognizer::kTypedData_Float64ArrayView_factory: |
| case MethodRecognizer::kTypedData_Float32x4ArrayView_factory: |
| case MethodRecognizer::kTypedData_Int32x4ArrayView_factory: |
| case MethodRecognizer::kTypedData_Float64x2ArrayView_factory: |
| return true; |
| default: |
| return false; |
| } |
| } |
| |
| bool Function::IsUnmodifiableTypedDataViewFactory() const { |
| switch (recognized_kind()) { |
| case MethodRecognizer::kTypedData_UnmodifiableByteDataView_factory: |
| case MethodRecognizer::kTypedData_UnmodifiableInt8ArrayView_factory: |
| case MethodRecognizer::kTypedData_UnmodifiableUint8ArrayView_factory: |
| case MethodRecognizer::kTypedData_UnmodifiableUint8ClampedArrayView_factory: |
| case MethodRecognizer::kTypedData_UnmodifiableInt16ArrayView_factory: |
| case MethodRecognizer::kTypedData_UnmodifiableUint16ArrayView_factory: |
| case MethodRecognizer::kTypedData_UnmodifiableInt32ArrayView_factory: |
| case MethodRecognizer::kTypedData_UnmodifiableUint32ArrayView_factory: |
| case MethodRecognizer::kTypedData_UnmodifiableInt64ArrayView_factory: |
| case MethodRecognizer::kTypedData_UnmodifiableUint64ArrayView_factory: |
| case MethodRecognizer::kTypedData_UnmodifiableFloat32ArrayView_factory: |
| case MethodRecognizer::kTypedData_UnmodifiableFloat64ArrayView_factory: |
| case MethodRecognizer::kTypedData_UnmodifiableFloat32x4ArrayView_factory: |
| case MethodRecognizer::kTypedData_UnmodifiableInt32x4ArrayView_factory: |
| case MethodRecognizer::kTypedData_UnmodifiableFloat64x2ArrayView_factory: |
| return true; |
| default: |
| return false; |
| } |
| } |
| |
| static bool InVmTests(const Function& function) { |
| #if defined(TESTING) |
| return true; |
| #else |
| auto* zone = Thread::Current()->zone(); |
| const auto& cls = Class::Handle(zone, function.Owner()); |
| const auto& lib = Library::Handle(zone, cls.library()); |
| const auto& url = String::Handle(zone, lib.url()); |
| const bool in_vm_tests = |
| strstr(url.ToCString(), "runtime/tests/vm/") != nullptr; |
| return in_vm_tests; |
| #endif |
| } |
| |
| bool Function::ForceOptimize() const { |
| if (RecognizedKindForceOptimize() || IsFfiCallClosure() || |
| IsFfiCallbackTrampoline() || is_ffi_native() || |
| IsTypedDataViewFactory() || IsUnmodifiableTypedDataViewFactory()) { |
| return true; |
| } |
| |
| if (!has_pragma()) return false; |
| |
| const bool has_vm_pragma = Library::FindPragma( |
| Thread::Current(), false, *this, Symbols::vm_force_optimize()); |
| if (!has_vm_pragma) return false; |
| |
| // For run_vm_tests and runtime/tests/vm allow marking arbitrary functions as |
| // force-optimize via `@pragma('vm:force-optimize')`. |
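| // Illustrative Dart usage in such a test: |
| // |
| //   @pragma('vm:force-optimize') |
| //   int hotPath(int x) => x + 1; |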
| return InVmTests(*this); |
| } |
| |
| bool Function::IsPreferInline() const { |
| if (!has_pragma()) return false; |
| |
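| // Checks for `@pragma('vm:prefer-inline')`, the string behind |
| // Symbols::vm_prefer_inline. |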
| return Library::FindPragma(Thread::Current(), /*only_core=*/false, *this, |
| Symbols::vm_prefer_inline()); |
| } |
| |
| bool Function::IsIdempotent() const { |
| if (!has_pragma()) return false; |
| |
| #if defined(TESTING) |
| const bool kAllowOnlyForCoreLibFunctions = false; |
| #else |
| const bool kAllowOnlyForCoreLibFunctions = true; |
| #endif // defined(TESTING) |
| |
| return Library::FindPragma(Thread::Current(), kAllowOnlyForCoreLibFunctions, |
| *this, Symbols::vm_idempotent()); |
| } |
| |
| bool Function::IsCachableIdempotent() const { |
| if (!has_pragma()) return false; |
| |
| const bool has_vm_pragma = |
| Library::FindPragma(Thread::Current(), /*only_core=*/false, *this, |
| Symbols::vm_cachable_idempotent()); |
| if (!has_vm_pragma) return false; |
| |
| // For run_vm_tests and runtime/tests/vm allow marking arbitrary functions. |
| return InVmTests(*this); |
| } |
| |
| bool Function::IsFfiCallClosure() const { |
| if (!IsNonImplicitClosureFunction()) return false; |
| if (!has_pragma()) return false; |
| return Library::FindPragma(Thread::Current(), /*only_core=*/false, *this, |
| Symbols::vm_ffi_call_closure()); |
| } |
| |
| InstancePtr Function::GetFfiCallClosurePragmaValue() const { |
| ASSERT(IsFfiCallClosure()); |
| Thread* thread = Thread::Current(); |
| Zone* zone = thread->zone(); |
| auto& pragma_value = Object::Handle(zone); |
| Library::FindPragma(thread, /*only_core=*/false, *this, |
| Symbols::vm_ffi_call_closure(), |
| /*multiple=*/false, &pragma_value); |
| ASSERT(!pragma_value.IsNull()); |
| return Instance::Cast(pragma_value).ptr(); |
| } |
| |
| bool Function::RecognizedKindForceOptimize() const { |
| switch (recognized_kind()) { |
| // Uses unboxed/untagged data not supported in unoptimized code, or uses |
| // LoadIndexed/StoreIndexed/MemoryCopy instructions with typed data |
| // arrays, which require optimization for payload extraction. |
| case MethodRecognizer::kObjectArrayGetIndexed: |
| case MethodRecognizer::kGrowableArrayGetIndexed: |
| #define TYPED_DATA_GET_INDEXED_CASES(clazz) \ |
| case MethodRecognizer::k##clazz##ArrayGetIndexed: \ |
| FALL_THROUGH; \ |
| case MethodRecognizer::kExternal##clazz##ArrayGetIndexed: \ |
| FALL_THROUGH; \ |
| case MethodRecognizer::k##clazz##ArrayViewGetIndexed: \ |
| FALL_THROUGH; |
| DART_CLASS_LIST_TYPED_DATA(TYPED_DATA_GET_INDEXED_CASES) |
| #undef TYPED_DATA_GET_INDEXED_CASES |
| case MethodRecognizer::kCopyRangeFromUint8ListToOneByteString: |
| case MethodRecognizer::kFinalizerBase_getIsolateFinalizers: |
| case MethodRecognizer::kFinalizerBase_setIsolate: |
| case MethodRecognizer::kFinalizerBase_setIsolateFinalizers: |
| case MethodRecognizer::kFinalizerEntry_getExternalSize: |
| case MethodRecognizer::kExtensionStreamHasListener: |
| case MethodRecognizer::kFfiLoadInt8: |
| case MethodRecognizer::kFfiLoadInt16: |
| case MethodRecognizer::kFfiLoadInt32: |
| case MethodRecognizer::kFfiLoadInt64: |
| case MethodRecognizer::kFfiLoadUint8: |
| case MethodRecognizer::kFfiLoadUint16: |
| case MethodRecognizer::kFfiLoadUint32: |
| case MethodRecognizer::kFfiLoadUint64: |
| case MethodRecognizer::kFfiLoadFloat: |
| case MethodRecognizer::kFfiLoadFloatUnaligned: |
| case MethodRecognizer::kFfiLoadDouble: |
| case MethodRecognizer::kFfiLoadDoubleUnaligned: |
| case MethodRecognizer::kFfiLoadPointer: |
| case MethodRecognizer::kFfiStoreInt8: |
| case MethodRecognizer::kFfiStoreInt16: |
| case MethodRecognizer::kFfiStoreInt32: |
| case MethodRecognizer::kFfiStoreInt64: |
| case MethodRecognizer::kFfiStoreUint8: |
| case MethodRecognizer::kFfiStoreUint16: |
| case MethodRecognizer::kFfiStoreUint32: |
| case MethodRecognizer::kFfiStoreUint64: |
| case MethodRecognizer::kFfiStoreFloat: |
| case MethodRecognizer::kFfiStoreFloatUnaligned: |
| case MethodRecognizer::kFfiStoreDouble: |
| case MethodRecognizer::kFfiStoreDoubleUnaligned: |
| case MethodRecognizer::kFfiStorePointer: |
| case MethodRecognizer::kFfiFromAddress: |
| case MethodRecognizer::kFfiGetAddress: |
| case MethodRecognizer::kFfiAsExternalTypedDataInt8: |
| case MethodRecognizer::kFfiAsExternalTypedDataInt16: |
| case MethodRecognizer::kFfiAsExternalTypedDataInt32: |
| case MethodRecognizer::kFfiAsExternalTypedDataInt64: |
| case MethodRecognizer::kFfiAsExternalTypedDataUint8: |
| case MethodRecognizer::kFfiAsExternalTypedDataUint16: |
| case MethodRecognizer::kFfiAsExternalTypedDataUint32: |
| case MethodRecognizer::kFfiAsExternalTypedDataUint64: |
| case MethodRecognizer::kFfiAsExternalTypedDataFloat: |
| case MethodRecognizer::kFfiAsExternalTypedDataDouble: |
| case MethodRecognizer::kGetNativeField: |
| case MethodRecognizer::kRecord_fieldNames: |
| case MethodRecognizer::kRecord_numFields: |
| case MethodRecognizer::kStringBaseCodeUnitAt: |
| case MethodRecognizer::kUtf8DecoderScan: |
| case MethodRecognizer::kDouble_hashCode: |
| case MethodRecognizer::kTypedList_GetInt8: |
| case MethodRecognizer::kTypedList_SetInt8: |
| case MethodRecognizer::kTypedList_GetUint8: |
| case MethodRecognizer::kTypedList_SetUint8: |
| case MethodRecognizer::kTypedList_GetInt16: |
| case MethodRecognizer::kTypedList_SetInt16: |
| case MethodRecognizer::kTypedList_GetUint16: |
| case MethodRecognizer::kTypedList_SetUint16: |
| case MethodRecognizer::kTypedList_GetInt32: |
| case MethodRecognizer::kTypedList_SetInt32: |
| case MethodRecognizer::kTypedList_GetUint32: |
| case MethodRecognizer::kTypedList_SetUint32: |
| case MethodRecognizer::kTypedList_GetInt64: |
| case MethodRecognizer::kTypedList_SetInt64: |
| case MethodRecognizer::kTypedList_GetUint64: |
| case MethodRecognizer::kTypedList_SetUint64: |
| case MethodRecognizer::kTypedList_GetFloat32: |
| case MethodRecognizer::kTypedList_SetFloat32: |
| case MethodRecognizer::kTypedList_GetFloat64: |
| case MethodRecognizer::kTypedList_SetFloat64: |
| case MethodRecognizer::kTypedList_GetInt32x4: |
| case MethodRecognizer::kTypedList_SetInt32x4: |
| case MethodRecognizer::kTypedList_GetFloat32x4: |
| case MethodRecognizer::kTypedList_SetFloat32x4: |
| case MethodRecognizer::kTypedList_GetFloat64x2: |
| case MethodRecognizer::kTypedList_SetFloat64x2: |
| case MethodRecognizer::kTypedData_memMove1: |
| case MethodRecognizer::kTypedData_memMove2: |
| case MethodRecognizer::kTypedData_memMove4: |
| case MethodRecognizer::kTypedData_memMove8: |
| case MethodRecognizer::kTypedData_memMove16: |
| case MethodRecognizer::kMemCopy: |
| // Prevent the GC from running so that the operation is atomic from |
| // a GC point of view. Always double check implementation in |
| // kernel_to_il.cc that no GC can happen in between the relevant IL |
| // instructions. |
| // TODO(https://dartbug.com/48527): Support inlining. |
| case MethodRecognizer::kFinalizerBase_exchangeEntriesCollectedWithNull: |
| // Both unboxed/untagged data and atomic-to-GC operation. |
| case MethodRecognizer::kFinalizerEntry_allocate: |
| return true; |
| default: |
| return false; |
| } |
| } |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| bool Function::CanBeInlined() const { |
| if (ForceOptimize()) { |
| if (IsFfiCallClosure() || IsFfiCallbackTrampoline() || is_ffi_native()) { |
| // We currently don't support inlining FFI trampolines. Some of them |
| // are naturally non-inlinable because they contain a try/catch block, |
| // but this condition is broader than strictly necessary. |
| // The work necessary for inlining FFI trampolines is tracked by |
| // http://dartbug.com/45055. |
| return false; |
| } |
| if (CompilerState::Current().is_aot()) { |
| return true; |
| } |
| // Inlining of force-optimized functions requires the target function to be |
| // idempotent because, if deoptimization is needed in the inlined body, |
| // execution of the force-optimized function will be restarted at the |
| // beginning of the function. |
| ASSERT(!IsPreferInline() || IsIdempotent()); |
| return IsIdempotent(); |
| } |
| |
| if (HasBreakpoint()) { |
| return false; |
| } |
| |
| return is_inlinable(); |
| } |
| #endif // !defined(DART_PRECOMPILED_RUNTIME) |
| |
| intptr_t Function::NumImplicitParameters() const { |
| const UntaggedFunction::Kind k = kind(); |
| if (k == UntaggedFunction::kConstructor) { |
| // Type arguments for factory; instance for generative constructor. |
| return 1; |
| } |
| if ((k == UntaggedFunction::kClosureFunction) || |
| (k == UntaggedFunction::kImplicitClosureFunction) || |
| (k == UntaggedFunction::kFfiTrampoline)) { |
| return 1; // Closure object. |
| } |
| if (!is_static()) { |
| // Closure functions defined inside instance (i.e. non-static) functions are |
| // marked as non-static, but they do not have a receiver. |
| // Closures are handled above. |
| ASSERT((k != UntaggedFunction::kClosureFunction) && |
| (k != UntaggedFunction::kImplicitClosureFunction)); |
| return 1; // Receiver. |
| } |
| return 0; // No implicit parameters. |
| } |
| |
| bool Function::AreValidArgumentCounts(intptr_t num_type_arguments, |
| intptr_t num_arguments, |
| intptr_t num_named_arguments, |
| String* error_message) const { |
| if ((num_type_arguments != 0) && |
| (num_type_arguments != NumTypeParameters())) { |
| if (error_message != nullptr) { |
| const intptr_t kMessageBufferSize = 64; |
| char message_buffer[kMessageBufferSize]; |
| Utils::SNPrint(message_buffer, kMessageBufferSize, |
| "%" Pd " type arguments passed, but %" Pd " expected", |
| num_type_arguments, NumTypeParameters()); |
| // Allocate in old space because it can be invoked in background |
| // optimizing compilation. |
| *error_message = String::New(message_buffer, Heap::kOld); |
| } |
| return false; // Too many type arguments. |
| } |
| if (num_named_arguments > NumOptionalNamedParameters()) { |
| if (error_message != nullptr) { |
| const intptr_t kMessageBufferSize = 64; |
| char message_buffer[kMessageBufferSize]; |
| Utils::SNPrint(message_buffer, kMessageBufferSize, |
| "%" Pd " named passed, at most %" Pd " expected", |
| num_named_arguments, NumOptionalNamedParameters()); |
| // Allocate in old space because it can be invoked in background |
| // optimizing compilation. |
| *error_message = String::New(message_buffer, Heap::kOld); |
| } |
| return false; // Too many named arguments. |
| } |
| const intptr_t num_pos_args = num_arguments - num_named_arguments; |
| const intptr_t num_opt_pos_params = NumOptionalPositionalParameters(); |
| const intptr_t num_pos_params = num_fixed_parameters() + num_opt_pos_params; |
| if (num_pos_args > num_pos_params) { |
| if (error_message != nullptr) { |
| const intptr_t kMessageBufferSize = 64; |
| char message_buffer[kMessageBufferSize]; |
| // Hide implicit parameters from the user. |
| const intptr_t num_hidden_params = NumImplicitParameters(); |
| Utils::SNPrint(message_buffer, kMessageBufferSize, |
| "%" Pd "%s passed, %s%" Pd " expected", |
| num_pos_args - num_hidden_params, |
| num_opt_pos_params > 0 ? " positional" : "", |
| num_opt_pos_params > 0 ? "at most " : "", |
| num_pos_params - num_hidden_params); |
| // Allocate in old space because it can be invoked in background |
| // optimizing compilation. |
| *error_message = String::New(message_buffer, Heap::kOld); |
| } |
| return false; // Too many fixed and/or positional arguments. |
| } |
| if (num_pos_args < num_fixed_parameters()) { |
| if (error_message != nullptr) { |
| const intptr_t kMessageBufferSize = 64; |
| char message_buffer[kMessageBufferSize]; |
| // Hide implicit parameters from the user. |
| const intptr_t num_hidden_params = NumImplicitParameters(); |
| Utils::SNPrint(message_buffer, kMessageBufferSize, |
| "%" Pd "%s passed, %s%" Pd " expected", |
| num_pos_args - num_hidden_params, |
| num_opt_pos_params > 0 ? " positional" : "", |
| num_opt_pos_params > 0 ? "at least " : "", |
| num_fixed_parameters() - num_hidden_params); |
| // Allocate in old space because it can be invoked in background |
| // optimizing compilation. |
| *error_message = String::New(message_buffer, Heap::kOld); |
| } |
| return false; // Too few fixed and/or positional arguments. |
| } |
| return true; |
| } |
| |
| bool Function::AreValidArguments(intptr_t num_type_arguments, |
| intptr_t num_arguments, |
| const Array& argument_names, |
| String* error_message) const { |
| const Array& args_desc_array = Array::Handle(ArgumentsDescriptor::NewBoxed( |
| num_type_arguments, num_arguments, argument_names, Heap::kNew)); |
| ArgumentsDescriptor args_desc(args_desc_array); |
| return AreValidArguments(args_desc, error_message); |
| } |
| |
| bool Function::AreValidArguments(const ArgumentsDescriptor& args_desc, |
| String* error_message) const { |
| const intptr_t num_type_arguments = args_desc.TypeArgsLen(); |
| const intptr_t num_arguments = args_desc.Count(); |
| const intptr_t num_named_arguments = args_desc.NamedCount(); |
| |
| if (!AreValidArgumentCounts(num_type_arguments, num_arguments, |
| num_named_arguments, error_message)) { |
| return false; |
| } |
| // Verify that all argument names are valid parameter names. |
| Thread* thread = Thread::Current(); |
| Zone* zone = thread->zone(); |
| String& argument_name = String::Handle(zone); |
| String& parameter_name = String::Handle(zone); |
| const intptr_t num_positional_args = num_arguments - num_named_arguments; |
| const intptr_t num_parameters = NumParameters(); |
| for (intptr_t i = 0; i < num_named_arguments; i++) { |
| argument_name = args_desc.NameAt(i); |
| ASSERT(argument_name.IsSymbol()); |
| bool found = false; |
| for (intptr_t j = num_positional_args; j < num_parameters; j++) { |
| parameter_name = ParameterNameAt(j); |
| ASSERT(parameter_name.IsSymbol()); |
| if (argument_name.Equals(parameter_name)) { |
| found = true; |
| break; |
| } |
| } |
| if (!found) { |
| if (error_message != nullptr) { |
| const intptr_t kMessageBufferSize = 64; |
| char message_buffer[kMessageBufferSize]; |
| Utils::SNPrint(message_buffer, kMessageBufferSize, |
| "no optional formal parameter named '%s'", |
| argument_name.ToCString()); |
| *error_message = String::New(message_buffer); |
| } |
| return false; |
| } |
| } |
| // Verify that all required named parameters are filled. |
| for (intptr_t j = num_parameters - NumOptionalNamedParameters(); |
| j < num_parameters; j++) { |
| if (IsRequiredAt(j)) { |
| parameter_name = ParameterNameAt(j); |
| ASSERT(parameter_name.IsSymbol()); |
| bool found = false; |
| for (intptr_t i = 0; i < num_named_arguments; i++) { |
| argument_name = args_desc.NameAt(i); |
| ASSERT(argument_name.IsSymbol()); |
| if (argument_name.Equals(parameter_name)) { |
| found = true; |
| break; |
| } |
| } |
| if (!found) { |
| if (error_message != nullptr) { |
| const intptr_t kMessageBufferSize = 64; |
| char message_buffer[kMessageBufferSize]; |
| Utils::SNPrint(message_buffer, kMessageBufferSize, |
| "missing required named parameter '%s'", |
| parameter_name.ToCString()); |
| *error_message = String::New(message_buffer); |
| } |
| return false; |
| } |
| } |
| } |
| return true; |
| } |
| |
| // Retrieves the function type arguments, if any. These could be the |
| // explicitly passed type arguments from the arguments array, the delayed |
| // type arguments of a closure, or the instantiated bounds of the type |
| // parameters if no other source of function type arguments is found. |
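| // Note that delayed type arguments on a closure receiver and an explicitly |
| // passed type argument vector are mutually exclusive (enforced by an assert |
| // below); the default type arguments are used only when neither is present. |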
| static TypeArgumentsPtr RetrieveFunctionTypeArguments( |
| Thread* thread, |
| Zone* zone, |
| const Function& function, |
| const Instance& receiver, |
| const TypeArguments& instantiator_type_args, |
| const Array& args, |
| const ArgumentsDescriptor& args_desc) { |
| ASSERT(!function.IsNull()); |
| |
| const intptr_t kNumCurrentTypeArgs = function.NumTypeParameters(); |
| const intptr_t kNumParentTypeArgs = function.NumParentTypeArguments(); |
| const intptr_t kNumTypeArgs = kNumCurrentTypeArgs + kNumParentTypeArgs; |
| // Non-generic functions don't receive type arguments. |
| if (kNumTypeArgs == 0) return Object::empty_type_arguments().ptr(); |
| // Closure functions require that the receiver be provided (and be a closure). |
| ASSERT(!function.IsClosureFunction() || receiver.IsClosure()); |
| |
| // Only closure functions should have possibly generic parents. |
| ASSERT(function.IsClosureFunction() || kNumParentTypeArgs == 0); |
| const auto& parent_type_args = |
| function.IsClosureFunction() |
| ? TypeArguments::Handle( |
| zone, Closure::Cast(receiver).function_type_arguments()) |
| : Object::empty_type_arguments(); |
| // We don't try to instantiate the parent type parameters to their bounds |
| // if not provided or check any closed-over type arguments against the parent |
| // type parameter bounds (since they have been type checked already). |
| if (kNumCurrentTypeArgs == 0) return parent_type_args.ptr(); |
| |
| auto& function_type_args = TypeArguments::Handle(zone); |
| // First check for delayed type arguments before using either provided or |
| // default type arguments. |
| bool has_delayed_type_args = false; |
| if (function.IsClosureFunction()) { |
| const auto& closure = Closure::Cast(receiver); |
| function_type_args = closure.delayed_type_arguments(); |
| has_delayed_type_args = |
| function_type_args.ptr() != Object::empty_type_arguments().ptr(); |
| } |
| |
| if (args_desc.TypeArgsLen() > 0) { |
| // We should never end up here when the receiver is a closure with delayed |
| // type arguments unless this dynamically called closure function was |
| // retrieved directly from the closure instead of going through |
| // DartEntry::ResolveCallable, which appropriately checks for this case. |
| ASSERT(!has_delayed_type_args); |
| function_type_args ^= args.At(0); |
| } else if (!has_delayed_type_args) { |
| // We have no explicitly provided function type arguments, so instantiate |
| // the type parameters to bounds or replace as appropriate. |
| function_type_args = function.DefaultTypeArguments(zone); |
| auto const mode = |
| function.IsClosureFunction() |
| ? function.default_type_arguments_instantiation_mode() |
| : function_type_args.GetInstantiationMode(zone, &function); |
| switch (mode) { |
| case InstantiationMode::kIsInstantiated: |
| // Nothing left to do. |
| break; |
| case InstantiationMode::kNeedsInstantiation: |
| function_type_args = function_type_args.InstantiateAndCanonicalizeFrom( |
| instantiator_type_args, parent_type_args); |
| break; |
| case InstantiationMode::kSharesInstantiatorTypeArguments: |
| function_type_args = instantiator_type_args.ptr(); |
| break; |
| case InstantiationMode::kSharesFunctionTypeArguments: |
| function_type_args = parent_type_args.ptr(); |
| break; |
| } |
| } |
| |
| return function_type_args.Prepend(zone, parent_type_args, kNumParentTypeArgs, |
| kNumTypeArgs); |
| } |
| |
| // Retrieves the instantiator type arguments, if any, from the receiver. |
| static TypeArgumentsPtr RetrieveInstantiatorTypeArguments( |
| Zone* zone, |
| const Function& function, |
| const Instance& receiver) { |
| if (function.IsClosureFunction()) { |
| ASSERT(receiver.IsClosure()); |
| const auto& closure = Closure::Cast(receiver); |
| return closure.instantiator_type_arguments(); |
| } |
| if (!receiver.IsNull()) { |
| const auto& cls = Class::Handle(zone, receiver.clazz()); |
| if (cls.NumTypeArguments() > 0) { |
| return receiver.GetTypeArguments(); |
| } |
| } |
| return Object::empty_type_arguments().ptr(); |
| } |
| |
| ObjectPtr Function::DoArgumentTypesMatch( |
| const Array& args, |
| const ArgumentsDescriptor& args_desc) const { |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| if (signature() == FunctionType::null()) { |
| // Precompiler deleted signature because of missing entry point pragma. |
| return EntryPointMemberInvocationError(*this); |
| } |
| #endif |
| Thread* thread = Thread::Current(); |
| Zone* zone = thread->zone(); |
| |
| auto& receiver = Instance::Handle(zone); |
| if (IsClosureFunction() || HasThisParameter()) { |
| receiver ^= args.At(args_desc.FirstArgIndex()); |
| } |
| const auto& instantiator_type_arguments = TypeArguments::Handle( |
| zone, RetrieveInstantiatorTypeArguments(zone, *this, receiver)); |
| return Function::DoArgumentTypesMatch(args, args_desc, |
| instantiator_type_arguments); |
| } |
| |
| ObjectPtr Function::DoArgumentTypesMatch( |
| const Array& args, |
| const ArgumentsDescriptor& args_desc, |
| const TypeArguments& instantiator_type_arguments) const { |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| if (signature() == FunctionType::null()) { |
| // Precompiler deleted signature because of missing entry point pragma. |
| return EntryPointMemberInvocationError(*this); |
| } |
| #endif |
| Thread* thread = Thread::Current(); |
| Zone* zone = thread->zone(); |
| |
| auto& receiver = Instance::Handle(zone); |
| if (IsClosureFunction() || HasThisParameter()) { |
| receiver ^= args.At(args_desc.FirstArgIndex()); |
| } |
| |
| const auto& function_type_arguments = TypeArguments::Handle( |
| zone, RetrieveFunctionTypeArguments(thread, zone, *this, receiver, |
| instantiator_type_arguments, args, |
| args_desc)); |
| return Function::DoArgumentTypesMatch( |
| args, args_desc, instantiator_type_arguments, function_type_arguments); |
| } |
| |
| ObjectPtr Function::DoArgumentTypesMatch( |
| const Array& args, |
| const ArgumentsDescriptor& args_desc, |
| const TypeArguments& instantiator_type_arguments, |
| const TypeArguments& function_type_arguments) const { |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| if (signature() == FunctionType::null()) { |
| // Precompiler deleted signature because of missing entry point pragma. |
| return EntryPointMemberInvocationError(*this); |
| } |
| #endif |
| Thread* thread = Thread::Current(); |
| Zone* zone = thread->zone(); |
| |
| // Perform any non-covariant bounds checks on the provided function type |
| // arguments to make sure they are appropriate subtypes of the bounds. |
| const intptr_t kNumLocalTypeArgs = NumTypeParameters(); |
| if (kNumLocalTypeArgs > 0) { |
| const intptr_t kNumParentTypeArgs = NumParentTypeArguments(); |
| ASSERT(function_type_arguments.HasCount(kNumParentTypeArgs + |
| kNumLocalTypeArgs)); |
| const auto& params = TypeParameters::Handle(zone, type_parameters()); |
| // No checks are needed if all bounds are dynamic. |
| if (!params.AllDynamicBounds()) { |
| auto& param = AbstractType::Handle(zone); |
| auto& bound = AbstractType::Handle(zone); |
| for (intptr_t i = 0; i < kNumLocalTypeArgs; i++) { |
| bound = params.BoundAt(i); |
| // Only perform non-covariant checks where the bound is not |
| // the top type. |
| if (params.IsGenericCovariantImplAt(i) || |
| bound.IsTopTypeForSubtyping()) { |
| continue; |
| } |
| param = TypeParameterAt(i); |
| if (!AbstractType::InstantiateAndTestSubtype( |
| ¶m, &bound, instantiator_type_arguments, |
| function_type_arguments)) { |
| const auto& names = Array::Handle(zone, params.names()); |
| auto& name = String::Handle(zone); |
| name ^= names.At(i); |
| return Error::RawCast( |
| ThrowTypeError(token_pos(), param, bound, name)); |
| } |
| } |
| } |
| } else { |
| ASSERT(function_type_arguments.HasCount(NumParentTypeArguments())); |
| } |
| |
| AbstractType& type = AbstractType::Handle(zone); |
| Instance& argument = Instance::Handle(zone); |
| |
| auto check_argument = [](const Instance& argument, const AbstractType& type, |
| const TypeArguments& instantiator_type_args, |
| const TypeArguments& function_type_args) -> bool { |
| // If the argument type is the top type, no need to check. |
| if (type.IsTopTypeForSubtyping()) return true; |
| if (argument.IsNull()) { |
| return Instance::NullIsAssignableTo(type, instantiator_type_args, |
| function_type_args); |
| } |
| return argument.IsAssignableTo(type, instantiator_type_args, |
| function_type_args); |
| }; |
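| // (For example, a null argument is assignable to `int?` but not to `int`.) |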
| |
| // Check types of the provided arguments against the expected parameter types. |
| const intptr_t arg_offset = args_desc.FirstArgIndex(); |
| // Only check explicit arguments. |
| const intptr_t arg_start = arg_offset + NumImplicitParameters(); |
| const intptr_t end_positional_args = arg_offset + args_desc.PositionalCount(); |
| for (intptr_t arg_index = arg_start; arg_index < end_positional_args; |
| ++arg_index) { |
| argument ^= args.At(arg_index); |
| // Adjust for type arguments when they're present. |
| const intptr_t param_index = arg_index - arg_offset; |
| type = ParameterTypeAt(param_index); |
| if (!check_argument(argument, type, instantiator_type_arguments, |
| function_type_arguments)) { |
| auto& name = String::Handle(zone, ParameterNameAt(param_index)); |
| if (!type.IsInstantiated()) { |
| type = |
| type.InstantiateFrom(instantiator_type_arguments, |
| function_type_arguments, kAllFree, Heap::kNew); |
| } |
| return ThrowTypeError(token_pos(), argument, type, name); |
| } |
| } |
| |
| const intptr_t num_named_arguments = args_desc.NamedCount(); |
| if (num_named_arguments == 0) { |
| return Error::null(); |
| } |
| |
| const int num_parameters = NumParameters(); |
| const int num_fixed_params = num_fixed_parameters(); |
| |
| String& argument_name = String::Handle(zone); |
| String& parameter_name = String::Handle(zone); |
| |
| // Check types of named arguments against expected parameter type. |
| for (intptr_t named_index = 0; named_index < num_named_arguments; |
| named_index++) { |
| argument_name = args_desc.NameAt(named_index); |
| ASSERT(argument_name.IsSymbol()); |
| argument ^= args.At(arg_offset + args_desc.PositionAt(named_index)); |
| |
| // Try to find the named parameter that matches the provided argument. |
| // Even when annotated with @required, named parameters are still stored |
| // as if they were optional and so come after the fixed parameters. |
| // Currently O(n^2) as there's no guarantee from either the CFE or the |
| // VM that named parameters and named arguments are sorted in the same way. |
| intptr_t param_index = num_fixed_params; |
| for (; param_index < num_parameters; param_index++) { |
| parameter_name = ParameterNameAt(param_index); |
| ASSERT(parameter_name.IsSymbol()); |
| |
| if (!parameter_name.Equals(argument_name)) continue; |
| |
| type = ParameterTypeAt(param_index); |
| if (!check_argument(argument, type, instantiator_type_arguments, |
| function_type_arguments)) { |
| auto& name = String::Handle(zone, ParameterNameAt(param_index)); |
| if (!type.IsInstantiated()) { |
| type = type.InstantiateFrom(instantiator_type_arguments, |
| function_type_arguments, kAllFree, |
| Heap::kNew); |
| } |
| return ThrowTypeError(token_pos(), argument, type, name); |
| } |
| break; |
| } |
| // This should only fail if AreValidArguments returned a false positive. |
| ASSERT(param_index < num_parameters); |
| } |
| return Error::null(); |
| } |
| |
| // Helper that allocates a C string buffer in the zone, prints the fully |
| // qualified name of a function into it, and replaces ':' with '_' to make |
| // sure the constructed name is a valid C++ identifier for debugging |
| // purposes. |
| // Sets 'chars' to the allocated buffer and returns the number of characters |
| // written. |
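| // For example (illustrative): a getter function named "get:bar" has its ':' |
| // rewritten, so the appended portion reads "get_bar". |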
| |
| enum QualifiedFunctionLibKind { |
| kQualifiedFunctionLibKindLibUrl, |
| kQualifiedFunctionLibKindLibName |
| }; |
| |
| static intptr_t ConstructFunctionFullyQualifiedCString( |
| const Function& function, |
| char** chars, |
| intptr_t reserve_len, |
| bool with_lib, |
| QualifiedFunctionLibKind lib_kind) { |
| Zone* zone = Thread::Current()->zone(); |
| const char* name = String::Handle(zone, function.name()).ToCString(); |
| const char* function_format = (reserve_len == 0) ? "%s" : "%s_"; |
| reserve_len += Utils::SNPrint(nullptr, 0, function_format, name); |
| const Function& parent = Function::Handle(zone, function.parent_function()); |
| intptr_t written = 0; |
| if (parent.IsNull()) { |
| const Class& function_class = Class::Handle(zone, function.Owner()); |
| ASSERT(!function_class.IsNull()); |
| const char* class_name = |
| String::Handle(zone, function_class.Name()).ToCString(); |
| ASSERT(class_name != nullptr); |
| const char* library_name = nullptr; |
| const char* lib_class_format = nullptr; |
| if (with_lib) { |
| const Library& library = Library::Handle(zone, function_class.library()); |
| ASSERT(!library.IsNull()); |
| switch (lib_kind) { |
| case kQualifiedFunctionLibKindLibUrl: |
| library_name = String::Handle(zone, library.url()).ToCString(); |
| break; |
| case kQualifiedFunctionLibKindLibName: |
| library_name = String::Handle(zone, library.name()).ToCString(); |
| break; |
| default: |
| UNREACHABLE(); |
| } |
| ASSERT(library_name != nullptr); |
| lib_class_format = (library_name[0] == '\0') ? "%s%s_" : "%s_%s_"; |
| } else { |
| library_name = ""; |
| lib_class_format = "%s%s."; |
| } |
| reserve_len += |
| Utils::SNPrint(nullptr, 0, lib_class_format, library_name, class_name); |
| ASSERT(chars != nullptr); |
| *chars = zone->Alloc<char>(reserve_len + 1); |
| written = Utils::SNPrint(*chars, reserve_len + 1, lib_class_format, |
| library_name, class_name); |
| } else { |
| written = ConstructFunctionFullyQualifiedCString(parent, chars, reserve_len, |
| with_lib, lib_kind); |
| } |
| ASSERT(*chars != nullptr); |
| char* next = *chars + written; |
| written += Utils::SNPrint(next, reserve_len + 1, function_format, name); |
| // Replace ":" with "_". |
| while (true) { |
| next = strchr(next, ':'); |
| if (next == nullptr) break; |
| *next = '_'; |
| } |
| return written; |
| } |
| |
| const char* Function::ToFullyQualifiedCString() const { |
| char* chars = nullptr; |
| ConstructFunctionFullyQualifiedCString(*this, &chars, 0, true, |
| kQualifiedFunctionLibKindLibUrl); |
| return chars; |
| } |
| |
| const char* Function::ToLibNamePrefixedQualifiedCString() const { |
| char* chars = nullptr; |
| ConstructFunctionFullyQualifiedCString(*this, &chars, 0, true, |
| kQualifiedFunctionLibKindLibName); |
| return chars; |
| } |
| |
| const char* Function::ToQualifiedCString() const { |
| char* chars = nullptr; |
| ConstructFunctionFullyQualifiedCString(*this, &chars, 0, false, |
| kQualifiedFunctionLibKindLibUrl); |
| return chars; |
| } |
| |
| AbstractTypePtr FunctionType::InstantiateFrom( |
| const TypeArguments& instantiator_type_arguments, |
| const TypeArguments& function_type_arguments, |
| intptr_t num_free_fun_type_params, |
| Heap::Space space, |
| FunctionTypeMapping* function_type_mapping, |
| intptr_t num_parent_type_args_adjustment) const { |
| ASSERT(IsFinalized()); |
| Zone* zone = Thread::Current()->zone(); |
| const intptr_t num_parent_type_args = NumParentTypeArguments(); |
| bool delete_type_parameters = false; |
| if (num_free_fun_type_params == kCurrentAndEnclosingFree) { |
| // See the comment on kCurrentAndEnclosingFree to understand why we don't |
| // adjust 'num_free_fun_type_params' downward in this case. |
| num_free_fun_type_params = kAllFree; |
| delete_type_parameters = true; |
| } else { |
| ASSERT(!IsInstantiated(kAny, num_free_fun_type_params)); |
| // We only consider the function type parameters declared by the parents |
| // of this signature function as free. |
| if (num_parent_type_args < num_free_fun_type_params) { |
| num_free_fun_type_params = num_parent_type_args; |
| } |
| } |
| |
| // The number of parent type parameters that remain uninstantiated. |
| const intptr_t remaining_parent_type_params = |
| num_free_fun_type_params < num_parent_type_args |
| ? num_parent_type_args - num_free_fun_type_params |
| : 0; |
| |
| // Adjust number of parent type arguments for all nested substituted types. |
| num_parent_type_args_adjustment = |
| remaining_parent_type_params + |
| (delete_type_parameters ? 0 : NumTypeParameters()); |
| |
| FunctionType& sig = FunctionType::Handle( |
| FunctionType::New(remaining_parent_type_params, nullability(), space)); |
| AbstractType& type = AbstractType::Handle(zone); |
| |
| FunctionTypeMapping scope(zone, &function_type_mapping, *this, sig); |
| |
| // Copy the type parameters and instantiate their bounds and defaults. |
| if (!delete_type_parameters) { |
| const TypeParameters& type_params = |
| TypeParameters::Handle(zone, type_parameters()); |
| if (!type_params.IsNull()) { |
| const TypeParameters& sig_type_params = |
| TypeParameters::Handle(zone, TypeParameters::New()); |
| // No need to set names, which are ignored in a signature; however, the |
| // length of the names array defines the number of type parameters. |
| sig_type_params.set_names(Array::Handle(zone, type_params.names())); |
| sig_type_params.set_flags(Array::Handle(zone, type_params.flags())); |
| sig.SetTypeParameters(sig_type_params); |
| TypeArguments& type_args = TypeArguments::Handle(zone); |
| type_args = type_params.bounds(); |
| if (!type_args.IsNull() && !type_args.IsInstantiated()) { |
| type_args = type_args.InstantiateFrom( |
| instantiator_type_arguments, function_type_arguments, |
| num_free_fun_type_params, space, function_type_mapping, |
| num_parent_type_args_adjustment); |
| } |
| sig_type_params.set_bounds(type_args); |
| type_args = type_params.defaults(); |
| if (!type_args.IsNull() && !type_args.IsInstantiated()) { |
| type_args = type_args.InstantiateFrom( |
| instantiator_type_arguments, function_type_arguments, |
| num_free_fun_type_params, space, function_type_mapping, |
| num_parent_type_args_adjustment); |
| } |
| sig_type_params.set_defaults(type_args); |
| } |
| } |
| |
| type = result_type(); |
| if (!type.IsInstantiated()) { |
| type = type.InstantiateFrom( |
| instantiator_type_arguments, function_type_arguments, |
| num_free_fun_type_params, space, function_type_mapping, |
| num_parent_type_args_adjustment); |
| // A returned null type indicates a failed instantiation in dead code that |
| // must be propagated up to the caller, the optimizing compiler. |
| if (type.IsNull()) { |
| return FunctionType::null(); |
| } |
| } |
| sig.set_result_type(type); |
| const intptr_t num_params = NumParameters(); |
| sig.set_num_implicit_parameters(num_implicit_parameters()); |
| sig.set_num_fixed_parameters(num_fixed_parameters()); |
| sig.SetNumOptionalParameters(NumOptionalParameters(), |
| HasOptionalPositionalParameters()); |
| sig.set_parameter_types(Array::Handle(Array::New(num_params, space))); |
| for (intptr_t i = 0; i < num_params; i++) { |
| type = ParameterTypeAt(i); |
| if (!type.IsInstantiated()) { |
| type = type.InstantiateFrom( |
| instantiator_type_arguments, function_type_arguments, |
| num_free_fun_type_params, space, function_type_mapping, |
| num_parent_type_args_adjustment); |
| // A returned null type indicates a failed instantiation in dead code that |
| // must be propagated up to the caller, the optimizing compiler. |
| if (type.IsNull()) { |
| return FunctionType::null(); |
| } |
| } |
| sig.SetParameterTypeAt(i, type); |
| } |
| sig.set_named_parameter_names(Array::Handle(zone, named_parameter_names())); |
| |
| if (delete_type_parameters) { |
| ASSERT(sig.IsInstantiated(kFunctions)); |
| } |
| |
| sig.SetIsFinalized(); |
| |
| // Canonicalization is not part of instantiation. |
| return sig.ptr(); |
| } |
| |
| AbstractTypePtr FunctionType::UpdateFunctionTypes( |
| intptr_t num_parent_type_args_adjustment, |
| intptr_t num_free_fun_type_params, |
| Heap::Space space, |
| FunctionTypeMapping* function_type_mapping) const { |
| ASSERT(num_parent_type_args_adjustment >= 0); |
| ASSERT(IsFinalized()); |
| Zone* zone = Thread::Current()->zone(); |
| |
| const intptr_t old_num_parent_type_args = NumParentTypeArguments(); |
| // From now on, adjust all type parameter types |
| // which belong to this or nested function types. |
| if (num_free_fun_type_params > old_num_parent_type_args) { |
| num_free_fun_type_params = old_num_parent_type_args; |
| } |
| |
| FunctionType& new_type = FunctionType::Handle( |
| zone, FunctionType::New( |
| NumParentTypeArguments() + num_parent_type_args_adjustment, |
| nullability(), space)); |
| AbstractType& type = AbstractType::Handle(zone); |
| |
| FunctionTypeMapping scope(zone, &function_type_mapping, *this, new_type); |
| |
| const TypeParameters& type_params = |
| TypeParameters::Handle(zone, type_parameters()); |
| if (!type_params.IsNull()) { |
| const TypeParameters& new_type_params = |
| TypeParameters::Handle(zone, TypeParameters::New()); |
| // No need to set names, which are ignored in a signature; however, the |
| // length of the names array defines the number of type parameters. |
| new_type_params.set_names(Array::Handle(zone, type_params.names())); |
| new_type_params.set_flags(Array::Handle(zone, type_params.flags())); |
| TypeArguments& type_args = TypeArguments::Handle(zone); |
| type_args = type_params.bounds(); |
| if (!type_args.IsNull()) { |
| type_args = type_args.UpdateFunctionTypes(num_parent_type_args_adjustment, |
| num_free_fun_type_params, space, |
| function_type_mapping); |
| } |
| new_type_params.set_bounds(type_args); |
| type_args = type_params.defaults(); |
| if (!type_args.IsNull()) { |
| type_args = type_args.UpdateFunctionTypes(num_parent_type_args_adjustment, |
| num_free_fun_type_params, space, |
| function_type_mapping); |
| } |
| new_type_params.set_defaults(type_args); |
| new_type.SetTypeParameters(new_type_params); |
| } |
| |
| type = result_type(); |
| type = type.UpdateFunctionTypes(num_parent_type_args_adjustment, |
| num_free_fun_type_params, space, |
| function_type_mapping); |
| new_type.set_result_type(type); |
| |
| const intptr_t num_params = NumParameters(); |
| new_type.set_num_implicit_parameters(num_implicit_parameters()); |
| new_type.set_num_fixed_parameters(num_fixed_parameters()); |
| new_type.SetNumOptionalParameters(NumOptionalParameters(), |
| HasOptionalPositionalParameters()); |
| new_type.set_parameter_types(Array::Handle(Array::New(num_params, space))); |
| for (intptr_t i = 0; i < num_params; i++) { |
| type = ParameterTypeAt(i); |
| type = type.UpdateFunctionTypes(num_parent_type_args_adjustment, |
| num_free_fun_type_params, space, |
| function_type_mapping); |
| new_type.SetParameterTypeAt(i, type); |
| } |
| new_type.set_named_parameter_names( |
| Array::Handle(zone, named_parameter_names())); |
| new_type.SetIsFinalized(); |
| |
| return new_type.ptr(); |
| } |
| |
| // Checks if the type of the specified parameter of this signature is a |
| // supertype of the type of the specified parameter of the other signature |
| // (i.e. check parameter contravariance). |
| // Note that types marked as covariant are already dealt with in the front-end. |
| bool FunctionType::IsContravariantParameter( |
| intptr_t parameter_position, |
| const FunctionType& other, |
| intptr_t other_parameter_position, |
| Heap::Space space, |
| FunctionTypeMapping* function_type_equivalence) const { |
| const AbstractType& param_type = |
| AbstractType::Handle(ParameterTypeAt(parameter_position)); |
| if (param_type.IsTopTypeForSubtyping()) { |
| return true; |
| } |
| const AbstractType& other_param_type = |
| AbstractType::Handle(other.ParameterTypeAt(other_parameter_position)); |
| return other_param_type.IsSubtypeOf(param_type, space, |
| function_type_equivalence); |
| } |
| |
| bool FunctionType::HasSameTypeParametersAndBounds( |
| const FunctionType& other, |
| TypeEquality kind, |
| FunctionTypeMapping* function_type_equivalence) const { |
| Zone* const zone = Thread::Current()->zone(); |
| TRACE_TYPE_CHECKS_VERBOSE( |
| " FunctionType::HasSameTypeParametersAndBounds(%s, %s)\n", ToCString(), |
| other.ToCString()); |
| |
| const intptr_t num_type_params = NumTypeParameters(); |
| if (num_type_params != other.NumTypeParameters()) { |
| TRACE_TYPE_CHECKS_VERBOSE( |
| " - result: false (number of type parameters)\n"); |
| return false; |
| } |
| if (num_type_params > 0) { |
| const TypeParameters& type_params = |
| TypeParameters::Handle(zone, type_parameters()); |
| ASSERT(!type_params.IsNull()); |
| const TypeParameters& other_type_params = |
| TypeParameters::Handle(zone, other.type_parameters()); |
| ASSERT(!other_type_params.IsNull()); |
| if (kind == TypeEquality::kInSubtypeTest) { |
| if (!type_params.AllDynamicBounds() || |
| !other_type_params.AllDynamicBounds()) { |
| AbstractType& bound = AbstractType::Handle(zone); |
| AbstractType& other_bound = AbstractType::Handle(zone); |
| for (intptr_t i = 0; i < num_type_params; i++) { |
| bound = type_params.BoundAt(i); |
| other_bound = other_type_params.BoundAt(i); |
| // Bounds that are mutual subtypes are considered equal. |
| if (!bound.IsSubtypeOf(other_bound, Heap::kOld, |
| function_type_equivalence) || |
| !other_bound.IsSubtypeOf(bound, Heap::kOld, |
| function_type_equivalence)) { |
| TRACE_TYPE_CHECKS_VERBOSE( |
| " - result: false (bounds are not mutual subtypes)\n"); |
| return false; |
| } |
| } |
| } |
| } else { |
| if (NumParentTypeArguments() != other.NumParentTypeArguments()) { |
| TRACE_TYPE_CHECKS_VERBOSE( |
| " - result: false (mismatch in number of type arguments)\n"); |
| return false; |
| } |
| const TypeArguments& bounds = |
| TypeArguments::Handle(zone, type_params.bounds()); |
| const TypeArguments& other_bounds = |
| TypeArguments::Handle(zone, other_type_params.bounds()); |
| if (!bounds.IsEquivalent(other_bounds, kind, function_type_equivalence)) { |
| TRACE_TYPE_CHECKS_VERBOSE( |
| " - result: false (bounds are not equivalent)\n"); |
| return false; |
| } |
| if (kind == TypeEquality::kCanonical) { |
| // Compare default arguments. |
| const TypeArguments& defaults = |
| TypeArguments::Handle(zone, type_params.defaults()); |
| const TypeArguments& other_defaults = |
| TypeArguments::Handle(zone, other_type_params.defaults()); |
| if (defaults.IsNull()) { |
| if (!other_defaults.IsNull()) { |
| TRACE_TYPE_CHECKS_VERBOSE( |
| " - result: false (mismatch in defaults)\n"); |
| return false; |
| } |
| } else if (!defaults.IsEquivalent(other_defaults, kind, |
| function_type_equivalence)) { |
| TRACE_TYPE_CHECKS_VERBOSE( |
| " - result: false (default types are not equivalent)\n"); |
| return false; |
| } |
| } |
| } |
| if (kind != TypeEquality::kInSubtypeTest) { |
| // Compare flags (IsGenericCovariantImpl). |
| if (!Array::Equals(type_params.flags(), other_type_params.flags())) { |
| TRACE_TYPE_CHECKS_VERBOSE(" - result: false (flags are not equal)\n"); |
| return false; |
| } |
| } |
| } |
| TRACE_TYPE_CHECKS_VERBOSE(" - result: true\n"); |
| return true; |
| } |
| |
| bool FunctionType::IsSubtypeOf( |
| const FunctionType& other, |
| Heap::Space space, |
| FunctionTypeMapping* function_type_equivalence) const { |
| TRACE_TYPE_CHECKS_VERBOSE(" FunctionType::IsSubtypeOf(%s, %s)\n", |
| ToCString(), other.ToCString()); |
| const intptr_t num_fixed_params = num_fixed_parameters(); |
| const intptr_t num_opt_pos_params = NumOptionalPositionalParameters(); |
| const intptr_t num_opt_named_params = NumOptionalNamedParameters(); |
| const intptr_t other_num_fixed_params = other.num_fixed_parameters(); |
| const intptr_t other_num_opt_pos_params = |
| other.NumOptionalPositionalParameters(); |
| const intptr_t other_num_opt_named_params = |
| other.NumOptionalNamedParameters(); |
| // This signature requires the same number of arguments or fewer and accepts |
| // the same number of arguments or more. We can ignore implicit parameters. |
| const intptr_t num_ignored_params = num_implicit_parameters(); |
| const intptr_t other_num_ignored_params = other.num_implicit_parameters(); |
| if (((num_fixed_params - num_ignored_params) > |
| (other_num_fixed_params - other_num_ignored_params)) || |
| ((num_fixed_params - num_ignored_params + num_opt_pos_params) < |
| (other_num_fixed_params - other_num_ignored_params + |
| other_num_opt_pos_params)) || |
| (num_opt_named_params < other_num_opt_named_params)) { |
| TRACE_TYPE_CHECKS_VERBOSE( |
| " - result: false (mismatch in number of parameters)\n"); |
| return false; |
| } |
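| // For example, `int Function([int])` is a subtype of `int Function(int)`: |
| // it requires no more fixed parameters and accepts at least as many |
| // positional parameters. |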
| Thread* thread = Thread::Current(); |
| Zone* zone = thread->zone(); |
| FunctionTypeMapping scope(zone, &function_type_equivalence, *this, other); |
| |
| // Check the type parameters and bounds of generic functions. |
| if (!HasSameTypeParametersAndBounds(other, TypeEquality::kInSubtypeTest, |
| function_type_equivalence)) { |
| TRACE_TYPE_CHECKS_VERBOSE( |
| " - result: false (mismatch in type parameters)\n"); |
| return false; |
| } |
| // Check the result type. |
| const AbstractType& other_res_type = |
| AbstractType::Handle(zone, other.result_type()); |
| // 'void Function()' is a subtype of 'Object Function()'. |
| if (!other_res_type.IsTopTypeForSubtyping()) { |
| const AbstractType& res_type = AbstractType::Handle(zone, result_type()); |
| if (!res_type.IsSubtypeOf(other_res_type, space, |
| function_type_equivalence)) { |
| TRACE_TYPE_CHECKS_VERBOSE(" - result: false (result type)\n"); |
| return false; |
| } |
| } |
| // Check the types of fixed and optional positional parameters. |
| for (intptr_t i = 0; i < (other_num_fixed_params - other_num_ignored_params + |
| other_num_opt_pos_params); |
| i++) { |
| if (!IsContravariantParameter(i + num_ignored_params, other, |
| i + other_num_ignored_params, space, |
| function_type_equivalence)) { |
| TRACE_TYPE_CHECKS_VERBOSE(" - result: false (parameter type)\n"); |
| return false; |
| } |
| } |
| // Check that for each optional named parameter of type T of the other |
| // function type, there exists an optional named parameter of this function |
| // type with an identical name and with a type S that is a supertype of T. |
| // Note that SetParameterNameAt() guarantees that names are symbols, so we |
| // can compare their raw pointers. |
| const int num_params = num_fixed_params + num_opt_named_params; |
| const int other_num_params = |
| other_num_fixed_params + other_num_opt_named_params; |
| bool found_param_name; |
| String& other_param_name = String::Handle(zone); |
| for (intptr_t i = other_num_fixed_params; i < other_num_params; i++) { |
| other_param_name = other.ParameterNameAt(i); |
| ASSERT(other_param_name.IsSymbol()); |
| found_param_name = false; |
| for (intptr_t j = num_fixed_params; j < num_params; j++) { |
| ASSERT(String::Handle(zone, ParameterNameAt(j)).IsSymbol()); |
| if (ParameterNameAt(j) == other_param_name.ptr()) { |
| found_param_name = true; |
| if (!IsContravariantParameter(j, other, i, space, |
| function_type_equivalence)) { |
| TRACE_TYPE_CHECKS_VERBOSE( |
| " - result: false (optional parameter type)\n"); |
| return false; |
| } |
| break; |
| } |
| } |
| if (!found_param_name) { |
| TRACE_TYPE_CHECKS_VERBOSE( |
| " - result: false (named parameter not found)\n"); |
| return false; |
| } |
| } |
| // Check that for each required named parameter in this function, there's a |
| // corresponding required named parameter in the other function. |
| String& param_name = other_param_name; |
| for (intptr_t j = num_params - num_opt_named_params; j < num_params; j++) { |
| if (IsRequiredAt(j)) { |
| param_name = ParameterNameAt(j); |
| ASSERT(param_name.IsSymbol()); |
| bool found = false; |
| for (intptr_t i = other_num_fixed_params; i < other_num_params; i++) { |
| ASSERT(String::Handle(zone, other.ParameterNameAt(i)).IsSymbol()); |
| if (other.ParameterNameAt(i) == param_name.ptr()) { |
| found = true; |
| if (!other.IsRequiredAt(i)) { |
| TRACE_TYPE_CHECKS_VERBOSE( |
| " - result: false (mismatch in required named " |
| "parameters)\n"); |
| return false; |
| } |
| } |
| } |
| if (!found) { |
| TRACE_TYPE_CHECKS_VERBOSE( |
| " - result: false (required named parameter not found)\n"); |
| return false; |
| } |
| } |
| } |
| TRACE_TYPE_CHECKS_VERBOSE(" - result: true\n"); |
| return true; |
| } |
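| |
| // The rules above match assignability of function values in Dart. For |
| // example (illustrative Dart, names are examples only): |
| // |
| //   void takeNum(num n) {} |
| //   void Function(int) f = takeNum; // OK: parameters are contravariant. |
| // |
| //   int giveInt() => 0; |
| //   num Function() g = giveInt; // OK: result types are covariant. |
| // |
| //   void log(String m, {int level = 0}) {} |
| //   void Function(String, {int level}) h = log; // OK: named param matches. |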
| |
| // The compiler generates an implicit constructor if a class definition |
| // does not contain an explicit constructor or factory. The implicit |
| // constructor has the same token position as the owner class. |
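| // For example (illustrative Dart): |
| // |
| //   class C {} // An implicit `C()` constructor is generated here. |
| //   class D { D(); } // Explicit constructor: nothing is synthesized. |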
| bool Function::IsImplicitConstructor() const { |
| return IsGenerativeConstructor() && (token_pos() == end_token_pos()); |
| } |
| |
| bool Function::IsImplicitStaticClosureFunction(FunctionPtr func) { |
| NoSafepointScope no_safepoint; |
| uint32_t kind_tag = func->untag()->kind_tag_.load(std::memory_order_relaxed); |
| return (KindBits::decode(kind_tag) == |
| UntaggedFunction::kImplicitClosureFunction) && |
| StaticBit::decode(kind_tag); |
| } |
| |
| bool Function::IsImplicitInstanceClosureFunction(FunctionPtr func) { |
| NoSafepointScope no_safepoint; |
| uint32_t kind_tag = func->untag()->kind_tag_.load(std::memory_order_relaxed); |
| return (KindBits::decode(kind_tag) == |
| UntaggedFunction::kImplicitClosureFunction) && |
| !StaticBit::decode(kind_tag); |
| } |
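| |
| // Both predicates above classify tear-offs. For example (illustrative |
| // Dart): |
| // |
| //   class A { |
| //     static void s() {} |
| //     void m() {} |
| //   } |
| //   var f = A.s; // Implicit *static* closure function (StaticBit set). |
| //   var g = A().m; // Implicit *instance* closure function; the closure |
| //                  // captures the receiver `A()`. |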
| |
| FunctionPtr Function::New(Heap::Space space) { |
| ASSERT(Object::function_class() != Class::null()); |
| return Object::Allocate<Function>(space); |
| } |
| |
| FunctionPtr Function::New(const FunctionType& signature, |
| const String& name, |
| UntaggedFunction::Kind kind, |
| bool is_static, |
| bool is_const, |
| bool is_abstract, |
| bool is_external, |
| bool is_native, |
| const Object& owner, |
| TokenPosition token_pos, |
| Heap::Space space) { |
| ASSERT(!owner.IsNull()); |
| ASSERT(!signature.IsNull()); |
| const Function& result = Function::Handle(Function::New(space)); |
| result.set_kind_tag(0); // Ensure determinism of uninitialized bits. |
| result.set_name(name); |
| result.set_kind(kind); |
| result.set_recognized_kind(MethodRecognizer::kUnknown); |
| result.set_modifier(UntaggedFunction::kNoModifier); |
| result.set_is_static(is_static); |
| result.set_is_const(is_const); |
| result.set_is_abstract(is_abstract); |
| result.set_is_external(is_external); |
| result.set_is_native(is_native); |
| result.set_is_reflectable(true); // Will be computed later. |
| result.set_is_visible(true); // Will be computed later. |
| result.set_is_debuggable(true); // Will be computed later. |
| result.set_is_intrinsic(false); |
| result.set_has_pragma(false); |
| result.set_is_polymorphic_target(false); |
| result.set_is_synthetic(false); |
| NOT_IN_PRECOMPILED(result.set_state_bits(0)); |
| result.set_owner(owner); |
| NOT_IN_PRECOMPILED(result.set_token_pos(token_pos)); |
| NOT_IN_PRECOMPILED(result.set_end_token_pos(token_pos)); |
| NOT_IN_PRECOMPILED(result.set_usage_counter(0)); |
| NOT_IN_PRECOMPILED(result.set_deoptimization_counter(0)); |
| NOT_IN_PRECOMPILED(result.set_optimized_instruction_count(0)); |
| NOT_IN_PRECOMPILED(result.set_optimized_call_site_count(0)); |
| NOT_IN_PRECOMPILED(result.set_inlining_depth(0)); |
| NOT_IN_PRECOMPILED(result.set_kernel_offset(0)); |
| NOT_IN_PRECOMPILED(result.set_is_optimizable(!is_native)); |
| result.set_is_inlinable(true); |
| result.reset_unboxed_parameters_and_return(); |
| result.SetInstructionsSafe(StubCode::LazyCompile()); |
| |
| // See Function::set_data() for more information. |
| if (kind == UntaggedFunction::kClosureFunction || |
| kind == UntaggedFunction::kImplicitClosureFunction) { |
| ASSERT(space == Heap::kOld); |
| const ClosureData& data = ClosureData::Handle(ClosureData::New()); |
| data.set_awaiter_link({}); |
| result.set_data(data); |
| } else if (kind == UntaggedFunction::kFfiTrampoline) { |
| const FfiTrampolineData& data = |
| FfiTrampolineData::Handle(FfiTrampolineData::New()); |
| result.set_data(data); |
| } else if (result.is_old_native()) { |
| const auto& data = |
| Array::Handle(Array::New(NativeFunctionData::kLength, Heap::kOld)); |
| result.set_data(data); |
| } else { |
| // Functions other than signature functions have no reason to be allocated |
| // in new space. |
| ASSERT(space == Heap::kOld); |
| } |
| |
| // Force-optimized functions are not debuggable because they cannot |
| // deoptimize. |
| if (result.ForceOptimize()) { |
| result.set_is_debuggable(false); |
| } |
| signature.set_num_implicit_parameters(result.NumImplicitParameters()); |
| result.SetSignature(signature); |
| NOT_IN_PRECOMPILED( |
| result.set_positional_parameter_names(Object::empty_array())); |
| return result.ptr(); |
| } |
| |
| FunctionPtr Function::NewClosureFunctionWithKind(UntaggedFunction::Kind kind, |
| const String& name, |
| const Function& parent, |
| bool is_static, |
| TokenPosition token_pos, |
| const Object& owner) { |
| ASSERT((kind == UntaggedFunction::kClosureFunction) || |
| (kind == UntaggedFunction::kImplicitClosureFunction)); |
| ASSERT(!parent.IsNull()); |
| ASSERT(!owner.IsNull()); |
| const FunctionType& signature = FunctionType::Handle(FunctionType::New( |
| kind == UntaggedFunction::kClosureFunction ? parent.NumTypeArguments() |
| : 0)); |
| const Function& result = Function::Handle( |
| Function::New(signature, name, kind, |
| /* is_static = */ is_static, |
| /* is_const = */ false, |
| /* is_abstract = */ false, |
| /* is_external = */ false, |
| /* is_native = */ false, owner, token_pos)); |
| result.set_parent_function(parent); |
| return result.ptr(); |
| } |
| |
| FunctionPtr Function::NewClosureFunction(const String& name, |
| const Function& parent, |
| TokenPosition token_pos) { |
| // Use the owner defining the parent function and not the class containing it. |
| const Object& parent_owner = Object::Handle(parent.RawOwner()); |
| return NewClosureFunctionWithKind(UntaggedFunction::kClosureFunction, name, |
| parent, parent.is_static(), token_pos, |
| parent_owner); |
| } |
| |
| FunctionPtr Function::NewImplicitClosureFunction(const String& name, |
| const Function& parent, |
| TokenPosition token_pos) { |
| // Use the owner defining the parent function and not the class containing it. |
| const Object& parent_owner = Object::Handle(parent.RawOwner()); |
| return NewClosureFunctionWithKind( |
| UntaggedFunction::kImplicitClosureFunction, name, parent, |
| parent.is_static() || parent.IsConstructor(), token_pos, parent_owner); |
| } |
| |
| bool Function::SafeToClosurize() const { |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| return HasImplicitClosureFunction(); |
| #else |
| return true; |
| #endif |
| } |
| |
| bool Function::IsDynamicClosureCallDispatcher(Thread* thread) const { |
| if (!IsInvokeFieldDispatcher()) return false; |
| if (thread->isolate_group()->object_store()->closure_class() != Owner()) { |
| return false; |
| } |
| const auto& handle = String::Handle(thread->zone(), name()); |
| return handle.Equals(Symbols::DynamicCall()); |
| } |
| |
| FunctionPtr Function::ImplicitClosureFunction() const { |
| // Return the existing implicit closure function if any. |
| if (implicit_closure_function() != Function::null()) { |
| return implicit_closure_function(); |
| } |
| |
| #if defined(DART_PRECOMPILED_RUNTIME) && !defined(DART_DYNAMIC_MODULES) |
| // In AOT mode all implicit closures are pre-created. |
| FATAL("Cannot create implicit closure in AOT!"); |
| return Function::null(); |
| #else |
| ASSERT(!IsClosureFunction()); |
| Thread* thread = Thread::Current(); |
| SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock()); |
| |
| if (implicit_closure_function() != Function::null()) { |
| return implicit_closure_function(); |
| } |
| |
| // Create closure function. |
| Zone* zone = thread->zone(); |
| const String& closure_name = String::Handle(zone, name()); |
| const Function& closure_function = Function::Handle( |
| zone, NewImplicitClosureFunction(closure_name, *this, token_pos())); |
| |
| // Set closure function's context scope. |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| if (is_static() || IsConstructor()) { |
| closure_function.set_context_scope(Object::empty_context_scope()); |
| } else { |
| const ContextScope& context_scope = ContextScope::Handle( |
| zone, LocalScope::CreateImplicitClosureScope(*this)); |
| closure_function.set_context_scope(context_scope); |
| } |
| #endif |
| |
| FunctionType& closure_signature = |
| FunctionType::Handle(zone, closure_function.signature()); |
| |
| const auto& cls = Class::Handle(zone, Owner()); |
| |
| if (!is_static() && !IsConstructor() && |
| StackTraceUtils::IsPossibleAwaiterLink(cls)) { |
| closure_function.set_awaiter_link({0, 0}); |
| } |
| |
| const intptr_t num_type_params = |
| IsConstructor() ? cls.NumTypeParameters() : NumTypeParameters(); |
| |
| TypeArguments& instantiator_type_arguments = TypeArguments::Handle(zone); |
| TypeArguments& function_type_arguments = TypeArguments::Handle(zone); |
| |
| FunctionTypeMapping* function_type_mapping = nullptr; |
| FunctionTypeMapping scope(zone, &function_type_mapping, |
| FunctionType::Handle(zone, signature()), |
| closure_signature); |
| |
| auto transform_type = [&](AbstractType& type) { |
| if (num_type_params > 0) { |
| if (IsConstructor()) { |
| type = type.UpdateFunctionTypes(num_type_params, kAllFree, Heap::kOld, |
| nullptr); |
| if (!type.IsInstantiated(kCurrentClass)) { |
| type = type.InstantiateFrom( |
| instantiator_type_arguments, function_type_arguments, |
| kNoneFree /* avoid truncating parent type args */, Heap::kOld); |
| } |
| } else { |
| type = type.UpdateFunctionTypes(0, kNoneFree, Heap::kOld, |
| function_type_mapping); |
| } |
| } |
| }; |
| |
| auto transform_type_args = [&](TypeArguments& type_args) { |
| ASSERT(num_type_params > 0); |
| if (!type_args.IsNull()) { |
| if (IsConstructor()) { |
| type_args = type_args.UpdateFunctionTypes(num_type_params, kAllFree, |
| Heap::kOld, nullptr); |
| if (!type_args.IsInstantiated(kCurrentClass)) { |
| type_args = type_args.InstantiateFrom( |
| instantiator_type_arguments, function_type_arguments, |
| kNoneFree /* avoid truncating parent type args */, Heap::kOld); |
| } |
| } else { |
| type_args = type_args.UpdateFunctionTypes(0, kNoneFree, Heap::kOld, |
| function_type_mapping); |
| } |
| } |
| }; |
| |
| // Set closure function's type parameters. |
| if (num_type_params > 0) { |
| const TypeParameters& old_type_params = TypeParameters::Handle( |
| zone, IsConstructor() ? cls.type_parameters() : type_parameters()); |
| const TypeParameters& new_type_params = |
| TypeParameters::Handle(zone, TypeParameters::New()); |
| // No need to set names that are ignored in a signature; however, the |
| // length of the names array defines the number of type parameters. |
| new_type_params.set_names(Array::Handle(zone, old_type_params.names())); |
| new_type_params.set_flags(Array::Handle(zone, old_type_params.flags())); |
| |
| closure_signature.SetTypeParameters(new_type_params); |
| ASSERT(closure_signature.NumTypeParameters() == num_type_params); |
| |
| TypeArguments& type_args = TypeArguments::Handle(zone); |
| type_args = TypeArguments::New(num_type_params); |
| TypeParameter& type_param = TypeParameter::Handle(zone); |
| for (intptr_t i = 0; i < num_type_params; i++) { |
| type_param = closure_signature.TypeParameterAt(i); |
| type_args.SetTypeAt(i, type_param); |
| } |
| |
| if (IsConstructor()) { |
| instantiator_type_arguments = |
| type_args.ToInstantiatorTypeArguments(thread, cls); |
| } else { |
| ASSERT(NumTypeArguments() == type_args.Length()); |
| function_type_arguments = type_args.ptr(); |
| } |
| |
| type_args = old_type_params.bounds(); |
| transform_type_args(type_args); |
| new_type_params.set_bounds(type_args); |
| |
| type_args = old_type_params.defaults(); |
| transform_type_args(type_args); |
| new_type_params.set_defaults(type_args); |
| } |
| |
| // Set closure function's result type. |
| AbstractType& result_type = AbstractType::Handle(zone); |
| if (IsConstructor()) { |
| result_type = cls.DeclarationType(); |
| } else { |
| result_type = this->result_type(); |
| } |
| transform_type(result_type); |
| closure_signature.set_result_type(result_type); |
| |
| // Set the closure function's end token to this function's end token. |
| NOT_IN_PRECOMPILED(closure_function.set_end_token_pos(end_token_pos())); |
| |
| // The closurized method stub just calls into the original method and should |
| // therefore be skipped by the debugger and in stack traces. |
| closure_function.set_is_debuggable(false); |
| closure_function.set_is_visible(false); |
| |
| // Set the closure function's formal parameters to this function's formal |
| // parameters, removing the receiver if this is an instance method and |
| // adding the closure object as the first parameter. |
| const int kClosure = 1; |
| const int num_implicit_params = NumImplicitParameters(); |
| const int num_fixed_params = |
| kClosure - num_implicit_params + num_fixed_parameters(); |
| const int num_opt_params = NumOptionalParameters(); |
| const bool has_opt_pos_params = HasOptionalPositionalParameters(); |
| const int num_params = num_fixed_params + num_opt_params; |
| const int num_pos_params = has_opt_pos_params ? num_params : num_fixed_params; |
| closure_signature.set_num_fixed_parameters(num_fixed_params); |
| closure_signature.SetNumOptionalParameters(num_opt_params, |
| has_opt_pos_params); |
| closure_signature.set_parameter_types( |
| Array::Handle(zone, Array::New(num_params, Heap::kOld))); |
| NOT_IN_PRECOMPILED(closure_function.CreateNameArray()); |
| closure_signature.CreateNameArrayIncludingFlags(); |
| AbstractType& param_type = AbstractType::Handle(zone); |
| String& param_name = String::Handle(zone); |
| // Add implicit closure object parameter. |
| param_type = Type::DynamicType(); |
| closure_signature.SetParameterTypeAt(0, param_type); |
| NOT_IN_PRECOMPILED( |
| closure_function.SetParameterNameAt(0, Symbols::ClosureParameter())); |
| for (int i = kClosure; i < num_pos_params; i++) { |
| param_type = ParameterTypeAt(num_implicit_params - kClosure + i); |
| transform_type(param_type); |
| closure_signature.SetParameterTypeAt(i, param_type); |
| param_name = ParameterNameAt(num_implicit_params - kClosure + i); |
| // Set the name in the function for positional parameters. |
| NOT_IN_PRECOMPILED(closure_function.SetParameterNameAt(i, param_name)); |
| } |
| for (int i = num_pos_params; i < num_params; i++) { |
| param_type = ParameterTypeAt(num_implicit_params - kClosure + i); |
| transform_type(param_type); |
| closure_signature.SetParameterTypeAt(i, param_type); |
| param_name = ParameterNameAt(num_implicit_params - kClosure + i); |
| // Set the name in the signature for named parameters. |
| closure_signature.SetParameterNameAt(i, param_name); |
| if (IsRequiredAt(num_implicit_params - kClosure + i)) { |
| closure_signature.SetIsRequiredAt(i); |
| } |
| } |
| closure_signature.FinalizeNameArray(); |
| closure_function.InheritKernelOffsetFrom(*this); |
| |
| if (!is_static() && !IsConstructor()) { |
| // Change covariant parameter types to Object?. |
| BitVector is_covariant(zone, NumParameters()); |
| BitVector is_generic_covariant_impl(zone, NumParameters()); |
| ReadParameterCovariance(&is_covariant, &is_generic_covariant_impl); |
| |
| ObjectStore* object_store = IsolateGroup::Current()->object_store(); |
| const auto& object_type = |
| Type::Handle(zone, object_store->nullable_object_type()); |
| ASSERT(object_type.IsCanonical()); |
| for (intptr_t i = kClosure; i < num_params; ++i) { |
| const intptr_t original_param_index = num_implicit_params - kClosure + i; |
| if (is_covariant.Contains(original_param_index) || |
| is_generic_covariant_impl.Contains(original_param_index)) { |
| closure_signature.SetParameterTypeAt(i, object_type); |
| } |
| } |
| } |
| |
| #if defined(DART_DYNAMIC_MODULES) |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| const bool attach_bytecode = true; |
| #else |
| const bool attach_bytecode = is_declared_in_bytecode(); |
| #endif |
| if (attach_bytecode) { |
| if (is_static()) { |
| closure_function.AttachBytecode( |
| Object::implicit_static_closure_bytecode()); |
| } else if (IsConstructor()) { |
| closure_function.AttachBytecode( |
| Object::implicit_constructor_closure_bytecode()); |
| } else { |
| closure_function.AttachBytecode( |
| Object::implicit_instance_closure_bytecode()); |
| } |
| } |
| #endif |
| |
| ASSERT(!closure_signature.IsFinalized()); |
| closure_signature ^= ClassFinalizer::FinalizeType(closure_signature); |
| closure_function.SetSignature(closure_signature); |
| set_implicit_closure_function(closure_function); |
| ASSERT(closure_function.IsImplicitClosureFunction()); |
| ASSERT(HasImplicitClosureFunction()); |
| return closure_function.ptr(); |
| #endif // defined(DART_PRECOMPILED_RUNTIME) && !defined(DART_DYNAMIC_MODULES) |
| } |
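| |
| // As an illustration of the signature transformation above (Dart, names |
| // are examples only): tearing off a generic class' constructor turns the |
| // class type parameters into function type parameters of the closure: |
| // |
| //   class Box<T> { |
| //     Box(T value); |
| //   } |
| //   // The tear-off below has type `Box<T> Function<T>(T value)`: the |
| //   // result type and parameter types are rewritten in terms of the new |
| //   // function type parameters. |
| //   var makeBox = Box.new; |
| //   Box<int> b = makeBox(42); |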
| |
| void Function::DropUncompiledImplicitClosureFunction() const { |
| if (implicit_closure_function() != Function::null()) { |
| const Function& func = Function::Handle(implicit_closure_function()); |
| if (!func.HasCode()) { |
| set_implicit_closure_function(Function::Handle()); |
| } |
| } |
| } |
| |
| StringPtr Function::InternalSignature() const { |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| if (signature() == FunctionType::null()) { |
| return String::null(); |
| } |
| #endif |
| Thread* thread = Thread::Current(); |
| ZoneTextBuffer printer(thread->zone()); |
| const FunctionType& sig = FunctionType::Handle(signature()); |
| sig.Print(kInternalName, &printer); |
| return Symbols::New(thread, printer.buffer()); |
| } |
| |
| StringPtr Function::UserVisibleSignature() const { |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| if (signature() == FunctionType::null()) { |
| return String::null(); |
| } |
| #endif |
| Thread* thread = Thread::Current(); |
| ZoneTextBuffer printer(thread->zone()); |
| const FunctionType& sig = FunctionType::Handle(signature()); |
| sig.Print(kUserVisibleName, &printer); |
| return Symbols::New(thread, printer.buffer()); |
| } |
| |
| void FunctionType::PrintParameters(Thread* thread, |
| Zone* zone, |
| NameVisibility name_visibility, |
| BaseTextBuffer* printer) const { |
| AbstractType& param_type = AbstractType::Handle(zone); |
| const intptr_t num_params = NumParameters(); |
| const intptr_t num_fixed_params = num_fixed_parameters(); |
| const intptr_t num_opt_pos_params = NumOptionalPositionalParameters(); |
| const intptr_t num_opt_named_params = NumOptionalNamedParameters(); |
| const intptr_t num_opt_params = num_opt_pos_params + num_opt_named_params; |
| ASSERT((num_fixed_params + num_opt_params) == num_params); |
| intptr_t i = 0; |
| if (name_visibility == kUserVisibleName) { |
| // Hide implicit parameters. |
| i = num_implicit_parameters(); |
| } |
| String& name = String::Handle(zone); |
| while (i < num_fixed_params) { |
| param_type = ParameterTypeAt(i); |
| ASSERT(!param_type.IsNull()); |
| param_type.PrintName(name_visibility, printer); |
| if (i != (num_params - 1)) { |
| printer->AddString(", "); |
| } |
| i++; |
| } |
| if (num_opt_params > 0) { |
| if (num_opt_pos_params > 0) { |
| printer->AddString("["); |
| } else { |
| printer->AddString("{"); |
| } |
| for (intptr_t i = num_fixed_params; i < num_params; i++) { |
| if (num_opt_named_params > 0 && IsRequiredAt(i)) { |
| printer->AddString("required "); |
| } |
| param_type = ParameterTypeAt(i); |
| ASSERT(!param_type.IsNull()); |
| param_type.PrintName(name_visibility, printer); |
| // The parameter name of an optional positional parameter does not need |
| // to be part of the signature, since it is not used. |
| if (num_opt_named_params > 0) { |
| name = ParameterNameAt(i); |
| printer->AddString(" "); |
| printer->AddString(name.ToCString()); |
| } |
| if (i != (num_params - 1)) { |
| printer->AddString(", "); |
| } |
| } |
| if (num_opt_pos_params > 0) { |
| printer->AddString("]"); |
| } else { |
| printer->AddString("}"); |
| } |
| } |
| } |
| |
| ClosurePtr Function::ImplicitStaticClosure() const { |
| ASSERT(IsImplicitStaticClosureFunction()); |
| if (implicit_static_closure() != Closure::null()) { |
| return implicit_static_closure(); |
| } |
| |
| auto thread = Thread::Current(); |
| SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock()); |
| |
| if (implicit_static_closure() != Closure::null()) { |
| return implicit_static_closure(); |
| } |
| |
| Zone* zone = thread->zone(); |
| const auto& closure = |
| Closure::Handle(zone, Closure::New(Object::null_type_arguments(), |
| Object::null_type_arguments(), *this, |
| Object::null_object(), Heap::kOld)); |
| set_implicit_static_closure(closure); |
| return implicit_static_closure(); |
| } |
| |
| ClosurePtr Function::ImplicitInstanceClosure(const Instance& receiver) const { |
| ASSERT(IsImplicitClosureFunction()); |
| Zone* zone = Thread::Current()->zone(); |
| TypeArguments& instantiator_type_arguments = TypeArguments::Handle(zone); |
| if (!HasInstantiatedSignature(kCurrentClass)) { |
| instantiator_type_arguments = receiver.GetTypeArguments(); |
| } |
| ASSERT(!HasGenericParent()); // No generic parent function. |
| return Closure::New(instantiator_type_arguments, |
| Object::null_type_arguments(), *this, receiver); |
| } |
| |
| FunctionPtr Function::ImplicitClosureTarget(Zone* zone) const { |
| const auto& parent = Function::Handle(zone, parent_function()); |
| const auto& func_name = String::Handle(zone, parent.name()); |
| const auto& owner = Class::Handle(zone, parent.Owner()); |
| Thread* thread = Thread::Current(); |
| const auto& error = owner.EnsureIsFinalized(thread); |
| ASSERT(error == Error::null()); |
| auto& target = |
| Function::Handle(zone, Resolver::ResolveFunction(zone, owner, func_name)); |
| |
| if (!target.IsNull() && (target.ptr() != parent.ptr())) { |
| DEBUG_ASSERT(IsolateGroup::Current()->HasAttemptedReload()); |
| if ((target.is_static() != parent.is_static()) || |
| (target.kind() != parent.kind())) { |
| target = Function::null(); |
| } |
| } |
| |
| return target.ptr(); |
| } |
| |
| void FunctionType::Print(NameVisibility name_visibility, |
| BaseTextBuffer* printer) const { |
| if (IsNull()) { |
| printer->AddString("null"); // Signature optimized out in precompiler. |
| return; |
| } |
| Thread* thread = Thread::Current(); |
| Zone* zone = thread->zone(); |
| const TypeParameters& type_params = |
| TypeParameters::Handle(zone, type_parameters()); |
| if (!type_params.IsNull()) { |
| printer->AddString("<"); |
| const intptr_t base = NumParentTypeArguments(); |
| const bool kIsClassTypeParameter = false; |
| // Type parameter names are meaningless after canonicalization. |
| type_params.Print(thread, zone, kIsClassTypeParameter, base, |
| name_visibility, printer); |
| printer->AddString(">"); |
| } |
| printer->AddString("("); |
| PrintParameters(thread, zone, name_visibility, printer); |
| printer->AddString(") => "); |
| const AbstractType& res_type = AbstractType::Handle(zone, result_type()); |
| if (!res_type.IsNull()) { |
| res_type.PrintName(name_visibility, printer); |
| } else { |
| printer->AddString("null"); |
| } |
| } |
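| |
| // For reference, the printed form produced above looks like (illustrative |
| // examples): |
| // |
| //   <T>(int, [String]) => T |
| //   (int, {required int level}) => void |
| // |
| // i.e. type parameters in angle brackets, optional positional parameters |
| // in '[]', named parameters (with their names) in '{}', then the result |
| // type after '=>'. |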
| |
| bool Function::HasInstantiatedSignature( |
| Genericity genericity, |
| intptr_t num_free_fun_type_params) const { |
| return FunctionType::Handle(signature()) |
| .IsInstantiated(genericity, num_free_fun_type_params); |
| } |
| |
| bool FunctionType::IsInstantiated(Genericity genericity, |
| intptr_t num_free_fun_type_params) const { |
| if (num_free_fun_type_params == kCurrentAndEnclosingFree) { |
| num_free_fun_type_params = kAllFree; |
| } else if (genericity != kCurrentClass) { |
| const intptr_t num_parent_type_args = NumParentTypeArguments(); |
| if (num_parent_type_args > 0 && num_free_fun_type_params > 0) { |
| // The number of parent type arguments is cached in the FunctionType, so |
| // we can't consider any FunctionType with free parent type arguments as |
| // fully instantiated. Instead, the FunctionType must be instantiated to |
| // reduce the number of parent type arguments, even if they're unused in |
| // its component types. |
| return false; |
| } |
| // Don't consider local function type parameters as free. |
| if (num_free_fun_type_params > num_parent_type_args) { |
| num_free_fun_type_params = num_parent_type_args; |
| } |
| } |
| AbstractType& type = AbstractType::Handle(result_type()); |
| if (!type.IsInstantiated(genericity, num_free_fun_type_params)) { |
| return false; |
| } |
| const intptr_t num_parameters = NumParameters(); |
| for (intptr_t i = 0; i < num_parameters; i++) { |
| type = ParameterTypeAt(i); |
| if (!type.IsInstantiated(genericity, num_free_fun_type_params)) { |
| return false; |
| } |
| } |
| const intptr_t num_type_params = NumTypeParameters(); |
| if (num_type_params > 0) { |
| TypeParameters& type_params = TypeParameters::Handle(type_parameters()); |
| if (!type_params.AllDynamicBounds()) { |
| for (intptr_t i = 0; i < type_params.Length(); ++i) { |
| type = type_params.BoundAt(i); |
| if (!type.IsInstantiated(genericity, num_free_fun_type_params)) { |
| return false; |
| } |
| } |
| } |
| } |
| return true; |
| } |
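| |
| // For example (illustrative Dart): inside `class C<T>`, the signature |
| // `void Function(T)` is not instantiated with respect to the current class |
| // (T is a free class type parameter), while `void Function(int)` is fully |
| // instantiated. Type parameters declared by the signature itself, as in |
| // `void Function<S>(S)`, are not considered free. |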
| |
| bool Function::IsPrivate() const { |
| return Library::IsPrivate(String::Handle(name())); |
| } |
| |
| ClassPtr Function::Owner(FunctionPtr function) { |
| ObjectPtr owner = function->untag()->owner(); |
| ASSERT(owner != Object::null()); |
| if (owner->IsClass()) { |
| return Class::RawCast(owner); |
| } |
| ASSERT(owner->IsPatchClass()); |
| return PatchClass::RawCast(owner)->untag()->wrapped_class(); |
| } |
| |
| #if defined(DART_DYNAMIC_MODULES) |
| bool Function::is_declared_in_bytecode() const { |
| return Class::Handle(Owner()).is_declared_in_bytecode(); |
| } |
| #endif |
| |
| void Function::InheritKernelOffsetFrom(const Function& src) const { |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| #if !defined(DART_DYNAMIC_MODULES) |
| UNREACHABLE(); |
| #endif |
| #else |
| StoreNonPointer(&untag()->kernel_offset_, src.untag()->kernel_offset_); |
| #endif |
| } |
| |
| void Function::InheritKernelOffsetFrom(const Field& src) const { |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| #if !defined(DART_DYNAMIC_MODULES) |
| UNREACHABLE(); |
| #endif |
| #else |
| set_kernel_offset(src.kernel_offset()); |
| #endif |
| } |
| |
| void Function::SetKernelLibraryAndEvalScript( |
| const Script& script, |
| const class KernelProgramInfo& kernel_program_info, |
| intptr_t index) const { |
| Array& data_field = Array::Handle( |
| Array::New(static_cast<intptr_t>(EvalFunctionData::kLength))); |
| data_field.SetAt(static_cast<intptr_t>(EvalFunctionData::kScript), script); |
| data_field.SetAt(static_cast<intptr_t>(EvalFunctionData::kKernelProgramInfo), |
| kernel_program_info); |
| data_field.SetAt(static_cast<intptr_t>(EvalFunctionData::kKernelLibraryIndex), |
| Smi::Handle(Smi::New(index))); |
| set_data(data_field); |
| } |
| |
| ScriptPtr Function::script() const { |
| // NOTE(turnidge): If you update this function, you probably want to |
| // update Class::PatchFieldsAndFunctions() at the same time. |
| if (IsDynamicInvocationForwarder()) { |
| const Function& target = Function::Handle(ForwardingTarget()); |
| return target.IsNull() ? Script::null() : target.script(); |
| } |
| if (IsImplicitGetterOrSetter()) { |
| const auto& field = Field::Handle(accessor_field()); |
| return field.IsNull() ? Script::null() : field.Script(); |
| } |
| if (is_eval_function()) { |
| const auto& fdata = Array::Handle(Array::RawCast(data())); |
| return Script::RawCast( |
| fdata.At(static_cast<intptr_t>(EvalFunctionData::kScript))); |
| } |
| if (token_pos() == TokenPosition::kMinSource) { |
| // Testing for position 0 is an optimization that relies on temporary |
| // eval functions having token position 0. |
| const Script& script = Script::Handle(eval_script()); |
| if (!script.IsNull()) { |
| return script.ptr(); |
| } |
| } |
| const Object& obj = Object::Handle(untag()->owner()); |
| if (obj.IsPatchClass()) { |
| return PatchClass::Cast(obj).script(); |
| } |
| if (IsClosureFunction()) { |
| const Function& function = Function::Handle(parent_function()); |
| if (function.IsNull()) return Script::null(); |
| return function.script(); |
| } |
| ASSERT(obj.IsClass()); |
| return Class::Cast(obj).script(); |
| } |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| KernelProgramInfoPtr Function::KernelProgramInfo() const { |
| if (is_eval_function()) { |
| const auto& fdata = Array::Handle(Array::RawCast(data())); |
| return KernelProgramInfo::RawCast( |
| fdata.At(static_cast<intptr_t>(EvalFunctionData::kKernelProgramInfo))); |
| } |
| if (IsClosureFunction()) { |
| const auto& parent = Function::Handle(parent_function()); |
| return parent.KernelProgramInfo(); |
| } |
| const auto& owner = Object::Handle(RawOwner()); |
| if (owner.IsClass()) { |
| return Class::Cast(owner).KernelProgramInfo(); |
| } |
| return PatchClass::Cast(owner).kernel_program_info(); |
| } |
| |
| TypedDataViewPtr Function::KernelLibrary() const { |
| const auto& info = KernelProgramInfo::Handle(KernelProgramInfo()); |
| return info.KernelLibrary(KernelLibraryIndex()); |
| } |
| |
| intptr_t Function::KernelLibraryOffset() const { |
| const intptr_t kernel_library_index = KernelLibraryIndex(); |
| if (kernel_library_index == -1) return 0; |
| const auto& info = KernelProgramInfo::Handle(KernelProgramInfo()); |
| return info.KernelLibraryStartOffset(kernel_library_index); |
| } |
| |
| intptr_t Function::KernelLibraryIndex() const { |
| ASSERT(!is_declared_in_bytecode()); |
| if (IsNoSuchMethodDispatcher() || IsInvokeFieldDispatcher() || |
| IsFfiCallbackTrampoline()) { |
| return -1; |
| } |
| if (is_eval_function()) { |
| const auto& fdata = Array::Handle(Array::RawCast(data())); |
| return Smi::Value(static_cast<SmiPtr>(fdata.At( |
| static_cast<intptr_t>(EvalFunctionData::kKernelLibraryIndex)))); |
| } |
| if (IsClosureFunction()) { |
| const auto& parent = Function::Handle(parent_function()); |
| ASSERT(!parent.IsNull()); |
| return parent.KernelLibraryIndex(); |
| } |
| |
| const auto& obj = Object::Handle(untag()->owner()); |
| if (obj.IsClass()) { |
| const auto& lib = Library::Handle(Class::Cast(obj).library()); |
| return lib.kernel_library_index(); |
| } |
| ASSERT(obj.IsPatchClass()); |
| return PatchClass::Cast(obj).kernel_library_index(); |
| } |
| #endif |
| |
| bool Function::HasOptimizedCode() const { |
| return HasCode() && Code::Handle(CurrentCode()).is_optimized(); |
| } |
| |
| const char* Function::NameCString(NameVisibility name_visibility) const { |
| switch (name_visibility) { |
| case kInternalName: |
| return String::Handle(name()).ToCString(); |
| case kScrubbedName: |
| case kUserVisibleName: |
| return UserVisibleNameCString(); |
| } |
| UNREACHABLE(); |
| return nullptr; |
| } |
| |
| const char* Function::UserVisibleNameCString() const { |
| if (FLAG_show_internal_names) { |
| return String::Handle(name()).ToCString(); |
| } |
| return String::ScrubName(String::Handle(name()), |
| is_extension_member() || is_extension_type_member()); |
| } |
| |
| StringPtr Function::UserVisibleName() const { |
| if (FLAG_show_internal_names) { |
| return name(); |
| } |
| return Symbols::New( |
| Thread::Current(), |
| String::ScrubName(String::Handle(name()), |
| is_extension_member() || is_extension_type_member())); |
| } |
| |
| StringPtr Function::QualifiedScrubbedName() const { |
| Thread* thread = Thread::Current(); |
| ZoneTextBuffer printer(thread->zone()); |
| PrintName(NameFormattingParams(kScrubbedName), &printer); |
| return Symbols::New(thread, printer.buffer()); |
| } |
| |
| const char* Function::QualifiedScrubbedNameCString() const { |
| Thread* thread = Thread::Current(); |
| ZoneTextBuffer printer(thread->zone()); |
| PrintName(NameFormattingParams(kScrubbedName), &printer); |
| return printer.buffer(); |
| } |
| |
| StringPtr Function::QualifiedUserVisibleName() const { |
| Thread* thread = Thread::Current(); |
| ZoneTextBuffer printer(thread->zone()); |
| PrintName(NameFormattingParams(kUserVisibleName), &printer); |
| return Symbols::New(thread, printer.buffer()); |
| } |
| |
| const char* Function::QualifiedUserVisibleNameCString() const { |
| Thread* thread = Thread::Current(); |
| ZoneTextBuffer printer(thread->zone()); |
| PrintName(NameFormattingParams(kUserVisibleName), &printer); |
| return printer.buffer(); |
| } |
| |
| static void FunctionPrintNameHelper(const Function& fun, |
| const NameFormattingParams& params, |
| BaseTextBuffer* printer) { |
| if (fun.IsNonImplicitClosureFunction()) { |
| if (params.include_parent_name) { |
| const auto& parent = Function::Handle(fun.parent_function()); |
| if (parent.IsNull()) { |
| printer->AddString(Symbols::OptimizedOut().ToCString()); |
| } else { |
| parent.PrintName(params, printer); |
| } |
| // A function's scrubbed name and its user visible name are identical. |
| printer->AddString("."); |
| } |
| if (params.disambiguate_names && |
| fun.name() == Symbols::AnonymousClosure().ptr()) { |
| if (fun.token_pos().IsReal()) { |
| printer->Printf("<anonymous closure @%" Pd ">", fun.token_pos().Pos()); |
| } else { |
| printer->Printf("<anonymous closure @no position>"); |
| } |
| } else { |
| printer->AddString(fun.NameCString(params.name_visibility)); |
| if (params.disambiguate_names) { |
| if (fun.token_pos().IsReal()) { |
| printer->Printf("@<%" Pd ">", fun.token_pos().Pos()); |
| } else { |
| printer->Printf("@<no position>"); |
| } |
| } |
| } |
| return; |
| } |
| if (params.disambiguate_names) { |
| if (fun.IsInvokeFieldDispatcher()) { |
| printer->AddString("[invoke-field] "); |
| } |
| if (fun.IsNoSuchMethodDispatcher()) { |
| printer->AddString("[no-such-method] "); |
| } |
| if (fun.IsImplicitClosureFunction()) { |
| printer->AddString("[tear-off] "); |
| } |
| if (fun.IsMethodExtractor()) { |
| printer->AddString("[tear-off-extractor] "); |
| } |
| } |
| |
| if (fun.kind() == UntaggedFunction::kConstructor) { |
| printer->AddString("new "); |
| } else if (params.include_class_name) { |
| const Class& cls = Class::Handle(fun.Owner()); |
| if (!cls.IsTopLevel()) { |
| const Class& mixin = Class::Handle(cls.Mixin()); |
| printer->AddString(params.name_visibility == Object::kUserVisibleName |
| ? mixin.UserVisibleNameCString() |
| : cls.NameCString(params.name_visibility)); |
| printer->AddString("."); |
| } |
| } |
| |
| printer->AddString(fun.NameCString(params.name_visibility)); |
| |
| // Dispatchers that are created with an arguments descriptor need both the |
| // name and the saved arguments descriptor to disambiguate. |
| if (params.disambiguate_names && fun.HasSavedArgumentsDescriptor()) { |
| const auto& args_desc_array = Array::Handle(fun.saved_args_desc()); |
| const ArgumentsDescriptor args_desc(args_desc_array); |
| args_desc.PrintTo(printer); |
| } |
| } |
| |
| void Function::PrintName(const NameFormattingParams& params, |
| BaseTextBuffer* printer) const { |
| if (!IsLocalFunction()) { |
| FunctionPrintNameHelper(*this, params, printer); |
| return; |
| } |
| auto& fun = Function::Handle(ptr()); |
| FunctionPrintNameHelper(fun, params, printer); |
| } |
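| |
| // With `disambiguate_names` enabled, the helpers above produce strings such |
| // as (illustrative) "[tear-off] C.m" for the implicit closure of C.m, or |
| // "<anonymous closure @42>" for an unnamed closure at token position 42. |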
| |
| StringPtr Function::GetSource() const { |
| if (IsImplicitConstructor() || is_synthetic()) { |
| // We may need to handle more cases when the restrictions on mixins are |
| // relaxed. In particular we might start associating some source with the |
| // forwarding constructors when it becomes possible to specify a particular |
| // constructor from the mixin to use. |
| return String::null(); |
| } |
| Zone* zone = Thread::Current()->zone(); |
| const Script& func_script = Script::Handle(zone, script()); |
| |
| intptr_t from_line, from_col; |
| if (!func_script.GetTokenLocation(token_pos(), &from_line, &from_col)) { |
| return String::null(); |
| } |
| intptr_t to_line, to_col; |
| if (!func_script.GetTokenLocation(end_token_pos(), &to_line, &to_col)) { |
| return String::null(); |
| } |
| intptr_t to_length = func_script.GetTokenLength(end_token_pos()); |
| if (to_length < 0) { |
| return String::null(); |
| } |
| |
| if (to_length == 1) { |
| // Handle special cases for end tokens of closures (where we exclude the |
| // last token): |
| // (1) "foo(() => null, bar);": End token is `,', but we don't print it. |
| // (2) "foo(() => null);": End token is ')`, but we don't print it. |
| // (3) "var foo = () => null;": End token is `;', but in this case the |
| // token semicolon belongs to the assignment so we skip it. |
| const String& src = String::Handle(func_script.Source()); |
| if (src.IsNull() || src.Length() == 0) { |
| return Symbols::OptimizedOut().ptr(); |
| } |
| uint16_t end_char = src.CharAt(end_token_pos().Pos()); |
| if ((end_char == ',') || // Case 1. |
| (end_char == ')') || // Case 2. |
| (end_char == ';' && String::Handle(zone, name()) |
| .Equals("<anonymous closure>"))) { // Case 3. |
| to_length = 0; |
| } |
| } |
| |
| return func_script.GetSnippet(from_line, from_col, to_line, |
| to_col + to_length); |
| } |
| |
| // Construct the fingerprint from the token stream. The token stream also |
| // contains the arguments. |
| int32_t Function::SourceFingerprint() const { |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| if (is_declared_in_bytecode()) { |
| return 0; |
| } |
| return kernel::KernelSourceFingerprintHelper::CalculateFunctionFingerprint( |
| *this); |
| #else |
| return 0; |
| #endif // !defined(DART_PRECOMPILED_RUNTIME) |
| } |
| |
| void Function::SaveICDataMap( |
| const ZoneGrowableArray<const ICData*>& deopt_id_to_ic_data, |
| const Array& edge_counters_array, |
| const Array& coverage_array) const { |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| // Already installed, nothing to do. |
| if (ic_data_array() != Array::null()) { |
| ASSERT(coverage_array.ptr() == GetCoverageArray()); |
| return; |
| } |
| |
| // Compute number of ICData objects to save. |
| intptr_t count = 0; |
| for (intptr_t i = 0; i < deopt_id_to_ic_data.length(); i++) { |
| if (deopt_id_to_ic_data[i] != nullptr) { |
| count++; |
| } |
| } |
| |
| // Compress sparse deopt_id_to_ic_data mapping into a linear sequence of |
| // ICData objects. |
| const Array& array = Array::Handle( |
| Array::New(ICDataArrayIndices::kFirstICData + count, Heap::kOld)); |
| for (intptr_t i = 0, pos = ICDataArrayIndices::kFirstICData; |
| i < deopt_id_to_ic_data.length(); i++) { |
| if (deopt_id_to_ic_data[i] != nullptr) { |
| ASSERT(i == deopt_id_to_ic_data[i]->deopt_id()); |
| array.SetAt(pos++, *deopt_id_to_ic_data[i]); |
| } |
| } |
| array.SetAt(ICDataArrayIndices::kEdgeCounters, edge_counters_array); |
| // Preserve coverage_array which is stored early after graph construction. |
| array.SetAt(ICDataArrayIndices::kCoverageData, coverage_array); |
| set_ic_data_array(array); |
| #else // DART_PRECOMPILED_RUNTIME |
| UNREACHABLE(); |
| #endif // DART_PRECOMPILED_RUNTIME |
| } |
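| |
| // The saved array has the following layout (slots named via |
| // ICDataArrayIndices): the edge counter array, the coverage array, and then |
| // the ICData objects in ascending deopt id order. RestoreICDataMap() below |
| // relies on that ordering when it sizes the sparse mapping from the deopt |
| // id of the last entry. |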
| |
| void Function::RestoreICDataMap( |
| ZoneGrowableArray<const ICData*>* deopt_id_to_ic_data, |
| bool clone_ic_data) const { |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| if (FLAG_force_clone_compiler_objects) { |
| clone_ic_data = true; |
| } |
| ASSERT(deopt_id_to_ic_data->is_empty()); |
| Zone* zone = Thread::Current()->zone(); |
| const Array& saved_ic_data = Array::Handle(zone, ic_data_array()); |
| if (saved_ic_data.IsNull()) { |
| // Could happen with not-yet-compiled unoptimized code or force-optimized |
| // functions. |
| return; |
| } |
| const intptr_t saved_length = saved_ic_data.Length(); |
| ASSERT(saved_length > 0); |
| if (saved_length > ICDataArrayIndices::kFirstICData) { |
| const intptr_t restored_length = |
| ICData::Cast(Object::Handle(zone, saved_ic_data.At(saved_length - 1))) |
| .deopt_id() + |
| 1; |
| deopt_id_to_ic_data->SetLength(restored_length); |
| for (intptr_t i = 0; i < restored_length; i++) { |
| (*deopt_id_to_ic_data)[i] = nullptr; |
| } |
| for (intptr_t i = ICDataArrayIndices::kFirstICData; i < saved_length; i++) { |
| ICData& ic_data = ICData::ZoneHandle(zone); |
| ic_data ^= saved_ic_data.At(i); |
| if (clone_ic_data) { |
| const ICData& original_ic_data = ICData::Handle(zone, ic_data.ptr()); |
| ic_data = ICData::Clone(ic_data); |
| ic_data.SetOriginal(original_ic_data); |
| } |
| ASSERT(deopt_id_to_ic_data->At(ic_data.deopt_id()) == nullptr); |
| (*deopt_id_to_ic_data)[ic_data.deopt_id()] = &ic_data; |
| } |
| } |
| #else // DART_PRECOMPILED_RUNTIME |
| UNREACHABLE(); |
| #endif // DART_PRECOMPILED_RUNTIME |
| } |
| |
| ArrayPtr Function::GetCoverageArray() const { |
| const Array& arr = Array::Handle(ic_data_array()); |
| if (arr.IsNull()) { |
| return Array::null(); |
| } |
| return Array::RawCast(arr.At(ICDataArrayIndices::kCoverageData)); |
| } |
| |
| void Function::set_ic_data_array(const Array& value) const { |
| #if defined(DART_DYNAMIC_MODULES) |
| ASSERT(!HasBytecode()); |
| #endif |
| untag()->set_ic_data_array_or_bytecode<std::memory_order_release>( |
| value.ptr()); |
| } |
| |
| ArrayPtr Function::ic_data_array() const { |
| ObjectPtr value = |
| untag()->ic_data_array_or_bytecode<std::memory_order_acquire>(); |
| #if defined(DART_DYNAMIC_MODULES) |
| if (value->IsBytecode()) { |
| return Array::null(); |
| } |
| #endif |
| return Array::RawCast(value); |
| } |
| |
| void Function::ClearICDataArray() const { |
| set_ic_data_array(Array::null_array()); |
| } |
| |
| ICDataPtr Function::FindICData(intptr_t deopt_id) const { |
| const Array& array = Array::Handle(ic_data_array()); |
| ICData& ic_data = ICData::Handle(); |
| for (intptr_t i = ICDataArrayIndices::kFirstICData; i < array.Length(); i++) { |
| ic_data ^= array.At(i); |
| if (ic_data.deopt_id() == deopt_id) { |
| return ic_data.ptr(); |
| } |
| } |
| return ICData::null(); |
| } |
| |
| void Function::SetDeoptReasonForAll(intptr_t deopt_id, |
| ICData::DeoptReasonId reason) { |
| const Array& array = Array::Handle(ic_data_array()); |
| ICData& ic_data = ICData::Handle(); |
| for (intptr_t i = ICDataArrayIndices::kFirstICData; i < array.Length(); i++) { |
| ic_data ^= array.At(i); |
| if (ic_data.deopt_id() == deopt_id) { |
| ic_data.AddDeoptReason(reason); |
| } |
| } |
| } |
| |
| bool Function::CheckSourceFingerprint(int32_t fp, const char* kind) const { |
| #if !defined(DEBUG) |
| return true; // Only check in debug mode. |
| #endif |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| // Check that the function is marked as recognized via the vm:recognized |
| // pragma. This is so that optimizations that change the signature will know |
| // not to touch it. |
| if (kind != nullptr && !MethodRecognizer::IsMarkedAsRecognized(*this, kind)) { |
| OS::PrintErr( |
| "Recognized method %s should be marked with: " |
| "@pragma(\"vm:recognized\", \"%s\")\n", |
| ToQualifiedCString(), kind); |
| return false; |
| } |
| #endif |
| |
| if (IsolateGroup::Current()->obfuscate() || FLAG_precompiled_mode || |
| (Dart::vm_snapshot_kind() != Snapshot::kNone)) { |
| return true; // The kernel structure has been altered, skip checking. |
| } |
| |
| ASSERT(!is_declared_in_bytecode()); |
| |
| if (SourceFingerprint() != fp) { |
| // This output can be copied into a file, then used with sed |
| // to replace the old values. |
| // sed -i.bak -f /tmp/newkeys \ |
| // runtime/vm/compiler/recognized_methods_list.h |
| THR_Print("s/0x%08x/0x%08x/\n", fp, SourceFingerprint()); |
| return false; |
| } |
| return true; |
| } |
| |
| CodePtr Function::EnsureHasCode() const { |
| if (HasCode()) { |
| return CurrentCode(); |
| } |
| Thread* thread = Thread::Current(); |
| ASSERT(thread->IsDartMutatorThread()); |
| DEBUG_ASSERT(thread->TopErrorHandlerIsExitFrame()); |
| Zone* zone = thread->zone(); |
| const Object& result = Object::Handle(zone, EnsureHasCodeNoThrow()); |
| if (result.IsError()) { |
| if (result.ptr() == Object::out_of_memory_error().ptr()) { |
| Exceptions::ThrowOOM(); |
| UNREACHABLE(); |
| } |
| if (result.IsLanguageError()) { |
| Exceptions::ThrowCompileTimeError(LanguageError::Cast(result)); |
| UNREACHABLE(); |
| } |
| Exceptions::PropagateError(Error::Cast(result)); |
| UNREACHABLE(); |
| } else { |
| return Code::Cast(result).ptr(); |
| } |
| } |
| |
| ObjectPtr Function::EnsureHasCodeNoThrow() const { |
| if (HasCode()) { |
| return CurrentCode(); |
| } |
| Thread* thread = Thread::Current(); |
| ASSERT(thread->IsDartMutatorThread()); |
| Zone* zone = thread->zone(); |
| const Object& result = |
| Object::Handle(zone, Compiler::CompileFunction(thread, *this)); |
| if (result.IsError()) { |
| return result.ptr(); |
| } |
| // Compiling in unoptimized mode should never fail if there are no errors. |
| RELEASE_ASSERT(HasCode()); |
| ASSERT(ForceOptimize() || unoptimized_code() == result.ptr()); |
| return CurrentCode(); |
| } |
| |
| bool Function::NeedsMonomorphicCheckedEntry(Zone* zone) const { |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| if (!IsDynamicFunction()) { |
| return false; |
| } |
| |
| // For functions which need an args descriptor the switchable call sites will |
| // transition directly to calling via a stub (and therefore never call the |
| // monomorphic entry). |
| // |
| // See runtime_entry.cc:DEFINE_RUNTIME_ENTRY(UnlinkedCall) |
| if (PrologueNeedsArgumentsDescriptor()) { |
| return false; |
| } |
| |
| // All dyn:* forwarders are called via SwitchableCalls and all except the ones |
| // with `PrologueNeedsArgumentsDescriptor()` transition into monomorphic |
| // state. |
| if (Function::IsDynamicInvocationForwarderName(name())) { |
| return true; |
| } |
| |
| // AOT mode uses table dispatch. |
| // In JIT mode all instance calls use switchable calls. |
| if (!FLAG_precompiled_mode) { |
| return true; |
| } |
| |
| // Any method from the class with a dynamically loaded subtype |
| // can be called via switchable call (when cid range check fails |
| // during conditional table dispatch). |
| if (Class::Handle(zone, Owner()).has_dynamically_extendable_subtypes()) { |
| return true; |
| } |
| |
| // Only if there are dynamic callers and if we didn't create a dyn:* forwarder |
| // for it do we need the monomorphic checked entry. |
| return HasDynamicCallers(zone) && |
| !kernel::NeedsDynamicInvocationForwarder(*this); |
| #else |
| UNREACHABLE(); |
| return true; |
| #endif |
| } |
| |
| bool Function::HasDynamicCallers(Zone* zone) const { |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| // Issue(dartbug.com/42719): |
| // Right now the metadata of _Closure.call says there are no dynamic |
| // callers, even though there can be. To be conservative we return true. |
| if ((name() == Symbols::GetCall().ptr() || name() == Symbols::call().ptr()) && |
| Class::IsClosureClass(Owner())) { |
| return true; |
| } |
| |
| // Use the results of TFA to determine whether this function is ever |
| // called dynamically, i.e. using switchable calls. |
| kernel::ProcedureAttributesMetadata metadata; |
| metadata = kernel::ProcedureAttributesOf(*this, zone); |
| if (IsGetterFunction() || IsImplicitGetterFunction() || IsMethodExtractor()) { |
| // Dynamic method call through field/getter involves dynamic call of |
| // the field/getter. |
| return metadata.getter_called_dynamically || |
| metadata.method_or_setter_called_dynamically; |
| } else { |
| return metadata.method_or_setter_called_dynamically; |
| } |
| #else |
| UNREACHABLE(); |
| return true; |
| #endif |
| } |
| |
| bool Function::PrologueNeedsArgumentsDescriptor() const { |
| // These functions have a saved compile-time arguments descriptor that is |
| // used in lieu of the runtime arguments descriptor in generated IL. |
| if (HasSavedArgumentsDescriptor()) { |
| return false; |
| } |
| #if defined(DART_DYNAMIC_MODULES) |
| // Entering interpreter needs arguments descriptor. |
| if (is_declared_in_bytecode()) { |
| return true; |
| } |
| #endif |
| // The prologue of such functions needs to examine the arguments descriptor |
| // for various purposes. |
| return IsGeneric() || HasOptionalParameters(); |
| } |
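| |
| // For example (illustrative Dart): `void f<T>(int a) {}` and |
| // `void g(int a, [int b = 0]) {}` both need the arguments descriptor in |
| // their prologues (generic / optional parameters), whereas |
| // `void h(int a) {}` does not. |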
| |
| bool Function::MayHaveUncheckedEntryPoint() const { |
| return FLAG_enable_multiple_entrypoints && |
| (NeedsTypeArgumentTypeChecks() || NeedsArgumentTypeChecks()); |
| } |
| |
| intptr_t Function::SourceSize() const { |
| const TokenPosition& start = token_pos(); |
| const TokenPosition& end = end_token_pos(); |
| if (!end.IsReal() || start.IsNoSource() || start.IsClassifying()) { |
| // No source information, so just return 0. |
| return 0; |
| } |
| if (start.IsSynthetic()) { |
| // Try and approximate the source size using the parent's source size. |
| const auto& parent = Function::Handle(parent_function()); |
| ASSERT(!parent.IsNull()); |
| const intptr_t parent_size = parent.SourceSize(); |
| if (parent_size == 0) { |
| return parent_size; |
| } |
| // Parent must have a real ending position. |
| return parent_size - (parent.end_token_pos().Pos() - end.Pos()); |
| } |
| return end.Pos() - start.Pos(); |
| } |
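| |
| // Worked example for the synthetic case above (illustrative numbers): if |
| // the parent spans token positions 10..50 (source size 40) and a synthetic |
| // child ends at position 30, the child's size is approximated as |
| // 40 - (50 - 30) = 20. |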
| |
| const char* Function::ToCString() const { |
| if (IsNull()) { |
| return "Function: null"; |
| } |
| Zone* zone = Thread::Current()->zone(); |
| ZoneTextBuffer buffer(zone); |
| buffer.Printf("Function '%s':", String::Handle(zone, name()).ToCString()); |
| if (is_static()) { |
| buffer.AddString(" static"); |
| } |
| if (is_abstract()) { |
| buffer.AddString(" abstract"); |
| } |
| switch (kind()) { |
| case UntaggedFunction::kRegularFunction: |
| case UntaggedFunction::kClosureFunction: |
| case UntaggedFunction::kImplicitClosureFunction: |
| case UntaggedFunction::kGetterFunction: |
| case UntaggedFunction::kSetterFunction: |
| break; |
| case UntaggedFunction::kConstructor: |
| buffer.AddString(is_static() ? " factory" : " constructor"); |
| break; |
| case UntaggedFunction::kImplicitGetter: |
| buffer.AddString(" getter"); |
| break; |
| case UntaggedFunction::kImplicitSetter: |
| buffer.AddString(" setter"); |
| break; |
| case UntaggedFunction::kImplicitStaticGetter: |
| buffer.AddString(" static-getter"); |
| break; |
| case UntaggedFunction::kFieldInitializer: |
| buffer.AddString(" field-initializer"); |
| break; |
| case UntaggedFunction::kMethodExtractor: |
| buffer.AddString(" method-extractor"); |
| break; |
| case UntaggedFunction::kNoSuchMethodDispatcher: |
| buffer.AddString(" no-such-method-dispatcher"); |
| break; |
| case UntaggedFunction::kDynamicInvocationForwarder: |
| buffer.AddString(" dynamic-invocation-forwarder"); |
| break; |
| case UntaggedFunction::kInvokeFieldDispatcher: |
| buffer.AddString(" invoke-field-dispatcher"); |
| break; |
| case UntaggedFunction::kIrregexpFunction: |
| buffer.AddString(" irregexp-function"); |
| break; |
| case UntaggedFunction::kFfiTrampoline: |
| buffer.AddString(" ffi-trampoline-function"); |
| break; |
| case UntaggedFunction::kRecordFieldGetter: |
| buffer.AddString(" record-field-getter"); |
| break; |
| default: |
| UNREACHABLE(); |
| } |
| if (HasSavedArgumentsDescriptor()) { |
| const auto& args_desc_array = Array::Handle(zone, saved_args_desc()); |
| const ArgumentsDescriptor args_desc(args_desc_array); |
| buffer.AddChar('['); |
| args_desc.PrintTo(&buffer); |
| buffer.AddChar(']'); |
| } |
| if (is_const()) { |
| buffer.AddString(" const"); |
| } |
| buffer.AddChar('.'); |
| return buffer.buffer(); |
| } |
| |
| void FunctionType::set_packed_parameter_counts( |
| uint32_t packed_parameter_counts) const { |
| untag()->packed_parameter_counts_ = packed_parameter_counts; |
| } |
| |
| void FunctionType::set_packed_type_parameter_counts( |
| uint16_t packed_type_parameter_counts) const { |
| untag()->packed_type_parameter_counts_ = packed_type_parameter_counts; |
| } |
| |
| void FunctionType::set_num_implicit_parameters(intptr_t value) const { |
| ASSERT(value >= 0); |
| untag()->packed_parameter_counts_.Update<PackedNumImplicitParameters>(value); |
| } |
| |
| void ClosureData::set_default_type_arguments_instantiation_mode( |
| InstantiationMode value) const { |
| untag()->packed_fields_.Update<PackedInstantiationMode>(value); |
| } |
| |
| Function::AwaiterLink ClosureData::awaiter_link() const { |
| const uint8_t depth = |
| untag() |
| ->packed_fields_.Read<UntaggedClosureData::PackedAwaiterLinkDepth>(); |
| const uint8_t index = |
| untag() |
| ->packed_fields_.Read<UntaggedClosureData::PackedAwaiterLinkIndex>(); |
| return {depth, index}; |
| } |
| |
| void ClosureData::set_awaiter_link(Function::AwaiterLink link) const { |
| untag()->packed_fields_.Update<UntaggedClosureData::PackedAwaiterLinkDepth>( |
| link.depth); |
| untag()->packed_fields_.Update<UntaggedClosureData::PackedAwaiterLinkIndex>( |
| link.index); |
| } |
| |
| ClosureDataPtr ClosureData::New() { |
| ASSERT(Object::closure_data_class() != Class::null()); |
| return Object::Allocate<ClosureData>(Heap::kOld); |
| } |
| |
| const char* ClosureData::ToCString() const { |
| if (IsNull()) { |
| return "ClosureData: null"; |
| } |
| auto const zone = Thread::Current()->zone(); |
| ZoneTextBuffer buffer(zone); |
| buffer.Printf("ClosureData: context_scope: 0x%" Px "", |
| static_cast<uword>(context_scope())); |
| buffer.AddString(" parent_function: "); |
| if (parent_function() == Object::null()) { |
| buffer.AddString("null"); |
| } else { |
| buffer.AddString(Object::Handle(parent_function()).ToCString()); |
| } |
| buffer.Printf(" implicit_static_closure: 0x%" Px "", |
| static_cast<uword>(implicit_static_closure())); |
| return buffer.buffer(); |
| } |
| |
| void FunctionType::set_num_fixed_parameters(intptr_t value) const { |
| ASSERT(value >= 0); |
| untag()->packed_parameter_counts_.Update<PackedNumFixedParameters>(value); |
| } |
| |
| void FfiTrampolineData::set_callback_target(const Function& value) const { |
| untag()->set_callback_target(value.ptr()); |
| } |
| |
| void FunctionType::SetNumOptionalParameters( |
| intptr_t value, |
| bool are_optional_positional) const { |
| // HasOptionalNamedParameters only checks this bit, so only set it if there |
| // are actual named parameters. |
| untag()->packed_parameter_counts_.Update<PackedHasNamedOptionalParameters>( |
| (value > 0) && !are_optional_positional); |
| untag()->packed_parameter_counts_.Update<PackedNumOptionalParameters>(value); |
| } |
| |
| FunctionTypePtr FunctionType::New(Heap::Space space) { |
| return Object::Allocate<FunctionType>(space); |
| } |
| |
| FunctionTypePtr FunctionType::New(intptr_t num_parent_type_arguments, |
| Nullability nullability, |
| Heap::Space space) { |
| Zone* Z = Thread::Current()->zone(); |
| const FunctionType& result = |
| FunctionType::Handle(Z, FunctionType::New(space)); |
| result.set_packed_parameter_counts(0); |
| result.set_packed_type_parameter_counts(0); |
| result.set_named_parameter_names(Object::empty_array()); |
| result.SetNumParentTypeArguments(num_parent_type_arguments); |
| result.SetHash(0); |
| result.set_flags(0); |
| result.set_nullability(nullability); |
| result.set_type_state(UntaggedAbstractType::kAllocated); |
| result.InitializeTypeTestingStubNonAtomic( |
| Code::Handle(Z, TypeTestingStubGenerator::DefaultCodeForType(result))); |
| return result.ptr(); |
| } |
| |
| FunctionTypePtr FunctionType::Clone(const FunctionType& orig, |
| Heap::Space space) { |
| if (orig.IsGeneric()) { |
| // Need a deep clone in order to update owners of type parameters. |
| return FunctionType::RawCast( |
| orig.UpdateFunctionTypes(0, kAllFree, space, nullptr)); |
| } else { |
| return FunctionType::RawCast(Object::Clone(orig, space)); |
| } |
| } |
| |
| const char* FunctionType::ToUserVisibleCString() const { |
| Zone* zone = Thread::Current()->zone(); |
| ZoneTextBuffer printer(zone); |
| Print(kUserVisibleName, &printer); |
| return printer.buffer(); |
| } |
| |
| StringPtr FunctionType::ToUserVisibleString() const { |
| Thread* thread = Thread::Current(); |
| ZoneTextBuffer printer(thread->zone()); |
| Print(kUserVisibleName, &printer); |
| return Symbols::New(thread, printer.buffer()); |
| } |
| |
| const char* FunctionType::ToCString() const { |
| if (IsNull()) { |
| return "FunctionType: null"; |
| } |
| Zone* zone = Thread::Current()->zone(); |
| ZoneTextBuffer printer(zone); |
| const char* suffix = NullabilitySuffix(kInternalName); |
| if (suffix[0] != '\0') { |
| printer.AddString("("); |
| } |
| Print(kInternalName, &printer); |
| if (suffix[0] != '\0') { |
| printer.AddString(")"); |
| printer.AddString(suffix); |
| } |
| return printer.buffer(); |
| } |
| |
| void ClosureData::set_context_scope(const ContextScope& value) const { |
| untag()->set_context_scope(value.ptr()); |
| } |
| |
| void ClosureData::set_implicit_static_closure(const Closure& closure) const { |
| ASSERT(!closure.IsNull()); |
| ASSERT(untag()->closure() == Closure::null()); |
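|   // Use a release store so that a thread that later observes the closure
|   // pointer also observes the closure's fully initialized contents.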
| untag()->set_closure<std::memory_order_release>(closure.ptr()); |
| } |
| |
| void FfiTrampolineData::set_c_signature(const FunctionType& value) const { |
| untag()->set_c_signature(value.ptr()); |
| } |
| |
| void FfiTrampolineData::set_callback_id(int32_t callback_id) const { |
| StoreNonPointer(&untag()->callback_id_, callback_id); |
| } |
| |
| void FfiTrampolineData::set_callback_exceptional_return( |
| const Instance& value) const { |
| untag()->set_callback_exceptional_return(value.ptr()); |
| } |
| |
| void FfiTrampolineData::set_ffi_function_kind(FfiCallbackKind kind) const { |
| StoreNonPointer(&untag()->ffi_function_kind_, static_cast<uint8_t>(kind)); |
| } |
| |
| FfiTrampolineDataPtr FfiTrampolineData::New() { |
| ASSERT(Object::ffi_trampoline_data_class() != Class::null()); |
| const auto& data = FfiTrampolineData::Handle( |
| Object::Allocate<FfiTrampolineData>(Heap::kOld)); |
| data.set_callback_id(-1); |
| return data.ptr(); |
| } |
| |
| const char* FfiTrampolineData::ToCString() const { |
| const FunctionType& c_sig = FunctionType::Handle(c_signature()); |
| return OS::SCreate(Thread::Current()->zone(), |
| "TrampolineData: c_signature=%s", |
| c_sig.ToUserVisibleCString()); |
| } |
| |
| FieldPtr Field::CloneFromOriginal() const { |
| return this->Clone(*this); |
| } |
| |
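| // A Field is either the original declaration or a clone created for the
| // background compiler (see Field::Clone()). A clone links back to the
| // original through its owner slot, which is why Original() and RawOwner()
| // below check whether owner() is itself a Field.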
| FieldPtr Field::Original() const { |
| if (IsNull()) { |
| return Field::null(); |
| } |
| if (untag()->owner()->IsField()) { |
| return static_cast<FieldPtr>(untag()->owner()); |
| } |
| return this->ptr(); |
| } |
| |
| intptr_t Field::guarded_cid() const { |
| #if defined(DEBUG) |
| // This assertion ensures that the cid seen by the background compiler is |
| // consistent. So the assertion passes if the field is a clone. It also |
| // passes if the field is static, because we don't use field guards on |
| // static fields. It also passes if we're compiling unoptimized |
| // code (in which case the caller might get different answers if it obtains |
| // the guarded cid multiple times). |
| Thread* thread = Thread::Current(); |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| ASSERT(!thread->IsInsideCompiler() || is_static()); |
| #else |
| ASSERT(!thread->IsInsideCompiler() || |
| ((CompilerState::Current().should_clone_fields() == !IsOriginal())) || |
| is_static()); |
| #endif |
| #endif |
| return LoadNonPointer<ClassIdTagType, std::memory_order_relaxed>( |
| &untag()->guarded_cid_); |
| } |
| |
| bool Field::is_nullable() const { |
| #if defined(DEBUG) |
| // Same assert as guarded_cid(), because is_nullable() also needs to be |
| // consistent for the background compiler. |
| Thread* thread = Thread::Current(); |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| ASSERT(!thread->IsInsideCompiler() || is_static()); |
| #else |
| ASSERT(!thread->IsInsideCompiler() || |
| ((CompilerState::Current().should_clone_fields() == !IsOriginal())) || |
| is_static()); |
| #endif |
| #endif |
| return is_nullable_unsafe(); |
| } |
| |
| void Field::SetOriginal(const Field& value) const { |
| ASSERT(value.IsOriginal()); |
| ASSERT(!value.IsNull()); |
| untag()->set_owner(static_cast<ObjectPtr>(value.ptr())); |
| } |
| |
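| // Accessor naming scheme: for a field `foo` the getter is named "get:foo"
| // (Symbols::GetterPrefix()) and the setter "set:foo"
| // (Symbols::SetterPrefix()); NameFromGetter()/NameFromSetter() strip these
| // prefixes again.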
| StringPtr Field::GetterName(const String& field_name) { |
| return String::Concat(Symbols::GetterPrefix(), field_name); |
| } |
| |
| StringPtr Field::GetterSymbol(const String& field_name) { |
| return Symbols::FromGet(Thread::Current(), field_name); |
| } |
| |
| StringPtr Field::LookupGetterSymbol(const String& field_name) { |
| return Symbols::LookupFromGet(Thread::Current(), field_name); |
| } |
| |
| StringPtr Field::SetterName(const String& field_name) { |
| return String::Concat(Symbols::SetterPrefix(), field_name); |
| } |
| |
| StringPtr Field::SetterSymbol(const String& field_name) { |
| return Symbols::FromSet(Thread::Current(), field_name); |
| } |
| |
| StringPtr Field::LookupSetterSymbol(const String& field_name) { |
| return Symbols::LookupFromSet(Thread::Current(), field_name); |
| } |
| |
| StringPtr Field::NameFromGetter(const String& getter_name) { |
| return Symbols::New(Thread::Current(), getter_name, kGetterPrefixLength, |
| getter_name.Length() - kGetterPrefixLength); |
| } |
| |
| StringPtr Field::NameFromSetter(const String& setter_name) { |
| return Symbols::New(Thread::Current(), setter_name, kSetterPrefixLength, |
| setter_name.Length() - kSetterPrefixLength); |
| } |
| |
| StringPtr Field::NameFromInit(const String& init_name) { |
| return Symbols::New(Thread::Current(), init_name, kInitPrefixLength, |
| init_name.Length() - kInitPrefixLength); |
| } |
| |
| bool Field::IsGetterName(const String& function_name) { |
| return function_name.StartsWith(Symbols::GetterPrefix()); |
| } |
| |
| bool Field::IsSetterName(const String& function_name) { |
| return function_name.StartsWith(Symbols::SetterPrefix()); |
| } |
| |
| bool Field::IsInitName(const String& function_name) { |
| return function_name.StartsWith(Symbols::InitPrefix()); |
| } |
| |
| void Field::set_name(const String& value) const { |
| ASSERT(value.IsSymbol()); |
| ASSERT(IsOriginal()); |
| untag()->set_name(value.ptr()); |
| } |
| |
| ObjectPtr Field::RawOwner() const { |
| if (IsOriginal()) { |
| return untag()->owner(); |
| } else { |
| const Field& field = Field::Handle(Original()); |
| ASSERT(field.IsOriginal()); |
| ASSERT(!Object::Handle(field.untag()->owner()).IsField()); |
| return field.untag()->owner(); |
| } |
| } |
| |
| ClassPtr Field::Owner() const { |
| const Field& field = Field::Handle(Original()); |
| ASSERT(field.IsOriginal()); |
| const Object& obj = Object::Handle(field.untag()->owner()); |
| if (obj.IsClass()) { |
| return Class::Cast(obj).ptr(); |
| } |
| ASSERT(obj.IsPatchClass()); |
| return PatchClass::Cast(obj).wrapped_class(); |
| } |
| |
| ScriptPtr Field::Script() const { |
| // NOTE(turnidge): If you update this function, you probably want to |
| // update Class::PatchFieldsAndFunctions() at the same time. |
| const Field& field = Field::Handle(Original()); |
| ASSERT(field.IsOriginal()); |
| const Object& obj = Object::Handle(field.untag()->owner()); |
| if (obj.IsClass()) { |
| return Class::Cast(obj).script(); |
| } |
| ASSERT(obj.IsPatchClass()); |
| return PatchClass::Cast(obj).script(); |
| } |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| KernelProgramInfoPtr Field::KernelProgramInfo() const { |
| const auto& owner = Object::Handle(RawOwner()); |
| if (owner.IsClass()) { |
| return Class::Cast(owner).KernelProgramInfo(); |
| } |
| return PatchClass::Cast(owner).kernel_program_info(); |
| } |
| #endif |
| |
| uint32_t Field::Hash() const { |
| return String::HashRawSymbol(name()); |
| } |
| |
| #if defined(DART_DYNAMIC_MODULES) |
| bool Field::is_declared_in_bytecode() const { |
| return Class::Handle(Owner()).is_declared_in_bytecode(); |
| } |
| #endif |
| |
| void Field::InheritKernelOffsetFrom(const Field& src) const { |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| UNREACHABLE(); |
| #else |
| StoreNonPointer(&untag()->kernel_offset_, src.untag()->kernel_offset_); |
| #endif |
| } |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| TypedDataViewPtr Field::KernelLibrary() const { |
| const auto& info = KernelProgramInfo::Handle(KernelProgramInfo()); |
| return info.KernelLibrary(KernelLibraryIndex()); |
| } |
| |
| intptr_t Field::KernelLibraryOffset() const { |
| const intptr_t kernel_library_index = KernelLibraryIndex(); |
| if (kernel_library_index == -1) return 0; |
| const auto& info = KernelProgramInfo::Handle(KernelProgramInfo()); |
| return info.KernelLibraryStartOffset(kernel_library_index); |
| } |
| |
| intptr_t Field::KernelLibraryIndex() const { |
| const Object& obj = Object::Handle(untag()->owner()); |
|   // During background JIT compilation, field objects are cloned and each
|   // clone points back to the original field through its owner slot.
| if (obj.IsField()) { |
| return Field::Cast(obj).KernelLibraryIndex(); |
| } else if (obj.IsClass()) { |
| const auto& lib = Library::Handle(Class::Cast(obj).library()); |
| return lib.kernel_library_index(); |
| } |
| ASSERT(obj.IsPatchClass()); |
| return PatchClass::Cast(obj).kernel_library_index(); |
| } |
| #endif // !defined(DART_PRECOMPILED_RUNTIME) |
| |
| void Field::SetFieldTypeSafe(const AbstractType& value) const { |
| ASSERT(IsOriginal()); |
| ASSERT(!value.IsNull()); |
| if (value.ptr() != type()) { |
| untag()->set_type(value.ptr()); |
| } |
| } |
| |
| // Called at finalization time.
| void Field::SetFieldType(const AbstractType& value) const { |
| DEBUG_ASSERT( |
| IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| SetFieldTypeSafe(value); |
| } |
| |
| FieldPtr Field::New() { |
| ASSERT(Object::field_class() != Class::null()); |
| return Object::Allocate<Field>(Heap::kOld); |
| } |
| |
| void Field::InitializeNew(const Field& result, |
| const String& name, |
| bool is_static, |
| bool is_final, |
| bool is_const, |
| bool is_reflectable, |
| bool is_late, |
| const Object& owner, |
| TokenPosition token_pos, |
| TokenPosition end_token_pos) { |
| result.set_name(name); |
| result.set_is_static(is_static); |
| if (is_static) { |
| result.set_field_id_unsafe(-1); |
| } else { |
| result.SetOffset(0, 0); |
| } |
| result.set_is_final(is_final); |
| result.set_is_const(is_const); |
| result.set_is_reflectable(is_reflectable); |
| result.set_is_late(is_late); |
| result.set_owner(owner); |
| result.set_token_pos(token_pos); |
| result.set_end_token_pos(end_token_pos); |
| result.set_has_nontrivial_initializer_unsafe(false); |
| result.set_has_initializer_unsafe(false); |
| // We will make unboxing decision once we read static type or |
| // in KernelLoader::ReadInferredType. |
| result.set_is_unboxed_unsafe(false); |
| result.set_initializer_changed_after_initialization(false); |
| NOT_IN_PRECOMPILED(result.set_kernel_offset(0)); |
| result.set_has_pragma(false); |
| result.set_static_type_exactness_state_unsafe( |
| StaticTypeExactnessState::NotTracking()); |
| auto isolate_group = IsolateGroup::Current(); |
| |
|   // Use field guards if they are enabled and the isolate group has never
|   // reloaded.
| // TODO(johnmccutchan): The reload case assumes the worst case (everything is |
| // dynamic and possibly null). Attempt to relax this later. |
| // |
| // Do not use field guards for late fields as late field initialization |
| // doesn't update guarded cid and length. |
| #if defined(PRODUCT) |
| const bool use_guarded_cid = |
| FLAG_precompiled_mode || (isolate_group->use_field_guards() && !is_late); |
| #else |
| const bool use_guarded_cid = |
| FLAG_precompiled_mode || |
| (isolate_group->use_field_guards() && |
| !isolate_group->HasAttemptedReload() && !is_late); |
| #endif  // defined(PRODUCT)
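|   // kIllegalCid means "no store observed yet"; kDynamicCid disables the
|   // guard entirely (see FieldGuardUpdater::ReviewGuards below).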
| result.set_guarded_cid_unsafe(use_guarded_cid ? kIllegalCid : kDynamicCid); |
|   result.set_is_nullable_unsafe(!use_guarded_cid);
| result.set_guarded_list_length_in_object_offset_unsafe( |
| Field::kUnknownLengthOffset); |
| // Presently, we only attempt to remember the list length for final fields. |
| if (is_final && use_guarded_cid) { |
| result.set_guarded_list_length_unsafe(Field::kUnknownFixedLength); |
| } else { |
| result.set_guarded_list_length_unsafe(Field::kNoFixedLength); |
| } |
| } |
| |
| FieldPtr Field::New(const String& name, |
| bool is_static, |
| bool is_final, |
| bool is_const, |
| bool is_reflectable, |
| bool is_late, |
| const Object& owner, |
| const AbstractType& type, |
| TokenPosition token_pos, |
| TokenPosition end_token_pos) { |
| ASSERT(!owner.IsNull()); |
| const Field& result = Field::Handle(Field::New()); |
| InitializeNew(result, name, is_static, is_final, is_const, is_reflectable, |
| is_late, owner, token_pos, end_token_pos); |
| result.SetFieldTypeSafe(type); |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| compiler::target::UnboxFieldIfSupported(result, type); |
| #endif |
| return result.ptr(); |
| } |
| |
| FieldPtr Field::NewTopLevel(const String& name, |
| bool is_final, |
| bool is_const, |
| bool is_late, |
| const Object& owner, |
| TokenPosition token_pos, |
| TokenPosition end_token_pos) { |
| ASSERT(!owner.IsNull()); |
| const Field& result = Field::Handle(Field::New()); |
|   InitializeNew(result, name, /*is_static=*/true,
|                 is_final, is_const, /*is_reflectable=*/true,
|                 is_late, owner, token_pos, end_token_pos);
| return result.ptr(); |
| } |
| |
| FieldPtr Field::Clone(const Field& original) const { |
| if (original.IsNull()) { |
| return Field::null(); |
| } |
| ASSERT(original.IsOriginal()); |
| Field& clone = Field::Handle(); |
|   // Using relaxed loading is fine because all concurrent field changes are
|   // guarded and will be reconciled during optimized code installation.
| clone ^= Object::Clone(*this, Heap::kOld, /*load_with_relaxed_atomics=*/true); |
| clone.SetOriginal(original); |
| clone.InheritKernelOffsetFrom(original); |
| return clone.ptr(); |
| } |
| |
| int32_t Field::SourceFingerprint() const { |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| if (is_declared_in_bytecode()) { |
| return 0; |
| } |
| return kernel::KernelSourceFingerprintHelper::CalculateFieldFingerprint( |
| *this); |
| #else |
| return 0; |
| #endif // !defined(DART_PRECOMPILED_RUNTIME) |
| } |
| |
| StringPtr Field::InitializingExpression() const { |
| UNREACHABLE(); |
| return String::null(); |
| } |
| |
| const char* Field::UserVisibleNameCString() const { |
| NoSafepointScope no_safepoint; |
| if (FLAG_show_internal_names) { |
| return String::Handle(name()).ToCString(); |
| } |
| return String::ScrubName(String::Handle(name()), |
| is_extension_member() || is_extension_type_member()); |
| } |
| |
| StringPtr Field::UserVisibleName() const { |
| if (FLAG_show_internal_names) { |
| return name(); |
| } |
| return Symbols::New( |
| Thread::Current(), |
| String::ScrubName(String::Handle(name()), |
| is_extension_member() || is_extension_type_member())); |
| } |
| |
| intptr_t Field::guarded_list_length() const { |
| return Smi::Value(untag()->guarded_list_length()); |
| } |
| |
| void Field::set_guarded_list_length_unsafe(intptr_t list_length) const { |
| ASSERT(IsOriginal()); |
| untag()->set_guarded_list_length(Smi::New(list_length)); |
| } |
| |
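| // The list length offset is stored in an int8_t biased by -kHeapObjectTag,
| // so the raw stored value can be added directly to a tagged object pointer
| // to address the length slot; the accessors below apply the bias.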
| intptr_t Field::guarded_list_length_in_object_offset() const { |
| return untag()->guarded_list_length_in_object_offset_ + kHeapObjectTag; |
| } |
| |
| void Field::set_guarded_list_length_in_object_offset_unsafe( |
| intptr_t list_length_offset) const { |
| ASSERT(IsOriginal()); |
| StoreNonPointer<int8_t, int8_t, std::memory_order_relaxed>( |
| &untag()->guarded_list_length_in_object_offset_, |
| static_cast<int8_t>(list_length_offset - kHeapObjectTag)); |
| ASSERT(guarded_list_length_in_object_offset() == list_length_offset); |
| } |
| |
| bool Field::NeedsSetter() const { |
|   // According to the Dart language specification, final fields don't have
|   // a setter, except for late final fields without an initializer.
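|   // For example (Dart):
|   //
|   //   class C {
|   //     late final int x;      // Needs a setter: no initializer.
|   //     late final int y = 0;  // Needs no setter.
|   //     final int z = 0;       // Needs no setter.
|   //   }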
| if (is_final()) { |
| // Late final fields without initializer always need a setter to check |
| // if they are already initialized. |
| if (is_late() && !has_initializer()) { |
| return true; |
| } |
| return false; |
| } |
| |
| // Instance non-final fields always need a setter. |
| if (!is_static()) { |
| return true; |
| } |
| |
| // Otherwise, setters for static fields can be omitted |
| // and fields can be accessed directly. |
| return false; |
| } |
| |
| bool Field::NeedsGetter() const { |
| // All instance fields need a getter. |
| if (!is_static()) return true; |
| |
| // Static fields also need a getter if they have a non-trivial initializer, |
|   // because they must be initialized lazily.
| if (has_nontrivial_initializer()) return true; |
| |
|   // Static late fields with no initializer also need a getter, to check
|   // whether they have already been initialized.
| return is_late() && !has_initializer(); |
| } |
| |
| const char* Field::ToCString() const { |
| NoSafepointScope no_safepoint; |
| if (IsNull()) { |
| return "Field: null"; |
| } |
| const char* kF0 = is_static() ? " static" : ""; |
| const char* kF1 = is_late() ? " late" : ""; |
| const char* kF2 = is_final() ? " final" : ""; |
| const char* kF3 = is_const() ? " const" : ""; |
| const char* kF4 = is_shared() ? " shared" : ""; |
| const char* field_name = String::Handle(name()).ToCString(); |
| const Class& cls = Class::Handle(Owner()); |
| const char* cls_name = String::Handle(cls.Name()).ToCString(); |
| return OS::SCreate(Thread::Current()->zone(), "Field <%s.%s>:%s%s%s%s%s", |
| cls_name, field_name, kF0, kF1, kF2, kF3, kF4); |
| } |
| |
| // Build a closure object that gets (or sets) the contents of a static
| // field f and cache the closure in a newly created static field
| // named #f (or ##f in the case of a setter).
| InstancePtr Field::AccessorClosure(bool make_setter) const { |
| Thread* thread = Thread::Current(); |
| Zone* zone = thread->zone(); |
| ASSERT(is_static()); |
| const Class& field_owner = Class::Handle(zone, Owner()); |
| |
| String& closure_name = String::Handle(zone, this->name()); |
| closure_name = Symbols::FromConcat(thread, Symbols::HashMark(), closure_name); |
| if (make_setter) { |
| closure_name = |
| Symbols::FromConcat(thread, Symbols::HashMark(), closure_name); |
| } |
| |
| Field& closure_field = Field::Handle(zone); |
| closure_field = field_owner.LookupStaticField(closure_name); |
| if (!closure_field.IsNull()) { |
| ASSERT(closure_field.is_static()); |
| const Instance& closure = |
| Instance::Handle(zone, Instance::RawCast(closure_field.StaticValue())); |
| ASSERT(!closure.IsNull()); |
| ASSERT(closure.IsClosure()); |
| return closure.ptr(); |
| } |
| |
| UNREACHABLE(); |
| return Instance::null(); |
| } |
| |
| InstancePtr Field::GetterClosure() const {
|   return AccessorClosure(/*make_setter=*/false);
| }
|
| InstancePtr Field::SetterClosure() const {
|   return AccessorClosure(/*make_setter=*/true);
| }
| |
| WeakArrayPtr Field::dependent_code() const { |
| DEBUG_ASSERT( |
| IsolateGroup::Current()->program_lock()->IsCurrentThreadReader()); |
| return untag()->dependent_code(); |
| } |
| |
| void Field::set_dependent_code(const WeakArray& array) const { |
| ASSERT(IsOriginal()); |
| DEBUG_ASSERT( |
| IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| untag()->set_dependent_code(array.ptr()); |
| } |
| |
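| // Maps a Field to the WeakArray of optimized Code objects compiled under
| // assumptions about its guard state, so that this code can be deoptimized
| // (or switched back to unoptimized code) when a guard is violated.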
| class FieldDependentArray : public WeakCodeReferences { |
| public: |
| explicit FieldDependentArray(const Field& field) |
| : WeakCodeReferences(WeakArray::Handle(field.dependent_code())), |
| field_(field) {} |
| |
| virtual void UpdateArrayTo(const WeakArray& value) { |
| field_.set_dependent_code(value); |
| } |
| |
| virtual void ReportDeoptimization(const Code& code) { |
| if (FLAG_trace_deoptimization || FLAG_trace_deoptimization_verbose) { |
| Function& function = Function::Handle(code.function()); |
| THR_Print("Deoptimizing %s because guard on field %s failed.\n", |
| function.ToFullyQualifiedCString(), field_.ToCString()); |
| } |
| } |
| |
| virtual void ReportSwitchingCode(const Code& code) { |
| if (FLAG_trace_deoptimization || FLAG_trace_deoptimization_verbose) { |
| Function& function = Function::Handle(code.function()); |
| THR_Print( |
| "Switching '%s' to unoptimized code because guard" |
| " on field '%s' was violated.\n", |
| function.ToFullyQualifiedCString(), field_.ToCString()); |
| } |
| } |
| |
| private: |
| const Field& field_; |
| DISALLOW_COPY_AND_ASSIGN(FieldDependentArray); |
| }; |
| |
| void Field::RegisterDependentCode(const Code& code) const { |
| ASSERT(IsOriginal()); |
| DEBUG_ASSERT(IsMutatorOrAtDeoptSafepoint()); |
| ASSERT(code.is_optimized()); |
| FieldDependentArray a(*this); |
| a.Register(code); |
| } |
| |
| void Field::DeoptimizeDependentCode(bool are_mutators_stopped) const { |
| DEBUG_ASSERT( |
| IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); |
| ASSERT(IsOriginal()); |
| FieldDependentArray a(*this); |
| if (FLAG_trace_deoptimization && a.HasCodes()) { |
| THR_Print("Deopt for field guard (field %s)\n", ToCString()); |
| } |
| a.DisableCode(are_mutators_stopped); |
| } |
| |
| bool Field::IsConsistentWith(const Field& other) const { |
| return (untag()->guarded_cid_ == other.untag()->guarded_cid_) && |
| (untag()->is_nullable_ == other.untag()->is_nullable_) && |
| (untag()->guarded_list_length() == |
| other.untag()->guarded_list_length()) && |
| (is_unboxed() == other.is_unboxed()) && |
| (static_type_exactness_state().Encode() == |
| other.static_type_exactness_state().Encode()); |
| } |
| |
| bool Field::IsUninitialized() const { |
| Thread* thread = Thread::Current(); |
| const FieldTable* field_table = thread->isolate()->field_table(); |
| const ObjectPtr raw_value = field_table->At(field_id()); |
| return raw_value == Object::sentinel().ptr(); |
| } |
| |
| FunctionPtr Field::EnsureInitializerFunction() const { |
| ASSERT(has_nontrivial_initializer()); |
| ASSERT(IsOriginal()); |
| Thread* thread = Thread::Current(); |
| Zone* zone = thread->zone(); |
| Function& initializer = Function::Handle(zone, InitializerFunction()); |
| if (initializer.IsNull()) { |
| #if defined(DART_PRECOMPILED_RUNTIME) && !defined(DART_DYNAMIC_MODULES) |
| UNREACHABLE(); |
| #else |
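|     // Standard double-checked locking: the unlocked read above pairs with
|     // the release store in SetInitializerFunction().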
| SafepointMutexLocker ml( |
| thread->isolate_group()->initializer_functions_mutex()); |
| // Double check after grabbing the lock. |
| initializer = InitializerFunction(); |
| if (initializer.IsNull()) { |
| initializer = CreateFieldInitializerFunction(thread); |
| } |
| #endif |
| } |
| return initializer.ptr(); |
| } |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) || defined(DART_DYNAMIC_MODULES) |
| |
| FunctionPtr Field::CreateFieldInitializerFunction(Thread* thread) const { |
| Zone* zone = thread->zone(); |
| ASSERT(InitializerFunction() == Function::null()); |
| |
| String& init_name = String::Handle(zone, name()); |
| init_name = Symbols::FromConcat(thread, Symbols::InitPrefix(), init_name); |
| |
| const auto& field_owner = Class::Handle(zone, Owner()); |
| |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| const auto& initializer_owner = Class::Handle(zone, field_owner.ptr()); |
| #else |
| // Static field initializers are not added as members of their owning class, |
| // so they must be preemptively given a patch class to avoid the meaning of |
| // their kernel/token position changing during a reload. Compare |
| // Class::PatchFieldsAndFunctions(). |
|   // This might also be necessary for lazy computation of local variable
|   // descriptors. Compare https://codereview.chromium.org//1317753004
| const auto& script = Script::Handle(zone, Script()); |
| const auto& kernel_program_info = |
| KernelProgramInfo::Handle(zone, KernelProgramInfo()); |
| const auto& initializer_owner = PatchClass::Handle( |
| zone, PatchClass::New(field_owner, kernel_program_info, script)); |
| if (!is_declared_in_bytecode()) { |
| const Library& lib = Library::Handle(zone, field_owner.library()); |
| initializer_owner.set_kernel_library_index(lib.kernel_library_index()); |
| } |
| #endif |
| |
| // Create a static initializer. |
| FunctionType& signature = FunctionType::Handle(zone, FunctionType::New()); |
| const Function& initializer_fun = Function::Handle( |
| zone, |
| Function::New(signature, init_name, UntaggedFunction::kFieldInitializer, |
| is_static(), // is_static |
| false, // is_const |
| false, // is_abstract |
| false, // is_external |
| false, // is_native |
| initializer_owner, TokenPosition::kNoSource)); |
| if (!is_static()) { |
| signature.set_num_fixed_parameters(1); |
| signature.set_parameter_types( |
| Array::Handle(zone, Array::New(1, Heap::kOld))); |
| signature.SetParameterTypeAt( |
| 0, AbstractType::Handle(zone, field_owner.DeclarationType())); |
| NOT_IN_PRECOMPILED(initializer_fun.CreateNameArray()); |
| NOT_IN_PRECOMPILED(initializer_fun.SetParameterNameAt(0, Symbols::This())); |
| } |
| signature.set_result_type(AbstractType::Handle(zone, type())); |
| initializer_fun.set_is_reflectable(false); |
| initializer_fun.set_is_inlinable(false); |
| NOT_IN_PRECOMPILED(initializer_fun.set_token_pos(token_pos())); |
| NOT_IN_PRECOMPILED(initializer_fun.set_end_token_pos(end_token_pos())); |
| initializer_fun.set_accessor_field(*this); |
| initializer_fun.InheritKernelOffsetFrom(*this); |
| initializer_fun.set_is_extension_member(is_extension_member()); |
| initializer_fun.set_is_extension_type_member(is_extension_type_member()); |
| |
| signature ^= ClassFinalizer::FinalizeType(signature); |
| initializer_fun.SetSignature(signature); |
| |
| SetInitializerFunction(initializer_fun); |
| return initializer_fun.ptr(); |
| } |
| |
| void Field::SetInitializerFunction(const Function& initializer) const { |
| ASSERT(IsOriginal()); |
| ASSERT(IsolateGroup::Current() |
| ->initializer_functions_mutex() |
| ->IsOwnedByCurrentThread()); |
| // We have to ensure that all stores into the initializer function object |
| // happen before releasing the pointer to the initializer as it may be |
| // accessed without grabbing the lock. |
| untag()->set_initializer_function<std::memory_order_release>( |
| initializer.ptr()); |
| } |
| |
| #endif // !defined(DART_PRECOMPILED_RUNTIME) || defined(DART_DYNAMIC_MODULES) |
| |
| bool Field::HasInitializerFunction() const { |
| return untag()->initializer_function() != Function::null(); |
| } |
| |
| ErrorPtr Field::InitializeInstance(const Instance& instance) const { |
| ASSERT(IsOriginal()); |
| ASSERT(is_instance()); |
| ASSERT(instance.GetField(*this) == Object::sentinel().ptr()); |
| Object& value = Object::Handle(); |
| |
| if (has_nontrivial_initializer()) { |
| const Function& initializer = Function::Handle(EnsureInitializerFunction()); |
| const Array& args = Array::Handle(Array::New(1)); |
| args.SetAt(0, instance); |
| value = DartEntry::InvokeFunction(initializer, args); |
| if (!value.IsNull() && value.IsError()) { |
| return Error::Cast(value).ptr(); |
| } |
| } else { |
| if (is_late() && !has_initializer()) { |
| Exceptions::ThrowLateFieldNotInitialized(String::Handle(name())); |
| UNREACHABLE(); |
| } |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| UNREACHABLE(); |
| #else |
| // Our trivial initializer is `null`. Any non-`null` initializer is |
| // non-trivial (see `KernelLoader::CheckForInitializer()`). |
| value = Object::null(); |
| #endif |
| } |
| ASSERT(value.IsNull() || value.IsInstance()); |
| if (is_late() && is_final() && |
| (instance.GetField(*this) != Object::sentinel().ptr())) { |
| Exceptions::ThrowLateFieldAssignedDuringInitialization( |
| String::Handle(name())); |
| UNREACHABLE(); |
| } |
| instance.SetField(*this, value); |
| return Error::null(); |
| } |
| |
| ErrorPtr Field::InitializeStatic() const { |
| ASSERT(IsOriginal()); |
| ASSERT(is_static()); |
| if (StaticValue() == Object::sentinel().ptr()) { |
| ASSERT(is_late()); |
| auto& value = Object::Handle(); |
| if (!has_initializer()) { |
| Exceptions::ThrowLateFieldNotInitialized(String::Handle(name())); |
| UNREACHABLE(); |
| } |
| value = EvaluateInitializer(); |
| if (value.IsError()) { |
| return Error::Cast(value).ptr(); |
| } |
| if (is_final() && (StaticValue() != Object::sentinel().ptr())) { |
| Exceptions::ThrowLateFieldAssignedDuringInitialization( |
| String::Handle(name())); |
| UNREACHABLE(); |
| } |
| ASSERT(value.IsNull() || value.IsInstance()); |
| SetStaticValue(value.IsNull() ? Instance::null_instance() |
| : Instance::Cast(value)); |
| return Error::null(); |
| } |
| return Error::null(); |
| } |
| |
| ObjectPtr Field::StaticConstFieldValue() const { |
| ASSERT(is_static() && |
| (is_const() || (is_final() && has_trivial_initializer()))); |
| |
| auto thread = Thread::Current(); |
| auto zone = thread->zone(); |
| auto initial_field_table = thread->isolate_group()->initial_field_table(); |
| |
| // We can safely cache the value of the static const field in the initial |
| // field table. |
| ASSERT(!is_shared()); |
| auto& value = Object::Handle( |
| zone, initial_field_table->At(field_id(), /*concurrent_use=*/true)); |
| if (value.ptr() == Object::sentinel().ptr()) { |
| // Fields with trivial initializers get their initial value |
| // eagerly when they are registered. |
| ASSERT(is_const()); |
| ASSERT(has_initializer()); |
| ASSERT(has_nontrivial_initializer()); |
| value = EvaluateInitializer(); |
| if (!value.IsError()) { |
| ASSERT(value.IsNull() || value.IsInstance()); |
| SetStaticConstFieldValue(value.IsNull() ? Instance::null_instance() |
| : Instance::Cast(value)); |
| } |
| } |
| return value.ptr(); |
| } |
| |
| void Field::SetStaticConstFieldValue(const Instance& value, |
| bool assert_initializing_store) const { |
| ASSERT(is_static()); |
| ASSERT(!is_shared()); |
| auto thread = Thread::Current(); |
| auto initial_field_table = thread->isolate_group()->initial_field_table(); |
| |
| SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock()); |
| ASSERT(initial_field_table->At(field_id()) == Object::sentinel().ptr() || |
| initial_field_table->At(field_id()) == value.ptr() || |
| !assert_initializing_store); |
| initial_field_table->SetAt(field_id(), |
| value.IsNull() ? Instance::null_instance().ptr() |
| : Instance::Cast(value).ptr(), |
| /*concurrent_use=*/true); |
| } |
| |
| ObjectPtr Field::EvaluateInitializer() const { |
| ASSERT(Thread::Current()->IsDartMutatorThread()); |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| if (is_static() && is_const()) { |
| return kernel::EvaluateStaticConstFieldInitializer(*this); |
| } |
| #endif // !defined(DART_PRECOMPILED_RUNTIME) |
| |
| const Function& initializer = Function::Handle(EnsureInitializerFunction()); |
| return DartEntry::InvokeFunction(initializer, Object::empty_array()); |
| } |
| |
| static intptr_t GetListLength(const Object& value) { |
| if (value.IsTypedDataBase()) { |
| return TypedDataBase::Cast(value).Length(); |
| } else if (value.IsArray()) { |
| return Array::Cast(value).Length(); |
| } else if (value.IsGrowableObjectArray()) { |
| // List length is variable. |
| return Field::kNoFixedLength; |
| } |
| return Field::kNoFixedLength; |
| } |
| |
| static intptr_t GetListLengthOffset(intptr_t cid) { |
| if (IsTypedDataClassId(cid) || IsTypedDataViewClassId(cid) || |
| IsUnmodifiableTypedDataViewClassId(cid) || |
| IsExternalTypedDataClassId(cid)) { |
| return TypedData::length_offset(); |
| } else if (cid == kArrayCid || cid == kImmutableArrayCid) { |
| return Array::length_offset(); |
| } else if (cid == kGrowableObjectArrayCid) { |
| // List length is variable. |
| return Field::kUnknownLengthOffset; |
| } |
| return Field::kUnknownLengthOffset; |
| } |
| |
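| // Examples of the strings produced below (class names are illustrative):
| //   "<?>"                 no store observed yet (kIllegalCid)
| //   "<*>"                 guard abandoned (kDynamicCid)
| //   "<nullable _Double>"  monomorphic, null was also stored
| //   "<_List [3 @8]>"      final list field with fixed length 3 at offset 8
| // When exactness tracking is active, a " {...}" suffix is appended.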
| const char* Field::GuardedPropertiesAsCString() const { |
| if (guarded_cid() == kIllegalCid) { |
| return "<?>"; |
| } else if (guarded_cid() == kDynamicCid) { |
| ASSERT(!static_type_exactness_state().IsExactOrUninitialized()); |
| return "<*>"; |
| } |
| |
| Zone* zone = Thread::Current()->zone(); |
| |
| const char* exactness = ""; |
| if (static_type_exactness_state().IsTracking()) { |
| exactness = |
| zone->PrintToString(" {%s}", static_type_exactness_state().ToCString()); |
| } |
| |
| const Class& cls = |
| Class::Handle(IsolateGroup::Current()->class_table()->At(guarded_cid())); |
| const char* class_name = String::Handle(cls.Name()).ToCString(); |
| |
| if (IsBuiltinListClassId(guarded_cid()) && !is_nullable() && is_final()) { |
| ASSERT(guarded_list_length() != kUnknownFixedLength); |
| if (guarded_list_length() == kNoFixedLength) { |
| return zone->PrintToString("<%s [*]%s>", class_name, exactness); |
| } else { |
| return zone->PrintToString( |
| "<%s [%" Pd " @%" Pd "]%s>", class_name, guarded_list_length(), |
| guarded_list_length_in_object_offset(), exactness); |
| } |
| } |
| |
| return zone->PrintToString("<%s %s%s>", |
| is_nullable() ? "nullable" : "not-nullable", |
| class_name, exactness); |
| } |
| |
| void Field::InitializeGuardedListLengthInObjectOffset(bool unsafe) const { |
| auto setter = unsafe ? &Field::set_guarded_list_length_in_object_offset_unsafe |
| : &Field::set_guarded_list_length_in_object_offset; |
| ASSERT(IsOriginal()); |
| if (needs_length_check() && |
| (guarded_list_length() != Field::kUnknownFixedLength)) { |
| const intptr_t offset = GetListLengthOffset(guarded_cid()); |
| (this->*setter)(offset); |
| ASSERT(offset != Field::kUnknownLengthOffset); |
| } else { |
| (this->*setter)(Field::kUnknownLengthOffset); |
| } |
| } |
| |
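| // Computes the new guard state implied by storing `value` into `field`.
| // The constructor accumulates the prospective state into the private fields
| // below (via ReviewGuards/ReviewExactnessState) without touching the Field;
| // Field::RecordStore() then applies it through DoUpdate() with all mutators
| // stopped, so optimized code never observes a partially updated guard.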
| class FieldGuardUpdater { |
| public: |
| FieldGuardUpdater(const Field* field, const Object& value); |
| |
| bool IsUpdateNeeded() { |
| return does_guarded_cid_need_update_ || does_is_nullable_need_update_ || |
| does_list_length_and_offset_need_update_ || |
| does_static_type_exactness_state_need_update_; |
| } |
| void DoUpdate(); |
| |
| private: |
| void ReviewExactnessState(); |
| void ReviewGuards(); |
| |
| intptr_t guarded_cid() { return guarded_cid_; } |
| void set_guarded_cid(intptr_t guarded_cid) { |
| guarded_cid_ = guarded_cid; |
| does_guarded_cid_need_update_ = true; |
| } |
| |
| bool is_nullable() { return is_nullable_; } |
| void set_is_nullable(bool is_nullable) { |
| is_nullable_ = is_nullable; |
| does_is_nullable_need_update_ = true; |
| } |
| |
| intptr_t guarded_list_length() { return list_length_; } |
| void set_guarded_list_length_and_offset( |
| intptr_t list_length, |
| intptr_t list_length_in_object_offset) { |
| list_length_ = list_length; |
| list_length_in_object_offset_ = list_length_in_object_offset; |
| does_list_length_and_offset_need_update_ = true; |
| } |
| |
| StaticTypeExactnessState static_type_exactness_state() { |
| return static_type_exactness_state_; |
| } |
| void set_static_type_exactness_state(StaticTypeExactnessState state) { |
| static_type_exactness_state_ = state; |
| does_static_type_exactness_state_need_update_ = true; |
| } |
| |
| const Field* field_; |
| const Object& value_; |
| |
| intptr_t guarded_cid_; |
| bool is_nullable_; |
| intptr_t list_length_; |
| intptr_t list_length_in_object_offset_; |
| StaticTypeExactnessState static_type_exactness_state_; |
| |
| bool does_guarded_cid_need_update_ = false; |
| bool does_is_nullable_need_update_ = false; |
| bool does_list_length_and_offset_need_update_ = false; |
| bool does_static_type_exactness_state_need_update_ = false; |
| }; |
| |
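| // Guarded cid transitions performed below:
| //
| //   kIllegalCid  --first store-->     cid of the value (monomorphic;
| //                                     nullable iff the value was null)
| //   monomorphic  --store of null-->   same cid, is_nullable = true
| //   kNullCid     --non-null store-->  cid of the value, still nullable
| //   monomorphic  --other cid-->       kDynamicCid, is_nullable = true
| //
| // kDynamicCid is terminal: the guard is effectively disabled.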
| void FieldGuardUpdater::ReviewGuards() { |
| ASSERT(field_->IsOriginal()); |
| const intptr_t cid = value_.GetClassId(); |
| |
| if (guarded_cid() == kIllegalCid) { |
| set_guarded_cid(cid); |
| set_is_nullable(cid == kNullCid); |
| |
| // Start tracking length if needed. |
| ASSERT((guarded_list_length() == Field::kUnknownFixedLength) || |
| (guarded_list_length() == Field::kNoFixedLength)); |
| if (field_->needs_length_check()) { |
| ASSERT(guarded_list_length() == Field::kUnknownFixedLength); |
| set_guarded_list_length_and_offset(GetListLength(value_), |
| GetListLengthOffset(cid)); |
| } |
| |
| if (FLAG_trace_field_guards) { |
| THR_Print(" => %s\n", field_->GuardedPropertiesAsCString()); |
| } |
| return; |
| } |
| |
| if ((cid == guarded_cid()) || ((cid == kNullCid) && is_nullable())) { |
|     // Class id of the assigned value matches the expected class id and
|     // nullability.
|
|     // If we are tracking the list length, check whether it still matches.
| if (field_->needs_length_check() && |
| (guarded_list_length() != GetListLength(value_))) { |
| ASSERT(guarded_list_length() != Field::kUnknownFixedLength); |
| set_guarded_list_length_and_offset(Field::kNoFixedLength, |
| Field::kUnknownLengthOffset); |
| return; |
| } |
| |
| // Everything matches. |
| return; |
| } |
| |
| if ((cid == kNullCid) && !is_nullable()) { |
| // Assigning null value to a non-nullable field makes it nullable. |
| set_is_nullable(true); |
| } else if ((cid != kNullCid) && (guarded_cid() == kNullCid)) { |
| // Assigning non-null value to a field that previously contained only null |
| // turns it into a nullable field with the given class id. |
| ASSERT(is_nullable()); |
| set_guarded_cid(cid); |
| } else { |
| // Give up on tracking class id of values contained in this field. |
| ASSERT(guarded_cid() != cid); |
| set_guarded_cid(kDynamicCid); |
| set_is_nullable(true); |
| } |
| |
|   // If we were tracking the list length, drop the collected feedback.
| if (field_->needs_length_check()) { |
| ASSERT(guarded_list_length() != Field::kUnknownFixedLength); |
| set_guarded_list_length_and_offset(Field::kNoFixedLength, |
| Field::kUnknownLengthOffset); |
| } |
| } |
| |
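| // Performs a depth-first search from this class over its superclass chain
| // and, unless consider_only_super_classes is true, its implemented
| // interfaces, looking for an instantiation of cls. On success, if path is
| // non-null it holds the chain of supertypes leading from this class to
| // that instantiation.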
| bool Class::FindInstantiationOf(Zone* zone, |
| const Class& cls, |
| GrowableArray<const Type*>* path, |
| bool consider_only_super_classes) const { |
| ASSERT(cls.is_type_finalized()); |
| if (cls.ptr() == ptr()) { |
| return true; // Found instantiation. |
| } |
| |
| Class& cls2 = Class::Handle(zone); |
| Type& super = Type::Handle(zone, super_type()); |
| if (!super.IsNull() && !super.IsObjectType()) { |
| cls2 = super.type_class(); |
| if (path != nullptr) { |
| path->Add(&super); |
| } |
| if (cls2.FindInstantiationOf(zone, cls, path, |
| consider_only_super_classes)) { |
| return true; // Found instantiation. |
| } |
| if (path != nullptr) { |
| path->RemoveLast(); |
| } |
| } |
| |
| if (!consider_only_super_classes) { |
| Array& super_interfaces = Array::Handle(zone, interfaces()); |
| for (intptr_t i = 0; i < super_interfaces.Length(); i++) { |
| super ^= super_interfaces.At(i); |
| cls2 = super.type_class(); |
| if (path != nullptr) { |
| path->Add(&super); |
| } |
| if (cls2.FindInstantiationOf(zone, cls, path)) { |
| return true; // Found instantiation. |
| } |
| if (path != nullptr) { |
| path->RemoveLast(); |
| } |
| } |
| } |
| |
| return false; // Not found. |
| } |
| |
| bool Class::FindInstantiationOf(Zone* zone, |
| const Type& type, |
| GrowableArray<const Type*>* path, |
| bool consider_only_super_classes) const { |
| return FindInstantiationOf(zone, Class::Handle(zone, type.type_class()), path, |
| consider_only_super_classes); |
| } |
| |
| TypePtr Class::GetInstantiationOf(Zone* zone, const Class& cls) const { |
| if (ptr() == cls.ptr()) { |
| return DeclarationType(); |
| } |
| if (FindInstantiationOf(zone, cls, /*consider_only_super_classes=*/true)) { |
| // Since [cls] is a superclass of [this], use [cls]'s declaration type. |
| return cls.DeclarationType(); |
| } |
| const auto& decl_type = Type::Handle(zone, DeclarationType()); |
| GrowableArray<const Type*> path(zone, 0); |
| if (!FindInstantiationOf(zone, cls, &path)) { |
| return Type::null(); |
| } |
| Thread* thread = Thread::Current(); |
| ASSERT(!path.is_empty()); |
| auto& calculated_type = Type::Handle(zone, decl_type.ptr()); |
| auto& calculated_type_class = |
| Class::Handle(zone, calculated_type.type_class()); |
| auto& calculated_type_args = |
| TypeArguments::Handle(zone, calculated_type.arguments()); |
| calculated_type_args = calculated_type_args.ToInstantiatorTypeArguments( |
| thread, calculated_type_class); |
| for (auto* const type : path) { |
| calculated_type ^= type->ptr(); |
| if (!calculated_type.IsInstantiated()) { |
| calculated_type ^= calculated_type.InstantiateFrom( |
| calculated_type_args, Object::null_type_arguments(), kAllFree, |
| Heap::kNew); |
| } |
| calculated_type_class = calculated_type.type_class(); |
| calculated_type_args = calculated_type.arguments(); |
| calculated_type_args = calculated_type_args.ToInstantiatorTypeArguments( |
| thread, calculated_type_class); |
| } |
| ASSERT_EQUAL(calculated_type.type_class_id(), cls.id()); |
| return calculated_type.ptr(); |
| } |
| |
| TypePtr Class::GetInstantiationOf(Zone* zone, const Type& type) const { |
| return GetInstantiationOf(zone, Class::Handle(zone, type.type_class())); |
| } |
| |
| void Field::SetStaticValue(const Object& value) const { |
| ASSERT(!is_shared()); |
| auto thread = Thread::Current(); |
| ASSERT(thread->IsDartMutatorThread()); |
| ASSERT(value.IsNull() || value.IsSentinel() || value.IsInstance()); |
| |
| ASSERT(is_static()); // Valid only for static dart fields. |
| const intptr_t id = field_id(); |
| ASSERT(id >= 0); |
| |
| SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock()); |
| thread->isolate()->field_table()->SetAt(id, value.ptr()); |
| } |
| |
| static StaticTypeExactnessState TrivialTypeExactnessFor(const Class& cls) { |
| const intptr_t type_arguments_offset = cls.host_type_arguments_field_offset(); |
| ASSERT(type_arguments_offset != Class::kNoTypeArguments); |
| if (StaticTypeExactnessState::CanRepresentAsTriviallyExact( |
| type_arguments_offset / kCompressedWordSize)) { |
| return StaticTypeExactnessState::TriviallyExact(type_arguments_offset / |
| kCompressedWordSize); |
| } else { |
| return StaticTypeExactnessState::NotExact(); |
| } |
| } |
| |
| static const char* SafeTypeArgumentsToCString(const TypeArguments& args) { |
| return (args.ptr() == TypeArguments::null()) ? "<null>" : args.ToCString(); |
| } |
| |
| StaticTypeExactnessState StaticTypeExactnessState::Compute( |
| const Type& static_type, |
| const Instance& value, |
| bool print_trace /* = false */) { |
| ASSERT(!value.IsNull()); // Should be handled by the caller. |
| ASSERT(value.ptr() != Object::sentinel().ptr()); |
| |
| Thread* thread = Thread::Current(); |
| Zone* const zone = thread->zone(); |
| const TypeArguments& static_type_args = |
| TypeArguments::Handle(zone, static_type.GetInstanceTypeArguments(thread)); |
| |
| TypeArguments& args = TypeArguments::Handle(zone); |
| |
| ASSERT(static_type.IsFinalized()); |
| const Class& cls = Class::Handle(zone, value.clazz()); |
| GrowableArray<const Type*> path(10); |
| |
| bool is_super_class = true; |
| if (!cls.FindInstantiationOf(zone, static_type, &path, |
| /*consider_only_super_classes=*/true)) { |
| is_super_class = false; |
| bool found_super_interface = |
| cls.FindInstantiationOf(zone, static_type, &path); |
| ASSERT(found_super_interface); |
| } |
| |
| // Trivial case: field has type G<T0, ..., Tn> and value has type |
| // G<U0, ..., Un>. Check if type arguments match. |
| if (path.is_empty()) { |
| ASSERT(cls.ptr() == static_type.type_class()); |
| args = value.GetTypeArguments(); |
| // TODO(dartbug.com/34170) Evaluate if comparing relevant subvectors (that |
| // disregards superclass own arguments) improves precision of the |
| // tracking. |
| if (args.ptr() == static_type_args.ptr()) { |
| return TrivialTypeExactnessFor(cls); |
| } |
| |
| if (print_trace) { |
| THR_Print(" expected %s got %s type arguments\n", |
| SafeTypeArgumentsToCString(static_type_args), |
| SafeTypeArgumentsToCString(args)); |
| } |
| return StaticTypeExactnessState::NotExact(); |
| } |
| |
| // Value has type C<U0, ..., Un> and field has type G<T0, ..., Tn> and G != C. |
| // Compute C<X0, ..., Xn> at G (Xi are free type arguments). |
| // Path array contains a chain of immediate supertypes S0 <: S1 <: ... Sn, |
| // such that S0 is an immediate supertype of C and Sn is G<...>. |
| // Each Si might depend on type parameters of the previous supertype S{i-1}. |
| // To compute C<X0, ..., Xn> at G we walk the chain backwards and |
| // instantiate Si using type parameters of S{i-1} which gives us a type |
| // depending on type parameters of S{i-2}. |
| Type& type = Type::Handle(zone, path.Last()->ptr()); |
| for (intptr_t i = path.length() - 2; (i >= 0) && !type.IsInstantiated(); |
| i--) { |
| args = path[i]->GetInstanceTypeArguments(thread, /*canonicalize=*/false); |
| type ^= type.InstantiateFrom(args, TypeArguments::null_type_arguments(), |
| kAllFree, Heap::kNew); |
| } |
| |
| if (type.IsInstantiated()) { |
| // C<X0, ..., Xn> at G is fully instantiated and does not depend on |
| // Xi. In this case just check if type arguments match. |
| args = type.GetInstanceTypeArguments(thread, /*canonicalize=*/false); |
| if (args.Equals(static_type_args)) { |
| return is_super_class ? StaticTypeExactnessState::HasExactSuperClass() |
| : StaticTypeExactnessState::HasExactSuperType(); |
| } |
| |
| if (print_trace) { |
| THR_Print(" expected %s got %s type arguments\n", |
| SafeTypeArgumentsToCString(static_type_args), |
| SafeTypeArgumentsToCString(args)); |
| } |
| |
| return StaticTypeExactnessState::NotExact(); |
| } |
| |
|   // The most complicated case: C<X0, ..., Xn> at G depends on the Xi
|   // values. To compare type arguments we would need to instantiate it
|   // fully from the value's type arguments and compare the result with
|   // <T0, ..., Tn>. However, this would complicate the fast path in the
|   // native code. To avoid this complication we optimize for the trivial
|   // case: we check if C<X0, ..., Xn> at G is exactly G<X0, ..., Xn>, which
|   // means we can simply compare the value's type arguments (<U0, ..., Un>)
|   // to the field's type arguments (<T0, ..., Tn>) to establish whether the
|   // field type is exact.
| ASSERT(cls.IsGeneric()); |
| const intptr_t num_type_params = cls.NumTypeParameters(); |
| bool trivial_case = |
| (num_type_params == |
| Class::Handle(zone, static_type.type_class()).NumTypeParameters()) && |
| (value.GetTypeArguments() == static_type_args.ptr()); |
| if (!trivial_case && FLAG_trace_field_guards) { |
| THR_Print("Not a simple case: %" Pd " vs %" Pd |
| " type parameters, %s vs %s type arguments\n", |
| num_type_params, |
| Class::Handle(zone, static_type.type_class()).NumTypeParameters(), |
| SafeTypeArgumentsToCString( |
| TypeArguments::Handle(zone, value.GetTypeArguments())), |
| SafeTypeArgumentsToCString(static_type_args)); |
| } |
| |
| AbstractType& type_arg = AbstractType::Handle(zone); |
| args = type.GetInstanceTypeArguments(thread, /*canonicalize=*/false); |
| for (intptr_t i = 0; (i < num_type_params) && trivial_case; i++) { |
| type_arg = args.TypeAt(i); |
| if (!type_arg.IsTypeParameter() || |
| (TypeParameter::Cast(type_arg).index() != i)) { |
| if (FLAG_trace_field_guards) { |
|         THR_Print("  => encountered %s at index %" Pd "\n",
|                   type_arg.ToCString(), i);
| } |
| trivial_case = false; |
| } |
| } |
| |
| return trivial_case ? TrivialTypeExactnessFor(cls) |
| : StaticTypeExactnessState::NotExact(); |
| } |
| |
| const char* StaticTypeExactnessState::ToCString() const { |
| if (!IsTracking()) { |
| return "not-tracking"; |
| } else if (!IsExactOrUninitialized()) { |
| return "not-exact"; |
| } else if (IsTriviallyExact()) { |
| return Thread::Current()->zone()->PrintToString( |
| "trivially-exact(%hhu)", GetTypeArgumentsOffsetInWords()); |
| } else if (IsHasExactSuperType()) { |
| return "has-exact-super-type"; |
| } else if (IsHasExactSuperClass()) { |
| return "has-exact-super-class"; |
| } else { |
| ASSERT(IsUninitialized()); |
| return "uninitialized-exactness"; |
| } |
| } |
| |
| void FieldGuardUpdater::ReviewExactnessState() { |
| if (!static_type_exactness_state().IsExactOrUninitialized()) { |
| // Nothing to update. |
| return; |
| } |
| |
| if (guarded_cid() == kDynamicCid) { |
| if (FLAG_trace_field_guards) { |
| THR_Print( |
| " => switching off exactness tracking because guarded cid is " |
| "dynamic\n"); |
| } |
| set_static_type_exactness_state(StaticTypeExactnessState::NotExact()); |
| return; |
| } |
| |
| // If we are storing null into a field or we have an exact super type |
| // then there is nothing to do. |
| if (value_.IsNull() || static_type_exactness_state().IsHasExactSuperType() || |
| static_type_exactness_state().IsHasExactSuperClass()) { |
| return; |
| } |
| |
|   // If we are storing a non-null value into a field that is considered
|   // to be trivially exact, then we need to check whether the value has an
|   // appropriate type.
| ASSERT(guarded_cid() != kNullCid); |
| |
| const Type& field_type = Type::Cast(AbstractType::Handle(field_->type())); |
| const Instance& instance = Instance::Cast(value_); |
| |
| if (static_type_exactness_state().IsTriviallyExact()) { |
| const TypeArguments& args = |
| TypeArguments::Handle(instance.GetTypeArguments()); |
| const TypeArguments& field_type_args = TypeArguments::Handle( |
| field_type.GetInstanceTypeArguments(Thread::Current())); |
| if (args.ptr() == field_type_args.ptr()) { |
| return; |
| } |
| |
| if (FLAG_trace_field_guards) { |
| THR_Print(" expected %s got %s type arguments\n", |
| field_type_args.ToCString(), args.ToCString()); |
| } |
| |
| set_static_type_exactness_state(StaticTypeExactnessState::NotExact()); |
| return; |
| } |
| |
| ASSERT(static_type_exactness_state().IsUninitialized()); |
| set_static_type_exactness_state(StaticTypeExactnessState::Compute( |
| field_type, instance, FLAG_trace_field_guards)); |
| return; |
| } |
| |
| FieldGuardUpdater::FieldGuardUpdater(const Field* field, const Object& value) |
| : field_(field), |
| value_(value), |
| guarded_cid_(field->guarded_cid()), |
| is_nullable_(field->is_nullable()), |
| list_length_(field->guarded_list_length()), |
| list_length_in_object_offset_( |
| field->guarded_list_length_in_object_offset()), |
| static_type_exactness_state_(field->static_type_exactness_state()) { |
| ReviewGuards(); |
| ReviewExactnessState(); |
| } |
| |
| void FieldGuardUpdater::DoUpdate() { |
| if (does_guarded_cid_need_update_) { |
| field_->set_guarded_cid(guarded_cid_); |
| } |
| if (does_is_nullable_need_update_) { |
| field_->set_is_nullable(is_nullable_); |
| } |
| if (does_list_length_and_offset_need_update_) { |
| field_->set_guarded_list_length(list_length_); |
| field_->set_guarded_list_length_in_object_offset( |
| list_length_in_object_offset_); |
| } |
| if (does_static_type_exactness_state_need_update_) { |
| field_->set_static_type_exactness_state(static_type_exactness_state_); |
| } |
| } |
| |
| void Field::RecordStore(const Object& value) const { |
| ASSERT(IsOriginal()); |
| Thread* const thread = Thread::Current(); |
| if (!thread->isolate_group()->use_field_guards()) { |
| return; |
| } |
| |
| // We should never try to record a sentinel. |
| ASSERT(value.ptr() != Object::sentinel().ptr()); |
| |
| SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock()); |
| if ((guarded_cid() == kDynamicCid) || |
| (is_nullable() && value.ptr() == Object::null())) { |
| // Nothing to do: the field is not guarded or we are storing null into |
| // a nullable field. |
| return; |
| } |
| |
| if (FLAG_trace_field_guards) { |
| THR_Print("Store %s %s <- %s\n", ToCString(), GuardedPropertiesAsCString(), |
| value.ToCString()); |
| } |
| |
| FieldGuardUpdater updater(this, value); |
| if (updater.IsUpdateNeeded()) { |
| if (FLAG_trace_field_guards) { |
| THR_Print(" => %s\n", GuardedPropertiesAsCString()); |
| } |
|     // Nobody else could have updated the guard state since we are holding
|     // the program write lock. But we need to stop mutators while updating
|     // the guard state, because optimized code compiled under the old
|     // assumptions must not keep running once the guards change.
| auto isolate_group = IsolateGroup::Current(); |
| isolate_group->RunWithStoppedMutators([&]() { |
| updater.DoUpdate(); |
| DeoptimizeDependentCode(/*are_mutators_stopped=*/true); |
| }); |
| } |
| } |
| |
| void Field::ForceDynamicGuardedCidAndLength() const { |
| if (!is_unboxed()) { |
| set_guarded_cid(kDynamicCid); |
| set_is_nullable(true); |
| } |
| set_guarded_list_length(Field::kNoFixedLength); |
| set_guarded_list_length_in_object_offset(Field::kUnknownLengthOffset); |
| if (static_type_exactness_state().IsTracking()) { |
| set_static_type_exactness_state(StaticTypeExactnessState::NotExact()); |
| } |
| // Drop any code that relied on the above assumptions. |
| DeoptimizeDependentCode(); |
| } |
| |
| StringPtr Script::resolved_url() const { |
| #if defined(DART_PRECOMPILER) |
| return String::RawCast( |
| WeakSerializationReference::Unwrap(untag()->resolved_url())); |
| #else |
| return untag()->resolved_url(); |
| #endif |
| } |
| |
| bool Script::HasSource() const { |
| return untag()->source() != String::null(); |
| } |
| |
| StringPtr Script::Source() const { |
| return untag()->source(); |
| } |
| |
| bool Script::IsPartOfDartColonLibrary() const { |
| const String& script_url = String::Handle(url()); |
| return (script_url.StartsWith(Symbols::DartScheme()) || |
| script_url.StartsWith(Symbols::DartSchemePrivate())); |
| } |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| void Script::LoadSourceFromKernel(const uint8_t* kernel_buffer, |
| intptr_t kernel_buffer_len) const { |
| String& uri = String::Handle(resolved_url()); |
| String& source = String::Handle(kernel::KernelLoader::FindSourceForScript( |
| kernel_buffer, kernel_buffer_len, uri)); |
| set_source(source); |
| } |
| |
| void Script::InitializeFromKernel( |
| const KernelProgramInfo& info, |
| intptr_t script_index, |
| const TypedData& line_starts, |
| const TypedDataView& constant_coverage) const { |
| StoreNonPointer(&untag()->kernel_script_index_, script_index); |
| untag()->set_kernel_program_info(info.ptr()); |
| untag()->set_line_starts(line_starts.ptr()); |
| untag()->set_debug_positions(Array::null_array().ptr()); |
| NOT_IN_PRODUCT(untag()->set_constant_coverage(constant_coverage.ptr())); |
| } |
| #endif |
| |
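| // Returns line number information encoded as runs of the form
| //   null, <line number>, <token position>, <column>, <token position>, ...
| // where each run starts with a null separator followed by the 1-based line
| // number and then (token position, 1-based column) pairs for that line.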
| GrowableObjectArrayPtr Script::GenerateLineNumberArray() const { |
| Zone* zone = Thread::Current()->zone(); |
| const GrowableObjectArray& info = |
| GrowableObjectArray::Handle(zone, GrowableObjectArray::New()); |
| const Object& line_separator = Object::Handle(zone); |
| if (line_starts() == TypedData::null()) { |
| // Scripts in the AOT snapshot do not have a line starts array. |
| // A well-formed line number array has a leading null. |
| info.Add(line_separator); // New line. |
| return info.ptr(); |
| } |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| Smi& value = Smi::Handle(zone); |
| const TypedData& line_starts_data = TypedData::Handle(zone, line_starts()); |
| intptr_t line_count = line_starts_data.Length(); |
| const Array& debug_positions_array = Array::Handle(debug_positions()); |
| intptr_t token_count = debug_positions_array.Length(); |
| int token_index = 0; |
| |
| kernel::KernelLineStartsReader line_starts_reader(line_starts_data, zone); |
| for (int line_index = 0; line_index < line_count; ++line_index) { |
| intptr_t start = line_starts_reader.At(line_index); |
| // Output the rest of the tokens if we have no next line. |
| intptr_t end = TokenPosition::kMaxSourcePos; |
| if (line_index + 1 < line_count) { |
| end = line_starts_reader.At(line_index + 1); |
| } |
| bool first = true; |
| while (token_index < token_count) { |
| value ^= debug_positions_array.At(token_index); |
| intptr_t debug_position = value.Value(); |
| if (debug_position >= end) break; |
| |
| if (first) { |
| info.Add(line_separator); // New line. |
| value = Smi::New(line_index + 1); // Line number. |
| info.Add(value); |
| first = false; |
| } |
| |
| value ^= debug_positions_array.At(token_index); |
| info.Add(value); // Token position. |
| value = Smi::New(debug_position - start + 1); // Column. |
| info.Add(value); |
| ++token_index; |
| } |
| } |
| #endif // !defined(DART_PRECOMPILED_RUNTIME) |
| return info.ptr(); |
| } |
| |
| TokenPosition Script::MaxPosition() const { |
|